query (stringlengths 9-3.4k) | document (stringlengths 9-87.4k) | metadata (dict) | negatives (sequencelengths 4-101) | negative_scores (sequencelengths 4-101) | document_score (stringlengths 3-10) | document_rank (stringclasses, 102 values) |
---|---|---|---|---|---|---|
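Each row pairs a natural-language query with a positive code document, a list of hard-negative documents, their retrieval scores, and the positive document's score and rank. Below is a minimal sketch of reading one such row; the file name `rows.jsonl` and the JSON Lines layout are assumptions, and only the field names come from the column list above.

```python
import json

# Inspect a single row of the schema described above.
# The file name "rows.jsonl" and the JSON Lines layout are assumptions;
# only the field names come from the column list.
with open("rows.jsonl", "r", encoding="utf-8") as fh:
    row = json.loads(fh.readline())

print(row["query"])                  # natural-language query
print(row["document"][:80])          # positive code document (preview)
print(len(row["negatives"]))         # 4-101 hard-negative code snippets
print(len(row["negative_scores"]))   # one similarity score per negative
print(row["document_score"], row["document_rank"])
```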
return dictionary with extra keys used in model.__init__ | def _get_init_kwds(self):
kwds = dict(((key, getattr(self, key, None))
for key in self._init_keys))
return kwds | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, **kwds ):\n super(Model, self).__init__()\n self.__key = None \n for name, value in kwds.items():\n self[name] = value",
"def to_dict_model(self) -> dict:\n return dict((key, getattr(self, key)) for key in self.__mapper__.c.keys())",
"def extra_from_record(self, record):\n return {\n attr_name: record.__dict__[attr_name]\n for attr_name in record.__dict__\n if attr_name not in BUILTIN_ATTRS\n }",
"def _identifying_params(self) -> Mapping[str, Any]:\n return {**{\"model_name\": self.model_name}, **self._default_params}",
"def model_dict(self) -> dict:\n model_dict = dict()\n for key, value in self.kwargs.items():\n current = model_dict\n if not isinstance(key, tuple):\n key = (key,)\n for part in key[:-1]:\n if part not in current:\n current[part] = dict()\n current = current[part]\n current[key[-1]] = value\n return model_dict",
"def get_modelDict(self):\n return self.__modelDict",
"def record_dict(self):\n return {p.key: getattr(self, p.key) for p in self.__mapper__.attrs}",
"def dict(self) -> Dict:\r\n return super().dict()",
"def dict(self) -> Dict:\r\n return super().dict()",
"def _get_model_state(self) -> dict:\n return dict(model=self.model, kwargs=self._model_kwargs)",
"def attributes(self):\n return dict(self.__attributes)",
"def get_dictionary(self, verbosity='all'):\n return dict([\n # This maps the model to its columns except for id, for which the\n # database mapping and python mapping differ.\n (c.name, str(getattr(self, c.name if c.name != 'id' else 'id_')))\n for c in self.__table__.columns\n ]) if verbosity == 'all' else {}",
"def extra(self) -> Dict[str, Any]:\n extra = self.extras.copy()\n if isinstance(self.author, str):\n extra['Author'] = self.author\n if isinstance(self.email, str):\n extra['Email'] = self.email\n if isinstance(self.description, str):\n extra['Description'] = self.description\n return extra",
"def insertable_dict(self):\n # .strip('_') is for type_\n return {\n 'f_' +\n p.key.strip('_'): getattr(\n self,\n p.key) for p in self.__mapper__.attrs}",
"def _to_request_dict(self):\n return {\"attr1\": self.attr1, \"attr2\": \"test\"}",
"def extra_state_attributes(self):\n return dict(\n self._instrument.attributes,\n model=\"{}/{}\".format(\n self._instrument.vehicle_model, self._instrument.vehicle_name\n ),\n model_year=self._instrument.vehicle_model_year,\n model_family=self._instrument.vehicle_model_family,\n title=self._instrument.vehicle_name,\n csid=self._instrument.vehicle_csid,\n vin=self._instrument.vehicle_vin,\n )",
"def get_attributes(self) -> Dict[str, str]:\n pass",
"def model_attributes(self, app_label, model):\n model_name = model.__name__\n model_name_plural = self.model_name_plural(model)\n slug_field = self.get_unique_slug_field_name(model)\n slug_field_name = slug_field.name if slug_field else \"slug\"\n lookup_field = slug_field_name if slug_field else \"pk\"\n return {\n 'app_label': app_label,\n 'model': model,\n 'model_name': model_name,\n 'model_name_slug': self.camel_to_slug(model_name),\n 'model_name_plural': model_name_plural,\n 'model_name_plural_slug': self.camel_to_slug(model_name_plural),\n 'model_fields': self.get_field_names_for_model(model),\n 'slug_field': slug_field,\n 'slug_field_name': slug_field_name,\n 'lookup_field': lookup_field\n }",
"def _identifying_params(self) -> dict[str, Any]:\n return {**{\"model_path\": self.model_path}, **self._default_params}",
"def _extra_keys(self):\r\n return []",
"def extra_state_attributes(self) -> dict[str, Any]:\n ret = {\n ATTR_SOURCE: self._source_entity_id,\n ATTR_COEFFICIENTS: self._coefficients,\n }\n if self._source_attribute:\n ret[ATTR_SOURCE_ATTRIBUTE] = self._source_attribute\n return ret",
"def attributes(self):\n return { k: getattr(self, k) for k in self.__class__.columns().keys() }",
"def get_model_meta_info(model_name):\n return dict(dict(model_meta_info)[model_name])",
"def extra_state_attributes(self) -> dict[str, Any]:\n data = {ATTR_ENTITY_ID: self.tracking, ATTR_ORDER: self._order}\n if not self.user_defined:\n data[ATTR_AUTO] = True\n\n return data",
"def serialize(self) -> dict:\n\n attributes = self.include or inspect(self).attrs.keys()\n return {c: getattr(self, c) for c in attributes}",
"def __dict__(self):\r\n return",
"def insertable_dict(self):\n\n d = {p.key: getattr(self, p.key) for p in self.__mapper__.attrs if p.key not in ('table', 'stats', '_codes')}\n\n x = {('c_' + k).strip('_'): v for k, v in d.items()}\n\n return x",
"def extra_state_attributes(self) -> dict[str, Any]:\n return {\n ATTR_LAST_UPDATE: self.coordinator.data.now.date,\n ATTR_SENSOR_ID: self.entity_description.key,\n ATTR_SITE_ID: self.coordinator.data.site.id,\n ATTR_SITE_NAME: self.coordinator.data.site.name,\n }",
"def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:\n all_required_field_names = {field.alias for field in cls.__fields__.values()}\n\n extra = values.get(\"model_kwargs\", {})\n for field_name in list(values):\n if field_name not in all_required_field_names:\n if field_name in extra:\n raise ValueError(f\"Found {field_name} supplied twice.\")\n logger.warning(\n f\"\"\"WARNING! {field_name} is not default parameter.\n {field_name} was transfered to model_kwargs.\n Please confirm that {field_name} is what you intended.\"\"\"\n )\n extra[field_name] = values.pop(field_name)\n values[\"model_kwargs\"] = extra\n return values",
"def __dict__(self):\n return {TREE: self._tree,\n MATRIX: self._matrix,\n TREE_FEATURES: self._tree_features,\n MATRIX_FEATURES: self._matrix_features,\n ID: self._id,\n METADATA: self._metadata}",
"def get_attributes(self):\n \n retdict = {}\n retdict['s'] = str(self.s)\n if self.t != None:\n retdict['t'] = str(self.t)\n retdict['a'] = str(self.a)\n retdict['b'] = str(self.b)\n retdict['c'] = str(self.c)\n retdict['d'] = str(self.d)\n return retdict",
"def secondary_keys_dicts(self):",
"def _get_base_dict(self):\n res = dict(\n task=self._task,\n timestamp=self._timestamp,\n metric=self._metric,\n variant=self._variant\n )\n if self._iter is not None:\n res.update(iter=self._iter)\n if self._model_event is not None:\n res.update(model_event=self._model_event)\n return res",
"def _base_attrs(self, service):\n keys = ['name', 'desc', 'url']\n return {name:getattr(service, name, None) for name in keys}",
"def _serialize_attributes_as_kwargs(self) -> Dict[str, Any]:\n if self._constructed_manually:\n raise UnsupportedError(\n \"Surrogates constructed manually (ie Surrogate.from_botorch) may not \"\n \"be serialized. If serialization is necessary please initialize from \"\n \"the constructor.\"\n )\n\n return {\n \"botorch_model_class\": self.botorch_model_class,\n \"model_options\": self.model_options,\n \"mll_class\": self.mll_class,\n \"mll_options\": self.mll_options,\n \"outcome_transform\": self.outcome_transform,\n \"input_transform\": self.input_transform,\n \"covar_module_class\": self.covar_module_class,\n \"covar_module_options\": self.covar_module_options,\n \"likelihood_class\": self.likelihood_class,\n \"likelihood_options\": self.likelihood_options,\n \"allow_batched_models\": self.allow_batched_models,\n }",
"def as_dict(self):\n return {c.key: getattr(self, c.key)\n for c in inspect(self).mapper.column_attrs}",
"def extra_state_attributes(self) -> dict[str, Any]:\n return {\n \"zone_idx\": self._device.idx,\n \"heating_type\": self._device.heating_type,\n \"mode\": self._device.mode,\n \"config\": self._device.config,\n **super().extra_state_attributes,\n \"schedule\": self._device.schedule,\n \"schedule_version\": self._device.schedule_version,\n }",
"def extra_state_attributes(self):\n return {attr: getattr(self, '_' + prop)\n for attr, prop in ATTRIBUTES_IRHVAC.items()}",
"def _kwargs(self):\n dict = {\"name\":self.name}\n return dict",
"def __init__(self, **kwargs):\n super(Model, self).__init__(**kwargs)\n\n for (key, value) in kwargs.iteritems():\n # use setattr so that validation is triggered\n setattr(self, key, value)",
"def get_kwargs(self):\n return {}",
"def extra_state_attributes(self) -> dict[str, Any]:\n return self._attributes",
"def to_dict(self):\n _, base_dict = super(ResultFigure, self).to_dict(excludes=['id', 'fk_from_operation', 'fk_for_user',\n 'fk_in_project', 'operation', 'project'])\n base_dict['fk_from_operation'] = self.operation.gid\n base_dict['fk_in_project'] = self.project.gid\n return self.__class__.__name__, base_dict",
"def extra_state_attributes(self):\n state_attr = {}\n if self.vendor_id is not None:\n state_attr[ATTR_VENDOR_ID] = self.vendor_id\n state_attr[ATTR_VENDOR_NAME] = self.vendor_name\n if self.type_id is not None:\n state_attr[ATTR_TYPE_ID] = self.type_id\n state_attr[ATTR_TYPE] = self.type\n if self.physical_address is not None:\n state_attr[ATTR_PHYSICAL_ADDRESS] = self.physical_address\n return state_attr",
"def dict(self):\n d = {p.key: getattr(self,p.key) for p in self.__mapper__.attrs\n if p.key not in ('table','stats','_codes', 'data')}\n\n if not d:\n raise Exception(self.__dict__)\n\n d['schema_type'] = self.schema_type\n\n if self.data:\n # Copy data fields into top level dict, but don't overwrite existind values.\n for k, v in self.data.items() :\n if k not in d and k not in ('table','stats','_codes', 'data'):\n d[k] = v\n\n return d",
"def getCustomDict(self):\n return self.custom",
"def original_dict(self):\n return self.obj.__dict__",
"def get_dict(self):\n return",
"def to_dict(self):\n ret = {}\n for key in dir(self):\n if key.startswith(\"_\"):\n continue\n\n if key in ['id', 'objects', 'pk', 'STRICT']:\n continue\n\n obj = getattr(self, key)\n if callable(obj):\n continue\n ret[key] = obj\n return ret",
"def get_data_to_create_object(self):\n return {}",
"def __getstate__(self):\n result_dict = {}\n for pickle_attr in self.pickle_attrs:\n result_dict[pickle_attr] = self.__dict__[pickle_attr]\n return result_dict",
"def model_dict(self):\n model_dict = {}\n model_dict[\"model_type\"] = self.model_type\n model_dict[\"num_features\"] = self.num_features\n model_dict[\"num_classes\"] = self.num_classes\n model_dict[\"normalize\"] = self.normalize\n model_dict[\"reparam_mode\"] = self.reparam_mode\n model_dict[\"prior_mode\"] = self.prior_mode\n model_dict[\"struct_dropout_mode\"] = self.struct_dropout_mode\n model_dict[\"dropout\"] = self.dropout\n model_dict[\"latent_size\"] = self.latent_size\n model_dict[\"sample_size\"] = self.sample_size\n model_dict[\"num_layers\"] = self.num_layers\n model_dict[\"with_relu\"] = self.with_relu\n model_dict[\"val_use_mean\"] = self.val_use_mean\n model_dict[\"reparam_all_layers\"] = self.reparam_all_layers\n model_dict[\"state_dict\"] = to_cpu_recur(self.state_dict())\n return model_dict",
"def as_dict(self):\n result = {}\n for attr in self.__attr:\n result[attr] = getattr(self, attr)\n return result",
"def serialize(self):\n return {\n 'name' : self.name,\n 'id' : self.id,\n }",
"def extra_state_attributes(self) -> dict[str, Any]:\n return {\n \"heat_demand\": self._device.heat_demand,\n \"heat_demands\": self._device.heat_demands,\n \"relay_demands\": self._device.relay_demands,\n \"system_mode\": self._device.system_mode,\n \"tpi_params\": self._device.tpi_params,\n # \"faults\": self._device.faultlog,\n }",
"def to_dict(self) -> Dict:\n _dict = {}\n if hasattr(self, 'type') and self.type is not None:\n _dict['type'] = self.type\n if hasattr(self, 'updates') and self.updates is not None:\n _dict['updates'] = self.updates\n return _dict",
"def serialize(self):\n return {\n 'id' : self.id,\n 'name' : self.name,\n }",
"def serialize(self):\n return {\n 'id' : self.id,\n 'name' : self.name,\n }",
"def test_before_todict(self):\n b1 = BaseModel()\n b1_dict = b1.__dict__\n self.assertEqual(type(b1).__name__, \"BaseModel\")\n self.assertTrue(hasattr(b1, '__class__'))\n self.assertEqual(str(b1.__class__),\n \"<class 'models.base_model.BaseModel'>\")\n self.assertTrue(type(b1_dict['created_at']), 'datetime.datetime')\n self.assertTrue(type(b1_dict['updated_at']), 'datetime.datetime')\n self.assertTrue(type(b1_dict['id']), 'str')",
"def get_save_meta(self):\n return {}",
"def to_dict(self):\n excluded_keys = ['idx', 'json', 'identifier']\n keys_to_store = {\n key for key in self.__dict__\n if key in self._included_attr or (\n key not in excluded_keys and\n key not in self._excluded_attr and\n not (key.startswith('_') and self._exclude_private_attr)\n )\n }\n return {\n key: self.__dict__[key] for key in keys_to_store\n }",
"def __load(self) -> Dict:\n return dict()",
"def attrs(self):\n return self.__dict__",
"def as_dict(self):\n return {c.name: getattr(self, c.name) for c in self.__table__.columns}",
"def as_dict(self):\n return {c.name: getattr(self, c.name) for c in self.__table__.columns}",
"def as_dict(self):\n return {c.name: getattr(self, c.name) for c in self.__table__.columns}",
"def as_dict(self):\n return {c.name: getattr(self, c.name) for c in self.__table__.columns}",
"def attributes(self):\n params = self.model.param_array\n return {'parameters': params}",
"def get_attributes(self):\n return dict(self.attributes) # return the attributes",
"def get_attributes(self):\n return dict(self.attributes) # return the attributes",
"def get_attributes(self):\n return dict(self.attributes) # return the attributes",
"def as_dict(self):\n return self.__dict__",
"def meta_data(self) -> Dict:\n pass",
"def get_parameter_dict(self):\n prm = ModelParameters()\n prm.define(\"a\", self.a)\n return prm",
"def _localWhatDoINeed(self):\n needDict = super()._localWhatDoINeed()\n\n return needDict",
"def as_dict(self):\n d = {}\n for k in self._kwargs:\n d[k] = getattr(self, k)\n return d",
"def serialize(self):\n return {c.name: getattr(self, c.name) for c in self.__table__.columns}",
"def get_initial(self):\n if self.fields is None:\n return self.object.__dict__.copy()\n else:\n return {field:getattr(self.object,field,'') for field in self.fields}",
"def getFeatureDicts(self):\n feature_dicts = super().getFeatureDicts()\n feature_dicts.extend([self.__suffixes, self.__prefixes, self.__tags, self.__numbers, self.__caps, self.__caps_no_start])\n return feature_dicts",
"def to_dict(self):\n return {key: getattr(self, key) for key in self.keys}",
"def extra_state_attributes(self):\n return {\n ATTR_SOURCE_ENTITY: self._source_entity,\n ATTR_SOURCE_DOMAIN: self._source_domain,\n }",
"def test_basemodel_kwargs_to_dict(self):\n B1 = BaseModel()\n dict = B1.to_dict()\n B2 = BaseModel(**dict)\n self.assertEqual(B1.id, B1.id)\n self.assertEqual(B1.created_at, B2.created_at)\n self.assertEqual(B1.updated_at, B2.updated_at)\n self.assertNotEqual(B1, B2)",
"def test_basedict2(self):\n tester = BaseModel()\n self.assertIn(\"id\", tester.to_dict())\n self.assertIn(\"created_at\", tester.to_dict())\n self.assertIn(\"updated_at\", tester.to_dict())",
"def attributes(self):\n return {\n 'parameters': {\n 'coef_': self.model.coef_.tolist(),\n 'intercept_': self.model.intercept_.tolist(),\n 'n_iter': self.model.n_iter_.tolist()\n }\n }",
"def as_dict(self) -> dict:\n return dict(vars(self))",
"def init_attrs(self):\n raise NotImplementedError",
"def _get_model_state(self) -> dict:\n raise NotImplementedError",
"def to_dict(self):\n\n dic = dict(**self.__dict__)\n dic['__class__'] = str(type(self).__name__)\n dic['created_at'] = self.created_at.isoformat()\n dic['updated_at'] = self.updated_at.isoformat()\n return (dic)",
"def to_dict(self):",
"def to_dict(self) -> Dict:\n _dict = {}\n if hasattr(self, 'name') and self.name is not None:\n _dict['name'] = self.name\n if hasattr(self, 'description') and self.description is not None:\n _dict['description'] = self.description\n if hasattr(self, 'address') and self.address is not None:\n _dict['address'] = self.address\n if hasattr(self, 'enabled') and self.enabled is not None:\n _dict['enabled'] = self.enabled\n return _dict",
"def to_dict(self):\n self.created_at = datetime.now()\n self.updated_at = datetime.now()\n self.__dict__['__class__'] = self.__class__.__name__\n return self.__dict__",
"def serialize(self):\n return {\n\n\n }",
"def _get_all_attributes(self) -> Dict[str, Any]:\n all_attributes = self.__dict__.copy()\n all_attributes.update(self.class_attributes)\n return all_attributes",
"def get_info(self):\n return {}",
"def as_dict(self):\n\n return {c.name: getattr(self, c.name) for c in self.__table__.columns}",
"def to_dict(self):\n d = {}\n for attr in self.__class__.attributes:\n d[attr] = getattr(self, attr)\n return d",
"def to_dict(self) -> Dict:\n _dict = {}\n if hasattr(self, 'id') and self.id is not None:\n _dict['id'] = self.id\n if hasattr(self, 'name') and self.name is not None:\n _dict['name'] = self.name\n if hasattr(self, 'description') and self.description is not None:\n _dict['description'] = self.description\n if hasattr(self, 'enabled') and self.enabled is not None:\n _dict['enabled'] = self.enabled\n if hasattr(self, 'healthy_origins_threshold') and self.healthy_origins_threshold is not None:\n _dict['healthy_origins_threshold'] = self.healthy_origins_threshold\n if hasattr(self, 'origins') and self.origins is not None:\n _dict['origins'] = [x.to_dict() for x in self.origins]\n if hasattr(self, 'monitor') and self.monitor is not None:\n _dict['monitor'] = self.monitor\n if hasattr(self, 'notification_channel') and self.notification_channel is not None:\n _dict['notification_channel'] = self.notification_channel\n if hasattr(self, 'health') and self.health is not None:\n _dict['health'] = self.health\n if hasattr(self, 'healthcheck_region') and self.healthcheck_region is not None:\n _dict['healthcheck_region'] = self.healthcheck_region\n if hasattr(self, 'healthcheck_subnets') and self.healthcheck_subnets is not None:\n _dict['healthcheck_subnets'] = self.healthcheck_subnets\n if hasattr(self, 'created_on') and self.created_on is not None:\n _dict['created_on'] = self.created_on\n if hasattr(self, 'modified_on') and self.modified_on is not None:\n _dict['modified_on'] = self.modified_on\n return _dict",
"def to_dictionary(self):\n dic = {}\n ls = ['id', 'size', 'x', 'y']\n for i in ls:\n dic[i] = getattr(self, i)\n return dic",
"def to_dict(self):\n my_dict = self.__dict__.copy()\n my_dict[\"__class__\"] = self.__class__.__name__\n my_dict[\"created_at\"] = datetime.isoformat(self.created_at)\n my_dict[\"updated_at\"] = datetime.isoformat(self.updated_at)\n return my_dict",
"def serialize(self):\n return{\n 'name':self.name,\n 'id' :self.id,\n }",
"def ExtraInfo(self) -> object:"
] | [
"0.68339634",
"0.6825465",
"0.6760999",
"0.67576766",
"0.67556274",
"0.67232966",
"0.6719624",
"0.6663634",
"0.6663634",
"0.6660793",
"0.66108835",
"0.65923446",
"0.657657",
"0.65584314",
"0.64814067",
"0.6439023",
"0.6415135",
"0.6399043",
"0.638495",
"0.6370132",
"0.63672423",
"0.6352319",
"0.63487417",
"0.63364106",
"0.6328213",
"0.63276404",
"0.63258183",
"0.63139045",
"0.6301814",
"0.62803453",
"0.62798035",
"0.62714905",
"0.6268679",
"0.6264617",
"0.62584525",
"0.6252736",
"0.6249344",
"0.62406135",
"0.62313664",
"0.62258506",
"0.6216815",
"0.62114394",
"0.6205303",
"0.61965454",
"0.6190555",
"0.61865926",
"0.61842585",
"0.6174351",
"0.61730623",
"0.6169238",
"0.6156683",
"0.6155653",
"0.6145392",
"0.61412907",
"0.6136291",
"0.61340404",
"0.61339533",
"0.61339533",
"0.61246544",
"0.6123307",
"0.6122803",
"0.6113399",
"0.61085117",
"0.6092749",
"0.6092749",
"0.6092749",
"0.6092749",
"0.6086743",
"0.6074881",
"0.6074881",
"0.6074881",
"0.6072683",
"0.6065549",
"0.6054815",
"0.6051693",
"0.60426974",
"0.6041597",
"0.60331535",
"0.6029998",
"0.6028061",
"0.60258454",
"0.6023925",
"0.6019176",
"0.60131514",
"0.6006442",
"0.5999679",
"0.599777",
"0.59977543",
"0.5982141",
"0.5980255",
"0.59796906",
"0.59795684",
"0.5977387",
"0.5975617",
"0.5974931",
"0.59690344",
"0.5962915",
"0.5962777",
"0.5959503",
"0.59568936",
"0.59540784"
] | 0.0 | -1 |
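The "objective" metadata marks each row as a (query, document, negatives) triplet, which is how a row like the one above would typically be consumed for contrastive training. Below is a minimal sketch of unpacking a row into such triplets; the helper name `row_to_triplets` and the choice to keep the highest-scored (hardest) negatives first are illustrative assumptions, not part of the dataset.

```python
from typing import Dict, List, Tuple

def row_to_triplets(row: Dict, max_negatives: int = 4) -> List[Tuple[str, str, str]]:
    """Pair the query and its positive document with each hard negative."""
    # Sort negatives by score so the highest-scored (hardest) come first.
    ranked = sorted(
        zip(row["negatives"], (float(s) for s in row["negative_scores"])),
        key=lambda pair: pair[1],
        reverse=True,
    )
    return [(row["query"], row["document"], neg) for neg, _ in ranked[:max_negatives]]
```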
Create a Model from a formula and dataframe. | def from_formula(cls, formula, data, subset=None,
drop_cols=None, *args, **kwargs):
# TODO: provide a docs template for args/kwargs from child models
# TODO: subset could use syntax. GH#469.
if subset is not None:
data = data.loc[subset]
eval_env = kwargs.pop('eval_env', None)
if eval_env is None:
eval_env = 2
elif eval_env == -1:
from patsy import EvalEnvironment
eval_env = EvalEnvironment({})
else:
eval_env += 1 # we're going down the stack again
missing = kwargs.get('missing', 'drop')
if missing == 'none': # with patsy it's drop or raise. let's raise.
missing = 'raise'
tmp = handle_formula_data(data, None, formula, depth=eval_env,
missing=missing)
((endog, exog), missing_idx, design_info) = tmp
if drop_cols is not None and len(drop_cols) > 0:
# TODO: not hit in tests
cols = [x for x in exog.columns if x not in drop_cols]
if len(cols) < len(exog.columns):
exog = exog[cols]
cols = list(design_info.term_names)
for col in drop_cols:
try:
cols.remove(col)
except ValueError:
pass # OK if not present
design_info = design_info.subset(cols)
kwargs.update({'missing_idx': missing_idx,
'missing': missing,
'formula': formula, # attach formula for unpickling
'design_info': design_info})
mod = cls(endog, exog, *args, **kwargs)
mod.formula = formula
# since we got a dataframe, attach the original
mod.data.frame = data
return mod | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_formula():\n config = {\"samples\": {\"x1\": onp.ones((2, 10)), \"x2\": onp.ones((2, 10))}}\n\n class Model(Poisson):\n dv = \"y\"\n features = dict(\n x1=dict(transformer=1, prior=dist.Normal(0, 1)),\n x2=dict(transformer=2, prior=dist.Normal(0, 1)),\n )\n\n model = Model.from_dict(config)\n formula = model.formula\n expected = \"y = exp(\\n x1 * 1.00000(+-0.00000)\\n + x2 * 1.00000(+-0.00000)\\n)\"\n assert formula == expected",
"def convert(self, df):\n return convert_df_to_model(\n model_type=self.model_type, df=df,\n outcome_variables=self.outcome_variables,\n fixed_effects=self.fixed_effects,\n random_effect=self.random_effect,\n spline=self.spline,\n offset=self.offset,\n weight=self.weight\n )",
"def _create_model(self):\n\n model_formula = self.get_model_formula()\n\n removed_observation_index = self._model_dataset.index.isin(self._excluded_observations)\n\n # TODO: Handle error that occurs when all model observations are invalid\n model = smf.ols(model_formula,\n data=self._model_dataset,\n subset=~removed_observation_index,\n missing='drop')\n\n self._model = model",
"def fit(self, df):\n try:\n df = df.astype(float)\n except Exception:\n raise ValueError(\"Data Cannot Be Converted to Numeric Float\")\n\n y = df.values\n if y.shape[1] == 1:\n y = y.ravel()\n X = date_part(df.index, method=self.datepart_method)\n from autots.models.sklearn import retrieve_regressor\n\n multioutput = True\n if y.ndim < 2:\n multioutput = False\n elif y.shape[1] < 2:\n multioutput = False\n self.model = retrieve_regressor(\n regression_model=self.regression_model,\n verbose=0,\n verbose_bool=False,\n random_seed=2020,\n multioutput=multioutput,\n )\n self.model = self.model.fit(X, y)\n self.shape = df.shape\n return self",
"def build_model_fn(self):",
"def __init__(self, x_function, x_derivative, data_f, data_df, a):\n self.x_function = x_function\n self.x_derivative = x_derivative\n self.data_f = data_f\n self.data_df = data_df\n self.a = a\n self.linear_model = LinearModel(self.x_function, self.x_derivative)",
"def build_model():",
"def get_trained_model(dataframe, features, target, method='logistic'):\n if method == 'logistic':\n model = LogisticRegression()\n model.fit(dataframe[features], dataframe[target])\n return model\n else:\n raise NotImplementedError",
"def eval(self, df):\n ## Check invariant; model inputs must be subset of df columns\n if not set(self.var).issubset(set(df.columns)):\n raise ValueError(\n \"Model function `{}` var not a subset of given columns\".format(\n self.name\n )\n )\n\n ## Set up output\n n_rows = df.shape[0]\n results = zeros((n_rows, len(self.out)))\n\n for ind in range(n_rows):\n results[ind] = self.func(*df.loc[ind, self.var])\n\n ## Package output as DataFrame\n return DataFrame(data=results, columns=self.out)",
"def build_model(self, X: pd.DataFrame, y: pd.DataFrame = None) -> pm.Model:\n idx = X.index\n \n if y is None:\n y = pd.Series(0, index=idx)\n elif self.oversample: # only if y is given\n n_pos = (y == 1).sum()\n n_neg = (y == 0).sum()\n to_add = int(np.ceil(n_neg/n_pos) - 1)\n # print(n_pos, n_neg, to_add)\n if to_add > 4:\n to_add = 4\n for i in range(to_add):\n idx = idx.append(y[y==1].index)\n X = X.loc[idx]\n y = y.loc[idx]\n \n A = X[self.v_known + self.v_oob_bio]\n B_vals = X[self.v_fuzzy]\n B_mask = (B_vals == -1).astype(int)\n C_raw = X[self.v_float_adm + self.v_float_bio]\n # C_scaled = (C_raw - self.C_mean_) / self.C_std_ \n C_scaled = np.log1p(C_raw/self.C_mean_)\n C_scaled[~np.isfinite(C_scaled)] = np.nan\n C_vals = C_scaled.fillna(0)\n C_mask = C_scaled.isnull().astype(int)\n \n coords = {\"idx\": idx, \"a\": A.columns, \"b\": B_vals.columns, \"c\": C_vals.columns}\n with pm.Model(coords=coords) as m:\n pm.Data(\"A\", A, dims=[\"idx\", \"a\"])\n pm.Data(\"B_vals\", B_vals, dims=[\"idx\", \"b\"])\n pm.Data(\"B_mask\", B_mask, dims=[\"idx\", \"b\"])\n pm.Data(\"C_vals\", C_vals, dims=[\"idx\", \"c\"])\n pm.Data(\"C_mask\", C_mask, dims=[\"idx\", \"c\"])\n pm.Data(\"y\", y, dims=[\"idx\"])\n\n pm.Normal(\"avg\", mu=0, sd=1)\n\n pm.Beta(\"h_a_incl\", alpha=1, beta=4)\n pm.Normal(\"a_coef_raw\", mu=0, sd=1, dims=[\"a\"])\n pm.Bernoulli(\"a_incl\", p=m[\"h_a_incl\"], dims=[\"a\"])\n pm.Deterministic(\"a_coef\", m['a_coef_raw'] * m['a_incl'], dims=[\"a\"])\n \n pm.Normal(\"b_vals_coef\", mu=0, sd=1, dims=[\"b\"])\n pm.Normal(\"b_mask_coef_raw\", mu=0, sd=1, dims=[\"b\"])\n pm.Beta(\"h_b_mask_incl\", alpha=1, beta=4)\n pm.Bernoulli(\"b_mask_incl\", p=m[\"h_b_mask_incl\"], dims=[\"b\"])\n pm.Deterministic(\"b_mask_coef\", m['b_mask_coef_raw'] * m['b_mask_incl'], dims=[\"b\"])\n \n pm.Normal(\"c_vals_coef\", mu=0, sd=1, dims=[\"c\"])\n pm.Normal(\"c_mask_coef_raw\", mu=0, sd=1, dims=[\"c\"])\n pm.Beta(\"h_c_mask_incl\", alpha=1, beta=4)\n pm.Bernoulli(\"c_mask_incl\", p=m[\"h_c_mask_incl\"], dims=[\"c\"])\n pm.Deterministic(\"c_mask_coef\", m['c_mask_coef_raw'] * m['c_mask_incl'], dims=[\"c\"])\n unprob = pm.Deterministic(\n \"logit\",\n m['avg']\n + tt.dot(m[\"A\"], m[\"a_coef\"])\n + tt.dot(m[\"B_vals\"] * (1 - m['B_mask']), m[\"b_vals_coef\"])\n + tt.dot(m[\"B_mask\"], m[\"b_mask_coef\"])\n + tt.dot(m[\"C_vals\"] * (1 - m['C_mask']), m[\"c_vals_coef\"])\n + tt.dot(m[\"C_mask\"], m[\"c_mask_coef\"])\n )\n pm.Bernoulli(\"y_pred\", p = tt.nnet.sigmoid(unprob), dims=['idx'], observed=m['y'])\n\n m.graph = pm.model_to_graphviz()\n\n return m",
"def ml_df(df, parameters, t_size, model = DecisionTreeRegressor()):\n ndf = df[parameters]\n x = ndf.loc[:, ndf.columns != 'T_exp']\n y = ndf['T_exp']\n x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=t_size)\n model = model\n p = PolynomialFeatures(degree = 2)\n X_poly = p.fit_transform(x_train)\n X_poly_test = p.fit_transform(x_test)\n model.fit(X_poly,y_train)\n y_train_pred = model.predict(X_poly)\n y_test_pred = model.predict(X_poly_test)\n result = pd.DataFrame()\n result['T_exp'] = y_test\n result['T_prd'] = y_test_pred\n result['ratio'] = result['T_exp']/result['T_prd']\n return result",
"async def _build_model(\n self,\n data: Timeseries\n ) -> Prophet:\n model = Prophet()\n model.fit(data.get_dataframe())\n return model",
"def from_formula(cls, formula, data, re_formula=None, subset=None,\n *args, **kwargs):\n\n if \"groups\" not in kwargs.keys():\n raise AttributeError(\"'groups' is a required keyword argument in MixedLM.from_formula\")\n\n # If `groups` is a variable name, retrieve the data for the\n # groups variable.\n if type(kwargs[\"groups\"]) == str:\n kwargs[\"groups\"] = np.asarray(data[kwargs[\"groups\"]])\n\n if re_formula is not None:\n eval_env = kwargs.get('eval_env', None)\n if eval_env is None:\n eval_env = 1\n elif eval_env == -1:\n from patsy import EvalEnvironment\n eval_env = EvalEnvironment({})\n exog_re = patsy.dmatrix(re_formula, data, eval_env=eval_env)\n exog_re_names = exog_re.design_info.column_names\n exog_re = np.asarray(exog_re)\n else:\n exog_re = np.ones((data.shape[0], 1),\n dtype=np.float64)\n exog_re_names = [\"Intercept\"]\n\n mod = super(MixedLM, cls).from_formula(formula, data,\n subset=None,\n exog_re=exog_re,\n *args, **kwargs)\n\n # expand re names to account for pairs of RE\n (param_names,\n exog_re_names,\n exog_re_names_full) = mod._make_param_names(exog_re_names)\n mod.data.param_names = param_names\n mod.data.exog_re_names = exog_re_names\n mod.data.exog_re_names_full = exog_re_names_full\n\n return mod",
"def model(data_x, parameters):\n return data_x @ parameters",
"def create_model():\n\n class Net(nn.Cell):\n def construct(self, x, y):\n return x\n\n net = Net()\n model_simple = Model(net)\n\n return model_simple",
"def fit(self, df):\n try:\n df = df.astype(float)\n except Exception:\n raise ValueError(\"Data Cannot Be Converted to Numeric Float\")\n\n Y = df.to_numpy()\n X = pd.to_numeric(df.index, errors='coerce', downcast='integer').to_numpy()\n if self.model == 'GLS':\n from statsmodels.regression.linear_model import GLS\n\n self.trained_model = GLS(Y, X, missing='drop').fit()\n else:\n self.trained_model = self._retrieve_detrend(detrend=self.model)\n if self.model in self.need_positive:\n self.trnd_trans = PositiveShift(\n log=False, center_one=True, squared=False\n )\n Y = pd.DataFrame(self.trnd_trans.fit_transform(df)).to_numpy()\n X = X.reshape((-1, 1))\n self.trained_model.fit(X, Y)\n self.shape = df.shape\n return self",
"def train_and_eval(self):\n self.__create_indexes()\n model = None\n model = None\n if self.model == 'OMult':\n model = OMult(self.kwargs)\n elif self.model == 'ConvO':\n model = ConvO(self.kwargs)\n elif self.model == 'QMult':\n model = QMult(self.kwargs)\n elif self.model == 'ConvQ':\n model = ConvQ(self.kwargs)\n elif self.model == 'OMultBatch':\n model = OMultBatch(self.kwargs)\n elif self.model == 'ConvOBatch':\n model = ConvOBatch(self.kwargs)\n elif self.model == 'QMultBatch':\n model = QMultBatch(self.kwargs)\n elif self.model == 'ConvQBatch':\n model = ConvQBatch(self.kwargs)\n else:\n print(self.model, ' is not valid name')\n raise ValueError\n\n self.train(model)\n self.eval(model)",
"def from_dataframe(cls, dataframe):\n return cls(dataframe)",
"def makeCalc(self, dataSet):\n\n #cyl = sasmodels.core.load_model_info('cylinder')\n #hs = sasmodels.core.load_model_info('hardsphere')\n #cylhs = sasmodels.core.load_model_info('cylinder@hardsphere')\n cylhmsa = sasmodels.core.load_model_info('cylinder@hayter_msa')\n\n # Build using c version instead of python. Avoids pyopencl\n model = sasmodels.core.build_model(cylhmsa, platform='dll')\n self.calculator = sasmodels.direct_model.DirectModel(dataSet, model)\n\n return",
"def build_numeric_model(movie_df):\n import statsmodels.formula.api as smf\n #build a multivariate reg model\n linmodel_multi_f = smf.ols(formula='domestic_gross ~ opening_per_theater + opening_weekend_take + production_budget + widest_release + worldwide_gross', data=movie_df).fit()\n linmodel_multi_f.summary()",
"def eval_input_fn(df):\n fts = df.drop(columns=['class'])\n labs = df.filter(items=['class']).values.astype(int)\n\n features = {k:list(v.values) for k,v in fts.items()}\n features = dict(features)\n x = fts.values\n x = np.array([[x]]).reshape((np.shape(x)[0], np.shape(x)[1], 1, 1))\n # Convert the inputs to a Dataset.\n dataset = tf.data.Dataset.from_tensor_slices({\"x_ph\":x,\"y_ph\":convert_to_one_hot(labs)})\n \n # Shuffle, repeat, and batch the examples.\n dataset = dataset.shuffle(1000).batch(np.shape(x)[0]).repeat()\n # Return the read end of the pipeline.\n return dataset.make_one_shot_iterator().get_next()",
"def parseL3FormulaWithModel(*args):\n return _libsbml.parseL3FormulaWithModel(*args)",
"def convert_to_model(self, *args):",
"def create_model(data, cont, cat, target): \n\n cont_features = '+'.join(cont)\n\n cat_features = '+'.join([f'C({x})' for x in cat])\n\n f = f'{target}~+{cont_features}+{cat_features}'\n\n print(f)\n\n model = smf.ols(formula=f, data=data).fit()\n \n diagnose_model(model)\n \n return model",
"def predict(self, load_script=False, variant=\"predict\"):\n \n # Interpret the request data based on the expected row and column structure\n row_template = ['strData', 'strData']\n col_headers = ['model_name', 'n_features']\n feature_col_num = 1\n \n # An additional key field column is expected if the call is made through the load script\n if load_script:\n row_template = ['strData', 'strData', 'strData']\n col_headers = ['model_name', 'key', 'n_features']\n feature_col_num = 2\n \n # Create a Pandas Data Frame for the request data\n self.request_df = utils.request_df(self.request, row_template, col_headers)\n \n # Initialize the persistent model\n self.model = PersistentModel()\n \n # Get the model name from the request dataframe\n self.model.name = self.request_df.loc[0, 'model_name']\n \n # Get the model from cache or disk\n self._get_model()\n \n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(3)\n \n if load_script:\n # Set the key column as the index\n self.request_df.set_index(\"key\", drop=False, inplace=True)\n \n try:\n # Split the features provided as a string into individual columns\n self.X = pd.DataFrame([x[feature_col_num].split(\"|\") for x in self.request_df.values.tolist()],\\\n columns=self.model.features_df.loc[:,\"name\"].tolist(),\\\n index=self.request_df.index)\n except AssertionError as ae:\n err = \"The number of input columns do not match feature definitions. Ensure you are using the | delimiter and that the target is not included in your input to the prediction function.\"\n raise AssertionError(err) from ae\n \n # Convert the data types based on feature definitions \n self.X = utils.convert_types(self.X, self.model.features_df, sort=False)\n\n if variant in ('predict_proba', 'predict_log_proba'):\n # If probabilities need to be returned\n if variant == 'predict_proba':\n # Get the predicted probability for each sample \n self.y = self.model.pipe.predict_proba(self.X)\n elif variant == 'predict_log_proba':\n # Get the log probability for each sample\n self.y = self.model.pipe.predict_log_proba(self.X)\n \n # Prepare a list of probability by class for each sample\n probabilities = []\n\n for a in self.y:\n s = \"\"\n i = 0\n for b in a:\n s = s + \", {0}: {1:.3f}\".format(self.model.pipe.named_steps['estimator'].classes_[i], b)\n i = i + 1\n probabilities.append(s[2:])\n \n self.y = probabilities\n \n else:\n # Predict y for X using the previously fit pipeline\n self.y = self.model.pipe.predict(self.X)\n\n # Inverse transformations on the targets if required\n if self.model.scale_target or self.model.make_stationary:\n # Apply the transformer to the test targets\n self.y = self.model.target_transformer.inverse_transform(self.y) \n\n # Prepare the response\n self.response = pd.DataFrame(self.y, columns=[\"result\"], index=self.X.index)\n \n if load_script:\n # Add the key field column to the response\n self.response = self.request_df.join(self.response).drop(['n_features'], axis=1)\n \n # If the function was called through the load script we return a Data Frame\n self._send_table_description(\"predict\")\n \n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(4)\n \n return self.response\n \n # If the function was called through a chart expression we return a Series\n else:\n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(4)\n \n 
return self.response.loc[:,'result']",
"def __init__(self, df, y_list, X_list, *,\r\n alpha=0.05, printing=True):\r\n # Model inputs (attributes from arguments):\r\n self._df = df\r\n [y_name] = y_list # sequence unpacking in order to make Series\r\n self._y = df[y_name] # Pandas Series\r\n if len(X_list) == 1:\r\n [x_name] = X_list\r\n self._X = df[x_name].to_frame() # Pandas dataframe\r\n else:\r\n self._X = df[X_list] # Pandas dataframe\r\n self._y_list, self._X_list = y_list, X_list\r\n self._alpha = alpha\r\n self._is_fitted = False",
"def create_model(self):\r\n model = self.model_fn(self.flags)\r\n print(model)\r\n return model",
"def make_model():\n m = model_class(*argv[2:-1])\n modelobj[\"model\"] = m",
"def fit(self):\n self.model = RegressionModel(model_expression=self.model_expression,\n fit_filters=self.filters, predict_filters=self.out_filters,\n ytransform=None, name=self.name)\n\n df = get_data(tables = self.tables,\n filters = self.filters,\n model_expression = self.model_expression)\n \n results = self.model.fit(df)\n \n self.name = self._generate_name()\n self.summary_table = str(results.summary())\n print(self.summary_table)\n \n # We don't strictly need to save the fitted parameters, because they are also\n # contained in the urbansim.models.RegressionModel() sub-object. But maintaining\n # a parallel data structure to other templates will make it easier to refactor the\n # code later on to not rely on RegressionModel any more. \n \n self.fitted_parameters = results.params.tolist()\n self.residuals = results.resid",
"def create_model(self):\n model = solph.Model(self.es)\n return model",
"def make_dataset(self, df, **kwargs):\n\t\treturn df",
"def prep_input_data(parameter_df):\n # Make sure the shape is 3D\n # assert len(x_shape) == 3, \"Input shape should be 3D\"\n\n # Create an empty list in which to store objects\n n_mods = len(parameter_df)\n mods_for_default = [None for _ in range(n_mods)]\n hyperopt_args = [None for _ in range(n_mods)]\n model_params = [None for _ in range(n_mods)]\n for i, (_, row) in enumerate(parameter_df.iterrows()):\n\n # Pull info needed to instantiate model\n major_model = row[\"ModelClass\"]\n specific_model = row[\"SpecificModel\"]\n\n # Define the model and training parameters\n model_params[i] = DEFAULT_MODEL_PARAMS[major_model][specific_model].copy()\n\n # Instantiate a model with default parameters\n mods_for_default[i] = MldeModel(major_model, specific_model,\n model_params=model_params[i],\n training_params=DEFAULT_TRAINING_PARAMS[major_model],\n eval_metric=mse)\n\n # Package args for hyperopt\n hyperopt_args[i] = (major_model, specific_model, row[\"NHyperopt\"])\n\n # Return the instantiated models and the hyperopt args\n return mods_for_default, hyperopt_args,model_params",
"def fit_transform(self, load_script=False):\n\n # Interpret the request data based on the expected row and column structure\n row_template = ['strData', 'strData']\n col_headers = ['model_name', 'n_features']\n feature_col_num = 1\n \n # An additional key field column is expected if the call is made through the load script\n if load_script:\n row_template = ['strData', 'strData', 'strData']\n col_headers = ['model_name', 'key', 'n_features']\n feature_col_num = 2\n \n # Create a Pandas Data Frame for the request data\n self.request_df = utils.request_df(self.request, row_template, col_headers)\n \n # Initialize the persistent model\n self.model = PersistentModel()\n \n # Get the model name from the request dataframe\n self.model.name = self.request_df.loc[0, 'model_name']\n \n # Get the model from cache or disk\n self._get_model()\n \n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(3)\n \n # Check that the estimator is an unsupervised ML algorithm\n if self.model.estimator_type not in [\"decomposer\", \"clusterer\"]:\n err = \"Incorrect usage. The estimator specified is not a known decompostion or clustering algorithm: {0}\".format(self.model.estimator)\n raise Exception(err)\n\n if load_script:\n # Set the key column as the index\n self.request_df.set_index(\"key\", drop=False, inplace=True)\n\n # Split the features provided as a string into individual columns\n self.X = pd.DataFrame([x[feature_col_num].split(\"|\") for x in self.request_df.values.tolist()], columns=self.model.features_df.loc[:,\"name\"].tolist(),\\\n index=self.request_df.index)\n \n # Convert the data types based on feature definitions \n self.X = utils.convert_types(self.X, self.model.features_df)\n\n # Construct the preprocessor\n prep = Preprocessor(self.model.features_df, scale_hashed=self.model.scale_hashed, scale_vectors=self.model.scale_vectors,\\\n missing=self.model.missing, scaler=self.model.scaler, logfile=self.logfile, **self.model.scaler_kwargs)\n \n # Create a chache for the pipeline's transformers\n # https://scikit-learn.org/stable/modules/compose.html#caching-transformers-avoid-repeated-computation\n # cachedir = mkdtemp()\n\n # Construct a sklearn pipeline\n self.model.pipe = Pipeline([('preprocessor', prep)]) #, memory=cachedir)\n\n if self.model.dim_reduction:\n # Construct the dimensionality reduction object\n reduction = self.decomposers[self.model.reduction](**self.model.dim_reduction_args)\n \n # Include dimensionality reduction in the sklearn pipeline\n self.model.pipe.steps.insert(1, ('reduction', reduction))\n self.model.estimation_step = 2\n else:\n self.model.estimation_step = 1 \n\n # Construct an estimator\n estimator = self.algorithms[self.model.estimator](**self.model.estimator_kwargs)\n\n # Add the estimator to the sklearn pipeline\n self.model.pipe.steps.append(('estimator', estimator)) \n\n # Fit the data to the pipeline\n if self.model.estimator_type == \"decomposer\":\n # If the estimator is a decomposer we apply the fit_transform method at the end of the pipeline\n self.y = self.model.pipe.fit_transform(self.X)\n\n # Prepare the response\n self.response = pd.DataFrame(self.y, index=self.X.index)\n\n elif self.model.estimator_type == \"clusterer\":\n # If the estimator is a decomposer we apply the fit_predict method at the end of the pipeline\n self.y = self.model.pipe.fit_predict(self.X)\n\n # Prepare the response\n self.response = pd.DataFrame(self.y, columns=[\"result\"], index=self.X.index)\n \n # 
Clear the cache directory setup for the pipeline's transformers\n # rmtree(cachedir)\n \n # Update the cache to keep this model in memory\n self._update_cache()\n \n if load_script:\n # Add the key field column to the response\n self.response = self.request_df.join(self.response).drop(['n_features'], axis=1)\n \n # If the function was called through the load script we return a Data Frame\n if self.model.estimator_type == \"decomposer\":\n self._send_table_description(\"reduce\")\n elif self.model.estimator_type == \"clusterer\":\n self._send_table_description(\"cluster\")\n \n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(4)\n \n return self.response\n \n # If the function was called through a chart expression we return a Series\n else:\n # Dimensionality reduction is only possible through the load script\n if self.model.estimator_type == \"decomposer\":\n err = \"Dimensionality reduction is only possible through the load script.\"\n raise Exception(err)\n \n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(4)\n \n return self.response.loc[:,'result']",
"def fit(self, df):\n return self",
"def fit(self, df):\n return self",
"def lm(formula, data):\n\ty, X = patsy.dmatrices(formula, data, return_type='dataframe')\n\tresults = sm.OLS(y, X).fit()\n\tprint(results.summary())\n\treturn results",
"def build(self, agg_df):\n queries = agg_df['query'].unique()\n for query in queries:\n history_df = agg_df[agg_df['query'] == query][['date', 'count']]\n counts = history_df['count']\n mean, std = self._compute_mean_std(counts)\n self.model[query] = {'mean': mean, 'std': std}\n\n pickle.dump(self.model, open(self.model_path, 'wb'))\n print(\"Model successfully built.\")",
"def evaluate_model(fn_string, df, features,\n coefficients=None,\n target=None,\n fit_intercept=False):\n features = list(set(df.columns).intersection(features))\n array = df[features].to_numpy()\n func = process_fn(fn_string, features)\n n_samples = len(df)\n predictions = func(array.T)\n if coefficients is None:\n if target is None:\n target = df.columns[0]\n target_values = df[target]\n coefficients = lsq_coefficients(predictions, target_values,\n fit_intercept=fit_intercept)\n slope, intercept = coefficients\n else:\n slope, intercept = coefficients\n predictions = np.add(np.multiply(predictions, slope), intercept)\n return predictions, coefficients",
"def predict(self, exog=None, transform=True, *args, **kwargs):\n is_pandas = _is_using_pandas(exog, None)\n exog_index = exog.index if is_pandas else None\n\n if transform and hasattr(self.model, 'formula') and (exog is not None):\n design_info = self.model.data.design_info\n from patsy import dmatrix\n if isinstance(exog, pd.Series):\n # we are guessing whether it should be column or row\n if (hasattr(exog, 'name') and\n isinstance(exog.name, str) and\n exog.name in design_info.describe()):\n # assume we need one column\n exog = pd.DataFrame(exog)\n else:\n # assume we need a row\n exog = pd.DataFrame(exog).T\n\n orig_exog_len = len(exog)\n is_dict = isinstance(exog, dict)\n exog = dmatrix(design_info, exog, return_type=\"dataframe\")\n if orig_exog_len > len(exog) and not is_dict:\n if exog_index is None:\n warnings.warn(\"nan values have been dropped\", ValueWarning)\n else:\n exog = exog.reindex(exog_index)\n exog_index = exog.index\n\n if exog is not None:\n exog = np.asarray(exog)\n if exog.ndim == 1 and (self.model.exog.ndim == 1 or\n self.model.exog.shape[1] == 1):\n exog = exog[:, None]\n exog = np.atleast_2d(exog) # needed in count model shape[1]\n\n predict_results = self.model.predict(self.params, exog,\n *args, **kwargs)\n\n # TODO: Shouldn't this be done by wrapping?\n if exog_index is not None and not hasattr(predict_results,\n 'predicted_values'):\n if predict_results.ndim == 1:\n return pd.Series(predict_results, index=exog_index)\n else:\n # FIXME: columns-->neq_names for e.g. MNLogit, VAR\n ynames = self.model.data.ynames\n return pd.DataFrame(predict_results, index=exog_index,\n columns=ynames)\n else:\n return predict_results",
"def make_engine(model_info, data, dtype, cutoff):\n if dtype == 'sasview':\n return eval_sasview(model_info, data)\n elif dtype.endswith('!'):\n return eval_ctypes(model_info, data, dtype=dtype[:-1], cutoff=cutoff)\n else:\n return eval_opencl(model_info, data, dtype=dtype, cutoff=cutoff)",
"def build_benchmark_model(X, y):\n model = LinearRegression()\n model = model.fit(X, y)\n \n \"\"\"Displays model summary.\"\"\"\n print(\"Model coefficient: {}\".format(model.coef_))\n print(\"Model intercept: {}\".format(model.intercept_))\n \n return model",
"def exchange_df(self, df: pandas.DataFrame) -> \"Predictions\":\n return self.__class__(**collections.ChainMap(dict(df=df), dataclasses.asdict(self)))",
"def _fit(self, df):\n return df",
"def create_module(sbml_model_file, model_name, model_output_dir, condition_df,\n observable_df):\n\n from amici.petab_import import import_model\n import_model(sbml_model=sbml_model_file, observable_table=observable_df,\n model_name=model_name, model_output_dir=model_output_dir,\n verbose=True, condition_table=condition_df)",
"def create_data_model():\n data = {}\n data['distance_matrix'] =[]\n data['num_vehicles'] = 1\n data['depot'] = 0\n data['demands'] = [0, 1, 1, 2, 4, 2, 4, 8, 8, 1, 2, 1, 2, 4, 4, 8, 8]\n data['vehicle_capacities'] = [15, 15, 15, 15]\n return data",
"def create_data_model():\n data = {}\n data['distance_matrix'] = transit_c\n data['post'] = pospt_c\n data['fixed_cost'] = fc*1000\n data['demands'] = total_demand\n data['vehicle_capacities'] = capacity_list_function(routes,S)\n data['time_capacities'] = time_list_function(routes,Tmax)\n data['num_vehicles'] = routes+1\n data['depot'] = 0\n return data",
"def create_data_model():\r\n data = {}\r\n data['distance_matrix'] = distance_matrix.tolist()\r\n data['time_matrix'] = time_matrix.tolist()\r\n data['time_windows'] = time_windows.tolist()\r\n data['pickups_deliveries'] = pickup_deliveries.tolist()\r\n data['demands'] = demand\r\n data['num_vehicles'] = 20\r\n data['vehicle_capacities'] = [20 * i / i for i in range(1, num_vehicles+1)]\r\n data['depot'] = (2 * length) - 1\r\n return data",
"def create_model(self):\n pass",
"def create_model(self):\n pass",
"def from_dataframe(cls, df, data_cls):\n pass",
"def init_model_df(self):\n\n self.model_df = pd.DataFrame(columns=self.query_df[self.column_name].unique())\n\n # add _TIMESTAMP column to dataframe\n self.model_df[self.column_index] = self.min_increments\n\n # set row index to _TIMESTAMP\n self.model_df.set_index(self.column_index, inplace=True)",
"def __init__(self, md, ev=None, var=None, out=None):\n self.model = md\n\n ## Construct default evaluator\n if ev is None:\n\n def _ev(md, df):\n df_res = md.evaluate_df(df)\n return df_res[md.out]\n\n self.ev = _ev\n self.var = self.model.var\n self.out = self.model.out\n\n ## Use given evaluator\n else:\n self.ev = ev\n self.var = var\n self.out = out\n\n ## Copy model data\n self.runtime = md.runtime(1)\n self.name = copy.copy(md.name)",
"def MakeModel(self):\n pass",
"def create_model(self, **inputs):\n raise NotImplementedError('This method has to be overwritten.')",
"def _get_model_equation(self):\n\n res = self._model.fit()\n\n explanatory_variables = []\n for variable in self._model.exog_names:\n if variable is 'Intercept':\n explanatory_variables.append(self._FLOAT_STRING_FORMAT.format(res.params[variable]))\n else:\n explanatory_variables.append(self._FLOAT_STRING_FORMAT.format(res.params[variable]) + variable)\n\n response_variable = self._model.endog_names\n\n # TODO: Correct formula format for negative coefficients (minus)\n\n model_equation = response_variable + ' = ' + ' + '.join(explanatory_variables)\n\n return SimpleTable(data=[[model_equation]], headers=['Linear regression model:'])",
"def run_lmm(formula, df, reml=False, **kwargs):\n model = sm.MixedLM.from_formula(formula, df, **kwargs)\n return model.fit(reml=reml)",
"def build_more_model(movie_df):\n #number of theaters open to\n movie_df['number_of_theaters_open']= movie_df['opening_weekend_take']/movie_df['opening_per_theater']\n movie_df['title_length']=[len(n) for n in movie_df['alt_title']]\n #create some models\n import statsmodels.formula.api as smf\n #build a multivariate reg model\n linmodel_multi_2 = smf.ols(formula='domestic_gross ~ production_budget + widest_release + number_of_theaters_open+worldwide_gross', data=movie_df).fit()\n return linmodel_multi_2.summary()",
"def create_model(self, fun, kwargs=None, compile=True):\n if kwargs is None:\n kwargs = {}\n\n self.model = fun(self.config.inputs, self.config.output, **kwargs)\n if compile:\n self.model.compile(\n loss=self.config.get_loss(self.modeldir),\n optimizer=\"adam\", metrics=[\"accuracy\"])",
"def buildmodel(label_name, category_features, non_category_features,\n model=None, save=True):\n # Reading the train/test data as data frames.\n train_df, test_df = read_data()\n whole_df = pd.concat([train_df, test_df])\n\n # Preprocess the data frame.\n new_df = preprocess(whole_df, label_name, category_features, non_category_features)\n\n # Names of final columns\n final_columns = new_df.columns[1:]\n\n # Find averages for non-category features.\n averages = {}\n for col in non_category_features:\n averages[col] = new_df[col].mean()\n\n # Dropping na's\n new_df = new_df.dropna()\n\n # getting X & y for the train set\n y = np.array(new_df.fraud)\n X = new_df.drop(['fraud'], axis=1).values\n\n if model is None:\n model = RandomForestClassifier(n_estimators=150) # Our base model.\n model = model.fit(X, y)\n\n # Save our model and names of columns in the file as a pickle object.\n if save:\n with open(filename_pickle, 'wb') as f:\n pickle.dump((model, final_columns, averages), f)\n return (model, final_columns, averages)",
"def build_model(self) -> DM:\n model = DM()\n model[self.modelroot] = content = DM()\n\n content['key'] = self.key\n content['id'] = self.id\n content['system-family'] = self.family\n for cp in self.parameters:\n content.append('calculation-parameter', DM(cp))\n\n self._set_model(model)\n return model",
"def create_data(storage, df, df_contains='xy', y_col_name=None, y_pred_col_name=None):\n return DataFactory.factories[storage].create(df, df_contains, y_col_name, y_pred_col_name)",
"def transform(self, X):\n\t\tv_X = share_data(X)\n\t\tpredict_model = build_predict_model(self.formula_.dA_layers[-1], {self.formula_.X: v_X})\n\t\treturn predict_model()",
"def build_model(self):\n raise NotImplementedError",
"def eval(self, df):\n return self.ev(self.model, df)",
"def from_dict(cls, d):\n # Pass values from the dictionary to the __init__() method\n obj = cls(tables=d['tables'], model_expression=d['model_expression'], \n filters=d['filters'], out_tables=d['out_tables'], \n out_column=d['out_column'], out_transform=d['out_transform'],\n out_filters=d['out_filters'], name=d['name'], tags=d['tags'])\n\n obj.summary_table = d['summary_table']\n obj.fitted_parameters = d['fitted_parameters']\n obj.model = None\n \n # Unpack the urbansim.models.RegressionModel() sub-object and resuscitate it\n if d['model'] is not None:\n model_config = yamlio.convert_to_yaml(d['model'], None)\n obj.model = RegressionModel.from_yaml(model_config)\n \n return obj",
"def __call__(self, X):\n return self.model(X)",
"def _build_regression(endog, exog, model, lasso_positive, alpha):\n if model=='Ridge':\n mod = Ridge(alpha=alpha)\n elif model=='Lasso':\n mod = Lasso(alpha=alpha, positive=lasso_positive)\n else:\n raise ValueError(\"Model must be of type Ridge or Lasso\")\n \n mod.fit(endog, exog)\n return mod",
"def load(self, ldf) -> View:\t\t\n\t\tfrom lux.compiler.Parser import Parser\n\t\tfrom lux.compiler.Validator import Validator\n\t\tfrom lux.compiler.Compiler import Compiler\n\t\tfrom lux.executor.PandasExecutor import PandasExecutor #TODO: temporary (generalize to executor)\n\t\t#TODO: handle case when user input vanilla Pandas dataframe\n\t\tself.specLst = Parser.parse(self.specLst)\n\t\tValidator.validateSpec(self.specLst,ldf)\n\t\tvc = Compiler.compile(ldf,ldf.context,[self],enumerateCollection=False)\n\t\tPandasExecutor.execute(vc,ldf)\n\t\treturn vc[0]",
"def init_model(n_factors, n_dates, n_tickers):\n date = tf.keras.Input((1,), name=\"date\", dtype=\"int32\")\n ticker = tf.keras.Input((1,), name=\"ticker\", dtype=\"int32\")\n\n # learnable table of date -> factor returns\n date_embedded = tf.keras.layers.Embedding(\n n_dates, n_factors, name=\"date_embedding\"\n )(date)\n\n # learnable table of ticker -> factor loadings\n ticker_embedded = tf.keras.layers.Embedding(\n n_tickers, n_factors, name=\"ticker_embedding\"\n )(ticker)\n\n pred = tf.keras.layers.Reshape((1,))(\n tf.keras.layers.Dot(axes=-1)([date_embedded, ticker_embedded])\n )\n\n model = tf.keras.Model(inputs=[date, ticker], outputs=pred)\n model.compile(\"Adagrad\", \"mse\")\n return model",
"def transformerXLModel(*args, **kwargs):\n model = TransfoXLModel.from_pretrained(*args, **kwargs)\n return model",
"def fit_model():\n global _HOME_OWNERSHIP\n _HOME_OWNERSHIP = {x: i for i, x in enumerate([\"rent\", \"own\", \"mortgage\", \"other\"])}\n df = pd.read_csv(os.path.join(settings.BASE_DIR, \"LoanStats3a.csv\"), skiprows=1).head(5000)\n df = df[df.apply(is_poor_coverage, axis=1)]\n df['year_issued'] = df.issue_d.apply(lambda x: int(x.split(\"-\")[0]))\n df_term = df[df.year_issued < 2012]\n\n bad_indicators = [\n \"Late (16-30 days)\",\n \"Late (31-120 days)\",\n \"Default\",\n \"Charged Off\"\n ]\n df_term['is_rent'] = df_term.home_ownership == \"RENT\"\n df_term = df_term[df_term.home_ownership.apply(lambda x: x is not None and x != 'NONE')]\n df_term['is_bad'] = df_term.loan_status.apply(lambda x: x in bad_indicators)\n df_term['term'] = df_term.term.apply(lambda x: x.split()[0])\n df_term['home_ownership'] = df_term.home_ownership.apply(lambda x: _HOME_OWNERSHIP[x.lower()])\n global _LENDING_PREDICT_MODEL\n _LENDING_PREDICT_MODEL = LogisticRegression()\n _LENDING_PREDICT_MODEL.fit(df_term[_FEATURES], df_term.is_bad)",
"def run():\n\n df = read_input() # the parameters\n df = add_time_period(df) # a feature\n df = is_holiday(df) # a feature\n df = scale_continous(df) # continous feature transformation\n df = encode_dummy(df) # categorical feature transformation\n df = order_columns(df) # ordering model inputs\n model = load_model() # the multiple linear regression model\n prediction = int(model.predict(df)) # form a prediction\n return prediction # return the prediction",
"def instantiate(formula, instantiation_map):\n if is_constant(formula.root):\n return Formula(formula.root)\n\n if is_variable(formula.root):\n return instantiation_map[formula.root] if formula.root in instantiation_map else formula\n\n first = instantiate(formula.first, instantiation_map)\n if is_unary(formula.root):\n return Formula(formula.root, first)\n\n second = instantiate(formula.second, instantiation_map)\n if is_binary(formula.root):\n return Formula(formula.root, first, second)\n\n return Formula(formula.root, first, second, instantiate(formula.third, instantiation_map))",
"def model_to_df(self, transpose=True):\n X = np.vstack([self.sales(), self.unit_contribution(),\n self.net_revenue(), self.depreciation(),\n self.before_tax_profit(), self.after_tax_profit(), self.cash_flow()])\n\n if transpose:\n X = np.transpose(X)\n df = pd.DataFrame(X, columns=['sales', 'unit_contribution', 'net_revenue',\n 'depreciation', 'before_tax_profit', 'after_tax_profit',\n 'cash_flow'])\n else:\n df = pd.DataFrame(X, index=['sales', 'unit_contribution', 'net_revenue',\n 'depreciation', 'before_tax_profit', 'after_tax_profit',\n 'cash_flow'])\n\n return df",
"def __init__(self, model_options, input_options, stock_code, load=False, saved_model_dir=None, saved_model_path=None):\n\n IndexRegressionModel.__init__(self, model_options, input_options, stock_code)\n\n if not load or saved_model_dir is None:\n self.model = linear_model.LinearRegression()\n else:\n model_path = saved_model_path if saved_model_path is not None else self.get_saved_model_path(saved_model_dir)\n if model_path is not None:\n self.load_model(path.join(saved_model_dir, model_path), self.SKLEARN_MODEL)",
"def _trainedmodel(continuous, modelform, Vr, m=20):\n if continuous == \"inferred\":\n ModelClass = roi._core.InferredContinuousROM\n elif continuous:\n ModelClass = roi._core._ContinuousROM\n else:\n ModelClass = roi._core._DiscreteROM\n\n n,r = Vr.shape\n c, A, H, Hc, G, Gc, B = _get_operators(r, m)\n operators = {}\n if \"c\" in modelform:\n operators['c_'] = c\n if \"A\" in modelform:\n operators['A_'] = A\n if \"H\" in modelform:\n operators['Hc_'] = Hc\n if \"G\" in modelform:\n operators['Gc_'] = Gc\n if \"B\" in modelform:\n operators['B_'] = B\n\n model = roi._core.trained_model_from_operators(ModelClass, modelform,\n Vr, **operators)\n model.datacond_ = np.random.random()\n model.dataregcond_ = model.datacond_ / 2\n model.residual_ = np.random.random()\n model.misfit_ = model.residual_ / 2\n\n return model",
"def create_model(model_name, random_state, epoch, device, log_path, **hparams):\n model = eval(f'{model_name}')(\n **hparams, epoch=int(epoch), random_state=random_state, device=device,\n log_path=log_path\n )\n\n return model",
"def create(data, row_label=None, features=None, feature_model='auto',\n method='lsh', verbose=True):\n\n _mt._get_metric_tracker().track(__name__ + '.create')\n\n _raise_error_if_not_of_type(data, [_SFrame])\n _raise_error_if_not_of_type(features, [str])\n _raise_error_if_column_exists(data, features)\n\n if data[features].dtype() != _Image:\n raise _ToolkitError(\"Feature `%s` must be of type Image\" \\\n % features)\n\n return SimilaritySearchModel(data, row_label=row_label, feature=features,\n feature_model=feature_model, method=method, verbose=verbose)",
"def create_data_model(dima):\n data = {}\n data['distance_matrix'] = dima\n data['num_vehicles'] = 1\n data['depot'] = 0\n return data",
"def formula(self):\n terms = []\n for ff in self.formulae:\n terms += list(ff.terms)\n return Formula(terms)",
"def create_data_model():\r\n data = {}\r\n data['distance_matrix'] = dis_array\r\n\r\n data['demands'] = [0, 1100, 700, 800, 1400, 2100, 400, 800, 100, 500, 600, 1200, 1300, 1300, 300, 900, 2100, 1000, 900, 2500, 1800, 700]\r\n data['vehicle_capacities'] = [6000, 6000, 6000, 6000]\r\n data['num_vehicles'] = 4\r\n #data['demands'] = [0, 125, 84, 60, 500, 300, 175, 350, 150, 1100, 4100, 225, 300, 250, 500, 150, 100, 250, 120, 600, 500, 175, 75]\r\n #data['vehicle_capacities'] = [4500, 4500, 4500]\r\n #data['num_vehicles'] = 3\r\n data['depot'] = 0\r\n return data",
"def build_model(self):\n pass",
"def build_model(self):\n pass",
"def create_model_definition(request):\n modelname = request.matchdict['modelname']\n results = db_model_token(request.db)[modelname]\n tokens = [t.value for t in results]\n if len(tokens) > 0:\n token = tokens[0]\n if token != request.GET.get('token'):\n # provided token does not match\n request.errors.add('query', 'token',\n 'invalid token for model %s' % modelname)\n request.errors.status = 403\n return json_error(request.errors)\n else:\n # Generate a unique token\n token = os.urandom(8).encode('hex')\n token_doc = {'type': 'token', 'token': token, 'model': modelname}\n request.db.save(token_doc)\n\n model_doc = {\n 'type': 'definition',\n 'model': modelname,\n 'definition': json.loads(request.body)\n }\n request.db.save(model_doc) # save to couchdb\n return {'token': token}",
"def load_model():\n global columns\n global data\n \n model = pickle.load(open('MedCostModel.pkl', 'rb'))\n data = pd.read_csv('MedCosts.csv')\n data = data.drop(columns=['charges'])\n columns = data.columns\n return(model)",
"def create_data_model(con,route_id):\n data = {}\n df1 = pd.read_sql('SELECT * FROM travel_times WHERE route_id = \"{0}\";'.format(route_id), con)\n #df1_data = df1.pivot().values\n data['distance_matrix'] = df1.pivot(index='stop1',columns='stop2',values='travel_time').values\n print('data loaded for {0}'.format(route_id))\n data['num_vehicles'] = 1\n data['depot'] = 0\n return data",
"def make_predictions(df):\n t_labels = get_labels(\"labels_pca\")\n # clean data\n df = clean_data(df)\n # engineer data\n df = engineer_features(df)\n # predict\n with open(\"model.pkl\",\"r\") as mdl:\n model = pickle.load(mdl)\n mdl.close()\n predictions = model.predict(df[t_labels])\n return predictions",
"def build_model(self, **kwargs):\n raise NotImplementedError()",
"def from_pandas(cls, df, data_cls):\n pass",
"def generate_data_model(\n model_name: str, attribute_values: Mapping[str, ATTRIBUTE_TYPES]\n) -> DataModel:\n return DataModel(\n model_name,\n [Attribute(key, type(value), True) for key, value in attribute_values.items()],\n )",
"def model_from_gdsfactory(\n component: Component, dirpath=gf.CONFIG[\"sparameters\"], **kwargs\n) -> Model:\n kwargs.pop(\"function_name\", \"\")\n kwargs.pop(\"module\", \"\")\n component = gf.call_if_func(component, **kwargs)\n pins, f, s = sim.read_sparameters_lumerical(component=component, dirpath=dirpath)\n\n def interpolate_sp(freq):\n return interpolate(freq, f, s)\n\n Model.pin_count = len(pins)\n m = Model()\n m.pins = PinList([Pin(component=m, name=pins[i]) for i, _ in enumerate(pins)])\n m.__setattr__(\"sparams\", (f, s))\n m.s_parameters = interpolate_sp\n m.freq_range = (m.sparams[0][0], m.sparams[0][-1])\n m.wavelengths = speed_of_light / np.array(f)\n m.s = s\n return m",
"def fit_anchor_model(df, fit_genes, model, deg, x_col='lfc_target', y_col='lfc'):\n if fit_genes is not None:\n train_df = df.loc[df.target_gene.isin(fit_genes), :].copy()\n else:\n train_df = df\n train_x = train_df[x_col].copy()\n train_y = train_df[y_col].copy()\n test_x = df[x_col].copy()\n test_y = df[y_col].copy()\n if model == 'linear':\n predictions, model_info = model_linear(train_x, train_y, test_x)\n elif model == 'fixed slope':\n predictions, model_info = model_fixed_slope(train_x, train_y, test_x)\n elif model == 'spline':\n predictions, model_info = model_spline(train_x, train_y, test_x, deg)\n elif model == 'quadratic':\n predictions, model_info = model_quadratic(train_x, train_y, test_x)\n else:\n raise ValueError('Model ' + model + ' not implemented')\n out_df = df.copy()\n out_df['prediction'] = predictions\n out_df['residual'] = test_y - predictions\n out_df['residual_z'] = (out_df['residual'] - out_df['residual'].mean())/out_df['residual'].std()\n return out_df, model_info",
"def train(self, train_df: pd.DataFrame) -> None:\n\n # get feature list\n target_columns, features = PropensityModel.get_feature_and_target_columns(train_df)\n\n # train model\n x_train = train_df[features]\n y_train = train_df[target_columns]\n self.model.fit(x_train, y_train)",
"def create_data_model():\r\n data = {}\r\n data['distance_matrix'] = mtrx.create_distance_matrix(mtrx.create_data()) \r\n data['demands'] = clean.demands\r\n # Each location has a demand corresponding to the quantity—for example, \r\n # weight or volume—of the item to be picked up.\r\n data['vehicle_capacities'] = capacity\r\n # Each vehicle has a capacity: the maximum quantity that the vehicle can hold. \r\n # As a vehicle travels along its route, the total quantity of the items it is carrying \r\n # can never exceed its capacity.\r\n data['num_vehicles'] = number\r\n data['depot'] = 0\r\n return data",
"def eval_model(config, period, test_data):\n if config.network == 'MLPwithGAN':\n model = MLPwithGAN(config)\n elif config.network == 'MLP':\n model = MLP(config)\n elif config.network == 'LSTM':\n model = VanillaLSTM(config)\n elif config.network == 'CNN':\n model = CNNfeature(config)\n else:\n raise Exception('Unknown model type:{}'.format(config.network))\n\n if config.ensemble:\n m = model\n model = []\n\n for i in glob(gen_path(config.path, str(period)) + '/m*'):\n m.load_state_dict(\n torch.load(gen_path(i, filename=config.network + '.pkl')))\n m.to(config.device)\n m.eval()\n model.append(m)\n else:\n model.load_state_dict(\n torch.load(gen_path(config.path, str(period), 'model', filename=config.network + '.pkl')))\n model.to(config.device)\n model.eval()\n dataloader_test = test_data[0]\n test_date = test_data[1]\n test_symbol = test_data[2]\n sc_y = joblib.load(gen_path(config.path, str(period), 'scaler', filename='training_sc_y.pkl'))\n predict_y_test, real_y_test, valid_index_test = make_prediction(dataloader_test, sc_y, model, config)\n\n stock_score = pd.DataFrame()\n stock_score[\"symbol\"] = test_symbol[valid_index_test]\n stock_score[\"score\"] = predict_y_test\n stock_score['truth'] = real_y_test\n stock_score[\"date\"] = test_date[valid_index_test]\n stock_score = stock_score.sort_values(by=[\"date\"])\n stock_score.to_csv(gen_path(config.path, 'stock_score', filename=str(period) + '.csv'), index=False)",
"def create_data_tables(model, condition_df):\n timepoints = np.logspace(-5, 1, 20)\n sigma_default = 0.1 # parameters are lin\n sigma_parameter = 0.2\n offset_batch_1 = 3.0\n offset_batch_2 = 4.0\n\n parameter_ids = list(model.getParameterIds())\n observable_ids = list(model.getObservableIds())\n\n sigma_parameter_observable_idx = \\\n observable_ids.index('obs_x1withsigma')\n model_offset_parameter_idx = \\\n parameter_ids.index('observableParameter1_obs_x2_offsetted')\n sigma_parameter_idx = \\\n parameter_ids.index('noiseParameter1_obs_x1withsigma')\n\n # set true parameters\n default_parameters = np.array(model.getParameters())\n default_parameters[sigma_parameter_idx] = sigma_parameter\n print('Default model parameters:')\n for p, val in zip(model.getParameterIds(), model.getParameters()):\n print(f'\\t{p}: {val}')\n print()\n\n true_parameters = {pid: val for pid, val in zip(model.getParameterIds(),\n default_parameters)}\n # output parameters don't have default values from SBML mdoel\n true_parameters['observableParameter1_obs_x1_scaled'] = 2.0\n true_parameters['noiseParameter1_obs_x1withsigma'] = 0.2\n true_parameters['noiseParameter1_obs_x1_scaled'] = 0.2\n true_parameters['noiseParameter1_obs_x2'] = 0.2\n true_parameters['noiseParameter1_obs_x1'] = 0.2\n true_parameters['noiseParameter1_obs_x2_offsetted'] = 0.2\n true_parameters['noiseParameter1_obs_x3'] = 0.2\n true_parameters['observableParameter1_obs_x2_offsetted'] = 3.0\n\n true_parameters['scaling_x1_common'] = \\\n true_parameters['observableParameter1_obs_x1_scaled']\n # extend to optimization parameter vector: add second offset parameter\n true_parameters['offset_x2_batch_0'] = offset_batch_1\n true_parameters['offset_x2_batch_1'] = offset_batch_2\n true_parameters['x1withsigma_sigma'] = sigma_parameter\n\n print('True parameters:\\t%s' % true_parameters)\n\n # setup model\n model.setTimepoints(timepoints)\n model.setParameters(default_parameters)\n\n # setup solver\n solver = model.getSolver()\n solver.setMaxSteps(10000)\n\n print(condition_df)\n measurement_df = petab.create_measurement_df()\n\n print()\n\n # set sigmas\n sigmay = np.ones(shape=(model.nt(), model.nytrue)) * sigma_default\n # observable with sigma parameter\n sigmay[:, sigma_parameter_observable_idx] = np.nan\n\n # llh for noisy simulated data with true parameters\n expected_llh = 0.0\n\n for condition_idx, condition_id in enumerate(condition_df.index.values):\n condition_parameters = condition_df.loc[condition_id, :]\n print(f'Condition {condition_idx} \"{condition_id}\": {condition_parameters}')\n\n # different offset for two \"batches\"\n batch_id = condition_idx % 2\n model_parameters = default_parameters[:]\n if batch_id == 0:\n model_parameters[\n model_offset_parameter_idx] = offset_batch_1\n else:\n model_parameters[\n model_offset_parameter_idx] = offset_batch_2\n\n print('Model parameters:', model_parameters)\n\n # simulate condition\n rdata = get_return_data_for_condition(\n model, solver, condition_parameters,\n model_parameters, sigmay,\n sigma_parameter_observable_idx,\n sigma_parameter_idx\n )\n\n print('\\tllh: ', rdata['llh'])\n print('\\tsllh', rdata['sllh'])\n\n expected_llh += rdata['llh']\n\n measurement_df = append_measurements_for_condition(\n model, measurement_df, sigmay, condition_id, batch_id, rdata)\n print()\n\n print('Expected llh: ', expected_llh)\n\n return measurement_df, true_parameters, expected_llh",
"def create_data_model():\n data = {}\n\n data['addresses'] = ['-0.068372,109.362745']\n data['demands'] = [0]\n data['depot'] = 0 \n data['vehicle_capacities'] = []\n\n orders = Order.query.all()\n for u in orders:\n data['addresses'].append(u.latlon)\n data['demands'].append(int(u.load))\n\n vehicles = Vehicle.query.all()\n for u in vehicles:\n data['vehicle_capacities'].append(int(u.capacity))\n\n data['num_vehicles'] = len(vehicles)\n\n data['distance_matrix'] = create_distance_matrix(data)\n\n print(len(data['addresses']))\n print(data['demands'])\n\n return data",
"def create_model(configuration):\n model = find_model_using_name(configuration['model_name'])\n instance = model(configuration)\n print(\"model [{0}] was created\".format(type(instance).__name__))\n return instance",
"def get_model_formula(self):\n\n if self._response_variable and self._explanatory_variables[0]:\n\n explanatory_variable = self.get_explanatory_variable()\n\n model_formula = self._response_variable + ' ~ ' + explanatory_variable\n\n else:\n\n model_formula = None\n\n return model_formula",
"def build_model(train_inputs,train_labels,model_params,model_mode='classification',\n model_type='naive_bayes'):\n if model_mode == \"classification\":\n if model_type == \"naive_bayes\":\n model = GaussianNB()\n if model_type == \"knn\":\n model = KNeighborsClassifier(n_neighbors=50)\n if model_type == \"svm\":\n model = SVC(kernel='poly', degree =27, coef0 =1, C=5)\n if model_type == \"decision_tree\":\n model = DecisionTreeClassifier(min_samples_split=45,min_samples_leaf=45,criterion=\"gini\")\n #model = RandomForestClassifier(n_estimators=500, n_jobs=-1)\n\n if model_mode == \"regression\":\n if model_type == \"knn\":\n model = KNeighborsRegressor()\n if model_type == \"svm\":\n model = SVR()\n if model_type == \"decision_tree\":\n model = DecisionTreeRegressor()\n\n\n model.fit(train_inputs, train_labels)\n # for name, score in zip(train_inputs.columns,model.feature_importances_):\n # print(name, score)\n\n return model"
] | [
"0.64051425",
"0.6352155",
"0.6185541",
"0.5942793",
"0.58994234",
"0.5815461",
"0.5793945",
"0.565781",
"0.5657534",
"0.56343275",
"0.5602399",
"0.5592737",
"0.55794835",
"0.5542218",
"0.5530604",
"0.5495615",
"0.54892266",
"0.54615164",
"0.5460569",
"0.54572856",
"0.54244167",
"0.54210985",
"0.5418621",
"0.5403502",
"0.539797",
"0.5396568",
"0.53828114",
"0.5379534",
"0.5362838",
"0.5349719",
"0.5348548",
"0.5344782",
"0.5340223",
"0.5339784",
"0.5339784",
"0.53178984",
"0.5314495",
"0.5310803",
"0.52564186",
"0.52552706",
"0.5253333",
"0.5246602",
"0.5241462",
"0.52378654",
"0.5234939",
"0.5230607",
"0.52298695",
"0.52298427",
"0.52298427",
"0.52183086",
"0.52051",
"0.52029055",
"0.5192423",
"0.5185774",
"0.51805824",
"0.518",
"0.51619506",
"0.5155472",
"0.51512694",
"0.5143727",
"0.51418024",
"0.514036",
"0.5136675",
"0.5130569",
"0.512799",
"0.5125943",
"0.5125748",
"0.51202804",
"0.51171553",
"0.51171494",
"0.5109656",
"0.51095945",
"0.51030934",
"0.5097325",
"0.5093391",
"0.5081454",
"0.5066974",
"0.50602186",
"0.50580436",
"0.50563866",
"0.5049417",
"0.50488126",
"0.50488126",
"0.5044546",
"0.50384146",
"0.5036683",
"0.50351363",
"0.5030791",
"0.5027673",
"0.5021924",
"0.5019549",
"0.50148404",
"0.50128293",
"0.50050527",
"0.5004467",
"0.5002718",
"0.50012285",
"0.49908942",
"0.49861616",
"0.49833432"
] | 0.6909729 | 0 |
Fit a model to data. | def fit(self):
raise NotImplementedError # pragma: no cover | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fit_from_model_data(self, model_data: np.ndarray) -> f.FitDataset:\r\n return f.FitDataset(dataset=self.dataset, model_data=model_data)",
"def fit_training_data(self):\n self.model.fit(self.X_train)",
"def fit_model(self):\n logger.info('Fitting model')\n if self.traj_dict is None:\n self.traj_dict = self.get_traj_dict()\n self.model.fit(self.traj_dict.values())",
"def fit(self, data):\n raise NotImplementedError(\"To be implemented in sub classes\")",
"def fit(self, X):",
"def fit(self, data, data_val, model=None, *args, **kwargs):\n res = self._prepare_fit(model, data, data_val, generator=False,\n delay=False, *args, **kwargs)\n return res",
"def fit(self, data):\n return self",
"def fit(self, X, y=...):\n ...",
"def fit(self, X, y=...):\n ...",
"def fit(self, X, y=...):\n ...",
"def fit(self, X, y=...):\n ...",
"def fit(self, X, y=...):\n ...",
"def fit(self, X, y=...):\n ...",
"def fit(self, X, y=...):\n ...",
"def fit(self, X, y=...):\n ...",
"def fit(self, X, y=...):\n ...",
"def fit(self, X, y=...):\n ...",
"def fit(self, X, y):\n self.model_x = X\n self.model_y = y",
"def fit(self, X):\n raise NotImplementedError",
"def fit(self, X, y):\n Xs = self.scaler.fit_transform(X)\n self.model.fit(Xs, y)",
"def fit(self, data: pd.DataFrame):\n raise NotImplementedError",
"def fit(self, X, y):",
"def fit(self, X, y):",
"def fit(self, X, y):",
"def fit(self, X, Y):\n if self.model is None:\n print(\"%s.fit: implement me\" % (self.__class__.__name__))",
"def fit(self, X, y):\n self.model = self._initialize_model(X, y)\n self.model.optimize()",
"def __fit_model(self):\n\n labels = self.labeled_labels\n features = self.labeled_features\n\n pred = np.array(cross_val_predict(self.clf,\n features,\n labels,\n cv=self.cv))\n\n stats = self.__get_statistics(labels, pred)\n self.statistics.append(stats)\n\n self.clf.fit(features, labels)\n\n return self",
"def fit():\n pass",
"def fit(self, x):\n pass",
"def fit(self, model, data):\n best_model = None\n best_inliers = None\n best_num_inliers = 0\n best_residual_sum = np.inf\n\n if not isinstance(data, (tuple, list)):\n data = [data]\n num_data, num_feats = data[0].shape\n\n if self.min_samples is None:\n self.min_samples = num_feats + 1\n if self.residual_threshold is None:\n if len(data) > 1:\n data_idx = 1\n else:\n data_idx = 0\n self.residual_threshold = np.median(np.abs(\n data[data_idx] - np.median(data[data_idx])))\n\n for trial in range(self.max_trials):\n # randomly select subset\n rand_subset_idxs = np.random.choice(\n np.arange(num_data), size=self.min_samples, replace=False)\n rand_subset = [d[rand_subset_idxs] for d in data]\n\n # estimate with model\n model.estimate(*rand_subset)\n\n # compute residuals\n residuals = model.residuals(*data)\n residuals_sum = residuals.sum()\n inliers = residuals <= self.residual_threshold\n num_inliers = np.sum(inliers)\n\n # decide if better\n if (best_num_inliers < num_inliers) or (best_residual_sum > residuals_sum):\n best_num_inliers = num_inliers\n best_residual_sum = residuals_sum\n best_inliers = inliers\n\n # refit model using all inliers for this set\n if best_num_inliers == 0:\n data_inliers = data\n else:\n data_inliers = [d[best_inliers] for d in data]\n model.estimate(*data_inliers)\n\n ret = {\n \"best_params\": model.params,\n \"best_inliers\": best_inliers,\n }\n return ret",
"def fit(self, X, Y):\n ...",
"def fit(self,\n model,\n x=None,\n y=None,\n batch_size=None,\n epochs=1,\n verbose=1,\n callbacks=None,\n validation_split=0.,\n validation_data=None,\n shuffle=True,\n class_weight=None,\n sample_weight=None,\n initial_epoch=0,\n steps_per_epoch=None,\n validation_steps=None,\n validation_freq=1,\n **kwargs):\n raise NotImplementedError()",
"def fit(self) -> None:\n\n levels = self.levels\n TSs = GetAggregateTS(self.data).aggregate(levels)\n models = {}\n residuals = {}\n fcsts = {}\n for bm in self.baseModels:\n model_name = bm.model_name\n if model_name is None: # only residuals and fcsts are provided\n models[bm.level] = None\n residuals[bm.level] = bm.residuals\n fcsts[bm.level] = bm.fcsts\n else:\n m = BASE_MODELS[model_name](\n data=TSs[bm.level],\n params=bm.model_params,\n )\n m.fit()\n models[bm.level] = m\n self.models = models\n self.info_fcsts = fcsts\n self.info_residuals = residuals",
"def fit(self):\n raise NotImplementedError",
"def fit(self, x):\n raise NotImplementedError()",
"def fit(self,X_train,y_train):\r\n \r\n self.X_train_data=X_train.reset_index(drop=True)\r\n self.y_train_data=y_train.reset_index(drop=True)\r\n \r\n temp_fitted_model=[]\r\n for each_model in self.model_list:\r\n each_model.fit(self.X_train_data,self.y_train_data)\r\n temp_fitted_model.append(each_model)\r\n \r\n self.fitted_model=temp_fitted_model",
"def fit(self, X,y):\n pass",
"def fit(self, data):\n for v in self.features + self.targets:\n v._fit(data)",
"def fit(self, x, y):\n raise NotImplementedError('Subclass of LinearModel must implement fit method.')",
"def __call__(self, models, x, y, z=None, xbinsize=None, ybinsize=None, err=None, bkg=None, bkg_scale=1, **kwargs):\n\n tie_list = []\n try:\n n_inputs = models[0].n_inputs\n except TypeError:\n n_inputs = models.n_inputs\n\n self._data = Dataset(n_inputs, x, y, z, xbinsize, ybinsize, err, bkg, bkg_scale)\n\n if self._data.ndata > 1:\n\n if len(models) == 1:\n self._fitmodel = ConvertedModel([models.copy() for _ in xrange(self._data.ndata)], tie_list)\n # Copy the model so each data set has the same model!\n elif len(models) == self._data.ndata:\n self._fitmodel = ConvertedModel(models, tie_list)\n else:\n raise Exception(\"Don't know how to handle multiple models \"\n \"unless there is one foreach dataset\")\n else:\n if len(models) > 1:\n self._data.make_simfit(len(models))\n self._fitmodel = ConvertedModel(models, tie_list)\n else:\n self._fitmodel = ConvertedModel(models)\n\n self._fitter = Fit(self._data.data, self._fitmodel.sherpa_model, self._stat_method, self._opt_method, self._est_method, **kwargs)\n self.fit_info = self._fitter.fit()\n\n return self._fitmodel.get_astropy_model()",
"def fit(self, X, y, **fit_params):\n ...",
"def fit(self, X, y):\n\n # retain columns incase encoding occurs\n self.fit_X_columns = X.columns.tolist()\n\n # generate the imputation datasets from multiple imputation\n # then fit the analysis models on each of the imputed datasets\n self.models_ = self._apply_models_to_mi_data(\n self.linear_models, X, y\n )\n\n # generate the fit statistics from each of the m models\n self.statistics_ = self._get_stats_from_models(self.models_)\n\n # still return an instance of the class\n return self",
"def partial_fit(self, X, y=...):\n ...",
"def partial_fit(self, X, y=...):\n ...",
"def fit_model(self):\n model = self.make_model()\n self.history = model.fit(x=self.xt_train, y=self.yt_train,\n epochs=self.n_epochs, verbose=0,\n validation_split=self.v_split, shuffle=True)\n self.eval_model(model)\n self.save_model(model)\n return model",
"def fitted_model(model_data, sequential_model):\n x_train, y_train, x_val, y_val, x_test, _ = model_data\n compile_model(sequential_model)\n fitted_model = fit_model(sequential_model, 64, 1, False, x_train, y_train, x_val, y_val, x_test)\n return fitted_model",
"def fit(model, data, test_ids, exp_name, datasets):\n if model.model_type == 'torch':\n size = len(data[0])\n else:\n size = data[0].shape[0]\n \n train_ids = [i for i in range(size) if i not in test_ids]\n scaler = pka_scaler(data[1][train_ids])\n if model.data_type == 'descriptors':\n desc_scaler = StandardScaler()\n desc_scaler.fit(data[0][train_ids])\n data[0] = desc_scaler.transform(data[0])\n \n trained_model = train(model, train_ids, data, scaler, datasets)\n results = test(model, trained_model, test_ids, data, scaler)\n model.experiments.append({'name':exp_name,'model':trained_model, 'results':results, 'scaler':scaler})\n return results",
"def _fit(self, dataset):\n raise NotImplementedError()",
"def fit(self, input):\n raise NotImplementedError()",
"def fit(self):\n \n # Open an existing model and get the training & test dataset and targets\n train_test_df, target_df = self._get_model_and_data(target=True, set_feature_def=True)\n \n # Check that the estimator is an supervised ML algorithm\n if self.model.estimator_type not in [\"classifier\", \"regressor\"]:\n err = \"Incorrect usage. The estimator specified is not a known classifier or regressor: {0}\".format(self.model.estimator)\n raise Exception(err)\n \n # Check which validation strategy is to be used, if any\n # For an explanation of cross validation in scikit-learn see: http://scikit-learn.org/stable/modules/cross_validation.html#multimetric-cross-validation\n if self.model.time_series_split > 0:\n self.model.validation = \"timeseries\"\n # Set up cross validation to be performed using TimeSeriesSplit\n self.model.cv = TimeSeriesSplit(n_splits=self.model.time_series_split, max_train_size=self.model.max_train_size)\n elif self.model.cv > 0:\n self.model.validation = \"k-fold\"\n elif self.model.test_size > 0:\n self.model.validation = \"hold-out\"\n else:\n self.model.validation = \"external\"\n\n if self.model.validation == \"hold-out\": \n # Split the data into training and testing subsets\n self.X_train, self.X_test, self.y_train, self.y_test = \\\n train_test_split(train_test_df, target_df, test_size=self.model.test_size, random_state=self.model.random_state)\n else:\n self.X_train = train_test_df\n self.y_train = target_df\n \n # Add the training and test data to the model if required\n if self.model.retain_data:\n self.model.X_train = self.X_train\n self.model.y_train = self.y_train\n \n try:\n self.model.X_test = self.X_test\n self.model.y_test = self.y_test\n except AttributeError:\n pass\n \n # Scale the targets and increase stationarity if required\n if self.model.scale_target or self.model.make_stationary:\n # Set up the target transformer\n self.model.target_transformer = TargetTransformer(scale=self.model.scale_target, make_stationary=self.model.make_stationary, stationarity_lags=self.model.stationarity_lags,\\\n missing=self.model.missing, scaler=self.model.scaler, logfile=self.logfile, **self.model.scaler_kwargs)\n\n # Fit the transformer to the training targets\n self.model.target_transformer = self.model.target_transformer.fit(self.y_train)\n\n # Apply the transformer to the training targets\n self.y_train = self.model.target_transformer.transform(self.y_train)\n # Drop samples where the target cannot be transformed due to insufficient lags\n self.X_train = self.X_train.iloc[len(self.X_train)-len(self.y_train):] \n \n # Add lag observations to the samples if required\n if self.model.lags or self.model.lag_target:\n # Check if the current sample will be included as an input, or whether we only use lag observations for predictions\n extrapolate = 1 if self.model.current_sample_as_input else 0\n # Add the lag observations\n self.X_train = self._add_lags(self.X_train, self.y_train, extrapolate=extrapolate, update_features_df=True)\n # Drop targets for samples which were dropped due to null values after adding lags.\n if len(self.y_train) > len(self.X_train):\n self.y_train = self.y_train.iloc[len(self.y_train)-len(self.X_train):]\n\n # If this is a Keras estimator, we require the preprocessing to return a data frame instead of a numpy array\n prep_return = 'df' if self.model.using_keras else 'np'\n\n # Construct the preprocessor\n prep = Preprocessor(self.model.features_df, return_type=prep_return, scale_hashed=self.model.scale_hashed, 
scale_vectors=self.model.scale_vectors,\\\n missing=self.model.missing, scaler=self.model.scaler, logfile=self.logfile, **self.model.scaler_kwargs)\n\n # Setup a list to store steps for the sklearn pipeline\n pipe_steps = [('preprocessor', prep)]\n\n if self.model.dim_reduction:\n # Construct the dimensionality reduction object\n reduction = self.decomposers[self.model.reduction](**self.model.dim_reduction_args)\n \n # Include dimensionality reduction in the pipeline steps\n pipe_steps.append(('reduction', reduction))\n self.model.estimation_step = 2\n else:\n self.model.estimation_step = 1 \n\n # If this is a Keras estimator, update the input shape and reshape the data if required\n if self.model.using_keras:\n # Update the input shape based on the final number of features after preprocessing\n self._keras_update_shape(prep)\n\n # Add the Keras build function, architecture and prediction_periods to the estimator keyword arguments\n self.model.estimator_kwargs['build_fn'] = self._keras_build_fn\n self.model.estimator_kwargs['architecture'] = self.model.architecture\n self.model.estimator_kwargs['prediction_periods'] = self.model.prediction_periods\n\n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(10)\n \n # Check than an identifier has been provided for sorting data if this is a sequence prediction problem\n if self.model.lags or len(self.model.first_layer_kwargs[\"input_shape\"]) > 1:\n assert len(self.model.original_features_df[self.model.original_features_df['variable_type'].isin([\"identifier\"])]) == 1, \\\n \"An identifier is mandatory when using lags or with sequence prediction problems. Define this field in your feature definitions.\"\n\n # Cater for multi-step predictions\n if self.model.prediction_periods > 1:\n # Transform y to a vector of values equal to prediction_periods\n self.y_train = utils.vectorize_array(self.y_train, steps=self.model.prediction_periods)\n # Drop values from x for which we don't have sufficient y values\n self.X_train = self.X_train.iloc[:-len(self.X_train)+len(self.y_train)]\n\n # Add a pipeline step to update the input shape and reshape the data if required\n # This transform will also add lag observations if specified through the lags parameter\n # If lag_target is True, an additional feature will be created for each sample using the previous value of y \n reshape = Reshaper(first_layer_kwargs=self.model.first_layer_kwargs, logfile=self.logfile)\n pipe_steps.append(('reshape', reshape))\n self.model.estimation_step += self.model.estimation_step\n\n # Avoid tensorflow error for keras models\n # https://github.com/tensorflow/tensorflow/issues/14356\n # https://stackoverflow.com/questions/40785224/tensorflow-cannot-interpret-feed-dict-key-as-tensor\n kerasbackend.clear_session()\n \n # Try assuming the pipeline involves a grid search\n try:\n # Construct an estimator\n estimator = self.algorithms[self.model.estimator](**self.model.estimator_kwargs)\n\n # Prepare the grid search using the previously set parameter grid\n grid_search = GridSearchCV(estimator=estimator, param_grid=self.model.param_grid, **self.model.grid_search_args)\n \n # Add grid search to the pipeline steps\n pipe_steps.append(('grid_search', grid_search))\n\n # Construct the sklearn pipeline using the list of steps\n self.model.pipe = Pipeline(pipe_steps)\n\n if self.model.validation in [\"k-fold\", \"timeseries\"]:\n # Perform K-fold cross validation\n self._cross_validate()\n\n # Fit the training data to 
the pipeline\n if self.model.using_keras:\n # https://stackoverflow.com/questions/54652536/keras-tensorflow-backend-error-tensor-input-10-specified-in-either-feed-de\n session = tf.Session()\n kerasbackend.set_session(session)\n with session.as_default():\n with session.graph.as_default():\n sys.stdout.write(\"\\nMODEL: {}, INPUT SHAPE: {}\\n\\n\".format(self.model.name, self.model.first_layer_kwargs['input_shape']))\n y = self.y_train.values if self.y_train.shape[1] > 1 else self.y_train.values.ravel()\n self.model.pipe.fit(self.X_train, y)\n else:\n self.model.pipe.fit(self.X_train, self.y_train.values.ravel())\n\n # Get the best parameters and the cross validation results\n grid_search = self.model.pipe.named_steps['grid_search']\n self.model.best_params = grid_search.best_params_\n self.model.cv_results = grid_search.cv_results_\n\n # Get the best estimator to add to the final pipeline\n estimator = grid_search.best_estimator_\n\n # Update the pipeline with the best estimator\n self.model.pipe.steps[self.model.estimation_step] = ('estimator', estimator)\n\n except AttributeError:\n # Construct an estimator\n estimator = self.algorithms[self.model.estimator](**self.model.estimator_kwargs)\n\n # Add the estimator to the pipeline steps\n pipe_steps.append(('estimator', estimator))\n\n # Construct the sklearn pipeline using the list of steps\n self.model.pipe = Pipeline(pipe_steps)\n\n if self.model.validation in [\"k-fold\", \"timeseries\"]:\n # Perform K-fold cross validation\n self._cross_validate()\n\n # Fit the training data to the pipeline\n if self.model.using_keras:\n # https://stackoverflow.com/questions/54652536/keras-tensorflow-backend-error-tensor-input-10-specified-in-either-feed-de\n session = tf.Session()\n kerasbackend.set_session(session)\n with session.as_default():\n with session.graph.as_default():\n sys.stdout.write(\"\\nMODEL: {}, INPUT SHAPE: {}\\n\\n\".format(self.model.name, self.model.first_layer_kwargs['input_shape']))\n y = self.y_train.values if self.y_train.shape[1] > 1 else self.y_train.values.ravel()\n self.model.pipe.fit(self.X_train, y)\n else:\n self.model.pipe.fit(self.X_train, self.y_train.values.ravel())\n \n if self.model.validation == \"hold-out\": \n # Evaluate the model using the test data \n self.calculate_metrics(caller=\"internal\")\n \n if self.model.calc_feature_importances:\n # Select the dataset for calculating importances\n if self.model.validation == \"hold-out\":\n X = self.X_test\n y = self.y_test # Already a numpy array after calculate_metrics\n else:\n X = self.X_train\n y = self.y_train.values.ravel()\n \n # Calculate model agnostic feature importances\n self._calc_importances(X = X, y = y)\n\n # Persist the model to disk\n self.model = self.model.save(self.model.name, self.path, overwrite=self.model.overwrite, compress=self.model.compress)\n \n # Update the cache to keep this model in memory\n self._update_cache()\n \n # Prepare the output\n if self.model.validation != \"external\": \n message = [[self.model.name, 'Model successfully trained, tested and saved to disk.',\\\n time.strftime('%X %x %Z', time.localtime(self.model.state_timestamp)),\\\n \"{0} model has a score of {1:.3f} against the test data.\"\\\n .format(self.model.estimator, self.model.score), self.model.score]]\n else:\n message = [[self.model.name, 'Model successfully trained and saved to disk.',\\\n time.strftime('%X %x %Z', time.localtime(self.model.state_timestamp)),\\\n \"{0} model score unknown as test_size was <= 0.\"\\\n .format(self.model.estimator), 
np.NaN]]\n \n self.response = pd.DataFrame(message, columns=['model_name', 'result', 'time_stamp', 'score_result', 'score'])\n \n # Send the reponse table description to Qlik\n self._send_table_description(\"fit\")\n \n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(4)\n \n # Finally send the response\n return self.response",
"def fit(self, X, Y, **fit_params):\n ...",
"def fit(self, X, Y, **fit_params):\n ...",
"def fit(self, X, y):\n self.X_data = X\n self.y = y",
"def fit(self, X, y=None, **kwargs):\n raise NotImplementedError()",
"def fit(self, X):\n self._fit_X = X",
"def fit(self, Y):\n raise NotImplementedError",
"def fit_model(train_dataset, train_labels):\n model = make_model(get_models())\n model.fit(train_dataset, train_labels)\n return model",
"def fit(model, data, test_ids, exp_name, train_ids=None):\n if model.model_type == 'torch':\n size = len(data[0])\n else:\n size = data[0].shape[0]\n \n if train_ids == None:\n train_ids = [i for i in range(size) if i not in test_ids]\n scaler = pka_scaler(data[1][train_ids])\n \n if model.data_type == 'descriptors':\n desc_scaler = StandardScaler()\n scaling_data = data[0][train_ids]\n desc_scaler.fit(scaling_data)\n data[0] = desc_scaler.transform(data[0])\n else:\n scaling_data = None\n \n trained_model = train(model, train_ids, data, scaler)\n results = test(model, trained_model, test_ids, data, scaler)\n model.experiments[exp_name] = {'model':trained_model, 'results':results, 'scaler':scaler, 'desc scaling data':scaling_data}\n return results",
"def fit(self):\n raise NotImplementedError('')",
"def fit(self, X, Y, **kwargs):\n raise NotImplementedError",
"def fit(self) -> None:\n start_time = time.time()\n # ---- START -----\n train_df = self.processed_train_df[self.processed_train_df[self.filter_col_name]].dropna()\n train_features = train_df[self.feature_list]\n for label, model in zip(self.label_list, self.models):\n model.fit(train_features, train_df[label])\n # ---- END -----\n end_time = time.time()\n print(\"Finished fitting : elasped time : \" + str(end_time - start_time))",
"def fit(self, data, labels, n_epochs=20):\n self.model.fit(x=data, y=labels, batch_size=self.batch_size, \n validation_split=0.1 if self.early_stopping else None, epochs=n_epochs,\n callbacks=[self.es] if self.early_stopping else None)",
"def fit(self, X, y=..., **fit_params):\n ...",
"def fit(self, data, labels):\n self.clf.fit(data, labels)",
"def fit(self, X):\n raise NotImplementedError('Abstract method \"fit\" must be '\n 'specialised!')",
"def fit(self, data: np.array, labels: np.array):\n self.model.fit(squeeze_keep_batch(data), squeeze_keep_batch(labels))",
"def fit_model(self):\r\n\t\tself.mu = np.mean(self.x, axis = 0)\r\n\t\tself.sig = np.std(self.x, axis = 0)",
"def fit(self, X, y=None):\n #import pdb\n #pdb.set_trace()\n return self.partial_fit(X, y)",
"def fit(self):\n \n print(\"Training model...\")\n center = self.center\n model = self.model\n n_epochs = self.config.epochs\n best_val_loss = np.inf\n for epoch in range(n_epochs):\n model.train()\n loop = tqdm(self.train_dataloader)\n for xb, _ in loop:\n loss = self.loss_batch(xb)\n loop.set_description(\"Epoch [{}/{}] \".format(epoch, n_epochs))\n loop.set_postfix({\"loss\":loss.item()})\n\n model.eval()\n with torch.no_grad():\n losses = [torch.cdist(model(xb), center.view(1, -1))\n for xb, yb in self.val_dataloader]\n losses = [x.item() for xb in losses for x in xb]\n val_loss = np.mean(losses) + self.get_regularizer_term()\n print(\"val_loss={:.6f}\".format(val_loss))\n\n if val_loss < best_val_loss:\n best_model_state = copy.deepcopy(model.state_dict())\n best_val_loss = val_loss\n self.save_model(self.config.mnist_cnn_weights, best_model_state)",
"def fit(self, input_data, targets):\n self.ensemble_model_.fit(input_data, targets)",
"def train(self):\n\t\tself.model.fit(self.training_data, self.training_labels)",
"def fit(self,X,y):\n\n d = X.shape[1]\n # 1. sketch the data\n self.B,a = self._sketch(X,method=self.fd_mode)\n #H = B.T@B + (self.alpha+a)*np.eye(d)\n #self.H = H\n self.H_inv = self._get_inv() #np.linalg.pinv(H)\n self.coef_ = self.H_inv@(X.T@y) #np.linalg.solve(H, X.T@y)\n self.is_fitted = True",
"def fit(self, x):\n x = np.asarray(x)\n _ = self.fit_transform(x)",
"def fit(self, model_name, **model_params):\n model = self.model_dict[model_name]\n model.set_params(**model_params)\n self.model = model.fit(\n self.data.loc[:, self.selected_features_], self.data.loc[:, self.target_name])",
"def train(self, x_data, y_data):\n for model in self.list_of_models:\n model.fit(x_data, y_data)\n self.trained_models.append(model)",
"def fit(self, X):\n\n return self._fit(X)",
"def fit(self, data):\n if not self._transformers:\n return\n\n transformed_data = self._preprocess(data)\n final_step = self._transformers[-1]\n final_step[1].fit(transformed_data)",
"def fit(self):\n self.model = RegressionModel(model_expression=self.model_expression,\n fit_filters=self.filters, predict_filters=self.out_filters,\n ytransform=None, name=self.name)\n\n df = get_data(tables = self.tables,\n filters = self.filters,\n model_expression = self.model_expression)\n \n results = self.model.fit(df)\n \n self.name = self._generate_name()\n self.summary_table = str(results.summary())\n print(self.summary_table)\n \n # We don't strictly need to save the fitted parameters, because they are also\n # contained in the urbansim.models.RegressionModel() sub-object. But maintaining\n # a parallel data structure to other templates will make it easier to refactor the\n # code later on to not rely on RegressionModel any more. \n \n self.fitted_parameters = results.params.tolist()\n self.residuals = results.resid",
"def partial_fit(self, X, y=..., **fit_params):\n ...",
"def fit(self, X, y, sample_weight=...):\n ...",
"def fitData(self,x=None,y=None,fixedpars='auto',weights=None,savedata=True,\n updatepars=True,fitf=False,contraction='sumsq',**kwargs):\n from scipy import optimize as opt\n from operator import isMappingType\n from functools import partial\n\n self._fitchi2 = None #clear saved chi-squared if it exists\n\n if x is None:\n if hasattr(self,'data') and self.data is not None:\n x = self.data[0]\n else:\n raise ValueError('No x data provided and no fitted data already present')\n else:\n x = np.array(x,copy=False)\n if x.dtype.kind == 'f':\n #for unclear reasons, fitting sometimes misbehaves if a float32\n #is used instead of the python system float (usually float64/double)\n #TODO:understand why this is necessary\n x = x.astype(float)\n\n if y is None:\n if hasattr(self,'data') and self.data is not None:\n y = self.data[1]\n else:\n raise ValueError('No y data provided and no fitted data already present')\n else:\n y = np.array(y,copy=False)\n\n if fitf:\n fitfunc = self.f\n else:\n fitfunc = self._filterfunc\n\n if fitfunc(x,*self.parvals).shape != y.shape:\n raise ModelTypeError('y array does not match output of model for input x')\n\n y = y.ravel()\n\n if self.fittype is None:\n method = self.fittypes[0]\n else:\n method = self.fittype\n\n if fixedpars is 'auto':\n fixedpars = self.fixedpars if hasattr(self,'fixedpars') else ()\n if fixedpars is None:\n fixedpars = tuple()\n\n ps=list(self.params)\n v=list(self.parvals) #initial guess\n\n if method not in self._optfittypes:\n for cls in self.__class__.__mro__:\n if hasattr(cls,'_fittypes') and isMappingType(cls._fittypes):\n if method in cls._fittypes:\n fitter = partial(cls._fittypes[method],self)\n break\n else:\n fitter = 'fit'+method[0].upper()+method[1:]\n if hasattr(self,fitter):\n fitter = getattr(self,fitter)\n else:\n raise ValueError('could not locate fitting function for fitting method '+method)\n\n res = fitter(x,y,fixedpars=fixedpars,weights=weights,**kwargs)\n\n #ensure that res is at least a tuple with parameters in elem 0\n from operator import isSequenceType\n if len(res)==0 or not isSequenceType(res[0]):\n res = (res,)\n\n if fixedpars:\n for p in fixedpars:\n i=ps.index(p)\n del ps[i]\n del v[i]\n else:\n if weights is None:\n wf = lambda v:1\n elif callable(weights):\n wf = weights\n else:\n w = np.array(weights,copy=False)\n if w.shape == y.shape:\n w = w.ravel()\n elif w.shape[1:] == y.shape and w.shape[0]==2:\n w = (w[0]**2+w[1]**2)**0.5\n else:\n raise ModelTypeError('weights shape does not match y')\n\n wf = lambda v:w\n\n kwargs['full_output'] = 1\n\n if fixedpars:\n for p in fixedpars:\n i=ps.index(p)\n del ps[i]\n del v[i]\n\n #make a function of signature f(x,v) where v are the parameters to be fit\n pdict=dict([(p,getattr(self,p)) for p in fixedpars])\n def f(x,v):\n pdict.update(dict(zip(ps,v)))\n #return fitfunc(x,**pdict)\n params = [pdict[a] for a in self._pars]\n return fitfunc(x,*params).ravel()\n else:\n f=lambda x,v:fitfunc(x,*v).ravel()\n\n if method == 'leastsq':\n if 'frac' in contraction:\n g=lambda v,x,y:wf(v)*(1-f(x,v)/y)\n else:\n g=lambda v,x,y:wf(v)*(y-f(x,v))\n res=opt.leastsq(g,v,(x,y),**kwargs)\n else:\n if 'frac' in contraction:\n if 'sq' in contraction:\n def g1(v,x,y):\n diff=1-f(x,v)/y\n return diff*diff\n elif 'abs' in contraction:\n def g1(v,x,y):\n diff=1-f(x,v)/y\n return np.abs(diff)\n else:\n def g1(v,x,y):\n diff=1-f(x,v)/y\n return diff\n else:\n if 'sq' in contraction:\n def g1(v,x,y):\n diff=y-f(x,v)\n return diff*diff\n elif 'abs' in contraction:\n def g1(v,x,y):\n 
diff=y-f(x,v)\n return np.abs(diff)\n else:\n def g1(v,x,y):\n diff=y-f(x,v)\n return np.diff\n if 'sum' in contraction:\n g=lambda v,x,y:np.sum(wf(v)*g1(v,x,y),axis=None)\n elif 'mean' in contraction:\n g=lambda v,x,y:np.mean(wf(v)*g1(v,x,y),axis=None)\n elif 'median' in contraction:\n g=lambda v,x,y:np.median(wf(v)*g1(v,x,y),axis=None)\n elif 'prod' in contraction:\n g=lambda v,x,y:np.prod(wf(v)*g1(v,x,y),axis=None)\n else:\n raise ValueError('no valid contraction method provided')\n\n if method == 'fmin':\n res=opt.fmin(g,v,(x,y),**kwargs)\n elif method == 'fmin_powell':\n res=opt.fmin_powell(g,v,(x,y),**kwargs)\n elif method == 'fmin_cg':\n #TODO:smartly include derivative\n res=opt.fmin_cg(g,v,args=(x,y),**kwargs)\n elif method == 'fmin_bfgs':\n #TODO:smartly include derivative\n res=opt.fmin_bfgs(g,v,args=(x,y),**kwargs)\n elif method == 'fmin_l_bfgs_b':\n #TODO:smartly include derivative\n del kwargs['full_output']\n kwargs['approx_grad'] = True\n res=opt.fmin_l_bfgs_b(g,v,args=(x,y),**kwargs)\n elif method == 'fmin_tnc':\n #TODO:smartly include derivative\n del kwargs['full_output']\n kwargs['approx_grad'] = 1\n res=opt.fmin_tnc(g,v,args=(x,y),**kwargs)\n elif method == 'fmin_cobyla':\n #TODO:smartly include derivative\n del kwargs['full_output']\n res=opt.fmin_cobyla(g,v,args=(x,y),**kwargs)\n res = [res]\n elif method == 'fmin_slsqp':\n #TODO:smartly include derivative\n res=opt.fmin_slsqp(g,v,args=(x,y),**kwargs)\n elif method == 'anneal' or method == 'global':\n res=opt.anneal(g,v,args=(x,y),**kwargs)\n else:\n raise ValueError('Unrecognzied method %s'%method)\n\n self.lastfit = res\n v=res[0] #assumes output is at least a tuple - needs \"full_output=1 !\"\n\n try:\n v[0]\n except IndexError: #only one parameter\n v=np.array([v])\n\n if updatepars:\n for par,newv in zip(ps,v):\n setattr(self,par,newv)\n\n if savedata:\n self.data = (x,y,weights)\n\n return v",
"def fit(self, X):\n self.rel_to_idx, self.ent_to_idx = create_mappings(X)\n self.is_fitted = True",
"def fit(self, X, y):\n assert y.shape[1] == self.n_classes and y.shape[0] == X.shape[0]\n self.model.fit(X, y, epochs=self.n_iter, batch_size=self.batch_size, verbose=self.verbose)\n return self",
"def fit(self, X, y):\n self.X_train = X\n self.y_train = y",
"def train(self, x, t):\n for i in range(self.number_model):\n curr_model = self.all_model[i]\n curr_model.fit(x, t)",
"def fit(self, X, y=None, **fit_params):\n return self",
"def fit_model(model):\r\n\r\n print \"Fitting vars:\"\r\n print model.vars.describe()\r\n\r\n import time\r\n start_time = time.time()\r\n model.map = mc.MAP(model.vars)\r\n model.map.fit(method='fmin_powell', verbose=1)\r\n\r\n model.mcmc = mc.MCMC(model.vars)\r\n model.mcmc.use_step_method(mc.AdaptiveMetropolis, model.vars['gamma'])\r\n model.mcmc.sample(20000, 10000, 100)\r\n model.mcmc.wall_time = time.time() - start_time",
"def fit(self, X):\n \n self._fit(X)\n return self",
"def fit(self, X, y=None):\n return self",
"def fit(self, X, y):\n \n # Create a copy of X that has a column for the intercept if the user\n # wants one.\n X_copy = self._add_intercept(X)\n \n # Fit the model coefficients using SVD.\n self._fit_svd(X_copy, y, alpha=0.0)\n \n # Calculate model statistics.\n self._calculate_model_stats_ols(X, y)\n \n return",
"def fit_transform(self, X, y=...):\n ...",
"def fit_store(X, y):\n print(\"Fitting model to training set...\")\n model = pr.build_model.fit_model(X, y)\n pickle.dump(model, open(\"models/\" + \"model\" + \".pkl\", \"wb\"))",
"def fit(self, data):\n if data is None:\n self.train_self()\n else:\n # not needed this week\n pass",
"def fit(self, X, y=None):\n # train on a training dataset\n self.logger.info(\n self.__name__ + ' is trained on {:d} samples with {:d} features.'.format(X.shape[0], X.shape[1]))\n pass",
"def fit(self, *_):\n return self",
"def fit(self, *_):\n return self",
"def fit(self, *_):\n return self",
"def _fit(self, _X, _y):\n\n self.model = linear_model.LogisticRegression(penalty=self.penalty, random_state=self.seed,\n solver='saga', n_jobs=self.n_jobs)\n self.model.fit(_X, _y)",
"def fit_transform(self, data):\n self.fit(data)\n return self.transform(data)",
"def fit(self, X, y):\n self.__X = X\n self.__y = y\n self.__trained = True"
] | [
"0.81083125",
"0.78165245",
"0.76790136",
"0.74471337",
"0.73553294",
"0.72820145",
"0.7271266",
"0.72706705",
"0.72706705",
"0.72706705",
"0.72706705",
"0.72706705",
"0.72706705",
"0.72706705",
"0.72706705",
"0.72706705",
"0.72706705",
"0.72503513",
"0.7200485",
"0.7165293",
"0.7160537",
"0.71546054",
"0.71546054",
"0.71546054",
"0.7151678",
"0.7131963",
"0.7127522",
"0.71174663",
"0.7106154",
"0.7106065",
"0.7098847",
"0.7096153",
"0.7089722",
"0.70680594",
"0.7045082",
"0.70300835",
"0.7003736",
"0.69531465",
"0.6941921",
"0.6939507",
"0.69366264",
"0.6924526",
"0.6875212",
"0.6875212",
"0.68731326",
"0.6869962",
"0.68556714",
"0.6850119",
"0.68402314",
"0.6835348",
"0.6810672",
"0.6810672",
"0.67831343",
"0.67796135",
"0.67678005",
"0.6766598",
"0.6746214",
"0.67321205",
"0.6727493",
"0.6726939",
"0.67091334",
"0.6704163",
"0.6701424",
"0.6691145",
"0.6686866",
"0.668106",
"0.66770166",
"0.66455036",
"0.6604102",
"0.660016",
"0.6576693",
"0.6575616",
"0.65732706",
"0.65556",
"0.65530217",
"0.6550138",
"0.6543111",
"0.65396994",
"0.6530429",
"0.65154815",
"0.6514728",
"0.6499653",
"0.6499272",
"0.64978516",
"0.64821833",
"0.64810646",
"0.64767975",
"0.64650303",
"0.64646333",
"0.6462361",
"0.6452254",
"0.64508164",
"0.6440254",
"0.6440134",
"0.64365965",
"0.64365965",
"0.64365965",
"0.64278597",
"0.64212495",
"0.64177346"
] | 0.6884465 | 42 |
After a model has been fit, predict returns the fitted values. This is a placeholder intended to be overwritten by individual models. | def predict(self, params, exog=None, *args, **kwargs):
raise NotImplementedError # pragma: no cover | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fittedvalues(self):\n return self.model.predict(self.params)\n # TODO: GH#5255 is this necessarily equivalent to self.predict()?",
"def fit_predict(self):\n raise AttributeError",
"def fit_predict(self, X, y=None):\n return super().fit_predict(X, y)",
"def predict_only(self):",
"def predict(self, model, context, data):\n pass",
"def predict(self):\n raise NotImplementedError",
"def fit_predict(self):\n self.classifier = self.model\n self.classifier.fit(self.X_sample, self.y_sample)\n self.y_pred = self.classifier.predict(self.X_test)",
"def predict(self): \n return self.model.predict(self.test_x)",
"def predict(self, fit_result, x):\n raise NotImplementedError()",
"def make_predict_step(self):\n return self.make_eval_step()",
"def predict(self):\n\n self.eval()\n return self.forward(self.data)",
"def on_predict_begin(self, logs=None):",
"def on_predict_begin(self, logs=None):",
"def predict(self, model, x_test):\n pass",
"def _get_prediction(self):\n raise NotImplementedError",
"def predict(self, to_predict):\n\t\treturn self.model.predict(to_predict)",
"def predict():\n model = LinearRegression().fit(input_data[['x']], input_data['y'])\n future_vals = [[20], [21], [22]]\n return None",
"def predict(self):\n self.kf.predict()\n self.nb_kf_pred += 1\n if self.time_since_update > 0:\n self.hit_streak = 0\n self.time_since_update += 1\n self.history.append(self.kf.x[:2].reshape(-1))\n return self.history[-1]",
"def predict(self):\n return _RateElasticNetRegressor.predict(self)",
"def _predict(self, x):\n pass",
"def fit_predict(self, X, y=None):\n return self.fit(X, y).predict(X)",
"def predict(self, **kwargs):\n raise NotImplementedError",
"def predict(self):\n if ((self.kf.x[6] + self.kf.x[2]) <= 0):\n self.kf.x[6] *= 0.0\n self.kf.predict()\n self.age += 1\n if (self.time_since_update > 0):\n self.hit_streak = 0\n self.time_since_update += 1\n self.history.append(convert_x_to_bbox(self.kf.x))\n return self.history[-1]",
"def predict(self, obs):\n pass",
"def get_prediction(self):\n return self.prediction",
"def _predict(self, testX):\n pass",
"def on_predict_end(self, logs=None):",
"def on_predict_end(self, logs=None):",
"def doPredict(self, data: StockData) -> float:\r\n pass",
"def predict(self, obs):\n return self.model(obs)",
"def predict(self, X):\n ...",
"def predict(self, X):\n ...",
"def predict(self, X):\n ...",
"def predict(self):\n batch = get_predict_batch(1, num_rec_out=self.num_test_rec)\n self.g_model.test_batch(\n batch, self.global_step, num_rec_out=self.num_test_rec)",
"def predict_response_variable(self, **kwargs):\n pass",
"def fit_predict(self, X, y):\n return self.fit(X, y).predict(X)",
"def fit_predict(self, X, y):\n return self.fit(X, y).predict(X)",
"def fit_predict(self, X, y):\n return self.fit(X, y).predict(X)",
"def fit_predict(self, X, y):\n return self.fit(X, y).predict(X)",
"def predict():\n import trace\n trace.predict()",
"def predict(self, predPoints=None):",
"def predict(self, instances):\r\n raise NotImplementedError",
"def predict_proba(self):\n ...",
"def after_pred(self):\n # If training then skip\n if self.training:\n return\n\n # Get ground truths in epoch 0 i.e. start of training\n if self.epoch == 0:\n self.y_true.extend(self.y.cpu().flatten().numpy())\n\n # Get predictions from each batch and add them to prediction container\n y_pred = self.pred.detach().cpu()\n \n self.y_pred.extend(y_pred.flatten().numpy())",
"def predict(self, data):\n return self.result.predict(data)",
"def fit_predict(self, X, y=None):\n self.fit(X)\n return self.labels_",
"def fit_predict(self, X, y=None):\n self.fit(X)\n return self.labels_",
"def predictions(self):\n return self._pred",
"def predict(self, X: np.ndarray) -> np.ndarray:\n if not self._fitted:\n raise ValueError(\"Model is not fitted.\")\n\n return self._fit_func(X)",
"def predict(self, X: np.ndarray) -> np.ndarray:\n if not self._fitted:\n raise ValueError(\"Model is not fitted.\")\n\n return self._fit_func(X)",
"def predict(self, X):",
"def predict(self, X):",
"def predict(self, X):\n check_is_fitted(self, ['estimators_', 'final_estimator_'])\n return self.final_estimator_.predict(self.transform(X))",
"def getPredictions(self):\n\t\tself.bestLabel = self.testingProbs.apply(lambda x: x.argmax(),1)",
"def predict(self,X): \n return self._predict(X)",
"def _pre_fit(self):\n pass",
"def fit_predict(\n self,\n X: np.ndarray,\n y: np.ndarray,\n ) -> Tuple[np.ndarray, np.ndarray]:\n self.fit(X)\n return self.predict(y)",
"def predict(self, x, **kwargs):\n return self.tuner.get_best_models(1)[0].predict(x, **kwargs)",
"def predict(self, X):\n pass",
"def predict(self, X):\n pass",
"def predict(self, X):\n pass",
"def predict(self, X):\n if self.model is None:\n print(\"%s.predict: implement me\" % (self.__class__.__name__))\n return np.zeros((1, self.odim))",
"def predict(self, trained_model, prediction_datetime):\n return trained_model.predict()",
"def predict(self, X):\n self._check_is_fitted('predict')\n return self.best_estimator_.predict(X)",
"def predict_batch(self, model, context, data=None):\n pass",
"def predict_single_fold(self, model: TorchBasedLinearEstimator, dataset: TabularDataset) -> np.ndarray:\n pred = model.predict(dataset.data)\n\n return pred",
"def make_predictions(self):\n if is_classification(self.model):\n if self.ct == None:\n prediction = self.model.predict(self.input_data.to_numpy())\n probabilities = self.model.predict_proba(self.input_data.to_numpy())\n return prediction, probabilities\n elif self.ct != None: \n prediction = self.model.predict(self.data_into_model())\n probabilities = self.model.predict_proba(self.data_into_model())\n return prediction, probabilities\n else:\n raise Exception((\"{} not supported. Please create an issue on Github\").format(self.model))\n \n else:\n if self.ct == None:\n prediction = self.model.predict(self.input_data)\n return prediction\n elif self.ct != None: \n prediction = self.model.predict(self.data_into_model())\n return prediction\n else:\n raise Exception((\"{} not supported. Please create an issue on Github\").format(self.self.model))",
"def update_predictions(self, context):\n x, y, o = context.get_predictions()\n self.x_eval += x\n self.y_eval += y\n self.o_eval += o\n self.write_predictions(o)",
"def prediction_a(self):\n return self._prediction_a",
"def predict(self, data, verbose=0):\n\n if self.model is None:\n raise StandardError('Model is not built. Run build method or load model before fitting')\n return self.model.predict(data, 1, verbose) # ,steps)",
"def predict(self, data, verbose=0):\n\n if self.model is None:\n raise StandardError('Model is not built. Run build method or load model before fitting')\n return self.model.predict(data, 1, verbose) # ,steps)",
"def predict(self, data):\n\t\traise NotImplementedError",
"def _predict(self, X):\n raise NotImplementedError",
"def fit_predict(self, X: TimeSeriesInstances, y=None) -> np.ndarray:\n self.fit(X)\n return self.predict(X)",
"def fit_predict(self, X):\n MyKMedoids.fit(self, X)\n return MyKMedoids.predict(self, X)\n pass",
"def predict(model, x):\n y = model.predict(x)\n print(\"y\")\n print(y)\n return y[0]",
"def predict(self, data):\n return self.forward_propagate(data)",
"def process(self, data):\n return self.estimator.predict(data)",
"def predict(self, obs):\n pred_q = self.model(obs)\n return pred_q",
"def fit():\n pass",
"def before_epoch(self):\n\n # Prepare prediction container in every epoch, set/reset here as new predictions are obtained after each epoch as NN learns\n self.y_pred = []",
"def _fit_predict(X_train, y_train, X_test):\n raise NotImplementedError()",
"def predict(self, data):\n\n prediction = None\n if self.model is not None:\n prediction = self.model.predict(data)\n return prediction",
"def predict(self,x):\n return self._model.predict(x)",
"def _predict(self):\n bull_wr = self.bull_win_rate\n self.df = self.df.assign(bull_wr=bull_wr)",
"def predict(self, kf):\n self.mean, self.covariance = kf.predict(self.mean, self.covariance)",
"def predict(self, x):\n return self.model.predict(x)",
"def predict_and_update(self, z):",
"def predict(model, X_testing):\n predictions = model.predict(X_testing)\n\n return predictions",
"def predict_next(self):\n return self._get_mean()",
"def predict_next(self):\n return self._get_mean()",
"def predict(self, xs, stochastic=True, **kwargs):\n return super().predict(xs, **kwargs)",
"def predict(self, model_input):\n # Should return a dictionary of move-prior pairs and the value from\n # the network's value head\n pass",
"def predict(self, data_in):\n pass",
"def predict(self, test_data):\r\n return self.gs.predict(test_data)",
"def prediction_b(self):\r\n return self._prediction_b",
"def predict(self, X): \n return self.f(X, self.coefficients)",
"def predict(self, X):\n return self.model.predict(X)",
"def predict(self, X):\n return self.model.predict(X)",
"def predict(model, x):\n # Set model to evalution state to turn off dropout\n model.eval()\n x = to_Variable(x)\n yhat = model(x)\n _, tag = yhat.max(1)\n \n return tag.data.cpu().numpy()"
] | [
"0.763376",
"0.7340481",
"0.7176646",
"0.70071083",
"0.69615495",
"0.6811762",
"0.6780039",
"0.67395735",
"0.67003053",
"0.66701806",
"0.6623872",
"0.65716237",
"0.65716237",
"0.65679145",
"0.6562471",
"0.6515628",
"0.6515343",
"0.6505706",
"0.6497324",
"0.64924717",
"0.645448",
"0.64488935",
"0.643731",
"0.6432559",
"0.6430417",
"0.6392675",
"0.63750875",
"0.63750875",
"0.6367485",
"0.63667125",
"0.634725",
"0.634725",
"0.634725",
"0.63330346",
"0.63315475",
"0.6301967",
"0.6301967",
"0.6301967",
"0.6301967",
"0.6296454",
"0.62853044",
"0.6275599",
"0.6252449",
"0.62453616",
"0.6238781",
"0.6225134",
"0.6225134",
"0.62119323",
"0.62074727",
"0.62074727",
"0.62012035",
"0.62012035",
"0.6200179",
"0.61863",
"0.617899",
"0.61748254",
"0.61737806",
"0.6172582",
"0.61676216",
"0.61676216",
"0.61676216",
"0.615814",
"0.6154126",
"0.6153254",
"0.61513406",
"0.6150914",
"0.6149881",
"0.61475307",
"0.6139547",
"0.61376864",
"0.61376864",
"0.6134258",
"0.61259604",
"0.612474",
"0.6116735",
"0.61063033",
"0.6098622",
"0.6096917",
"0.6089328",
"0.6083657",
"0.6083224",
"0.6080989",
"0.6076778",
"0.60674673",
"0.6059033",
"0.6058577",
"0.60557014",
"0.60401785",
"0.60269314",
"0.6019672",
"0.6019672",
"0.6011729",
"0.60110086",
"0.600733",
"0.60021764",
"0.60014284",
"0.59995174",
"0.59903157",
"0.59903157",
"0.59890884"
] | 0.6104935 | 76 |
Initialize (possibly reinitialize) a Model instance. For instance, the design matrix of a linear model may change and some things must be recomputed. | def initialize(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def initialize_model(self):\n pass",
"def init_model(self):\n pass",
"def initialize(self, model):\n pass",
"def __init__(self, model: Model1D):\n self._model = model",
"def re_init(self):\n self.latent.re_init()\n if 're_init' in dir(self.inference_model):\n self.inference_model.re_init()\n if 're_init' in dir(self.generative_model):\n self.generative_model.re_init()",
"def __init__(self):\n logger.debug('Initializing %s model.' % self.__class__.__name__)\n self.dependent_attributes = ['_alpha',\n '_log_like',\n '_gradient','_K',\n '_log_det']\n self._previous_parameters = None # previous parameters from last call\n self.grad_method = None # could be {'finite_difference','adjoint'}\n self.noise_var_constraint = '+ve' # Gaussian noise variance constraint\n return",
"def __init__(self, *args, **kwargs):\n super(AbstractOdeModel, self).__init__(*args, **kwargs)\n self.saved_states = {}\n self.state = self.initial_state.copy()\n self.dirty = False # whether the solver will need to be reset due to a model change before the next solve\n self.set_solver(DefaultSolver())\n self.env = Env.ModelWrapperEnvironment(self)\n assert hasattr(self, 'output_names')",
"def initialize(self):\n LOG.info(\"Initializing Model.\")\n self.model = self.convert(df=self.training_df)\n if self.bootstraps is not None:\n LOG.info(\"Bootstrapping Data.\")\n self.bootstrap_data()",
"def initialize(self) -> None:\n self.model = load(self.path)",
"def __init__(self):\n self.model = None",
"def __init__(self):\n self.model = None",
"def __init__(self, modelwithparams=None, random_number=-1, problem_type='infer'):\n self.modelwithparams = modelwithparams\n self.oldpara = self.modelwithparams\n self.random_number = random_number\n self.flag = True\n self.problem_type = problem_type",
"def initialize_model(self):\n model = self.model_class()\n return model",
"def initialize_model(self, initial_data):\n # EDIT THIS METHOD TO RETURN A MINIMAX MODEL ###\n return None",
"def __init__(self):\n self.scaler = None\n self.model = None\n self.encoder = {}\n\n self._load_model()\n return",
"def _init_model(self, forrest):\n rels = self.get_rels(forrest)\n self._model = RDPModel(rels)",
"def _initialize_model(self):\n max_value = self.data.max()\n\n if self.model_type == self._GAUSSIAN2D:\n model = models.Gaussian2D(\n x_mean=self.x, y_mean=self.y, x_stddev=1, y_stddev=1\n )\n model.amplitude = max_value\n\n # Establish reasonable bounds for the fitted parameters\n model.x_stddev.bounds = (0, self._box / 4)\n model.y_stddev.bounds = (0, self._box / 4)\n model.x_mean.bounds = (self.x - 5, self.x + 5)\n model.y_mean.bounds = (self.y - 5, self.y + 5)\n\n elif self.model_type == self._MOFFAT2D:\n model = models.Moffat2D()\n model.x_0 = self.x\n model.y_0 = self.y\n model.gamma = 2\n model.alpha = 2\n model.amplitude = max_value\n\n # Establish reasonable bounds for the fitted parameters\n model.alpha.bounds = (1, 6)\n model.gamma.bounds = (0, self._box / 4)\n model.x_0.bounds = (self.x - 5, self.x + 5)\n model.y_0.bounds = (self.y - 5, self.y + 5)\n\n model += models.Const2D(self.fit_sky())\n model.amplitude_1.fixed = True\n return model",
"def __init__(self, model: Model1D):\n super().__init__(model=model)",
"def __init__(self, model: Model1D):\n super().__init__(model=model)",
"def __init__(self, model: Model1D):\n super().__init__(model=model)",
"def __init__(self, model: Model1D):\n super().__init__(model=model)",
"def __init__(self, model: Model1D):\n super().__init__(model=model)",
"def __init__(self, *args):\n this = _libsbml.new_Model(*args)\n try: self.this.append(this)\n except: self.this = this",
"def create_model(self):\n self.model = None\n pass",
"def initialize_model(self, config_param_vals = None):\n self._is_initialized = True\n\n self.fmu.instantiate()\n self.fmu.reset()\n self.fmu.setupExperiment(startTime=self.start_time)\n if config_param_vals is not None:\n self._apply_config(config_param_vals)\n self.fmu.enterInitializationMode()\n self.fmu.exitInitializationMode()\n\n return",
"def _initialize_model(rngs):\n init_model_state, init_params = model_def.init(\n rngs, *dummy_input, train=False, debug=False).pop('params')\n # Set bias in the head to low value, such that loss is small initially.\n if config.get('init_head_bias', None) is not None:\n init_params = flax.core.unfreeze(init_params)\n init_params['output_projection'] = optimizers.tree_map_with_names(\n lambda p: jnp.full_like(p, config.init_head_bias),\n init_params['output_projection'],\n match_name_fn=lambda name: 'bias' in name)\n init_params = flax.core.freeze(init_params)\n return init_params, init_model_state",
"def init(self):\n inputs = self.inputs()\n outputs = self.outputs(inputs)\n self.model = tf.keras.Model(inputs=inputs, outputs=outputs)\n self.model.compile(optimizer=self.optimizer() or self.config.get('optimizer'),\n loss=self.loss() or None,\n metrics=self.metrics() or None,\n loss_weights=self.loss_weights() or None,\n weighted_metrics=self.weighted_metrics() or None,\n target_tensors=self.target_tensors() or None)\n if self.config.get('debug'):\n self.model.summary()",
"def __init__(self, model: Optional[Model] = None) -> None:\n self.model = model",
"def new(self):\n self.define_layers()\n self.model = nn.Sequential(*self.layers)\n self.model.cuda()\n self.model = orthogonal_init(self.model)\n\n # Re-count N\n self.count_params()",
"def initialize_model(w, model_list, algo, inverse_transform):\n w_retransformed = np.dot(inverse_transform, w) # re-transform in original coordinates if a scaled R-matrix is used\n # calculate number of terms\n temp_coeffs = w.copy()\n temp_coeffs[temp_coeffs != 0.] = 1. # all non zero terms\n terms = int(np.sum(temp_coeffs))\n\n if len(model_list) != 0: # a previous model for comparison exists\n if not np.array_equal(w_retransformed, model_list[-1].coefficients): # different model than at last step\n model_list.append(Model(w_retransformed, w, terms, algo)) # save new model\n else: # always take first model obtained\n model_list.append(Model(w_retransformed, w, terms, algo)) # save new model",
"def __init__(self, *args):\n this = _libsbml.new_ModelCreator(*args)\n try: self.this.append(this)\n except: self.this = this",
"def initialize(self):\n for key in self.parameter_dict:\n self.models[key] = self._create_model(key)",
"def __init__(self, model: MT):\n self.model: Final[MT] = model",
"def initialisation(self):\n self.create_variables()\n self.create_placeholders()\n self.build_model()\n self.reset_lr(None, True)\n self.build_loss()\n self.initialised = True",
"def __init__(self, model):\n self._model = model",
"def initialize_main_model(self, model, **kwargs):\n return NotImplementedError(\n \"Initializer has not implemented an initialize_main_model method. Derived classes \"\n \"are required to overload this.\"\n )",
"def _initialize_model(rngs):\n init_model_state, init_params = nn.init(\n fn=init_fn, module=model_def)(rngs).pop('params')\n # Set bias in the head to low value, such that loss is small initially.\n if (config.get('init_head_bias', None) is not None and\n 'output_projection' in init_params):\n init_params = flax.core.unfreeze(init_params)\n init_params['output_projection'] = optimizers.tree_map_with_names(\n lambda p: jnp.full_like(p, config.init_head_bias),\n init_params['output_projection'],\n match_name_fn=lambda name: 'bias' in name)\n init_params = flax.core.freeze(init_params)\n return init_params, init_model_state",
"def __init__(self):\n # Initializing the Model with the class\n super(Model, self).__init__()\n # torch.nn.Linear applies a Linear transformation. The first parameter is the size of each input sample. The second is the size of the output sample\n self.linear = torch.nn.Linear(1, 1)",
"def __init__(self, **kwargs):\n super(Model, self).__init__(**kwargs)\n self._params = self.find_params()",
"def __post_init_check(self):\n try:\n t = self.time\n m = self.metadata\n except AttributeError as e:\n clsname = self.__class__.__name__\n raise TypeError(f\"Model not initialized. Please call 'SupernovaModel.__init__' within the '{clsname}.__init__'\") from e",
"def init_model(self):\n cxnlib.CXNNetInitModel(self.handle)",
"def __init__(self, **kwargs):\n super(Model, self).__init__(**kwargs)",
"def initialize_model(self):\n args = self.args\n\n if self.args.search_space == 'nasbench':\n self.model_fn = NasBenchNetSearchDarts\n self.fixmodel_fn = NasBenchNet\n model = self.model_fn(args)\n utils = darts_nasbench_utils\n else:\n raise NotImplementedError(\"Not supported\")\n # finialize model update\n if args.gpus > 0:\n if self.args.gpus == 1:\n model = model.cuda()\n self.parallel_model = model\n else:\n self.model = model\n self.parallel_model = nn.DataParallel(self.model).cuda()\n # IPython.embed(header='checking replicas and others.')\n else:\n self.parallel_model = model\n\n darts = DartsArchitect(model, args=args)\n model = self.parallel_model\n # logging.info(\"DARTS param size = %fMB\", utils.count_parameters_in_MB(darts))\n self.train_fn = partial(darts_train_model, args=args, architect=darts, sampler=None)\n self.eval_fn = partial(darts_model_validation, args=args, verbose=True)\n self.controller = darts\n\n logging.info(\"param size = %fMB\", utils.count_parameters_in_MB(model))\n optimizer = torch.optim.SGD(\n model.parameters(),\n args.learning_rate,\n momentum=args.momentum,\n weight_decay=args.weight_decay,\n )\n\n # scheduler as Cosine.\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(args.epochs), args.learning_rate_min)\n return model, optimizer, scheduler, darts, None",
"def construct(self):\n self.input_size = self.numplanes * self.boardsize**2\n \n if self.hidden:\n layers = [\n torch.nn.Linear(self.input_size, self.hidden), \n torch.nn.ReLU(),\n torch.nn.Linear(self.hidden, self.boardsize**2)\n ]\n else:\n layers = [torch.nn.Linear(self.input_size, self.boardsize**2)]\n\n self.layers = torch.nn.ModuleList(layers)\n self.optimizer = torch.optim.Adam(self.parameters(), lr=1e-5)\n logging.info(\"Model initialized: %s\", self)",
"def initiate(self):\n # if self.opt.checkpoint_encoder:\n # self.load(self.opt.checkpoint_encoder, self.opt.checkpoint_decoder)\n # else:\n # start fresh.\n self.model = Transformer(\n self.opt.src_vocab_size,\n self.opt.tgt_vocab_size,\n self.opt.max_token_seq_len,\n tgt_emb_prj_weight_sharing=self.opt.proj_share_weight,\n emb_src_tgt_weight_sharing=self.opt.embs_share_weight,\n d_k=self.opt.d_k,\n d_v=self.opt.d_v,\n d_model=self.opt.d_model,\n d_word_vec=self.opt.d_word_vec,\n d_inner=self.opt.d_inner_hid,\n n_layers=self.opt.layers,\n n_head=self.opt.n_head,\n dropout=self.opt.dropout).to(self.device)\n \n for p in self.model.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)",
"def __init__(self, model: Callable, q: Callable, loss: Callable,\n optimizer: jax_optim.Optimizer, initial_params: Dict):\n self.model = model\n self.q = q\n self.loss = loss\n self.optimizer = optimizer\n self.optimizer_state = self.optimizer.init_fn(initial_params)\n self.step = 0",
"def basic_model_init(model_args, task_infos, tokenizer):\n config = AutoConfig.from_pretrained(\n model_args.model_name_or_path,\n num_labels=task_infos.num_labels,\n cache_dir=model_args.model_cache_dir,\n id2label=task_infos.id2label,\n label2id=task_infos.label2id,\n )\n model_cls = getattr(mod, model_args.architectures,\n AutoModelForSequenceClassification)\n model = model_cls.from_pretrained(\n model_args.model_name_or_path,\n config=config,\n cache_dir=model_args.model_cache_dir,\n )\n if model.config.vocab_size < len(tokenizer):\n print(\"resize...\")\n model.resize_token_embeddings(len(tokenizer))\n return model",
"def _init_model(self):\n self.A_inv = np.zeros(shape=(self.numUsers, self.d, self.d))\n self.b = np.zeros(shape=(self.numUsers, self.d))\n self.w = np.zeros(shape=(self.numUsers, self.d))\n for i, mat in enumerate(self.A_inv):\n self.A_inv[i] = np.eye(self.d)",
"def __init__(self):\n if Classifier.__instance is not None:\n LOGGER.logger.exception(\"This class is a singleton!\")\n else:\n self.model = False\n self.load_model()",
"def initModel(self):\n input_shape = (self.params[\"nb_features\"],)\n x = input_tensor = Input(input_shape)\n x = Dense(self.params[\"nb_neurons\"], activation=\"relu\")(x)\n for i in range(2, self.params[\"nb_layers\"] + 1):\n x = Dense(self.params[\"nb_neurons\"], activation=\"relu\")(x)\n if self.params[\"dropout\"]:\n x = Dropout(self.params[\"dropout\"])(x)\n x = output_tensor = Dense(4)(x)\n model = Model(input_tensor, output_tensor)\n return model",
"def __init__(self, rs_model, coordinator):\n\n if hasattr(rs_model.amodel, 'CapacityConstraint'):\n relaxed_constraint_name = 'CapacityConstraint'\n\n if hasattr(rs_model.amodel, 'CapacityConstraintLinear'):\n relaxed_constraint_name = 'CapacityConstraintLinear'\n \n if hasattr(rs_model.amodel, 'CapacityConstraintRef'):\n relaxed_constraint_name = 'CapacityConstraintRef'\n\n relaxed_constraint_range = [ arc for arc in rs_model.init_data[None]['Arcs'][None] ]\n relaxation_data = [(relaxed_constraint_name, relaxed_constraint_range)]\n\n decompose_local_rs_models = []\n for flow in rs_model.init_data[None]['Flows'][None]:\n lrsm = RsModel()\n lrsm.amodel = cp.deepcopy(rs_model.amodel)\n init_data_local = cp.deepcopy(rs_model.init_data)\n init_data_local[None]['Flows'][None] = [flow]\n init_data_local[None]['Src'] = { k: v for k, v in init_data_local[None]['Src'].items() if k == flow}\n init_data_local[None]['Dst'] = { k: v for k, v in init_data_local[None]['Dst'].items() if k == flow}\n init_data_local[None]['FlowUb'] = { k: v for k, v in init_data_local[None]['FlowUb'].items() if k == flow}\n init_data_local[None]['FlowLb'] = { k: v for k, v in init_data_local[None]['FlowLb'].items() if k == flow}\n lrsm.init_data = init_data_local\n lrsm.cmodel = None\n decompose_local_rs_models.append(lrsm)\n super().__init__(rs_model, relaxation_data, decompose_local_rs_models, coordinator)",
"def __init__(self, model):\n\t\tself.model = model",
"def initialize_model(self, init_model: Union[str, np.ndarray], pixel_mean: float):\n\n if (type(init_model) is str) and init_model == 'random':\n model = np.random.rand(*self.model_size)\n model = model * pixel_mean / model.mean()\n return model\n\n if (type(init_model) is str) and init_model == 'sum':\n model = self.frames.sum(0).reshape(*self.frame_size)\n model = model * pixel_mean / model.mean()\n model, mask = model_reshape(model, self.model_size)\n noise = np.where(mask == 1, 0, np.random.rand(*mask.shape)*pixel_mean*0.5)\n return model + noise\n\n if type(init_model) is np.ndarray:\n if not init_model.ndim == 2:\n raise ValueError(\"init_model has to be a 2D array.\")\n model, _ = model_reshape(init_model, self.model_size)\n model = model * pixel_mean / model.mean()\n return model\n raise ValueError(\"unknown initial model type. initial model can be 'random', 'sum', or a numpy array.\")",
"def __init__(self, param_dictionary):\n\n BaseModel.__init__(self)\n\n # set starting compartment values\n self.set_compartment(\"susceptible\",\n param_dictionary[\"population\"] - param_dictionary[\"start_infectious\"])\n self.set_compartment(\"infectious\", param_dictionary[\"start_infectious\"])\n self.set_compartment(\"immune\", 0.)\n\n # set model parameters\n self.set_param(\"infection_beta\",\n param_dictionary[\"r0\"]\n / (param_dictionary[\"duration_infectious\"] * param_dictionary[\"population\"]))\n self.set_param(\"infection_rate_recover\", 1. / param_dictionary[\"duration_infectious\"])",
"def _add_init(self, p_model):\r\n\r\n raise NotImplementedError",
"def __init__(self, param_dictionary):\n\n BaseModel.__init__(self)\n\n # set starting compartment values\n self.set_compartment(\"susceptible\",\n param_dictionary[\"population\"] - param_dictionary[\"start_infectious\"])\n self.set_compartment(\"preinfectious\", 0.)\n self.set_compartment(\"infectious\", param_dictionary[\"start_infectious\"])\n self.set_compartment(\"immune\", 0.)\n\n # set model parameters\n self.set_param(\"infection_beta\",\n param_dictionary[\"r0\"]\n / (param_dictionary[\"duration_infectious\"] * param_dictionary[\"population\"]))\n self.set_param(\"infection_rate_progress\", 1. / param_dictionary[\"duration_preinfectious\"])\n self.set_param(\"infection_rate_recover\", 1. / param_dictionary[\"duration_infectious\"])",
"def __init__(self, model, model_is_path=True):\n # Setup logging.\n self.log = logging.getLogger(self.__class__.__name__)\n\n # Parse the model.\n self.model_dict = parse(model, model_is_path)\n\n # The model dict has increasing integer keys so the model can\n # later be written in order (since GridLAB-D cares sometimes).\n # Get the first and last keys.\n keys = list(self.model_dict.keys())\n # Set keys for adding items to beginning or end of model.\n self.append_key = max(keys) + 1\n self.prepend_key = min(keys) - 1\n\n # Initialize model_map.\n self.model_map = {'clock': [], 'module': {}, 'object': {}, 'class': {},\n 'object_unnamed': []}\n\n # Map objects in the model.\n parallel_dict = self._map_model_dict(self.model_dict, parallel_dict={})\n # Merge the nested items into the top-level dict.\n for k, v in parallel_dict.items():\n # NOTE: this check isn't efficient, but it brings peace of\n # mind.\n if k in self.model_dict:\n m = 'The key {} already exists in self.model_dict!'.format(k)\n raise ItemExistsError(m)\n else:\n self.model_dict[k] = v\n\n self.log.info('GridLAB-D model parsed and mapped.')",
"def __init__(self, model: str, **kwargs):\n\n super().__init__(model=model, **kwargs)\n logger.info('load model done')",
"def __init__(self, model, settings):\n super().__init__(model, settings)\n self.model_part = self.model.CreateModelPart(self.settings[\"model_part_name\"].GetString())\n self.model_part.ProcessInfo.SetValue(KM.DOMAIN_SIZE, self.settings[\"domain_size\"].GetInt())\n self.model_part.ProcessInfo.SetValue(KM.GRAVITY_Z, self.settings[\"gravity\"].GetDouble())\n self.EstimateDeltaTimeUtility = SW.EstimateTimeStepUtility(self.GetComputingModelPart(), self.settings[\"time_stepping\"])",
"def init_model(self):\n # n_dims == n_hparams\n n_dims = len(self.searchspace.keys())\n\n if self.interim_results:\n n_dims += 1 # add one dim for augumented budget\n\n cov_amplitude = ConstantKernel(1.0, (0.01, 1000.0))\n\n other_kernel = Matern(\n length_scale=np.ones(n_dims),\n length_scale_bounds=[(0.01, 100)] * n_dims,\n nu=2.5,\n )\n base_model = GaussianProcessRegressor(\n kernel=cov_amplitude * other_kernel,\n normalize_y=True,\n noise=\"gaussian\",\n n_restarts_optimizer=2,\n )\n self.base_model = base_model",
"def __init__(self, model):\n self.model = model",
"def __init__(self, model):\n self.model = model",
"def __init__(self, model):\n self.model = model",
"def __init__(self, model):\n self.model = model",
"def _init_model(self):\n try:\n model_config = self._conf.get(PredictConstance.BASE_CONFIG,\n PredictConstance.MODEL_CONFIG)\n conf = configparser.ConfigParser()\n conf.read(model_config)\n self._model = model_factory.ModelFactory.create_model(config=conf)\n return True\n except Exception as err:\n self.managerlogger.logger.error(\"init model error: %s\" % err)\n self.errorlogger.logger.error(\"init model error: \\n %s\" % traceback.format_exc())\n return False",
"def init_model(self, model_name, config=None):\n ModelDirectory.init_model(model_name, pipeline=self, config=config)\n return self\n #self._action_list.append({'name': INIT_MODEL_ID, 'model_name': model_name, 'config': config})\n #return self.append_action()",
"def __init__(self, model: torch.nn.Module) -> None:\n self.model = copy.deepcopy(model)\n self.model.eval().to(device=DEVICE)\n self.rule_layer_map: List[Tuple[List[str], rules.LrpRule,\n Dict[str, Union[torch.Tensor, float]]]] = []\n self.input_nchw: Optional[torch.Tensor] = None\n self.label_idx_n: Optional[torch.Tensor] = None\n self.relevance_scores_nchw: Optional[torch.Tensor] = None\n self.explained_class_indices: Optional[torch.Tensor] = None",
"def __init__(self):\n\n super().__init__()\n\n self._model = None # type: StateSpaceModel\n self._kernel = None # type: Distribution",
"def set_model(self):\n self.model = self.get_model()",
"def __init__(self, model: object):\n self.model = model",
"def initialize(self, model):\n # Retrieve all parameters on which to act\n self.set_pruning_parameters(model)\n # Create a set of masks for each layer\n mask = [None] * len(self.prune_parameters)\n for step, (name, param) in enumerate(self.prune_parameters):\n mask[step] = torch.ones_like(param.data).detach()#.cpu().numpy()\n # Save mask\n self.mask = mask\n # Save the current model weights\n self.initial_state_dict = None",
"def __init__(self, ml):\n # Save a reference to the model.\n self.ml = ml",
"def _init_model(self, checkpoint_path: str) -> None:\n # load weights\n logger.info(f\"Load weights from the checkpoint {checkpoint_path}\")\n checkpoint = torch.load(checkpoint_path, map_location=torch.device(\"cpu\"))\n\n state_dict = checkpoint[\"state_dict\"]\n self.orig_acc = checkpoint[\"test_acc\"]\n\n is_pruned = (\n next((name for name in state_dict if \"mask\" in name), None) is not None\n )\n\n if is_pruned:\n logger.info(\"Dummy prunning to load pruned weights\")\n model_utils.dummy_pruning(self.params_all)\n\n model_utils.initialize_params(self.model, state_dict)\n logger.info(\"Initialized weights\")\n\n # check the trained model is pruned\n\n if is_pruned:\n logger.info(\n \"Get masks and remove prunning reparameterization for prepare_qat\"\n )\n self.mask = model_utils.get_masks(self.model)\n model_utils.remove_pruning_reparameterization(self.params_all)",
"def maybe_load_model(self):\n if self.model:\n return\n\n assert self.model_path, \"No model path\"\n\n _LOGGER.debug(\n \"Loading model from %s (beam width=%s)\", self.model_path, self.beam_width\n )\n self.model = deepspeech.Model(str(self.model_path))\n self.model.setBeamWidth(self.beam_width)\n\n if (\n self.scorer_path\n and self.scorer_path.is_file()\n ):\n _LOGGER.debug(\n \"Enabling language model (scorer=%s, lm_alpha=%s, lm_beta=%s)\",\n self.scorer_path,\n self.lm_alpha,\n self.lm_beta,\n )\n self.model.setScorerAlphaBeta(\n self.lm_alpha,\n self.lm_beta\n )\n self.model.enableExternalScorer(\n str(self.scorer_path)\n )",
"def __init__(self, model, name, sloppy=False):\n\n Model.__init__(self, model.copy(), name)\n\n self._cons_queue = list()\n self._var_queue = list()\n\n self._var_dict = dict()\n self._cons_dict = dict()\n\n self.sloppy=sloppy",
"def __init__(self):\n self.model_description: Dict[str, Any] = get_model_description()\n self.model_name: str = self.model_description['name']\n self.model_version: str = self.model_description['version']\n\n # Make sure we do not have a trailing slash to muck up processing later.\n self.event_dir: Optional[str] = None\n self.zone_name: Optional[str] = None\n self.fault_time: Optional[str] = None\n\n self.example: Example = None\n self.validator: ExampleValidator = ExampleValidator()\n self.common_features_df: pd.DataFrame = None\n\n self.cavity_onnx_session: rt.InferenceSession = rt.InferenceSession(os.path.join(os.path.dirname(__file__),\n 'model_files',\n 'cavity_model.onnx'))\n self.fault_onnx_session: rt.InferenceSession = rt.InferenceSession(os.path.join(os.path.dirname(__file__),\n 'model_files',\n 'fault_model.onnx'))",
"def __init__(self, models: Optional[List[Model]] = None) -> None:\n self.models = models\n # ensure unique model names\n modelnames = [iml.name for iml in self.models]\n if len(set(modelnames)) < len(modelnames):\n logger.warning(\"Duplicate model names, appending a suffix.\")\n modelnames = [f\"{iml.name}_{i}\" for i, iml in enumerate(self.models)]\n self.modelnames = modelnames\n # attributes that are set and used later\n self.figure = None\n self.axes = None\n self.mosaic = None\n self.cmap = None\n self.adjust_height = False\n self.smdict = None",
"def __init__(self, num_models: int, num_classes: int):\n self.nun_models = num_models\n self.num_classes = num_classes\n self.model: keras.Model = self.init_model()",
"def _construct_model(self):\n self.model = AutoEncoderConvolutional(self.n_latent_features, self.reduced_size)\n self.model = self.model.to(self.device, non_blocking=True)",
"def create_model(self) -> None:\n self._model = create_model_with_temperature_scaling(self.config)",
"def __init__(self, meta_model):\r\n\r\n # Check if the meta-model is actually a meta-model\r\n RM.check_if_type(meta_model, MM.AbstractModel, 'The input meta-model')\r\n\r\n self.meta_model = meta_model\r\n\r\n MM.AbstractModel.__init__(self, meta_model.get_in_par_intervals(), meta_model.get_in_par_means(),\r\n meta_model.get_in_par_variances(), meta_model.get_out_par_intervals(),\r\n meta_model.get_out_par_means(), meta_model.get_out_par_variances())",
"def __init__(self):\n self.model = self._get_model()\n\n # NOTE: The order of this list hardcoded here, and needs to be changed when re-training the model!\n # When exporting the model in tflite format, the model_spec is lost, so we cannot do it like that:\n # classes = ['???'] * model.model_spec.config.num_classes\n # label_map = model.model_spec.config.label_map\n # for label_id, label_name in label_map.as_dict().items():\n # classes[label_id-1] = label_name\n self.classes = ['Baked Goods', 'Salad', 'Cheese', 'Seafood', 'Tomato']",
"def __init__(\n self, model_functions, model_definitions,\n check_arguments=False, verbose=True\n ):\n # enable dimension checking of model functions\n self.check_arguments = check_arguments\n\n # enable verbose output\n self.verbose = verbose\n\n # assign dummy dimensions\n self.NT = -1 # dimension of time (== 1)\n self.NX = -1 # no. of total states\n self.NY = -1 # no. of differential states\n self.NZ = -1 # no. of algebraic states\n self.NP = -1 # no. of parameters\n self.NU = -1 # no. of controls\n\n # load model definitions\n self.definitions = self.load_model_definitions(model_definitions)\n self.assign_dimensions() # assign dimensions\n self.assign_functions() # assign functions\n\n # load model functions\n self.load_model_functions(model_functions)",
"def _init_model_params(self):\n super()._init_model_params()\n\n if 'e' in self.init_params:\n if self.init_type == 'uniform':\n if self.nr_no_train_de == 0:\n self.B = [\n np.full(\n (self.n_states, self.n_features[i]), 1.0 / self.n_features[i])\n for i in range(self.n_emissions)\n ]\n else:\n check_if_attributes_set(self, attr='e')\n else:\n if self.nr_no_train_de == 0:\n self.B = [\n np.random.rand(self.n_states, self.n_features[i])\n for i in range(self.n_emissions)\n ]\n for i in range(self.n_emissions):\n normalise(self.B[i], axis=1)\n\n else:\n check_if_attributes_set(self, attr='e')",
"def build(self, check_model=True, initial_soc=None):\n if initial_soc is not None:\n self.set_initial_soc(initial_soc)\n\n if self.built_model:\n return\n elif self.model.is_discretised:\n self._model_with_set_params = self.model\n self._built_model = self.model\n else:\n self.set_parameters()\n self._mesh = pybamm.Mesh(self._geometry, self._submesh_types, self._var_pts)\n self._disc = pybamm.Discretisation(self._mesh, self._spatial_methods)\n self._built_model = self._disc.process_model(\n self._model_with_set_params, inplace=False, check_model=check_model\n )\n # rebuilt model so clear solver setup\n self._solver._model_set_up = {}",
"def __init__(self):\n \n self.model = Net()\n\n if torch.cuda.is_available():\n map_location=torch.device('cuda')\n else:\n map_location=torch.device('cpu')\n\n # load parameters\n self.model.load_state_dict(torch.load('model.pt',\n map_location=map_location)) \n \n if torch.cuda.is_available():\n self.model.cuda()\n else:\n self.model.cpu()\n \n self.model.eval()",
"def init_weight(self, pretrained_model=None):\n if pretrained_model is not None:\n if os.path.exists(pretrained_model):\n utils.load_pretrained_model(self.backbone, pretrained_model)\n utils.load_pretrained_model(self, pretrained_model)\n else:\n raise Exception('Pretrained model is not found: {}'.format(\n pretrained_model))",
"def __init__(self, **kwargs):\n\n # Identify the mode to start the model in\n if \"x\" in kwargs and \"y\" in kwargs:\n x = kwargs.get(\"x\")\n y = kwargs.get(\"y\")\n if \"model_name\" not in kwargs:\n self.__mode = \"train\"\n else:\n self.__mode = \"retrain\"\n elif \"model_name\" in kwargs:\n self.__mode = \"test\"\n else:\n raise NameError(\"Cannot infer mode from arguments.\")\n\n print(\"Initializing model in %s mode.\" % self.__mode)\n\n if self.mode == \"train\":\n # Infer input type from type(x)\n if type(x[0]) == np.bytes_:\n print(\"Input type is 'binary mols'.\")\n self.__input_type = \"mols\" # binary RDKit mols\n else:\n print(\"Input type is 'molecular descriptors'.\")\n self.__input_type = \"descriptors\" # other molecular descriptors\n\n # If scaling is required\n if kwargs.get(\"scaling\", False) is True:\n # Normalize the input\n print(\"Applying scaling on input.\")\n self.__scaler = StandardScaler()\n x = self.__scaler.fit_transform(x)\n else:\n self.__scaler = None\n\n # If PCA is required\n if kwargs.get(\"pca\", False) is True:\n print(\"Applying PCA on input.\")\n self.__pca = PCA(\n n_components=x.shape[1]\n ) # n_components=n_features for now\n x = self.__pca.fit_transform(x)\n else:\n self.__pca = None\n\n self.__maxlen = (\n kwargs.get(\"dataset_info\")[\"maxlen\"] + 10\n ) # Extend maxlen to avoid breaks in training\n self.__charset = kwargs.get(\"dataset_info\")[\"charset\"]\n self.__dataset_name = kwargs.get(\"dataset_info\")[\"name\"]\n self.__lstm_dim = kwargs.get(\"lstm_dim\", 256)\n self.__h_activation = kwargs.get(\"h_activation\", \"relu\")\n self.__bn = kwargs.get(\"bn\", True)\n self.__bn_momentum = kwargs.get(\"bn_momentum\", 0.9)\n self.__noise_std = kwargs.get(\"noise_std\", 0.01)\n self.__td_dense_dim = kwargs.get(\n \"td_dense_dim\", 0\n ) # >0 squeezes RNN connections with Dense sandwiches\n self.__batch_size = kwargs.get(\"batch_size\", 256)\n self.__dec_layers = kwargs.get(\"dec_layers\", 2)\n\n if self.input_type == \"descriptors\":\n self.__codelayer_dim = x.shape[1] # features\n if \"codelayer_dim\" in kwargs:\n print(\n \"Ignoring requested codelayer_dim because it is inferred from the cardinality of the descriptors.\"\n )\n else:\n self.__codelayer_dim = kwargs.get(\"codelayer_dim\", 128)\n \n # Create the left/right-padding vectorizers\n self.__smilesvec1 = SmilesVectorizer(\n canonical=False,\n augment=True,\n maxlength=self.maxlen,\n charset=self.charset,\n binary=True,\n )\n\n self.__smilesvec2 = SmilesVectorizer(\n canonical=False,\n augment=True,\n maxlength=self.maxlen,\n charset=self.charset,\n binary=True,\n leftpad=False,\n )\n\n # self.train_gen.next() #This line is needed to set train_gen.dims (to be fixed in HetSmilesGenerator)\n self.__input_shape = self.smilesvec1.dims\n self.__dec_dims = list(self.smilesvec1.dims)\n self.__dec_dims[0] = self.dec_dims[0] - 1\n self.__dec_input_shape = self.dec_dims\n self.__output_len = self.smilesvec1.dims[0] - 1\n self.__output_dims = self.smilesvec1.dims[-1]\n\n # Build all sub-models as untrained models\n if self.input_type == \"mols\":\n self.__build_mol_to_latent_model()\n else:\n self.__mol_to_latent_model = None\n\n self.__build_latent_to_states_model()\n self.__build_batch_model()\n\n # Build data generators\n self.__build_generators(x, y)\n\n # Retrain or Test mode\n else:\n self.__model_name = kwargs.get(\"model_name\")\n\n # Load the model\n self.__load(self.model_name)\n \n if self.mode == \"retrain\":\n # If scaling is required\n if self.scaler is not None:\n 
print(\"Applying scaling on input.\")\n x = self.scaler.transform(x)\n\n # If PCA is required\n if self.pca is not None:\n print(\"Applying PCA on input.\")\n x = self.pca.transform(x)\n \n # Build data generators\n self.__build_generators(x, y)\n\n # Build full model out of the sub-models\n self.__build_model()\n\n # Show the resulting full model\n print(self.model.summary())",
"def recent_model_init(model_args, task_infos, tokenizer):\n config = AutoConfig.from_pretrained(\n model_args.model_name_or_path,\n num_labels=task_infos.num_labels,\n cache_dir=model_args.model_cache_dir,\n id2label=task_infos.id2label,\n label2id=task_infos.label2id,\n )\n config.dense_type = model_args.dense_type\n config.act_type = model_args.act_type\n config.num_labels_per_head = [\n len(label_id) for label_id in task_infos.head_id_to_label_id\n ]\n config.head2label = task_infos.head_id_to_label_id\n model_cls = getattr(mod, model_args.architectures,\n RobertaForKlueRecent)\n model = model_cls.from_pretrained(\n model_args.model_name_or_path,\n config=config,\n cache_dir=model_args.model_cache_dir,\n )\n if model.config.vocab_size < len(tokenizer):\n print(\"resize...\")\n model.resize_token_embeddings(len(tokenizer))\n return model",
"def initialize(self, myid, dispatcher, **model_params):\n self.lock_update = threading.Lock()\n self.jobsdone = 0 # how many jobs has this worker completed?\n # id of this worker in the dispatcher; just a convenience var for easy access/logging TODO remove?\n self.myid = myid\n self.dispatcher = dispatcher\n self.finished = False\n logger.info(\"initializing worker #%s\", myid)\n self.model = lsimodel.LsiModel(**model_params)",
"def __init__(self):\n super().__init__()\n self.indices_dir = ''\n self.split_file = ''\n\n self.model = '' # string identifying the model\n self.experiment = '' # string to describe experiment\n self.maps = [data.ID_MAP_T1H2O, data.ID_MAP_FF, data.ID_MAP_B1] # the used maps\n self.patch_size = [1, 32, 32]\n\n # training configuration\n self.loss = 'mse' # string identifying the loss function (huber, mse or mae)\n self.learning_rate = 0.01 # the learning rate\n self.dropout_p = 0.2\n self.norm = 'bn' # none, bn\n\n # we use the mean absolute error as best model score\n self.best_model_score_is_positive = True\n self.best_model_score_name = 'mae'",
"def _create_model(self):\n\n model_formula = self.get_model_formula()\n\n removed_observation_index = self._model_dataset.index.isin(self._excluded_observations)\n\n # TODO: Handle error that occurs when all model observations are invalid\n model = smf.ols(model_formula,\n data=self._model_dataset,\n subset=~removed_observation_index,\n missing='drop')\n\n self._model = model",
"def __init__(self, x_function, x_derivative, data_f, data_df, a):\n self.x_function = x_function\n self.x_derivative = x_derivative\n self.data_f = data_f\n self.data_df = data_df\n self.a = a\n self.linear_model = LinearModel(self.x_function, self.x_derivative)",
"def __init__(self, model: Type[ModelType]):\n self.model = model",
"def __init__(self, model: Type[ModelType]):\n self.model = model",
"def _prepare_model(model):\n\n # Ensure there is at least 1 load combination to solve if the user didn't define any\n if model.LoadCombos == {}:\n # Create and add a default load combination to the dictionary of load combinations\n model.LoadCombos['Combo 1'] = LoadCombo('Combo 1', factors={'Case 1':1.0})\n \n # Generate all meshes\n for mesh in model.Meshes.values():\n if mesh.is_generated == False:\n mesh.generate()\n\n # Activate all springs and members for all load combinations\n for spring in model.Springs.values():\n for combo_name in model.LoadCombos.keys():\n spring.active[combo_name] = True\n \n # Activate all physical members for all load combinations\n for phys_member in model.Members.values():\n for combo_name in model.LoadCombos.keys():\n phys_member.active[combo_name] = True\n \n # Assign an internal ID to all nodes and elements in the model. This number is different from the name used by the user to identify nodes and elements.\n _renumber(model)",
"def __init__(self,\n obs_mat: np.ndarray,\n obs: np.ndarray,\n obj_type: str = 'soft',\n nu: float = 1.0):\n super().__init__(obs_mat, obs, obj_type=obj_type)\n self.nu = nu\n self._cached_mat = self.obs_mat.T.dot(self.obs_mat)\n self.default_fit_options = dict(\n init_x=None,\n init_w=None,\n tol=1e-6,\n max_iter=100,\n verbose=False\n )",
"def _get_model(self, fl_ctx: FLContext):\n if isinstance(self.model, str):\n # treat it as model component ID\n model_component_id = self.model\n engine = fl_ctx.get_engine()\n self.model = engine.get_component(model_component_id)\n if not self.model:\n self.log_error(fl_ctx, f\"cannot find model component '{model_component_id}'\")\n return\n if self.model and isinstance(self.model, dict):\n # try building the model\n try:\n engine = fl_ctx.get_engine()\n # use provided or default optimizer arguments and add the model parameters\n if \"args\" not in self.model:\n self.model[\"args\"] = {}\n self.model = engine.build_component(self.model)\n except Exception as e:\n self.system_panic(\n f\"Exception while parsing `model`: \" f\"{self.model} with Exception {e}\",\n fl_ctx,\n )\n return\n if self.model and not isinstance(self.model, torch.nn.Module):\n self.system_panic(fl_ctx, f\"expect model to be torch.nn.Module but got {type(self.model)}: {self.model}\")\n return\n if self.model is None:\n self.system_panic(fl_ctx, f\"Model wasn't built correctly! It is {self.model}\")\n return\n self.log_info(fl_ctx, f\"Running model {self.model}\")",
"def __init__(self,\n model_fn=cake_fn,\n model_dir: Optional[str] = \"model\",\n saved_path : Optional[str] = None,\n ):\n self.model_fn = model_fn \n self.model_dir = model_dir\n if saved_path == None:\n self.update_predictor()\n elif saved_path == \"most_recent\":\n subdirs = [x for x in Path('saved_model').iterdir() if x.is_dir()\\\n and 'temp' not in str(x)]\n self.saved_path = \"saved_model/\"+str(sorted(subdirs)[-1])\n self._build_predictor()\n else:\n self.saved_path = saved_path\n self._build_predictor()",
"def __init__(self, model, m0, h, innerTol, misfit_only=False):\n self.model = model\n self.m0 = m0.copy()\n self.h = h\n self.tol = innerTol\n self.misfit_only=misfit_only\n self.ncalls = 0\n \n self.state_plus = model.generate_vector(STATE)\n self.adj_plus = model.generate_vector(ADJOINT)\n self.state_minus = model.generate_vector(STATE)\n self.adj_minus = model.generate_vector(ADJOINT)\n self.g_plus = model.generate_vector(PARAMETER)\n self.g_minus = model.generate_vector(PARAMETER)\n self.yhelp = model.generate_vector(PARAMETER)",
"def __init__(self, output_size=None, hidden_layers=None, learn_rate=0.001,\n drop_p=0.5, checkpoint=None, model_architecture=None,\n class_to_idx=None):\n if checkpoint is not None:\n # The map_location argument allows object deserialization on a\n # CPU-only machine even when the checkpoint was saved on a CUDA\n # enabled machine without first moving the model back to the CPU.\n keyword = {}\n if not torch.cuda.is_available():\n keyword['map_location'] = 'cpu'\n checkpoint = torch.load(checkpoint, **keyword)\n self.__setup_model(**checkpoint)\n\n else:\n if output_size is None:\n print('ERROR: output_size cannot be None when building a',\n 'new model.',\n file=sys.stderr)\n sys.exit(-1)\n if hidden_layers is None:\n print('ERROR: hidden_layers cannot be None when building a',\n 'new model.',\n file=sys.stderr)\n sys.exit(-1)\n\n self.__setup_model(output_size=output_size,\n hidden_layers=hidden_layers,\n learn_rate=learn_rate,\n drop_p=drop_p,\n model_architecture=model_architecture,\n class_to_idx=class_to_idx)"
] | [
"0.7124921",
"0.6987733",
"0.6885349",
"0.6875176",
"0.68088514",
"0.67898726",
"0.6724793",
"0.6722912",
"0.66522413",
"0.6631922",
"0.6631922",
"0.66145754",
"0.66093516",
"0.66029775",
"0.65747577",
"0.65733886",
"0.65632457",
"0.6534131",
"0.6534131",
"0.6534131",
"0.6534131",
"0.6534131",
"0.6531269",
"0.64967376",
"0.64945054",
"0.64900994",
"0.6478302",
"0.6467345",
"0.6467316",
"0.6456889",
"0.643681",
"0.64337033",
"0.6407347",
"0.6403455",
"0.6397626",
"0.6390568",
"0.6385764",
"0.63791543",
"0.6367684",
"0.6322172",
"0.6319108",
"0.6283416",
"0.6270255",
"0.62639326",
"0.6254676",
"0.61994153",
"0.61960405",
"0.6169805",
"0.6164325",
"0.6138812",
"0.6111009",
"0.61015797",
"0.60982305",
"0.60920316",
"0.6089457",
"0.6085367",
"0.6076643",
"0.60645556",
"0.6055624",
"0.60445917",
"0.6041459",
"0.6041459",
"0.6041459",
"0.6041459",
"0.6040927",
"0.60388803",
"0.60378075",
"0.60333514",
"0.60252357",
"0.6005752",
"0.6001807",
"0.59980613",
"0.5991733",
"0.5983692",
"0.59775007",
"0.59721714",
"0.5971548",
"0.5970442",
"0.5965488",
"0.5948034",
"0.59444636",
"0.5933777",
"0.5916129",
"0.58988404",
"0.58978915",
"0.5894608",
"0.589174",
"0.5886853",
"0.58862853",
"0.5885027",
"0.58846325",
"0.58839405",
"0.5881885",
"0.5866946",
"0.5866946",
"0.5865468",
"0.5853615",
"0.5848583",
"0.58462197",
"0.58454895",
"0.5840268"
] | 0.0 | -1 |
Loglikelihood of model evaluated pointwise | def loglikeobs(self, params, *args, **kwargs):
raise NotImplementedError # pragma: no cover | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def log_likelihood(self, data, reward_model, bias_params):",
"def log_likelihood(self):\r\n return (-0.5 * self.num_data * self.output_dim * np.log(2.*np.pi) -\r\n 0.5 * self.output_dim * self.K_logdet + self._model_fit_term() + self.likelihood.Z)",
"def loglikelihood(self, y):\n raise NotImplementedError",
"def loglikelihood(self):\n raise NotImplementedError(\"To be implemented\")",
"def log_likelihood(self):\n return -0.5*self.D*self.K_logdet + self._model_fit_term() + self.likelihood.Z",
"def log_marginal_likelihood(X_train,y_train,phi,tau=1.,Ve=1.e-10):",
"def ICA_log_likelihood(X, model):\n\n # TODO: YOUR CODE HERE",
"def log_likelihood_function(self, instance):\r\n\r\n xvalues = np.arange(self.data.shape[0])\r\n model_data = instance.profile_from_xvalues(xvalues=xvalues)\r\n residual_map = self.data - model_data\r\n chi_squared_map = (residual_map / self.noise_map) ** 2.0\r\n log_likelihood = -0.5 * sum(chi_squared_map)\r\n\r\n return log_likelihood",
"def _calculate_ll(self, x):\n observation_log_probs = self._observation_log_probs(x, mask=None)\n forward_log_probs = self._forward(observation_log_probs)\n log_likelihood = logsumexp(\n forward_log_probs[forward_log_probs.shape[0] - 1, :].numpy())\n return log_likelihood",
"def log_likelihood_function(self, instance: af.ModelInstance) -> float:\r\n model_data = self.model_data_from_instance(instance=instance)\r\n fit = self.fit_from_model_data(model_data=model_data)\r\n return fit.log_likelihood",
"def log_likelihood(self):\r\n A = -0.5 * self.num_data * self.output_dim * np.log(2.*np.pi) + 0.5 * np.sum(np.log(self.beta_star)) - 0.5 * np.sum(self.V_star * self.likelihood.Y)\r\n C = -self.output_dim * (np.sum(np.log(np.diag(self.LB))))\r\n D = 0.5 * np.sum(np.square(self._LBi_Lmi_psi1V))\r\n return A + C + D + self.likelihood.Z",
"def log_likelihood(self, x):\n return self.log_likelihood_exp(x) + self.log_prior_nuisance_parameters(x)",
"def __log_likelihood(self, params, *args):\n\t\tX, y, feature_set, lambda_reg, empirical_weights, verbose, sign = args\n\n\t\tno_example = len(X)\n\t\ttotal_logZ = 0\n\t\ttotal_logProb = 0\n\t\texpected_weights = np.zeros(len(feature_set))\n\t\tfor t in range(len(X)):\n\t\t\t# example_features = X[t], example_labels = y[t]\n\n\t\t\tpotential = np.zeros(len(X[t]))\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\t#candidate_features = X[t][i], candidate_label = y[t][i]\n\t\t\t\tpotential[i] = feature_set.calc_inner_product(X[t][i], params)\n\n\t\t\t#scaling\n\t\t\tpotential = potential - np.max(potential, keepdims=True)\n\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\ttotal_logProb += potential[i] * y[t][i]\n\n\t\t\tpotential, Z = self.__softmax(potential)\n\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\tfeature_set.calc_inner_sum(expected_weights, X[t][i], potential[i])\n\n\t\t\ttotal_logZ += log(Z)\n\n\t\t# _params = feature_set.get_regularized_params(params, 'bias')\n\t\t_params = params\n\t\tlog_likelihood = total_logProb - total_logZ - (lambda_reg/2) * np.sum(np.multiply(_params,_params))\n\t\tgradients = empirical_weights - expected_weights - lambda_reg * _params\n\n\t\tglobal SUB_ITERATION_NUM\n\t\tif verbose:\n\t\t\tsub_iteration_str = ' '\n\t\t\tif SUB_ITERATION_NUM > 0:\n\t\t\t\tsub_iteration_str = '(' + '{0:02d}'.format(SUB_ITERATION_NUM) + ')'\n\t\t\tprint(' ', '{0:03d}'.format(ITERATION_NUM), sub_iteration_str, ':', log_likelihood * sign)\n\n\t\tSUB_ITERATION_NUM += 1\n\n\t\treturn sign * log_likelihood, sign * gradients",
"def log_likelihood(y_true, y_pred):\n ll = np.sum(y_true * np.log(y_pred) - y_pred)\n return ll",
"def log_likelihood(self, params):\n # extract the parameters\n m1 = params['m1']\n m2 = params['m2']\n DL = params['DL']\n Tc = params['Tc']\n iota = params['iota']\n phic = params['phic']\n psi = params['psi']\n thetaS = params['thetaS']\n phiS = params['phiS']\n\n # calculate the model\n model = self._model(time, m1, m2, DL, Tc, iota, phic, psi, thetaS, phiS)\n\n# # normalisation\n# norm = -0.5*self._ndata*LN2PI - self._ndata*self._logsigma\n\n# # chi-squared\n# chisq = np.sum(((self._data - model)/(self._sigma))**2)\n\n return -np.vdot(self._data - model,self._data - model)",
"def log_likelihood(self, x):\n # set nuisance parameters to their central values!\n predictions = self.get_predictions(self.shortarray_to_array(x), nuisance=False)\n m_obj = flavio.Measurement['Pseudo-measurement for FastFit instance: ' + self.name]\n m_obs = m_obj.all_parameters\n prob_dict = m_obj.get_logprobability_all(predictions)\n ll = sum(prob_dict.values())\n return ll",
"def model_likelihood(\n self, obs: Tensor, actions: Tensor, next_obs: Tensor\n ) -> Tensor:\n return self.model.log_prob(obs, actions, next_obs)",
"def log_likelihood(self):\r\n assert not self.likelihood.is_heteroscedastic\r\n A = -0.5*self.batchsize*self.output_dim*(np.log(2.*np.pi) - np.log(self.likelihood.precision))\r\n B = -0.5*self.likelihood.precision*self.output_dim*self.trace_K\r\n Kmm_logdet = 2.*np.sum(np.log(np.diag(self.Lm)))\r\n C = -0.5*self.output_dim*self.data_prop*(Kmm_logdet-self.q_u_logdet - self.num_inducing)\r\n C += -0.5*np.sum(self.LQL * self.B)\r\n D = -0.5*self.likelihood.precision*self.likelihood.trYYT\r\n E = np.sum(self.V*self.projected_mean)\r\n return (A+B+C+D+E)/self.data_prop",
"def _compute_log_likelihood(self, parameters):\n raise NotImplementedError('')",
"def MVN_log_likelihood(X, model):\n D, M = X.shape\n X_normalized = normalize_log_likelihoods(X.copy())\n mvn = multivariate_normal(mean=model.mean, cov=model.cov)\n return mvn.logpdf(X_normalized.T).sum()\n # log_2pi = D * np.log(2 * np.pi)\n # log_det = np.log(np.linalg.det(model.cov))\n # residuals = calc_residuals(X_normalized, model.mean, \"minus\")\n # mahalanobis_distance = np.dot(np.dot(residuals.T, np.linalg.inv(model.cov)), residuals)\n # return -0.5 * (log_2pi + log_det + mahalanobis_distance).sum()",
"def negative_log_likelihood(self):\n # y.shape[0] is (symbolically) the number of rows in y, i.e.,\n # number of examples (call it n) in the minibatch\n # T.arange(y.shape[0]) is a symbolic vector which will contain\n # [0,1,2,... n-1] T.log(self.p_y_given_x) is a matrix of\n # Log-Probabilities (call it LP) with one row per example and\n # one column per class LP[T.arange(y.shape[0]),y] is a vector\n # v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ...,\n # LP[n-1,y[n-1]]] and T.mean(LP[T.arange(y.shape[0]),y]) is\n # the mean (across minibatch examples) of the elements in v,\n # i.e., the mean log-likelihood across the minibatch.\n return -T.log(self.p_y_given_x)[T.arange(self.y.shape[0]), self.y]",
"def log_likelihood(self):\r\n if self.likelihood.is_heteroscedastic:\r\n A = -0.5 * self.num_data * self.output_dim * np.log(2.*np.pi) + 0.5 * np.sum(np.log(self.likelihood.precision)) - 0.5 * np.sum(self.likelihood.V * self.likelihood.Y)\r\n B = -0.5 * self.output_dim * (np.sum(self.likelihood.precision.flatten() * self.psi0) - np.trace(self._A))\r\n else:\r\n A = -0.5 * self.num_data * self.output_dim * (np.log(2.*np.pi) - np.log(self.likelihood.precision)) - 0.5 * self.likelihood.precision * self.likelihood.trYYT\r\n B = -0.5 * self.output_dim * (np.sum(self.likelihood.precision * self.psi0) - np.trace(self._A))\r\n C = -self.output_dim * (np.sum(np.log(np.diag(self.LB)))) # + 0.5 * self.num_inducing * np.log(sf2))\r\n D = 0.5 * self.data_fit\r\n self._A_part, self._B_part, self._C_part, self._D_part = A, B, C, D\r\n return A + B + C + D + self.likelihood.Z",
"def log_likelihood(self):\n raise NotImplementedError(\"the log_likelihood property should \"\n \"be defined in the Estimator sub-class\")",
"def log_likelihood(self, X, Y):\n\t\tr,c = twod(Y).shape\n\t\tif r == 1 and c != 1:\n\t\t\tY = twod(Y).T\n\n\t\tsoft = self.predict_soft(X)\n\t\treturn np.mean(np.sum(np.log(np.power(soft, Y, )), 1), 0)",
"def log_likelihood(self,samples,times):\n prior_mu = np.ones(2*len(self.A)+1) \n prior_var = np.eye(2*len(self.A)+1)*0.7\n prior_p = np.log(self.prior_pdf())\n #prior_p = np.log(self.normal_prior(prior_mu,prior_var))\n xform = [self.sum_exp(t) for t in times]\n lp = scipy.stats.norm(xform,np.sqrt(self.var)).pdf(samples)\n sample_p =np.sum(np.log(lp))\n ll = prior_p + sample_p\n\n if np.isnan(ll):\n return -np.infty\n return ll",
"def likelihood(self,x,params = None,**kwargs):\n return np.exp(self.log_likelihood(x,params=params,**kwargs))",
"def nloglikeobs(self, params):\n lambda_ = params[0]\n\n ll_output = self._LL(self.endog, rate=lambda_)\n\n return -np.log(ll_output)",
"def log_likelihood(self, y_list):\n if self.lambda_mat is None:\n raise ValueError(\"Can't compute model likelihood before fitting!\")\n\n # precision prior distribution given precision hyper-parameters\n prec_distr = stats.gamma(a=self.prec_distr[0],\n scale=self.prec_distr[1] ** -1.0)\n\n # likelihood of projection matrix precision priors given\n # precision hyper-parameters\n lambda_logl = np.sum(\n prec_distr.logpdf(self.lambda_mat['alpha']\n / self.lambda_mat['beta'])\n )\n\n # likelihood of projection matrix values given their precision priors\n a_logl = np.sum(\n stats.norm(loc=0, scale=(self.lambda_mat['beta']\n / self.lambda_mat['alpha']))\n .logpdf(self.A_mat['mu'])\n )\n\n # likelihood of latent feature matrix given kernel matrix,\n # projection matrix, and standard deviation hyper-parameter\n h_logl = np.sum(\n stats.norm(loc=self.A_mat['mu'].transpose() @ self.kernel_mat,\n scale=self.sigma_h)\n .logpdf(self.H_mat['mu'])\n )\n\n # likelihood of bias parameter precision priors given\n # precision hyper-parameters\n weight_prior_logl = np.sum(\n prec_distr.logpdf(np.array(self.weight_priors['alpha'])\n / np.array(self.weight_priors['beta']))\n )\n\n # likelihood of bias parameters given their precision priors\n weight_logl = np.sum(\n stats.norm(loc=0, scale=(np.array(self.weight_priors['beta'])\n / np.array(self.weight_priors['alpha'])))\n .logpdf(self.weight_mat['mu'])\n )\n\n # likelihood of predicted outputs given latent features, bias\n # parameters, and latent feature weight parameters\n f_logl = np.sum(\n stats.norm(\n loc=(self.weight_mat['mu'][1:, :].transpose()\n @ self.H_mat['mu']\n + np.vstack(self.weight_mat['mu'][0, :])),\n scale=1).logpdf(self.output_mat['mu'])\n )\n\n # likelihood of actual output labels given class separation margin\n # and predicted output labels\n y_logl = np.sum(self.get_y_logl(y_list))\n\n return (lambda_logl + a_logl + h_logl\n + weight_prior_logl + weight_logl + f_logl + y_logl)",
"def evaluate_GMM_log_likelihood(model, x, y):\n y_pred = model.predict(x)\n \n num_datapoints = len(x)\n output_dim = y.shape[-1]\n num_comp = int(y_pred.shape[-1] / (3*output_dim))\n\n mix_comp_logits = y_pred[:, :num_comp]\n mus = y_pred[:, num_comp:(1+output_dim)*num_comp]\n sigmas = y_pred[:, (1+output_dim)*num_comp:]\n \n # convert logits to categorical distribution - need to itterate through all points\n mix_comp = np.zeros((num_datapoints, num_comp))\n for i in range(num_datapoints):\n mix_comp[i,:] = get_mixture_dist(mix_comp_logits[i,:], num_comp)\n \n log_likelihood = 0\n for i in range(num_comp):\n for j in range(output_dim):\n mse = -0.5*np.sum(mix_comp[:,i]*np.square((y[:,j]-mus[:,(i*output_dim)+j])/sigmas[:,(i*output_dim)+j]))\n sigma_trace = -np.sum(mix_comp[:,i]*np.log(sigmas[:,(i*output_dim)+j]))\n log2pi = -np.sum(mix_comp[:,i]*0.5*output_dim*np.log(2*np.pi))\n\n log_likelihood += mse + sigma_trace + log2pi\n \n avg_log_likelihood = np.round(log_likelihood / num_datapoints, 2)\n print(f'Log likelihood: {avg_log_likelihood}')\n return avg_log_likelihood",
"def GSM_log_likelihood(X, model):\n D, M = X.shape\n k = model.mix.shape[0]\n log_likelihood = 0\n for i in range(M):\n logpdf_X = 0\n for j in range(k):\n mvn = multivariate_normal(cov=model.cov[j, :])\n logpdf_X = mvn.logpdf(x=X[:, i]) * model.mix[j]\n log_likelihood += logpdf_X\n return log_likelihood",
"def log_likelihood(X, parameters):\n check_data_type_column_data(X)\n check_model_params_dict(parameters)\n\n sigma = (1.0/parameters['rho'])**.5\n\n log_likelihood = numpy.sum(norm.logpdf(X,parameters['mu'],sigma))\n\n return log_likelihood",
"def log_likelihood(self):\n\n if self._log_likelihood is None:\n self._log_likelihood = logpdf(x=self.y, cov=self.S)\n return self._log_likelihood",
"def log_likelihood(parameters):\n if len(copula.bounds_param) == 1:\n params = [parameters]\n else:\n param1, param2 = parameters\n params = [param1, param2]\n logl = -np.sum(np.log(copula.get_pdf(psd_obs[0], psd_obs[1], params)))\n return logl",
"def llf(self):\n return self.model.loglike(self.params)",
"def poisson_log_likelihood(x, log_rate):\n return x * log_rate - np.exp(log_rate) - lax.lgamma(x + 1.0)",
"def log_likelihood(self, points):\n\t\tpoint_set = list(points)\n\t\tlog_probabilities = [np.log(self.density(point)) for point in point_set]\n\t\treturn sum(log_probabilities)",
"def loglikelihood(model, data, q):\n\tph, pvh = model\n\tnPeople, nQuestions = data.shape\n\tlogL = 0\n\tfor i in range(nPeople):\n\t\tanswers = data[i,:]\n\t\tfor k in range(nQuestions):\n\t\t\tlogL += np.log(sum(pvh[:, k, int(answers[k] - 1)] * q[i,:].T))\n\treturn logL",
"def log_likelihood(self) -> tf.Tensor:\n # K⁻¹ + GᵀΣ⁻¹G = LLᵀ.\n l_post = self._k_inv_post.cholesky\n num_data = self.observations_index.shape[0]\n\n # Hμ [..., num_transitions + 1, output_dim]\n marginal = self.emission.project_state_to_f(self.prior_ssm.marginal_means)\n marginal = self._drop_batch_shape(marginal)\n\n # y = obs - Hμ [..., num_transitions + 1, output_dim]\n disp = self.observations - marginal\n disp_data = self.sparse_observations - self.dense_to_sparse(marginal)\n\n # cst is the constant term for a gaussian log likelihood\n cst = (\n -0.5 * np.log(2 * np.pi) * tf.cast(self.emission.output_dim * num_data, default_float())\n )\n\n term1 = -0.5 * tf.reduce_sum(\n input_tensor=tf.einsum(\"...op,...p,...o->...o\", self._r_inv_data, disp_data, disp_data), axis=[-1, -2]\n )\n\n # term 2 is: ½|L⁻¹(GᵀΣ⁻¹)y|²\n # (GᵀΣ⁻¹)y [..., num_transitions + 1, state_dim]\n obs_proj = self._back_project_y_to_state(disp)\n\n # ½|L⁻¹(GᵀΣ⁻¹)y|² [...]\n term2 = 0.5 * tf.reduce_sum(\n input_tensor=tf.square(l_post.solve(obs_proj, transpose_left=False)), axis=[-1, -2]\n )\n\n ## term 3 is: ½log |K⁻¹| - log |L| + ½ log |Σ⁻¹|\n # where log |Σ⁻¹| = num_data * log|R⁻¹|\n term3 = (\n 0.5 * self.prior_ssm.log_det_precision()\n - l_post.abs_log_det()\n + 0.5 * self._log_det_observation_precision\n )\n\n return tf.reduce_sum(cst + term1 + term2 + term3)",
"def log_likelihood_exp(self, x):\n predictions = self.get_predictions(x)\n ll = 0.\n for measurement in self.get_measurements:\n m_obj = flavio.Measurement[measurement]\n m_obs = m_obj.all_parameters\n exclude_observables = set(m_obs) - set(self.observables)\n prob_dict = m_obj.get_logprobability_all(predictions, exclude_parameters=exclude_observables)\n ll += sum(prob_dict.values())\n return ll",
"def compute_log_likelihood(self,params: ndarray) -> float:\n \n pred_mag = self._pred_mag(params,self.times)\n sigma_2 = self.sd_mags**2 \n ln_likelihood = -0.5*np.sum((pred_mag - self.mags)**2 / sigma_2+ np.log(sigma_2))\n\n return ln_likelihood",
"def get_total_log_likelihood(self, x, **kwargs):\n pass",
"def log_likelihood_function(self, instance) -> float:\n return self.prior.factor(instance[0])",
"def likelihood(mean, logs, x):\n return -0.5 * (logs * 2. + ((x - mean) ** 2) / np.exp(logs * 2.) + GaussianDiag.Log2PI)",
"def _compute_log_likelihood(self, X, S):\n log_likelihood = 0\n for n in range(self.n_col):\n likelihood = 1\n for k in range(self.n_components):\n likelihood *= self.weights[k] \\\n * multivariate_normal(self.means[k], self.covs[k]).pdf(X[n]) \\\n * poisson(self.rates[k]).pmf(S[n])\n log_likelihood += np.log(likelihood)\n\n return log_likelihood",
"def log_likelihood(X, mu, sigma, phi):\n ll = None\n\n #######################################################################\n # TODO: #\n # Compute the log-likelihood of the data under the current model. #\n # This is used to check for convergnence of the algorithm. #\n #######################################################################\n\n ll = np.zeros((X.shape[0], 1))\n k = mu.shape[0]\n\n for i in range(k):\n ll += multivariate_normal(mu[i, :], sigma[i]).pdf(X)[:, np.newaxis]*phi[i]\n\n ll = sum(np.log(ll))\n\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n\n return ll",
"def negative_log_likelihood(self, y):\r\n # y.shape[0] is (symbolically) the number of rows in y, i.e.,\r\n # number of examples (call it n) in the minibatch\r\n # T.arange(y.shape[0]) is a symbolic vector which will contain\r\n # [0,1,2,... n-1] T.log(self.p_y_given_x) is a matrix of\r\n # Log-Probabilities (call it LP) with one row per example and\r\n # one column per class LP[T.arange(y.shape[0]),y] is a vector\r\n # v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ...,\r\n # LP[n-1,y[n-1]]] and T.mean(LP[T.arange(y.shape[0]),y]) is\r\n # the mean (across minibatch examples) of the elements in v,\r\n # i.e., the mean log-likelihood across the minibatch.\r\n return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])",
"def compute_log_likelihood(self, indicators, weights, l2):\n scores, _ = self.predict_probability(self.train_feature_x, weights)\n probs = self.predict_probability(self.train_feature_x, weights)\n lp = np.sum((indicators-1)*scores + np.log(probs)) - l2* np.sum(weights[1:]**2)\n return lp",
"def log_prob(self):",
"def get_log_likelihood(phi, pred, t, dot_product, weight, reg= 1):\n prior = -0.5* np.sum(np.multiply(weight, weight))\n likelihood = np.multiply(t, np.log(pred+TOLERANCE)) + np.multiply(1.0- t, np.log(1.0-pred+TOLERANCE))\n likelihood = np.sum(likelihood)\n\n return prior + likelihood",
"def log_likelihood(self, theta):\n raise NotImplementedError()",
"def lnlike(params, observables, nDraws=1000000):\n #print('checking type ({}) and length ({}) of params in lnlikefxn'.format(type(params),len(params)))\n evalData=generateModelData(params, distance_standoffMid, nDraws)\n evalHist, evalBinEdges = np.histogram(evalData[:,3], tof_nBins, tof_range,\n density=True)\n logEvalHist = np.log(evalHist)\n #print(logEvalHist)\n # find what TOFs have zero observed data\n # we'll use this to handle cases where we might wind up with -inf*0\n # likelihood is fine if PDF is 0 somewhere where no data is found\n # without checks though, ln(PDF=0)=-inf, -inf*0 = nan\n # however, if PDF is 0 (lnPDF=-inf) where there IS data, lnL should be -inf\n zeroObservedIndices = np.where(observables == 0)[0]\n for idx in zeroObservedIndices:\n if logEvalHist[idx] == -inf:\n logEvalHist[zeroObservedIndices] = 0\n \n loglike = np.dot(logEvalHist,observables)\n return loglike",
"def negative_log_likelihood(self, y):\r\n # y.shape[0] is (symbolically) the number of rows in y, i.e., number of examples (call it n) in the minibatch\r\n # T.arange(y.shape[0]) is a symbolic vector which will contain [0,1,2,... n-1]\r\n # T.log(self.p_y_given_x) is a matrix of Log-Probabilities (call it LP) with one row per example and one column per class\r\n # LP[T.arange(y.shape[0]),y] is a vector v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ..., LP[n-1,y[n-1]]]\r\n # and T.mean(LP[T.arange(y.shape[0]),y]) is the mean (across minibatch examples) of the elements in v,\r\n # i.e., the mean log-likelihood across the minibatch.\r\n return T.log(self.p_y_given_x)[T.arange(y.shape[0]),y]",
"def log_likelihood_loss(y, tx, w):\n p_1 = sigmoid(tx.dot(w))\n p_0 = np.log(1-p_1)\n p_1 = np.log(p_1)\n return -np.sum((y == 1)*p_1+(y == 0)*p_0)",
"def objective_llh(self, params):\n\n try:\n obj = self.log_likelihood(params[0], params[1], params[2:])\n except (LinAlgError, ZeroDivisionError, ValueError):\n obj = -np.inf\n return obj",
"def __call__(self, params):\n # Construct model for given set of parameters\n mod = self.model(params)\n\n # Input into equation (11) from Anderson (1990)\n # But we want log-likelihood not negative log-likelihood (in MCMC)\n # and so we add the -1.0\n like = np.sum(np.log(mod) + (self.power / mod))\n return -1.0*like",
"def loglikelihood(self, context, continuation):\n pass",
"def get_log_likelihood(response_probability, response):\n pass",
"def nloglikeobs(self, params):\n #print len(params),\n beta = params[:-2]\n df = params[-2]\n scale = params[-1]\n loc = np.dot(self.exog, beta)\n endog = self.endog\n x = (endog - loc)/scale\n #next part is stats.t._logpdf\n lPx = sps_gamln((df+1)/2) - sps_gamln(df/2.)\n lPx -= 0.5*np_log(df*np_pi) + (df+1)/2.*np_log(1+(x**2)/df)\n lPx -= np_log(scale) # correction for scale\n return -lPx",
"def forward(self, input):\n log_likelihood = -0.5 * (math.log(2 * math.pi) + self.sigma2.log() + (input - self.mu) ** 2 / self.sigma2)\n return log_likelihood",
"def _log_probability(self, theta, model, bounds, x, y, yerr):\n lp = self._log_prior(theta, bounds)\n if not np.isfinite(lp):\n return -np.inf\n return lp + self._log_likelihood(theta, model, x, y, yerr)",
"def f(self, x):\n error = log_likelihood_calc(x[1], x[0], self.data)\n return error",
"def log_marginal_likelihood(self) -> tf.Tensor:\n X, Y = self.data\n Y = Y[..., :-1]\n K = self.kernel(X)\n ks = self._add_noise_cov(K)\n L = tf.linalg.cholesky(ks)\n m = self.mean_function(X)\n\n # [R,] log-likelihoods for each independent dimension of Y\n log_prob = gpflow.logdensities.multivariate_normal(Y, m, L)\n return tf.reduce_sum(log_prob)",
"def log_likelihood(data, probs):\n # Assume data is given as counts\n return _np.sum([nlogp(n, p) for n, p in zip(data, probs) if n > 0])",
"def likelihood(self):\n \n raise NotImplementedError()",
"def log_marginal_likelihood(self) -> tf.Tensor:\n L = tf.linalg.cholesky(self.likelihood.add_to(self.KXX))\n return tf.reduce_sum(multivariate_normal(self._Y, self._mean, L))",
"def compute_log_likelihood(self, X = None, Y = None):\n assert X is None and Y is None, \"{} does not support minibatch mode\".format(str(type(self)))\n K = self.kern.K(self.X) + torch.eye(self.X.size(0), dtype=self.X.dtype, device=self.X.device) * self.likelihood.variance.get()\n L = torch.cholesky(K, upper=False)\n m = self.mean_function(self.X)\n return densities.multivariate_normal(self.Y, m, L)",
"def _logprob_X(self, X, **kwargs):\n pass",
"def negative_log_likelihood(self, y):\r\n # start-snippet-2\r\n # y.shape[0] is (symbolically) the number of rows in y, i.e. number of examples (call it n) in the minibatch\r\n # T.arange(y.shape[0]) is a symbolic vector which will contain [0,1,2,... n-1]\r\n # T.log(self.p_y_given_x) is a matrix of Log-Probabilities (call it LP) with one row per example and one column per class\r\n # LP[T.arange(y.shape[0]),y] is a vector v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ..., LP[n-1,y[n-1]]] and\r\n # T.mean(LP[T.arange(y.shape[0]),y]) is the mean (across minibatch examples) of the elements in v, i.e., the mean log-likelihood across the minibatch.\r\n\r\n #print \"y.ndim = \",y.ndim\r\n return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])\r\n # end-snippet-2\r",
"def log_likelihood(self, log_occr_array=None):\n\n if log_occr_array is not None:\n # Catch invalid occurrence rates for zero likelihood\n try:\n self.log_occr = log_occr_array\n except InvalidOccurrenceRate:\n return -np.inf\n\n # N_exp\n N_exp = self.calc_integral() * self._N_stars\n\n # Product terms\n # TODO:Check that the array broadcasting works here\n # Shape of s_terms should be [N_planets, NR, NP]\n s_terms = self.H_array * self.F_array * self.occr\n\n if tf.is_tensor(self.occr):\n ps_terms = tf.reduce_sum(s_terms, axis=(-1, -2))\n product_term = tf.reduce_sum(tf.math.log(ps_terms))\n log_ll_value = product_term - N_exp\n else:\n product_term = np.log(s_terms.sum(axis=(-1, -2))).sum()\n log_ll_value = product_term - N_exp\n\n # BUG TODO\n if np.isnan(log_ll_value):\n warnings.warn(\".likelihood value is nan.\")\n import pdb; pdb.set_trace()\n\n # A nan value is possible when some of the occr are too high\n log_ll_value = -np.inf if np.isnan(log_ll_value) else log_ll_value\n\n return log_ll_value",
"def compute_log_prob(self,params: ndarray) -> float:\n return self.compute_log_prior(params) + self.compute_log_likelihood(params)",
"def log_likelihood(self, log_occr_array=None):\n\n if log_occr_array is not None:\n # Catch invalid occurrence rates for zero likelihood\n try:\n self.log_occr = log_occr_array\n except InvalidOccurrenceRate:\n return -np.inf\n\n # N_exp\n N_exp = self.calc_integral() * self._N_stars\n\n # Product terms\n # TODO:Check that the array broadcasting works here\n # Shape of s_terms should be [N_planets, NR, NP]\n s_terms = self.H_array * self.F_array * self.occr_grid\n\n if tf.is_tensor(self.occr):\n ps_terms = tf.reduce_sum(s_terms, axis=(-1, -2))\n product_term = tf.reduce_sum(tf.math.log(ps_terms))\n log_ll_value = product_term - N_exp\n else:\n product_term = np.log(s_terms.sum(axis=(-1, -2))).sum()\n log_ll_value = product_term - N_exp\n\n # BUG TODO\n if np.isnan(log_ll_value):\n warnings.warn(\".likelihood value is nan.\")\n import pdb; pdb.set_trace()\n\n # A nan value is possible when some of the occr are too high\n log_ll_value = -np.inf if np.isnan(log_ll_value) else log_ll_value\n\n return log_ll_value",
"def log_likelihood(mu, sigma, y, T):\n ll = 0.\n for yi, Ti in zip(y, T):\n d = yi.size\n log_det_cov = np.linalg.slogdet(sigma[Ti])[1]\n y_minus_mean = yi - mu[Ti]\n term3 = np.dot(y_minus_mean.T.ravel(),\n np.linalg.solve(sigma[Ti], y_minus_mean.T).ravel())\n ll += (-0.5 * d * np.log(2 * np.pi) - 0.5 * log_det_cov - 0.5 * term3)\n return ll",
"def lnprob(theta, model, priors, x, y, yerr):\n lp = lnprior(theta, priors)\n if not np.isfinite(lp):\n return -np.inf\n return lp + lnlike(theta, model, x, y, yerr)",
"def log_likelihoods(self):\n return self.__data_frame.loc[:, \"ll\":\"ll\"].values[:-1]",
"def log_likelihood(cosmo_param, pk_obs, inv_cov):\n pknlfid, kbins, kspace = fiducial_power(cosmo_param)\n \n x = pk_obs - pknlfid\n return -0.5* (x.T @ inv_cov @ x)",
"def __calc_likelihood(self, *args):\n params = {}\n for i, p in enumerate(self._par_names):\n if self._par_islog[p]:\n params[p] = np.power(10., args[i])\n else:\n params[p] = args[i]\n return self.return_likelihood(params)",
"def my_loglike(theta, x, data, sigma):\n\n model = my_model(theta, x)\n\n return -0.5*len(x)*np.log(2*math.pi*sigma**2) - (0.5/sigma**2) * np.sum((data-model)**2)",
"def logpeak(x, p=default()):\n model = p[0] - p[1]*(x**2)\n return model",
"def calculate_likelihood(truth, log_forecast):\n\n return tf.reduce_sum(truth * log_forecast) # Dimensions [batch_size, N_LABEL_TIMESTEPS, N_LABEL_CLASSES]",
"def pseudolikelihood(self, data):\n data = toPM(data); # interface glue: convert {0,1} to {-1,+1}\n r = self.L.dot(data)\n r += self.h.reshape(-1,1) if len(data.shape)==2 else self.h\n lnp = -np.log(1+np.exp(-2*data*r)) # ln p(x_i^(s)|x_{-i}^(s)) for all vars i, samples s\n return lnp.sum(axis=0) # sum over i => pseudo-log-likelihood of each x^(s)",
"def log_likelihood_grad_rew(self, data, reward_model, bias_params):",
"def log_likelihood(model, dataloader, K=200):\n total_sum = 0\n importance_values = []\n zs_batch = torch.randn((dataloader.batch_size, K, 100))\n for i, minibatch in enumerate(dataloader):\n minibatch = minibatch[0]\n importance_values += importance_sampling_function(model, minibatch, zs_batch[:len(minibatch)])\n return torch.mean(torch.stack(importance_values))",
"def negative_log_likelihood(self, y):\r\n \r\n return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])\r\n # end-snippet-2\r",
"def compute_log_marginal_likelihood(\n K_i: torch.Tensor,\n logDetK: torch.Tensor,\n y: torch.Tensor,\n normalize: bool = True,\n log_prior_dist=None,\n):\n lml = (\n -0.5 * y.t() @ K_i @ y\n + 0.5 * logDetK\n - y.shape[0]\n / 2.0\n * torch.log(\n 2\n * torch.tensor(\n np.pi,\n )\n )\n )\n if log_prior_dist is not None:\n lml -= log_prior_dist\n return lml / y.shape[0] if normalize else lml",
"def log_likelihood(self, X, Y, theta):\n \n alphas, logZ = self.alpha_chain(X, theta)\n total = 0\n \n s_prev = self.S\n \n for t,s in enumerate(Y):\n total += self.log_psi(theta, t, s_prev, s, X[t]) \n s_prev = s\n \n total -= logZ\n return total / X.shape[0]",
"def log_likelihood(self, theta, x, **kwargs):\n\n u, logdet_dudx, log_a = self.forward(theta, x, **kwargs)\n\n constant = float(-0.5 * self.n_inputs * np.log(2.0 * np.pi))\n # log_likelihood = torch.log(torch.sum(torch.exp(log_a - 0.5 * u ** 2 + logdet_dudx), dim=2))\n log_likelihood = torch.logsumexp(log_a - 0.5 * u**2 + logdet_dudx, dim=2)\n log_likelihood = constant + torch.sum(log_likelihood, dim=1)\n\n return u, log_likelihood",
"def define_model_log_prob(model, model_loss, x, y, params_flattened_list, params_shape_list, tau_list, tau_out, normalizing_const=1., predict=False, prior_scale = 1.0, device = 'cpu'):\n\n fmodel = util.make_functional(model)\n dist_list = []\n for tau in tau_list:\n dist_list.append(torch.distributions.Normal(torch.zeros_like(tau), tau**-0.5))\n\n def log_prob_func(params):\n # model.zero_grad()\n # params is flat\n # Below we update the network weights to be params\n params_unflattened = util.unflatten(model, params)\n\n i_prev = 0\n l_prior = torch.zeros_like( params[0], requires_grad=True) # Set l2_reg to be on the same device as params\n for weights, index, shape, dist in zip(model.parameters(), params_flattened_list, params_shape_list, dist_list):\n # weights.data = params[i_prev:index+i_prev].reshape(shape)\n w = params[i_prev:index+i_prev]\n l_prior = dist.log_prob(w).sum() + l_prior\n i_prev += index\n\n # Sample prior if no data\n if x is None:\n # print('hi')\n return l_prior/prior_scale\n\n x_device = x.to(device)\n y_device = y.to(device)\n\n\n output = fmodel(x_device, params=params_unflattened)\n\n if model_loss == 'binary_class_linear_output':\n crit = nn.BCEWithLogitsLoss(reduction='sum')\n ll = - tau_out *(crit(output, y_device))\n elif model_loss == 'multi_class_linear_output':\n # crit = nn.MSELoss(reduction='mean')\n crit = nn.CrossEntropyLoss(reduction='sum')\n # crit = nn.BCEWithLogitsLoss(reduction='sum')\n ll = - tau_out *(crit(output, y_device.long().view(-1)))\n # ll = - tau_out *(torch.nn.functional.nll_loss(output, y.long().view(-1)))\n elif model_loss == 'multi_class_log_softmax_output':\n ll = - tau_out *(torch.nn.functional.nll_loss(output, y_device.long().view(-1)))\n\n elif model_loss == 'regression':\n # crit = nn.MSELoss(reduction='sum')\n ll = - 0.5 * tau_out * ((output - y_device) ** 2).sum(0)#sum(0)\n\n elif callable(model_loss):\n # Assume defined custom log-likelihood.\n ll = - model_loss(output, y_device).sum(0)\n else:\n raise NotImplementedError()\n\n if torch.cuda.is_available():\n del x_device, y_device\n torch.cuda.empty_cache()\n\n if predict:\n return (ll + l_prior/prior_scale), output\n else:\n return (ll + l_prior/prior_scale)\n\n return log_prob_func",
"def logit_pvalue(model, x, verbose=False):\n probs = model.predict_proba(x)\n n_datapoints = probs.shape[0]\n n_feautures = len(model.coef_[0]) + 1\n coeffs = np.hstack([model.intercept_.reshape(-1, 1), model.coef_])\n x_full = np.matrix(np.insert(np.array(x), 0, 1, axis=1))\n pvals = []\n errors = []\n for coeffs_vec, p_vec in zip(coeffs, probs.T):\n ans = np.zeros((n_feautures, n_feautures))\n for i in range(n_datapoints):\n ans += np.dot(x_full[i].T, x_full[i]) * p_vec[i] * (1 - p_vec[i])\n try:\n vcov = np.linalg.inv(np.matrix(ans))\n serrors = np.sqrt(np.diag(vcov))\n t = coeffs_vec / serrors\n pn = (1 - norm.cdf(abs(t))) * 2\n except np.linalg.linalg.LinAlgError as e:\n if verbose:\n print(\"det : {0}\".format(np.linalg.det(np.matrix(ans))))\n serrors = np.zeros(ans.shape[0])\n pn = np.zeros(ans.shape[0])\n pvals.append(pn)\n errors.append(serrors)\n pvals = np.array(pvals)\n errors = np.array(errors)\n return pvals.T, coeffs.T, errors.T",
"def lnlike(theta, model, x, y, yerr):\n return -np.nansum(0.5 * np.log([2 * np.pi] * len(y)))\\\n -np.nansum(np.log(yerr))\\\n -0.5*np.nansum(((y-model(x, *theta))/yerr)**2)",
"def model_likelihoods(\n self, obs: Tensor, actions: Tensor, next_obs: Tensor\n ) -> List[Tensor]:\n return [m.log_prob(obs, actions, next_obs).mean() for m in self.model]",
"def log_marg_likelihood(self):\n self.A = np.linalg.inv(self.Sn)\n term1 = self.t - [email protected]\n self.Evidence_mN = (self.beta/2)*np.linalg.norm(term1)+ (self.alpha/2)*[email protected]\n A_abs = np.linalg.eigvals(self.A)\n A_abs = np.prod(A_abs)\n\n self.marg_lik = ((self.p)/2)*np.log(self.alpha) + (self.n/2)*np.log(self.beta) - self.Evidence_mN - (1/2)*np.log(A_abs) - (self.n/2)*np.log(2*np.pi)\n\n return self.marg_lik",
"def negative_log_likelihood(self, y):\r\n return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])",
"def negative_log_likelihood(self, y):\r\n return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])",
"def logp(self, F, Y):\n raise NotImplementedError(\"implement the logp function\\\n for this likelihood\")",
"def loglikelihood(y: float64, m: float64, sigma: float64) -> float64:\n\n # -log(sqrt(2*pi)) = -0.9189385332046727\n\n return -0.9189385332046727 - np.log(sigma) - (y - m) * (y - m) / (2.0 * sigma * sigma)",
"def _log_likelihood(self, theta, f, x, y, yerr):\n sigma2 = yerr**2\n return -0.5*np.sum((y - f(theta, x))**2 / sigma2 + 2*np.log(sigma2))",
"def _log_lik(self, X, Y, delta):\n pi = self.predict_proba(X, self.fit_intercept, self.coeffs)\n p0, p1, pc = self.p0, self.p1, self.pc\n prb = ((pi * p0 * (1. - p0) ** (Y - 1.)\n + (1. - pi) * p1 * (1. - p1) ** (Y - 1.)\n ) * (1. - pc) ** Y\n ) ** delta \\\n * ((pi * (1 - p0) ** Y\n + (1. - pi) * (1. - p1) ** Y\n ) * pc * (1. - pc) ** (Y - 1.)\n ) ** (1. - delta)\n return np.mean(np.log(prb))",
"def loglikelihood(mean, grad):\n\n # update the global latent_means list\n latent_means[index] = mean\n\n if grad.size > 0:\n # update the gradient\n grad[:] = compute_gradient(\n Y=Y, \n mi=mean, \n latent_Sigmas=latent_Sigmas,\n B1=B1,\n B2=B2,\n ss=ss,\n mu=mu,\n g1=g1,\n g2=g2,\n sigma2=sigma2,\n index=index\n )\n\n a1, a2, a3, a4, a5 = compute_terms(\n Y=Y, \n latent_means=latent_means, \n latent_Sigmas=latent_Sigmas, \n B1=B1, \n B2=B2, \n mu=mu, \n g1=g1, \n g2=g2\n )\n\n scalars = N*q/2 - N*p/2*np.log(TWOPI*sigma2)\n\n total = sum(\n [\n item1 - 1/(2*sigma2)*item2 + (TWOPI)**(1/2-q)*(item3 + item4 + item5) \n for item1, item2, item3, item4, item5 in zip(a1, a2, a3, a4, a5)\n ]\n )\n\n return total + scalars",
"def relative_likelihood(self):\n \n if self.num_hidden == 0:\n \n return T.exp(-self.compute_energy(self.x, self.batch_size))\n \n if self.num_hidden > 0:\n \n return T.exp(-self.compute_free_energy(self.x))",
"def get_log_marginal_likelihood(self, mode='BIC'):\n if mode == 'BIC':\n if not self.isOptimized:\n print('Parameters have not been optimized; training now')\n self.train()\n if self.BICscore is None:\n BIC = 0\n for i, model in enumerate(self.models):\n n = model.n \n k = model.m.num_params\n L = model.m.log_likelihood()\n BIC += L - k/2*np.log(n)\n self.BICscore = BIC\n return self.BICscore\n elif mode in ['laplace', 'Laplace']:\n raise NotImplementedError('Laplace approximation is not yet implemented')\n elif mode == 'AIS':\n raise NotImplementedError('Annealed importance sampling is not yet implemented')\n else:\n raise NotImplementedError('Unrecognized marginal likelihood approximation {:s}'.format(mode))",
"def NLL(self,y):\n return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])"
] | [
"0.8078789",
"0.8046145",
"0.78648543",
"0.782446",
"0.77342033",
"0.77272224",
"0.77194923",
"0.7687198",
"0.7685602",
"0.7643445",
"0.7605884",
"0.7569359",
"0.75504184",
"0.74748665",
"0.74734956",
"0.74657774",
"0.7450817",
"0.74382687",
"0.7420948",
"0.738504",
"0.73678744",
"0.7324908",
"0.7317116",
"0.7295415",
"0.7240325",
"0.7230728",
"0.7221347",
"0.71898717",
"0.71793056",
"0.7171889",
"0.7168894",
"0.71572393",
"0.7151493",
"0.7121796",
"0.7117503",
"0.710322",
"0.70853424",
"0.70844275",
"0.7079496",
"0.70611095",
"0.7059836",
"0.70589507",
"0.7056072",
"0.7029886",
"0.70046055",
"0.7003465",
"0.69957423",
"0.69955",
"0.6982835",
"0.69771045",
"0.697262",
"0.696014",
"0.69489866",
"0.693152",
"0.69215417",
"0.69016623",
"0.6892785",
"0.6883342",
"0.68730986",
"0.6867153",
"0.68597704",
"0.68593395",
"0.6850292",
"0.6844826",
"0.6840435",
"0.6837055",
"0.6820002",
"0.6809664",
"0.67949855",
"0.6793254",
"0.6785493",
"0.67740715",
"0.6772778",
"0.67711854",
"0.6764759",
"0.67612696",
"0.67257035",
"0.67253894",
"0.67225295",
"0.67204505",
"0.6715298",
"0.6708707",
"0.67033845",
"0.66925406",
"0.66907823",
"0.6683631",
"0.6675916",
"0.6674309",
"0.6669921",
"0.66679955",
"0.6666517",
"0.6642934",
"0.6642934",
"0.66407394",
"0.6635715",
"0.662287",
"0.6617292",
"0.6614447",
"0.65928763",
"0.6567923",
"0.65608215"
] | 0.0 | -1 |
Loglikelihood of model. Default implementation sums loglikeobs. | def loglike(self, params, *args, **kwargs):
return np.sum(self.loglikeobs(params, *args, **kwargs)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def loglikelihood(self):\n raise NotImplementedError(\"To be implemented\")",
"def log_likelihood(self, data, reward_model, bias_params):",
"def log_likelihood(self):\r\n return (-0.5 * self.num_data * self.output_dim * np.log(2.*np.pi) -\r\n 0.5 * self.output_dim * self.K_logdet + self._model_fit_term() + self.likelihood.Z)",
"def log_likelihood(self):\n return -0.5*self.D*self.K_logdet + self._model_fit_term() + self.likelihood.Z",
"def loglikelihood(self, y):\n raise NotImplementedError",
"def get_total_log_likelihood(self, x, **kwargs):\n pass",
"def log_likelihood(self):\r\n A = -0.5 * self.num_data * self.output_dim * np.log(2.*np.pi) + 0.5 * np.sum(np.log(self.beta_star)) - 0.5 * np.sum(self.V_star * self.likelihood.Y)\r\n C = -self.output_dim * (np.sum(np.log(np.diag(self.LB))))\r\n D = 0.5 * np.sum(np.square(self._LBi_Lmi_psi1V))\r\n return A + C + D + self.likelihood.Z",
"def _compute_log_likelihood(self, parameters):\n raise NotImplementedError('')",
"def log_likelihood_function(self, instance):\r\n\r\n xvalues = np.arange(self.data.shape[0])\r\n model_data = instance.profile_from_xvalues(xvalues=xvalues)\r\n residual_map = self.data - model_data\r\n chi_squared_map = (residual_map / self.noise_map) ** 2.0\r\n log_likelihood = -0.5 * sum(chi_squared_map)\r\n\r\n return log_likelihood",
"def log_likelihood(self):\n raise NotImplementedError(\"the log_likelihood property should \"\n \"be defined in the Estimator sub-class\")",
"def nloglikeobs(self, params):\n lambda_ = params[0]\n\n ll_output = self._LL(self.endog, rate=lambda_)\n\n return -np.log(ll_output)",
"def log_likelihood_function(self, instance: af.ModelInstance) -> float:\r\n model_data = self.model_data_from_instance(instance=instance)\r\n fit = self.fit_from_model_data(model_data=model_data)\r\n return fit.log_likelihood",
"def log_likelihood(self, params):\n # extract the parameters\n m1 = params['m1']\n m2 = params['m2']\n DL = params['DL']\n Tc = params['Tc']\n iota = params['iota']\n phic = params['phic']\n psi = params['psi']\n thetaS = params['thetaS']\n phiS = params['phiS']\n\n # calculate the model\n model = self._model(time, m1, m2, DL, Tc, iota, phic, psi, thetaS, phiS)\n\n# # normalisation\n# norm = -0.5*self._ndata*LN2PI - self._ndata*self._logsigma\n\n# # chi-squared\n# chisq = np.sum(((self._data - model)/(self._sigma))**2)\n\n return -np.vdot(self._data - model,self._data - model)",
"def ICA_log_likelihood(X, model):\n\n # TODO: YOUR CODE HERE",
"def _calculate_ll(self, x):\n observation_log_probs = self._observation_log_probs(x, mask=None)\n forward_log_probs = self._forward(observation_log_probs)\n log_likelihood = logsumexp(\n forward_log_probs[forward_log_probs.shape[0] - 1, :].numpy())\n return log_likelihood",
"def model_likelihood(\n self, obs: Tensor, actions: Tensor, next_obs: Tensor\n ) -> Tensor:\n return self.model.log_prob(obs, actions, next_obs)",
"def __log_likelihood(self, params, *args):\n\t\tX, y, feature_set, lambda_reg, empirical_weights, verbose, sign = args\n\n\t\tno_example = len(X)\n\t\ttotal_logZ = 0\n\t\ttotal_logProb = 0\n\t\texpected_weights = np.zeros(len(feature_set))\n\t\tfor t in range(len(X)):\n\t\t\t# example_features = X[t], example_labels = y[t]\n\n\t\t\tpotential = np.zeros(len(X[t]))\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\t#candidate_features = X[t][i], candidate_label = y[t][i]\n\t\t\t\tpotential[i] = feature_set.calc_inner_product(X[t][i], params)\n\n\t\t\t#scaling\n\t\t\tpotential = potential - np.max(potential, keepdims=True)\n\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\ttotal_logProb += potential[i] * y[t][i]\n\n\t\t\tpotential, Z = self.__softmax(potential)\n\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\tfeature_set.calc_inner_sum(expected_weights, X[t][i], potential[i])\n\n\t\t\ttotal_logZ += log(Z)\n\n\t\t# _params = feature_set.get_regularized_params(params, 'bias')\n\t\t_params = params\n\t\tlog_likelihood = total_logProb - total_logZ - (lambda_reg/2) * np.sum(np.multiply(_params,_params))\n\t\tgradients = empirical_weights - expected_weights - lambda_reg * _params\n\n\t\tglobal SUB_ITERATION_NUM\n\t\tif verbose:\n\t\t\tsub_iteration_str = ' '\n\t\t\tif SUB_ITERATION_NUM > 0:\n\t\t\t\tsub_iteration_str = '(' + '{0:02d}'.format(SUB_ITERATION_NUM) + ')'\n\t\t\tprint(' ', '{0:03d}'.format(ITERATION_NUM), sub_iteration_str, ':', log_likelihood * sign)\n\n\t\tSUB_ITERATION_NUM += 1\n\n\t\treturn sign * log_likelihood, sign * gradients",
"def log_likelihood(self):\r\n assert not self.likelihood.is_heteroscedastic\r\n A = -0.5*self.batchsize*self.output_dim*(np.log(2.*np.pi) - np.log(self.likelihood.precision))\r\n B = -0.5*self.likelihood.precision*self.output_dim*self.trace_K\r\n Kmm_logdet = 2.*np.sum(np.log(np.diag(self.Lm)))\r\n C = -0.5*self.output_dim*self.data_prop*(Kmm_logdet-self.q_u_logdet - self.num_inducing)\r\n C += -0.5*np.sum(self.LQL * self.B)\r\n D = -0.5*self.likelihood.precision*self.likelihood.trYYT\r\n E = np.sum(self.V*self.projected_mean)\r\n return (A+B+C+D+E)/self.data_prop",
"def log_likelihood(self, X, Y):\n\t\tr,c = twod(Y).shape\n\t\tif r == 1 and c != 1:\n\t\t\tY = twod(Y).T\n\n\t\tsoft = self.predict_soft(X)\n\t\treturn np.mean(np.sum(np.log(np.power(soft, Y, )), 1), 0)",
"def log_likelihood(self, x):\n # set nuisance parameters to their central values!\n predictions = self.get_predictions(self.shortarray_to_array(x), nuisance=False)\n m_obj = flavio.Measurement['Pseudo-measurement for FastFit instance: ' + self.name]\n m_obs = m_obj.all_parameters\n prob_dict = m_obj.get_logprobability_all(predictions)\n ll = sum(prob_dict.values())\n return ll",
"def MVN_log_likelihood(X, model):\n D, M = X.shape\n X_normalized = normalize_log_likelihoods(X.copy())\n mvn = multivariate_normal(mean=model.mean, cov=model.cov)\n return mvn.logpdf(X_normalized.T).sum()\n # log_2pi = D * np.log(2 * np.pi)\n # log_det = np.log(np.linalg.det(model.cov))\n # residuals = calc_residuals(X_normalized, model.mean, \"minus\")\n # mahalanobis_distance = np.dot(np.dot(residuals.T, np.linalg.inv(model.cov)), residuals)\n # return -0.5 * (log_2pi + log_det + mahalanobis_distance).sum()",
"def log_likelihood(self, theta):\n raise NotImplementedError()",
"def log_marginal_likelihood(X_train,y_train,phi,tau=1.,Ve=1.e-10):",
"def log_likelihood(self):\r\n if self.likelihood.is_heteroscedastic:\r\n A = -0.5 * self.num_data * self.output_dim * np.log(2.*np.pi) + 0.5 * np.sum(np.log(self.likelihood.precision)) - 0.5 * np.sum(self.likelihood.V * self.likelihood.Y)\r\n B = -0.5 * self.output_dim * (np.sum(self.likelihood.precision.flatten() * self.psi0) - np.trace(self._A))\r\n else:\r\n A = -0.5 * self.num_data * self.output_dim * (np.log(2.*np.pi) - np.log(self.likelihood.precision)) - 0.5 * self.likelihood.precision * self.likelihood.trYYT\r\n B = -0.5 * self.output_dim * (np.sum(self.likelihood.precision * self.psi0) - np.trace(self._A))\r\n C = -self.output_dim * (np.sum(np.log(np.diag(self.LB)))) # + 0.5 * self.num_inducing * np.log(sf2))\r\n D = 0.5 * self.data_fit\r\n self._A_part, self._B_part, self._C_part, self._D_part = A, B, C, D\r\n return A + B + C + D + self.likelihood.Z",
"def log_likelihood(self, x):\n return self.log_likelihood_exp(x) + self.log_prior_nuisance_parameters(x)",
"def loglikelihood(model, data, q):\n\tph, pvh = model\n\tnPeople, nQuestions = data.shape\n\tlogL = 0\n\tfor i in range(nPeople):\n\t\tanswers = data[i,:]\n\t\tfor k in range(nQuestions):\n\t\t\tlogL += np.log(sum(pvh[:, k, int(answers[k] - 1)] * q[i,:].T))\n\treturn logL",
"def log_likelihood(y_true, y_pred):\n ll = np.sum(y_true * np.log(y_pred) - y_pred)\n return ll",
"def loglikelihood(self, context, continuation):\n pass",
"def compute_log_likelihood(self,params: ndarray) -> float:\n \n pred_mag = self._pred_mag(params,self.times)\n sigma_2 = self.sd_mags**2 \n ln_likelihood = -0.5*np.sum((pred_mag - self.mags)**2 / sigma_2+ np.log(sigma_2))\n\n return ln_likelihood",
"def log_marginal_likelihood(self) -> tf.Tensor:\n X, Y = self.data\n Y = Y[..., :-1]\n K = self.kernel(X)\n ks = self._add_noise_cov(K)\n L = tf.linalg.cholesky(ks)\n m = self.mean_function(X)\n\n # [R,] log-likelihoods for each independent dimension of Y\n log_prob = gpflow.logdensities.multivariate_normal(Y, m, L)\n return tf.reduce_sum(log_prob)",
"def log_likelihood(self) -> tf.Tensor:\n # K⁻¹ + GᵀΣ⁻¹G = LLᵀ.\n l_post = self._k_inv_post.cholesky\n num_data = self.observations_index.shape[0]\n\n # Hμ [..., num_transitions + 1, output_dim]\n marginal = self.emission.project_state_to_f(self.prior_ssm.marginal_means)\n marginal = self._drop_batch_shape(marginal)\n\n # y = obs - Hμ [..., num_transitions + 1, output_dim]\n disp = self.observations - marginal\n disp_data = self.sparse_observations - self.dense_to_sparse(marginal)\n\n # cst is the constant term for a gaussian log likelihood\n cst = (\n -0.5 * np.log(2 * np.pi) * tf.cast(self.emission.output_dim * num_data, default_float())\n )\n\n term1 = -0.5 * tf.reduce_sum(\n input_tensor=tf.einsum(\"...op,...p,...o->...o\", self._r_inv_data, disp_data, disp_data), axis=[-1, -2]\n )\n\n # term 2 is: ½|L⁻¹(GᵀΣ⁻¹)y|²\n # (GᵀΣ⁻¹)y [..., num_transitions + 1, state_dim]\n obs_proj = self._back_project_y_to_state(disp)\n\n # ½|L⁻¹(GᵀΣ⁻¹)y|² [...]\n term2 = 0.5 * tf.reduce_sum(\n input_tensor=tf.square(l_post.solve(obs_proj, transpose_left=False)), axis=[-1, -2]\n )\n\n ## term 3 is: ½log |K⁻¹| - log |L| + ½ log |Σ⁻¹|\n # where log |Σ⁻¹| = num_data * log|R⁻¹|\n term3 = (\n 0.5 * self.prior_ssm.log_det_precision()\n - l_post.abs_log_det()\n + 0.5 * self._log_det_observation_precision\n )\n\n return tf.reduce_sum(cst + term1 + term2 + term3)",
"def model_likelihoods(\n self, obs: Tensor, actions: Tensor, next_obs: Tensor\n ) -> List[Tensor]:\n return [m.log_prob(obs, actions, next_obs).mean() for m in self.model]",
"def log_likelihood(X, parameters):\n check_data_type_column_data(X)\n check_model_params_dict(parameters)\n\n sigma = (1.0/parameters['rho'])**.5\n\n log_likelihood = numpy.sum(norm.logpdf(X,parameters['mu'],sigma))\n\n return log_likelihood",
"def log_marginal_likelihood(self) -> tf.Tensor:\n L = tf.linalg.cholesky(self.likelihood.add_to(self.KXX))\n return tf.reduce_sum(multivariate_normal(self._Y, self._mean, L))",
"def llf(self):\n return self.model.loglike(self.params)",
"def log_likelihoods(self):\n return self.__data_frame.loc[:, \"ll\":\"ll\"].values[:-1]",
"def log_likelihood(parameters):\n if len(copula.bounds_param) == 1:\n params = [parameters]\n else:\n param1, param2 = parameters\n params = [param1, param2]\n logl = -np.sum(np.log(copula.get_pdf(psd_obs[0], psd_obs[1], params)))\n return logl",
"def negative_log_likelihood(self):\n # y.shape[0] is (symbolically) the number of rows in y, i.e.,\n # number of examples (call it n) in the minibatch\n # T.arange(y.shape[0]) is a symbolic vector which will contain\n # [0,1,2,... n-1] T.log(self.p_y_given_x) is a matrix of\n # Log-Probabilities (call it LP) with one row per example and\n # one column per class LP[T.arange(y.shape[0]),y] is a vector\n # v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ...,\n # LP[n-1,y[n-1]]] and T.mean(LP[T.arange(y.shape[0]),y]) is\n # the mean (across minibatch examples) of the elements in v,\n # i.e., the mean log-likelihood across the minibatch.\n return -T.log(self.p_y_given_x)[T.arange(self.y.shape[0]), self.y]",
"def GSM_log_likelihood(X, model):\n D, M = X.shape\n k = model.mix.shape[0]\n log_likelihood = 0\n for i in range(M):\n logpdf_X = 0\n for j in range(k):\n mvn = multivariate_normal(cov=model.cov[j, :])\n logpdf_X = mvn.logpdf(x=X[:, i]) * model.mix[j]\n log_likelihood += logpdf_X\n return log_likelihood",
"def compute_log_likelihood(self, indicators, weights, l2):\n scores, _ = self.predict_probability(self.train_feature_x, weights)\n probs = self.predict_probability(self.train_feature_x, weights)\n lp = np.sum((indicators-1)*scores + np.log(probs)) - l2* np.sum(weights[1:]**2)\n return lp",
"def log_likelihood(self):\n\n if self._log_likelihood is None:\n self._log_likelihood = logpdf(x=self.y, cov=self.S)\n return self._log_likelihood",
"def compute_log_likelihood(self, X = None, Y = None):\n assert X is None and Y is None, \"{} does not support minibatch mode\".format(str(type(self)))\n K = self.kern.K(self.X) + torch.eye(self.X.size(0), dtype=self.X.dtype, device=self.X.device) * self.likelihood.variance.get()\n L = torch.cholesky(K, upper=False)\n m = self.mean_function(self.X)\n return densities.multivariate_normal(self.Y, m, L)",
"def loglikeobs(self, params, *args, **kwargs):\n raise NotImplementedError # pragma: no cover",
"def log_likelihood(self, y_list):\n if self.lambda_mat is None:\n raise ValueError(\"Can't compute model likelihood before fitting!\")\n\n # precision prior distribution given precision hyper-parameters\n prec_distr = stats.gamma(a=self.prec_distr[0],\n scale=self.prec_distr[1] ** -1.0)\n\n # likelihood of projection matrix precision priors given\n # precision hyper-parameters\n lambda_logl = np.sum(\n prec_distr.logpdf(self.lambda_mat['alpha']\n / self.lambda_mat['beta'])\n )\n\n # likelihood of projection matrix values given their precision priors\n a_logl = np.sum(\n stats.norm(loc=0, scale=(self.lambda_mat['beta']\n / self.lambda_mat['alpha']))\n .logpdf(self.A_mat['mu'])\n )\n\n # likelihood of latent feature matrix given kernel matrix,\n # projection matrix, and standard deviation hyper-parameter\n h_logl = np.sum(\n stats.norm(loc=self.A_mat['mu'].transpose() @ self.kernel_mat,\n scale=self.sigma_h)\n .logpdf(self.H_mat['mu'])\n )\n\n # likelihood of bias parameter precision priors given\n # precision hyper-parameters\n weight_prior_logl = np.sum(\n prec_distr.logpdf(np.array(self.weight_priors['alpha'])\n / np.array(self.weight_priors['beta']))\n )\n\n # likelihood of bias parameters given their precision priors\n weight_logl = np.sum(\n stats.norm(loc=0, scale=(np.array(self.weight_priors['beta'])\n / np.array(self.weight_priors['alpha'])))\n .logpdf(self.weight_mat['mu'])\n )\n\n # likelihood of predicted outputs given latent features, bias\n # parameters, and latent feature weight parameters\n f_logl = np.sum(\n stats.norm(\n loc=(self.weight_mat['mu'][1:, :].transpose()\n @ self.H_mat['mu']\n + np.vstack(self.weight_mat['mu'][0, :])),\n scale=1).logpdf(self.output_mat['mu'])\n )\n\n # likelihood of actual output labels given class separation margin\n # and predicted output labels\n y_logl = np.sum(self.get_y_logl(y_list))\n\n return (lambda_logl + a_logl + h_logl\n + weight_prior_logl + weight_logl + f_logl + y_logl)",
"def _compute_log_likelihood(self, X, S):\n log_likelihood = 0\n for n in range(self.n_col):\n likelihood = 1\n for k in range(self.n_components):\n likelihood *= self.weights[k] \\\n * multivariate_normal(self.means[k], self.covs[k]).pdf(X[n]) \\\n * poisson(self.rates[k]).pmf(S[n])\n log_likelihood += np.log(likelihood)\n\n return log_likelihood",
"def get_log_marginal_likelihood(self, mode='BIC'):\n if mode == 'BIC':\n if not self.isOptimized:\n print('Parameters have not been optimized; training now')\n self.train()\n if self.BICscore is None:\n BIC = 0\n for i, model in enumerate(self.models):\n n = model.n \n k = model.m.num_params\n L = model.m.log_likelihood()\n BIC += L - k/2*np.log(n)\n self.BICscore = BIC\n return self.BICscore\n elif mode in ['laplace', 'Laplace']:\n raise NotImplementedError('Laplace approximation is not yet implemented')\n elif mode == 'AIS':\n raise NotImplementedError('Annealed importance sampling is not yet implemented')\n else:\n raise NotImplementedError('Unrecognized marginal likelihood approximation {:s}'.format(mode))",
"def log_likelihood_exp(self, x):\n predictions = self.get_predictions(x)\n ll = 0.\n for measurement in self.get_measurements:\n m_obj = flavio.Measurement[measurement]\n m_obs = m_obj.all_parameters\n exclude_observables = set(m_obs) - set(self.observables)\n prob_dict = m_obj.get_logprobability_all(predictions, exclude_parameters=exclude_observables)\n ll += sum(prob_dict.values())\n return ll",
"def calculate_likelihood(truth, log_forecast):\n\n return tf.reduce_sum(truth * log_forecast) # Dimensions [batch_size, N_LABEL_TIMESTEPS, N_LABEL_CLASSES]",
"def compute_log_likelihood(self, X, y, weights, avg=False):\n Z = self.sigmoid(np.dot(X, weights))\n epsilon = np.finfo(float).eps\n Z = np.clip(Z, epsilon, 1.0-epsilon)\n\n ll_all = y * np.log(Z) + (1 - y) * np.log(1 - Z)\n if not avg:\n return np.sum(ll_all)\n else:\n return np.mean(ll_all)",
"def log_likelihood_function(self, instance) -> float:\n return self.prior.factor(instance[0])",
"def my_loglike(theta, x, data, sigma):\n\n model = my_model(theta, x)\n\n return -0.5*len(x)*np.log(2*math.pi*sigma**2) - (0.5/sigma**2) * np.sum((data-model)**2)",
"def log_likelihood(self, theta, active=None):\n return sum(self.log_likelihood_term(theta, active=active))",
"def log_likelihood(self, log_occr_array=None):\n\n if log_occr_array is not None:\n # Catch invalid occurrence rates for zero likelihood\n try:\n self.log_occr = log_occr_array\n except InvalidOccurrenceRate:\n return -np.inf\n\n # N_exp\n N_exp = self.calc_integral() * self._N_stars\n\n # Product terms\n # TODO:Check that the array broadcasting works here\n # Shape of s_terms should be [N_planets, NR, NP]\n s_terms = self.H_array * self.F_array * self.occr\n\n if tf.is_tensor(self.occr):\n ps_terms = tf.reduce_sum(s_terms, axis=(-1, -2))\n product_term = tf.reduce_sum(tf.math.log(ps_terms))\n log_ll_value = product_term - N_exp\n else:\n product_term = np.log(s_terms.sum(axis=(-1, -2))).sum()\n log_ll_value = product_term - N_exp\n\n # BUG TODO\n if np.isnan(log_ll_value):\n warnings.warn(\".likelihood value is nan.\")\n import pdb; pdb.set_trace()\n\n # A nan value is possible when some of the occr are too high\n log_ll_value = -np.inf if np.isnan(log_ll_value) else log_ll_value\n\n return log_ll_value",
"def likelihood(self,x,params = None,**kwargs):\n return np.exp(self.log_likelihood(x,params=params,**kwargs))",
"def log_likelihood(self, log_occr_array=None):\n\n if log_occr_array is not None:\n # Catch invalid occurrence rates for zero likelihood\n try:\n self.log_occr = log_occr_array\n except InvalidOccurrenceRate:\n return -np.inf\n\n # N_exp\n N_exp = self.calc_integral() * self._N_stars\n\n # Product terms\n # TODO:Check that the array broadcasting works here\n # Shape of s_terms should be [N_planets, NR, NP]\n s_terms = self.H_array * self.F_array * self.occr_grid\n\n if tf.is_tensor(self.occr):\n ps_terms = tf.reduce_sum(s_terms, axis=(-1, -2))\n product_term = tf.reduce_sum(tf.math.log(ps_terms))\n log_ll_value = product_term - N_exp\n else:\n product_term = np.log(s_terms.sum(axis=(-1, -2))).sum()\n log_ll_value = product_term - N_exp\n\n # BUG TODO\n if np.isnan(log_ll_value):\n warnings.warn(\".likelihood value is nan.\")\n import pdb; pdb.set_trace()\n\n # A nan value is possible when some of the occr are too high\n log_ll_value = -np.inf if np.isnan(log_ll_value) else log_ll_value\n\n return log_ll_value",
"def log_likelihoods(self):\n return self.get_log_likelihoods()",
"def log_likelihood(model, dataloader, K=200):\n total_sum = 0\n importance_values = []\n zs_batch = torch.randn((dataloader.batch_size, K, 100))\n for i, minibatch in enumerate(dataloader):\n minibatch = minibatch[0]\n importance_values += importance_sampling_function(model, minibatch, zs_batch[:len(minibatch)])\n return torch.mean(torch.stack(importance_values))",
"def log_likelihood(X, mu, sigma, phi):\n ll = None\n\n #######################################################################\n # TODO: #\n # Compute the log-likelihood of the data under the current model. #\n # This is used to check for convergnence of the algorithm. #\n #######################################################################\n\n ll = np.zeros((X.shape[0], 1))\n k = mu.shape[0]\n\n for i in range(k):\n ll += multivariate_normal(mu[i, :], sigma[i]).pdf(X)[:, np.newaxis]*phi[i]\n\n ll = sum(np.log(ll))\n\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n\n return ll",
"def lnlike(params, observables, nDraws=1000000):\n #print('checking type ({}) and length ({}) of params in lnlikefxn'.format(type(params),len(params)))\n evalData=generateModelData(params, distance_standoffMid, nDraws)\n evalHist, evalBinEdges = np.histogram(evalData[:,3], tof_nBins, tof_range,\n density=True)\n logEvalHist = np.log(evalHist)\n #print(logEvalHist)\n # find what TOFs have zero observed data\n # we'll use this to handle cases where we might wind up with -inf*0\n # likelihood is fine if PDF is 0 somewhere where no data is found\n # without checks though, ln(PDF=0)=-inf, -inf*0 = nan\n # however, if PDF is 0 (lnPDF=-inf) where there IS data, lnL should be -inf\n zeroObservedIndices = np.where(observables == 0)[0]\n for idx in zeroObservedIndices:\n if logEvalHist[idx] == -inf:\n logEvalHist[zeroObservedIndices] = 0\n \n loglike = np.dot(logEvalHist,observables)\n return loglike",
"def nloglikeobs(self, params):\n #print len(params),\n beta = params[:-2]\n df = params[-2]\n scale = params[-1]\n loc = np.dot(self.exog, beta)\n endog = self.endog\n x = (endog - loc)/scale\n #next part is stats.t._logpdf\n lPx = sps_gamln((df+1)/2) - sps_gamln(df/2.)\n lPx -= 0.5*np_log(df*np_pi) + (df+1)/2.*np_log(1+(x**2)/df)\n lPx -= np_log(scale) # correction for scale\n return -lPx",
"def __call__(self, params):\n # Construct model for given set of parameters\n mod = self.model(params)\n\n # Input into equation (11) from Anderson (1990)\n # But we want log-likelihood not negative log-likelihood (in MCMC)\n # and so we add the -1.0\n like = np.sum(np.log(mod) + (self.power / mod))\n return -1.0*like",
"def nll_logprobs(self, input, target_idx):\n raise NotImplementedError()",
"def log_likelihood_grad_rew(self, data, reward_model, bias_params):",
"def loglikelihood(self, net):\n\n adjmat = net.edges.adjacency_matrix\n\n # if any of the mustexist or mustnotexist constraints are violated,\n # return negative infinity\n if (not (adjmat | self.mustexist).all()) or \\\n (adjmat & self.mustnotexist).any():\n return NEGINF\n\n # if any custom constraints are violated, return negative infinity\n if self.constraints and not all(c(adjmat) for c in self.constraints):\n return NEGINF\n\n loglike = 0.0\n if self.energy_matrix != None:\n energy = N.sum(adjmat * self.energy_matrix) \n loglike = -self.weight * energy\n\n return loglike",
"def forward(self, input):\n log_likelihood = -0.5 * (math.log(2 * math.pi) + self.sigma2.log() + (input - self.mu) ** 2 / self.sigma2)\n return log_likelihood",
"def log_likelihood(data, probs):\n # Assume data is given as counts\n return _np.sum([nlogp(n, p) for n, p in zip(data, probs) if n > 0])",
"def log_likelihood(self, points):\n\t\tpoint_set = list(points)\n\t\tlog_probabilities = [np.log(self.density(point)) for point in point_set]\n\t\treturn sum(log_probabilities)",
"def objective_llh(self, params):\n\n try:\n obj = self.log_likelihood(params[0], params[1], params[2:])\n except (LinAlgError, ZeroDivisionError, ValueError):\n obj = -np.inf\n return obj",
"def evaluate_GMM_log_likelihood(model, x, y):\n y_pred = model.predict(x)\n \n num_datapoints = len(x)\n output_dim = y.shape[-1]\n num_comp = int(y_pred.shape[-1] / (3*output_dim))\n\n mix_comp_logits = y_pred[:, :num_comp]\n mus = y_pred[:, num_comp:(1+output_dim)*num_comp]\n sigmas = y_pred[:, (1+output_dim)*num_comp:]\n \n # convert logits to categorical distribution - need to itterate through all points\n mix_comp = np.zeros((num_datapoints, num_comp))\n for i in range(num_datapoints):\n mix_comp[i,:] = get_mixture_dist(mix_comp_logits[i,:], num_comp)\n \n log_likelihood = 0\n for i in range(num_comp):\n for j in range(output_dim):\n mse = -0.5*np.sum(mix_comp[:,i]*np.square((y[:,j]-mus[:,(i*output_dim)+j])/sigmas[:,(i*output_dim)+j]))\n sigma_trace = -np.sum(mix_comp[:,i]*np.log(sigmas[:,(i*output_dim)+j]))\n log2pi = -np.sum(mix_comp[:,i]*0.5*output_dim*np.log(2*np.pi))\n\n log_likelihood += mse + sigma_trace + log2pi\n \n avg_log_likelihood = np.round(log_likelihood / num_datapoints, 2)\n print(f'Log likelihood: {avg_log_likelihood}')\n return avg_log_likelihood",
"def likelihood(self):\n \n raise NotImplementedError()",
"def __calc_likelihood(self, *args):\n params = {}\n for i, p in enumerate(self._par_names):\n if self._par_islog[p]:\n params[p] = np.power(10., args[i])\n else:\n params[p] = args[i]\n return self.return_likelihood(params)",
"def log_likelihood(self, X, Y, theta):\n \n alphas, logZ = self.alpha_chain(X, theta)\n total = 0\n \n s_prev = self.S\n \n for t,s in enumerate(Y):\n total += self.log_psi(theta, t, s_prev, s, X[t]) \n s_prev = s\n \n total -= logZ\n return total / X.shape[0]",
"def log_prob(self):",
"def loglike(self, params):\n\n if type(params) is not MixedLMParams:\n params = MixedLMParams.from_packed(params, self.k_fe,\n self.use_sqrt)\n\n fe_params = params.get_fe_params()\n cov_re = params.get_cov_re()\n try:\n cov_re_inv = np.linalg.inv(cov_re)\n except np.linalg.LinAlgError:\n cov_re_inv = None\n _, cov_re_logdet = np.linalg.slogdet(cov_re)\n\n # The residuals\n expval = np.dot(self.exog, fe_params)\n resid_all = self.endog - expval\n\n likeval = 0.\n\n # Handle the covariance penalty\n if self.cov_pen is not None:\n likeval -= self.cov_pen.func(cov_re, cov_re_inv)\n\n # Handle the fixed effects penalty\n if self.fe_pen is not None:\n likeval -= self.fe_pen.func(fe_params)\n\n xvx, qf = 0., 0.\n for k, lab in enumerate(self.group_labels):\n\n exog = self.exog_li[k]\n ex_r = self.exog_re_li[k]\n ex2_r = self.exog_re2_li[k]\n resid = resid_all[self.row_indices[lab]]\n\n # Part 1 of the log likelihood (for both ML and REML)\n ld = _smw_logdet(1., ex_r, ex2_r, cov_re, cov_re_inv,\n cov_re_logdet)\n likeval -= ld / 2.\n\n # Part 2 of the log likelihood (for both ML and REML)\n u = _smw_solve(1., ex_r, ex2_r, cov_re, cov_re_inv, resid)\n qf += np.dot(resid, u)\n\n # Adjustment for REML\n if self.reml:\n mat = _smw_solve(1., ex_r, ex2_r, cov_re, cov_re_inv,\n exog)\n xvx += np.dot(exog.T, mat)\n\n if self.reml:\n likeval -= (self.n_totobs - self.k_fe) * np.log(qf) / 2.\n _,ld = np.linalg.slogdet(xvx)\n likeval -= ld / 2.\n likeval -= (self.n_totobs - self.k_fe) * np.log(2 * np.pi) / 2.\n likeval += ((self.n_totobs - self.k_fe) *\n np.log(self.n_totobs - self.k_fe) / 2.)\n likeval -= (self.n_totobs - self.k_fe) / 2.\n else:\n likeval -= self.n_totobs * np.log(qf) / 2.\n likeval -= self.n_totobs * np.log(2 * np.pi) / 2.\n likeval += self.n_totobs * np.log(self.n_totobs) / 2.\n likeval -= self.n_totobs / 2.\n\n return likeval",
"def log_likelihood_loss(y, tx, w):\n p_1 = sigmoid(tx.dot(w))\n p_0 = np.log(1-p_1)\n p_1 = np.log(p_1)\n return -np.sum((y == 1)*p_1+(y == 0)*p_0)",
"def log_likelihood(self,samples,times):\n prior_mu = np.ones(2*len(self.A)+1) \n prior_var = np.eye(2*len(self.A)+1)*0.7\n prior_p = np.log(self.prior_pdf())\n #prior_p = np.log(self.normal_prior(prior_mu,prior_var))\n xform = [self.sum_exp(t) for t in times]\n lp = scipy.stats.norm(xform,np.sqrt(self.var)).pdf(samples)\n sample_p =np.sum(np.log(lp))\n ll = prior_p + sample_p\n\n if np.isnan(ll):\n return -np.infty\n return ll",
"def get_log_likelihood(phi, pred, t, dot_product, weight, reg= 1):\n prior = -0.5* np.sum(np.multiply(weight, weight))\n likelihood = np.multiply(t, np.log(pred+TOLERANCE)) + np.multiply(1.0- t, np.log(1.0-pred+TOLERANCE))\n likelihood = np.sum(likelihood)\n\n return prior + likelihood",
"def log_likelihood(self, theta=None, phi=None):\n theta = theta if theta is not None else self.theta\n phi = phi if phi is not None else self.phi\n ret = 0.\n for m, dm in enumerate(self.corpus):\n for n, w_mn in enumerate(dm):\n tp = 0.\n for k in range(self.n_components):\n tp += theta[m, k] * phi[k, w_mn]\n ret += np.log(tp)\n return ret",
"def loglikelihood(mean, grad):\n\n # update the global latent_means list\n latent_means[index] = mean\n\n if grad.size > 0:\n # update the gradient\n grad[:] = compute_gradient(\n Y=Y, \n mi=mean, \n latent_Sigmas=latent_Sigmas,\n B1=B1,\n B2=B2,\n ss=ss,\n mu=mu,\n g1=g1,\n g2=g2,\n sigma2=sigma2,\n index=index\n )\n\n a1, a2, a3, a4, a5 = compute_terms(\n Y=Y, \n latent_means=latent_means, \n latent_Sigmas=latent_Sigmas, \n B1=B1, \n B2=B2, \n mu=mu, \n g1=g1, \n g2=g2\n )\n\n scalars = N*q/2 - N*p/2*np.log(TWOPI*sigma2)\n\n total = sum(\n [\n item1 - 1/(2*sigma2)*item2 + (TWOPI)**(1/2-q)*(item3 + item4 + item5) \n for item1, item2, item3, item4, item5 in zip(a1, a2, a3, a4, a5)\n ]\n )\n\n return total + scalars",
"def LLwrapper(params):\n NLL = LogLikelihood(gauss, s)\n return NLL(params[0], params[1])",
"def log_likelihoodJoint(theta, x, y, data, var, size):\n #unpack the parameters\n #[xpos, ypos]*images) +[amplitude, radius, focus])\n images = len(theta[:-5]) / 2\n peak, radius, focus, width_x, width_y = theta[-5:]\n\n lnL = 0.\n for tmp in xrange(images):\n #X and Y are always in pairs\n center_x = theta[2*tmp]\n center_y = theta[2*tmp+1]\n\n #1)Generate a model Airy disc\n amplitude = _amplitudeFromPeak(peak, center_x, center_y, radius,\n x_0=int(size[0]/2.-0.5), y_0=int(size[1]/2.-0.5))\n airy = models.AiryDisk2D(amplitude, center_x, center_y, radius)\n adata = airy.eval(x, y, amplitude, center_x, center_y, radius).reshape(size)\n\n #2)Apply Focus, no normalisation as smoothing\n f = models.Gaussian2D(1., center_x, center_y, focus, focus, 0.)\n focusdata = f.eval(x, y, 1., center_x, center_y, focus, focus, 0.).reshape(size)\n model = signal.convolve2d(adata, focusdata, mode='same')\n\n #3)Apply CCD diffusion, approximated with a Gaussian -- max = 1 as centred\n CCD = models.Gaussian2D(1., size[0]/2.-0.5, size[1]/2.-0.5, width_x, width_y, 0.)\n CCDdata = CCD.eval(x, y, 1., size[0]/2.-0.5, size[1]/2.-0.5, width_x, width_y, 0.).reshape(size)\n model = signal.convolve2d(model, CCDdata, mode='same').flatten()\n\n #lnL += - 0.5 * np.sum((data[tmp].flatten() - model)**2 / var[tmp].flatten())\n #Gary B. said that this should be from the model not data so recompute var (now contains rn**2)\n var = var[tmp] + model.copy()\n lnL += - 0.5 * np.sum((data[tmp].flatten() - model)**2 / var)\n\n return lnL",
"def get_log_marginal_likelihood(self, mode='BIC'):\n if mode == 'BIC':\n if not self.isOptimized:\n print('Parameters have not been optimized; training now')\n self.train()\n \n if self.BICscore is None:\n k = self.m.num_params\n L = self.m.log_likelihood()\n BIC = L - k/2*np.log(self.n)\n self.BICscore = BIC\n return self.BICscore\n elif mode in ['laplace', 'Laplace']:\n raise NotImplementedError('Laplace approximation is not yet implemented')\n elif mode == 'AIS':\n raise NotImplementedError('Annealed importance sampling is not yet implemented')\n else:\n raise NotImplementedError('Unrecognized marginal likelihood approximation {:s}'.format(mode))",
"def negative_log_likelihood(self, y):\r\n # y.shape[0] is (symbolically) the number of rows in y, i.e.,\r\n # number of examples (call it n) in the minibatch\r\n # T.arange(y.shape[0]) is a symbolic vector which will contain\r\n # [0,1,2,... n-1] T.log(self.p_y_given_x) is a matrix of\r\n # Log-Probabilities (call it LP) with one row per example and\r\n # one column per class LP[T.arange(y.shape[0]),y] is a vector\r\n # v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ...,\r\n # LP[n-1,y[n-1]]] and T.mean(LP[T.arange(y.shape[0]),y]) is\r\n # the mean (across minibatch examples) of the elements in v,\r\n # i.e., the mean log-likelihood across the minibatch.\r\n return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])",
"def compute_log_prob(self,params: ndarray) -> float:\n return self.compute_log_prior(params) + self.compute_log_likelihood(params)",
"def llf_obs(self):\n return self.model.loglikeobs(self.params)",
"def log_likelihood_grad_bias(self, data, reward_model, bias_params):",
"def negative_log_likelihood(self, y):\r\n # y.shape[0] is (symbolically) the number of rows in y, i.e., number of examples (call it n) in the minibatch\r\n # T.arange(y.shape[0]) is a symbolic vector which will contain [0,1,2,... n-1]\r\n # T.log(self.p_y_given_x) is a matrix of Log-Probabilities (call it LP) with one row per example and one column per class\r\n # LP[T.arange(y.shape[0]),y] is a vector v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ..., LP[n-1,y[n-1]]]\r\n # and T.mean(LP[T.arange(y.shape[0]),y]) is the mean (across minibatch examples) of the elements in v,\r\n # i.e., the mean log-likelihood across the minibatch.\r\n return T.log(self.p_y_given_x)[T.arange(y.shape[0]),y]",
"def get_log_likelihoods(self, short=False):\n if short:\n return self.memory.get('log_likelihoods', self.s, self.e)\n else:\n return np.concatenate(\n (\n self.memory.get('log_likelihoods', self.s, self.e),\n self.tail_batch.log_likelihoods\n ), axis=0\n )",
"def log_likelihood(mu, sigma, y, T):\n ll = 0.\n for yi, Ti in zip(y, T):\n d = yi.size\n log_det_cov = np.linalg.slogdet(sigma[Ti])[1]\n y_minus_mean = yi - mu[Ti]\n term3 = np.dot(y_minus_mean.T.ravel(),\n np.linalg.solve(sigma[Ti], y_minus_mean.T).ravel())\n ll += (-0.5 * d * np.log(2 * np.pi) - 0.5 * log_det_cov - 0.5 * term3)\n return ll",
"def log_likelihood(self, state, obs, act):\n indices = np.array([self.Gittins[state['successes'][i], state['failures'][i]] for i in range(self.env.n_arms)])\n greedy_arms = np.where(np.isclose(indices,indices.max()))[0]\n return np.log(1/len(greedy_arms)) if act in greedy_arms else -1e8",
"def define_model_log_prob(model, model_loss, x, y, params_flattened_list, params_shape_list, tau_list, tau_out, normalizing_const=1., predict=False, prior_scale = 1.0, device = 'cpu'):\n\n fmodel = util.make_functional(model)\n dist_list = []\n for tau in tau_list:\n dist_list.append(torch.distributions.Normal(torch.zeros_like(tau), tau**-0.5))\n\n def log_prob_func(params):\n # model.zero_grad()\n # params is flat\n # Below we update the network weights to be params\n params_unflattened = util.unflatten(model, params)\n\n i_prev = 0\n l_prior = torch.zeros_like( params[0], requires_grad=True) # Set l2_reg to be on the same device as params\n for weights, index, shape, dist in zip(model.parameters(), params_flattened_list, params_shape_list, dist_list):\n # weights.data = params[i_prev:index+i_prev].reshape(shape)\n w = params[i_prev:index+i_prev]\n l_prior = dist.log_prob(w).sum() + l_prior\n i_prev += index\n\n # Sample prior if no data\n if x is None:\n # print('hi')\n return l_prior/prior_scale\n\n x_device = x.to(device)\n y_device = y.to(device)\n\n\n output = fmodel(x_device, params=params_unflattened)\n\n if model_loss == 'binary_class_linear_output':\n crit = nn.BCEWithLogitsLoss(reduction='sum')\n ll = - tau_out *(crit(output, y_device))\n elif model_loss == 'multi_class_linear_output':\n # crit = nn.MSELoss(reduction='mean')\n crit = nn.CrossEntropyLoss(reduction='sum')\n # crit = nn.BCEWithLogitsLoss(reduction='sum')\n ll = - tau_out *(crit(output, y_device.long().view(-1)))\n # ll = - tau_out *(torch.nn.functional.nll_loss(output, y.long().view(-1)))\n elif model_loss == 'multi_class_log_softmax_output':\n ll = - tau_out *(torch.nn.functional.nll_loss(output, y_device.long().view(-1)))\n\n elif model_loss == 'regression':\n # crit = nn.MSELoss(reduction='sum')\n ll = - 0.5 * tau_out * ((output - y_device) ** 2).sum(0)#sum(0)\n\n elif callable(model_loss):\n # Assume defined custom log-likelihood.\n ll = - model_loss(output, y_device).sum(0)\n else:\n raise NotImplementedError()\n\n if torch.cuda.is_available():\n del x_device, y_device\n torch.cuda.empty_cache()\n\n if predict:\n return (ll + l_prior/prior_scale), output\n else:\n return (ll + l_prior/prior_scale)\n\n return log_prob_func",
"def log_marg_likelihood(self):\n self.A = np.linalg.inv(self.Sn)\n term1 = self.t - [email protected]\n self.Evidence_mN = (self.beta/2)*np.linalg.norm(term1)+ (self.alpha/2)*[email protected]\n A_abs = np.linalg.eigvals(self.A)\n A_abs = np.prod(A_abs)\n\n self.marg_lik = ((self.p)/2)*np.log(self.alpha) + (self.n/2)*np.log(self.beta) - self.Evidence_mN - (1/2)*np.log(A_abs) - (self.n/2)*np.log(2*np.pi)\n\n return self.marg_lik",
"def log_likelihood(self, theta, x, **kwargs):\n\n u, logdet_dudx, log_a = self.forward(theta, x, **kwargs)\n\n constant = float(-0.5 * self.n_inputs * np.log(2.0 * np.pi))\n # log_likelihood = torch.log(torch.sum(torch.exp(log_a - 0.5 * u ** 2 + logdet_dudx), dim=2))\n log_likelihood = torch.logsumexp(log_a - 0.5 * u**2 + logdet_dudx, dim=2)\n log_likelihood = constant + torch.sum(log_likelihood, dim=1)\n\n return u, log_likelihood",
"def log(self: Float[LinearOperator, \"*batch M N\"]) -> Float[LinearOperator, \"*batch M N\"]:\n return self.__class__(self._diag.log())",
"def _logprob_X(self, X, **kwargs):\n pass",
"def get_log_like(self):\n\n # Make a function which will stack all point sources (XYLike do not support spatial dimension)\n\n expectation = self.get_model()\n\n if self._is_poisson:\n\n # Poisson log-likelihood\n\n return np.sum(poisson_log_likelihood_ideal_bkg(self._y, np.zeros_like(self._y), expectation))\n\n else:\n\n # Chi squared\n\n chi2_ = half_chi2(self._y, self._yerr, expectation)\n\n assert np.all(np.isfinite(chi2_))\n\n return np.sum(chi2_) * (-1)",
"def negative_log_likelihood(self, y):\r\n # start-snippet-2\r\n # y.shape[0] is (symbolically) the number of rows in y, i.e. number of examples (call it n) in the minibatch\r\n # T.arange(y.shape[0]) is a symbolic vector which will contain [0,1,2,... n-1]\r\n # T.log(self.p_y_given_x) is a matrix of Log-Probabilities (call it LP) with one row per example and one column per class\r\n # LP[T.arange(y.shape[0]),y] is a vector v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ..., LP[n-1,y[n-1]]] and\r\n # T.mean(LP[T.arange(y.shape[0]),y]) is the mean (across minibatch examples) of the elements in v, i.e., the mean log-likelihood across the minibatch.\r\n\r\n #print \"y.ndim = \",y.ndim\r\n return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])\r\n # end-snippet-2\r",
"def log_marginal_likelihood(self, X, W):\n phi_X = self.phi(X, W)\n S_n = phi_X.T @ phi_X + np.eye(self.M)\n mu_n = np.linalg.inv(S_n) @ phi_X.T @ self.Y\n a_n = self.gamma_a0 + self.N / 2\n A = np.diag(self.Y.T @ self.Y)\n C = np.diag(mu_n.T @ S_n @ mu_n)\n b_n = self.gamma_b0 + 0.5 * (A - C)\n\n # Compute Lambda term.\n sign, logdet = np.linalg.slogdet(S_n)\n lambda_term = -0.5 * sign * logdet\n\n # Compute b_n term.\n b_term = self.gamma_a0 * np.log(self.gamma_b0) - a_n * np.log(b_n)\n\n # Compute a_n term.\n gamma_term = gammaln(a_n) - gammaln(self.gamma_a0)\n\n # Compute sum over all y_n.\n return np.sum(gamma_term + b_term + lambda_term)",
"def _log_likelihood_poisson(self, df, dfo, n_bins=10):\n cond = df[\"selected_jig\"].values == 1\n range = parameter_ranges['uae'], parameter_ranges['rec']\n\n uae_obs = dfo[\"mueff_av\"].values\n rec_obs = dfo[\"rec_arcsec\"].values\n obs, xedges, yedges = np.histogram2d(uae_obs, rec_obs, range=range, bins=n_bins)\n\n uae_mod = df[\"uae_obs_jig\"].values[cond]\n rec_mod = df[\"rec_obs_jig\"].values[cond]\n model, _, _ = np.histogram2d(uae_mod, rec_mod, range=range, bins=n_bins, density=True)\n\n # Rescale model by number of observations\n model = model.astype(\"float\") * dfo.shape[0]\n\n # Calculate Poisson probability for each bin\n obs = obs.reshape(-1).astype(\"float\")\n model = model.reshape(-1)\n probs = stats.poisson(mu=model).pmf(obs)\n\n # Return overall log likelihood\n return np.log(probs).sum()",
"def likelihood(mean, logs, x):\n return -0.5 * (logs * 2. + ((x - mean) ** 2) / np.exp(logs * 2.) + GaussianDiag.Log2PI)"
] | [
"0.81072646",
"0.7853458",
"0.77865106",
"0.7597883",
"0.7591351",
"0.75211746",
"0.7505817",
"0.75030684",
"0.7466109",
"0.7463448",
"0.74243414",
"0.74091846",
"0.740693",
"0.73675585",
"0.7324371",
"0.7308806",
"0.7306328",
"0.7265567",
"0.7232976",
"0.7188578",
"0.715438",
"0.71136564",
"0.7102312",
"0.70765376",
"0.7004373",
"0.6998405",
"0.6943935",
"0.69364995",
"0.6883036",
"0.6872581",
"0.6867338",
"0.68635213",
"0.6854963",
"0.68409693",
"0.68357766",
"0.6825958",
"0.6820241",
"0.6808611",
"0.6774825",
"0.6767416",
"0.67554265",
"0.67492384",
"0.67479813",
"0.6747616",
"0.674387",
"0.67393374",
"0.67273134",
"0.6704353",
"0.66953546",
"0.6684967",
"0.6684595",
"0.66771907",
"0.6674058",
"0.6650024",
"0.6643363",
"0.6631782",
"0.6622033",
"0.6621179",
"0.66200036",
"0.6613123",
"0.66018397",
"0.65819025",
"0.65336263",
"0.65311784",
"0.65234226",
"0.6516358",
"0.65006185",
"0.64972126",
"0.6489628",
"0.64853317",
"0.6478298",
"0.6475579",
"0.6469244",
"0.64665437",
"0.64488673",
"0.6446764",
"0.64424443",
"0.644218",
"0.64392716",
"0.6437254",
"0.6418751",
"0.6418363",
"0.6415496",
"0.64033717",
"0.64015627",
"0.6391246",
"0.63599193",
"0.633834",
"0.6332394",
"0.6331061",
"0.63037467",
"0.63010406",
"0.6291023",
"0.62887645",
"0.625861",
"0.624452",
"0.6242619",
"0.6231715",
"0.6228432",
"0.62245655"
] | 0.75402844 | 5 |
Score vector of model evaluated pointwise. The gradient of loglikeobs with respect to each parameter. | def score_obs(self, params, *args, **kwargs):
if self._use_approx_cs:
return approx_fprime_cs(params, self.loglikeobs,
args=args, kwargs=kwargs)
else:
return approx_fprime(params, self.loglikeobs,
args=args, kwargs=kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def loss_fun(model: GPModel, params: dict) -> float:\n py = model.module.call(params, train_ds['index_points'])\n return -py.log_prob(train_ds['y'])",
"def objective(self,w):\n l = 0\n for i in range(len(self.x)):\n # Each example contributes log(sigma(y_i * x_i . w))\n l -= log(sigmoid(self.y[i] * np.dot(w, self.x[i,:])))\n # regularisation 1/2 * alpha * ||w||^2\n l += 0.5 * self.alpha * np.dot(w,w)\n return l",
"def grad_log(self, X):\n # \"\"\"\n # Evaluate the gradients (with respect to the input) of the log density at\n # each of the n points in X. This is the score function.\n\n # X: n x d numpy array.\n XB = np.dot(X, self.B)\n Y = 0.5*XB + self.c\n E2y = np.exp(2*Y)\n # n x dh\n Phi = old_div((E2y-1.0),(E2y+1))\n # n x dx\n T = np.dot(Phi, 0.5*self.B.T)\n S = self.b - X + T\n return S",
"def __log_likelihood(self, params, *args):\n\t\tX, y, feature_set, lambda_reg, empirical_weights, verbose, sign = args\n\n\t\tno_example = len(X)\n\t\ttotal_logZ = 0\n\t\ttotal_logProb = 0\n\t\texpected_weights = np.zeros(len(feature_set))\n\t\tfor t in range(len(X)):\n\t\t\t# example_features = X[t], example_labels = y[t]\n\n\t\t\tpotential = np.zeros(len(X[t]))\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\t#candidate_features = X[t][i], candidate_label = y[t][i]\n\t\t\t\tpotential[i] = feature_set.calc_inner_product(X[t][i], params)\n\n\t\t\t#scaling\n\t\t\tpotential = potential - np.max(potential, keepdims=True)\n\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\ttotal_logProb += potential[i] * y[t][i]\n\n\t\t\tpotential, Z = self.__softmax(potential)\n\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\tfeature_set.calc_inner_sum(expected_weights, X[t][i], potential[i])\n\n\t\t\ttotal_logZ += log(Z)\n\n\t\t# _params = feature_set.get_regularized_params(params, 'bias')\n\t\t_params = params\n\t\tlog_likelihood = total_logProb - total_logZ - (lambda_reg/2) * np.sum(np.multiply(_params,_params))\n\t\tgradients = empirical_weights - expected_weights - lambda_reg * _params\n\n\t\tglobal SUB_ITERATION_NUM\n\t\tif verbose:\n\t\t\tsub_iteration_str = ' '\n\t\t\tif SUB_ITERATION_NUM > 0:\n\t\t\t\tsub_iteration_str = '(' + '{0:02d}'.format(SUB_ITERATION_NUM) + ')'\n\t\t\tprint(' ', '{0:03d}'.format(ITERATION_NUM), sub_iteration_str, ':', log_likelihood * sign)\n\n\t\tSUB_ITERATION_NUM += 1\n\n\t\treturn sign * log_likelihood, sign * gradients",
"def loss(W_vect, X, T):\n # log_prior = - 0.5 * L2_reg * jnp.dot(W_vect, W_vect)\n return jnp.mean((predictions(W_vect, X) - T)**2) + 0.5*jnp.log(2*jnp.pi)",
"def update(self, returns, log_probs):\n policy_gradient = []\n for log_prob, Gt in zip(log_probs, returns):\n policy_gradient.append(-log_prob * Gt)\n\n loss = torch.stack(policy_gradient).sum()\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()",
"def msll(Y_true, Y_pred, V_pred, Y_train):\n mt, st = Y_train.mean(), Y_train.std()\n ll = norm.logpdf(Y_true, loc=Y_pred, scale=np.sqrt(V_pred))\n rand_ll = norm.logpdf(Y_true, loc=mt, scale=st)\n msll = - (ll - rand_ll).mean()\n return msll",
"def fast_loss_and_grad(self, X, y):\n loss = 0.0\n grad = np.zeros(self.W.shape) # initialize the gradient as zero\n \n # ================================================================ #\n # YOUR CODE HERE:\n # Calculate the softmax loss and gradient WITHOUT any for loops.\n # ================================================================ #\n \n num_train = X.shape[0]\n num_classes = self.W.shape[0]\n \n# # vectorized loss calculation #\n class_scores_matrix = np.dot(self.W,X.T) # calculating class scores matrix (C x m): rows are class scores transposes\n class_scores_matrix -= np.max(class_scores_matrix) # considering the possible issue for numerical instability and account for it\n exp_a = np.exp(class_scores_matrix) # calculating the exponents\n \n# y_exp = np.array(exp_a[y, np.arange(0, class_scores_matrix.shape[1])])\n# #print(exp_a[:,:3])\n# #print(y[:3])\n# #print(y_exp[:3])\n \n# tt = np.sum(exp_a,axis=0)\n# tt2 = np.divide(tt,y_exp)\n# print(num_train)\n# tt3 = np.power(tt2,1/num_train)\n# loss = np.log(np.prod(tt3))\n \n \n \n \n (C, D) = self.W.shape\n N = X.shape[0]\n\n scores = np.dot(self.W, X.T)\n scores -= np.max(scores) # shift by log C to avoid numerical instability\n\n y_mat = np.zeros(shape = (C, N))\n y_mat[y, range(N)] = 1\n\n # matrix of all zeros except for a single wx + log C value in each column that corresponds to the\n # quantity we need to subtract from each row of scores\n correct_wx = np.multiply(y_mat, scores)\n\n # create a single row of the correct wx_y + log C values for each data point\n sums_wy = np.sum(correct_wx, axis=0) # sum over each column\n\n exp_scores = np.exp(scores)\n sums_exp = np.sum(exp_scores, axis=0) # sum over each column\n result = np.log(sums_exp)\n\n result -= sums_wy\n\n loss = np.sum(result)\n loss /= num_train\n \n \n # vectorized gradient calculation #\n exp_a_sum = np.sum(exp_a,axis=0)\n\n y_mat_corres = np.zeros(shape = (num_classes, num_train))\n y_mat_corres[y, range(num_train)] = 1\n sum_exp_scores = np.sum(exp_a, axis=0) \n sum_exp_scores = 1.0 / exp_a_sum # division by sum over columns\n exp_a *= sum_exp_scores\n grad = np.dot(exp_a, X)\n grad -= np.dot(y_mat_corres, X)\n grad /= num_train\n \n\n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n\n return loss, grad",
"def log_likelihood_grad_rew(self, data, reward_model, bias_params):",
"def log_prior_grad(self, inputs):",
"def score(self, s):\n fv = s.feature_vector\n product = fv.dot(self.params.T)[0, 0]\n return s.score(lmwt=self.lmwt) + product",
"def calc_loss(X, Y, model):\n Z = predict(X, model)\n return -(Y * np.log(Z)).sum() / len(Y)",
"def score(self, params, *args, **kwargs):\n try:\n # If an analytic score_obs is available, try this first before\n # falling back to numerical differentiation below\n return self.score_obs(params, *args, **kwargs).sum(0)\n except NotImplementedError:\n # Fallback in case a `loglike` is implemented but `loglikeobs`\n # is not.\n approx_func = (approx_fprime_cs\n if self._use_approx_cs else approx_fprime)\n return approx_func(params, self.loglike, args=args, kwargs=kwargs)",
"def logit_pvalue(model, x, verbose=False):\n probs = model.predict_proba(x)\n n_datapoints = probs.shape[0]\n n_feautures = len(model.coef_[0]) + 1\n coeffs = np.hstack([model.intercept_.reshape(-1, 1), model.coef_])\n x_full = np.matrix(np.insert(np.array(x), 0, 1, axis=1))\n pvals = []\n errors = []\n for coeffs_vec, p_vec in zip(coeffs, probs.T):\n ans = np.zeros((n_feautures, n_feautures))\n for i in range(n_datapoints):\n ans += np.dot(x_full[i].T, x_full[i]) * p_vec[i] * (1 - p_vec[i])\n try:\n vcov = np.linalg.inv(np.matrix(ans))\n serrors = np.sqrt(np.diag(vcov))\n t = coeffs_vec / serrors\n pn = (1 - norm.cdf(abs(t))) * 2\n except np.linalg.linalg.LinAlgError as e:\n if verbose:\n print(\"det : {0}\".format(np.linalg.det(np.matrix(ans))))\n serrors = np.zeros(ans.shape[0])\n pn = np.zeros(ans.shape[0])\n pvals.append(pn)\n errors.append(serrors)\n pvals = np.array(pvals)\n errors = np.array(errors)\n return pvals.T, coeffs.T, errors.T",
"def prob_calibration_function(truthvec, scorevec, reg_param_vec='default', knots='sample',\n method='logistic', force_prob=True, eps=1e-15, max_knots=200,\n transform_fn='none', random_state=942, verbose=False, cv_folds=5,\n unity_prior_weight=1, unity_prior_gridsize=20):\n from sklearn import linear_model\n from sklearn.metrics import log_loss, make_scorer\n\n if (unity_prior_weight>0):\n scorevec_coda, truthvec_coda = create_yeqx_bias_vectors(unity_prior_gridsize)\n coda_wt = unity_prior_weight/unity_prior_gridsize\n weightvec = np.concatenate((np.ones(len(scorevec)), coda_wt * np.ones(len(scorevec_coda))))\n scorevec = np.concatenate((scorevec, scorevec_coda))\n truthvec = np.concatenate((truthvec, truthvec_coda))\n\n if transform_fn != 'none':\n scorevec = transform_fn(scorevec)\n\n knot_vec = np.unique(scorevec)\n if (knots == 'sample'):\n num_unique = len(knot_vec)\n if (num_unique > max_knots):\n smallest_knot, biggest_knot = knot_vec[0], knot_vec[-1]\n inter_knot_vec = knot_vec[1:-1]\n random.seed(random_state)\n random.shuffle(inter_knot_vec)\n reduced_knot_vec = inter_knot_vec[:(max_knots-2)]\n reduced_knot_vec = np.concatenate((reduced_knot_vec, [smallest_knot, biggest_knot]))\n reduced_knot_vec = np.concatenate((reduced_knot_vec, np.linspace(0, 1, 21)))\n if (unity_prior_weight>0):\n reduced_knot_vec = np.concatenate((reduced_knot_vec, scorevec_coda))\n knot_vec = np.unique(reduced_knot_vec)\n if verbose:\n print(\"Originally there were {} knots. Reducing to {} while preserving first and last knot.\".format(num_unique, len(knot_vec)))\n X_mat = _natural_cubic_spline_basis_expansion(scorevec, knot_vec)\n\n if (method == 'logistic'):\n if ((type(reg_param_vec) == str) and (reg_param_vec == 'default')):\n reg_param_vec = 10**np.linspace(-7, 5, 61)\n if verbose:\n print(\"Trying {} values of C between {} and {}\".format(len(reg_param_vec), np.min(reg_param_vec), np.max(reg_param_vec)))\n reg = linear_model.LogisticRegressionCV(Cs=reg_param_vec, cv=StratifiedKFold(cv_folds, shuffle=True),\n scoring=make_scorer(log_loss, needs_proba=True, greater_is_better=False))\n if (unity_prior_weight>0):\n reg.fit(X_mat, truthvec, weightvec)\n else:\n reg.fit(X_mat, truthvec)\n if verbose:\n print(\"Best value found C = {}\".format(reg.C_))\n\n if (method == 'ridge'):\n if ((type(reg_param_vec) == str) and (reg_param_vec == 'default')):\n reg_param_vec = 10**np.linspace(-7, 7, 71)\n if verbose:\n print(\"Trying {} values of alpha between {} and {}\".format(len(reg_param_vec), np.min(reg_param_vec),np.max(reg_param_vec)))\n reg = linear_model.RidgeCV(alphas=reg_param_vec, cv=KFold(cv_folds, shuffle=True), scoring=make_scorer(mean_squared_error_trunc,needs_proba=False, greater_is_better=False))\n reg.fit(X_mat, truthvec)\n if verbose:\n print(\"Best value found alpha = {}\".format(reg.alpha_))\n\n def calibrate_scores(new_scores):\n new_scores = np.maximum(new_scores,knot_vec[0]*np.ones(len(new_scores)))\n new_scores = np.minimum(new_scores,knot_vec[-1]*np.ones(len(new_scores)))\n if transform_fn != 'none':\n new_scores = transform_fn(new_scores)\n basis_exp = _natural_cubic_spline_basis_expansion(new_scores,knot_vec)\n if (method == 'logistic'):\n outvec = reg.predict_proba(basis_exp)[:,1]\n if (method == 'ridge'):\n outvec = reg.predict(basis_exp)\n if force_prob:\n outvec = np.where(outvec < eps, eps, outvec)\n outvec = np.where(outvec > 1-eps, 1-eps, outvec)\n return outvec\n\n return calibrate_scores",
"def _perplexity(self, X, log_w):\n return np.exp(-log_w/X.sum())",
"def logloss_mc(y_true, y_prob, epsilon=1e-15):\n # normalize\n y_prob = y_prob / y_prob.sum(axis=1).reshape(-1, 1)\n y_prob = np.maximum(epsilon, y_prob)\n y_prob = np.minimum(1 - epsilon, y_prob)\n # get probabilities\n y = [y_prob[i, j] for (i, j) in enumerate(y_true)]\n ll = - np.mean(np.log(y))\n return ll",
"def compute_log_loss(predicted, actual, eps=1e-14):\n predicted = np.clip(predicted, eps, 1 - eps)\n loss = -1 * np.mean(actual * np.log(predicted)\n + (1 - actual)\n * np.log(1 - predicted))\n\n return loss",
"def evaluate_GMM_log_likelihood(model, x, y):\n y_pred = model.predict(x)\n \n num_datapoints = len(x)\n output_dim = y.shape[-1]\n num_comp = int(y_pred.shape[-1] / (3*output_dim))\n\n mix_comp_logits = y_pred[:, :num_comp]\n mus = y_pred[:, num_comp:(1+output_dim)*num_comp]\n sigmas = y_pred[:, (1+output_dim)*num_comp:]\n \n # convert logits to categorical distribution - need to itterate through all points\n mix_comp = np.zeros((num_datapoints, num_comp))\n for i in range(num_datapoints):\n mix_comp[i,:] = get_mixture_dist(mix_comp_logits[i,:], num_comp)\n \n log_likelihood = 0\n for i in range(num_comp):\n for j in range(output_dim):\n mse = -0.5*np.sum(mix_comp[:,i]*np.square((y[:,j]-mus[:,(i*output_dim)+j])/sigmas[:,(i*output_dim)+j]))\n sigma_trace = -np.sum(mix_comp[:,i]*np.log(sigmas[:,(i*output_dim)+j]))\n log2pi = -np.sum(mix_comp[:,i]*0.5*output_dim*np.log(2*np.pi))\n\n log_likelihood += mse + sigma_trace + log2pi\n \n avg_log_likelihood = np.round(log_likelihood / num_datapoints, 2)\n print(f'Log likelihood: {avg_log_likelihood}')\n return avg_log_likelihood",
"def _loss_gradient(x0, x1, b, w, lam, weights=None):\n nvars = len(w)\n\n # initialize + regularization term\n loss = 0.5 * lam * np.sum(w ** 2)\n gradient = np.zeros(nvars + 1) # first position is b\n gradient[1:] = lam * w\n\n # we need prediction for x\n pred_x_0_1 = [LogisticRegression._sigmoid(x0, b, w), LogisticRegression._sigmoid(x1, b, w)]\n\n # the log likelihood\n log_like_x_0_1 = [np.log(1.0 - pred_x_0_1[0]),\n np.log(pred_x_0_1[1])]\n\n # also need the error for gradient.\n error = [pred_x_0_1[0],\n pred_x_0_1[1] - 1]\n\n if weights is None:\n loss += -np.sum(log_like_x_0_1[1]) - np.sum(log_like_x_0_1[0])\n gradient[0] += np.sum(error[0]) + np.sum(error[1]) # * 1 for bias term \n for k in range(nvars):\n gradient[k + 1] += np.sum(error[0] * x0[:, k]) + np.sum(error[1] * x1[:, k])\n else:\n loss += -np.sum(weights[1] * log_like_x_0_1[1]) - np.sum(weights[0] * log_like_x_0_1[0])\n gradient[0] += np.sum(error[0] * weights[0]) + np.sum(error[1] * weights[1])\n for k in range(nvars):\n gradient[k + 1] += ( np.sum(weights[0] * error[0] * x0[:, k]) +\n np.sum(weights[1] * error[1] * x1[:, k]) )\n return loss, gradient",
"def log_loss_objective(y_true: npt.NDArray, y_pred: npt.NDArray) -> Tuple[npt.NDArray, npt.NDArray]:\n y_pred = sigmoid(y_pred)\n grad = y_pred - y_true\n hess = y_pred * (1.0 - y_pred)\n return grad, hess",
"def log_loss(self):\n probabilities = self.probability_array().copy()\n # need to flip the probabilities for p < 0.5 with this binary case.\n # 1 - old_val is same as oldval*-1 + 1. Do in 2 steps:\n probabilities[np.equal(0, self.y)] *= -1\n probabilities[np.equal(0, self.y)] += 1\n # when multiclass: np.amax(probabilities, 1)\n return np.log(probabilities).sum()",
"def loss(self, y_true, score, pos_label=_NoValue):\n if pos_label is not _NoValue:\n raise ValueError(\"`pos_label` not supported\")\n\n score = score.atleast_2d() # Working with 2-D arrays only\n\n p = CSoftmax().softmax(score) # SoftMax function\n\n # find-like indexing (list of lists)\n return -CArray(p[[list(range(score.shape[0])), y_true.tolist()]]).log()",
"def grad_log_q(self,z): \n param_count = 0\n grad = np.zeros((np.sum(self.approx_param_no),self.sims))\n for core_param in range(len(self.q)):\n for approx_param in range(self.q[core_param].param_no):\n grad[param_count] = self.q[core_param].vi_score(z[core_param],approx_param) \n param_count += 1\n return grad",
"def svm_loss_vectorized(W, X, y, reg):\n num_classes = W.shape[1]\n num_train = X.shape[0]\n #loss = 0.0 \n loss = 0.0\n scores = np.zeros((1,num_classes))\n dW = np.zeros(W.shape) # initialize the gradient as zero\n\n #############################################################################\n # TODO: #\n # Implement a vectorized version of the structured SVM loss, storing the #\n # result in loss. #\n #############################################################################\n \n # lines begin with double \"#\" are the last version of code!!!!!\n \n ##for i in xrange(num_train):\n #XX = np.tile(X[i,:],(num_classes,1)) # try to use broadcasting\n #scores = np.sum(np.multiply(XX,W.T), axis = 1)\n ## scores = np.sum(np.multiply(X[i,:],W.T), axis = 1)\n \n ## if i ==1: print scores\n \n #loss += np.sum(scores - scores[y[i]]) + num_classes -1\n #http://stackoverflow.com/questions/2900084/counting-positive-elements-in-a-list-with-python-list-comprehensions\n ## scores+=1\n ## scores[y[i]]-=1 \n #however, this is sum over index, not values, glaube ich \n #loss+= sum(x < 0 for x in (scores-scores[y[i]]))\n ## loss+= (scores-scores[y[i]])[scores-scores[y[i]]>0].sum()\n #pass\n ############################################\n # construct a zero loop version\n ############################################\n scores2D = np.zeros((num_train, num_classes)) #used to store dotted scores\n scores1D = np.zeros((num_train,1)) #used to store corrected scores\n #index1D = np.zeros((1,num_classes))\n #index1D = range(num_classes) \n #scores1D = y[index1D]\n \n scores2D = np.dot(X,W) \n ##for i in xrange(num_train):\n ## scores1D[i,0]=scores2D[i,y[i]]-1 #find the correct scores and fill them into scores1D, the value -1 is because: si-sj+1\n ## scores2D[i,y[i]]-=1 # we want at corrected score voxel, the value should be 0, correct score -1 - \n #(correct score -1) = 0\n #####################################\n #for loop replacement###\n indexInsert = np.arange(num_train)\n scores1D[indexInsert,0] = scores2D[indexInsert,y[indexInsert]] -1 #using array indexing\n scores2D[indexInsert,y[indexInsert]] -=1\n \n ##################################### \n \n #scores2D = X.dot(W)\n #http://stackoverflow.com/questions/9497290/how-would-i-sum-a-multi-dimensional-array-in-the-most-succinct-python\n #rewrite summation\n #loss += (scores2D-scores1D)[scores2D-scores1D >0].sum()\n #temp = scores2D-np.tile (scores1D, (1,num_classes)) # for each score minus the corrected score\n temp = scores2D-scores1D #broadcasting!!\n #print temp[1,:]\n temp= temp.clip(min=0) \n #loss += sum(map(sum, (temp)[temp>0]))\n #loss += sum(map(sum, (temp)))\n #loss += (temp)[temp >0].sum()\n loss += sum(sum(x) for x in temp) #sum them up\n #loss -= num_train # minus 1 is because in each train, due to the plus 1 above , correct score - correct \n # score +1 = 1, but it should be 0, therefore, i deduce them at the last minute \n # ( then I made this also in the for loop to meet intuitive)\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n loss /= num_train\n loss += 0.5 * reg * np.sum(W * W)\n #############################################################################\n # TODO: #\n # Implement a vectorized version of the gradient for the structured SVM #\n # loss, storing the result in dW. 
#\n # #\n # Hint: Instead of computing the gradient from scratch, it may be easier #\n # to reuse some of the intermediate values that you used to compute the #\n # loss. #\n #############################################################################\n #tempBool = np.divide(temp, temp)\n #tempBool = tempBool.clip(max=1,min=0)\n #http://stackoverflow.com/questions/19666626/replace-all-elements-of-python-numpy-array-that-are-greater-than-some-value\n tempBool = np.copy(temp) # temp = scores2D-scores1D , temp= temp.clip(min=0)\n # temp is already the every score minus the correct labeled score\n tempBool[tempBool>0] = 1 # for every element, when it is positive, set it to one (for weighting)\n for j in xrange(num_train):\n tempBool[j,y[j]] =-1*sum(tempBool[j,:]) # calculate how many final scores, max(~~,0) are more than 0, add the number to the correct\n # label element, because it is the times that the corrected scores be used\n dW += np.reshape (X[j,:],(X.shape[1],1))*tempBool[j,:] # broadcasting, out-product\n #pass\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n dW/= num_train\n dW += reg*W\n \n return loss, dW",
"def loss(self, X, Y, lmd):\n P, _ = self.forward(X)\n loss = np.mean(-np.log(np.einsum('ij,ji->i', Y.T, P)))\n\n reg = 0 # Regularization term\n for w in self.W:\n reg += np.sum(np.square(w))\n\n reg *= lmd\n\n cost = loss + reg\n\n return cost",
"def score(self, x, y=None):\n _, logp = self.score_samples(x)\n return logp",
"def loss_and_grad(self, X, y):\n\n # Initialize the loss and gradient to zero.\n loss = 0.0\n grad = np.zeros_like(self.W)\n grad_tmp = np.zeros_like(self.W)\n num_classes = self.W.shape[0] # C = num_classes\n num_train = X.shape[0]\n \n # ================================================================ #\n # YOUR CODE HERE:\n # Calculate the softmax loss and the gradient. Store the gradient\n # as the variable grad.\n # ================================================================ #\n \n exp_a = np.zeros((num_classes,num_train))\n for i in np.arange(num_train):\n \n Loss = 0.0\n\n class_scores = np.dot(self.W,X[i,:].T) # calculating class scores (C x 1 vector)\n class_scores -= np.max(class_scores) # considering the possible issue for numerical instability and account for it\n\n exp_a[:,i] = np.exp(class_scores) # turning class scores to probabilities (C x 1 vector), without normalization\n\n Loss -= np.log(exp_a[y[i],i]/np.sum(exp_a[:,i]))\n \n \n #if i==0:\n grada = np.zeros(X.shape[1])\n \n for j in range(num_classes):\n if j != y[i]:\n grad_tmp[j,:] = X[i,:].T * (exp_a[j,i] / np.sum(exp_a[:,i])) \n else: \n grad_tmp[j,:] = X[i,:].T * (exp_a[j,i] / np.sum(exp_a[:,i])) - X[i,:].T \n\n grad += grad_tmp\n loss += Loss \n \n pass\n\n\n loss /= num_train\n grad /= num_train\n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n\n return loss, grad",
"def svm_loss(scores, y):\r\n\r\n N = scores.shape[0]\r\n\r\n # Compute svm data loss\r\n correct_class_scores = scores[range(N), y]\r\n margins = np.maximum(0.0, scores - correct_class_scores[:, None] + 1.0)\r\n margins[range(N), y] = 0.0\r\n loss = np.sum(margins) / N\r\n\r\n # Compute gradient off loss function w.r.t. scores\r\n num_pos = np.sum(margins > 0, axis=1)\r\n dscores = np.zeros(scores.shape)\r\n dscores[margins > 0] = 1\r\n dscores[range(N), y] -= num_pos\r\n dscores /= N\r\n\r\n return loss, dscores",
"def logloss_mc(y_true, y_prob, epsilon=10e-15):\r\n # normalize\r\n y_prob = y_prob / y_prob.sum(axis=1).reshape(-1, 1)\r\n print 'y_prob: ' + str(y_prob[1])\r\n print 'y_true: ' + str(y_true[1])\r\n y_prob = np.maximum(epsilon, y_prob)\r\n y_prob = np.minimum(1 - epsilon, y_prob)\r\n print 'y_prob: ' + str(y_prob[1])\r\n print 'y_true: ' + str(y_true[1])\r\n # get probabilities\r\n y = [y_prob[i, j] for (i, j) in enumerate(y_true)]\r\n print 'y: ' + str(y[1])\r\n print 'y_true: ' + str(y_true[1])\r\n ll = - np.mean(np.log(y))\r\n return ll",
"def _log_prior_gradients(self):\n x = self._get_params()\n ret = np.zeros(x.size)\n [np.put(ret,i,p.lnpdf_grad(xx)) for i,(p,xx) in enumerate(zip(self.priors,x)) if not p is None]\n return ret",
"def _log_prior_gradients(self):\r\n if self.priors is None:\r\n return 0.\r\n x = self._get_params()\r\n ret = np.zeros(x.size)\r\n [np.put(ret, i, p.lnpdf_grad(xx)) for i, (p, xx) in enumerate(zip(self.priors, x)) if not p is None]\r\n return ret",
"def scoring_function(self, model, y_true, y_predicted_probability):",
"def evaluate(self):\n # initialize delta_weights\n Loss = 0\n for i, x_test in enumerate(self.X_test):\n Loss += (self.sigmoid(np.dot(self.weights,x_test))-self.y_test[i])**2\n return Loss",
"def objective_function(self, y_true, y_predicted, X=None, sample_weight=None):\n return metrics.log_loss(y_true, y_predicted, sample_weight=sample_weight)",
"def objective_function(self, y_true, y_predicted, X=None, sample_weight=None):\n return metrics.log_loss(y_true, y_predicted, sample_weight=sample_weight)",
"def multiclass_log_loss(y_true, y_pred, eps=1e-15):\n clip = np.clip(y_pred, eps, 1 - eps)\n actual = np.zeros(y_pred.shape)\n rows = actual.shape[0]\n print rows\n print np.arange(rows)\n print (y_true.astype(int))\n actual[np.arange(rows), y_true.astype(int)] = 1\n print actual\n vsota = np.sum(actual * np.log(clip))\n print vsota\n return -1.0 / rows * vsota",
"def loss_grad_softmax_vectorized(W, X, y):\n loss = 0 \n grad = np.zeros_like(W)\n dim, num_train = X.shape\n\n scores = W.dot(X) # [K, N]\n # Shift scores so that the highest value is 0\n scores -= np.max(scores)\n scores_exp = np.exp(scores)\n correct_scores_exp = scores_exp[y, range(num_train)] # [N, ]\n scores_exp_sum = np.sum(scores_exp, axis=0) # [N, ]\n loss = -np.sum(np.log(correct_scores_exp / scores_exp_sum))\n loss /= num_train\n #loss += 0.5 * reg * np.sum(W * W)\n\n scores_exp_normalized = scores_exp / scores_exp_sum\n # deal with the correct class\n scores_exp_normalized[y, range(num_train)] -= 1 # [K, N]\n grad = scores_exp_normalized.dot(X.T)\n grad /= num_train\n grad += W\n\n return grad",
"def gnll_loss(y, parameter_vector):\n alpha, mu, sigma = slice_parameter_vectors(\n parameter_vector, components\n ) # Unpack parameter vectors\n\n gm = tfd.MixtureSameFamily(\n mixture_distribution=tfd.Categorical(probs=alpha),\n components_distribution=tfd.Normal(loc=mu, scale=sigma),\n )\n\n log_likelihood = gm.log_prob(tf.transpose(y)) # Evaluate log-probability of y\n\n return -tf.reduce_mean(log_likelihood, axis=-1)",
"def logistic_grad(z):\n idx_pos = np.where(z >= 0.)\n idx_neg = np.where(z < 0.)\n res = np.empty(z.shape)\n res[idx_pos] = 1. / (1. + np.exp(-z[idx_pos]))\n res[idx_neg] = 1 - 1. / (1. + np.exp(z[idx_neg]))\n return res",
"def logistic_grad(z):\n idx_pos = np.where(z >= 0.)\n idx_neg = np.where(z < 0.)\n res = np.empty(z.shape)\n res[idx_pos] = 1. / (1. + np.exp(-z[idx_pos]))\n res[idx_neg] = 1 - 1. / (1. + np.exp(z[idx_neg]))\n return res",
"def cv_gradient(self,z):\n gradient = np.zeros(np.sum(self.approx_param_no))\n z_t = z.T \n log_q = self.normal_log_q(z.T)\n log_p = self.log_p(z.T)\n grad_log_q = self.grad_log_q(z)\n gradient = grad_log_q*(log_p-log_q)\n\n alpha0 = alpha_recursion(np.zeros(np.sum(self.approx_param_no)), grad_log_q, gradient, np.sum(self.approx_param_no)) \n\n vectorized = gradient - ((alpha0/np.var(grad_log_q,axis=1))*grad_log_q.T).T\n\n return np.mean(vectorized,axis=1)",
"def cv_gradient(self, z): \n z_t = np.transpose(z)\n log_q = self.normal_log_q(z_t)\n log_p = self.log_p(z_t)\n grad_log_q = self.grad_log_q(z)\n gradient = grad_log_q*np.repeat((log_p - log_q).T,2,axis=0)\n\n alpha0 = alpha_recursion(np.zeros(np.sum(self.approx_param_no)), grad_log_q, gradient, np.sum(self.approx_param_no)) \n \n vectorized = gradient - ((alpha0/np.var(grad_log_q,axis=1))*grad_log_q.T).T\n\n return np.mean(vectorized,axis=1)",
"def negative_log_likelihood(self):\n # y.shape[0] is (symbolically) the number of rows in y, i.e.,\n # number of examples (call it n) in the minibatch\n # T.arange(y.shape[0]) is a symbolic vector which will contain\n # [0,1,2,... n-1] T.log(self.p_y_given_x) is a matrix of\n # Log-Probabilities (call it LP) with one row per example and\n # one column per class LP[T.arange(y.shape[0]),y] is a vector\n # v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ...,\n # LP[n-1,y[n-1]]] and T.mean(LP[T.arange(y.shape[0]),y]) is\n # the mean (across minibatch examples) of the elements in v,\n # i.e., the mean log-likelihood across the minibatch.\n return -T.log(self.p_y_given_x)[T.arange(self.y.shape[0]), self.y]",
"def compute_loss(self, x, label):\n # Forward propagation\n y_hat = self.forward_pass(x)\n return -np.log(y_hat[label])",
"def calc_score(model, scorer, X, y_true):\n\n y_preds = model.predict(X)\n score = scorer(y_true, y_preds)\n\n return score",
"def score(self, X, y):\n\n u = ((y - self.predict(X)) ** 2).sum()\n v = ((y - np.mean(y)) ** 2).sum()\n score = 1 - u / v\n\n return score",
"def log_likelihood(y_true, y_pred):\n ll = np.sum(y_true * np.log(y_pred) - y_pred)\n return ll",
"def logp_grad(self, xs, ys, fs, **kwargs):",
"def score_full(self, params):\n\n if type(params) is not MixedLMParams:\n params = MixedLMParams.from_packed(params, self.k_fe,\n self.use_sqrt)\n\n fe_params = params.get_fe_params()\n cov_re = params.get_cov_re()\n try:\n cov_re_inv = np.linalg.inv(cov_re)\n except np.linalg.LinAlgError:\n cov_re_inv = None\n\n score_fe = np.zeros(self.k_fe, dtype=np.float64)\n score_re = np.zeros(self.k_re2, dtype=np.float64)\n\n # Handle the covariance penalty.\n if self.cov_pen is not None:\n score_re -= self.cov_pen.grad(cov_re, cov_re_inv)\n\n # Handle the fixed effects penalty.\n if self.fe_pen is not None:\n score_fe -= self.fe_pen.grad(fe_params)\n\n # resid' V^{-1} resid, summed over the groups (a scalar)\n rvir = 0.\n\n # exog' V^{-1} resid, summed over the groups (a k_fe\n # dimensional vector)\n xtvir = 0.\n\n # exog' V^{_1} exog, summed over the groups (a k_fe x k_fe\n # matrix)\n xtvix = 0.\n\n # V^{-1} exog' dV/dQ_jj exog V^{-1}, where Q_jj is the jj^th\n # covariance parameter.\n xtax = [0.,] * self.k_re2\n\n # Temporary related to the gradient of log |V|\n dlv = np.zeros(self.k_re2, dtype=np.float64)\n\n # resid' V^{-1} dV/dQ_jj V^{-1} resid (a scalar)\n rvavr = np.zeros(self.k_re2, dtype=np.float64)\n\n for k in range(self.n_groups):\n\n exog = self.exog_li[k]\n ex_r = self.exog_re_li[k]\n ex2_r = self.exog_re2_li[k]\n\n # The residuals\n expval = np.dot(exog, fe_params)\n resid = self.endog_li[k] - expval\n\n if self.reml:\n viexog = _smw_solve(1., ex_r, ex2_r, cov_re,\n cov_re_inv, exog)\n xtvix += np.dot(exog.T, viexog)\n\n # Contributions to the covariance parameter gradient\n jj = 0\n vex = _smw_solve(1., ex_r, ex2_r, cov_re, cov_re_inv,\n ex_r)\n vir = _smw_solve(1., ex_r, ex2_r, cov_re, cov_re_inv,\n resid)\n for jj,mat in self._gen_dV_dPsi(ex_r):\n dlv[jj] = np.trace(_smw_solve(1., ex_r, ex2_r, cov_re,\n cov_re_inv, mat))\n rvavr[jj] += np.dot(vir, np.dot(mat, vir))\n if self.reml:\n xtax[jj] += np.dot(viexog.T, np.dot(mat, viexog))\n\n # Contribution of log|V| to the covariance parameter\n # gradient.\n score_re -= 0.5 * dlv\n\n # Needed for the fixed effects params gradient\n rvir += np.dot(resid, vir)\n xtvir += np.dot(exog.T, vir)\n\n fac = self.n_totobs\n if self.reml:\n fac -= self.exog.shape[1]\n\n score_fe += fac * xtvir / rvir\n score_re += 0.5 * fac * rvavr / rvir\n\n if self.reml:\n for j in range(self.k_re2):\n score_re[j] += 0.5 * np.trace(np.linalg.solve(\n xtvix, xtax[j]))\n\n score_vec = np.concatenate((score_fe, score_re))\n\n if self._freepat is not None:\n return self._freepat.get_packed() * score_vec\n else:\n return score_vec",
"def score(self, indices):\n self.model.eval()\n _, prediction = self.model(self.propagation_matrix, self.features).max(dim=1)\n correct = prediction[indices].eq(self.target[indices]).sum().item()\n acc = correct / indices.shape[0]\n return acc",
"def log_prob_easier(self, x):\n normalization_const = -0.5 * tf.math.log(2 * np.pi) - tf.math.log(self.stddev)\n sq_term = - 0.5 * ((x - self.mean) / self.stddev) ** 2\n l_prob = tf.math.reduce_sum(normalization_const + sq_term, axis=1)\n return l_prob",
"def loglike(self, params):\n\n if type(params) is not MixedLMParams:\n params = MixedLMParams.from_packed(params, self.k_fe,\n self.use_sqrt)\n\n fe_params = params.get_fe_params()\n cov_re = params.get_cov_re()\n try:\n cov_re_inv = np.linalg.inv(cov_re)\n except np.linalg.LinAlgError:\n cov_re_inv = None\n _, cov_re_logdet = np.linalg.slogdet(cov_re)\n\n # The residuals\n expval = np.dot(self.exog, fe_params)\n resid_all = self.endog - expval\n\n likeval = 0.\n\n # Handle the covariance penalty\n if self.cov_pen is not None:\n likeval -= self.cov_pen.func(cov_re, cov_re_inv)\n\n # Handle the fixed effects penalty\n if self.fe_pen is not None:\n likeval -= self.fe_pen.func(fe_params)\n\n xvx, qf = 0., 0.\n for k, lab in enumerate(self.group_labels):\n\n exog = self.exog_li[k]\n ex_r = self.exog_re_li[k]\n ex2_r = self.exog_re2_li[k]\n resid = resid_all[self.row_indices[lab]]\n\n # Part 1 of the log likelihood (for both ML and REML)\n ld = _smw_logdet(1., ex_r, ex2_r, cov_re, cov_re_inv,\n cov_re_logdet)\n likeval -= ld / 2.\n\n # Part 2 of the log likelihood (for both ML and REML)\n u = _smw_solve(1., ex_r, ex2_r, cov_re, cov_re_inv, resid)\n qf += np.dot(resid, u)\n\n # Adjustment for REML\n if self.reml:\n mat = _smw_solve(1., ex_r, ex2_r, cov_re, cov_re_inv,\n exog)\n xvx += np.dot(exog.T, mat)\n\n if self.reml:\n likeval -= (self.n_totobs - self.k_fe) * np.log(qf) / 2.\n _,ld = np.linalg.slogdet(xvx)\n likeval -= ld / 2.\n likeval -= (self.n_totobs - self.k_fe) * np.log(2 * np.pi) / 2.\n likeval += ((self.n_totobs - self.k_fe) *\n np.log(self.n_totobs - self.k_fe) / 2.)\n likeval -= (self.n_totobs - self.k_fe) / 2.\n else:\n likeval -= self.n_totobs * np.log(qf) / 2.\n likeval -= self.n_totobs * np.log(2 * np.pi) / 2.\n likeval += self.n_totobs * np.log(self.n_totobs) / 2.\n likeval -= self.n_totobs / 2.\n\n return likeval",
"def __call__(self, y, pred, sample_weight=None):\n # logaddexp(0, v) == log(1.0 + exp(v))\n pred = pred.ravel()\n if sample_weight is None:\n return -2.0 * np.mean((y * pred) - np.logaddexp(0.0, pred))\n else:\n return (-2.0 / sample_weight.sum() *\n np.sum(sample_weight * ((y * pred) - np.logaddexp(0.0, pred))))",
"def score(self, params, transformed=True):\n params = np.array(params, ndmin=1)\n return approx_fprime_cs(params, self.loglike, args=(transformed,))",
"def loss_gradient(self, targets, scores):\n m = targets * scores\n numer = 4. * (2. * numpy.arctan(m) - 1.)\n denom = 1. + m**2\n return numer/denom",
"def score(self,x,**kwargs):\r\n if self.kfun != 'matrix' and len(self.sv): \r\n k = self.kfun(x,self.sv,**self.cparam)\r\n #print \"Kernel after test: \", k\r\n else:\r\n k = x\r\n \r\n \r\n self.W=self.alphas \r\n self.mat=self.kfun(np.array([self.sv[1]]), self.sv,**self.cparam) \r\n self.bias=self.svLabels[1]- np.dot((self.alphas*self.svLabels).T,self.mat.T) \r\n z=np.dot((self.alphas*self.svLabels).T,k.T)+self.bias\r\n \r\n #print \"bias: \", self.bias, \"\\nZ: \",z\r\n \r\n \r\n return z",
"def log_prob(self):",
"def compute_log_prob(self,params: ndarray) -> float:\n return self.compute_log_prior(params) + self.compute_log_likelihood(params)",
"def score_samples(self, x):\n n = x.shape[0]\n logp = np.log(self.mix_weight)\n logpz = np.zeros((n, self.ncomponents))\n\n for i in range(self.ncomponents):\n logpz[:, i] = logp[i] + multivariate_normal.logpdf(x, self.cond_proba.mean[i], self.cond_proba.cov[i])\n\n logpz, ll = normalize_logspace(logpz)\n pz = np.exp(logpz)\n return pz, ll",
"def loss_fn(model):\n with flax.nn.stateful(state) as new_state:\n with flax.nn.stochastic(prng_key):\n logits = model(batch['image'])\n loss = cross_entropy_loss(logits, batch['label'])\n # TODO(britefury): check if applying L2 regularization to weights but\n # *not* biases improves results\n weight_penalty_params = jax.tree_leaves(model.params)\n weight_l2 = sum([jnp.sum(x ** 2)\n for x in weight_penalty_params\n if x.ndim > 1])\n weight_penalty = l2_reg * 0.5 * weight_l2\n loss = loss + weight_penalty\n return loss, (new_state, logits)",
"def score(self, data):\n return np.mean( np.log( mvn.getSamplePointDensity(self.dataFrame_, self.H_, pd.DataFrame(data)) ) )",
"def get_score(self, solution: np.array) -> float:\n pass",
"def loss(self, X, y):\n\n # Initialize the loss to zero.\n loss = 0.0\n num_classes = self.W.shape[0] # C = num_classes\n num_train = X.shape[0]\n \n exp_a = np.zeros((num_classes,num_train))\n # ================================================================ #\n # YOUR CODE HERE:\n # Calculate the normalized softmax loss. Store it as the variable loss.\n # (That is, calculate the sum of the losses of all the training \n # set margins, and then normalize the loss by the number of \n # training examples.)\n # ================================================================ #\n \n \n for i in np.arange(num_train):\n \n Loss = 0.0\n\n class_scores = np.dot(self.W,X[i,:].T) # calculating class scores (C x 1 vector)\n class_scores -= np.max(class_scores) # considering the possible issue for numerical instability and account for it\n\n exp_a[:,i] = np.exp(class_scores) # turning class scores to probabilities (C x 1 vector), without normalization\n\n Loss -= np.log(exp_a[y[i],i]/np.sum(exp_a[:,i]))\n \n\n #p[:,i] = exp_a[:,i]/np.sum(exp_a[:,i]) # p now is a valid probability matrix\n #print(p[:,i])\n\n loss += Loss \n #print(Loss,i) \n \n pass\n loss /= num_train\n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n\n return loss",
"def score_samples(self, x):\n n = x.shape[0]\n logp = np.log(self.mix_weight)\n logpz = np.zeros((n, self.ncomponents))\n\n for i in range(self.ncomponents):\n logpz[:, i] = logp[i] + multivariate_student.logpdf(x, self.cond_proba.mean[i], self.cond_proba.cov[i],\n self.cond_proba.df)\n\n logpz, ll = normalize_logspace(logpz)\n pz = np.exp(logpz)\n return pz, ll",
"def log_likelihood(self, data, reward_model, bias_params):",
"def objective_function(self, y_true, y_predicted, X=None, sample_weight=None):\n return metrics.mean_squared_log_error(\n y_true, y_predicted, sample_weight=sample_weight\n )",
"def compute_val_loglik(current_beta, current_mu, true_data_mean, batch_size):\n x_batch = np.random.normal(true_data_mean, 1, batch_size)\n z_batch = np.random.normal(0, 1, batch_size)\n value_generated_data = evaluate_value_fn(current_beta, current_mu, x_batch,\n z_batch, batch_size,\n true_data_mean)\n loglik_generated_data = 0\n for z in z_batch:\n generated = generate_from_noise(z, current_mu)\n loglik_generated_data += \\\n norm.logpdf(generated, loc=true_data_mean, scale=1)\n return value_generated_data, loglik_generated_data",
"def logistic(self, X, w):\n g = 1 / (1 + np.exp(-X.dot(w)))\n return g",
"def score(self, X):\n nolist = False\n if not isinstance(X, list):\n X = [X]\n nolist = True\n\n scores = []\n for i in X:\n Xi = X[i]\n Xhati = self.predict(Xi)\n\n scores.append(1.0 - np.sum((Xi - Xhati)**2.0) / np.sum(Xi**2.0))\n\n if nolist:\n return scores[0]\n else:\n return scores",
"def _log_prior_gradients(self):\n if self.priors.size == 0:\n return 0.\n x = self.param_array\n ret = np.zeros(x.size)\n #compute derivate of prior density\n [np.put(ret, ind, p.lnpdf_grad(x[ind])) for p, ind in self.priors.items()]\n #add in jacobian derivatives if transformed\n priored_indexes = np.hstack([i for p, i in self.priors.items()])\n for c,j in self.constraints.items():\n if not isinstance(c, Transformation):continue\n for jj in j:\n if jj in priored_indexes:\n ret[jj] += c.log_jacobian_grad(x[jj])\n return ret",
"def score(self, data_test, labels_pred, is_train=False):\n return -np.log(np.clip(self.score_trust(data_test, labels_pred, is_train=is_train),\n sys.float_info.min, None))",
"def linear_regression(features, values):\n clf = SGDRegressor(n_iter=100)\n clf.fit(features,values)\n print(clf.score(features,values))\n intercept = clf.intercept_ \n params = clf.coef_\n \n return intercept, params",
"def calculate_log_perplexity(self, output, flat_labels): #completed, expensive, should be compiled\n return -np.sum(np.log2(np.clip(output, a_min=1E-12, a_max=1.0))[np.arange(flat_labels.shape[0]), flat_labels[:,1]])",
"def llf(self):\n return self.model.loglike(self.params)",
"def loss_func(self, logits, targets):\r\n return -np.sum(targets * np.log(logits)) / logits.shape[0]",
"def regress(self, model, log):\n self.optimizer_kwargs.update({'jac': self.lossprime,\n 'args': (self.lossprime,)})\n log('Starting parameter optimization.', tic='opt')\n log(' Optimizer: %s' % self.optimizer)\n log(' Optimizer kwargs: %s' % self.optimizer_kwargs)\n x0 = model.vector.copy()\n try:\n self.optimizer(model.get_loss, x0, **self.optimizer_kwargs)\n\n except ConvergenceOccurred:\n log('...optimization successful.', toc='opt')\n return True\n else:\n log('...optimization unsuccessful.', toc='opt')\n if self.lossprime:\n max_lossprime = \\\n max(abs(max(model.lossfunction.dloss_dparameters)),\n abs(min(model.lossfunction.dloss_dparameters)))\n log('...maximum absolute value of loss prime: %.3e'\n % max_lossprime)\n return False",
"def objective_function_gradients(self, x):\n self._set_params_transformed(x)\n LL_gradients = self._transform_gradients(self._log_likelihood_gradients())\n prior_gradients = self._transform_gradients(self._log_prior_gradients())\n return - LL_gradients - prior_gradients",
"def leonfcn(x: np.ndarray) -> np.ndarray:\n assert x.shape[1] == 2, \"Leon function is defined only on a 2D space.\"\n X = x[:, 0]\n Y = x[:, 1]\n scores = 100 * ((Y - X**3) ** 2) + ((1 - X) ** 2)\n return scores",
"def forward(self, X, labels):\n features = self.get_conv_feats(X)\n W = self.W\n T = self.T\n log_prob = CRFautograd.apply(W, T, features, labels)\n return log_prob",
"def loglloop(store):\n suml=0.0\n for i in xrange(store['yvec'].shape[0]):\n xbeta=dot(store['xmat'][i,:],store['beta'])\n suml=suml+store['yvec'][i] * xbeta - exp(xbeta)\n return suml",
"def log_loss(m_true, alpha, alpha0, m_probs, lambd=1.0):\n \n m_probs = tf.clip_by_value(m_probs, 1e-15, 1 - 1e-15)\n loss = -tf.reduce_mean(input_tensor=tf.reduce_sum(input_tensor=m_true * tf.math.log(m_probs), axis=1))\n if lambd > 0:\n kl = kullback_leibler_dirichlet(m_true, alpha)\n loss = loss + lambd * kl\n return loss",
"def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n num_classes = W.shape[1]\n #print('num_classes = ', num_classes)\n num_train = X.shape[0]\n #print('num_train = ', num_train)\n \n min_score = 0.0\n shifted_scores = np.zeros(W.shape[1])\n #max_score = np.zeros(W.shape[1])\n max_score = 0.0\n \n loss_array = np.zeros(y.shape[0])\n for i in range(num_train):\n scores = X[i].dot(W)\n #print('scores dimensions = ', scores.shape)\n #print('scores = ', scores)\n #print('i =', i, 'y = ', y[i])\n min_score = np.min(scores)\n max_score = np.max(scores)\n #print(min_score,max_score)\n shifted_scores = np.multiply(-1,scores + abs(min_score))\n #print(scores)\n #print(shifted_scores)\n exp_scores = np.exp(shifted_scores)\n norm = np.amax(exp_scores)\n norm_scores = np.divide(exp_scores,norm)\n loss_array[i] = np.multiply(-1,np.log(norm_scores[y[i]]/(np.sum(norm_scores)-norm_scores[y[i]])))\n #print(loss_array)\n for j in range(num_classes): \n\t\n if j == y[i]: \n dW[:,j] = np.multiply(norm_scores[y[i]],1-norm_scores[y[i]])\n else:\n dW[:,j] = np.multiply(-1,np.multiply(norm_scores[y[i]],norm_scores[y[j]]))\n\t\t\t\n\t\t\t\n loss = np.amax(loss_array)\n\n # Add regularization to the loss.\n loss = 0.5 * reg * np.sum(W * W) + loss\n \n \n pass\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW",
"def loss(self, x, s):\n return sum(cp.abs(x + s[self.w] - np.log(self.y)))",
"def svm_loss(x, y):\n loss, dx = None, None\n ###########################################################################\n # TODO: Implement loss and gradient for multiclass SVM classification. #\n # This will be similar to the svm loss vectorized implementation in #\n # cs231n/classifiers/linear_svm.py. #\n ###########################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n y_temp = np.ones((x.shape[0], x.shape[1])) # 1로 구성된 x와 같은 쉐입의 매트릭스를 만든다\n #print(y_temp)\n y_score = x[np.arange(x.shape[0]), y] # 정답레이블의 스코어로만 구성된 하나의 컬럼 벡터를 만든다\n y_score = np.reshape(y_score, (x.shape[0], 1)) # 브로드캐스팅을 위해 리쉐입 해준다\n y_temp[np.arange(x.shape[0]), y] = 0 # 1로 구성된 템프매트릭스의 정답 레이블에 해당되는 인덱스에 0을 할당한다\n #print(y_temp)\n loss_temp = (x - y_score) - 1\n loss_temp = (-loss_temp * y_temp) / x.shape[0]\n loss = (np.sum(loss_temp))\n #print(loss_temp)\n\n #print(np.sum(loss_temp, axis = 1))\n \n temp = loss_temp * x.shape[0]\n temp[loss_temp > 0] = 1\n row_sum = np.sum(temp, axis = 1)\n temp[np.arange(x.shape[0]), y] = -row_sum.T\n dx = -temp\n\n dx /= x.shape[0]\n\n\n #print(dx)\n\n\n\n\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return loss, dx",
"def _compute_model_prob(self, per_list_logodds):\n with tf.compat.v1.name_scope(name='compute_model_prob'):\n return tf.stop_gradient(\n tf.exp(-self._alpha *\n (per_list_logodds -\n tf.reduce_min(per_list_logodds, axis=2, keepdims=True))))",
"def l2_loss_vectorized(self, W, X, y, reg):\n loss = 0.0\n dW = np.zeros(W.shape) # initialize the gradient as zero\n num_train = X.shape[0]\n num_of_classes = W.shape[1]\n #############################################################################\n # TODO: #\n # Implement a vectorized version of the perceptron loss, storing the #\n # result in loss and the gradient in dW #\n #############################################################################\n\n\n scores = X.dot(W) - y\n\n loss = np.mean(0.5 * (scores**2))\n\n grad = np.empty_like(W)\n grad = X.T.dot(scores)\n dW = grad\n dW /= num_train\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW",
"def gradient_model(x, I_0, a, lam):\n if np.any(np.array(x) < 0):\n raise RuntimeError('x must be positive')\n if np.any(np.array([I_0, a, lam]) < 0):\n raise RuntimeError('all params must be positive')\n return a + I_0 * np.exp(-x / lam)",
"def loss(self, y: np.ndarray, y_hat: np.ndarray) -> float:\n losses = -(y * np.log(y_hat) + (1 - y) * np.log(1 - y_hat))\n return losses.mean() + self.reg / self.num_parameters * (\n (self.v[:, -1] ** 2).sum() + (self.w ** 2).sum()\n )",
"def logit_pvalue(self, p, feature):\n n = p.shape[0]\n m = len(self.model.coef_[0]) + 1\n print(self.model.coef_)\n coefs = np.concatenate(\n [self.model.intercept_ + self.beta_, self.model.coef_[0]]\n )\n x_full = np.matrix(np.insert(np.array(feature), 0, 1, axis=1))\n print(x_full.shape)\n ans = np.zeros((m, m))\n for i in range(n):\n ans = (\n ans\n + np.dot(np.transpose(x_full[i, :]), x_full[i, :]) * p[i, 1] * p[i, 0]\n )\n vcov = np.linalg.inv(np.matrix(ans))\n se = np.sqrt(np.diag(vcov))\n t = coefs / se\n p_value = (1 - norm.cdf(abs(t))) * 2\n return p_value",
"def loss(self, rng_key, param_map, model, guide, *args, **kwargs):\n def single_particle_elbo(rng_key):\n model_seed, guide_seed = random.split(rng_key)\n seeded_model = seed(model, model_seed)\n seeded_guide = seed(guide, guide_seed)\n guide_log_density, guide_trace = log_density(seeded_guide, args, kwargs, param_map)\n seeded_model = replay(seeded_model, guide_trace)\n model_log_density, _ = log_density(seeded_model, args, kwargs, param_map)\n\n # log p(z) - log q(z)\n elbo = model_log_density - guide_log_density\n return elbo\n\n # Return (-elbo) since by convention we do gradient descent on a loss and\n # the ELBO is a lower bound that needs to be maximized.\n if self.num_particles == 1:\n return - single_particle_elbo(rng_key)\n else:\n rng_keys = random.split(rng_key, self.num_particles)\n return - jnp.mean(vmap(single_particle_elbo)(rng_keys))",
"def score(self, X: np.ndarray) -> np.ndarray:\n # Matrix where predictions[i, j] is the prediction (1 or -1) for data point i\n # by learner j.\n predictions = np.zeros((len(X), self.num_learners))\n for i, learner in enumerate(self.learners):\n predictions[:, i] = learner.predict(X)\n return predictions @ self.learner_weights",
"def log_prob(self, value, avg=False):\n value = self._validate_sample(value, avg)\n\n # Unsqueeze the parameters at location -2 to allow for an arbitrary\n # number of sample locations.\n tmp_f = (self.f_avg if avg else self.f).unsqueeze(-2)\n tmp_alpha = (self.alpha_avg if avg else self.alpha).unsqueeze(-2)\n tmp_beta = (self.beta_avg if avg else self.beta).unsqueeze(-2)\n tmp_Z = (self.Z_avg if avg else self.Z).unsqueeze(-1)\n\n res = value - tmp_f\n assert res.shape[-1] == self.N\n ll_terms = -(res) * t.where(res > 0, tmp_alpha, -tmp_beta)\n lls = ll_terms.sum(-1) - t.log(tmp_Z)\n return lls",
"def cost_function(self, X, y, theta_list, bias):\n total_samples = len(y)\n loss = 0\n\n for i in range(total_samples):\n hypothesis = bias\n hypothesis += np.matmul(X[i], np.array(theta_list).T)\n \n de = 1.0 + np.exp(-hypothesis)\n sigmoidhypothesis = 1.0/de\n\n loss += (y[i]*np.log(sigmoidhypothesis)) + ((1-y[i])*(np.log(1 - sigmoidhypothesis)))\n\n return -1 * (loss/total_samples) #loss calculation",
"def logp(self, xs, ys, **kwargs):\n ind = np.isclose(self.predict(xs, **kwargs),ys)\n axis = tuple(range(1,len(xs.shape)))\n return np.log(np.prod(ind, axis=axis)) # default behavior",
"def compute_loss(self, X, y):\r\n # INSERT YOUR CODE HERE\r\n #raise Exception('Function not yet implemented!')\r\n\r\n # Computing the loss using the below formula\r\n # Loss = -(1/m)*sum( (y_i)*log(σ(wTx_i)) + (1-y_i)*log(1 - σ(wTx_i)))\r\n # m = number of examples and i for ith example\r\n\r\n loss = 0\r\n X = np.append(X, np.array([[1]]*X.shape[0]), axis=1)\r\n # for idx,example in enumerate(X):\r\n # loss = loss + y[idx] * np.log(self.sigmoid(np.dot(example, self.w))) + (1 - y[idx]) * np.log(1 - self.sigmoid(np.dot(example, self.w)))\r\n # loss = -loss/ X.shape[0]\r\n\r\n loss = -np.mean(y * np.log(self.sigmoid(np.dot(X, self.w))) + (1 - y) * np.log(1 - self.sigmoid(np.dot(X, self.w))))\r\n return loss",
"def _get_l2_reg(self) -> torch.Tensor:\n loss = 0\n for param in self.model.parameters():\n loss += (param ** 2).sum()\n return loss",
"def log_likelihood_gradient(y, tx, w):\n return tx.T.dot(sigmoid(tx.dot(w))-y)",
"def cost(self, A, b, w):\n f = 0\n if self.glm == 'Gaussian':\n tt = np.dot(A, w) - b\n # nao é loglik mesmo, é só mse\n loglik = 0.5 * np.linalg.norm(tt) ** 2.0\n elif self.glm == 'Poisson':\n xb = np.maximum(np.minimum(np.dot(A, w), 100), -100)#avoid overflow\n loglik = -(b * xb - np.exp(xb)).sum()\n elif self.glm == 'Gamma':\n loglik = 0\n for i in np.arange(0, A.shape[0]):\n loglik += scipy.stats.gamma.logpdf(b[i], 1.0 / np.dot(A[i, :], w))\n elif self.glm == 'Binomial':\n ov_lim = 50\n Xbeta = np.maximum(np.minimum(np.dot(A, w), ov_lim), -ov_lim)#avoid overflow\n loglik = -1 * np.sum(((b * Xbeta) - np.log(1 + np.exp(Xbeta))))\n if self.mean:\n loglik /= float(A.shape[0])\n if not np.isnan(loglik):\n f += loglik\n else:\n print(\"****** WARNING: loglik is nan.\")\n return f",
"def logloss(y, p):\n\n p[p < EPS] = EPS\n p[p > 1 - EPS] = 1 - EPS\n return log_loss(y, p)"
] | [
"0.6731809",
"0.6511133",
"0.6408616",
"0.6373088",
"0.6353184",
"0.6333015",
"0.6306042",
"0.6281399",
"0.627509",
"0.6274971",
"0.62635344",
"0.62288404",
"0.6224662",
"0.6214824",
"0.6204803",
"0.62040687",
"0.61912745",
"0.6183201",
"0.617974",
"0.61708385",
"0.61646855",
"0.61340374",
"0.6130766",
"0.6127207",
"0.6114385",
"0.6112781",
"0.6106604",
"0.6105813",
"0.60955644",
"0.6085752",
"0.607583",
"0.6069044",
"0.6060616",
"0.6059515",
"0.6058694",
"0.6058694",
"0.60528386",
"0.6050586",
"0.6035431",
"0.60224664",
"0.60224664",
"0.60151476",
"0.6013254",
"0.6010996",
"0.600595",
"0.60027874",
"0.5998022",
"0.5989625",
"0.598892",
"0.5978638",
"0.5972775",
"0.59592474",
"0.59459424",
"0.5943021",
"0.59372",
"0.59371585",
"0.5936288",
"0.5927388",
"0.5897573",
"0.5889629",
"0.58884335",
"0.58806056",
"0.58787245",
"0.58748806",
"0.5872554",
"0.5866204",
"0.58649445",
"0.58549625",
"0.58533186",
"0.58463967",
"0.5844579",
"0.58312845",
"0.5828705",
"0.58277225",
"0.5825637",
"0.58118063",
"0.5807446",
"0.58066595",
"0.5804308",
"0.5801052",
"0.5780898",
"0.5779383",
"0.5778546",
"0.5775034",
"0.5771963",
"0.5771173",
"0.57705194",
"0.5769419",
"0.5768411",
"0.57632166",
"0.5760955",
"0.57595503",
"0.5759445",
"0.57570815",
"0.57532316",
"0.5748695",
"0.5747834",
"0.57446325",
"0.57415783",
"0.5739235"
] | 0.5764669 | 89 |
Score vector of model. Default implementation sums score_obs. The gradient of loglike with respect to each parameter. | def score(self, params, *args, **kwargs):
try:
# If an analytic score_obs is available, try this first before
# falling back to numerical differentiation below
return self.score_obs(params, *args, **kwargs).sum(0)
except NotImplementedError:
# Fallback in case a `loglike` is implemented but `loglikeobs`
# is not.
approx_func = (approx_fprime_cs
if self._use_approx_cs else approx_fprime)
return approx_func(params, self.loglike, args=args, kwargs=kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def score(self, s):\n fv = s.feature_vector\n product = fv.dot(self.params.T)[0, 0]\n return s.score(lmwt=self.lmwt) + product",
"def svm_loss(scores, y):\r\n\r\n N = scores.shape[0]\r\n\r\n # Compute svm data loss\r\n correct_class_scores = scores[range(N), y]\r\n margins = np.maximum(0.0, scores - correct_class_scores[:, None] + 1.0)\r\n margins[range(N), y] = 0.0\r\n loss = np.sum(margins) / N\r\n\r\n # Compute gradient off loss function w.r.t. scores\r\n num_pos = np.sum(margins > 0, axis=1)\r\n dscores = np.zeros(scores.shape)\r\n dscores[margins > 0] = 1\r\n dscores[range(N), y] -= num_pos\r\n dscores /= N\r\n\r\n return loss, dscores",
"def score_full(self, params):\n\n if type(params) is not MixedLMParams:\n params = MixedLMParams.from_packed(params, self.k_fe,\n self.use_sqrt)\n\n fe_params = params.get_fe_params()\n cov_re = params.get_cov_re()\n try:\n cov_re_inv = np.linalg.inv(cov_re)\n except np.linalg.LinAlgError:\n cov_re_inv = None\n\n score_fe = np.zeros(self.k_fe, dtype=np.float64)\n score_re = np.zeros(self.k_re2, dtype=np.float64)\n\n # Handle the covariance penalty.\n if self.cov_pen is not None:\n score_re -= self.cov_pen.grad(cov_re, cov_re_inv)\n\n # Handle the fixed effects penalty.\n if self.fe_pen is not None:\n score_fe -= self.fe_pen.grad(fe_params)\n\n # resid' V^{-1} resid, summed over the groups (a scalar)\n rvir = 0.\n\n # exog' V^{-1} resid, summed over the groups (a k_fe\n # dimensional vector)\n xtvir = 0.\n\n # exog' V^{_1} exog, summed over the groups (a k_fe x k_fe\n # matrix)\n xtvix = 0.\n\n # V^{-1} exog' dV/dQ_jj exog V^{-1}, where Q_jj is the jj^th\n # covariance parameter.\n xtax = [0.,] * self.k_re2\n\n # Temporary related to the gradient of log |V|\n dlv = np.zeros(self.k_re2, dtype=np.float64)\n\n # resid' V^{-1} dV/dQ_jj V^{-1} resid (a scalar)\n rvavr = np.zeros(self.k_re2, dtype=np.float64)\n\n for k in range(self.n_groups):\n\n exog = self.exog_li[k]\n ex_r = self.exog_re_li[k]\n ex2_r = self.exog_re2_li[k]\n\n # The residuals\n expval = np.dot(exog, fe_params)\n resid = self.endog_li[k] - expval\n\n if self.reml:\n viexog = _smw_solve(1., ex_r, ex2_r, cov_re,\n cov_re_inv, exog)\n xtvix += np.dot(exog.T, viexog)\n\n # Contributions to the covariance parameter gradient\n jj = 0\n vex = _smw_solve(1., ex_r, ex2_r, cov_re, cov_re_inv,\n ex_r)\n vir = _smw_solve(1., ex_r, ex2_r, cov_re, cov_re_inv,\n resid)\n for jj,mat in self._gen_dV_dPsi(ex_r):\n dlv[jj] = np.trace(_smw_solve(1., ex_r, ex2_r, cov_re,\n cov_re_inv, mat))\n rvavr[jj] += np.dot(vir, np.dot(mat, vir))\n if self.reml:\n xtax[jj] += np.dot(viexog.T, np.dot(mat, viexog))\n\n # Contribution of log|V| to the covariance parameter\n # gradient.\n score_re -= 0.5 * dlv\n\n # Needed for the fixed effects params gradient\n rvir += np.dot(resid, vir)\n xtvir += np.dot(exog.T, vir)\n\n fac = self.n_totobs\n if self.reml:\n fac -= self.exog.shape[1]\n\n score_fe += fac * xtvir / rvir\n score_re += 0.5 * fac * rvavr / rvir\n\n if self.reml:\n for j in range(self.k_re2):\n score_re[j] += 0.5 * np.trace(np.linalg.solve(\n xtvix, xtax[j]))\n\n score_vec = np.concatenate((score_fe, score_re))\n\n if self._freepat is not None:\n return self._freepat.get_packed() * score_vec\n else:\n return score_vec",
"def loss(self, y_true, score, pos_label=_NoValue):\n if pos_label is not _NoValue:\n raise ValueError(\"`pos_label` not supported\")\n\n score = score.atleast_2d() # Working with 2-D arrays only\n\n p = CSoftmax().softmax(score) # SoftMax function\n\n # find-like indexing (list of lists)\n return -CArray(p[[list(range(score.shape[0])), y_true.tolist()]]).log()",
"def score_obs(self, params, *args, **kwargs):\n if self._use_approx_cs:\n return approx_fprime_cs(params, self.loglikeobs,\n args=args, kwargs=kwargs)\n else:\n return approx_fprime(params, self.loglikeobs,\n args=args, kwargs=kwargs)",
"def loss_fun(model: GPModel, params: dict) -> float:\n py = model.module.call(params, train_ds['index_points'])\n return -py.log_prob(train_ds['y'])",
"def score_model(self, length):\n train_score = self.dtr.score(self.X_train, self.y_train)\n test_score = self.dtr.score(self.X_test, self.y_test)\n self.scores.append([length, train_score, test_score])",
"def svm_loss_vectorized(W, X, y, reg):\n num_classes = W.shape[1]\n num_train = X.shape[0]\n #loss = 0.0 \n loss = 0.0\n scores = np.zeros((1,num_classes))\n dW = np.zeros(W.shape) # initialize the gradient as zero\n\n #############################################################################\n # TODO: #\n # Implement a vectorized version of the structured SVM loss, storing the #\n # result in loss. #\n #############################################################################\n \n # lines begin with double \"#\" are the last version of code!!!!!\n \n ##for i in xrange(num_train):\n #XX = np.tile(X[i,:],(num_classes,1)) # try to use broadcasting\n #scores = np.sum(np.multiply(XX,W.T), axis = 1)\n ## scores = np.sum(np.multiply(X[i,:],W.T), axis = 1)\n \n ## if i ==1: print scores\n \n #loss += np.sum(scores - scores[y[i]]) + num_classes -1\n #http://stackoverflow.com/questions/2900084/counting-positive-elements-in-a-list-with-python-list-comprehensions\n ## scores+=1\n ## scores[y[i]]-=1 \n #however, this is sum over index, not values, glaube ich \n #loss+= sum(x < 0 for x in (scores-scores[y[i]]))\n ## loss+= (scores-scores[y[i]])[scores-scores[y[i]]>0].sum()\n #pass\n ############################################\n # construct a zero loop version\n ############################################\n scores2D = np.zeros((num_train, num_classes)) #used to store dotted scores\n scores1D = np.zeros((num_train,1)) #used to store corrected scores\n #index1D = np.zeros((1,num_classes))\n #index1D = range(num_classes) \n #scores1D = y[index1D]\n \n scores2D = np.dot(X,W) \n ##for i in xrange(num_train):\n ## scores1D[i,0]=scores2D[i,y[i]]-1 #find the correct scores and fill them into scores1D, the value -1 is because: si-sj+1\n ## scores2D[i,y[i]]-=1 # we want at corrected score voxel, the value should be 0, correct score -1 - \n #(correct score -1) = 0\n #####################################\n #for loop replacement###\n indexInsert = np.arange(num_train)\n scores1D[indexInsert,0] = scores2D[indexInsert,y[indexInsert]] -1 #using array indexing\n scores2D[indexInsert,y[indexInsert]] -=1\n \n ##################################### \n \n #scores2D = X.dot(W)\n #http://stackoverflow.com/questions/9497290/how-would-i-sum-a-multi-dimensional-array-in-the-most-succinct-python\n #rewrite summation\n #loss += (scores2D-scores1D)[scores2D-scores1D >0].sum()\n #temp = scores2D-np.tile (scores1D, (1,num_classes)) # for each score minus the corrected score\n temp = scores2D-scores1D #broadcasting!!\n #print temp[1,:]\n temp= temp.clip(min=0) \n #loss += sum(map(sum, (temp)[temp>0]))\n #loss += sum(map(sum, (temp)))\n #loss += (temp)[temp >0].sum()\n loss += sum(sum(x) for x in temp) #sum them up\n #loss -= num_train # minus 1 is because in each train, due to the plus 1 above , correct score - correct \n # score +1 = 1, but it should be 0, therefore, i deduce them at the last minute \n # ( then I made this also in the for loop to meet intuitive)\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n loss /= num_train\n loss += 0.5 * reg * np.sum(W * W)\n #############################################################################\n # TODO: #\n # Implement a vectorized version of the gradient for the structured SVM #\n # loss, storing the result in dW. 
#\n # #\n # Hint: Instead of computing the gradient from scratch, it may be easier #\n # to reuse some of the intermediate values that you used to compute the #\n # loss. #\n #############################################################################\n #tempBool = np.divide(temp, temp)\n #tempBool = tempBool.clip(max=1,min=0)\n #http://stackoverflow.com/questions/19666626/replace-all-elements-of-python-numpy-array-that-are-greater-than-some-value\n tempBool = np.copy(temp) # temp = scores2D-scores1D , temp= temp.clip(min=0)\n # temp is already the every score minus the correct labeled score\n tempBool[tempBool>0] = 1 # for every element, when it is positive, set it to one (for weighting)\n for j in xrange(num_train):\n tempBool[j,y[j]] =-1*sum(tempBool[j,:]) # calculate how many final scores, max(~~,0) are more than 0, add the number to the correct\n # label element, because it is the times that the corrected scores be used\n dW += np.reshape (X[j,:],(X.shape[1],1))*tempBool[j,:] # broadcasting, out-product\n #pass\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n dW/= num_train\n dW += reg*W\n \n return loss, dW",
"def loss(W_vect, X, T):\n # log_prior = - 0.5 * L2_reg * jnp.dot(W_vect, W_vect)\n return jnp.mean((predictions(W_vect, X) - T)**2) + 0.5*jnp.log(2*jnp.pi)",
"def score(self, X):\n nolist = False\n if not isinstance(X, list):\n X = [X]\n nolist = True\n\n scores = []\n for i in X:\n Xi = X[i]\n Xhati = self.predict(Xi)\n\n scores.append(1.0 - np.sum((Xi - Xhati)**2.0) / np.sum(Xi**2.0))\n\n if nolist:\n return scores[0]\n else:\n return scores",
"def prob_calibration_function(truthvec, scorevec, reg_param_vec='default', knots='sample',\n method='logistic', force_prob=True, eps=1e-15, max_knots=200,\n transform_fn='none', random_state=942, verbose=False, cv_folds=5,\n unity_prior_weight=1, unity_prior_gridsize=20):\n from sklearn import linear_model\n from sklearn.metrics import log_loss, make_scorer\n\n if (unity_prior_weight>0):\n scorevec_coda, truthvec_coda = create_yeqx_bias_vectors(unity_prior_gridsize)\n coda_wt = unity_prior_weight/unity_prior_gridsize\n weightvec = np.concatenate((np.ones(len(scorevec)), coda_wt * np.ones(len(scorevec_coda))))\n scorevec = np.concatenate((scorevec, scorevec_coda))\n truthvec = np.concatenate((truthvec, truthvec_coda))\n\n if transform_fn != 'none':\n scorevec = transform_fn(scorevec)\n\n knot_vec = np.unique(scorevec)\n if (knots == 'sample'):\n num_unique = len(knot_vec)\n if (num_unique > max_knots):\n smallest_knot, biggest_knot = knot_vec[0], knot_vec[-1]\n inter_knot_vec = knot_vec[1:-1]\n random.seed(random_state)\n random.shuffle(inter_knot_vec)\n reduced_knot_vec = inter_knot_vec[:(max_knots-2)]\n reduced_knot_vec = np.concatenate((reduced_knot_vec, [smallest_knot, biggest_knot]))\n reduced_knot_vec = np.concatenate((reduced_knot_vec, np.linspace(0, 1, 21)))\n if (unity_prior_weight>0):\n reduced_knot_vec = np.concatenate((reduced_knot_vec, scorevec_coda))\n knot_vec = np.unique(reduced_knot_vec)\n if verbose:\n print(\"Originally there were {} knots. Reducing to {} while preserving first and last knot.\".format(num_unique, len(knot_vec)))\n X_mat = _natural_cubic_spline_basis_expansion(scorevec, knot_vec)\n\n if (method == 'logistic'):\n if ((type(reg_param_vec) == str) and (reg_param_vec == 'default')):\n reg_param_vec = 10**np.linspace(-7, 5, 61)\n if verbose:\n print(\"Trying {} values of C between {} and {}\".format(len(reg_param_vec), np.min(reg_param_vec), np.max(reg_param_vec)))\n reg = linear_model.LogisticRegressionCV(Cs=reg_param_vec, cv=StratifiedKFold(cv_folds, shuffle=True),\n scoring=make_scorer(log_loss, needs_proba=True, greater_is_better=False))\n if (unity_prior_weight>0):\n reg.fit(X_mat, truthvec, weightvec)\n else:\n reg.fit(X_mat, truthvec)\n if verbose:\n print(\"Best value found C = {}\".format(reg.C_))\n\n if (method == 'ridge'):\n if ((type(reg_param_vec) == str) and (reg_param_vec == 'default')):\n reg_param_vec = 10**np.linspace(-7, 7, 71)\n if verbose:\n print(\"Trying {} values of alpha between {} and {}\".format(len(reg_param_vec), np.min(reg_param_vec),np.max(reg_param_vec)))\n reg = linear_model.RidgeCV(alphas=reg_param_vec, cv=KFold(cv_folds, shuffle=True), scoring=make_scorer(mean_squared_error_trunc,needs_proba=False, greater_is_better=False))\n reg.fit(X_mat, truthvec)\n if verbose:\n print(\"Best value found alpha = {}\".format(reg.alpha_))\n\n def calibrate_scores(new_scores):\n new_scores = np.maximum(new_scores,knot_vec[0]*np.ones(len(new_scores)))\n new_scores = np.minimum(new_scores,knot_vec[-1]*np.ones(len(new_scores)))\n if transform_fn != 'none':\n new_scores = transform_fn(new_scores)\n basis_exp = _natural_cubic_spline_basis_expansion(new_scores,knot_vec)\n if (method == 'logistic'):\n outvec = reg.predict_proba(basis_exp)[:,1]\n if (method == 'ridge'):\n outvec = reg.predict(basis_exp)\n if force_prob:\n outvec = np.where(outvec < eps, eps, outvec)\n outvec = np.where(outvec > 1-eps, 1-eps, outvec)\n return outvec\n\n return calibrate_scores",
"def calc_score(model, scorer, X, y_true):\n\n y_preds = model.predict(X)\n score = scorer(y_true, y_preds)\n\n return score",
"def eval_score( # type: ignore\n self, model_in: torch.Tensor, target: Optional[torch.Tensor] = None, idx=None, next_obs=None\n ) -> torch.Tensor:\n # target = target.repeat((self.num_members, 1, 1))\n loss = self._vaml_loss(model_in, target, idx, next_obs=next_obs, eval=True)\n if self.add_mse:\n loss += self._mse_loss(model_in, target).mean(-1, keepdim=True)\n return loss.detach()",
"def scoring_function(self, model, y_true, y_predicted_probability):",
"def score(self,x,**kwargs):\r\n if self.kfun != 'matrix' and len(self.sv): \r\n k = self.kfun(x,self.sv,**self.cparam)\r\n #print \"Kernel after test: \", k\r\n else:\r\n k = x\r\n \r\n \r\n self.W=self.alphas \r\n self.mat=self.kfun(np.array([self.sv[1]]), self.sv,**self.cparam) \r\n self.bias=self.svLabels[1]- np.dot((self.alphas*self.svLabels).T,self.mat.T) \r\n z=np.dot((self.alphas*self.svLabels).T,k.T)+self.bias\r\n \r\n #print \"bias: \", self.bias, \"\\nZ: \",z\r\n \r\n \r\n return z",
"def get_score(self, solution: np.array) -> float:\n pass",
"def update(self, returns, log_probs):\n policy_gradient = []\n for log_prob, Gt in zip(log_probs, returns):\n policy_gradient.append(-log_prob * Gt)\n\n loss = torch.stack(policy_gradient).sum()\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()",
"def lm(self, lm_para=LmPara()):\r\n if self.doc_len == 0:\r\n return np.log(MIN_LM_SCORE)\r\n v_tf = np.maximum(self.v_tf, lm_para.min_tf)\r\n v_tf /= self.doc_len\r\n v_tf = np.maximum(v_tf, MIN_LM_SCORE)\r\n score = np.log(v_tf).dot(self.v_q_tf)\r\n\r\n return score",
"def log_loss(self):\n probabilities = self.probability_array().copy()\n # need to flip the probabilities for p < 0.5 with this binary case.\n # 1 - old_val is same as oldval*-1 + 1. Do in 2 steps:\n probabilities[np.equal(0, self.y)] *= -1\n probabilities[np.equal(0, self.y)] += 1\n # when multiclass: np.amax(probabilities, 1)\n return np.log(probabilities).sum()",
"def fast_loss_and_grad(self, X, y):\n loss = 0.0\n grad = np.zeros(self.W.shape) # initialize the gradient as zero\n \n # ================================================================ #\n # YOUR CODE HERE:\n # Calculate the softmax loss and gradient WITHOUT any for loops.\n # ================================================================ #\n \n num_train = X.shape[0]\n num_classes = self.W.shape[0]\n \n# # vectorized loss calculation #\n class_scores_matrix = np.dot(self.W,X.T) # calculating class scores matrix (C x m): rows are class scores transposes\n class_scores_matrix -= np.max(class_scores_matrix) # considering the possible issue for numerical instability and account for it\n exp_a = np.exp(class_scores_matrix) # calculating the exponents\n \n# y_exp = np.array(exp_a[y, np.arange(0, class_scores_matrix.shape[1])])\n# #print(exp_a[:,:3])\n# #print(y[:3])\n# #print(y_exp[:3])\n \n# tt = np.sum(exp_a,axis=0)\n# tt2 = np.divide(tt,y_exp)\n# print(num_train)\n# tt3 = np.power(tt2,1/num_train)\n# loss = np.log(np.prod(tt3))\n \n \n \n \n (C, D) = self.W.shape\n N = X.shape[0]\n\n scores = np.dot(self.W, X.T)\n scores -= np.max(scores) # shift by log C to avoid numerical instability\n\n y_mat = np.zeros(shape = (C, N))\n y_mat[y, range(N)] = 1\n\n # matrix of all zeros except for a single wx + log C value in each column that corresponds to the\n # quantity we need to subtract from each row of scores\n correct_wx = np.multiply(y_mat, scores)\n\n # create a single row of the correct wx_y + log C values for each data point\n sums_wy = np.sum(correct_wx, axis=0) # sum over each column\n\n exp_scores = np.exp(scores)\n sums_exp = np.sum(exp_scores, axis=0) # sum over each column\n result = np.log(sums_exp)\n\n result -= sums_wy\n\n loss = np.sum(result)\n loss /= num_train\n \n \n # vectorized gradient calculation #\n exp_a_sum = np.sum(exp_a,axis=0)\n\n y_mat_corres = np.zeros(shape = (num_classes, num_train))\n y_mat_corres[y, range(num_train)] = 1\n sum_exp_scores = np.sum(exp_a, axis=0) \n sum_exp_scores = 1.0 / exp_a_sum # division by sum over columns\n exp_a *= sum_exp_scores\n grad = np.dot(exp_a, X)\n grad -= np.dot(y_mat_corres, X)\n grad /= num_train\n \n\n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n\n return loss, grad",
"def score(self):\n self.set_idx()\n if self.idx:\n diffs = self.diffs()\n weights = self.weights\n return np.sum(weights * diffs) / np.sum(weights)\n else:\n return 0.0",
"def calc_loss(X, Y, model):\n Z = predict(X, model)\n return -(Y * np.log(Z)).sum() / len(Y)",
"def score(self, x, y=None):\n _, logp = self.score_samples(x)\n return logp",
"def predict_score(self, X):\r\n if self.score:\r\n preds = self.model.predictValue(X)\r\n return preds",
"def objective(self,w):\n l = 0\n for i in range(len(self.x)):\n # Each example contributes log(sigma(y_i * x_i . w))\n l -= log(sigmoid(self.y[i] * np.dot(w, self.x[i,:])))\n # regularisation 1/2 * alpha * ||w||^2\n l += 0.5 * self.alpha * np.dot(w,w)\n return l",
"def score(self, X, y):\n\n u = ((y - self.predict(X)) ** 2).sum()\n v = ((y - np.mean(y)) ** 2).sum()\n score = 1 - u / v\n\n return score",
"def score(self, indices):\n self.model.eval()\n _, prediction = self.model(self.propagation_matrix, self.features).max(dim=1)\n correct = prediction[indices].eq(self.target[indices]).sum().item()\n acc = correct / indices.shape[0]\n return acc",
"def total_score(self, logits):\n previous = torch.full((1, self.tag_size), -10000., device=device)\n previous[0][self.tag_map[self.start_tag]] = 0.\n\n for index in range(len(logits)):\n previous = previous.expand(self.tag_size, self.tag_size).t()\n emit = logits[index].view(1, -1).expand(self.tag_size, self.tag_size)\n scores = previous + emit + self.transitions\n previous = log_sum_exp(scores)\n\n # previous = previous + self.transitions[:, self.tag_map[self.stop_tag]]\n # previous += self.transitions[self.tag_map[self.stop_tag]]\n previous += self.transitions[self.tag_map[:, self.stop_tag]]\n total_scores = log_sum_exp(previous.t())[0]\n return total_scores",
"def log_likelihood_grad_rew(self, data, reward_model, bias_params):",
"def loss_gradient(self, targets, scores):\n m = targets * scores\n numer = 4. * (2. * numpy.arctan(m) - 1.)\n denom = 1. + m**2\n return numer/denom",
"def score(self, X: np.ndarray) -> np.ndarray:\n # Matrix where predictions[i, j] is the prediction (1 or -1) for data point i\n # by learner j.\n predictions = np.zeros((len(X), self.num_learners))\n for i, learner in enumerate(self.learners):\n predictions[:, i] = learner.predict(X)\n return predictions @ self.learner_weights",
"def loglike(self, params):\n\n if type(params) is not MixedLMParams:\n params = MixedLMParams.from_packed(params, self.k_fe,\n self.use_sqrt)\n\n fe_params = params.get_fe_params()\n cov_re = params.get_cov_re()\n try:\n cov_re_inv = np.linalg.inv(cov_re)\n except np.linalg.LinAlgError:\n cov_re_inv = None\n _, cov_re_logdet = np.linalg.slogdet(cov_re)\n\n # The residuals\n expval = np.dot(self.exog, fe_params)\n resid_all = self.endog - expval\n\n likeval = 0.\n\n # Handle the covariance penalty\n if self.cov_pen is not None:\n likeval -= self.cov_pen.func(cov_re, cov_re_inv)\n\n # Handle the fixed effects penalty\n if self.fe_pen is not None:\n likeval -= self.fe_pen.func(fe_params)\n\n xvx, qf = 0., 0.\n for k, lab in enumerate(self.group_labels):\n\n exog = self.exog_li[k]\n ex_r = self.exog_re_li[k]\n ex2_r = self.exog_re2_li[k]\n resid = resid_all[self.row_indices[lab]]\n\n # Part 1 of the log likelihood (for both ML and REML)\n ld = _smw_logdet(1., ex_r, ex2_r, cov_re, cov_re_inv,\n cov_re_logdet)\n likeval -= ld / 2.\n\n # Part 2 of the log likelihood (for both ML and REML)\n u = _smw_solve(1., ex_r, ex2_r, cov_re, cov_re_inv, resid)\n qf += np.dot(resid, u)\n\n # Adjustment for REML\n if self.reml:\n mat = _smw_solve(1., ex_r, ex2_r, cov_re, cov_re_inv,\n exog)\n xvx += np.dot(exog.T, mat)\n\n if self.reml:\n likeval -= (self.n_totobs - self.k_fe) * np.log(qf) / 2.\n _,ld = np.linalg.slogdet(xvx)\n likeval -= ld / 2.\n likeval -= (self.n_totobs - self.k_fe) * np.log(2 * np.pi) / 2.\n likeval += ((self.n_totobs - self.k_fe) *\n np.log(self.n_totobs - self.k_fe) / 2.)\n likeval -= (self.n_totobs - self.k_fe) / 2.\n else:\n likeval -= self.n_totobs * np.log(qf) / 2.\n likeval -= self.n_totobs * np.log(2 * np.pi) / 2.\n likeval += self.n_totobs * np.log(self.n_totobs) / 2.\n likeval -= self.n_totobs / 2.\n\n return likeval",
"def score(self, X):\n return _betadiv(X, parafac(self.factors_), self.beta).sum()",
"def score(self, data):\n\n score_mappings = {\n \"0\": np.log(self.class_zero_doc_count / self.total_docs),\n \"1\": np.log(self.class_one_doc_count / self.total_docs)\n }\n\n features = self.featurize(data)\n\n for f in features:\n\n if(f[0] in self.class_zero):\n cond_prob_zero = np.log((self.class_zero[f[0]] + 1) / (self.class_zero_feature_count + len(self.vocab)))\n elif(f[0] in self.vocab):\n cond_prob_zero = np.log(1 / (self.class_zero_feature_count + len(self.vocab)))\n else:\n cond_prob_zero = 0\n\n if(f[0] in self.class_one):\n cond_prob_one = np.log((self.class_one[f[0]] + 1) / (self.class_one_feature_count + len(self.vocab)))\n elif(f[0] in self.vocab):\n cond_prob_one = np.log(1 / (self.class_one_feature_count + len(self.vocab)))\n else:\n cond_prob_one = 0\n\n score_mappings[\"0\"] += cond_prob_zero\n score_mappings[\"1\"] += cond_prob_one\n\n score_mappings[\"0\"] = np.exp(score_mappings[\"0\"])\n score_mappings[\"1\"] = np.exp(score_mappings[\"1\"])\n\n return score_mappings",
"def score(self, model, context):\n pass",
"def score_model(self, model, test_training, test_target):\n\n target_prediction = model.predict(test_training)\n from sklearn.metrics import classification_report\n if(self.VERBOSE):\n print(classification_report(test_target, target_prediction))\n\n return [\n f1_score(test_target, target_prediction, average='weighted'),\n precision_score(test_target, target_prediction, average='weighted'),\n recall_score(test_target, target_prediction, average='weighted')\n ]",
"def prob_calibration_function_multiclass(truthvec, scoremat, verbose=False, **kwargs):\n from sklearn import linear_model\n from sklearn.metrics import log_loss, make_scorer\n\n num_classes = scoremat.shape[1]\n function_list = []\n for i in range(num_classes):\n scorevec = scoremat[:,i]\n curr_truthvec = (truthvec==i).astype(int)\n function_list.append(prob_calibration_function(curr_truthvec,scorevec,verbose=verbose,**kwargs))\n\n def calibrate_scores_multiclass(new_scoremat):\n a,b = new_scoremat.shape\n pre_probmat = np.zeros((a,b))\n for i in range(num_classes):\n pre_probmat[:,i] = function_list[i](new_scoremat[:,i])\n probmat = (pre_probmat.T/np.sum(pre_probmat,axis=1)).T\n #if (not extrapolate):\n # new_scores = np.maximum(new_scores,knot_vec[0]*np.ones(len(new_scores)))\n # new_scores = np.minimum(new_scores,knot_vec[-1]*np.ones(len(new_scores)))\n return probmat\n return calibrate_scores_multiclass, function_list",
"def loss_fn(model):\n with flax.nn.stateful(state) as new_state:\n with flax.nn.stochastic(prng_key):\n logits = model(batch['image'])\n loss = cross_entropy_loss(logits, batch['label'])\n # TODO(britefury): check if applying L2 regularization to weights but\n # *not* biases improves results\n weight_penalty_params = jax.tree_leaves(model.params)\n weight_l2 = sum([jnp.sum(x ** 2)\n for x in weight_penalty_params\n if x.ndim > 1])\n weight_penalty = l2_reg * 0.5 * weight_l2\n loss = loss + weight_penalty\n return loss, (new_state, logits)",
"def score(self, X, y):\r\n n_feature, _ = self.check_model()\r\n _, n_label = y.shape\r\n y = self.check_X_y_weights(X, y)\r\n\r\n if X.shape[1] == (n_feature + 1):\r\n X = X[:, 1:]\r\n\r\n assert (X.shape[1] == n_feature), \"X is of the wrong shape\"\r\n\r\n if self.scoring_func is None:\r\n y_pred = self.forward(X)\r\n\r\n loss = self.loss_func(torch.from_numpy(y_pred).float(),\r\n torch.from_numpy(y).float())\r\n loss = torch.mean(torch.sum(loss, 1)).numpy()\r\n\r\n return - loss\r\n else:\r\n y_pred = self.predict(X)\r\n return self.scoring_func(y_pred, y)",
"def loss(Y,Y_pred):\n\n Y = Y.tolist()\n Y_pred = Y_pred.tolist()\n score = 0\n for i in range(len(Y)):\n score += (Y[i]-Y_pred[i])**2\n score=cmath.sqrt(score/len(Y))\n return score",
"def objective_function(self, y_true, y_predicted, X=None, sample_weight=None):\n return metrics.log_loss(y_true, y_predicted, sample_weight=sample_weight)",
"def objective_function(self, y_true, y_predicted, X=None, sample_weight=None):\n return metrics.log_loss(y_true, y_predicted, sample_weight=sample_weight)",
"def loss_func(self, logits, targets):\r\n return -np.sum(targets * np.log(logits)) / logits.shape[0]",
"def model(X, Y, word_to_vec_map, learning_rate = 0.01, num_iterations = 400):\n \n # Get a valid word contained in the word_to_vec_map \n any_word = list(word_to_vec_map.keys())[0]\n \n # Initialize cost. It is needed during grading\n cost = 0\n \n # Define number of training examples\n m = Y.shape[0] # number of training examples\n n_y = len(np.unique(Y)) # number of classes \n n_h = word_to_vec_map[any_word].shape[0] # dimensions of the GloVe vectors \n \n # Initialize parameters using Xavier initialization\n W = np.random.randn(n_y, n_h) / np.sqrt(n_h)\n b = np.zeros((n_y,))\n \n # Convert Y to Y_onehot with n_y classes\n Y_oh = convert_to_one_hot(Y, C = n_y) \n \n # Optimization loop\n for t in range(num_iterations): # Loop over the number of iterations\n for i in range(m): # Loop over the training examples\n \n ### START CODE HERE ### (≈ 4 lines of code)\n # Average the word vectors of the words from the i'th training example\n # def sentence_to_avg(sentence, word_to_vec_map): # return avg\n avg = sentence_to_avg(X[i], word_to_vec_map)\n\n # Forward propagate the avg through the softmax layer. \n # You can use np.dot() to perform the multiplication.\n z = np.dot(W, avg) + b\n a = softmax(z)\n\n # Compute cost using the i'th training label's one hot representation and \"A\" (the output of the softmax)\n cost = - np.sum(Y_oh[i] * a)\n ### END CODE HERE ###\n \n # Compute gradients \n dz = a - Y_oh[i]\n dW = np.dot(dz.reshape(n_y,1), avg.reshape(1, n_h))\n db = dz\n\n # Update parameters with Stochastic Gradient Descent\n W = W - learning_rate * dW\n b = b - learning_rate * db\n \n if t % 100 == 0:\n print(\"Epoch: \" + str(t) + \" --- cost = \" + str(cost))\n pred = predict(X, Y, W, b, word_to_vec_map) #predict is defined in emo_utils.py\n\n return pred, W, b",
"def loss_grad_softmax_vectorized(W, X, y):\n loss = 0 \n grad = np.zeros_like(W)\n dim, num_train = X.shape\n\n scores = W.dot(X) # [K, N]\n # Shift scores so that the highest value is 0\n scores -= np.max(scores)\n scores_exp = np.exp(scores)\n correct_scores_exp = scores_exp[y, range(num_train)] # [N, ]\n scores_exp_sum = np.sum(scores_exp, axis=0) # [N, ]\n loss = -np.sum(np.log(correct_scores_exp / scores_exp_sum))\n loss /= num_train\n #loss += 0.5 * reg * np.sum(W * W)\n\n scores_exp_normalized = scores_exp / scores_exp_sum\n # deal with the correct class\n scores_exp_normalized[y, range(num_train)] -= 1 # [K, N]\n grad = scores_exp_normalized.dot(X.T)\n grad /= num_train\n grad += W\n\n return grad",
"def __log_scores(self, scores, loss, tag):\r\n\t\tprint(\"\\t{:6s} - \".format(tag), end=\" \")\r\n\t\tfor name, value in scores.items():\r\n\t\t\tprint(name, '{:.4f}'.format(value), end=\", \")\r\n\t\tprint(\" Loss: {:.4f}\".format(loss))",
"def scores(self, eouts, temperature=1.0):\n return torch.log_softmax(self.output(eouts) / temperature, dim=-1)",
"def score(self, sentence):\n # TODO your code here\n score = 0.0\n for i,token in enumerate(sentence[1:]):\n prev = sentence[i]\n current = token\n freq = self.vocab[current][prev] + self.epsilon\n\n score += math.log(freq)\n score -= math.log(self.word_counts[prev] + self.epsilon * self.v)\n return score",
"def compute_loss(self, x, label):\n # Forward propagation\n y_hat = self.forward_pass(x)\n return -np.log(y_hat[label])",
"def score_calc(self, annotations, predictions):\n\n mean_probabilities_of_classes = np.expand_dims(np.mean(predictions, axis=0), axis=0)\n KL_d = predictions * (np.log(predictions + self.eps) - np.log(mean_probabilities_of_classes + self.eps))\n KL_D = KL_d.sum(axis=1)\n\n score = np.exp(np.mean(KL_D))\n return score",
"def loglike(self, params, *args, **kwargs):\n return np.sum(self.loglikeobs(params, *args, **kwargs))",
"def get_score(self, input, target_idx, noise_idx):\n raise NotImplementedError()",
"def _score_for_model(meta):\n mean_acc = list()\n pipes = meta[\"pipeline\"]\n acc = meta[\"accuracy\"]\n if \"tagger\" in pipes:\n mean_acc.append(acc[\"tags_acc\"])\n if \"morphologizer\" in pipes:\n mean_acc.append((acc[\"morphs_acc\"] + acc[\"pos_acc\"]) / 2)\n if \"parser\" in pipes:\n mean_acc.append((acc[\"uas\"] + acc[\"las\"]) / 2)\n if \"ner\" in pipes:\n mean_acc.append((acc[\"ents_p\"] + acc[\"ents_r\"] + acc[\"ents_f\"]) / 3)\n if \"textcat\" in pipes:\n mean_acc.append(acc[\"textcat_score\"])\n if \"senter\" in pipes:\n mean_acc.append((acc[\"sent_p\"] + acc[\"sent_r\"] + acc[\"sent_f\"]) / 3)\n return sum(mean_acc) / len(mean_acc)",
"def loss(self, X, y=None, reg=0.0):\r\n Ws = self.weights\r\n bs = self.biases\r\n N, D = X.shape # number of samples, number of features per sample\r\n\r\n # Compute the forward pass\r\n self.activations = []\r\n for i in xrange(len(Ws)): # for each set of weights\r\n W,b = Ws[i], bs[i]\r\n if i == 0:\r\n H = np.dot(X,W) + b\r\n else:\r\n H = np.dot(self.activations[-1],W) + b\r\n if i < len(Ws) - 1: # if we're computing hidden activations, apply nonlinear function\r\n H = (H > 0) * (H) + (H < 0) * (H/100.0)\r\n self.activations.append(H)\r\n scores = self.activations[-1]\r\n \r\n # If there's no labels provided, stop here\r\n if y is None:\r\n return scores\r\n\r\n # Compute the loss\r\n exped_scores = np.exp(scores)\r\n sums = np.sum(exped_scores,axis=1)\r\n # softmax classifier loss\r\n data_loss = (-1.0/N) * np.sum(np.log(exped_scores[range(N),y.astype(int)] / sums))\r\n\r\n # loss due to regularization\r\n reg_loss = 0\r\n for i in xrange(len(Ws)):\r\n reg_loss += np.sum(Ws[i]**2)\r\n reg_loss *= reg*(0.5)\r\n\r\n loss = data_loss + reg_loss\r\n \r\n # Compute gradients\r\n weights_grads = []\r\n biases_grads = []\r\n activation_grads = []\r\n for i in xrange(len(Ws)):\r\n weights_grads.append(np.copy(Ws[i]))\r\n biases_grads.append(np.copy(bs[i]))\r\n activation_grads.append(np.copy(self.activations[i]))\r\n\r\n DlossDscores = np.array(exped_scores / (N * np.matrix(sums).T))\r\n DlossDscores[range(N),y.astype(int)] -= (1.0/N)\r\n \r\n for i in xrange(len(Ws)-1,-1,-1):\r\n if i == 0:\r\n weights_grads[0] = np.dot(X.T, activation_grads[0]) + reg*Ws[0]\r\n biases_grads[0] = np.dot(np.ones((1,N)), activation_grads[0])[0]\r\n elif i == len(Ws)-1:\r\n H = self.activations[i-1]\r\n weights_grads[i] = np.dot(H.T, DlossDscores) + reg*Ws[i]\r\n biases_grads[i] = np.dot(np.ones((1,N)), DlossDscores)[0]\r\n dH = np.dot(DlossDscores, Ws[i].T)\r\n activation_grads[i-1] = dH\r\n else:\r\n H = self.activations[i-1]\r\n dH_out = activation_grads[i]\r\n weights_grads[i] = np.dot(H.T, dH_out) + reg*Ws[i]\r\n biases_grads[i] = np.dot(np.ones((1,N)), dH_out)[0]\r\n dH = np.dot(dH_out, Ws[i].T)\r\n dH = dH * (H > 0) + dH/100.0 * (H < 0)\r\n activation_grads[i-1] = dH\r\n \r\n grads = {}\r\n grads['weights'] = weights_grads\r\n grads['biases'] = biases_grads\r\n\r\n return loss, grads",
"def softmax_loss(scores, y):\r\n N = scores.shape[0] # number of input data\r\n\r\n # compute data loss\r\n shifted_logits = scores - np.max(scores, axis=1, keepdims=True)\r\n Z = np.sum(np.exp(shifted_logits), axis=1, keepdims=True)\r\n log_probs = shifted_logits - np.log(Z)\r\n probs = np.exp(log_probs)\r\n loss = -np.sum(log_probs[range(N), y]) / N\r\n\r\n # Compute gradient of loss function w.r.t. scores\r\n dscores = probs.copy()\r\n dscores[range(N), y] -= 1\r\n dscores /= N\r\n \r\n return loss, dscores",
"def svm_loss(x, y):\n loss, dx = None, None\n ###########################################################################\n # TODO: Implement loss and gradient for multiclass SVM classification. #\n # This will be similar to the svm loss vectorized implementation in #\n # cs231n/classifiers/linear_svm.py. #\n ###########################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n y_temp = np.ones((x.shape[0], x.shape[1])) # 1로 구성된 x와 같은 쉐입의 매트릭스를 만든다\n #print(y_temp)\n y_score = x[np.arange(x.shape[0]), y] # 정답레이블의 스코어로만 구성된 하나의 컬럼 벡터를 만든다\n y_score = np.reshape(y_score, (x.shape[0], 1)) # 브로드캐스팅을 위해 리쉐입 해준다\n y_temp[np.arange(x.shape[0]), y] = 0 # 1로 구성된 템프매트릭스의 정답 레이블에 해당되는 인덱스에 0을 할당한다\n #print(y_temp)\n loss_temp = (x - y_score) - 1\n loss_temp = (-loss_temp * y_temp) / x.shape[0]\n loss = (np.sum(loss_temp))\n #print(loss_temp)\n\n #print(np.sum(loss_temp, axis = 1))\n \n temp = loss_temp * x.shape[0]\n temp[loss_temp > 0] = 1\n row_sum = np.sum(temp, axis = 1)\n temp[np.arange(x.shape[0]), y] = -row_sum.T\n dx = -temp\n\n dx /= x.shape[0]\n\n\n #print(dx)\n\n\n\n\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return loss, dx",
"def score(self, X, label):\n pred_risk = self.predict(X)\n CI = self._metrics_ci(label, pred_risk)\n return CI",
"def eval_additional_scores(self, **kwargs):\n self.model.eval()\n self.likelihood.eval()\n\n X_train_torch = torch.from_numpy(kwargs[\"X_train\"]).to(self.device)\n y_train_torch = torch.from_numpy(kwargs[\"y_train\"]).to(self.device)\n mll = gpytorch.mlls.VariationalELBO(self.likelihood, self.model, num_data=y_train_torch.numel())\n\n with torch.no_grad(), gpytorch.settings.num_likelihood_samples(self.num_likelihood_samples):\n f_pred = self.model(X_train_torch)\n elbo = mll(f_pred, y_train_torch).item()\n\n return {\n \"elbo\": elbo\n }",
"def dloss(self, y_true, score, pos_label=None):\n score = score.atleast_2d() # Working with 2-D arrays only\n\n grad = CSoftmax().softmax(score)\n\n # we subtract -1 only to the elements equal to y_true\n grad[[list(range(score.shape[0])), y_true.tolist()]] -= 1.0\n\n # find-like indexing (list of lists)\n a = y_true.tolist() if pos_label is None else [pos_label]\n\n # Return elements equal to y_true (if pos_label is None) or pos_label\n return CArray(grad[[list(range(score.shape[0])), a]])",
"def objective(self, var: ndarray) -> float:\n beta, gamma = self.get_vars(var)\n r = self.get_residual(beta)\n d = self.get_varmat(gamma)\n\n val = 0.5*(d.logdet() + r.dot(d.invdot(r)))\n val += self.fevar.prior_objective(beta)\n val += self.revar.prior_objective(gamma)\n\n return val",
"def loss(self, targets, scores):\n return (2. * numpy.arctan(targets * scores) - 1.)**2",
"def score(matrix,seq,ns=True):\n #specific_binding = sum([row[base_dict[b]] for row,b in zip(matrix,seq)])\n specific_binding = 0\n for i in xrange(len(matrix)): \n specific_binding += matrix[i][base_dict[seq[i]]]\n if ns:\n return log(exp(-beta*specific_binding) + exp(-beta*ns_binding_const))/-beta\n else:\n return specific_binding",
"def score(model):\n # get the first layer\n layer = model.get_layer('encoder')\n # extracts weights\n weights = layer.get_weights()[0]\n # calculate the infinity norm as shown in the paper.\n # For each input feature get the absolute maximum weight\n # connected with this feature\n scores = np.linalg.norm(weights, ord=np.inf, axis=1)\n # the final score is a importance measure for each feature\n sorted_scores = sorted(range(len(scores)), key=lambda k: scores[k])\n return sorted_scores[::-1]",
"def loss(self, X, y):\n\n # Initialize the loss to zero.\n loss = 0.0\n num_classes = self.W.shape[0] # C = num_classes\n num_train = X.shape[0]\n \n exp_a = np.zeros((num_classes,num_train))\n # ================================================================ #\n # YOUR CODE HERE:\n # Calculate the normalized softmax loss. Store it as the variable loss.\n # (That is, calculate the sum of the losses of all the training \n # set margins, and then normalize the loss by the number of \n # training examples.)\n # ================================================================ #\n \n \n for i in np.arange(num_train):\n \n Loss = 0.0\n\n class_scores = np.dot(self.W,X[i,:].T) # calculating class scores (C x 1 vector)\n class_scores -= np.max(class_scores) # considering the possible issue for numerical instability and account for it\n\n exp_a[:,i] = np.exp(class_scores) # turning class scores to probabilities (C x 1 vector), without normalization\n\n Loss -= np.log(exp_a[y[i],i]/np.sum(exp_a[:,i]))\n \n\n #p[:,i] = exp_a[:,i]/np.sum(exp_a[:,i]) # p now is a valid probability matrix\n #print(p[:,i])\n\n loss += Loss \n #print(Loss,i) \n \n pass\n loss /= num_train\n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n\n return loss",
"def do_loss(logits, labels):\n return tf.reduce_sum(tf.square(logits - labels))",
"def logscore(self, word, context=None):\n return log_base2(self.score(word, context))",
"def score(self, data_test, labels_pred, is_train=False):\n return -np.log(np.clip(self.score_trust(data_test, labels_pred, is_train=is_train),\n sys.float_info.min, None))",
"def grad_log(self, X):\n # \"\"\"\n # Evaluate the gradients (with respect to the input) of the log density at\n # each of the n points in X. This is the score function.\n\n # X: n x d numpy array.\n XB = np.dot(X, self.B)\n Y = 0.5*XB + self.c\n E2y = np.exp(2*Y)\n # n x dh\n Phi = old_div((E2y-1.0),(E2y+1))\n # n x dx\n T = np.dot(Phi, 0.5*self.B.T)\n S = self.b - X + T\n return S",
"def loss(self, y: np.ndarray, y_hat: np.ndarray) -> float:\n losses = -(y * np.log(y_hat) + (1 - y) * np.log(1 - y_hat))\n return losses.mean() + self.reg / self.num_parameters * (\n (self.v[:, -1] ** 2).sum() + (self.w ** 2).sum()\n )",
"def grad_log_q(self,z): \n param_count = 0\n grad = np.zeros((np.sum(self.approx_param_no),self.sims))\n for core_param in range(len(self.q)):\n for approx_param in range(self.q[core_param].param_no):\n grad[param_count] = self.q[core_param].vi_score(z[core_param],approx_param) \n param_count += 1\n return grad",
"def __call__(self, score_outputs, labels):\n with tf.name_scope('rpn_loss'):\n levels = sorted(score_outputs.keys())\n\n score_losses = []\n for level in levels:\n score_losses.append(\n self._rpn_score_loss(\n score_outputs[level],\n labels[level],\n normalizer=tf.cast(\n tf.shape(score_outputs[level])[0] *\n self._rpn_batch_size_per_im,\n dtype=tf.float32)))\n\n # Sums per level losses to total loss.\n return tf.math.add_n(score_losses)",
"def custom_scoring(y_te, y_pred):\n #weights computed with training data set\n w = np.array([0.02409584, 0.00787456, 0.03685528, 0.01760536, 0.04589969, 0.8483942 , 0.01724058, 0.00203449]);\n \n ## F1 SCORES\n #evaluate F1 score, precision and recall for each label, \n #along with custom proportionally weighted F1 score\n #and built in weighted and macro F1 scores\n F1_tab, Ptab, Rtab, pf1 = F1_score(y_te, y_pred, w)\n f = F1Score(8, threshold = 0.5, average = 'weighted')\n f.update_state(y_te, y_pred)\n wf1 = f.result().numpy() #weighted f1 score\n f.reset_states()\n f = F1Score(8, threshold = 0.5, average = 'macro')\n f.update_state(y_te, y_pred)\n mf1 = f.result().numpy() #macro f1 score\n f.reset_states()\n\n ##EDIT DISTANCE\n #edit_dist_av = LevDistMultilabels(y_true, y_pred)\n\n ##ACCURACY\n #evaluate accuracy per label\n acc_tab = Acc(y_te, y_pred)\n\n return wf1, mf1, pf1, F1_tab, Ptab, Rtab, acc_tab",
"def objective(params):\n\t# hyperopt casts as float\n\tparams['num_boost_round'] = int(params['num_boost_round'])\n\tparams['num_leaves'] = int(params['num_leaves'])\n\n\t# need to be passed as parameter\n\tparams['is_unbalance'] = True\n\tparams['verbose'] = -1\n\tparams['seed'] = 1\n\n\tcv_result = lgb.cv(\n\t\tparams,\n\t\tdtrain,\n\t\tnum_boost_round=params['num_boost_round'],\n\t\tmetrics='binary_logloss',\n\t\tnfold=3,\n\t\tearly_stopping_rounds=20,\n\t\tstratified=False)\n\tearly_stop_dict[objective.i] = len(cv_result['binary_logloss-mean'])\n\terror = round(cv_result['binary_logloss-mean'][-1], 4)\n\tobjective.i+=1\n\treturn error",
"def decision_function(self, X):\n X = atleast2d_or_csr(X)\n scores = safe_sparse_dot(X, self.coef_.T) + self.intercept_\n if self.classes.shape[0] == 2:\n return np.ravel(scores)\n else:\n return scores",
"def score(self, word):\n assert self.words is not None, \"You need to train first.\"\n if word in self.words:\n return np.log(1 - self.a) + np.log(self.words[word] / self.nwords)\n else:\n logprob = 0\n for l in word:\n # this calculates add+1-smoothed probabilities to make\n # sure that unknown letters are treated correctly.\n # not required, using simply the relative\n # frequency is sufficient.\n logprob += np.log(self.letters.get(l, 1) /\n (self.nletters + len(self.letters)))\n return np.log(self.a) + logprob",
"def score(self, X, y):\n\n stuff = self._vectorizer.transform(X)\n result = self._classifier.score(stuff,y)\n\n return result\n pass",
"def loss(self, y_pred=None, y_true=None):\n ll = -0.5 * self.const - np.log(self.sigma_y) - 0.5 * (1. / self.sigma_y ** 2) * ((y_pred - y_true) ** 2)\n return -ll.sum(dim=0)",
"def score(self, X, y):\n X_pp = self.preprocessor.transform(X)\n # Score the model on the data here\n return(self.estimator.score(X_pp, y))",
"def log_loss(network, model_indices, sim_data, lambd=1.0):\n\n # Compute evidences\n alpha = network(sim_data)\n\n # Obtain probs\n model_probs = alpha / tf.reduce_sum(alpha, axis=1, keepdims=True)\n\n # Numerical stability\n model_probs = tf.clip_by_value(model_probs, 1e-15, 1 - 1e-15)\n\n # Actual loss + regularization (if given)\n loss = -tf.reduce_mean(tf.reduce_sum(model_indices * tf.math.log(model_probs), axis=1))\n if lambd > 0:\n kl = kl_dirichlet(model_indices, alpha)\n loss = loss + lambd * kl\n return loss",
"def score(self, y_true, y_pred):\r\n pass",
"def loss(self, rng_key, param_map, model, guide, *args, **kwargs):\n def single_particle_elbo(rng_key):\n model_seed, guide_seed = random.split(rng_key)\n seeded_model = seed(model, model_seed)\n seeded_guide = seed(guide, guide_seed)\n guide_log_density, guide_trace = log_density(seeded_guide, args, kwargs, param_map)\n seeded_model = replay(seeded_model, guide_trace)\n model_log_density, _ = log_density(seeded_model, args, kwargs, param_map)\n\n # log p(z) - log q(z)\n elbo = model_log_density - guide_log_density\n return elbo\n\n # Return (-elbo) since by convention we do gradient descent on a loss and\n # the ELBO is a lower bound that needs to be maximized.\n if self.num_particles == 1:\n return - single_particle_elbo(rng_key)\n else:\n rng_keys = random.split(rng_key, self.num_particles)\n return - jnp.mean(vmap(single_particle_elbo)(rng_keys))",
"def eml_add_smooth(yi, xi, eqml):\n return (eqml[yi][xi] + 1) / (sum(eqml[yi].values()) + train_set_size)",
"def real_path_score(self, logits, label):\n score = torch.zeros(1, device=device)\n label = torch.cat([torch.tensor([self.tag_map[self.start_tag]], dtype=torch.long, device=device), label.to(torch.long)])\n\n for index, logit in enumerate(logits):\n emission_score = logit[label[index + 1]]\n transition_score = self.transitions[label[index], label[index + 1]]\n # transition_score = self.transitions[label[index + 1], label[index]]\n score += emission_score + transition_score\n\n # Add the final Stop Tag, the final transition score\n score += self.transitions[label[-1], self.tag_map[self.stop_tag]]\n # score += self.transitions[self.tag_map[self.stop_tag], label[-1]]\n return score",
"def compute_loss(self, obs, returns):",
"def loss_and_grad(self, X, y):\n\n # Initialize the loss and gradient to zero.\n loss = 0.0\n grad = np.zeros_like(self.W)\n grad_tmp = np.zeros_like(self.W)\n num_classes = self.W.shape[0] # C = num_classes\n num_train = X.shape[0]\n \n # ================================================================ #\n # YOUR CODE HERE:\n # Calculate the softmax loss and the gradient. Store the gradient\n # as the variable grad.\n # ================================================================ #\n \n exp_a = np.zeros((num_classes,num_train))\n for i in np.arange(num_train):\n \n Loss = 0.0\n\n class_scores = np.dot(self.W,X[i,:].T) # calculating class scores (C x 1 vector)\n class_scores -= np.max(class_scores) # considering the possible issue for numerical instability and account for it\n\n exp_a[:,i] = np.exp(class_scores) # turning class scores to probabilities (C x 1 vector), without normalization\n\n Loss -= np.log(exp_a[y[i],i]/np.sum(exp_a[:,i]))\n \n \n #if i==0:\n grada = np.zeros(X.shape[1])\n \n for j in range(num_classes):\n if j != y[i]:\n grad_tmp[j,:] = X[i,:].T * (exp_a[j,i] / np.sum(exp_a[:,i])) \n else: \n grad_tmp[j,:] = X[i,:].T * (exp_a[j,i] / np.sum(exp_a[:,i])) - X[i,:].T \n\n grad += grad_tmp\n loss += Loss \n \n pass\n\n\n loss /= num_train\n grad /= num_train\n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n\n return loss, grad",
"def logloss_mc(y_true, y_prob, epsilon=1e-15):\n # normalize\n y_prob = y_prob / y_prob.sum(axis=1).reshape(-1, 1)\n y_prob = np.maximum(epsilon, y_prob)\n y_prob = np.minimum(1 - epsilon, y_prob)\n # get probabilities\n y = [y_prob[i, j] for (i, j) in enumerate(y_true)]\n ll = - np.mean(np.log(y))\n return ll",
"def fit(self,\n obs: np.ndarray,\n discounted_returns: np.ndarray\n ) -> np.ndarray:\n\n with tf.GradientTape() as tape:\n # predicted value is calcualted by subbing in\n pred_value = self.model(obs, training=True)\n # fit value function by regression on mean-squared error (Pseudocode line 8)\n # minimizing the difference between the predicted value and the actual value\n critic_loss = self.loss_func(pred_value, tf.stop_gradient(discounted_returns))\n\n gradients = tape.gradient(critic_loss, self.model.trainable_variables)\n self.optimizer.apply_gradients(zip(gradients, self.model.trainable_variables))\n\n return critic_loss",
"def scores_(self):\n return self.predictor.scores_",
"def forward(ctx, scores):\n size = scores.size()\n prob = F.softmax(scores, dim=-1)\n idx = multinomial(prob.view(-1, size[-1]), num_samples=1, replacement=False).view(size[:-1])\n scores_net = eye(scores.size(-1), device=scores.device)\n return scores_net[idx]",
"def loss(self,A2,label):\r\n m = label.shape[0]\r\n\r\n log_likelihood = -np.log(A2[label,range(m)])\r\n loss = np.sum(log_likelihood) / m\r\n return loss",
"def _loss_gradient(x0, x1, b, w, lam, weights=None):\n nvars = len(w)\n\n # initialize + regularization term\n loss = 0.5 * lam * np.sum(w ** 2)\n gradient = np.zeros(nvars + 1) # first position is b\n gradient[1:] = lam * w\n\n # we need prediction for x\n pred_x_0_1 = [LogisticRegression._sigmoid(x0, b, w), LogisticRegression._sigmoid(x1, b, w)]\n\n # the log likelihood\n log_like_x_0_1 = [np.log(1.0 - pred_x_0_1[0]),\n np.log(pred_x_0_1[1])]\n\n # also need the error for gradient.\n error = [pred_x_0_1[0],\n pred_x_0_1[1] - 1]\n\n if weights is None:\n loss += -np.sum(log_like_x_0_1[1]) - np.sum(log_like_x_0_1[0])\n gradient[0] += np.sum(error[0]) + np.sum(error[1]) # * 1 for bias term \n for k in range(nvars):\n gradient[k + 1] += np.sum(error[0] * x0[:, k]) + np.sum(error[1] * x1[:, k])\n else:\n loss += -np.sum(weights[1] * log_like_x_0_1[1]) - np.sum(weights[0] * log_like_x_0_1[0])\n gradient[0] += np.sum(error[0] * weights[0]) + np.sum(error[1] * weights[1])\n for k in range(nvars):\n gradient[k + 1] += ( np.sum(weights[0] * error[0] * x0[:, k]) +\n np.sum(weights[1] * error[1] * x1[:, k]) )\n return loss, gradient",
"def score(self, X, y=...):\n ...",
"def multiclass_log_loss(y_true, y_pred, eps=1e-15):\n clip = np.clip(y_pred, eps, 1 - eps)\n actual = np.zeros(y_pred.shape)\n rows = actual.shape[0]\n print rows\n print np.arange(rows)\n print (y_true.astype(int))\n actual[np.arange(rows), y_true.astype(int)] = 1\n print actual\n vsota = np.sum(actual * np.log(clip))\n print vsota\n return -1.0 / rows * vsota",
"def loss(self, scores, true_pos, lamb=1e-7):\n loss = F.multi_margin_loss(scores, true_pos, margin=self.config[\"margin\"])\n if self.config[\"use_local_only\"]:\n return loss\n\n # regularization\n X = F.normalize(self.rel_embs)\n diff = (\n (\n X.view(self.config[\"n_rels\"], 1, -1)\n - X.view(1, self.config[\"n_rels\"], -1)\n )\n .pow(2)\n .sum(dim=2)\n .add_(1e-5)\n .sqrt()\n )\n diff = diff * (diff < 1).float()\n loss -= torch.sum(diff).mul(lamb)\n\n X = F.normalize(self.ew_embs)\n diff = (\n (\n X.view(self.config[\"n_rels\"], 1, -1)\n - X.view(1, self.config[\"n_rels\"], -1)\n )\n .pow(2)\n .sum(dim=2)\n .add_(1e-5)\n .sqrt()\n )\n diff = diff * (diff < 1).float()\n loss -= torch.sum(diff).mul(lamb)\n return loss",
"def log_sum_exp(vec):\r\n\r\n\r\n max_score, idx = torch.max(vec, -1, keepdim = True) # ( B, to_target, 1)\r\n # max_score = torch.gather(vec, 1, idx.view(-1, 1, m_size)).view(-1, 1, m_size) # B * M\r\n # max_score.expand_as(vec)\r\n # to_target = vec.size(1)\r\n\r\n return max_score.squeeze(-1) + torch.log(torch.sum(torch.exp(vec - max_score.expand_as(vec)), -1)) # B * to_target\r",
"def linear_regression(features, values):\n clf = SGDRegressor(n_iter=100)\n clf.fit(features,values)\n print(clf.score(features,values))\n intercept = clf.intercept_ \n params = clf.coef_\n \n return intercept, params",
"def score(self, y_hat, y_true=None, verbose=True):\n nan_score = float(9999)\n act = y_true if y_true is not None else self.actual[self.y_cols]\n if hasattr(act, 'values'):\n act = act.values\n if hasattr(y_hat, 'values'):\n y_hat = y_hat.values\n assert act.shape == y_hat.shape, f'shape mismatch in DM.score(): ' \\\n f'{act.shape} != {y_hat.shape}'\n sklearn_metrics = {'mse': mse, 'msle': msle}\n lossf = sklearn_metrics[cfg.data_cfg['loss']]\n try:\n score = lossf(act, y_hat)\n except ValueError as err:\n print(err)\n print(f'NaNs in prediction. Setting score to {nan_score}.')\n score = nan_score\n return score, act",
"def function(self, scores, multilabel):\n\n # Output functions\n # pylint: disable=C3001\n identity = lambda x: x\n sigmoid = lambda x: 1.0 / (1.0 + np.exp(-x))\n softmax = lambda x: np.exp(x) / np.sum(np.exp(x))\n function = identity if multilabel is None else sigmoid if multilabel else softmax\n\n # Apply output function\n return function(np.array(scores))",
"def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n num_classes = W.shape[1]\n #print('num_classes = ', num_classes)\n num_train = X.shape[0]\n #print('num_train = ', num_train)\n \n min_score = 0.0\n shifted_scores = np.zeros(W.shape[1])\n #max_score = np.zeros(W.shape[1])\n max_score = 0.0\n \n loss_array = np.zeros(y.shape[0])\n for i in range(num_train):\n scores = X[i].dot(W)\n #print('scores dimensions = ', scores.shape)\n #print('scores = ', scores)\n #print('i =', i, 'y = ', y[i])\n min_score = np.min(scores)\n max_score = np.max(scores)\n #print(min_score,max_score)\n shifted_scores = np.multiply(-1,scores + abs(min_score))\n #print(scores)\n #print(shifted_scores)\n exp_scores = np.exp(shifted_scores)\n norm = np.amax(exp_scores)\n norm_scores = np.divide(exp_scores,norm)\n loss_array[i] = np.multiply(-1,np.log(norm_scores[y[i]]/(np.sum(norm_scores)-norm_scores[y[i]])))\n #print(loss_array)\n for j in range(num_classes): \n\t\n if j == y[i]: \n dW[:,j] = np.multiply(norm_scores[y[i]],1-norm_scores[y[i]])\n else:\n dW[:,j] = np.multiply(-1,np.multiply(norm_scores[y[i]],norm_scores[y[j]]))\n\t\t\t\n\t\t\t\n loss = np.amax(loss_array)\n\n # Add regularization to the loss.\n loss = 0.5 * reg * np.sum(W * W) + loss\n \n \n pass\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW",
"def __call__(self, score_map, one_hot_label) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n assert not one_hot_label.requires_grad\n pi = one_hot_label.to(torch.float)\n\n assert not torch.any(torch.isinf(score_map))\n assert not torch.any(torch.isnan(score_map))\n\n log_qi = torch.log(self.clamp_softmax(score_map))\n\n assert not torch.any(torch.isnan(log_qi))\n\n log_fg_qi = log_qi[:, 1:, :, :]\n fg_pi = pi[:, 1:, :, :]\n fg_count = torch.sum(fg_pi, dim=(1, 2, 3)) + self.eps\n\n log_bg_qi = log_qi[:, 0:1, :, :]\n bg_pi = pi[:, 0:1, :, :]\n bg_count = torch.sum(bg_pi, dim=(1, 2, 3)) + self.eps\n\n fg_loss_ = torch.sum(fg_pi * log_fg_qi, dim=(1, 2, 3))\n fg_loss = -1 * torch.mean(fg_loss_ / fg_count) # mean reduce on batch\n\n bg_loss_ = torch.sum(bg_pi * log_bg_qi, dim=(1, 2, 3))\n bg_loss = -1 * torch.mean(bg_loss_ / bg_count) # mean reduce on batch\n\n total_loss = bg_loss + fg_loss\n assert not torch.any(torch.isnan(total_loss)), \\\n \"fg_loss: {} fg_count: {} bg_loss: {} bg_count: {}\".format(fg_loss, fg_count, bg_loss, bg_count)\n\n return total_loss, bg_loss, fg_loss"
] | [
"0.6385058",
"0.6369366",
"0.6337341",
"0.62241894",
"0.6202223",
"0.6085212",
"0.6069365",
"0.5963406",
"0.59437096",
"0.59082556",
"0.5902111",
"0.58229357",
"0.5803482",
"0.57961464",
"0.5775284",
"0.5754475",
"0.5751942",
"0.5720772",
"0.5715828",
"0.57079184",
"0.56972706",
"0.56924284",
"0.56900287",
"0.5684798",
"0.56828547",
"0.5678644",
"0.5676018",
"0.5663591",
"0.56587905",
"0.56492484",
"0.5648787",
"0.56481785",
"0.56374395",
"0.5629737",
"0.5618622",
"0.56112176",
"0.55889016",
"0.5584572",
"0.55768627",
"0.5565633",
"0.5564364",
"0.5564364",
"0.55539227",
"0.55504405",
"0.5550231",
"0.5541401",
"0.55369365",
"0.5520265",
"0.5516783",
"0.5515999",
"0.55155027",
"0.5511123",
"0.5509981",
"0.55050623",
"0.5502061",
"0.54968494",
"0.54931635",
"0.5480789",
"0.5477338",
"0.54744023",
"0.54708344",
"0.5462712",
"0.5453526",
"0.54526544",
"0.54509056",
"0.5449664",
"0.544792",
"0.54381615",
"0.5432927",
"0.54307073",
"0.5429782",
"0.54242986",
"0.54225075",
"0.54199594",
"0.54187393",
"0.54067624",
"0.5405294",
"0.54005265",
"0.53963625",
"0.5395284",
"0.5392596",
"0.53923595",
"0.5389547",
"0.538912",
"0.53888845",
"0.5385502",
"0.5383773",
"0.5382428",
"0.5381121",
"0.53808665",
"0.5380264",
"0.53795123",
"0.5376071",
"0.53754514",
"0.5373938",
"0.5373181",
"0.5370708",
"0.53638506",
"0.53604347",
"0.5359435"
] | 0.6393054 | 0 |
Fisher information matrix of model Returns Hessian of loglike evaluated at params. | def information(self, params):
# TODO: If the docstring is right, then why not just implement this?
raise NotImplementedError # pragma: no cover | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def hessian(self, params, transformed=True):\n params = np.array(params, ndmin=1)\n\n return approx_hess_cs(params, self.loglike)",
"def _hessian(self):\n log_g = np.log(self._gv())\n log_f = np.log(self._fv())\n h_inf = np.mean((1 - log_g + log_f) / (self.y - self.err_inf) ** 2)\n return h_inf",
"def fisher(params, log_prob_func=None, jitter=None, normalizing_const=1., softabs_const=1e6, metric=Metric.HESSIAN):\n\n log_prob = log_prob_func(params)\n if util.has_nan_or_inf(log_prob):\n print('Invalid log_prob: {}, params: {}'.format(log_prob, params))\n raise util.LogProbError()\n if metric == Metric.JACOBIAN_DIAG:\n # raise NotImplementedError()\n # import pdb; pdb.set_trace()\n jac = util.jacobian(log_prob, params, create_graph=True, return_inputs=False)\n jac = torch.cat([j.flatten() for j in jac])\n # util.flatten(jac).view(1,-1)\n fish = torch.matmul(jac.view(-1,1),jac.view(1,-1)).diag().diag()#/ normalizing_const #.diag().diag() / normalizing_const\n else:\n hess = torch.autograd.functional.hessian(log_prob_func, params, create_graph=True)\n fish = - hess #/ normalizing_const\n if util.has_nan_or_inf(fish):\n print('Invalid hessian: {}, params: {}'.format(fish, params))\n raise util.LogProbError()\n if jitter is not None:\n params_n_elements = fish.shape[0]\n fish += (torch.eye(params_n_elements) * torch.rand(params_n_elements) * jitter).to(fish.device)\n if (metric is Metric.HESSIAN) or (metric is Metric.JACOBIAN_DIAG):\n return fish, None\n elif metric == Metric.SOFTABS:\n eigenvalues, eigenvectors = torch.linalg.eigh(fish, UPLO='L')\n abs_eigenvalues = (1./torch.tanh(softabs_const * eigenvalues)) * eigenvalues\n fish = torch.matmul(eigenvectors, torch.matmul(abs_eigenvalues.diag(), eigenvectors.t()))\n return fish, abs_eigenvalues\n else:\n # if metric == Metric.JACOBIAN:\n # jac = jacobian(log_prob, params, create_graph=True)\n # fish = torch.matmul(jac.t(),jac) / normalizing_const\n raise ValueError('Unknown metric: {}'.format(metric))",
"def hessian(self, params, *args, **kwargs):\n if self._use_approx_cs:\n return approx_hess_cs(params, self.loglike,\n args=args, kwargs=kwargs)\n else:\n return approx_hess(params, self.loglike,\n args=args, kwargs=kwargs)",
"def hessian(self, params):\n\n if self.use_sqrt:\n return self.hessian_sqrt(params)\n else:\n return self.hessian_full(params)",
"def _get_hessian(self):\n if not self.sparse:\n hess = numpy.dot(self.jacobian_T, self.jacobian)\n else:\n hess = self.jacobian_T*self.jacobian\n return hess",
"def hessian_full(self, params):\n\n if type(params) is not MixedLMParams:\n params = MixedLMParams.from_packed(params, self.k_fe,\n self.use_sqrt)\n\n fe_params = params.get_fe_params()\n cov_re = params.get_cov_re()\n try:\n cov_re_inv = np.linalg.inv(cov_re)\n except np.linalg.LinAlgError:\n cov_re_inv = None\n\n # Blocks for the fixed and random effects parameters.\n hess_fe = 0.\n hess_re = np.zeros((self.k_re2, self.k_re2), dtype=np.float64)\n hess_fere = np.zeros((self.k_re2, self.k_fe),\n dtype=np.float64)\n\n fac = self.n_totobs\n if self.reml:\n fac -= self.exog.shape[1]\n\n rvir = 0.\n xtvix = 0.\n xtax = [0.,] * self.k_re2\n B = np.zeros(self.k_re2, dtype=np.float64)\n D = np.zeros((self.k_re2, self.k_re2), dtype=np.float64)\n F = [[0.,]*self.k_re2 for k in range(self.k_re2)]\n for k in range(self.n_groups):\n\n exog = self.exog_li[k]\n ex_r = self.exog_re_li[k]\n ex2_r = self.exog_re2_li[k]\n\n # The residuals\n expval = np.dot(exog, fe_params)\n resid = self.endog_li[k] - expval\n\n viexog = _smw_solve(1., ex_r, ex2_r, cov_re, cov_re_inv,\n exog)\n xtvix += np.dot(exog.T, viexog)\n vir = _smw_solve(1., ex_r, ex2_r, cov_re, cov_re_inv,\n resid)\n rvir += np.dot(resid, vir)\n\n for jj1,mat1 in self._gen_dV_dPsi(ex_r):\n\n hess_fere[jj1,:] += np.dot(viexog.T,\n np.dot(mat1, vir))\n if self.reml:\n xtax[jj1] += np.dot(viexog.T, np.dot(mat1, viexog))\n\n B[jj1] += np.dot(vir, np.dot(mat1, vir))\n E = _smw_solve(1., ex_r, ex2_r, cov_re, cov_re_inv,\n mat1)\n\n for jj2,mat2 in self._gen_dV_dPsi(ex_r, jj1):\n Q = np.dot(mat2, E)\n Q1 = Q + Q.T\n vt = np.dot(vir, np.dot(Q1, vir))\n D[jj1, jj2] += vt\n if jj1 != jj2:\n D[jj2, jj1] += vt\n R = _smw_solve(1., ex_r, ex2_r, cov_re,\n cov_re_inv, Q)\n rt = np.trace(R) / 2\n hess_re[jj1, jj2] += rt\n if jj1 != jj2:\n hess_re[jj2, jj1] += rt\n if self.reml:\n F[jj1][jj2] += np.dot(viexog.T,\n np.dot(Q, viexog))\n\n hess_fe -= fac * xtvix / rvir\n\n hess_re = hess_re - 0.5 * fac * (D/rvir - np.outer(B, B) / rvir**2)\n\n hess_fere = -fac * hess_fere / rvir\n\n if self.reml:\n for j1 in range(self.k_re2):\n Q1 = np.linalg.solve(xtvix, xtax[j1])\n for j2 in range(j1 + 1):\n Q2 = np.linalg.solve(xtvix, xtax[j2])\n a = np.trace(np.dot(Q1, Q2))\n a -= np.trace(np.linalg.solve(xtvix, F[j1][j2]))\n a *= 0.5\n hess_re[j1, j2] += a\n if j1 > j2:\n hess_re[j2, j1] += a\n\n # Put the blocks together to get the Hessian.\n m = self.k_fe + self.k_re2\n hess = np.zeros((m, m), dtype=np.float64)\n hess[0:self.k_fe, 0:self.k_fe] = hess_fe\n hess[0:self.k_fe, self.k_fe:] = hess_fere.T\n hess[self.k_fe:, 0:self.k_fe] = hess_fere\n hess[self.k_fe:, self.k_fe:] = hess_re\n\n return hess",
"def hessian(self, x):\n h = self._hess(\n time_series=self.observed_data,\n a=x[0],\n b=x[1],\n c=x[2],\n sigma=self.sigma\n )\n return h",
"def get_hessian(self):\n return self.tc.hessian_func(\n self.pf.XS[:, :, 0].transpose(),\n self.pf.XS[:, :, 1].transpose(),\n self.pf.WS[:].transpose())",
"def hessian(self):\n\n with open('lig.fchk', 'r') as fchk:\n\n lines = fchk.readlines()\n hessian_list = []\n\n for count, line in enumerate(lines):\n if line.startswith('Cartesian Force Constants'):\n start_pos = count + 1\n if line.startswith('Dipole Moment'):\n end_pos = count\n\n if not start_pos and end_pos:\n raise EOFError('Cannot locate Hessian matrix in lig.fchk file.')\n\n for line in lines[start_pos: end_pos]:\n # Extend the list with the converted floats from the file, splitting on spaces and removing '\\n' tags.\n hessian_list.extend([float(num) * 0.529 for num in line.strip('\\n').split()])\n\n hess_size = 3 * len(self.molecule.molecule['input'])\n\n hessian = zeros((hess_size, hess_size))\n\n # Rewrite Hessian to full, symmetric 3N * 3N matrix rather than list with just the non-repeated values.\n m = 0\n for i in range(hess_size):\n for j in range(i + 1):\n hessian[i, j] = hessian_list[m]\n hessian[j, i] = hessian_list[m]\n m += 1\n\n check_symmetry(hessian)\n\n return hessian",
"def hessian(f, s, p, dx=1e-6, gmix=False, k =['All']):\n import numpy\n N = (p.m['n'] - 1)\n H = numpy.zeros(shape=(N,N))\n for m in range(1, N + 1):\n for z in range(1, N + 1):\n H[m - 1, z - 1] = FD(f, s, p, 2, z, m, dx, gmix, k)\n \n return H",
"def hessian(self, var: ndarray) -> ndarray:\n beta, gamma = self.get_vars(var)\n sqrt_gamma = np.sqrt(gamma)\n d = self.get_varmat(gamma)\n femat = self.get_femat(beta)\n obsvar = split_by_sizes(self.get_obsvar(), self.data.group_sizes)\n remat = split_by_sizes(self.get_remat(), self.data.group_sizes)\n dlmats = [DLMat(obsvar[i], remat[i]*sqrt_gamma)\n for i in range(self.data.num_groups)]\n\n beta_fisher = femat.T.dot(d.invdot(femat))\n beta_fisher += self.fevar.prior_hessian(beta)\n\n gamma_fisher = np.zeros((self.revar.size, self.revar.size))\n for i, dlmat in enumerate(dlmats):\n gamma_fisher += 0.5*(remat[i].T.dot(dlmat.invdot(remat[i])))**2\n gamma_fisher += self.revar.prior_hessian(gamma)\n\n return block_diag(beta_fisher, gamma_fisher)",
"def hessian(f, x, s=_DEFAULT_STEP):\n x = np.asarray(x)\n n = len(x)\n e = s * np.eye(n)\n\n forw1 = np.zeros(n)\n forw2 = np.zeros((n, n))\n for i in range(n):\n forw1[i] = f(x + e[i])\n for j in range(i, n):\n forw2[i, j] = forw2[j, i] = f(x + e[i] + e[j])\n\n H = (forw2 - _colvec(forw1) - _rowvec(forw1) + f(x)) / s**2\n return H",
"def calculate_hessian(model, data, step_size):\n hessian = pd.DataFrame(0, index = np.arange(data.shape[0]), columns=pd.MultiIndex.from_product([model.output_names, model.perturbation_feature_pairs + model.feature_names], names=['model.output_names','model.feature_pairs']))\n for output_name in model.output_names:\n hessian_calculation_helpers = create_hessian_calculation_columns(model, output_name)\n mixed_derivative = (data.loc[:, hessian_calculation_helpers[0]].values - data.loc[:, hessian_calculation_helpers[1]].values - data.loc[:, hessian_calculation_helpers[2]].values + data.loc[:, hessian_calculation_helpers[3]].values) / (step_size * step_size)\n mixed_derivative *= np.sign(data.loc[:, hessian_calculation_helpers[1]].values + data.loc[:, hessian_calculation_helpers[2]].values - 2 * data.loc[:, hessian_calculation_helpers[0]].values)\n hessian.loc[:, zip([output_name] * len(model.perturbation_feature_pairs), model.perturbation_feature_pairs)] = mixed_derivative\n hessian.loc[:, zip([output_name] * len(model.feature_names), model.feature_names)] = np.array([(data.loc[:, (output_name,f)] - data.loc[:, (output_name,'core')]) / (step_size) for f in model.feature_names]).T\n return hessian",
"def fisher_matrix(model, dataset, samples):\n inputs, labels = dataset\n weights = model.trainable_weights\n variance = [tf.zeros_like(tensor) for tensor in weights]\n\n for _ in range(samples):\n # Select a random element from the dataset.\n index = np.random.randint(len(inputs))\n data = inputs[index]\n\n # When extracting from the array we lost a dimension so put it back.\n data = tf.expand_dims(data, axis=0)\n\n # Collect gradients.\n with tf.GradientTape() as tape:\n output = model(data)\n log_likelihood = tf.math.log(output)\n\n gradients = tape.gradient(log_likelihood, weights)\n\n # If the model has converged, we can assume that the current weights\n # are the mean, and each gradient we see is a deviation. The variance is\n # the average of the square of this deviation.\n variance = [var + (grad ** 2) for var, grad in zip(variance, gradients)]\n\n fisher_diagonal = [tensor / samples for tensor in variance]\n return fisher_diagonal",
"def compute_hessian_logreg(tx, w):\n t = tx.dot(w)\n s = np.diag(sigmoid(t)*(1 - sigmoid(t)))\n\n return tx.T.dot(s).dot(tx)",
"def get_hessian(phi, pred, t, dot_product, reg= 1, regression= \"logistic\"):\n R = np.eye(pred.shape[0])\n if regression == \"logistic\":\n for i in range(pred.shape[0]):\n R[i,i] = pred[i,0] * (1- pred[i,0])\n elif regression == \"probit\":\n for i in range(pred.shape[0]):\n y_n = pred[i,0]\n t_n = t[i,0] \n dotp = dot_product[i, 0]\n pdf = norm.pdf(dotp)\n\n term1 = 1/ (y_n * (1- y_n) + TOLERANCE)\n term2 = (y_n - t_n)/(y_n**2 * (1- y_n) + TOLERANCE)\n term3 = (y_n - t_n)/((1- y_n)**2 * y_n + TOLERANCE)\n term4 = (y_n - t_n)* dotp/(y_n * (1- y_n) * pdf + TOLERANCE)\n\n R[i,i] = (term1 - term2 + term3 - term4)*(pdf**2)\n\n # Add regularization\t\t\t\n hessian = np.matmul(np.matmul(phi.T, R), phi) + np.eye(phi.shape[1])/reg\n return hessian",
"def calc_hessian(t_Wbt, t_dlogFPdA, t_FP):\n\n tmp = t_Wbt.dimshuffle(0, 'x', 1) * t_FP # b, j, t\n tmp1 = tmp.dimshuffle(0, 'x', 1, 2) * t_dlogFPdA\n\n return T.dot(\n tmp1.dimshuffle(1, 0, 2, 3).reshape((self.n_l, -1)),\n t_dlogFPdA.dimshuffle(1, 0, 2, 3).reshape((self.n_l, -1)).T\n )",
"def hessian(self):\n\n hess_size = 3 * len(self.molecule.molecule['input'])\n\n # output.dat is the psi4 output file.\n with open('output.dat', 'r') as file:\n\n lines = file.readlines()\n\n for count, line in enumerate(lines):\n if '## Hessian' in line or '## New Matrix (Symmetry' in line:\n # Set the start of the hessian to the row of the first value.\n hess_start = count + 5\n break\n else:\n raise EOFError('Cannot locate Hessian matrix in output.dat file.')\n\n # Check if the hessian continues over onto more lines (i.e. if hess_size is not divisible by 5)\n extra = 0 if hess_size % 5 == 0 else 1\n\n # hess_length: # of cols * length of each col\n # + # of cols - 1 * #blank lines per row of hess_vals\n # + # blank lines per row of hess_vals if the hess_size continues over onto more lines.\n hess_length = (hess_size // 5) * hess_size + (hess_size // 5 - 1) * 3 + extra * (3 + hess_size)\n\n hess_end = hess_start + hess_length\n\n hess_vals = []\n\n for file_line in lines[hess_start:hess_end]:\n # Compile lists of the 5 Hessian floats for each row.\n # Number of floats in last row may be less than 5.\n # Only the actual floats are added, not the separating numbers.\n row_vals = [float(val) for val in file_line.split() if len(val) > 5]\n hess_vals.append(row_vals)\n\n # Remove blank list entries\n hess_vals = [elem for elem in hess_vals if elem]\n\n reshaped = []\n\n # Convert from list of (lists, length 5) to 2d array of size hess_size x hess_size\n for old_row in range(hess_size):\n new_row = []\n for col_block in range(hess_size // 5 + extra):\n new_row += hess_vals[old_row + col_block * hess_size]\n\n reshaped.append(new_row)\n\n hess_matrix = array(reshaped)\n\n # Cache the unit conversion.\n conversion = 627.509391 / (0.529 ** 2)\n hess_matrix *= conversion\n\n check_symmetry(hess_matrix)\n\n return hess_matrix",
"def hessian_factor(self, params, scale=None, observed=True):\n raise NotImplementedError # pragma: no cover",
"def FeH(self):\n\n #return math.log10(self.glb[user_params_index[\"Zs\"]]*constants.solar_x/(self.glb[user_params_index[\"Xs\"]]*constants.solar_z))/constants.A_FeH\n return math.log10(self.glb[iz0]*constants.solar_x/(self.glb[ix0]*constants.solar_z))/constants.A_FeH",
"def likelihood_hessian(self, sign_switch, hyperparam):\n\n self.timer.tic()\n\n if numpy.isscalar(hyperparam):\n hyperparam_ = numpy.array([hyperparam], dtype=float)\n else:\n hyperparam_ = hyperparam\n\n # Check if Hessian is already computed for an identical hyperparam\n if (self.ell_hessian_hyperparam is not None) and \\\n (self.ell_hessian is not None) and \\\n (hyperparam_.size == self.ell_hessian_hyperparam.size) and \\\n numpy.allclose(hyperparam_, self.ell_hessian_hyperparam,\n atol=self.hyperparam_tol):\n if sign_switch:\n return -self.ell_hessian\n else:\n return self.ell_hessian\n\n # Compute second derivative w.r.t eta\n d2ell_deta2 = self._likelihood_der2_eta(hyperparam)\n\n # To convert derivative to log scale, Jacobian is needed. Note: The\n # Jacobian itself is already converted to log scale.\n if self.use_log_eta or self.use_log_scale:\n jacobian_ = self.likelihood_jacobian(False, hyperparam)\n\n # Since we use xi = log_eta instead of eta as the variable, the\n # derivative of ell w.r.t log_eta should be taken into account.\n if self.use_log_eta:\n eta = self._hyperparam_to_eta(hyperparam)\n if numpy.isscalar(jacobian_):\n dell_deta = jacobian_\n else:\n dell_deta = jacobian_[0]\n\n # Convert second derivative to log scale (Note: dell_deta is\n # already in log scale)\n d2ell_deta2 = d2ell_deta2 * eta**2 * numpy.log(10.0)**2 + \\\n dell_deta * numpy.log(10.0)\n\n # Hessian here is a 2D array of size 1.\n hessian = d2ell_deta2\n\n # Compute Hessian w.r.t scale\n if hyperparam_.size > self.scale_index:\n\n # Compute second derivative w.r.t scale\n d2ell_dscale2 = self._likelihood_der2_scale(hyperparam)\n\n # Convert derivative w.r.t log of scale (if needed)\n if self.use_log_scale:\n scale = self._hyperparam_to_scale(\n hyperparam_[self.scale_index:])\n dell_dscale = jacobian_[self.scale_index:]\n\n for p in range(scale.size):\n for q in range(scale.size):\n if p == q:\n\n # dell_dscale is already converted to logscale\n d2ell_dscale2[p, q] = d2ell_dscale2[p, q] * \\\n scale[p]**2 * (numpy.log(10.0)**2) + \\\n dell_dscale[p] * numpy.log(10.0)\n else:\n d2ell_dscale2[p, q] = d2ell_dscale2[p, q] * \\\n scale[p] * scale[q] * (numpy.log(10.0)**2)\n\n # Compute second mixed derivative w.r.t scale and eta\n d2ell_deta_dscale = self._likelihood_der2_mixed(hyperparam)\n\n if self.use_log_eta:\n eta = self._hyperparam_to_eta(hyperparam)\n for p in range(scale.size):\n d2ell_deta_dscale[0, p] = d2ell_deta_dscale[0, p] * \\\n eta * numpy.log(10.0)\n\n if self.use_log_scale:\n scale = self._hyperparam_to_scale(\n hyperparam_[self.scale_index:])\n for p in range(scale.size):\n d2ell_deta_dscale[0, p] = d2ell_deta_dscale[0, p] * \\\n scale[p] * numpy.log(10.0)\n\n # Concatenate derivatives to form Hessian of all variables\n hessian = numpy.block(\n [[d2ell_deta2, d2ell_deta_dscale],\n [d2ell_deta_dscale.T, d2ell_dscale2]])\n\n # Store hessian to member data (without sign-switch).\n self.ell_hessian = hessian\n self.ell_hessian_hyperparam = hyperparam_\n\n if sign_switch:\n hessian = -hessian\n\n self.timer.toc()\n\n return hessian",
"def hessian(beta, X):\n w = sigmoid(np.dot(X, beta))\n w_vector = w * (1-w)\n \n return np.dot(X.T, X*w_vector)",
"def _getHessian(self):\n assert self.init, 'GP not initialised'\n assert self.fast is False, 'Not supported for fast implementation'\n\n if self.cache['Hessian'] is None:\n ParamMask = self.gp.getParamMask()['covar']\n std = sp.zeros(ParamMask.sum())\n H = self.gp.LMLhess_covar()\n It = (ParamMask[:, 0] == 1)\n self.cache['Hessian'] = H[It, :][:, It]\n\n return self.cache['Hessian']",
"def _getFisher(self):\n Ctot = self.vd.getGP().getCovar()\n Ki = sp.linalg.inv(Ctot.K())\n n_scales = self.vd.getNumberScales()\n out = sp.zeros((n_scales, n_scales))\n for m in range(n_scales):\n out[m, m] = 0.5 * sp.trace(\n sp.dot(Ki,\n sp.dot(\n Ctot.Kgrad_param(m), sp.dot(Ki, Ctot.Kgrad_param(\n m)))))\n for n in range(m):\n out[m, n] = 0.5 * sp.trace(\n sp.dot(Ki,\n sp.dot(\n Ctot.Kgrad_param(m),\n sp.dot(Ki, Ctot.Kgrad_param(n)))))\n out[n, m] = out[m, n]\n return out",
"def H(self) -> BaseMatrix:",
"def H(self) -> BaseMatrix:",
"def H(nodes, fct=\"identity\", betas=None, sigma=None):\n if fct == \"identity\":\n # Return: Mx1 vector\n return sigma * nodes\n elif fct == \"one\":\n # Return: a scalar\n return 1\n elif fct == \"logistic\":\n # Return: an MxI matrix\n return 1 / (1 + np.outer(np.exp(-sigma * nodes), np.exp(betas)))\n elif fct == \"logistic_identity\":\n # Return: an MxI matrix\n return np.diag(nodes).dot(1 / (1 + np.outer(np.exp(-sigma * nodes), np.exp(betas))))\n elif fct == \"hessian_sigma\":\n # Return: an MxI matrix\n to_divide = (1 + np.outer(np.exp(-sigma * nodes), np.exp(betas))) * (\n 1 + np.outer(np.exp(sigma * nodes), np.exp(-betas)))\n return np.diag(nodes ** 2).dot(1 / to_divide)\n elif fct == \"hessian_betaj\":\n # Return: an MxI matrix\n to_divide = (1 + np.outer(np.exp(-sigma * nodes), np.exp(betas))) * (\n 1 + np.outer(np.exp(sigma * nodes), np.exp(-betas)))\n return 1 / to_divide\n elif fct == \"hessian_betai_betaj\":\n # Return: a scalar\n return 0\n elif fct == \"hessian_sigma_betaj\":\n # Return: an MxI matrix\n to_divide = (1 + np.outer(np.exp(-sigma * nodes), np.exp(betas))) * (\n 1 + np.outer(np.exp(sigma * nodes), np.exp(-betas)))\n return np.diag(nodes).dot(1 / to_divide)\n else:\n return None",
"def get_prior_fisher_matrix():\n\tprior_fisher = 0.1 * np.eye(2)\n\t\n\treturn prior_fisher",
"def gradient_and_hessian(self, y, params, natural_gradient=True):\n\n (mu,) = self.predict(params)\n\n grad = np.zeros(shape=(len(y), 1))\n grad[:, 0] = mu - y\n\n if natural_gradient:\n fisher_matrix = np.zeros(shape=(len(y), 1, 1))\n fisher_matrix[:, 0, 0] = mu\n\n grad = np.linalg.solve(fisher_matrix, grad)\n\n hess = np.ones(shape=(len(y), 1)) # we set the hessian constant\n else:\n hess = mu\n\n return grad, hess",
"def calculate_hessian(y, tx, w):\n txw = tx.dot(w)\n diag = sigmoid(txw)*(np.ones(txw.shape)-sigmoid(txw))\n return np.matmul(np.multiply(tx,diag).T,tx)",
"def get_Hessian(self, output_name=None): \n \n\n #return array([self.hessian[in1][in2][output_name] for (in1,in2) in product(self.param_names, self.param_names)])\n return array([self.hessian[in1][in2][output_name] for (in1,in2) in product(self.param_names, self.param_names)])",
"def fisher_diag(\n negative_log_likelihood: LossFun,\n params: Any,\n inputs: jnp.ndarray,\n targets: jnp.ndarray,\n) -> jnp.DeviceArray:\n return jnp.square(\n ravel(jax.grad(negative_log_likelihood)(params, inputs, targets)))",
"def test_xml_hessian(xml_parser_disp):\n\n hessian = xml_parser_disp.get_hessian()\n assert hessian.shape == (24, 24)\n test = np.array([-0.46355041, 0. , 0. , -0.05917741])\n np.testing.assert_allclose(hessian[0][0:4], test)\n test = np.array([ 0.11487952, 0.08151255, 0.08370068, 0.11487952])\n np.testing.assert_allclose(hessian[15][0:4], test)\n test = np.array([ 0.11431486, -0.0818301 ])\n np.testing.assert_allclose(hessian[15][9:11], test)",
"def hessian(self, x, y, obj_factor):\n pass",
"def hess_f(z, X, Y, _lambda):\r\n d = z.shape[0]\r\n w = z[:-1]\r\n beta = z[-1]\r\n hess = np.zeros((d, d))\r\n hess[:-1, :-1] = - np.einsum('ki,kj->ij', X * g(X.dot(w) + beta), X * g(-(X.dot(w) + beta)))\r\n hess[:-1, [-1]] = - np.einsum('ij,ik->kj', g(X.dot(w) + beta) * g(-(X.dot(w) + beta)), X) + 2 * _lambda\r\n hess[[-1], :-1] = hess[:-1, [-1]].T\r\n hess[-1, -1] = - np.dot(g(X.dot(w) + beta).T, g(-(X.dot(w) + beta))) + 2 * _lambda\r\n return hess",
"def Hf(self, x, X):\n if type(x) == list:\n x = np.array(x)\n return self.model.hf(x, X, *self.params)",
"def compute_hessian_vector_product(self, function, arguments):",
"def calc_hessian(self, reuse_first=False):\n \n self.setup()\n \n # Create our 3D dictionary the first time we execute.\n if not self.hessian:\n for name1 in self.param_names:\n self.hessian[name1] = {}\n for name2 in self.param_names:\n self.hessian[name1][name2] = {}\n \n self.hessian_ondiag_case = OrderedDict()\n self.hessian_offdiag_case = OrderedDict()\n\n # Pull stepsizes from driver's parameters\n base_param = OrderedDict()\n stepsize = {}\n for key, item in self._parent.get_parameters().iteritems():\n \n if item.fd_step:\n stepsize[key] = item.fd_step\n else:\n stepsize[key] = self.default_stepsize\n\n # Diagonal terms in Hessian always need base point\n # Usually, we will have saved this when we calculated\n # the gradient.\n if reuse_first:\n base_param = self.base_param\n base_data = self.base_data\n else:\n # Pull initial state from driver's parameters\n for key, item in self._parent.get_parameters().iteritems():\n base_param[key] = item.evaluate()\n \n base_data = self._run_point(base_param)\n \n # Assemble input data\n # Cases : ondiag [fp, fm]\n deltas = [1, -1]\n for param in self.param_names:\n \n pcase = []\n for j_step, delta in enumerate(deltas):\n \n case = base_param.copy()\n case[param] += delta*stepsize[param]\n pcase.append({ 'param': case })\n \n self.hessian_ondiag_case[param] = pcase\n \n # Assemble input data\n # Cases : offdiag [fpp, fpm, fmp, fmm]\n deltas = [[1, 1],\n [1, -1],\n [-1, 1],\n [-1, -1]]\n for i, param1 in enumerate(self.param_names):\n \n offdiag = {}\n for param2 in self.param_names[i+1:]:\n \n pcase = []\n for delta in deltas:\n \n case = base_param.copy()\n case[param1] += delta[0]*stepsize[param1]\n case[param2] += delta[1]*stepsize[param2]\n pcase.append({ 'param': case })\n offdiag[param2] = pcase\n \n self.hessian_offdiag_case[param1] = offdiag\n \n # Run all \"cases\".\n # TODO - Integrate OpenMDAO's concurrent processing capability once it\n # is formalized. 
This operation is inherently paralellizable.\n \n # We don't need to re-run on-diag cases if the gradients were\n # calculated with Central Difference.\n if reuse_first and self.form=='central':\n for key, case in self.hessian_ondiag_case.iteritems():\n \n gradient_case = self.gradient_case[key]\n for ipcase, pcase in enumerate(case):\n \n gradient_ipcase = gradient_case[ipcase]\n pcase['data'] = gradient_ipcase['data'] \n else:\n for case in self.hessian_ondiag_case.values():\n for pcase in case:\n data = self._run_point(pcase['param'])\n pcase['data'] = data\n\n # Off-diag cases must always be run.\n for cases in self.hessian_offdiag_case.values():\n for case in cases.values():\n for pcase in case:\n pcase['data'] = self._run_point(pcase['param'])\n\n \n # Calculate Hessians - On Diagonal\n for key, case in self.hessian_ondiag_case.iteritems():\n \n eps = stepsize[key]\n \n for name in list(self.objective_names + \\\n self.eqconst_names + \\\n self.ineqconst_names):\n self.hessian[key][key][name] = \\\n diff_2nd_xx(case[0]['data'][name],\n base_data[name],\n case[1]['data'][name], eps)\n \n # Calculate Hessians - Off Diagonal\n for key1, cases in self.hessian_offdiag_case.iteritems():\n \n eps1 = stepsize[key1]\n for key2, case in cases.iteritems():\n \n eps2 = stepsize[key2]\n \n for name in list(self.objective_names + \\\n self.eqconst_names + \\\n self.ineqconst_names):\n self.hessian[key1][key2][name] = \\\n diff_2nd_xy(case[0]['data'][name],\n case[1]['data'][name],\n case[2]['data'][name],\n case[3]['data'][name],\n eps1, eps2)\n \n # Symmetry\n # (Should ponder whether we should even store it.)\n self.hessian[key2][key1][name] = \\\n self.hessian[key1][key2][name]",
"def calc_hessian_at(self, x: np.ndarray) -> np.ndarray:\n\n if self.hessian_f:\n # if the problem has knowledge about the hessian, use it directly without approximation\n return self.hessian_f(x)\n\n return hessian_approximation(self.f, x)",
"def logistic(self,w,Xi):\n # print(w.T)\n # print(Xi)\n a = np.dot(w.T,Xi)\n return 1/(1+np.exp(-a))",
"def hessian(x):\n x_grad = np.gradient(x) \n hessian = np.empty((x.ndim, x.ndim) + x.shape, dtype=x.dtype) \n for k, grad_k in enumerate(x_grad):\n \"\"\"\n iterate over dimensions\n apply gradient again to every component of the first derivative.\n \"\"\"\n tmp_grad = np.gradient(grad_k) \n for l, grad_kl in enumerate(tmp_grad):\n hessian[k, l, :, :] = grad_kl\n return hessian",
"def log_fisher(self):\n raise NotImplementedError(\"the log_fisher property should \"\n \"be defined in the Estimator sub-class\")",
"def calculate_hessian(self, finite_step):\n\n # Create the OpenMM coords list from the qm coordinates and convert to nm\n input_coords = self.molecule.coords['qm'].flatten() * constants.ANGS_TO_NM\n\n # We get each hessian element from = [E(dx + dy) + E(-dx - dy) - E(dx - dy) - E(-dx + dy)] / 4 dx dy\n hessian = np.zeros((3 * len(self.molecule.atoms), 3 * len(self.molecule.atoms)))\n\n for i in range(3 * len(self.molecule.atoms)):\n for j in range(i, 3 * len(self.molecule.atoms)):\n # Mutate the atomic coords\n # Do less energy evaluations on the diagonal of the matrix\n if i == j:\n coords = deepcopy(input_coords)\n coords[i] += 2 * finite_step\n e1 = self.get_energy(self.format_coords(coords))\n coords = deepcopy(input_coords)\n coords[i] -= 2 * finite_step\n e2 = self.get_energy(self.format_coords(coords))\n hessian[i, j] = (e1 + e2) / (4 * finite_step**2 * self.molecule.atoms[i // 3].atomic_mass)\n else:\n coords = deepcopy(input_coords)\n coords[i] += finite_step\n coords[j] += finite_step\n e1 = self.get_energy(self.format_coords(coords))\n coords = deepcopy(input_coords)\n coords[i] -= finite_step\n coords[j] -= finite_step\n e2 = self.get_energy(self.format_coords(coords))\n coords = deepcopy(input_coords)\n coords[i] += finite_step\n coords[j] -= finite_step\n e3 = self.get_energy(self.format_coords(coords))\n coords = deepcopy(input_coords)\n coords[i] -= finite_step\n coords[j] += finite_step\n e4 = self.get_energy(self.format_coords(coords))\n hessian[i, j] = (e1 + e2 - e3 - e4) / (4 * finite_step ** 2 * self.molecule.atoms[i // 3].atomic_mass)\n\n # Now make the matrix symmetric\n sym_hessian = hessian + hessian.T - np.diag(hessian.diagonal())\n return sym_hessian",
"def approx_hessian(f, x, epsilon):\n n = len(x)\n H = np.zeros((n, n))\n ei = np.zeros(n)\n for i in range(n):\n ei[i] = .5 * epsilon\n g1 = approx_gradient(f, x + ei, epsilon)\n g2 = approx_gradient(f, x - ei, epsilon)\n H[i, :] = (g1 - g2) / epsilon\n ei[i] = 0\n return H",
"def hes_res(self, params, **kwargs):\n e = kwargs.get(\"e\", self.problem.data_e)\n\n hes = self.hessian.eval(params, **kwargs)\n for i, e_i in enumerate(e):\n hes[:, :, i] = - hes[:, :, i] / e_i\n\n return hes, self.jac_res(params, **kwargs)",
"def test_hessian():\n x, y = fwd.Variable(), fwd.Variable()\n rosen = 100.0*(y - x**2)**2 + (1 - x)**2.0\n rosen_hessian = lambda x, y: \\\n np.array([[1200*x**2-400*x+2, -400*x],\n [-400*x, 200]])\n rosen_hessian_returned = rosen.hessian_at({x: 1.0, y: 1.0})\n rosen_hessian_expected = rosen_hessian(1.0, 1.0)\n for i in range(2):\n for j in range(2):\n assert equals(rosen_hessian_returned[i, j],\n rosen_hessian_expected[i, j])",
"def rm_hamiltonian(params, momentum, log_prob_func, jitter, normalizing_const, softabs_const=1e6, sampler=Sampler.HMC, integrator=Integrator.EXPLICIT, metric=Metric.HESSIAN):\n\n log_prob = log_prob_func(params)\n ndim = params.nelement()\n pi_term = ndim * torch.log(2.*torch.tensor(pi))\n\n fish, abs_eigenvalues = fisher(params, log_prob_func, jitter=jitter, normalizing_const=normalizing_const, softabs_const=softabs_const, metric=metric)\n\n if abs_eigenvalues is not None:\n if util.has_nan_or_inf(fish) or util.has_nan_or_inf(abs_eigenvalues):\n print('Invalid Fisher: {} , abs_eigenvalues: {}, params: {}'.format(fish, abs_eigenvalues, params))\n raise util.LogProbError()\n else:\n if util.has_nan_or_inf(fish):\n print('Invalid Fisher: {}, params: {}'.format(fish, params))\n raise util.LogProbError()\n\n if metric == Metric.SOFTABS:\n log_det_abs = abs_eigenvalues.log().sum()\n else:\n log_det_abs = torch.slogdet(fish)[1]\n fish_inverse_momentum = cholesky_inverse(fish, momentum)\n quadratic_term = torch.matmul(momentum.view(1, -1), fish_inverse_momentum)\n hamiltonian = - log_prob + 0.5 * pi_term + 0.5 * log_det_abs + 0.5 * quadratic_term\n if util.has_nan_or_inf(hamiltonian):\n print('Invalid hamiltonian, log_prob: {}, params: {}, momentum: {}'.format(log_prob, params, momentum))\n raise util.LogProbError()\n\n return hamiltonian",
"def load_force_constant_matrix(self):\n # Orca print the hessian to .hess file. you need to provide .hess instead of .log\n\n raise LogError('The load_force_constant_matrix method is not implemented for Orca Logs')",
"def approx_hessian(f, x, epsilon, args=()):\n n = x.shape[0]\n npts = 1\n if len(x.shape) > 1:\n npts = x.shape[1]\n H = np.zeros((n, n, npts))\n ei = np.zeros(n)\n for i in range(n):\n ei[i] = .5 * epsilon\n g1 = approx_gradient(f, (x.T + ei).T, epsilon, args=args)\n g2 = approx_gradient(f, (x.T - ei).T, epsilon, args=args)\n H[i, ...] = np.reshape((g1 - g2) / epsilon, (n, npts))\n ei[i] = 0\n return H.squeeze()",
"def compute_current_h(x, matrix, x_format):\n subs = {}\n\n for i, state in enumerate(x_format):\n subs[state] = x[i, 0]\n\n result = matrix.evalf(subs=subs)\n # print(\"hx result : \", result)\n return result",
"def grid_hessian(self, gridaxes):\n assert np.isscalar(self.dim), 'Hessian only implemented for scalar and vector functions'\n assert len(gridaxes) == self.sdim, \"Input has wrong dimension\"\n colloc = [collocation_derivs(self.kvs[i], gridaxes[i], derivs=2) for i in range(self.sdim)]\n\n d = self.sdim\n n_hess = ((d+1)*d) // 2 # number of components in symmetric part of Hessian\n N = tuple(len(g) for g in gridaxes) # shape of tensor grid\n\n # determine size of output array\n if self.dim == 1:\n out_shape = N + (n_hess,)\n else:\n out_shape = N + (self.dim, n_hess)\n hess = np.empty(out_shape, dtype=self.coeffs.dtype)\n\n i_hess = 0\n for i in reversed(range(self.sdim)): # x-component is the last one\n for j in reversed(range(i+1)):\n # compute vector of derivative indices\n D = self.sdim * [0]\n D[i] += 1\n D[j] += 1\n ops = [colloc[k][D[k]] for k in range(self.sdim)] # derivatives in directions i,j\n\n if self.dim == 1: # scalar function\n hess[..., i_hess] = apply_tprod(ops, self.coeffs) # D_i D_j (self)\n else: # vector function\n for k in range(self.dim):\n hess[..., k, i_hess] = apply_tprod(ops, self.coeffs[..., k]) # D_i D_j (self[k])\n i_hess += 1\n return hess # shape: shape(grid) x self.dim x n_hess",
"def compute_hessian(self, dw, trn_X, trn_y, epsilon: float = 0.01):\n norm = torch.cat([w.view(-1) for w in dw]).norm()\n eps = epsilon / norm\n\n dalpha_pos = self.finite_difference(dw, trn_X, trn_y, eps, wrt='alpha')\n dalpha_neg = self.finite_difference(dw, trn_X, trn_y, -eps, wrt='alpha')\n hessian = [(p - n) / (2. * eps) for p, n in zip(dalpha_pos, dalpha_neg)]\n return hessian",
"def __call__(self, h):\n\n Wh = self.W(h)\n p_yt = F.log_softmax(Wh) # should be (B x V)\n\n return p_yt",
"def Heff(self, x):\r\n x = x.reshape(self.bond * self.p, -1)\r\n # Interactions between left environment and left site\r\n result = self.HA.reshape(self.bond * self.p, -1) @ x\r\n # Interactions between right environment and right site\r\n result += x @ self.HA.reshape(self.bond * self.p, -1).T\r\n # Interactions between left and right site\r\n x = x.reshape(self.bond, self.p, self.bond, self.p)\r\n result = result.reshape(self.bond, self.p, self.bond, self.p)\r\n result += np.einsum('xyij,lirj->lxry', self.NN_interaction, x)\r\n\r\n return result.ravel()",
"def hessian_diag(\n loss: LossFun,\n params: Any,\n inputs: jnp.DeviceArray,\n targets: jnp.DeviceArray,\n) -> jnp.DeviceArray:\n vs = jnp.eye(ravel(params).size)\n comp = lambda v: jnp.vdot(v, ravel(hvp(loss, v, params, inputs, targets)))\n return jax.vmap(comp)(vs)",
"def hessian_matrix_eigvals(H_elems):\n return _symmetric_compute_eigenvalues(H_elems)",
"def build_eval(self, inputs):\n def evaluate_hessian(x):\n return self.Hx(inputs, x) + self.reg_coeff * x\n\n return evaluate_hessian",
"def llhessian(store, beta):\n nobs = store['yvec'].shape[0]\n kreg = store['xmat'].shape[1]\n lamb = exp(dot(store['xmat'], beta))\n sum = zeros((kreg, kreg))\n for i in xrange(nobs):\n sum = sum + lamb[i] * outer(store['xmat'][i], store['xmat'][i])\n return -sum",
"def hat_matrix(X, include_bias=True):\n if include_bias:\n X = np.hstack([np.ones([len(X), 1]), X])\n\n A = np.matmul(X.T, X)\n\n LL = cho_factor(A)\n return np.matmul(X, cho_solve(LL, X.T))",
"def approx_hessian_diag(f, x, epsilon, args=()):\n n = x.shape[0]\n npts = 1\n if len(x.shape) > 1:\n npts = x.shape[1]\n h = np.zeros((n, npts))\n ei = np.zeros(n)\n fx = f(x, *args)\n for i in range(n):\n ei[i] = epsilon\n h[i, :] = (f((x.T + ei).T, *args) + f((x.T - ei).T, *args) - 2 * fx) / (epsilon ** 2)\n ei[i] = 0\n return h.squeeze()",
"def hessian(self, x, y, amp, sigma_x, sigma_y, center_x = 0, center_y = 0):\n f_ = self.function(x, y, amp, sigma_x, sigma_y, center_x, center_y)\n f_xx = f_ * ( (-1./sigma_x**2) + (center_x-x)**2/sigma_x**4 )\n f_yy = f_ * ( (-1./sigma_y**2) + (center_y-y)**2/sigma_y**4 )\n f_xy = f_ * (center_x-x)/sigma_x**2 * (center_y-y)/sigma_y**2\n return f_xx, f_xy, f_xy, f_yy",
"def test_hmf_init(self):\n spec = np.random.random((20, 100))\n invvar = np.random.random((20, 100))\n hmf = HMF(spec, invvar)\n assert hmf.K == 4\n assert log.level == 20 # INFO\n hmf = HMF(spec, invvar, K=6, verbose=True)\n assert hmf.K == 6\n assert log.level == 10 # DEBUG",
"def calculate_fisher_information(self,\n x_filename,\n n_events=1):\n\n if self.model is None:\n raise ValueError('No model -- train or load model before evaluating it!')\n\n # Load training data\n logging.info('Loading evaluation data')\n xs = load_and_check(x_filename)\n n_samples = xs.shape[0]\n\n # Estimate scores\n if self.method in ['sally', 'sallino']:\n logging.info('Starting score evaluation')\n\n t_hats = evaluate_local_score_model(\n model=self.model,\n xs=xs\n )\n else:\n raise NotImplementedError('Fisher information calculation only implemented for SALLY estimators')\n\n # Calculate Fisher information\n n_parameters = t_hats.shape[1]\n fisher_information = np.zeros((n_parameters, n_parameters))\n for t_hat in t_hats:\n fisher_information += np.outer(t_hat, t_hat)\n fisher_information = float(n_events) / float(n_samples) * fisher_information\n\n # Calculate expected score\n expected_score = np.mean(t_hats, axis=0)\n logging.info('Expected score (should be close to zero): %s', expected_score)\n\n return fisher_information",
"def logistic(weights, data, targets, hyperparameters):\n\n # TODO: Finish this function\n\n return f, df, y",
"def _factorsH(self, inputs):\n return tensor.dot(self._hidden_activation(inputs), self.whf)",
"def hessianstructure(self):\n pass",
"def hmc_step_stiefel(X0, log_pi, args=(), epsilon=.3, T=500):\n n, d = X0.shape\n U = np.random.randn(*X0.shape)\n tmp = np.dot(X0.T, U)\n U = orth_stiefel_project(X0, U)\n log_pi0, G0 = log_pi(X0, *args)\n H0 = log_pi0 + .5 * np.einsum('ij,ij', U, U)\n X1 = X0.copy()\n G1 = G0\n for tau in xrange(T):\n U += 0.5 * epsilon * G1\n U = orth_stiefel_project(X0, U)\n A = np.dot(X1.T, U)\n S = np.dot(U.T, U)\n exptA = scipy.linalg.expm(-epsilon * A)\n tmp0 = np.bmat([X0, U])\n tmp1 = scipy.linalg.expm(epsilon * np.bmat([[A, -S], \n [np.eye(d), A]]))\n tmp2 = scipy.linalg.block_diag(exptA, exptA)\n tmp3 = np.dot(tmp0, np.dot(tmp1, tmp2))\n X1 = tmp3[:, :d]\n U = tmp3[:, d:]\n log_pi1, G1 = log_pi(X1, *args)\n U += 0.5 * epsilon * G1\n U = orth_stiefel_project(X0, U)\n H1 = log_pi1 + .5 * np.einsum('ij,ij', U, U)\n u = np.random.rand()\n if u < math.exp(-H1 + H0):\n return X1, 1, log_pi1\n return X0, 0, log_pi0",
"def get_logCRF(train, model):\n word = train[0]\n Y = train[1]\n char_count, _ = word.shape\n # calculating forward messages\n alpha = np.zeros((char_count, model.dimY))\n first_term = np.dot(word, model.getW(model.labels))\n second_term = model._T\n for i in range(1, char_count):\n sum_term = (first_term[i-1] + alpha[i-1]) + second_term\n alpha[i] = np.apply_along_axis(logsumexp_trick, 1, sum_term) \n # getting logZ from messages\n logZ = logsumexp_trick(first_term[char_count-1]+alpha[char_count-1])\n w_term = np.sum(model.getW(Y).transpose() * word) # $\\sum_{j=1}^m {W_{yj} . x_j}$\n t_term = np.sum(model.getT(Y[:-1], Y[1:])) #$T_{yj, yj+1}\n value = -logZ + w_term + t_term\n return value",
"def get_hess(self, x: np.ndarray) -> np.ndarray:\n hess = self(x, (2,), MODE_FUN)\n return hess",
"def hessian ( calculate_cost_function, x0, epsilon=1.e-5, linear_approx=False, *args ):\n # ``calculate_cost_function`` is the cost function implementation\n # The next line calculates an approximation to the first\n # derivative\n import numpy as np\n from scipy.optimize import approx_fprime\n f1 = approx_fprime( x0, calculate_cost_function, epsilon, *args) \n \n # This is a linear approximation. Obviously much more efficient\n # if cost function is linear\n if linear_approx:\n f1 = np.matrix(f1)\n return f1.transpose() * f1 \n # Allocate space for the hessian\n n = x0.shape[0]\n hessian = np.zeros ( ( n, n ) )\n # The next loop fill in the matrix\n xx = x0\n for j in xrange( n ):\n xx0 = xx[j] # Store old value\n xx[j] = xx0 + epsilon # Perturb with finite difference\n # Recalculate the partial derivatives for this new point\n f2 = approx_fprime( x0, calculate_cost_function, epsilon, *args) \n hessian[:, j] = (f2 - f1)/epsilon # scale...\n xx[j] = xx0 # Restore initial value of x0 \n return hessian",
"def hessian(self, x1, x2, out=None):\n raise NotImplementedError",
"def report_model_F(self):\n print(\" h k qx qz q model F\")\n for a, b, c, d, e, f, g in zip(self.h, self.k, self.qx, self.qz, self.q, \n self._model(), self.F):\n print(\"{0: 1d} {1: 1d} {2: .3f} {3: .3f} {4: .3f} {5: 7.2f} {6: 7.2f}\"\n .format(a, b, c, d, e, f, g))",
"def get_heff_matrix(fpath=F_READ):\n with open(fpath, 'r') as fr:\n line = fr.readline()\n dim = int(line.split()[0])\n for i in range(dim):\n fr.readline()\n matrix = np.empty(shape=(dim, dim))\n for i in range(dim):\n ldat = fr.readline().split()\n row = np.array([float(x) for x in ldat])\n matrix[i, :] = row\n return matrix",
"def fisher(_x,_y,P):\n \n x, y = np.meshgrid(np.linspace(-1,1,100),np.linspace(-1,1,100))\n x = x.flatten()\n y = y.flatten()\n\n A, sig, x0, y0, B = P\n r = (x - x0)**2 + (y - y0)**2\n\n f = np.exp(-0.5*r/sig**2)\n d0 = f\n d1 = r/sig**3 * f\n d2 = A * (x - x0)/sig**2 * f \n d3 = A * (y - y0)/sig**2 * f\n d4 = np.ones(f.size)\n derivs = [d0, d1, d2,d3, d4]\n F = np.zeros((len(derivs), len(derivs)))\n for i in range(len(derivs)):\n for j in range(len(derivs)):\n F[i,j] = np.sum(derivs[i]*derivs[j])\n return F",
"def logistic(weights, data, targets, hyperparameters):\n \n t = np.transpose(np.repeat(np.reshape(weights[:-1], (len(weights)-1, 1)), len(data), axis = 1))\n f_e = data * t\n z_sums = np.sum(f_e, axis=1)\n y = sigmoid(z_sums +weights[-1])\n f = np.sum(np.log(1 + np.exp(-z_sums - weights[-1])) + (1 - np.transpose(targets)) * (z_sums + weights[-1]))\n df = np.sum(data * np.transpose(((-np.exp(-z_sums - weights[-1]) / (1 + np.exp(-z_sums - weights[-1]))) + (1 - np.transpose(targets)))), axis = 0)\n df = np.append(df, np.sum(np.transpose(((-np.exp(-z_sums - weights[-1]) / (1 + np.exp(-z_sums - weights[-1]))) + (1 - np.transpose(targets)))), axis = 0))\n df = np.reshape(df, ((len(df), 1)))\n\n return f, df, np.reshape(y, (len(y), 1))",
"def hsic(x,y,sigma):\n # m is the number of observations here\n m = len(x)\n gamma = 1.0/(2*sigma**2)\n\n k = rbf_kernel(x,x,gamma)\n l = rbf_kernel(y,y,gamma)\n for i in range(m):\n k[i,i] = 0\n l[i,i] = 0\n h = np.eye(m)-1.0/m\n hsic_value = (1.0/(m-1)**2)*np.trace(np.dot(np.dot(np.dot(k,h),l),h))\n return hsic_value",
"def approx_hessian_diag(f, x, epsilon):\n n = len(x)\n h = np.zeros(n)\n ei = np.zeros(n)\n fx = f(x)\n for i in range(n):\n ei[i] = epsilon\n h[i] = (f(x + ei) + f(x - ei) - 2 * fx) / (epsilon ** 2)\n ei[i] = 0\n return h",
"def get_model_init(self) -> ndarray:\n beta = np.zeros(self.fevar.size)\n gamma = np.zeros(self.revar.size)\n var = np.hstack([beta, gamma])\n grad_beta = self.gradient(var)[:self.fevar.size]\n hess_beta = self.hessian(var)[:self.fevar.size,\n :self.fevar.size]\n beta = beta - np.linalg.solve(\n hess_beta + np.identity(self.fevar.size),\n grad_beta\n )\n return np.hstack([beta, gamma])",
"def NMF(model, maxIter=100, beliefs=None, verbose=False):\n if beliefs is None: beliefs = [Factor([Xi],1.0/Xi.states) for Xi in model.X]\n \n lnZ = sum([beliefs[Xi].entropy() for Xi in model.X])\n for f in model.factors:\n m = f.log()\n for v in f.vars: m *= beliefs[v]\n lnZ += m.sum()\n if verbose: print(\"Iter 0: \"+str(lnZ))\n\n for t in xrange(1,maxIter+1): # for each iteration:\n # Update all the beliefs via coordinate ascent:\n for Xi in model.X: # for each variable, \n bNew = 0.0 # compute E[ log f ] as a function of Xi:\n for f in model.factorsWith(Xi,copy=False): # for each factor f_a, compute:\n m = f.log() # E[log f_a] = \\sum \\log f_a \\prod b_v\n for v in f.vars - [Xi]: m *= beliefs[v]\n bNew += m.marginal([Xi]) # sum them up to get E[log f]\n bNew -= bNew.max() # (numerical issues)\n bNew = bNew.exp()\n bNew /= bNew.sum() # set b(Xi) = exp( E[log f] ) / Z\n beliefs[Xi] = bNew\n #\n # Compute the lower bound on the partition function:\n # E_b [ log f ] + H(b) = \\sum_a E[log f_a] + \\sum_i H(b_i) for independent beliefs\n lnZ = sum([beliefs[Xi].entropy() for Xi in model.X])\n for f in model.factors:\n m = f.log()\n for v in f.vars: m *= beliefs[v]\n lnZ += m.sum()\n if verbose: print(\"Iter \"+str(t)+\": \"+str(lnZ))\n return lnZ,beliefs",
"def fir(timeseries, design):\r\n X = np.matrix(design)\r\n y = np.matrix(timeseries)\r\n h = np.array(linalg.pinv(X.T * X) * X.T * y.T)\r\n return h",
"def forward(self, z_t_1, h_x, phi_table, t, temp=0):\n \n# sparsemax.device = z_t_1.device\n \n z_category, z_category_sparse = self.gen_z_t_dist_now(z_t_1, h_x)\n \n# if t > self.t_thres:\n# \n# if self.use_gumbel_softmax:\n# # print(t, 'inference here')\n# # device = z_category.device\n# \n# averaged_z_t = 0\n# \n# log_prob = Variable(torch.log(z_category))\n# \n# for k in range(self.sampling_times): \n# curr_z_t = F.gumbel_softmax(log_prob, tau = 0.05)\n# \n# # curr_z_t = sparsemax(log_prob)\n# \n# \n# averaged_z_t += curr_z_t\n# \n# del curr_z_t\n# \n# # averaged_z_t = averaged_z_t.to(device)\n# \n# z_t = averaged_z_t/self.sampling_times\n# \n# # print('diff::', torch.norm(z_t - z_category))\n# # \n# # print()\n# else:\n# z_t = z_category\n# \n# else:\n z_t = z_category\n \n if len(z_t.shape) == 2:\n phi_z = torch.mm(z_t, torch.t(phi_table))\n else:\n \n phi_table_full = (torch.t(phi_table)).view(1, phi_table.shape[1], phi_table.shape[0])\n \n phi_table_full = phi_table_full.repeat(phi_table.shape[1], 1, 1)\n \n phi_z = torch.bmm(z_t, phi_table_full)\n# mu = self.h_to_mu(h_combined)\n# logvar = self.h_to_logvar(h_combined)\n# std = torch.exp(0.5 * logvar) \n# epsilon = torch.randn(z_t_1.size(), device=z_t_1.device) # sampling z by re-parameterization\n# z_t = epsilon * std + mu # [batch_sz x z_sz]\n return z_t, z_category, phi_z, z_category_sparse",
"def _h(W):\r\n # E = slin.expm(W * W)\r\n # h = np.trace(E) - d\r\n M = np.eye(d) + W * W / d\r\n E = np.linalg.matrix_power(M, d - 1)\r\n h = (E.T * M).sum() - d\r\n G_h = E.T * W * 2\r\n return h, G_h",
"def get_gradients_hessian(self, loss, params):\n params = nest.flatten(params)\n with backend.get_graph().as_default(), backend.name_scope(self._name +\n \"/gradients\"):\n grads = gradients.gradients(loss, params)\n for grad, param in zip(grads, params):\n if grad is None:\n raise ValueError(\"Variable {} has `None` for gradient. \"\n \"Please make sure that all of your ops have a \"\n \"gradient defined (i.e. are differentiable). \"\n \"Common ops without gradient: \"\n \"K.argmax, K.round, K.eval.\".format(param))\n\n # WARNING: for now we do not support gradient clip\n # grads = self._clip_gradients(grads)\n\n v = [np.random.uniform(0, 1, size = p.shape) for p in params]\n for vi in v:\n vi[ vi < 0.5] = -1 \n vi[ vi >= 0.5] = 1 \n v = [tf.convert_to_tensor(vi, dtype = tf.dtypes.float32) for vi in v]\n\n vprod = tf.reduce_sum([ tf.reduce_sum(vi * grad) for vi, grad in zip(v, grads)])\n\n Hv = gradients.gradients(vprod, params)\n\n Hd = [ tf.abs(Hvi * vi) for Hvi, vi in zip(Hv, v)]\n\n return grads, Hd",
"def fwd_model(Ti_samples,To_samples, dw_samples, kw_samples,hi_samples,ho_samples,TA_samples):\n\t#Determine number of samples (totquat)\n\ttotquat=len(Ti_samples)\n\t# List to store values of Q (assuming no radiative heat transfer) calculated from\n\t# the random samples of the parameters\n\tQ_samples_4PCE=[]\n\t# List to store values of Q assuming radiative heat transfer occurs\n\t#Q_r_samples_4PCE=[]\n\t# Calculate values of heat flux Q (assuming no radiative heat transfer)\n\t# for the different sample values and append to the list\n\tfor i in range(totquat):\n\t\t(Q,T1,T2)=compute_heat_flux(Ti_samples[i], To_samples[i], dw_samples[i],\\\n\t\t\tkw_samples[i], hi_samples[i], ho_samples[i])\n\t\tQ_samples_4PCE.append(Q)\n\t\t# Calculate values of heat flux Q assuming radiative heat transfer to atmosphere and append to list\n\t\t# For the required estimates of Q,T1, and T2 needed to solve the nonlinear system,\n\t\t# we use the values obtained by solving the system assuming no radiative heat transfer\n\t\t\"\"\"Q2=r_heat_flux(Ti_samples[i], To_samples[i], dw_samples[i], kw_samples[i],\\\n\t\t\thi_samples[i], ho_samples[i], TA_samples[i], (Q,T1,T2))\n\t\tQ_r_samples_4PCE.append(Q2)\n\t# Convert Q_r_samples_4PCE to numpy array\n\tQ_evals = np.array(Q_r_samples_4PCE)\n\treturn Q_evals\"\"\"\n\t\tConvert Q_samples_4PCE to numpy array\n\t\tQ_evals = np.array(Q_samples_4PCE)\n\t\treturn Q_evals\"\"\"\n\n\ndef KDE(fcn_evals):\n\t\"\"\"\n\tPerforms kernel density estimation\n\tInput:\n\t\tfcn_evals: numpy array of evaluations of the forward model (values of heat flux Q)\n\tOutput:\n\t\txpts_pce: numpy array of points at which the PDF is estimated.\n\t\tPDF_data_pce: numpy array of estimated PDF values.\n\t\"\"\"\n\t# Perform KDE on fcn_evals\n\tkern_pce=stats.kde.gaussian_kde(fcn_evals)\n\t# Generate points at which to evaluate the PDF\n\txpts_pce=np.linspace(fcn_evals.min(),fcn_evals.max(),200)\n\t# Evaluate the estimated PDF at these points\n\tPDF_data_pce=kern_pce(xpts_pce)\n\treturn xpts_pce, PDF_data_pce",
"def FAP_model(z, Ni):\n return 1 - (1-np.exp(-z))**Ni",
"def numerical_hessian(\n func: Callable | None, params: Iterable[zfit.Parameter], hessian=None\n) -> tf.Tensor:\n from ..core.parameter import assign_values\n\n params = convert_to_container(params)\n\n def wrapped_func(param_values):\n assign_values(params, param_values)\n value = func()\n if hasattr(value, \"numpy\"):\n value = value.numpy()\n return value\n\n param_vals = znp.stack(params)\n original_vals = [param.value() for param in params]\n\n if hessian == \"diag\":\n hesse_func = numdifftools.Hessdiag(\n wrapped_func,\n order=2,\n # TODO: maybe add step to remove numerical problems?\n base_step=1e-4,\n )\n else:\n hesse_func = numdifftools.Hessian(\n wrapped_func,\n order=2,\n base_step=1e-4,\n )\n if tf.executing_eagerly():\n computed_hessian = convert_to_tensor(hesse_func(param_vals))\n else:\n computed_hessian = tf.numpy_function(\n hesse_func, inp=[param_vals], Tout=tf.float64\n )\n n_params = param_vals.shape[0]\n if hessian == \"diag\":\n computed_hessian.set_shape((n_params,))\n else:\n computed_hessian.set_shape((n_params, n_params))\n\n assign_values(params, original_vals)\n return computed_hessian",
"def forward(log_emlik, log_startprob, log_transmat):\n alpha = np.zeros(log_emlik.shape)\n\n alpha[0,:] = log_startprob[0:-1] + log_emlik[0,:]\n\n sum_row = 0;\n log_transmat = log_transmat[0:-1];\n\n\n for frame in range(1,len(log_emlik)):\n\n for state in range(0,len(log_emlik[0])):\n\n alpha[frame,state] = logsumexp(alpha[frame-1,:] + log_transmat[:,state]) + log_emlik[frame,state]\n #print(alpha[frame,state])\n #print(alpha[frame,:])\n\n return alpha",
"def test_hessian(self):\n # almost spherical case\n x = 1.\n y = 1.\n e1, e2 = 5e-5, 0.\n sigma = 1.\n amp = 2.\n\n f_xx, f_xy, f_yx, f_yy = self.gaussian_kappa_ellipse.hessian(x, y, amp,\n sigma, e1, e2)\n f_xx_sphere, f_xy_sphere, f_yx_sphere, f_yy_sphere = self.gaussian_kappa.hessian(x,\n y, amp=amp, sigma=sigma)\n npt.assert_almost_equal(f_xx, f_xx_sphere, decimal=4)\n npt.assert_almost_equal(f_yy, f_yy_sphere, decimal=4)\n npt.assert_almost_equal(f_xy, f_xy_sphere, decimal=4)\n npt.assert_almost_equal(f_yx, f_xy, decimal=8)\n\n # spherical case\n e1, e2 = 0., 0.\n f_xx, f_xy, f_yx, f_yy = self.gaussian_kappa_ellipse.hessian(x, y, amp, sigma, e1, e2)\n\n npt.assert_almost_equal(f_xx, f_xx_sphere, decimal=4)\n npt.assert_almost_equal(f_yy, f_yy_sphere, decimal=4)\n npt.assert_almost_equal(f_xy, f_xy_sphere, decimal=4)",
"def fisher(probs):\r\n stat = -2 * log(array(probs)).sum()\r\n if isnan(stat):\r\n return nan\r\n else:\r\n try:\r\n return chi_high(stat, 2 * len(probs))\r\n except OverflowError as e:\r\n return nan",
"def MH(self):\n\n #return math.log10(self.glb[user_params_index[\"Zs\"]]*constants.solar_x/(self.glb[user_params_index[\"Xs\"]]*constants.solar_z))\n return math.log10(self.glb[iz0]*constants.solar_x/(self.glb[ix0]*constants.solar_z))",
"def num_hessian(x0, cost_function, epsilon = 1.e-5, linear_approx = False, *args):\n # The next line calculates an approximation to the first derivative\n f1 = sp.optimize.approx_fprime(x0, cost_function, *args) \n # This is a linear approximation, efficient if cost function is linear\n if linear_approx:\n f1 = sp.matrix(f1)\n return f1.transpose() * f1 \n # Allocate space for the hessian\n n = x0.shape[0]\n hessian = sp.zeros ((n, n))\n # The next loop fill in the matrix\n xx = x0\n for j in xrange(n):\n xx0 = xx[j] # Store old value\n xx[j] = xx0 + epsilon # Perturb with finite difference\n # Recalculate the partial derivatives for this new point\n f2 = sp.optimize.approx_fprime(x0, cost_function, *args) \n hessian[:, j] = (f2 - f1)/epsilon # scale...\n xx[j] = xx0 # Restore initial value of x0 \n return hessian",
"def XavierInit(self):\n\n raw_std = (2 / (self.num_input + self.num_output))**0.5\n if 'relu' == self.act_function:\n init_std = raw_std * (2**0.5)\n elif 'sigmoid' == self.act_function:\n init_std = raw_std\n else:\n init_std = raw_std # * 4\n\n self.W = np.random.normal(0, init_std, (self.num_input, self.num_output))\n self.b = np.random.normal(0, init_std, (1, self.num_output))\n self.v_W = 0\n self.v_b = 0",
"def heatbasic(u0,T,K):\n import numpy as np\n N = len(u0)-1\n\n dx = 1.0/N;\n dt = 0.1/K;\n x = np.linspace(0,1,N+1)\n\n u = np.copy(u0)\n\n u_history = [u]\n\n A = np.zeros( (N+1,N+1) )\n for i in range(1,N):\n A[i,i-1] = 1;\n A[i,i] = -2;\n A[i,i+1] = 1\n A = A * dt/(dx*dx)\n A = A + np.eye(N+1)\n A[0,0] = 0.0;\n A[N,N] = 0.0;\n\n for k in range(K):\n u = np.dot(A,u)\n u_history.append(u)\n\n return u_history",
"def standard_error_est(Y, nodes_num, betas, sigma):\n\n ## Generating nodes\n N, I = np.shape(Y)\n nodes, weights = np.polynomial.hermite.hermgauss(nodes_num)\n weights /= np.sqrt(np.pi)\n nodes *= np.sqrt(2)\n\n ## Calculating H functions in equation (12)\n h_betaj = H(nodes, fct=\"hessian_betaj\", sigma=sigma, betas=betas) # MxI\n h_betai_betaj = H(nodes, fct=\"hessian_betai_betaj\", sigma=sigma, betas=betas) # MxI\n h_sigma = H(nodes, fct=\"hessian_sigma\", sigma=sigma, betas=betas) # scalar\n h_sigma_betaj = H(nodes, fct=\"hessian_sigma_betaj\", sigma=sigma, betas=betas) # MxI\n\n ## Calculating Hessian matrix based on equation (14) (15) (16) and (17)\n marginal = (f_y(Y, betas, sigma, nodes) * H(nodes, fct=\"one\")).dot(weights)\n\n # h_sigma_mat\n cache = f_y(Y, betas, sigma, nodes).dot(np.diag(weights).dot(h_sigma))\n h_sigma_scalar = np.diag(1 / marginal).dot(cache).sum() # scalar\n # print(h_sigma_scalar)\n\n # h_betaj_mat\n cache = f_y(Y, betas, sigma, nodes).dot(np.diag(weights).dot(h_betaj))\n h_betaj_vec = np.diag(1 / marginal).dot(cache).sum(axis=0) # Ix1 vector\n # print(h_betaj_vec)\n\n # h_sigma_betaj\n cache = f_y(Y, betas, sigma, nodes).dot(np.diag(weights).dot(h_sigma_betaj))\n h_sigma_betaj_vec = np.diag(1 / marginal).dot(cache).sum(axis=0) # Ix1 vector\n # print(h_sigma_betaj_vec)\n\n hessian = np.zeros((I + 1, I + 1))\n hessian[:I, -1] = hessian[-1, :I] = h_sigma_betaj_vec\n hessian[I, I] = h_sigma_scalar\n for i in range(I):\n hessian[i, i] = h_betaj_vec[i]\n\n ## Calculating standard error of parameters\n std_est = np.sqrt([np.linalg.inv(hessian)[i, i] for i in range(I + 1)])\n\n return {\"Hessian matrix\": hessian, \"std_est\": std_est}",
"def fgsm(model, x, y, epsilon=0.1, label_leaking=True):\n delta = torch.zeros_like(x, requires_grad=True)\n logits = model(x + delta)\n # Use the model's output instead of the true labels to avoid label leaking at training time.\n if not label_leaking:\n y = logits.max(dim=1)[1]\n loss = nn.CrossEntropyLoss()(logits, y)\n loss.backward()\n return epsilon * delta.grad.detach().sign()",
"def SE(H, W):\n\n no_real, N, N, K, M = H.shape\n all_powers = np.swapaxes(np.swapaxes(H, 0, 1) @ hermitian(W), 0, 1)\n all_powers = np.abs(all_powers) ** 2\n\n\n\n # (no_real, N, N, K, K)\n # (no_real, n_t, n, k, k_neighbor)\n # the power coming from BS n_t to User k in BS n, using the\n # precoding of BS n_t to user k_neighbor in BS n1\n\n\n p_sig = np.zeros((no_real, N, K))\n p_int = np.zeros((no_real, N, K, N))\n sinr = np.zeros_like(p_sig)\n\n\n for r in range(no_real):\n for n in range(N):\n for k in range(K):\n p_sig[r, n, k] = all_powers[r, n, n, k, k]\n for n_t in range(N):\n p_int[r, n, k, n_t] = all_powers[r, n_t, n, k].sum()\n if n_t == n:\n p_int[r, n, k, n_t] -= p_sig[r,n,k]\n sinr = p_sig / ((p_int).sum(axis=-1) + 1)\n return np.log2(1 + sinr), p_sig, p_int",
"def information_matrix(self):\n return self._cov.inv()",
"def log_density_hessian(\n self,\n theta_unc: FloatArray,\n *,\n propto: bool = True,\n jacobian: bool = True,\n out_grad: Optional[FloatArray] = None,\n out_hess: Optional[FloatArray] = None,\n ) -> Tuple[float, FloatArray, FloatArray]:\n dims = self.param_unc_num()\n if out_grad is None:\n out_grad = np.zeros(shape=dims)\n elif out_grad.shape != (dims,):\n raise ValueError(f\"out_grad size = {out_grad.size} != params size = {dims}\")\n hess_size = dims * dims\n if out_hess is None:\n out_hess = np.zeros(shape=hess_size)\n elif out_hess.shape != (dims, dims):\n raise ValueError(\n f\"out_hess size = {out_hess.size} != params size^2 = {hess_size}\"\n )\n lp = ctypes.pointer(ctypes.c_double())\n err = ctypes.pointer(ctypes.c_char_p())\n rc = self._log_density_hessian(\n self.model,\n int(propto),\n int(jacobian),\n theta_unc,\n lp,\n out_grad,\n out_hess,\n err,\n )\n if rc:\n raise self._handle_error(err.contents, \"log_density_hessian\")\n out_hess = out_hess.reshape(dims, dims)\n return lp.contents.value, out_grad, out_hess",
"def LogisticRegression_sklearn(X_train, X_test, y_train, y_test):\n\n\tlog_reg = LogisticRegression()\n\tlog_reg.fit(X_train, y_train.ravel())\n\tyPred =log_reg.predict(X_test)\n\n\t#Printing metrics of the logistic regression model\n\tprint('Accuracy:', metrics.accuracy_score(y_test, yPred))\n\tprint('Precision:', metrics.precision_score(y_test, yPred))\n\tprint('Recall', metrics.recall_score(y_test, yPred))\n\n\t#confusion matrix\n\n\tconfusionMatrix = matrix.confusion_matrix(y_test, yPred)\n\tsb.heatmap(pd.DataFrame(confusionMatrix), annot= True, fmt='g')\n\tplt.title('Confustion matrix with default value 1')\n\tplt.ylabel('True values')\n\tplt.xlabel('Predicted values')\n\tplt.show()",
"def hf(self, x, X):\n if type(x) == list:\n x = np.array(x)\n return self.model.hf(x, X, *self.params)"
] | [
"0.693216",
"0.68775684",
"0.6742063",
"0.6737252",
"0.64117485",
"0.6381536",
"0.6330648",
"0.6237004",
"0.61958313",
"0.6183961",
"0.61752",
"0.61537415",
"0.61301404",
"0.61163014",
"0.6060606",
"0.6042777",
"0.59978664",
"0.5942022",
"0.58947915",
"0.5877783",
"0.5867141",
"0.58252275",
"0.57694966",
"0.5736838",
"0.5725413",
"0.5715866",
"0.5715866",
"0.5694602",
"0.5676592",
"0.5664787",
"0.56387436",
"0.56226075",
"0.559898",
"0.55952555",
"0.55879337",
"0.55806464",
"0.5570981",
"0.550058",
"0.5480701",
"0.5477955",
"0.54466975",
"0.54411",
"0.5406682",
"0.5395035",
"0.53933644",
"0.5386933",
"0.5384075",
"0.537581",
"0.5367633",
"0.53630626",
"0.5354564",
"0.5351312",
"0.53456944",
"0.5342781",
"0.53382343",
"0.53250563",
"0.53017884",
"0.5295279",
"0.52950627",
"0.5294239",
"0.5270045",
"0.52624184",
"0.5260358",
"0.5256029",
"0.52482796",
"0.5245281",
"0.5244752",
"0.5223894",
"0.5220322",
"0.520468",
"0.5203821",
"0.51993865",
"0.5192552",
"0.5189342",
"0.5172176",
"0.51415867",
"0.5132168",
"0.5116417",
"0.51001036",
"0.50953114",
"0.5093133",
"0.508806",
"0.5084835",
"0.5074076",
"0.5065582",
"0.5061396",
"0.5055352",
"0.50551856",
"0.50469387",
"0.50396496",
"0.50372237",
"0.5036439",
"0.50161844",
"0.5006767",
"0.5003276",
"0.5001208",
"0.49920774",
"0.49887827",
"0.49866557",
"0.49731216",
"0.49706557"
] | 0.0 | -1 |
The Hessian matrix of the model. The default implementation uses a numerical derivative. | def hessian(self, params, *args, **kwargs):
if self._use_approx_cs:
return approx_hess_cs(params, self.loglike,
args=args, kwargs=kwargs)
else:
return approx_hess(params, self.loglike,
args=args, kwargs=kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_hessian(self):\n if not self.sparse:\n hess = numpy.dot(self.jacobian_T, self.jacobian)\n else:\n hess = self.jacobian_T*self.jacobian\n return hess",
"def get_hessian(self):\n return self.tc.hessian_func(\n self.pf.XS[:, :, 0].transpose(),\n self.pf.XS[:, :, 1].transpose(),\n self.pf.WS[:].transpose())",
"def calc_hessian_at(self, x: np.ndarray) -> np.ndarray:\n\n if self.hessian_f:\n # if the problem has knowledge about the hessian, use it directly without approximation\n return self.hessian_f(x)\n\n return hessian_approximation(self.f, x)",
"def _hessian(self):\n log_g = np.log(self._gv())\n log_f = np.log(self._fv())\n h_inf = np.mean((1 - log_g + log_f) / (self.y - self.err_inf) ** 2)\n return h_inf",
"def hessian(x):\n x_grad = np.gradient(x) \n hessian = np.empty((x.ndim, x.ndim) + x.shape, dtype=x.dtype) \n for k, grad_k in enumerate(x_grad):\n \"\"\"\n iterate over dimensions\n apply gradient again to every component of the first derivative.\n \"\"\"\n tmp_grad = np.gradient(grad_k) \n for l, grad_kl in enumerate(tmp_grad):\n hessian[k, l, :, :] = grad_kl\n return hessian",
"def hessian(self, x):\n h = self._hess(\n time_series=self.observed_data,\n a=x[0],\n b=x[1],\n c=x[2],\n sigma=self.sigma\n )\n return h",
"def hessian(self, params):\n\n if self.use_sqrt:\n return self.hessian_sqrt(params)\n else:\n return self.hessian_full(params)",
"def calculate_hessian(model, data, step_size):\n hessian = pd.DataFrame(0, index = np.arange(data.shape[0]), columns=pd.MultiIndex.from_product([model.output_names, model.perturbation_feature_pairs + model.feature_names], names=['model.output_names','model.feature_pairs']))\n for output_name in model.output_names:\n hessian_calculation_helpers = create_hessian_calculation_columns(model, output_name)\n mixed_derivative = (data.loc[:, hessian_calculation_helpers[0]].values - data.loc[:, hessian_calculation_helpers[1]].values - data.loc[:, hessian_calculation_helpers[2]].values + data.loc[:, hessian_calculation_helpers[3]].values) / (step_size * step_size)\n mixed_derivative *= np.sign(data.loc[:, hessian_calculation_helpers[1]].values + data.loc[:, hessian_calculation_helpers[2]].values - 2 * data.loc[:, hessian_calculation_helpers[0]].values)\n hessian.loc[:, zip([output_name] * len(model.perturbation_feature_pairs), model.perturbation_feature_pairs)] = mixed_derivative\n hessian.loc[:, zip([output_name] * len(model.feature_names), model.feature_names)] = np.array([(data.loc[:, (output_name,f)] - data.loc[:, (output_name,'core')]) / (step_size) for f in model.feature_names]).T\n return hessian",
"def hessian(self):\n\n with open('lig.fchk', 'r') as fchk:\n\n lines = fchk.readlines()\n hessian_list = []\n\n for count, line in enumerate(lines):\n if line.startswith('Cartesian Force Constants'):\n start_pos = count + 1\n if line.startswith('Dipole Moment'):\n end_pos = count\n\n if not start_pos and end_pos:\n raise EOFError('Cannot locate Hessian matrix in lig.fchk file.')\n\n for line in lines[start_pos: end_pos]:\n # Extend the list with the converted floats from the file, splitting on spaces and removing '\\n' tags.\n hessian_list.extend([float(num) * 0.529 for num in line.strip('\\n').split()])\n\n hess_size = 3 * len(self.molecule.molecule['input'])\n\n hessian = zeros((hess_size, hess_size))\n\n # Rewrite Hessian to full, symmetric 3N * 3N matrix rather than list with just the non-repeated values.\n m = 0\n for i in range(hess_size):\n for j in range(i + 1):\n hessian[i, j] = hessian_list[m]\n hessian[j, i] = hessian_list[m]\n m += 1\n\n check_symmetry(hessian)\n\n return hessian",
"def hessian(beta, X):\n w = sigmoid(np.dot(X, beta))\n w_vector = w * (1-w)\n \n return np.dot(X.T, X*w_vector)",
"def compute_hessian(self, dw, trn_X, trn_y, epsilon: float = 0.01):\n norm = torch.cat([w.view(-1) for w in dw]).norm()\n eps = epsilon / norm\n\n dalpha_pos = self.finite_difference(dw, trn_X, trn_y, eps, wrt='alpha')\n dalpha_neg = self.finite_difference(dw, trn_X, trn_y, -eps, wrt='alpha')\n hessian = [(p - n) / (2. * eps) for p, n in zip(dalpha_pos, dalpha_neg)]\n return hessian",
"def hessian(self):\n\n hess_size = 3 * len(self.molecule.molecule['input'])\n\n # output.dat is the psi4 output file.\n with open('output.dat', 'r') as file:\n\n lines = file.readlines()\n\n for count, line in enumerate(lines):\n if '## Hessian' in line or '## New Matrix (Symmetry' in line:\n # Set the start of the hessian to the row of the first value.\n hess_start = count + 5\n break\n else:\n raise EOFError('Cannot locate Hessian matrix in output.dat file.')\n\n # Check if the hessian continues over onto more lines (i.e. if hess_size is not divisible by 5)\n extra = 0 if hess_size % 5 == 0 else 1\n\n # hess_length: # of cols * length of each col\n # + # of cols - 1 * #blank lines per row of hess_vals\n # + # blank lines per row of hess_vals if the hess_size continues over onto more lines.\n hess_length = (hess_size // 5) * hess_size + (hess_size // 5 - 1) * 3 + extra * (3 + hess_size)\n\n hess_end = hess_start + hess_length\n\n hess_vals = []\n\n for file_line in lines[hess_start:hess_end]:\n # Compile lists of the 5 Hessian floats for each row.\n # Number of floats in last row may be less than 5.\n # Only the actual floats are added, not the separating numbers.\n row_vals = [float(val) for val in file_line.split() if len(val) > 5]\n hess_vals.append(row_vals)\n\n # Remove blank list entries\n hess_vals = [elem for elem in hess_vals if elem]\n\n reshaped = []\n\n # Convert from list of (lists, length 5) to 2d array of size hess_size x hess_size\n for old_row in range(hess_size):\n new_row = []\n for col_block in range(hess_size // 5 + extra):\n new_row += hess_vals[old_row + col_block * hess_size]\n\n reshaped.append(new_row)\n\n hess_matrix = array(reshaped)\n\n # Cache the unit conversion.\n conversion = 627.509391 / (0.529 ** 2)\n hess_matrix *= conversion\n\n check_symmetry(hess_matrix)\n\n return hess_matrix",
"def hessian(self, params, transformed=True):\n params = np.array(params, ndmin=1)\n\n return approx_hess_cs(params, self.loglike)",
"def _getHessian(self):\n assert self.init, 'GP not initialised'\n assert self.fast is False, 'Not supported for fast implementation'\n\n if self.cache['Hessian'] is None:\n ParamMask = self.gp.getParamMask()['covar']\n std = sp.zeros(ParamMask.sum())\n H = self.gp.LMLhess_covar()\n It = (ParamMask[:, 0] == 1)\n self.cache['Hessian'] = H[It, :][:, It]\n\n return self.cache['Hessian']",
"def hessian(f, s, p, dx=1e-6, gmix=False, k =['All']):\n import numpy\n N = (p.m['n'] - 1)\n H = numpy.zeros(shape=(N,N))\n for m in range(1, N + 1):\n for z in range(1, N + 1):\n H[m - 1, z - 1] = FD(f, s, p, 2, z, m, dx, gmix, k)\n \n return H",
"def hessian(self, x, y, obj_factor):\n pass",
"def calculate_hessian(y, tx, w):\n txw = tx.dot(w)\n diag = sigmoid(txw)*(np.ones(txw.shape)-sigmoid(txw))\n return np.matmul(np.multiply(tx,diag).T,tx)",
"def calculate_hessian(self, finite_step):\n\n # Create the OpenMM coords list from the qm coordinates and convert to nm\n input_coords = self.molecule.coords['qm'].flatten() * constants.ANGS_TO_NM\n\n # We get each hessian element from = [E(dx + dy) + E(-dx - dy) - E(dx - dy) - E(-dx + dy)] / 4 dx dy\n hessian = np.zeros((3 * len(self.molecule.atoms), 3 * len(self.molecule.atoms)))\n\n for i in range(3 * len(self.molecule.atoms)):\n for j in range(i, 3 * len(self.molecule.atoms)):\n # Mutate the atomic coords\n # Do less energy evaluations on the diagonal of the matrix\n if i == j:\n coords = deepcopy(input_coords)\n coords[i] += 2 * finite_step\n e1 = self.get_energy(self.format_coords(coords))\n coords = deepcopy(input_coords)\n coords[i] -= 2 * finite_step\n e2 = self.get_energy(self.format_coords(coords))\n hessian[i, j] = (e1 + e2) / (4 * finite_step**2 * self.molecule.atoms[i // 3].atomic_mass)\n else:\n coords = deepcopy(input_coords)\n coords[i] += finite_step\n coords[j] += finite_step\n e1 = self.get_energy(self.format_coords(coords))\n coords = deepcopy(input_coords)\n coords[i] -= finite_step\n coords[j] -= finite_step\n e2 = self.get_energy(self.format_coords(coords))\n coords = deepcopy(input_coords)\n coords[i] += finite_step\n coords[j] -= finite_step\n e3 = self.get_energy(self.format_coords(coords))\n coords = deepcopy(input_coords)\n coords[i] -= finite_step\n coords[j] += finite_step\n e4 = self.get_energy(self.format_coords(coords))\n hessian[i, j] = (e1 + e2 - e3 - e4) / (4 * finite_step ** 2 * self.molecule.atoms[i // 3].atomic_mass)\n\n # Now make the matrix symmetric\n sym_hessian = hessian + hessian.T - np.diag(hessian.diagonal())\n return sym_hessian",
"def hessian ( calculate_cost_function, x0, epsilon=1.e-5, linear_approx=False, *args ):\n # ``calculate_cost_function`` is the cost function implementation\n # The next line calculates an approximation to the first\n # derivative\n import numpy as np\n from scipy.optimize import approx_fprime\n f1 = approx_fprime( x0, calculate_cost_function, epsilon, *args) \n \n # This is a linear approximation. Obviously much more efficient\n # if cost function is linear\n if linear_approx:\n f1 = np.matrix(f1)\n return f1.transpose() * f1 \n # Allocate space for the hessian\n n = x0.shape[0]\n hessian = np.zeros ( ( n, n ) )\n # The next loop fill in the matrix\n xx = x0\n for j in xrange( n ):\n xx0 = xx[j] # Store old value\n xx[j] = xx0 + epsilon # Perturb with finite difference\n # Recalculate the partial derivatives for this new point\n f2 = approx_fprime( x0, calculate_cost_function, epsilon, *args) \n hessian[:, j] = (f2 - f1)/epsilon # scale...\n xx[j] = xx0 # Restore initial value of x0 \n return hessian",
"def hessian(self, x1, x2, out=None):\n raise NotImplementedError",
"def _update_hessian_gradient_and_energy(self) -> None:\n assert self._species and self._coords is not None and self._method\n should_calc_hessian = True\n\n if (\n _energy_method_string(self._species)\n == method_string(self._method, self._method.keywords.hess)\n and self._species.hessian is not None\n ):\n logger.info(\n \"Have a calculated the energy at the same level of \"\n \"theory as this optimisation and a present Hessian. \"\n \"Not calculating a new Hessian\"\n )\n should_calc_hessian = False\n\n self._update_gradient_and_energy()\n\n if should_calc_hessian:\n self._update_hessian()\n else:\n self._coords.update_h_from_cart_h(\n self._species.hessian.to(\"Ha Å^-2\") # type: ignore\n )\n return None",
"def get_hessian(phi, pred, t, dot_product, reg= 1, regression= \"logistic\"):\n R = np.eye(pred.shape[0])\n if regression == \"logistic\":\n for i in range(pred.shape[0]):\n R[i,i] = pred[i,0] * (1- pred[i,0])\n elif regression == \"probit\":\n for i in range(pred.shape[0]):\n y_n = pred[i,0]\n t_n = t[i,0] \n dotp = dot_product[i, 0]\n pdf = norm.pdf(dotp)\n\n term1 = 1/ (y_n * (1- y_n) + TOLERANCE)\n term2 = (y_n - t_n)/(y_n**2 * (1- y_n) + TOLERANCE)\n term3 = (y_n - t_n)/((1- y_n)**2 * y_n + TOLERANCE)\n term4 = (y_n - t_n)* dotp/(y_n * (1- y_n) * pdf + TOLERANCE)\n\n R[i,i] = (term1 - term2 + term3 - term4)*(pdf**2)\n\n # Add regularization\t\t\t\n hessian = np.matmul(np.matmul(phi.T, R), phi) + np.eye(phi.shape[1])/reg\n return hessian",
"def grid_hessian(self, gridaxes):\n assert np.isscalar(self.dim), 'Hessian only implemented for scalar and vector functions'\n assert len(gridaxes) == self.sdim, \"Input has wrong dimension\"\n colloc = [collocation_derivs(self.kvs[i], gridaxes[i], derivs=2) for i in range(self.sdim)]\n\n d = self.sdim\n n_hess = ((d+1)*d) // 2 # number of components in symmetric part of Hessian\n N = tuple(len(g) for g in gridaxes) # shape of tensor grid\n\n # determine size of output array\n if self.dim == 1:\n out_shape = N + (n_hess,)\n else:\n out_shape = N + (self.dim, n_hess)\n hess = np.empty(out_shape, dtype=self.coeffs.dtype)\n\n i_hess = 0\n for i in reversed(range(self.sdim)): # x-component is the last one\n for j in reversed(range(i+1)):\n # compute vector of derivative indices\n D = self.sdim * [0]\n D[i] += 1\n D[j] += 1\n ops = [colloc[k][D[k]] for k in range(self.sdim)] # derivatives in directions i,j\n\n if self.dim == 1: # scalar function\n hess[..., i_hess] = apply_tprod(ops, self.coeffs) # D_i D_j (self)\n else: # vector function\n for k in range(self.dim):\n hess[..., k, i_hess] = apply_tprod(ops, self.coeffs[..., k]) # D_i D_j (self[k])\n i_hess += 1\n return hess # shape: shape(grid) x self.dim x n_hess",
"def hessian(f, x, s=_DEFAULT_STEP):\n x = np.asarray(x)\n n = len(x)\n e = s * np.eye(n)\n\n forw1 = np.zeros(n)\n forw2 = np.zeros((n, n))\n for i in range(n):\n forw1[i] = f(x + e[i])\n for j in range(i, n):\n forw2[i, j] = forw2[j, i] = f(x + e[i] + e[j])\n\n H = (forw2 - _colvec(forw1) - _rowvec(forw1) + f(x)) / s**2\n return H",
"def hessian(self,x=None,y=None,save=True):\n\n\t\tif (x is not None) and (y is not None):\n\n\t\t\tassert x.shape==y.shape,\"x and y must have the same shape!\"\n\n\t\t\t#x coordinates\n\t\t\tif type(x)==u.quantity.Quantity:\n\t\t\t\n\t\t\t\tassert x.unit.physical_type==self.side_angle.unit.physical_type\n\t\t\t\tj = np.mod(((x / self.resolution).decompose().value).astype(np.int32),self.data.shape[1])\n\n\t\t\telse:\n\n\t\t\t\tj = np.mod((x / self.resolution.to(u.rad).value).astype(np.int32),self.data.shape[1])\t\n\n\t\t\t#y coordinates\n\t\t\tif type(y)==u.quantity.Quantity:\n\t\t\t\n\t\t\t\tassert y.unit.physical_type==self.side_angle.unit.physical_type\n\t\t\t\ti = np.mod(((y / self.resolution).decompose().value).astype(np.int32),self.data.shape[0])\n\n\t\t\telse:\n\n\t\t\t\ti = np.mod((y / self.resolution.to(u.rad).value).astype(np.int32),self.data.shape[0])\n\n\t\telse:\n\t\t\ti = None\n\t\t\tj = None\n\n\t\t#Call the C backend\n\t\thessian_xx,hessian_yy,hessian_xy = _topology.hessian(self.data,j,i)\n\t\t\n\t\t#Return the hessian\n\t\tif (x is not None) and (y is not None):\n\n\t\t\treturn hessian_xx.reshape(x.shape),hessian_yy.reshape(x.shape),hessian_xy.reshape(x.shape)\n\n\t\telse:\n\n\t\t\tif save:\n\t\t\t\tself.hessian_xx = hessian_xx\n\t\t\t\tself.hessian_yy = hessian_yy\n\t\t\t\tself.hessian_xy = hessian_xy\n\n\t\t\treturn hessian_xx,hessian_yy,hessian_xy",
"def approx_hessian(f, x, epsilon):\n n = len(x)\n H = np.zeros((n, n))\n ei = np.zeros(n)\n for i in range(n):\n ei[i] = .5 * epsilon\n g1 = approx_gradient(f, x + ei, epsilon)\n g2 = approx_gradient(f, x - ei, epsilon)\n H[i, :] = (g1 - g2) / epsilon\n ei[i] = 0\n return H",
"def get_Hessian(self, output_name=None): \n \n\n #return array([self.hessian[in1][in2][output_name] for (in1,in2) in product(self.param_names, self.param_names)])\n return array([self.hessian[in1][in2][output_name] for (in1,in2) in product(self.param_names, self.param_names)])",
"def hessian(poly):\n return gradient(gradient(poly))",
"def calc_hessian(t_Wbt, t_dlogFPdA, t_FP):\n\n tmp = t_Wbt.dimshuffle(0, 'x', 1) * t_FP # b, j, t\n tmp1 = tmp.dimshuffle(0, 'x', 1, 2) * t_dlogFPdA\n\n return T.dot(\n tmp1.dimshuffle(1, 0, 2, 3).reshape((self.n_l, -1)),\n t_dlogFPdA.dimshuffle(1, 0, 2, 3).reshape((self.n_l, -1)).T\n )",
"def gradient_and_hessian(self, y, params, natural_gradient=True):\n\n (mu,) = self.predict(params)\n\n grad = np.zeros(shape=(len(y), 1))\n grad[:, 0] = mu - y\n\n if natural_gradient:\n fisher_matrix = np.zeros(shape=(len(y), 1, 1))\n fisher_matrix[:, 0, 0] = mu\n\n grad = np.linalg.solve(fisher_matrix, grad)\n\n hess = np.ones(shape=(len(y), 1)) # we set the hessian constant\n else:\n hess = mu\n\n return grad, hess",
"def likelihood_hessian(self, sign_switch, hyperparam):\n\n self.timer.tic()\n\n if numpy.isscalar(hyperparam):\n hyperparam_ = numpy.array([hyperparam], dtype=float)\n else:\n hyperparam_ = hyperparam\n\n # Check if Hessian is already computed for an identical hyperparam\n if (self.ell_hessian_hyperparam is not None) and \\\n (self.ell_hessian is not None) and \\\n (hyperparam_.size == self.ell_hessian_hyperparam.size) and \\\n numpy.allclose(hyperparam_, self.ell_hessian_hyperparam,\n atol=self.hyperparam_tol):\n if sign_switch:\n return -self.ell_hessian\n else:\n return self.ell_hessian\n\n # Compute second derivative w.r.t eta\n d2ell_deta2 = self._likelihood_der2_eta(hyperparam)\n\n # To convert derivative to log scale, Jacobian is needed. Note: The\n # Jacobian itself is already converted to log scale.\n if self.use_log_eta or self.use_log_scale:\n jacobian_ = self.likelihood_jacobian(False, hyperparam)\n\n # Since we use xi = log_eta instead of eta as the variable, the\n # derivative of ell w.r.t log_eta should be taken into account.\n if self.use_log_eta:\n eta = self._hyperparam_to_eta(hyperparam)\n if numpy.isscalar(jacobian_):\n dell_deta = jacobian_\n else:\n dell_deta = jacobian_[0]\n\n # Convert second derivative to log scale (Note: dell_deta is\n # already in log scale)\n d2ell_deta2 = d2ell_deta2 * eta**2 * numpy.log(10.0)**2 + \\\n dell_deta * numpy.log(10.0)\n\n # Hessian here is a 2D array of size 1.\n hessian = d2ell_deta2\n\n # Compute Hessian w.r.t scale\n if hyperparam_.size > self.scale_index:\n\n # Compute second derivative w.r.t scale\n d2ell_dscale2 = self._likelihood_der2_scale(hyperparam)\n\n # Convert derivative w.r.t log of scale (if needed)\n if self.use_log_scale:\n scale = self._hyperparam_to_scale(\n hyperparam_[self.scale_index:])\n dell_dscale = jacobian_[self.scale_index:]\n\n for p in range(scale.size):\n for q in range(scale.size):\n if p == q:\n\n # dell_dscale is already converted to logscale\n d2ell_dscale2[p, q] = d2ell_dscale2[p, q] * \\\n scale[p]**2 * (numpy.log(10.0)**2) + \\\n dell_dscale[p] * numpy.log(10.0)\n else:\n d2ell_dscale2[p, q] = d2ell_dscale2[p, q] * \\\n scale[p] * scale[q] * (numpy.log(10.0)**2)\n\n # Compute second mixed derivative w.r.t scale and eta\n d2ell_deta_dscale = self._likelihood_der2_mixed(hyperparam)\n\n if self.use_log_eta:\n eta = self._hyperparam_to_eta(hyperparam)\n for p in range(scale.size):\n d2ell_deta_dscale[0, p] = d2ell_deta_dscale[0, p] * \\\n eta * numpy.log(10.0)\n\n if self.use_log_scale:\n scale = self._hyperparam_to_scale(\n hyperparam_[self.scale_index:])\n for p in range(scale.size):\n d2ell_deta_dscale[0, p] = d2ell_deta_dscale[0, p] * \\\n scale[p] * numpy.log(10.0)\n\n # Concatenate derivatives to form Hessian of all variables\n hessian = numpy.block(\n [[d2ell_deta2, d2ell_deta_dscale],\n [d2ell_deta_dscale.T, d2ell_dscale2]])\n\n # Store hessian to member data (without sign-switch).\n self.ell_hessian = hessian\n self.ell_hessian_hyperparam = hyperparam_\n\n if sign_switch:\n hessian = -hessian\n\n self.timer.toc()\n\n return hessian",
"def eval_hessian(self, T, epsilon=1e-1, diag=False):\n param0 = T.param.copy()\n if hasattr(T, 'copy'):\n T = T.copy()\n\n def simi(param):\n T.param = param\n return self.eval(T)\n\n if diag:\n return np.diag(approx_hessian_diag(simi, param0, epsilon))\n else:\n return approx_hessian(simi, param0, epsilon)",
"def calc_hessian(self, reuse_first=False):\n \n self.setup()\n \n # Create our 3D dictionary the first time we execute.\n if not self.hessian:\n for name1 in self.param_names:\n self.hessian[name1] = {}\n for name2 in self.param_names:\n self.hessian[name1][name2] = {}\n \n self.hessian_ondiag_case = OrderedDict()\n self.hessian_offdiag_case = OrderedDict()\n\n # Pull stepsizes from driver's parameters\n base_param = OrderedDict()\n stepsize = {}\n for key, item in self._parent.get_parameters().iteritems():\n \n if item.fd_step:\n stepsize[key] = item.fd_step\n else:\n stepsize[key] = self.default_stepsize\n\n # Diagonal terms in Hessian always need base point\n # Usually, we will have saved this when we calculated\n # the gradient.\n if reuse_first:\n base_param = self.base_param\n base_data = self.base_data\n else:\n # Pull initial state from driver's parameters\n for key, item in self._parent.get_parameters().iteritems():\n base_param[key] = item.evaluate()\n \n base_data = self._run_point(base_param)\n \n # Assemble input data\n # Cases : ondiag [fp, fm]\n deltas = [1, -1]\n for param in self.param_names:\n \n pcase = []\n for j_step, delta in enumerate(deltas):\n \n case = base_param.copy()\n case[param] += delta*stepsize[param]\n pcase.append({ 'param': case })\n \n self.hessian_ondiag_case[param] = pcase\n \n # Assemble input data\n # Cases : offdiag [fpp, fpm, fmp, fmm]\n deltas = [[1, 1],\n [1, -1],\n [-1, 1],\n [-1, -1]]\n for i, param1 in enumerate(self.param_names):\n \n offdiag = {}\n for param2 in self.param_names[i+1:]:\n \n pcase = []\n for delta in deltas:\n \n case = base_param.copy()\n case[param1] += delta[0]*stepsize[param1]\n case[param2] += delta[1]*stepsize[param2]\n pcase.append({ 'param': case })\n offdiag[param2] = pcase\n \n self.hessian_offdiag_case[param1] = offdiag\n \n # Run all \"cases\".\n # TODO - Integrate OpenMDAO's concurrent processing capability once it\n # is formalized. 
This operation is inherently paralellizable.\n \n # We don't need to re-run on-diag cases if the gradients were\n # calculated with Central Difference.\n if reuse_first and self.form=='central':\n for key, case in self.hessian_ondiag_case.iteritems():\n \n gradient_case = self.gradient_case[key]\n for ipcase, pcase in enumerate(case):\n \n gradient_ipcase = gradient_case[ipcase]\n pcase['data'] = gradient_ipcase['data'] \n else:\n for case in self.hessian_ondiag_case.values():\n for pcase in case:\n data = self._run_point(pcase['param'])\n pcase['data'] = data\n\n # Off-diag cases must always be run.\n for cases in self.hessian_offdiag_case.values():\n for case in cases.values():\n for pcase in case:\n pcase['data'] = self._run_point(pcase['param'])\n\n \n # Calculate Hessians - On Diagonal\n for key, case in self.hessian_ondiag_case.iteritems():\n \n eps = stepsize[key]\n \n for name in list(self.objective_names + \\\n self.eqconst_names + \\\n self.ineqconst_names):\n self.hessian[key][key][name] = \\\n diff_2nd_xx(case[0]['data'][name],\n base_data[name],\n case[1]['data'][name], eps)\n \n # Calculate Hessians - Off Diagonal\n for key1, cases in self.hessian_offdiag_case.iteritems():\n \n eps1 = stepsize[key1]\n for key2, case in cases.iteritems():\n \n eps2 = stepsize[key2]\n \n for name in list(self.objective_names + \\\n self.eqconst_names + \\\n self.ineqconst_names):\n self.hessian[key1][key2][name] = \\\n diff_2nd_xy(case[0]['data'][name],\n case[1]['data'][name],\n case[2]['data'][name],\n case[3]['data'][name],\n eps1, eps2)\n \n # Symmetry\n # (Should ponder whether we should even store it.)\n self.hessian[key2][key1][name] = \\\n self.hessian[key1][key2][name]",
"def approx_hessian(f, x, epsilon, args=()):\n n = x.shape[0]\n npts = 1\n if len(x.shape) > 1:\n npts = x.shape[1]\n H = np.zeros((n, n, npts))\n ei = np.zeros(n)\n for i in range(n):\n ei[i] = .5 * epsilon\n g1 = approx_gradient(f, (x.T + ei).T, epsilon, args=args)\n g2 = approx_gradient(f, (x.T - ei).T, epsilon, args=args)\n H[i, ...] = np.reshape((g1 - g2) / epsilon, (n, npts))\n ei[i] = 0\n return H.squeeze()",
"def hessian_matrix_eigvals(H_elems):\n return _symmetric_compute_eigenvalues(H_elems)",
"def hessian(self, var: ndarray) -> ndarray:\n beta, gamma = self.get_vars(var)\n sqrt_gamma = np.sqrt(gamma)\n d = self.get_varmat(gamma)\n femat = self.get_femat(beta)\n obsvar = split_by_sizes(self.get_obsvar(), self.data.group_sizes)\n remat = split_by_sizes(self.get_remat(), self.data.group_sizes)\n dlmats = [DLMat(obsvar[i], remat[i]*sqrt_gamma)\n for i in range(self.data.num_groups)]\n\n beta_fisher = femat.T.dot(d.invdot(femat))\n beta_fisher += self.fevar.prior_hessian(beta)\n\n gamma_fisher = np.zeros((self.revar.size, self.revar.size))\n for i, dlmat in enumerate(dlmats):\n gamma_fisher += 0.5*(remat[i].T.dot(dlmat.invdot(remat[i])))**2\n gamma_fisher += self.revar.prior_hessian(gamma)\n\n return block_diag(beta_fisher, gamma_fisher)",
"def get_gradients_hessian(self, loss, params):\n params = nest.flatten(params)\n with backend.get_graph().as_default(), backend.name_scope(self._name +\n \"/gradients\"):\n grads = gradients.gradients(loss, params)\n for grad, param in zip(grads, params):\n if grad is None:\n raise ValueError(\"Variable {} has `None` for gradient. \"\n \"Please make sure that all of your ops have a \"\n \"gradient defined (i.e. are differentiable). \"\n \"Common ops without gradient: \"\n \"K.argmax, K.round, K.eval.\".format(param))\n\n # WARNING: for now we do not support gradient clip\n # grads = self._clip_gradients(grads)\n\n v = [np.random.uniform(0, 1, size = p.shape) for p in params]\n for vi in v:\n vi[ vi < 0.5] = -1 \n vi[ vi >= 0.5] = 1 \n v = [tf.convert_to_tensor(vi, dtype = tf.dtypes.float32) for vi in v]\n\n vprod = tf.reduce_sum([ tf.reduce_sum(vi * grad) for vi, grad in zip(v, grads)])\n\n Hv = gradients.gradients(vprod, params)\n\n Hd = [ tf.abs(Hvi * vi) for Hvi, vi in zip(Hv, v)]\n\n return grads, Hd",
"def approx_hessian_diag(f, x, epsilon):\n n = len(x)\n h = np.zeros(n)\n ei = np.zeros(n)\n fx = f(x)\n for i in range(n):\n ei[i] = epsilon\n h[i] = (f(x + ei) + f(x - ei) - 2 * fx) / (epsilon ** 2)\n ei[i] = 0\n return h",
"def _update_hessian(self) -> None:\n assert self._species and self._coords is not None and self._method\n\n species = self._species.new_species(\n name=f\"{self._species.name}_opt_{self.iteration}\"\n )\n species.coordinates = self._coords.to(\"cartesian\")\n\n species.calc_hessian(\n method=self._method,\n keywords=self._method.keywords.hess,\n n_cores=self._n_cores,\n )\n assert species.hessian is not None, \"Failed to calculate H\"\n\n self._species.hessian = species.hessian.copy()\n self._coords.update_h_from_cart_h(self._species.hessian.to(\"Ha Å^-2\"))",
"def approx_hessian_diag(f, x, epsilon, args=()):\n n = x.shape[0]\n npts = 1\n if len(x.shape) > 1:\n npts = x.shape[1]\n h = np.zeros((n, npts))\n ei = np.zeros(n)\n fx = f(x, *args)\n for i in range(n):\n ei[i] = epsilon\n h[i, :] = (f((x.T + ei).T, *args) + f((x.T - ei).T, *args) - 2 * fx) / (epsilon ** 2)\n ei[i] = 0\n return h.squeeze()",
"def num_hessian(x0, cost_function, epsilon = 1.e-5, linear_approx = False, *args):\n # The next line calculates an approximation to the first derivative\n f1 = sp.optimize.approx_fprime(x0, cost_function, *args) \n # This is a linear approximation, efficient if cost function is linear\n if linear_approx:\n f1 = sp.matrix(f1)\n return f1.transpose() * f1 \n # Allocate space for the hessian\n n = x0.shape[0]\n hessian = sp.zeros ((n, n))\n # The next loop fill in the matrix\n xx = x0\n for j in xrange(n):\n xx0 = xx[j] # Store old value\n xx[j] = xx0 + epsilon # Perturb with finite difference\n # Recalculate the partial derivatives for this new point\n f2 = sp.optimize.approx_fprime(x0, cost_function, *args) \n hessian[:, j] = (f2 - f1)/epsilon # scale...\n xx[j] = xx0 # Restore initial value of x0 \n return hessian",
"def hessian_full(self, params):\n\n if type(params) is not MixedLMParams:\n params = MixedLMParams.from_packed(params, self.k_fe,\n self.use_sqrt)\n\n fe_params = params.get_fe_params()\n cov_re = params.get_cov_re()\n try:\n cov_re_inv = np.linalg.inv(cov_re)\n except np.linalg.LinAlgError:\n cov_re_inv = None\n\n # Blocks for the fixed and random effects parameters.\n hess_fe = 0.\n hess_re = np.zeros((self.k_re2, self.k_re2), dtype=np.float64)\n hess_fere = np.zeros((self.k_re2, self.k_fe),\n dtype=np.float64)\n\n fac = self.n_totobs\n if self.reml:\n fac -= self.exog.shape[1]\n\n rvir = 0.\n xtvix = 0.\n xtax = [0.,] * self.k_re2\n B = np.zeros(self.k_re2, dtype=np.float64)\n D = np.zeros((self.k_re2, self.k_re2), dtype=np.float64)\n F = [[0.,]*self.k_re2 for k in range(self.k_re2)]\n for k in range(self.n_groups):\n\n exog = self.exog_li[k]\n ex_r = self.exog_re_li[k]\n ex2_r = self.exog_re2_li[k]\n\n # The residuals\n expval = np.dot(exog, fe_params)\n resid = self.endog_li[k] - expval\n\n viexog = _smw_solve(1., ex_r, ex2_r, cov_re, cov_re_inv,\n exog)\n xtvix += np.dot(exog.T, viexog)\n vir = _smw_solve(1., ex_r, ex2_r, cov_re, cov_re_inv,\n resid)\n rvir += np.dot(resid, vir)\n\n for jj1,mat1 in self._gen_dV_dPsi(ex_r):\n\n hess_fere[jj1,:] += np.dot(viexog.T,\n np.dot(mat1, vir))\n if self.reml:\n xtax[jj1] += np.dot(viexog.T, np.dot(mat1, viexog))\n\n B[jj1] += np.dot(vir, np.dot(mat1, vir))\n E = _smw_solve(1., ex_r, ex2_r, cov_re, cov_re_inv,\n mat1)\n\n for jj2,mat2 in self._gen_dV_dPsi(ex_r, jj1):\n Q = np.dot(mat2, E)\n Q1 = Q + Q.T\n vt = np.dot(vir, np.dot(Q1, vir))\n D[jj1, jj2] += vt\n if jj1 != jj2:\n D[jj2, jj1] += vt\n R = _smw_solve(1., ex_r, ex2_r, cov_re,\n cov_re_inv, Q)\n rt = np.trace(R) / 2\n hess_re[jj1, jj2] += rt\n if jj1 != jj2:\n hess_re[jj2, jj1] += rt\n if self.reml:\n F[jj1][jj2] += np.dot(viexog.T,\n np.dot(Q, viexog))\n\n hess_fe -= fac * xtvix / rvir\n\n hess_re = hess_re - 0.5 * fac * (D/rvir - np.outer(B, B) / rvir**2)\n\n hess_fere = -fac * hess_fere / rvir\n\n if self.reml:\n for j1 in range(self.k_re2):\n Q1 = np.linalg.solve(xtvix, xtax[j1])\n for j2 in range(j1 + 1):\n Q2 = np.linalg.solve(xtvix, xtax[j2])\n a = np.trace(np.dot(Q1, Q2))\n a -= np.trace(np.linalg.solve(xtvix, F[j1][j2]))\n a *= 0.5\n hess_re[j1, j2] += a\n if j1 > j2:\n hess_re[j2, j1] += a\n\n # Put the blocks together to get the Hessian.\n m = self.k_fe + self.k_re2\n hess = np.zeros((m, m), dtype=np.float64)\n hess[0:self.k_fe, 0:self.k_fe] = hess_fe\n hess[0:self.k_fe, self.k_fe:] = hess_fere.T\n hess[self.k_fe:, 0:self.k_fe] = hess_fere\n hess[self.k_fe:, self.k_fe:] = hess_re\n\n return hess",
"def test_hessian():\n x, y = fwd.Variable(), fwd.Variable()\n rosen = 100.0*(y - x**2)**2 + (1 - x)**2.0\n rosen_hessian = lambda x, y: \\\n np.array([[1200*x**2-400*x+2, -400*x],\n [-400*x, 200]])\n rosen_hessian_returned = rosen.hessian_at({x: 1.0, y: 1.0})\n rosen_hessian_expected = rosen_hessian(1.0, 1.0)\n for i in range(2):\n for j in range(2):\n assert equals(rosen_hessian_returned[i, j],\n rosen_hessian_expected[i, j])",
"def test_xml_hessian(xml_parser_disp):\n\n hessian = xml_parser_disp.get_hessian()\n assert hessian.shape == (24, 24)\n test = np.array([-0.46355041, 0. , 0. , -0.05917741])\n np.testing.assert_allclose(hessian[0][0:4], test)\n test = np.array([ 0.11487952, 0.08151255, 0.08370068, 0.11487952])\n np.testing.assert_allclose(hessian[15][0:4], test)\n test = np.array([ 0.11431486, -0.0818301 ])\n np.testing.assert_allclose(hessian[15][9:11], test)",
"def H(self) -> BaseMatrix:",
"def H(self) -> BaseMatrix:",
"def get_hess_grad(computer, o_molsys):\n # Not sure why we need a copy here\n logger = logging.getLogger(__name__)\n logger.debug(\"Computing an analytical hessian\")\n xyz = o_molsys.geom.copy()\n # Always return_true so we don't have to compute the gradient as well\n ret = computer.compute(xyz, driver=\"hessian\", return_full=True, print_result=False)\n h_cart = np.asarray(ret[\"return_result\"]).reshape(o_molsys.geom.size, o_molsys.geom.size)\n try:\n logger.debug(\"Looking for gradient in hessian output\")\n g_cart = ret[\"extras\"][\"qcvars\"][\"CURRENT GRADIENT\"]\n except KeyError:\n logger.error(\"Could not find the gradient in qcschema\")\n grad = computer.compute(o_molsys.geom, driver=\"gradient\", return_full=False)\n g_cart = np.asarray(grad)\n # Likely not at stationary point. Include forces\n # ADDENDUM currently neglects forces term for all points - including non-stationary\n H = o_molsys.hessian_to_internals(h_cart)\n\n return H, g_cart",
"def hessian(self, x, y, sigma0, Ra, Rs, e1, e2, center_x=0, center_y=0):\n alpha_ra, alpha_dec = self.derivatives(x, y, sigma0, Ra, Rs, e1, e2, center_x, center_y)\n diff = self._diff\n alpha_ra_dx, alpha_dec_dx = self.derivatives(x + diff, y, sigma0, Ra, Rs, e1, e2, center_x, center_y)\n alpha_ra_dy, alpha_dec_dy = self.derivatives(x, y + diff, sigma0, Ra, Rs, e1, e2, center_x, center_y)\n\n f_xx = (alpha_ra_dx - alpha_ra)/diff\n f_xy = (alpha_ra_dy - alpha_ra)/diff\n f_yx = (alpha_dec_dx - alpha_dec)/diff\n f_yy = (alpha_dec_dy - alpha_dec)/diff\n\n return f_xx, f_xy, f_yx, f_yy",
"def hessianstructure(self):\n pass",
"def get_model_init(self) -> ndarray:\n beta = np.zeros(self.fevar.size)\n gamma = np.zeros(self.revar.size)\n var = np.hstack([beta, gamma])\n grad_beta = self.gradient(var)[:self.fevar.size]\n hess_beta = self.hessian(var)[:self.fevar.size,\n :self.fevar.size]\n beta = beta - np.linalg.solve(\n hess_beta + np.identity(self.fevar.size),\n grad_beta\n )\n return np.hstack([beta, gamma])",
"def test_hessian(self):\n # almost spherical case\n x = 1.\n y = 1.\n e1, e2 = 5e-5, 0.\n sigma = 1.\n amp = 2.\n\n f_xx, f_xy, f_yx, f_yy = self.gaussian_kappa_ellipse.hessian(x, y, amp,\n sigma, e1, e2)\n f_xx_sphere, f_xy_sphere, f_yx_sphere, f_yy_sphere = self.gaussian_kappa.hessian(x,\n y, amp=amp, sigma=sigma)\n npt.assert_almost_equal(f_xx, f_xx_sphere, decimal=4)\n npt.assert_almost_equal(f_yy, f_yy_sphere, decimal=4)\n npt.assert_almost_equal(f_xy, f_xy_sphere, decimal=4)\n npt.assert_almost_equal(f_yx, f_xy, decimal=8)\n\n # spherical case\n e1, e2 = 0., 0.\n f_xx, f_xy, f_yx, f_yy = self.gaussian_kappa_ellipse.hessian(x, y, amp, sigma, e1, e2)\n\n npt.assert_almost_equal(f_xx, f_xx_sphere, decimal=4)\n npt.assert_almost_equal(f_yy, f_yy_sphere, decimal=4)\n npt.assert_almost_equal(f_xy, f_xy_sphere, decimal=4)",
"def diagonal_hessian(kernel: Kern, x: ndarray) -> ndarray:\n if isinstance(kernel, Stationary):\n num_points, num_dimensions = x.shape\n\n return np.zeros((num_points, num_dimensions, num_dimensions))\n else:\n raise NotImplementedError",
"def llhessian(store, beta):\n nobs = store['yvec'].shape[0]\n kreg = store['xmat'].shape[1]\n lamb = exp(dot(store['xmat'], beta))\n sum = zeros((kreg, kreg))\n for i in xrange(nobs):\n sum = sum + lamb[i] * outer(store['xmat'][i], store['xmat'][i])\n return -sum",
"def hessian(self, x, y, amp, sigma_x, sigma_y, center_x = 0, center_y = 0):\n f_ = self.function(x, y, amp, sigma_x, sigma_y, center_x, center_y)\n f_xx = f_ * ( (-1./sigma_x**2) + (center_x-x)**2/sigma_x**4 )\n f_yy = f_ * ( (-1./sigma_y**2) + (center_y-y)**2/sigma_y**4 )\n f_xy = f_ * (center_x-x)/sigma_x**2 * (center_y-y)/sigma_y**2\n return f_xx, f_xy, f_xy, f_yy",
"def hessian(f, delta=DELTA):\n def hessian_f(*args, **kwargs):\n if len(args) == 1:\n x, = args\n hessianf_x = (\n f(x+delta) + f(x-delta) - 2*f(x)\n )/delta**2\n return hessianf_x\n elif len(args) == 2:\n x, y = args\n if type(x) in [float, int] and type(y) in [float, int]:\n hess_xx = (\n f(x + delta, y) + f(x - delta, y) - 2*f(x, y)\n )/delta**2\n hess_yy = (\n f(x, y + delta) + f(x, y - delta) - 2*f(x, y)\n )/delta**2\n hess_xy = (\n + f(x+delta/2, y+delta/2)\n + f(x-delta/2, y-delta/2)\n - f(x+delta/2, y-delta/2)\n - f(x-delta/2, y+delta/2)\n )/delta**2\n return hess_xx, hess_xy, hess_yy\n return hessian_f",
"def compute_hessian_logreg(tx, w):\n t = tx.dot(w)\n s = np.diag(sigmoid(t)*(1 - sigmoid(t)))\n\n return tx.T.dot(s).dot(tx)",
"def hessian_factor(self, params, scale=None, observed=True):\n raise NotImplementedError # pragma: no cover",
"def hessian(self, x, y, n_sersic, R_sersic, k_eff, e1, e2, center_x=0, center_y = 0):\n alpha_ra, alpha_dec = self.derivatives(x, y, n_sersic, R_sersic, k_eff, e1, e2, center_x, center_y)\n diff = 0.000001\n alpha_ra_dx, alpha_dec_dx = self.derivatives(x + diff, y, n_sersic, R_sersic, k_eff, e1, e2, center_x, center_y)\n alpha_ra_dy, alpha_dec_dy = self.derivatives(x, y + diff, n_sersic, R_sersic, k_eff, e1, e2, center_x, center_y)\n\n f_xx = (alpha_ra_dx - alpha_ra)/diff\n f_xy = (alpha_ra_dy - alpha_ra)/diff\n f_yx = (alpha_dec_dx - alpha_dec)/diff\n f_yy = (alpha_dec_dy - alpha_dec)/diff\n\n return f_xx, f_xy, f_yx, f_yy",
"def autodiff_hessian(\n func: Callable, params: Iterable[zfit.Parameter], hessian=None\n) -> tf.Tensor:\n\n return automatic_value_gradients_hessian(func, params, hessian=hessian)[2]",
"def hes_res(self, params, **kwargs):\n e = kwargs.get(\"e\", self.problem.data_e)\n\n hes = self.hessian.eval(params, **kwargs)\n for i, e_i in enumerate(e):\n hes[:, :, i] = - hes[:, :, i] / e_i\n\n return hes, self.jac_res(params, **kwargs)",
"def numerical_hessian(\n func: Callable | None, params: Iterable[zfit.Parameter], hessian=None\n) -> tf.Tensor:\n from ..core.parameter import assign_values\n\n params = convert_to_container(params)\n\n def wrapped_func(param_values):\n assign_values(params, param_values)\n value = func()\n if hasattr(value, \"numpy\"):\n value = value.numpy()\n return value\n\n param_vals = znp.stack(params)\n original_vals = [param.value() for param in params]\n\n if hessian == \"diag\":\n hesse_func = numdifftools.Hessdiag(\n wrapped_func,\n order=2,\n # TODO: maybe add step to remove numerical problems?\n base_step=1e-4,\n )\n else:\n hesse_func = numdifftools.Hessian(\n wrapped_func,\n order=2,\n base_step=1e-4,\n )\n if tf.executing_eagerly():\n computed_hessian = convert_to_tensor(hesse_func(param_vals))\n else:\n computed_hessian = tf.numpy_function(\n hesse_func, inp=[param_vals], Tout=tf.float64\n )\n n_params = param_vals.shape[0]\n if hessian == \"diag\":\n computed_hessian.set_shape((n_params,))\n else:\n computed_hessian.set_shape((n_params, n_params))\n\n assign_values(params, original_vals)\n return computed_hessian",
"def build_eval(self, inputs):\n def evaluate_hessian(x):\n return self.Hx(inputs, x) + self.reg_coeff * x\n\n return evaluate_hessian",
"def construct_hessian(f, mesh=None, op=DefaultOptions()):\n if mesh is None:\n mesh = f.function_space().mesh()\n dim = mesh.topological_dimension()\n assert dim in (2, 3)\n P1_ten = TensorFunctionSpace(mesh, \"CG\", 1)\n n = FacetNormal(mesh)\n\n # Integration by parts applied to the Hessian definition\n if op.hessian_recovery == 'parts':\n H = TrialFunction(P1_ten)\n τ = TestFunction(P1_ten)\n a = inner(tau, H)*dx\n L = -inner(div(τ), grad(f))*dx\n for i in range(dim):\n for j in range(dim):\n L += τ[i, j]*n[j]*f.dx(i)*ds\n\n H = Function(P1_ten)\n solve(a == L, H, solver_parameters=op.hessian_solver_parameters)\n\n # Double L2 projection, using a mixed formulation for the gradient and Hessian\n elif op.hessian_recovery == 'dL2':\n P1_vec = VectorFunctionSpace(mesh, \"CG\", 1)\n V = P1_ten*P1_vec\n H, g = TrialFunctions(V)\n τ, φ = TestFunctions(V)\n a = inner(τ, H)*dx\n a += inner(φ, g)*dx\n a += inner(div(τ), g)*dx\n for i in range(dim):\n for j in range(dim):\n a += -g[i]*τ[i, j]*n[j]*ds\n\n # L = inner(grad(f), φ)*dx\n L = f*dot(φ, n)*ds - f*div(φ)*dx # enables f to be P0\n\n q = Function(V)\n solve(a == L, q) # TODO: Solver parameters?\n H = q.split()[0]\n\n return H",
"def compute_hessian_vector_product(self, function, arguments):",
"def reset_hessian_and_bias(self):\n # reset_shared_var(self.t_H)\n t = self.QUAD_REG\n if len(t.shape) == 1:\n self.t_H.set_value(np.diag(self.QUAD_REG))\n elif len(t.shape) == 2:\n self.t_H.set_value(self.QUAD_REG)\n else:\n raise ValueError('Invalid quad_reg shape')\n\n reset_shared_var(self.t_B)",
"def standard_error_est(Y, nodes_num, betas, sigma):\n\n ## Generating nodes\n N, I = np.shape(Y)\n nodes, weights = np.polynomial.hermite.hermgauss(nodes_num)\n weights /= np.sqrt(np.pi)\n nodes *= np.sqrt(2)\n\n ## Calculating H functions in equation (12)\n h_betaj = H(nodes, fct=\"hessian_betaj\", sigma=sigma, betas=betas) # MxI\n h_betai_betaj = H(nodes, fct=\"hessian_betai_betaj\", sigma=sigma, betas=betas) # MxI\n h_sigma = H(nodes, fct=\"hessian_sigma\", sigma=sigma, betas=betas) # scalar\n h_sigma_betaj = H(nodes, fct=\"hessian_sigma_betaj\", sigma=sigma, betas=betas) # MxI\n\n ## Calculating Hessian matrix based on equation (14) (15) (16) and (17)\n marginal = (f_y(Y, betas, sigma, nodes) * H(nodes, fct=\"one\")).dot(weights)\n\n # h_sigma_mat\n cache = f_y(Y, betas, sigma, nodes).dot(np.diag(weights).dot(h_sigma))\n h_sigma_scalar = np.diag(1 / marginal).dot(cache).sum() # scalar\n # print(h_sigma_scalar)\n\n # h_betaj_mat\n cache = f_y(Y, betas, sigma, nodes).dot(np.diag(weights).dot(h_betaj))\n h_betaj_vec = np.diag(1 / marginal).dot(cache).sum(axis=0) # Ix1 vector\n # print(h_betaj_vec)\n\n # h_sigma_betaj\n cache = f_y(Y, betas, sigma, nodes).dot(np.diag(weights).dot(h_sigma_betaj))\n h_sigma_betaj_vec = np.diag(1 / marginal).dot(cache).sum(axis=0) # Ix1 vector\n # print(h_sigma_betaj_vec)\n\n hessian = np.zeros((I + 1, I + 1))\n hessian[:I, -1] = hessian[-1, :I] = h_sigma_betaj_vec\n hessian[I, I] = h_sigma_scalar\n for i in range(I):\n hessian[i, i] = h_betaj_vec[i]\n\n ## Calculating standard error of parameters\n std_est = np.sqrt([np.linalg.inv(hessian)[i, i] for i in range(I + 1)])\n\n return {\"Hessian matrix\": hessian, \"std_est\": std_est}",
"def compute_gradient(self): # TODO: try to change to square loss since it's hessian is easier to obtain\n A = np.dot(self.X, self.w)\n m = self.t.shape[0]\n C = -1 * self.t * (1 / (1 + np.exp(A * self.t)))\n return (1 / m) * np.dot(self.X.T, C)",
"def get_hess(self, x: np.ndarray) -> np.ndarray:\n hess = self(x, (2,), MODE_FUN)\n return hess",
"def hess_f(z, X, Y, _lambda):\r\n d = z.shape[0]\r\n w = z[:-1]\r\n beta = z[-1]\r\n hess = np.zeros((d, d))\r\n hess[:-1, :-1] = - np.einsum('ki,kj->ij', X * g(X.dot(w) + beta), X * g(-(X.dot(w) + beta)))\r\n hess[:-1, [-1]] = - np.einsum('ij,ik->kj', g(X.dot(w) + beta) * g(-(X.dot(w) + beta)), X) + 2 * _lambda\r\n hess[[-1], :-1] = hess[:-1, [-1]].T\r\n hess[-1, -1] = - np.dot(g(X.dot(w) + beta).T, g(-(X.dot(w) + beta))) + 2 * _lambda\r\n return hess",
"def dftb_hessian(num_atoms, tol):\n dftb_hessian=\"\"\"Driver = SecondDerivatives{\n Atoms = {{ num_atoms }}\n Delta = {{ tol }}\n }\n \"\"\"\n return Environment().from_string(dftb_hessian).render(num_atoms=num_atoms,tol=tol)",
"def hessian(self, x, y, n_sersic, R_sersic, k_eff, center_x=0, center_y=0):\n x_ = x - center_x\n y_ = y - center_y\n r = np.sqrt(x_**2 + y_**2)\n if isinstance(r, int) or isinstance(r, float):\n r = max(self._s, r)\n else:\n r[r < self._s] = self._s\n d_alpha_dr = self.d_alpha_dr(x, y, n_sersic, R_sersic, k_eff, center_x, center_y)\n alpha = -self.alpha_abs(x, y, n_sersic, R_sersic, k_eff, center_x, center_y)\n\n f_xx = -(d_alpha_dr/r + alpha/r**2) * x_**2/r + alpha/r\n f_yy = -(d_alpha_dr/r + alpha/r**2) * y_**2/r + alpha/r\n f_xy = -(d_alpha_dr/r + alpha/r**2) * x_*y_/r\n\n return f_xx, f_xy, f_xy, f_yy",
"def sum_hessian(self, module, g_inp, g_out):\n return self._sum_hessian(module, g_inp, g_out)",
"def hessian_diag(\n loss: LossFun,\n params: Any,\n inputs: jnp.DeviceArray,\n targets: jnp.DeviceArray,\n) -> jnp.DeviceArray:\n vs = jnp.eye(ravel(params).size)\n comp = lambda v: jnp.vdot(v, ravel(hvp(loss, v, params, inputs, targets)))\n return jax.vmap(comp)(vs)",
"def check_hessian(jac, hessian_matvec, zz, plot=False, disp=True, rel=True,\n direction=None, fd_eps=np.logspace(-13, 0, 14)[::-1]):\n assert zz.ndim == 2\n assert zz.shape[1] == 1\n grad = jac(zz)\n if direction is None:\n direction = np.random.normal(0, 1, (zz.shape[0], 1))\n direction /= np.linalg.norm(direction)\n directional_derivative = hessian_matvec(zz, direction)\n errors = []\n row_format = \"{:<12} {:<25} {:<25} {:<25}\"\n if disp:\n if rel:\n print(\n row_format.format(\n \"Eps\", \"norm(jv)\", \"norm(jv_fd)\",\n \"Rel. Errors\"))\n else:\n print(row_format.format(\n \"Eps\", \"norm(jv)\", \"norm(jv_fd)\",\n \"Abs. Errors\"))\n for ii in range(fd_eps.shape[0]):\n zz_perturbed = zz.copy()+fd_eps[ii]*direction\n perturbed_grad = jac(zz_perturbed)\n fd_directional_derivative = (perturbed_grad-grad)/fd_eps[ii]\n # print(directional_derivative, fd_directional_derivative)\n errors.append(np.linalg.norm(\n fd_directional_derivative.reshape(directional_derivative.shape) -\n directional_derivative))\n if rel:\n errors[-1] /= np.linalg.norm(directional_derivative)\n if disp:\n print(row_format.format(fd_eps[ii],\n np.linalg.norm(directional_derivative),\n np.linalg.norm(fd_directional_derivative),\n errors[ii]))\n # print(fd_directional_derivative,directional_derivative)\n\n if plot:\n plt.loglog(fd_eps, errors, 'o-')\n label = r'$\\lvert\\nabla^2_\\epsilon \\cdot p f-\\nabla^2 f\\cdot p\\rvert$'\n plt.ylabel(label)\n plt.xlabel(r'$\\epsilon$')\n plt.show()\n\n return np.asarray(errors)",
"def hessian_cn(self, x):\n h = self.hessian(x)\n try:\n u, s, vt = svd(h)\n except LinAlgError:\n return nan\n s = relu(s)\n if s.min() > 0:\n return s.max() / s.min()\n else:\n return nan",
"def denoise_hessian(hessian):\n new_hessian = hessian.copy()\n s = new_hessian.values.shape\n c = new_hessian.columns\n new_hessian = new_hessian.values.flatten()\n new_hessian[np.argsort(new_hessian.flatten())[int(len(new_hessian.flatten()) * 0.999):]] = np.sign(new_hessian[np.argsort(new_hessian.flatten())[int(len(new_hessian.flatten()) * 0.999):]]) * np.abs(new_hessian.flatten()[np.argsort(new_hessian.flatten())][int(len(new_hessian.flatten()) * 0.999)])\n new_hessian[np.argsort(new_hessian.flatten())[::-1][int(len(new_hessian.flatten()) * 0.999):]] = np.sign(new_hessian[np.argsort(new_hessian.flatten())[::-1][int(len(new_hessian.flatten()) * 0.999):]]) * np.abs(new_hessian.flatten()[np.argsort(new_hessian.flatten())][::-1][int(len(new_hessian.flatten()) * 0.999)])\n return pd.DataFrame(new_hessian.reshape(s), columns = c)",
"def make_loss_hessian_func(self, ext):\n hessian_strategy = ext.get_loss_hessian_strategy()\n\n if hessian_strategy == LossHessianStrategy.EXACT:\n return self.derivatives.sqrt_hessian\n elif hessian_strategy == LossHessianStrategy.SAMPLING:\n mc_samples = ext.get_num_mc_samples()\n return partial(self.derivatives.sqrt_hessian_sampled, mc_samples=mc_samples)\n elif hessian_strategy == LossHessianStrategy.SUM:\n return self.derivatives.sum_hessian\n else:\n raise ValueError(f\"Unknown Hessian strategy: {hessian_strategy}\")",
"def Heff(self, x):\r\n x = x.reshape(self.bond * self.p, -1)\r\n # Interactions between left environment and left site\r\n result = self.HA.reshape(self.bond * self.p, -1) @ x\r\n # Interactions between right environment and right site\r\n result += x @ self.HA.reshape(self.bond * self.p, -1).T\r\n # Interactions between left and right site\r\n x = x.reshape(self.bond, self.p, self.bond, self.p)\r\n result = result.reshape(self.bond, self.p, self.bond, self.p)\r\n result += np.einsum('xyij,lirj->lxry', self.NN_interaction, x)\r\n\r\n return result.ravel()",
"def FeH(self):\n\n #return math.log10(self.glb[user_params_index[\"Zs\"]]*constants.solar_x/(self.glb[user_params_index[\"Xs\"]]*constants.solar_z))/constants.A_FeH\n return math.log10(self.glb[iz0]*constants.solar_x/(self.glb[ix0]*constants.solar_z))/constants.A_FeH",
"def hessian_weight_location_location(self) -> np.ndarray:\n return -self.location",
"def sum_hessian(\n self, module: Module, g_inp: Tuple[Tensor], g_out: Tuple[Tensor]\n ) -> Tensor:\n self._check_2nd_order_make_sense(module, g_out)\n return self._sum_hessian(module, g_inp, g_out)",
"def _best_hessian_updater(self) -> \"HessianUpdater\":\n coords_l, coords_k = self._history.final, self._history.penultimate\n assert coords_k.g is not None and coords_l.g is not None\n\n for update_type in self._hessian_update_types:\n updater = update_type(\n h=coords_k.h,\n h_inv=coords_k.h_inv,\n s=coords_l.raw - coords_k.raw,\n y=coords_l.g - coords_k.g,\n subspace_idxs=coords_l.indexes,\n )\n\n if not updater.conditions_met:\n logger.info(f\"Conditions for {update_type} not met\")\n continue\n\n return updater\n\n raise RuntimeError(\n \"Could not update the inverse Hessian - no \"\n \"suitable update strategies\"\n )",
"def hessian_lag(self, x, y, out=None, **kwargs):\n hess = self._base_nlp.hessian_lag(x, y, **kwargs)\n\n append_row = self._zid_to_xid\n append_col = self._zid_to_xid\n append_data = np.ones(self.nz, dtype=np.double)\n append_data.fill(self.rho)\n\n # this will add rho to the diagonal\n hess.row = np.concatenate([hess.row, append_row])\n hess.col = np.concatenate([hess.col, append_col])\n hess.data = np.concatenate([hess.data, append_data])\n\n hess.sum_duplicates()\n\n if out is not None:\n assert isinstance(out, coo_matrix), \"hessian must be a coo_matrix\"\n assert out.shape[0] == self.nx, \"hessian has {} rows\".format(self.nx)\n assert out.shape[1] == self.nx, \"hessian has {} columns\".format(self.nx)\n assert out.nnz == self.nnz_hessian_lag, \"hessian has {} nnz\".format(self.nnz_hessian_lag)\n out.data = hess.data\n return hess",
"def H(self, z):\n prefactor = 15./np.pi**4.*self.Omega_gamma*(1.+z)**4.\n # Dark energy contribution\n Xde = self.X_DE(z)\n # Neutrino contribution\n yn = np.outer(self.M_nu/(const.kB*self.T_nu), 1./(1.+z))\n Fn = self.FermiDirac_integral(np.array(yn))\n nu_contribution = prefactor*self.Gamma_nu**4.*Fn\n # UR contribution\n Fu = self.FermiDirac_integral(0.)\n ur_contribution = prefactor*self.Gamma_nu_inst**4.*Fu*self.massless_nu\n # WDM contribution\n yw = np.outer(self.M_wdm/(const.kB*self.T_wdm), 1./(1.+z))\n Fw = self.FermiDirac_integral(np.array(yw))\n wdm_contribution = prefactor*np.expand_dims(self.Gamma_wdm**4.,1)*Fw\n # H(z)\n return self.H0*(self.Omega_cdm *(1+z)**3 +\n self.Omega_b *(1+z)**3 +\n self.Omega_gamma *(1+z)**4 + \n self.Omega_K *(1+z)**2 +\n self.Omega_lambda*Xde +\n ur_contribution +\n np.sum(wdm_contribution,axis=0) + \n np.sum(nu_contribution ,axis=0))**0.5",
"def energy_function(self, x):\n \n return -T.dot(T.transpose(x), T.dot(self.W, x)) -\\\n T.dot(T.transpose(self.b), x)",
"def automatic_value_gradient_hessian(\n func: Callable = None,\n params: Iterable[zfit.Parameter] = None,\n value_grad_func=None,\n hessian=None,\n) -> [tf.Tensor, tf.Tensor, tf.Tensor]:\n if params is None:\n raise ValueError(\"Parameters have to be specified, are currently None.\")\n if func is None and value_grad_func is None:\n ValueError(\"Either `func` or `value_grad_func` has to be specified.\")\n\n from .. import z\n\n persistant = (\n hessian == \"diag\" or tf.executing_eagerly()\n ) # currently needed, TODO: can we better parallelize that?\n # TODO(WrappedVariable): this is needed if we want to use wrapped Variables\n # params = _extract_tfparams(params)\n with tf.GradientTape(persistent=persistant, watch_accessed_variables=False) as tape:\n tape.watch(params)\n if callable(value_grad_func):\n loss, gradients = value_grad_func(params)\n else:\n loss, gradients = autodiff_value_gradients(func=func, params=params)\n if hessian == \"diag\":\n gradients = tf.unstack(gradients)\n # gradients_tf = znp.stack(gradients)\n if hessian == \"diag\":\n computed_hessian = znp.stack(\n [\n tape.gradient(grad, sources=param)\n for param, grad in zip(params, gradients)\n ]\n )\n # gradfunc = lambda par_grad: tape.gradient(par_grad[0], sources=par_grad[1])\n # computed_hessian = tf.vectorized_map(gradfunc, zip(params, gradients))\n else:\n computed_hessian = z.convert_to_tensor(\n tape.jacobian(\n gradients,\n sources=params,\n experimental_use_pfor=True, # causes TF bug? Slow..\n )\n )\n del tape\n return loss, gradients, computed_hessian",
"def hessian(kernel: Kern, variable_points: ndarray, fixed_points: ndarray) -> ndarray:\n if isinstance(kernel, RBF):\n lengthscale = kernel.lengthscale.values[0]\n k = kernel.K(variable_points, fixed_points)\n\n _, num_dimensions = variable_points.shape\n\n # The (i, j, k)-th element of this is the k-th component of X_i - D_j (i.e. (X_i - D_j)_k).\n differences = variable_points[:, newaxis, :] - fixed_points[newaxis, :, :]\n\n # The (i, j, k, l)-th element of this is (X_i - D_j)_k * (X_i - D_j)_l. This can be viewed as a matrix of\n # matrices, whose (i, j)-th matrix is the outer product of (X_i - D_j) with itself.\n outer_products_of_differences = np.einsum('ijk,ijl->ijkl', differences, differences, optimize=True)\n\n transformed_outer_products = (outer_products_of_differences / lengthscale ** 2) - np.eye(num_dimensions)\n\n # Now multiply the (i, j)-th transformed outer product by K(X_i, D_j).\n product = np.einsum('ij,ijkl->ijkl', k, transformed_outer_products, optimize=True)\n\n return product / (lengthscale ** 2)\n else:\n raise NotImplementedError",
"def H(nodes, fct=\"identity\", betas=None, sigma=None):\n if fct == \"identity\":\n # Return: Mx1 vector\n return sigma * nodes\n elif fct == \"one\":\n # Return: a scalar\n return 1\n elif fct == \"logistic\":\n # Return: an MxI matrix\n return 1 / (1 + np.outer(np.exp(-sigma * nodes), np.exp(betas)))\n elif fct == \"logistic_identity\":\n # Return: an MxI matrix\n return np.diag(nodes).dot(1 / (1 + np.outer(np.exp(-sigma * nodes), np.exp(betas))))\n elif fct == \"hessian_sigma\":\n # Return: an MxI matrix\n to_divide = (1 + np.outer(np.exp(-sigma * nodes), np.exp(betas))) * (\n 1 + np.outer(np.exp(sigma * nodes), np.exp(-betas)))\n return np.diag(nodes ** 2).dot(1 / to_divide)\n elif fct == \"hessian_betaj\":\n # Return: an MxI matrix\n to_divide = (1 + np.outer(np.exp(-sigma * nodes), np.exp(betas))) * (\n 1 + np.outer(np.exp(sigma * nodes), np.exp(-betas)))\n return 1 / to_divide\n elif fct == \"hessian_betai_betaj\":\n # Return: a scalar\n return 0\n elif fct == \"hessian_sigma_betaj\":\n # Return: an MxI matrix\n to_divide = (1 + np.outer(np.exp(-sigma * nodes), np.exp(betas))) * (\n 1 + np.outer(np.exp(sigma * nodes), np.exp(-betas)))\n return np.diag(nodes).dot(1 / to_divide)\n else:\n return None",
"def derivative_matrix(g):\n\n def _(g):\n B = g.B[0].grad\n N = g.N[0]\n P = g.dec.P(1)\n H = np.vstack(P(B(i)) for i in range(N)).T\n return H\n\n return _(g), _(g.dual)",
"def numerical_value_gradient_hessian(\n func: Callable | None,\n params: Iterable[zfit.Parameter],\n gradient: Callable | None = None,\n hessian: str | None = None,\n) -> [tf.Tensor, tf.Tensor, tf.Tensor]:\n if params is None:\n raise ValueError(\"params cannot be None\")\n if func is None and gradient is None:\n raise ValueError(\"Either func or grad has to be given\")\n value, gradients = numerical_value_gradient(func, params)\n hessian = numerical_hessian(func, params, hessian=hessian)\n\n return value, gradients, hessian",
"def forcing(self):\n if not self._fr or not self._frstar:\n raise ValueError('Need to compute Fr, Fr* first.')\n return -Matrix([self._f_d, self._f_dnh])",
"def Hf(self, x, X):\n if type(x) == list:\n x = np.array(x)\n return self.model.hf(x, X, *self.params)",
"def get_sigmaz(self):\n\n try:\n out = np.diag(self.eigen_x)\n except AttributeError:\n self.get_eigen()\n out = np.diag(self.eigen_x)\n return out",
"def make_hessian_mat_prod(self, module, g_inp, g_out):\n return self._make_hessian_mat_prod(module, g_inp, g_out)",
"def refine_Hessian_SG(self, kpx, kpy, kps):\n\n k2x = []\n k2y = []\n sigmas = []\n i = 0\n kds = []\n kdx = []\n kdy = []\n\n #Hessian patch 3 ordre 2\n SGX0Y0 = [-0.11111111 , 0.22222222 , -0.11111111 , 0.22222222 , 0.55555556 , 0.22222222 , -0.11111111 , 0.22222222 , -0.11111111]\n SGX1Y0 = [-0.16666667 , 0.00000000 , 0.16666667 , -0.16666667 , 0.00000000 , 0.16666667 , -0.16666667 , 0.00000000 , 0.16666667 ]\n SGX2Y0 = [0.16666667 , -0.33333333 , 0.16666667 , 0.16666667 , -0.33333333 , 0.16666667 , 0.16666667, -0.33333333, 0.16666667 ]\n SGX0Y1 = [-0.16666667, -0.16666667, -0.16666667, 0.00000000, 0.00000000, 0.00000000, 0.16666667, 0.16666667, 0.16666667]\n SGX1Y1 = [0.25000000, 0.00000000, -0.25000000, 0.00000000, 0.00000000, 0.00000000, -0.25000000, 0.00000000, 0.25000000]\n SGX0Y2 = [0.16666667 , 0.16666667 , 0.16666667 , -0.33333333 , -0.33333333 , -0.33333333 , 0.16666667 , 0.16666667 , 0.16666667]\n\n# SGX0Y0 = [0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0]\n# SGX1Y0 = [0.0,0.0,0.0,-0.5,0.0,0.5,0.0,0.0,0.0]\n# SGX2Y0 = [0.0,0.0,0.0,0.33333333,-0.66666667,0.33333333,0.0,0.0,0.0]\n# SGX0Y1 = [0.0,-0.5,0.0,0.0,0.0,0.0,0.0,0.5,0.0]\n# SGX0Y2 = [0.0, 0.33333333 , 0.0 , 0.0 , -0.66666667,0.0, 0.0 , 0.33333333 , 0.0]\n\n\n for y, x, sigma in zip(kpy, kpx, kps):\n\n curr_dog = self.dogs[sigma]\n prev_dog = self.dogs[sigma - 1]\n next_dog = self.dogs[sigma + 1]\n\n# if (x > 1 and x < curr_dog.shape[1] - 2 and y > 1 and y < curr_dog.shape[0] - 2):\n\n\n patch3 = curr_dog[y - 1:y + 2, x - 1:x + 2]\n patch3_prev = prev_dog[y - 1:y + 2, x - 1:x + 2]\n patch3_next = next_dog[y - 1:y + 2, x - 1:x + 2]\n\n dx = (SGX1Y0 * patch3.ravel()).sum()\n dy = (SGX0Y1 * patch3.ravel()).sum()\n d2x = (SGX2Y0 * patch3.ravel()).sum()\n d2y = (SGX0Y2 * patch3.ravel()).sum()\n dxy = (SGX1Y1 * patch3.ravel()).sum()\n\n s_next = (SGX0Y0 * patch3_next.ravel()).sum()\n s = (SGX0Y0 * patch3.ravel()).sum()\n s_prev = (SGX0Y0 * patch3_prev.ravel()).sum()\n d2s = (s_next + s_prev - 2.0 * s)\n ds = (s_next - s_prev) / 2.0\n\n dx_next = (SGX1Y0 * patch3_next.ravel()).sum()\n dx_prev = (SGX1Y0 * patch3_prev.ravel()).sum()\n\n dy_next = (SGX0Y1 * patch3_next.ravel()).sum()\n dy_prev = (SGX0Y1 * patch3_prev.ravel()).sum()\n\n dxs = (dx_next - dx_prev) / 2.0\n dys = (dy_next - dy_prev) / 2.0\n\n print(dx,dy,ds)\n print(d2x,d2y,d2s,dxy,dxs,dys)\n\n lap = numpy.array([[d2y, dxy, dys], [dxy, d2x, dxs], [dys, dxs, d2s]])\n delta = -(numpy.dot(numpy.linalg.inv(lap), [dy, dx, ds]))\n print(y,x)\n print(delta)\n# err = numpy.linalg.norm(delta[:-1])\n if numpy.abs(delta[0]) <= self.tresh and numpy.abs(delta[1]) <= self.tresh and numpy.abs(delta[2]) <= self.tresh:\n k2x.append(x + delta[1])\n k2y.append(y + delta[0])\n sigmas.append(sigma+delta[2])\n# kds.append(delta[2])\n# kdx.append(delta[1])\n# kdy.append(delta[0])\n\n return numpy.asarray(k2x), numpy.asarray(k2y), numpy.asarray(sigmas), numpy.asarray(kds)",
"def hessian_cost(self, joint_angles: dict, ee_goals) -> np.ndarray:\n kinematic_map = self.kinematic_map[\"p0\"] # get map to all nodes from root\n end_effector_nodes = ee_goals.keys()\n H = np.zeros((self.n, self.n))\n for (\n ee\n ) in end_effector_nodes: # iterate through end-effector nodes, assumes sorted\n ee_path = kinematic_map[ee][\n 1:\n ] # [:-1] # no last node, only phys. joint locations\n t_ee = self.get_pose(joint_angles, ee).trans\n dg_ee_x = t_ee[0] - ee_goals[ee].trans[0]\n dg_ee_y = t_ee[1] - ee_goals[ee].trans[1]\n for (pdx, joint_p) in enumerate(ee_path): # algorithm fills Hess per column\n p_idx = int(joint_p[1:]) - 1\n sin_p_term = 0.0\n cos_p_term = 0.0\n for jdx in range(pdx, len(ee_path)):\n node_jdx = ee_path[jdx]\n theta_jdx = sum([joint_angles[key] for key in ee_path[0 : jdx + 1]])\n sin_p_term += self.a[node_jdx] * np.sin(theta_jdx)\n cos_p_term += self.a[node_jdx] * np.cos(theta_jdx)\n\n for (qdx, joint_q) in enumerate(\n ee_path[pdx:]\n ): # TODO: check if starting from pdx works\n qdx = qdx + pdx\n q_idx = int(joint_q[1:]) - 1\n sin_q_term = 0.0\n cos_q_term = 0.0\n for kdx in range(qdx, len(ee_path)):\n node_kdx = ee_path[kdx]\n theta_kdx = sum(\n [joint_angles[key] for key in ee_path[0 : kdx + 1]]\n )\n sin_q_term += self.a[node_kdx] * np.sin(theta_kdx)\n cos_q_term += self.a[node_kdx] * np.cos(theta_kdx)\n\n # assert(q_idx >= p_idx)\n H[p_idx, q_idx] += (\n 2.0 * sin_q_term * sin_p_term\n - 2.0 * dg_ee_x * cos_q_term\n + 2.0 * cos_p_term * cos_q_term\n - 2.0 * dg_ee_y * sin_q_term\n )\n\n return H + H.T - np.diag(np.diag(H))",
"def errSinvh(self):\n return np.matrix(np.diag(self.errSinvhD))",
"def harzburgite():\n\n rho = 3200.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 226.5; C[0,1] = 75.34; C[0,2] = 74.73; C[0,3] = -0.27; C[0,4] = -2.00; C[0,5] = 1.85\n C[1,0] = C[0,1]; C[1,1] = 242.8; C[1,2] = 73.68; C[1,3] = -3.6; C[1,4] = -1.91; C[1,5] = 4.14\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 230.; C[2,3] = -4.36; C[2,4] = -4.27; C[2,5] = -0.27\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 80.75; C[3,4] = 1.81; C[3,5] = -2.19\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 76.94; C[4,5] = -1.88\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 79.15\n\n return C, rho",
"def hessian(self, x, y, Rs, rho0, r200=100, center_x=0, center_y=0, angle=False):\n if angle is True:\n rho0_input = self.alpha2rho0(phi_E=rho0, Rs=Rs)\n else:\n rho0_input = rho0\n if Rs < 0.0001:\n Rs = 0.0001\n x_ = x - center_x\n y_ = y - center_y\n R = np.sqrt(x_**2 + y_**2)\n kappa = self.nfw2D(R, Rs, rho0_input, r200)\n gamma1, gamma2 = self.nfwGamma(R, Rs, rho0_input, r200, x_, y_)\n f_xx = kappa + gamma1\n f_yy = kappa - gamma1\n f_xy = gamma2\n return f_xx, f_yy, f_xy",
"def hessian_matrix_det(image, sigma=1, approximate=True):\n image = img_as_float(image)\n if image.ndim == 2 and approximate:\n raise NotImplementedError(\"approximate=True case not implemented\")\n # integral = integral_image(image)\n # return np.array(_hessian_matrix_det(integral, sigma))\n else: # slower brute-force implementation for nD images\n hessian_mat_array = _symmetric_image(hessian_matrix(image, sigma))\n return cp.linalg.det(hessian_mat_array)"
] | [
"0.81279826",
"0.80043125",
"0.72955406",
"0.7269672",
"0.7233574",
"0.7195273",
"0.7130439",
"0.70821714",
"0.7045756",
"0.6984545",
"0.68845636",
"0.6882204",
"0.68040764",
"0.6759905",
"0.6717752",
"0.6695646",
"0.6690471",
"0.66878116",
"0.65491647",
"0.6513133",
"0.65000296",
"0.64666605",
"0.6461637",
"0.64496255",
"0.6432345",
"0.6426354",
"0.6423603",
"0.6399819",
"0.6390212",
"0.63892674",
"0.6355698",
"0.6353656",
"0.63457584",
"0.6299223",
"0.6287318",
"0.6258453",
"0.6246488",
"0.61896646",
"0.61701965",
"0.61404246",
"0.61383116",
"0.61376166",
"0.61094886",
"0.60753566",
"0.6029642",
"0.6029642",
"0.6025773",
"0.6009788",
"0.5987639",
"0.5975266",
"0.5964774",
"0.5941369",
"0.5914138",
"0.5903466",
"0.5897911",
"0.5891445",
"0.58812886",
"0.5803413",
"0.5783484",
"0.57722706",
"0.5770599",
"0.5748078",
"0.57384104",
"0.5732682",
"0.57118356",
"0.56822103",
"0.5670168",
"0.5630966",
"0.56226677",
"0.5600491",
"0.559826",
"0.5594917",
"0.5554761",
"0.55042404",
"0.54878664",
"0.54831964",
"0.5482345",
"0.5459563",
"0.54455423",
"0.5441892",
"0.54040956",
"0.53812546",
"0.53344667",
"0.53265977",
"0.53240895",
"0.5300112",
"0.52900803",
"0.5289438",
"0.52827066",
"0.5275413",
"0.52626806",
"0.5259328",
"0.52538925",
"0.52415186",
"0.5237066",
"0.5216668",
"0.5205757",
"0.51975405",
"0.5195893",
"0.51894194"
] | 0.6848153 | 12 |
Weights for calculating Hessian | def hessian_factor(self, params, scale=None, observed=True):
raise NotImplementedError # pragma: no cover | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calculate_hessian(y, tx, w):\n txw = tx.dot(w)\n diag = sigmoid(txw)*(np.ones(txw.shape)-sigmoid(txw))\n return np.matmul(np.multiply(tx,diag).T,tx)",
"def hessian(self, params):\n\n if self.use_sqrt:\n return self.hessian_sqrt(params)\n else:\n return self.hessian_full(params)",
"def compute_hessian(self, dw, trn_X, trn_y, epsilon: float = 0.01):\n norm = torch.cat([w.view(-1) for w in dw]).norm()\n eps = epsilon / norm\n\n dalpha_pos = self.finite_difference(dw, trn_X, trn_y, eps, wrt='alpha')\n dalpha_neg = self.finite_difference(dw, trn_X, trn_y, -eps, wrt='alpha')\n hessian = [(p - n) / (2. * eps) for p, n in zip(dalpha_pos, dalpha_neg)]\n return hessian",
"def hessian(beta, X):\n w = sigmoid(np.dot(X, beta))\n w_vector = w * (1-w)\n \n return np.dot(X.T, X*w_vector)",
"def _get_hessian(self):\n if not self.sparse:\n hess = numpy.dot(self.jacobian_T, self.jacobian)\n else:\n hess = self.jacobian_T*self.jacobian\n return hess",
"def _update_samples_weight(self):\n m, n = 0, self.u.shape[0]\n T = self.u.shape[1]\n N = n + T\n d_0 = matrix(self.d_0.reshape(n, 1))\n\n # Linear Inequallity Constraints, Gx <= h\n G = matrix(-1 * np.eye(N))\n h = matrix(np.zeros(shape=(N, 1)))\n\n # Linear Equality Constraints, Ax = b\n A = matrix(np.concatenate((np.ones(shape=(T, 1)), np.zeros(shape=(n, 1))), axis=0).T)\n b = matrix(1.0)\n\n def F(x=None, z=None):\n if x is None: return 0, matrix(0.5, (N, 1))\n w = x[:T, :]\n phi = x[T:, :]\n reg_inv = 1 / self.reg\n\n weighted_u = np.dot(self.u, w) # n x 1\n scores = -1 * reg_inv * (weighted_u + phi) # n x 1\n\n # Numeric correction\n scores -= max(scores)\n\n # Auxilliaries\n weighted_scores_exp = np.multiply(d_0, np.exp(scores))\n sum_weighted_scores_exp = np.sum(weighted_scores_exp)\n sum_weighted_scores_exp_square = sum_weighted_scores_exp ** 2\n squared_weighted_scores_exp = np.square(weighted_scores_exp)\n weighted_scores_exp_mults = np.dot(weighted_scores_exp, weighted_scores_exp.T)\n uw_mult = np.multiply(self.u, weighted_scores_exp)\n uw_mult_sum = np.sum(np.multiply(self.u, weighted_scores_exp), axis=0)\n\n f = self.reg * np.log(sum_weighted_scores_exp) + self.kappa * np.sum(phi) # f(x)\n\n dfdw = -1 * uw_mult_sum.T / sum_weighted_scores_exp\n dfdphi = (-1 * weighted_scores_exp / sum_weighted_scores_exp) + self.kappa\n Df = np.concatenate((dfdw, dfdphi), axis=0) # Gradient\n\n mf = matrix(f)\n mDf = matrix(Df.T)\n if z is None:\n return mf, mDf\n # Assumes d_0 is uniform\n H = np.zeros(shape=(N, N)) # Hessian\n dfdwiwi = np.zeros(shape=(T, 1))\n dfdphiiphij = -1 * reg_inv * (np.tril(weighted_scores_exp_mults)) / sum_weighted_scores_exp_square\n dfdphiiphii = reg_inv * (np.multiply(weighted_scores_exp,\n sum_weighted_scores_exp - weighted_scores_exp) / sum_weighted_scores_exp_square)\n # dfdwiwj, dfwiphij are zeros\n dfdphiiwj = reg_inv * ((\n uw_mult * sum_weighted_scores_exp - weighted_scores_exp * uw_mult_sum) / sum_weighted_scores_exp_square)\n\n H[T:, T:] = dfdphiiphij\n H[T:, :T] = dfdphiiwj\n H_diagonal = np.concatenate((dfdwiwi, dfdphiiphii), axis=0)\n np.fill_diagonal(H, H_diagonal)\n\n mH = matrix(z[0] * H)\n return mf, mDf, mH\n\n prev_w = self.w\n prev_slacks = self.slacks\n try:\n wphi = solvers.cp(F, G=G, h=h, A=A, b=b)['x']\n self.w = wphi[:T, :]\n self.slacks = wphi[T:, :]\n except Exception as e: # Catch rank errors and continue to next iteration\n self.slacks = prev_slacks\n self.w = prev_w\n try:\n self.w = np.concatenate((self.w, [[1 / (len(self.w) + 1)]]), axis=0)\n except:\n self.w = np.concatenate((self.w, [1 / (len(self.w) + 1)]), axis=0)\n self.w /= np.sum(self.w)\n\n scores = ((-1 / self.reg) * np.squeeze(np.asarray(np.dot(self.u, self.w) + self.slacks))) + np.log(\n self.d_0) # Update according to Equation (6)\n return self.softmax(scores)",
"def get_hessian(self):\n return self.tc.hessian_func(\n self.pf.XS[:, :, 0].transpose(),\n self.pf.XS[:, :, 1].transpose(),\n self.pf.WS[:].transpose())",
"def get_weights(self):",
"def calculate_hessian(model, data, step_size):\n hessian = pd.DataFrame(0, index = np.arange(data.shape[0]), columns=pd.MultiIndex.from_product([model.output_names, model.perturbation_feature_pairs + model.feature_names], names=['model.output_names','model.feature_pairs']))\n for output_name in model.output_names:\n hessian_calculation_helpers = create_hessian_calculation_columns(model, output_name)\n mixed_derivative = (data.loc[:, hessian_calculation_helpers[0]].values - data.loc[:, hessian_calculation_helpers[1]].values - data.loc[:, hessian_calculation_helpers[2]].values + data.loc[:, hessian_calculation_helpers[3]].values) / (step_size * step_size)\n mixed_derivative *= np.sign(data.loc[:, hessian_calculation_helpers[1]].values + data.loc[:, hessian_calculation_helpers[2]].values - 2 * data.loc[:, hessian_calculation_helpers[0]].values)\n hessian.loc[:, zip([output_name] * len(model.perturbation_feature_pairs), model.perturbation_feature_pairs)] = mixed_derivative\n hessian.loc[:, zip([output_name] * len(model.feature_names), model.feature_names)] = np.array([(data.loc[:, (output_name,f)] - data.loc[:, (output_name,'core')]) / (step_size) for f in model.feature_names]).T\n return hessian",
"def hessian(self, x):\n h = self._hess(\n time_series=self.observed_data,\n a=x[0],\n b=x[1],\n c=x[2],\n sigma=self.sigma\n )\n return h",
"def _hessian(self):\n log_g = np.log(self._gv())\n log_f = np.log(self._fv())\n h_inf = np.mean((1 - log_g + log_f) / (self.y - self.err_inf) ** 2)\n return h_inf",
"def hessian(self, params, *args, **kwargs):\n if self._use_approx_cs:\n return approx_hess_cs(params, self.loglike,\n args=args, kwargs=kwargs)\n else:\n return approx_hess(params, self.loglike,\n args=args, kwargs=kwargs)",
"def hessian(f, x, s=_DEFAULT_STEP):\n x = np.asarray(x)\n n = len(x)\n e = s * np.eye(n)\n\n forw1 = np.zeros(n)\n forw2 = np.zeros((n, n))\n for i in range(n):\n forw1[i] = f(x + e[i])\n for j in range(i, n):\n forw2[i, j] = forw2[j, i] = f(x + e[i] + e[j])\n\n H = (forw2 - _colvec(forw1) - _rowvec(forw1) + f(x)) / s**2\n return H",
"def hessian(self, x, y, obj_factor):\n pass",
"def calc_hessian(t_Wbt, t_dlogFPdA, t_FP):\n\n tmp = t_Wbt.dimshuffle(0, 'x', 1) * t_FP # b, j, t\n tmp1 = tmp.dimshuffle(0, 'x', 1, 2) * t_dlogFPdA\n\n return T.dot(\n tmp1.dimshuffle(1, 0, 2, 3).reshape((self.n_l, -1)),\n t_dlogFPdA.dimshuffle(1, 0, 2, 3).reshape((self.n_l, -1)).T\n )",
"def update_weights_negative(self):\n eta = self.config.eta\n self.w_xh -= eta * (self.x.T @ self.h)\n self.w_th -= eta * (self.t.T @ self.h)\n self.w_ho -= eta * (self.h.T @ self.o) \n self.w_hz -= eta * (self.h.T @ self.z)",
"def hessian(self, x1, x2, out=None):\n raise NotImplementedError",
"def hessian(x):\n x_grad = np.gradient(x) \n hessian = np.empty((x.ndim, x.ndim) + x.shape, dtype=x.dtype) \n for k, grad_k in enumerate(x_grad):\n \"\"\"\n iterate over dimensions\n apply gradient again to every component of the first derivative.\n \"\"\"\n tmp_grad = np.gradient(grad_k) \n for l, grad_kl in enumerate(tmp_grad):\n hessian[k, l, :, :] = grad_kl\n return hessian",
"def weights(self) -> List[float]:",
"def compute_hessian_vector_product(self, function, arguments):",
"def gradient_and_hessian(self, y, params, natural_gradient=True):\n\n (mu,) = self.predict(params)\n\n grad = np.zeros(shape=(len(y), 1))\n grad[:, 0] = mu - y\n\n if natural_gradient:\n fisher_matrix = np.zeros(shape=(len(y), 1, 1))\n fisher_matrix[:, 0, 0] = mu\n\n grad = np.linalg.solve(fisher_matrix, grad)\n\n hess = np.ones(shape=(len(y), 1)) # we set the hessian constant\n else:\n hess = mu\n\n return grad, hess",
"def compute_hessian_logreg(tx, w):\n t = tx.dot(w)\n s = np.diag(sigmoid(t)*(1 - sigmoid(t)))\n\n return tx.T.dot(s).dot(tx)",
"def hessian(poly):\n return gradient(gradient(poly))",
"def get_Hessian(self, output_name=None): \n \n\n #return array([self.hessian[in1][in2][output_name] for (in1,in2) in product(self.param_names, self.param_names)])\n return array([self.hessian[in1][in2][output_name] for (in1,in2) in product(self.param_names, self.param_names)])",
"def get_hessian(phi, pred, t, dot_product, reg= 1, regression= \"logistic\"):\n R = np.eye(pred.shape[0])\n if regression == \"logistic\":\n for i in range(pred.shape[0]):\n R[i,i] = pred[i,0] * (1- pred[i,0])\n elif regression == \"probit\":\n for i in range(pred.shape[0]):\n y_n = pred[i,0]\n t_n = t[i,0] \n dotp = dot_product[i, 0]\n pdf = norm.pdf(dotp)\n\n term1 = 1/ (y_n * (1- y_n) + TOLERANCE)\n term2 = (y_n - t_n)/(y_n**2 * (1- y_n) + TOLERANCE)\n term3 = (y_n - t_n)/((1- y_n)**2 * y_n + TOLERANCE)\n term4 = (y_n - t_n)* dotp/(y_n * (1- y_n) * pdf + TOLERANCE)\n\n R[i,i] = (term1 - term2 + term3 - term4)*(pdf**2)\n\n # Add regularization\t\t\t\n hessian = np.matmul(np.matmul(phi.T, R), phi) + np.eye(phi.shape[1])/reg\n return hessian",
"def update_weights(self):\n\n\n self.w += self.learn_rate * (self.X.T.dot(self.T - self.Y)\n - self.reg_L1 * np.sign(self.w)\n - self.reg_L2 * 2*self.w)",
"def hessian(self, params, transformed=True):\n params = np.array(params, ndmin=1)\n\n return approx_hess_cs(params, self.loglike)",
"def weights(self):\r\n\t\treturn None",
"def SG(self, h, y):\n self.check_sg_weights()\n\n A = self.sg_weights[0] #(n, n)\n B = self.sg_weights[1] #(10, n)\n C = self.sg_weights[2] #(1, n)\n\n delta = np.matmul(h, A) + np.matmul(y, B) + C\n return delta",
"def calc_hessian(self, reuse_first=False):\n \n self.setup()\n \n # Create our 3D dictionary the first time we execute.\n if not self.hessian:\n for name1 in self.param_names:\n self.hessian[name1] = {}\n for name2 in self.param_names:\n self.hessian[name1][name2] = {}\n \n self.hessian_ondiag_case = OrderedDict()\n self.hessian_offdiag_case = OrderedDict()\n\n # Pull stepsizes from driver's parameters\n base_param = OrderedDict()\n stepsize = {}\n for key, item in self._parent.get_parameters().iteritems():\n \n if item.fd_step:\n stepsize[key] = item.fd_step\n else:\n stepsize[key] = self.default_stepsize\n\n # Diagonal terms in Hessian always need base point\n # Usually, we will have saved this when we calculated\n # the gradient.\n if reuse_first:\n base_param = self.base_param\n base_data = self.base_data\n else:\n # Pull initial state from driver's parameters\n for key, item in self._parent.get_parameters().iteritems():\n base_param[key] = item.evaluate()\n \n base_data = self._run_point(base_param)\n \n # Assemble input data\n # Cases : ondiag [fp, fm]\n deltas = [1, -1]\n for param in self.param_names:\n \n pcase = []\n for j_step, delta in enumerate(deltas):\n \n case = base_param.copy()\n case[param] += delta*stepsize[param]\n pcase.append({ 'param': case })\n \n self.hessian_ondiag_case[param] = pcase\n \n # Assemble input data\n # Cases : offdiag [fpp, fpm, fmp, fmm]\n deltas = [[1, 1],\n [1, -1],\n [-1, 1],\n [-1, -1]]\n for i, param1 in enumerate(self.param_names):\n \n offdiag = {}\n for param2 in self.param_names[i+1:]:\n \n pcase = []\n for delta in deltas:\n \n case = base_param.copy()\n case[param1] += delta[0]*stepsize[param1]\n case[param2] += delta[1]*stepsize[param2]\n pcase.append({ 'param': case })\n offdiag[param2] = pcase\n \n self.hessian_offdiag_case[param1] = offdiag\n \n # Run all \"cases\".\n # TODO - Integrate OpenMDAO's concurrent processing capability once it\n # is formalized. 
This operation is inherently paralellizable.\n \n # We don't need to re-run on-diag cases if the gradients were\n # calculated with Central Difference.\n if reuse_first and self.form=='central':\n for key, case in self.hessian_ondiag_case.iteritems():\n \n gradient_case = self.gradient_case[key]\n for ipcase, pcase in enumerate(case):\n \n gradient_ipcase = gradient_case[ipcase]\n pcase['data'] = gradient_ipcase['data'] \n else:\n for case in self.hessian_ondiag_case.values():\n for pcase in case:\n data = self._run_point(pcase['param'])\n pcase['data'] = data\n\n # Off-diag cases must always be run.\n for cases in self.hessian_offdiag_case.values():\n for case in cases.values():\n for pcase in case:\n pcase['data'] = self._run_point(pcase['param'])\n\n \n # Calculate Hessians - On Diagonal\n for key, case in self.hessian_ondiag_case.iteritems():\n \n eps = stepsize[key]\n \n for name in list(self.objective_names + \\\n self.eqconst_names + \\\n self.ineqconst_names):\n self.hessian[key][key][name] = \\\n diff_2nd_xx(case[0]['data'][name],\n base_data[name],\n case[1]['data'][name], eps)\n \n # Calculate Hessians - Off Diagonal\n for key1, cases in self.hessian_offdiag_case.iteritems():\n \n eps1 = stepsize[key1]\n for key2, case in cases.iteritems():\n \n eps2 = stepsize[key2]\n \n for name in list(self.objective_names + \\\n self.eqconst_names + \\\n self.ineqconst_names):\n self.hessian[key1][key2][name] = \\\n diff_2nd_xy(case[0]['data'][name],\n case[1]['data'][name],\n case[2]['data'][name],\n case[3]['data'][name],\n eps1, eps2)\n \n # Symmetry\n # (Should ponder whether we should even store it.)\n self.hessian[key2][key1][name] = \\\n self.hessian[key1][key2][name]",
"def gradient_200(weights, dev):\n\n @qml.qnode(dev, interface=None)\n def circuit(w):\n for i in range(3):\n qml.RX(w[i], wires=i)\n\n qml.CNOT(wires=[0, 1])\n qml.CNOT(wires=[1, 2])\n qml.CNOT(wires=[2, 0])\n\n qml.RY(w[3], wires=1)\n\n qml.CNOT(wires=[0, 1])\n qml.CNOT(wires=[1, 2])\n qml.CNOT(wires=[2, 0])\n\n qml.RX(w[4], wires=2)\n\n return qml.expval(qml.PauliZ(0) @ qml.PauliZ(2))\n\n gradient = np.zeros([5], dtype=np.float64)\n hessian = np.zeros([5, 5], dtype=np.float64)\n\n # QHACK #\n \n gradQuantities = np.zeros([5, 2], dtype=np.float64)\n \n s = 1e-6\n base = circuit(weights)\n for i in range(len(gradient)):\n modWeights = weights.copy()\n modWeights[i] += s\n gradQuantities[i, 0] = circuit(modWeights)\n modWeights[i] -= 2*s\n gradQuantities[i, 1] = circuit(modWeights)\n gradient[i] = gradQuantities[i, 0] - gradQuantities[i, 1]\n gradient /= 2*np.sin(s)\n \n for i in range(len(hessian)):\n for j in range(i, len(hessian[i])):\n if (i == j):\n hessian[i, i] = 4*(gradQuantities[i, 0] - 2*base + gradQuantities[i, 1])\n else:\n modWeights = weights.copy()\n modWeights[i] += s\n modWeights[j] += s\n hessian[i, j] += circuit(modWeights)\n modWeights[j] -= 2*s\n hessian[i, j] -= circuit(modWeights)\n modWeights[i] -= 2*s\n hessian[i, j] += circuit(modWeights)\n modWeights[j] += 2*s\n hessian[i, j] -= circuit(modWeights)\n hessian[j, i] = hessian[i, j]\n hessian /= (4*np.sin(s)**2)\n # QHACK #\n #error_check(gradient, hessian)\n return gradient, hessian, circuit.diff_options[\"method\"]",
"def calc_hessian_at(self, x: np.ndarray) -> np.ndarray:\n\n if self.hessian_f:\n # if the problem has knowledge about the hessian, use it directly without approximation\n return self.hessian_f(x)\n\n return hessian_approximation(self.f, x)",
"def update_weights_positive(self):\n eta = self.config.eta\n self.w_xh += eta * (self.x.T @ self.h)\n self.w_th += eta * (self.t.T @ self.h)\n self.w_ho += eta * (self.h.T @ self.o)\n self.w_hz += eta * (self.h.T @ self.z)",
"def ComputeWeights(self, p_float=..., p_float=..., p_float=..., *args, **kwargs):\n ...",
"def weights(self):\n return self.mul(self.P, self.mul(\n self.L * self.tril_mask + self.I,\n #self.U * self.triu_mask + self.s.diag()\n self.U * self.triu_mask + (self.sign_s * self.log_abs_s.exp()).diag()\n ))",
"def approx_hessian(f, x, epsilon):\n n = len(x)\n H = np.zeros((n, n))\n ei = np.zeros(n)\n for i in range(n):\n ei[i] = .5 * epsilon\n g1 = approx_gradient(f, x + ei, epsilon)\n g2 = approx_gradient(f, x - ei, epsilon)\n H[i, :] = (g1 - g2) / epsilon\n ei[i] = 0\n return H",
"def forward(self, weights):\n\n return (np.sum(np.square(weights))) * (self.lambd / 2)",
"def get_weights(self):\n \n w = torch.exp(self._weight) * self.mask_d + self._weight * self.mask_o\n\n w_squared_norm = (w ** 2).sum(-1, keepdim=True)\n \n w = self._diag_weight.exp() * w / w_squared_norm.sqrt()\n \n wpl = self._diag_weight + self._weight - 0.5 * torch.log(w_squared_norm) \n\n return w.t(), wpl.t()[self.mask_d.bool().t()].view(\n self.dim, self.in_features // self.dim, self.out_features // self.dim)",
"def get_gradients_hessian(self, loss, params):\n params = nest.flatten(params)\n with backend.get_graph().as_default(), backend.name_scope(self._name +\n \"/gradients\"):\n grads = gradients.gradients(loss, params)\n for grad, param in zip(grads, params):\n if grad is None:\n raise ValueError(\"Variable {} has `None` for gradient. \"\n \"Please make sure that all of your ops have a \"\n \"gradient defined (i.e. are differentiable). \"\n \"Common ops without gradient: \"\n \"K.argmax, K.round, K.eval.\".format(param))\n\n # WARNING: for now we do not support gradient clip\n # grads = self._clip_gradients(grads)\n\n v = [np.random.uniform(0, 1, size = p.shape) for p in params]\n for vi in v:\n vi[ vi < 0.5] = -1 \n vi[ vi >= 0.5] = 1 \n v = [tf.convert_to_tensor(vi, dtype = tf.dtypes.float32) for vi in v]\n\n vprod = tf.reduce_sum([ tf.reduce_sum(vi * grad) for vi, grad in zip(v, grads)])\n\n Hv = gradients.gradients(vprod, params)\n\n Hd = [ tf.abs(Hvi * vi) for Hvi, vi in zip(Hv, v)]\n\n return grads, Hd",
"def getWeights(self,dist,xin,yin):\r\n \r\n Ns = len(dist)\r\n \r\n # Construct the LHS matrix C\r\n C=np.ones((Ns+1,Ns+1))\r\n for i in range(0,Ns):\r\n C[i,i]=0\r\n for j in range(i+1,Ns):\r\n D = np.sqrt((xin[i]-xin[j])**2+(yin[i]-yin[j])**2)\r\n C[i,j] = self.semivariogram(D)\r\n C[j,i] = C[i,j]\r\n\r\n C[Ns,Ns]=0\r\n\r\n # Calculate the inverse of C \r\n Cinv = np.linalg.inv(C)\r\n \r\n # Loop through each model point and calculate the vector D\r\n gamma = np.ones((Ns+1,1))\r\n \r\n for j in range(0,Ns):\r\n gamma[j,0]= self.semivariogram( dist[j])\r\n # Solve the matrix to get the weights\r\n W = np.dot(Cinv,gamma)\r\n W = W[:-1,:]\r\n \r\n #print np.size(gamma,axis=0),np.size(gamma,axis=1) \r\n return 1.0/float(Ns)*np.ones((Ns,1))",
"def approx_hessian(f, x, epsilon, args=()):\n n = x.shape[0]\n npts = 1\n if len(x.shape) > 1:\n npts = x.shape[1]\n H = np.zeros((n, n, npts))\n ei = np.zeros(n)\n for i in range(n):\n ei[i] = .5 * epsilon\n g1 = approx_gradient(f, (x.T + ei).T, epsilon, args=args)\n g2 = approx_gradient(f, (x.T - ei).T, epsilon, args=args)\n H[i, ...] = np.reshape((g1 - g2) / epsilon, (n, npts))\n ei[i] = 0\n return H.squeeze()",
"def get_weights(self):\n return self.weights\n #print(W)",
"def init_weights(self):\n self._q_neuron.h(self._weights) \n self._q_neuron.x(self._weights)",
"def test_hessian():\n x, y = fwd.Variable(), fwd.Variable()\n rosen = 100.0*(y - x**2)**2 + (1 - x)**2.0\n rosen_hessian = lambda x, y: \\\n np.array([[1200*x**2-400*x+2, -400*x],\n [-400*x, 200]])\n rosen_hessian_returned = rosen.hessian_at({x: 1.0, y: 1.0})\n rosen_hessian_expected = rosen_hessian(1.0, 1.0)\n for i in range(2):\n for j in range(2):\n assert equals(rosen_hessian_returned[i, j],\n rosen_hessian_expected[i, j])",
"def calculateWeights(self):\n return self.distances #En lo que encontramos una funcion que represente",
"def sum_hessian(self, module, g_inp, g_out):\n return self._sum_hessian(module, g_inp, g_out)",
"def train(self):\n\n for i in range(self.c):\n mu_i = self.estimate_mu(i).T\n cov_i = self.estimate_sigma(i, mu_i)\n inv_sigma_i = np.linalg.inv(cov_i)\n P_i = self.estimate_P(i)\n Wi = -1 / 2 * inv_sigma_i\n wi = inv_sigma_i @ mu_i\n wi0 = -1 / 2 * mu_i.T @ inv_sigma_i @ mu_i - 1 / 2 * np.log(np.linalg.det(cov_i)) + np.log(P_i)\n self.weights.append([Wi, wi, wi0])\n return self.weights",
"def sum_hessian(\n self, module: Module, g_inp: Tuple[Tensor], g_out: Tuple[Tensor]\n ) -> Tensor:\n self._check_2nd_order_make_sense(module, g_out)\n return self._sum_hessian(module, g_inp, g_out)",
"def _getHessian(self):\n assert self.init, 'GP not initialised'\n assert self.fast is False, 'Not supported for fast implementation'\n\n if self.cache['Hessian'] is None:\n ParamMask = self.gp.getParamMask()['covar']\n std = sp.zeros(ParamMask.sum())\n H = self.gp.LMLhess_covar()\n It = (ParamMask[:, 0] == 1)\n self.cache['Hessian'] = H[It, :][:, It]\n\n return self.cache['Hessian']",
"def _fit(self):\n loss = 1e10\n weights = self._init_weights\n while loss > self._converge_epsilon:\n d_F = 2 * (self._input.t() * self._input *\n weights - self._input.t() * self._label)\n dd_F = 2 * self._input.t() * self._input\n weights = weights - dd_F.inv() * d_F\n loss = self._mse(weights)\n print('Error : {}'.format(loss))\n return weights",
"def weights(self):\n \n n = self.n\n lambda_ = self.alpha**2 * (n +self.kappa) - n\n \n c = .5 / (n + lambda_)\n Wc = np.full(2*n + 1, c)\n Wm = np.full(2*n + 1, c)\n Wc[0] = lambda_ / (n + lambda_) + (1 - self.alpha**2 + self.beta)\n Wm[0] = lambda_ / (n + lambda_)\n \n return Wm, Wc",
"def weighted_sum(W, X):\n\n if len(W) != len(X):\n print(\"Dimension of weight vector should be same as input vector.\")\n return\n\n else:\n H = 0\n\n for i in range(len(W)):\n H += (W[i] * X[i])\n \n return H",
"def u_weights(self):\n for i in range(self.n_inputs):\n self._q_neuron.cx(self._weights[i], self.inputs[i])",
"def hess_f(z, X, Y, _lambda):\r\n d = z.shape[0]\r\n w = z[:-1]\r\n beta = z[-1]\r\n hess = np.zeros((d, d))\r\n hess[:-1, :-1] = - np.einsum('ki,kj->ij', X * g(X.dot(w) + beta), X * g(-(X.dot(w) + beta)))\r\n hess[:-1, [-1]] = - np.einsum('ij,ik->kj', g(X.dot(w) + beta) * g(-(X.dot(w) + beta)), X) + 2 * _lambda\r\n hess[[-1], :-1] = hess[:-1, [-1]].T\r\n hess[-1, -1] = - np.dot(g(X.dot(w) + beta).T, g(-(X.dot(w) + beta))) + 2 * _lambda\r\n return hess",
"def learn(self, Xtrain, ytrain):\n numsamples = Xtrain.shape[0]\n Xless = Xtrain[:,self.params['features']]\n self.weights = np.dot(np.dot(np.linalg.inv(np.dot(Xless.T,Xless)/numsamples + (self.params['regwgt'] * np.identity(np.shape(Xless)[1]))), Xless.T),ytrain)/numsamples",
"def llhessian(store, beta):\n nobs = store['yvec'].shape[0]\n kreg = store['xmat'].shape[1]\n lamb = exp(dot(store['xmat'], beta))\n sum = zeros((kreg, kreg))\n for i in xrange(nobs):\n sum = sum + lamb[i] * outer(store['xmat'][i], store['xmat'][i])\n return -sum",
"def update_weights(self):\n activation, activation_p = self.feedforward()\n # initialize delta_weights\n delta_w = np.zeros(2)\n # simultaneous calculate delta_weights\n for i, element in enumerate(self.y):\n delta_w += (activation[i]-element)*(activation_p[i])*self.X[i]\n # update weight\n self.weights -= self.alfa*delta_w",
"def weights(self):\n return [x.numpy() for x in self.core.w]",
"def InitWeights(self):\n self.w = -1 + 2 * np.random.rand(self.num_of_inputs,)\n self.w0 = -1 + 2 * np.random.rand()",
"def learn(self, Xtrain, ytrain):\n # Dividing by numsamples before adding ridge regularization\n # to make the regularization parameter not dependent on numsamples\n numsamples = Xtrain.shape[0]\n Xless = Xtrain[:,self.params['features']]\n y = ytrain[:, np.newaxis]\n #self.weights = np.dot(np.dot(np.transpose(Xless), np.linalg.inv(np.dot(Xless, np.transpose(Xless))/numsamples) / numsamples), y) / numsamples\n #Solves with respect to w for the equation Xless * w = y: it computes the pseudo inverse, using singular values internally, for the matri Xlessx, avoiding the original singular matrix error.\n self.weights = np.linalg.lstsq(Xless, y)[0]",
"def apply_weights(self):\n w0_array = np.ones(self.N)*self.w0\n return w0_array + self.X.dot(self.w)",
"def __train_hebbs__(self):\n\n rho = self.__get_rho__()\n\n copied_train_data = np.copy(self.train_data)\n\n for curr_train_sample in tqdm(copied_train_data,\n disable=not self.verbose,\n postfix=f'Model training...'):\n\n train_sample_norm = curr_train_sample - rho\n\n assert len(train_sample_norm.shape) == 1, \\\n f'Flatten your input! Now dim is: {train_sample_norm.shape}'\n\n self.weights += np.outer(train_sample_norm, train_sample_norm)\n\n diagonal_values = np.diag(self.weights) # extracts diagonal values from matrix\n diagonal_weights = np.diag(diagonal_values) # creates diagonal matrix from diagonal values for weights\n\n self.weights = self.weights - diagonal_weights\n self.weights = self.weights / len(self.train_data)",
"def hes_res(self, params, **kwargs):\n e = kwargs.get(\"e\", self.problem.data_e)\n\n hes = self.hessian.eval(params, **kwargs)\n for i, e_i in enumerate(e):\n hes[:, :, i] = - hes[:, :, i] / e_i\n\n return hes, self.jac_res(params, **kwargs)",
"def num_hessian(x0, cost_function, epsilon = 1.e-5, linear_approx = False, *args):\n # The next line calculates an approximation to the first derivative\n f1 = sp.optimize.approx_fprime(x0, cost_function, *args) \n # This is a linear approximation, efficient if cost function is linear\n if linear_approx:\n f1 = sp.matrix(f1)\n return f1.transpose() * f1 \n # Allocate space for the hessian\n n = x0.shape[0]\n hessian = sp.zeros ((n, n))\n # The next loop fill in the matrix\n xx = x0\n for j in xrange(n):\n xx0 = xx[j] # Store old value\n xx[j] = xx0 + epsilon # Perturb with finite difference\n # Recalculate the partial derivatives for this new point\n f2 = sp.optimize.approx_fprime(x0, cost_function, *args) \n hessian[:, j] = (f2 - f1)/epsilon # scale...\n xx[j] = xx0 # Restore initial value of x0 \n return hessian",
"def weight(self):",
"def _update_weights(self, xi, target):\n output = self.net_input(xi)\n error= (target - output)\n self.w_[1:] += self.eta * xi.dot(error)\n self.w_[0] += self.eta * error\n cost =0.5* error**2\n return cost",
"def hessian(f, s, p, dx=1e-6, gmix=False, k =['All']):\n import numpy\n N = (p.m['n'] - 1)\n H = numpy.zeros(shape=(N,N))\n for m in range(1, N + 1):\n for z in range(1, N + 1):\n H[m - 1, z - 1] = FD(f, s, p, 2, z, m, dx, gmix, k)\n \n return H",
"def initialize_weights_xavier(self):\n\t\tself.weights = [np.random.uniform(-1/sqrt(size1), 1/sqrt(size1)) for size1, size2 in zip(self.sizes[:-1], self.sizes[1:])]\n\t\tself.biases = [np.zeros([size, ]) for size in self.sizes[1:]]",
"def _init_weights(self):\n nn.init.xavier_normal_(self.out.weight)",
"def hessianstructure(self):\n pass",
"def update_weights(weights, alpha, y_true, y_pred):\n def change_labels(arr):\n for i,a in enumerate(arr):\n if a == 0:\n arr[i] = -1\n return arr \n \n y_true, y_pred = change_labels(y_true), change_labels(y_pred)\n w_hat = weights * np.exp(-alpha * y_true * y_pred)\n return w_hat / sum(w_hat)",
"def ml_weights(inputs, targets):\n Phi = np.matrix(inputs)\n targets = np.matrix(targets).reshape((len(targets),1))\n weights = linalg.inv(Phi.transpose()*Phi)*Phi.transpose()*targets\n return np.array(weights).flatten()",
"def gradient_weight(X, Y, model):\n W = model['weight']\n b = model['bias']\n weight_decay = model['weight_decay']\n\n # YOUR CODE HERE\n # Write the gradient with respect to the weights.\n return np.add(np.subtract(np.dot(np.transpose(predict(X, model)), X), np.dot(np.transpose(Y), X)), 2 * LAMBDA * np.transpose(model['weight'])) #np.zeros((X.shape[1], Y.shape[1]))",
"def hebb_rule(dados):\n # Passo 0: Inicializar todos os pesos\n n = len(dados[0][0]) - 1\n weight = zeros(n + 1)\n print(weight)\n\n # Passo 1: Para cada vetor de treinamento na entrada e par de objetivos na saída (e : s)\n for _, dado in enumerate(dados):\n # Passo 2: Ajuste as ativações para as unidades de entrada\n x = dado[0]\n # Passo 3: Ajuste a ativação para a unidade de saída\n y = dado[1]\n # Passo 4: Ajuste os pesos e o bias\n for j in range(n):\n weight[j] += x[j] * y\n weight[n] += + y # Bias é weight[n]\n print(weight)",
"def dcweights(x):\n\n #Form the vanderMonde matrix:\n A=np.vander(x).T\n A=A[::-1,:]\n F=0*A\n n=snp.arange(len(x))+1\n for i in range(len(x)-1):\n a=x[i]; b=x[i+1]\n f=(b**n-a**n)/n\n F[:,i]=f\n w=snp.solve(A,F)\n\n return w[:,:-1]",
"def get_weights(self):\n return [self.w, self.b]",
"def test_xml_hessian(xml_parser_disp):\n\n hessian = xml_parser_disp.get_hessian()\n assert hessian.shape == (24, 24)\n test = np.array([-0.46355041, 0. , 0. , -0.05917741])\n np.testing.assert_allclose(hessian[0][0:4], test)\n test = np.array([ 0.11487952, 0.08151255, 0.08370068, 0.11487952])\n np.testing.assert_allclose(hessian[15][0:4], test)\n test = np.array([ 0.11431486, -0.0818301 ])\n np.testing.assert_allclose(hessian[15][9:11], test)",
"def calcweighted(store):\n nobs = store['yvec'].shape[0]\n store['Upper'].put(-store['rho'], range(0, nobs - 1), range(1, nobs))\n store['Upper'].matvec(store['yvec'], store['yvectil'])\n for i in xrange(store['xmat'].shape[1]):\n store['Upper'].matvec(store['xmat'][:, i], store['xmattil'][:, i])",
"def objective(self,w):\n l = 0\n for i in range(len(self.x)):\n # Each example contributes log(sigma(y_i * x_i . w))\n l -= log(sigmoid(self.y[i] * np.dot(w, self.x[i,:])))\n # regularisation 1/2 * alpha * ||w||^2\n l += 0.5 * self.alpha * np.dot(w,w)\n return l",
"def get_weights(self):\n\t\treturn self.V",
"def update_recurrent_weights_step(self):\n \n # update weights: hebbian term\n self.delta_Wee=self.learn_rate*(self.rr[0:self.N_e]-self.input_mean)*\\\n (self.rr[0:self.N_e].T-self.input_mean)\n \n self.W_ee+=self.dt*self.delta_Wee\n\n # update weights: normalize to fixed mean of incoming and outgoing weights\n self.W_ee-=(self.W_ee.mean(axis=1)-self.W_av_star)[:,np.newaxis]\n self.W_ee-=(self.W_ee.mean(axis=0)-self.W_av_star)[np.newaxis,:]\n \n # clip weights \n self.W_ee=np.clip(self.W_ee,0,self.W_max_ee)\n \n # update excitatory weights in the big weight matrix\n self.W[:self.N_e,:self.N_e]=self.W_ee",
"def compute_neural_weights(self, dist=\"zero\"):\n # TODO: Should this begin with zeros? Or can we initialize the matrices with random dist?\n harmonies = fortran_reshape(self.Hcc, (self.nSym, self.nSym))\n W = self.initializer((self.nSym, self.nSym), dist=dist)\n\n # Update using the Hcc infos:\n for i in range(self.nSym):\n w_i = self.TP[:, i].unsqueeze(1) # take the i-th binding\n for j in range(i+1): # just operate in the lower triangle, the rest is symmetric\n w_j = self.TP[:, j].unsqueeze(1) # take the j-th binding\n if i != j:\n k = 1\n else:\n k = 0.5\n W += k * (harmonies[i, j] * (w_i @ w_j.T + w_j @\n w_i.T)) / ((w_i.T @ w_i) @ (w_j.T @ w_j))\n return W",
"def update_weights(self):\n\t\tpass",
"def _update_weights(self, xi, target):\n output = self.net_input(xi)\n error = (target - output)\n self.w_[1:] += self.eta * xi.dot(error)\n self.w_[0] += self.eta * error\n cost = 0.5 * error**2\n return cost",
"def _update_weights(self, xi, target):\n output = self.net_input(xi)\n error = (target - output)\n self.w_[1:] += self.eta * xi.dot(error)\n self.w_[0] += self.eta * error\n cost = 0.5 * error**2\n return cost",
"def euclidean_cost_hessian(self, J: dict, K: dict, r: dict):\n H = 0\n for e in J.keys():\n J_e = J[e]\n N = J_e.shape[1]\n H += J_e.T @ J_e\n # TODO: Try with einsum for speed, maybe?\n for idx in range(N):\n for jdx in range(idx, N):\n dH = K[e][:, idx, jdx].T @ r[e]\n H[idx, jdx] -= dH\n if idx != jdx:\n H[jdx, idx] -= dH\n return H",
"def weights(self):\n return np.array(self.intensity[self.idx])",
"def hessian(self):\n\n hess_size = 3 * len(self.molecule.molecule['input'])\n\n # output.dat is the psi4 output file.\n with open('output.dat', 'r') as file:\n\n lines = file.readlines()\n\n for count, line in enumerate(lines):\n if '## Hessian' in line or '## New Matrix (Symmetry' in line:\n # Set the start of the hessian to the row of the first value.\n hess_start = count + 5\n break\n else:\n raise EOFError('Cannot locate Hessian matrix in output.dat file.')\n\n # Check if the hessian continues over onto more lines (i.e. if hess_size is not divisible by 5)\n extra = 0 if hess_size % 5 == 0 else 1\n\n # hess_length: # of cols * length of each col\n # + # of cols - 1 * #blank lines per row of hess_vals\n # + # blank lines per row of hess_vals if the hess_size continues over onto more lines.\n hess_length = (hess_size // 5) * hess_size + (hess_size // 5 - 1) * 3 + extra * (3 + hess_size)\n\n hess_end = hess_start + hess_length\n\n hess_vals = []\n\n for file_line in lines[hess_start:hess_end]:\n # Compile lists of the 5 Hessian floats for each row.\n # Number of floats in last row may be less than 5.\n # Only the actual floats are added, not the separating numbers.\n row_vals = [float(val) for val in file_line.split() if len(val) > 5]\n hess_vals.append(row_vals)\n\n # Remove blank list entries\n hess_vals = [elem for elem in hess_vals if elem]\n\n reshaped = []\n\n # Convert from list of (lists, length 5) to 2d array of size hess_size x hess_size\n for old_row in range(hess_size):\n new_row = []\n for col_block in range(hess_size // 5 + extra):\n new_row += hess_vals[old_row + col_block * hess_size]\n\n reshaped.append(new_row)\n\n hess_matrix = array(reshaped)\n\n # Cache the unit conversion.\n conversion = 627.509391 / (0.529 ** 2)\n hess_matrix *= conversion\n\n check_symmetry(hess_matrix)\n\n return hess_matrix",
"def get_weights(self):\n return self.__weights",
"def apply_weights(self):\n return self.X.dot(self.get_weights())",
"def compute_gradient(self, X, y, weights):\n sigmoid = self.sigmoid(np.dot(X, weights))\n return np.dot(X.T, y - sigmoid)",
"def _whiten_wls(mat, weights):\n\n if weights.shape[0] != mat.shape[0]:\n raise ValueError(\n \"The number of weights must be the same as the number of observations\"\n )\n if mat.ndim == 1:\n return mat * np.sqrt(weights)\n elif mat.ndim == 2:\n # return np.column_stack([x[:,0], np.sqrt(weights)[:, None]*x[:,1:]])\n return np.sqrt(weights)[:, None] * mat",
"def weights(err):\n w = np.power(err, -2)\n w/= np.sum(w)\n return w",
"def get_weights(self):\r\n return self.weights",
"def get_weights(self):\r\n return self.weights",
"def hessian_weight_location_location(self) -> np.ndarray:\n return -self.location",
"def hessian_full(self, params):\n\n if type(params) is not MixedLMParams:\n params = MixedLMParams.from_packed(params, self.k_fe,\n self.use_sqrt)\n\n fe_params = params.get_fe_params()\n cov_re = params.get_cov_re()\n try:\n cov_re_inv = np.linalg.inv(cov_re)\n except np.linalg.LinAlgError:\n cov_re_inv = None\n\n # Blocks for the fixed and random effects parameters.\n hess_fe = 0.\n hess_re = np.zeros((self.k_re2, self.k_re2), dtype=np.float64)\n hess_fere = np.zeros((self.k_re2, self.k_fe),\n dtype=np.float64)\n\n fac = self.n_totobs\n if self.reml:\n fac -= self.exog.shape[1]\n\n rvir = 0.\n xtvix = 0.\n xtax = [0.,] * self.k_re2\n B = np.zeros(self.k_re2, dtype=np.float64)\n D = np.zeros((self.k_re2, self.k_re2), dtype=np.float64)\n F = [[0.,]*self.k_re2 for k in range(self.k_re2)]\n for k in range(self.n_groups):\n\n exog = self.exog_li[k]\n ex_r = self.exog_re_li[k]\n ex2_r = self.exog_re2_li[k]\n\n # The residuals\n expval = np.dot(exog, fe_params)\n resid = self.endog_li[k] - expval\n\n viexog = _smw_solve(1., ex_r, ex2_r, cov_re, cov_re_inv,\n exog)\n xtvix += np.dot(exog.T, viexog)\n vir = _smw_solve(1., ex_r, ex2_r, cov_re, cov_re_inv,\n resid)\n rvir += np.dot(resid, vir)\n\n for jj1,mat1 in self._gen_dV_dPsi(ex_r):\n\n hess_fere[jj1,:] += np.dot(viexog.T,\n np.dot(mat1, vir))\n if self.reml:\n xtax[jj1] += np.dot(viexog.T, np.dot(mat1, viexog))\n\n B[jj1] += np.dot(vir, np.dot(mat1, vir))\n E = _smw_solve(1., ex_r, ex2_r, cov_re, cov_re_inv,\n mat1)\n\n for jj2,mat2 in self._gen_dV_dPsi(ex_r, jj1):\n Q = np.dot(mat2, E)\n Q1 = Q + Q.T\n vt = np.dot(vir, np.dot(Q1, vir))\n D[jj1, jj2] += vt\n if jj1 != jj2:\n D[jj2, jj1] += vt\n R = _smw_solve(1., ex_r, ex2_r, cov_re,\n cov_re_inv, Q)\n rt = np.trace(R) / 2\n hess_re[jj1, jj2] += rt\n if jj1 != jj2:\n hess_re[jj2, jj1] += rt\n if self.reml:\n F[jj1][jj2] += np.dot(viexog.T,\n np.dot(Q, viexog))\n\n hess_fe -= fac * xtvix / rvir\n\n hess_re = hess_re - 0.5 * fac * (D/rvir - np.outer(B, B) / rvir**2)\n\n hess_fere = -fac * hess_fere / rvir\n\n if self.reml:\n for j1 in range(self.k_re2):\n Q1 = np.linalg.solve(xtvix, xtax[j1])\n for j2 in range(j1 + 1):\n Q2 = np.linalg.solve(xtvix, xtax[j2])\n a = np.trace(np.dot(Q1, Q2))\n a -= np.trace(np.linalg.solve(xtvix, F[j1][j2]))\n a *= 0.5\n hess_re[j1, j2] += a\n if j1 > j2:\n hess_re[j2, j1] += a\n\n # Put the blocks together to get the Hessian.\n m = self.k_fe + self.k_re2\n hess = np.zeros((m, m), dtype=np.float64)\n hess[0:self.k_fe, 0:self.k_fe] = hess_fe\n hess[0:self.k_fe, self.k_fe:] = hess_fere.T\n hess[self.k_fe:, 0:self.k_fe] = hess_fere\n hess[self.k_fe:, self.k_fe:] = hess_re\n\n return hess",
"def computeW(self):\n E = np.where(self.v > 0, 1, -1)\n # theshold the connections to only -1,1\n binary_weights = np.where(self.c > 0, 1, self.c)\n binary_weights = np.where(binary_weights < 0, -1, binary_weights)\n W = np.sum(binary_weights * np.dot(E.reshape(-1,1), E.reshape(1,-1))) # W = C * E * E\n self.W = W\n if np.sum(binary_weights) != 0:\n self.W = self.W / np.sum(binary_weights) # W / W*\n return self.W",
"def get_alternate_weights(self, epsilon):\n new_eps = epsilon\n old_eps = self.epsilon\n\n # A simple version of generic reweighting code\n # w = self.weights\n # w /= torch.exp(-0.5*sqd / old_eps**2.)\n # w *= torch.exp(-0.5*sqd / new_eps**2.)\n # w /= sum(w)\n\n if new_eps == 0:\n w = self.weights\n # Remove existing distance-based weight contribution\n w /= torch.exp(-0.5 * self.sqd / old_eps**2.)\n # Replace with a indicator function weight contribution\n w = torch.where(\n self.sqd==0.,\n w,\n torch.zeros_like(w)\n )\n else:\n # TODO Avoid need to normalise by always using log weights?\n # Normalising sqd part 1\n # Ignore distances if weight already zero\n # (to avoid rare possibility of setting all weights to zero)\n sqd_pos_weight = torch.where(\n self.weights > 0,\n self.sqd,\n torch.full_like(self.sqd, self.sqd.max())\n )\n # Normalising sqd part 2\n # Reduce chance of exponentiation giving zero weights\n sqd_norm = sqd_pos_weight - sqd_pos_weight.min()\n # A more efficient way to do the generic case\n a = 0.5 * (old_eps**-2. - new_eps**-2.)\n w = self.weights * torch.exp(sqd_norm*a)\n\n wsum = w.sum()\n if wsum > 0.:\n w /= wsum\n\n return w",
"def getWeights(self):\n return self.W1, self.W2"
] | [
"0.7432778",
"0.72551763",
"0.7211145",
"0.7205693",
"0.71838",
"0.70502096",
"0.69169587",
"0.6911036",
"0.68722624",
"0.6833038",
"0.6738732",
"0.6719632",
"0.6706449",
"0.6674543",
"0.6654394",
"0.663828",
"0.65999717",
"0.65776294",
"0.65616745",
"0.65462524",
"0.6495858",
"0.6470106",
"0.6468748",
"0.6461925",
"0.6444993",
"0.6423074",
"0.64223295",
"0.6419997",
"0.6416347",
"0.6406596",
"0.63997173",
"0.6374703",
"0.6368209",
"0.6363969",
"0.63485265",
"0.6329259",
"0.6325759",
"0.6325287",
"0.63244253",
"0.6303258",
"0.6301682",
"0.6291445",
"0.6284288",
"0.62696177",
"0.6232104",
"0.6224195",
"0.6222468",
"0.62178344",
"0.6186146",
"0.61738425",
"0.61702764",
"0.61689734",
"0.6165226",
"0.6163484",
"0.61624724",
"0.61619234",
"0.61590815",
"0.61440516",
"0.61404526",
"0.61363786",
"0.612997",
"0.61254805",
"0.61159486",
"0.61150175",
"0.6107331",
"0.6105253",
"0.6099969",
"0.6099681",
"0.6089748",
"0.6072655",
"0.6057507",
"0.60508525",
"0.6042294",
"0.60417104",
"0.6040026",
"0.6037778",
"0.6031287",
"0.6024418",
"0.6015237",
"0.60151786",
"0.60089725",
"0.60012174",
"0.5997608",
"0.5992321",
"0.5992321",
"0.5992017",
"0.59793717",
"0.5970492",
"0.5968149",
"0.59581286",
"0.59559697",
"0.5949236",
"0.5948842",
"0.59467775",
"0.59467775",
"0.59442246",
"0.59416676",
"0.5935857",
"0.5933735",
"0.5931187"
] | 0.6014805 | 80 |
If no start_params are given, use reasonable defaults. | def _get_start_params(self, start_params=None):
if start_params is None:
if hasattr(self, 'start_params'):
start_params = self.start_params
elif self.exog is not None:
# fails for shape (K,)?
start_params = [0] * self.exog.shape[1]
else: # pragma: no cover
raise ValueError("If exog is None, then start_params should "
"be specified")
return start_params | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _apply_params(self):\n config = self.get_startup_config()\n # Pass true to _set_params so we know these are startup values\n self._set_params(config, True)",
"def start( *args, **kwargs ):",
"def ReviewServiceArgs(cls, start = False):\n return (start,)",
"def prep_streaming_params(self, stop: Optional[List[str]] = None) -> Dict[str, Any]:\n params = self._invocation_params\n # if params[\"best_of\"] != 1:\n # raise ValueError(\"OpenAI only supports best_of == 1 for streaming\")\n if stop is not None:\n if \"stop\" in params:\n raise ValueError(\"`stop` found in both the input and default params.\")\n params[\"stop\"] = stop\n params[\"stream\"] = True\n return params",
"def __init__(__self__, *,\n start_time: Optional[pulumi.Input[str]] = None):\n if start_time is not None:\n pulumi.set(__self__, \"start_time\", start_time)",
"def _use_default_params(self):\n self.params = {\n # Desktop window params\n 'pos': (100, 100),\n 'lock_pos': False,\n # Font params\n 'default_font': 'Sans 9',\n # Lessons colors\n 'lecture_color': '#009566660000',\n 'laboratory_color': '#987600000000',\n 'practice_color': '#188820eda89b',\n 'non_color': '#0000849acdf4',\n 'day_color': '#000000000000',\n # Window style\n 'full_transparent': True,\n 'window_color': '#5ad65ad65ad6',\n 'transparent_percent': 50.0,\n # View schedule settings\n 'view_sch': [True, True, True, True, True]\n }\n self.save_params()",
"def defaultRegridParams(self):\n\n casalog.origin(\"ParallelDataHelper\")\n \n if self.__args['mode'] == 'channel' or self.__args['mode'] == 'channel_b':\n self.__args['start'] = str(self.__args['start'])\n self.__args['width'] = str(self.__args['width'])\n \n elif self.__args['mode'] == 'velocity':\n restfreq = self.__args['restfreq']\n if restfreq == \"\" or restfreq.isspace():\n raise ValueError, \"Parameter restfreq must be set when mode='velocity'\"\n \n if self.__args['start'] == 0:\n self.__args['start'] = ''\n \n if self.__args['width'] == 1:\n self.__args['width'] = ''\n \n\n # Check if the parameter has valid velocity units\n if not self.__args['start'] == '':\n if (qa.quantity(self.__args['start'])['unit'].find('m/s') < 0):\n raise TypeError, 'Parameter start does not have valid velocity units'\n \n if not self.__args['width'] == '':\n if (qa.quantity(self.__args['width'])['unit'].find('m/s') < 0):\n raise TypeError, 'Parameter width does not have valid velocity units'\n \n elif self.__args['mode'] == 'frequency':\n if self.__args['start'] == 0:\n self.__args['start'] = ''\n if self.__args['width'] == 1:\n self.__args['width'] = ''\n \n # Check if the parameter has valid frequency units\n if not self.__args['start'] == '':\n if (qa.quantity(self.__args['start'])['unit'].find('Hz') < 0):\n raise TypeError, 'Parameter start does not have valid frequency units'\n \n if not self.__args['width'] == '':\n if (qa.quantity(self.__args['width'])['unit'].find('Hz') < 0):\n raise TypeError, 'Parameter width does not have valid frequency units' \n \n start = self.__args['start']\n width = self.__args['width']\n \n return start, width",
"def set_default_parameters(self):\n super().set_default_parameters()",
"def start(self, **kwargs):\n pass",
"def start(self, **kwargs):\n pass",
"def init_params(self):\n blah",
"def pre_setup(self, sch_params: Optional[Mapping[str, Any]]) -> Optional[Mapping[str, Any]]:\n return sch_params",
"def initialize_params(self, params):\n pass",
"def __init__(__self__, *,\n end: pulumi.Input[str],\n start: pulumi.Input[str]):\n pulumi.set(__self__, \"end\", end)\n pulumi.set(__self__, \"start\", start)",
"def set_params(self, params: Dict):\n\n if params['training_instances'] is not None:\n self.training_instances = params['training_instances']\n if params['n'] is not None:\n self.n = params['n']\n if params['lda'] is not None:\n self.lda = params['lda']\n if params['verbose'] is not None:\n self.verbose = params['verbose']\n\n self.num_features = self.training_instances[0].get_feature_count()\n self.w = None\n self.b = None",
"def _init_start(self):\n def start(core, args):\n task = ' '.join(args.task) if args.task else ''\n return core.start(task=task)\n\n usage = 'stl start [task]'\n desc = (\n 'make a log that you are starting to work'\n )\n\n subp = self.subparsers.add_parser(\n 'start', usage=usage, description=desc, help=desc)\n\n subp.add_argument(\n 'task', nargs=argparse.REMAINDER,\n help='the task that you are about to start working on')\n\n subp.set_defaults(func=start)",
"def test_startup_params(self):\n self.assertTrue(self.driver._protocol._param_dict.get(\"foo\"), 10)\n self.assertTrue(self.driver._protocol._param_dict.get(\"bar\"), 15)\n # make sure some value isnt the default value\n self.driver._protocol._param_dict.update(\"bar=20\")\n self.assertTrue(self.driver._protocol._param_dict.get(\"bar\"), 20)\n self.driver._protocol._param_dict.update(\"baz=30\")\n self.assertTrue(self.driver._protocol._param_dict.get(\"baz\"), 30)\n\n # pretend to manually adjust a few things:\n self.driver._protocol._param_dict.update(\"foo=1000\")\n self.assertTrue(self.driver._protocol._param_dict.get(\"foo\"), 1000)\n self.driver._protocol._param_dict.update(\"bar=1500\")\n self.assertTrue(self.driver._protocol._param_dict.get(\"bar\"), 1500)\n self.driver._protocol._param_dict.update(\"baz=2000\")\n self.assertTrue(self.driver._protocol._param_dict.get(\"baz\"), 2000)\n\n # pretend to go through the motions of a startup sequence\n self.driver.set_init_params({'foo': 100, \"bar\": 150, \"baz\": 200})\n\n # Now a virtual method in the protocol that asserts when not implemented\n # behavior proven in derived protocol classes\n # self.driver.apply_startup_params()\n\n # check the values on the other end\n # running_config = self.driver._protocol.get_cached_config()\n\n # confirm that the default values were set back appropriately.\n # self.assertTrue(self.driver._protocol._param_dict.get(\"foo\"), 100)\n # self.assertTrue(self.driver._protocol._param_dict.get(\"bar\"), 150)\n # self.assertTrue(self.driver._protocol._param_dict.get(\"baz\"), 2000)\n # self.assertTrue(self.driver._protocol._param_dict.get(\"bat\"), 40)\n\n ##### Integration tests for startup config in the SBE37 integration suite",
"def update_start_values(self, params):\n allparwids = {}\n for comp in self.fit_components.values():\n if comp.usebox is not None and comp.usebox.IsChecked():\n for name, parwids in comp.parwids.items():\n allparwids[name] = parwids\n\n for pname, par in params.items():\n if pname in allparwids:\n allparwids[pname].value.SetValue(par.value)",
"def set_params(self):\r\n pass",
"def set_params(self, **kwargs):\n if 'nbins' in kwargs:\n self._nbins = kwargs['nbins']\n if self._nbins != 'auto':\n self._nbins = int(self._nbins)\n if 'symmetric' in kwargs:\n self._symmetric = kwargs['symmetric']\n if 'prune' in kwargs:\n prune = kwargs['prune']\n if prune is not None and prune not in ['upper', 'lower', 'both']:\n raise ValueError(\n \"prune must be 'upper', 'lower', 'both', or None\")\n self._prune = prune\n if 'min_n_ticks' in kwargs:\n self._min_n_ticks = max(1, kwargs['min_n_ticks'])\n if 'steps' in kwargs:\n steps = kwargs['steps']\n if steps is None:\n self._steps = [1, 1.5, 2, 2.5, 3, 4, 5, 6, 8, 10]\n else:\n self._steps = self._validate_steps(steps)\n self._extended_steps = self._staircase(self._steps)\n if 'integer' in kwargs:\n self._integer = kwargs['integer']",
"def set_start_time():\n __start = current_time_milli()",
"def start(total_param):\n global start_time\n global total\n\n if type(total_param) is list:\n total_param = len(total_param)\n if type(total_param) is not int:\n sys.exit(\"bad total_param. Should be list or int.\")\n\n start_time = time.time()\n total = total_param",
"def _default_params(self) -> Dict[str, Any]:\n normal_params = {\n \"temperature\": self.temperature,\n \"max_tokens\": self.max_tokens,\n \"top_p\": self.top_p,\n \"frequency_penalty\": self.frequency_penalty,\n \"presence_penalty\": self.presence_penalty,\n \"n\": self.n,\n # \"best_of\": self.best_of,\n \"request_timeout\": self.request_timeout,\n \"logit_bias\": self.logit_bias,\n }\n return {**normal_params, **self.model_kwargs}",
"def _set_start(self, coordinates):\n self._start = coordinates",
"def _SetRunParameters(self, params: Mapping[str, Any]) -> None:\n # Ideally YCSB should be refactored to include a function that just takes\n # commands for a run, but that will be a large refactor.\n FLAGS['ycsb_run_parameters'].unparse()\n FLAGS['ycsb_run_parameters'].parse([f'{k}={v}' for k, v in params.items()])",
"def set_params(self, **params):\n if('threshold' in params.keys()):\n self.threshold = params['threshold']\n if('subsample' in params.keys()):\n self.subsample = params['subsample']\n if('estimator' in params.keys()):\n self.estimator = params['estimator']\n if('n_folds' in params.keys()):\n self.n_folds = params['n_folds']\n if('stratify' in params.keys()):\n self.stratify = params['stratify']\n if('random_state' in params.keys()):\n self.random_state = params['random_state']\n if('n_jobs' in params.keys()):\n self.n_jobs = params['n_jobs']",
"def __init__(**params):",
"def test_call_default_params(self):\r\n\r\n # adapted from test_app.test_cd_hit.test_cdhit_clusters_from_seqs\r\n\r\n exp = {'0': 'R27DLI_4812',\r\n '1': 'U1PLI_7889',\r\n '2': 'W3Cecum_4858',\r\n '3': 'R27DLI_3243',\r\n }\r\n app = GenericRepSetPicker(params={'Algorithm': 'first',\r\n 'ChoiceF': first_id})\r\n obs = app(self.tmp_seq_filepath, self.tmp_otu_filepath)\r\n self.assertEqual(obs, exp)",
"def start_workflow(self, **params):\n raise NotImplementedError",
"def set_params(self, **kwargs) -> NoReturn:\n pass",
"def set_training_params(use_defaults):\n if use_defaults:\n n_epochs, batch_size, epsilon = default_training_params()\n return n_epochs, batch_size, epsilon\n\n print (\"Select number of epochs to train (default 100):\")\n n_epochs = int(input())\n print (\"Select batch size (default 64):\")\n batch_size = int(input())\n print (\"Select learning rate (default 0.0001):\")\n epsilon = float(input())\n return n_epochs, batch_size, epsilon",
"def main(self, params):\n pass",
"def _setup_params(self,**params):\n ### a parameter might be passed in for one of the extra_pos;\n ### if a key in the params dict is not a *parameter* of this\n ### PO, then try it on the extra_pos\n for n,p in params.items():\n if n not in self.params():\n self.set_parameter_value(n,p)\n del params[n]\n\n Parameterized._setup_params(self,**params)",
"def _set_default_args(self):\n self._parser.add_argument(\"username\")\n self._parser.add_argument(\"password\")\n self._parser.add_argument(\n \"--start\",\n help=\"Start date for the scraper in iso format, eg: 2017-11-19\",\n type=str,\n default=None,\n )\n self._parser.add_argument(\n \"--end\",\n help=\"End date for the scraper in iso format\",\n type=str,\n default=None,\n )\n self._parser.add_argument(\n \"--skip-delete\",\n help=\"Delete the scraper folder in /tmp after run\",\n action=\"store_true\",\n )",
"def default_params():\n params = {}\n params['dataset'] = 'adult'\n params['engines'] = ['MD','RDA']\n params['iters'] = 10000\n params['epsilon'] = 1.0\n params['delta'] = 0.0\n params['bounded'] = True\n params['frequency'] = 1\n params['seed'] = 0\n params['save'] = None\n params['load'] = None\n params['plot'] = None\n\n return params",
"def test_call_default_params(self):\r\n\r\n # adapted from test_app.test_cd_hit.test_cdhit_clusters_from_seqs\r\n\r\n exp = {0: ['cdhit_test_seqs_0'],\r\n 1: ['cdhit_test_seqs_1'],\r\n 2: ['cdhit_test_seqs_2'],\r\n 3: ['cdhit_test_seqs_3'],\r\n 4: ['cdhit_test_seqs_4'],\r\n 5: ['cdhit_test_seqs_5'],\r\n 6: ['cdhit_test_seqs_6'],\r\n 7: ['cdhit_test_seqs_7'],\r\n 8: ['cdhit_test_seqs_8'],\r\n 9: ['cdhit_test_seqs_9']}\r\n\r\n app = CdHitOtuPicker(params={})\r\n obs = app(self.tmp_seq_filepath1)\r\n self.assertEqual(obs, exp)",
"def __init__(self, start_time=None):\n if start_time is None:\n self.started = time.time()\n else:\n self.started = start_time",
"def set_params(self, params):\n params = dict_to_namespace(params)\n\n # Set self.params\n self.params = Namespace()\n self.params.ndimx = params.ndimx\n self.params.model_str = getattr(params, 'model_str', 'optfixedsig')\n self.params.ig1 = getattr(params, 'ig1', 4.0)\n self.params.ig2 = getattr(params, 'ig2', 3.0)\n self.params.n1 = getattr(params, 'n1', 1.0)\n self.params.n2 = getattr(params, 'n2', 1.0)\n self.params.sigma = getattr(params, 'sigma', 1e-5)\n self.params.niter = getattr(params, 'niter', 70)\n self.params.kernel = getattr(params, 'kernel', kern_matern)\n self.params.trans_x = getattr(params, 'trans_x', False)",
"def apply_startup_params(self):\n config = self._protocol.get_startup_config()\n \n if not isinstance(config, dict):\n raise InstrumentParameterException(\"Incompatible initialization parameters\")\n \n log.trace(\"BARS driver applying config: %s\", config)\n self._protocol.set_readonly_values()\n self.set_resource(config)",
"def start(self, start):\n if start is None:\n raise ValueError(\"Invalid value for `start`, must not be `None`\")\n\n self._start = start",
"def set_params(self, params_):\n x_start, x_end = params_[\"lim_fit\"]\n self.find_idx_of_fit_limit(x_start, x_end)\n self.is_error_bar_for_fit = params_[\"use_error_bar\"]\n self.fitting_method1 = params_[\"method1\"]\n self.fitting_method2 = params_[\"method2\"]\n self.qty_to_min = params_[\"qty_to_min\"]\n\n for i, key in enumerate(self.params):\n # self.params[key].set(value=params_[\"val\"][i], min=params_[\"min\"][i], max=params_[\"max\"][i], vary=bool(params_[\"hold\"][i]), brute_step=params_[\"brute_step\"][i])\n if self.params[key].user_data is not None:\n if \"dontGenerate\" in self.params[key].user_data:\n continue\n self.params[key].set(value=params_[key][\"value\"], min=params_[key][\"min\"], max=params_[key][\"max\"], vary=params_[key][\"vary\"], brute_step=params_[key][\"b_step\"])",
"def _default_params(self) -> dict[str, Any]:\n return {\n \"max_tokens\": self.max_tokens,\n \"temperature\": self.temperature,\n \"top_p\": self.top_p,\n \"logprobs\": self.logprobs,\n \"echo\": self.echo,\n \"stop_sequences\": self.stop_sequences,\n \"repeat_penalty\": self.repeat_penalty,\n \"top_k\": self.top_k,\n \"n_threads\": self.n_threads,\n \"n_ctx\": self.n_ctx,\n \"n_gpu_layers\": self.n_gpu_layers,\n \"n_gqa\": self.n_gqa if self.n_gqa else None,\n \"n_parts\": self.n_parts,\n \"seed\": self.seed,\n \"f16_kv\": self.f16_kv,\n \"logits_all\": self.logits_all,\n \"vocab_only\": self.vocab_only,\n \"use_mlock\": self.use_mlock,\n \"n_batch\": self.n_batch,\n \"last_n_tokens_size\": self.last_n_tokens_size,\n \"streaming\": self.streaming,\n }",
"def __init__(self, name=None, start_settings=None,\n parallel_settings=None, electronic_settings=None, magnetic_settings=None,\n ionic_settings=None, hubbard_settings=None, hybrid_settings=None, misc_settings=None):\n\n self._name = name or \"input_param\"\n self._start_settings = start_settings or {\"NWRITE\": 2, \"ISTART\": 1, \"INIWAV\": 1,\n \"ICHARG\": None, \"NELECT\": None, \"LORBIT\": 11,\n \"NEDOS\": 1000, \"LOPTICS\": \".FALSE.\",\"ISYM\": -1 , \"LELF\": None, \"LVHAR\": None, \"RWIGS\": None, \"LVTOF\": None, \"NBANDS\": None, \"LWAVE\": None}\n self._parallel_settings = parallel_settings or {\"flnm\": \"run_scf.sh\", \"job_name\": \"scf_std\", \"machine\": \"nano\" ,\n \"partition\": \"etna\", \"nodes\": 4,\"ppn\": 24,\n \"max_time\": \"24:00:00\", \"NCORE\": 8, \"KPAR\": 2, \"exec\": \"vasp_std\"}\n self._electronic_settings = electronic_settings or {\"PREC\":\"Accurate\" , \"ALGO\": \"Normal\", \"ENCUT\": 800,\n \"NELM\": None, \"NELMIN\": None, \"GGA\": \"PS\" ,\"EDIFF\": 10E-05, \"ISMEAR\": 1,\n \"SIGMA\": 0.2, \"LASPH\": \".TRUE.\", \"LREAL\": \"Auto\", \"ADDGRID\": \".TRUE.\", \"MAXMIX\": 100, \"BMIX\": 1.5}\n self._ionic_settings = ionic_settings\n self._magnetic_settings = magnetic_settings\n self._hybrid_settings = hybrid_settings\n self._hubbard_settings = hubbard_settings\n self._misc_settings = misc_settings",
"def _initialize_defaults(self):\n for key, value in defaults.items():\n if key not in self.source_params:\n self.source_params[key] = value",
"def _initialize_defaults(self):\n for key, value in defaults.items():\n if key not in self.source_params:\n self.source_params[key] = value",
"def start(self, start):\n\n self._start = start",
"def start(self, start):\n\n self._start = start",
"def start(self, start):\n\n self._start = start",
"def start(self, start):\n\n self._start = start",
"def default_start(self, data):\n return {}",
"def default_start(self, data):\n return {}",
"def _use_existing_params(self):\n sh = shelve.open(os.path.expanduser('~/.config/scheduler/params'))\n self.params = sh['params']\n sh.close()",
"def set_params(self, **kwargs):\n ...",
"def __init__(self, **kwargs):\n\n self._active = True\n self._count = kwargs.get('start_count', 0)",
"def default_params():\n params = {}\n params['load'] = None\n params['style'] = 'ggplot'\n params['show'] = True\n params['save'] = None\n return params",
"def set_params(self, **params):\n return super().set_params(**params)",
"def set_params(self):\n raise NotImplementedError",
"def __init__(self, xstart, **more_args):\r\n self.xstart = xstart\r\n self.more_args = more_args\r\n self.initialize()",
"def Pool2DOptionsStart(builder):\n return Start(builder)",
"def start(self):\n return self.params.send_params()",
"def set_scan_params(self, session, params):\n if params['reset']:\n self._set_default_scan_params()\n for k in ['az_speed', 'az_accel']:\n if params[k] is not None:\n self.scan_params[k] = params[k]\n self.log.info('Updated default scan params to {sp}', sp=self.scan_params)\n yield\n return True, 'Done'",
"def set_default_params(self):\n print('------------------')\n print('Setting default parameters with file ', self.input_file_name)\n if 'ssephem' not in self.__dict__:\n self.__dict__['ssephem'] = 'DE436'\n print('Setting default Solar System Ephemeris: DE436')\n if 'clock' not in self.__dict__:\n self.__dict__['clock'] = None\n print('Setting a default Enterprise clock convention (check the code)')\n if 'setupsamp' not in self.__dict__:\n self.__dict__['setupsamp'] = False\n if 'psrlist' in self.__dict__:\n self.psrlist = np.loadtxt(self.psrlist, dtype=np.unicode_)\n print('Only using pulsars from psrlist')\n else:\n self.__dict__['psrlist'] = []\n print('Using all available pulsars from .par/.tim directory')\n if 'psrcachefile' not in self.__dict__:\n self.psrcachefile = None\n if 'tm' not in self.__dict__:\n self.tm = 'default'\n print('Setting a default linear timing model')\n if 'inc_events' not in self.__dict__:\n self.inc_events = True\n print('Including transient events to specific pulsar models')\n if 'fref' not in self.__dict__:\n self.fref = 1400 # MHz\n print('Setting reference radio frequency to 1400 MHz')\n if 'mcmc_covm_csv' in self.__dict__ and os.path.isfile(self.mcmc_covm_csv):\n print('MCMC jump covariance matrix is available')\n self.__dict__['mcmc_covm'] = pd.read_csv(self.mcmc_covm_csv, index_col=0)\n else:\n self.__dict__['mcmc_covm'] = None\n # Copying default priors from StandardModels/CustomModels object\n # Priors are chosen not to be model-specific because HyperModel\n # (which is the only reason to have multiple models) does not support\n # different priors for different models\n for prior_key, prior_default in self.noise_model_obj().priors.items():\n if prior_key not in self.__dict__.keys():\n self.__dict__[prior_key] = prior_default\n\n # Model-dependent parameters\n for mkey in self.models:\n\n self.models[mkey].modeldict = dict()\n\n print('------------------')",
"def define_parameters(self):\n self.add_argument('--prefix', dest='prefix', type=str, optional=False,\n help='prefix for file names')\n self.add_argument('--sleepLength',\n dest = 'sleepLength',\n type = str,\n optional = True,\n help ='time to sleep before performing plugin action',\n default = '0')",
"def set_params(self, params):",
"def start(params) -> None:\n check_root()\n start_microservice(params)\n load_kernel_module(params)\n start_streamer(params)",
"def add_default_params(self, params):\n params['key'] = self.key\n params['format'] = self.format\n #params['unique_id'] = generate_unique_id()\n return params",
"def set_params(self, **params):\n\n return super().set_params(**params)",
"def init_params(self):\n self.params = Parameters()\n self.params.add('qoff', self.qoff, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('yscale', self.yscale, vary=0, min=0, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('int_bg', self.int_bg, vary=0, min=0, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('Rc', self.Rc, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('sur_den', self.sur_den, vary=0, min=0, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('ion_depth', self.ion_depth, vary=0, min=0, max=np.inf, expr=None, brute_step=0.1)",
"def set_params(self, *, params: Params) -> None: # pragma: no cover\n\t\tsuper().set_params(params=params)",
"def _invocation_params(self) -> Dict[str, Any]:\n return self._default_params",
"def read_start_params(path_or_database):\n database = load_database(**_process_path_or_database(path_or_database))\n optimization_problem = read_last_rows(\n database=database,\n table_name=\"optimization_problem\",\n n_rows=1,\n return_type=\"dict_of_lists\",\n )\n start_params = optimization_problem[\"params\"][0]\n return start_params",
"def _set_default_parameters(self, parameters):\n parameters_dict = parameters.get_dict()\n from aiida_crystal_dft.io.f9 import Fort9\n with self.inputs.wavefunction.open(mode='rb') as f:\n file_name = f.name\n wf = Fort9(file_name)\n if 'band' in parameters_dict:\n\n # automatic generation of k-point path\n if 'bands' not in parameters_dict['band']:\n self.logger.info('Proceeding with automatic generation of k-points path')\n structure = wf.get_structure()\n shrink, points, path = get_shrink_kpoints_path(structure)\n parameters_dict['band']['shrink'] = shrink\n parameters_dict['band']['bands'] = path\n\n # automatic generation of first and last band\n if 'first' not in parameters_dict['band']:\n parameters_dict['band']['first'] = 1\n if 'last' not in parameters_dict['band']:\n parameters_dict['band']['last'] = wf.get_ao_number()\n\n if 'dos' in parameters_dict:\n # automatic generation of projections in case no projections are given\n # TODO: explicit asking for automatic projections\n if ('projections_atoms' not in parameters_dict['dos'] and\n 'projections_orbitals' not in parameters_dict['dos']):\n self.logger.info('Proceeding with automatic generation of dos atomic projections')\n parameters_dict['dos']['projections_atoms'] = get_dos_projections_atoms(wf.get_atomic_numbers())\n\n # automatic generation of first and last band\n if 'first' not in parameters_dict['dos']:\n parameters_dict['dos']['first'] = 1\n if 'last' not in parameters_dict['dos']:\n parameters_dict['dos']['last'] = wf.get_ao_number()\n return get_data_class('dict')(dict=parameters_dict)",
"def _set_params(self, params, defaults):\n new_params = OrderedDict(\n zip(params, [x if isinstance(x, Parameter) else Parameter() for x in defaults])\n )\n for key, value in self._src.items():\n if key in new_params:\n new_params[key] = value\n\n self._src = new_params",
"def getDefaultParams():\n defpar = [\n # coordinate system\n ['crd_sys', \"'sph'\", 'Coordinate system'],\n ['nx', '[60, 40, 30]', 'Number of grid points in the first dimension'],\n ['xbound', '[0.1*au, 30.*au, 110.*au, 250.*au]', 'Number of radial grid points'],\n ['ny', '[10,30, 30, 10]',\n 'Number of grid points in the second dimension'],\n ['ybound', '[0.1, pi/6., pi/2., 5.*pi/6., 3.04]',\n 'Number of radial grid points'],\n ['nz', '[361]', 'Number of grid points in the third dimension'],\n ['zbound', '[0., 2.0*pi]', 'Number of radial grid points'],\n # star related\n ['tstar', '[3900.0]', 'Temperature of star'],\n ['mstar', '[1.0*ms]', 'Mass of the star(s)'],\n ['rstar', '[2.5*rs]', 'Radius of star'],\n # gas density \n ['Rin', '[0.1*au, 80*au]', 'inner bounding edge'],\n ['Rin_w', '[0, 1*au]', 'gaussian taper before inner edge'], \n ['Rout', '[30*au, 120*au]', 'outer bounding edge'],\n ['Rout_w', '[1*au, 1*au]', 'gaussian taper after outer edge'], \n ['sigp', '[-1.0, -1.5]', 'power-law surface density'],\n ['sig0', '[1e2, 1e1]', 'surface density at Rin in g/cm^2'], \n ['ring_r', '[50*au]', 'location of gaussian ring'], \n ['ring_win', '[5*au]', 'width of gaussian ring in inner radius'],\n ['ring_wout', '[5*au]', 'width of gaussian ring in outer radius'], \n ['ring_a', '[1e2]', 'surface density at center of ring in g/cm^2]'], \n ['cutgdens', '1e-30', 'cut for density'], \n ['Rt', '100*au', 'radius for scale height'], \n ['Ht', '10*au', 'scale height'], \n ['qheight', '1.25', 'height power-law'], \n # gas species\n ['gasspec_mol_name', \"['12co']\", 'name of molecule'],\n ['gasspec_mol_abun', '[5e-5]', 'mass abundance '],\n ['gasspec_mol_dbase_type', \"['leiden']\", ''],\n ['gasspec_mol_freezeout_dfact', '[1e-3]',\n 'Factor by which the molecular abundance should be decreased in the freeze-out zone'],\n ['mol_freeze_Ht', '[24*au]', 'Height at Rt, with index=qheight, for freeze out to happen'],\n ['mol_freeze_del_hfrac', '0.2', 'Gaussian taper for freeze-out. del H = h * hfrac'],\n ['mol_snowR', '[20*au]', 'Radius when freeze out begins to happen'],\n # dust density\n # flat power-law parts\n ['dRin', '[0.1*au, 80*au]', 'inner bounding edge'],\n ['dRin_w', '[0, 1*au]', 'gaussian taper before inner edge'], \n ['dRout', '[30*au, 120*au]', 'outer bounding edge'],\n ['dRout_w', '[1*au, 1*au]', 'gaussian taper after outer edge'], \n ['dsigp', '[-1.0, -1.5]', 'power-law surface density'],\n ['dsig0', '[1e2, 1e1]', 'surface density at Rin'],\n # Lynden-Bell parts\n ['dLB_Rin', '[0.1*au]', 'inner bounding radius'], \n ['dLB_Rsig', '[30*au]', 'charcteristic radius'],\n ['dLB_sigp', '[-1.0]', 'power-law exponent. 
Careful, the sign is different from the usual function by a negative sign for consistency with flat power-law'], \n ['dLB_sig0', '[1e2]', 'surface density'], \n # ring parts\n ['dring_r', '[50*au]', 'location of gaussian ring'],\n ['dring_win', '[5*au]', 'width of gaussian ring in inner radius'],\n ['dring_wout', '[5*au]', 'width of gaussian ring in outer radius'], \n ['dring_a', '[1e2]', 'surface density at center of ring in g/cm^2]'],\n ['cutddens', '1e-30', 'cut for dust density'],\n ['dRt', '[100*au]', 'radius for scale height for each grain size'], \n ['dHt', '[10*au]', 'scale height for each grain size'], \n ['dqheight', '[1.25]', 'scale height power-law for dust'], \n # temperature\n ['T0mid', '50', 'mid plane temperature at Rt'],\n ['T0atm', '50', 'atmosphere temperature at Rt'],\n ['zqratio', '3', 'factor of Ht of where temperature transition occurs'],\n ['qmid', '-0.5', 'midplane temperature exponent'],\n ['qatm', '-0.5', 'atmosphere temperature exponent'],\n ['hdel', '2', 'temperature transition exponent '],\n ['cuttemp', '10', 'temperature cut'], \n # alignment\n ['altype', \"'toroidal'\", 'alignment type']\n ]\n\n return defpar",
"def start():",
"def start():",
"def start():",
"def start():",
"def __init__( self, parameters={} ):\n self.params = {}",
"def set_start_time(self, *args, **kwargs):\n return _uhd_swig.usrp_source_set_start_time(self, *args, **kwargs)",
"def start_schedule(self, *args: Sequence[Any], **kwargs: Mapping[str, Any]) -> None:\n self._args = args\n self._kwargs = kwargs\n super().start()",
"def start(self, start):\n if self._configuration.client_side_validation and start is None:\n raise ValueError(\"Invalid value for `start`, must not be `None`\") # noqa: E501\n if (self._configuration.client_side_validation and\n start is not None and start < 0): # noqa: E501\n raise ValueError(\"Invalid value for `start`, must be a value greater than or equal to `0`\") # noqa: E501\n\n self._start = start",
"def init_params(self, parameters):\r\n max_epoch = parameters['num_epoch']\r\n momentum_rate = parameters['momentum']\r\n loss = parameters['loss_function']\r\n accuracy = parameters['accuracy']\r\n regularization = parameters['regularization']\r\n batch_size = parameters['batch_size']\r\n optimizer = parameters['optimizer'] if parameters['optimizer'] is not None else 'batch'\r\n self.__init__(max_epoch, optimizer, loss, accuracy, momentum_rate, regularization, batch_size)",
"def set_params(self, params: Dict) -> None:\n self.leak.set_g(params['g_leak'])\n self.nav.set_g(params['g_nav'])\n self.kvhh.set_g(params['g_kvhh'])\n self.kva.set_g(params['g_kva'])\n self.kvsi.set_g(params['g_kvsi'])\n self.cav.set_g(params['g_cav'])\n self.kca.set_g(params['g_kca'])\n self.nap.set_g(params['g_nap'])\n self.kir.set_g(params['g_kir'])\n self.ampar.set_g(params['g_ampar'])\n self.nmdar.set_g(params['g_nmdar'])\n self.gabar.set_g(params['g_gabar'])\n self.tau_ca = params['t_ca']",
"def test_call_default_params_minlen(self):\r\n\r\n app = Usearch610DeNovoOtuPicker(\r\n params={'save_intermediate_files': False,\r\n 'output_dir': self.output_dir,\r\n 'remove_usearch_logs': True,\r\n 'minlen': 101\r\n })\r\n\r\n obs_clusters = app(self.tmp_seq_filepath_97perc_id)\r\n\r\n # Should get no results\r\n expected_clusters = {}\r\n\r\n self.assertEqual(obs_clusters, expected_clusters)",
"def set_default_parameters(self):\n super().set_default_parameters()\n if not \"region_size\" in vars(self):\n self.region_size = 0.08\n if not \"RGB_bands\" in vars(self):\n self.RGB_bands = [\"B4\",\"B3\",\"B2\"]\n if not \"split_RGB_images\" in vars(self):\n self.split_RGB_images = True\n # in PROCESSED dir we expect RGB. NDVI, BWNDVI\n self.num_files_per_point = 3",
"def required_parameters(self):\n return ['seed', 'run_params']",
"def default_startpos(self) -> Dict[AtomKey, numpy.array]:\n ...",
"def set_default_parameters(self):\n super().set_default_parameters()\n if not \"replace_existing_files\" in vars(self):\n self.replace_existing_files = False\n if not \"num_files_per_point\" in vars(self):\n self.num_files_per_point = -1\n if not \"input_location_type\" in vars(self):\n self.input_location_type = \"local\"\n if not \"output_location_type\" in vars(self):\n self.output_location_type = \"local\"",
"def init(self, parameters):\n pass",
"def set_start_time(self, *args, **kwargs):\n return _uhd_swig.usrp_source_sptr_set_start_time(self, *args, **kwargs)",
"def _initialize_posterior_state(self, start=None):\n if start is None:\n self.state = self._initialize_default_start(self.state)\n else:\n self.state.alpha = start['alpha']\n self.state.beta = start['beta']\n self.state.tau = start['tau']\n self.state.eta = start['eta']\n self.state.spatial = self.state.eta",
"def __init__(self, start, home, left_limit, right_limit, **kwargs):\n super().__init__(start, home, **kwargs)\n self.left_limit = left_limit\n self.right_limit = right_limit",
"def _start(args=None):\n options = _parse_args(args)\n main(**options)",
"def init_params():\r\n\r\n p = OrderedDict()\r\n p['startYear'] = 1855\r\n p['num5YearAgeClasses'] = 25\r\n p['numCareLevels'] = 5\r\n p['pixelsInPopPyramid'] = 2000\r\n p['pixelsPerTown'] = 16 # 56\r\n p['mapGridXDimension'] = 20\r\n p['mapGridYDimension'] = 25\r\n p['careLevelColour'] = ['deepskyblue','green','yellow','orange','red']\r\n p['careDemandInHours'] = [ 0.0, 12.0, 24.0, 48.0, 96.0 ]\r\n p['unmetNeedColor'] = ['deepskyblue','green','yellow','orange','red', 'mediumorchid']\r\n p['houseSizeColour'] = ['deepskyblue','green','yellow','orange','red', 'mediumorchid']\r\n p['mainFont'] = 'Helvetica 18'\r\n p['fontColour'] = 'white'\r\n p['dateX'] = 70\r\n p['dateY'] = 20\r\n p['popX'] = 70\r\n p['popY'] = 50\r\n p['delayTime'] = 0.0\r\n p['maxTextUpdateList'] = 12\r\n \r\n return p",
"def _default_parameters(cls) -> Options:\n params = super()._default_parameters()\n params.main_axes = None\n params.i_means = None\n params.q_means = None\n params.scales = None\n\n return params",
"def _set_params(self, *args, **kwargs):\n startup = False\n try:\n params = args[0]\n except IndexError:\n raise InstrumentParameterException('Set command requires a parameter dict.')\n\n try:\n startup = args[1]\n except IndexError:\n pass\n\n # Only check for readonly parameters if we are not setting them from startup\n if not startup:\n readonly = self._param_dict.get_visibility_list(ParameterDictVisibility.READ_ONLY)\n\n log.debug(\"set param, but check visibility first\")\n log.debug(\"Read only keys: %s\", readonly)\n\n for (key, val) in params.iteritems():\n if key in readonly:\n raise InstrumentParameterException(\"Attempt to set read only parameter (%s)\" % key)\n\n # Make sure this method is overloaded because this just verifies, but doesn't\n # set a damn thing.",
"def init_params_on_input(self, train_valid_iterator: TrainValidIterator) -> dict:\n suggested_params = copy(self.default_params)\n task = train_valid_iterator.train.task\n\n assert \"sklearn\" in task.losses, \"Sklearn loss should be defined\"\n\n if task.name == \"reg\":\n # suggested_params['cs'] = list(map(lambda x: 1 / (2 * x), suggested_params['cs']))\n suggested_params[\"cs\"] = [1 / (2 * i) for i in suggested_params[\"cs\"]]\n\n return suggested_params",
"def add_default_bounds_to_params(params):\n defaults = pd.DataFrame(\n {\"lower_bound\": -np.inf, \"upper_bound\": np.inf},\n index=params.index,\n )\n params = params.combine_first(defaults)\n\n return params",
"def set_params(self, *argv, **kwargs):\n pass"
] | [
"0.6189463",
"0.596062",
"0.5936523",
"0.5922948",
"0.5908743",
"0.5905468",
"0.58663934",
"0.5828694",
"0.5814522",
"0.5814522",
"0.5756695",
"0.5748159",
"0.5678698",
"0.5660043",
"0.5645459",
"0.56087685",
"0.56059563",
"0.5576346",
"0.5533934",
"0.5510091",
"0.55027384",
"0.54661244",
"0.54416907",
"0.5432734",
"0.5428722",
"0.54077",
"0.5406224",
"0.539342",
"0.5391937",
"0.53904986",
"0.5386068",
"0.5376807",
"0.53716713",
"0.53706867",
"0.5347968",
"0.53410584",
"0.5335071",
"0.53182614",
"0.5306341",
"0.5305678",
"0.530509",
"0.53049326",
"0.53040934",
"0.53000575",
"0.53000575",
"0.5298281",
"0.5298281",
"0.5298281",
"0.5298281",
"0.5289371",
"0.5289371",
"0.5276212",
"0.5257937",
"0.5255078",
"0.52459455",
"0.5241048",
"0.5240485",
"0.52388465",
"0.5234512",
"0.5233081",
"0.52280337",
"0.52206194",
"0.5217014",
"0.5216314",
"0.52151155",
"0.52111894",
"0.5210953",
"0.5210575",
"0.5209559",
"0.52066207",
"0.5201796",
"0.51960266",
"0.5176221",
"0.5156425",
"0.51493365",
"0.51493365",
"0.51493365",
"0.51493365",
"0.5143782",
"0.5138794",
"0.51317614",
"0.5119015",
"0.5111695",
"0.51007473",
"0.5100542",
"0.5097408",
"0.5095442",
"0.50880116",
"0.50790375",
"0.5077933",
"0.50770336",
"0.5076686",
"0.5075589",
"0.5074321",
"0.5072426",
"0.50719583",
"0.50651646",
"0.5062671",
"0.50580925",
"0.50513077"
] | 0.73069286 | 0 |
Fit method for likelihood based models | def fit(self, start_params=None, method='newton', maxiter=100,
full_output=True, disp=True, fargs=(), callback=None, retall=False,
skip_hessian=False, **kwargs):
Hinv = None # JP error if full_output=0, Hinv not defined
start_params = self._get_start_params(start_params)
# TODO: separate args from nonarg taking score and hessian, ie.,
# user-supplied and numerically evaluated estimate frprime doesn't take
# args in most (any?) of the optimize function
nobs = self.endog.shape[0]
# f = lambda params, *args: -self.loglike(params, *args) / nobs
def f(params, *args):
return -self.loglike(params, *args) / nobs
if method == 'newton':
# TODO: why are score and hess positive?
def score(params, *args):
return self.score(params, *args) / nobs
def hess(params, *args):
return self.hessian(params, *args) / nobs
else:
def score(params, *args):
return -self.score(params, *args) / nobs
def hess(params, *args):
return -self.hessian(params, *args) / nobs
warn_convergence = kwargs.pop('warn_convergence', True)
optimizer = Optimizer()
xopt, retvals, optim_settings = optimizer._fit(f, score, start_params,
fargs, kwargs,
hessian=hess,
method=method,
disp=disp,
maxiter=maxiter,
callback=callback,
retall=retall,
full_output=full_output)
# NOTE: this is for fit_regularized and should be generalized
cov_params_func = kwargs.setdefault('cov_params_func', None)
if cov_params_func:
Hinv = cov_params_func(self, xopt, retvals)
elif method == 'newton' and full_output:
Hinv = np.linalg.inv(-retvals['Hessian']) / nobs
# TODO: try/except for non-invertible hessian?
elif not skip_hessian:
H = -1 * self.hessian(xopt)
invertible = False
if np.all(np.isfinite(H)):
eigvals, eigvecs = np.linalg.eigh(H)
if np.min(eigvals) > 0:
invertible = True
if invertible:
Hinv = eigvecs.dot(np.diag(1.0 / eigvals)).dot(eigvecs.T)
Hinv = np.asfortranarray((Hinv + Hinv.T) / 2.0)
else:
warnings.warn('Inverting hessian failed, no bse or cov_params '
'available', HessianInversionWarning)
Hinv = None
if 'cov_type' in kwargs:
cov_kwds = kwargs.get('cov_kwds', {})
kwds = {'cov_type': kwargs['cov_type'], 'cov_kwds': cov_kwds}
else:
kwds = {}
if 'use_t' in kwargs:
kwds['use_t'] = kwargs['use_t']
# TODO: add Hessian approximation and change the above if needed
mlefit = LikelihoodModelResults(self, xopt, Hinv, scale=1., **kwds)
# TODO: hardcode scale?
if isinstance(retvals, dict):
mlefit.mle_retvals = retvals
if warn_convergence and not retvals['converged']:
warnings.warn("Maximum Likelihood optimization failed to "
"converge. Check mle_retvals",
ConvergenceWarning)
mlefit.mle_settings = optim_settings
return mlefit | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fit(self, X):",
"def fit(self, X, y=...):\n ...",
"def fit(self, X, y=...):\n ...",
"def fit(self, X, y=...):\n ...",
"def fit(self, X, y=...):\n ...",
"def fit(self, X, y=...):\n ...",
"def fit(self, X, y=...):\n ...",
"def fit(self, X, y=...):\n ...",
"def fit(self, X, y=...):\n ...",
"def fit(self, X, y=...):\n ...",
"def fit(self, X, y=...):\n ...",
"def fit():\n pass",
"def fit(self, x):\n pass",
"def fit(self, X, Y):\n ...",
"def fit(self, X, y, **fit_params):\n ...",
"def fit(self, X, y=..., **fit_params):\n ...",
"def fit(self, X, Y, **fit_params):\n ...",
"def fit(self, X, Y, **fit_params):\n ...",
"def fit_model(train_ts_dis, data, init_prior = [.5,.5], bias = True, mode = \"biasmodel\"):\r\n if mode == \"biasmodel\":\r\n #Fitting Functions\r\n def bias_fitfunc(rp, tsb, df):\r\n init_prior = [.5,.5]\r\n model = BiasPredModel(train_ts_dis, init_prior, ts_bias = tsb, recursive_prob = rp)\r\n model_likelihoods = []\r\n for i in df.index:\r\n c = df.context[i]\r\n trial_choice = df.subj_ts[i]\r\n conf = model.calc_posterior(c)\r\n model_likelihoods.append(conf[trial_choice])\r\n return np.array(model_likelihoods)\r\n \r\n def bias_errfunc(params,df):\r\n rp = params['rp']\r\n tsb = params['tsb']\r\n #minimize\r\n return abs(np.sum(np.log(bias_fitfunc(rp,tsb,df)))) #single value\r\n \r\n #Fit bias model\r\n #attempt to simplify:\r\n fit_params = lmfit.Parameters()\r\n fit_params.add('rp', value = .6, min = 0, max = 1)\r\n if bias == True:\r\n fit_params.add('tsb', value = 1, min = 0)\r\n else:\r\n fit_params.add('tsb', value = 1, vary = False, min = 0)\r\n out = lmfit.minimize(bias_errfunc,fit_params, method = 'lbfgsb', kws= {'df': data})\r\n lmfit.report_fit(out)\r\n return out.values\r\n \r\n elif mode == \"midline\":\r\n #Fitting Functions\r\n def midline_errfunc(params,df):\r\n eps = params['eps'].value\r\n context_sgn = np.array([max(i,0) for i in df.context_sign])\r\n choice = df.subj_ts\r\n #minimize\r\n return -np.sum(np.log(abs(abs(choice - (1-context_sgn))-eps)))\r\n \r\n #Fit bias model\r\n #attempt to simplify:\r\n fit_params = lmfit.Parameters()\r\n fit_params.add('eps', value = .1, min = 0, max = 1)\r\n midline_out = lmfit.minimize(midline_errfunc,fit_params, method = 'lbfgsb', kws= {'df': data})\r\n lmfit.report_fit(midline_out)\r\n return midline_out.values",
"def fit(self, X, y):",
"def fit(self, X, y):",
"def fit(self, X, y):",
"def fit(self, X):\n raise NotImplementedError",
"def fit(self, X,y):\n pass",
"def fit(self, x):\n raise NotImplementedError()",
"def partial_fit(self, X, y=...):\n ...",
"def partial_fit(self, X, y=...):\n ...",
"def fit(self):\n raise NotImplementedError",
"def partial_fit(self, X, y=..., **fit_params):\n ...",
"def fit(self, X, Y, **kwargs):\n raise NotImplementedError",
"def fit(self, Y):\n raise NotImplementedError",
"def fit(self, X, Y):\n K = len(np.unique(Y))\n p = np.shape(X)[1]\n n = np.shape(X)[0]\n conditional = []\n # Class conditional distribution parameters (Laplace smoothing)\n prior = []\n # Class prior distribution parameters (MLE)\n for label in xrange(K):\n indices = np.where(Y == label + 1)[0]\n temp_split = X[indices, :]\n temp_count = np.shape(temp_split)[0]\n prior.append(1. * temp_count / n)\n temp_sum = np.apply_along_axis(sum, 0, temp_split.toarray())\n conditional.append(1. * (1 + 1. * temp_sum) / (2 + temp_count))\n self.model_prior = prior\n self.model_conditional = conditional\n return self",
"def _fit(self, _X, _y):\n\n self.model = linear_model.LogisticRegression(penalty=self.penalty, random_state=self.seed,\n solver='saga', n_jobs=self.n_jobs)\n self.model.fit(_X, _y)",
"def fit(self, X, y=None, **kwargs):\n raise NotImplementedError()",
"def fit(self, X, y=..., sample_weight=...):\n ...",
"def fit(self, X, Y):\n if self.model is None:\n print(\"%s.fit: implement me\" % (self.__class__.__name__))",
"def fit(self, x, y):\n raise NotImplementedError('Subclass of LinearModel must implement fit method.')",
"def fit(self):\n raise NotImplementedError('')",
"def fit(self, X, y, sample_weight=...):\n ...",
"def fit_model(self):\n logger.info('Fitting model')\n if self.traj_dict is None:\n self.traj_dict = self.get_traj_dict()\n self.model.fit(self.traj_dict.values())",
"def fit(self, X, y=None):\n #import pdb\n #pdb.set_trace()\n return self.partial_fit(X, y)",
"def fit(self, input):\n raise NotImplementedError()",
"def fit(self):\n raise NotImplementedError # pragma: no cover",
"def fit(self, data):\n raise NotImplementedError(\"To be implemented in sub classes\")",
"def fit(self, X):\n raise NotImplementedError('Abstract method \"fit\" must be '\n 'specialised!')",
"def fit(self, X, y):\n\n # retain columns incase encoding occurs\n self.fit_X_columns = X.columns.tolist()\n\n # generate the imputation datasets from multiple imputation\n # then fit the analysis models on each of the imputed datasets\n self.models_ = self._apply_models_to_mi_data(\n self.linear_models, X, y\n )\n\n # generate the fit statistics from each of the m models\n self.statistics_ = self._get_stats_from_models(self.models_)\n\n # still return an instance of the class\n return self",
"def partial_fit(self, X, y=..., sample_weight=...):\n ...",
"def fit(self, X, y, sample_weight=..., **fit_params):\n ...",
"def partial_fit(self, X, y, classes=..., sample_weight=...):\n ...",
"def fit(self, Y, STATUS, ntop=100, nrecent=100, nmax=400, ntopmu=100, ntopvar=100, nkmeans=300, nkeamnsdata=5000,\n lam=1e-6):\n X = self.X\n untested = [i for i in range(self.n) if STATUS[i] == 0]\n tested = [i for i in range(self.n) if STATUS[i] == 2]\n ytested = Y[tested].reshape(-1)\n self.y_max = np.max(ytested)\n # each 10 fits we update the hyperparameters, otherwise we just update the data which is a lot faster\n if np.mod(self.update_counter, self.updates_per_big_fit) == 0:\n print('fitting hyperparameters')\n # how many training points are there\n ntested = len(tested)\n # if more than nmax we will subsample and use the subsample to fit hyperparametesr\n if ntested > nmax:\n # subsample is uniion of 100 best points, 100 most recent points and then random points \n top = list(np.argsort(ytested)[-ntop:])\n recent = list(range(ntested - nrecent, ntested))\n topandrecent = list(set(top + recent))\n rand = list(\n np.random.choice([i for i in range(ntested) if i not in topandrecent], nmax - len(topandrecent),\n False))\n testedtrain = topandrecent + rand\n ytrain = ytested[testedtrain]\n train = [tested[i] for i in testedtrain]\n else:\n train = tested\n ytrain = ytested\n \n # use GPy code to fit hyperparameters to minimize NLL on train data\n mfy = GPy.mappings.Constant(input_dim=self.d, output_dim=1) # fit dense GPy model to this data\n ky = GPy.kern.RBF(self.d, ARD=True, lengthscale=np.ones(self.d))\n self.GP = GPy.models.GPRegression(X[train], ytrain.reshape(-1, 1), kernel=ky, mean_function=mfy)\n self.GP.optimize('bfgs')\n # strip out fitted hyperparameters from GPy model, because cant do high(ish) dim sparse inference\n self.mu = self.GP.flattened_parameters[0]\n self.a = self.GP.flattened_parameters[1]\n self.l = self.GP.flattened_parameters[2]\n self.b = self.GP.flattened_parameters[3]\n # selecting inducing points for sparse inference \n print('selecting inducing points')\n # get prediction from GPy model \n self.py = self.GP.predict(X)\n # points with 100 highest means\n topmu = [untested[i] for i in np.argsort(self.py[0][untested].reshape(-1))[-ntopmu:]]\n # points with 100 highest uncertatinty\n topvar = [untested[i] for i in np.argsort(self.py[1][untested].reshape(-1))[-ntopvar:]]\n # combine with train set above to give nystrom inducing points (inducing points that are also actual trainingdata points) \n nystrom = topmu + topvar + train\n # also get some inducing points spread throughout domain by using kmeans\n # kmeans is very slow on full dataset so choose random subset \n # also scale using length scales l so that kmeans uses approproate distance measure\n kms = KMeans(n_clusters=nkmeans, max_iter=5).fit(\n np.divide(X[list(np.random.choice(untested, nkeamnsdata))], self.l))\n # matrix of inducing points \n self.M = np.vstack((X[nystrom], np.multiply(kms.cluster_centers_, self.l)))\n # dragons...\n # email [email protected] if this bit goes wrong!\n print('fitting sparse model')\n DXM = euclidean_distances(np.divide(X, self.l), np.divide(self.M, self.l), squared=True)\n self.SIG_XM = self.a * np.exp(-DXM / 2)\n DMM = euclidean_distances(np.divide(self.M, self.l), np.divide(self.M, self.l), squared=True)\n self.SIG_MM = self.a * np.exp(-DMM / 2) + np.identity(self.M.shape[0]) * lam * self.a\n self.B = self.a + self.b - np.sum(np.multiply(np.linalg.solve(self.SIG_MM, self.SIG_XM.T), self.SIG_XM.T),0)\n K = np.matmul(self.SIG_XM[tested].T, np.divide(self.SIG_XM[tested], self.B[tested].reshape(-1, 1)))\n self.SIG_MM_pos = self.SIG_MM - K + np.matmul(K, 
np.linalg.solve(K + self.SIG_MM, K))\n J = np.matmul(self.SIG_XM[tested].T, np.divide(ytested - self.mu, self.B[tested]))\n self.mu_M_pos = self.mu + J - np.matmul(K, np.linalg.solve(K + self.SIG_MM, J))\n else:\n K = np.matmul(self.SIG_XM[tested].T, np.divide(self.SIG_XM[tested], self.B[tested].reshape(-1, 1)))\n self.SIG_MM_pos = self.SIG_MM - K + np.matmul(K, np.linalg.solve(K + self.SIG_MM, K))\n J = np.matmul(self.SIG_XM[tested].T, np.divide(ytested - self.mu, self.B[tested]))\n self.mu_M_pos = self.mu + J - np.matmul(K, np.linalg.solve(K + self.SIG_MM, J))\n self.update_counter += 1\n \"\"\" \n key attributes updated by fit \n \n self.SIG_XM : prior covarience matrix between data and inducing points\n self.SIG_MM : prior covarience matrix at inducing points\n \n self.SIG_MM_pos : posterior covarience matrix at inducing points\n self.mu_M_pos : posterior mean at inducing points \n \n \"\"\"",
"def fit(self, x, y): \n # *** START CODE HERE ***\n y = y.reshape(y.shape[0], 1)\n y_0 = (1 - y).reshape(y.shape)\n m = y.shape[0]\n m_0 = np.asscalar(np.sum(y_0))\n m_1 = np.asscalar(np.sum(y))\n # Find phi, mu_0, mu_1, and sigma\n phi = np.sum(y) / m\n mu_0 = (np.sum(np.multiply(y_0, x), axis = 0, keepdims = True) / m_0) #.reshape(y.shape)\n mu_1 = np.sum(np.multiply(y, x), axis = 0, keepdims=True) / m_1\n sigma = getsigma(x, mu_0, mu_1, m, y, y_0)\n # Write theta in terms of the parameters\n sigma_inv = np.linalg.inv(sigma)\n log_phi = np.log(np.exp(-1 * np.log(phi)) - 1)\n theta_0 = (np.dot(np.dot(mu_0, sigma_inv), mu_0.T) - np.dot(np.dot(mu_1, sigma_inv), mu_1.T)) / 2 - log_phi\n self.theta = np.concatenate((theta_0, np.dot(sigma_inv, (mu_1 - mu_0).T)))\n # Compute cost\n x_0 = np.zeros((x.shape[0], 1)) + 1\n x_train = np.concatenate((x_0.T, x.T))\n h_theta = sigmoid(np.dot(self.theta.T, x_train)).T\n cost = - np.sum(np.dot(y.T, np.log(h_theta - (h_theta - 0.5) * self.eps)) + (np.dot(y_0.T, np.log(1 - h_theta + (h_theta - 0.5) * self.eps)))) / m\n if self.verbose:\n print(\"Cost: \" + str(cost))\n # *** END CODE HERE ***",
"def fit_gp(self):\n # Put things into training mode.\n self.gpf_core.float()\n self.likelihood.train()\n # Now use Adam by default.\n optimizer = torch.optim.Adam([{'params': self.gpf_core.parameters()}],\n lr=0.1)\n mll = gpytorch.mlls.ExactMarginalLogLikelihood(self.likelihood,\n self.gpf_core)\n # TODO: Allow length of training to be an option.\n for _ in range(500):\n optimizer.zero_grad()\n output = self.gpf_core(self.tensor_x)\n loss = -mll(output, self.tensor_y)\n loss.backward()\n optimizer.step()",
"def partial_fit(self, X, y, sample_weight=...):\n ...",
"def fit(self, X, y) :\n \n ### ========== TODO : START ========== ###\n # part b: set self.probabilities_ according to the training set\n training_set = y.sum()/y.size\n self.probabilities_ = training_set\n return self\n ### ========== TODO : END ========== ###\n \n return self",
"def fit_model(self):\r\n\t\tself.mu = np.mean(self.x, axis = 0)\r\n\t\tself.sig = np.std(self.x, axis = 0)",
"def fit(self, X, Y, sample_weight=..., **fit_params):\n ...",
"def fit(self, x, y):\r\n\r\n self.train_x = x\r\n self.train_y = y\r\n self.__find_psi__()",
"def fit(self, x, y, logger):\n history = self.model1.fit(x=x, y=y, batch_size=self.batch_size, epochs=self.epochs)\n logger.log({'ValFuncLoss': history.history['loss'][-1]})",
"def likelihood(self):\n \n raise NotImplementedError()",
"def nnls_fit(self):\n\n def sprod(a,b): #simplecting inner product between two Pauli operators\n return int(not a.commutes(b))\n\n F1 = [] #First list of terms\n F2 = [] #List of term pairs\n fidelities = [] # list of fidelities from fits\n\n for datum in self._term_data.values():\n F1.append(datum.pauli)\n fidelities.append(datum.fidelity)\n #If the Pauli is conjugate to another term in the model, a degeneracy is present\n if self._issingle(datum):\n F2.append(datum.pauli)\n else:\n pair = datum.pair\n F2.append(pair)\n\n #create commutativity matrices\n M1 = [[sprod(a,b) for a in F1] for b in F1]\n M2 = [[sprod(a,b) for a in F1] for b in F2]\n\n #check to make sure that there is no degeneracy\n if np.linalg.matrix_rank(np.add(M1,M2)) != len(F1):\n raise Exception(\"Matrix is not full rank, something went wrong!\")\n \n #perform least-squares estimate of model coefficients and return as noisemodel \n coeffs,_ = nnls(np.add(M1,M2), -np.log(fidelities)) \n self.noisemodel = NoiseModel(self.layer._cliff_layer, F1, coeffs)",
"def fit(self, X, y=None, **fit_params):\n return self",
"def __fit_model(self):\n\n labels = self.labeled_labels\n features = self.labeled_features\n\n pred = np.array(cross_val_predict(self.clf,\n features,\n labels,\n cv=self.cv))\n\n stats = self.__get_statistics(labels, pred)\n self.statistics.append(stats)\n\n self.clf.fit(features, labels)\n\n return self",
"def fit_test(self):",
"def log_likelihood(self, y_list):\n if self.lambda_mat is None:\n raise ValueError(\"Can't compute model likelihood before fitting!\")\n\n # precision prior distribution given precision hyper-parameters\n prec_distr = stats.gamma(a=self.prec_distr[0],\n scale=self.prec_distr[1] ** -1.0)\n\n # likelihood of projection matrix precision priors given\n # precision hyper-parameters\n lambda_logl = np.sum(\n prec_distr.logpdf(self.lambda_mat['alpha']\n / self.lambda_mat['beta'])\n )\n\n # likelihood of projection matrix values given their precision priors\n a_logl = np.sum(\n stats.norm(loc=0, scale=(self.lambda_mat['beta']\n / self.lambda_mat['alpha']))\n .logpdf(self.A_mat['mu'])\n )\n\n # likelihood of latent feature matrix given kernel matrix,\n # projection matrix, and standard deviation hyper-parameter\n h_logl = np.sum(\n stats.norm(loc=self.A_mat['mu'].transpose() @ self.kernel_mat,\n scale=self.sigma_h)\n .logpdf(self.H_mat['mu'])\n )\n\n # likelihood of bias parameter precision priors given\n # precision hyper-parameters\n weight_prior_logl = np.sum(\n prec_distr.logpdf(np.array(self.weight_priors['alpha'])\n / np.array(self.weight_priors['beta']))\n )\n\n # likelihood of bias parameters given their precision priors\n weight_logl = np.sum(\n stats.norm(loc=0, scale=(np.array(self.weight_priors['beta'])\n / np.array(self.weight_priors['alpha'])))\n .logpdf(self.weight_mat['mu'])\n )\n\n # likelihood of predicted outputs given latent features, bias\n # parameters, and latent feature weight parameters\n f_logl = np.sum(\n stats.norm(\n loc=(self.weight_mat['mu'][1:, :].transpose()\n @ self.H_mat['mu']\n + np.vstack(self.weight_mat['mu'][0, :])),\n scale=1).logpdf(self.output_mat['mu'])\n )\n\n # likelihood of actual output labels given class separation margin\n # and predicted output labels\n y_logl = np.sum(self.get_y_logl(y_list))\n\n return (lambda_logl + a_logl + h_logl\n + weight_prior_logl + weight_logl + f_logl + y_logl)",
"def fit(self, *_):\n return self",
"def fit(self, *_):\n return self",
"def fit(self, *_):\n return self",
"def fit(self):\n self.lr = LRHMC( self.X_train, self.X_test, self.y_train, self.y_test )\n self.lr.fit()",
"def fit(self, data: np.array, labels: np.array):\n raise NotImplementedError",
"def fit(self, X, y):\n self.model = self._initialize_model(X, y)\n self.model.optimize()",
"def fit(self, X, y):\r\n newWeight = [0.0] * self.size\r\n w = [0.0] * len(X)\r\n val = self.predict_prob(X) \r\n grad = [(y-1.0) * i[1] for i in X] \r\n grad1 = float((math.exp(-math.fsum((self.weight[f]*v for f, v in X)))) * val)\r\n grad2 = [i[1] * -1 * grad1 for i in X] \r\n for i in range(len(w)):\r\n w[i] = (grad[i] - grad2[i])\r\n \r\n w = [i*self.eta for i in w]\r\n for i in range(len(X)):\r\n newWeight[i] = self.weight[X[i][0]] -w[i]\r\n \r\n self.weight = newWeight[:]\r\n \r\n pass",
"def fit(self,X,y=None,**fit_params):\n return self",
"def fit(self, *args, **kwargs):\n return self",
"def log_likelihood(self, x):\n # set nuisance parameters to their central values!\n predictions = self.get_predictions(self.shortarray_to_array(x), nuisance=False)\n m_obj = flavio.Measurement['Pseudo-measurement for FastFit instance: ' + self.name]\n m_obs = m_obj.all_parameters\n prob_dict = m_obj.get_logprobability_all(predictions)\n ll = sum(prob_dict.values())\n return ll",
"def fit(self, data, labels):\n self.clf.fit(data, labels)",
"def fit(self, X):\n self._fit_X = X",
"def fit(self, X_train, y_train):\n \n # Number of examples where y = 0,1\n No_y_train_1 = np.sum(y_train)\n No_y_train_0 = y_train.shape[0] - No_y_train_1\n \n #Ratio of Number of examples where y=0,1 and the total number of examples\n self.theta_0 = No_y_train_0/y_train.shape[0]\n self.theta_1 = No_y_train_1/y_train.shape[0]\n \n #Ratio of Number of examples where x_j =1 and y=0,1 and Number of examples where y=0,1 respectively\n No_inst_j1 = X_train.T.dot(y_train.reshape([-1,1])) \n No_inst_j0 = X_train.T.dot(1-y_train.reshape([-1,1]))\n \n #Whether or not laplace smoothing is implemented or not\n if self.l_smooth:\n self.prob1 = (No_inst_j1 + 1)/(No_y_train_1 + 2)\n self.prob0 = (No_inst_j0 + 1)/(No_y_train_0 + 2)\n else:\n self.prob1 = No_inst_j1/No_y_train_1\n self.prob0 = No_inst_j0/No_y_train_0\n \n return self",
"def fit_transform(self, X, y=...):\n ...",
"def fit(self, data: pd.DataFrame):\n raise NotImplementedError",
"def fit(self):\n self.eval_chisq([1, 1, 1, 1])",
"def fit(self):\n # Initialize parameter estimates\n if self.estimator is not None:\n param_estimates = self.estimator(self.xf, self.yf)\n else: param_estimates = None\n self.popt, self.pcov = curve_fit(self.model, self.xf, self.yf, \n p0=param_estimates)\n self.fit_history.append({\"popt\" : self.popt, \"pcov\" : self.pcov})",
"def inner_fit(self):\n pass",
"def inner_fit(self):\n pass",
"def log_likelihood(self, data, reward_model, bias_params):",
"def fit(self, X, y, verbose=None):\n self.__cls.fit(X, y)",
"def test_linear_fitter_1D(self, model_class, constraints):\n\n model_args = linear1d[model_class]\n kwargs = {}\n kwargs.update(model_args[\"kwargs\"])\n kwargs.update(model_args[\"parameters\"])\n\n if constraints:\n kwargs.update(model_args[\"constraints\"])\n\n model = model_class(*model_args[\"args\"], **kwargs)\n\n y1 = model(self.x1)\n with warnings.catch_warnings():\n warnings.filterwarnings(\n \"ignore\",\n message=r\"The fit may be poorly conditioned\",\n category=AstropyUserWarning,\n )\n model_lin = self.linear_fitter(model, self.x1, y1 + self.n1)\n\n if constraints:\n # For the constraints tests we're not checking the overall fit,\n # just that the constraint was maintained\n fixed = model_args[\"constraints\"].get(\"fixed\", None)\n if fixed:\n for param in fixed:\n expected = model_args[\"parameters\"][param]\n assert getattr(model_lin, param).value == expected\n else:\n assert_allclose(model_lin.parameters, model.parameters, atol=0.2)",
"def _model_fit_term(self):\n if self.likelihood.YYT is None:\n return -0.5*np.sum(np.square(np.dot(self.Li,self.likelihood.Y)))\n else:\n return -0.5*np.sum(np.multiply(self.Ki, self.likelihood.YYT))",
"def fit(self,X_train,y_train):\r\n \r\n self.X_train_data=X_train.reset_index(drop=True)\r\n self.y_train_data=y_train.reset_index(drop=True)\r\n \r\n temp_fitted_model=[]\r\n for each_model in self.model_list:\r\n each_model.fit(self.X_train_data,self.y_train_data)\r\n temp_fitted_model.append(each_model)\r\n \r\n self.fitted_model=temp_fitted_model",
"def _fit(self, dataset):\n raise NotImplementedError()",
"def fit(self, X: np.ndarray, y: Optional[np.ndarray] = None) -> None:\n\n self._check_input_data(X=X, y=y)\n self._check_support(X=X)\n\n if y is None:\n self.prob = self.compute_prob_mle(X)\n else:\n n_classes = max(y) + 1\n self.prob = np.zeros(n_classes)\n\n for cls in range(n_classes):\n self.prob[cls] = self.compute_prob_mle(X[y == cls]) # type: ignore",
"def fit(self, X: np.ndarray, y: Optional[np.ndarray] = None) -> None:\n\n self._check_input_data(X=X, y=y)\n self._check_support(X=X)\n\n if y is None:\n self.prob = self.compute_prob_mle(X)\n else:\n n_classes = max(y) + 1\n self.prob = np.zeros(n_classes)\n\n for cls in range(n_classes):\n self.prob[cls] = self.compute_prob_mle(X[y == cls]) # type: ignore",
"def fit(self, inp, targ):\n self.model.fit(inp, targ, epochs=1, verbose=0)",
"def fit(self, X, y) :\n\n ### ========== TODO : START ========== ###\n # part b: set self.probabilities_ according to the training set\n # create a dictionary of frequencies and convert to probabilities\n frequencies = Counter(y)\n self.probabilities_ = {key:float(value)/len(y) for (key,value) in frequencies.items()}\n ### ========== TODO : END ========== ###\n\n return self",
"def fit(self) -> None:\n\n levels = self.levels\n TSs = GetAggregateTS(self.data).aggregate(levels)\n models = {}\n residuals = {}\n fcsts = {}\n for bm in self.baseModels:\n model_name = bm.model_name\n if model_name is None: # only residuals and fcsts are provided\n models[bm.level] = None\n residuals[bm.level] = bm.residuals\n fcsts[bm.level] = bm.fcsts\n else:\n m = BASE_MODELS[model_name](\n data=TSs[bm.level],\n params=bm.model_params,\n )\n m.fit()\n models[bm.level] = m\n self.models = models\n self.info_fcsts = fcsts\n self.info_residuals = residuals",
"def _fit(self, x_train, y_train, x_valid, y_valid, regressor_callback=None):",
"def fit(self, *, printing=False):\r\n _df_input_conditions(self._X, self._y)\r\n\r\n model = sm.Logit(self._y, sm.add_constant(self._X))\r\n\r\n if printing:\r\n print(\"Model fitting in progress...\")\r\n with _SuppressPrints(): # hide Statsmodels printing\r\n self._results = model.fit()\r\n self._results_output = self._results.summary(alpha=self._alpha)\r\n\r\n model_selection_dict = {\"log_likelihood\": self._results.llf,\r\n \"r_squared_pseudo\": self._results.prsquared,\r\n \"aic\": self._results.aic,\r\n \"bic\": self._results.bic}\r\n self._model_selection_stats = model_selection_dict\r\n self._log_likelihood = self._results.llf\r\n self._odds_ratios = pd.Series(np.exp(self._results.params\r\n .drop('const')),\r\n name='odds_ratios')\r\n\r\n self._standardize_results()\r\n\r\n self._is_fitted = True\r\n if printing:\r\n print(\"Model fitted.\")\r\n\r\n return self",
"def best_fit(self, **kwargs):\n n_fit_p = len(self.fit_parameters)\n n_wc = len(self.fit_wc_names)\n if n_fit_p + n_wc == 1:\n def f(x):\n return -self.log_likelihood([x])\n opt = scipy.optimize.minimize_scalar(f, **kwargs)\n else:\n def f(x):\n return -self.log_likelihood(x)\n if 'x0' not in kwargs:\n x0 = np.zeros(n_fit_p + n_wc)\n if n_fit_p > 1:\n x0[:n_fit_p] = self.get_central_fit_parameters\n opt = minimize_robust(f, x0, **kwargs)\n else:\n opt = minimize_robust(f, **kwargs)\n if not opt.success:\n raise ValueError(\"Optimization failed.\")\n else:\n return {'x': opt.x, 'log_likelihood': -opt.fun}",
"def fit(self,X,y):\n\n d = X.shape[1]\n # 1. sketch the data\n self.B,a = self._sketch(X,method=self.fd_mode)\n #H = B.T@B + (self.alpha+a)*np.eye(d)\n #self.H = H\n self.H_inv = self._get_inv() #np.linalg.pinv(H)\n self.coef_ = self.H_inv@(X.T@y) #np.linalg.solve(H, X.T@y)\n self.is_fitted = True",
"def fit(self, X, y):\n self.model_x = X\n self.model_y = y",
"def fit(self, train_matrix, train_label, sample_weight):\r\n raise NotImplementedError",
"def fit(self, X, y):\n\t\t# TODO insert stack of 1 for bias\n\t\tX = np.insert(X, 0, 1, axis=1)\n\t\tself.errors = []\n\t\tself.initialize_weights(n_features=X.shape[1])\n\t\t\n\t\t# TODO gradient descent for n_iter\n\t\tfor _ in range(self.n_iter):\n\t\t\ty_pred = X.dot(self.weights)\n\t\t\t# calculate l2 loss\n\t\t\tmse = np.mean(0.5 * (y - y_pred)**2 + self.regularization(self.weights))\n\t\t\tself.errors.append(mse)\n\t\t\t# calucalte gradiant of loss function w.r.t weights\n\t\t\tgrad_weights = -(y - y_pred).dot(X) + self.regularization.grad(self.weights)\n\t\t\t# weight updation\n\t\t\tself.weights -= self.lr * grad_weights"
] | [
"0.7537152",
"0.7372642",
"0.7372642",
"0.7372642",
"0.7372642",
"0.7372642",
"0.7372642",
"0.7372642",
"0.7372642",
"0.7372642",
"0.7372642",
"0.7346237",
"0.7229979",
"0.7154896",
"0.7139961",
"0.71326166",
"0.71136487",
"0.71136487",
"0.7056964",
"0.7034442",
"0.7034442",
"0.7034442",
"0.7033468",
"0.7021897",
"0.69478065",
"0.6910084",
"0.6910084",
"0.68359804",
"0.6793921",
"0.67236066",
"0.6676446",
"0.66759557",
"0.66716164",
"0.6643346",
"0.6641364",
"0.6640024",
"0.66127664",
"0.6579767",
"0.65444064",
"0.6542285",
"0.65103865",
"0.64920855",
"0.64697444",
"0.6458266",
"0.64472926",
"0.6444447",
"0.6426853",
"0.64097863",
"0.6405994",
"0.6405371",
"0.640153",
"0.6399991",
"0.6373311",
"0.63712674",
"0.63697666",
"0.6366732",
"0.63639516",
"0.63583785",
"0.6354909",
"0.6349281",
"0.6339577",
"0.6326837",
"0.630596",
"0.6300507",
"0.6298816",
"0.6298816",
"0.6298816",
"0.629489",
"0.6270469",
"0.6259111",
"0.6258512",
"0.62549466",
"0.62440866",
"0.624316",
"0.6239572",
"0.62369096",
"0.62278444",
"0.6203611",
"0.62025034",
"0.6201224",
"0.6198001",
"0.61840975",
"0.61840975",
"0.6178002",
"0.6174393",
"0.61723375",
"0.61635286",
"0.6158795",
"0.61558676",
"0.6152815",
"0.6152815",
"0.615123",
"0.61471915",
"0.61452246",
"0.6140333",
"0.61378825",
"0.61359274",
"0.613347",
"0.61309075",
"0.61302555",
"0.61286205"
] | 0.0 | -1 |
(array) The predicted values of the model. An (nobs x k_endog) array. | def fittedvalues(self):
return self.model.predict(self.params)
# TODO: GH#5255 is this necessarily equivalent to self.predict()? | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def predicted(self):\n return np.squeeze(self._predicted)",
"def predictions(self):\n return self._pred",
"def predict(self):\n self.kf.predict()\n self.nb_kf_pred += 1\n if self.time_since_update > 0:\n self.hit_streak = 0\n self.time_since_update += 1\n self.history.append(self.kf.x[:2].reshape(-1))\n return self.history[-1]",
"def value_predictions(self):\n return np.array([m['critic'] for m in self.model_outs], dtype=np.float32)",
"def predict(self): \n return self.model.predict(self.test_x)",
"def predict(self, X):\n iterable = (knn_prediction(self._X, self._y, x, self.k) for x in X)\n return np.fromiter(iterable, np.int32, count=X.shape[0])",
"def predict(self, X):",
"def predict(self, X):",
"def predictions(self):\n\n return self._predictions",
"def predict_evidences(self, X):",
"def predict ( self, X: np.ndarray ):\n \n return self.knn.predict ( X )\n # End predict()",
"def predict(self, x):\n \n\n return predictions",
"def model_predict(classifier, X_test:list) -> list:\n y_predict = classifier.predict(X_test)\n return y_predict",
"def predict(self, X):\n if len(self.labels) == 0:\n raise ValueError(\"You should fit first!\")\n resultProba = MyKNeighborsClassifier.predict_proba(self,X)\n returnArray = []\n for i in range(0,len(X)):\n returnArray.append(np.argmax(resultProba[i]))\n return returnArray\n\n pass",
"def predict(self, X): \n # Check is fit had been called\n check_is_fitted(self, ['X_', 'y_'])\n\n # Input validation\n X = check_array(X)\n\n j= 0\n predicted_labels = np.array([])\n while(j < X.shape[0]):\n current_batch_end = j+self.batch_size if j+self.batch_size < X.shape[0] else X.shape[0]\n current_batch = X[j:current_batch_end]\n self._feedforward(current_batch)\n predicted_labels = np.append(predicted_labels, np.take(self.map_labels, self.bmu_indices))\n j = current_batch_end\n \n return predicted_labels",
"def predict(self, X):\n y_pred = np.full(shape=(X.shape[0],), fill_value=self.guess)\n return y_pred",
"def predict ( self, X ):\n \n return self.knn.predict ( X )\n # End predict()",
"def getPredictionArray(self, inputTestData, NeuralNet):\n scaledTestData = self.scaleInputTestData(inputTestData)\n predictedValues = (NeuralNet.predict(scaledTestData) * 100).data.numpy()\n return predictedValues",
"def predict(self):\n self.get_test_data()\n predicted_labels = []\n for row in self.test_data:\n predicted_labels.append(DecisionTree.predict_row(self.classifier, row))\n return predicted_labels",
"def predict(self):\n RV = np.zeros((self.N,self.P))\n for term_i in range(self.n_terms):\n RV+=np.dot(self.Fstar()[term_i],np.dot(self.B()[term_i],self.Astar()[term_i]))\n return RV",
"def predict(self, X):\n\n predicted_y = np.zeros(X.shape[0])\n for i in range(X.shape[0]):\n neighbours = self.get_neighbours(X[i], self.k_nearest)\n predicted_y[i] = self.get_vote(neighbours)\n return predicted_y",
"def predict(self):\n if ((self.kf.x[6] + self.kf.x[2]) <= 0):\n self.kf.x[6] *= 0.0\n self.kf.predict()\n self.age += 1\n if (self.time_since_update > 0):\n self.hit_streak = 0\n self.time_since_update += 1\n self.history.append(convert_x_to_bbox(self.kf.x))\n return self.history[-1]",
"def predict(self, X):\n # Set dropout to 1.0 when runnig prediction of model\n risk = self.sess.run(\n [self.y], \n feed_dict = {self.X: X, self.keep_prob: 1.0}\n )\n risk = np.squeeze(risk)\n if risk.shape == ():\n risk = risk.reshape((1, ))\n return risk",
"def predict(self, X):\n ...",
"def predict(self, X):\n ...",
"def predict(self, X):\n ...",
"def predict(self, obs):\n pred_q = self.model(obs)\n return pred_q",
"def predict(self, X):\n\n # return the numpy array y which contains the predicted values\n\n # adding extra 1s and reshaping y\n n_samples = X.shape[0]\n n_features = X.shape[1]\n temp = np.empty((n_samples, n_features+1), dtype=float)\n for i in range(n_samples):\n temp[i] = np.append(X[i], 1)\n X = temp\n y = self.sigmoid(np.dot(X, self.theta)) # predicted value\n # return the numpy array y which contains the predicted values\n y_final = np.around(y)\n return y_final",
"def predict(self, X):\n y_hats = np.zeros(self.N)\n for i in range(self.N):\n y_hats[i] = self.learners[i].predict(X)\n return y_hats",
"def predict(self, X):\n pred = []\n \n for x in X:\n y_pred = self.__feed_forward(x)\n pred.append(np.argmax(y_pred, axis=0))\n \n return np.asarray(pred)",
"def predict(self, predPoints=None):",
"def predict(self, X):\n pass",
"def predict(self, X):\n pass",
"def predict(self, X):\n pass",
"def predicts(self,X):\n return [self.predict(x) for x in X]",
"def predict(self, X):\n if X.ndim==1: X = X[None,:]\n m, v = self.model.predict(X)\n v = np.clip(v, 1e-10, np.inf)\n return m, np.sqrt(v)",
"def predict(self, obs):\n pass",
"def predict(self):\n raise NotImplementedError",
"def _predict(self, data):\n # make sure we're talking about arrays\n data = N.asarray(data)\n\n # checks only in debug mode\n if __debug__:\n if not data.ndim == 2:\n raise ValueError, \"Data array must be two-dimensional.\"\n\n if not data.shape[1] == self.__data.nfeatures:\n raise ValueError, \"Length of data samples (features) does \" \\\n \"not match the classifier.\"\n\n # compute the distance matrix between training and test data with\n # distances stored row-wise, ie. distances between test sample [0]\n # and all training samples will end up in row 0\n dists = self.__dfx(self.__data.samples, data).T\n\n # determine the k nearest neighbors per test sample\n knns = dists.argsort(axis=1)[:, :self.__k]\n\n # predicted class labels will go here\n predicted = []\n\n if self.__voting == 'majority':\n vfx = self.getMajorityVote\n elif self.__voting == 'weighted':\n vfx = self.getWeightedVote\n else:\n raise ValueError, \"kNN told to perform unknown voting '%s'.\" \\\n % self.__voting\n\n # perform voting\n results = [vfx(knn) for knn in knns]\n\n # extract predictions\n predicted = [r[0] for r in results]\n\n # store the predictions in the state. Relies on State._setitem to do\n # nothing if the relevant state member is not enabled\n self.predictions = predicted\n self.values = [r[1] for r in results]\n\n return predicted",
"def predict(self, X_test):\n predicted_label = [self._predict(x_test) for x_test in X_test]\n\n return np.array(predicted_label)",
"def predict(self, X):\n training_inputs = [np.reshape(i, (len(X[0]), 1)) for i in X] # 将一维数组变为二维矩阵[14,1]\n test_results = [np.argmax(self.feedforward(i)) #argmax 返回最大数的索引\n for i in training_inputs]\n return np.array(test_results) #转换为数组输出",
"def predictions_conf(self):\n return self._pred_L, self._pred_R",
"def predict(self, X):\n\n y_pred = np.zeros(X.shape[0])\n y_pred = np.argmax(np.dot(X,self.W), axis=1)\n ###########################################################################\n # Implement this method. Store the predicted labels in y_pred. #\n ###########################################################################\n\n return y_pred",
"def predict(self):\n return _RateElasticNetRegressor.predict(self)",
"def predict(self):\n prediction = np.multiply(self.alpha_vec, self.label[:,np.newaxis]).T\n pred = np.dot(prediction, np.inner(self.train_data, self.train_data)) + self.intercept\n self.prediction = np.sign(pred)\n return(self.prediction)",
"def predict_proba(self):\n ...",
"def predict(self):\n probabilities = self.probability_array()\n # THIS ASSUMES the classifiers are in order: 0th column of the\n # probabilities corresponds to label = 0, ..., 9th col is for 9.\n classes = np.argmax(probabilities, axis=1)\n return classes",
"def predict(self, X):\n y_pred = np.zeros(X.shape[0])\n ###########################################################################\n # TODO: #\n # Implement this method. Store the predicted labels in y_pred. #\n ###########################################################################\n y_pred = np.argmax(np.dot(X, self.W), axis=1)\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return y_pred",
"def predict(self, data: np.array) -> np.array:\n return self.model.predict(squeeze_keep_batch(data))",
"def supervised_predict(self, x):\n z_ik = self.get_posterior(x)\n em_label = np.argmax(z_ik, axis=1)\n y_hat = [self.cluster_label_map[idx] for idx in em_label]\n return np.array(y_hat)",
"def predict(self, X):\n # yields labels of the given dataset X after calling predict_proba\n A=self.predict_proba(X)\n y_hat=np.argmax(A,axis=0)\n return y_hat",
"def predict(self,X):\r\n z = np.dot(X,self.theta) + self.bias\r\n exp_z = np.exp(z)\r\n softmax_scores = exp_z / (exp_z + 1)\r\n predictions = np.argmax(softmax_scores, axis = 1)\r\n return predictions",
"def predict(self, X_val):\n \n # Get scores\n preds = list()\n scores = self.get_scores(X_val)\n\n # Round to predictions\n for score in scores:\n preds.append(round(score))\n \n # Read as numpy array\n preds = np.array(preds).astype('int32')\n \n return preds",
"def predict(self, X):\n\n Xn = np.copy(X)\n\n preds = []\n # compute distance from all points\n for x1 in Xn:\n dist = self._euclidian_distance(self.X_data, x1)\n dist = np.vstack((dist, self.y)).T\n dist = dist[dist[:, 0].argsort(axis=0)][:,-1]\n # get a vote from top k\n pred = sts.mode(dist[0:self.k])[0][0]\n preds.append(pred)\n\n return np.array(preds)",
"def _predict_labels(self) -> pd.Series:\n\n # get the prediction dataset\n data = self._get_prediction_data()\n data_as_array = data.to_numpy()\n\n if self._standardize_data:\n data_as_array = self._final_scaler.transform(data_as_array)\n\n # predict with final model using the optimal threshold\n y_pred = self._final_model.predict_proba(data_as_array)[:,1]\n threshold_predictions = [1 if y > self._optimal_threshold else 0 for y in y_pred]\n\n # create series out of predictions\n y_labels = pd.Series(data = threshold_predictions, index = data.index)\n return y_labels",
"def predict(self, X: np.ndarray) -> np.ndarray:\n return self._rf.predict(X)",
"def predict(self, data):\n return self.result.predict(data)",
"def get_predictors(self):\n\t\treturn self.predictors",
"def model_predict(model,x_test,y_test):\n\n\n y_pred = model.predict(x_test)\n\n predict_class = np.argmax(y_pred, axis=1)\n\n predict_class = predict_class.tolist()\n\n return(y_pred,predict_class)",
"def predict(self, dt=1):\n self.kf.predict()\n if self.time_since_update > 0: # there was missed detections\n self.continuing_hits = 0\n self.time_since_update += 1\n return self.kf.x[:self.dim_z].squeeze()",
"def predict(self, X):\n (t0, t1, t2) = self.theta\n g = lambda x: t0 + t1 * x[0] + t2 * x[1]\n return np.array([\n self.classes[1] if g(x) > 0 else self.classes[0]\n for x in X\n ])",
"def prediction(self, x):\n t = self.model.predict(x.reshape(1, -1))\n return t",
"def fit_predict(self, X, y=None):\n self.fit(X)\n return self.labels_",
"def fit_predict(self, X, y=None):\n self.fit(X)\n return self.labels_",
"def predict(self, X):\n\n # return the numpy array y which contains the predicted values\n n_samples = X.shape[0]\n n_features = X.shape[1]\n temp = np.empty((n_samples, n_features+1), dtype=float)\n for i in range(n_samples):\n temp[i] = np.append(X[i], 1)\n X = temp\n y = np.dot(X, self.theta)\n # return the numpy array y which contains the predicted values\n return y",
"def predict(self, test_data):\r\n return self.gs.predict(test_data)",
"def model_predict(self, X):\n return self.cmodel.predict(X=X)",
"def predict(self, X:np.ndarray) -> np.ndarray:\n n_obs = X.shape[0]\n n_classes = self.mu.shape[0]\n n_features = self.mu.shape[1]\n\n # initalize the output vector\n y = np.empty(n_obs,dtype=int)\n #d=np.linalg.det()\n c_=np.zeros(n_classes)\n a=np.zeros((n_features,n_features))\n for j,x in enumerate(X):\n for i in range(n_classes):\n diag=self.sigma[i].diagonal()\n np.fill_diagonal(a,diag)\n #print(a,i)\n #sig_inv=np.linalg.inv(self.sigma[i]) #l'nverse de la matrice sigma\n sig_inv=np.linalg.inv(a)\n x_u=x - self.mu[i] \n #c_[i]=-((n_features/2.)*log(2*pi) + 0.5*log(np.linalg.det(self.sigma[i]))) - 0.5*((x_u.T).dot(sig_inv)).dot(x_u)\n c_[i]=-0.5*log(np.linalg.det(self.sigma[i])) - 0.5*((x_u.T).dot(sig_inv)).dot(x_u) + log(self.priors[i])\n y[j]=np.argmax(c_)\n \n #print(y)\n return y",
"def predict(self, X: List[np.ndarray], **kwargs) -> List[np.ndarray]:",
"def predict(self, X: ArrayLike) -> np.ndarray:\n predictions = self._model.predict(X)\n return np.array([prediction for prediction in predictions])",
"def predictions(self, model):\n return get_predictions_from_df(\n model=model, df=self.prediction_df,\n fixed_effects=self.fixed_effects,\n random_effect=self.random_effect,\n spline=self.spline,\n offset=self.offset,\n )",
"def prediction_samples(self) -> np.ndarray:\n return self.prediction_samples",
"def oldPredict(self, data):\n\n predictions = []\n\n if len(self.observations) < self.k_neighbors:\n print(f\"Data length ({len(data)}) was too small.\")\n\n for row in data:\n neighbors_info = {}\n\n for row_index in range(len(self.observations)):\n distance = self.calcualteEuclideanDistance(self.observations[row_index], row)\n if len(neighbors_info) > self.k_neighbors - 1:\n largest_distance = max(neighbors_info.keys())\n if distance < largest_distance:\n neighbors_info[distance] = self.labels[row_index]\n del neighbors_info[largest_distance]\n else:\n neighbors_info[distance] = self.labels[row_index]\n\n unique_values = set(neighbors_info.values())\n if len(unique_values) == 1:\n value = unique_values.pop()\n predictions.append(value)\n else:\n best_value = 0\n best_value_weight = 0\n for label in unique_values:\n weight = 0\n for distance in neighbors_info.keys():\n if label == neighbors_info[distance]:\n if 'inverse_distance' == self.weight_type:\n weight += self.calulateWeightedVote(distance)\n elif 'no_weight' == self.weight_type:\n weight += 1\n else:\n print(\"Not a valid_weight_type.\")\n\n if weight > best_value_weight:\n best_value_weight = weight\n best_value = label\n\n predictions.append(best_value)\n # print(f\"Neighbors Info: {neighbors_info}\")\n\n return predictions",
"def predict(self, Xtest):\n ytest = np.zeros(Xtest.shape[0], dtype=int)\n if (self.params['kernel'] == 'hamming'):\n print('')\n Ktest = np.zeros([Xtest.shape[0], self.params['k']])\n for i in range (0, Xtest.shape[0]):\n for j in range (0, self.params['k']):\n Ktest[i][j] = self.hamming(Xtest[i], self.kcentre[j])\n \n \n else:\n \n Ktest = np.dot(Xtest, self.kcentre.T)\n ### YOUR CODE HERE\n sig = np.dot(Ktest, self.weights)\n sig = utils.sigmoid(sig)\n #print (sig)\n sig = np.round(sig)\n #print (sig)\n for i in range (0, ytest.shape[0]):\n ytest[i] = int(sig[i])\n ### END YOUR CODE\n #print (ytest)\n assert len(ytest) == Xtest.shape[0]\n return ytest",
"def predict(self, X):\n\n\t\tn_samples = X.shape[0]\n\t\tpredicted = np.zeros(n_samples)\n\n\t\tfor i in xrange(n_samples):\n\t\t\tpredicted[i] = self.classify_example(X[i])\n\n\t\treturn predicted",
"def predict(self, output):\n return t.argmax(output, dim=1)",
"def predict(self, X_test):\n if self.basis_func is not None:\n X_transformed = self.basis_func(X_test)\n else:\n X_transformed = X_test\n\n # Marginalise predictions over hyperparameters\n mu = np.zeros([len(self.hypers), X_transformed.shape[0]])\n var = np.zeros([len(self.hypers), X_transformed.shape[0]])\n\n for i, h in enumerate(self.hypers):\n mu[i] = np.dot(self.models[i][0].T, X_transformed.T)\n var[i] = 1. / h[1] + np.diag(np.dot(np.dot(X_transformed, self.models[i][1]), X_transformed.T))\n\n m = mu.mean(axis=0)\n v = var.mean(axis=0)\n # Clip negative variances and set them to the smallest\n # positive float value\n if v.shape[0] == 1:\n v = np.clip(v, np.finfo(v.dtype).eps, np.inf)\n else:\n v = np.clip(v, np.finfo(v.dtype).eps, np.inf)\n v[np.where((v < np.finfo(v.dtype).eps) & (v > -np.finfo(v.dtype).eps))] = 0\n\n return m, v",
"def getPredictions(self):\n\t\tself.bestLabel = self.testingProbs.apply(lambda x: x.argmax(),1)",
"def predict(self, X):\n\n N, D = X.shape\n predictions = np.zeros((N, self.n_targets))\n for j in range(self.n_targets):\n predictions[:, j] = self.ensemble[j].predict(X)\n return predictions",
"def predict(self, X):\n # get the unconstrained predictions\n res = orig_predict(self, X)\n\n if self.rescale:\n # convert the predictions to z-scores,\n # then rescale to match the training set distribution\n res = (((res - self.yhat_mean) / self.yhat_sd) * self.y_sd) + self.y_mean\n\n if self.constrain:\n # apply min and max constraints\n res = np.array([max(self.y_min, min(self.y_max, pred))\n for pred in res])\n\n return res",
"def predict(self, data: List):",
"def predict(self, X):\n pred = np.zeros(X.shape[0])\n ### YOUR CODE HERE 1-3 lines\n probabilities = np.array([model.probability(X) for model in self.models])\n pred=np.argmax(probabilities, axis=0)\n ### END CODE\n assert pred.shape == (X.shape[0],)\n return pred",
"def predict(self, X):\r\n y_pred = np.argmax(self.loss(X),axis=1)\r\n return y_pred",
"def predict(self, X):\n return np.argmax(self.model.predict(X), axis=1)",
"def predict(self,X): \n return self._predict(X)",
"def _predict(self, X: TimeSeriesInstances, y=None) -> np.ndarray:\n raise NotImplementedError",
"def make_predictions(model, x_test, y_test):\r\n preds = model.predict(x_test)\r\n y_hat = np.argmax(preds, axis=-1)\r\n print(type(y_test))\r\n y_test.columns = [0, 1]\r\n y = y_test.idxmax(axis=1)\r\n print(y_hat.shape)\r\n print(y.shape)\r\n return y_hat, y",
"def predict(self, X_test):\n return self.model.predict(X_test)",
"def predict(self):\n path = self._artifact_repo.artifact_path(self._ARTIFACT_MODEL)\n model = tf.keras.models.load_model(path)\n\n _, _, x_test, y_test = self._load_data()\n x_test = tf.keras.utils.normalize(x_test, axis=1)\n\n preds = model.predict(x_test)\n self._show_cf_matrix(np.array([np.argmax(probas) for probas in preds]), y_test)",
"def predict(self, X):\n prob = self.predict_proba(X)\n if self.rule == 'fda':\n prob_1 = prob[:, :self.n_class_]\n prob_2 = prob[:, self.n_class_:]\n return np.vstack((self.labels_[prob_1.argmax(1)], self.labels_[prob_2.argmax(1)]))\n else:\n return self.labels_[prob.argmax(1)]",
"def predict(model, X_testing):\n predictions = model.predict(X_testing)\n\n return predictions",
"def _predicts(self, Xrow):\n _, row = Xrow\n return self._predicts_target_values(row, None)",
"def _predicts(self, Xrow):\n _, row = Xrow\n return self._predicts_target_values(row, None)",
"def _predicts(self, Xrow):\n _, row = Xrow\n return self._predicts_target_values(row, None)",
"def _predicts(self, Xrow):\n _, row = Xrow\n return self._predicts_target_values(row, None)",
"def _predicts(self, Xrow):\n _, row = Xrow\n return self._predicts_target_values(row, None)",
"def predict(self, X: np.ndarray) -> np.ndarray:\n\n assert X.shape[2] == self.n_features\n assert X.shape[1] == 80\n\n Y = self.model.predict(X)\n\n return Y",
"def predict(self, model, x_test):\n pass",
"def predict(self, X):\n return np.argmax(self.predict_proba(X), axis=1)",
"def target_predict(self, inp):\n return self.target_model.predict(inp)"
] | [
"0.7712333",
"0.7250647",
"0.71565753",
"0.7084843",
"0.7081569",
"0.69067216",
"0.68599117",
"0.68599117",
"0.6858041",
"0.6837202",
"0.6804691",
"0.6788451",
"0.67536706",
"0.6731195",
"0.67286116",
"0.6702297",
"0.6701598",
"0.67001456",
"0.66936904",
"0.66884923",
"0.66786695",
"0.6650343",
"0.6635592",
"0.6624512",
"0.6624512",
"0.6624512",
"0.661238",
"0.6597027",
"0.659641",
"0.6584731",
"0.6581637",
"0.65731406",
"0.65731406",
"0.65731406",
"0.65694076",
"0.656512",
"0.65549105",
"0.65524745",
"0.65488297",
"0.65084046",
"0.6507468",
"0.650487",
"0.65034527",
"0.6502562",
"0.6498038",
"0.64967775",
"0.64866614",
"0.6485806",
"0.6474376",
"0.646366",
"0.6461636",
"0.64574504",
"0.6455301",
"0.64548624",
"0.6453536",
"0.64239687",
"0.64135563",
"0.6409507",
"0.6409246",
"0.6407286",
"0.6395476",
"0.6390114",
"0.6385291",
"0.6385291",
"0.63835",
"0.63806987",
"0.63793534",
"0.6376322",
"0.6365052",
"0.6364307",
"0.6362363",
"0.6358556",
"0.6347718",
"0.6344868",
"0.6342282",
"0.63398224",
"0.6339281",
"0.6332625",
"0.63270193",
"0.63254285",
"0.6323513",
"0.63234776",
"0.6317412",
"0.63138574",
"0.6312932",
"0.631217",
"0.63096154",
"0.6308801",
"0.63039",
"0.63037837",
"0.63001835",
"0.6298416",
"0.6298416",
"0.6298416",
"0.6298416",
"0.6298416",
"0.629509",
"0.6284146",
"0.6278619",
"0.62784266"
] | 0.64621586 | 50 |
(array) The model residuals. An (nobs x k_endog) array. | def resid(self):
# GH#5255
return self.model.endog - self.fittedvalues | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def residuals(self):\r\n return self.__residuals",
"def residuals_(self):\n return self._residuals",
"def residuals(self) -> npt.NDArray[np.float64]:\n return self.data - self.theory",
"def _get_residuals(self, model: Model) -> np.ndarray:\n try:\n # pyre-fixme[16]: `Model` has no attribute `model`.\n return model.model.resid.values\n except Exception:\n fcst = model.predict(steps=1, freq=\"D\", include_history=True)\n # pyre-fixme[16]: `None` has no attribute `merge`.\n # pyre-fixme[16]: `Optional` has no attribute `to_dataframe`.\n merge = fcst.merge(model.data.to_dataframe(), on=\"time\")\n for col in merge.columns:\n if col != \"time\" and (\"fcst\" not in col):\n return merge[col].values - merge[\"fcst\"].values\n raise ValueError(\"Couldn't find residual or forecast values in model\")",
"def get_residual(self) -> np.ndarray:\n return self._calculate_residual(self.coefficients)",
"def _get_residual_matrix(self) -> np.ndarray:\n res_matrix = self.res_matrix\n if res_matrix is None:\n residuals = self._get_all_residuals()\n ks = self.levels\n freq = self.freq\n h = np.min([len(residuals[k]) // freq[k] for k in ks])\n res_matrix = []\n for k in ks:\n n = h * freq[k]\n res_matrix.append(residuals[k][-n:].reshape(h, -1).T)\n res_matrix = np.row_stack(res_matrix)\n self.res_matrix = res_matrix\n return res_matrix",
"def getResiduals(self):\n X = np.zeros((self.N*self.P,self.n_fixed_effs))\n ip = 0\n for i in range(self.n_terms):\n Ki = self.A[i].shape[0]*self.F[i].shape[1]\n X[:,ip:ip+Ki] = np.kron(self.A[i].T,self.F[i])\n ip += Ki\n y = np.reshape(self.Y,(self.Y.size,1),order='F')\n RV = regressOut(y,X)\n RV = np.reshape(RV,self.Y.shape,order='F')\n return RV",
"def postfit_residuals(self) -> NONEARRAY:\n pass",
"def residuals(self, X=None, y=None) -> np.ndarray:\n if y is None:\n return self.model.data.y.unnormalized_y - self.predict(X)\n else:\n return y - self.predict(X)",
"def residual(self, y,r):\n u,v,tt = self.split(y)\n fiu,fiv,fitt = self.problem.internal_forces(u,v,tt)\n R = np.concatenate((fiu,fiv,fitt))\n R = self.residualApplyBCs(R,y,r)\n return R",
"def _compute_residuals(self):\n residuls = self.I - self.E\n return residuls",
"def residual(pars, data= None):\n\n\t\t\tresid = np.array([])\n\n\n\t\t\t# make residual per data set\n\n\t\t\tfor N in range(n_annulus):\n\n\t\t\t\tmdl_ev = 0\n\t\t\t\tr_space_k = rings_pos[N+1] - rings_pos[N] \n\t\t\t\tmask = np.where( (r_n >= rings_pos[N] ) & (r_n < rings_pos[N+1]) )\n\t\t\t\tx,y = XY_mesh[0][mask], XY_mesh[1][mask] \n\t\t\t\tXY = (x,y)\n\n\n\n\n\t\t\t\tfor kk in range(2):\n\t\t\t\t\tVxy,Vsys = vmodel_dataset(pars, N+kk, XY, r_0 = rings_pos[N], r_space = r_space_k)\n\n\t\t\t\t\tmdl_ev = mdl_ev + Vxy[kk]\n\n\n\t\t\t\t\tif N == 0 and kk == 0:\n\t\t\t\t\t\t\n\t\t\t\t\t\tmask1 = np.where( (r_n < rings_pos[0] ) )\n\t\t\t\t\t\tx1,y1 = XY_mesh[0][mask1], XY_mesh[1][mask1] \n\t\t\t\t\t\tXY1 = (x1,y1)\n\n\n\t\t\t\t\t\t#\n\t\t\t\t\t\t#\n\t\t\t\t\t\t# inner interpolation\n\t\t\t\t\t\t#\n\t\t\t\t\t\t#\n\t\t\t\t\t\n\t\t\t\t\t\t#(a) velocity rise linearly from zero\n\n\t\t\t\t\t\tr_space_0 = rings_pos[0]\n\t\t\t\t\t\tVxy,Vsys = vmodel_dataset(pars, 0, XY1, r_0 = 0, r_space = r_space_0)\n\t\t\t\t\t\n\t\t\t\t\t\tinterp_model[mask1] = Vxy[1] + Vsys\n\n\t\t\t\tinterp_model[mask] = mdl_ev + Vsys\n\n\n\n\n\t\t\t\n\t\t\tsigma = np.sqrt(e_vel_map**2 + e_ISM**2)\n\n\n\n\t\t\tconvolved = 0\n\n\t\t\tif convolved == True:\n\n\t\t\t\tmy_beam = Beam(2.5*u.arcsec, 2.5*u.arcsec, 0*u.deg)\n\t\t\t\tpix_scale = pixel_scale * u.arcsec\n\t\t\t\tgauss_kern = my_beam.as_kernel(pix_scale, x_size = nx, y_size = ny)\n\n\n\t\t\t\textend = np.zeros((3*ny,3*nx))\n\t\t\t\textend[ny:2*ny,nx:2*nx] = interp_model\n\t\t\t\tconvolve_extend = convolve_fft(extend, gauss_kern, mask = extend == 0 )\n\t\t\t\tinterp_model_conv = convolve_extend[ny:2*ny,nx:2*nx]\n\t\t\t\tinterp_model_conv[interp_model == 0] = 0\n\n\n\t\t\telse:\n\t\t\t\tinterp_model_conv = interp_model\n\n\t\t\tinterp_model[interp_model == 0] = np.nan\n\t\t\tres = vel_map - interp_model_conv\n\t\t\tResid = res/sigma\n\n\n\n\t\t\treturn Resid.flatten()",
"def residual(pars, data= None):\n\n\t\t\tresid = np.array([])\n\n\n\t\t\t# make residual per data set\n\n\t\t\tfor N in range(n_annulus):\n\n\t\t\t\tmdl_ev = 0\n\t\t\t\tr_space_k = rings_pos[N+1] - rings_pos[N] \n\t\t\t\tmask = np.where( (r_n >= rings_pos[N] ) & (r_n < rings_pos[N+1]) )\n\t\t\t\tx,y = XY_mesh[0][mask], XY_mesh[1][mask] \n\t\t\t\tXY = (x,y)\n\n\n\n\n\t\t\t\tfor kk in range(2):\n\t\t\t\t\tVxy,Vsys = vmodel_dataset(pars, N+kk, XY, r_0 = rings_pos[N], r_space = r_space_k)\n\n\t\t\t\t\tmdl_ev = mdl_ev + Vxy[kk]\n\n\n\t\t\t\t\tif N == 0 and kk == 0:\n\t\t\t\t\t\t\n\t\t\t\t\t\tmask1 = np.where( (r_n < rings_pos[0] ) )\n\t\t\t\t\t\tx1,y1 = XY_mesh[0][mask1], XY_mesh[1][mask1] \n\t\t\t\t\t\tXY1 = (x1,y1)\n\n\n\t\t\t\t\t\t#\n\t\t\t\t\t\t#\n\t\t\t\t\t\t# inner interpolation\n\t\t\t\t\t\t#\n\t\t\t\t\t\t#\n\t\t\t\t\t\n\t\t\t\t\t\t#(a) velocity rise linearly from zero\n\n\t\t\t\t\t\tr_space_0 = rings_pos[0]\n\t\t\t\t\t\tVxy,Vsys = vmodel_dataset(pars, 0, XY1, r_0 = 0, r_space = r_space_0)\n\t\t\t\t\t\n\t\t\t\t\t\tinterp_model[mask1] = Vxy[1] + Vsys\n\n\t\t\t\tinterp_model[mask] = mdl_ev + Vsys\n\n\n\n\n\t\t\t\n\t\t\tsigma = np.sqrt(e_vel_map**2 + e_ISM**2)\n\n\n\n\t\t\tconvolved = 0\n\n\t\t\tif convolved == True:\n\n\t\t\t\tmy_beam = Beam(2.5*u.arcsec, 2.5*u.arcsec, 0*u.deg)\n\t\t\t\tpix_scale = pixel_scale * u.arcsec\n\t\t\t\tgauss_kern = my_beam.as_kernel(pix_scale, x_size = nx, y_size = ny)\n\n\n\t\t\t\textend = np.zeros((3*ny,3*nx))\n\t\t\t\textend[ny:2*ny,nx:2*nx] = interp_model\n\t\t\t\tconvolve_extend = convolve_fft(extend, gauss_kern, mask = extend == 0 )\n\t\t\t\tinterp_model_conv = convolve_extend[ny:2*ny,nx:2*nx]\n\t\t\t\tinterp_model_conv[interp_model == 0] = 0\n\n\n\t\t\telse:\n\t\t\t\tinterp_model_conv = interp_model\n\n\n\t\t\tinterp_model_conv[interp_model_conv == 0] = np.nan\n\t\t\tres = vel_map - interp_model_conv\n\t\t\tResid = res/sigma\n\n\t\t\treturn Resid.flatten()",
"def residual(pars, data= None):\n\n\t\t\tresid = np.array([])\n\n\n\t\t\t# make residual per data set\n\n\t\t\tfor N in range(n_annulus):\n\n\t\t\t\tmdl_ev = 0\n\t\t\t\tr_space_k = rings_pos[N+1] - rings_pos[N] \n\t\t\t\tmask = np.where( (r_n >= rings_pos[N] ) & (r_n < rings_pos[N+1]) )\n\t\t\t\tx,y = XY_mesh[0][mask], XY_mesh[1][mask] \n\t\t\t\tXY = (x,y)\n\n\n\n\n\t\t\t\tfor kk in range(2):\n\t\t\t\t\tVxy,Vsys = vmodel_dataset(pars, N+kk, XY, r_0 = rings_pos[N], r_space = r_space_k)\n\n\t\t\t\t\tmdl_ev = mdl_ev + Vxy[kk]\n\n\n\t\t\t\t\tif N == 0 and kk == 0:\n\t\t\t\t\t\t\n\t\t\t\t\t\tmask1 = np.where( (r_n < rings_pos[0] ) )\n\t\t\t\t\t\tx1,y1 = XY_mesh[0][mask1], XY_mesh[1][mask1] \n\t\t\t\t\t\tXY1 = (x1,y1)\n\n\n\t\t\t\t\t\t#\n\t\t\t\t\t\t#\n\t\t\t\t\t\t# inner interpolation\n\t\t\t\t\t\t#\n\t\t\t\t\t\t#\n\t\t\t\t\t\n\t\t\t\t\t\t#(a) velocity rise linearly from zero\n\n\t\t\t\t\t\tr_space_0 = rings_pos[0]\n\t\t\t\t\t\tVxy,Vsys = vmodel_dataset(pars, 0, XY1, r_0 = 0, r_space = r_space_0)\n\t\t\t\t\t\n\t\t\t\t\t\tinterp_model[mask1] = Vxy[1] + Vsys\n\n\t\t\t\tinterp_model[mask] = mdl_ev + Vsys\n\n\n\n\n\t\t\t\n\t\t\tsigma = np.sqrt(e_vel_map**2 + e_ISM**2)\n\n\n\n\t\t\tconvolved = 0\n\n\t\t\tif convolved == True:\n\n\t\t\t\tmy_beam = Beam(2.5*u.arcsec, 2.5*u.arcsec, 0*u.deg)\n\t\t\t\tpix_scale = pixel_scale * u.arcsec\n\t\t\t\tgauss_kern = my_beam.as_kernel(pix_scale, x_size = nx, y_size = ny)\n\n\n\t\t\t\textend = np.zeros((3*ny,3*nx))\n\t\t\t\textend[ny:2*ny,nx:2*nx] = interp_model\n\t\t\t\tconvolve_extend = convolve_fft(extend, gauss_kern, mask = extend == 0 )\n\t\t\t\tinterp_model_conv = convolve_extend[ny:2*ny,nx:2*nx]\n\t\t\t\tinterp_model_conv[interp_model == 0] = 0\n\n\n\t\t\telse:\n\t\t\t\tinterp_model_conv = interp_model\n\n\n\t\t\tinterp_model_conv[interp_model_conv == 0] = np.nan\n\t\t\tres = vel_map - interp_model_conv\n\t\t\tResid = res/sigma\n\n\n\t\t\treturn Resid.flatten()",
"def residual(pars, data= None):\n\n\t\t\tresid = np.array([])\n\n\n\t\t\t# make residual per data set\n\n\t\t\tfor N in range(n_annulus):\n\n\t\t\t\tmdl_ev = 0\n\t\t\t\tr_space_k = rings_pos[N+1] - rings_pos[N] \n\t\t\t\tmask = np.where( (r_n >= rings_pos[N] ) & (r_n < rings_pos[N+1]) )\n\t\t\t\tx,y = XY_mesh[0][mask], XY_mesh[1][mask] \n\t\t\t\tXY = (x,y)\n\n\n\n\n\t\t\t\tfor kk in range(2):\n\t\t\t\t\tVxy,Vsys = vmodel_dataset(pars, N+kk, XY, r_0 = rings_pos[N], r_space = r_space_k )\n\n\t\t\t\t\tmdl_ev = mdl_ev + Vxy[kk]\n\n\n\t\t\t\t\tif N == 0 and kk == 0:\n\t\t\t\t\t\t\n\t\t\t\t\t\tmask1 = np.where( (r_n < rings_pos[0] ) )\n\t\t\t\t\t\tx1,y1 = XY_mesh[0][mask1], XY_mesh[1][mask1] \n\t\t\t\t\t\tXY1 = (x1,y1)\n\n\n\t\t\t\t\t\t#\n\t\t\t\t\t\t#\n\t\t\t\t\t\t# inner interpolation\n\t\t\t\t\t\t#\n\t\t\t\t\t\t#\n\t\t\t\t\t\n\t\t\t\t\t\t#(a) velocity rise linearly from zero\n\n\t\t\t\t\t\tr_space_0 = rings_pos[0]\n\t\t\t\t\t\tVxy,Vsys = vmodel_dataset(pars, 0, XY1, r_0 = 0, r_space = r_space_0)\n\t\t\t\t\t\n\t\t\t\t\t\tinterp_model[mask1] = Vxy[1] + Vsys\n\n\t\t\t\tinterp_model[mask] = mdl_ev + Vsys\n\n\n\n\n\t\t\t\n\t\t\tsigma = np.sqrt(e_vel_map**2 + e_ISM**2)\n\t\t\t#interp_model[interp_model == 0] = np.nan\n\n\n\t\t\tconvolved = 0\n\n\t\t\tif convolved == True:\n\n\t\t\t\tmy_beam = Beam(2.5*u.arcsec, 2.5*u.arcsec, 0*u.deg)\n\t\t\t\tpix_scale = pixel_scale * u.arcsec\n\t\t\t\tgauss_kern = my_beam.as_kernel(pix_scale, x_size = nx, y_size = ny)\n\n\n\t\t\t\textend = np.zeros((3*ny,3*nx))\n\t\t\t\textend[ny:2*ny,nx:2*nx] = interp_model\n\t\t\t\tconvolve_extend = convolve_fft(extend, gauss_kern, mask = extend == 0 )\n\t\t\t\tinterp_model_conv = convolve_extend[ny:2*ny,nx:2*nx]\n\t\t\t\tinterp_model_conv[interp_model == 0] = 0\n\n\n\n\n\t\t\telse:\n\t\t\t\tinterp_model_conv = interp_model\n\n\t\t\tinterp_model_conv[interp_model_conv == 0] = np.nan\n\t\t\tres = vel_map - interp_model_conv\n\t\t\tResid = res/sigma\n\n\n\t\t\treturn Resid.flatten()",
"def _residuals(self):\n if self.model_fit is None:\n self._uvts_cls_logger.error(\"No model has been fitted, residuals cannot be computed!\")\n sys.exit(\"STOP\")\n\n try:\n # use fittedvalues to fill in the model dictionary\n self.residuals = pd.Series(np.asarray(self._train_dt['y']) - np.asarray(self.fittedvalues).flatten(),\n index=self._train_dt['y'].index)\n self.upper_whisker_res = self.residuals.mean() + 1.5 * (\n self.residuals.quantile(0.75) - self.residuals.quantile(0.25))\n except (KeyError, AttributeError):\n self._uvts_cls_logger.exception(\"Exception occurred: Model was not fitted or ts has other structure\")\n\n return self",
"def _get_all_residuals(self) -> Dict[int, np.ndarray]:\n residuals = self.residuals\n # if residuals have not been calculated yet\n if residuals is None:\n levels = self.levels\n models = self.models\n residuals = {}\n for k in levels:\n # assert models is not None\n # pyre-fixme[16]: `Optional` has no attribute `__getitem__`.\n if models[k] is not None:\n try:\n vals = self._get_residuals(models[k])\n except Exception as e:\n msg = (\n f\"Failed to get residuals for level {k} with error \"\n f\"message {e}.\"\n )\n raise _log_error(msg)\n\n residuals[k] = vals\n else:\n residuals[k] = self.info_residuals[k]\n self.residuals = residuals\n return residuals",
"def postfit_residuals(self) -> NONEARRAY:\n if self._successful:\n return self._postfit_residuals\n else:\n return None",
"def compute_residuals(self, model: Optional[CameraModel] = None) -> np.ndarray:\n # use the model attribute if necessary\n if model is None:\n model = self.model\n\n return self.measurements - np.concatenate(\n [model.project_onto_image(vecs, image=ind, temperature=self.temperatures[ind])\n for ind, vecs in enumerate(self.camera_frame_directions)], axis=1\n )",
"def residual_sensor_data(self):\r\n residual = []\r\n data_0_damage = self.sensor_data_original_shape_[0, :, :, :]\r\n\r\n for i in range(self.num_of_hm_stages):\r\n subtract_vector = self.sensor_data_original_shape_[i, :, :, :] - data_0_damage\r\n residual.append(subtract_vector)\r\n\r\n self.residual = np.asarray(residual)\r\n self.residual_flattened = self.residual.reshape(-1, self.signal_length, 1)\r\n\r\n return",
"def get_training_label_residuals(self):\n \n optimised_labels = self.fit(self.training_fluxes,\n self.training_flux_uncertainties, full_output=False)\n\n return optimised_labels - self.labels_array",
"def _raw_residuals(self):\n if self.model.assortativity == 'positive':\n traj = self._solution[::-1]\n else:\n traj = self._solution\n\n # compute the residuals\n xi = np.linspace(traj[0, 0], traj[-1, 0], 10 * traj.shape[0])\n resids_arr = self.ivp.compute_residual(traj[:, :3], xi, k=5, ext=2)\n\n # convert to a data frame\n resids_arr[:, 0] = xi\n col_names = ['x', r'$\\hat{\\mu}(x)$', r'$\\hat{\\theta}(x)$']\n df = pd.DataFrame(resids_arr, columns=col_names)\n\n return df.set_index('x')",
"def hkl_residuals(self, parameters):\n self.get_parameters(parameters)\n return self.diffs()",
"def residuals_detail(self):\n if self._residuals_detail is None:\n if not self.parametric:\n unscaled = self.unscaled_residuals.values.ravel()\n adjusted = self.adjusted_residuals.values.ravel()\n unscaled = unscaled[~np.isnan(unscaled)]\n adjusted = adjusted[~np.isnan(adjusted)]\n unscaled = unscaled[unscaled != 0]\n adjusted = adjusted[adjusted != 0]\n unscaled_size = unscaled.size\n unscaled_sum = unscaled.sum(axis=0)\n unscaled_ssqr = np.sum(unscaled**2, axis=0)\n unscaled_min = unscaled.min(axis=0)\n unscaled_max = unscaled.max(axis=0)\n unscaled_mean = unscaled.mean(axis=0)\n unscaled_skew = stats.skew(unscaled, axis=0, nan_policy=\"omit\")\n unscaled_mode = stats.mode(unscaled, axis=0, nan_policy=\"omit\").mode[0]\n unscaled_cvar = stats.variation(unscaled, axis=0, nan_policy=\"omit\")\n unscaled_kurt = stats.kurtosis(unscaled, axis=0, nan_policy=\"omit\")\n unscaled_var = unscaled.var(ddof=1, axis=0)\n unscaled_std = unscaled.std(ddof=1, axis=0)\n unscaled_med = np.median(unscaled, axis=0)\n adjusted_size = adjusted.size\n adjusted_sum = adjusted.sum(axis=0)\n adjusted_ssqr = np.sum(adjusted**2, axis=0)\n adjusted_min = adjusted.min(axis=0)\n adjusted_max = adjusted.max(axis=0)\n adjusted_mean = adjusted.mean(axis=0)\n adjusted_skew = stats.skew(adjusted, axis=0, nan_policy=\"omit\")\n adjusted_mode = stats.mode(adjusted, axis=0, nan_policy=\"omit\").mode[0]\n adjusted_cvar = stats.variation(adjusted, axis=0, nan_policy=\"omit\")\n adjusted_kurt = stats.kurtosis(adjusted, axis=0, nan_policy=\"omit\")\n adjusted_var = adjusted.var(ddof=1, axis=0)\n adjusted_std = adjusted.std(ddof=1, axis=0)\n adjusted_med = np.median(adjusted, axis=0)\n self._residuals_detail = pd.DataFrame({\n \"unscaled\": [\n unscaled_size, unscaled_sum , unscaled_ssqr, unscaled_min,\n unscaled_max, unscaled_mean, unscaled_skew, unscaled_mode,\n unscaled_cvar, unscaled_kurt, unscaled_var , unscaled_std,\n unscaled_med\n ],\n \"adjusted\": [\n adjusted_size, adjusted_sum , adjusted_ssqr, adjusted_min,\n adjusted_max, adjusted_mean, adjusted_skew, adjusted_mode,\n adjusted_cvar, adjusted_kurt, adjusted_var , adjusted_std,\n adjusted_med\n ],\n },\n index=[\n \"size\", \"sum\", \"sum_of_squares\", \"minimum\", \"maximum\", \"mean\",\n \"skew\", \"mode\", \"cov\", \"kurtosis\", \"variance\",\n \"standard_deviation\", \"median\"\n ]\n )\n\n return(self._residuals_detail)",
"def _generate_residuals(exog, endog, bandwidth=0.05):\n # Turn input data into np.ndarrays.\n exog = np.array(exog)\n endog = np.array(endog)\n\n # Determine number of observations and number of columns of the\n # outcome variable.\n n = endog.shape[0]\n\n # *y* is a column vector\n if endog.ndim == 1:\n y_fit = loess(exog, endog, span=bandwidth, degree=1)\n y_fit.fit()\n res = y_fit.outputs.fitted_residuals\n\n else:\n columns = endog.shape[1]\n res = np.zeros([n, columns])\n\n for col in range(columns):\n y_fit = loess(exog, endog[:, col], span=bandwidth, degree=1)\n y_fit.fit()\n res[:, col] = y_fit.outputs.fitted_residuals\n\n return res",
"def compute_residuals(self):\n\n r = self.rsdl()\n adapt_tol = self.opt['RelStopTol']\n\n if self.opt['AutoStop', 'Enabled']:\n adapt_tol = self.tau0 / (1. + self.k)\n\n return r, adapt_tol",
"def get_residual(self, beta: ndarray) -> ndarray:\n return self.data.weight*(self.data.obs -\n self.fevar.mapping(beta))",
"def residuals_Arr(self, p, data, x):\n err = data - self.Arr(x,p)\n return err",
"def compute_residuals(r):\n global conv_residuals\n conv_residuals.append(r)\n return",
"def residual(self,name):\n state = self.getstate(name)\n m = self.hit.vec \n x = state.vec\n res = m - self.hmatrix*x\n debug('kfnode.residual',(name,res))\n return res",
"def residuals(data: DataVector, theory: TheoryVector) -> npt.NDArray[np.float64]:\n assert isinstance(data, DataVector)\n assert isinstance(theory, TheoryVector)\n return (data - theory).view(np.ndarray)",
"def initialize_Rs(self):\n return [np.eye(K.shape[0]) for K in self.Ks]",
"def calcResiduals(self, params)->np.ndarray:\r\n if self._selectedIdxs is None:\r\n self._updateSelectedIdxs()\r\n dataArr = ModelFitterCore.runSimulationNumpy(parameters=params,\r\n modelSpecification=self.roadrunnerModel,\r\n startTime=self.observedTS.start,\r\n endTime=self.endTime,\r\n numPoint=self.numPoint,\r\n selectedColumns=self.selectedColumns,\r\n _logger=self.logger,\r\n _loggerPrefix=self._loggerPrefix)\r\n if dataArr is None:\r\n residualsArr = np.repeat(LARGE_RESIDUAL, len(self._observedArr))\r\n else:\r\n truncatedArr = dataArr[self._selectedIdxs, 1:]\r\n truncatedArr = truncatedArr.flatten()\r\n residualsArr = self._observedArr - truncatedArr\r\n if self._isObservedNan:\r\n residualsArr = np.nan_to_num(residualsArr)\r\n return residualsArr",
"def _residuals(params: List[float], xs: np.ndarray, ys: np.ndarray) -> float:\n return _model(params=params, xs=xs) - ys",
"def residual ( self , dataset , **kwargs ) :\n hdata = self.make_histo ( **kwargs )\n dataset.project ( hdata , ( self.yvar.name , self.xvar.name ) )\n return self.residual_histo ( hdata )",
"def _get_residual_dict(self):\n residuals_dict = {}\n for k,v in self.__dict__.iteritems():\n if k.endswith('_'):\n residuals_dict[k] = v\n \n return residuals_dict",
"def get_residuals_loo(self, tree):\n R_j = self.Y - (self.sum_trees_output - tree.predict_output(self.num_observations))\n return R_j",
"def calculate_residuals(model, features, label):\r\n predictions = model.predict(features)\r\n df_results = pd.DataFrame({'Actual': label, 'Predicted': predictions})\r\n df_results['Residuals'] = abs(df_results['Actual']) - abs(df_results['Predicted'])\r\n \r\n return df_results",
"def _residual_edp(self, params):\n data = self.F**2\n model = np.absolute(self._model())**2\n sigma = self.sigma\n return (data[self.mask]-model[self.mask]) / sigma[self.mask] \n \n # The following three lines do not reproduce Sun's results, which proves\n # that the fits were done through intensity, not form factor.\n #data = self.F\n #model = np.absolute(self._model())\n #return (data - model) ",
"def residuals(self, ts, rvs, p):\n\n if p.npl == 0:\n return rvs\n else:\n rvmodel = np.sum(rv.rv_model(ts,p), axis=0)\n return rvs - rvmodel",
"def residuals(self,x=None,y=None,retdata=False):\n if x is None or y is None:\n if self.data is None:\n raise ValueError('must either specify data or save fitted data')\n x,y,weights = self.data\n\n if self(x).shape != y.shape:\n raise ModelTypeError('y array does not match output of model for input x')\n if retdata:\n return x,y,y-self(x)\n else:\n return y-self(x)",
"def test_regress_residuals(self):\r\n x = [1.0, 2.0, 3.0, 4.0, 5.0]\r\n y = [2.1, 4.2, 5.9, 8.4, 9.6]\r\n result = regress_residuals(x, y)\r\n self.assertFloatEqual(result, [-0.1, 0.08, -0.14, 0.44, -0.28])",
"def write_residuals(self, s, d, h):\n r = []\n for i in range(h.nr):\n r.append(self.misfit(s[:,i], d[:,i], h.nt, h.dt))\n\n # write residuals\n np.savetxt('residuals', r)\n\n return np.array(r)",
"def _resid(self):\n # get the needed data\n sigma_sub_df = self.draws[\"sigma_sub\"]\n env_df = self.draws[self.me_map[\"env\"]]\n\n # if it is a squeeze type then we use the absolute value of the diff\n resid_df = (env_df - sigma_sub_df)[(sigma_sub_df <= env_df)].fillna(0)\n return resid_df",
"def calculateElementResidual(self):\n import pdb\n\n for ci in range(self.nc):\n self.elementResidual[ci].fill(0.0)\n #\n self.ellamDiscretization.updateElementResidual(self.elementResidual)",
"def residual(self, x, y, num_targets):\n \n x = x/sum(x) # normalize weights\n\n # RUN IM-SRG(2)\n ref = self._refs.T.dot(x)\n main(self._n_holes,self._n_particles, \n g=self._g_val, \n pb=self._pb_val, \n ref=ref, \n verbose=0, \n generator=self._generator,\n output_root = self._coeffs_root)\n\n # LOAD EVOLVED COEFFICIENTS\n H0B, H1B, H2B, eta1B_vac, eta2B_vac = pickle.load(open(self._coeffs_root+'/vac_coeffs_evolved.p', 'rb'))\n\n # PERFORM FULL CI AND GET EIGENVALUES\n hme = pyci.matrix(self._n_holes,self._n_particles, H0B, H1B, H2B, H2B, imsrg=True)\n ev_eigs = np.linalg.eigvalsh(hme)\n\n #return np.sqrt(np.mean((ev_eigs-y)**2))\n #return abs(ev_eigs[0:num_targets] - y[0:num_targets])\n #return abs(ev_eigs[1] - y[1])\n #return abs(ev_eigs[0] - y[0])\n return np.sqrt(0.80*(ev_eigs[0]-y[0])**2 + 0.20/35*((ev_eigs[1::]-y[1::]).T.dot(ev_eigs[1::]-y[1::])))",
"def residuals(x, y, filename):\n empirical_data = y\n #print(x)\n # call convert function\n\n ans = (empirical_data - run_model(x, filename))/empirical_data * 100\n #print(ans)\n return ans",
"def reserrorcalc(test_set, model):\n # Extracting X\n X = test_set[:,:-1]\n\n # Extracting labels\n Y = test_set[:,-1]\n residual_err = sum((model.predict(X) - Y) ** 2)\n return residual_err",
"def _residual_lattice(self, params):\n model = np.sqrt(self.calc_q_square())\n data = np.absolute(self.q)\n return (model[self.mask] -data[self.mask])",
"def residuals(self, p, data, X):\n err = data - self.fitfunc(X,p)\n return err",
"def calc_rmat(self):\n prnt_sep()\n print \"Generating a residual matrix...\"\n\n rmat = zeros((self.clen, self.clen), Float)\n rmatflag = zeros((self.clen, self.clen), Int)\n\n for i in xrange(self.clen):\n for j in xrange(i + self.m - 1, self.clen):\n\n frag_id1 = self.chain.fragment_list[i].fragment_id\n frag_id2 = self.chain.fragment_list[j].fragment_id\n\n data = self.datafile.grh_get_tls_record(self.chain.chain_id, \n frag_id1, frag_id2)\n if data == None or data.has_key(\"lsq_residual\") == False:\n print \"No Database Record: %s-%s\" % (frag_id1, frag_id2)\n else:\n rmat[i,j] = data[\"lsq_residual\"]\n rmatflag[i,j] = 1\n\n print \"Done.\"\n\n return rmat, rmatflag",
"def Residual(self, parameterValues, dictResidual=False):\n #YJC: want to add priors as extra rows to the residuals\n # put in simple prior on all parameters that start with U\n # we will want to define a more flexible function that \n\n f_counter[0]+=1\n\n if dictResidual:\n residuals = {}\n else:\n residuals = numpy.array([])\n \n for independentValues in self.data.experiments:\n initialSkip = self.data.initialSkip[independentValues]\n finalSkip = self.data.finalSkip[independentValues]\n if finalSkip !=0:\n X = self.data.X[independentValues][initialSkip:int(-1*finalSkip)]\n Y = self.data.Y[independentValues][initialSkip:int(-1*finalSkip)]\n errorBar = self.data.errorBar[independentValues][initialSkip:int(-1*finalSkip)]\n else:\n X = self.data.X[independentValues][initialSkip:]\n Y = self.data.Y[independentValues][initialSkip:]\n errorBar = self.data.errorBar[independentValues][initialSkip:]\n Ytheory = self.theory.Y(X, parameterValues, independentValues)\n res = (Ytheory-Y)/errorBar\n if max(res) == numpy.inf:\n print \"data set has infinite residual\", independentValues\n if dictResidual:\n residuals[independentValues] = res\n else:\n residuals = numpy.concatenate((residuals,res))\n \n #YJC: put this in joint Model too!\n priors = self.theory.Prior(parameterValues, self.theory.parameterNameList)\n\n #for p in pNames:\n # if p.startswith('U'):\n # index = pNames.index(p)\n # val = pValues[index]\n # priors.append(val)\n #priors = numpy.array(priors)\n if dictResidual is False:\n residuals = numpy.concatenate((residuals,priors))\n return residuals",
"def residual_bootstrap(self, X: np.ndarray, y: np.ndarray, n=None, B=1000, model=None):\n # fit the model if it hasn't been run\n if model.run is False:\n model.fit(X, y);\n resid = model.residuals\n pred = model.predictions\n boot_est = [None] * B\n result = {} # to store the mean, std_err\n index = 0 \n for _ in range(B):\n idx = np.random.randint(low=0, high=n, size=n)\n boot_yi = pred + resid[idx]\n model.fit(X, boot_yi)\n boot_est[index] = tuple(model.theta)\n index += 1\n \n #self.boot_est['std_err'] = np.std(statistic, ddof=1, axis=0)\n result['estimates'] = boot_est\n result['est_mean'] = np.mean(boot_est, axis=0)\n result['est_err'] = np.std(boot_est, ddof=1, axis=0)\n return result",
"def residual4(params, x, data):\n #get the value of the params from a dict\n parvals = params.valuesdict()\n B0 = parvals['B0']\n E = parvals['E']\n Eh = parvals['Eh']\n Th = parvals['Th']\n model = np.log((B0*np.exp((-E/k)*((1/x)-(1/283.15)))) / (1+(np.exp((Eh/k)*((1/Th)-(1/x))))))\n return data - model",
"def _residual(self, x):\n h = x\n h = self.c1(h)\n h = self.activation(h)\n h = self.c2(h)\n h = F.avg_pool2d(h, 2)\n\n return h",
"def residual_G2D_norotation(pars,x,y,data=None, eps=None):\n\tparvals = pars.valuesdict() # a Parameters() object is passed as \"pars\"\n\tintensity_max = parvals[\"I_zero\"]\n\tcenterposition_x = parvals[\"x_zero\"]\n\tcenterposition_y = parvals[\"y_zero\"]\n\tbeamwidth_x = parvals[\"omegaX_zero\"]\n\tbeamwidth_y = parvals[\"omegaY_zero\"]\n\tbgr = parvals[\"backgr\"]\n\t\n\n\tmodel = intensity_max*np.exp(-2*np.power(x-centerposition_x,2)/beamwidth_x**2 - \\\n\t\t2*np.power(y-centerposition_y,2)/beamwidth_y**2) + bgr\n\tif data is None:\n\t\treturn np.array(model) # we don't flatten here because this is for plotting\n\tif eps is None:\n\t\tresid = np.array(model - data)\n\t\treturn resid.flatten() # minimization array must be flattened (LMFIT FAQ)\n\telse:\n\t\tresid = np.array((model - data)/eps)\n\t\treturn resid.flatten()",
"def calculate_residuals(*, vo_data, model_data):\n # Drop the dates and then add back after residuals calculation\n dates = vo_data['date']\n vo_data.drop(vo_data.columns[[0]], axis=1, inplace=True)\n model_data.drop(model_data.columns[[0]], axis=1, inplace=True)\n # Calculate residuals as data minus model values\n residuals = pd.DataFrame(\n vo_data.values - model_data.values,\n columns=vo_data.columns)\n model_data.insert(0, 'date', dates)\n return residuals",
"def residual2(params, x, data):\n #get the value of the params from a dict\n parvals = params.valuesdict()\n B0 = parvals['B0']\n Tm = parvals['Tm']\n T0 = parvals['T0']\n model = B0*x*(x-T0)*((Tm-x)**0.5)\n return data - model",
"def _convert_to_residuals(valstarget: Series, valsnormer: Series, model) -> Series:\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n warnings.filterwarnings(\"ignore\", category=UndefinedMetricWarning)\n nonull = (valstarget.isnull() == False) & (valsnormer.isnull() == False)\n if sum(nonull) < model.get_params()[\"cv\"]:\n residuals = np.empty(len(valstarget))\n return pd.Series(residuals, index=valstarget.index)\n features = valsnormer[nonull].values.reshape(-1, 1)\n labels = valstarget[nonull].values\n model = model.fit(features, labels)\n prediction = model.predict(features)\n residuals = labels - prediction\n return pd.Series(residuals, index=valstarget[nonull].index)",
"def calc_rmse_full_mod(data, model_type=None, mod=None):\n num_patients = len(data['SI'])\n err_arr = np.zeros((num_patients, 1))\n for i in range(0,num_patients):\n y_real, y_pred_mean = calc_y_pred_full_data(data=data, i=i, model_type=model_type, mod=mod)\n err_arr[i] = calc_error(y_real, y_pred_mean)\n \n return err_arr",
"def residual(us):\n return self.h_S(z0, us) - h_P",
"def residuals(self):\n\n if np.size(self.iceicehorizons_depth1) > 0:\n resi_iceice = (self.site1.fct_age(self.iceicehorizons_depth1)-\\\n self.site2.fct_age(self.iceicehorizons_depth2))/self.iceicehorizons_sigma\n if self.iceicehorizons_correlation_bool:\n resi_iceice = lu_solve(self.iceicehorizons_lu_piv, resi_iceice)\n resi = [resi_iceice]\n else:\n resi = [np.array([])]\n\n if self.site1.archive == 'icecore' and self.site2.archive == 'icecore' and \\\n np.size(self.airairhorizons_depth1) > 0:\n resi_airair = (self.site1.fct_airage(self.airairhorizons_depth1)-\\\n self.site2.fct_airage(self.airairhorizons_depth2))/\\\n self.airairhorizons_sigma\n if self.airairhorizons_correlation_bool:\n resi_airair = lu_solve(self.airairhorizons_lu_piv, resi_airair)\n resi.append(resi_airair)\n\n if self.site2.archive == 'icecore' and np.size(self.iceairhorizons_depth1) > 0:\n resi_iceair = (self.site1.fct_age(self.iceairhorizons_depth1)-\\\n self.site2.fct_airage(self.iceairhorizons_depth2))/\\\n self.iceairhorizons_sigma\n if self.iceairhorizons_correlation_bool:\n resi_iceair = lu_solve(self.iceairhorizons_lu_piv, resi_iceair)\n resi.append(resi_iceair)\n\n if self.site1.archive == 'icecore' and np.size(self.airicehorizons_depth1) > 0:\n resi_airice = (self.site1.fct_airage(self.airicehorizons_depth1)-\\\n self.site2.fct_age(self.airicehorizons_depth2))/self.airicehorizons_sigma\n if self.airicehorizons_correlation_bool:\n resi_airice = lu_solve(self.airicehorizons_lu_piv, resi_airice)\n resi.append(resi_airice)\n\n return np.concatenate(resi)",
"def residual(t, x, xdot, result):\n result[0] = x[2]-xdot[0]\n result[1] = x[3]-xdot[1]\n result[2] = -xdot[2]+x[4]*x[0]/m\n result[3] = -xdot[3]+x[4]*x[1]/m-g\n result[4] = x[2]**2 + x[3]**2 \\\n + (x[0]**2 + x[1]**2)/m*x[4] - x[1] * g\n print(result)",
"def residual2P2Z(paras):\n initcond = setupinitcond(paras['pfun_num'].value, paras['zoo_num'].value)\n\n\n\n model = g2P2Z(initcond, timedays_model, paras)\n\n # to implement fitting algorithm make sure to calculate residual only for the last year!\n\n # will have to 1. : simplify the data (i.e. median per month)\n # will have to put data into structure to calculate efficiently (i.e. pandas dataframe like df[1] = N, df[2] = Si, etc.)\n model_ly = model[1460:1825]\n\n # aggregate model output in the same way as validation data (monthly mean)\n # create month vector to add to model output dataframe for analysis\n oneyearmodel = pandas.DataFrame()\n oneyearmodel = oneyearmodel.assign(day=pandas.Series(np.linspace(1, 365, 365)))\n\n # combine two columns\n phyto_model = pandas.DataFrame(\n {'data': model_ly[:, 4], 'month': pandas.to_datetime(oneyearmodel['day'], format='%j').dt.month})\n phyto_monthly_median = phyto_model.groupby('month').median()\n phyto_resid = (phyto_monthly_median['data'].values - ChlA_monthly_median['ChlA'].values * 0.1)\n\n nitrate_model = pandas.DataFrame(\n {'data': model_ly[:, 0], 'month': pandas.to_datetime(oneyearmodel['day'], format='%j').dt.month})\n nitrate_monthly_median = nitrate_model.groupby('month').median()\n nitrate_resid = (nitrate_monthly_median['data'].values - NO3NO2_monthly_median['NO3NO2'].values * 0.1)\n\n silicate_model = pandas.DataFrame(\n {'data': model_ly[:, 1], 'month': pandas.to_datetime(oneyearmodel['day'], format='%j').dt.month})\n silicate_monthly_median = silicate_model.groupby('month').median()\n silicate_resid = (silicate_monthly_median['data'].values - SiOH_USF_monthly_median['SiOH'].values * 0.1)\n\n zoo_model = pandas.DataFrame(\n {'data': model_ly[:, 3], 'month': pandas.to_datetime(oneyearmodel['day'], format='%j').dt.month})\n zoo_monthly_median = zoo_model.groupby('month').median()\n zoo_resid = (zoo_monthly_median['data'].values - ZooBM_monthly_median['ZooBM'].values * 0.1)\n\n ss = np.concatenate((phyto_resid, nitrate_resid, silicate_resid, zoo_resid))\n return ss",
"def residualNorm(self):\n return math.sqrt(self.residualNorm2())",
"def regress_residuals(x, y):\r\n slope, intercept = regress(x, y)\r\n coords = zip(x, y)\r\n residuals = []\r\n for x, y in coords:\r\n e = y - (slope * x) - intercept\r\n residuals.append(e)\r\n return residuals",
"def residual(params, model_func, x, data, min_x_param=None, max_x_param=None,\n eps=None):\n # Crop the X data according to a fit parameter\n if min_x_param is not None or max_x_param is not None:\n min_x = params.get(min_x_param, None)\n max_x = params.get(max_x_param, None)\n x, data = crop_x_y(x, data, min_x=min_x, max_x=max_x,\n include_bounds=False)\n\n # Calculate data according to the model function\n model = model_func(x, **params)\n\n # Calculate the residuals of the model and the given data\n if eps is None:\n return model - data\n return (model - data) / eps",
"def write_residuals(self, path, syn, obs):\n nt, dt, _ = self.get_time_scheme(syn)\n nn, _ = self.get_network_size(syn)\n\n residuals = []\n # TODO freq_mask = np.loadtxt('/data1/etienneb/freq_mask.txt')\n ft_obs_se = self.load('ft_obs_se')\n freq_mask = self.load('freq_mask_se')\n \n for ii in range(nn):\n residuals.append(\n self.misfit(syn[ii].data, nt, dt,\n ft_obs_se[:,ii],\n freq_mask[:,ii])\n )\n \n filename = path+'/'+'residuals'\n if exists(filename):\n residuals.extend(list(np.loadtxt(filename)))\n\n np.savetxt(filename, residuals)",
"def _collect_params(self) -> np.ndarray:\n res = np.array([0.]*(self.dimensions))\n res[0] = self.model.rbf.variance\n res[1:-1] = self.model.rbf.lengthscale\n res[-1] = self.model.Gaussian_noise.variance\n return res",
"def residuals_Sigm(self, p, data, x):\n err = data - self.Sigm(x,p)\n return err",
"def l2_error(self, X=None, y=None) -> np.ndarray:\n return np.square(self.residuals(X, y))",
"def error_MSE(resid):\n if resid.ndim == 2:\n return (norm(np.asarray(resid).ravel())**2)/float(resid.shape[1])\n elif resid.ndim == 1:\n return (norm(np.asarray(resid).ravel())**2)\n else:\n raise Exception(\"array passed to error_MSE has incorrect shape\")",
"def ssr(self):\n return (self.resid * self.resid).sum(0)",
"def _setup_all_residues(self, model_num=0):\n all_residues = vector1()\n\n for chain_id in self.get_chain_ids(model_num):\n #print \"ChainID: \"+chain_id\n residues = self.residues(chain_id, model_num)\n all_residues.extend(residues)\n\n return all_residues",
"def residual5(params, x, data):\n #get the value of the params from a dict\n parvals = params.valuesdict()\n B0 = parvals['B0']\n E = parvals['E']\n El = parvals['El']\n Tl = parvals['Tl']\n model = np.log((B0*np.exp((-E/k)*((1/x)-(1/283.15)))) / (1+(np.exp((El/k)*((1/Tl)-(1/x))))))\n return data - model",
"def _residual(self, x):\n h = x\n h = self.b1(h)\n h = self.activation(h)\n h = self._upsample_conv(h, self.c1) if self.upsample else self.c1(h)\n h = self.b2(h)\n h = self.activation(h)\n h = self.c2(h)\n\n return h",
"def plot_residuals(self, series):\n model = ARIMA(series, order=(self.p, self.d, self.q))\n model_fit = model.fit(disp=0)\n print(model_fit.summary())\n\n residuals = pd.DataFrame(model_fit.resid)\n residuals.plot()\n plt.title(self.ticker + ' ARIMA residuals')\n plt.savefig('plots/ARIMA/{0}Resid_{1}{2}{3}.pdf'.format(self.ticker, self.p, self.d, self.q))\n plt.close()\n print(residuals.describe())",
"def residual(S):\n rho = seawater.density(T, S, Pa)\n return (rho_1 - rho)",
"def _calculate_residual(original_values: List[Any], new_values: List[Any]) -> List[float]:\n residual = []\n for i in range(len(original_values)):\n residual.append(original_values[i] - new_values[i])\n return residual",
"def calc_residuals(self, h, a):\n\n return _rccsdt_ri_calc_residuals(h, a)",
"def calc_residuals(self, h, a):\n\n return _rccsdt_ri_calc_residuals(h, a)",
"def _plot_residuals(self, y, yhat, _id):\n try:\n assert self.model_fit is not None\n except AssertionError:\n self._uvts_cls_logger.exception(\"Model has to be fitted first! Please call ts_fit(...)\")\n\n fig, axes = plt.subplots(2, 1, figsize=(20, 5), sharex=True)\n\n axes[0].plot(pd.Series(yhat, index=self._train_dt.index), color='y', linewidth=2.0)\n axes[0].plot(pd.Series(y, index=self._train_dt.index), color='b')\n\n axes[0].set_ylabel(\"Model Fit\")\n axes[0].set_title(\"Real (blue) and estimated values, \" + str(_id))\n #\n axes[1].plot(self.residuals, color=\"r\")\n \"\"\"\n if self.forecast is not None and self.residuals_forecast is None \\\n and self.lower_conf_int is not None and self.upper_conf_int is not None:\n axes[0].fill_between(self.lower_conf_int.index, self.lower_conf_int, self.upper_conf_int, color='k',\n alpha=.15)\n \"\"\"\n if self.lower_conf_int is not None and self.upper_conf_int is not None:\n axes[0].fill_between(self.lower_conf_int.index, self.lower_conf_int, self.upper_conf_int, color='k',\n alpha=.15)\n if self.upper_whisker_res is not None:\n axes[1].axhline(y=self.upper_whisker_res, xmin=0, xmax=1, color='m', label='upper_whisker', linestyle='--',\n linewidth=1.5)\n axes[1].axhline(y=-self.upper_whisker_res, xmin=0, xmax=1, color='m', label='upper_whisker', linestyle='--',\n linewidth=1.5)\n\n axes[1].set_ylabel('Residuals')\n axes[1].set_title('Difference between model output and the real data and +/- upper whisker, ' + str(_id))\n\n return fig, axes",
"def residual_of(self, z):\n return np.subtract(z, self.HJacob(self.x)@self.x_prior)",
"def residual_G2D(pars,x,y,data=None, eps=None):\n\tparvals = pars.valuesdict() # a Parameters() object is passed as \"pars\"\n\tintensity_max = parvals[\"I_zero\"]\n\tcenterposition_x = parvals[\"x_zero\"]\n\tcenterposition_y = parvals[\"y_zero\"]\n\tbeamwidth_x = parvals[\"omegaX_zero\"]\n\tbeamwidth_y = parvals[\"omegaY_zero\"]\n\ttheta = parvals[\"theta_rot\"]\n\tbgr = parvals[\"backgr\"]\n\t\n\t# the model function is based on this http://www.cs.brandeis.edu/~cs155/Lecture_06.pdf\n\t# slide 23; it should describe rotation by angle theta around an arbitrary point \n\t# if I understood the notes correctly, then this transformation should be correct \n\t# but I didn't check the math myself\n\n\t# the rotation is clockwise\n\n\tmodel = intensity_max*np.exp(-2*np.power(x*np.cos(theta)-y*np.sin(theta)+centerposition_x*(1-np.cos(theta))+centerposition_y*np.sin(theta)-centerposition_x,2)/beamwidth_x**2 - \\\n\t\t2*np.power(x*np.sin(theta)+y*np.cos(theta)+centerposition_y*(1-np.cos(theta))-centerposition_x*np.sin(theta)-centerposition_y,2)/beamwidth_y**2) + bgr\n\tif data is None:\n\t\treturn np.array(model) # we don't flatten here because this is for plotting\n\tif eps is None:\n\t\tresid = np.array(model - data)\n\t\treturn resid.flatten() # minimization array must be flattened (LMFIT FAQ)\n\telse:\n\t\tresid = np.array((model - data)/eps)\n\t\treturn resid.flatten()",
"def get_vis_residuals(\n model: MPoLModel,\n u_true: NDArray[floating[Any]],\n v_true: NDArray[floating[Any]],\n V_true: NDArray[complexfloating[Any, Any]],\n channel: int = 0,\n) -> NDArray[complexfloating[Any, Any]]:\n nufft = NuFFT(coords=model.coords, nchan=model.nchan, uu=u_true, vv=v_true)\n\n vis_model = nufft(model.icube())\n # convert to numpy, select channel\n vis_model = vis_model.detach().numpy()[channel]\n\n vis_resid: NDArray[complexfloating[Any, Any]]\n vis_resid = V_true - vis_model\n\n return vis_resid",
"def residuals_cjp_displacements(self, inp: list or np.array) -> np.ndarray:\n z = inp\n\n cjp_disp_x, cjp_disp_y = cjp_displ_field(z, self.phi_grid, self.r_grid, self.material)\n\n residual = np.asarray([cjp_disp_x - self.interp_disp_x, cjp_disp_y - self.interp_disp_y])\n residual = residual.reshape(-1)\n # filter out nan values\n residual = residual[~np.isnan(residual)]\n return residual",
"def _residual(self, x):\n h = x\n h = self.activation(h)\n h = self.c1(h)\n h = self.activation(h)\n h = self.c2(h)\n if self.downsample:\n h = F.avg_pool2d(h, 2)\n\n return h",
"def _projected_residuals(model, plot_width=400, plot_height=400):\n warnings.warn(\"This visualization are deprecated.\", DeprecationWarning)\n hover = HoverTool(tooltips=[(\"#SampleID\", \"@index\")])\n pcvar = model.percent_explained()\n resid = model.residuals()\n p = figure(plot_width=plot_width, plot_height=plot_height,\n tools=[hover, BoxZoomTool(), ResetTool(),\n WheelZoomTool(), SaveTool(), PanTool()])\n resid_source = ColumnDataSource(resid)\n\n p.circle(resid.columns[0], resid.columns[1], size=7,\n source=resid_source, fill_color='blue', legend='residuals')\n\n p.title.text = 'Projected Residuals'\n p.title_location = 'above'\n p.xaxis.axis_label = '{} ({:.2%})'.format(pcvar.index[0], pcvar.iloc[0])\n p.yaxis.axis_label = '{} ({:.2%})'.format(pcvar.index[1], pcvar.iloc[1])\n return p",
"def residuals(self,\n series: TimeSeries,\n forecast_horizon: int = 1,\n verbose: bool = False) -> TimeSeries:\n\n series._assert_univariate()\n\n # get first index not contained in the first training set\n first_index = series.time_index()[self.min_train_series_length]\n\n # compute fitted values\n p = self.backtest(series, None, first_index, forecast_horizon, 1, True, verbose=verbose)\n\n # compute residuals\n series_trimmed = series.slice_intersect(p)\n residuals = series_trimmed - p\n\n return residuals",
"def casdi_residual_star(params):\n return np.nanvar(casdi_residual(*params))",
"def get_residuals(params, n_cameras, n_points, camera_indices, point_indices, points_2d):\n camera_params = params[:n_cameras * 9].reshape((n_cameras, 9))\n points_3d = params[n_cameras * 9:].reshape((n_points, 3))\n points_proj = project(points_3d[point_indices], camera_params[camera_indices])\n return (points_proj - points_2d).ravel()",
"def residual3(params, x, data):\n #get the value of the params from a dict\n parvals = params.valuesdict()\n B0 = parvals['B0']\n E = parvals['E']\n Eh = parvals['Eh']\n El = parvals['El']\n Th = parvals['Th']\n Tl = parvals['Tl']\n model = np.log((B0*np.exp((-E/k)*((1/x)-(1/283.15)))) / (1+(np.exp((El/k)*((1/Tl)-(1/x))))+(np.exp((Eh/k)*((1/Th)-(1/x))))))\n return data - model",
"def results(self):\n\n return np.concatenate(\n (np.expand_dims(self.time, axis=1), self.res), axis=1)",
"def get_model_init(self) -> ndarray:\n beta = np.zeros(self.fevar.size)\n gamma = np.zeros(self.revar.size)\n var = np.hstack([beta, gamma])\n grad_beta = self.gradient(var)[:self.fevar.size]\n hess_beta = self.hessian(var)[:self.fevar.size,\n :self.fevar.size]\n beta = beta - np.linalg.solve(\n hess_beta + np.identity(self.fevar.size),\n grad_beta\n )\n return np.hstack([beta, gamma])",
"def residual1(params, x, data):\n #get the value of the params from a dict\n parvals = params.valuesdict()\n a = parvals['a']\n b = parvals['b']\n c = parvals['c']\n d = parvals['d']\n model = a + b*x + c*x**2 + d*x**3\n return data - model",
"def residuals(self, b):\n x, y = self.xvals, self.yvals\n return self._numexpr(x, *b) - y",
"def residual_G1D(pars, x, data=None, eps=None):\n\tparvals = pars.valuesdict() # a Parameters() object is passed as \"pars\"\n\tintensity_max = parvals[\"I_zero\"]\n\tcenterposition = parvals[\"r_zero\"]\n\tbeamwidth = parvals[\"omega_zero\"]\n\tbgr = parvals[\"backgr\"]\n\tmodel = intensity_max*np.exp(-2*np.power(x-centerposition,2)/beamwidth**2) + bgr\n\tif data is None:\n\t\treturn np.array(model)\n\tif eps is None:\n\t\treturn np.array(model - data)\n\treturn np.array((model - data)/eps)",
"def compute_residual(self, augmented_data, bias=None, synapse=None, bkgd=None):\n N = self.N\n T = augmented_data[\"T\"]\n F = augmented_data[\"F\"]\n W = self.weight_model.W\n\n assert bias is not None or synapse is not None\n n_pre, n_post = self._get_n(bias, synapse)\n\n # compute psi, excluding the bias or synapse, whichever is specified\n psi = np.zeros(T)\n\n if bias is None:\n psi += self.bias_model.b[None, n_post]\n\n # Only compute residual if W is nonzero\n if not np.allclose(W[:,n_post], 0):\n for nn in xrange(N):\n if nn == n_pre:\n continue\n psi += np.dot(F[:,nn,:], W[nn, n_post, :])\n\n if bkgd is None:\n psi += self.background_model.mean_background_activation(augmented_data)[:,n_post]\n\n return psi",
"def predict(self):\n RV = np.zeros((self.N,self.P))\n for term_i in range(self.n_terms):\n RV+=np.dot(self.Fstar()[term_i],np.dot(self.B()[term_i],self.Astar()[term_i]))\n return RV",
"def get_recovery_variables(self):\n if self.m is None or self.dirty is True:\n m, n, h = self.simulator.get_recovery_variables()\n n_compartments = self.neuron_collection.total_compartments()\n self.m = np.array(m).reshape([len(m) / n_compartments, n_compartments])\n self.n = np.array(n).reshape([len(n) / n_compartments, n_compartments])\n self.h = np.array(h).reshape([len(h) / n_compartments, n_compartments])\n\n self.dirty = False\n t = int(self.T / self.dt)\n return self.m[:t, :], self.n[:t, :], self.h[:t, :]"
] | [
"0.7572452",
"0.7444132",
"0.72688514",
"0.7094661",
"0.7090032",
"0.68978417",
"0.6875627",
"0.68409854",
"0.6705539",
"0.66076",
"0.6558537",
"0.65114504",
"0.65114504",
"0.65114504",
"0.6500384",
"0.64960504",
"0.6495944",
"0.6441256",
"0.6383142",
"0.63715947",
"0.6365249",
"0.63602054",
"0.6332733",
"0.6265409",
"0.6176459",
"0.6139622",
"0.60943216",
"0.604102",
"0.6023277",
"0.59893227",
"0.59863067",
"0.5970002",
"0.59573287",
"0.5936788",
"0.5892115",
"0.5845807",
"0.58286184",
"0.5819209",
"0.5817612",
"0.5809933",
"0.5799393",
"0.57821894",
"0.5698678",
"0.56771237",
"0.56646836",
"0.5659221",
"0.56540734",
"0.5639633",
"0.5628699",
"0.5618848",
"0.5599316",
"0.556202",
"0.55470085",
"0.5509499",
"0.5508148",
"0.5500729",
"0.54992115",
"0.5480651",
"0.5478095",
"0.5464522",
"0.5460065",
"0.53548867",
"0.53372586",
"0.53317356",
"0.5307832",
"0.5272797",
"0.52663565",
"0.5255194",
"0.52409947",
"0.5237193",
"0.523494",
"0.5233343",
"0.5219563",
"0.5213511",
"0.52009505",
"0.5196791",
"0.51913077",
"0.51870906",
"0.51655877",
"0.51645106",
"0.51645106",
"0.5161823",
"0.51561254",
"0.5150062",
"0.51422966",
"0.5132559",
"0.5121127",
"0.511493",
"0.5091819",
"0.50540406",
"0.5047786",
"0.5040168",
"0.50342244",
"0.5034034",
"0.50340086",
"0.50334936",
"0.50243485",
"0.49914828",
"0.49802458",
"0.49710187"
] | 0.5674281 | 44 |
Call self.model.predict with self.params as the first argument. | def predict(self, exog=None, transform=True, *args, **kwargs):
is_pandas = _is_using_pandas(exog, None)
exog_index = exog.index if is_pandas else None
if transform and hasattr(self.model, 'formula') and (exog is not None):
design_info = self.model.data.design_info
from patsy import dmatrix
if isinstance(exog, pd.Series):
# we are guessing whether it should be column or row
if (hasattr(exog, 'name') and
isinstance(exog.name, str) and
exog.name in design_info.describe()):
# assume we need one column
exog = pd.DataFrame(exog)
else:
# assume we need a row
exog = pd.DataFrame(exog).T
orig_exog_len = len(exog)
is_dict = isinstance(exog, dict)
exog = dmatrix(design_info, exog, return_type="dataframe")
if orig_exog_len > len(exog) and not is_dict:
if exog_index is None:
warnings.warn("nan values have been dropped", ValueWarning)
else:
exog = exog.reindex(exog_index)
exog_index = exog.index
if exog is not None:
exog = np.asarray(exog)
if exog.ndim == 1 and (self.model.exog.ndim == 1 or
self.model.exog.shape[1] == 1):
exog = exog[:, None]
exog = np.atleast_2d(exog) # needed in count model shape[1]
predict_results = self.model.predict(self.params, exog,
*args, **kwargs)
# TODO: Shouldn't this be done by wrapping?
if exog_index is not None and not hasattr(predict_results,
'predicted_values'):
if predict_results.ndim == 1:
return pd.Series(predict_results, index=exog_index)
else:
# FIXME: columns-->neq_names for e.g. MNLogit, VAR
ynames = self.model.data.ynames
return pd.DataFrame(predict_results, index=exog_index,
columns=ynames)
else:
return predict_results | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def predict(self, params, exog=None, *args, **kwargs):\n raise NotImplementedError # pragma: no cover",
"def predict(self, **kwargs):\n raise NotImplementedError",
"def predict(self, *args, **kwargs):\n return self(*args, **kwargs)",
"def _predict(self, x):\n pass",
"def predict(self):\n batch = get_predict_batch(1, num_rec_out=self.num_test_rec)\n self.g_model.test_batch(\n batch, self.global_step, num_rec_out=self.num_test_rec)",
"def predict(self):\n raise NotImplementedError",
"def predict(self, x, **kwargs):\n kwargs = self.filter_sk_params(Sequential.predict, kwargs)\n return np.squeeze(self.model.predict(x, **kwargs))",
"def predict(self, inputs):\n return self.model.predict(inputs)",
"def predict(self, model, context, data):\n pass",
"def gp_predict(model, params):\n predic = model.predict(params)\n return predic[0]",
"def __call__(self, X, *args, **kwargs):\n return self.predict(X, **kwargs)",
"def predict(self, to_predict):\n\t\treturn self.model.predict(to_predict)",
"def predict(self): \n return self.model.predict(self.test_x)",
"def predict(self):\n\n self.eval()\n return self.forward(self.data)",
"def fit_predict(self):\n self.classifier = self.model\n self.classifier.fit(self.X_sample, self.y_sample)\n self.y_pred = self.classifier.predict(self.X_test)",
"def predict(self, inputs):\n\n return self.model.predict(inputs)",
"def predict(self, x):\n raise NotImplementedError('Subclass of LinearModel must implement predict method.')",
"def _predict(self, X):\n raise NotImplementedError",
"def fit_predict(self):\n raise AttributeError",
"def predict(self, X):\n raise NotImplementedError('Abstract method \"predict\" must be '\n 'specialised!')",
"def predict(self, sample, **kwargs):\r\n return self.model.predict(sample, **kwargs)",
"def predict(self,x):\n return self._model.predict(x)",
"def predict(self, session, *args, predict_data_iterator=None, **kwargs):\n raise NotImplementedError(\"Implement predict() method\")",
"def predict(self, data, verbose=0):\n\n if self.model is None:\n raise StandardError('Model is not built. Run build method or load model before fitting')\n return self.model.predict(data, 1, verbose) # ,steps)",
"def predict(self, data, verbose=0):\n\n if self.model is None:\n raise StandardError('Model is not built. Run build method or load model before fitting')\n return self.model.predict(data, 1, verbose) # ,steps)",
"def predict(self, x, **kwargs):\n raise NotImplementedError",
"def predict(self, model, x_test):\n pass",
"def target_predict(self, inp):\n return self.target_model.predict(inp)",
"def predict(self, x):\n return self.model.predict(x, batch_size=1, verbose=0)",
"def get_prediction(self, *args, **kwargs) -> Tensor:\n training = self.model.training\n self.model.eval()\n\n with torch.no_grad():\n out = self.model(*args, **kwargs)\n\n self.model.train(training)\n\n return out",
"def predict(self, x, **kwargs):\n return self.tuner.get_best_models(1)[0].predict(x, **kwargs)",
"def predict(self, params, exog=None):\n if exog is None:\n exog = self.exog\n\n return np.dot(exog, params)",
"def predict(self, params, exog=None):\n if exog is None:\n exog = self.exog\n\n return np.dot(exog, params)",
"def _predict(self, testX):\n pass",
"def predict(self, instances):\r\n raise NotImplementedError",
"def predict(self, data):\n\n prediction = None\n if self.model is not None:\n prediction = self.model.predict(data)\n return prediction",
"def predict(self, first_preprocessed_inputs,second_preprocessed_inputs,third_preprocessed_inputs):\r\n pass",
"def predict(self, X):\n ...",
"def predict(self, X):\n ...",
"def predict(self, X):\n ...",
"def predict(self, x):\n return self.model.predict(x)",
"def predict(self, X, **kwargs):\n return Learner.predict(self, X, **kwargs)",
"def predict(self, data, verbose=0):\n\n if self.model is None:\n raise StandardError('Model is not built. Run build method or load model before fitting')\n # print len(data[0])\n # print type(data[0])\n # print data.shape\n return self.model.predict(data, 1, verbose) # ,steps)",
"def predict(self, predPoints=None):",
"def predict(data, model: str = None, **kwargs):\n\n model_instance = get_model(model)\n log.debug(\"Predict with \" + str(model_instance))\n return model_instance.predict(data, **kwargs)",
"def predict(self, sample, **kwargs):\n return self.model.predict(sample, **kwargs)",
"def model_predict(self, X):\n return self.cmodel.predict(X=X)",
"def predict(self, x):\n pred = x\n for m in self.models:\n pred = m.predict(pred)\n\n return pred",
"def predict(self, obs):\n return self.model(obs)",
"def test_predict():\n args = get_layer('predict', 'manual', 'temporal', False, False, window=2, step_size=3)\n run_layer(*args)",
"def predict(self, data):\r\n return self.sess.run([self.predict_op, self.Mu], feed_dict={self.X: data})",
"def _predict(self, data, async, *args, **kwargs):\n if self.trained:\n if async:\n return self.backend.predict.delay(\n copy.deepcopy(self.model_dict), data, async, *args,\n **kwargs)\n else:\n return self.backend.predict(\n copy.deepcopy(self.model_dict), data, async, *args,\n **kwargs)\n else:\n raise Exception(\"You must have a trained model\"\n \"in order to make predictions\")",
"def predict(self,X): \n return self._predict(X)",
"def predict(self, data, *args, **kwargs):\n return self._predict(data, async=False, *args, **kwargs)",
"def predict(cls, input):\n clf = cls.get_model()\n print('input=')\n print(input)\n return clf.predict(input)",
"def predict(x):\n model = Model()\n res = model.predict([x])[0][0]\n click.echo(res)",
"def predict(self, x, **kwargs):\n return self.__multi_layer_perceptron.predict(x)",
"def predict(self, data):\n\t\traise NotImplementedError",
"def predict(self, inp_data: T_co) -> T_co:\n raise NotImplementedError",
"def _predict_preproc_model(self, model_cfg, model,):\n model = self._make_model(model_cfg['model_name'], databunch=self._data)\n model.model_param = model_cfg['model_param']\n model.wrapper_params = model_cfg['wrapper_params']\n return(model)",
"def predict(X, y, parameters):\n pass",
"def predict(self, X):\n if self.model is None:\n print(\"%s.predict: implement me\" % (self.__class__.__name__))\n return np.zeros((1, self.odim))",
"def predict(cls, input):\n clf = cls.get_model()\n return clf.predict(input)",
"def predict(self, X):\n return self.model.predict(X)",
"def predict(self, X):\n return self.model.predict(X)",
"def _predict(self,\n X=None):\n\n if X is None:\n ypred = self.clf_pipeline.predict(self.Xtrain)\n else:\n ypred = self.clf_pipeline.predict(X)\n\n return ypred",
"def predict(self, xs, **kwargs):",
"def predict(self, x):\n raise NotImplementedError(\"Please Implement this method\")",
"def predict(self, X):",
"def predict(self, X):",
"def predict(self, xs, stochastic=True, **kwargs):\n return super().predict(xs, **kwargs)",
"def predict(self, data):\n return self.result.predict(data)",
"def predict(self,function,args):\r\n param = self.model._get_params()\r\n fs = []\r\n for p in self.chain:\r\n self.model._set_params(p)\r\n fs.append(function(*args))\r\n self.model._set_params(param)# reset model to starting state\r\n return fs",
"def predict(self, data_in):\n pass",
"def predict(self, X):\n pass",
"def predict(self, X):\n pass",
"def predict(self, X):\n pass",
"def run_prediction(self):\r\n self.get_prediction_indices()\r\n self.walk_forward_prediction()",
"def predict_batch(self, model, context, data=None):\n pass",
"def predict(self, x: np.ndarray):\n return self.model.predict(x)",
"def postprocess(self, prediction_dict, **params):\r\n pass",
"def predict(self):\n return _RateElasticNetRegressor.predict(self)",
"def predict(self, input_data):\n if not self.predict_as_probability_:\n return self.ensemble_model_.predict(input_data)\n else:\n return self.ensemble_model_.predict_proba(input_data)",
"def predict(self, X):\n\n return self.model.predict(X)",
"def predict_proba(self):\n ...",
"def predict ( self, X: np.ndarray ):\n \n return self.knn.predict ( X )\n # End predict()",
"def predict(self, data):\n params = self.get_params(self.opt_state)\n return self.predict_jax(params, data)",
"def predict(self, data):\n return self.model.predict(data, batch_size=data.shape[1])",
"def predict(self, X):\n raise NotImplementedError",
"def predict_step(self, x):\n\n input_x = self.session.graph.get_operation_by_name(\"input_x\").outputs[0]\n predictions_op = self.session.graph.get_operation_by_name(\"output/predictions\").outputs[0] \n\n d_ = {\n input_x: x\n }\n\n self.init_dataset(d_)\n\n return self.session.run([predictions_op])",
"def predict(self, fit_result, x):\n raise NotImplementedError()",
"def predict(self):\n for track in self.tracks:\n track.predict(self.kf)",
"def predict(self):\n for track in self.tracks:\n track.predict(self.kf)",
"def predict(self):\n for track in self.tracks:\n track.predict(self.kf)",
"def predict(self, obs):\n pass",
"def _make_predict(self):\n with context.context(training=False):\n prediction = self(*self.inputs)\n return theano.function(self.inputs, prediction)",
"def Predict(self, request, global_params=None):\n config = self.GetMethodConfig('Predict')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def predict(self, trained_model, prediction_datetime):\n return trained_model.predict()",
"def prediction(self, x):\n t = self.model.predict(x)\n return t",
"def predict(self, x, batch_size=1):\n assert self.model, \"Model was not initialized\"\n return self.model.predict(x, batch_size=batch_size)",
"def _get_prediction(self):\n raise NotImplementedError"
] | [
"0.818373",
"0.78223795",
"0.7495833",
"0.7317303",
"0.73074985",
"0.72913486",
"0.72899944",
"0.7286579",
"0.7285986",
"0.72676605",
"0.7258348",
"0.7226006",
"0.72209585",
"0.7216009",
"0.720618",
"0.72039515",
"0.7197023",
"0.7196392",
"0.71649635",
"0.7164158",
"0.7131129",
"0.71084714",
"0.71066815",
"0.7094991",
"0.7094991",
"0.70908785",
"0.7076897",
"0.70644027",
"0.7058447",
"0.70423865",
"0.704062",
"0.70266116",
"0.70266116",
"0.702183",
"0.70193416",
"0.70184636",
"0.70175457",
"0.70043087",
"0.70043087",
"0.70043087",
"0.6981656",
"0.6952858",
"0.69409037",
"0.6915323",
"0.691303",
"0.6912017",
"0.6909722",
"0.69091123",
"0.69053894",
"0.69037086",
"0.6900811",
"0.6894815",
"0.68908405",
"0.6878788",
"0.6877619",
"0.6874898",
"0.6863742",
"0.68351686",
"0.68229026",
"0.6815269",
"0.6814722",
"0.68091905",
"0.6800491",
"0.6793643",
"0.6793643",
"0.67864215",
"0.67813885",
"0.67809725",
"0.6770886",
"0.6770886",
"0.6767914",
"0.67676085",
"0.676383",
"0.6751266",
"0.6746605",
"0.6746605",
"0.6746605",
"0.6744191",
"0.67319036",
"0.6726952",
"0.6725695",
"0.6714135",
"0.67063636",
"0.67046",
"0.6701023",
"0.6700742",
"0.6690668",
"0.6680473",
"0.6680141",
"0.66736275",
"0.66731596",
"0.6667177",
"0.6667177",
"0.6667177",
"0.6656634",
"0.66558444",
"0.6645551",
"0.6645471",
"0.6644729",
"0.6644481",
"0.6631915"
] | 0.0 | -1 |
Summarize the Regression Results | def summary(self, yname=None, xname=None, title=None, alpha=.05):
# TODO: Make this raise upstream instead of just "pass"
raise NotImplementedError # pragma: no cover
# TODO: move the GenericLikelihoodModelResults implementation here? | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def do_stats_model(x, y):\n Xx = sm.add_constant(x)\n sm_logit = sm.Logit(y, Xx)\n result = sm_logit.fit()\n print result.summary()\n result.pred_table()\n # linear model\n print \"linear regression model:\\n\"\n sm_linear = sm.OLS(y, Xx)\n result = sm_linear.fit()\n print result.summary()",
"def summarize(self, fit_result, x, y, dy=None):\n return str(fit_result)",
"def print_results(self, regressor=False):\n if regressor:\n for i, model in enumerate(self.list_of_models):\n print \"Model: \", model\n print \"R2 score: \", self.r2_scores[i]\n print \"MSE: \", self.mse_scores[i]\n else:\n for i, model in enumerate(self.list_of_models):\n print \"Model: \", model\n print \"F1 score: \", self.f1_scores[i]\n print \"recall score: \", self.recall_scores[i]\n print \"precision score: \", self.precision_scores[i]\n print \"accuracy score: \", self.accuracy_scores[i]",
"def _eval_regressor(self):\n\n y_pred_baseline = self.df_baseline[self.score_column]\n y_pred_sample = self.df_sample[self.score_column]\n\n y_label_baseline = self.df_baseline[self.label_column]\n y_label_sample = self.df_sample[self.label_column]\n\n rmse_baseline = self._rmse(y_label_baseline, y_pred_baseline)\n mae_baseline = mean_absolute_error(y_label_baseline, y_pred_baseline)\n r2_baseline = r2_score(y_label_baseline, y_pred_baseline)\n\n rmse_sample = self._rmse(y_label_sample, y_pred_sample)\n mae_sample = mean_absolute_error(y_label_sample, y_pred_sample)\n r2_sample = r2_score(y_label_sample, y_pred_sample)\n\n metrics_df = pd.DataFrame(\n {\n \"RMSE\": [rmse_baseline, rmse_sample],\n \"MAE\": [mae_baseline, mae_sample],\n \"R2\": [r2_baseline, r2_sample],\n },\n index=[\"baseline\", \"sample\"],\n )\n\n self.performance_comparison = metrics_df",
"def _compute(self):\n\n super(RegressionStatistics, self)._compute()\n sets = self.sets\n Nsets = len(sets)\n\n stats = {}\n\n funcs = {\n 'RMP_t': lambda p,t:rootMeanPowerFx(t),\n 'STD_t': lambda p,t:N.std(t),\n 'RMP_p': lambda p,t:rootMeanPowerFx(p),\n 'STD_p': lambda p,t:N.std(p),\n 'CCe': CorrErrorFx(),\n 'CCp': CorrErrorPFx(),\n 'RMSE': RMSErrorFx(),\n 'RMSE/RMP_t': RelativeRMSErrorFx()\n }\n\n for funcname, func in funcs.iteritems():\n funcname_all = funcname + '_all'\n stats[funcname_all] = []\n for i, (targets, predictions, values) in enumerate(sets):\n stats[funcname_all] += [func(predictions, targets)]\n stats[funcname_all] = N.array(stats[funcname_all])\n stats[funcname] = N.mean(stats[funcname_all])\n stats[funcname+'_std'] = N.std(stats[funcname_all])\n stats[funcname+'_max'] = N.max(stats[funcname_all])\n stats[funcname+'_min'] = N.min(stats[funcname_all])\n\n # create ``summary`` statistics, since some per-set statistics\n # might be uncomputable if a set contains just a single number\n # (like in the case of correlation coefficient)\n targets, predictions = [], []\n for i, (targets_, predictions_, values_) in enumerate(sets):\n targets += list(targets_)\n predictions += list(predictions_)\n\n for funcname, func in funcs.iteritems():\n funcname_all = 'Summary ' + funcname\n stats[funcname_all] = func(predictions, targets)\n\n self._stats.update(stats)",
"def summary(self, yname=None, xname_fe=None, xname_re=None,\n title=None, alpha=.05):\n\n from statsmodels.iolib import summary2\n smry = summary2.Summary()\n\n info = OrderedDict()\n info[\"Model:\"] = \"MixedLM\"\n if yname is None:\n yname = self.model.endog_names\n info[\"No. Observations:\"] = str(self.model.n_totobs)\n info[\"No. Groups:\"] = str(self.model.n_groups)\n\n gs = np.array([len(x) for x in self.model.endog_li])\n info[\"Min. group size:\"] = \"%.0f\" % min(gs)\n info[\"Max. group size:\"] = \"%.0f\" % max(gs)\n info[\"Mean group size:\"] = \"%.1f\" % np.mean(gs)\n\n info[\"Dependent Variable:\"] = yname\n info[\"Method:\"] = self.method\n info[\"Scale:\"] = self.scale\n info[\"Likelihood:\"] = self.llf\n info[\"Converged:\"] = \"Yes\" if self.converged else \"No\"\n smry.add_dict(info)\n smry.add_title(\"Mixed Linear Model Regression Results\")\n\n float_fmt = \"%.3f\"\n\n sdf = np.nan * np.ones((self.k_fe + self.k_re2, 6),\n dtype=np.float64)\n\n # Coefficient estimates\n sdf[0:self.k_fe, 0] = self.fe_params\n\n # Standard errors\n sdf[0:self.k_fe, 1] =\\\n np.sqrt(np.diag(self.cov_params()[0:self.k_fe]))\n\n # Z-scores\n sdf[0:self.k_fe, 2] = sdf[0:self.k_fe, 0] / sdf[0:self.k_fe, 1]\n\n # p-values\n sdf[0:self.k_fe, 3] = 2 * norm.cdf(-np.abs(sdf[0:self.k_fe, 2]))\n\n # Confidence intervals\n qm = -norm.ppf(alpha / 2)\n sdf[0:self.k_fe, 4] = sdf[0:self.k_fe, 0] - qm * sdf[0:self.k_fe, 1]\n sdf[0:self.k_fe, 5] = sdf[0:self.k_fe, 0] + qm * sdf[0:self.k_fe, 1]\n\n # Names for all pairs of random effects\n jj = self.k_fe\n for i in range(self.k_re):\n for j in range(i + 1):\n sdf[jj, 0] = self.cov_re[i, j]\n sdf[jj, 1] = np.sqrt(self.scale) * self.bse[jj]\n jj += 1\n\n sdf = pd.DataFrame(index=self.model.data.param_names, data=sdf)\n sdf.columns = ['Coef.', 'Std.Err.', 'z', 'P>|z|',\n '[' + str(alpha/2), str(1-alpha/2) + ']']\n for col in sdf.columns:\n sdf[col] = [float_fmt % x if np.isfinite(x) else \"\"\n for x in sdf[col]]\n\n smry.add_df(sdf, align='r')\n\n return smry",
"def _graph_results(self, X_test, y_test, y_pred):\n if self.regression is None:\n print(\"Regression results aren't available. Have you run linear_regression() yet?\")\n return\n\n if self.attributes.shape[1] > 1:\n print(\"Graphing is supported for one feature only.\")\n return\n\n plt.scatter(X_test, y_test, color=\"black\")\n plt.plot(X_test, y_pred, color=\"blue\", linewidth=3)\n plt.xticks(())\n plt.yticks(())\n plt.show()",
"def get_regression_metrics(model, actual, predicted):\n regr_metrics = {\n 'Root Mean Squared Error' : metrics.mean_squared_error(actual, predicted)**0.5,\n 'Mean Absolute Error' : metrics.mean_absolute_error(actual, predicted),\n 'R^2' : metrics.r2_score(actual, predicted),\n 'Explained Variance' : metrics.explained_variance_score(actual, predicted)\n }\n\n #return reg_metrics\n df_regr_metrics = pd.DataFrame.from_dict(regr_metrics, orient='index')\n df_regr_metrics.columns = [model]\n return df_regr_metrics",
"def get_regression(self):\n return self.regression",
"def get_summarized_results(self):\n stats = [v.stats() for (k, v) in self.examples.items() if v.is_ready()]\n res = self.ExampleClass.average_stats(stats)\n\n res['loss'] = self.loss/self.loss_cnt\n res['recent_loss'] = sum(self.recent_loss_array) / sum(self.recent_loss_bs_array)\n\n return res",
"def nnRegression(data):",
"def regression_metrics(self, target_data, predicted):\n from sklearn import metrics \n if len(target_data) != 0:\n #Mean Absolute Error\n mae = round(metrics.mean_absolute_error(target_data, predicted),2)\n #Mean Squared Error\n mse = round(metrics.mean_squared_error(target_data, predicted),2)\n #R2\n r2 = round(metrics.r2_score(target_data, predicted),2)\n results = {'mae':mae, 'mse':mse, 'R2':r2}\n return results\n else:\n raise Exception(\"Metrics calculation failed\")",
"def summary(self, yname=None, xname=None, title=None, alpha=.05):\n #TODO: add a summary text for options that have been used\n\n jvalue, jpvalue, jdf = self.jtest()\n\n top_left = [('Dep. Variable:', None),\n ('Model:', None),\n ('Method:', ['GMM']),\n ('Date:', None),\n ('Time:', None),\n ('No. Observations:', None),\n #('Df Residuals:', None), #[self.df_resid]), #TODO: spelling\n #('Df Model:', None), #[self.df_model])\n ]\n\n top_right = [#('R-squared:', [\"%#8.3f\" % self.rsquared]),\n #('Adj. R-squared:', [\"%#8.3f\" % self.rsquared_adj]),\n ('Hansen J:', [\"%#8.4g\" % jvalue] ),\n ('Prob (Hansen J):', [\"%#6.3g\" % jpvalue]),\n #('F-statistic:', [\"%#8.4g\" % self.fvalue] ),\n #('Prob (F-statistic):', [\"%#6.3g\" % self.f_pvalue]),\n #('Log-Likelihood:', None), #[\"%#6.4g\" % self.llf]),\n #('AIC:', [\"%#8.4g\" % self.aic]),\n #('BIC:', [\"%#8.4g\" % self.bic])\n ]\n\n if title is None:\n title = self.model.__class__.__name__ + ' ' + \"Results\"\n\n # create summary table instance\n from statsmodels.iolib.summary import Summary\n smry = Summary()\n smry.add_table_2cols(self, gleft=top_left, gright=top_right,\n yname=yname, xname=xname, title=title)\n smry.add_table_params(self, yname=yname, xname=xname, alpha=alpha,\n use_t=self.use_t)\n\n return smry",
"def _standardize_results(self):\r\n # Standardization accounts for NaN values (via Pandas)\r\n stdev_X = self._X.std(ddof=1)\r\n self._X_standardized = (\r\n self._X - self._X.mean()) / stdev_X\r\n\r\n # Model fitting\r\n model_standardized = sm.Logit(self._y,\r\n sm.add_constant(self._X_standardized))\r\n results_obj = model_standardized.fit(disp=0) # hide second output info\r\n\r\n # Initialize dataframe (regressors in index only)\r\n output_indices = results_obj.params.drop('const').index\r\n output_cols = ['coef', 't', 'P>|t|',\r\n 'coef_stdX', 'coef_stdXy', 'stdev_X']\r\n std_results_output = pd.DataFrame(index=output_indices,\r\n columns=output_cols)\r\n std_results_output = std_results_output.rename_axis(self._y.name)\r\n\r\n # Gather values from model that took the raw data\r\n std_results_output['coef'] = self._results.params\r\n std_results_output['t'] = self._results.tvalues # col 1\r\n std_results_output['P>|t|'] = self._results.pvalues # col 2\r\n if not results_obj.use_t:\r\n # Output will be labelled as z-scores, not t-values\r\n std_results_output.rename(columns={'t': 'z', 'P>|t|': 'P>|z|'},\r\n inplace=True)\r\n test_dist_name = std_results_output.columns[1] # store for dict later\r\n p_col_name = std_results_output.columns[2] # store for dict later\r\n # Gather values from the model that took the standardized data\r\n std_results_output['coef_stdX'] = results_obj.params\r\n std_results_output['stdev_X'] = stdev_X\r\n\r\n # Now calculate std_XY (via Long's method):\r\n var_explained = self._results.fittedvalues.std() ** 2\r\n var_ystar = var_explained + np.pi ** 2 / 3 # ystar is latent variable\r\n std_results_output['coef_stdXy'] = ((std_results_output['coef'] *\r\n stdev_X)\r\n / np.sqrt(var_ystar))\r\n\r\n # Make Pandas Styler object\r\n std_results_output = std_results_output\\\r\n .style.format({'coef': \"{:+.4f}\",\r\n test_dist_name: '{:+.3f}',\r\n p_col_name: '{:.3f}',\r\n 'coef_stdX': '{:+.4f}',\r\n 'coef_stdXy': '{:+.4f}',\r\n 'stdev_X': '{:.4f}'})\r\n std_results_output.set_caption(\r\n \"Unstandardized and Standardized Estimates\")\r\n self._results_output_standardized = std_results_output\r\n pass",
"def metrics_regression(y_real, y_pred):\n mae=mean_absolute_error(y_real, y_pred)\n mape= mean_absolute_percentage_error(y_real, y_pred)\n mse=mean_squared_error(y_real, y_pred)\n evs= explained_variance_score(y_real, y_pred)\n maximo_error= max_error(y_real, y_pred)\n r2=r2_score(y_real, y_pred)\n metrics_reg= {'Mean Absolute Error': mae, \n 'Mean Absolute Percentage Error': mape,\n 'Mean Squared Error': mse,\n 'Explained Variance Score': evs, \n 'Max Error': maximo_error,\n 'R2 Score':r2} \n \n return metrics_reg",
"def get_regression(data):\n\txmean = mean_calc(data, 1)\n\txvariance = var_calc(data, 1, xmean)\n\tymean = mean_calc(data, 2)\n\tyvariance = var_calc(data, 2, ymean)\n\tcovariance = cov_calc(data, 1, xmean, 2, ymean)\n\tB1 = round(covariance / xvariance, 4)\n\tB0 = round(ymean - B1 * xmean, 4)\n\treturn 'Body Weight = %s * Brain Weight + %s' % (B1, B0)",
"def test_regression_of_slope_sum_distribution():\n\n meaningful_stats = pd.read_pickle(\n 'files/meaningfull_stats.pkl')\n\n print meaningful_stats['std'].tolist()\n print meaningful_stats['returns_diff'].tolist()\n\n def make_float(array):\n \"\"\"\n takes an array and makes all the number in it floats\n \"\"\"\n finial_array = []\n\n for number in array:\n finial_array.append(float(number))\n return finial_array\n\n seaborn.regplot(meaningful_stats['std'], meaningful_stats['returns_diff'])\n\n plt.title(\"STD and Returns\")\n\n plt.axhline(y=00, color='r', linestyle='-')\n\n plt.show()",
"def eval_perf_total(model, X_train, y_train, X_test, y_test):\n\n y_hat_train = model.predict(X_train)\n y_hat_test = model.predict(X_test)\n \n train_mae = metrics.mean_absolute_error(y_train, y_hat_train)\n train_mse = metrics.mean_squared_error(y_train, y_hat_train)\n train_rmse = np.sqrt(metrics.mean_squared_error(y_train, y_hat_train))\n train_r = metrics.r2_score(y_train, y_hat_train)\n\n print('Evaluating Performance on Training Data:\\n')\n print(f' Train Mean Absolute Error: {train_mae:,.2f}')\n print(f' Train Mean Squared Error: {train_mse:,.2f}\\n')\n print(f'Train Root Mean Squared Error: {train_rmse:,.2f}')\n print(f'Train R-Square Value: {round(train_r,2)}')\n\n print('\\n'+'---'*25+'\\n')\n\n test_mae = metrics.mean_absolute_error(y_test, y_hat_test)\n test_mse = metrics.mean_squared_error(y_test, y_hat_test)\n test_rmse = np.sqrt(metrics.mean_squared_error(y_test, y_hat_test))\n test_r = metrics.r2_score(y_test, y_hat_test)\n\n print('Evaluating Performance on Testing Data:\\n')\n print(f' Test Mean Absolute Error: {test_mae:,.2f}')\n print(f' Test Mean Squared Error: {test_mse:,.2f}\\n')\n print(f'Test Root Mean Squared Error: {test_rmse:,.2f}')\n print(f'Test R-Square Value: {round(test_r,2)}')",
"def _validateRegression(self, trainingSet):\n \n sumErrors = [0] * len(trainingSet[0].label) \n\n sumTotal = 0\n \n for example in trainingSet:\n Y = self.test(example)\n \n errors = [(example.label[i] - Y[i])**2 for i in range(0,self.K)]\n \n for i in range(len(errors)):\n sumErrors[i] += errors[i]\n \n sumTotal += sum(errors) \n \n return 0.5 * sumTotal, errors",
"def regression_analysis(cls, y_true, y_pred, path=None):\n residual = y_true - y_pred\n print(\"Histogram\")\n cls.histogram(residual, \"Residual\")\n print(\"Scatter\")\n cls.scatter_plot(y_pred, residual, \"pred\", \"residual\", path=path)\n print(\"Scatter\")\n cls.scatter_plot( y_true, y_pred, \"y_test\", \"pred\", path=path)",
"def summarize_results(self, models, scores):\n mu = np.mean(scores)\n sigma = np.std(scores)\n best_model = models[np.argmax(scores)]\n best_score = max(scores)\n return best_model, best_score, mu, sigma",
"def linear_regression(X, Y, Xs_test, Ys_test):\n\n ## YOUR CODE HERE\n #################\n return 0",
"def do_scikit_learn_regression(data, verbose = False):\n \n \n regr = linear_model.LinearRegression()\n\n x = data['c'].values.reshape(100,1)\n y = data['f'].values.reshape(100,1)\n \n regr.fit(x, y)\n \n if verbose:\n\n string = '\\n'.join((\n f'Coefficient of {regr.coef_[0][0]} compared to actual {9/5}',\n f'Intercept of {regr.intercept_[0]} compared to actual {32}'\n ))\n\n print (string)\n\n return regr.coef_[0][0], regr.intercept_[0]",
"def results_row(self):\n results_row = super(LogisticRegressionBatch, self).results_row()\n\n # append on logistic regression-specific results\n neg_log_loss = -self.log_loss()\n more_details = {\n \"lambda\":[self.lam],\n \"lambda normalized\":[self.lam_norm],\n \"eta0\":[self.eta0],\n \"eta\": [self.eta], # learning rate\n \"log loss\": [self.log_loss()],\n \"-(log loss), training\": [neg_log_loss],\n \"-(log loss)/N, training\": [neg_log_loss/self.N],\n \"iteration\": [self.iteration]\n }\n results_row.update(more_details)\n return results_row",
"def regression_results(filename):\n regression_df = pd.read_csv(filename)\n intercept = regression_df.loc[regression_df.term == '(Intercept)', 'estimate'][0]\n exportness = _exportness(regression_df)\n coefficients = _coefficients(regression_df)\n return intercept, exportness, coefficients",
"def metrics():\n\tmetrics = pd.DataFrame(index=['accuracy', 'precision', 'recall'],\n\t\t\t\t\t\t columns=['LogisticReg', 'NeuralNetwork'])\n\treturn metrics",
"def get_model_summary(self):\n\n summary = Summary()\n\n # add the model equation with estimated parameters\n model_equation = self._get_model_equation()\n summary.tables.append(model_equation)\n\n # add the parameter summary\n params_summary = self._get_params_summary()\n summary.tables.append(params_summary)\n\n res = self._model.fit()\n\n # add more summary statistics\n gleft = self._get_left_summary_table(res)\n gright = self._get_right_summary_table(res)\n summary.add_table_2cols(res, gleft=gleft, gright=gright)\n\n # add extreme influence and outlier table\n high_leverage = ('High leverage:', self._FLOAT_STRING_FORMAT.format(3 * res.params.shape[0] / res.nobs))\n extreme_outlier = ('Extreme outlier (Standardized residual):', self._FLOAT_STRING_FORMAT.format(3))\n dfn = res.params.shape[0] + 1\n dfd = res.nobs + res.params.shape[0]\n high_influence_cooksd = (\"High influence (Cook's D)\",\n self._FLOAT_STRING_FORMAT.format(stats.f.ppf(0.9, dfn=dfn, dfd=dfd)))\n high_influence_dffits = (\"High influence (DFFITS)\",\n self._FLOAT_STRING_FORMAT.format(2 * np.sqrt(res.params.shape[0] / res.nobs)))\n influence_and_outlier_table_data = [high_leverage,\n extreme_outlier,\n high_influence_cooksd,\n high_influence_dffits]\n influence_and_outlier_table = SimpleTable(data=influence_and_outlier_table_data)\n summary.tables.append(influence_and_outlier_table)\n\n return summary",
"def fit_and_report(model, X, y, Xv, yv, mode = 'regression'):\n model.fit(X, y)\n if mode.lower().startswith('regress'):\n errors = [mean_squared_error(y, model.predict(X)), mean_squared_error(yv, model.predict(Xv))]\n if mode.lower().startswith('classif'):\n errors = [1 - model.score(X,y), 1 - model.score(Xv,yv)] \n \n # tests\n assert len(errors) ==2, 'the len of errors is 2'\n \n return errors",
"def getRegressionOutput(self):\n\n # Construct train data\n X_tmp = np.empty(shape=(0, 14))\n for flightNum in range(len(self.routes)):\n # concatenate the buy or wait info to get the total datas\n y_train = self.y_train.reshape((self.y_train.shape[0],1))\n y_train_price = self.y_train_price.reshape((self.y_train_price.shape[0],1))\n\n X_train = np.concatenate((self.X_train, y_train, y_train_price), axis=1)\n\n # choose one route datas\n X_train = X_train[np.where(X_train[:, flightNum]==1)[0], :]\n\n # remove dummy variables\n # feature 8: departure date; feature 9: observed date state\n # feature 10: minimum price; feature 11: maximum price\n # feature 12: prediction(buy or wait); feature 13: current price\n X_train = X_train[:, 0:14]\n\n # group by the feature: departure date\n departureDates_train = np.unique(X_train[:, 8])\n\n # get the final datas, the observed data state should be from large to small(i.e. for time series)\n for departureDate in departureDates_train:\n indexs = np.where(X_train[:, 8]==departureDate)[0]\n datas = X_train[indexs, :]\n minPrice = min(datas[:, 10])\n datas[:, 12] = minPrice\n \"\"\"\n print departureDate\n print minPrice\n print datas\n \"\"\"\n X_tmp = np.concatenate((X_tmp, datas), axis=0)\n\n X_train = X_tmp[:, 0:12]\n y_train = X_tmp[:, 12]\n y_train_price = X_tmp[:, 13]\n y_train = y_train.reshape((y_train.shape[0], 1))\n y_train_price = y_train_price.reshape((y_train_price.shape[0], 1))\n\n\n X_train = np.concatenate((X_train, y_train_price), axis=1)\n np.save('inputReg/X_train', X_train)\n np.save('inputReg/y_train', y_train)\n np.save('inputReg/y_train_price', y_train_price)\n\n\n # Construct test data\n X_tmp = np.empty(shape=(0, 14))\n for flightNum in range(len(self.routes)):\n # concatenate the buy or wait info to get the total datas\n y_test = self.y_test.reshape((self.y_test.shape[0],1))\n y_test_price = self.y_test_price.reshape((self.y_test_price.shape[0],1))\n\n X_test = np.concatenate((self.X_test, y_test, y_test_price), axis=1)\n\n # choose one route datas\n X_test = X_test[np.where(X_test[:, flightNum]==1)[0], :]\n\n # remove dummy variables\n # feature 8: departure date; feature 9: observed date state\n # feature 10: minimum price; feature 11: maximum price\n # feature 12: prediction(buy or wait); feature 13: current price\n X_test = X_test[:, 0:14]\n\n # group by the feature: departure date\n departureDates_test = np.unique(X_test[:, 8])\n\n # get the final datas, the observed data state should be from large to small(i.e. for time series)\n for departureDate in departureDates_test:\n indexs = np.where(X_test[:, 8]==departureDate)[0]\n datas = X_test[indexs, :]\n minPrice = min(datas[:, 10])\n datas[:, 12] = minPrice\n \"\"\"\n print departureDate\n print minPrice\n print datas\n \"\"\"\n X_tmp = np.concatenate((X_tmp, datas), axis=0)\n\n X_test = X_tmp[:, 0:12]\n y_test = X_tmp[:, 12]\n y_test_price = X_tmp[:, 13]\n y_test = y_test.reshape((y_test.shape[0], 1))\n y_test_price = y_test_price.reshape((y_test_price.shape[0], 1))\n X_test = np.concatenate((X_test, y_test_price), axis=1)\n np.save('inputReg/X_test', X_test)\n np.save('inputReg/y_test', y_test)\n np.save('inputReg/y_test_price', y_test_price)",
"def metrics(logger, model, X_train, y_train, X_test, y_test):\n\n results = dict()\n y_preds = model.predict(X_test)\n results['Train Accuracy'] = model.score(X_train, y_train)\n results['Test Accuracy'] = accuracy_score(y_test, y_preds)\n results['Precision'] = precision_score(y_test, y_preds)\n results['Recall'] = recall_score(y_test, y_preds)\n\n metric_cols = data_config['params']['metrics_cols']\n res_df = pd.DataFrame(results.items(), columns=metric_cols)\n\n metrics_path = data_config['outputs']['logreg_metrics']\n res_df.to_csv(metrics_path, index=False)\n print(f'Metrics saved to {metrics_path}')\n\n return",
"def print_results(self):\n self.accuracy = round(accuracy_score(self.y_val, self.y_pred, 'weighted'), 4)\n self.f1 = round(f1_score(self.y_val, self.y_pred, average='weighted'), 4)\n self.precision = round(precision_score(self.y_val, self.y_pred, average='weighted'), 4)\n\n print(f'Results for {self.title}:')\n print(f'{self.title} accuracy: {self.accuracy}')\n print(f'{self.title} f-score: {self.f1}')\n print(f'{self.title} precision: {self.precision}')",
"def print_results(name, train_r2, val_r2, coeffs, poly):\n if poly:\n print(f\"With Polynomial Features: degree = {poly}...\\n\")\n print(f'{name} Regression Scores: ', val_r2, '\\n')\n\n print(f'{name}.R. Train - Mean R^2: {np.mean(train_r2):.3f} +- {np.std(train_r2):.3f}')\n print(f'{name}.R. Val - Mean R^2: {np.mean(val_r2):.3f} +- {np.std(val_r2):.3f}')\n\n print('\\nCoefficients: ', coeffs)\n print('\\n\\n')",
"def simple_cat_reg(param, dataframe):\n model = ols(param, data=dataframe).fit()\n print(f\"R-squared = {model.rsquared}\")\n print(\"Parameters are:\")\n print(model.summary())\n return model.rsquared, model.params",
"def lm_model(x_all, y_all, print_ind = True, plot_ind = True):\n \n # data\n x_train = x_all[x_all[\"yyyymm\"] < TRAIN_DATE].iloc[:,1:].to_numpy()\n x_test = x_all[x_all[\"yyyymm\"] >= TRAIN_DATE].iloc[:,1:].to_numpy()\n y_train = y_all[y_all[\"yyyymm\"] < TRAIN_DATE].iloc[:,1].to_numpy()\n y_test = y_all[y_all[\"yyyymm\"] >= TRAIN_DATE].iloc[:,1].to_numpy()\n \n # Fit model\n X = sm.add_constant(x_train)\n lm = sm.OLS(y_train, X).fit()\n \n if print_ind:\n print(lm.summary())\n print()\n \n # test\n X_test = sm.add_constant(x_test)\n ypred = lm.predict(X_test)\n MSE = np.sum((ypred - y_test)**2) / len(y_test)\n\n # plot\n if plot_ind:\n # Residual plot\n plt.figure(figsize = (10,7))\n plt.scatter(lm.fittedvalues, lm.resid)\n plt.xlabel('Fitted Value', fontsize = 15)\n plt.ylabel('Residual', fontsize = 15)\n plt.title(\"Residual plot\", fontsize = 25)\n plt.axhline(y = 0)\n plt.show()\n \n # QQ plot\n plt.figure(figsize = (10,7))\n sm.ProbPlot(lm.resid).qqplot()\n plt.title(\"qq-plot\", fontsize = 25)\n abline(1,0)\n plt.show()\n \n # print\n if print_ind:\n print(\"Test MSE: {:.6f}\".format(MSE))\n print(\"Test MSE as % of square mean: {:.6f}\".format(MSE / (np.mean(y_test) ** 2)))\n \n \n return lm, MSE",
"def linear_regression(features, values):\n clf = SGDRegressor(n_iter=100)\n clf.fit(features,values)\n print(clf.score(features,values))\n intercept = clf.intercept_ \n params = clf.coef_\n \n return intercept, params",
"def summarize_regression(summary, test_names, titles, x, y, gradient,\n alpha, ylabel, ylim):\n\n num_plots = len(test_names)\n\n # Generates the figure\n fig, (ax_r1) = plt.subplots(1, num_plots)\n fig.set_size_inches(num_plots * 2, 2)\n\n # ...\n for (test_name, title, ax_reg) in \\\n zip(*(test_names, titles, ax_r1,)):\n # Plots the regression\n gradient_regression(\n ax_reg,\n x=x, y=y,\n gradient=gradient,\n data=summary.loc[summary['test'] == test_name].copy(),\n alpha=alpha\n )\n format_regression_axis(ax_reg)\n ax_reg.set_title(title)\n ax_reg.set_xticklabels(ax_reg.get_xticks())\n\n ax_reg.set_aspect('equal')\n\n # Adds axis labels\n ax_r1[0].set_yticklabels(ax_r1[0].get_yticks())\n ax_r1[0].set_ylabel('%s Power' % ylabel)\n ax_r1[0].set_yticklabels(ax_r1[0].get_yticks())\n\n return fig",
"def regression_evaluation(self, test_set, predicted_values):\r\n\r\n MAE = self.mean_absolute_error(test_set, predicted_values)\r\n MSE = self.mean_square_error(test_set, predicted_values)\r\n print(f\"Mean Percent Error:\\t{MAE:.2f}\")\r\n print(f\"Mean Square Error:\\t{MSE:.2f}\")",
"def dataStats(self):\n print (\"Performing statistical analysis of the data\")\n # stuff to do",
"def RegressionAnalysis(df, Independent, Explanatory, Indicators, prefix=None):\n\n if Indicators == None:\n Indicators = [\"OLS\", \"GLSAR\", \"RecursiveLS\", \"Yule Walker Order 1\", \"Yule Walker Order 2\",\n \"Yule Walker Order 3\", \"Burg Order 1\", \"Burg Order 2\", \"Burg Order 3\",\n \"QuantReg\", \"GLM Binomial\", \"GLM Gamma\", \"GLM Gaussian\", \"GLM Inverse Gaussian\",\n \"GLM Negative Binomial\", \"GLM Poisson\", \"GLM Tweedie\"\n \"AR\", \"ARMA\", \"ARIMA\", \"Granger Causality\",\n \"Levinson Durbin\", \"Cointegration\"]\n\n # Pre-processing\n Independent = df[Independent]\n Independent = pd.DataFrame(Independent)\n\n Explanatory = df[Explanatory]\n Explanatory = pd.DataFrame(Explanatory)\n\n y_sm = np.array(Independent).reshape((-1, 1))\n\n x_sm = np.array(Explanatory)\n x_sm = sm.add_constant(x_sm)\n\n NumDecimal = 3 # Number of decimals for rounding numbers\n\n OneValueIndicators = {}\n\n if prefix == None:\n prefix = \"\"\n\n ##################################################\n ##### PART 1: Linear Regression\n ##################################################\n\n \"\"\"\n ########## Section 1: OLS\n \"\"\"\n name = \"OLS\"\n\n if name in Indicators:\n name = prefix + name\n\n model = sm.OLS(y_sm, x_sm)\n results = model.fit()\n\n ### One Value Indicators\n\n OneValueIndicators = Statsmodels_Regression_All_OneValueIndicators(OneValueIndicators, name, results,\n Explanatory, NumDecimal)\n\n ### Time Series Indicators\n\n # Fitted Values\n df = Statsmodels_FittedValues(df, results, name)\n\n # Residuals\n df = Statsmodels_LR_Residuals(df, results, name)\n\n \"\"\"\n ########## Section 2: WLS\n \"\"\"\n\n ### Not Implemented\n\n \"\"\"\n ########## Section 3: GLS\n \"\"\"\n\n ### Not Implemented\n\n \"\"\"\n ########## Section 4: GLSAR\n \"\"\"\n\n name = \"GLSAR\"\n\n if name in Indicators:\n name = prefix + name\n\n model = sm.GLSAR(y_sm, x_sm, 1)\n results = model.iterative_fit(1)\n\n ### One Value Indicators\n\n OneValueIndicators = Statsmodels_Regression_All_OneValueIndicators(OneValueIndicators, name, results,\n Explanatory, NumDecimal)\n\n ### Time Series Indicators\n\n # Fitted Values\n df = Statsmodels_FittedValues(df, results, name)\n\n # Residuals\n df = Statsmodels_LR_Residuals(df, results, name)\n\n \"\"\"\n ########## Section 5: RLS\n \"\"\"\n\n name = \"RecursiveLS\"\n\n if name in Indicators:\n name = prefix + name\n\n model = sm.RecursiveLS(y_sm, x_sm)\n results = model.fit()\n\n ### One Value Indicators\n\n OneValueIndicators = Statsmodels_Regression_All_OneValueIndicators(OneValueIndicators, name, results,\n Explanatory, NumDecimal)\n\n OneValueIndicators[name + \" Z Value\"] = results.zvalues\n\n ### Time Series Indicators\n\n # Fitted Values\n df = Statsmodels_FittedValues(df, results, name)\n\n # Residuals\n df = Statsmodels_LR_Residuals(df, results, name)\n\n # Cumsum\n # Not Implemented\n\n \"\"\"\n ########## Section 6: Yule Walker ORder 1\n \"\"\"\n name = \"Yule Walker Order 1\"\n\n if name in Indicators and len(Explanatory.columns) == 1:\n name = prefix + name\n\n rho, sigma = statsmodels.regression.linear_model.yule_walker(x_sm[:, 1].flatten(), order=1)\n\n ### One Value Indicators\n\n # Rho\n OneValueIndicators[name + \" Rho\"] = round(rho[0], NumDecimal)\n\n # Sigma\n OneValueIndicators[name + \" Sigma\"] = round(sigma, NumDecimal)\n\n \"\"\"\n ########## Section 7: Yule Walker ORder 2\n \"\"\"\n name = \"Yule Walker Order 2\"\n\n if name in Indicators and len(Explanatory.columns) == 1:\n name = prefix + name\n\n rho, sigma = 
statsmodels.regression.linear_model.yule_walker(x_sm[:, 1].flatten(), order=2)\n\n ### One Value Indicators\n\n # Rho\n OneValueIndicators[name + \" Rho\"] = round(rho[0], NumDecimal)\n\n # Sigma2\n OneValueIndicators[name + \" Sigma\"] = round(sigma, NumDecimal)\n\n \"\"\"\n ########## Section 8: Yule Walker ORder 3\n \"\"\"\n name = \"Yule Walker Order 3\"\n\n if name in Indicators and len(Explanatory.columns) == 1:\n name = prefix + name\n\n rho, sigma = statsmodels.regression.linear_model.yule_walker(x_sm[:, 1].flatten(), order=3)\n\n ### One Value Indicators\n\n # Rho\n OneValueIndicators[name + \" Rho\"] = round(rho[0], NumDecimal)\n\n # Sigma\n OneValueIndicators[name + \" Sigma\"] = round(sigma, NumDecimal)\n\n \"\"\"\n ########## Section 9: Burg's AR(p) ORder 1\n \"\"\"\n\n name = \"Burg Order 1\"\n\n if name in Indicators and len(Explanatory.columns) == 1:\n name = prefix + name\n\n rho, sigma2 = statsmodels.regression.linear_model.burg(x_sm[:, 1].flatten(), order=1)\n\n ### One Value Indicators\n\n # Rho\n OneValueIndicators[name + \" Rho\"] = round(rho[0], NumDecimal)\n\n # Sigma2\n OneValueIndicators[name + \" Sigma2\"] = round(sigma2, NumDecimal)\n\n \"\"\"\n ########## Section 10: Burg's AR(p) ORder 2\n \"\"\"\n\n name = \"Burg Order 2\"\n\n if name in Indicators and len(Explanatory.columns) == 1:\n name = prefix + name\n\n rho, sigma2 = statsmodels.regression.linear_model.burg(x_sm[:, 1].flatten(), order=2)\n\n ### One Value Indicators\n\n # Rho\n OneValueIndicators[name + \" Rho\"] = round(rho[0], NumDecimal)\n\n # Sigma2\n OneValueIndicators[name + \" Sigma2\"] = round(sigma2, NumDecimal)\n\n \"\"\"\n ########## Section 11: Burg's AR(p) ORder 3\n \"\"\"\n\n name = \"Burg Order 3\"\n\n if name in Indicators and len(Explanatory.columns) == 1:\n name = prefix + name\n\n rho, sigma2 = statsmodels.regression.linear_model.burg(x_sm[:, 1].flatten(), order=3)\n\n ### One Value Indicators\n\n # Rho\n OneValueIndicators[name + \" Rho\"] = round(rho[0], NumDecimal)\n\n # Sigma2\n OneValueIndicators[name + \" Sigma2\"] = round(sigma2, NumDecimal)\n\n \"\"\"\n ########## Section 12: Quantile Regression\n \"\"\"\n\n name = \"QuantReg\"\n\n if name in Indicators:\n name = prefix + name\n\n model = sm.QuantReg(y_sm, x_sm)\n results = model.fit()\n\n ### One Value Indicators\n\n OneValueIndicators = Statsmodels_Regression_All_OneValueIndicators(OneValueIndicators, name, results,\n Explanatory, NumDecimal)\n\n ### Time Series Indicators\n\n # Fitted Values\n df = Statsmodels_FittedValues(df, results, name)\n\n # Residuals\n df = Statsmodels_LR_Residuals(df, results, name)\n\n ##################################################\n ##### PART 2: Generalized Linear Models\n ##################################################\n\n \"\"\"\n ########## Section 1: GLM Binomial\n \"\"\"\n\n name = \"GLM Binomial\"\n\n if name in Indicators:\n name = prefix + name\n\n model = sm.GLM(y_sm, x_sm, family=sm.families.Binomial())\n results = model.fit()\n\n ### One Value Indicators\n\n OneValueIndicators = Statsmodels_Regression_All_OneValueIndicators(OneValueIndicators, name, results,\n Explanatory, NumDecimal)\n\n OneValueIndicators[\"Pearson chi2\"] = round(results.pearson_chi2, NumDecimal)\n\n ### Time Series Indicators\n\n # Fitted Values\n df = Statsmodels_FittedValues(df, results, name)\n\n # Residuals\n df = Statsmodels_LR_Residuals(df, results, name)\n\n \"\"\"\n ########## Section 2: GLM Gamma\n \"\"\"\n\n name = \"GLM Gamma\"\n\n if name in Indicators:\n name = prefix + name\n\n model = 
sm.GLM(y_sm, x_sm, family=sm.families.Gamma())\n results = model.fit()\n\n ### One Value Indicators\n\n OneValueIndicators = Statsmodels_Regression_All_OneValueIndicators(OneValueIndicators, name, results,\n Explanatory, NumDecimal)\n\n OneValueIndicators[\"Pearson chi2\"] = round(results.pearson_chi2, NumDecimal)\n\n ### Time Series Indicators\n\n # Fitted Values\n df = Statsmodels_FittedValues(df, results, name)\n\n # Residuals\n df = Statsmodels_LR_Residuals(df, results, name)\n\n \"\"\"\n ########## Section 3: GLM Gaussian\n \"\"\"\n\n name = \"GLM Gaussian\"\n\n if name in Indicators:\n name = prefix + name\n\n model = sm.GLM(y_sm, x_sm, family=sm.families.Gaussian())\n results = model.fit()\n\n ### One Value Indicators\n\n OneValueIndicators = Statsmodels_Regression_All_OneValueIndicators(OneValueIndicators, name, results,\n Explanatory, NumDecimal)\n\n OneValueIndicators[\"Pearson chi2\"] = round(results.pearson_chi2, NumDecimal)\n\n ### Time Series Indicators\n\n # Fitted Values\n df = Statsmodels_FittedValues(df, results, name)\n\n # Residuals\n df = Statsmodels_LR_Residuals(df, results, name)\n\n \"\"\"\n ########## Section 3: GLM InverseGaussian\n \"\"\"\n\n name = \"GLM Inverse Gaussian\"\n\n if name in Indicators:\n name = prefix + name\n\n model = sm.GLM(y_sm, x_sm, family=sm.families.InverseGaussian())\n results = model.fit()\n\n ### One Value Indicators\n\n OneValueIndicators = Statsmodels_Regression_All_OneValueIndicators(OneValueIndicators, name, results,\n Explanatory, NumDecimal)\n\n OneValueIndicators[\"Pearson chi2\"] = round(results.pearson_chi2, NumDecimal)\n\n ### Time Series Indicators\n\n # Fitted Values\n df = Statsmodels_FittedValues(df, results, name)\n\n # Residuals\n df = Statsmodels_LR_Residuals(df, results, name)\n\n \"\"\"\n ########## Section 4: GLM NegativeBinomial\n \"\"\"\n\n name = \"GLM Negative Binomial\"\n\n if name in Indicators:\n name = prefix + name\n\n model = sm.GLM(y_sm, x_sm, family=sm.families.NegativeBinomial())\n results = model.fit()\n\n ### One Value Indicators\n\n OneValueIndicators = Statsmodels_Regression_All_OneValueIndicators(OneValueIndicators, name, results,\n Explanatory, NumDecimal)\n\n OneValueIndicators[\"Pearson chi2\"] = round(results.pearson_chi2, NumDecimal)\n\n ### Time Series Indicators\n\n # Fitted Values\n df = Statsmodels_FittedValues(df, results, name)\n\n # Residuals\n df = Statsmodels_LR_Residuals(df, results, name)\n\n \"\"\"\n ########## Section 5: GLM Poisson\n \"\"\"\n\n name = \"GLM Poisson\"\n\n if name in Indicators:\n name = prefix + name\n\n model = sm.GLM(y_sm, x_sm, family=sm.families.Poisson())\n results = model.fit()\n\n ### One Value Indicators\n\n OneValueIndicators = Statsmodels_Regression_All_OneValueIndicators(OneValueIndicators, name, results,\n Explanatory, NumDecimal)\n\n OneValueIndicators[\"Pearson chi2\"] = round(results.pearson_chi2, NumDecimal)\n\n ### Time Series Indicators\n\n # Fitted Values\n df = Statsmodels_FittedValues(df, results, name)\n\n # Residuals\n df = Statsmodels_LR_Residuals(df, results, name)\n\n \"\"\"\n ########## Section 6: GLM Tweedie\n \"\"\"\n\n name = \"GLM Tweedie\"\n\n if name in Indicators:\n name = prefix + name\n\n model = sm.GLM(y_sm, x_sm, family=sm.families.Tweedie())\n results = model.fit()\n\n ### One Value Indicators\n\n OneValueIndicators = Statsmodels_Regression_All_OneValueIndicators(OneValueIndicators, name, results,\n Explanatory, NumDecimal)\n\n OneValueIndicators[\"Pearson chi2\"] = round(results.pearson_chi2, NumDecimal)\n\n ### Time Series 
Indicators\n\n # Fitted Values\n df = Statsmodels_FittedValues(df, results, name)\n\n # Residuals\n df = Statsmodels_LR_Residuals(df, results, name)\n\n ##################################################\n ##### PART 3: Robust Linear Models\n ##################################################\n\n ##################################################\n ##### PART 4: AR models\n ##################################################\n\n name = \"AR\"\n\n if name in Indicators:\n name = prefix + name\n\n model = statsmodels.tsa.ar_model.AR(Independent)\n results = model.fit()\n\n ### One Value Indicators\n\n OneValueIndicators = Statsmodels_Regression_All_OneValueIndicators(OneValueIndicators, name, results,\n Explanatory, NumDecimal)\n\n OneValueIndicators[name + \" Final Prediction Error\"] = results.fpe\n\n OneValueIndicators[name + \" Hannan-Quinn Information Criterion\"] = results.hqic\n\n OneValueIndicators[name + \" Roots\"] = results.roots\n\n ### Time Series Indicators\n\n # Fitted Values\n df = Statsmodels_FittedValues(df, results, name)\n\n # Residuals\n df = Statsmodels_LR_Residuals(df, results, name)\n\n ##################################################\n ##### PART 5: ARMA\n ##################################################\n\n name = \"ARMA\"\n\n if name in Indicators:\n\n name = prefix + name\n\n model = statsmodels.tsa.arima_model.ARMA(y_sm, (5, 5), x_sm)\n results = model.fit()\n\n ### One Value Indicators\n\n OneValueIndicators = Statsmodels_Regression_All_OneValueIndicators(OneValueIndicators, name, results,\n Explanatory, NumDecimal)\n\n OneValueIndicators[name + \" AR Params\"] = results.arparams\n\n OneValueIndicators[name + \" AR Roots\"] = results.arroots\n\n OneValueIndicators[name + \" AR Freq\"] = results.arfreq\n\n OneValueIndicators[name + \" Hannan-Quinn Information Criterion\"] = results.hqic\n\n OneValueIndicators[name + \" MA Params\"] = results.maparams\n\n try:\n OneValueIndicators[name + \" MA Roots\"] = results.maroots\n except:\n pass\n\n try:\n OneValueIndicators[name + \" MA Freq\"] = results.mafreq\n except:\n pass\n\n OneValueIndicators[name + \" Sigma2\"] = results.sigma2\n\n ### Time Series Indicators\n\n # Fitted Values\n df = Statsmodels_FittedValues(df, results, name)\n\n # Residuals\n df = Statsmodels_LR_Residuals(df, results, name)\n\n ##################################################\n ##### PART 6: ARIMA\n ##################################################\n\n name = \"ARIMA\"\n\n if name in Indicators:\n\n name = prefix + name\n\n model = statsmodels.tsa.arima_model.ARIMA(Independent, (2, 2, 2), Explanatory)\n results = model.fit()\n\n ### One Value Indicators\n\n OneValueIndicators = Statsmodels_Regression_All_OneValueIndicators(OneValueIndicators, name, results,\n Explanatory, NumDecimal)\n\n OneValueIndicators[name + \" AR Params\"] = results.arparams\n\n OneValueIndicators[name + \" AR Roots\"] = results.arroots\n\n OneValueIndicators[name + \" AR Freq\"] = results.arfreq\n\n OneValueIndicators[name + \" Hannan-Quinn Information Criterion\"] = results.hqic\n\n OneValueIndicators[name + \" MA Params\"] = results.maparams\n\n OneValueIndicators[name + \" MA Roots\"] = results.maroots\n\n OneValueIndicators[name + \" MA Freq\"] = results.mafreq\n\n OneValueIndicators[name + \" Sigma2\"] = results.sigma2\n\n ### Time Series Indicators\n\n # Fitted Values\n df = Statsmodels_FittedValues(df, results, name)\n\n # Residuals\n df = Statsmodels_LR_Residuals(df, results, name)\n\n ##################################################\n ##### PART 7: 
Univariate Analysis\n ##################################################\n\n # Granger Causality\n name = \"Granger Causality\"\n name = prefix + name\n if name in Indicators:\n OneValueIndicators[name] = ts.grangercausalitytests(\n Independent.merge(Explanatory, how=\"inner\", left_index=True, right_index=True), maxlag=10)\n\n # Levinson Durbin\n name = \"Levinson Durbin\"\n name = prefix + name\n if name in Indicators:\n OneValueIndicators[name] = ts.levinson_durbin(Independent)\n\n # Cointegration\n name = \"Cointegration\"\n name = prefix + name\n if name in Indicators:\n OneValueIndicators[name] = ts.coint(Independent, Explanatory, trend=\"ct\", return_results=False)\n\n ##################################################\n ##### Not Implemented\n ##################################################\n\n # BDS Statistic (residuals analysis)\n # Not Implemented\n\n # Return’s Ljung-Box Q Statistic (AR)\n # Not Implemented\n OneValueIndicators = pd.DataFrame.from_dict(OneValueIndicators, orient=\"index\")\n\n return df, OneValueIndicators",
"def _calc_stats(self):\n\n for res in self.rsts:\n _LOG.info(\"Calculate statistics for '%s'\", res.reportid)\n res.calc_stats(regexs=self._stats_colnames, funcnames=self._stats_funcs)",
"def regression(x_train, y_train, x_test=[], y_test=[], D=2, plot=False):\n Ntrain = len(x_train)\n Ntest = len(x_test)\n Xtrain = np.asmatrix(x_train)\n Ytrain = np.asmatrix(y_train).transpose()\n Xtest = np.asmatrix(x_test)\n Ytest = np.asmatrix(y_test).transpose() \n \n X = get_feature_matrix(Ntrain, Xtrain, D)\n X_test = get_feature_matrix(Ntest, Xtest, D)\n\n w = np.linalg.solve(np.dot(X.T, X), np.dot(X.T, Ytrain))\n w = w.reshape((w.shape[0],)).tolist()[0]\n \n predicted_Y = X.dot(w).T\n Rtrain = np.linalg.norm(predicted_Y - Ytrain) #training error\n\n predicted_Y_test = X_test.dot(w).T\n Rtest = np.linalg.norm(predicted_Y_test - Ytest) #test error \n\n average_training_error = (Rtrain**2) / Ntrain\n average_test_error = (Rtest**2) / Ntest\n\n if plot:\n # plots\n x = np.linspace(-5, 5, 1000)\n y = predict_y(x, w)\n plt.subplot(211)\n plt.scatter(x_train, y_train)\n plt.plot(x, y)\n plt.title('Training samples and regression')\n plt.grid(True)\n\n x = np.linspace(-5, 5, 1000)\n y = predict_y(x, w)\n plt.subplot(212)\n plt.scatter(x_test, y_test)\n plt.plot(x,y)\n plt.title('Test samples and regression')\n plt.grid(True) \n\n plt.show()\n \n return {'weights': w, \n 'average_training_error': average_training_error,\n 'average_test_error': average_test_error,\n }",
"def eval_regression_model(model: NeuralNetwork, X_test: np.ndarray, y_test: np.ndarray):\n preds = model.forward(X_test)\n preds = preds.reshape(-1, 1)\n print(\"Mean absolute error: {:.2f}\".format(mae(preds, y_test)))\n print()\n print(\"Root mean squared error {:.2f}\".format(rmse(preds, y_test)))",
"def fit(self):\n self.model = RegressionModel(model_expression=self.model_expression,\n fit_filters=self.filters, predict_filters=self.out_filters,\n ytransform=None, name=self.name)\n\n df = get_data(tables = self.tables,\n filters = self.filters,\n model_expression = self.model_expression)\n \n results = self.model.fit(df)\n \n self.name = self._generate_name()\n self.summary_table = str(results.summary())\n print(self.summary_table)\n \n # We don't strictly need to save the fitted parameters, because they are also\n # contained in the urbansim.models.RegressionModel() sub-object. But maintaining\n # a parallel data structure to other templates will make it easier to refactor the\n # code later on to not rely on RegressionModel any more. \n \n self.fitted_parameters = results.params.tolist()\n self.residuals = results.resid",
"def make_results(self, X, y, verbose=True):\n import xarray as xr\n from sklearn.metrics import r2_score\n from sklearn.metrics import explained_variance_score\n feature_dim, mt_dim = get_feature_multitask_dim(X, y, self.sample_dim)\n rds = y.to_dataset(name='original').copy(deep=False, data=None)\n if sk_attr(self, 'coef_') and sk_attr(self, 'intercept_'):\n rds[feature_dim] = X[feature_dim]\n if mt_dim:\n rds['params'] = xr.DataArray(self.coef_, dims=[mt_dim,\n feature_dim])\n rds['intercept'] = xr.DataArray(self.intercept_, dims=[mt_dim])\n pvals = get_p_values(X, y, self.sample_dim)\n rds['pvalues'] = xr.DataArray(pvals, dims=[mt_dim,\n feature_dim])\n else:\n rds['params'] = xr.DataArray(self.coef_, dims=feature_dim)\n rds['intercept'] = xr.DataArray(self.intercept_)\n pvals = get_p_values(X, y, self.sample_dim)\n rds['pvalues'] = xr.DataArray(pvals, dims=feature_dim)\n elif sk_attr(self, 'feature_importances_'):\n if mt_dim:\n rds['feature_importances'] = xr.DataArray(self.\n feature_importances_,\n dims=[mt_dim,\n feature_dim])\n else:\n rds['feature_importances'] = xr.DataArray(self.\n feature_importances_,\n dims=[feature_dim])\n predict = self.predict(X)\n if mt_dim:\n predict = predict.rename({self.reshapes: mt_dim})\n rds['predict'] = predict\n r2 = r2_score(y, predict, multioutput='raw_values')\n rds['r2'] = xr.DataArray(r2, dims=mt_dim)\n else:\n rds['predict'] = predict\n r2 = r2_score(y, predict)\n rds['r2'] = xr.DataArray(r2)\n if feature_dim:\n r2_adj = 1.0 - (1.0 - rds['r2']) * (len(y) - 1.0) / \\\n (len(y) - X.shape[1])\n else:\n r2_adj = 1.0 - (1.0 - rds['r2']) * (len(y) - 1.0) / (len(y))\n rds['r2_adj'] = r2_adj\n rds['predict'].attrs = y.attrs\n rds['resid'] = y - rds['predict']\n rds['resid'].attrs = y.attrs\n rds['resid'].attrs['long_name'] = 'Residuals'\n rds['dw_score'] = (rds['resid'].diff(self.sample_dim)**2).sum(self.sample_dim,\n keep_attrs=True) / (rds['resid']**2).sum(self.sample_dim, keep_attrs=True)\n exp_var = explained_variance_score(y, rds['predict'].values)\n rds['explained_variance'] = exp_var\n\n# rds['corrcoef'] = self.corrcoef(X, y)\n # unstack dims:\n if mt_dim:\n rds = rds.unstack(mt_dim)\n # put coords attrs back:\n# for coord, attr in y.attrs['coords_attrs'].items():\n# rds[coord].attrs = attr\n# # remove coords attrs from original, predict and resid:\n# rds.original.attrs.pop('coords_attrs')\n# rds.predict.attrs.pop('coords_attrs')\n# rds.resid.attrs.pop('coords_attrs')\n all_var_names = [x for x in rds.data_vars.keys()]\n sample_types = [x for x in rds.data_vars.keys()\n if self.sample_dim in rds[x].dims]\n feature_types = [x for x in rds.data_vars.keys()\n if feature_dim in rds[x].dims]\n error_types = list(set(all_var_names) - set(sample_types +\n feature_types))\n rds.attrs['sample_types'] = sample_types\n rds.attrs['feature_types'] = feature_types\n rds.attrs['error_types'] = error_types\n rds.attrs['sample_dim'] = self.sample_dim\n rds.attrs['feature_dim'] = feature_dim\n # add X to results:\n rds['X'] = X\n if verbose:\n print('Producing results...Done!')\n return rds",
"def compute_statistics(self):",
"def report(self, X, y):\n predict = self.model.predict(X)\n\n skplt.estimators.plot_feature_importances(\n self.model, x_tick_rotation=90)\n plt.show()\n\n fig, ax = plt.subplots(figsize=(7, 7))\n sns.scatterplot(x=y, y=predict)\n lims = [\n np.min([ax.get_xlim(), ax.get_ylim()]), # min of both axes\n np.max([ax.get_xlim(), ax.get_ylim()]), # max of both axes\n ]\n ax.plot(lims, lims, 'k-', alpha=0.75, zorder=0)\n ax.set_aspect('equal')\n ax.set_xlim(lims)\n ax.set_ylim(lims)\n ax.set_xlabel(\"Observed\")\n ax.set_ylabel(\"Predict\")\n ax.set_title(\"Predict vs. Observed\")\n plt.show()\n\n residuals = y - predict\n\n fig, ax = plt.subplots(figsize=(7, 7))\n sns.scatterplot(x=y, y=residuals)\n plt.title(\"Residuals vs. Observed\")\n plt.xlabel(\"Obserbed\")\n plt.ylabel(\"Residuals\")\n plt.show()\n\n plt.hist(residuals)\n plt.title(\"Residuals distribution\")\n plt.xlabel(\"Residuals value\")\n plt.ylabel(\"Count\")\n plt.show()\n\n display(\n pd.DataFrame({\n \"explained_variance_score\":\n metrics.explained_variance_score(y, predict),\n \"mean_absolute_error\":\n metrics.mean_absolute_error(y, predict),\n \"mean_squared_log_error\":\n metrics.mean_squared_log_error(y, predict),\n \"median_absolute_error\":\n metrics.median_absolute_error(y, predict),\n \"r2_score\":\n metrics.r2_score(y, predict)\n },\n index=[0]))",
"def summary(self, yname=None, xname=None, title=None, alpha=.05):\n\n #TODO: import where we need it (for now), add as cached attributes\n from statsmodels.stats.stattools import (jarque_bera,\n omni_normtest, durbin_watson)\n jb, jbpv, skew, kurtosis = jarque_bera(self.wresid)\n omni, omnipv = omni_normtest(self.wresid)\n\n #TODO: reuse condno from somewhere else ?\n #condno = np.linalg.cond(np.dot(self.wexog.T, self.wexog))\n wexog = self.model.wexog\n eigvals = np.linalg.linalg.eigvalsh(np.dot(wexog.T, wexog))\n eigvals = np.sort(eigvals) #in increasing order\n condno = np.sqrt(eigvals[-1]/eigvals[0])\n\n # TODO: check what is valid.\n # box-pierce, breusch-pagan, durbin's h are not with endogenous on rhs\n # use Cumby Huizinga 1992 instead\n self.diagn = dict(jb=jb, jbpv=jbpv, skew=skew, kurtosis=kurtosis,\n omni=omni, omnipv=omnipv, condno=condno,\n mineigval=eigvals[0])\n\n #TODO not used yet\n #diagn_left_header = ['Models stats']\n #diagn_right_header = ['Residual stats']\n\n #TODO: requiring list/iterable is a bit annoying\n #need more control over formatting\n #TODO: default do not work if it's not identically spelled\n\n top_left = [('Dep. Variable:', None),\n ('Model:', None),\n ('Method:', ['Two Stage']),\n ('', ['Least Squares']),\n ('Date:', None),\n ('Time:', None),\n ('No. Observations:', None),\n ('Df Residuals:', None), #[self.df_resid]), #TODO: spelling\n ('Df Model:', None), #[self.df_model])\n ]\n\n top_right = [('R-squared:', [\"%#8.3f\" % self.rsquared]),\n ('Adj. R-squared:', [\"%#8.3f\" % self.rsquared_adj]),\n ('F-statistic:', [\"%#8.4g\" % self.fvalue] ),\n ('Prob (F-statistic):', [\"%#6.3g\" % self.f_pvalue]),\n #('Log-Likelihood:', None), #[\"%#6.4g\" % self.llf]),\n #('AIC:', [\"%#8.4g\" % self.aic]),\n #('BIC:', [\"%#8.4g\" % self.bic])\n ]\n\n diagn_left = [('Omnibus:', [\"%#6.3f\" % omni]),\n ('Prob(Omnibus):', [\"%#6.3f\" % omnipv]),\n ('Skew:', [\"%#6.3f\" % skew]),\n ('Kurtosis:', [\"%#6.3f\" % kurtosis])\n ]\n\n diagn_right = [('Durbin-Watson:', [\"%#8.3f\" % durbin_watson(self.wresid)]),\n ('Jarque-Bera (JB):', [\"%#8.3f\" % jb]),\n ('Prob(JB):', [\"%#8.3g\" % jbpv]),\n ('Cond. No.', [\"%#8.3g\" % condno])\n ]\n\n\n if title is None:\n title = self.model.__class__.__name__ + ' ' + \"Regression Results\"\n\n #create summary table instance\n from statsmodels.iolib.summary import Summary\n smry = Summary()\n smry.add_table_2cols(self, gleft=top_left, gright=top_right,\n yname=yname, xname=xname, title=title)\n smry.add_table_params(self, yname=yname, xname=xname, alpha=alpha,\n use_t=True)\n\n smry.add_table_2cols(self, gleft=diagn_left, gright=diagn_right,\n yname=yname, xname=xname,\n title=\"\")\n\n\n\n return smry",
"def _do_first_regression(self, Y, X, W):\n\n\t\twls_model = sm.WLS(Y, X, weights = 1.0 / W)\n\t\tresults = wls_model.fit()\n\t\tb, a = results.params # convention from paper\n\t\treturn (a,b)",
"def compute_metrics(self, results: list) -> dict:",
"def aggregate_results(self):\n\n raise NotImplementedError",
"def train_results(self):\n\n return self.train_loss_results, self.train_accuracy_results, self.train_pred_results",
"def computeStats(results, independentVariable):\n common = {}\n\n # Collect lists of the values\n for v in results:\n test = v.__dict__[independentVariable]\n try:\n common[test].append(v)\n except:\n common[test] = [v]\n\n result = []\n # Sanity check for number of data items being summarized\n # print (\"Computing stats on \" + str(len(common.values()[0])))\n for measurements in list(common.values()):\n resultValues = {}\n resultValues[independentVariable] = measurements[0].__dict__[\n independentVariable\n ]\n # print (\"Measurement[\\\"\"+independentVariable+\"\\\"] : \" + str(resultValues[independentVariable]))\n fieldnames = list(measurements[0].__dict__.keys())\n fieldnames.remove(independentVariable)\n for stat in fieldnames:\n values = [m.__dict__[stat] for m in measurements]\n resultValues[stat] = mean(values)\n resultValues[stat + \"_SD\"] = standardDeviation(values, resultValues[stat])\n result.append(\n measurement(list(resultValues.keys()), list(resultValues.values()))\n )\n # print (\"Result: \" + str(result))\n return result",
"def summarize(self):\n # go recursively in the model architecture\n summary_str = self.recursive_summarize(self, 0, self.name)\n\n # Sum the model parameters.\n num_total_params = sum([np.prod(p.size()) for p in self.parameters()])\n mod_trainable_params = filter(lambda p: p.requires_grad, self.parameters())\n num_trainable_params = sum([np.prod(p.size()) for p in mod_trainable_params])\n\n summary_str += 'Total Trainable Params: {}\\n'.format(num_trainable_params)\n summary_str += 'Total Non-trainable Params: {}\\n'.format(num_total_params-num_trainable_params) \n summary_str += '='*80 + '\\n'\n\n return summary_str",
"def summarizeFitData(X, y, w=None, categories=None, showavevarminmax=True):\n \n print(\"X.shape=\", X.shape, \"y.shape=\", y.shape,end=\"\")\n if w is None:\n w=pd.Series(np.ones(y.shape))\n else:\n print(\"w.shape=\", w.shape,end=\"\")\n\n print()\n print(\"columns=\", X.columns)\n \n if categories is None:\n categories=y\n\n uniquecategories=sorted(categories.unique())\n print(\"categories=\",uniquecategories)\n print()\n \n print(\"sum of weights per category\")\n length=max([len(str(x)) for x in uniquecategories]+[10])\n print(('{:>'+str(length)+'}').format(\"all\"),('{:>'+str(length)+'}').format(w.sum()))\n for cat in uniquecategories:\n print(('{:>'+str(length)+'}').format(cat), ('{:>'+str(length)+'}').format(w[categories==cat].sum()))\n print(\"\\n\")\n\n if showavevarminmax:\n print(\"average\")\n variablelength=max([len(x) for x in X.columns]+[len(\"variable/class\")])\n print(('{:>'+str(variablelength)+'}').format(\"variable/class\"),end=\"\")\n print(('{:>'+str(length)+'}').format(\"all\"),end=\"\")\n for cat in uniquecategories:\n print(('{:>'+str(length)+'}').format(cat),end=\"\")\n print(\"\")\n \n for i,variable in enumerate(X.columns):\n print(('{:>'+str(variablelength)+'}').format(variable),end=\"\")\n print(('{:>'+str(length)+'.3}').format(np.average(X[variable], weights=w)),end=\"\")\n for cat in uniquecategories:\n print(('{:>'+str(length)+'.3}').format(np.average(X[variable][categories==cat], weights=w[categories==cat])),end=\"\")\n print()\n print(\"\\n\")\n \n print(\"variance\")\n print(('{:>'+str(variablelength)+'}').format(\"variable/class\"),end=\"\")\n print(('{:>'+str(length)+'}').format(\"all\"),end=\"\")\n for cat in uniquecategories:\n print(('{:>'+str(length)+'}').format(cat),end=\"\")\n print()\n \n for i,variable in enumerate(X.columns):\n print(('{:>'+str(variablelength)+'}').format(variable),end=\"\")\n print(('{:>'+str(length)+'.3}').format(variance(X[variable], weights=w)),end=\"\")\n for cat in uniquecategories:\n print(('{:>'+str(length)+'.3}').format(variance(X[variable][categories==cat], weights=w[categories==cat])),end=\"\")\n print()\n print(\"\\n\")\n\n print(\"min/max\")\n print(('{:>'+str(variablelength)+'}').format(\"variable/class\"),end=\"\")\n print(('{:>'+str(length)+'}').format(\"all/min\"),end=\"\")\n print(('{:>'+str(length)+'}').format(\"all/max\"),end=\"\")\n for cat in uniquecategories:\n print(('{:>'+str(length)+'}').format(str(cat)+\"/min\"),end=\"\")\n print(('{:>'+str(length)+'}').format(str(cat)+\"/max\"),end=\"\")\n print()\n \n for i,variable in enumerate(X.columns):\n print(('{:>'+str(variablelength)+'}').format(variable),end=\"\")\n print(('{:>'+str(length)+'.3}').format(float(np.min(X[variable]))),end=\"\")\n print(('{:>'+str(length)+'.3}').format(float(np.max(X[variable]))),end=\"\")\n for cat in uniquecategories:\n print(('{:>'+str(length)+'.3}').format(float(np.min(X[variable][categories==cat]))),end=\"\")\n print(('{:>'+str(length)+'.3}').format(float(np.max(X[variable][categories==cat]))),end=\"\")\n print()\n print(\"\\n\")",
"def summary(self):\r\n print(self.model.summary())",
"def analysis(houses:pd.DataFrame) -> None:\n \n \"\"\"\n #Me just trying to fit the data without any outside influences\n f= f'SELLER_HOUSE ~ SQFT_PER + PRICE + C(LOCATION)' \n result= smf.logit(formula= str(f), data= houses).fit()\n print(result.summary2())\n y= ['SELLER_HOUSE']\n x= ['SQFT_PER', 'PRICE', 'LOC_699 - Not Defined', 'LOC_AA - Airport Area', 'LOC_CG - Columbus Grove',\n 'LOC_CV - Cypress Village', 'LOC_EASTW - Eastwood', 'LOC_EC - El Camino Real', 'LOC_GP - Great Park',\n 'LOC_IRSP - Irvine Spectrum', 'LOC_LGA - Laguna Altura', 'LOC_NK - Northpark', 'LOC_NW - Northwood', \n 'LOC_OC - Oak Creek', 'LOC_OH - Orchard Hills', 'LOC_OT - Orangetree', 'LOC_PS - Portola Springs', \n 'LOC_QH - Quail Hill', 'LOC_SH - Shady Canyon', 'LOC_SJ - Rancho San Joaquin', 'LOC_STG - Stonegate', \n 'LOC_Stonegate', 'LOC_TR - Turtle Rock', 'LOC_TRG - Turtle Ridge', 'LOC_UP - University Park',\n 'LOC_UT - University Town Center', 'LOC_WB - Woodbridge', 'LOC_WD - Woodbury', \n 'LOC_WI - West Irvine', 'LOC_WN - Walnut (Irvine)', 'LOC_WP - Westpark']\n x_train, x_test, y_train, y_test= train_test_split(houses[x], houses[y], test_size= 0.3, random_state= 500)\n logreg = LogisticRegression()\n logreg.fit(x_train, y_train.values.ravel())\n y_pred= logreg.predict(x_test)\n print('Accuracy of logistic regression classifier on test set:', round(logreg.score(x_test, y_test), 3))\n # This model is really bad\n \n \"\"\"\n \n \"\"\n houses= houses.drop(['DAYS_ON_MARKET', 'ADDRESS', 'LOCATION',\n 'STATUS', 'PROPERTY_TYPE', 'ZIP_CODE'], axis= 1)\n columns= houses.columns.values.tolist()\n y= ['SELLER_HOUSE']\n x= [i for i in columns if i not in y]\n \n # Over Sampling Using SMOTE \n x_train, _, y_train, _= train_test_split(houses[x], houses[y], test_size= 0.3, random_state= 500)\n x_columns= x_train.columns\n \n os= SMOTE(random_state= 0)\n os_x, os_y= os.fit_sample(x_train, y_train)\n os_x= pd.DataFrame(data= os_x, columns= x_columns)\n os_y= pd.DataFrame(data= os_y, columns= y)\n \n \n #Recursive Feature Elimination\n logreg= LogisticRegression(max_iter= 600)\n rfe= RFE(logreg, 20)\n rfe= rfe.fit(os_x, os_y.values.ravel())\n \n lst= [i for count, i in enumerate(x) if rfe.support_[count] == True]\n X= os_x[lst]\n Y= os_y['SELLER_HOUSE']\n \n \n #logit_model= sm.Logit(Y, X)\n #result= logit_model.fit()\n #print(result.summary2()) # Model choosen by RCE\n \n #These are features have a p-value less than 0.05\n final_x= ['BATHS', 'ZIP_92602.0', 'ZIP_92618.0', 'LOC_699 - Not Defined', 'LOC_TR - Turtle Rock', 'LOC_WD - Woodbury']\n #final_x= ['ZIP_92602.0', 'LOC_699 - Not Defined', 'LOC_TR - Turtle Rock', 'LOC_WD - Woodbury']\n X2= os_x[final_x]\n \n logit_model2= sm.Logit(Y, X2)\n result2= logit_model2.fit()\n print(result2.summary2()) # Final Model\n \n x_train2, x_test2, y_train2, y_test2= train_test_split(X2, Y, test_size= 0.3, random_state= 500)\n logreg = LogisticRegression()\n logreg.fit(x_train2, y_train2)\n \n y_pred= logreg.predict(x_test2)\n print('Accuracy of logistic regression classifier on test set:', round(logreg.score(x_test2, y_test2), 2))\n \n conf_matrix= confusion_matrix(y_test2, y_pred)\n print(conf_matrix)\n # So 22+61 correct predictions and 13+44 wrong predictions\n \n logit_roc_auc = roc_auc_score(y_test2, logreg.predict(x_test2))\n fpr, tpr, _ = roc_curve(y_test2, logreg.predict_proba(x_test2)[:,1])\n plt.figure()\n plt.plot(fpr, tpr, label='Logistic Regression (area = %0.2f)' % logit_roc_auc)\n plt.plot([0, 1], [0, 1],'r--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n 
plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('Receiver operating characteristic')\n plt.legend(loc=\"lower right\")\n plt.show()\n \"\"",
"def summary(self):\n from statsmodels.iolib.summary import Summary\n from statsmodels.iolib.table import SimpleTable\n model = self.model\n title = model.__class__.__name__ + ' Model Results'\n\n dep_variable = 'endog'\n if isinstance(self.model.endog, pd.DataFrame):\n dep_variable = self.model.endog.columns[0]\n elif isinstance(self.model.endog, pd.Series):\n dep_variable = self.model.endog.name\n seasonal_periods = None if self.model.seasonal is None else self.model.seasonal_periods\n lookup = {'add': 'Additive', 'additive': 'Additive',\n 'mul': 'Multiplicative', 'multiplicative': 'Multiplicative', None: 'None'}\n transform = self.params['use_boxcox']\n box_cox_transform = True if transform else False\n box_cox_coeff = transform if isinstance(transform, str) else self.params['lamda']\n if isinstance(box_cox_coeff, float):\n box_cox_coeff = '{:>10.5f}'.format(box_cox_coeff)\n top_left = [('Dep. Variable:', [dep_variable]),\n ('Model:', [model.__class__.__name__]),\n ('Optimized:', [str(np.any(self.optimized))]),\n ('Trend:', [lookup[self.model.trend]]),\n ('Seasonal:', [lookup[self.model.seasonal]]),\n ('Seasonal Periods:', [str(seasonal_periods)]),\n ('Box-Cox:', [str(box_cox_transform)]),\n ('Box-Cox Coeff.:', [str(box_cox_coeff)])]\n\n top_right = [\n ('No. Observations:', [str(len(self.model.endog))]),\n ('SSE', ['{:5.3f}'.format(self.sse)]),\n ('AIC', ['{:5.3f}'.format(self.aic)]),\n ('BIC', ['{:5.3f}'.format(self.bic)]),\n ('AICC', ['{:5.3f}'.format(self.aicc)]),\n ('Date:', None),\n ('Time:', None)]\n\n smry = Summary()\n smry.add_table_2cols(self, gleft=top_left, gright=top_right,\n title=title)\n formatted = self.params_formatted # type: pd.DataFrame\n\n def _fmt(x):\n abs_x = np.abs(x)\n scale = 1\n if abs_x != 0:\n scale = int(np.log10(abs_x))\n if scale > 4 or scale < -3:\n return '{:>20.5g}'.format(x)\n dec = min(7 - scale, 7)\n fmt = '{{:>20.{0}f}}'.format(dec)\n return fmt.format(x)\n\n tab = []\n for _, vals in formatted.iterrows():\n tab.append([_fmt(vals.iloc[1]),\n '{0:>20}'.format(vals.iloc[0]),\n '{0:>20}'.format(str(bool(vals.iloc[2])))])\n params_table = SimpleTable(tab, headers=['coeff', 'code', 'optimized'],\n title=\"\",\n stubs=list(formatted.index))\n\n smry.tables.append(params_table)\n\n return smry",
"def descriptive_statistics(relfreqs): \n means = np.mean(relfreqs, axis=\"columns\")\n stdevs = np.std(relfreqs, axis=\"columns\")\n return means, stdevs",
"def summary(self):\n self.model.summary()",
"def linear_regression_manual2(data):\n dataset = np.array(data)\n\n X = dataset[:,0]\n Y = dataset[:,1]\n\n X_avg = X.mean()\n Y_avg = Y.mean()\n\n X_cntr = X - X_avg\n Y_cntr = Y - Y_avg\n\n a = (X_cntr*Y_cntr).sum() / (X_cntr*X_cntr).sum()\n b = Y_avg - a*X_avg\n\n return (a, b)",
"def _reg_score(self, X, y, output=True):\n X, y = self._format_batch(X, y)\n _y = self.predict(X)\n mse = np.sum((_y-y)**2)/len(y)\n if output:\n print(f\"Mean Squared Error: {mse}\")\n return mse",
"def other_regression(df, x_cols, y_col):\n df = df[~ np.isnan(df[y_col])]\n for col in x_cols:\n df = df[~ np.isnan(df[col])]\n\n X = df[x_cols].to_numpy()\n X = sm.add_constant(X)\n y = df[y_col].to_numpy()\n mod = sm.OLS(y, X)\n res = mod.fit()\n return res\n #print(res.summary())",
"def test_rr_summary(results):\n # pylint: disable=unidiomatic-typecheck\n test_result = results.summary()\n assert type(test_result).__name__ == \"Summary\"\n assert type(test_result.tables) == list\n assert len(test_result.tables) == 3\n assert len(test_result.extra_txt) > 0",
"def summary(self, test_type='t-test'):\n summary = f'Results for running {self.cv_method} evaluation for {self.method} '\n summary += f'on {self.n_model} models:\\n\\n'\n name_length = max([max(len(m.name) for m in self.models) + 1, 6])\n means = self.get_means()\n sems = self.get_sem()\n if means is None:\n means = np.nan * np.ones(self.n_model)\n if sems is None:\n sems = np.nan * np.ones(self.n_model)\n try:\n p_zero = self.test_zero(test_type=test_type)\n p_noise = self.test_noise(test_type=test_type)\n except ValueError:\n p_zero = np.nan * np.ones(self.n_model)\n p_noise = np.nan * np.ones(self.n_model)\n # header of the results table\n summary += 'Model' + (' ' * (name_length - 5))\n summary += '| Eval \\u00B1 SEM |'\n summary += ' p (against 0) |'\n summary += ' p (against NC) |\\n'\n summary += '-' * (name_length + 51)\n summary += '\\n'\n for i, m in enumerate(self.models):\n summary += m.name + (' ' * (name_length - len(m.name)))\n summary += f'| {means[i]: 5.3f} \\u00B1 {sems[i]:4.3f} |'\n if p_zero[i] < 0.001:\n summary += ' < 0.001 |'\n else:\n summary += f'{p_zero[i]:>13.3f} |'\n if p_noise[i] < 0.001:\n summary += ' < 0.001 |'\n else:\n summary += f'{p_noise[i]:>14.3f} |'\n summary += '\\n'\n summary += '\\n'\n if self.cv_method == 'crossvalidation':\n summary += 'No p-values available as crossvalidation provides no variance estimate'\n elif test_type == 't-test':\n summary += 'p-values are based on uncorrected t-tests'\n elif test_type == 'bootstrap':\n summary += 'p-values are based on percentiles of the bootstrap samples'\n elif test_type == 'ranksum':\n summary += 'p-values are based on ranksum tests'\n return summary",
"def summary(self):\n print(self.model.summary())",
"def evaluate(self):\n # Method variables definition\n X_train, X_test, y_train, y_test = dm.reshape_y_set_split_data(self.datasetManager)\n featureScaleDependentVariables = self.datasetManager.params.featureScaleDependentVariables\n\n # Feature Scaling\n X_scaler, X_train = dm.do_feature_scaling(X_train)\n if featureScaleDependentVariables:\n y_scaler, y_train = dm.do_feature_scaling(y_train)\n else:\n y_scaler = None\n y_train = self.datasetManager.y_train\n \n self.X_scaler = X_scaler\n self.y_scaler = y_scaler\n\n # Training the SVR model on the training set\n regressor = SVR(kernel = 'rbf')\n regressor.fit(X_train, y_train.ravel())\n self.regressor = regressor\n\n # Predicting the Test set results\n self.y_pred = y_scaler.inverse_transform(regressor.predict(X_scaler.transform(X_test))) if featureScaleDependentVariables else regressor.predict(X_test)\n \n # Returning the process result : the regression type and the predicted dependent variables set\n return [\"Support Vector Regression\", self.get_r2_score(y_test, self.y_pred)]",
"def calculate_dataset_metrics(self):\n pass",
"def linear_regression(X, Y, Xs_test, Ys_test):\n\n X_n = (X - np.mean(X, axis = 0)) / np.std(X, axis = 0)\n XL = np.concatenate((X_n, np.ones((len(X),1))), axis = 1)\n w = np.linalg.solve(XL.T.dot(XL),XL.T.dot(Y))\n mses = []\n for i, X_test in enumerate(Xs_test):\n Y_test = Ys_test[i]\n XL_test = np.concatenate(((X_test - np.mean(X, axis = 0)) / np.std(X, axis = 0), \n np.ones((len(X_test),1))), axis = 1)\n Y_pred = XL_test.dot(w)\n mse = np.mean(np.sqrt(np.sum((Y_pred - Y_test) ** 2, axis = 1))) \n mses.append(mse) \n return mses",
"def _results(self):\n results = {}\n results[\"coeff\"] = self._coeff_to_dict()\n results[\"coeff_path\"] = dict(\n zip(\n [f\"{col}\" for col in self.X_train_.columns.tolist()],\n (\n self.model_.coef_path_.reshape(-1, self.model_.coef_path_.shape[-1])\n ).tolist(),\n )\n )\n results[\"cv_standard_error\"] = self.model_.cv_standard_error_.tolist()\n results[\"cv_mean_score\"] = self.model_.cv_mean_score_.tolist()\n results[\"lambda_path\"] = self.model_.lambda_path_.tolist()\n results[\"lambda_best\"] = self.model_.lambda_best_[0]\n results[\"lambda_max\"] = self.model_.lambda_max_\n results[\"n_lambda\"] = self.model_.n_lambda_\n results[\"intercept\"] = self.model_.intercept_\n results[\"intercept_path\"] = self.model_.intercept_path_.tolist()[0]\n results[\"params\"] = self.model_.get_params()\n results[\"module\"] = self.model_.__module__\n\n return results",
"def get_summary_stats(self):\r\n n = len(self.results)\r\n\r\n if n == 0:\r\n mean = None\r\n stdev = None\r\n\r\n elif n == 1:\r\n mean = numpy.mean(self.results)\r\n stdev = None\r\n\r\n else:\r\n mean = numpy.mean(self.results)\r\n stdev = numpy.std(self.results)\r\n\r\n sum_stats = {'n': n, 'mean': mean, 'stdev': stdev}\r\n\r\n return sum_stats",
"def handle_regression_results(self, results):\n # Make sure the regression results are a named map\n if not isinstance(results, dict) or not all(isinstance(key, str) for key in results):\n self.fail(\"Regression test '\" +\n self.get_test_method_name() +\n \"' didn't return a named map of regression results\")\n\n # Handle each result individually\n for name, result in results.items():\n self.handle_regression_result(name, result)",
"def score(self, x_test, y_test, regressor=False):\n if regressor:\n r2_scores = []\n mse_scores = []\n for model in self.list_of_models:\n predictions = model.predict(x_test)\n self.r2_scores.append(r2_score(y_test, predictions))\n self.mse_scores.append(mean_squared_error(y_test, predictions))\n self.print_results(regressor=True)\n else:\n self.f1_scores = []\n self.recall_scores = []\n self.precision_scores = []\n self.accuracy_scores = []\n for model in self.list_of_models:\n predictions = model.predict(x_test)\n self.f1_scores.append(f1_score(y_test, predictions))\n self.recall_scores.append(recall_score(y_test, predictions))\n self.precision_scores.append(precision_score(y_test, predictions))\n self.accuracy_scores.append(accuracy_score(y_test, predictions))\n self.print_results()",
"def get_results(self):\n result = [round(self.mr / self.test_size, 1), round(self.mrr / self.test_size, 3),\n round(self.hits1 / self.test_size, 3), round(self.hits3 / self.test_size, 3),\n round(self.hits5 / self.test_size, 3), round(self.hits10 / self.test_size, 3)]\n return result",
"def _get_aggregated_results(self):\n gradients = self.gradients\n client_traj_infos = flatten_lists(self.client_traj_infos)\n client_opt_infos = self._combine_client_opt_infos(self.client_opt_infos)\n \n self.gradients = []\n self.client_traj_infos = []\n self.client_opt_infos = []\n\n return gradients, client_traj_infos, client_opt_infos",
"def compute_statistics(self):\n for i in range(len(self.wine_matrix[0, :])):\n feature = self.wine_matrix[:, i]\n self.wine_stats['feature ' + str(i)] = {}\n if i == 11: # results column\n self.wine_stats['feature ' + str(i)]['positive_class_ratio'] = (feature == 1).sum() / len(feature)\n null, self.wine_stats['feature ' + str(i)]['pvalue'] = stats.normaltest(feature)\n\n # plot\n # pyplot.hist(feature, bins=50)\n # pyplot.show()\n\n for i in range(len(self.cancer_matrix[0, :])):\n feature = self.cancer_matrix[:, i]\n self.cancer_stats['feature ' + str(i)] = {}\n if i == 10: # results column\n self.cancer_stats['feature ' + str(i)]['positive_class_ratio'] = (feature == 1).sum() / len(feature)\n null, self.cancer_stats['feature ' + str(i)]['pvalue'] = stats.normaltest(feature)\n\n # plot\n # pyplot.hist(feature, bins=50)\n # pyplot.show()",
"def compute_stat(gold, pred, verbose):\n y_gold = gold.loc[:, POLARITY]\n y_pred = pred.loc[:, POLARITY]\n\n print(\"General Statistics:\")\n print(sklm.classification_report(y_gold, y_pred), end=\"\\n\\n\")\n\n # Macro-Averaged F1 Score\n print(\n \"Macro-Averaged F1-Score (Positive and Negative Classes):\"\n \" {:.2%}\".format(\n sklm.f1_score(y_gold, y_pred,\n labels=(POSITIVE, NEGATIVE), average=\"macro\")\n ))\n\n # Micro-Averaged F1 Score\n print(\n \"Micro-Averaged F1-Score (All Classes): {:.4%}\".format(\n sklm.f1_score(y_gold, y_pred, average=\"micro\")\n ), end=\"\\n\\n\")\n\n # Confusion Matrix and Examples\n if verbose:\n print(\"Confusion Matrix:\")\n print(sklm.confusion_matrix(\n gold.loc[:, POLARITY], pred.loc[:, POLARITY]))\n print(\"\")\n\n print(\"Examples:\")\n for y_g, y_p, (_, df) in zip(y_gold, y_pred, gold.iterrows()):\n if y_g != y_p:\n print(\n (\"<<<\\tgold:\\t{:s}\\n>>>\\tpredicted:\\t{:s}\"\n \"\\n{:s}\\t{:s}\\n\").format(\n y_g, y_p, df[ID], df[TOKS]\n )\n )",
"def plot_regression_results(ax, y_true, y_pred, title, scores, elapsed_time):\n ax.plot([y_true.min(), y_true.max()],\n [y_true.min(), y_true.max()],\n '--r', linewidth=2)\n ax.scatter(y_true, y_pred, alpha=0.2)\n\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.get_xaxis().tick_bottom()\n ax.get_yaxis().tick_left()\n ax.spines['left'].set_position(('outward', 10))\n ax.spines['bottom'].set_position(('outward', 10))\n ax.set_xlim([y_true.min(), y_true.max()])\n ax.set_ylim([y_true.min(), y_true.max()])\n ax.set_xlabel('Measured')\n ax.set_ylabel('Predicted')\n extra = plt.Rectangle((0, 0), 0, 0, fc=\"w\", fill=False,\n edgecolor='none', linewidth=0)\n ax.legend([extra], [scores], loc='upper left')\n title = title + '\\n Evaluation in {:.2f} seconds'.format(elapsed_time)\n ax.set_title(title)",
"def linear_regression_sklearn(data):\n# Split the data into training/testing sets\n dataset = np.array(data)\n\n X_train = dataset[:,0].reshape(-1,1)\n y_train = dataset[:,1]\n\n# Create linear regression object\n regr = linear_model.LinearRegression()\n\n# Train the model using the training sets\n regr.fit(X_train, y_train)\n\n return (regr.coef_[0], regr.intercept_)",
"def model_metrics(X, y, model, data_set = 'data_set'):\n score = model.score(X, y)\n matrix = confusion_matrix(y, model.predict(X))\n tpr = matrix[1,1] / (matrix[1,1] + matrix[1,0])\n fpr = matrix[0,1] / (matrix[0,1] + matrix[0,0])\n tnr = matrix[0,0] / (matrix[0,0] + matrix[0,1])\n fnr = matrix[1,0] / (matrix[1,1] + matrix[1,0])\n prc = matrix[1,1] / (matrix[1,1] + matrix[0,1])\n \n print(f'{data_set} accuracy score: {score:.2%}')\n print(f'{data_set} precision score {prc:.2%}')\n print(f'{data_set} recall score: {tpr:.2%}\\n')\n class_report = classification_report(y, model.predict(X), zero_division=True)\n print('-------------------------------')\n print(f'classification report')\n print(class_report)\n print ('-------------------------------\\n')\n print('confusion matrix')\n print(f'{matrix}\\n')\n print(f'{data_set} model metrics')\n print('---------------------------------')\n print(f'True positive rate for the model is {tpr:.2%}')\n print(f'False positive rate for the model is {fpr:.2%}')\n print(f'True negative rate for the model is {tnr:.2%}')\n print(f'False negative rate for the model is {fnr:.2%}\\n')",
"def calc_metrics(model, X, y):\n\n # Get model predictions\n y_predict_r = model.predict(X)\n\n # Calculate evaluation metrics for assesing performance of the model.\n roc = roc_auc_score(y, y_predict_r)\n acc = accuracy_score(y, y_predict_r)\n prec = precision_score(y, y_predict_r)\n rec = recall_score(y, y_predict_r)\n f1 = f1_score(y, y_predict_r)\n\n return {\"acc\": acc, \"roc\": roc, \"prec\": prec, \"rec\": rec, \"f1\": f1}",
"def format_multiple_results(results, features_to_show=[]):\n best_score_array = np.array([])\n for r in results:\n print r\n print r[-1]\n best_score_array = np.append(best_score_array, r[-1])\n\n print \"BEST SCORE ARRAY\", best_score_array\n print \"Best Score\"\n print \"Mean: \", best_score_array.mean()\n print \"Std: \", best_score_array.std()",
"def getAllMetrics(self):\n result = self.getReportMetrics()\n result.update(self.getOptimizationMetrics())\n return result",
"def compute_metrics(self, x, y):\n\n self.model.fit(x, y)\n y_predicted = self.model.predict(x)\n self.intercept_ = self.model.intercept_\n self.coeffs_ = self.model.coeffs_\n self.rmse_ = mean_squared_error(y, y_predicted)\n self.r2_ = r2_score(y, y_predicted)\n return self",
"def _create_metric_sum(a,b):\n metric_sum = GridSearchRegressionMetrics()\n metric_sum.explained_variance = a.explained_variance + b.explained_variance\n metric_sum.mean_absolute_error = a.mean_absolute_error + b.mean_absolute_error\n metric_sum.mean_squared_error = a.mean_squared_error + b.mean_squared_error\n metric_sum.r2 = a.r2 + b.r2\n metric_sum.root_mean_squared_error = a.root_mean_squared_error + b.root_mean_squared_error\n return metric_sum",
"def summary(self):\n summary = defaultdict(int)\n\n for r in self.results:\n summary[r.result] += 1\n\n return summary",
"def analyze(data):\n # perform fit\n regr_results = sm.OLS.from_formula('mosquitos ~ temperature + rainfall', data).fit()\n print(regr_results.tvalues)\n \n fig = plt.figure(figsize=(6, 9))\n\n # plot predicted vs. measured mosquito populations from fitted model \n ax0 = fig.add_subplot(3, 1, 1)\n\n parameters = regr_results.params\n predicted = (parameters['Intercept'] + \n parameters['temperature'] * data['temperature'] + \n parameters['rainfall'] * data['rainfall'])\n\n ax0.plot(predicted, data['mosquitos'], 'gd')\n\n ax0.set_xlabel('predicted mosquito population')\n ax0.set_ylabel('measured mosquito population')\n \n # plot mosquitos vs. temperature\n ax1 = fig.add_subplot(3, 1, 2)\n\n ax1.plot(data['temperature'], data['mosquitos'], 'ro')\n ax1.set_xlabel('temperature')\n ax1.set_ylabel('mosquitos')\n\n # plot mosquitos vs. rainfall\n ax2 = fig.add_subplot(3, 1, 3)\n\n ax2.plot(data['rainfall'], data['mosquitos'], 'bs')\n ax2.set_xlabel('rainfall')\n ax2.set_ylabel('mosquitos')\n \n # adjust layout of axes according to label placement\n plt.tight_layout()\n \n return fig",
"def get_statistics(self):\n train_loss, test_loss = self.learner.get_statistics()\n return train_loss, test_loss, np.mean(self.rewards), np.mean(self.surrogate_losses)",
"def get_scores(model, X, y):\n y_hat = model.predict(X)\n pred = pd.get_dummies(y_hat.argmax(axis=1))\n acc = accuracy_score(y,pred)\n rec = recall_score(y,pred, average='weighted')\n pre = precision_score(y,pred, average='weighted')\n f1 = f1_score(y,pred, average='weighted')\n print(f\"ACC: {acc}\")\n print(f\"REC: {rec}\")\n print(f\"PRE: {pre}\")\n print(f\"F1: {f1}\")\n return acc, rec, pre, f1, pred",
"def getOptimizationMetrics(self):\n return self.__unwrapResults().optimizationMetrics",
"def summarize(self, data):\n\n return self.summary(data).flatten()",
"def advancedStats():",
"def reserrorcalc(test_set, model):\n # Extracting X\n X = test_set[:,:-1]\n\n # Extracting labels\n Y = test_set[:,-1]\n residual_err = sum((model.predict(X) - Y) ** 2)\n return residual_err",
"def mean_squared_error(self):\n print('Mean squared error regression loss: ' + str(mean_squared_error(self.model.dataset.get_y_test(),\n self.model.get_predicted())))",
"def get_statistics(self):\n return self.results",
"def generate_summary_dict(self, with_html=True):\n try:\n # from https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/linear_model.py#L1999\n jb, jbpv, skew, kurtosis = sm.stats.stattools.jarque_bera(self.results.wresid)\n self.summary_dict = {\n 'source_file': self.source_file,\n 'features': ', '.join(self.features),\n 'target': self.target,\n 'analysis_type': 'OLS Linear Regression',\n 'aic': self.results.aic,\n 'bic': self.results.bic,\n 'num_observations': self.results.nobs,\n 'df_residuals': self.results.df_resid,\n 'r_squared': self.results.rsquared,\n 'r_squared_adjusted': self.results.rsquared_adj,\n 'f_statistic': self.results.fvalue,\n 'jarque_bera': jb,\n 'jarque_bera_prob': jbpv,\n 'skew': skew,\n 'kurtosis': kurtosis,\n }\n if with_html:\n base_summary_html = self.results.summary().as_html()\n # some styling magic for Bootstrap CSS\n self.summary_dict['summary_html'] = base_summary_html.replace('simpletable', 'table')\n fig = self.construct_plot()\n self.summary_dict['plot_html'] = mpld3.fig_to_html(fig)\n return self.summary_dict\n except AttributeError:\n raise Exception(\"You must execute `run_ols_regression` method first\")",
"def test_linear_regression_results(strategy: str) -> None:\n mapie = MapieRegressor(**STRATEGIES[strategy])\n mapie.fit(X, y)\n _, y_pis = mapie.predict(X, alpha=0.05)\n y_pred_low, y_pred_up = y_pis[:, 0, 0], y_pis[:, 1, 0]\n width_mean = (y_pred_up - y_pred_low).mean()\n coverage = regression_coverage_score(y, y_pred_low, y_pred_up)\n np.testing.assert_allclose(width_mean, WIDTHS[strategy], rtol=1e-2)\n np.testing.assert_allclose(coverage, COVERAGES[strategy], rtol=1e-2)",
"def lm(formula, data):\n\ty, X = patsy.dmatrices(formula, data, return_type='dataframe')\n\tresults = sm.OLS(y, X).fit()\n\tprint(results.summary())\n\treturn results",
"def get_metrics(x, y, num_labels): \n total_f1_score = 0\n total_accuracy = 0\n \n for inp, out in zip(x, y): \n f1 = fscore(inp, list(out), labels=np.arange(num_labels), average='weighted')\n \n total_f1_score += f1\n total_accuracy += get_accuracy(inp, out) \n \n return total_f1_score/len(x), total_accuracy/len(x)",
"def run_analyses(y_predict_train, y_train, y_predict, y_test):\n # calculate metrics\n _, training_error = output_error(y_predict_train, y_train)\n (precision, recall, f1, _), testing_error = output_error(y_predict, y_test)\n \n # print out metrics\n print 'Average Precision:', np.average(precision)\n print 'Average Recall:', np.average(recall)\n print 'Average F1:', np.average(f1)\n print 'Training Error:', training_error\n print 'Testing Error:', testing_error",
"def metrics(self, weights=None):\n yt = self.ytrue\n yp = self.ypred\n w = weights\n\n if yt.size > 0:\n mse = skm.mean_squared_error(yt, yp, sample_weight=w)\n rmse = np.sqrt(mse)\n mae = skm.mean_absolute_error(yt, yp, sample_weight=w)\n median_absolute_error = skm.median_absolute_error(yt, yp)\n r2_score = skm.r2_score(yt, yp, sample_weight=w)\n ev_score = skm.explained_variance_score(yt, yp, sample_weight=w)\n max_error = skm.max_error(yt, yp)\n support = len(yt)\n else:\n mse = 0.0\n rmse = 0.0\n mae = 0.0\n median_absolute_error = 0.0\n r2_score = 0.0\n ev_score = 0.0\n max_error = 0.0\n support = 0\n\n return {\n \"mean_squared_error\": mse,\n \"root_mean_squared_error\": rmse,\n \"mean_absolute_error\": mae,\n \"median_absolute_error\": median_absolute_error,\n \"r2_score\": r2_score,\n \"explained_variance_score\": ev_score,\n \"max_error\": max_error,\n \"support\": support,\n }"
] | [
"0.6406348",
"0.63713837",
"0.6356689",
"0.63342285",
"0.6287831",
"0.6285578",
"0.6261427",
"0.62577116",
"0.62223923",
"0.6219097",
"0.61927754",
"0.6186985",
"0.61651766",
"0.6127877",
"0.6116654",
"0.60860497",
"0.6075616",
"0.60577536",
"0.6052378",
"0.6042831",
"0.6040221",
"0.6033976",
"0.60065836",
"0.6006008",
"0.5984977",
"0.59799314",
"0.5973644",
"0.59459215",
"0.5945861",
"0.5924497",
"0.59234643",
"0.5918118",
"0.58992386",
"0.58925605",
"0.588867",
"0.5862882",
"0.58549464",
"0.5837997",
"0.5837473",
"0.58253795",
"0.5823158",
"0.5821692",
"0.5802279",
"0.5781245",
"0.57789135",
"0.5778408",
"0.5769503",
"0.57448393",
"0.5735959",
"0.5718302",
"0.57121664",
"0.57044363",
"0.56869054",
"0.567913",
"0.56754464",
"0.5656217",
"0.5655312",
"0.5649343",
"0.56469685",
"0.56406695",
"0.5625529",
"0.5616668",
"0.56154585",
"0.56132954",
"0.56121945",
"0.5611438",
"0.5611086",
"0.56035167",
"0.5598563",
"0.5592767",
"0.5588496",
"0.55870885",
"0.5576277",
"0.5572047",
"0.5571667",
"0.55698675",
"0.5569345",
"0.5565468",
"0.5564751",
"0.55636215",
"0.5557522",
"0.5555983",
"0.5554214",
"0.5553564",
"0.555318",
"0.55529845",
"0.5551747",
"0.5550832",
"0.55472517",
"0.55452627",
"0.5541386",
"0.5536567",
"0.553206",
"0.5527729",
"0.5521642",
"0.551491",
"0.55060875",
"0.55052674",
"0.55048656",
"0.5504097"
] | 0.56184536 | 61 |
(float) The value of the loglikelihood function evaluated at `params`. | def llf(self):
return self.model.loglike(self.params) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compute_log_likelihood(self,params: ndarray) -> float:\n \n pred_mag = self._pred_mag(params,self.times)\n sigma_2 = self.sd_mags**2 \n ln_likelihood = -0.5*np.sum((pred_mag - self.mags)**2 / sigma_2+ np.log(sigma_2))\n\n return ln_likelihood",
"def compute_log_prob(self,params: ndarray) -> float:\n return self.compute_log_prior(params) + self.compute_log_likelihood(params)",
"def log_likelihood(self, params):\n # extract the parameters\n m1 = params['m1']\n m2 = params['m2']\n DL = params['DL']\n Tc = params['Tc']\n iota = params['iota']\n phic = params['phic']\n psi = params['psi']\n thetaS = params['thetaS']\n phiS = params['phiS']\n\n # calculate the model\n model = self._model(time, m1, m2, DL, Tc, iota, phic, psi, thetaS, phiS)\n\n# # normalisation\n# norm = -0.5*self._ndata*LN2PI - self._ndata*self._logsigma\n\n# # chi-squared\n# chisq = np.sum(((self._data - model)/(self._sigma))**2)\n\n return -np.vdot(self._data - model,self._data - model)",
"def likelihood(self,x,params = None,**kwargs):\n return np.exp(self.log_likelihood(x,params=params,**kwargs))",
"def log_likelihood_function(self, instance) -> float:\n return self.prior.factor(instance[0])",
"def __log_likelihood(self, params, *args):\n\t\tX, y, feature_set, lambda_reg, empirical_weights, verbose, sign = args\n\n\t\tno_example = len(X)\n\t\ttotal_logZ = 0\n\t\ttotal_logProb = 0\n\t\texpected_weights = np.zeros(len(feature_set))\n\t\tfor t in range(len(X)):\n\t\t\t# example_features = X[t], example_labels = y[t]\n\n\t\t\tpotential = np.zeros(len(X[t]))\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\t#candidate_features = X[t][i], candidate_label = y[t][i]\n\t\t\t\tpotential[i] = feature_set.calc_inner_product(X[t][i], params)\n\n\t\t\t#scaling\n\t\t\tpotential = potential - np.max(potential, keepdims=True)\n\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\ttotal_logProb += potential[i] * y[t][i]\n\n\t\t\tpotential, Z = self.__softmax(potential)\n\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\tfeature_set.calc_inner_sum(expected_weights, X[t][i], potential[i])\n\n\t\t\ttotal_logZ += log(Z)\n\n\t\t# _params = feature_set.get_regularized_params(params, 'bias')\n\t\t_params = params\n\t\tlog_likelihood = total_logProb - total_logZ - (lambda_reg/2) * np.sum(np.multiply(_params,_params))\n\t\tgradients = empirical_weights - expected_weights - lambda_reg * _params\n\n\t\tglobal SUB_ITERATION_NUM\n\t\tif verbose:\n\t\t\tsub_iteration_str = ' '\n\t\t\tif SUB_ITERATION_NUM > 0:\n\t\t\t\tsub_iteration_str = '(' + '{0:02d}'.format(SUB_ITERATION_NUM) + ')'\n\t\t\tprint(' ', '{0:03d}'.format(ITERATION_NUM), sub_iteration_str, ':', log_likelihood * sign)\n\n\t\tSUB_ITERATION_NUM += 1\n\n\t\treturn sign * log_likelihood, sign * gradients",
"def log_likelihood_function(self, instance: af.ModelInstance) -> float:\r\n model_data = self.model_data_from_instance(instance=instance)\r\n fit = self.fit_from_model_data(model_data=model_data)\r\n return fit.log_likelihood",
"def _compute_log_likelihood(self, parameters):\n raise NotImplementedError('')",
"def neg_log_likelihood(self,params: ndarray) -> float:\n\n return -self.compute_log_likelihood(params)",
"def log_prior(self, params):\n # log likelihood function, see:\n # https://en.wikipedia.org/wiki/Multivariate_normal_distribution#Likelihood_function\n variance = self.std ** 2\n ndim = params.ndim\n mean_diff = params - self.mean\n scaled_sq_err = jnp.dot(mean_diff, mean_diff) / variance\n # log determinant of covariance matrix\n log_det_cov = 2 * ndim * jnp.log(self.std)\n norm_term = ndim * jnp.log(2 * jnp.pi)\n return -0.5 * (log_det_cov + scaled_sq_err + norm_term)",
"def log_likelihood(parameters):\n if len(copula.bounds_param) == 1:\n params = [parameters]\n else:\n param1, param2 = parameters\n params = [param1, param2]\n logl = -np.sum(np.log(copula.get_pdf(psd_obs[0], psd_obs[1], params)))\n return logl",
"def objective_llh(self, params):\n\n try:\n obj = self.log_likelihood(params[0], params[1], params[2:])\n except (LinAlgError, ZeroDivisionError, ValueError):\n obj = -np.inf\n return obj",
"def log_params(params):\n mlflow.log_params(params)",
"def log_likelihood(self, x):\n # set nuisance parameters to their central values!\n predictions = self.get_predictions(self.shortarray_to_array(x), nuisance=False)\n m_obj = flavio.Measurement['Pseudo-measurement for FastFit instance: ' + self.name]\n m_obs = m_obj.all_parameters\n prob_dict = m_obj.get_logprobability_all(predictions)\n ll = sum(prob_dict.values())\n return ll",
"def nloglikeobs(self, params):\n lambda_ = params[0]\n\n ll_output = self._LL(self.endog, rate=lambda_)\n\n return -np.log(ll_output)",
"def compute_log_prior(self,params: ndarray) -> float:\n ln_tE = params[0]\n ln_A0 = params[1]\n ln_deltaT = params[2]\n fbl = params[3]\n mb = params[4]\n\n # Equation (16,15,17) (note that Albrow uses \"log\" for log10)\n log10e = np.log10(np.exp(1))\n ln_pr_ln_tE = np.log(0.476) - ((log10e*ln_tE - 1.333)**2 / 0.330) + np.log(log10e)\n ln_pr_ln_A0 = np.log(0.660) - (1.289*log10e*ln_A0) + np.log(log10e)\n ln_pr_ln_deltaT = np.log(0.156) - ((log10e*ln_deltaT - 1.432)**2 / 0.458) +\\\n np.log(log10e)\n \n # Paper doesnt mention the prior used, but I assume it to be uniform\n ln_pr_fbl = uniform.logpdf(fbl,0.0,1.0)\n\n # Paper doesnr mention the prior used but I will asuumed it to be uniform\n ln_pr_mb = uniform.logpdf(mb,self.mag_min - 1.0, self.mag_max + 1.0)\n \n \n return ln_pr_fbl + ln_pr_ln_A0 + ln_pr_ln_deltaT + ln_pr_ln_tE + ln_pr_mb",
"def __call__(self, params):\n # Construct model for given set of parameters\n mod = self.model(params)\n\n # Input into equation (11) from Anderson (1990)\n # But we want log-likelihood not negative log-likelihood (in MCMC)\n # and so we add the -1.0\n like = np.sum(np.log(mod) + (self.power / mod))\n return -1.0*like",
"def loglike(self, params, *args, **kwargs):\n return np.sum(self.loglikeobs(params, *args, **kwargs))",
"def grad_llh(self, params):\n grad = np.clip(self.grad_log_likelihood(params[0], params[1], params[2:]), SMALLEST_NUMBER,\n LARGEST_NUMBER)\n\n return grad",
"def log_prob_parameters(self, parameters):\n lp = 0.0\n parameters_model = self.get_parameters_model\n index = 0\n\n for parameter in parameters_model:\n dimension = parameter.dimension\n lp += parameter.log_prior(parameters[index: index + dimension])\n\n index += dimension\n\n if not np.isinf(lp):\n lp += self.log_likelihood(parameters[0], parameters[1], parameters[2:])\n\n return lp",
"def log_likelihood(X, parameters):\n check_data_type_column_data(X)\n check_model_params_dict(parameters)\n\n sigma = (1.0/parameters['rho'])**.5\n\n log_likelihood = numpy.sum(norm.logpdf(X,parameters['mu'],sigma))\n\n return log_likelihood",
"def _log_likelihood(self, theta, f, x, y, yerr):\n sigma2 = yerr**2\n return -0.5*np.sum((y - f(theta, x))**2 / sigma2 + 2*np.log(sigma2))",
"def f(self, x):\n error = log_likelihood_calc(x[1], x[0], self.data)\n return error",
"def log_likelihood(self, x):\n return self.log_likelihood_exp(x) + self.log_prior_nuisance_parameters(x)",
"def log_likelihood(self, data, reward_model, bias_params):",
"def loglikelihood(self):\n raise NotImplementedError(\"To be implemented\")",
"def nloglikeobs(self, params):\n #print len(params),\n beta = params[:-2]\n df = params[-2]\n scale = params[-1]\n loc = np.dot(self.exog, beta)\n endog = self.endog\n x = (endog - loc)/scale\n #next part is stats.t._logpdf\n lPx = sps_gamln((df+1)/2) - sps_gamln(df/2.)\n lPx -= 0.5*np_log(df*np_pi) + (df+1)/2.*np_log(1+(x**2)/df)\n lPx -= np_log(scale) # correction for scale\n return -lPx",
"def __calc_likelihood(self, *args):\n params = {}\n for i, p in enumerate(self._par_names):\n if self._par_islog[p]:\n params[p] = np.power(10., args[i])\n else:\n params[p] = args[i]\n return self.return_likelihood(params)",
"def get_total_log_likelihood(self, x, **kwargs):\n pass",
"def log_likelihood(self):\r\n return (-0.5 * self.num_data * self.output_dim * np.log(2.*np.pi) -\r\n 0.5 * self.output_dim * self.K_logdet + self._model_fit_term() + self.likelihood.Z)",
"def likelihood(self):\n if self._likelihood is None:\n self._likelihood = exp(self.log_likelihood)\n if self._likelihood == 0:\n self._likelihood = sys.float_info.min\n return self._likelihood",
"def log_likelihood_function(self, instance):\r\n\r\n xvalues = np.arange(self.data.shape[0])\r\n model_data = instance.profile_from_xvalues(xvalues=xvalues)\r\n residual_map = self.data - model_data\r\n chi_squared_map = (residual_map / self.noise_map) ** 2.0\r\n log_likelihood = -0.5 * sum(chi_squared_map)\r\n\r\n return log_likelihood",
"def lnprior(self, params):\n self.debug.start_function('lnprior')\n lower_bounds = self.mcmc_version.prior_bounds[:, 0]\n upper_bounds = self.mcmc_version.prior_bounds[:, 1]\n inside_bounds = np.logical_and(params > lower_bounds,\n params < upper_bounds)\n\n if False in inside_bounds:\n self.debug.end_function()\n return self.zero_lhood\n\n if self.has_logz:\n z_input = params[self.param_idxs['logz']]\n else:\n z = params[self.param_idxs['z']]\n z_input = np.log10(z / z_sun)\n\n prior_lhood = np.log(self.z_prior(z_input))\n\n # ===== anisotropy/inclination priors =====\n if self.has_two_f:\n xi_ratio = params[self.param_idxs['f_p']] / params[self.param_idxs['f_b']]\n prior_lhood += np.log(self.xi_ratio_prior(xi_ratio))\n elif self.has_xi_ratio:\n xi_ratio = params[self.param_idxs['xi_ratio']]\n d_b = params[self.param_idxs['d_b']]\n prior_lhood += np.log(self.xi_ratio_prior(xi_ratio))\n prior_lhood += np.log(self.d_b_prior(d_b))\n\n self.debug.variable('prior_lhood', prior_lhood, formatter='f')\n self.debug.end_function()\n return prior_lhood",
"def log_likelihood(self, theta, active=None):\n return sum(self.log_likelihood_term(theta, active=active))",
"def neg_log_prob(self,params: ndarray) -> float:\n return -self.compute_log_prob(params)",
"def log_likelihood(self):\n return -0.5*self.D*self.K_logdet + self._model_fit_term() + self.likelihood.Z",
"def loss_fun(model: GPModel, params: dict) -> float:\n py = model.module.call(params, train_ds['index_points'])\n return -py.log_prob(train_ds['y'])",
"def log_marginal_likelihood(X_train,y_train,phi,tau=1.,Ve=1.e-10):",
"def log_likelihood(self):\n\n if self._log_likelihood is None:\n self._log_likelihood = logpdf(x=self.y, cov=self.S)\n return self._log_likelihood",
"def log_likelihood(self, points):\n\t\tpoint_set = list(points)\n\t\tlog_probabilities = [np.log(self.density(point)) for point in point_set]\n\t\treturn sum(log_probabilities)",
"def log_prob(self):",
"def likelihood(mean, logs, x):\n return -0.5 * (logs * 2. + ((x - mean) ** 2) / np.exp(logs * 2.) + GaussianDiag.Log2PI)",
"def log_likelihood(self):\n raise NotImplementedError(\"the log_likelihood property should \"\n \"be defined in the Estimator sub-class\")",
"def likelihood(params,data):\n spec, isnflux, igalflux = data\n chi2=0\n modflux = (params[0]*isnflux + params[1]*igalflux)\n chi2 += sum((spec.flux - modflux)**2)/((0.05*sum(spec.var)**2)/2.0)\n return np.exp(-chi2/2.0)",
"def LLwrapper(params):\n NLL = LogLikelihood(gauss, s)\n return NLL(params[0], params[1])",
"def get_log_likelihood(phi, pred, t, dot_product, weight, reg= 1):\n prior = -0.5* np.sum(np.multiply(weight, weight))\n likelihood = np.multiply(t, np.log(pred+TOLERANCE)) + np.multiply(1.0- t, np.log(1.0-pred+TOLERANCE))\n likelihood = np.sum(likelihood)\n\n return prior + likelihood",
"def figure_of_merit_from(self, parameter_list):\r\n return self.log_likelihood_from(parameter_list=parameter_list)",
"def loglikelihood(self, y):\n raise NotImplementedError",
"def value(self, a):\n if isinstance(a, Term) and a.functor == \"lfi_prob\":\n rval = self._get_weight(*a.args)\n else:\n rval = math.log(float(a))\n return rval",
"def log_likelihood(self):\r\n A = -0.5 * self.num_data * self.output_dim * np.log(2.*np.pi) + 0.5 * np.sum(np.log(self.beta_star)) - 0.5 * np.sum(self.V_star * self.likelihood.Y)\r\n C = -self.output_dim * (np.sum(np.log(np.diag(self.LB))))\r\n D = 0.5 * np.sum(np.square(self._LBi_Lmi_psi1V))\r\n return A + C + D + self.likelihood.Z",
"def grad_neg_log_like(params):\n gp.set_parameter_vector(params)\n return -gp.grad_log_likelihood(y, quiet=True)",
"def _log_probability(self, theta, model, bounds, x, y, yerr):\n lp = self._log_prior(theta, bounds)\n if not np.isfinite(lp):\n return -np.inf\n return lp + self._log_likelihood(theta, model, x, y, yerr)",
"def posterior(self, val, **kwargs) -> float:\n\n data = self.data\n\n # override val with parameters specified via kwargs\n val = copy.deepcopy(val)\n for key, value in kwargs.items():\n setattr(val, key, value)\n\n # extract parameters\n gain = val.gain\n states = val.states\n pi = val.transitions\n pi_conc = val.transitions_conc\n mu_flor = val.mu_flor\n mu_flor_mean = val.mu_flor_mean\n mu_flor_shape = val.mu_flor_shape\n mu_back = val.mu_back\n mu_back_mean = val.mu_back_mean\n mu_back_shape = val.mu_back_shape\n load_weight = val.load_weight\n num_rois = val.num_rois\n num_load = val.num_load\n num_data = val.num_data\n num_states = val.num_states\n\n # calculate shape parameters\n idx = mu_flor_mean > 0\n mu_flor_scale = np.zeros(mu_flor_mean.shape)\n mu_flor_scale[idx] = mu_flor_mean[idx] / mu_flor_shape[idx]\n mu_back_scale = mu_back_mean / mu_back_shape\n # calculate effective pi for collapsed state space when weight on load is taken into account\n pi_eff = pi.copy()\n pi_eff[-1, :] *= load_weight\n pi_eff[-1, -1] = 1 - load_weight\n\n # probability from likelihood\n brightness = np.zeros(shape=data.shape)\n for r in range(num_rois):\n brightness[r, :] = mu_flor @ states_to_pops(states[r, :, :], num_states) + mu_back[r]\n lhood = np.sum(stats.gamma.logpdf(data, a=brightness, scale=gain))\n\n # probability from phototrajectory\n kinetic = 0\n for i in range(num_states):\n if pi_eff[-1, i] > 0:\n kinetic += np.sum(states[:, :, 0] == i) * np.log(pi_eff[-1, i])\n for j in range(num_states):\n if pi_eff[i, j] > 0:\n kinetic += np.sum((states[:, :, :-1] == i) * (states[:, :, 1:] == j)) * np.log(pi_eff[i, j])\n\n # probability from prior\n prior = (\n # prior on fluorophore brightness (ignore dark states)\n np.sum(stats.gamma.logpdf(mu_flor[idx], a=mu_flor_shape[idx], scale=mu_flor_scale[idx]))\n # prior on background brightness\n + np.sum(stats.gamma.logpdf(mu_back, a=mu_back_shape, scale=mu_back_scale))\n # prior on transitions\n + np.sum(Dirichlet.logpdf(pi, pi_conc))\n )\n\n prob = lhood + kinetic + prior\n\n return prob",
"def log_likelihood(y_true, y_pred):\n ll = np.sum(y_true * np.log(y_pred) - y_pred)\n return ll",
"def get_param_score(param, max_value, weight=1):\n return (math.log(1 + param) / math.log(1 + max(param, max_value))) * weight",
"def _calculate_ll(self, x):\n observation_log_probs = self._observation_log_probs(x, mask=None)\n forward_log_probs = self._forward(observation_log_probs)\n log_likelihood = logsumexp(\n forward_log_probs[forward_log_probs.shape[0] - 1, :].numpy())\n return log_likelihood",
"def likelihood_ratio(\n param: np.ndarray, likelihood: np.ndarray, param1: float, param2: float\n) -> float:\n index1 = np.argmin(np.abs(param - param1))\n index2 = np.argmin(np.abs(param - param2))\n return likelihood[index1] / likelihood[index2]",
"def log_likelihood(self):\r\n if self.likelihood.is_heteroscedastic:\r\n A = -0.5 * self.num_data * self.output_dim * np.log(2.*np.pi) + 0.5 * np.sum(np.log(self.likelihood.precision)) - 0.5 * np.sum(self.likelihood.V * self.likelihood.Y)\r\n B = -0.5 * self.output_dim * (np.sum(self.likelihood.precision.flatten() * self.psi0) - np.trace(self._A))\r\n else:\r\n A = -0.5 * self.num_data * self.output_dim * (np.log(2.*np.pi) - np.log(self.likelihood.precision)) - 0.5 * self.likelihood.precision * self.likelihood.trYYT\r\n B = -0.5 * self.output_dim * (np.sum(self.likelihood.precision * self.psi0) - np.trace(self._A))\r\n C = -self.output_dim * (np.sum(np.log(np.diag(self.LB)))) # + 0.5 * self.num_inducing * np.log(sf2))\r\n D = 0.5 * self.data_fit\r\n self._A_part, self._B_part, self._C_part, self._D_part = A, B, C, D\r\n return A + B + C + D + self.likelihood.Z",
"def weight_log(val):\n return val * math.log(val)",
"def likelihood(self):\n \n raise NotImplementedError()",
"def bayesian_info_criterion(log_likelihood, n_params, n_samples):\n return n_params * np.log(n_samples) - 2.0 * log_likelihood",
"def lnprior(params):\n a, b, f = params\n if -10.0 < b < 0. and 0. < a < 10 and 0. < f:\n return 0.0\n\n return -np.inf",
"def relative_likelihood(self) -> Optional[float]:\n return pulumi.get(self, \"relative_likelihood\")",
"def get_log_likelihood(response_probability, response):\n pass",
"def log_likelihood(self, X, Y, theta):\n \n alphas, logZ = self.alpha_chain(X, theta)\n total = 0\n \n s_prev = self.S\n \n for t,s in enumerate(Y):\n total += self.log_psi(theta, t, s_prev, s, X[t]) \n s_prev = s\n \n total -= logZ\n return total / X.shape[0]",
"def log_likelihood(self, theta):\n raise NotImplementedError()",
"def poisson_log_likelihood(x, log_rate):\n return x * log_rate - np.exp(log_rate) - lax.lgamma(x + 1.0)",
"def lnprob(params, cos2, y, yerr):\n\n # Get prior given parameters\n lp = lnprior(params)\n if not np.isfinite(lp):\n return -np.inf\n\n # Include likelihood given data\n llh = lp + lnlike(params, cos2, y, yerr)\n\n return llh",
"def score(self, params, *args, **kwargs):\n try:\n # If an analytic score_obs is available, try this first before\n # falling back to numerical differentiation below\n return self.score_obs(params, *args, **kwargs).sum(0)\n except NotImplementedError:\n # Fallback in case a `loglike` is implemented but `loglikeobs`\n # is not.\n approx_func = (approx_fprime_cs\n if self._use_approx_cs else approx_fprime)\n return approx_func(params, self.loglike, args=args, kwargs=kwargs)",
"def loglike(self, params):\n\n if type(params) is not MixedLMParams:\n params = MixedLMParams.from_packed(params, self.k_fe,\n self.use_sqrt)\n\n fe_params = params.get_fe_params()\n cov_re = params.get_cov_re()\n try:\n cov_re_inv = np.linalg.inv(cov_re)\n except np.linalg.LinAlgError:\n cov_re_inv = None\n _, cov_re_logdet = np.linalg.slogdet(cov_re)\n\n # The residuals\n expval = np.dot(self.exog, fe_params)\n resid_all = self.endog - expval\n\n likeval = 0.\n\n # Handle the covariance penalty\n if self.cov_pen is not None:\n likeval -= self.cov_pen.func(cov_re, cov_re_inv)\n\n # Handle the fixed effects penalty\n if self.fe_pen is not None:\n likeval -= self.fe_pen.func(fe_params)\n\n xvx, qf = 0., 0.\n for k, lab in enumerate(self.group_labels):\n\n exog = self.exog_li[k]\n ex_r = self.exog_re_li[k]\n ex2_r = self.exog_re2_li[k]\n resid = resid_all[self.row_indices[lab]]\n\n # Part 1 of the log likelihood (for both ML and REML)\n ld = _smw_logdet(1., ex_r, ex2_r, cov_re, cov_re_inv,\n cov_re_logdet)\n likeval -= ld / 2.\n\n # Part 2 of the log likelihood (for both ML and REML)\n u = _smw_solve(1., ex_r, ex2_r, cov_re, cov_re_inv, resid)\n qf += np.dot(resid, u)\n\n # Adjustment for REML\n if self.reml:\n mat = _smw_solve(1., ex_r, ex2_r, cov_re, cov_re_inv,\n exog)\n xvx += np.dot(exog.T, mat)\n\n if self.reml:\n likeval -= (self.n_totobs - self.k_fe) * np.log(qf) / 2.\n _,ld = np.linalg.slogdet(xvx)\n likeval -= ld / 2.\n likeval -= (self.n_totobs - self.k_fe) * np.log(2 * np.pi) / 2.\n likeval += ((self.n_totobs - self.k_fe) *\n np.log(self.n_totobs - self.k_fe) / 2.)\n likeval -= (self.n_totobs - self.k_fe) / 2.\n else:\n likeval -= self.n_totobs * np.log(qf) / 2.\n likeval -= self.n_totobs * np.log(2 * np.pi) / 2.\n likeval += self.n_totobs * np.log(self.n_totobs) / 2.\n likeval -= self.n_totobs / 2.\n\n return likeval",
"def log_likelihood(self):\r\n assert not self.likelihood.is_heteroscedastic\r\n A = -0.5*self.batchsize*self.output_dim*(np.log(2.*np.pi) - np.log(self.likelihood.precision))\r\n B = -0.5*self.likelihood.precision*self.output_dim*self.trace_K\r\n Kmm_logdet = 2.*np.sum(np.log(np.diag(self.Lm)))\r\n C = -0.5*self.output_dim*self.data_prop*(Kmm_logdet-self.q_u_logdet - self.num_inducing)\r\n C += -0.5*np.sum(self.LQL * self.B)\r\n D = -0.5*self.likelihood.precision*self.likelihood.trYYT\r\n E = np.sum(self.V*self.projected_mean)\r\n return (A+B+C+D+E)/self.data_prop",
"def likelihood_func(params, key, coeffs_init, t, y, y_err, planet_parameters):\n \n # Replace coefficients\n if len(params) > 0:\n for index, coeff_key in enumerate(key):\n coeffs_init[coeff_key] = params[index]\n \n # Build transit model\n transit_model = build_transit_model(coeffs_init, t, planet_parameters)\n # Build sigma and chi**2\n sigma = np.mean(y_err)\n chi2 = chi_squared(transit_model, y, y_err)\n likelihood = -len(y)*np.log(sigma) - 0.5*len(y)*np.log(2*np.pi) - .5*chi2\n \n return likelihood",
"def log_likelihood(self, theta, x, **kwargs):\n\n u, logdet_dudx, log_a = self.forward(theta, x, **kwargs)\n\n constant = float(-0.5 * self.n_inputs * np.log(2.0 * np.pi))\n # log_likelihood = torch.log(torch.sum(torch.exp(log_a - 0.5 * u ** 2 + logdet_dudx), dim=2))\n log_likelihood = torch.logsumexp(log_a - 0.5 * u**2 + logdet_dudx, dim=2)\n log_likelihood = constant + torch.sum(log_likelihood, dim=1)\n\n return u, log_likelihood",
"def log_likelihood_grad_bias(self, data, reward_model, bias_params):",
"def log_likelihood(self, y_list):\n if self.lambda_mat is None:\n raise ValueError(\"Can't compute model likelihood before fitting!\")\n\n # precision prior distribution given precision hyper-parameters\n prec_distr = stats.gamma(a=self.prec_distr[0],\n scale=self.prec_distr[1] ** -1.0)\n\n # likelihood of projection matrix precision priors given\n # precision hyper-parameters\n lambda_logl = np.sum(\n prec_distr.logpdf(self.lambda_mat['alpha']\n / self.lambda_mat['beta'])\n )\n\n # likelihood of projection matrix values given their precision priors\n a_logl = np.sum(\n stats.norm(loc=0, scale=(self.lambda_mat['beta']\n / self.lambda_mat['alpha']))\n .logpdf(self.A_mat['mu'])\n )\n\n # likelihood of latent feature matrix given kernel matrix,\n # projection matrix, and standard deviation hyper-parameter\n h_logl = np.sum(\n stats.norm(loc=self.A_mat['mu'].transpose() @ self.kernel_mat,\n scale=self.sigma_h)\n .logpdf(self.H_mat['mu'])\n )\n\n # likelihood of bias parameter precision priors given\n # precision hyper-parameters\n weight_prior_logl = np.sum(\n prec_distr.logpdf(np.array(self.weight_priors['alpha'])\n / np.array(self.weight_priors['beta']))\n )\n\n # likelihood of bias parameters given their precision priors\n weight_logl = np.sum(\n stats.norm(loc=0, scale=(np.array(self.weight_priors['beta'])\n / np.array(self.weight_priors['alpha'])))\n .logpdf(self.weight_mat['mu'])\n )\n\n # likelihood of predicted outputs given latent features, bias\n # parameters, and latent feature weight parameters\n f_logl = np.sum(\n stats.norm(\n loc=(self.weight_mat['mu'][1:, :].transpose()\n @ self.H_mat['mu']\n + np.vstack(self.weight_mat['mu'][0, :])),\n scale=1).logpdf(self.output_mat['mu'])\n )\n\n # likelihood of actual output labels given class separation margin\n # and predicted output labels\n y_logl = np.sum(self.get_y_logl(y_list))\n\n return (lambda_logl + a_logl + h_logl\n + weight_prior_logl + weight_logl + f_logl + y_logl)",
"def logp(self, value: TensorType, **kwargs) -> TensorType:",
"def logp(self, value: TensorType, **kwargs) -> TensorType:",
"def calculate_likelihood(truth, log_forecast):\n\n return tf.reduce_sum(truth * log_forecast) # Dimensions [batch_size, N_LABEL_TIMESTEPS, N_LABEL_CLASSES]",
"def entropy(self, params):\n log_std = params[:, :, 1]\n return (log_std + 0.5 * (self.LOG2PI + 1)).sum(dim=-1)",
"def logPriorFlat(paramsVec, params):\n logPrior = 0 #ln(1) = 0\n if params is None:\n #Maximally flat prior: p=1 always\n pass\n else:\n paramsDict = params.valuesdict()\n\n #Loop through parameter bounds and update the prior\n for kindex, key in enumerate(paramsDict.keys()):\n if (params[key].min < paramsVec[kindex] < params[key].max):\n pass\n else:\n logPrior = -np.inf #ln(0) = -inf\n return logPrior",
"def loglikeobs(self, params, transformed=True):\n params = np.array(params, ndmin=1)\n if not transformed:\n params = self.transform_params(params)\n results = self._filter(params)\n return results[5]",
"def parameterized_likelihood(params: NamedParameters):\n return ParamaterizedLikelihood(params)",
"def log_likelihood_gradient(y, tx, w):\n return tx.T.dot(sigmoid(tx.dot(w))-y)",
"def likelihood(ts,w,Phi):\n a = Phi.dot(w)\n return np.exp(a*ts)*sigmoid(-a)",
"def NLL(sample, params):\n mu = params[:,:,0]\n logsigma = params[:,:,1]\n \n c = normalization.to(mu.device)\n inv_sigma = torch.exp(-logsigma)\n tmp = (sample - mu) * inv_sigma\n return torch.mean(0.5 * (tmp * tmp + 2 * logsigma + c))",
"def objective(\n self,\n parameters: object\n ) -> float:\n pass",
"def negative_loglikelihood(targets, estimated_distribution):\n return -estimated_distribution.log_prob(targets)",
"def get_log(p):\n if p==0:\n return 0.\n return p*np.log2(p)",
"def _getCurrentPosteriorLikelihood(self): \n likelihood = 0\n T = self.T\n K= self.K \n final_likelihood = 0\n total_log_lik = 0\n \n for n in range(1,self.N+1):\n # Compute total Likelihood for all Instances P(x1...xn / theta) \n tot_lik = 0\n tot_scale_factor = 0\n \n for i in range(1,self.K+1): \n likelihood = self.posterior_state_trellis[n][(T,i)]\n tot_lik = tot_lik + likelihood\n\n try:\n total_log_lik = math.log(tot_lik) \n except ValueError:\n ipdb.set_trace()\n \n for t in range(1,self.T):\n scale_factor = self.forward_scaling_vector[n][t] \n tot_scale_factor = tot_scale_factor + math.log(scale_factor)\n\n final_likelihood = final_likelihood + (total_log_lik - tot_scale_factor)\n\n return final_likelihood",
"def log_likelihood_function(self, instance):\r\n\r\n try:\r\n return self.fit_interferometer_for_instance(\r\n instance=instance\r\n ).figure_of_merit\r\n except (\r\n exc.PixelizationException,\r\n exc.InversionException,\r\n exc.GridException,\r\n OverflowError,\r\n ) as e:\r\n raise exc.FitException from e",
"def score(self, params, transformed=True):\n params = np.array(params, ndmin=1)\n return approx_fprime_cs(params, self.loglike, args=(transformed,))",
"def log_likelihood_grad_rew(self, data, reward_model, bias_params):",
"def log_likelihood(self, state, obs, act):\n indices = np.array([self.Gittins[state['successes'][i], state['failures'][i]] for i in range(self.env.n_arms)])\n greedy_arms = np.where(np.isclose(indices,indices.max()))[0]\n return np.log(1/len(greedy_arms)) if act in greedy_arms else -1e8",
"def loglikeobs(self, params, *args, **kwargs):\n raise NotImplementedError # pragma: no cover",
"def log_gaussian_likelihood(x, mu, log_std):\n log_gaussian_prob = -0.5 * (((x - mu) / (tf.exp(log_std) + EPS)) ** 2 - log_std - 0.5 * np.log(2 * np.pi))\n return tf.reduce_sum(log_gaussian_prob, axis=1)",
"def log1p(x):\n return 0.0",
"def lnprior(self,theta):\n kwargs = dict(zip(self.params,theta))\n err = np.seterr(invalid='raise')\n try:\n lnprior = np.sum(np.log([self.priors[k](v) for k,v in kwargs.items()]))\n except FloatingPointError,ValueError:\n lnprior = -np.inf\n np.seterr(**err)\n return lnprior",
"def log_marginal_likelihood(self) -> tf.Tensor:\n L = tf.linalg.cholesky(self.likelihood.add_to(self.KXX))\n return tf.reduce_sum(multivariate_normal(self._Y, self._mean, L))",
"def log_likelihood(self, theta=None, phi=None):\n theta = theta if theta is not None else self.theta\n phi = phi if phi is not None else self.phi\n ret = 0.\n for m, dm in enumerate(self.corpus):\n for n, w_mn in enumerate(dm):\n tp = 0.\n for k in range(self.n_components):\n tp += theta[m, k] * phi[k, w_mn]\n ret += np.log(tp)\n return ret",
"def log_likelihood(self, X, Y):\n\t\tr,c = twod(Y).shape\n\t\tif r == 1 and c != 1:\n\t\t\tY = twod(Y).T\n\n\t\tsoft = self.predict_soft(X)\n\t\treturn np.mean(np.sum(np.log(np.power(soft, Y, )), 1), 0)",
"def objective_function(params):\n cirq_circuit = variational_state_evolve(params)\n mean_value = self.expectation(\n all_qubits_in_circuit, cirq_circuit, hamiltonian)\n self._current_expectation = mean_value\n return mean_value"
] | [
"0.7583201",
"0.75121284",
"0.7457546",
"0.7209427",
"0.7124011",
"0.70207",
"0.6955007",
"0.69263536",
"0.68272287",
"0.6826398",
"0.6725675",
"0.66759413",
"0.66617227",
"0.66419446",
"0.6640716",
"0.661356",
"0.65459085",
"0.65014017",
"0.64994335",
"0.64519876",
"0.6434851",
"0.6423711",
"0.64190364",
"0.6412945",
"0.6409324",
"0.6381725",
"0.6336786",
"0.6309134",
"0.62959677",
"0.6262083",
"0.6227528",
"0.6220239",
"0.6187902",
"0.6185113",
"0.6143812",
"0.61414236",
"0.6141413",
"0.6123618",
"0.61121815",
"0.6111343",
"0.61082786",
"0.6061131",
"0.6041884",
"0.6020639",
"0.60174507",
"0.6009481",
"0.59847873",
"0.59819806",
"0.5964474",
"0.59469974",
"0.59455854",
"0.5937992",
"0.59337425",
"0.5919133",
"0.58909076",
"0.58903253",
"0.58767015",
"0.5875236",
"0.5872267",
"0.58477664",
"0.58205235",
"0.5799763",
"0.57955533",
"0.57906663",
"0.5780087",
"0.57785517",
"0.57520753",
"0.5750667",
"0.5742591",
"0.57203436",
"0.5720056",
"0.5711202",
"0.5704996",
"0.56972396",
"0.56864",
"0.5667041",
"0.5667041",
"0.56581044",
"0.5654766",
"0.5646105",
"0.5640658",
"0.5635249",
"0.5633265",
"0.562917",
"0.5628895",
"0.56188637",
"0.5617177",
"0.56076306",
"0.5602952",
"0.56018955",
"0.55990064",
"0.5594984",
"0.5589748",
"0.55864483",
"0.5582473",
"0.557823",
"0.557803",
"0.55740154",
"0.55731004",
"0.5572044",
"0.5568623"
] | 0.0 | -1 |
(float) The value of the loglikelihood function evaluated at `params`. | def llf_obs(self):
return self.model.loglikeobs(self.params) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compute_log_likelihood(self,params: ndarray) -> float:\n \n pred_mag = self._pred_mag(params,self.times)\n sigma_2 = self.sd_mags**2 \n ln_likelihood = -0.5*np.sum((pred_mag - self.mags)**2 / sigma_2+ np.log(sigma_2))\n\n return ln_likelihood",
"def compute_log_prob(self,params: ndarray) -> float:\n return self.compute_log_prior(params) + self.compute_log_likelihood(params)",
"def log_likelihood(self, params):\n # extract the parameters\n m1 = params['m1']\n m2 = params['m2']\n DL = params['DL']\n Tc = params['Tc']\n iota = params['iota']\n phic = params['phic']\n psi = params['psi']\n thetaS = params['thetaS']\n phiS = params['phiS']\n\n # calculate the model\n model = self._model(time, m1, m2, DL, Tc, iota, phic, psi, thetaS, phiS)\n\n# # normalisation\n# norm = -0.5*self._ndata*LN2PI - self._ndata*self._logsigma\n\n# # chi-squared\n# chisq = np.sum(((self._data - model)/(self._sigma))**2)\n\n return -np.vdot(self._data - model,self._data - model)",
"def likelihood(self,x,params = None,**kwargs):\n return np.exp(self.log_likelihood(x,params=params,**kwargs))",
"def log_likelihood_function(self, instance) -> float:\n return self.prior.factor(instance[0])",
"def __log_likelihood(self, params, *args):\n\t\tX, y, feature_set, lambda_reg, empirical_weights, verbose, sign = args\n\n\t\tno_example = len(X)\n\t\ttotal_logZ = 0\n\t\ttotal_logProb = 0\n\t\texpected_weights = np.zeros(len(feature_set))\n\t\tfor t in range(len(X)):\n\t\t\t# example_features = X[t], example_labels = y[t]\n\n\t\t\tpotential = np.zeros(len(X[t]))\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\t#candidate_features = X[t][i], candidate_label = y[t][i]\n\t\t\t\tpotential[i] = feature_set.calc_inner_product(X[t][i], params)\n\n\t\t\t#scaling\n\t\t\tpotential = potential - np.max(potential, keepdims=True)\n\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\ttotal_logProb += potential[i] * y[t][i]\n\n\t\t\tpotential, Z = self.__softmax(potential)\n\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\tfeature_set.calc_inner_sum(expected_weights, X[t][i], potential[i])\n\n\t\t\ttotal_logZ += log(Z)\n\n\t\t# _params = feature_set.get_regularized_params(params, 'bias')\n\t\t_params = params\n\t\tlog_likelihood = total_logProb - total_logZ - (lambda_reg/2) * np.sum(np.multiply(_params,_params))\n\t\tgradients = empirical_weights - expected_weights - lambda_reg * _params\n\n\t\tglobal SUB_ITERATION_NUM\n\t\tif verbose:\n\t\t\tsub_iteration_str = ' '\n\t\t\tif SUB_ITERATION_NUM > 0:\n\t\t\t\tsub_iteration_str = '(' + '{0:02d}'.format(SUB_ITERATION_NUM) + ')'\n\t\t\tprint(' ', '{0:03d}'.format(ITERATION_NUM), sub_iteration_str, ':', log_likelihood * sign)\n\n\t\tSUB_ITERATION_NUM += 1\n\n\t\treturn sign * log_likelihood, sign * gradients",
"def log_likelihood_function(self, instance: af.ModelInstance) -> float:\r\n model_data = self.model_data_from_instance(instance=instance)\r\n fit = self.fit_from_model_data(model_data=model_data)\r\n return fit.log_likelihood",
"def _compute_log_likelihood(self, parameters):\n raise NotImplementedError('')",
"def neg_log_likelihood(self,params: ndarray) -> float:\n\n return -self.compute_log_likelihood(params)",
"def log_prior(self, params):\n # log likelihood function, see:\n # https://en.wikipedia.org/wiki/Multivariate_normal_distribution#Likelihood_function\n variance = self.std ** 2\n ndim = params.ndim\n mean_diff = params - self.mean\n scaled_sq_err = jnp.dot(mean_diff, mean_diff) / variance\n # log determinant of covariance matrix\n log_det_cov = 2 * ndim * jnp.log(self.std)\n norm_term = ndim * jnp.log(2 * jnp.pi)\n return -0.5 * (log_det_cov + scaled_sq_err + norm_term)",
"def log_likelihood(parameters):\n if len(copula.bounds_param) == 1:\n params = [parameters]\n else:\n param1, param2 = parameters\n params = [param1, param2]\n logl = -np.sum(np.log(copula.get_pdf(psd_obs[0], psd_obs[1], params)))\n return logl",
"def objective_llh(self, params):\n\n try:\n obj = self.log_likelihood(params[0], params[1], params[2:])\n except (LinAlgError, ZeroDivisionError, ValueError):\n obj = -np.inf\n return obj",
"def log_params(params):\n mlflow.log_params(params)",
"def log_likelihood(self, x):\n # set nuisance parameters to their central values!\n predictions = self.get_predictions(self.shortarray_to_array(x), nuisance=False)\n m_obj = flavio.Measurement['Pseudo-measurement for FastFit instance: ' + self.name]\n m_obs = m_obj.all_parameters\n prob_dict = m_obj.get_logprobability_all(predictions)\n ll = sum(prob_dict.values())\n return ll",
"def nloglikeobs(self, params):\n lambda_ = params[0]\n\n ll_output = self._LL(self.endog, rate=lambda_)\n\n return -np.log(ll_output)",
"def compute_log_prior(self,params: ndarray) -> float:\n ln_tE = params[0]\n ln_A0 = params[1]\n ln_deltaT = params[2]\n fbl = params[3]\n mb = params[4]\n\n # Equation (16,15,17) (note that Albrow uses \"log\" for log10)\n log10e = np.log10(np.exp(1))\n ln_pr_ln_tE = np.log(0.476) - ((log10e*ln_tE - 1.333)**2 / 0.330) + np.log(log10e)\n ln_pr_ln_A0 = np.log(0.660) - (1.289*log10e*ln_A0) + np.log(log10e)\n ln_pr_ln_deltaT = np.log(0.156) - ((log10e*ln_deltaT - 1.432)**2 / 0.458) +\\\n np.log(log10e)\n \n # Paper doesnt mention the prior used, but I assume it to be uniform\n ln_pr_fbl = uniform.logpdf(fbl,0.0,1.0)\n\n # Paper doesnr mention the prior used but I will asuumed it to be uniform\n ln_pr_mb = uniform.logpdf(mb,self.mag_min - 1.0, self.mag_max + 1.0)\n \n \n return ln_pr_fbl + ln_pr_ln_A0 + ln_pr_ln_deltaT + ln_pr_ln_tE + ln_pr_mb",
"def __call__(self, params):\n # Construct model for given set of parameters\n mod = self.model(params)\n\n # Input into equation (11) from Anderson (1990)\n # But we want log-likelihood not negative log-likelihood (in MCMC)\n # and so we add the -1.0\n like = np.sum(np.log(mod) + (self.power / mod))\n return -1.0*like",
"def loglike(self, params, *args, **kwargs):\n return np.sum(self.loglikeobs(params, *args, **kwargs))",
"def grad_llh(self, params):\n grad = np.clip(self.grad_log_likelihood(params[0], params[1], params[2:]), SMALLEST_NUMBER,\n LARGEST_NUMBER)\n\n return grad",
"def log_prob_parameters(self, parameters):\n lp = 0.0\n parameters_model = self.get_parameters_model\n index = 0\n\n for parameter in parameters_model:\n dimension = parameter.dimension\n lp += parameter.log_prior(parameters[index: index + dimension])\n\n index += dimension\n\n if not np.isinf(lp):\n lp += self.log_likelihood(parameters[0], parameters[1], parameters[2:])\n\n return lp",
"def log_likelihood(X, parameters):\n check_data_type_column_data(X)\n check_model_params_dict(parameters)\n\n sigma = (1.0/parameters['rho'])**.5\n\n log_likelihood = numpy.sum(norm.logpdf(X,parameters['mu'],sigma))\n\n return log_likelihood",
"def _log_likelihood(self, theta, f, x, y, yerr):\n sigma2 = yerr**2\n return -0.5*np.sum((y - f(theta, x))**2 / sigma2 + 2*np.log(sigma2))",
"def f(self, x):\n error = log_likelihood_calc(x[1], x[0], self.data)\n return error",
"def log_likelihood(self, x):\n return self.log_likelihood_exp(x) + self.log_prior_nuisance_parameters(x)",
"def log_likelihood(self, data, reward_model, bias_params):",
"def loglikelihood(self):\n raise NotImplementedError(\"To be implemented\")",
"def nloglikeobs(self, params):\n #print len(params),\n beta = params[:-2]\n df = params[-2]\n scale = params[-1]\n loc = np.dot(self.exog, beta)\n endog = self.endog\n x = (endog - loc)/scale\n #next part is stats.t._logpdf\n lPx = sps_gamln((df+1)/2) - sps_gamln(df/2.)\n lPx -= 0.5*np_log(df*np_pi) + (df+1)/2.*np_log(1+(x**2)/df)\n lPx -= np_log(scale) # correction for scale\n return -lPx",
"def __calc_likelihood(self, *args):\n params = {}\n for i, p in enumerate(self._par_names):\n if self._par_islog[p]:\n params[p] = np.power(10., args[i])\n else:\n params[p] = args[i]\n return self.return_likelihood(params)",
"def get_total_log_likelihood(self, x, **kwargs):\n pass",
"def log_likelihood(self):\r\n return (-0.5 * self.num_data * self.output_dim * np.log(2.*np.pi) -\r\n 0.5 * self.output_dim * self.K_logdet + self._model_fit_term() + self.likelihood.Z)",
"def likelihood(self):\n if self._likelihood is None:\n self._likelihood = exp(self.log_likelihood)\n if self._likelihood == 0:\n self._likelihood = sys.float_info.min\n return self._likelihood",
"def log_likelihood_function(self, instance):\r\n\r\n xvalues = np.arange(self.data.shape[0])\r\n model_data = instance.profile_from_xvalues(xvalues=xvalues)\r\n residual_map = self.data - model_data\r\n chi_squared_map = (residual_map / self.noise_map) ** 2.0\r\n log_likelihood = -0.5 * sum(chi_squared_map)\r\n\r\n return log_likelihood",
"def lnprior(self, params):\n self.debug.start_function('lnprior')\n lower_bounds = self.mcmc_version.prior_bounds[:, 0]\n upper_bounds = self.mcmc_version.prior_bounds[:, 1]\n inside_bounds = np.logical_and(params > lower_bounds,\n params < upper_bounds)\n\n if False in inside_bounds:\n self.debug.end_function()\n return self.zero_lhood\n\n if self.has_logz:\n z_input = params[self.param_idxs['logz']]\n else:\n z = params[self.param_idxs['z']]\n z_input = np.log10(z / z_sun)\n\n prior_lhood = np.log(self.z_prior(z_input))\n\n # ===== anisotropy/inclination priors =====\n if self.has_two_f:\n xi_ratio = params[self.param_idxs['f_p']] / params[self.param_idxs['f_b']]\n prior_lhood += np.log(self.xi_ratio_prior(xi_ratio))\n elif self.has_xi_ratio:\n xi_ratio = params[self.param_idxs['xi_ratio']]\n d_b = params[self.param_idxs['d_b']]\n prior_lhood += np.log(self.xi_ratio_prior(xi_ratio))\n prior_lhood += np.log(self.d_b_prior(d_b))\n\n self.debug.variable('prior_lhood', prior_lhood, formatter='f')\n self.debug.end_function()\n return prior_lhood",
"def log_likelihood(self, theta, active=None):\n return sum(self.log_likelihood_term(theta, active=active))",
"def neg_log_prob(self,params: ndarray) -> float:\n return -self.compute_log_prob(params)",
"def log_likelihood(self):\n return -0.5*self.D*self.K_logdet + self._model_fit_term() + self.likelihood.Z",
"def loss_fun(model: GPModel, params: dict) -> float:\n py = model.module.call(params, train_ds['index_points'])\n return -py.log_prob(train_ds['y'])",
"def log_marginal_likelihood(X_train,y_train,phi,tau=1.,Ve=1.e-10):",
"def log_likelihood(self):\n\n if self._log_likelihood is None:\n self._log_likelihood = logpdf(x=self.y, cov=self.S)\n return self._log_likelihood",
"def log_likelihood(self, points):\n\t\tpoint_set = list(points)\n\t\tlog_probabilities = [np.log(self.density(point)) for point in point_set]\n\t\treturn sum(log_probabilities)",
"def log_prob(self):",
"def likelihood(mean, logs, x):\n return -0.5 * (logs * 2. + ((x - mean) ** 2) / np.exp(logs * 2.) + GaussianDiag.Log2PI)",
"def log_likelihood(self):\n raise NotImplementedError(\"the log_likelihood property should \"\n \"be defined in the Estimator sub-class\")",
"def likelihood(params,data):\n spec, isnflux, igalflux = data\n chi2=0\n modflux = (params[0]*isnflux + params[1]*igalflux)\n chi2 += sum((spec.flux - modflux)**2)/((0.05*sum(spec.var)**2)/2.0)\n return np.exp(-chi2/2.0)",
"def LLwrapper(params):\n NLL = LogLikelihood(gauss, s)\n return NLL(params[0], params[1])",
"def get_log_likelihood(phi, pred, t, dot_product, weight, reg= 1):\n prior = -0.5* np.sum(np.multiply(weight, weight))\n likelihood = np.multiply(t, np.log(pred+TOLERANCE)) + np.multiply(1.0- t, np.log(1.0-pred+TOLERANCE))\n likelihood = np.sum(likelihood)\n\n return prior + likelihood",
"def figure_of_merit_from(self, parameter_list):\r\n return self.log_likelihood_from(parameter_list=parameter_list)",
"def loglikelihood(self, y):\n raise NotImplementedError",
"def value(self, a):\n if isinstance(a, Term) and a.functor == \"lfi_prob\":\n rval = self._get_weight(*a.args)\n else:\n rval = math.log(float(a))\n return rval",
"def log_likelihood(self):\r\n A = -0.5 * self.num_data * self.output_dim * np.log(2.*np.pi) + 0.5 * np.sum(np.log(self.beta_star)) - 0.5 * np.sum(self.V_star * self.likelihood.Y)\r\n C = -self.output_dim * (np.sum(np.log(np.diag(self.LB))))\r\n D = 0.5 * np.sum(np.square(self._LBi_Lmi_psi1V))\r\n return A + C + D + self.likelihood.Z",
"def grad_neg_log_like(params):\n gp.set_parameter_vector(params)\n return -gp.grad_log_likelihood(y, quiet=True)",
"def _log_probability(self, theta, model, bounds, x, y, yerr):\n lp = self._log_prior(theta, bounds)\n if not np.isfinite(lp):\n return -np.inf\n return lp + self._log_likelihood(theta, model, x, y, yerr)",
"def posterior(self, val, **kwargs) -> float:\n\n data = self.data\n\n # override val with parameters specified via kwargs\n val = copy.deepcopy(val)\n for key, value in kwargs.items():\n setattr(val, key, value)\n\n # extract parameters\n gain = val.gain\n states = val.states\n pi = val.transitions\n pi_conc = val.transitions_conc\n mu_flor = val.mu_flor\n mu_flor_mean = val.mu_flor_mean\n mu_flor_shape = val.mu_flor_shape\n mu_back = val.mu_back\n mu_back_mean = val.mu_back_mean\n mu_back_shape = val.mu_back_shape\n load_weight = val.load_weight\n num_rois = val.num_rois\n num_load = val.num_load\n num_data = val.num_data\n num_states = val.num_states\n\n # calculate shape parameters\n idx = mu_flor_mean > 0\n mu_flor_scale = np.zeros(mu_flor_mean.shape)\n mu_flor_scale[idx] = mu_flor_mean[idx] / mu_flor_shape[idx]\n mu_back_scale = mu_back_mean / mu_back_shape\n # calculate effective pi for collapsed state space when weight on load is taken into account\n pi_eff = pi.copy()\n pi_eff[-1, :] *= load_weight\n pi_eff[-1, -1] = 1 - load_weight\n\n # probability from likelihood\n brightness = np.zeros(shape=data.shape)\n for r in range(num_rois):\n brightness[r, :] = mu_flor @ states_to_pops(states[r, :, :], num_states) + mu_back[r]\n lhood = np.sum(stats.gamma.logpdf(data, a=brightness, scale=gain))\n\n # probability from phototrajectory\n kinetic = 0\n for i in range(num_states):\n if pi_eff[-1, i] > 0:\n kinetic += np.sum(states[:, :, 0] == i) * np.log(pi_eff[-1, i])\n for j in range(num_states):\n if pi_eff[i, j] > 0:\n kinetic += np.sum((states[:, :, :-1] == i) * (states[:, :, 1:] == j)) * np.log(pi_eff[i, j])\n\n # probability from prior\n prior = (\n # prior on fluorophore brightness (ignore dark states)\n np.sum(stats.gamma.logpdf(mu_flor[idx], a=mu_flor_shape[idx], scale=mu_flor_scale[idx]))\n # prior on background brightness\n + np.sum(stats.gamma.logpdf(mu_back, a=mu_back_shape, scale=mu_back_scale))\n # prior on transitions\n + np.sum(Dirichlet.logpdf(pi, pi_conc))\n )\n\n prob = lhood + kinetic + prior\n\n return prob",
"def log_likelihood(y_true, y_pred):\n ll = np.sum(y_true * np.log(y_pred) - y_pred)\n return ll",
"def get_param_score(param, max_value, weight=1):\n return (math.log(1 + param) / math.log(1 + max(param, max_value))) * weight",
"def _calculate_ll(self, x):\n observation_log_probs = self._observation_log_probs(x, mask=None)\n forward_log_probs = self._forward(observation_log_probs)\n log_likelihood = logsumexp(\n forward_log_probs[forward_log_probs.shape[0] - 1, :].numpy())\n return log_likelihood",
"def likelihood_ratio(\n param: np.ndarray, likelihood: np.ndarray, param1: float, param2: float\n) -> float:\n index1 = np.argmin(np.abs(param - param1))\n index2 = np.argmin(np.abs(param - param2))\n return likelihood[index1] / likelihood[index2]",
"def log_likelihood(self):\r\n if self.likelihood.is_heteroscedastic:\r\n A = -0.5 * self.num_data * self.output_dim * np.log(2.*np.pi) + 0.5 * np.sum(np.log(self.likelihood.precision)) - 0.5 * np.sum(self.likelihood.V * self.likelihood.Y)\r\n B = -0.5 * self.output_dim * (np.sum(self.likelihood.precision.flatten() * self.psi0) - np.trace(self._A))\r\n else:\r\n A = -0.5 * self.num_data * self.output_dim * (np.log(2.*np.pi) - np.log(self.likelihood.precision)) - 0.5 * self.likelihood.precision * self.likelihood.trYYT\r\n B = -0.5 * self.output_dim * (np.sum(self.likelihood.precision * self.psi0) - np.trace(self._A))\r\n C = -self.output_dim * (np.sum(np.log(np.diag(self.LB)))) # + 0.5 * self.num_inducing * np.log(sf2))\r\n D = 0.5 * self.data_fit\r\n self._A_part, self._B_part, self._C_part, self._D_part = A, B, C, D\r\n return A + B + C + D + self.likelihood.Z",
"def weight_log(val):\n return val * math.log(val)",
"def likelihood(self):\n \n raise NotImplementedError()",
"def bayesian_info_criterion(log_likelihood, n_params, n_samples):\n return n_params * np.log(n_samples) - 2.0 * log_likelihood",
"def lnprior(params):\n a, b, f = params\n if -10.0 < b < 0. and 0. < a < 10 and 0. < f:\n return 0.0\n\n return -np.inf",
"def relative_likelihood(self) -> Optional[float]:\n return pulumi.get(self, \"relative_likelihood\")",
"def get_log_likelihood(response_probability, response):\n pass",
"def log_likelihood(self, X, Y, theta):\n \n alphas, logZ = self.alpha_chain(X, theta)\n total = 0\n \n s_prev = self.S\n \n for t,s in enumerate(Y):\n total += self.log_psi(theta, t, s_prev, s, X[t]) \n s_prev = s\n \n total -= logZ\n return total / X.shape[0]",
"def log_likelihood(self, theta):\n raise NotImplementedError()",
"def poisson_log_likelihood(x, log_rate):\n return x * log_rate - np.exp(log_rate) - lax.lgamma(x + 1.0)",
"def lnprob(params, cos2, y, yerr):\n\n # Get prior given parameters\n lp = lnprior(params)\n if not np.isfinite(lp):\n return -np.inf\n\n # Include likelihood given data\n llh = lp + lnlike(params, cos2, y, yerr)\n\n return llh",
"def score(self, params, *args, **kwargs):\n try:\n # If an analytic score_obs is available, try this first before\n # falling back to numerical differentiation below\n return self.score_obs(params, *args, **kwargs).sum(0)\n except NotImplementedError:\n # Fallback in case a `loglike` is implemented but `loglikeobs`\n # is not.\n approx_func = (approx_fprime_cs\n if self._use_approx_cs else approx_fprime)\n return approx_func(params, self.loglike, args=args, kwargs=kwargs)",
"def loglike(self, params):\n\n if type(params) is not MixedLMParams:\n params = MixedLMParams.from_packed(params, self.k_fe,\n self.use_sqrt)\n\n fe_params = params.get_fe_params()\n cov_re = params.get_cov_re()\n try:\n cov_re_inv = np.linalg.inv(cov_re)\n except np.linalg.LinAlgError:\n cov_re_inv = None\n _, cov_re_logdet = np.linalg.slogdet(cov_re)\n\n # The residuals\n expval = np.dot(self.exog, fe_params)\n resid_all = self.endog - expval\n\n likeval = 0.\n\n # Handle the covariance penalty\n if self.cov_pen is not None:\n likeval -= self.cov_pen.func(cov_re, cov_re_inv)\n\n # Handle the fixed effects penalty\n if self.fe_pen is not None:\n likeval -= self.fe_pen.func(fe_params)\n\n xvx, qf = 0., 0.\n for k, lab in enumerate(self.group_labels):\n\n exog = self.exog_li[k]\n ex_r = self.exog_re_li[k]\n ex2_r = self.exog_re2_li[k]\n resid = resid_all[self.row_indices[lab]]\n\n # Part 1 of the log likelihood (for both ML and REML)\n ld = _smw_logdet(1., ex_r, ex2_r, cov_re, cov_re_inv,\n cov_re_logdet)\n likeval -= ld / 2.\n\n # Part 2 of the log likelihood (for both ML and REML)\n u = _smw_solve(1., ex_r, ex2_r, cov_re, cov_re_inv, resid)\n qf += np.dot(resid, u)\n\n # Adjustment for REML\n if self.reml:\n mat = _smw_solve(1., ex_r, ex2_r, cov_re, cov_re_inv,\n exog)\n xvx += np.dot(exog.T, mat)\n\n if self.reml:\n likeval -= (self.n_totobs - self.k_fe) * np.log(qf) / 2.\n _,ld = np.linalg.slogdet(xvx)\n likeval -= ld / 2.\n likeval -= (self.n_totobs - self.k_fe) * np.log(2 * np.pi) / 2.\n likeval += ((self.n_totobs - self.k_fe) *\n np.log(self.n_totobs - self.k_fe) / 2.)\n likeval -= (self.n_totobs - self.k_fe) / 2.\n else:\n likeval -= self.n_totobs * np.log(qf) / 2.\n likeval -= self.n_totobs * np.log(2 * np.pi) / 2.\n likeval += self.n_totobs * np.log(self.n_totobs) / 2.\n likeval -= self.n_totobs / 2.\n\n return likeval",
"def log_likelihood(self):\r\n assert not self.likelihood.is_heteroscedastic\r\n A = -0.5*self.batchsize*self.output_dim*(np.log(2.*np.pi) - np.log(self.likelihood.precision))\r\n B = -0.5*self.likelihood.precision*self.output_dim*self.trace_K\r\n Kmm_logdet = 2.*np.sum(np.log(np.diag(self.Lm)))\r\n C = -0.5*self.output_dim*self.data_prop*(Kmm_logdet-self.q_u_logdet - self.num_inducing)\r\n C += -0.5*np.sum(self.LQL * self.B)\r\n D = -0.5*self.likelihood.precision*self.likelihood.trYYT\r\n E = np.sum(self.V*self.projected_mean)\r\n return (A+B+C+D+E)/self.data_prop",
"def likelihood_func(params, key, coeffs_init, t, y, y_err, planet_parameters):\n \n # Replace coefficients\n if len(params) > 0:\n for index, coeff_key in enumerate(key):\n coeffs_init[coeff_key] = params[index]\n \n # Build transit model\n transit_model = build_transit_model(coeffs_init, t, planet_parameters)\n # Build sigma and chi**2\n sigma = np.mean(y_err)\n chi2 = chi_squared(transit_model, y, y_err)\n likelihood = -len(y)*np.log(sigma) - 0.5*len(y)*np.log(2*np.pi) - .5*chi2\n \n return likelihood",
"def log_likelihood(self, theta, x, **kwargs):\n\n u, logdet_dudx, log_a = self.forward(theta, x, **kwargs)\n\n constant = float(-0.5 * self.n_inputs * np.log(2.0 * np.pi))\n # log_likelihood = torch.log(torch.sum(torch.exp(log_a - 0.5 * u ** 2 + logdet_dudx), dim=2))\n log_likelihood = torch.logsumexp(log_a - 0.5 * u**2 + logdet_dudx, dim=2)\n log_likelihood = constant + torch.sum(log_likelihood, dim=1)\n\n return u, log_likelihood",
"def log_likelihood_grad_bias(self, data, reward_model, bias_params):",
"def log_likelihood(self, y_list):\n if self.lambda_mat is None:\n raise ValueError(\"Can't compute model likelihood before fitting!\")\n\n # precision prior distribution given precision hyper-parameters\n prec_distr = stats.gamma(a=self.prec_distr[0],\n scale=self.prec_distr[1] ** -1.0)\n\n # likelihood of projection matrix precision priors given\n # precision hyper-parameters\n lambda_logl = np.sum(\n prec_distr.logpdf(self.lambda_mat['alpha']\n / self.lambda_mat['beta'])\n )\n\n # likelihood of projection matrix values given their precision priors\n a_logl = np.sum(\n stats.norm(loc=0, scale=(self.lambda_mat['beta']\n / self.lambda_mat['alpha']))\n .logpdf(self.A_mat['mu'])\n )\n\n # likelihood of latent feature matrix given kernel matrix,\n # projection matrix, and standard deviation hyper-parameter\n h_logl = np.sum(\n stats.norm(loc=self.A_mat['mu'].transpose() @ self.kernel_mat,\n scale=self.sigma_h)\n .logpdf(self.H_mat['mu'])\n )\n\n # likelihood of bias parameter precision priors given\n # precision hyper-parameters\n weight_prior_logl = np.sum(\n prec_distr.logpdf(np.array(self.weight_priors['alpha'])\n / np.array(self.weight_priors['beta']))\n )\n\n # likelihood of bias parameters given their precision priors\n weight_logl = np.sum(\n stats.norm(loc=0, scale=(np.array(self.weight_priors['beta'])\n / np.array(self.weight_priors['alpha'])))\n .logpdf(self.weight_mat['mu'])\n )\n\n # likelihood of predicted outputs given latent features, bias\n # parameters, and latent feature weight parameters\n f_logl = np.sum(\n stats.norm(\n loc=(self.weight_mat['mu'][1:, :].transpose()\n @ self.H_mat['mu']\n + np.vstack(self.weight_mat['mu'][0, :])),\n scale=1).logpdf(self.output_mat['mu'])\n )\n\n # likelihood of actual output labels given class separation margin\n # and predicted output labels\n y_logl = np.sum(self.get_y_logl(y_list))\n\n return (lambda_logl + a_logl + h_logl\n + weight_prior_logl + weight_logl + f_logl + y_logl)",
"def logp(self, value: TensorType, **kwargs) -> TensorType:",
"def logp(self, value: TensorType, **kwargs) -> TensorType:",
"def calculate_likelihood(truth, log_forecast):\n\n return tf.reduce_sum(truth * log_forecast) # Dimensions [batch_size, N_LABEL_TIMESTEPS, N_LABEL_CLASSES]",
"def entropy(self, params):\n log_std = params[:, :, 1]\n return (log_std + 0.5 * (self.LOG2PI + 1)).sum(dim=-1)",
"def logPriorFlat(paramsVec, params):\n logPrior = 0 #ln(1) = 0\n if params is None:\n #Maximally flat prior: p=1 always\n pass\n else:\n paramsDict = params.valuesdict()\n\n #Loop through parameter bounds and update the prior\n for kindex, key in enumerate(paramsDict.keys()):\n if (params[key].min < paramsVec[kindex] < params[key].max):\n pass\n else:\n logPrior = -np.inf #ln(0) = -inf\n return logPrior",
"def loglikeobs(self, params, transformed=True):\n params = np.array(params, ndmin=1)\n if not transformed:\n params = self.transform_params(params)\n results = self._filter(params)\n return results[5]",
"def parameterized_likelihood(params: NamedParameters):\n return ParamaterizedLikelihood(params)",
"def log_likelihood_gradient(y, tx, w):\n return tx.T.dot(sigmoid(tx.dot(w))-y)",
"def likelihood(ts,w,Phi):\n a = Phi.dot(w)\n return np.exp(a*ts)*sigmoid(-a)",
"def NLL(sample, params):\n mu = params[:,:,0]\n logsigma = params[:,:,1]\n \n c = normalization.to(mu.device)\n inv_sigma = torch.exp(-logsigma)\n tmp = (sample - mu) * inv_sigma\n return torch.mean(0.5 * (tmp * tmp + 2 * logsigma + c))",
"def objective(\n self,\n parameters: object\n ) -> float:\n pass",
"def negative_loglikelihood(targets, estimated_distribution):\n return -estimated_distribution.log_prob(targets)",
"def get_log(p):\n if p==0:\n return 0.\n return p*np.log2(p)",
"def _getCurrentPosteriorLikelihood(self): \n likelihood = 0\n T = self.T\n K= self.K \n final_likelihood = 0\n total_log_lik = 0\n \n for n in range(1,self.N+1):\n # Compute total Likelihood for all Instances P(x1...xn / theta) \n tot_lik = 0\n tot_scale_factor = 0\n \n for i in range(1,self.K+1): \n likelihood = self.posterior_state_trellis[n][(T,i)]\n tot_lik = tot_lik + likelihood\n\n try:\n total_log_lik = math.log(tot_lik) \n except ValueError:\n ipdb.set_trace()\n \n for t in range(1,self.T):\n scale_factor = self.forward_scaling_vector[n][t] \n tot_scale_factor = tot_scale_factor + math.log(scale_factor)\n\n final_likelihood = final_likelihood + (total_log_lik - tot_scale_factor)\n\n return final_likelihood",
"def log_likelihood_function(self, instance):\r\n\r\n try:\r\n return self.fit_interferometer_for_instance(\r\n instance=instance\r\n ).figure_of_merit\r\n except (\r\n exc.PixelizationException,\r\n exc.InversionException,\r\n exc.GridException,\r\n OverflowError,\r\n ) as e:\r\n raise exc.FitException from e",
"def score(self, params, transformed=True):\n params = np.array(params, ndmin=1)\n return approx_fprime_cs(params, self.loglike, args=(transformed,))",
"def log_likelihood_grad_rew(self, data, reward_model, bias_params):",
"def log_likelihood(self, state, obs, act):\n indices = np.array([self.Gittins[state['successes'][i], state['failures'][i]] for i in range(self.env.n_arms)])\n greedy_arms = np.where(np.isclose(indices,indices.max()))[0]\n return np.log(1/len(greedy_arms)) if act in greedy_arms else -1e8",
"def loglikeobs(self, params, *args, **kwargs):\n raise NotImplementedError # pragma: no cover",
"def log_gaussian_likelihood(x, mu, log_std):\n log_gaussian_prob = -0.5 * (((x - mu) / (tf.exp(log_std) + EPS)) ** 2 - log_std - 0.5 * np.log(2 * np.pi))\n return tf.reduce_sum(log_gaussian_prob, axis=1)",
"def log1p(x):\n return 0.0",
"def lnprior(self,theta):\n kwargs = dict(zip(self.params,theta))\n err = np.seterr(invalid='raise')\n try:\n lnprior = np.sum(np.log([self.priors[k](v) for k,v in kwargs.items()]))\n except FloatingPointError,ValueError:\n lnprior = -np.inf\n np.seterr(**err)\n return lnprior",
"def log_marginal_likelihood(self) -> tf.Tensor:\n L = tf.linalg.cholesky(self.likelihood.add_to(self.KXX))\n return tf.reduce_sum(multivariate_normal(self._Y, self._mean, L))",
"def log_likelihood(self, theta=None, phi=None):\n theta = theta if theta is not None else self.theta\n phi = phi if phi is not None else self.phi\n ret = 0.\n for m, dm in enumerate(self.corpus):\n for n, w_mn in enumerate(dm):\n tp = 0.\n for k in range(self.n_components):\n tp += theta[m, k] * phi[k, w_mn]\n ret += np.log(tp)\n return ret",
"def log_likelihood(self, X, Y):\n\t\tr,c = twod(Y).shape\n\t\tif r == 1 and c != 1:\n\t\t\tY = twod(Y).T\n\n\t\tsoft = self.predict_soft(X)\n\t\treturn np.mean(np.sum(np.log(np.power(soft, Y, )), 1), 0)",
"def objective_function(params):\n cirq_circuit = variational_state_evolve(params)\n mean_value = self.expectation(\n all_qubits_in_circuit, cirq_circuit, hamiltonian)\n self._current_expectation = mean_value\n return mean_value"
] | [
"0.7583201",
"0.75121284",
"0.7457546",
"0.7209427",
"0.7124011",
"0.70207",
"0.6955007",
"0.69263536",
"0.68272287",
"0.6826398",
"0.6725675",
"0.66759413",
"0.66617227",
"0.66419446",
"0.6640716",
"0.661356",
"0.65459085",
"0.65014017",
"0.64994335",
"0.64519876",
"0.6434851",
"0.6423711",
"0.64190364",
"0.6412945",
"0.6409324",
"0.6381725",
"0.6336786",
"0.6309134",
"0.62959677",
"0.6262083",
"0.6227528",
"0.6220239",
"0.6187902",
"0.6185113",
"0.6143812",
"0.61414236",
"0.6141413",
"0.6123618",
"0.61121815",
"0.6111343",
"0.61082786",
"0.6061131",
"0.6041884",
"0.6020639",
"0.60174507",
"0.6009481",
"0.59847873",
"0.59819806",
"0.5964474",
"0.59469974",
"0.59455854",
"0.5937992",
"0.59337425",
"0.5919133",
"0.58909076",
"0.58903253",
"0.58767015",
"0.5875236",
"0.5872267",
"0.58477664",
"0.58205235",
"0.5799763",
"0.57955533",
"0.57906663",
"0.5780087",
"0.57785517",
"0.57520753",
"0.5750667",
"0.5742591",
"0.57203436",
"0.5720056",
"0.5711202",
"0.5704996",
"0.56972396",
"0.56864",
"0.5667041",
"0.5667041",
"0.56581044",
"0.5654766",
"0.5646105",
"0.5640658",
"0.5635249",
"0.5633265",
"0.562917",
"0.5628895",
"0.56188637",
"0.5617177",
"0.56076306",
"0.5602952",
"0.56018955",
"0.55990064",
"0.5594984",
"0.5589748",
"0.55864483",
"0.5582473",
"0.557823",
"0.557803",
"0.55740154",
"0.55731004",
"0.5572044",
"0.5568623"
] | 0.0 | -1 |
Return the tstatistic for a given parameter estimate. | def tvalues(self):
return self.params / self.bse | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compute_t_params(mu, kappa, alpha, beta):\r\n mu_, sigma2_, dof_ = mu, beta*(kappa + 1)/(alpha*kappa), 2*alpha\r\n return mu_, sigma2_, dof_",
"def t_measure_estimate(self):\n ho = self.humidity_oversampling\n to = self.temperature_oversampling\n po = self.pressure_oversampling\n typ = 1. + 2.*to + (2.*po + 0.5)*bool(po) + (2.*ho +0.5)*bool(ho)\n mx = 1.25 + 2.3*to + (2.3*po + 0.575)*bool(po) + (2.3*ho +0.575)*bool(ho)\n return typ, mx",
"def ttest(x):\n from ..group.onesample import stat\n t = stat(x.T, id='student', axis=0)\n return np.squeeze(t)",
"def get_estimate(self):\n if not self.has_samplers():\n self.draw_samplers()\n \n v = np.percentile(self.samplers, [16, 50, 84])\n return v[1], v[2]-v[1], v[1]-v[0]",
"def t_tests(self):\n se = self.se()\n t = self._coef / se\n p = 2 * stats.distributions.t.sf(np.abs(t), self._rdf)\n return (t, p)",
"def _tstat_generic(value1, value2, std_diff, dof, alternative, diff=0):\n\n tstat = (value1 - value2 - diff) / std_diff\n if alternative in [\"two-sided\", \"2-sided\", \"2s\"]:\n pvalue = stats.t.sf(np.abs(tstat), dof) * 2\n elif alternative in [\"larger\", \"l\"]:\n pvalue = stats.t.sf(tstat, dof)\n elif alternative in [\"smaller\", \"s\"]:\n pvalue = stats.t.cdf(tstat, dof)\n else:\n raise ValueError(\"invalid alternative\")\n return tstat, pvalue",
"def calc_timestep_statistic(self, statistic, time):\n ti = np.where(self.times == time)[0][0]\n ma = np.where(self.masks[ti].ravel() == 1)\n if statistic in ['mean', 'max', 'min', 'std', 'ptp']:\n stat_val = getattr(self.timesteps[ti].ravel()[ma], statistic)()\n elif statistic == 'median':\n stat_val = np.median(self.timesteps[ti].ravel()[ma])\n elif 'percentile' in statistic:\n per = int(statistic.split(\"_\")[1])\n stat_val = np.percentile(self.timesteps[ti].ravel()[ma], per)\n elif 'dt' in statistic:\n stat_name = statistic[:-3]\n if ti == 0:\n stat_val = 0\n else:\n stat_val = self.calc_timestep_statistic(stat_name, time) - \\\n self.calc_timestep_statistic(stat_name, time - 1)\n else:\n stat_val = np.nan\n return stat_val",
"def parstat_val(params: xml.etree.ElementTree, param_id: str) -> str:\n if params.tag == \"parameters\":\n param_record = params.find(\".//parameter[ID='%s']\" % param_id)\n elif params.tag == \"statistics\":\n param_record = params.find(\".//statistic[ID='%s']\" % param_id)\n else:\n raise ValueError(\"Unknown tag for element\")\n\n if param_record is None:\n raise IndexError(\"Can not find element with parameter name: \" + param_id)\n param_value = param_record.find('value')\n if param_value is None:\n raise IndexError(\"Can not find value for parameter: \" + param_id)\n return param_value.text",
"def optht(beta, sv=None, sigma=None, trace=True):\n\n # *************************************************************************\n # *** Author: N. Benjamin Erichson <[email protected]> ***\n # *** <2016> ***\n # *** License: BSD 3 clause ***\n # *************************************************************************\n\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # compute aspect ratio of the input matrix\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n if isinstance(beta, np.ndarray):\n m = min(beta.shape)\n n = max(beta.shape)\n beta = m / n\n\n if beta < 0 or beta > 1:\n raise ValueError('beta must be in (0,1].')\n\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # Sigma unknown\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n if sigma is None:\n if trace is True:\n print('Sigma unknown:')\n print('*************')\n\n coef = optimal_SVHT_coef_sigma_unknown(beta)\n if trace is True:\n print('approximated coefficient w(beta): ', coef)\n\n coef = (optimal_SVHT_coef_sigma_known(beta)\n / np.sqrt(MedianMarcenkoPastur(beta)))\n if trace is True:\n print('optimal coefficient w(beta): ', coef)\n\n if sv is not None:\n cutoff = coef * np.median(sv)\n if trace is True:\n print('cutoff value: ', cutoff)\n\n k = np.max(np.where(sv > cutoff)) + 1\n if trace is True:\n print('target rank: ', k)\n\n return k\n\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # Sigma known\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n else:\n if trace is True:\n print('Sigma known:')\n print('*************')\n\n coef = optimal_SVHT_coef_sigma_known(beta)\n if trace is True:\n print('w(beta) value: ', coef)\n\n if sv is not None:\n cutoff = coef * np.sqrt(len(sv)) * sigma\n if trace is True:\n print('cutoff value: ', cutoff)\n\n k = np.max(np.where(sv > cutoff)) + 1\n if trace is True:\n print('target rank: ', k)\n\n return k\n return coef",
"def get_trial_param(self, trial_id: int, param_name: str) -> float:\n raise NotImplementedError",
"def tsz_profile_for_params(self, params, nu=None):\n # Save current parameters\n params0 = self.get_profile_params()\n \n # Temporarily set new profile parameters and calculate new profile\n self.set_profile_params(params)\n prof = self.tsz_profile(nu)\n \n # Reset to original params and return result\n self.set_profile_params(params0)\n return prof",
"def tstat_GLM(GLMMod, z_stat=False): \r\n \r\n k = GLMMod.nVars # (scalar) number of ind. vars (includes constant)\r\n n = GLMMod.nObs # (scalar) number of observations\r\n #se_betas = np.reshape(np.sqrt(np.diag(GLMMod.var_Betas)),(-1,1))\r\n tStat = GLMMod.Betas/GLMMod.std_err#GLMMod.Betas/se_betas \r\n ts_result = []\r\n for t in tStat:\r\n if z_stat:\r\n ts_result.append((t, stats.norm.sf(abs(t))*2))\r\n else:\r\n ts_result.append((t, stats.t.sf(abs(t),n-k)*2))\r\n \r\n return ts_result",
"def calc_indttest_90(varx,vary):\n print('\\n>>> Using calc_ttest function!')\n \n ### Import modules\n import numpy as np\n import scipy.stats as sts\n \n ### 2-independent sample t-test\n stat,pvalue = sts.ttest_ind(varx,vary,nan_policy='omit')\n \n ### Significant at 90% confidence level\n pvalue[np.where(pvalue >= 0.1)] = np.nan\n pvalue[np.where(pvalue < 0.1)] = 1.\n pvalue[np.isnan(pvalue)] = 0.\n \n print('*Completed: Finished calc_ttest function!')\n return stat,pvalue",
"def ttest_mean(self, value=0, alternative=\"two-sided\"):\n # TODO: check direction with R, smaller=less, larger=greater\n tstat = (self.mean - value) / self.std_mean\n dof = self.sum_weights - 1\n # TODO: use outsourced\n if alternative == \"two-sided\":\n pvalue = stats.t.sf(np.abs(tstat), dof) * 2\n elif alternative == \"larger\":\n pvalue = stats.t.sf(tstat, dof)\n elif alternative == \"smaller\":\n pvalue = stats.t.cdf(tstat, dof)\n else:\n raise ValueError(\"alternative not recognized\")\n\n return tstat, pvalue, dof",
"def TestStatistic(self, x_b, x_t):\n # Instantiate NearestNeighbors class\n NN = NearestNeighborsClass(n_neighbors = self.K)\n \n # Build kd-trees with fixed K\n NN.fit(x_b, x_t)\n \n # Compute distances r_{j,B}, r_{j,T} of Kth-NN in B and T,\n # from x_j in Trial\n self.r_B, self.r_T, _ = NN.compute_distances(x_t)\n \n # Compute estimated density ratio on Trial points\n r_hat = np.power(np.divide(self.r_B, self.r_T), self.D) * (self.NB/float(self.NT-1))\n \n # Compute test statistic over Trial points\n TS = np.mean( np.log(r_hat) )\n \n return(TS)",
"def tstat_beta(self):\n return self._tstat_beta",
"def Statsmodels_TTest(results, Explanatory, NumDecimal):\n\n TTest = []\n for item in results.t_test(np.eye(len(results.params))).tvalue:\n TTest.append(ss.t.cdf(item, results.df_model))\n TTest = [str(round(item, NumDecimal)) for item in TTest]\n for item in range(0, len(Explanatory.columns)):\n TTest[item + 1] = str(TTest[item + 1]) + ' ' + str(Explanatory.columns[item])\n TTest[0] = str(TTest[0])\n TTest = ', '.join(TTest)\n\n return TTest",
"def ttest(x, mu=0, alpha=0.05, is_bernoulli=False, two_sided=True, return_tuple=False):\n\n # Define test degrees of freedom\n if two_sided:\n quant_order = 1 - (alpha / 2)\n h0 = f'X_bar = {mu}'\n h1 = f'X_bar != {mu}'\n else:\n quant_order = 1 - alpha\n h0 = f'X_bar <= {mu}'\n h1 = f'X_bar > {mu}'\n\n # Input vector as array\n x = np.asarray(x)\n # Sample size\n n = len(x)\n\n # Empirical mean\n x_bar = x.mean()\n # s estimator (variance)\n if is_bernoulli:\n s2 = x_bar * (1 - x_bar)\n else:\n s2 = desc.var(x)\n\n # Degrees of freedom\n df = n - 1\n\n # T statistic\n t = (x_bar - mu) / (math.sqrt(s2 / n))\n if two_sided:\n t = math.fabs(t)\n # p and critical values\n p = 2.0 * (1.0 - scp.t.cdf(t, df=df))\n\n if n > 30:\n cv = scp.norm.ppf(quant_order)\n else:\n cv = scp.t.ppf(quant_order, df=df)\n\n _summ = test_summary(df=df, critical_value=cv, t_value=t,\n p_value=p,\n title='One Sample Student test',\n h0=h0, h1=h1,\n alpha=alpha)\n\n if return_tuple:\n return t, cv, p\n else:\n return _summ",
"def getrawpval(teststat, statlist):\n \n nstats = len(statlist)\n highend = lowend = 0 # specifies the range of the p-value\n \n for i in range(nstats):\n \n if teststat == statlist[i]:\n lowend = 1 - (i + 1)/float(nstats)\n if highend < lowend:\n highend = 1 - i/float(nstats)\n \n elif teststat < statlist[i]:\n if highend == 0:\n return 1 - i/float(nstats)\n \n return random.uniform(lowend, highend)",
"def param_extractor(ts, amps):\n # using scipy's curve fit model, p_cov is accuracy\n p, pcov = curve_fit(model_growth_rate, ts, amps, maxfev=6000)\n a_0, omega = p\n # calculates the standard deviation and returns an array\n # first value is the error of a_0, second value is error of omega\n perr = np.sqrt(np.diag(pcov))\n omega_err = perr[1]\n return a_0, omega, omega_err",
"def parametric_mte(rslt, file):\n init_dict = read(file)\n data_frame = pd.read_pickle(init_dict[\"ESTIMATION\"][\"file\"])\n\n # Define quantiles and read in the original results\n quantiles = [0.0001] + np.arange(0.01, 1.0, 0.01).tolist() + [0.9999]\n\n # Calculate the MTE and confidence intervals\n mte = calculate_mte(rslt, data_frame, quantiles)\n mte_up, mte_d = calculate_cof_int(rslt, init_dict, data_frame, mte, quantiles)\n\n return quantiles, mte, mte_up, mte_d",
"def _get_tb(I, nu, beam):\n from astropy import units as u\n return (1222.0*I/(nu**2*(beam.minor/1.0).to(u.arcsecond)*(beam.major/1.0).to(u.arcsecond))).value",
"def summary(self, use_t=True, alpha=0.05, usevar=\"pooled\", value=0):\n\n d1 = self.d1\n d2 = self.d2\n\n confint_percents = 100 - alpha * 100\n\n if use_t:\n tstat, pvalue, _ = self.ttest_ind(usevar=usevar, value=value)\n lower, upper = self.tconfint_diff(alpha=alpha, usevar=usevar)\n else:\n tstat, pvalue = self.ztest_ind(usevar=usevar, value=value)\n lower, upper = self.zconfint_diff(alpha=alpha, usevar=usevar)\n\n if usevar == \"pooled\":\n std_err = self.std_meandiff_pooledvar\n else:\n std_err = self.std_meandiff_separatevar\n\n std_err = np.atleast_1d(std_err)\n tstat = np.atleast_1d(tstat)\n pvalue = np.atleast_1d(pvalue)\n lower = np.atleast_1d(lower)\n upper = np.atleast_1d(upper)\n conf_int = np.column_stack((lower, upper))\n params = np.atleast_1d(d1.mean - d2.mean - value)\n\n title = \"Test for equality of means\"\n yname = \"y\" # not used in params_frame\n xname = [\"subset #%d\" % (ii + 1) for ii in range(tstat.shape[0])]\n\n from statsmodels.iolib.summary import summary_params\n\n return summary_params(\n (None, params, std_err, tstat, pvalue, conf_int),\n alpha=alpha,\n use_t=use_t,\n yname=yname,\n xname=xname,\n title=title,\n )",
"def _t_test_results(self):\n t, df, p = self.api.m.math_utils.welchs_t_test(\n self.lkgr.values, self.fkbr.values)\n lines = [\n 'LKGR values: %r' % self.lkgr.values,\n 'FKBR values: %r' % self.fkbr.values,\n 't-statistic: %r' % t,\n 'deg. of freedom: %r' % df,\n 'p-value: %r' % p,\n 'Confidence score: %r' % (100 * (1 - p))\n ]\n return '\\n'.join(lines)",
"def calcT(self, point):\n index = np.argmax(np.abs(self.start.normalVector[:2]-self.end.normalVector[:2]))\n return (point[index] - self.start[index])/(self.end[index]-self.start[index])",
"def ttest_review(sample_1, sample_2, alpha=.05):\n\n result = stats.ttest_ind(sample_1, sample_2)\n crit_val, p_val = result\n \n ## Creating interpretation based on p-value results.\n\n if p_val < .05:\n print(f'The feature is statistically significant with a p-value of {p_val}.')\n\n else:\n print(f'The feature is not statistically significant with a p-value of {p_val}.')\n \n return p_val",
"def _tstat_beta(self):\n return _handle_ab(self._tstat_all, self.use_const)[1]",
"def _summarize_t(noncentrality, df, alpha=0.05):\n\n x = np.arange(-7.5, 7.6, 0.1)\n y1 = scipy.stats.t.pdf(x, loc=0, scale=1, df=df)\n y2 = scipy.stats.t.pdf(x, loc=noncentrality, scale=1, df=df)\n\n crit = scipy.stats.t.ppf(1 - alpha/2, df=df)\n\n return x, y1, y2, crit",
"def find_t(self, df, confidence=0.95):\n t_table = self.t_table\n nearest_confidence = round(find_nearest(list(t_table), 1.0-confidence), 4)\n nearest_df = round(find_nearest(t_table.index, df), 0)\n t_score = round(t_table[str(nearest_confidence)][nearest_df], 4)\n\n return t_score",
"def _return_t(model):\n summary = base.summary(model)\n unsum = base.unclass(summary)\n return unsum.rx2(\"coefficients\")[:, -1]",
"def _tconfint_generic(mean, std_mean, dof, alpha, alternative):\n\n if alternative in [\"two-sided\", \"2-sided\", \"2s\"]:\n tcrit = stats.t.ppf(1 - alpha / 2.0, dof)\n lower = mean - tcrit * std_mean\n upper = mean + tcrit * std_mean\n elif alternative in [\"larger\", \"l\"]:\n tcrit = stats.t.ppf(alpha, dof)\n lower = mean + tcrit * std_mean\n upper = np.inf\n elif alternative in [\"smaller\", \"s\"]:\n tcrit = stats.t.ppf(1 - alpha, dof)\n lower = -np.inf\n upper = mean + tcrit * std_mean\n else:\n raise ValueError(\"invalid alternative\")\n\n return lower, upper",
"def t_tailed_prob(t, df, tails):\r\n if tails == 'high':\r\n return t_high(t, df)\r\n elif tails == 'low':\r\n return t_low(t, df)\r\n else:\r\n return tprob(t, df)",
"def ttest_ind(self, alternative=\"two-sided\", usevar=\"pooled\", value=0):\n d1 = self.d1\n d2 = self.d2\n\n if usevar == \"pooled\":\n stdm = self.std_meandiff_pooledvar\n dof = d1.nobs - 1 + d2.nobs - 1\n elif usevar == \"unequal\":\n stdm = self.std_meandiff_separatevar\n dof = self.dof_satt()\n else:\n raise ValueError('usevar can only be \"pooled\" or \"unequal\"')\n\n tstat, pval = _tstat_generic(\n d1.mean, d2.mean, stdm, dof, alternative, diff=value\n )\n\n return tstat, pval, dof",
"def TST_ME_DK(X, Y, T, X_org, Y_org, T_org, alpha, sigma, sigma0, epsilon, flag_debug = False):\r\n J = T.shape[0]\r\n s = compute_ME_stat(X, Y, T, X_org, Y_org, T_org, sigma, sigma0, epsilon)\r\n pvalue = stats.chi2.sf(s.item(), J)\r\n if pvalue<alpha:\r\n h = 1\r\n else:\r\n h = 0\r\n if flag_debug:\r\n pdb.set_trace()\r\n return h, pvalue, s",
"def variational_objective(params, t, num_samples, beta=1.):\n\n # 1. draw samples from the variational posterior, eps ~ N(0,I)\n zs, ldet_sums = draw_variational_samples(params, num_samples)\n\n # 1.5 negative entropy of z0 --- likely we need this for KL though\n # not needed for optimization\n\n # 2. compute expected value of the sum of jacobian terms\n E_ldet_sum = np.mean(ldet_sums)\n\n # 3. compute data term\n lls = logprob(zs, t)\n E_logprob = np.mean(lls)\n\n if debug_print:\n print \"entropy term: \", E_ldet_sum\n print \"data term : \", E_logprob, \" (+/- \", np.std(lls), \")\", \" min = \", np.min(lls)\n\n # return lower bound\n beta = 1. if t >= len(beta_schedule) else beta_schedule[t]\n lower_bound = beta * E_logprob + E_ldet_sum\n return -lower_bound",
"def estimate_tau(t, y):\n dt = np.min(np.diff(t))\n tt = np.arange(t.min(), t.max(), dt)\n yy = np.interp(tt, t, y, 1)\n f = acor_fn(yy)\n fs = gaussian_filter(f, 50)\n w = dt * np.arange(len(f))\n m = np.arange(1, len(fs)-1)[(fs[1:-1] > fs[2:]) & (fs[1:-1] > fs[:-2])]\n if len(m):\n return w[m[np.argmax(fs[m])]]\n return w[-1]",
"def estimate_tau(t, y):\n dt = np.min(np.diff(t))\n tt = np.arange(t.min(), t.max(), dt)\n yy = np.interp(tt, t, y, 1)\n f = acor_fn(yy)\n fs = gaussian_filter(f, 50)\n w = dt * np.arange(len(f))\n m = np.arange(1, len(fs)-1)[(fs[1:-1] > fs[2:]) & (fs[1:-1] > fs[:-2])]\n if len(m):\n return w[m[np.argmax(fs[m])]]\n return w[-1]",
"def estimates(self):\n return self._est",
"def two_tailed_t_test(samples: np.ndarray, H0: float):\n empirical_mean = np.mean(samples, axis=0)\n number_samples = samples.shape[0]\n standard_error = np.std(samples, ddof=1, axis=0) / np.sqrt(number_samples)\n t_value = (empirical_mean - H0) / standard_error\n p_value = 2.0 * (1.0 - t(df=number_samples - 1).cdf(np.abs(t_value)))\n return t_value, p_value",
"def estimate(self):\n self.p_est = sum([t.estimate() for t in self.tasks])\n return self.p_est",
"def def_paramt():\n Zeff = 1.0\n amu = 2.0\n mf = mp*amu\n return Zeff, amu,mf",
"def get_parameter_unit(self, parameter_name):\n parameter_units = {\n 'tsky': units.Unit(\"Kelvin\"),\n 'kelvin': self.data_unit\n }\n return parameter_units.get(parameter_name)",
"def sparameters_te():\n return sparameters(\n fiber_angle_deg=15, period=682e-9, ff=343 / 682, wg_height=220e-9\n )",
"def t_one_tailed(x, mu=1.0):\n return t_one_sample(x, mu, tails=1)",
"def get_parameter_summary(self):\n prior = self.parcov.get(self.posterior_parameter.col_names)\n if prior.isdiagonal:\n prior = prior.x.flatten()\n else:\n prior = np.diag(prior.x)\n post = np.diag(self.posterior_parameter.x)\n ureduce = 100.0 * (1.0 - (post / prior))\n return pd.DataFrame({\"prior_var\":prior,\"post_var\":post,\n \"percent_reduction\":ureduce},\n index=self.posterior_parameter.col_names)",
"def estimateParameterValues(self, name, rawData):\n std = np.nanstd(np.ravel(rawData))\n\n if name == self.parameterNames[0]:\n return oint(0, 2 * std, 1000)\n else:\n raise ConfigurationError('White noise model does not contain a parameter \"{}\".'.format(name))",
"def estimate_params(self, data, setToZero=None):\n\t\t\n\t\tn = np.sum(data[\"len\"])\n\n\t\tX = np.zeros(shape=(n,self.nStateFeatures),dtype=np.float32)\n\t\tA = np.zeros(shape=(n,self.actionDim),dtype=np.float32)\n\n\t\ti = 0\n\t\tfor ep_n,ep_len in enumerate(data[\"len\"]):\n\t\t\tX[i:i+ep_len] = data[\"s\"][ep_n][0:ep_len]\n\t\t\tA[i:i+ep_len] = data[\"a\"][ep_n][0:ep_len]\n\t\t\ti += ep_len\n\n\t\tif setToZero is not None:\n\t\t\tX = np.delete(X,setToZero,1)\n\t\t\n\t\tself.params = np.linalg.lstsq(X, A, rcond=None)[0].T\n\t\t#print(self.params)\n\t\t#print(np.dot(np.dot(np.linalg.inv(np.dot(X.T,X)),X.T),A).T)\n\n\t\tif setToZero is not None:\n\t\t\tself.params = np.insert(self.params,setToZero,0,1)\n\n\t\treturn self.params",
"def t_test_(x):\n assert np.ndim(x) == 1 and (not np.any(np.isnan(x)))\n\n if (len(x) <= 1) or (not np.all(np.isfinite(x))):\n return 1.0 # Can't say anything about scale => p=1\n\n _, pval = sst.ttest_1samp(x, 0.0)\n if np.isnan(pval):\n # Should only be possible if scale underflowed to zero:\n assert np.var(x, ddof=1) <= 1e-100\n # It is debatable if the condition should be ``np.mean(x) == 0.0`` or\n # ``np.all(x == 0.0)``. Should not matter in practice.\n pval = np.float(np.mean(x) == 0.0)\n assert 0.0 <= pval and pval <= 1.0\n return pval",
"def estimateParameterValues(self, name, rawData):\n observations = np.array([d[0] for d in rawData])\n min = np.nanmin(observations)\n max = np.nanmax(observations)\n delta = max - min\n\n if name == self.parameterNames[0]:\n return oint(min-delta, max+delta, 1000)\n else:\n raise ConfigurationError('Gaussian mean model does not contain a parameter \"{}\".'.format(name))",
"def t_one_sample(x, mu, tails=2):\n assert tails in (1,2), \"invalid: tails must be 1 or 2, found %s\"%str(tails)\n x = np.asarray(x)\n N = x.size\n df = N - 1\n t_obs = (x.mean() - mu) / (x.std() / np.sqrt(N))\n p_value = tails * st.t.sf(abs(t_obs), df)\n return TtestResults(t_obs, p_value)",
"def get_measurements(self, param):\n return tuple(self.__buffer[param])",
"def get(self, param, phase=\"last\", name=\"Main\"):\n df = self.summary(name=name)\n if param not in df.columns:\n raise KeyError(f\"@param must be in {', '.join(df.columns)}.\")\n if phase == \"last\":\n phase = df.index[-1]\n return df.loc[phase, param]",
"def _get_params_summary(self, alpha=0.1):\n\n # TODO: Acknowledge that this code was modified from the statsmodels package\n\n results = self._model.fit()\n\n def forg(x, prec=3):\n if prec == 3:\n # for 3 decimals\n if (abs(x) >= 1e4) or (abs(x) < 1e-4):\n return '%9.3g' % x\n else:\n return '%9.3f' % x\n elif prec == 4:\n if (abs(x) >= 1e4) or (abs(x) < 1e-4):\n return '%10.4g' % x\n else:\n return '%10.4f' % x\n else:\n raise NotImplementedError\n\n # Parameters part of the summary table\n conf_int = results.conf_int(alpha)\n\n # Dictionary to store the header names for the parameter part of the\n # summary table. look up by modeltype\n alp = str((1 - alpha) * 100) + '%'\n\n param_header = ['coef', 'std err', 't', 'P>|t|',\n '[' + alp + ' Conf. Int.]']\n\n xname = self._model.exog_names\n\n params_stubs = xname\n\n exog_idx = range(len(xname))\n\n # center confidence intervals if they are unequal lengths\n confint = [\"%s %s\" % tuple(map(forg, conf_int.ix[i])) for i in exog_idx]\n len_ci = list(map(len, confint))\n max_ci = max(len_ci)\n min_ci = min(len_ci)\n\n if min_ci < max_ci:\n confint = [ci.center(max_ci) for ci in confint]\n\n # explicit f/g formatting, now uses forg, f or g depending on values\n params_data = zip([forg(results.params[i], prec=4) for i in exog_idx],\n [forg(results.bse[i]) for i in exog_idx],\n [forg(results.tvalues[i]) for i in exog_idx],\n # [\"%#6.3f\" % (results.pvalues[i]) for i in exog_idx],\n [\"%#6.3g\" % (results.pvalues[i]) for i in exog_idx],\n confint\n )\n params_data = list(params_data)\n parameter_table = SimpleTable(params_data,\n param_header,\n params_stubs,\n txt_fmt=fmt_params\n )\n\n if results.params.shape[0] > 2:\n vif_table = self._get_vif_table()\n parameter_table.extend_right(vif_table)\n\n return parameter_table",
"def get_estimation(self):\n self.calculate_variables()\n if self.validate_preconditions():\n return self.estimate()\n else:\n return None",
"def estimate_var(sample, threshold):\n sample_size = len(sample)\n index_at = get_var_level_index(sample_size, threshold)\n sample.sort()\n return sample[index_at]",
"def get_measurement_parameter(self, trace: int) -> str:\n if trace not in range(1, 5):\n raise ValueError(\"Trace must be between 1 and 4\")\n\n return self.query(f\"CALC:PAR{trace}:DEF?\")",
"def ttest(\n data, dataLabel=None, paired=False, decimals=4,\n textline=False, units=None\n ):\n\n # test calling values\n if data is None or not isinstance(data, dict) or len(data.keys()) != 2:\n raise ValueError('RSTATS.ttest: data must be a dictionary'\n + ' with at exactly 2 keys'\n + '\\nUse KW (anova) for more than 2 groups')\n\n k = list(data.keys())\n g = {}\n n = {}\n gmean = {}\n gstd = {}\n\n g[1] = data[k[0]]\n g[2] = data[k[1]]\n n[1] = len(g[1])\n n[2] = len(g[2])\n # (w1, p1) = Stats.shapiro(g1, a=None, reta=False)\n # (w2, p2) = Stats.shapiro(g2, a=None, reta=False)\n # Tb, pb = Stats.bartlett(g1, g2) # do bartletss for equal variance\n equalVar = False\n\n if paired:\n print (len(g[1]), len(g[2]))\n (t, p) = Stats.ttest_rel(g[1], g[2])\n else:\n (t, p) = Stats.ttest_ind(g[1], g[2], equal_var=equalVar)\n gmean[1] = np.mean(g[1])\n gstd[1] = np.std(g[1], ddof=1)\n gmean[2] = np.mean(g[2])\n gstd[2] = np.std(g[2], ddof=1)\n # df = (tstd[k]**2/tN[k] + dstd[k]**2/dN[k])**2 / (( (tstd[k]**2 /\n # tN[k])**2 / (tN[k] - 1) ) + ( (dstd[k]**2 / dN[k])**2 / (tN[k] - 1) ) )\n df = ((gstd[1]**2/n[1] + gstd[2]**2/n[2])**2\n / (((gstd[1]**2 / n[1])**2 / (n[1] - 1)\n + ((gstd[2]**2 / n[2])**2 / (n[1] - 1))))\n )\n if dataLabel is not None:\n testtype = 'Independent'\n if paired:\n testtype = 'Paired'\n n = max([len(l) for l in k])\n print ('\\n%s\\n %s T-test, Welch correction' % (dataLabel, testtype))\n # if p1 < 0.05 and p2 < 0.05:\n # print(u' Both data sets appear normally distributed: Shapiro-Wilk Group 1 p = {:6.3f}, Group2 p = {:6.3f}'.format(p1, p2))\n # else:\n # print(u' ****At least one Data set is NOT normally distributed****\\n Shapiro-Wilk Group 1 p = {:6.3f}, Group2 p = {:6.3f}'.format(p1, p2))\n # print (u' (performing test anyway, as requested)')\n # if equalVar:\n # print(u' Variances are equivalent (Bartletts test, p = {:.3f})'.format(pb))\n # else:\n # print(u' Variances are unequal (Bartletts test, p = {:.3f}); not assuming equal variances'.format(pb))\n print(u' {:s}={:8.{pc}f} (SD {:.{pc}f}, N = {:d})'.\n format(k[0].rjust(n), gmean[1], gstd[1],\n len(g[1]), pc=decimals))\n print(u' {:s}={:8.{pc}f} (SD {:.{pc}f}, N = {:d})'.\n format(k[1].rjust(n), gmean[2], gstd[2],\n len(g[2]), pc=decimals))\n print(u' t({:6.2f})={:8.4f} p={:8.6f}\\n'.\n format(df, float(t), float(p)))\n # generate one line of text suitable for pasting into a paper\n if textline:\n if units is not None:\n units = ' ' + units\n else:\n units = ''\n fmtstring = u'{:s}: {:.{pc}f} (SD {:.{pc}f}, N={:d}){:s}; '\n print(u'(', end='')\n for s in range(1, 3):\n print(fmtstring.format(\n k[s-1], gmean[s], gstd[s], len(g[s]), units, \n pc=decimals), end='')\n print(u't{:.2f}={:.3f}, p={:s})\\n'.format(df, float(t), pformat(p)))\n\n return(df, float(t), float(p))",
"def _tstat_all(self):\n return np.squeeze(self.solution) / self._se_all",
"def estimate(self):\n mu = self.mean()\n var = np.average((self.particles - mu) ** 2, weights=self.weights, axis=0)\n\n return mu, var",
"def calculate_t_test(mean1, mean2, var1, var2, n1, n2, alpha):\n # Two Sample T Test (M0 == M1) (Two Tails)\n t = (mean1 - mean2) / sqrt((var1 / n1) + (var2 / n2)) # t statistic calculation for two sample\n df = n1 + n2 - 2 # degree of freedom for two sample t - set\n pval = 1 - stats.t.sf(np.abs(t), df) * 2 # two-sided pvalue = Prob(abs(t)>tt) # p - value\n cv = stats.t.ppf(1 - (alpha / 2), df)\n standart_error = cv * sqrt((var1 / n1) + (var2 / n2))\n confidence_intervals = [abs(mean1 - mean2) - standart_error, abs(mean1 - mean2) + standart_error, standart_error]\n acception = 'HO REJECTED!' if pval < (alpha / 2) else 'HO ACCEPTED!' # left tail\n acception = 'HO REJECTED!' if pval > 1 - (alpha / 2) else 'HO ACCEPTED!' # right tail\n return pval, confidence_intervals, acception",
"def twr_ret(self) -> float:\n if float(self.tsdf.iloc[0]) == 0.0:\n raise Exception('First data point == 0.0')\n return float(((self.tsdf.iloc[-1] / self.tsdf.iloc[0]) ** (1 / self.length) - 1) * self.periods_in_a_year)",
"def estimateParameterValues(self, name, rawData):\n if name == self.parameterNames[0]:\n # lower is boundary is zero by definition, upper boundary is chosen as 1.25*(largest observation)\n return oint(0, 1.25*np.nanmax(np.ravel(rawData)), 1000)\n else:\n raise ConfigurationError('Poisson model does not contain a parameter \"{}\".'.format(name))",
"def run_welchs_ttest(stat1, stat2, alpha, faster):\n m1 = stat1[MEAN]\n m2 = stat2[MEAN]\n\n s1 = stat1[STDDEV]\n s2 = stat2[STDDEV]\n\n n1 = stat1[ROUNDS]\n n2 = stat2[ROUNDS]\n\n df1 = n1 - 1 # degree of freedom of stat1\n df2 = n2 - 1 # degree of freedom of stat2\n\n sample_v1 = s1**2 / n1 # biased estimated sample variance of stat1\n sample_v2 = s2**2 / n2 # biased estimated sample variance of stat2\n\n biased_variance = np.sqrt(sample_v1 + sample_v2)\n # degree of freedom\n df = (sample_v1 + sample_v2) ** 2 / (\n sample_v1**2 / (df1) + sample_v2**2 / (df2)\n )\n\n mean_delta = m1 - m2\n t_stat = mean_delta / biased_variance\n\n if faster:\n # Null hypothesis is stat1 >= stat2.\n # Alternative hypothesis is stat1 < stat2.\n p_value = t.cdf(t_stat, df)\n\n # Compute one sided confidence interval (-inf, x)\n upper_bound = mean_delta + t.ppf(1.0 - alpha, df) * biased_variance\n upper_bound = format(upper_bound, \".5f\")\n lower_bound = \"-inf\"\n else:\n # Null hypothesis is stat1 <= stat2.\n # Alternative hypothesis is stat1 > stat2.\n p_value = 1.0 - t.cdf(t_stat, df)\n\n # Compute one sided confidence interval (x, inf)\n upper_bound = \"inf\"\n lower_bound = mean_delta + t.ppf(alpha, df) * biased_variance\n lower_bound = format(lower_bound, \".5f\")\n\n return TTestResult(\n p_value=p_value,\n t_stat=t_stat,\n lower_bound=lower_bound,\n upper_bound=upper_bound,\n mean_delta=format(mean_delta, \".5f\"),\n )",
"def recommend_T(self):\n max_eigvalue = torch.max(torch.linalg.eigvalsh(self.Wc))\n T = (self.settings[\"tgtStd\"] ** 2) * \\\n (self.vars['bowl_strength'] - max_eigvalue)\n return T",
"def get_param_values(self,obs_name,param):\n\n return self.datasets[obs_name][param]",
"def estimateParameterValues(self, name, rawData):\n mean = np.nanmean(np.ravel(rawData))\n std = np.nanstd(np.ravel(rawData))\n\n if name == self.parameterNames[0]:\n return cint(mean-2*std, mean+2*std, 200)\n elif name == self.parameterNames[1]:\n return oint(0, 2 * std, 200)\n else:\n raise ConfigurationError('Gaussian model does not contain a parameter \"{}\".'.format(name))",
"def lttest_1samp(a,popmean,printit=0,name='Sample',writemode='a'):\r\n x = mean(a)\r\n v = var(a)\r\n n = len(a)\r\n df = n-1\r\n svar = ((n-1)*v)/float(df)\r\n t = (x-popmean)/math.sqrt(svar*(1.0/n))\r\n prob = betai(0.5*df,0.5,float(df)/(df+t*t))\r\n\r\n if printit <> 0:\r\n statname = 'Single-sample T-test.'\r\n outputpairedstats(printit,writemode,\r\n 'Population','--',popmean,0,0,0,\r\n name,n,x,v,min(a),max(a),\r\n statname,t,prob)\r\n return t,prob",
"def T_from_xp(x,p):\n\ttemp = x + skew_slope*np.log(p)\n\treturn temp",
"def getTau(self) -> float:\n return self.tau",
"def get_bestparameter(self):\n if self._df_test is None:\n raise RuntimeError('get_bestparameter: please the '\n 'train model first')\n mean = self._df_test.mean(axis=1)\n if len(mean) == 1:\n result = mean.idxmax()\n elif len(mean) == 2:\n result = mean.loc[mean.index > 1].idxmax()\n else:\n result = mean.loc[mean.index > 2].idxmax()\n return result",
"def get_bestparameter(self):\n if self._df_test is None:\n raise RuntimeError('get_bestparameter: please the '\n 'train model first')\n mean = self._df_test.mean(axis=1)\n if len(mean) == 1:\n result = mean.idxmax()\n elif len(mean) == 2:\n result = mean.loc[mean.index > 1].idxmax()\n else:\n result = mean.loc[mean.index > 2].idxmax()\n return result",
"def value(self,pt,eta):\n return self._data[self.__ptBin(pt)][self.__etaBin(eta)][0]",
"def std(x):\n return sqrt(TinyStatistician.var(x))",
"def generate_parameter_sample(self, t): \n \n theta_hat = np.zeros(self.K) \n k = np.random.choice(self.Npar, 1, p=self.w)[0] # np.random.choice outputs an array\n theta_hat = self.Particles[k] \n return theta_hat",
"def _p_value(self):\n p_value = chi2.sf(self.test_statistic, 2)\n\n return p_value",
"def p_value(set1, set2):\n\ts, p = stats.ttest_ind(set1, set2)\n\treturn p",
"def _t_test(_sample_a, _sample_b):\n res = stats.ttest_ind(_sample_a, _sample_b, axis=0, equal_var=equal_var, nan_policy='propagate')\n print('Independent t-test\\nt-statistic: {}\\np-value: {}'.format(res[0], res[1]))\n print('-' * 10)",
"def std_params(params):\n std_params = []\n for i in range(len(params[0])):\n std_params.append(np.std(np.array(params)[:, i]))\n return std_params",
"def paramshuman(chrom, outdir, alpha_inter, gamma_inter, p_a, p_b, seed, diag, filter_high):\n simulatehuman.cmd_estimate_params(chrom, outdir, alpha_inter=alpha_inter, gamma_inter=gamma_inter,\n p_a=p_a, p_b=p_b, seed=seed, diag=diag, filter_high=filter_high,\n plot=True)",
"def get_kendell_tau(ys):\n\n from scipy.stats import kendalltau\n\n # calculate Kendall tau\n tau, p = kendalltau(range(len(ys)), ys)\n\n return tau, p",
"def est_am(t,y):\n ch=t>t_eq\n y1=y[ch]\n return y1[-1]-y1[0]",
"def ti_func(self):\n return self.ti.val - self.calc_ti()",
"def summary(self, alpha=0.05, xname=None):\n\n yname = \"None\"\n effect = np.atleast_1d(self.prob1)\n if self.pvalue is None:\n statistic, pvalue = self.test_prob_superior()\n else:\n pvalue = self.pvalue\n statistic = self.statistic\n pvalues = np.atleast_1d(pvalue)\n ci = np.atleast_2d(self.conf_int(alpha=alpha))\n if ci.shape[0] > 1:\n ci = ci.T\n use_t = self.use_t\n sd = np.atleast_1d(np.sqrt(self.var_prob))\n statistic = np.atleast_1d(statistic)\n if xname is None:\n xname = ['c%d' % ii for ii in range(len(effect))]\n\n xname2 = ['prob(x1>x2) %s' % ii for ii in xname]\n\n title = \"Probability sample 1 is stochastically larger\"\n from statsmodels.iolib.summary import summary_params\n\n summ = summary_params((self, effect, sd, statistic,\n pvalues, ci),\n yname=yname, xname=xname2, use_t=use_t,\n title=title, alpha=alpha)\n return summ",
"def T(self, point = -1):\n return self.solution('T', point)",
"def calcT(self, theta, T_ss):\n return T_ss * np.cos(theta)**0.25",
"def get_stats(self):\n stats = {\"param_noise_stddev\": self.current_std}\n return stats",
"def t(self):\n index = self.var_index()\n return self.var_data(index)",
"def estimated_speed(self):\n return self._estimates[3].item(0)",
"def _get_T(self, polarization, net_t_amp, n_i, n_f, theta_i=0., theta_f=0.):\n if (polarization=='s'):\n return np.abs(net_t_amp**2) * (n_f/n_i)\n elif (polarization=='p'):\n return np.abs(net_t_amp**2) * (n_f/n_i)\n else:\n raise ValueError(\"Polarization must be 's' or 'p'\")",
"def est(self):\n self.min_key = 'average'\n if self.iteration >= 3:\n # Poll estimated times from different algorithms\n lin_est_time = self.lin_est_no_outlier()\n lin_no_est_time = self.lin_est_outlier()\n average_est_time = self.avg_est_no_outlier()\n average_no_est_time = self.avg_est_outlier()\n\n # Record discrepancies between the estimated delta t's and the\n # actual delta t.\n if self.iteration > 8:\n self.err_rec()\n\n # Review the choice of algorithm after every 15 jobs and switch\n # to a better one if necessary.\n if not self.override:\n if self.iteration % 5 == 0 and self.iteration > 8:\n self.least_err()\n\n # Return the time associated with the algorithm that offers the\n # highest accuracy.\n if self.min_key is 'average':\n est_time = average_est_time\n if self.min_key is 'average_no':\n est_time = average_no_est_time\n elif self.min_key is 'lin':\n est_time = lin_est_time\n elif self.min_key is 'lin_no':\n est_time = lin_no_est_time\n\n est_time = int(round(est_time))\n else:\n est_time = 0\n\n # Bypasses negative estimates occasionally generated by the linear\n # algorithm and huge numbers occasionally generated by the positive\n # exponential algorithm. 3.2e7 is a little over a year.\n if est_time < 0:\n est_time = self.est_time\n if not self.override:\n self.min_key = 'average'\n else:\n self.est_time = est_time\n\n return est_time",
"def league_ttest(df_league_one: pd.DataFrame, df_league_two: pd.DataFrame, parameter: str, alpha: float, ):\n assert isinstance(df_league_one, pd.DataFrame), 'df_league_one needs to be a pandas dataframe.'\n assert isinstance(df_league_two, pd.DataFrame), 'df_league_two needs to be a pandas dataframe.'\n assert isinstance(alpha, float), 'alpha needs to be a float.'\n\n\n df_league_one_mean = df_league_one.mean()\n n = len(df_league_one['club'])\n df = n-1\n t_critical = stats.t.ppf(1-alpha, df)\n leagues_ttest = stats.ttest_1samp(a= df_league_two[f'{parameter}'], popmean= df_league_one_mean)\n t_value = leagues_ttest[0]\n p_value = leagues_ttest[1]\n\n stats_values = {}\n\n stats_values['p_value'] = round(list(p_value)[0], 4)\n\n if stats_values['p_value'] < alpha:\n return ('Enough evidence to reject null hypothesis')\n elif stats_values['p_value'] > alpha:\n return ('Not enough evidence to reject null hypothesis')",
"def ttest():\n # open test results and perform regression analysis\n alphas = []\n betas = []\n iterations = {}\n with open(f\"Results/conclusion2.csv\") as f:\n csv_reader = csv.reader(f, delimiter=',')\n\n for run in csv_reader:\n max, max_i = get_max_run(run)\n if int(run[0]) not in iterations:\n iterations[int(run[0])] = {100 - int(run[1])-1: int(max)}\n else:\n iterations[int(run[0])][100 - int(run[1])-1] = int(max)\n\n for iteration in iterations:\n mono_levels = list(iterations[iteration].keys())\n pop_sizes = [iterations[iteration][i] for i in mono_levels]\n\n regress_result = regress(pop_sizes, mono_levels)\n alphas += [regress_result[1]]\n betas += [regress_result[0]]\n\n # plot scatter and regression line\n avg_alpha = sum(alphas)/len(alphas)\n avg_beta = sum(betas)/len(betas)\n stddev_beta = np.std(betas)\n vis.scatter_mono(iterations, avg_alpha, avg_beta)\n\n # perform t-test\n ttest_result = stats.ttest_ind(betas, [0 for i in betas], equal_var=True)\n t_stat = ttest_result[0]\n p_value = ttest_result[1]\n print(f'Results from t-test:')\n print(f'Avg beta: {avg_beta}, stddev beta: {stddev_beta}.')\n print(f't-stat: {t_stat}, p-value: {p_value}.')",
"def generate_statistic_H1(NUM_STATISTICS, sigma_w, N):\n\n T_y = np.zeros(NUM_STATISTICS, dtype=np.complex)\n\n for ind in range(NUM_STATISTICS):\n x = sigma_s * np.random.randn(N, 1)\n for k in range(K):\n x[k * (N_c + N_d): k * (N_c + N_d) + N_c] = x[k * (N_c + N_d) + N_d: (k + 1) * (N_c + N_d)]\n\n w = sigma_w * np.random.randn(N, 2).view(np.complex128)\n\n y = x + w\n\n # Calculate test statistic\n val = np.complex(0)\n for n in range(N_c):\n for k in range(K):\n val += y[n + k * (N_c + N_d)] * np.conjugate(y[n + k * (N_c + N_d) + N_d])\n\n T_y[ind] = 1 / K * val\n\n return T_y",
"def rate(self, t, y):\n if y[1] >= self.parameters.T:\n return super(SFORT, self).rate(t, y)\n else:\n return 0",
"def get_pvalue_thd(self):\n terminals_values = []\n for terminal in self.feature_tree.get_terminals():\n temp = self.get_mannwitneyu_pvalue(terminal)\n terminals_values.append(temp)\n if temp == 1:\n print('non siginificant')\n while 0 in terminals_values:\n terminals_values.remove(0)\n self.pvalue_thd = min(self.pvalue_thd,np.mean(terminals_values))\n #print('pvalue_thd',self.pvalue_thd)",
"def t_test(self, r_matrix, cov_p=None, scale=None, use_t=None): # noqa:E501\n from patsy import DesignInfo\n names = self.model.data.param_names\n LC = DesignInfo(names).linear_constraint(r_matrix)\n r_matrix, q_matrix = LC.coefs, LC.constants\n num_ttests = r_matrix.shape[0]\n num_params = r_matrix.shape[1]\n\n if (cov_p is None and self.normalized_cov_params is None and\n not hasattr(self, 'cov_params_default')):\n raise ValueError('Need covariance of parameters for computing '\n 'T statistics') # pragma: no cover\n if num_params != self.params.shape[0]: # pragma: no cover\n raise ValueError('r_matrix and params are not aligned')\n if q_matrix is None:\n q_matrix = np.zeros(num_ttests)\n else:\n q_matrix = np.asarray(q_matrix)\n q_matrix = q_matrix.squeeze()\n if q_matrix.size > 1:\n if q_matrix.shape[0] != num_ttests: # pragma: no cover\n raise ValueError(\"r_matrix and q_matrix must have the same \"\n \"number of rows\")\n\n if use_t is None:\n # switch to use_t false if undefined\n use_t = (hasattr(self, 'use_t') and self.use_t)\n\n tstat = _sd = None\n\n _effect = np.dot(r_matrix, self.params)\n # nan_dot multiplies with the convention nan * 0 = 0\n\n cparams = self.cov_params(r_matrix=r_matrix, cov_p=cov_p)\n # Perform the test\n if num_ttests > 1:\n _sd = np.sqrt(np.diag(cparams))\n else:\n _sd = np.sqrt(cparams)\n tstat = (_effect - q_matrix) * recipr(_sd)\n\n df_resid = getattr(self, 'df_resid_inference', self.df_resid)\n\n if use_t:\n return ContrastResults(effect=_effect, t=tstat, sd=_sd,\n df_denom=df_resid)\n else:\n return ContrastResults(effect=_effect, statistic=tstat, sd=_sd,\n df_denom=df_resid,\n distribution='norm')",
"def get_fitness_stdev(self):\n return self.get_fitness_stat(stdev)",
"def TS(self, T):\n eta = np.array([arm.eta for arm in self.MAB])\n\n def f(x):\n return np.random.normal(x.Sa/x.Na, eta/np.sqrt(x.Na))\n return self.Index_Policy(T, f)",
"def parameter_optimize(self, estimator, parameters, X_test, y_test):\n cv = cross_validation.ShuffleSplit(self.X.shape[0], n_iter=100,\n test_size=0.3, random_state=42)\n clf = grid_search.GridSearchCV(estimator, parameters[1], cv = cv, n_jobs =4)\n t1 = time.time()\n clf.fit(self.X, self.y)\n print \"The optimize parameters for %s is: %s\"%(parameters[0], clf.best_params_)\n y_pred = clf.predict(X_test)\n t2 = time.time()\n print \"The running time for %s is: %f sec\"%(parameters[0], t2 - t1)\n score = metrics.accuracy_score(y_test, y_pred)\n print \"The accuracy score for %s is: %f\"%(parameters[0], score), \"\\n\"\n return {\"%s\"%parameters[0]: {\"estimator_parameters\": clf.best_params_, \n \"running_time\": t2-t1, \"accuracy_score\": score}}"
] | [
"0.572344",
"0.5572131",
"0.55429137",
"0.54679704",
"0.54327816",
"0.53799844",
"0.536686",
"0.5357605",
"0.53125864",
"0.5266699",
"0.52574205",
"0.5219881",
"0.52093285",
"0.5167227",
"0.51620823",
"0.5152715",
"0.51152396",
"0.51094764",
"0.5073365",
"0.5017591",
"0.50017464",
"0.49907333",
"0.49889088",
"0.4988145",
"0.49851653",
"0.49730366",
"0.49719355",
"0.49630105",
"0.49524036",
"0.49356666",
"0.49302685",
"0.49239042",
"0.49123555",
"0.49120715",
"0.4906305",
"0.49050054",
"0.49050054",
"0.48710918",
"0.4868439",
"0.48612133",
"0.48603082",
"0.48549637",
"0.48529166",
"0.4832241",
"0.48301223",
"0.48149812",
"0.48121274",
"0.4809941",
"0.48093528",
"0.479955",
"0.47785482",
"0.47631556",
"0.47500977",
"0.4744889",
"0.4743544",
"0.47306645",
"0.47299376",
"0.47263473",
"0.47258437",
"0.47161952",
"0.47085366",
"0.47078156",
"0.47046712",
"0.46973172",
"0.46962842",
"0.46910903",
"0.46862784",
"0.46834165",
"0.4680538",
"0.46792462",
"0.46792462",
"0.46764675",
"0.46764436",
"0.4673523",
"0.46733347",
"0.4671667",
"0.46712998",
"0.4669056",
"0.4666563",
"0.4655838",
"0.46516627",
"0.4650765",
"0.46436957",
"0.46403188",
"0.46290293",
"0.46261495",
"0.4617514",
"0.46151337",
"0.46148425",
"0.46106434",
"0.46026987",
"0.4597998",
"0.45901182",
"0.4590041",
"0.4581437",
"0.45812333",
"0.4576781",
"0.45757404",
"0.45629722"
] | 0.5572323 | 2 |
Returns the variance/covariance matrix. The variance/covariance matrix can be of a linear contrast of the estimates of params or all params multiplied by scale which will usually be an estimate of sigma^2. Scale is assumed to be a scalar. | def cov_params(self, r_matrix=None, column=None, scale=None, cov_p=None,
other=None):
if (hasattr(self, 'mle_settings') and
self.mle_settings['optimizer'] in ['l1', 'l1_cvxopt_cp']):
dot_fun = nan_dot
else:
dot_fun = np.dot
if (cov_p is None and self.normalized_cov_params is None and
not hasattr(self, 'cov_params_default')): # pragma: no cover
raise ValueError('need covariance of parameters for computing '
'(unnormalized) covariances')
if column is not None and (r_matrix is not None or other is not None):
raise ValueError('Column should be specified without other '
'arguments.') # pragma: no cover
if other is not None and r_matrix is None: # pragma: no cover
raise ValueError('other can only be specified with r_matrix')
if cov_p is None:
if hasattr(self, 'cov_params_default'):
cov_p = self.cov_params_default
else:
if scale is None:
scale = self.scale
cov_p = self.normalized_cov_params * scale
if column is not None:
column = np.asarray(column)
if column.shape == ():
return cov_p[column, column]
else:
return cov_p[column[:, None], column]
elif r_matrix is not None:
r_matrix = np.asarray(r_matrix)
if r_matrix.shape == (): # pragma: no cover
raise ValueError("r_matrix should be 1d or 2d")
if other is None:
other = r_matrix
else:
other = np.asarray(other)
tmp = dot_fun(r_matrix, dot_fun(cov_p, np.transpose(other)))
return tmp
else: # if r_matrix is None and column is None:
return cov_p | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def params_to_scale_mean_cov(params_):\n # Extract scale and mean\n #scale_sqrt_ = params_[0]\n #scale_ = scale_sqrt_ * scale_sqrt_\n scale_ = params_[0]\n\n mean_ = params_[1:n + 1]\n\n # Get eigenvalues\n evals_sqrt_ = numpy.array(params_[n + 1:n2 + 1])\n evals_ = evals_sqrt_ * evals_sqrt_\n #evals_ = numpy.array(params_[n + 1:n2 + 1])\n\n ## Reconstruct the transformed eigenvector matrix\n #cov_c_ = numpy.zeros((n, n))\n #cov_c_[upper] = params_[n2 + 1:]\n #cov_c_.transpose()[upper] = -cov_c_[upper]\n #\n ## Use an inverse Cayley transform to get the true eigenvector matrix\n #evecs_ = (eye - cov_c_).dot(linalg.inv(eye + cov_c_))\n\n # Get eigenvector matrix from orthogonal_matrix()\n evecs_ = orthogonal_matrix_from_angles(n, params_[n2 + 1:])\n\n # Get the covariance matrix from the eigenvectors and eigenvalues\n cov_ = evecs_.dot(numpy.diag(evals_).dot(evecs_.transpose()))\n\n return scale_, mean_, cov_",
"def apply_scale( vectors, scale ):\n # create a scaling matrix\n matrix = numpy.array([\n [ scale[ 0 ], 0.0, 0.0 ],\n [ 0.0, scale[ 1 ], 0.0 ],\n [ 0.0, 0.0, scale[ 2 ] ]\n ])\n return numpy.dot( vectors, matrix )",
"def scaleMatrix(self, sx=0, sy=0, sz=0):\n\n return np.array([[sx, 0, 0, 0],\n [0, sy, 0, 0],\n [0, 0, sz, 0],\n [0, 0, 0, 1]])",
"def sigma(self):\n with ops.name_scope(self.name):\n return self._cov.to_dense()",
"def get_VaR(data, alpha=0.99, scale=52):\n logger = logging.getLogger(__name__)\n ret = get_returns(data, style='log', fillna=False)\n if ret.ndim==1:\n ret = ret.to_frame()\n from basic.mathe import covariance\n vol = np.sqrt( ret.apply(covariance) * scale)\n mean = ret.apply(np.mean) * scale\n from scipy import stats\n alpha = np.array([alpha]).ravel()\n result = pd.DataFrame(0, columns=[\"{:.0%}\".format(x) for x in alpha], index=ret.columns)\n for t in ret.columns:\n result.loc[t,:] = stats.norm.ppf(1-np.array(alpha), mean[t], vol[t])\n return result",
"def _get_variance_covariance_table(self):\n\n # variance-covariance matrix\n res = self._model.fit()\n X = self._model.exog\n x_prime_x_inverse = np.linalg.inv(np.matmul(X.transpose(), X))\n var_cov_matrix = res.mse_resid * x_prime_x_inverse\n var_cov_table = SimpleTable(data=var_cov_matrix,\n headers=self._model.exog_names,\n stubs=self._model.exog_names,\n title='Variance-covariance matrix')\n\n return var_cov_table",
"def getScalingMatrix(sx, sy, sz):\n return MatrixExtended([\n [sx, 0, 0, 0],\n [0, sy, 0, 0],\n [0, 0, sz, 0],\n [0, 0, 0, 1]])",
"def compute_covariance_matrix(Xs, sigma_2):\n m, d = Xs.shape\n t1 = np.reshape(np.tile(Xs, m), (m, m, d))\n t2 = np.reshape(np.tile(Xs, (m, 1)), (m, m, d))\n K1 = np.linalg.norm(t1 - t2, axis=2)\n coeff = 0.1\n Sigma = np.ones((m, m)) - coeff*K1\n return Sigma",
"def scaled_vcov(self):\n return (self.rss()[np.newaxis, np.newaxis, :]\n * 1. / self._rdf * self._vcov[:, :, np.newaxis])",
"def homog_scale_mtx(scale: float) -> numpy.array:\n return numpy.array(\n [[scale, 0, 0, 0], [0, scale, 0, 0], [0, 0, scale, 0], [0, 0, 0, 1]],\n dtype=numpy.float64,\n )",
"def matScale(mat, scale):\n shape=matShape(mat)\n return [[matGet(mat,x,y)*scale for y in range(shape[1])] \\\n for x in range(shape[0])]",
"def getMatrix(self, scale):\n \n kbx = np.sqrt(self.kbxn**2. + self.kbxSF**2.) # rms kbeta\n kby = np.sqrt(self.kbyn**2. + self.kbySF**2.) # rms kbeta\n\n kbx = kbx / scale.lg\n kby = kby / scale.lg\n \n matx = np.zeros([2,2])\n\n if (kbx > 0.):\n matx[0][0] = np.cos(kbx*self.undlen)\n matx[0][1] = 1. /kbx * np.sin(kbx*self.undlen)\n matx[1][0] = -kbx * np.sin(kbx*self.undlen)\n matx[1][1] = np.cos(kbx*self.undlen)\n else: # just a free space drift\n matx[0][0] = 1.\n matx[0][1] = self.undlen\n matx[1][0] = 0.\n matx[1][1] = 1.\n\n maty = np.zeros([2,2])\n\n if (kby > 0.):\n maty[0][0] = np.cos(kby*self.undlen)\n maty[0][1] = 1. /kby * np.sin(kby*self.undlen)\n maty[1][0] = -kby * np.sin(kby*self.undlen)\n maty[1][1] = np.cos(kby*self.undlen)\n else: # just a free space drift\n maty[0][0] = 1.\n maty[0][1] = self.undlen\n maty[1][0] = 0.\n maty[1][1] = 1.\n \n return matx, maty",
"def getScale(self):\n \n dag_node = OpenMaya.MFnDagNode(self.thisObj)\n transform_node = OpenMaya.MFnTransform(dag_node.parent( 0 ))\n \n util = OpenMaya.MScriptUtil()\n util.createFromDouble(0.0, 0.0, 0.0)\n pointeur = util.asDoublePtr()\n transform_node.getScale(pointeur)\n \n sx = util.getDoubleArrayItem(pointeur, 0)\n sy = util.getDoubleArrayItem(pointeur, 1)\n sz = util.getDoubleArrayItem(pointeur, 2)\n\n return sx, sy, sz",
"def Scale(*args, **kwargs):\n return _gdi_.GraphicsMatrix_Scale(*args, **kwargs)",
"def variance(self):\n return self.sigma",
"def variance(self):\n return (math.exp(self.sigma ** 2) - 1.0) \\\n * math.exp(2.0 * self.mu + self.sigma ** 2)",
"def covariance(data_matrix):\n return np.asmatrix(np.cov(data_matrix, rowvar=0))",
"def _likelihood_der2_scale(self, hyperparam):\n\n # Get eta\n eta = self._hyperparam_to_eta(hyperparam)\n\n # Set scale of the covariance object\n scale = self._hyperparam_to_scale(hyperparam[self.scale_index:])\n self.mixed_cor.set_scale(scale)\n\n # Initialize Hessian\n d2ell_dscale2 = numpy.zeros((scale.size, scale.size), dtype=float)\n\n # Update Y, C, Mz\n self._update_Y_C_Mz(hyperparam)\n\n # Find optimal sigma2\n sigma2 = self._find_optimal_sigma2(hyperparam)\n\n # Compute (or update) Kninv and KnpKninv\n if not self.stochastic_traceinv:\n self._update_Kninv_KnpKninv(hyperparam)\n\n # Knp is the derivative of mixed_cor (Kn) w.r.t p-th element of scale.\n for p in range(scale.size):\n\n KnpMz = self.mixed_cor.dot(self.Mz, eta=eta, derivative=[p])\n MKnpMz = self.M_dot(self.C, self.Y, eta, KnpMz)\n\n for q in range(scale.size):\n\n # 1. Compute zMKnqMKnpMz\n if p == q:\n KnqMz = KnpMz\n else:\n KnqMz = self.mixed_cor.dot(self.Mz, eta=eta,\n derivative=[q])\n zMKnqMKnpMz = numpy.dot(KnqMz, MKnpMz)\n\n # 2. Compute zMKnpqMz\n KnpqMz = self.mixed_cor.dot(self.Mz, eta=eta,\n derivative=[p, q])\n zMKnpqMz = numpy.dot(self.Mz, KnpqMz)\n\n # 3. Computing trace of Knpq * M in three steps\n\n # Compute the first component of trace of Knpq * Kninv\n Knpq = self.mixed_cor.get_matrix(eta, derivative=[p, q])\n if self.stochastic_traceinv:\n trace_KnpqKninv = self.mixed_cor.traceinv(\n eta, B=Knpq,\n imate_options={'method': 'hutchinson'})\n else:\n KnpqKninv = Knpq @ self.Kninv\n trace_KnpqKninv = imate.trace(KnpqKninv, method='exact')\n\n # Compute the second component of trace of Knpq * M\n KnpqY = self.mixed_cor.dot(self.Y, eta=eta, derivative=[p, q])\n YtKnpqY = numpy.matmul(self.Y.T, KnpqY)\n CYtKnpqY = numpy.matmul(self.C, YtKnpqY)\n trace_CYtKnpqY = numpy.trace(CYtKnpqY)\n\n # Compute trace of Knpq * M\n trace_KnpqM = trace_KnpqKninv - trace_CYtKnpqY\n\n # 4. Compute trace of Knp * M * Knq * M\n\n # Compute first part of trace of Knp * M * Knq * M\n Knp = self.mixed_cor.get_matrix(eta, derivative=[p])\n Knq = self.mixed_cor.get_matrix(eta, derivative=[q])\n if self.stochastic_traceinv:\n trace_KnpMKnqM_1 = self.mixed_cor.traceinv(\n eta, B=Knq, C=Knp,\n imate_options={'method': 'hutchinson'})\n else:\n KnpKninvKnqKninv = numpy.matmul(self.KnpKninv[p],\n self.KnpKninv[q])\n trace_KnpMKnqM_1 = imate.trace(KnpKninvKnqKninv,\n method='exact')\n\n # Compute the second part of trace of Knp * M * Knq * M\n KnpY = Knp @ self.Y\n if p == q:\n KnqY = KnpY\n else:\n KnqY = Knq @ self.Y\n KninvKnqY = self.mixed_cor.solve(KnqY, eta=eta)\n YtKnpKninvKnqY = numpy.matmul(KnpY.T, KninvKnqY)\n F21 = numpy.matmul(self.C, YtKnpKninvKnqY)\n F22 = numpy.matmul(self.C, YtKnpKninvKnqY.T)\n trace_KnpMKnqM_21 = numpy.trace(F21)\n trace_KnpMKnqM_22 = numpy.trace(F22)\n\n # Compute the third part of trace of Knp * M * Knq * M\n YtKnpY = numpy.matmul(self.Y.T, KnpY)\n if p == q:\n YtKnqY = YtKnpY\n else:\n YtKnqY = numpy.matmul(self.Y.T, KnqY)\n Dp = numpy.matmul(self.C, YtKnpY)\n if p == q:\n Dq = Dp\n else:\n Dq = numpy.matmul(self.C, YtKnqY)\n D = numpy.matmul(Dp, Dq)\n trace_KnpMKnqM_3 = numpy.trace(D)\n\n # Compute trace of Knp * M * Knq * M\n trace_KnpMKnqM = trace_KnpMKnqM_1 - trace_KnpMKnqM_21 - \\\n trace_KnpMKnqM_22 + trace_KnpMKnqM_3\n\n # 5. 
Second \"local\" derivatives w.r.t scale\n local_d2ell_dscale2 = -0.5*trace_KnpqM + 0.5*trace_KnpMKnqM + \\\n (0.5*zMKnpqMz - zMKnqMKnpMz) / sigma2\n\n # Computing total second derivative\n dp_log_sigma2 = -numpy.dot(self.Mz, KnpMz) / \\\n (self.rdof*sigma2)\n if p == q:\n dq_log_sigma2 = dp_log_sigma2\n else:\n dq_log_sigma2 = -numpy.dot(self.Mz, KnqMz) / \\\n (self.rdof*sigma2)\n d2ell_dscale2[p, q] = local_d2ell_dscale2 + \\\n 0.5 * self.rdof * dp_log_sigma2 * dq_log_sigma2\n\n if p != q:\n d2ell_dscale2[q, p] = d2ell_dscale2[p, q]\n\n return d2ell_dscale2",
"def var(self):\n\n return self.scale ** -2 \\\n * (m.gamma(1 + 2 * self.shape ** -1) - m.gamma(1 + self.shape ** -1) ** 2)",
"def scaling_matrix(sx, sy, sz):\n S = np.array([[sx, 0, 0, 0],\n [0, sy, 0, 0],\n [0, 0, sz, 0],\n [0, 0, 0, 1]])\n return S",
"def scale_axes(self, scale, axes=None):\n axes = self._get_axes_numbers(axes)\n scale = self._cook_args(scale, axes, [1., 1.])\n return Signal2D(self.values, index=self.index*scale[0], columns=self.columns*scale[1])",
"def reconstructions_variance(self):\n self.assert_sampled()\n return [[j.variance().numpy() for j in i] for i in self._reconstructions]",
"def __mul__(self, scale):\n return Vec(self.x * scale, self.y * scale)",
"def _likelihood_der1_scale(self, hyperparam):\n\n # Get eta\n eta = self._hyperparam_to_eta(hyperparam)\n\n # Set scale of the covariance object\n scale = self._hyperparam_to_scale(hyperparam[self.scale_index:])\n self.mixed_cor.set_scale(scale)\n\n # Initialize jacobian\n dell_dscale = numpy.zeros((scale.size, ), dtype=float)\n\n # Update Y, C, Mz\n self._update_Y_C_Mz(hyperparam)\n\n # Find optimal sigma2\n sigma2 = self._find_optimal_sigma2(hyperparam)\n\n # Compute (or update) Kninv and KnpKninv\n if not self.stochastic_traceinv:\n self._update_Kninv_KnpKninv(hyperparam)\n\n # Knp is the derivative of mixed_cor (Kn) w.r.t p-th element of scale.\n for p in range(scale.size):\n\n if self.stochastic_traceinv:\n # Compute traceinv using stochastic estimation method. Note\n # that since Knp is not positive-definite, we cannot use\n # Cholesky method in imate. The only viable option is\n # Hutchinson's method.\n Knp = self.mixed_cor.get_matrix(eta, derivative=[p])\n trace_KnpKninv = self.mixed_cor.traceinv(\n eta, B=Knp, imate_options={'method': 'hutchinson'})\n else:\n trace_KnpKninv = imate.trace(self.KnpKninv[p], method='exact')\n\n # Compute the second component of trace of Knp * M\n KnpY = self.mixed_cor.dot(self.Y, eta=eta, derivative=[p])\n YtKnpY = numpy.matmul(self.Y.T, KnpY)\n CYtKnpY = numpy.matmul(self.C, YtKnpY)\n trace_CYtKnpY = numpy.trace(CYtKnpY)\n\n # Compute trace of Knp * M\n trace_KnpM = trace_KnpKninv - trace_CYtKnpY\n\n # Compute zMKnpMz\n KnpMz = self.mixed_cor.dot(self.Mz, eta=eta, derivative=[p])\n zMKnpMz = numpy.dot(self.Mz, KnpMz)\n\n # Derivative of ell w.r.t p-th element of distance scale\n dell_dscale[p] = -0.5*trace_KnpM + 0.5*zMKnpMz / sigma2\n\n return dell_dscale",
"def scale(self):\n return self._gev_bijector.scale",
"def getCovarianceNoiseMatrix(self):\n return np.dot ( self.getB().T, self.getB() )",
"def cov_matrix(X, mu):\n m, n = X.shape\n X_minus_mu = X - mu\n sigma = (1 / m) * (X_minus_mu.T).dot(X_minus_mu)\n\n return sigma",
"def test_set_scale():\n data = io.create_sample_Dataset()\n tmp = data.piv.set_scale(1.0)\n assert np.allclose(tmp[\"x\"], data[\"x\"])\n\n tmp = data.copy()\n tmp.piv.set_scale(2.0)\n tmp_mean = tmp[\"u\"].mean(dim=(\"t\", \"x\", \"y\")).values\n data_mean = data[\"u\"].mean(dim=(\"t\", \"x\", \"y\")).values\n assert np.allclose(tmp_mean / data_mean, 2.0)",
"def calculate_covariance_matrix(X, Y=None):\n\tif Y is None:\n\t\tY = X\n\tn_samples = np.shape(X)[0]\n\tcovariance_matrix = (1 / (n_samples-1)) * (X - X.mean(axis=0)).T.dot(Y - Y.mean(axis=0))\n\treturn np.array(covariance_matrix, dtype=float)",
"def _getScalesDiag(self, termx=0):\n assert self.P > 1, 'VarianceDecomposition:: diagonal init_method allowed only for multi trait models'\n assert self.noisPos is not None, 'VarianceDecomposition:: noise term has to be set'\n assert termx < self.n_randEffs - 1, 'VarianceDecomposition:: termx>=n_randEffs-1'\n assert self.trait_covar_type[self.noisPos] not in [\n 'lowrank', 'block', 'fixed'\n ], 'VarianceDecomposition:: diagonal initializaiton not posible for such a parametrization'\n assert self.trait_covar_type[termx] not in [\n 'lowrank', 'block', 'fixed'\n ], 'VarianceDecimposition:: diagonal initializaiton not posible for such a parametrization'\n scales = []\n res = self._getH2singleTrait(self.vd.getTerm(termx).getK())\n scaleg = sp.sqrt(res['varg'].mean())\n scalen = sp.sqrt(res['varn'].mean())\n for term_i in range(self.n_randEffs):\n if term_i == termx:\n _scales = scaleg * self.diag[term_i]\n elif term_i == self.noisPos:\n _scales = scalen * self.diag[term_i]\n else:\n _scales = 0. * self.diag[term_i]\n if self.jitter[term_i] > 0:\n _scales = sp.concatenate(\n (_scales, sp.array([sp.sqrt(self.jitter[term_i])])))\n scales.append(_scales)\n return sp.concatenate(scales)",
"def getCovMatrix(self, caliStep, weights):\n\n Sigma = np.zeros([self.numObs, self.numObs])\n # scale observation data with normalized variance parameter to get covariance matrix\n for i in range(self.numObs):\n # use smaller weights for higher precision\n if self.scaleCovWithMax:\n Sigma[i, i] = self.sigma * weights[i] * max(self.obsData[:, i]) ** 2\n else:\n Sigma[i, i] = self.sigma * weights[i] * self.obsData[caliStep, i] ** 2\n return Sigma",
"def covariance_matrix(self):\n\n cov_filename = self.covariance_filename\n cov_press, cov_data = self._co_star_read(cov_filename)\n\n # \"Fix\" covariances that are not positive definite\n if not np.all(np.linalg.eigvals(cov_data) > 0):\n warnings.warn(\"Covariance matrix for species {} is not positive definite, modifying eigenvals\".format(self.species))\n\n # Get eigen values and vector from matrix\n eigval, eigvec = np.linalg.eig(cov_data)\n\n # Find negative eigen values and set to the media\n eigval[np.where(eigval < 0)] = np.median(eigval)\n\n # Reconstruct matrix with modified eigen values\n cov_data = eigvec @ np.diag(eigval) @ np.linalg.inv(eigvec)\n\n return cov_data",
"def get_scale(self):\r\n try: return self.scale[0], self.scale[1], self.scale[2]\r\n except: return self.scale, self.scale, self.scale",
"def svt(X, tau):\n U, S, Vt = la.svd(X,full_matrices=False)\n Xs = np.dot(U * st(S,tau), Vt)\n return Xs",
"def matrix_variance(matrix):\n return sum(map(lambda row:variance(row,correct=False),matrix))",
"def sigma(self) -> tp.Union[\"Array\", \"Scalar\"]:\n return self.parameters[\"sigma\"] # type: ignore",
"def estimateCovariance(df):\n import numpy as np\n m = df.select(df['scaledFeatures']).map(lambda x: x[0]).mean()\n dfZeroMean = df.select(df['scaledFeatures']).map(lambda x: x[0]).map(lambda x: x-m) # subtract the mean\n\n return dfZeroMean.map(lambda x: np.outer(x,x)).sum()/df.count()",
"def compute_variance(\n self,\n parameters: NDArray,\n resids: NDArray,\n sigma2: NDArray,\n backcast: Union[float, NDArray],\n var_bounds: NDArray,\n ) -> NDArray:",
"def svd(matrix):\n u = None\n s = None\n v = None\n ### YOUR CODE HERE\n (u,s,v)=np.linalg.svd(matrix)\n ### END YOUR CODE\n\n return u, s, v",
"def getCovarianceMatrix(self):\n #ypost = np.dot ( self.getA().T, self.priorX )\n\n theta = np.mat ( self.getA() )\n Xm = np.mat ( self.priorX )\n\n ypost = Xm * theta\n yprior = self.priorY\n error = ypost - yprior\n #error = error - np.mean ( error, axis = 0 )\n return np.dot ( error.T, error )",
"def _get_params(self):\r\n return np.hstack((self.variance, self.lengthscale))",
"def _get_params(self):\r\n return np.hstack((self.variance, self.lengthscale))",
"def _get_params(self):\r\n return np.hstack((self.variance,self.lengthscale))",
"def variance(values, weights=None, axis=0):\n \n average = np.average(values, weights=weights, axis=axis)\n variance = np.average((values-average)**2, weights=weights, axis=axis)\n return variance",
"def calculate_covariance_matrix(X, Y=None):\n if Y is None:\n Y = X\n n_samples = np.shape(X)[0]\n covariance_matrix = (1 / (n_samples - 1)) * (\n X - X.mean(axis=0)).T.dot(Y - Y.mean(axis=0))\n\n return np.array(covariance_matrix, dtype=float)",
"def get_scale(self):\n\n if not hasattr(self, \"scale\"):\n raise NotImplementedError(\"\"\"All end-use subclasses of Color should define\n a get_scale method.\"\"\")\n\n return self.scale",
"def variance(self):\n if self.dirty:\n self._finalize()\n return self.vvar",
"def svd(matrix):\n u = None\n s = None\n v = None\n ### YOUR CODE HERE\n pass\n ### END YOUR CODE\n\n return u, s, v",
"def variance(data):\n differences = data - np.mean(data)\n diff_sq = differences ** 2\n variance = np.mean(diff_sq)\n\n return variance",
"def variance(data):\n differences = data - np.mean(data)\n diff_sq = differences ** 2\n variance = np.mean(diff_sq)\n\n return variance",
"def variance(self):\n clean, total = self._prepare_for_stats()\n if not total:\n return None\n\n mean = self.mean()\n weighted_central_moment = sum(\n count * (value - mean) ** 2 for value, count in clean.items()\n )\n return weighted_central_moment / total",
"def col_scale(x, s):\r\n\r\n if x.format == 'csc':\r\n return ColScaleCSC()(x, s)\r\n elif x.format == 'csr':\r\n return RowScaleCSC()(x.T, s).T\r\n else:\r\n raise NotImplementedError()",
"def scale(self):\n return self.scale_factor / CONSTANTS.AU",
"def get_return_vol(data, scale=1, ret=False, plotit=False):\n from basic.mathe import covariance\n logger = logging.getLogger(__name__)\n if data.ndim==1:\n data = data.to_frame()\n if ret:\n rts = data\n else:\n rts = get_returns(data, 'simple')\n ret = rts.mean().values * scale\n vol = rts.apply(lambda x: np.sqrt(covariance(x) * scale)).values\n if plotit:\n from invest.plot import return_vol\n return_vol(ret, vol, data.columns)\n return pd.DataFrame({\"Return\":ret, \"Volatility\":vol}, index=data.columns)",
"def get_covariance(self):\n x = self.particles[:, 0]\n y = self.particles[:, 1]\n X = np.stack((x, y), axis=0)\n return np.cov(X)",
"def scale(self) -> Optional[pulumi.Input['ScaleArgs']]:\n return pulumi.get(self, \"scale\")",
"def _call_scale(vecObj, sc):\n res = vecObj.scale(sc)\n return res",
"def scale_data(x):\n mu = x.mean(axis=0)\n sigma = x.std(axis=0)\n x = (x - mu) / sigma\n return (x, mu, sigma)",
"def scaling(self):\n return self.stacked._box_scaling[1]",
"def GetUserScale(*args, **kwargs):\n return _gdi_.DC_GetUserScale(*args, **kwargs)",
"def svd(self):\n U, s, Vh = la.svd(self)\n S = np.zeros(self.shape)\n np.fill_diagonal(S, s)\n return (Matrix(U), Matrix(S), Matrix(Vh))",
"def pvi(inp, scale: int = 10):\n\n if len(inp.data.shape) == 1:\n data = inp.data[:, np.newaxis]\n else:\n data = inp.data\n\n delta_inp = np.abs((data[scale:, :] - data[:-scale, :]))\n delta_inp2 = np.sum(delta_inp ** 2, axis=1)\n sigma = np.mean(delta_inp2)\n result = np.array(delta_inp2 / sigma)\n\n time = inp.coords[inp.dims[0]].data\n\n result = xr.DataArray(result, coords=[time[0:len(delta_inp)]],\n dims=[inp.dims[0]], attrs=inp.attrs)\n\n result.attrs[\"units\"] = \"dimensionless\"\n\n return result",
"def calculate_sigma(self, params):\n ms = self.get_matrices(params)\n m_beta = ms['Beta']\n m_lambda = ms['Lambda']\n m_psi = ms['Psi']\n m_theta = ms['Theta']\n\n m_c = np.linalg.pinv(np.identity(m_beta.shape[0]) - m_beta)\n return m_lambda @ m_c @ m_psi @ m_c.T @ m_lambda.T + m_theta",
"def sigma_R(field, scale):\n field_filtered = filter_Field(field, tophat_kernel, (scale,))\n return field_filtered.t.std()",
"def FormCovarianceMatrix(mat):\n nPts = mat.shape[0]\n sumVect = sum(mat)\n sumVect /= float(nPts)\n for row in mat:\n row -= sumVect\n return numpy.dot(numpy.transpose(mat),mat)/(nPts-1)",
"def variance(self):\n sum_sqdif = 0 # initialize sum of squared differences\n # Calculate sum of squared differences\n for site in self.sites:\n sqdif = (site.siteZmArea - self.meanZmArea()) ** 2\n sum_sqdif = sqdif + sum_sqdif \n # Standard Deviation\n stddev = ((1 / ( float(self.ni) - 1 )) * sum_sqdif ) ** 0.5\n # Variance\n var = stddev ** 2\n return var",
"def svd(a, full_matrices=1, compute_uv=1):\n return SVD(full_matrices, compute_uv)(a)",
"def scale_rot_matrix(self, u, v):\n # signed angle old new vectors (rotation)\n a = self.signed_angle(u, v)\n # scale factor\n scale = v.length / u.length\n ca = scale * cos(a)\n sa = scale * sin(a)\n return scale, Matrix([\n [ca, -sa],\n [sa, ca]\n ])",
"def FormCovarianceMatrix(mat):\n nPts = mat.shape[0]\n sumVect = sum(mat)\n sumVect /= float(nPts)\n for row in mat:\n row -= sumVect\n return numpy.dot(numpy.transpose(mat), mat) / (nPts - 1)",
"def variance(self):\n raise RuntimeError(\"Needs to be implemented in base class\")",
"def test_vec3_scale(self):\n\n vec = Vec3(1, 2, 5)\n\n vec.scale(2)\n\n self.assertEqual(Vec3(2, 4, 10), vec)",
"def variance(self):\n return self.k * self.theta ** 2",
"def variance(data, m=None):\n n, ss = _SS(data, m)\n if n < 2:\n raise ValueError('sample variance or standard deviation'\n ' requires at least two data points')\n return ss/(n-1)",
"def scalers(self):\n sc = StandardScaler() if self.scm == 'ss' else MinMaxScaler()\n sc.fit(self.t)\n return pd.DataFrame(sc.transform(self.t), columns=self.t.columns.values), pd.DataFrame(sc.transform(self.v), columns=self.v.columns.values)",
"def scale(self):\n return self._a",
"def sqrtvariance(values, weights=None, axis=0):\n \n return np.sqrt(variance(values, weights=weights, axis=axis))",
"def explained_variance(returns, values):\n exp_var = 1 - torch.var(returns - values) / torch.var(returns)\n return exp_var.item()",
"def se(self):\n return np.sqrt(self.scaled_vcov().diagonal().T)",
"def variance(self):\n return self.properties.get('variance')",
"def scale(curve):\n return curve/rmsd(curve)",
"def convert_scaling_to_form_factors(qz, scale):\n apply_absorption_correction(qz, scale)\n apply_Lorentz_correction(qz, scale)\n for i in xrange(len(scale)):\n scale[i] = np.sign(scale[i]) * math.sqrt(abs(scale[i]))",
"def scale(self):\n return self.distribution.scale",
"def matrix(self, v1, v2, lengths):\n M = [[self.covariance(i, j, lengths) for j in v2] for i in v1]\n return array(M)",
"def scale(self) -> Tuple[float, float]:\n return self._scale",
"def matrix_std(data, axis=None):\n if axis not in [0, 1, None]:\n raise ValueError(\"Expected axis in [0, 1, None]. Got {}\".format(axis))\n index = None\n if isinstance(data, pd.DataFrame) and axis is not None:\n if axis == 1:\n index = data.index\n elif axis == 0:\n index = data.columns\n data = to_array_or_spmatrix(data)\n if sparse.issparse(data):\n if axis is None:\n if isinstance(data, (sparse.lil_matrix, sparse.dok_matrix)):\n data = data.tocoo()\n data_sq = data.copy()\n data_sq.data = data_sq.data**2\n variance = data_sq.mean() - data.mean() ** 2\n std = np.sqrt(variance)\n else:\n if axis == 0:\n data = data.tocsc()\n next_fn = data.getcol\n N = data.shape[1]\n elif axis == 1:\n data = data.tocsr()\n next_fn = data.getrow\n N = data.shape[0]\n std = []\n for i in range(N):\n col = next_fn(i)\n col_sq = col.copy()\n col_sq.data = col_sq.data**2\n variance = col_sq.mean() - col.mean() ** 2\n std.append(np.sqrt(variance))\n std = np.array(std)\n else:\n std = np.std(data, axis=axis)\n if index is not None:\n std = pd.Series(std, index=index, name=\"std\")\n return std",
"def colorscale(self):\n return self[\"colorscale\"]",
"def scale(self, scale):\n \n scale_matrix = wf.scaleMatrix(scale, self.width/2, self.height/2, 0)\n self.transform(scale_matrix)",
"def get_scale(self):\n\n scales = []\n for i in range(len(self.train_series)):\n series = self.train_series.iloc[i].values\n series = series[np.argmax(series != 0):]\n scale = ((series[1:] - series[:-1]) ** 2).mean()\n scales.append(scale)\n\n return np.array(scales)",
"def _get_va(self, dim):\n assert len(dim) == 2\n dim = N.array(dim)\n if dim.any() < 0 or dim.any() >= self.d:\n raise ValueError(\"dim elements should be between 0 and dimension\"\n \" of the mixture.\")\n\n if self.mode == 'diag':\n return self.va[:, dim]\n elif self.mode == 'full':\n ld = dim.size\n vaselid = N.empty((ld * self.k, ld), N.int)\n for i in range(self.k):\n vaselid[ld*i] = dim[0] + i * self.d\n vaselid[ld*i+1] = dim[1] + i * self.d\n vadid = N.empty((ld * self.k, ld), N.int)\n for i in range(self.k):\n vadid[ld*i] = dim\n vadid[ld*i+1] = dim\n return self.va[vaselid, vadid]\n else:\n raise ValueError(\"Unkown mode\")",
"def scale(self,s):\n return Vector(self.x * s, self.y * s, self.z * s)",
"def get_scale(self, fe_params, cov_re):\n\n try:\n cov_re_inv = np.linalg.inv(cov_re)\n except np.linalg.LinAlgError:\n cov_re_inv = None\n\n qf = 0.\n for k in range(self.n_groups):\n\n exog = self.exog_li[k]\n ex_r = self.exog_re_li[k]\n ex2_r = self.exog_re2_li[k]\n\n # The residuals\n expval = np.dot(exog, fe_params)\n resid = self.endog_li[k] - expval\n\n mat = _smw_solve(1., ex_r, ex2_r, cov_re, cov_re_inv,\n resid)\n qf += np.dot(resid, mat)\n\n if self.reml:\n qf /= (self.n_totobs - self.k_fe)\n else:\n qf /= self.n_totobs\n\n return qf",
"def scv(SP):\n scv = ((np.std(SP,axis=1)/np.mean(SP,axis=1)))\n return scv",
"def plot_variance(self, ax):\n sigma = self.sigma\n S = self.S\n\n ax.plot(sigma/S**2, 'ko-', label='variance', lw=2.0)\n ax.set_yscale('log')\n ax.set_title(r'Variance $\\sigma^2/\\sigma_i^2$')\n ax.set_xlabel(r'$i$')\n ax.grid()",
"def svd(self, X): # [5pts]\n N,D = X.shape[0],X.shape[1]\n if X.ndim == 3:\n U = np.zeros((N,N,3))\n S = np.zeros((min(N,D),3))\n V = np.zeros((D,D,3))\n for i in range(3):\n U_temp,S_temp,V_temp = np.linalg.svd(X[:,:,i],compute_uv=True, full_matrices=True,hermitian=False)\n U[:,:,i] = U_temp\n S[:,i] = S_temp\n V[:,:,i] = V_temp\n else:\n U,S,V = np.linalg.svd(X,compute_uv=True,full_matrices=True, hermitian=False)\n return U,S,V",
"def prior_variance(self):\n S = self.eval_S(self.kappa, self.sigma_f)\n variance = np.sum((self.eigenfunctions * S[None, :]).T *\n self.eigenfunctions.T, axis=0)\n return variance",
"def svd_shrink(X, tau):\n U,s,V = np.linalg.svd(X, full_matrices=False)\n return np.dot(U, np.dot(np.diag(shrink(s, tau)), V))",
"def GetScale(self):\n ...",
"def calculate_variance(X):\n\tmean = np.ones(np.shape(X)) * X.mean(0)\n\tn_samples = np.shape(X)[0]\n\tvariance = (1 / n_samples) * np.diag((X - mean).T.dot(X - mean))\n\treturn variance",
"def Covariance(m,bias= False, rowvar=True, inplace=False): \n \n if m.dim() > 2:\n raise ValueError('m has more than 2 dimensions')\n if m.dim() < 2:\n m = m.view(1, -1)\n if not rowvar and m.size(0) != 1:\n m = m.t()\n # m = m.type(torch.double) # uncomment this line if desired\n fact = 1.0 / (m.size(1) - 1) if not bias else 1.0 / (m.size(1))\n if inplace:\n m -= torch.mean(m, dim=1, keepdim=True)\n else:\n m = m - torch.mean(m, dim=1, keepdim=True)\n mt = m.t() # if complex: mt = m.t().conj()\n return fact * m.matmul(mt).squeeze()",
"def variance(self, mean=None):\n raise NotImplementedError",
"def _variance(self,gp):\r\n return self.variance"
] | [
"0.5861856",
"0.577266",
"0.56941545",
"0.5600745",
"0.55539244",
"0.5537247",
"0.55147076",
"0.5510998",
"0.5503762",
"0.5450632",
"0.5436815",
"0.54117376",
"0.53925276",
"0.5346251",
"0.5309275",
"0.5307099",
"0.5306794",
"0.530079",
"0.5273775",
"0.5265709",
"0.5201272",
"0.51850647",
"0.5166639",
"0.515987",
"0.5151642",
"0.5147851",
"0.5146864",
"0.5122595",
"0.51189536",
"0.5098389",
"0.50891024",
"0.5062753",
"0.50455475",
"0.504406",
"0.50343585",
"0.5028156",
"0.5011968",
"0.5005504",
"0.49964046",
"0.49698082",
"0.4969102",
"0.4969102",
"0.4961536",
"0.49593174",
"0.49516016",
"0.4945105",
"0.49430034",
"0.4926761",
"0.49177653",
"0.49177653",
"0.49004582",
"0.48944512",
"0.48884237",
"0.48781395",
"0.48665997",
"0.48328656",
"0.48248062",
"0.48195195",
"0.48144805",
"0.48104373",
"0.47963476",
"0.47916308",
"0.47898853",
"0.47892082",
"0.47875923",
"0.4777327",
"0.47740677",
"0.47610557",
"0.47597894",
"0.47480807",
"0.47473484",
"0.47419816",
"0.4741403",
"0.4735969",
"0.472875",
"0.47286898",
"0.47262716",
"0.47210896",
"0.4719955",
"0.47182447",
"0.47121924",
"0.4711147",
"0.47086933",
"0.4700676",
"0.46916452",
"0.46857134",
"0.46846732",
"0.4682897",
"0.46812496",
"0.46803725",
"0.467866",
"0.4673637",
"0.46636015",
"0.46635497",
"0.46609706",
"0.46606633",
"0.46565625",
"0.46539775",
"0.4653029",
"0.46514314",
"0.46480295"
] | 0.0 | -1 |
Compute a t-test for each linear hypothesis of the form Rb = q | def t_test(self, r_matrix, cov_p=None, scale=None, use_t=None):  # noqa:E501
from patsy import DesignInfo
names = self.model.data.param_names
LC = DesignInfo(names).linear_constraint(r_matrix)
r_matrix, q_matrix = LC.coefs, LC.constants
num_ttests = r_matrix.shape[0]
num_params = r_matrix.shape[1]
if (cov_p is None and self.normalized_cov_params is None and
not hasattr(self, 'cov_params_default')):
raise ValueError('Need covariance of parameters for computing '
'T statistics') # pragma: no cover
if num_params != self.params.shape[0]: # pragma: no cover
raise ValueError('r_matrix and params are not aligned')
if q_matrix is None:
q_matrix = np.zeros(num_ttests)
else:
q_matrix = np.asarray(q_matrix)
q_matrix = q_matrix.squeeze()
if q_matrix.size > 1:
if q_matrix.shape[0] != num_ttests: # pragma: no cover
raise ValueError("r_matrix and q_matrix must have the same "
"number of rows")
if use_t is None:
# switch to use_t false if undefined
use_t = (hasattr(self, 'use_t') and self.use_t)
tstat = _sd = None
_effect = np.dot(r_matrix, self.params)
# nan_dot multiplies with the convention nan * 0 = 0
cparams = self.cov_params(r_matrix=r_matrix, cov_p=cov_p)
# Perform the test
if num_ttests > 1:
_sd = np.sqrt(np.diag(cparams))
else:
_sd = np.sqrt(cparams)
tstat = (_effect - q_matrix) * recipr(_sd)
df_resid = getattr(self, 'df_resid_inference', self.df_resid)
if use_t:
return ContrastResults(effect=_effect, t=tstat, sd=_sd,
df_denom=df_resid)
else:
return ContrastResults(effect=_effect, statistic=tstat, sd=_sd,
df_denom=df_resid,
distribution='norm') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def t_tests(self):\n se = self.se()\n t = self._coef / se\n p = 2 * stats.distributions.t.sf(np.abs(t), self._rdf)\n return (t, p)",
"def _t_test(_sample_a, _sample_b):\n res = stats.ttest_ind(_sample_a, _sample_b, axis=0, equal_var=equal_var, nan_policy='propagate')\n print('Independent t-test\\nt-statistic: {}\\np-value: {}'.format(res[0], res[1]))\n print('-' * 10)",
"def evaluate_regression(x_test,t_test,basis,bias,w,degree=1,mu=None,s=1):\n \n phi = design_matrix(x_test,basis,degree,bias,mu,s)\n pred_test=phi@w\n # Measure root mean squared error on testing data.\n t_est = pred_test\n #print(\"deleteeeeeeeeeee\",t_est)\n #print(np.shape(t_est))\n err = np.sqrt((np.square(pred_test-t_test)).mean())\n \n \n\n return (t_est, err)",
"def _t_test_results(self):\n t, df, p = self.api.m.math_utils.welchs_t_test(\n self.lkgr.values, self.fkbr.values)\n lines = [\n 'LKGR values: %r' % self.lkgr.values,\n 'FKBR values: %r' % self.fkbr.values,\n 't-statistic: %r' % t,\n 'deg. of freedom: %r' % df,\n 'p-value: %r' % p,\n 'Confidence score: %r' % (100 * (1 - p))\n ]\n return '\\n'.join(lines)",
"def test_TRt(self):\n\n test_value = self.portfolio._get_total_portfolio[\n self.test_row_number]\n\n test_total = self.portfolio._df_total.iloc(axis=0)[\n self.test_row_number].values\n test_weights = self.weights.iloc(axis=0)[\n self.test_row_number].values\n calculated_value = sum(self.list_multiplication(test_total,\n test_weights))\n self.assertAlmostEqual(test_value, calculated_value)",
"def ttest():\n # open test results and perform regression analysis\n alphas = []\n betas = []\n iterations = {}\n with open(f\"Results/conclusion2.csv\") as f:\n csv_reader = csv.reader(f, delimiter=',')\n\n for run in csv_reader:\n max, max_i = get_max_run(run)\n if int(run[0]) not in iterations:\n iterations[int(run[0])] = {100 - int(run[1])-1: int(max)}\n else:\n iterations[int(run[0])][100 - int(run[1])-1] = int(max)\n\n for iteration in iterations:\n mono_levels = list(iterations[iteration].keys())\n pop_sizes = [iterations[iteration][i] for i in mono_levels]\n\n regress_result = regress(pop_sizes, mono_levels)\n alphas += [regress_result[1]]\n betas += [regress_result[0]]\n\n # plot scatter and regression line\n avg_alpha = sum(alphas)/len(alphas)\n avg_beta = sum(betas)/len(betas)\n stddev_beta = np.std(betas)\n vis.scatter_mono(iterations, avg_alpha, avg_beta)\n\n # perform t-test\n ttest_result = stats.ttest_ind(betas, [0 for i in betas], equal_var=True)\n t_stat = ttest_result[0]\n p_value = ttest_result[1]\n print(f'Results from t-test:')\n print(f'Avg beta: {avg_beta}, stddev beta: {stddev_beta}.')\n print(f't-stat: {t_stat}, p-value: {p_value}.')",
"def lttest_rel (a,b,printit=0,name1='Sample1',name2='Sample2',writemode='a'):\r\n if len(a)<>len(b):\r\n raise ValueError, 'Unequal length lists in ttest_rel.'\r\n x1 = mean(a)\r\n x2 = mean(b)\r\n v1 = var(a)\r\n v2 = var(b)\r\n n = len(a)\r\n cov = 0\r\n for i in range(len(a)):\r\n cov = cov + (a[i]-x1) * (b[i]-x2)\r\n df = n-1\r\n cov = cov / float(df)\r\n sd = math.sqrt((v1+v2 - 2.0*cov)/float(n))\r\n t = (x1-x2)/sd\r\n prob = betai(0.5*df,0.5,df/(df+t*t))\r\n\r\n if printit <> 0:\r\n statname = 'Related samples T-test.'\r\n outputpairedstats(printit,writemode,\r\n name1,n,x1,v1,min(a),max(a),\r\n name2,n,x2,v2,min(b),max(b),\r\n statname,t,prob)\r\n return t, prob",
"def ttest(x):\n from ..group.onesample import stat\n t = stat(x.T, id='student', axis=0)\n return np.squeeze(t)",
"def ttest(x, mu=0, alpha=0.05, is_bernoulli=False, two_sided=True, return_tuple=False):\n\n # Define test degrees of freedom\n if two_sided:\n quant_order = 1 - (alpha / 2)\n h0 = f'X_bar = {mu}'\n h1 = f'X_bar != {mu}'\n else:\n quant_order = 1 - alpha\n h0 = f'X_bar <= {mu}'\n h1 = f'X_bar > {mu}'\n\n # Input vector as array\n x = np.asarray(x)\n # Sample size\n n = len(x)\n\n # Empirical mean\n x_bar = x.mean()\n # s estimator (variance)\n if is_bernoulli:\n s2 = x_bar * (1 - x_bar)\n else:\n s2 = desc.var(x)\n\n # Degrees of freedom\n df = n - 1\n\n # T statistic\n t = (x_bar - mu) / (math.sqrt(s2 / n))\n if two_sided:\n t = math.fabs(t)\n # p and critical values\n p = 2.0 * (1.0 - scp.t.cdf(t, df=df))\n\n if n > 30:\n cv = scp.norm.ppf(quant_order)\n else:\n cv = scp.t.ppf(quant_order, df=df)\n\n _summ = test_summary(df=df, critical_value=cv, t_value=t,\n p_value=p,\n title='One Sample Student test',\n h0=h0, h1=h1,\n alpha=alpha)\n\n if return_tuple:\n return t, cv, p\n else:\n return _summ",
"def test_radiometer_eqn(Ns, T_sys):\n # boltzmann constant in m^2 kg s^-2 K^-1\n k_b = 1.3806503e-23 # ARP: defining constants should really occur outside of the function\n \n # define characteristic value for bandpass in Hz \n B = 1e6 \n \n # the standard deviation of the voltage noise\n sigma_V = np.sqrt(k_b*T_sys*B) \n \n sigma_T_measured = []\n sigma_T_theory = []\n for N in Ns:\n print N # ARP: I find users to be rather impatient unless you show that something is happening :)\n \n T_estimates = []\n N_mc = 10000 # ARP: maybe make this a parameter w/ default value?\n for i in range(N_mc):\n \n # draw N gaussian random variates with mean zero and std dev sigma_V\n voltage = norm.rvs(loc=0.0, scale=sigma_V, size=N)\n \n # calculate power as the voltage squared\n power = voltage**2\n \n # compute the product of B*T, bandpass times total integration time\n # N = 2*B*T\n BT = N/2. # ARP: this is fine, but does take me at my word that 2BT is how you count samples. Might be interesting to think of a way to show that N=2BT...\n \n # get temperature from power and store our measured system temp\n temp = power / (k_b*B)\n T_estimated = np.mean(temp)\n T_estimates.append(T_estimated)\n \n # compute the erorr on our measured system temp \n sigma_T_measured.append(np.std(T_estimates))\n \n # compute the true error from radiometer equation: sigma_T = T_sys / sqrt(BT)\n sigma_T_theory.append(T_sys / np.sqrt(BT))\n \n # plot the error on measured T as function of number of samples \n pl.plot(Ns, sigma_T_theory, c='k')\n pl.plot(Ns, sigma_T_measured, ls='', marker='o', c='b')\n # ARP: as per CLT, loglog is the only way to plot power laws\n # ARP: and separating plotting and calculation enhances reusability of code\n pl.xlabel('number of independent samples, N=2BT', fontsize=16)\n pl.ylabel(r'error in $T_\\mathrm{sys}$ estimation, $\\sigma_T$', fontsize=16)\n pl.title(r'Testing the radiometer equation, assuming $T_\\mathrm{sys}$ = %d K' %T_sys)\n pl.savefig('radiometer_sigmaT.png') # ARP: I guess I prefer showing the plot, rather than silently generating files.\n \n return",
"def test_TPt(self):\n\n test_value = self.portfolio.calculate_total_performance(\n *self.boarder)[self.test_row_number]\n calculated_value = self.manual_cumprod(\n self.portfolio._get_total_portfolio)\n self.assertAlmostEqual(test_value, calculated_value)",
"def calc_indttest_90(varx,vary):\n print('\\n>>> Using calc_ttest function!')\n \n ### Import modules\n import numpy as np\n import scipy.stats as sts\n \n ### 2-independent sample t-test\n stat,pvalue = sts.ttest_ind(varx,vary,nan_policy='omit')\n \n ### Significant at 90% confidence level\n pvalue[np.where(pvalue >= 0.1)] = np.nan\n pvalue[np.where(pvalue < 0.1)] = 1.\n pvalue[np.isnan(pvalue)] = 0.\n \n print('*Completed: Finished calc_ttest function!')\n return stat,pvalue",
"def ttest(\n data, dataLabel=None, paired=False, decimals=4,\n textline=False, units=None\n ):\n\n # test calling values\n if data is None or not isinstance(data, dict) or len(data.keys()) != 2:\n raise ValueError('RSTATS.ttest: data must be a dictionary'\n + ' with at exactly 2 keys'\n + '\\nUse KW (anova) for more than 2 groups')\n\n k = list(data.keys())\n g = {}\n n = {}\n gmean = {}\n gstd = {}\n\n g[1] = data[k[0]]\n g[2] = data[k[1]]\n n[1] = len(g[1])\n n[2] = len(g[2])\n # (w1, p1) = Stats.shapiro(g1, a=None, reta=False)\n # (w2, p2) = Stats.shapiro(g2, a=None, reta=False)\n # Tb, pb = Stats.bartlett(g1, g2) # do bartletss for equal variance\n equalVar = False\n\n if paired:\n print (len(g[1]), len(g[2]))\n (t, p) = Stats.ttest_rel(g[1], g[2])\n else:\n (t, p) = Stats.ttest_ind(g[1], g[2], equal_var=equalVar)\n gmean[1] = np.mean(g[1])\n gstd[1] = np.std(g[1], ddof=1)\n gmean[2] = np.mean(g[2])\n gstd[2] = np.std(g[2], ddof=1)\n # df = (tstd[k]**2/tN[k] + dstd[k]**2/dN[k])**2 / (( (tstd[k]**2 /\n # tN[k])**2 / (tN[k] - 1) ) + ( (dstd[k]**2 / dN[k])**2 / (tN[k] - 1) ) )\n df = ((gstd[1]**2/n[1] + gstd[2]**2/n[2])**2\n / (((gstd[1]**2 / n[1])**2 / (n[1] - 1)\n + ((gstd[2]**2 / n[2])**2 / (n[1] - 1))))\n )\n if dataLabel is not None:\n testtype = 'Independent'\n if paired:\n testtype = 'Paired'\n n = max([len(l) for l in k])\n print ('\\n%s\\n %s T-test, Welch correction' % (dataLabel, testtype))\n # if p1 < 0.05 and p2 < 0.05:\n # print(u' Both data sets appear normally distributed: Shapiro-Wilk Group 1 p = {:6.3f}, Group2 p = {:6.3f}'.format(p1, p2))\n # else:\n # print(u' ****At least one Data set is NOT normally distributed****\\n Shapiro-Wilk Group 1 p = {:6.3f}, Group2 p = {:6.3f}'.format(p1, p2))\n # print (u' (performing test anyway, as requested)')\n # if equalVar:\n # print(u' Variances are equivalent (Bartletts test, p = {:.3f})'.format(pb))\n # else:\n # print(u' Variances are unequal (Bartletts test, p = {:.3f}); not assuming equal variances'.format(pb))\n print(u' {:s}={:8.{pc}f} (SD {:.{pc}f}, N = {:d})'.\n format(k[0].rjust(n), gmean[1], gstd[1],\n len(g[1]), pc=decimals))\n print(u' {:s}={:8.{pc}f} (SD {:.{pc}f}, N = {:d})'.\n format(k[1].rjust(n), gmean[2], gstd[2],\n len(g[2]), pc=decimals))\n print(u' t({:6.2f})={:8.4f} p={:8.6f}\\n'.\n format(df, float(t), float(p)))\n # generate one line of text suitable for pasting into a paper\n if textline:\n if units is not None:\n units = ' ' + units\n else:\n units = ''\n fmtstring = u'{:s}: {:.{pc}f} (SD {:.{pc}f}, N={:d}){:s}; '\n print(u'(', end='')\n for s in range(1, 3):\n print(fmtstring.format(\n k[s-1], gmean[s], gstd[s], len(g[s]), units, \n pc=decimals), end='')\n print(u't{:.2f}={:.3f}, p={:s})\\n'.format(df, float(t), pformat(p)))\n\n return(df, float(t), float(p))",
"def eeg_twosample_ttest(array1,array2):\t\n\tfrom scipy.stats import ttest_rel\n\ts1 = array1.shape\n\tp = np.zeros(s1[1])\n\tt = np.zeros(s1[1])\n\tfor i in range(s1[1]):\n\t\ttval,pval = ttest_rel(array1[:,i],array2[:,i])\n\t\tp[i]=pval\n\t\tt[i]=tval\n\t\t\n\treturn t,p",
"def test_rr_se(results):\n truese = np.asarray([2.09826858, 30.60745128, 108.51947421, 0.95693751,\n 0.6564318])\n test_se = results.params_se()\n assert test_se == pytest.approx(truese)",
"def t_test_(x):\n assert np.ndim(x) == 1 and (not np.any(np.isnan(x)))\n\n if (len(x) <= 1) or (not np.all(np.isfinite(x))):\n return 1.0 # Can't say anything about scale => p=1\n\n _, pval = sst.ttest_1samp(x, 0.0)\n if np.isnan(pval):\n # Should only be possible if scale underflowed to zero:\n assert np.var(x, ddof=1) <= 1e-100\n # It is debatable if the condition should be ``np.mean(x) == 0.0`` or\n # ``np.all(x == 0.0)``. Should not matter in practice.\n pval = np.float(np.mean(x) == 0.0)\n assert 0.0 <= pval and pval <= 1.0\n return pval",
"def ttest_ind_corrected(performance_a, performance_b, k=10, r=10):\n df = k * r - 1\n\n x = performance_a - performance_b\n m = np.mean(x)\n\n sigma_2 = np.var(x, ddof=1)\n denom = np.sqrt((1 / k * r + 1 / (k - 1)) * sigma_2)\n\n with np.errstate(divide='ignore', invalid='ignore'):\n t = np.divide(m, denom)\n\n prob = stats.t.sf(np.abs(t), df) * 2\n\n return t, prob",
"def test_repeated_right_tailed(self):\n rng = np.random.default_rng(3571954324)\n\n passed = 0\n n_iter = 500\n for _ in range(n_iter):\n data1 = rng.normal(2, 1, 100)\n data2 = data1 + rng.normal(0, .02, 100)\n\n ttest = repeated_ttest(data1, data2, 'right')\n\n if ttest['P_value'] < .05:\n passed +=1\n\n self.assertAlmostEqual(passed / n_iter, .05, delta=.01)",
"def lttest_ind (a, b, printit=0, name1='Samp1', name2='Samp2', writemode='a'):\r\n x1 = mean(a)\r\n x2 = mean(b)\r\n v1 = stdev(a)**2\r\n v2 = stdev(b)**2\r\n n1 = len(a)\r\n n2 = len(b)\r\n df = n1+n2-2\r\n svar = ((n1-1)*v1+(n2-1)*v2)/float(df)\r\n t = (x1-x2)/math.sqrt(svar*(1.0/n1 + 1.0/n2))\r\n prob = betai(0.5*df,0.5,df/(df+t*t))\r\n\r\n if printit <> 0:\r\n statname = 'Independent samples T-test.'\r\n outputpairedstats(printit,writemode,\r\n name1,n1,x1,v1,min(a),max(a),\r\n name2,n2,x2,v2,min(b),max(b),\r\n statname,t,prob)\r\n return t,prob",
"def evaluate_regression(x, t, w, basis, degree):\n \t# TO DO:: Compute t_est and err \n #w_tranpose=w.T\n\n\n # My logic goes as follows:\n # Definition of test error is when you run the trained\n # model against a dataset that it hasn't been exposed to\n # this dataset is known as the testset \n\n # As such the basic algorithm goes as follows:\n # We do not need to recompute the weights but we need to recompute\n # phi for our test data\n\n # As such, we are interested in how well our trained weights\n # estimate against the test data so we matrix multiply our\n # weights against the phi from our test data\n # thus t_est = w_train.T*phi(x) since we want to know how well our\n # trained model estimates against the training data\n # but in implementation we do phi(x)*w_train\n # to match array dimensions \n\n\n #Compute design matrix from test data \n phi=design_matrix(x,basis,degree)\n phi_cross=np.linalg.pinv(phi)\n\n # Compute testing weights // just in case we require this variable\n #if(t is not None):\n #w_test=phi_cross.dot(t)\n #w_test=phi_cross.dot(t)\n\n # We want to be able to index into our target vector\n\n #t_est=phi.dot(w_test)\n #if (t is not None):\n # testing_estimate=phi.dot(w_test)\n #testing_estimate=phi.dot(w_test)\n\n # Estimate of our targets according to test data against learned \n # coefficients\n t_est=phi.dot(w)\n #print(\"t_est\",t_est)\n #t_est = None\n\n # We calculate the RMS error as follows\n # Take equation 3.12 of PRML and modify as follows\n # My logic:\n # The equation given in PRML gives the SSE (sum of squares error)\n # By definition the MSE (mean squared error) takes the SSE and divides \n # it by population size, we also preserve the 1/2 constant \n # throughout our calcuations \n # Afterwards we take our MSE and square root it.\n\n # Compute difference between target and estimate\n\n if(t is not None):\n \n diff=t-t_est\n # Square all observations\n diff_squared=np.power(diff,2)\n # Sum up all the observations in our vector\n sig_squared=diff_squared.sum()\n half_sig_squared=0.5*(sig_squared)\n # Calculate population size\n population_size=t.shape[0]\n rmse=np.sqrt(half_sig_squared/population_size)\n err=rmse\n else:\n err=None\n\n #diff=t-t_est\n\n\n # Square all observations \n #diff_squared=np.power(diff,2)\n\n # Sum up all the observations in our vector\n #sig_squared=diff_squared.sum()\n\n #half_sig_squared=0.5*(sig_squared)\n\n # Calculate population size\n #population_size=t.shape[0]\n\n #rmse=np.sqrt(half_sig_squared/population_size)\n #err = rmse\n #print(\"err inside function\",err)\n #err=rmse\n return (t_est, err)",
"def test_tau_score(sample_weight):\n np.testing.assert_almost_equal(\n tau_score(Y_true, Y_pred, sample_weight),\n _tau_score(Y_true, Y_pred, sample_weight))",
"def T(self, q = np.zeros(1) , dq = np.zeros(1) , ddq = np.zeros(1) , R = 1 ): \n \n F = self.F( q , dq , ddq )\n \n Tl = self.Tlosses( dq , ddq )\n \n T = np.dot( 1. / R , F ) + np.dot( R , Tl ) \n \n return T",
"def test_one_sample_right_tailed(self):\n rng = np.random.default_rng(615419864354)\n\n passed = 0\n n_iter = 500\n for _ in range(n_iter):\n data1 = rng.normal(12.2, 1, 100)\n\n ttest = one_sample_ttest(data1, 12.2, 'right')\n\n if ttest['P_value'] < .05:\n passed +=1\n\n self.assertAlmostEqual(passed / n_iter, .05, delta=.01)",
"def test_Rt(self):\n\n test_value = self.portfolio._get_asset_portfolio[self.test_row_number]\n\n test_prices = self.portfolio._df_asset.iloc(axis=0)[\n self.test_row_number].values\n test_weights = self.weights.iloc(axis=0)[\n self.test_row_number].values\n calculated_value = sum(self.list_multiplication(test_prices,\n test_weights))\n self.assertAlmostEqual(test_value, calculated_value)",
"def attest_rel (a,b,dimension=None,printit=0,name1='Samp1',name2='Samp2',writemode='a'):\r\n if dimension == None:\r\n a = N.ravel(a)\r\n b = N.ravel(b)\r\n dimension = 0\r\n if len(a)<>len(b):\r\n raise ValueError, 'Unequal length arrays.'\r\n x1 = amean(a,dimension)\r\n x2 = amean(b,dimension)\r\n v1 = avar(a,dimension)\r\n v2 = avar(b,dimension)\r\n n = a.shape[dimension]\r\n df = float(n-1)\r\n d = (a-b).astype('d')\r\n\r\n denom = N.sqrt((n*N.add.reduce(d*d,dimension) - N.add.reduce(d,dimension)**2) /df)\r\n zerodivproblem = N.equal(denom,0)\r\n denom = N.where(zerodivproblem,1,denom) # avoid zero-division in 1st place\r\n t = N.add.reduce(d,dimension) / denom # N-D COMPUTATION HERE!!!!!!\r\n t = N.where(zerodivproblem,1.0,t) # replace NaN/wrong t-values with 1.0\r\n probs = abetai(0.5*df,0.5,float(df)/(df+t*t))\r\n if type(t) == N.ndarray:\r\n probs = N.reshape(probs,t.shape)\r\n if probs.shape == (1,):\r\n probs = probs[0]\r\n\r\n if printit <> 0:\r\n statname = 'Related samples T-test.'\r\n outputpairedstats(printit,writemode,\r\n name1,n,x1,v1,N.minimum.reduce(N.ravel(a)),\r\n N.maximum.reduce(N.ravel(a)),\r\n name2,n,x2,v2,N.minimum.reduce(N.ravel(b)),\r\n N.maximum.reduce(N.ravel(b)),\r\n statname,t,probs)\r\n return\r\n return t, probs",
"def TestStatistic(self, x_b, x_t):\n # Instantiate NearestNeighbors class\n NN = NearestNeighborsClass(n_neighbors = self.K)\n \n # Build kd-trees with fixed K\n NN.fit(x_b, x_t)\n \n # Compute distances r_{j,B}, r_{j,T} of Kth-NN in B and T,\n # from x_j in Trial\n self.r_B, self.r_T, _ = NN.compute_distances(x_t)\n \n # Compute estimated density ratio on Trial points\n r_hat = np.power(np.divide(self.r_B, self.r_T), self.D) * (self.NB/float(self.NT-1))\n \n # Compute test statistic over Trial points\n TS = np.mean( np.log(r_hat) )\n \n return(TS)",
"def ttest(array1, array2):\n diff = np.mean(array1) - np.mean(array2)\n if diff < c.cart_p60:\n return c.low_score\n if array1.size <= 1 or array2.size <= 1:\n return min(diff, c.single_item_cart_max)\n return 1 - ttest_ind(array1, array2, equal_var=False).pvalue\n # return diff",
"def mm_lrt_test(y, K):\n lm = LinearModel(y)\n lmm = LinearMixedModel(y)\n lmm.add_random_effect(K)\n lmm_res = lmm.get_ML()\n ll0 = lm.get_ll()\n ll1 = lmm_res['max_ll']\n D = 2 * (ll1 - ll0)\n pval = stats.chi2.sf(D, 1)\n return {'pval':pval, 'lrt_stat':D}",
"def ttest_review(sample_1, sample_2, alpha=.05):\n\n result = stats.ttest_ind(sample_1, sample_2)\n crit_val, p_val = result\n \n ## Creating interpretation based on p-value results.\n\n if p_val < .05:\n print(f'The feature is statistically significant with a p-value of {p_val}.')\n\n else:\n print(f'The feature is not statistically significant with a p-value of {p_val}.')\n \n return p_val",
"def test_score_3():\n\n tpot_obj = TPOTRegressor(scoring='neg_mean_squared_error')\n tpot_obj._pbar = tqdm(total=1, disable=True)\n known_score = 8.9673743407873712 # Assumes use of mse\n # Reify pipeline with known score\n tpot_obj._optimized_pipeline = creator.Individual.\\\n from_string('ExtraTreesRegressor(GradientBoostingRegressor(input_matrix, 100.0, 0.11), 0.17999999999999999)', tpot_obj._pset)\n tpot_obj._fitted_pipeline = tpot_obj._toolbox.compile(expr=tpot_obj._optimized_pipeline)\n tpot_obj._fitted_pipeline.fit(training_features_r, training_classes_r)\n\n # Get score from TPOT\n score = tpot_obj.score(testing_features_r, testing_classes_r)\n\n # http://stackoverflow.com/questions/5595425/\n def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):\n return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)\n\n assert isclose(known_score, score)",
"def run_welchs_ttest(stat1, stat2, alpha, faster):\n m1 = stat1[MEAN]\n m2 = stat2[MEAN]\n\n s1 = stat1[STDDEV]\n s2 = stat2[STDDEV]\n\n n1 = stat1[ROUNDS]\n n2 = stat2[ROUNDS]\n\n df1 = n1 - 1 # degree of freedom of stat1\n df2 = n2 - 1 # degree of freedom of stat2\n\n sample_v1 = s1**2 / n1 # biased estimated sample variance of stat1\n sample_v2 = s2**2 / n2 # biased estimated sample variance of stat2\n\n biased_variance = np.sqrt(sample_v1 + sample_v2)\n # degree of freedom\n df = (sample_v1 + sample_v2) ** 2 / (\n sample_v1**2 / (df1) + sample_v2**2 / (df2)\n )\n\n mean_delta = m1 - m2\n t_stat = mean_delta / biased_variance\n\n if faster:\n # Null hypothesis is stat1 >= stat2.\n # Alternative hypothesis is stat1 < stat2.\n p_value = t.cdf(t_stat, df)\n\n # Compute one sided confidence interval (-inf, x)\n upper_bound = mean_delta + t.ppf(1.0 - alpha, df) * biased_variance\n upper_bound = format(upper_bound, \".5f\")\n lower_bound = \"-inf\"\n else:\n # Null hypothesis is stat1 <= stat2.\n # Alternative hypothesis is stat1 > stat2.\n p_value = 1.0 - t.cdf(t_stat, df)\n\n # Compute one sided confidence interval (x, inf)\n upper_bound = \"inf\"\n lower_bound = mean_delta + t.ppf(alpha, df) * biased_variance\n lower_bound = format(lower_bound, \".5f\")\n\n return TTestResult(\n p_value=p_value,\n t_stat=t_stat,\n lower_bound=lower_bound,\n upper_bound=upper_bound,\n mean_delta=format(mean_delta, \".5f\"),\n )",
"def test_error_at_95tpr(self):\r\n\r\n return self.test_error_at_confidence(self.confidence_at_tpr(0.95))",
"def t_test(result, reference):\n \n # Check that result and reference are 1D and that they have the same length\n \n print('\\nChecking that result and reference are 1D and that they have the same length\\n')\n \n if (len(result.shape) == 1) and (len(reference.shape) == 1):\n \n if len(result) == len(reference):\n \n print('Performing t test\\n')\n \n t_stat, p_value = scipy.stats.ttest_ind(result, reference)\n \n print('t test completed successfully!\\n')\n \n print('t statistic: {} // p value: {}'.format(t_stat, p_value))\n \n return t_stat, p_value\n \n else:\n \n print('Result and reference vectors do not have the same length. Please input them so that they have the same length')\n \n else:\n \n print('Result or reference vectors are not 1D. Please reformat them to be 1D')",
"def testTsysLLSp(self):\n self._runTest('tsys', True, self.tsys_funcs.keys(), 'linear,linear')",
"def test_q_hat(self):\n # Set weights and pack data into PSpecData\n self.ds = pspecdata.PSpecData(dsets=self.d, wgts=self.w)\n Nfreq = self.ds.Nfreqs\n Ntime = self.ds.Ntimes\n Ndlys = Nfreq - 3\n self.ds.spw_Ndlys = Ndlys\n\n\n # Set baselines to use for tests\n key1 = (0, 24, 38)\n key2 = (1, 25, 38)\n key3 = [(0, 24, 38), (0, 24, 38)]\n key4 = [(1, 25, 38), (1, 25, 38)]\n\n for input_data_weight in ['identity', 'iC', 'dayenu']:\n self.ds.set_weighting(input_data_weight)\n if input_data_weight == 'dayenu':\n pytest.raises(ValueError,self.ds.R, key1)\n rpk = {'filter_centers':[0.],'filter_half_widths':[0.],'filter_factors':[0.]}\n self.ds.set_r_param(key1,rpk)\n self.ds.set_r_param(key2,rpk)\n # Loop over list of taper functions\n for taper in taper_selection:\n self.ds.set_taper(taper)\n\n # Calculate q_hat for a pair of baselines and test output shape\n q_hat_a = self.ds.q_hat(key1, key2)\n self.assertEqual(q_hat_a.shape, (Ndlys, Ntime))\n\n\n # Check that swapping x_1 <-> x_2 results in complex conj. only\n q_hat_b = self.ds.q_hat(key2, key1)\n q_hat_diff = np.conjugate(q_hat_a) - q_hat_b\n for i in range(Ndlys):\n for j in range(Ntime):\n self.assertAlmostEqual(q_hat_diff[i,j].real,\n q_hat_diff[i,j].real)\n self.assertAlmostEqual(q_hat_diff[i,j].imag,\n q_hat_diff[i,j].imag)\n\n # Check that lists of keys are handled properly\n q_hat_aa = self.ds.q_hat(key1, key4) # q_hat(x1, x2+x2)\n q_hat_bb = self.ds.q_hat(key4, key1) # q_hat(x2+x2, x1)\n q_hat_cc = self.ds.q_hat(key3, key4) # q_hat(x1+x1, x2+x2)\n\n # Effectively checks that q_hat(2*x1, 2*x2) = 4*q_hat(x1, x2)\n for i in range(Ndlys):\n\n for j in range(Ntime):\n self.assertAlmostEqual(q_hat_a[i,j].real,\n 0.25 * q_hat_cc[i,j].real)\n self.assertAlmostEqual(q_hat_a[i,j].imag,\n 0.25 * q_hat_cc[i,j].imag)\n\n\n self.ds.spw_Ndlys = Nfreq\n # Check that the slow method is the same as the FFT method\n for input_data_weight in ['identity', 'iC', 'dayenu']:\n self.ds.set_weighting(input_data_weight)\n # Loop over list of taper functions\n for taper in taper_selection:\n\n self.ds.set_taper(taper)\n q_hat_a_slow = self.ds.q_hat(key1, key2, allow_fft=False)\n q_hat_a = self.ds.q_hat(key1, key2, allow_fft=True)\n self.assertTrue(np.isclose(np.real(q_hat_a/q_hat_a_slow), 1).all())\n self.assertTrue(np.isclose(np.imag(q_hat_a/q_hat_a_slow), 0, atol=1e-6).all())\n\n #Test if error is raised when one tried FFT approach on exact_norm\n pytest.raises(NotImplementedError, self.ds.q_hat, key1, key2, exact_norm=True, allow_fft = True)",
"def test_repeated_two_tailed(self):\n rng = np.random.default_rng(6464584234)\n\n passed = 0\n n_iter = 500\n for _ in range(n_iter):\n data1 = rng.normal(10, 2, 100)\n data2 = data1 + rng.normal(0, .02, 100)\n\n ttest = repeated_ttest(data1, data2)\n\n if ttest['P_value'] < .05:\n passed +=1\n\n self.assertAlmostEqual(passed / n_iter, .05, delta=.01)",
"def mk_test(input_data):\r\n\r\n\ttrend, h, p, z, Tau, s, var_s, slope, intercept = mk.original_test(input_data)\r\n\r\n\treturn trend, h, p, z, Tau, s, var_s, slope, intercept",
"def test_rr_z(results):\n truez = np.asarray([5.86702731, 0.26691899, 0.26663868,\n 5.36612236, 7.92390398])\n test_z = results.params_z()\n assert test_z == pytest.approx(truez)",
"def test_get_taylor(self, spec, order, expected):\n bcs = BoundaryConditions(spec, order)\n\n series = bcs.get_taylor(order=None)\n\n assert str(series) == expected",
"def calculate_t_test(mean1, mean2, var1, var2, n1, n2, alpha):\n # Two Sample T Test (M0 == M1) (Two Tails)\n t = (mean1 - mean2) / sqrt((var1 / n1) + (var2 / n2)) # t statistic calculation for two sample\n df = n1 + n2 - 2 # degree of freedom for two sample t - set\n pval = 1 - stats.t.sf(np.abs(t), df) * 2 # two-sided pvalue = Prob(abs(t)>tt) # p - value\n cv = stats.t.ppf(1 - (alpha / 2), df)\n standart_error = cv * sqrt((var1 / n1) + (var2 / n2))\n confidence_intervals = [abs(mean1 - mean2) - standart_error, abs(mean1 - mean2) + standart_error, standart_error]\n acception = 'HO REJECTED!' if pval < (alpha / 2) else 'HO ACCEPTED!' # left tail\n acception = 'HO REJECTED!' if pval > 1 - (alpha / 2) else 'HO ACCEPTED!' # right tail\n return pval, confidence_intervals, acception",
"def Statsmodels_TTest(results, Explanatory, NumDecimal):\n\n TTest = []\n for item in results.t_test(np.eye(len(results.params))).tvalue:\n TTest.append(ss.t.cdf(item, results.df_model))\n TTest = [str(round(item, NumDecimal)) for item in TTest]\n for item in range(0, len(Explanatory.columns)):\n TTest[item + 1] = str(TTest[item + 1]) + ' ' + str(Explanatory.columns[item])\n TTest[0] = str(TTest[0])\n TTest = ', '.join(TTest)\n\n return TTest",
"def t_test(dataType):\n\n\t# read the data\n\tparser = ExperimentUtils()\n\tdata = parser.parse_data(dataType)\n\n\tN = len(data.keys()) # number participants\n\n\t# - for trial 1 and trial 2:\n\t# \tL2 norm over each timestep, then sum all the values together\n\t# - average over two trials for each participant \n\ttask_avgs = {}\n\n\t# participant ID can take values 0 - 9\n\tfor ID in data.keys():\n\t\tfor task in data[ID]:\n\t\t\t# dont include the familiarization task (task can take values 1,2,3)\n\t\t\tif task != 0:\n\t\t\t\tif task not in task_avgs:\n\t\t\t\t\ttask_avgs[task] = {}\n\t\t\t\t\ttask_avgs[task][\"A\"] = np.array([0.0]*N)\n\t\t\t\t\ttask_avgs[task][\"B\"] = np.array([0.0]*N)\n\n\t\t\t\ttrialAsum = [0.0,0.0]\n\t\t\t\ttrialBsum = [0.0,0.0]\n\t\t\t\t# trial can take values 1 or 2\n\t\t\t\tfor trial in data[ID][task]:\n\t\t\t\t\t# only compute metrics over data, not timestamps\n\t\t\t\t\tAdata = data[ID][task][trial]['A'][1:8]\n\t\t\t\t\tBdata = data[ID][task][trial]['B'][1:8]\n\t\t\t\n\t\t\t\t\t#print str(ID)+str(task)+str(trial)+\"A\"\n\t\t\t\t\t#print \"Adata: \" + str(Adata)\n\t\t\t\t\t#print str(ID)+str(task)+str(trial)+\"B\"\n\t\t\t\t\t#print \"Bdata: \" + str(Bdata)\n\n\t\t\t\t\t(h, w) = np.shape(Adata)\n\t\t\t\t\tfor i in range(w):\n\t\t\t\t\t\ttrialAsum[trial-1] += np.linalg.norm(Adata[:,i])\n\t\t\t\t\t(h, w) = np.shape(Bdata)\n\t\t\t\t\tfor i in range(w):\n\t\t\t\t\t\ttrialBsum[trial-1] += np.linalg.norm(Bdata[:,i])\n\t\t\t\tavg_methodA = (trialAsum[0]+trialAsum[1])/2.0\n\t\t\t\tavg_methodB = (trialBsum[0]+trialBsum[1])/2.0\n\n\t\t\t\ttask_avgs[task][\"A\"][ID] = avg_methodA\n\t\t\t\ttask_avgs[task][\"B\"][ID] = avg_methodB\n\n\t# comput independent two-sample t-test \n\t# NOTE: we can assume that the two sample sizes are the same, and \n\t#\t\tthat the two distributions have the same variance\n\tfor task in range(1,4):\n\t\ttaskA = task_avgs[task][\"A\"]\n\t\ttaskB = task_avgs[task][\"B\"]\n\n\t\tmeanA = np.mean(taskA)\n\t\tmeanB = np.mean(taskB)\n\t\tprint \"meanA: \" + str(meanA)\n\t\tprint \"meanB: \" + str(meanB)\n\t\tdiff = meanA - meanB\n\t\tprint \"diff: \" + str(diff)\n\n\t\t(statistic, pvalue) = stats.ttest_ind(a=taskA, b=taskB, equal_var=True)\n\n\t\tprint \"\\n\"\n\t\tprint \"task\"+str(task)+\" statistic: \" + str(statistic)\n\t\tprint \"task\"+str(task)+\" pvalue: \" + str(pvalue)",
"def test_error_at_98tpr(self):\r\n\r\n return self.test_error_at_confidence(self.confidence_at_tpr(0.98))",
"def test_f_two_sample(self):\r\n\r\n # The expected values in this test are obtained through R.\r\n # In R the F test is var.test(x,y) different alternative hypotheses\r\n # can be specified (two sided, less, or greater).\r\n # The vectors are random samples from a particular normal distribution\r\n #(mean and sd specified).\r\n\r\n # a: 50 elem, mean=0 sd=1\r\n a = [-0.70701689, -1.24788845, -1.65516470, 0.10443876, -0.48526915,\r\n -0.71820656, -1.02603596, 0.03975982, -2.23404324, -0.21509363,\r\n 0.08438468, -0.01970062, -0.67907971, -0.89853667, 1.11137131,\r\n 0.05960496, -1.51172084, -0.79733957, -1.60040659, 0.80530639,\r\n -0.81715836, -0.69233474, 0.95750665, 0.99576429, -1.61340216,\r\n -0.43572590, -1.50862327, 0.92847551, -0.68382338, -1.12523522,\r\n -0.09147488, 0.66756023, -0.87277588, -1.36539039, -0.11748707,\r\n -1.63632578, -0.31343078, -0.28176086, 0.33854483, -0.51785630,\r\n 2.25360559, -0.80761191, 1.18983499, 0.57080342, -1.44601700,\r\n -0.53906955, -0.01975266, -1.37147915, -0.31537616, 0.26877544]\r\n\r\n # b: 50 elem, mean=0, sd=1.2\r\n b = [\r\n 0.081418743, 0.276571612, -\r\n 1.864316504, 0.675213612, -0.769202643,\r\n 0.140372825, -1.426250184, 0.058617884, -\r\n 0.819287409, -0.007701916,\r\n -0.782722020, -\r\n 0.285891593, 0.661980419, 0.383225191, 0.622444946,\r\n -0.192446150, 0.297150571, 0.408896059, -\r\n 0.167359383, -0.552381362,\r\n 0.982168338, 1.439730446, 1.967616101, -\r\n 0.579607307, 1.095590943,\r\n 0.240591302, -1.566937143, -\r\n 0.199091349, -1.232983905, 0.362378169,\r\n 1.166061081, -0.604676222, -\r\n 0.536560206, -0.303117595, 1.519222792,\r\n -0.319146503, 2.206220810, -\r\n 0.566351124, -0.720397392, -0.452001377,\r\n 0.250890097, 0.320685395, -\r\n 1.014632725, -3.010346273, -1.703955054,\r\n 0.592587381, -1.237451255, 0.172243366, -0.452641122, -0.982148581]\r\n\r\n # c: 60 elem, mean=5, sd=1\r\n c = [4.654329, 5.242129, 6.272640, 5.781779, 4.391241, 3.800752,\r\n 4.559463, 4.318922, 3.243020, 5.121280, 4.126385, 5.541131,\r\n 4.777480, 5.646913, 6.972584, 3.817172, 6.128700, 4.731467,\r\n 6.762068, 5.082983, 5.298511, 5.491125, 4.532369, 4.265552,\r\n 5.697317, 5.509730, 2.935704, 4.507456, 3.786794, 5.548383,\r\n 3.674487, 5.536556, 5.297847, 2.439642, 4.759836, 5.114649,\r\n 5.986774, 4.517485, 4.579208, 4.579374, 2.502890, 5.190955,\r\n 5.983194, 6.766645, 4.905079, 4.214273, 3.950364, 6.262393,\r\n 8.122084, 6.330007, 4.767943, 5.194029, 3.503136, 6.039079,\r\n 4.485647, 6.116235, 6.302268, 3.596693, 5.743316, 6.860152]\r\n\r\n # d: 30 elem, mean=0, sd =0.05\r\n d = [\r\n 0.104517366, 0.023039678, 0.005579091, 0.052928250, 0.020724823,\r\n -0.060823243, -0.019000890, -\r\n 0.064133996, -0.016321594, -0.008898334,\r\n -0.027626992, -0.051946186, 0.085269587, -\r\n 0.031190678, 0.065172938,\r\n -0.054628573, 0.019257306, -\r\n 0.032427056, -0.058767356, 0.030927400,\r\n 0.052247357, -\r\n 0.042954937, 0.031842104, 0.094130522, -0.024828465,\r\n 0.011320453, -0.016195062, 0.015631245, -0.050335598, -0.031658335]\r\n\r\n a, b, c, d = map(array, [a, b, c, d])\r\n self.assertEqual(map(len, [a, b, c, d]), [50, 50, 60, 30])\r\n\r\n # allowed error. 
This big, because results from R\r\n # are rounded at 4 decimals\r\n error = 1e-4\r\n\r\n self.assertFloatEqual(f_two_sample(a, a), (49, 49, 1, 1), eps=error)\r\n self.assertFloatEqual(f_two_sample(a, b), (49, 49, 0.8575, 0.5925),\r\n eps=error)\r\n self.assertFloatEqual(f_two_sample(b, a), (49, 49, 1.1662, 0.5925),\r\n eps=error)\r\n self.assertFloatEqual(f_two_sample(a, b, tails='low'),\r\n (49, 49, 0.8575, 0.2963), eps=error)\r\n self.assertFloatEqual(f_two_sample(a, b, tails='high'),\r\n (49, 49, 0.8575, 0.7037), eps=error)\r\n self.assertFloatEqual(f_two_sample(a, c),\r\n (49, 59, 0.6587, 0.1345), eps=error)\r\n # p value very small, so first check df's and F value\r\n self.assertFloatEqualAbs(f_two_sample(d, a, tails='low')[0:3],\r\n (29, 49, 0.0028), eps=error)\r\n assert f_two_sample(d, a, tails='low')[3] < 2.2e-16 # p value\r",
"def test_trid(self):\n fun = get_problem('trid', dimension=2)\n self.assertAlmostEqual(fun(np.array([2.0, 2.0])), -2.0)",
"def Tlosses(self, q = np.zeros(2) , dq = np.zeros(2) , ddq = np.zeros(2) ): \n \n J_a = self.jacobian_actuators( q )\n dJ_a = self.jacobian_actuators_diff( q , dq )\n \n T = np.dot( J_a , np.dot( self.Ia , ddq ) + np.dot( self.Da , dq ) ) + np.dot( dJ_a , np.dot( self.Ia , dq ) )\n \n return T",
"def Tlosses(self, q = np.zeros(2) , dq = np.zeros(2) , ddq = np.zeros(2) ): \n \n J_a = self.jacobian_actuators( q )\n dJ_a = self.jacobian_actuators_diff( q , dq )\n \n T = np.dot( J_a , np.dot( self.Ia , ddq ) + np.dot( self.Da , dq ) ) + np.dot( dJ_a , np.dot( self.Ia , dq ) )\n \n return T",
"def test_tma(self):\n periods = 200\n tma = qufilab.tma(self.close, periods)\n tma_talib = talib.TRIMA(self.close, periods)\n np.testing.assert_allclose(tma, tma_talib, rtol = self.tolerance)",
"def test_unequal_variance_right_tailed(self):\n rng = np.random.default_rng(887943278)\n\n passed = 0\n n_iter = 500\n for _ in range(n_iter):\n data1 = rng.normal(2, 1, 100)\n data2 = rng.normal(2, 1, 100)\n\n ttest = unequal_variance_ttest(data1, data2, 'right')\n\n if ttest['P_value'] < .05:\n passed +=1\n\n self.assertAlmostEqual(passed / n_iter, .05, delta=.01)",
"def test_single_linear_regression_coefficients(single_linear_regression_model):\n print(single_linear_regression_model)\n expected_coefficients = [(0, 151.27), (1, 303.90)]\n no_of_betas = len(single_linear_regression_model.B)\n for n in range(no_of_betas):\n assert single_linear_regression_model.B[n] == pytest.approx(\n expected_coefficients[n][1], 0.001\n )",
"def Y(t, p, q):\n \n if t <= 0:\n return float('inf')\n \n if q == 1:\n return (t**(p+1) - 1) / (p * (p+1)) - np.log(t) / q + (p - 1) / p * (t-1)\n else:\n return (t**(p+1) - 1) / (p * (p+1)) + (t**(1-q) - 1) / (q*(q-1)) + (p - q) / (p * q) * (t-1)",
"def league_ttest(df_league_one: pd.DataFrame, df_league_two: pd.DataFrame, parameter: str, alpha: float, ):\n assert isinstance(df_league_one, pd.DataFrame), 'df_league_one needs to be a pandas dataframe.'\n assert isinstance(df_league_two, pd.DataFrame), 'df_league_two needs to be a pandas dataframe.'\n assert isinstance(alpha, float), 'alpha needs to be a float.'\n\n\n df_league_one_mean = df_league_one.mean()\n n = len(df_league_one['club'])\n df = n-1\n t_critical = stats.t.ppf(1-alpha, df)\n leagues_ttest = stats.ttest_1samp(a= df_league_two[f'{parameter}'], popmean= df_league_one_mean)\n t_value = leagues_ttest[0]\n p_value = leagues_ttest[1]\n\n stats_values = {}\n\n stats_values['p_value'] = round(list(p_value)[0], 4)\n\n if stats_values['p_value'] < alpha:\n return ('Enough evidence to reject null hypothesis')\n elif stats_values['p_value'] > alpha:\n return ('Not enough evidence to reject null hypothesis')",
"def test_error_at_99tpr(self):\r\n\r\n return self.test_error_at_confidence(self.confidence_at_tpr(0.99))",
"def test_onesample_two_tailed(self):\n rng = np.random.default_rng(13489132474)\n\n passed = 0\n n_iter = 500\n for _ in range(n_iter):\n data1 = rng.normal(-5, 2, 100)\n\n ttest = one_sample_ttest(data1, -5)\n\n if ttest['P_value'] < .05:\n passed +=1\n\n self.assertAlmostEqual(passed / n_iter, .05, delta=.01)",
"def test_stat(df,ivar,tvar,equal_var=True,ddof=0):\n ivar_uniques = df[ivar].unique().shape[0]\n tvar_uniques = df[tvar].unique().shape[0]\n if tvar_uniques < 2:\n print \"Only one sample can be generated\"\n return None\n if ivar_uniques <= 10: #This the case of a categorical independant variable. We use chisquare\n ss = pd.crosstab(df[ivar],df[tvar])\n ss = (ss.T/ss.sum(axis=1)).T\n s0,s1 = ss[0].values,ss[1].values\n\n return chisquare(s1,s0,ddof=ddof)\n\n if ivar_uniques >10: #Consider using ttest\n s0 = df[ivar][df[tvar] == 0]\n s1 = df[ivar][df[tvar] == 1]\n return ttest_ind(s1,s0,equal_var=equal_var)",
"def test_T0():",
"def t_test(sample1, sample2, paired=False, alpha=0.05,\n alternative='two-sided', correction='auto', r=0.707,\n show_graph=True, **kwargs):\n confidence = 1 - alpha\n df_result = pg.ttest(\n sample1,\n sample2,\n paired=paired,\n confidence=confidence,\n alternative=alternative,\n correction=correction,\n r=r\n )\n if show_graph:\n if paired:\n difference = [x - y for x, y in zip(sample1, sample2)]\n Visualization.histogram(difference, **kwargs)\n else:\n Visualization.density_plot(sample1, sample2,\n fig_size=(5, 4), **kwargs)\n return HypothesisTester.define_hypothesis(df_result, 'mean',\n alternative, paired,\n alpha).T",
"def two_tailed_t_test(samples: np.ndarray, H0: float):\n empirical_mean = np.mean(samples, axis=0)\n number_samples = samples.shape[0]\n standard_error = np.std(samples, ddof=1, axis=0) / np.sqrt(number_samples)\n t_value = (empirical_mean - H0) / standard_error\n p_value = 2.0 * (1.0 - t(df=number_samples - 1).cdf(np.abs(t_value)))\n return t_value, p_value",
"def test_rr_ci(results):\n trueci = np.asarray([[8.19806824, -51.8197922, -183.75877191, 3.25948071,\n 3.91491988],\n [16.42312995, 68.15921213, 241.62975025, 7.01060682,\n 6.48808526]])\n test_ci = results.params_ci()\n assert test_ci == pytest.approx(trueci)",
"def Yp(t, p, q):\n \n return (t**p - 1) / p + (1-t**(-q)) / q",
"def realtest(r,stds,mean_returns,covar,start2,end2,tickers):\n \n # Get allocation weights of minimum variance portfolio for r during model time period\n weights = calc_min_variance_portfolio(mean_returns,covar,r)\n \n # Get data during backtesting time period\n from find_optimized_portfolio import process_data\n stds_new,mean_returns_new,covar_new,tickers,start2,end2=process_data(start2,end2,tickers)\n \n # Dot backtesting period's returns with model's weights to get the portfolio's return\n # during backtest period\n realtest_r = weights @ mean_returns_new\n # Get stdev during backtest period\n realtest_std=calc_portfolio_stdev(covar_new,weights)\n\n return realtest_r,realtest_std, weights, tickers, start2, end2",
"def test_T3():",
"def test_T3():",
"def eval_test(self, rng_key, svi_state):\n def body_fn(i, loss_sum):\n rng_key_i = random.fold_in(rng_key, i) \n rng_key_i, rng_key_ls, rng_key_var, rng_key_sigma = random.split(rng_key_i, 4)\n \n length_i = numpyro.sample(\"length\", dist.InverseGamma(1,.1), rng_key=rng_key_ls)\n var_i = numpyro.sample(\"var\", dist.LogNormal(0,0.1), rng_key=rng_key_var)\n sigma_i = numpyro.sample(\"noise\", dist.HalfNormal(0.1), rng_key=rng_key_sigma)\n \n batch = self.gp_predictive(rng_key_i, self.x\n , ls=length_i, var=var_i, sigma=sigma_i\n )\n\n loss = self.svi.evaluate(svi_state, batch['y']) / self.batch_size\n loss_sum += loss\n return loss_sum\n\n loss = lax.fori_loop(0, self.num_test, body_fn, 0.0)\n loss = loss / self.num_test\n\n return loss",
"def lttest_1samp(a,popmean,printit=0,name='Sample',writemode='a'):\r\n x = mean(a)\r\n v = var(a)\r\n n = len(a)\r\n df = n-1\r\n svar = ((n-1)*v)/float(df)\r\n t = (x-popmean)/math.sqrt(svar*(1.0/n))\r\n prob = betai(0.5*df,0.5,float(df)/(df+t*t))\r\n\r\n if printit <> 0:\r\n statname = 'Single-sample T-test.'\r\n outputpairedstats(printit,writemode,\r\n 'Population','--',popmean,0,0,0,\r\n name,n,x,v,min(a),max(a),\r\n statname,t,prob)\r\n return t,prob",
"def askewtest(a,dimension=None):\r\n if dimension == None:\r\n a = N.ravel(a)\r\n dimension = 0\r\n b2 = askew(a,dimension)\r\n n = float(a.shape[dimension])\r\n y = b2 * N.sqrt(((n+1)*(n+3)) / (6.0*(n-2)) )\r\n beta2 = ( 3.0*(n*n+27*n-70)*(n+1)*(n+3) ) / ( (n-2.0)*(n+5)*(n+7)*(n+9) )\r\n W2 = -1 + N.sqrt(2*(beta2-1))\r\n delta = 1/N.sqrt(N.log(N.sqrt(W2)))\r\n alpha = N.sqrt(2/(W2-1))\r\n y = N.where(y==0,1,y)\r\n Z = delta*N.log(y/alpha + N.sqrt((y/alpha)**2+1))\r\n return Z, (1.0-zprob(Z))*2",
"def test_taylor(self, order, expected):\n x = sp.symbols('x')\n series = taylor(x, order)\n assert str(series) == expected",
"def test_trotter_hamiltonian_three_qubit_term(backend):\n from scipy.linalg import expm\n from qibo.core.terms import HamiltonianTerm\n m1 = random_hermitian(3)\n m2 = random_hermitian(2)\n m3 = random_hermitian(1)\n\n terms = [HamiltonianTerm(m1, 0, 1, 2), HamiltonianTerm(m2, 2, 3),\n HamiltonianTerm(m3, 1)]\n ham = hamiltonians.SymbolicHamiltonian()\n ham.terms = terms\n\n # Test that the `TrotterHamiltonian` dense matrix is correct\n eye = np.eye(2, dtype=m1.dtype)\n mm1 = np.kron(m1, eye)\n mm2 = np.kron(np.kron(eye, eye), m2)\n mm3 = np.kron(np.kron(eye, m3), np.kron(eye, eye))\n target_ham = hamiltonians.Hamiltonian(4, mm1 + mm2 + mm3)\n K.assert_allclose(ham.matrix, target_ham.matrix)\n\n dt = 1e-2\n initial_state = random_state(4)\n if K.op is not None:\n with pytest.raises(NotImplementedError):\n circuit = ham.circuit(dt=dt)\n else:\n circuit = ham.circuit(dt=dt)\n final_state = circuit(np.copy(initial_state))\n u = [expm(-0.5j * dt * (mm1 + mm3)), expm(-0.5j * dt * mm2)]\n target_state = u[1].dot(u[0].dot(initial_state))\n target_state = u[0].dot(u[1].dot(target_state))\n K.assert_allclose(final_state, target_state)",
"def qrst_tm(x):\n return 0.2228*x - 0.6685",
"def test_rr_testeffect(results):\n test_t0 = results.test_effect()\n test_t1 = results.test_effect(0.)\n test_t2 = results.test_effect(5.2)\n assert test_t0 == pytest.approx(1.1920928955078125e-07)\n assert test_t1 == pytest.approx(1.1920928955078125e-07)\n assert test_t2 == 1.0",
"def test_t3(self):\n periods = 200\n t3 = qufilab.t3(self.close, periods)\n t3_talib = talib.T3(self.close, periods)\n np.testing.assert_allclose(t3, t3_talib, rtol = self.tolerance)",
"def test_single_linear_regression_r_squared(single_linear_regression_model):\n # Train Data\n train_r_squared = single_linear_regression_model.calculate_r_squared(\n single_linear_regression_model.predictor_vars_train,\n single_linear_regression_model.response_var_train[:, 0],\n )\n\n test_r_squared = single_linear_regression_model.calculate_r_squared(\n single_linear_regression_model.predictor_vars_test,\n single_linear_regression_model.response_var_test[:, 0],\n )\n\n assert pytest.approx(train_r_squared, 0.001) == 1\n assert pytest.approx(test_r_squared, 0.001) == 1",
"def test_var_expectation_values(self, approx_order, strategy, tol):\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n x = 0.543\r\n y = -0.654\r\n\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RX(x, wires=[0])\r\n qml.RY(y, wires=[1])\r\n qml.CNOT(wires=[0, 1])\r\n qml.expval(qml.PauliZ(0))\r\n qml.var(qml.PauliX(1))\r\n\r\n tapes, fn = finite_diff(tape, approx_order=approx_order, strategy=strategy)\r\n res = fn(dev.batch_execute(tapes))\r\n assert res.shape == (2, 2)\r\n\r\n expected = np.array([[-np.sin(x), 0], [0, -2 * np.cos(y) * np.sin(y)]])\r\n assert np.allclose(res, expected, atol=tol, rtol=0)",
"def test_voigtfit():\n import os.path\n\n fn = os.path.join(os.path.dirname(__file__), \"example_civ_tau.npz\")\n taus = np.load(fn)[\"arr_0\"]\n for tau in taus:\n assert np.shape(tau) == (473,)\n prof = voigtfit.Profiles(tau,5.0103430332365999,elem=\"C\",ion=4,line=1548)\n prof.do_fit()\n (ll, tfit) = prof.get_fitted_profile()\n #Check the fit is reasonable\n assert np.sum((tfit - tau)**2/(tau+0.5)**2)/np.size(tfit) < 0.05",
"def test_t(self):\n assert np.isclose(self.stepper.t, self.final_t)",
"def t(l3,Ei,Et,Et_axis):\n Ef=Ei-Et\n T=(-(l3/vFrmE(Ef))+(l3/np.sqrt(vFrmE(Ei)**2-vsq_from_E(Et_axis))))*1e6\n return (T)",
"def test_parse_trflp(self):\r\n\r\n data = \\\r\n \"\"\"\tBin (10bp)\tBin (20bp)\tBin (30bp)\tBin (40 bp)\r\nSamp-le 1\t1000\t2000\t3000\t4000\r\nSample 2\t\t2000\t3000\t4000\r\nSample 3\t\t\t3000\t4000\r\nSample 4\t\t\t\t4000\r\nSample 5\t25\t\t\t\"\"\"\r\n samples, otus, data = parse_trflp(data.split('\\n'))\r\n\r\n samples_exp = [\r\n 'Samp.le.1',\r\n 'Sample.2',\r\n 'Sample.3',\r\n 'Sample.4',\r\n 'Sample.5']\r\n otus_exp = ['Bin__10bp_', 'Bin__20bp_', 'Bin__30bp_', 'Bin__40_bp_']\r\n data_exp = array([[1000, 0, 0, 0, 25],\r\n [2000, 2000, 0, 0, 0],\r\n [3000, 3000, 3000, 0, 0],\r\n [4000, 4000, 4000, 4000, 0]])\r\n\r\n self.assertEqual(samples, samples_exp)\r\n self.assertEqual(otus, otus_exp)\r\n assert_almost_equal(data, data_exp)",
"def test_tlri_metric():\n\n manager = simulation_manager_setup()\n subject = observer.MetricsSubject()\n\n tlri_observer = node_metrics.TLRI()\n subject.attach(tlri_observer)\n\n manager.simulate(subject)\n tlri = tlri_observer.get_metric()\n assert tlri",
"def t(o, r):\n return (r/o)**2",
"def test_TP_twophase(self):\n with pytest.raises(StateError):\n State(substance=\"water\", T=Q_(373.1242958476844, \"K\"), p=Q_(101325.0, \"Pa\"))",
"def quasi_optimalityTV(f, lam_init = 2.0, q = 0.9):\n \n lam = lam_init\n max_iter = 50\n error = np.zeros(max_iter)\n #alt_error = np.zeros(max_iter)\n u_old = ChambollePock_denoise(f,lam, tau = 0.5, sig = 0.25, acc = True, tol = 1.0e-5)\n for i in range(1, max_iter):\n lam = lam_init * (q ** i)\n u_new = ChambollePock_denoise(f,lam, tau = 0.5, sig = 0.25, acc = True, tol = 1.0e-5)\n error[i] = np.linalg.norm(u_old - u_new)\n #alt_error[i] = np.linalg.norm(u_old - u_new) /abs(lam_init*(q ** i - q ** (i-1)))\n u_old = np.copy(u_new)\n\n #plt.plot(error)\n #plt.plot(alt_error)\n #plt.show()\n opt_idx = np.argmin(error[error != 0.0])\n t = 1.0 / (1.0 + lam_init * (q ** opt_idx))\n lam = lam_init * (q ** opt_idx)\n u= ChambollePock_denoise(f,lam, tau = 0.5, sig = 0.25, acc = True, tol = 1.0e-5)\n \n return u, t",
"def test_timescale_beta(self):\n param_pairs = [(0, 1, self.rule.tau2)]\n\n nsteps = 10\n self.conductor.out_fct = lambda i: 10*np.ones(self.Nc) if i == 0 \\\n else np.zeros(self.Nc)\n\n sim = simulation.Simulation(self.conductor, self.student, self.tutor,\n self.syns, self.rule, dt=self.dt)\n W0 = np.copy(self.syns.W)\n\n for params in param_pairs:\n self.rule.alpha = params[0]\n self.rule.beta = params[1]\n tau = params[2]\n\n self.tutor.out_fct = lambda i: (self.rule.theta + (10 if i == 0 else 0))*\\\n np.ones(self.Ns)\n\n self.syns.W = np.copy(W0)\n sim.run(self.dt)\n\n change0 = self.syns.W - W0\n\n self.assertGreater(np.linalg.norm(change0), 1e-10)\n \n self.tutor.out_fct = lambda i: (self.rule.theta + (10\n if i == nsteps-1 else 0))*np.ones(self.Ns)\n\n self.syns.W = np.copy(W0)\n sim.run(nsteps*self.dt)\n\n change1 = self.syns.W - W0\n\n change1_exp = change0*(1 - float(self.dt)/tau)**(nsteps-1)\n\n self.assertTrue(np.allclose(change1, change1_exp),\n msg=\"Timescale not verified, alpha={}, beta={}.\".format(*params[:2]))",
"def test_quadratic_trinomial_init(self):\n a, b, c = 2, 3, 4\n expected = Polynomial(a, b, c)\n\n qt = QuadraticTrinomial(a, b, c)\n\n self.assertEqual(expected, qt)",
"def test_rr_pz(results):\n truepz = np.asarray([4.43677606e-09, 7.89531535e-01, 7.89747372e-01,\n 8.04473756e-08, 2.22044605e-15])\n test_pz = results.params_pvalue()\n assert test_pz == pytest.approx(truepz)",
"def test_smoohted_LM(self):\n\t\tprecision = 10**-8\n\t\t\t\t \n\t\tif self.n == 1:\n\t\t\t\t \n\t\t\tP_sum = sum(self.estimate_smoothed_prob('', w) for w in self.vocab)\n\t\t\t\n\t\t\tassert abs(1.0 - P_sum) < precision, 'Probability mass does not sum up to one.'\n\t\t\t\t \n\t\telif self.n == 2:\n\t\t\thistories = ['the', 'in', 'at', 'blue', 'white']\n\t\t\t\t \n\t\t\tfor h in histories:\n\t\t\t\t \n\t\t\t\tP_sum = sum(self.estimate_smoothed_prob(h, w) for w in self.vocab)\n\t\t\t\t\n\t\t\t\tassert abs(1.0 - P_sum) < precision, 'Probability mass does not sum up to one for history' + h\n\t\t\t\t\t \n\t\tprint('TEST SUCCESSFUL!')\n\t\t# YOUR CODE HERE",
"def test_unequal_variance_two_tailed(self):\n rng = np.random.default_rng(135481321)\n\n passed = 0\n n_iter = 500\n for _ in range(n_iter):\n data1 = rng.normal(10, 2, 200)\n data2 = rng.normal(10, 2, 200)\n\n ttest = unequal_variance_ttest(data1, data2)\n\n if ttest['P_value'] < .05:\n passed +=1\n\n self.assertAlmostEqual(passed / n_iter, .05, delta=.01)",
"def test_10_test_model(self, example):\n res = example.calc_model()\n print(example.trips_ij)\n total_trips_target = example.persons_gi.sum()\n total_trips_actual = example.trips_ij.sum()\n np.testing.assert_almost_equal(total_trips_target, total_trips_actual)",
"def getTru(n,int_method,func) :\n m = np.asarray([0.5,1,1.5,2,2.5,3,3.5,4,4.5,5,6,7,8,9,10])\n bes = np.asarray([-0.5,0,0.43675,0.47773,0.49231,0.49316,0.49280,0.50325,\n 0.51140,0.52169,0.55823,0.58086,0.60463,0.61483,0.66995])\n p = np.asarray([1,0,0.61007,0.77491,0.84071,0.87689,0.89914,0.91365,\n 0.92449,0.93279,0.94451,0.95289,0.95904,0.96385,\n 0.96731])\n h1 = np.asarray([0,0,-0.07257,-0.04963,-0.03313,-0.02282,-0.01648,\n -0.01248,-0.00970,-0.00773,-0.00522,-0.00369,-0.00272,\n -0.00206,-0.00164])\n h2 = np.asarray([0,0,-0.20048,-0.15556,-0.12070,-0.09611,-0.07919,\n -0.06747,-0.05829,-0.05106,-0.04060,-0.03311,-0.02768,\n -0.02353,-0.02053])\n h3 = np.asarray([0,0,0.01647,0.08284,0.14390,0.19680,0.24168,0.27969,\n 0.31280,0.34181,0.39002,0.42942,0.46208,0.48997,0.51325])\n \n if (func == True) :\n int_bes = splev(n,splrep(m, bes))\n int_p = splev(n,splrep(m, p))\n int_h1 = splev(n,splrep(m, h1))\n int_h2 = splev(n,splrep(m, h2))\n int_h3 = splev(n,splrep(m, h3))\n else :\n int_bes = griddata(m, bes, n, method=int_method)\n int_p = griddata(m, p, n, method=int_method)\n int_h1 = griddata(m, h1, n, method=int_method)\n int_h2 = griddata(m, h2, n, method=int_method)\n int_h3 = griddata(m, h3, n, method=int_method)\n \n return np.asarray([int_bes,int_p,int_h1,int_h2,int_h3])",
"def test_repeated_left_tailed(self):\n rng = np.random.default_rng(734516519)\n\n passed = 0\n n_iter = 500\n for _ in range(n_iter):\n data1 = rng.normal(2, 1, 100)\n data2 = data1 + rng.normal(0, .02, 100)\n\n ttest = repeated_ttest(data1, data2, 'left')\n\n if ttest['P_value'] < .05:\n passed +=1\n\n self.assertAlmostEqual(passed / n_iter, .05, delta=.01)",
"def test_TLearner(self):\n # TLearner test\n # Instantiate TLearner\n Y, T, X, _ = ihdp_surface_B()\n est = AutomatedTLearner(models=automl_model_reg())\n\n # Test constant and heterogeneous treatment effect, single and multi output y\n\n est.fit(Y, T, X=X)\n _ = est.effect(X)",
"def test_rb(self):\n\n # Load simulator\n backend = qiskit.Aer.get_backend('qasm_simulator')\n\n # Test up to 2 qubits\n nq_list = [1, 2]\n\n for nq in nq_list:\n\n print(\"Testing %d qubit RB\" % nq)\n\n for pattern_type in range(2):\n for multiplier_type in range(2):\n # See documentation of choose_pattern for the meaning of\n # the different pattern types\n\n rb_opts = {}\n rb_opts['nseeds'] = 3\n rb_opts['length_vector'] = [1, 3, 4, 7]\n rb_opts['rb_pattern'] = self.choose_pattern(\n pattern_type, nq)\n # if the pattern type is not relevant for nq\n if rb_opts['rb_pattern'] is None:\n continue\n rb_opts['length_multiplier'] = self.choose_multiplier(\n multiplier_type, len(rb_opts['rb_pattern']))\n\n # Generate the sequences\n try:\n rb_circs, _ = rb.randomized_benchmarking_seq(**rb_opts)\n except OSError:\n skip_msg = ('Skipping tests for %s qubits because '\n 'tables are missing' % str(nq))\n print(skip_msg)\n continue\n\n # Perform an ideal execution on the generated sequences\n # basis_gates = ['u1','u2','u3','cx'] # use U, CX for now\n # Shelly: changed format to fit qiskit current version\n basis_gates = 'u1, u2, u3, cx'\n shots = 100\n result = []\n for seed in range(rb_opts['nseeds']):\n result.append(\n qiskit.execute(rb_circs[seed], backend=backend,\n basis_gates=basis_gates,\n shots=shots).result())\n\n # Verify the generated sequences\n for seed in range(rb_opts['nseeds']):\n length_vec = rb_opts['length_vector']\n for circ_index, vec_len in enumerate(length_vec):\n\n self.assertEqual(\n rb_circs[seed][circ_index].name,\n 'rb_seed_%s_length_%s' % (\n str(seed), str(vec_len)),\n 'Error: incorrect circuit name')\n self.verify_circuit(rb_circs[seed][circ_index],\n nq, rb_opts,\n vec_len, result[seed], shots)\n\n self.assertEqual(circ_index, len(rb_circs),\n \"Error: additional circuits exist\")",
"def test_all_pairs_t_test(self):\r\n # We aren't testing the numeric values here, as they've already been\r\n # tested in the functions that compute them. We are interested in the\r\n # format of the returned string.\r\n exp = \"\"\"# The tests of significance were performed using a two-sided Student's two-sample t-test.\r\n# Alternative hypothesis: Group 1 mean != Group 2 mean\r\n# The nonparametric p-values were calculated using 999 Monte Carlo permutations.\r\n# The nonparametric p-values contain the correct number of significant digits.\r\n# Entries marked with \"N/A\" could not be calculated because at least one of the groups\r\n# of distances was empty, both groups each contained only a single distance, or\r\n# the test could not be performed (e.g. no variance in groups with the same mean).\r\nGroup 1\tGroup 2\tt statistic\tParametric p-value\tParametric p-value (Bonferroni-corrected)\tNonparametric p-value\tNonparametric p-value (Bonferroni-corrected)\r\nfoo\tbar\t-6.6\t0.00708047956412\t0.0212414386924\t0.095\t0.285\r\nfoo\tbaz\t-9.79795897113\t0.000608184944463\t0.00182455483339\t0.101\t0.303\r\nbar\tbaz\t-3.0\t0.0576688856224\t0.173006656867\t0.217\t0.651\r\n\"\"\"\r\n obs = all_pairs_t_test(self.labels2, self.dists2)\r\n self.assertEqual(self.remove_nums(obs), self.remove_nums(exp))",
"def test_fn(df, fn):\n\n y_pred = []\n y_true = []\n\n for key in df.index:\n y_t, *inputs = df.loc[key]\n y_true.append(y_t)\n y_p = fn(*inputs)\n y_pred.append(y_p)\n\n # linear regression without intercept\n c = np.mean(y_true) / np.mean(y_pred)\n y_pred = np.multiply(y_pred, c)\n\n rmse = np.sqrt(np.mean(np.subtract(y_pred, y_true) ** 2))\n return rmse, y_pred, y_true, c",
"def test_lsr_pairwise():\n for case in iter_testcases('pairwise'):\n n_items = case[\"n_items\"]\n data = case[\"data\"]\n assert np.allclose(\n case[\"lsr_est\"], lsr_pairwise(n_items, data),\n atol=ATOL, rtol=RTOL)",
"def Tlosses(self, dq = np.zeros(1) , ddq = np.zeros(1)): \n \n T = np.dot( self.Ia , ddq ) + np.dot( self.Da , dq )\n \n return T",
"def attest_ind (a, b, dimension=None, printit=0, name1='Samp1', name2='Samp2',writemode='a'):\r\n if dimension == None:\r\n a = N.ravel(a)\r\n b = N.ravel(b)\r\n dimension = 0\r\n x1 = amean(a,dimension)\r\n x2 = amean(b,dimension)\r\n v1 = avar(a,dimension)\r\n v2 = avar(b,dimension)\r\n n1 = a.shape[dimension]\r\n n2 = b.shape[dimension]\r\n df = n1+n2-2\r\n svar = ((n1-1)*v1+(n2-1)*v2) / float(df)\r\n zerodivproblem = N.equal(svar,0)\r\n svar = N.where(zerodivproblem,1,svar) # avoid zero-division in 1st place\r\n t = (x1-x2)/N.sqrt(svar*(1.0/n1 + 1.0/n2)) # N-D COMPUTATION HERE!!!!!!\r\n t = N.where(zerodivproblem,1.0,t) # replace NaN/wrong t-values with 1.0\r\n probs = abetai(0.5*df,0.5,float(df)/(df+t*t))\r\n\r\n if type(t) == N.ndarray:\r\n probs = N.reshape(probs,t.shape)\r\n if probs.shape == (1,):\r\n probs = probs[0]\r\n \r\n if printit <> 0:\r\n if type(t) == N.ndarray:\r\n t = t[0]\r\n if type(probs) == N.ndarray:\r\n probs = probs[0]\r\n statname = 'Independent samples T-test.'\r\n outputpairedstats(printit,writemode,\r\n name1,n1,x1,v1,N.minimum.reduce(N.ravel(a)),\r\n N.maximum.reduce(N.ravel(a)),\r\n name2,n2,x2,v2,N.minimum.reduce(N.ravel(b)),\r\n N.maximum.reduce(N.ravel(b)),\r\n statname,t,probs)\r\n return\r\n return t, probs",
"def _lrt(tup):\n d = np.abs(2 * (tup[0].logLike - tup[1].logLike))\n return chi2.sf(d, np.abs(tup[0].coefs.shape[0] - tup[1].coefs.shape[0]))",
"def test_T2():",
"def test_T2():",
"def test_QFT(self):\n op = qml.QFT(wires=range(3))\n res = op.matrix()\n exp = QFT\n assert np.allclose(res, exp)"
] | [
"0.6740125",
"0.64186555",
"0.6367431",
"0.6359604",
"0.6324414",
"0.62921345",
"0.6085627",
"0.60754967",
"0.59786165",
"0.59353685",
"0.5921201",
"0.58064675",
"0.58024555",
"0.5798893",
"0.5790756",
"0.5783054",
"0.5782556",
"0.57736266",
"0.57735896",
"0.5765224",
"0.57569",
"0.5729648",
"0.57172316",
"0.5689828",
"0.5674796",
"0.56444013",
"0.56324095",
"0.5631628",
"0.56095934",
"0.5605493",
"0.5589702",
"0.55694634",
"0.55635417",
"0.55566466",
"0.554734",
"0.5543565",
"0.55384445",
"0.55205685",
"0.5503283",
"0.5500659",
"0.5497249",
"0.5496282",
"0.54874885",
"0.54744554",
"0.5464074",
"0.54546386",
"0.54546386",
"0.5447627",
"0.54269403",
"0.54245055",
"0.54211354",
"0.53913724",
"0.5388749",
"0.53816307",
"0.53578746",
"0.5357619",
"0.53550434",
"0.53454846",
"0.5345327",
"0.53435636",
"0.53424555",
"0.53225595",
"0.53225595",
"0.53202677",
"0.5315844",
"0.52849174",
"0.5279042",
"0.5277065",
"0.52745795",
"0.5263676",
"0.5255397",
"0.5253005",
"0.5248952",
"0.5244735",
"0.5243113",
"0.52412426",
"0.52254826",
"0.5211988",
"0.5207954",
"0.52064174",
"0.51943195",
"0.51909226",
"0.5190047",
"0.5179591",
"0.51760966",
"0.51737916",
"0.5170918",
"0.51674646",
"0.51635003",
"0.5161157",
"0.515191",
"0.51505613",
"0.5146174",
"0.5145754",
"0.5144442",
"0.51386446",
"0.51384765",
"0.5137573",
"0.5137573",
"0.5119556"
] | 0.6122987 | 6 |
Compute the F-test for a joint linear hypothesis. This is a special case of `wald_test` that always uses the F distribution. | def f_test(self, r_matrix, cov_p=None, scale=1.0, invcov=None):
res = self.wald_test(r_matrix, cov_p=cov_p, scale=scale,
invcov=invcov, use_f=True)
return res | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_joint_fitter(self):\n p1 = [14.9, 0.3]\n p2 = [13, 0.4]\n A = 9.8\n p = np.r_[A, p1, p2]\n\n def model(A, p, x):\n return A * np.exp(-0.5 / p[1] ** 2 * (x - p[0]) ** 2)\n\n def errfunc(p, x1, y1, x2, y2):\n return np.ravel(\n np.r_[model(p[0], p[1:3], x1) - y1, model(p[0], p[3:], x2) - y2]\n )\n\n coeff, _ = optimize.leastsq(\n errfunc, p, args=(self.x, self.ny1, self.x, self.ny2)\n )\n assert_allclose(coeff, self.jf.fitparams, rtol=10 ** (-2))",
"def test_f_uni(self):\n s = np.array([100.0, 0, 0, 0, 0, 0])\n e = np.array([0.1, -0.05, -0.05, 0, 0, 0])\n f_direct = self.model.f(s, e, self.t, self.T)\n \n sdev = s - np.array([1,1,1,0,0,0]) * np.sum(s[:3]) / 3.0\n se = np.sqrt(3.0/2.0) * la.norm(sdev)\n ee = np.sqrt(2.0/3.0) * la.norm(e)\n\n g_direct = self.smodel.g(se, ee, self.t, self.T)\n \n self.assertTrue(np.isclose(g_direct, f_direct[0]))\n\n self.assertTrue(np.isclose(-g_direct/2.0, f_direct[1]))\n self.assertTrue(np.isclose(-g_direct/2.0, f_direct[2]))\n\n self.assertTrue(np.allclose([0,0,0], f_direct[3:]))",
"def test_fwhm(self):\n for i, func in enumerate(self.fwhm_funcs):\n for j, arr1d in enumerate(self.input_arrays):\n res = func(arr1d)\n assert_allclose(res.fwhm, self.answers[i][j], atol=1e-4)",
"def wald_test(self, r_matrix, cov_p=None, scale=1.0, invcov=None,\n use_f=None):\n if use_f is None:\n # switch to use_t false if undefined\n use_f = (hasattr(self, 'use_t') and self.use_t)\n\n from patsy import DesignInfo\n names = self.model.data.param_names\n LC = DesignInfo(names).linear_constraint(r_matrix)\n r_matrix, q_matrix = LC.coefs, LC.constants\n\n if (self.normalized_cov_params is None and cov_p is None and\n invcov is None and not hasattr(self, 'cov_params_default')):\n raise ValueError('need covariance of parameters for computing '\n 'F statistics') # pragma: no cover\n\n cparams = np.dot(r_matrix, self.params[:, None])\n J = float(r_matrix.shape[0]) # number of restrictions\n if q_matrix is None:\n q_matrix = np.zeros(J)\n else:\n q_matrix = np.asarray(q_matrix)\n if q_matrix.ndim == 1:\n q_matrix = q_matrix[:, None]\n if q_matrix.shape[0] != J:\n raise ValueError(\"r_matrix and q_matrix must have the same \"\n \"number of rows\")\n Rbq = cparams - q_matrix\n if invcov is None:\n cov_p = self.cov_params(r_matrix=r_matrix, cov_p=cov_p)\n if np.isnan(cov_p).max():\n raise ValueError(\"r_matrix performs f_test for using \"\n \"dimensions that are asymptotically \"\n \"non-normal\")\n invcov = np.linalg.pinv(cov_p)\n J_ = np.linalg.matrix_rank(cov_p)\n if J_ < J:\n warnings.warn('covariance of constraints does not have full '\n 'rank. The number of constraints is %d, but '\n 'rank is %d' % (J, J_), ValueWarning)\n J = J_\n\n if (hasattr(self, 'mle_settings') and\n self.mle_settings['optimizer'] in ['l1', 'l1_cvxopt_cp']):\n F = nan_dot(nan_dot(Rbq.T, invcov), Rbq)\n else:\n F = np.dot(np.dot(Rbq.T, invcov), Rbq)\n\n df_resid = getattr(self, 'df_resid_inference', self.df_resid)\n if use_f:\n F /= J\n return ContrastResults(F=F, df_denom=df_resid,\n df_num=J)\n else:\n return ContrastResults(chi2=F, df_denom=J, statistic=F,\n distribution='chi2', distargs=(J,))",
"def _scipy_fk_test(self, mode='median', alpha=0.01):\n q0 = self.get_group_data(0, self.df_test_resampled, ['Q'])\n q1 = self.get_group_data(1, self.df_test_resampled, ['Q'])\n\n with warnings.catch_warnings(): # supress scipy warnings\n warnings.filterwarnings('ignore')\n fstats, pval = fligner(q0, q1, center=mode)\n\n stats_fk = {'z': fstats, 'pval': pval}\n\n if stats_fk['pval'] <= alpha: # With CHI2 approximation\n h = 1\n else:\n h = 0\n\n return h, stats_fk",
"def test_f_divergence(alpha, dist1, dist2):\n def f_alpha(alpha):\n if alpha == 1:\n def f(x):\n return x * np.log2(x)\n elif alpha == -1:\n def f(x):\n return - np.log2(x)\n else:\n def f(x):\n return 4.0 / (1.0 - alpha**2) * (1.0 - np.power(x, (1.0 + alpha) / 2))\n return f\n\n def f_tsallis(alpha):\n if alpha == 1:\n def f(x):\n return -np.log2(x)\n else:\n def f(x):\n return (np.power(x, 1.0 - alpha) - 1.0) / (alpha - 1.0)\n return f\n\n test_functions = [\n (f_alpha(alpha), partial(alpha_divergence, alpha=alpha)),\n (f_tsallis(alpha), partial(tsallis_divergence, alpha=alpha)),\n ]\n\n for f, div_func in test_functions:\n div1 = f_divergence(dist1, dist2, f)\n div2 = div_func(dist1, dist2)\n assert div1 == pytest.approx(div2, abs=1e-1)",
"def f_test(chi1,df1,chi2,df2,red_chi = True):\n\n# if chi1/df1 > chi2/df2:\n#\tprob = 2. * f.cdf(chi1/df1, chi2/df2, df1, df2)\n# else:\n#\tprob = 2. * f.cdf(chi2/df2, chi1/df1, df2, df1)\n if red_chi:\n\tfval = (chi1/df1) / (chi2/df2)\n else:\n\tfval = chi1 / chi2\n prob = 2. * f.cdf((chi1/df1) / (chi2/df2), df1, df2)\n if prob > 1.: \n\treturn 2. - prob\n else:\n\treturn prob",
"def test_f_1_is_equal_to_sklearn_fbeta(self):\n\n y_true = [1, 0, 1, 1, 0, 1]\n y_pred = [0, 0, 1, 1, 1, 1]\n tp = 3\n fp = 1\n fn = 1\n tn = 1\n beta = 1.0\n\n f_beta_custom = Metrics.f_beta(tp=tp, fp=fp, fn=fn, beta=beta)\n f_beta_sklearn = f1_score(y_true=y_true, y_pred=y_pred)\n\n self.assertEqual(f_beta_custom, f_beta_sklearn)",
"def test_non_linear_lsq_fitter_with_weights(self, fitter):\n fitter = fitter()\n\n np.random.seed(42)\n norder = 2\n\n fitter2 = LinearLSQFitter()\n\n model = models.Polynomial1D(norder)\n npts = 10000\n c = [2.0, -10.0, 7.0]\n tw = np.random.uniform(0.0, 10.0, npts)\n tx = np.random.uniform(0.0, 10.0, npts)\n ty = c[0] + c[1] * tx + c[2] * (tx**2)\n ty += np.random.normal(0.0, 1.5, npts)\n\n with pytest.warns(AstropyUserWarning, match=r\"Model is linear in parameters\"):\n tf1 = fitter(model, tx, ty, weights=tw)\n tf2 = fitter2(model, tx, ty, weights=tw)\n\n assert_allclose(tf1.parameters, tf2.parameters, atol=10 ** (-16))\n assert_allclose(tf1.parameters, c, rtol=10 ** (-2), atol=10 ** (-2))\n\n model = models.Gaussian1D()\n if isinstance(fitter, (TRFLSQFitter, LMLSQFitter)):\n with pytest.warns(\n AstropyUserWarning, match=r\"The fit may be unsuccessful; *.\"\n ):\n fitter(model, tx, ty, weights=tw)\n else:\n fitter(model, tx, ty, weights=tw)\n\n model = models.Polynomial2D(norder)\n nxpts = 100\n nypts = 150\n npts = nxpts * nypts\n c = [1.0, 4.0, 7.0, -8.0, -9.0, -3.0]\n tw = np.random.uniform(0.0, 10.0, npts).reshape(nxpts, nypts)\n tx = np.random.uniform(0.0, 10.0, npts).reshape(nxpts, nypts)\n ty = np.random.uniform(0.0, 10.0, npts).reshape(nxpts, nypts)\n tz = (\n c[0]\n + c[1] * tx\n + c[2] * (tx**2)\n + c[3] * ty\n + c[4] * (ty**2)\n + c[5] * tx * ty\n )\n tz += np.random.normal(0.0, 1.5, npts).reshape(nxpts, nypts)\n\n with pytest.warns(AstropyUserWarning, match=r\"Model is linear in parameters\"):\n tf1 = fitter(model, tx, ty, tz, weights=tw)\n tf2 = fitter2(model, tx, ty, tz, weights=tw)\n\n assert_allclose(tf1.parameters, tf2.parameters, atol=10 ** (-16))\n assert_allclose(tf1.parameters, c, rtol=10 ** (-2), atol=10 ** (-2))",
"def fmeasure(y_true, y_pred):\n return fbeta_score(y_true, y_pred, beta=1)",
"def fmeasure(y_true, y_pred):\n return fbeta_score(y_true, y_pred, beta=1)",
"def fmeasure(y_true, y_pred):\n return fbeta_score(y_true, y_pred, beta=1)",
"def fmeasure(y_true, y_pred):\n return fbeta_score(y_true, y_pred, beta=1)",
"def fligner_policello_test(X, Y):\n P_i = []\n for x in X:\n count = 0\n for y in Y:\n if y <= x:\n count += 1\n P_i.append(count)\n\n Q_j = []\n for y in Y:\n count = 0\n for x in X:\n if x <= y:\n count += 1\n Q_j.append(count)\n\n P_i = np.array(P_i)\n Q_j = np.array(Q_j)\n P_bar = np.average(P_i)\n Q_bar = np.average(Q_j)\n V1 = sum((P_i - P_bar) ** 2)\n V2 = sum((Q_j - Q_bar) ** 2)\n z = (sum(Q_j) - sum(P_i)) / (2 * np.sqrt(V1 + V2 + P_bar * Q_bar))\n p_value = 2. * norm.sf(abs(z)) # two sided test\n\n return z, p_value",
"def test_adf(self):\n\n dftest = adfuller(self.ts_df['y'], autolag='AIC')\n dfoutput = pd.Series(dftest[0:4],\n index=['Test Statistic', 'p-value', '#Lags Used', 'Number of Observations Used'])\n for key, value in dftest[4].items():\n dfoutput['Critical Value (%s)' % key] = value\n print(dfoutput)\n if dftest[0] > dftest[4]['5%']:\n print(\n \"Test statistic greater than critical value at 5% --> series seems to be not stationary. \"\n \"Look at critical values at 1% and 10% too, ideally they also should be less than test statistic.\")\n else:\n print(\n \"Test statistic less than critical value at 5% --> series seems to be stationary. \"\n \"Look at critical values at 1% and 10% too, ideally they also should be greater than test statistic.\")",
"def test_partial_derivative_f1(self):\r\n # Verified with Wolfram Alpha.\r\n\r\n # f2 > 0\r\n obs = self.estimator1._partial_derivative_f1(2, 3, 10, 42)\r\n assert_almost_equal(obs, 1.22672908818)\r\n\r\n # f2 == 0\r\n obs = self.estimator1._partial_derivative_f1(2, 0, 10, 42)\r\n assert_almost_equal(obs, 1.272173492918482)\r\n\r\n # f1 == 0, f2 == 0\r\n obs = self.estimator1._partial_derivative_f1(0, 0, 10, 42)\r\n assert_almost_equal(obs, 1.2961664362634027)",
    "def test_f_two_sample(self):\r\n\r\n # The expected values in this test are obtained through R.\r\n # In R the F test is var.test(x,y) different alternative hypotheses\r\n # can be specified (two sided, less, or greater).\r\n # The vectors are random samples from a particular normal distribution\r\n #(mean and sd specified).\r\n\r\n # a: 50 elem, mean=0 sd=1\r\n a = [-0.70701689, -1.24788845, -1.65516470, 0.10443876, -0.48526915,\r\n -0.71820656, -1.02603596, 0.03975982, -2.23404324, -0.21509363,\r\n 0.08438468, -0.01970062, -0.67907971, -0.89853667, 1.11137131,\r\n 0.05960496, -1.51172084, -0.79733957, -1.60040659, 0.80530639,\r\n -0.81715836, -0.69233474, 0.95750665, 0.99576429, -1.61340216,\r\n -0.43572590, -1.50862327, 0.92847551, -0.68382338, -1.12523522,\r\n -0.09147488, 0.66756023, -0.87277588, -1.36539039, -0.11748707,\r\n -1.63632578, -0.31343078, -0.28176086, 0.33854483, -0.51785630,\r\n 2.25360559, -0.80761191, 1.18983499, 0.57080342, -1.44601700,\r\n -0.53906955, -0.01975266, -1.37147915, -0.31537616, 0.26877544]\r\n\r\n # b: 50 elem, mean=0, sd=1.2\r\n b = [\r\n 0.081418743, 0.276571612, -\r\n 1.864316504, 0.675213612, -0.769202643,\r\n 0.140372825, -1.426250184, 0.058617884, -\r\n 0.819287409, -0.007701916,\r\n -0.782722020, -\r\n 0.285891593, 0.661980419, 0.383225191, 0.622444946,\r\n -0.192446150, 0.297150571, 0.408896059, -\r\n 0.167359383, -0.552381362,\r\n 0.982168338, 1.439730446, 1.967616101, -\r\n 0.579607307, 1.095590943,\r\n 0.240591302, -1.566937143, -\r\n 0.199091349, -1.232983905, 0.362378169,\r\n 1.166061081, -0.604676222, -\r\n 0.536560206, -0.303117595, 1.519222792,\r\n -0.319146503, 2.206220810, -\r\n 0.566351124, -0.720397392, -0.452001377,\r\n 0.250890097, 0.320685395, -\r\n 1.014632725, -3.010346273, -1.703955054,\r\n 0.592587381, -1.237451255, 0.172243366, -0.452641122, -0.982148581]\r\n\r\n # c: 60 elem, mean=5, sd=1\r\n c = [4.654329, 5.242129, 6.272640, 5.781779, 4.391241, 3.800752,\r\n 4.559463, 4.318922, 3.243020, 5.121280, 4.126385, 5.541131,\r\n 4.777480, 5.646913, 6.972584, 3.817172, 6.128700, 4.731467,\r\n 6.762068, 5.082983, 5.298511, 5.491125, 4.532369, 4.265552,\r\n 5.697317, 5.509730, 2.935704, 4.507456, 3.786794, 5.548383,\r\n 3.674487, 5.536556, 5.297847, 2.439642, 4.759836, 5.114649,\r\n 5.986774, 4.517485, 4.579208, 4.579374, 2.502890, 5.190955,\r\n 5.983194, 6.766645, 4.905079, 4.214273, 3.950364, 6.262393,\r\n 8.122084, 6.330007, 4.767943, 5.194029, 3.503136, 6.039079,\r\n 4.485647, 6.116235, 6.302268, 3.596693, 5.743316, 6.860152]\r\n\r\n # d: 30 elem, mean=0, sd =0.05\r\n d = [\r\n 0.104517366, 0.023039678, 0.005579091, 0.052928250, 0.020724823,\r\n -0.060823243, -0.019000890, -\r\n 0.064133996, -0.016321594, -0.008898334,\r\n -0.027626992, -0.051946186, 0.085269587, -\r\n 0.031190678, 0.065172938,\r\n -0.054628573, 0.019257306, -\r\n 0.032427056, -0.058767356, 0.030927400,\r\n 0.052247357, -\r\n 0.042954937, 0.031842104, 0.094130522, -0.024828465,\r\n 0.011320453, -0.016195062, 0.015631245, -0.050335598, -0.031658335]\r\n\r\n a, b, c, d = map(array, [a, b, c, d])\r\n self.assertEqual(map(len, [a, b, c, d]), [50, 50, 60, 30])\r\n\r\n # allowed error. This big, because results from R\r\n # are rounded at 4 decimals\r\n error = 1e-4\r\n\r\n self.assertFloatEqual(f_two_sample(a, a), (49, 49, 1, 1), eps=error)\r\n self.assertFloatEqual(f_two_sample(a, b), (49, 49, 0.8575, 0.5925),\r\n eps=error)\r\n self.assertFloatEqual(f_two_sample(b, a), (49, 49, 1.1662, 0.5925),\r\n eps=error)\r\n self.assertFloatEqual(f_two_sample(a, b, tails='low'),\r\n (49, 49, 0.8575, 0.2963), eps=error)\r\n self.assertFloatEqual(f_two_sample(a, b, tails='high'),\r\n (49, 49, 0.8575, 0.7037), eps=error)\r\n self.assertFloatEqual(f_two_sample(a, c),\r\n (49, 59, 0.6587, 0.1345), eps=error)\r\n # p value very small, so first check df's and F value\r\n self.assertFloatEqualAbs(f_two_sample(d, a, tails='low')[0:3],\r\n (29, 49, 0.0028), eps=error)\r\n assert f_two_sample(d, a, tails='low')[3] < 2.2e-16 # p value\r",
"def test_hof(a, b):\n def f(g, x):\n return g(x) * g(x + 10.0)\n\n def g(x):\n return x * b\n\n return f(g, a) + f(g, b)",
"def fittest(self, population, f_thres=None):\n pass",
"def test_f_divergence2(alpha, dist1, dist2):\n def f_alpha(alpha):\n if alpha == 1:\n def f(x):\n return x * np.log2(x)\n elif alpha == -1:\n def f(x):\n return - np.log2(x)\n else:\n def f(x):\n return 4.0 / (1.0 - alpha**2) * (1.0 - np.power(x, (1.0 + alpha) / 2))\n return f\n\n def f_tsallis(alpha):\n if alpha == 1:\n def f(x):\n return -np.log2(x)\n else:\n def f(x):\n return (np.power(x, 1.0 - alpha) - 1.0) / (alpha - 1.0)\n return f\n\n test_functions = [\n (f_alpha(alpha), partial(alpha_divergence, alpha=alpha)),\n (f_tsallis(alpha), partial(tsallis_divergence, alpha=alpha)),\n ]\n\n for f, div_func in test_functions:\n div1 = f_divergence(dist1=dist1, dist2=dist2, f=f, rvs=[0, 1])\n div2 = div_func(dist1=dist1, dist2=dist2, rvs=[0, 1])\n assert div1 == pytest.approx(div2, abs=1e-1)",
"def ftest(self,lam2min,ndf,alpha=0.05):\n\n k = 2 # two parameters, phi and dt.\n F = stats.f.ppf(1-alpha,k,ndf)\n lam2alpha = lam2min * ( 1 + (k/(ndf-k)) * F)\n return lam2alpha",
"def test_fwhm(self):\n m = self.sp.model\n bp = SpectralElement(\n Gaussian1D, mean=m.mean, amplitude=m.amplitude, stddev=m.stddev)\n assert_quantity_allclose(bp.fwhm(), 100 * u.AA, rtol=1e-3) # 0.1%",
"def test_w_f_approx(self):\n x = np.logspace(-3., 3., 100)\n y = np.logspace(-3., 3., 100)\n\n X, Y = np.meshgrid(x, y)\n\n w_f_app = self.gaussian_kappa_ellipse.w_f_approx(X+1j*Y)\n w_f_scipy = wofz(X+1j*Y)\n\n npt.assert_allclose(w_f_app.real, w_f_scipy.real, rtol=4e-5, atol=0)\n npt.assert_allclose(w_f_app.imag, w_f_scipy.imag, rtol=4e-5, atol=0)\n\n # check `derivatives()` method with and without `scipy.special.wofz()`\n x = 1.\n y = 1.\n e1, e2 = 5e-5, 0\n sigma = 1.\n amp = 2.\n\n # with `scipy.special.wofz()`\n gauss_scipy = GaussianEllipseKappa(use_scipy_wofz=True)\n f_x_sp, f_y_sp = gauss_scipy.derivatives(x, y, amp, sigma, e1, e2)\n\n # with `GaussEllipseKappa.w_f_approx()`\n gauss_approx = GaussianEllipseKappa(use_scipy_wofz=False)\n f_x_ap, f_y_ap = gauss_approx.derivatives(x, y, amp, sigma, e1, e2)\n\n npt.assert_almost_equal(f_x_sp, f_x_ap, decimal=4)\n npt.assert_almost_equal(f_y_sp, f_y_ap, decimal=4)",
"def test_partial_derivative_f2(self):\r\n # Verified with Wolfram Alpha.\r\n\r\n # f2 > 0\r\n obs = self.estimator1._partial_derivative_f2(2, 3, 10, 42)\r\n assert_almost_equal(obs, 0.9651585982441183)\r\n\r\n # f2 == 0\r\n obs = self.estimator1._partial_derivative_f2(2, 0, 10, 42)\r\n assert_almost_equal(obs, 0.9208698803111386)\r\n\r\n # f1 ==0, f2 == 0\r\n obs = self.estimator1._partial_derivative_f2(0, 0, 10, 42)\r\n assert_almost_equal(obs, 1.0)",
"def test_figure34(self):\n star = 0.1\n current = 1.37\n func = lambda x: x**6 + 3 * x - 4\n\n logging.info(\"\\nCONFIRMING FIGURE 3.4\")\n rf_results = undertest.regula_falsi(func, star, current, 100)",
"def test_newton_rhapson(testFunctions, tol, printFlag): \n pass",
"def lfprob (dfnum, dfden, F):\r\n p = betai(0.5*dfden, 0.5*dfnum, dfden/float(dfden+dfnum*F))\r\n return p",
"def extrapolate_fwds(h, ufr, llfr, alpha=0.10):\n fwd_fsp_fsp_plus_h = np.log(1 + ufr) + (llfr - np.log(1 + ufr)) * big_b(h, alpha)\n return fwd_fsp_fsp_plus_h",
"def test_newton_rhapson_system(testFunctions, tol, printFlag): \n pass",
"def test_log():\n x, y = fwd.Variable(), fwd.Variable()\n f = fwd.log(fwd.sin(x)+y**2)\n dfdx = lambda x, y: np.cos(x) / (np.sin(x)+y**2)\n dfdy = lambda x, y: 2*y / (np.sin(x)+y**2)\n d2fdxdy = lambda x, y: -2*y*np.cos(x) / (np.sin(x)+y**2)**2\n assert equals(f.evaluation_at({x: 1.5, y:2.5}), np.log(np.sin(1.5)+2.5**2))\n assert equals(f.derivative_at(x, {x: 1.5, y:2.5}), dfdx(1.5, 2.5))\n assert equals(f.derivative_at(y, {x: 1.5, y:2.5}), dfdy(1.5, 2.5))\n assert equals(f.derivative_at((x, y), {x: 1.5, y:2.5}), d2fdxdy(1.5, 2.5))\n with pytest.raises(NotImplementedError):\n f.derivative_at(x, {x:1.0, y: 2.0}, order=3)",
"def get_fnllh(self):\n\n def fnllh(p):\n return 0.5 * anp.sum(self.get_fres()(p) ** 2)\n\n return fnllh",
"def test_pow_2ndord():\n # one variable\n x = fwd.Variable()\n f = (x+1)**3\n assert equals(f.derivative_at(x, {x: 2.0}, order=2), 18.0)\n # two variables\n x, y = fwd.Variable(), fwd.Variable()\n g = (x+y)**3\n assert equals(g.derivative_at(x, {x: 2.0, y: 1.0}, order=2), 18.0)\n # test error raising\n with pytest.raises(NotImplementedError):\n g.derivative_at(x, {x:1.0, y: 2.0}, order=3)",
"def test_var_exp(self):\n with self.test_context() as session:\n test_setups, F, feed = self.prepare()\n for test_setup in test_setups:\n l = test_setup.likelihood\n y = test_setup.Y\n l.compile()\n r1 = session.run(l.logp(F, y), feed_dict=feed)\n zero = F * 0.\n r2 = session.run(\n l.variational_expectations(F, zero, test_setup.Y), feed_dict=feed)\n assert_allclose(r1, r2, atol=test_setup.tolerance, rtol=test_setup.tolerance)",
"def test_half_life():\n assert np.isclose(\n half_life(\"tritium\").to(u.s).value, (12.32 * u.yr).to(u.s).value, rtol=2e-4\n ), \"Incorrect half-life for tritium.\"",
"def test_twodstats():\n if __name__ == '__main__':\n logger = piff.config.setup_logger(2)\n else:\n logger = None\n\n model = piff.Gaussian(fastfit=True)\n interp = piff.Polynomial(order=1) # should find that order=1 is better\n # create background model\n stars, true_model = generate_starlist(100)\n psf = piff.SimplePSF(model, interp)\n psf.fit(stars, None, None)\n stars = psf.stars # These have the right fit parameters\n\n # check the coeffs of sigma and g2, which are actually linear fits\n # skip g1 since it is actually a 2d parabola\n # factor of 0.263 is to account for going from pixel xy to wcs uv\n np.testing.assert_almost_equal(psf.interp.coeffs[0].flatten(),\n np.array([0.4, 0, 1. / (0.263 * 2048), 0]), decimal=4)\n np.testing.assert_almost_equal(psf.interp.coeffs[2].flatten(),\n np.array([-0.1 * 1000 / 2048, 0, 0.1 / (0.263 * 2048), 0]),\n decimal=4)\n\n stats = piff.TwoDHistStats(nbins_u=5, nbins_v=5) # implicitly np.median\n stats.compute(psf, stars, logger=logger)\n # check the twodhists\n # get the average value in the bin\n u_i = 3\n v_i = 3\n icen = stats.twodhists['u'][v_i, u_i] / 0.263\n jcen = stats.twodhists['v'][v_i, u_i] / 0.263\n print('icen = ',icen)\n print('jcen = ',jcen)\n icenter = 1000\n jcenter = 2000\n # the average value in the bin should match up with the model for the average coordinates\n sigma, g1, g2 = psf_model(icen, jcen, icenter, jcenter)\n gsq = g1**2 + g2**2\n T = 2*sigma**2 * (1+gsq)/(1-gsq)\n T_average = stats.twodhists['T'][v_i, u_i]\n g1_average = stats.twodhists['g1'][v_i, u_i]\n g2_average = stats.twodhists['g2'][v_i, u_i]\n # assert equal to 4th decimal\n print('T, g1, g2 = ',[T,g1,g2])\n print('av T, g1, g2 = ',[T_average,g1_average,g2_average])\n np.testing.assert_almost_equal([T, g1, g2], [T_average, g1_average, g2_average],\n decimal=2)\n\n # Test the plotting and writing\n twodstats_file = os.path.join('output','twodstats.pdf')\n stats.write(twodstats_file)\n\n with np.testing.assert_raises(ValueError):\n stats.write() # If not given in constructor, must give file name here.\n\n # repeat for whisker\n stats = piff.WhiskerStats(nbins_u=21, nbins_v=21, reducing_function='np.mean')\n stats.compute(psf, stars)\n # Test the plotting and writing\n whisker_file = os.path.join('output','whiskerstats.pdf')\n stats.write(whisker_file)\n with np.testing.assert_raises(ValueError):\n stats.write()\n\n # With large number of bins, many will have no objects. This is ok.\n # Also, can use other np functions like max, std, instead to get different stats\n # Not sure when these would be useful, but they are allowed.\n # And, check usage where file_name is given in init.\n twodstats_file2 = os.path.join('output','twodstats.pdf')\n stats2 = piff.TwoDHistStats(nbins_u=50, nbins_v=50, reducing_function='np.std',\n file_name=twodstats_file2)\n with np.testing.assert_raises(RuntimeError):\n stats2.write() # Cannot write before compute\n stats2.compute(psf, stars, logger=logger)\n stats2.write()\n\n whisker_file2 = os.path.join('output','whiskerstats.pdf')\n stats2 = piff.WhiskerStats(nbins_u=100, nbins_v=100, reducing_function='np.max',\n file_name=whisker_file2)\n with np.testing.assert_raises(RuntimeError):\n stats2.write() # Cannot write before compute\n stats2.compute(psf, stars)\n stats2.write()",
"def AFTER(df_train, df_test, lambd):\n\n # forecast errors\n errors = df_train.iloc[:, 1:].subtract(df_train.iloc[:, 0], axis=0)\n sq_errors = errors**2\n\n # combining weights\n nominator = np.exp((-lambd) * sq_errors.sum(axis=0))\n denominator = nominator.sum()\n comb_w = nominator / denominator\n\n # predictions\n df_pred = pd.DataFrame({\"AFTER\": df_test.dot(comb_w)})\n\n return df_pred",
"def test_estimated_vs_analytic_deriv_with_weights(self, fitter0, fitter1):\n\n weights = 1.0 / (self.ydata / 10.0)\n\n fitter0 = fitter0()\n model = fitter0(self.gauss, self.xdata, self.ydata, weights=weights)\n g1e = models.Gaussian1D(100, 5.0, stddev=1)\n\n fitter1 = fitter1()\n emodel = fitter1(\n g1e, self.xdata, self.ydata, weights=weights, estimate_jacobian=True\n )\n assert_allclose(model.parameters, emodel.parameters, rtol=10 ** (-3))",
"def test_prop_fluctuation(self):\n tmax = 10.0\n dt = 1.0\n\n ini_rate = 80.0\n\n nsteps = int_r(tmax/dt)\n\n tutor = SimpleNeurons(1, out_fct=lambda i: ini_rate + i*20.0/nsteps - 10.0)\n reward = MockReward(lambda _: 1.0)\n tutor_rule = ReinforcementTutorRule(tutor, reward, tau=0,\n constrain_rates=False, ini_rate=ini_rate, learning_rate=1.0,\n use_tutor_baseline=False)\n\n sim = simulation.Simulation(tutor, reward, tutor_rule, dt=dt)\n sim.run(tmax)\n\n drates = (tutor_rule.rates - ini_rate)[:, 0]\n\n fluctuations = (np.arange(nsteps)*20.0/nsteps - 10.0)\n mask = (fluctuations > 0)\n ratio = np.mean(drates[mask] / fluctuations[mask])\n\n self.assertLess(np.max(np.abs(drates - ratio*fluctuations)), 1e-6)",
"def FDA_test(X_test, w_star, w_0):\n\n y_proj = X_test.dot(w_star)[:, np.newaxis]\n\n #1-of-K code scheme\n y_1 = y_proj >= w_0\n y_2 = y_proj < w_0\n y_pred = np.hstack((y_1, y_2)).astype(int)\n\n return y_pred",
"def f_test_var(data1,data2):\n var1, var2 = np.var(data1,ddof = 1),np.var(data2,ddof = 1)\t# compute variance\n df1, df2, = len(data1) - 1, len(data2) - 1\t\t# compute degrees of freedom\n if var1 > var2:\n\tprob = 2. * f.cdf(var1/var2,df1,df2)\n else:\n\tprob = 2. * f.cdf(var2/var1,df2,df1)\n if prob > 1.:\n\treturn 2. - prob\n else:\n\treturn prob",
"def get_fde(forecasted_trajectory, gt_trajectory) -> float:\n fde = torch.sqrt(\n (forecasted_trajectory[:,-1, 0] - gt_trajectory[:,-1, 0]) ** 2\n + (forecasted_trajectory[:,-1, 1] - gt_trajectory[:,-1, 1]) ** 2\n )\n return fde.mean()",
"def test_positive_definite2(dist, alpha, divergence):\n assert divergence(dist, dist, alpha, rvs=[0, 1]) == pytest.approx(0)\n assert hellinger_sum(dist, dist, alpha, rvs=[0, 1]) == pytest.approx(1)",
"def _fk_test(self, mode='median', alpha=0.01):\n\n q0 = self.get_group_data(0, self.df_test_resampled, ['Q'])\n q0['group'] = 0\n q1 = self.get_group_data(1, self.df_test_resampled, ['Q'])\n q1['goup'] = 1\n\n df = pd.concat([q0, q1], axis=0)\n\n h, stats_fk = fk_test(df, mode, 'X2', alpha)\n\n return h, stats_fk",
"def test_falsifications(self):\n implications = get_conditional_independencies(asia_example.graph)\n issues = get_falsifications(implications, asia_example.data)\n self.assertEqual(0, len(issues.failures))\n self.assertEqual(len(issues.evidence), len(implications))",
"def freedom(L=5):\n\n Y = -1\n return Y",
"def bisection(f, fu, point_a, point_b, point_c, point_d, lower_bound, upper_bound, length):\n n = 1\n theta = 0\n a = lower_bound\n b = upper_bound\n while n <= 100:\n theta = (a + b) / 2.0\n if -1e-6 < f(fu(point_a, point_b, point_c, theta), point_d) - length < 1e-6:\n # print 'Residual', f(fu(point_a, point_b, point_c, theta), point_d) - length\n # print 'iteration', n\n return theta\n else:\n n = n + 1\n if f(fu(point_a, point_b, point_c, theta), point_d) - length > 0:\n b = theta\n else:\n a = theta\n\n print 'failedtheta', theta, 'Residual', f(fu(point_a, point_b, point_c, theta), point_d) - length\n print 'iteration', n\n return False",
"def test_tan_2ndord():\n # one variable\n x = fwd.Variable()\n f = fwd.tan(2.0*x - 3.0)\n assert equals( f.derivative_at(x, {x: 1.5}, order=2), \n 8.0*np.tan(2.0*1.5-3.0)/(np.cos(2.0*1.5-3.0))**2 )\n # two variables\n x, y = fwd.Variable(), fwd.Variable()\n g = fwd.tan(2.0*x / y)\n assert equals(g.derivative_at(x, {x: 1.5, y: 2.5}, order=2), \n 8.0*np.tan(2.0*1.5/2.5) / (np.cos(2.0*1.5/2.5)**2 * (2.5**2)) )\n # test error raising\n with pytest.raises(NotImplementedError):\n g.derivative_at(x, {x:1.0, y: 2.0}, order=3)",
"def test_estimated_vs_analytic_deriv(self, fitter0, fitter1):\n fitter0 = fitter0()\n model = fitter0(self.gauss, self.xdata, self.ydata)\n g1e = models.Gaussian1D(100, 5.0, stddev=1)\n\n fitter1 = fitter1()\n emodel = fitter1(g1e, self.xdata, self.ydata, estimate_jacobian=True)\n assert_allclose(model.parameters, emodel.parameters, rtol=10 ** (-3))",
"def fdq1(f, x, h=1e-5):\n return (f(x+h) - f(x))/h\n \n raise NotImplementedError(\"Problem 2 Incomplete\")",
"def calc_F(self, peq):\n return self.dmat_d_.dot(np.log(peq))",
"def test_single_expectation_value(self, approx_order, strategy, tol):\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n x = 0.543\r\n y = -0.654\r\n\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RX(x, wires=[0])\r\n qml.RY(y, wires=[1])\r\n qml.CNOT(wires=[0, 1])\r\n qml.expval(qml.PauliZ(0) @ qml.PauliX(1))\r\n\r\n tapes, fn = finite_diff(tape, approx_order=approx_order, strategy=strategy)\r\n res = fn(dev.batch_execute(tapes))\r\n assert res.shape == (1, 2)\r\n\r\n expected = np.array([[-np.sin(y) * np.sin(x), np.cos(y) * np.cos(x)]])\r\n assert np.allclose(res, expected, atol=tol, rtol=0)",
"def test_with_optimize(self, fitter):\n fitter = fitter()\n\n model = fitter(self.gauss, self.xdata, self.ydata, estimate_jacobian=True)\n\n def func(p, x):\n return p[0] * np.exp(-0.5 / p[2] ** 2 * (x - p[1]) ** 2)\n\n def errfunc(p, x, y):\n return func(p, x) - y\n\n result = optimize.leastsq(\n errfunc, self.initial_values, args=(self.xdata, self.ydata)\n )\n assert_allclose(model.parameters, result[0], rtol=10 ** (-3))",
"def levenberg_marquadt(x0, F, DF, lamda0=1, nMax=100, tol=10**(-8)):\n n = len(x0)\n x, lamda = x0, lamda0\n\n objectives = [0]\n residuals = [0]\n gradients = [0]\n\n for k in range(nMax):\n\n fk = F(x)\n Dfk = DF(x)\n objectives.append(norm(fk)**2)\n gradients.append(norm(2*Dfk.T @ fk))\n\n if norm( 2 * Dfk.T @ fk) < tol:\n break\n\n xk = x - inv( Dfk.T @ Dfk + lamda * eye(n) ) @ Dfk.T @ fk\n\n if norm(F(xk)) < norm(fk):\n x = xk\n lamda = 0.8 * lamda\n else:\n lamda = 2 * lamda\n\n return x, { 'objectives': objectives, 'gradients': gradients }",
"def est_fwhm(x, y):\n half_max = 0.5*y.max()\n within_halfmax = y > half_max\n x_within_halfmax = x[within_halfmax]\n return x_within_halfmax.max() - x_within_halfmax.min()",
"def test_hurwicz():\n f = np.asarray([\n [0.99, 1.0, 0.5],\n [0.69, 0.6, 0.6]])\n alpha = 0.75\n R = common_metrics.hurwicz(f, maximise=True, alpha=alpha)\n expected = np.asarray(\n [0.625, 0.6225])\n assert np.allclose(R, expected)\n R = common_metrics.hurwicz(f, maximise=False, alpha=alpha)\n expected = np.asarray(\n [-0.875, -0.6675])\n assert np.allclose(R, expected)\n R = common_metrics.hurwicz(f, maximise=False)\n expected = np.asarray(\n [-0.75, -0.645])\n assert np.allclose(R, expected)",
"def test_fdr_correction(self):\r\n pvals = array([.1, .7, .5, .3, .9])\r\n exp = array([.5, .7 * 5 / 4., .5 * 5 / 3., .3 * 5 / 2., .9])\r\n obs = fdr_correction(pvals)\r\n self.assertFloatEqual(obs, exp)",
"def test_exp_2ndord():\n # one variable\n x = fwd.Variable()\n f = fwd.exp(2.0*x + 3.0)\n assert equals(f.derivative_at(x, {x: 1.5}, order=2), 4.0*np.exp(2.0*1.5+3.0))\n # two variables\n x, y = fwd.Variable(), fwd.Variable()\n g = fwd.exp(2.0*x / y)\n assert equals(g.derivative_at(x, {x: 1.5, y: 2.5}, order=2), \n 4.0*np.exp(2.0*1.5/2.5) / (2.5**2) )\n # test error raising\n with pytest.raises(NotImplementedError):\n g.derivative_at(x, {x:1.0, y: 2.0}, order=3)",
"def adf_test(series,title=''):\n print(f'Augmented Dickey-Fuller Test: {title}')\n result = adfuller(series.dropna(),autolag='AIC') # .dropna() handles differenced data\n \n labels = ['ADF test statistic','p-value','# lags used','# observations']\n out = pd.Series(result[0:4],index=labels)\n\n for key,val in result[4].items():\n out[f'critical value ({key})']=val\n \n print(out.to_string()) # .to_string() removes the line \"dtype: float64\"\n \n if result[1] <= 0.05:\n print(\"Strong evidence against the null hypothesis\")\n print(\"Reject the null hypothesis\")\n print(\"Data has no unit root and is stationary\")\n else:\n print(\"Weak evidence against the null hypothesis\")\n print(\"Fail to reject the null hypothesis\")\n print(\"Data has a unit root and is non-stationary\")",
"def testAlgn(x,y):\n\n A = numpy.ones((3,3))\n A[:,1] = x\n A[:,2] = y\n return numpy.linalg.det(A)",
"def ret_f(t,y):\n\n f = np.zeros(3)\n f[0] = 77.27*(y(1) - y(0)*y(1)+ y(0)-8.375e-6*y(0)*y(0))\n f[1] = (1.0/77.27)*(-y(1)-y(0)*y(1)+y(2))\n f[2] = 0.161*(y(0)-y(2))\n\n return f",
"def ddalf(x):\n return dalf_spl.derivatives(x)[1]",
"def test_linear_fitter_with_weights():\n Xin, Yin = np.mgrid[0:21, 0:21]\n fitter = LinearLSQFitter()\n\n with NumpyRNGContext(_RANDOM_SEED):\n zsig = np.random.normal(0, 0.01, size=Xin.shape)\n\n p2 = models.Polynomial2D(3)\n p2.parameters = np.arange(10) / 1.2\n z = p2(Xin, Yin)\n pmod = fitter(models.Polynomial2D(3), Xin, Yin, z + zsig, weights=zsig ** (-2))\n assert_allclose(pmod.parameters, p2.parameters, atol=10 ** (-2))",
"def find_f_equals_1():\n f1 = scipy.optimize.brentq(f2, 0, -10)\n return f1",
"def finite_difference(f, p, h):\n tweaker = FunctionTweak(f, h)\n tweaked_funcs = tweaker(np.atleast_2d(p))\n main_func = tweaked_funcs[0]\n\n def finite_func(t):\n list_of_diffs = [(tweaked_func(t) - main_func(t))/h\n for tweaked_func in tweaked_funcs[1:]]\n return np.column_stack(list_of_diffs)\n\n return finite_func",
"def test_non_finite_error(fitter, weights):\n\n x = np.array([1, 2, 3, 4, 5, np.nan, 7, np.inf])\n y = np.array([9, np.nan, 11, np.nan, 13, np.nan, 15, 16])\n\n m_init = models.Gaussian1D()\n fit = fitter()\n\n # Raise warning, notice fit fails due to nans\n with pytest.raises(\n NonFiniteValueError, match=r\"Objective function has encountered.*\"\n ):\n fit(m_init, x, y, weights=weights)",
"def test_threat(self):\n metric = verif.metric.Threat()\n obs = np.array([0, 1, 2, 3])\n fcst = np.array([0, 3, 1, 2])\n\n # Hits: 1\n # FA: 1\n # Miss: 1\n # CR: 0\n interval = verif.interval.Interval(1.5, np.inf, True, True)\n f_interval = verif.interval.Interval(1.5, np.inf, True, True)\n value = metric.compute_from_obs_fcst(obs, fcst, interval, f_interval)\n self.assertEqual(value, 1.0/3)",
"def test_temporal_derivative_distribution_repair(fname, tmp_path):\n raw = read_raw_nirx(fname)\n raw_od = optical_density(raw)\n raw_hb = beer_lambert_law(raw_od)\n\n # With optical densities\n # Add a baseline shift artifact about half way through data\n max_shift = np.max(np.diff(raw_od._data[0]))\n shift_amp = 5 * max_shift\n raw_od._data[0, 0:30] = raw_od._data[0, 0:30] - shift_amp\n # make one channel zero std\n raw_od._data[1] = 0.0\n raw_od._data[2] = 1.0\n assert np.max(np.diff(raw_od._data[0])) > shift_amp\n # Ensure that applying the algorithm reduces the step change\n raw_od = tddr(raw_od)\n assert np.max(np.diff(raw_od._data[0])) < shift_amp\n assert_allclose(raw_od._data[1], 0.0) # unchanged\n assert_allclose(raw_od._data[2], 1.0) # unchanged\n\n # With Hb\n # Add a baseline shift artifact about half way through data\n max_shift = np.max(np.diff(raw_hb._data[0]))\n shift_amp = 5 * max_shift\n raw_hb._data[0, 0:30] = raw_hb._data[0, 0:30] - (1.1 * shift_amp)\n # make one channel zero std\n raw_hb._data[1] = 0.0\n raw_hb._data[2] = 1.0\n assert np.max(np.diff(raw_hb._data[0])) > shift_amp\n # Ensure that applying the algorithm reduces the step change\n raw_hb = tddr(raw_hb)\n assert np.max(np.diff(raw_hb._data[0])) < shift_amp\n assert_allclose(raw_hb._data[1], 0.0) # unchanged\n assert_allclose(raw_hb._data[2], 1.0) # unchanged",
"def ForceFitPowerlaw(p0, f, x, model='h'):\n hertz = ['h', 'H', 'hertz', 'Hertz']\n sneddon = ['s', 'S', 'sneddon', 'Sneddon']\n if model in hertz:\n model = 3./2\n def erf(p, f, x, model):\n return f - p[0]*np.power(x,model)\n elif model in sneddon:\n model = 2.\n def erf(p, f, x, model):\n return f - p[0]*np.power(x,model)\n else:\n def erf(p, f, x, model):\n return f - p[0]*np.power(x,model)\n\n fit = leastsq(erf, p0, args=(f,x,model))[0]\n return fit",
"def testSymmetric(self):\n with self.test_context() as session:\n nClasses = 5\n nPoints = 10\n tolerance = 1e-4\n epsilon = 1e-3\n F = tf.placeholder(settings.float_type)\n F_data = np.ones((nPoints, nClasses))\n feed = {F: F_data}\n rng = np.random.RandomState(1)\n Y = rng.randint(nClasses, size=(nPoints, 1))\n\n l = gpflow.likelihoods.MultiClass(nClasses)\n l.invlink.epsilon = epsilon\n l.compile()\n\n mu, _ = session.run(l.predict_mean_and_var(F, F), feed_dict=feed)\n pred = session.run(l.predict_density(F, F, Y), feed_dict=feed)\n variational_expectations = session.run(\n l.variational_expectations(F, F, Y), feed_dict=feed)\n expected_mu = (1. / nClasses * (1. - epsilon) + (1. - 1. / nClasses) * \\\n epsilon / (nClasses - 1)) * np.ones((nPoints, 1))\n\n self.assertTrue(np.allclose(mu, expected_mu, tolerance,\n tolerance)) # assert_allclose() would complain about shape mismatch\n expected_log_denisty = np.log(expected_mu)\n self.assertTrue(np.allclose(pred, expected_log_denisty, 1e-3, 1e-3))\n validation_variational_expectation = 1. / nClasses * np.log(1. - epsilon) + \\\n (1. - 1. / nClasses) * np.log(epsilon / (nClasses - 1))\n assert_allclose(\n variational_expectations,\n np.ones((nPoints, 1)) * validation_variational_expectation,\n tolerance, tolerance)",
"def test_null_distribution_wald(self, n_cells: int = 2000, n_genes: int = 100):\n logging.getLogger(\"tensorflow\").setLevel(logging.ERROR)\n logging.getLogger(\"batchglm\").setLevel(logging.WARNING)\n logging.getLogger(\"diffxpy\").setLevel(logging.WARNING)\n\n sim = Simulator(num_observations=n_cells, num_features=n_genes)\n sim.generate_sample_description(num_batches=0, num_conditions=0)\n sim.generate()\n\n random_sample_description = pd.DataFrame({\n \"pseudotime\": np.random.random(size=sim.nobs)\n })\n\n test = de.test.continuous_1d(\n data=sim.X,\n continuous=\"pseudotime\",\n df=3,\n formula_loc=\"~ 1 + pseudotime\",\n formula_scale=\"~ 1\",\n factor_loc_totest=\"pseudotime\",\n test=\"wald\",\n sample_description=random_sample_description,\n quick_scale=True,\n batch_size=None,\n training_strategy=\"DEFAULT\",\n dtype=\"float64\"\n )\n summary = test.summary()\n\n # Compare p-value distribution under null model against uniform distribution.\n pval_h0 = stats.kstest(test.pval, 'uniform').pvalue\n\n logging.getLogger(\"diffxpy\").info('KS-test pvalue for null model match of wald(): %f' % pval_h0)\n assert pval_h0 > 0.05, \"KS-Test failed: pval_h0 is <= 0.05!\"\n\n return True",
"def test_forfatal_functions(self):\n logging.getLogger(\"tensorflow\").setLevel(logging.ERROR)\n logging.getLogger(\"batchglm\").setLevel(logging.WARNING)\n logging.getLogger(\"diffxpy\").setLevel(logging.WARNING)\n\n num_observations = 10\n num_features = 2\n\n sim = Simulator(num_observations=num_observations, num_features=num_features)\n sim.generate_sample_description(num_batches=0, num_conditions=0)\n sim.generate()\n\n random_sample_description = pd.DataFrame({\n \"pseudotime\": np.random.random(size=sim.nobs),\n \"batch\": np.random.randint(2, size=sim.nobs)\n })\n\n test = de.test.continuous_1d(\n data=sim.X,\n continuous=\"pseudotime\",\n df=3,\n formula_loc=\"~ 1 + pseudotime + batch\",\n formula_scale=\"~ 1\",\n factor_loc_totest=\"pseudotime\",\n test=\"wald\",\n sample_description=random_sample_description,\n quick_scale=True,\n batch_size=None,\n training_strategy=\"DEFAULT\",\n dtype=\"float64\"\n )\n\n summary = test.summary()\n ids = test.gene_ids\n\n # 1. Test all additional functions which depend on model computation:\n # 1.1. Only continuous model:\n temp = test.log_fold_change(genes=ids, nonnumeric=False)\n temp = test.max(genes=ids, nonnumeric=False)\n temp = test.min(genes=ids, nonnumeric=False)\n temp = test.argmax(genes=ids, nonnumeric=False)\n temp = test.argmin(genes=ids, nonnumeric=False)\n temp = test.summary(nonnumeric=False)\n # 1.2. Full model:\n temp = test.log_fold_change(genes=ids, nonnumeric=True)\n temp = test.max(genes=ids, nonnumeric=True)\n temp = test.min(genes=ids, nonnumeric=True)\n temp = test.argmax(genes=ids, nonnumeric=True)\n temp = test.argmin(genes=ids, nonnumeric=True)\n temp = test.summary(nonnumeric=True)\n\n return True",
"def test_get_wrf_fitness():\n if [on_aws, on_cheyenne, on_magma].count(True) is 0:\n print('\\n!!!Not running test_wrf_era5_diff -- switch to Magma, Cheyenne, or AWS!!!')\n return\n fitness, ghi_mean_error, wpd_mean_error, runtime = get_wrf_fitness(param_ids, start_date, end_date, verbose=True)\n assert fitness >= 0\n assert ghi_mean_error >= 0\n assert wpd_mean_error >= 0\n assert type(runtime) is str",
"def f_np(phi, lambd):\n f = 1. / (np.absolute(1. - sft(phi, lambd)) ** 2)\n return f",
"def f(p, phi, phib, df):\n\treturn - p + exp( - df + Ns*(log((1 - p*phi)/(1 - phi - phib)) + \\\n\t\t(p - 1)*phi - phib + (9./4)*alpha*((phi + phib)**(5./4) - (p*phi)**(5./4))))",
"def fidelity(A: numpy.ndarray, B: numpy.ndarray) -> float:\n Asqrtm = scipy.linalg.sqrtm(A)\n return (numpy.trace(scipy.linalg.sqrtm(Asqrtm@B@Asqrtm)).real)**2",
"def test_fitted_lof_score():\n lof_detector = LOF(k=10)\n x_ref = np.random.randn(100, 2)\n lof_detector.fit(x_ref)\n x = np.array([[0, 10], [0.1, 0]])\n y = lof_detector.predict(x)\n y = y['data']\n assert y['instance_score'][0] > y['instance_score'][1]\n assert not y['threshold_inferred']\n assert y['threshold'] is None\n assert y['is_outlier'] is None\n assert y['p_value'] is None",
"def fdq2(f, x, h=1e-5):\n return (-3*f(x) + 4*f(x+h) - f(x+2*h))/(2*h)\n raise NotImplementedError(\"Problem 2 Incomplete\")",
"def afprob (dfnum, dfden, F):\r\n if type(F) == N.ndarray:\r\n return abetai(0.5*dfden, 0.5*dfnum, dfden/(1.0*dfden+dfnum*F))\r\n else:\r\n return abetai(0.5*dfden, 0.5*dfnum, dfden/float(dfden+dfnum*F))",
"def test_F(x, y, level):\n if len(x) < 2 or len(y) < 2:\n return True\n vx = np.var(x, 0, ddof=1)\n vy = np.var(y, 0, ddof=1)\n vx, vy = vx[vx*vy>0], vy[vx*vy>0]\n if len(vx)==0:\n return False\n F = vx/vy\n p_value = stat.f.cdf(F, len(x)-1, len(y)-1)\n p_value = 2*np.min([p_value, 1-p_value], axis=0)\n if np.any(p_value < level):\n return False\n else:\n return True",
"def ddalf(x):\n return dalf_spl.derivatives(x)[1]",
"def test_fitted_lof_ensemble_score():\n lof_detector = LOF(k=[10, 14, 18])\n x_ref = np.random.randn(100, 2)\n lof_detector.fit(x_ref)\n x = np.array([[0, 10], [0.1, 0]])\n with pytest.raises(ThresholdNotInferredError):\n lof_detector.predict(x)\n\n with pytest.raises(ThresholdNotInferredError):\n lof_detector.score(x)",
"def test_sin_2ndord():\n # one variable\n x = fwd.Variable()\n f = fwd.sin(x)\n assert equals(f.derivative_at(x, {x: 1.0}, order=2), -np.sin(1.0))\n # two variables\n x, y = fwd.Variable(), fwd.Variable()\n g = fwd.sin(x*y)\n assert equals(g.derivative_at(x, {x:1.0, y: 2.0}, order=2), \n -2.0**2 * np.sin(2.0))\n # test error raising\n with pytest.raises(NotImplementedError):\n g.derivative_at(x, {x:1.0, y: 2.0}, order=3)",
"def test_get_fss_components_one_time_fourier(self):\n\n this_actual_sse, this_reference_sse = (\n learning_curves._get_fss_components_one_time(\n actual_target_matrix=ACTUAL_TARGET_MATRIX,\n probability_matrix=PROBABILITY_MATRIX,\n eval_mask_matrix=MASK_MATRIX, matching_distance_px=None\n )\n )\n\n self.assertTrue(numpy.isclose(\n this_actual_sse, FOURIER_FSS_ACTUAL_SSE, atol=TOLERANCE\n ))\n self.assertTrue(numpy.isclose(\n this_reference_sse, FOURIER_FSS_REFERENCE_SSE, atol=TOLERANCE\n ))",
"def test_ForwardEuler_against_linear_solution():\n def f(u, t):\n return 0.2 + (u - u_exact(t))**4\n\n def u_exact(t):\n return 0.2*t + 3\n\n u, t = ForwardEuler(f, U0=u_exact(0), T=3, n=5)\n u_e = u_exact(t)\n error = np.abs(u_e - u).max()\n success = error < 1E-14\n assert success, '|exact - u| = %g != 0' % error",
"def test_intra_power_law_fit(self):\n\n\t\tprint(type(self.fc_layers[0:2]), self.fc_layers[0:2])\n\t\tdetails= self.watcher.analyze(layers=self.fc_layers[0:2], intra=True, randomize=False, vectors=False, pl_package=POWERLAW_PACKAGE, xmax=XMAX_FORCE)\n\t\tactual_alpha = details.alpha[0]\n\t\t#actual_best_fit = details.best_fit[0]\n\t\t#print(actual_alpha,actual_best_fit)\n\n\t\texpected_alpha = 2.654 # not very accurate because of the sparisify transform\n\t\t#expected_best_fit = LOG_NORMAL\n\t\tself.assertAlmostEqual(actual_alpha,expected_alpha, places=1)\n\t\t#self.assertEqual(actual_best_fit, expected_best_fit)",
"def test_var_expectation_values(self, approx_order, strategy, tol):\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n x = 0.543\r\n y = -0.654\r\n\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RX(x, wires=[0])\r\n qml.RY(y, wires=[1])\r\n qml.CNOT(wires=[0, 1])\r\n qml.expval(qml.PauliZ(0))\r\n qml.var(qml.PauliX(1))\r\n\r\n tapes, fn = finite_diff(tape, approx_order=approx_order, strategy=strategy)\r\n res = fn(dev.batch_execute(tapes))\r\n assert res.shape == (2, 2)\r\n\r\n expected = np.array([[-np.sin(x), 0], [0, -2 * np.cos(y) * np.sin(y)]])\r\n assert np.allclose(res, expected, atol=tol, rtol=0)",
"def compute_stability_fh(H, t0, u_attr, r_air, z_t, d0, cp=1004.16):\n L_ob = H .expression(\n '-(r_air * cp * t0 * (u_attr ** 3.0) / 0.41 / 9.806 / H)',\n {'cp': cp, 'H': H, 'r_air': r_air, 't0': t0, 'u_attr': u_attr})\n L_ob = L_ob.where(L_ob.gte(0), -99)\n mh = H \\\n .expression(\n '((1 - (16.0 * (z_t - d0) / L_ob)) ** 0.25)',\n {'d0': d0, 'L_ob': L_ob, 'z_t': z_t}) \\\n .where(L_ob.eq(-99), 0.0)\n fh = H \\\n .expression('(2.0 * log((1.0 + (mh ** 2.0)) / 2.0))', {'mh': mh}) \\\n .where(L_ob.lte(-100).Or(L_ob.gte(100)), 0)\n\n return fh",
"def test_exp_2ndord_2vars():\n x, y = fwd.Variable(), fwd.Variable()\n f = fwd.exp(x/y)\n df_dxdy = lambda x, y: -(x*np.exp(x/y) + y*np.exp(x/y)) / y**3\n assert equals(f.derivative_at((x, x), {x: 1.5, y:2.5}, order=2),\n f.derivative_at( x, {x: 1.5, y:2.5}, order=2)) \n assert equals(f.derivative_at((x, y), {x: 1.5, y:2.5}, order=2), \n f.derivative_at((y, x), {x: 1.5, y:2.5}, order=2))\n assert equals(f.derivative_at((x, y), {x: 1.5, y:2.5}, order=2), \n df_dxdy(1.5, 2.5))",
"def FoBa(X, y, epsilon=0.1, maxit_f=100, maxit_b=5, backwards_freq=5):\n\n # Initializations\n n, d = X.shape\n F = {} # dict so save features in each step\n F[0] = set()\n w = {} # dict to save corresponding weight vector\n w[0] = np.zeros((d, 1), dtype=np.float64)\n k = 0\n delta = {}\n\n for forward_iter in range(maxit_f): # forward step\n k = k + 1\n\n zero_coeffs = np.where(w[k - 1] == 0)[0] # all coefficients, which are zero, to determine next best candidate\n if len(zero_coeffs) == 0: return w[k - 1] # all coefficients are included in model --> return\n\n current_coeffs = np.where(w[k - 1] != 0)[0]\n err_after_addition = []\n X_current = X[:, current_coeffs]\n\n # check which feature reduces residual most\n for i in zero_coeffs:\n X_test = np.concatenate([X_current, X[:, i].reshape(-1, 1)], 1) # add candidate feature to current best X\n w_best, err, _, _ = np.linalg.lstsq(X_test, y, rcond=None)\n err_after_addition.append(err) # save residuals\n\n best_new_trial = np.argmin(err_after_addition)\n best_new_index = zero_coeffs[best_new_trial] # feature which reduces residual most\n\n F[k] = F[k - 1].union({best_new_index}) # update currently best features\n w[k] = np.zeros((d, 1), dtype=np.float64)\n # calculate new weight vector; would be slightly more efficient to save results above\n w[k][list(F[k])] = np.linalg.lstsq(X[:, list(F[k])], y, rcond=None)[0]\n\n # check for break condition\n delta[k] = np.linalg.norm(X.dot(w[k - 1]) - y) - np.linalg.norm(X.dot(w[k]) - y)\n if delta[k] < epsilon: return w[k - 1] # improvement was smaller than epsilon --> use old weights\n\n # backward step, do once every few forward steps\n if forward_iter % backwards_freq == 0 and forward_iter > 0:\n\n for backward_iter in range(maxit_b):\n\n non_zeros = np.where(w[k] != 0) # current features, which are candidates for removal\n err_after_simplification = []\n\n for j in non_zeros[0]:\n w_simple = np.copy(w[k])\n w_simple[j] = 0 # remove candidate\n # calculate residual for simpler model\n err_after_simplification.append(np.linalg.norm(X.dot(w_simple) - y))\n least_important = np.argmin(err_after_simplification)\n i_least_important = non_zeros[0][least_important] # least important feature\n w_simple = np.copy(w[k])\n w_simple[i_least_important] = 0\n\n # check for break condition on backward step\n delta_p = err_after_simplification[least_important] - np.linalg.norm(X.dot(w[k]) - y)\n if delta_p > 0.5 * delta[k]: break # do not delete if residual increased too much\n\n k = k - 1\n F[k] = F[k + 1].difference({i_least_important}) # delete least important feature\n w[k] = np.zeros((d, 1), dtype=np.float64)\n w[k][list(F[k])] = np.linalg.lstsq(X[:, list(F[k])], y, rcond=None)[0] # recalculate w\n\n warnings.warn('FoBa stopped, because maxit_f was reached')\n print('epsilon =', epsilon)\n return w[k]",
"def test_estimateFullRichness(self):\r\n # Verified with iNEXT.\r\n\r\n # f2 > 0\r\n obs = self.estimator3.estimateFullRichness()\r\n assert_almost_equal(obs, 5.5)\r\n\r\n # f2 == 0\r\n obs = self.estimator4.estimateFullRichness()\r\n assert_almost_equal(obs, 4)",
"def fd(model, EPS=0.01):\n\n N = model.N\n f = model.f\n gW_fd, gb_fd = zeros((N, N)), zeros((N))\n for x, y in model.train:\n for i in xrange(N):\n # get gW_fd\n for j in xrange(N):\n model.W[i, j] += EPS\n val1 = f(model.W, model.b)\n model.W[i, j] -= 2*EPS\n val2 = f(model.W, model.b)\n model.W[i, j] += EPS\n gW_fd[i, j] = (val1 - val2) / (2 * EPS)\n \n # get gb_fd\n model.b[i] += EPS\n val1 = f(model.W, model.b)\n model.b[i] -= 2*EPS\n val2 = f(model.W, model.b)\n model.b[i] += EPS\n gb_fd[i] = (val1 - val2) / (2 * EPS)\n\n \n return gW_fd, gb_fd",
"def _f(X, g, n):\n if n == 3:\n n = 3.001 # for numerical stability\n hyp2f1_term = hyp2f1((n-1)/2, g/2, n/2, 1/(1+X**2))\n beta_term = beta((n-1)/2, 0.5)\n return 0.5 * beta_term * hyp2f1_term * (1+X**2) ** ((1-n)/2)",
"def oned_linear(kernel, test, w_g):\n b = np.dot(test.T, kernel*w_g)\n return b",
"def test_intra_power_law_fit2(self):\n\t\tprint(type(self.fc_layers[0:2]), self.fc_layers[0:2])\n\t\tdetails= self.watcher.analyze(layers=self.fc_layers[0:2], intra=True, sparsify=False, pl_package=POWERLAW_PACKAGE, xmax=XMAX_FORCE)\n\t\tactual_alpha = details.alpha[0]\n\t\t#actual_best_fit = details.best_fit[0]\n\t\t#print(actual_alpha,actual_best_fit)\n\n\n\t\texpected_alpha = 2.719 # close to exact ?\n\t\t#expected_best_fit = LOG_NORMAL\n\t\tself.assertAlmostEqual(actual_alpha,expected_alpha, places=2)\n\t\t#self.assertEqual(actual_best_fit, expected_best_fit)",
"def test_LDL_full_missing(ft_mvar, theta_mvar_diffuse, Yt_mvar, Xt_mvar):\n kf = Filter(ft_mvar, Yt_mvar, Xt_mvar, for_smoother=True)\n kf.init_attr(theta_mvar_diffuse)\n \n Y_t, H_t, D_t, R_t, L_t, L_inv = kf._LDL(2)\n assert kf.n_t[2] == 0\n \n R_t_move = np.array([[3, 2, 1], \n [2, 4, 3], \n [1, 3, 6]])\n L_t_expected, R_t_expected, _ = linalg.ldl(R_t_move) \n L_inv_expected, _ = linalg.lapack.dtrtri(\n L_t_expected, lower=True)\n np.testing.assert_array_equal(L_t, L_t_expected)\n np.testing.assert_array_equal(R_t, R_t_expected)\n\n Y_t_expected = linalg.pinv(L_t_expected).dot(\n np.array([0, 0, 0]).reshape(-1, 1))\n np.testing.assert_array_almost_equal(Y_t, Y_t_expected)\n \n H_t_expected = L_inv_expected.dot(\n np.array([1, 2, 2.4]).reshape(-1, 1))\n np.testing.assert_array_almost_equal(H_t, H_t_expected)\n\n expected_partitioned_index = np.array([0, 1, 2])\n np.testing.assert_array_equal(kf.partitioned_index[2], \n expected_partitioned_index)",
"def test_trapezoidal_linear():\r\n\tf = lambda x: 6E8*x-4E6\r\n\tF = lambda x: 3E8*x**2 - 4E6*x #Anti-derivative\r\n\ta = 1.2; b = 4.4\r\n\texpected = F(b) - F(a)\r\n\ttol = 1E-6\r\n\tfor n in 2, 20, 21:\r\n\t\tcomputed = trapezoidal(f,a,b,n)\r\n\t\terror = abs(expected - computed)\r\n\t\tsuccess = error < tol\r\n\t\tmsg = 'n=%d, err=%g' % (n,error)\r\n\t\tassert success, msg",
"def test_ForwardEuler_against_hand_calculations():\n def f(u, t):\n return u\n u, t = ForwardEuler(f, U0=1, T=0.2, n=2)\n exact = np.array([1, 1.1, 1.21]) # hand calculations\n error = np.abs(exact - u).max()\n success = error < 1E-14\n assert success, '|exact - u| = %g != 0' % error",
"def test_smoohted_LM(self):\n\t\tprecision = 10**-8\n\t\t\t\t \n\t\tif self.n == 1:\n\t\t\t\t \n\t\t\tP_sum = sum(self.estimate_smoothed_prob('', w) for w in self.vocab)\n\t\t\t\n\t\t\tassert abs(1.0 - P_sum) < precision, 'Probability mass does not sum up to one.'\n\t\t\t\t \n\t\telif self.n == 2:\n\t\t\thistories = ['the', 'in', 'at', 'blue', 'white']\n\t\t\t\t \n\t\t\tfor h in histories:\n\t\t\t\t \n\t\t\t\tP_sum = sum(self.estimate_smoothed_prob(h, w) for w in self.vocab)\n\t\t\t\t\n\t\t\t\tassert abs(1.0 - P_sum) < precision, 'Probability mass does not sum up to one for history' + h\n\t\t\t\t\t \n\t\tprint('TEST SUCCESSFUL!')\n\t\t# YOUR CODE HERE",
"def test_gaussian_basis_hon(self):\n def row_generator():\n return [random.gauss(0, 1) for i in range(self.d)]\n\n self._test_sample_basis_hon(row_generator)",
"def test_basis_dynamics(self):\n # note: all prints here go to the output item in the json file\n trajectories_fit = self.notebook_locals[\"trajectories_fit\"]\n last_x = trajectories_fit[0][-1][0]\n last_z = trajectories_fit[0][-1][1]\n last_th = trajectories_fit[0][-1][2]\n self.assertLessEqual(\n last_x, 3.36, msg=\"Simulated trajectory x is incorrect\"\n )\n self.assertGreaterEqual(\n last_x, 3.34, msg=\"Simulated trajectory x is incorrect\"\n )\n self.assertLessEqual(\n last_z, 5.10, msg=\"Simulated trajectory z is incorrect\"\n )\n self.assertGreaterEqual(\n last_z, 4.99, msg=\"Simulated trajectory z is incorrect\"\n )\n self.assertLessEqual(\n last_th, -0.52, msg=\"Simulated trajectory theta is incorrect\"\n )\n self.assertGreaterEqual(\n last_th, -0.54, msg=\"Simulated trajectory theta is incorrect\"\n )"
] | [
"0.62175006",
"0.59313136",
"0.57992893",
"0.5789409",
"0.5753273",
"0.56032485",
"0.55942744",
"0.5555011",
"0.5532248",
"0.5489704",
"0.5489704",
"0.5489704",
"0.5489704",
"0.54816693",
"0.54639775",
"0.5443622",
"0.54368186",
"0.5426024",
"0.542323",
"0.54194605",
"0.5411049",
"0.5405133",
"0.5396611",
"0.53919375",
"0.5390074",
"0.53840005",
"0.53831106",
"0.5372724",
"0.5356651",
"0.5302716",
"0.530185",
"0.5289566",
"0.5288732",
"0.52536476",
"0.5236124",
"0.5201314",
"0.5188345",
"0.51872784",
"0.5184494",
"0.51841396",
"0.51618814",
"0.51596254",
"0.51463085",
"0.51296157",
"0.5129067",
"0.5127797",
"0.51231235",
"0.51227653",
"0.5119433",
"0.5119238",
"0.51166475",
"0.5101815",
"0.50929403",
"0.5090169",
"0.508579",
"0.5069518",
"0.5067761",
"0.5061503",
"0.5061208",
"0.50538105",
"0.50509506",
"0.50498545",
"0.50412756",
"0.5040447",
"0.5040234",
"0.5029429",
"0.50290877",
"0.50209904",
"0.50209165",
"0.5017927",
"0.5008696",
"0.500608",
"0.50048816",
"0.5000878",
"0.4995461",
"0.49946848",
"0.49885383",
"0.49822548",
"0.49640235",
"0.49569815",
"0.49528325",
"0.49508783",
"0.4949961",
"0.49462488",
"0.49414986",
"0.49409127",
"0.49346223",
"0.49296263",
"0.4928988",
"0.49244785",
"0.4924302",
"0.4920978",
"0.4918061",
"0.4914799",
"0.4907933",
"0.49055794",
"0.48961",
"0.48938915",
"0.48936957",
"0.48897174"
] | 0.59112656 | 2 |
Compute a Wald-test for a joint linear hypothesis. | def wald_test(self, r_matrix, cov_p=None, scale=1.0, invcov=None,
use_f=None):
if use_f is None:
# switch to use_t false if undefined
use_f = (hasattr(self, 'use_t') and self.use_t)
from patsy import DesignInfo
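        # translate the constraint specification into a restriction matrix (coefs) and constants q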
names = self.model.data.param_names
LC = DesignInfo(names).linear_constraint(r_matrix)
r_matrix, q_matrix = LC.coefs, LC.constants
if (self.normalized_cov_params is None and cov_p is None and
invcov is None and not hasattr(self, 'cov_params_default')):
raise ValueError('need covariance of parameters for computing '
'F statistics') # pragma: no cover
cparams = np.dot(r_matrix, self.params[:, None])
J = float(r_matrix.shape[0]) # number of restrictions
if q_matrix is None:
q_matrix = np.zeros(J)
else:
q_matrix = np.asarray(q_matrix)
if q_matrix.ndim == 1:
q_matrix = q_matrix[:, None]
if q_matrix.shape[0] != J:
raise ValueError("r_matrix and q_matrix must have the same "
"number of rows")
Rbq = cparams - q_matrix
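        # the Wald quadratic form weights R*b - q by the (pseudo-)inverse covariance of the restrictions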
if invcov is None:
cov_p = self.cov_params(r_matrix=r_matrix, cov_p=cov_p)
if np.isnan(cov_p).max():
raise ValueError("r_matrix performs f_test for using "
"dimensions that are asymptotically "
"non-normal")
invcov = np.linalg.pinv(cov_p)
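            # guard against a rank-deficient constraint covariance (e.g. redundant restrictions)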
J_ = np.linalg.matrix_rank(cov_p)
if J_ < J:
warnings.warn('covariance of constraints does not have full '
'rank. The number of constraints is %d, but '
'rank is %d' % (J, J_), ValueWarning)
J = J_
if (hasattr(self, 'mle_settings') and
self.mle_settings['optimizer'] in ['l1', 'l1_cvxopt_cp']):
F = nan_dot(nan_dot(Rbq.T, invcov), Rbq)
else:
F = np.dot(np.dot(Rbq.T, invcov), Rbq)
df_resid = getattr(self, 'df_resid_inference', self.df_resid)
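        # use_f scales the statistic by J and reports an F test; otherwise report chi-squared with J df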
if use_f:
F /= J
return ContrastResults(F=F, df_denom=df_resid,
df_num=J)
else:
return ContrastResults(chi2=F, df_denom=J, statistic=F,
distribution='chi2', distargs=(J,)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_null_distribution_wald(self, n_cells: int = 2000, n_genes: int = 100):\n logging.getLogger(\"tensorflow\").setLevel(logging.ERROR)\n logging.getLogger(\"batchglm\").setLevel(logging.WARNING)\n logging.getLogger(\"diffxpy\").setLevel(logging.WARNING)\n\n sim = Simulator(num_observations=n_cells, num_features=n_genes)\n sim.generate_sample_description(num_batches=0, num_conditions=0)\n sim.generate()\n\n random_sample_description = pd.DataFrame({\n \"pseudotime\": np.random.random(size=sim.nobs)\n })\n\n test = de.test.continuous_1d(\n data=sim.X,\n continuous=\"pseudotime\",\n df=3,\n formula_loc=\"~ 1 + pseudotime\",\n formula_scale=\"~ 1\",\n factor_loc_totest=\"pseudotime\",\n test=\"wald\",\n sample_description=random_sample_description,\n quick_scale=True,\n batch_size=None,\n training_strategy=\"DEFAULT\",\n dtype=\"float64\"\n )\n summary = test.summary()\n\n # Compare p-value distribution under null model against uniform distribution.\n pval_h0 = stats.kstest(test.pval, 'uniform').pvalue\n\n logging.getLogger(\"diffxpy\").info('KS-test pvalue for null model match of wald(): %f' % pval_h0)\n assert pval_h0 > 0.05, \"KS-Test failed: pval_h0 is <= 0.05!\"\n\n return True",
"def trial_wtd(trial, omit_missing_frames=True):\n x = np.arange(len(trial.HMM_MLE))/60\n y = (trial.HMM_MLE == 0)\n if omit_missing_frames:\n x = x[trial.HMM_MLE >= 0]\n y = y[trial.HMM_MLE >= 0]\n return linear_regression_with_CIs(x/60, y, return_CIs=False)",
"def test_double_well_linear():\n file = \"tests/double_well_linear/schroedinger.inp\"\n x_min, x_max, n_point, ev_first, ev_last, interp_type, potential_decl, alpha = sd.read_inp(file)\n potential_dat = sd.potential_discret(x_min, x_max, n_point, interp_type, potential_decl)\n eigenvektors, eigenvalues = sd.solve_wavefuncs(n_point, ev_first, ev_last, potential_dat, alpha)\n sd.solve_expvalues(ev_first, ev_last, potential_dat, eigenvektors)\n potential_exp = np.loadtxt(\"tests/double_well_linear/potential.exp\")\n eigenvalues_exp = np.loadtxt(\"tests/double_well_linear/eigenvalues.exp\")\n assert np.all(eigenvalues-eigenvalues_exp < 1e-10)\n assert np.all(potential_dat-potential_exp < 1e-10)",
"def wald_test_terms(self, skip_single=False, extra_constraints=None,\n combine_terms=None): # noqa:E501\n result = self\n if extra_constraints is None:\n extra_constraints = []\n if combine_terms is None:\n combine_terms = []\n design_info = getattr(result.model.data, 'design_info', None)\n\n if design_info is None and extra_constraints is None:\n raise ValueError('no constraints, nothing to do')\n\n identity = np.eye(len(result.params))\n constraints = []\n combined = defaultdict(list)\n if design_info is not None:\n for term in design_info.terms:\n cols = design_info.slice(term)\n name = term.name()\n constraint_matrix = identity[cols]\n\n # check if in combined\n for cname in combine_terms:\n if cname in name:\n combined[cname].append(constraint_matrix)\n\n k_constraint = constraint_matrix.shape[0]\n if skip_single:\n if k_constraint == 1:\n continue\n\n constraints.append((name, constraint_matrix))\n\n combined_constraints = []\n for cname in combine_terms:\n combined_constraints.append((cname,\n np.vstack(combined[cname])))\n else:\n # check by exog/params names if there is no formula info\n for col, name in enumerate(result.model.exog_names):\n constraint_matrix = identity[col]\n\n # check if in combined\n for cname in combine_terms:\n if cname in name:\n combined[cname].append(constraint_matrix)\n\n if skip_single:\n continue\n\n constraints.append((name, constraint_matrix))\n\n combined_constraints = []\n for cname in combine_terms:\n combined_constraints.append((cname,\n np.vstack(combined[cname])))\n\n use_t = result.use_t\n distribution = ['chi2', 'F'][use_t]\n\n res_wald = []\n index = []\n for pair in constraints + combined_constraints + extra_constraints:\n name, constraint = pair\n wt = result.wald_test(constraint)\n row = [wt.statistic.item(), wt.pvalue, constraint.shape[0]]\n if use_t:\n row.append(wt.df_denom)\n res_wald.append(row)\n index.append(name)\n\n # distribution neutral names\n col_names = ['statistic', 'pvalue', 'df_constraint']\n if use_t:\n col_names.append('df_denom')\n # TODO: maybe move DataFrame creation to results class\n table = pd.DataFrame(res_wald, index=index, columns=col_names)\n res = WaldTestResults(None, distribution, None, table=table)\n # TODO: remove temp again, added for testing\n res.temp = constraints + combined_constraints + extra_constraints\n return res",
"def test_linear_in_tut(self):\n # reproducible arbitrariness\n np.random.seed(5000)\n\n tut_out = np.random.randn(self.Ns)\n alpha = 0.7\n\n self.conductor.out_step = np.random.randn(self.Nc)\n self.tutor.out_fct = lambda _: self.rule.theta + tut_out\n\n W0 = np.copy(self.syns.W)\n\n sim = simulation.Simulation(self.conductor, self.student, self.tutor,\n self.syns, self.rule, dt=self.dt)\n sim.run(self.dt)\n\n change1 = self.syns.W - W0\n\n self.syns.W = np.copy(W0)\n self.tutor.out_fct = lambda _: self.rule.theta + alpha*tut_out\n sim.run(self.dt)\n\n change2 = self.syns.W - W0\n\n self.assertTrue(np.allclose(change2, alpha*change1))",
"def test_linear_in_tut(self):\n # reproducible arbitrariness\n np.random.seed(5000)\n\n tut_out = np.random.randn(self.Ns)\n alpha = 0.7\n\n self.conductor.out_step = np.random.randn(self.Nc)\n self.tutor.out_fct = lambda _: self.rule.theta + tut_out\n\n W0 = np.copy(self.syns.W)\n\n sim = simulation.Simulation(self.conductor, self.student, self.tutor,\n self.syns, self.rule, dt=self.dt)\n sim.run(self.dt)\n\n change1 = self.syns.W - W0\n\n self.syns.W = np.copy(W0)\n self.tutor.out_fct = lambda _: self.rule.theta + alpha*tut_out\n sim.run(self.dt)\n\n change2 = self.syns.W - W0\n\n self.assertTrue(np.allclose(change2, alpha*change1))",
"def test_twodstats():\n if __name__ == '__main__':\n logger = piff.config.setup_logger(2)\n else:\n logger = None\n\n model = piff.Gaussian(fastfit=True)\n interp = piff.Polynomial(order=1) # should find that order=1 is better\n # create background model\n stars, true_model = generate_starlist(100)\n psf = piff.SimplePSF(model, interp)\n psf.fit(stars, None, None)\n stars = psf.stars # These have the right fit parameters\n\n # check the coeffs of sigma and g2, which are actually linear fits\n # skip g1 since it is actually a 2d parabola\n # factor of 0.263 is to account for going from pixel xy to wcs uv\n np.testing.assert_almost_equal(psf.interp.coeffs[0].flatten(),\n np.array([0.4, 0, 1. / (0.263 * 2048), 0]), decimal=4)\n np.testing.assert_almost_equal(psf.interp.coeffs[2].flatten(),\n np.array([-0.1 * 1000 / 2048, 0, 0.1 / (0.263 * 2048), 0]),\n decimal=4)\n\n stats = piff.TwoDHistStats(nbins_u=5, nbins_v=5) # implicitly np.median\n stats.compute(psf, stars, logger=logger)\n # check the twodhists\n # get the average value in the bin\n u_i = 3\n v_i = 3\n icen = stats.twodhists['u'][v_i, u_i] / 0.263\n jcen = stats.twodhists['v'][v_i, u_i] / 0.263\n print('icen = ',icen)\n print('jcen = ',jcen)\n icenter = 1000\n jcenter = 2000\n # the average value in the bin should match up with the model for the average coordinates\n sigma, g1, g2 = psf_model(icen, jcen, icenter, jcenter)\n gsq = g1**2 + g2**2\n T = 2*sigma**2 * (1+gsq)/(1-gsq)\n T_average = stats.twodhists['T'][v_i, u_i]\n g1_average = stats.twodhists['g1'][v_i, u_i]\n g2_average = stats.twodhists['g2'][v_i, u_i]\n # assert equal to 4th decimal\n print('T, g1, g2 = ',[T,g1,g2])\n print('av T, g1, g2 = ',[T_average,g1_average,g2_average])\n np.testing.assert_almost_equal([T, g1, g2], [T_average, g1_average, g2_average],\n decimal=2)\n\n # Test the plotting and writing\n twodstats_file = os.path.join('output','twodstats.pdf')\n stats.write(twodstats_file)\n\n with np.testing.assert_raises(ValueError):\n stats.write() # If not given in constructor, must give file name here.\n\n # repeat for whisker\n stats = piff.WhiskerStats(nbins_u=21, nbins_v=21, reducing_function='np.mean')\n stats.compute(psf, stars)\n # Test the plotting and writing\n whisker_file = os.path.join('output','whiskerstats.pdf')\n stats.write(whisker_file)\n with np.testing.assert_raises(ValueError):\n stats.write()\n\n # With large number of bins, many will have no objects. This is ok.\n # Also, can use other np functions like max, std, instead to get different stats\n # Not sure when these would be useful, but they are allowed.\n # And, check usage where file_name is given in init.\n twodstats_file2 = os.path.join('output','twodstats.pdf')\n stats2 = piff.TwoDHistStats(nbins_u=50, nbins_v=50, reducing_function='np.std',\n file_name=twodstats_file2)\n with np.testing.assert_raises(RuntimeError):\n stats2.write() # Cannot write before compute\n stats2.compute(psf, stars, logger=logger)\n stats2.write()\n\n whisker_file2 = os.path.join('output','whiskerstats.pdf')\n stats2 = piff.WhiskerStats(nbins_u=100, nbins_v=100, reducing_function='np.max',\n file_name=whisker_file2)\n with np.testing.assert_raises(RuntimeError):\n stats2.write() # Cannot write before compute\n stats2.compute(psf, stars)\n stats2.write()",
"def test_smoohted_LM(self):\n\t\tprecision = 10**-8\n\t\t\t\t \n\t\tif self.n == 1:\n\t\t\t\t \n\t\t\tP_sum = sum(self.estimate_smoothed_prob('', w) for w in self.vocab)\n\t\t\t\n\t\t\tassert abs(1.0 - P_sum) < precision, 'Probability mass does not sum up to one.'\n\t\t\t\t \n\t\telif self.n == 2:\n\t\t\thistories = ['the', 'in', 'at', 'blue', 'white']\n\t\t\t\t \n\t\t\tfor h in histories:\n\t\t\t\t \n\t\t\t\tP_sum = sum(self.estimate_smoothed_prob(h, w) for w in self.vocab)\n\t\t\t\t\n\t\t\t\tassert abs(1.0 - P_sum) < precision, 'Probability mass does not sum up to one for history' + h\n\t\t\t\t\t \n\t\tprint('TEST SUCCESSFUL!')\n\t\t# YOUR CODE HERE",
"def test_double_ended_ols_wls_estimate_synthetic():\n from dtscalibration import DataStore\n import numpy as np\n\n np.random.seed(0)\n\n cable_len = 100.\n nt = 50\n time = np.arange(nt)\n x = np.linspace(0., cable_len, 100)\n ts_cold = np.ones(nt) * 4.\n ts_warm = np.ones(nt) * 20.\n\n C_p = 15246\n C_m = 2400.\n dalpha_r = 0.0005284\n dalpha_m = 0.0004961\n dalpha_p = 0.0005607\n gamma = 482.6\n cold_mask = x < 0.5 * cable_len\n warm_mask = np.invert(cold_mask) # == False\n temp_real = np.ones((len(x), nt))\n temp_real[cold_mask] *= ts_cold + 273.15\n temp_real[warm_mask] *= ts_warm + 273.15\n\n st = C_p * np.exp(-dalpha_r * x[:, None]) * \\\n np.exp(-dalpha_p * x[:, None]) * np.exp(gamma / temp_real) / \\\n (np.exp(-gamma / temp_real) - 1)\n ast = C_m * np.exp(-dalpha_r * x[:, None]) * \\\n np.exp(-dalpha_m * x[:, None]) / (np.exp(-gamma / temp_real) - 1)\n rst = C_p * np.exp(-dalpha_r * (-x[:, None] + cable_len)) * \\\n np.exp(-dalpha_p * (-x[:, None] + cable_len)) * \\\n np.exp(gamma / temp_real) / (np.exp(-gamma / temp_real) - 1)\n rast = C_m * np.exp(-dalpha_r * (-x[:, None] + cable_len)) * np.exp(\n -dalpha_m * (-x[:, None] + cable_len)) / \\\n (np.exp(-gamma / temp_real) - 1)\n\n alpha = np.mean(np.log(rst / rast) - np.log(st / ast), axis=1) / 2\n\n ds = DataStore({\n 'st': (['x', 'time'], st),\n 'ast': (['x', 'time'], ast),\n 'rst': (['x', 'time'], rst),\n 'rast': (['x', 'time'], rast),\n 'userAcquisitionTimeFW': (['time'], np.ones(nt)),\n 'userAcquisitionTimeBW': (['time'], np.ones(nt)),\n 'cold': (['time'], ts_cold),\n 'warm': (['time'], ts_warm)\n },\n coords={\n 'x': x,\n 'time': time},\n attrs={\n 'isDoubleEnded': '1'})\n\n sections = {\n 'cold': [slice(0., 0.5 * cable_len)],\n 'warm': [slice(0.5 * cable_len, cable_len)]}\n\n # OLS\n ds.calibration_double_ended(sections=sections,\n st_label='st',\n ast_label='ast',\n rst_label='rst',\n rast_label='rast',\n method='ols',\n solver='sparse')\n\n np.testing.assert_almost_equal(\n ds.gamma.values, gamma, decimal=6)\n np.testing.assert_almost_equal(\n ds.alpha.values, alpha, decimal=7)\n np.testing.assert_almost_equal(\n ds.TMPF.values, temp_real - 273.15, decimal=4)\n np.testing.assert_almost_equal(\n ds.TMPB.values, temp_real - 273.15, decimal=4)\n np.testing.assert_almost_equal(\n ds.TMPW.values, temp_real - 273.15, decimal=4)\n\n # WLS\n ds.calibration_double_ended(sections=sections,\n st_label='st',\n ast_label='ast',\n rst_label='rst',\n rast_label='rast',\n st_var=1e-7,\n ast_var=1e-7,\n rst_var=1e-7,\n rast_var=1e-7,\n method='wls',\n solver='sparse',\n tmpw_mc_size=5)\n\n np.testing.assert_almost_equal(\n ds.gamma.values, gamma, decimal=5)\n np.testing.assert_almost_equal(\n ds.alpha.values, alpha, decimal=6)\n np.testing.assert_almost_equal(\n ds.TMPF.values, temp_real - 273.15, decimal=4)\n np.testing.assert_almost_equal(\n ds.TMPB.values, temp_real - 273.15, decimal=4)\n np.testing.assert_almost_equal(\n ds.TMPW.values, temp_real - 273.15, decimal=4)",
"def test_dc_lcsmodel_class():\n\n # Set the problem size.\n n = 1000\n p = 3\n\n # Define the test model\n TM = test.Model1(n,p)\n\n # Note: diff_A/diff_b do not require A/b as an input in this case,\n # but in the more general case they might.\n\n # Check the basic model calculations.\n theta = numpy.array((1., 0.1, 0.2, 0.1))\n A = TM.eval_A(theta)\n B = TM.eval_b(theta)\n\n dA_1 = TM.diff_A(A, theta, 0).todense()\n dA_2 = TM.diff_A(A, theta, 1).todense()\n dA_3 = TM.diff_A(A, theta, 2).todense()\n dA_4 = TM.diff_A(A, theta, 3).todense()\n Z = numpy.zeros_like(dA_1)\n \n dB_1 = TM.diff_b(B, theta, 0)\n dB_2 = TM.diff_b(B, theta, 1)\n dB_3 = TM.diff_b(B, theta, 2)\n dB_4 = TM.diff_b(B, theta, 3)\n z = numpy.zeros_like(dB_1)\n \n print \"dA/dtheta_1 check:\", numpy.allclose(dA_1, TM.A1.todense())\n print \"dA/dtheta_2 check:\", numpy.allclose(dA_2, TM.A2.todense())\n print \"dA/dtheta_3 check:\", numpy.allclose(dA_3, Z)\n print \"dA/dtheta_4 check:\", numpy.allclose(dA_4, Z)\n\n print \"db/dtheta_1 check:\", numpy.allclose(dB_1, z)\n print \"db/dtheta_2 check:\", numpy.allclose(dB_2, z)\n print \"db/dtheta_3 check:\", numpy.allclose(dB_3, TM.B1)\n print \"db/dtheta_4 check:\", numpy.allclose(dB_4, TM.B2)\n\n\n #\n # Test the lcs model class\n #\n\n gLCS = DC_LCSModel()\n gLCS.eval_A = TM.eval_A\n gLCS.eval_b = TM.eval_b\n gLCS.diff_A = TM.diff_A\n gLCS.diff_b = TM.diff_b\n \n gLCS.quiet=True\n gLCS.A_params_mask = numpy.array([True, True, False, False])\n gLCS.b_params_mask = numpy.array([False, False, True, True])\n\n x = gLCS.eval(theta)\n #print x.shape\n\n for k in range(p):\n print \"Primal solution for x_{}, matches spsolve calculation: {}\".\\\n format(k, numpy.allclose(x[:,k], spla.spsolve(A,B[:,k])))\n\n\n D = gLCS.jacobian(theta)\n\n # -- If theta[1]=0, and theta[2:3] are fixed, then there is an analytical\n # calculation for x(theta[0]), and in this case we can check the first\n # column of D.\n\n theta = numpy.array((5.1, 0, 1.2, 2.1))\n A = TM.eval_A(theta)\n B = TM.eval_b(theta)\n D = gLCS.jacobian(theta)\n\n for k in range(p):\n D_col_1 = -(1./theta[0]**2) * B[:,k]\n print \"First column of D_{} all close: {}\".\\\n format(k, numpy.allclose(D[k,:,0], D_col_1))\n\n\n # -- We'll use a numerical approximation to check the second column of D\n\n h = 0.000001\n theta = numpy.array((5.1, 1.1, 1.2, 2.1))\n dtheta = numpy.array((0., h, 0., 0.))\n A = TM.eval_A(theta)\n B = TM.eval_b(theta)\n x = gLCS.eval(theta)\n D = gLCS.jacobian(theta)\n\n A_dt = TM.eval_A(theta + dtheta)\n B_dt = TM.eval_b(theta + dtheta)\n\n for k in range(p):\n x_dt = spla.spsolve(A_dt, B_dt[:,k])\n D_col_2_num_approx = (x_dt - x[:,k])/h\n max_abs_err = numpy.max(numpy.abs(D[k,:,1] - D_col_2_num_approx))\n\n print \"Second column of D_{} all close: {}\".\\\n format(k, numpy.allclose(D[k,:,1], D_col_2_num_approx))\n \n print \"Max abs error in second column of D_{}: {}\".\\\n format(k, max_abs_err)\n \n\n # -- If theta[0] and theta[1] are fixed, A(theta) is determined, and A^{-1}\n # is fixed. With a little math you can analytically calculate the third\n # and fourth columns of D. 
In fact x(theta) is linear in theta[2] and\n # theta[3], but not in theta[0] and theta[1].\n\n theta = numpy.array((1., 0.1, 0.2, 0.1))\n A = TM.eval_A(theta)\n D = gLCS.jacobian(theta);\n\n for k in range(p):\n D_col_3 = spla.spsolve(A, TM.B1[:,k])\n\n print \"Third column of D_{} all close: {}\".\\\n format(k, numpy.allclose(D[k,:,2], D_col_3))\n\n\n for k in range(p):\n D_col_4 = spla.spsolve(A, TM.B2[:,k])\n \n print \"Fourth column of D_{} all close: {}\".\\\n format(k, numpy.allclose(D[k,:,3], D_col_4))",
"def run_welchs_ttest(stat1, stat2, alpha, faster):\n m1 = stat1[MEAN]\n m2 = stat2[MEAN]\n\n s1 = stat1[STDDEV]\n s2 = stat2[STDDEV]\n\n n1 = stat1[ROUNDS]\n n2 = stat2[ROUNDS]\n\n df1 = n1 - 1 # degree of freedom of stat1\n df2 = n2 - 1 # degree of freedom of stat2\n\n sample_v1 = s1**2 / n1 # biased estimated sample variance of stat1\n sample_v2 = s2**2 / n2 # biased estimated sample variance of stat2\n\n biased_variance = np.sqrt(sample_v1 + sample_v2)\n # degree of freedom\n df = (sample_v1 + sample_v2) ** 2 / (\n sample_v1**2 / (df1) + sample_v2**2 / (df2)\n )\n\n mean_delta = m1 - m2\n t_stat = mean_delta / biased_variance\n\n if faster:\n # Null hypothesis is stat1 >= stat2.\n # Alternative hypothesis is stat1 < stat2.\n p_value = t.cdf(t_stat, df)\n\n # Compute one sided confidence interval (-inf, x)\n upper_bound = mean_delta + t.ppf(1.0 - alpha, df) * biased_variance\n upper_bound = format(upper_bound, \".5f\")\n lower_bound = \"-inf\"\n else:\n # Null hypothesis is stat1 <= stat2.\n # Alternative hypothesis is stat1 > stat2.\n p_value = 1.0 - t.cdf(t_stat, df)\n\n # Compute one sided confidence interval (x, inf)\n upper_bound = \"inf\"\n lower_bound = mean_delta + t.ppf(alpha, df) * biased_variance\n lower_bound = format(lower_bound, \".5f\")\n\n return TTestResult(\n p_value=p_value,\n t_stat=t_stat,\n lower_bound=lower_bound,\n upper_bound=upper_bound,\n mean_delta=format(mean_delta, \".5f\"),\n )",
"def J(theta, x, y):\n m = len(y)\n z = theta.dot(x.T) #argument for hypothesis function\n return 1. / m * np.sum(-y * np.log(g(z)) - (1. - y) * np.log(1 - g(z)))",
"def mm_lrt_test(y, K):\n lm = LinearModel(y)\n lmm = LinearMixedModel(y)\n lmm.add_random_effect(K)\n lmm_res = lmm.get_ML()\n ll0 = lm.get_ll()\n ll1 = lmm_res['max_ll']\n D = 2 * (ll1 - ll0)\n pval = stats.chi2.sf(D, 1)\n return {'pval':pval, 'lrt_stat':D}",
"def test_half_life():\n assert np.isclose(\n half_life(\"tritium\").to(u.s).value, (12.32 * u.yr).to(u.s).value, rtol=2e-4\n ), \"Incorrect half-life for tritium.\"",
"def test_suite():\r\n test(slope(5, 3, 4, 2) == 1.0)\r\n test(slope(1, 2, 3, 2) == 0.0)\r\n test(slope(1, 2, 3, 3) == 0.5)\r\n test(slope(2, 4, 1, 2) == 2.0)",
"def _wk_test(self, alternative='two-sided', alpha=0.01):\n\n q0 = self.get_group_data(0, self.df_test_resampled, ['Q'])\n q1 = self.get_group_data(1, self.df_test_resampled, ['Q'])\n\n u_wk, p_wk = mannwhitneyu(q0, q1, alternative=alternative)\n stats_wk = ranksums(q0, q1)[0]\n\n if p_wk <= alpha:\n h = 1\n else:\n h = 0\n\n stats_wk = {'zval': stats_wk, 'pval': p_wk}\n\n return h, stats_wk",
"def calculate_expected_doublet_rate(x, y): # pylint: disable=invalid-name\n\n model = LinearRegression(fit_intercept=True, normalize=False)\n model.fit(x, y)\n\n return {\"r_sq\": model.score(x, y), \"intercept\": model.intercept_, \"coefficient\": model.coef_}",
"def pd_ls_homotopy(y, tau, **kwargs):\n returnLambda = kwargs.get('returnLambda', False)\n\n y_ell1 = np.linalg.norm(y, 1)\n if tau is None:\n raise ValueError('Enter a value for tau in the range [0, norm(y, 1)]')\n elif tau <= 0:\n return np.zeros(y.shape)\n elif tau >= y_ell1:\n return y\n else:\n if not (tau > 0 and tau < y_ell1):\n raise ValueError('tau must be a number' +\n ' in the range (0, norm(y, 1))')\n n = y.size\n lam = np.insert(np.sort(np.abs(y)), 0, 0)\n\n S = np.zeros(lam.size)\n S[0] = y_ell1\n\n for j in range(1, n+1):\n S[j] = S[j-1] - (n-(j-1))*(lam[j]-lam[j-1])\n if S[j] <= tau:\n lam_star = (S[j-1] - tau)/(n-(j-1)) + lam[j-1]\n if returnLambda:\n return (softThresh(y, lam_star), lam_star)\n else:\n return softThresh(y, lam_star)\n if returnLambda:\n return (np.zeros(n), ySorted[-1])\n else:\n return np.zeros(n)",
"def test_score(coefs, intercept, method):\n X, y = _create_dataset(coefs, intercept, noise=1.0)\n lad = LADRegression(method=method)\n lad.fit(X, y)\n assert lad.score(X, y) > 0.9",
"def _ols(y, X):\n\n dummy = np.repeat(1.0, X.shape[0])\n X = np.hstack([X, dummy[:, np.newaxis]])\n\n beta_hat, resid, _, _ = np.linalg.lstsq(X, y)\n y_hat = np.dot(X, beta_hat)\n\n return y_hat, beta_hat",
"def test_welch(x, y, level):\n return test_t(x, y, level, equal_var=False)",
"def test_pow_2ndord():\n # one variable\n x = fwd.Variable()\n f = (x+1)**3\n assert equals(f.derivative_at(x, {x: 2.0}, order=2), 18.0)\n # two variables\n x, y = fwd.Variable(), fwd.Variable()\n g = (x+y)**3\n assert equals(g.derivative_at(x, {x: 2.0, y: 1.0}, order=2), 18.0)\n # test error raising\n with pytest.raises(NotImplementedError):\n g.derivative_at(x, {x:1.0, y: 2.0}, order=3)",
"def test_single_ended_ols_wls_estimate_synthetic():\n\n from dtscalibration import DataStore\n import numpy as np\n\n np.random.seed(0)\n\n cable_len = 100.\n nt = 50\n time = np.arange(nt)\n x = np.linspace(0., cable_len, 500)\n ts_cold = np.ones(nt) * 4.\n ts_warm = np.ones(nt) * 20.\n\n C_p = 15246\n C_m = 2400.\n dalpha_r = 0.0005284\n dalpha_m = 0.0004961\n dalpha_p = 0.0005607\n gamma = 482.6\n cold_mask = x < 0.5 * cable_len\n warm_mask = np.invert(cold_mask) # == False\n temp_real = np.ones((len(x), nt))\n temp_real[cold_mask] *= ts_cold + 273.15\n temp_real[warm_mask] *= ts_warm + 273.15\n\n st = C_p * np.exp(-dalpha_r * x[:, None]) * \\\n np.exp(-dalpha_p * x[:, None]) * \\\n np.exp(gamma / temp_real) / (np.exp(gamma / temp_real) - 1)\n ast = C_m * np.exp(-dalpha_r * x[:, None]) * \\\n np.exp(-dalpha_m * x[:, None]) / (np.exp(gamma / temp_real) - 1)\n\n print('alphaint', cable_len * (dalpha_p - dalpha_m))\n print('alpha', dalpha_p - dalpha_m)\n print('C', np.log(C_p / C_m))\n print('x0', x.max())\n\n ds = DataStore({\n 'st': (['x', 'time'], st),\n 'ast': (['x', 'time'], ast),\n 'userAcquisitionTimeFW': (['time'], np.ones(nt)),\n 'cold': (['time'], ts_cold),\n 'warm': (['time'], ts_warm)\n },\n coords={\n 'x': x,\n 'time': time},\n attrs={\n 'isDoubleEnded': '0'})\n\n sections = {\n 'cold': [slice(0., 0.5 * cable_len)],\n 'warm': [slice(0.5 * cable_len, cable_len)]}\n\n # OLS\n ds.calibration_single_ended(sections=sections,\n st_label='st',\n ast_label='ast',\n method='ols',\n solver='sparse')\n\n np.testing.assert_almost_equal(\n ds.gamma.values, gamma, decimal=6)\n np.testing.assert_almost_equal(\n ds.dalpha.values, dalpha_p - dalpha_m, decimal=8)\n np.testing.assert_almost_equal(\n ds.TMPF.values, temp_real - 273.15, decimal=4)\n\n # WLS\n ds.calibration_single_ended(sections=sections,\n st_label='st',\n ast_label='ast',\n st_var=1.,\n ast_var=1.,\n method='wls',\n solver='sparse')\n\n np.testing.assert_almost_equal(\n ds.gamma.values, gamma, decimal=6)\n np.testing.assert_almost_equal(\n ds.dalpha.values, dalpha_p - dalpha_m, decimal=8)\n np.testing.assert_almost_equal(\n ds.TMPF.values, temp_real - 273.15, decimal=4)\n\n pass",
"def baseline_als(self, y, lambd, p, niter=10):\n L = y.size\n D = sparse.csc_matrix(np.diff(np.eye(L), 2))\n w = np.ones(L)\n z = w\n for i in range(niter):\n W = sparse.spdiags(w, 0, L, L)\n Z = W + lambd * D.dot(D.transpose())\n z = spsolve(Z, w*y)\n wold = w\n w = p * (y > z) + (1-p) * (y < z)\n if i > 0:\n # check convergence\n dw = np.sum(wold - w)\n if dw == 0.0:\n break\n return z",
"def attest_rel (a,b,dimension=None,printit=0,name1='Samp1',name2='Samp2',writemode='a'):\r\n if dimension == None:\r\n a = N.ravel(a)\r\n b = N.ravel(b)\r\n dimension = 0\r\n if len(a)<>len(b):\r\n raise ValueError, 'Unequal length arrays.'\r\n x1 = amean(a,dimension)\r\n x2 = amean(b,dimension)\r\n v1 = avar(a,dimension)\r\n v2 = avar(b,dimension)\r\n n = a.shape[dimension]\r\n df = float(n-1)\r\n d = (a-b).astype('d')\r\n\r\n denom = N.sqrt((n*N.add.reduce(d*d,dimension) - N.add.reduce(d,dimension)**2) /df)\r\n zerodivproblem = N.equal(denom,0)\r\n denom = N.where(zerodivproblem,1,denom) # avoid zero-division in 1st place\r\n t = N.add.reduce(d,dimension) / denom # N-D COMPUTATION HERE!!!!!!\r\n t = N.where(zerodivproblem,1.0,t) # replace NaN/wrong t-values with 1.0\r\n probs = abetai(0.5*df,0.5,float(df)/(df+t*t))\r\n if type(t) == N.ndarray:\r\n probs = N.reshape(probs,t.shape)\r\n if probs.shape == (1,):\r\n probs = probs[0]\r\n\r\n if printit <> 0:\r\n statname = 'Related samples T-test.'\r\n outputpairedstats(printit,writemode,\r\n name1,n,x1,v1,N.minimum.reduce(N.ravel(a)),\r\n N.maximum.reduce(N.ravel(a)),\r\n name2,n,x2,v2,N.minimum.reduce(N.ravel(b)),\r\n N.maximum.reduce(N.ravel(b)),\r\n statname,t,probs)\r\n return\r\n return t, probs",
"def _graded_func(difficulty, discrimination, thetas, output):\n # This model is based on the difference of standard\n # logistic functions.\n\n # Do first level\n output[0] = 1.0 - irt_evaluation(np.array([difficulty[0]]),\n discrimination, thetas)\n\n for level_ndx in range(1, output.shape[0]-1):\n right = irt_evaluation(np.array([difficulty[level_ndx]]),\n discrimination, thetas)\n left = irt_evaluation(np.array([difficulty[level_ndx-1]]),\n discrimination, thetas)\n output[level_ndx] = left - right\n\n # Do last level\n output[-1] = irt_evaluation(np.array([difficulty[-1]]),\n discrimination, thetas)",
"def test_regression_tieout_1d(self):\n tol = 1.0e-12\n\n # this beta is array\n intercept, beta = regression_alt.matrix_ols(\n self._indep, self._dep)\n # here, we know beta has only one element\n beta_result = beta[0]\n\n py_intercept, py_beta = regression_alt.bare_bones_ols(\n self._indep, self._dep)\n\n computed = np.array([intercept, beta_result])\n expected = np.array([py_intercept, py_beta])\n close = np.allclose(computed, expected, rtol=0.0, atol=tol)\n\n self.assertTrue(close)",
"def wald_test(tau, Sigma, alpha=0.05, max_condition=1e-6, pval=False):\n # instead of regularizing we preprocess Sigma and tau to get rid of 0 eigenvalues\n tau, Sigma = preprocessing(tau, Sigma, max_condition=max_condition)\n d = len(tau)\n # compute matrix inverse\n Sigma_inv = np.linalg.inv(Sigma)\n\n # below quantity is asymptotically standard normal\n t_obs = np.sqrt(tau @ Sigma_inv @ tau)\n\n # compute the 1-alpha quantile of the chi distribution with d degrees of freedom\n threshold = chi.ppf(q=1-alpha, df=d)\n if not pval:\n if t_obs > threshold:\n return 1\n else:\n return 0\n else:\n # return p value\n return 1 - chi.cdf(x=t_obs, df=d)",
"def askewtest(a,dimension=None):\r\n if dimension == None:\r\n a = N.ravel(a)\r\n dimension = 0\r\n b2 = askew(a,dimension)\r\n n = float(a.shape[dimension])\r\n y = b2 * N.sqrt(((n+1)*(n+3)) / (6.0*(n-2)) )\r\n beta2 = ( 3.0*(n*n+27*n-70)*(n+1)*(n+3) ) / ( (n-2.0)*(n+5)*(n+7)*(n+9) )\r\n W2 = -1 + N.sqrt(2*(beta2-1))\r\n delta = 1/N.sqrt(N.log(N.sqrt(W2)))\r\n alpha = N.sqrt(2/(W2-1))\r\n y = N.where(y==0,1,y)\r\n Z = delta*N.log(y/alpha + N.sqrt((y/alpha)**2+1))\r\n return Z, (1.0-zprob(Z))*2",
"def test_ccsd_doubles_a_terms(parthole_drudge):\n\n dr = parthole_drudge\n p = dr.names\n\n a, b, c, d = p.V_dumms[:4]\n i, j, k, l = p.O_dumms[:4]\n u = dr.two_body\n t = IndexedBase('t')\n dr.set_dbbar_base(t, 2)\n\n tau = dr.define_einst(\n IndexedBase('tau')[a, b, i, j],\n Rational(1, 2) * t[a, b, i, j] + t[a, i] * t[b, j]\n )\n\n a_i = dr.define_einst(\n IndexedBase('ai')[k, l, i, j], u[i, c, k, l] * t[c, j]\n )\n\n a_ = dr.define(\n IndexedBase('a')[k, l, i, j],\n u[k, l, i, j] +\n a_i[k, l, i, j] - a_i[k, l, j, i]\n + u[k, l, c, d] * tau[c, d, i, j]\n )\n\n tensor = dr.define_einst(\n IndexedBase('r')[a, b, i, j],\n a_[k, l, i, j] * tau[a, b, k, l]\n )\n targets = [tensor]\n\n eval_seq = optimize(\n targets, substs={p.nv: p.no * 10}, strategy=Strategy.ALL | Strategy.SUM\n )\n assert verify_eval_seq(eval_seq, targets)\n # Here we just assert that the final step is a simple product.\n assert len(eval_seq[-1].rhs_terms) == 1",
"def test_weighting(self):\n dset = self.dset.spec.sel(\n lons=self.lons_inexact, lats=self.lats_inexact, method=\"idw\"\n )\n for stat in [\"hs\", \"tp\"]:\n idw = dset.spec.stats([stat])[stat].values\n site0 = self.dset.isel(site=[0]).spec.stats([stat])[stat].values\n site1 = self.dset.isel(site=[1]).spec.stats([stat])[stat].values\n lower = np.array([min(s1, s2) for s1, s2 in zip(site0, site1)])\n upper = np.array([max(s1, s2) for s1, s2 in zip(site0, site1)])\n assert (upper - idw > 0).all() and (idw - lower > 0).all()",
"def test_calibration_ols():\n filepath = data_dir_double_ended2\n ds = read_silixa_files(\n directory=filepath,\n timezone_netcdf='UTC',\n file_ext='*.xml')\n ds100 = ds.sel(x=slice(0, 100))\n sections_ultima = {\n 'probe1Temperature': [slice(8., 17.)], # cold bath\n }\n\n st_label = 'ST'\n ast_label = 'AST'\n rst_label = 'REV-ST'\n rast_label = 'REV-AST'\n\n ds100.calibration_double_ended(sections=sections_ultima,\n st_label=st_label,\n ast_label=ast_label,\n rst_label=rst_label,\n rast_label=rast_label,\n store_tmpw='TMPW',\n method='ols')\n\n np.testing.assert_array_almost_equal(ds100['TMPW'].data,\n ds100.TMP.data,\n decimal=1)\n\n ds009 = ds100.sel(x=sections_ultima['probe1Temperature'][0])\n np.testing.assert_array_almost_equal(ds009['TMPW'].data,\n ds009.TMP.data,\n decimal=2)\n pass",
"def test_concentration():\n\n dense = np.random.randn(5, 10).astype('float32')\n sparse = np.random.randn(5, 10).astype('float32')\n sparse[:, 1:] /= 1e5\n weights = Variable(dense)\n dhl_dense_10 = dirichlet_likelihood(weights, alpha=10.0).data\n dhl_dense_01 = dirichlet_likelihood(weights, alpha=0.1).data\n weights = Variable(sparse)\n dhl_sparse_10 = dirichlet_likelihood(weights, alpha=10.0).data\n dhl_sparse_01 = dirichlet_likelihood(weights, alpha=0.1).data\n\n msg = \"Sparse vector has higher likelihood than dense with alpha=0.1\"\n assert dhl_sparse_01 > dhl_dense_01, msg\n msg = \"Dense vector has higher likelihood than sparse with alpha=10.0\"\n assert dhl_dense_10 > dhl_sparse_10, msg",
"def evaluate_joint(joint_sampler,\n experiment,\n seed,\n num_pairs,\n joint_correction_num_samples=None):\n rng = jax.random.PRNGKey(seed)\n\n def run_pair(key):\n k1, k2 = jax.random.split(key, 2)\n p_logits, q_logits = experiment.logit_pair_distribution_fn(k1)\n joint_estimate = joint_sampler(p_logits, q_logits, k2)\n coupling_loss_matrix = experiment.coupling_loss_matrix_fn(\n p_logits, q_logits)\n loss_average = jnp.sum(joint_estimate * coupling_loss_matrix)\n loss_inner_variance = jnp.sum(\n joint_estimate * jnp.square(coupling_loss_matrix - loss_average))\n if joint_correction_num_samples:\n n = joint_correction_num_samples\n loss_inner_variance = loss_inner_variance * n / (n - 1)\n return loss_average, loss_inner_variance\n\n pair_averages, pair_variances = jax.lax.map(run_pair,\n jax.random.split(rng, num_pairs))\n\n overall_average = jnp.mean(pair_averages)\n overall_average_stderr = jnp.std(pair_averages) / jnp.sqrt(num_pairs)\n overall_pair_std = jnp.sqrt(jnp.mean(pair_variances))\n # overall_pair_variance_stderr = jnp.std(pair_variances) / jnp.sqrt(num_pairs)\n\n summary = (f\"average: {overall_average:.4f}, \"\n f\"inner st.dev.: +/- {overall_pair_std:.4}, \"\n f\"errorbars: +/- {overall_average_stderr:.4f}\")\n\n return summary, overall_average, overall_average_stderr, overall_pair_std",
"def lttest_rel (a,b,printit=0,name1='Sample1',name2='Sample2',writemode='a'):\r\n if len(a)<>len(b):\r\n raise ValueError, 'Unequal length lists in ttest_rel.'\r\n x1 = mean(a)\r\n x2 = mean(b)\r\n v1 = var(a)\r\n v2 = var(b)\r\n n = len(a)\r\n cov = 0\r\n for i in range(len(a)):\r\n cov = cov + (a[i]-x1) * (b[i]-x2)\r\n df = n-1\r\n cov = cov / float(df)\r\n sd = math.sqrt((v1+v2 - 2.0*cov)/float(n))\r\n t = (x1-x2)/sd\r\n prob = betai(0.5*df,0.5,df/(df+t*t))\r\n\r\n if printit <> 0:\r\n statname = 'Related samples T-test.'\r\n outputpairedstats(printit,writemode,\r\n name1,n,x1,v1,min(a),max(a),\r\n name2,n,x2,v2,min(b),max(b),\r\n statname,t,prob)\r\n return t, prob",
"def test_levy(self):\n fun = get_problem('levy', dimension=2, lower=0, upper=np.pi)\n self.assertAlmostEqual(fun(np.ones(2)), 0.0)",
"def evaluate_regression(x_test,t_test,basis,bias,w,degree=1,mu=None,s=1):\n \n phi = design_matrix(x_test,basis,degree,bias,mu,s)\n pred_test=phi@w\n # Measure root mean squared error on testing data.\n t_est = pred_test\n #print(\"deleteeeeeeeeeee\",t_est)\n #print(np.shape(t_est))\n err = np.sqrt((np.square(pred_test-t_test)).mean())\n \n \n\n return (t_est, err)",
"def testAlgn(x,y):\n\n A = numpy.ones((3,3))\n A[:,1] = x\n A[:,2] = y\n return numpy.linalg.det(A)",
"def test_double_dqn(self):\n model = DoubleDQNLightning(self.hparams)\n result = self.trainer.fit(model)\n\n self.assertEqual(result, 1)",
"def testLambertWGradient(self, value, expected):\n x = tf.constant(value, dtype=tf.float64)\n with tf.GradientTape() as g:\n g.watch(x)\n y = tfp.math.lambertw(x)\n\n dy_dx = g.gradient(y, x)\n self.assertAllClose(dy_dx, expected)",
"def fligner_policello_test(X, Y):\n P_i = []\n for x in X:\n count = 0\n for y in Y:\n if y <= x:\n count += 1\n P_i.append(count)\n\n Q_j = []\n for y in Y:\n count = 0\n for x in X:\n if x <= y:\n count += 1\n Q_j.append(count)\n\n P_i = np.array(P_i)\n Q_j = np.array(Q_j)\n P_bar = np.average(P_i)\n Q_bar = np.average(Q_j)\n V1 = sum((P_i - P_bar) ** 2)\n V2 = sum((Q_j - Q_bar) ** 2)\n z = (sum(Q_j) - sum(P_i)) / (2 * np.sqrt(V1 + V2 + P_bar * Q_bar))\n p_value = 2. * norm.sf(abs(z)) # two sided test\n\n return z, p_value",
"def test_dice_coef_loss():\n assert dice_coef_loss() == expected_dice_coef_loss",
"def test_gt_grad():\r\n floatX = config.floatX\r\n T = theano.tensor\r\n\r\n input_ = T.vector(dtype=floatX)\r\n random_values = numpy.random.RandomState(1234).uniform(\r\n low=-1, high=1, size=(2, 2))\r\n W_values = numpy.asarray(random_values, dtype=floatX)\r\n W = theano.shared(value=W_values, name='weights')\r\n correct_score = T.dot(input_, W)\r\n wrong_input = T.vector(dtype=floatX)\r\n wrong_score = theano.clone(correct_score, {input_: wrong_input})\r\n # Hinge loss\r\n\r\n scores = T.ones_like(correct_score) - correct_score + wrong_score\r\n cost = (scores * (scores > 0)).sum()\r\n T.grad(cost, input_)",
"def test_basic_lindblad_lmult(self):\n A = Array([[1.0, 2.0], [3.0, 4.0]])\n\n t = 1.123\n ham = (\n 2 * np.pi * self.w * self.Z.data / 2\n + 2 * np.pi * self.r * np.cos(2 * np.pi * self.w * t) * self.X.data / 2\n )\n sm = Array([[0.0, 0.0], [1.0, 0.0]])\n\n expected = self._evaluate_lindblad_rhs(A, ham, [sm])\n value = self.basic_lindblad(t, A)\n self.assertAllClose(expected, value)",
"def L1(yhat, y):\n\n loss = np.sum(np.abs(y - yhat))\n \n return loss",
"def learning_by_penalized_gradient(y, tx, w, gamma, lambda_):\n\n #on test avec Newton\n\n loss,gradient,_ = penalized_logistic_regression(y,tx,w,lambda_)\n\n w = w - gamma*gradient\n return loss, w,gradient",
"def LL(y, yhat):\n\n return -np.sum(norm.logpdf(y, loc=yhat, scale=np.std(y)))",
"def _hypothesis(self, X):\n # * is element wise multiplication\n # numpy.dot(), or @ operator will work\n result = np.transpose(self.theta)@ X \n #emptyResult = np.zeros((1,X.shape[1]))\n return result",
"def test_LM(self):\n\t\t\n\t\tprecision = 10**-8\n\t\t\t\t \n\t\tif self.n == 1:\n\t\t\t\t \n\t\t\tP_sum = sum(self.estimate_prob('', w) for w in self.vocab)\n\t\t\t\n\t\t\tassert abs(1.0 - P_sum) < precision, 'Probability mass does not sum up to one.'\n\t\t\t\t \n\t\telif self.n == 2:\n\t\t\thistories = ['the', 'in', 'at', 'blue', 'white']\n\t\t\t\t \n\t\t\tfor h in histories:\n\t\t\t\t \n\t\t\t\tP_sum = sum(self.estimate_prob(h, w) for w in self.vocab)\n\t\t\t\t\n\t\t\t\tassert abs(1.0 - P_sum) < precision, 'Probability mass does not sum up to one for history' + h\n\t\t\t\t\t \n\t\tprint('TEST SUCCESSFUL!')",
"def test_kl_divergence(get_distributions):\n for i, dist_a in enumerate(get_distributions):\n for j, dist_b in enumerate(get_distributions):\n kl = kl_divergence(dist_a, dist_b)\n if i == j:\n assert pytest.approx(kl, 0.0001) == 0.0\n else:\n assert kl > 0",
"def test_double_ended_ols_wls_fix_alpha_estimate_synthetic():\n from dtscalibration import DataStore\n import numpy as np\n\n np.random.seed(0)\n\n cable_len = 100.\n nt = 500\n time = np.arange(nt)\n x = np.linspace(0., cable_len, 100)\n ts_cold = np.ones(nt) * 4.\n ts_warm = np.ones(nt) * 20.\n\n C_p = 15246\n C_m = 2400.\n dalpha_r = 0.0005284\n dalpha_m = 0.0004961\n dalpha_p = 0.0005607\n gamma = 482.6\n cold_mask = x < 0.5 * cable_len\n warm_mask = np.invert(cold_mask) # == False\n temp_real = np.ones((len(x), nt))\n temp_real[cold_mask] *= ts_cold + 273.15\n temp_real[warm_mask] *= ts_warm + 273.15\n\n st = C_p * np.exp(-dalpha_r * x[:, None]) * \\\n np.exp(-dalpha_p * x[:, None]) * np.exp(gamma / temp_real) / \\\n (np.exp(-gamma / temp_real) - 1)\n ast = C_m * np.exp(-dalpha_r * x[:, None]) * \\\n np.exp(-dalpha_m * x[:, None]) / (np.exp(-gamma / temp_real) - 1)\n rst = C_p * np.exp(-dalpha_r * (-x[:, None] + cable_len)) * \\\n np.exp(-dalpha_p * (-x[:, None] + cable_len)) * \\\n np.exp(gamma / temp_real) / (np.exp(-gamma / temp_real) - 1)\n rast = C_m * np.exp(-dalpha_r * (-x[:, None] + cable_len)) * np.exp(\n -dalpha_m * (-x[:, None] + cable_len)) / \\\n (np.exp(-gamma / temp_real) - 1)\n\n alpha = np.mean(np.log(rst / rast) - np.log(st / ast), axis=1) / 2\n\n ds = DataStore({\n 'st': (['x', 'time'], st),\n 'ast': (['x', 'time'], ast),\n 'rst': (['x', 'time'], rst),\n 'rast': (['x', 'time'], rast),\n 'userAcquisitionTimeFW': (['time'], np.ones(nt)),\n 'userAcquisitionTimeBW': (['time'], np.ones(nt)),\n 'cold': (['time'], ts_cold),\n 'warm': (['time'], ts_warm)\n },\n coords={\n 'x': x,\n 'time': time},\n attrs={\n 'isDoubleEnded': '1'})\n\n sections = {\n 'cold': [slice(0., 0.5 * cable_len)],\n 'warm': [slice(0.5 * cable_len, cable_len)]}\n\n # OLS\n ds.calibration_double_ended(sections=sections,\n st_label='st',\n ast_label='ast',\n rst_label='rst',\n rast_label='rast',\n method='ols',\n solver='sparse',\n fix_alpha=(alpha, np.zeros_like(alpha)))\n\n np.testing.assert_almost_equal(\n ds.gamma.values, gamma, decimal=6)\n np.testing.assert_almost_equal(\n ds.alpha.values, alpha, decimal=8)\n np.testing.assert_almost_equal(\n ds.TMPF.values, temp_real - 273.15, decimal=4)\n np.testing.assert_almost_equal(\n ds.TMPB.values, temp_real - 273.15, decimal=4)\n np.testing.assert_almost_equal(\n ds.TMPW.values, temp_real - 273.15, decimal=4)\n\n # WLS\n ds.calibration_double_ended(sections=sections,\n st_label='st',\n ast_label='ast',\n rst_label='rst',\n rast_label='rast',\n st_var=1e-7,\n ast_var=1e-7,\n rst_var=1e-7,\n rast_var=1e-7,\n method='wls',\n solver='sparse',\n tmpw_mc_size=5,\n fix_alpha=(alpha, np.zeros_like(alpha)))\n\n np.testing.assert_almost_equal(\n ds.gamma.values, gamma, decimal=6)\n np.testing.assert_almost_equal(\n ds.alpha.values, alpha, decimal=7)\n np.testing.assert_almost_equal(\n ds.TMPF.values, temp_real - 273.15, decimal=5)\n np.testing.assert_almost_equal(\n ds.TMPB.values, temp_real - 273.15, decimal=5)\n np.testing.assert_almost_equal(\n ds.TMPW.values, temp_real - 273.15, decimal=4)\n\n pass",
"def test_ppt_distinguishability_yyd_density_matrices():\n psi_0 = bell(0)\n psi_1 = bell(2)\n psi_2 = bell(3)\n psi_3 = bell(1)\n\n x_1 = np.kron(psi_0, psi_0)\n x_2 = np.kron(psi_1, psi_3)\n x_3 = np.kron(psi_2, psi_3)\n x_4 = np.kron(psi_3, psi_3)\n\n rho_1 = x_1 * x_1.conj().T\n rho_2 = x_2 * x_2.conj().T\n rho_3 = x_3 * x_3.conj().T\n rho_4 = x_4 * x_4.conj().T\n\n states = [rho_1, rho_2, rho_3, rho_4]\n probs = [1 / 4, 1 / 4, 1 / 4, 1 / 4]\n\n # Min-error tests:\n primal_res = ppt_distinguishability(states, probs=probs, dist_method=\"min-error\", strategy=True)\n dual_res = ppt_distinguishability(states, probs=probs, dist_method=\"min-error\", strategy=False)\n\n np.testing.assert_equal(np.isclose(primal_res, 7 / 8, atol=0.001), True)\n np.testing.assert_equal(np.isclose(dual_res, 7 / 8, atol=0.001), True)\n\n primal_res = ppt_distinguishability(\n states, probs=probs, dist_method=\"unambiguous\", strategy=True\n )\n dual_res = ppt_distinguishability(\n states, probs=probs, dist_method=\"unambiguous\", strategy=False\n )\n\n np.testing.assert_equal(np.isclose(primal_res, 3 / 4, atol=0.001), True)\n np.testing.assert_equal(np.isclose(dual_res, 3 / 4, atol=0.001), True)",
"def test_dice_coef():\n assert dice_coef() == expected_dice_coef",
"def test_LDL_full_missing(ft_mvar, theta_mvar_diffuse, Yt_mvar, Xt_mvar):\n kf = Filter(ft_mvar, Yt_mvar, Xt_mvar, for_smoother=True)\n kf.init_attr(theta_mvar_diffuse)\n \n Y_t, H_t, D_t, R_t, L_t, L_inv = kf._LDL(2)\n assert kf.n_t[2] == 0\n \n R_t_move = np.array([[3, 2, 1], \n [2, 4, 3], \n [1, 3, 6]])\n L_t_expected, R_t_expected, _ = linalg.ldl(R_t_move) \n L_inv_expected, _ = linalg.lapack.dtrtri(\n L_t_expected, lower=True)\n np.testing.assert_array_equal(L_t, L_t_expected)\n np.testing.assert_array_equal(R_t, R_t_expected)\n\n Y_t_expected = linalg.pinv(L_t_expected).dot(\n np.array([0, 0, 0]).reshape(-1, 1))\n np.testing.assert_array_almost_equal(Y_t, Y_t_expected)\n \n H_t_expected = L_inv_expected.dot(\n np.array([1, 2, 2.4]).reshape(-1, 1))\n np.testing.assert_array_almost_equal(H_t, H_t_expected)\n\n expected_partitioned_index = np.array([0, 1, 2])\n np.testing.assert_array_equal(kf.partitioned_index[2], \n expected_partitioned_index)",
"def naive_prediction(\n y_test:np.array) -> float:\n yhat = np.concatenate((np.zeros(1),y_test[:-1]))\n \n return np.sqrt(np.sum(np.square(yhat-y_test)))",
"def calc_rmsle(y: np.ndarray, y_hat: np.ndarray) -> float:\n pass",
"def define_hypothesis(df, statistic, alternative, paired, alpha):\n paired_text = f\"the {statistic} difference\" if paired else f\"difference in {statistic}\"\n hypothesis = {\n 'two-sided_H0': f\"{paired_text} equal to zero\",\n 'two-sided_H1': f\"{paired_text} not equal to zero\",\n 'greater_H0': f\"{paired_text} greater than or equal to zero\",\n 'greater_H1': f\"{paired_text} less than zero\",\n 'less_H0': f\"{paired_text} less than or equal to zero\",\n 'less_H1': f\"{paired_text} greater than zero\"\n }\n df = HypothesisTester.test_alternative(df, hypothesis,\n alternative, alpha)\n return df",
"def test_newton_rhapson(testFunctions, tol, printFlag): \n pass",
"def dice_loss(yhat, ytrue, epsilon=1e-6):\n # compute Dice components\n intersection = torch.sum(yhat * ytrue, (1,2,3))\n cardinal = torch.sum(yhat + ytrue, (1,2,3))\n\n return torch.mean(1. - (2 * intersection / (cardinal + epsilon)))",
"def ellipse_dist_ratio_poly(self, theta, lwr):\n\n \"\"\"\n\n Params for FWD fit\n array([ 9.99999989e-01, 8.10852195e+07, 1.95444928e+00, 7.96543026e-02])\n this one is un-needed, since it's approximation y = 1\n\n Params for FWD_DIAG fit\n array([-0.00650758, 0.57761793, 0.35369061, 1.87834152])\n\n Params for ORTHOG fit\n array([-0.02014989, 5.7007518 , -0.83345416, 0.97711175])\n\n Params for BCK_DIAG fit\n array([-0.01608705, 9.44079769, -0.92071169, 0.89094967])\n\n Params for BCK fit\n array([ -0.01451187, 10.92674105, -0.93514904, 0.87868538])\n \"\"\"\n\n #fitting function\n def f(x,params):\n return params[0] + (1.0 / (params[1]*(x+params[2])**params[3]))\n\n #force float math, in case theta is an integer\n theta = float(theta)\n\n #into an angle index form:\n t = abs(int(4.0*theta/np.pi))\n\n if (t == 0) or (t == 8):\n return 1.0\n elif (t == 1) or (t == 7):\n #forward diagonal\n return f(lwr, [-0.00650758, 0.57761793, 0.35369061, 1.87834152])\n elif (t == 2) or (t == 6):\n #orthogonal\n return f(lwr, [-0.02014989, 5.7007518 , -0.83345416, 0.97711175])\n elif (t == 3) or (t == 5):\n #backward diagonal\n return f(lwr, [-0.01608705, 9.44079769, -0.92071169, 0.89094967])\n elif t == 4:\n #backward\n return f(lwr, [ -0.01451187, 10.92674105, -0.93514904, 0.87868538])\n else:\n #hmmm... TODO\n return 0.0",
"def test_caekl_1(d):\n assert I(d) == pytest.approx(J(d))",
"def test_newton_rhapson_system(testFunctions, tol, printFlag): \n pass",
"def baseline_als(y, lam, p, niter=10):\n m = len(y) #y.shape[0]\n\n D = sparse.diags([1, -2, 1], [-2, -1, 0], shape=(m, m - 2), format='csr')\n # = np.diff(np.eye(m), 2)\n # D is the transpose of the one in the paper\n\n w = np.ones(m)\n z = np.zeros_like(y)\n\n for _ in range(niter):\n W = sparse.spdiags(w, 0, m, m)\n z[:] = sparse.linalg.spsolve(W + lam * D.dot(D.transpose()), w*y)\n w[:] = p * (y > z) + (1-p) * (y < z)\n return z",
"def get_sgd_solution(TRAINING_PHI, TEST_PHI, VAL_PHI, W_Now, TrainingData,\n TrainingTarget, TestData, ValData):\n # Gradient Descent Solution for Linear Regression\n La = 2\n # learningRate = 0.01\n L_Erms_Val, L_Erms_TR, L_Erms_Test, L_Accuracy_Test, W_Mat = [], [], [], [], []\n\n for i in range(0, 400):\n\n # print (f'---------Iteration: {i} M{M} LR {learningRate} L :{C_Lambda}--------------')\n Delta_E_D = -np.dot(\n (TrainingTarget[i] - np.dot(np.transpose(W_Now), TRAINING_PHI[i])),\n TRAINING_PHI[i])\n La_Delta_E_W = np.dot(La, W_Now)\n Delta_E = np.add(Delta_E_D, La_Delta_E_W)\n Delta_W = -np.dot(learningRate, Delta_E)\n W_T_Next = W_Now + Delta_W\n W_Now = W_T_Next\n\n #-----------------TrainingData Accuracy---------------------#\n TR_TEST_OUT = GetValTest(TRAINING_PHI, W_T_Next)\n Erms_TR = GetErms(TR_TEST_OUT, TrainingTarget)\n L_Erms_TR.append(float(Erms_TR.split(',')[1]))\n\n #-----------------ValidationData Accuracy---------------------#\n VAL_TEST_OUT = GetValTest(VAL_PHI, W_T_Next)\n Erms_Val = GetErms(VAL_TEST_OUT, ValDataAct)\n L_Erms_Val.append(float(Erms_Val.split(',')[1]))\n\n #-----------------TestingData Accuracy---------------------#\n TEST_OUT = GetValTest(TEST_PHI, W_T_Next)\n Erms_Test = GetErms(TEST_OUT, TestDataAct)\n L_Erms_Test.append(float(Erms_Test.split(',')[1]))\n L_Accuracy_Test.append(float(Erms_Test.split(',')[0]))\n\n return ([L_Erms_TR, L_Erms_Val, L_Erms_Test, L_Accuracy_Test])",
"def poly_regression_second(X, Y, Xs_test, Ys_test):\n ## YOUR CODE HERE\n #################\n return 0",
"def test_wl_metric():\n z1 = np.random.normal(size=int(1e5)) + 1\n z2 = np.random.normal(size=int(1e5)) + 2\n res = pval.wl_metric(z1, z2)\n np.testing.assert_almost_equal(res, 1, 2)",
"def test_eff_pearson_p_greater_or_equal_to_normal_p_hind_da_initialized_1d(\n hind_da_initialized_1d, reconstruction_da_1d, comparison\n):\n normal_p = compute_hindcast(\n hind_da_initialized_1d,\n reconstruction_da_1d,\n metric=\"pearson_r_p_value\",\n comparison=comparison,\n )\n eff_p = compute_hindcast(\n hind_da_initialized_1d,\n reconstruction_da_1d,\n metric=\"pearson_r_eff_p_value\",\n comparison=comparison,\n )\n assert (normal_p <= eff_p).all()",
"def linear_regression(X, Y, Xs_test, Ys_test):\n\n ## YOUR CODE HERE\n #################\n return 0",
"def evaluate_regression(x, t, w, basis, degree):\n \t# TO DO:: Compute t_est and err \n #w_tranpose=w.T\n\n\n # My logic goes as follows:\n # Definition of test error is when you run the trained\n # model against a dataset that it hasn't been exposed to\n # this dataset is known as the testset \n\n # As such the basic algorithm goes as follows:\n # We do not need to recompute the weights but we need to recompute\n # phi for our test data\n\n # As such, we are interested in how well our trained weights\n # estimate against the test data so we matrix multiply our\n # weights against the phi from our test data\n # thus t_est = w_train.T*phi(x) since we want to know how well our\n # trained model estimates against the training data\n # but in implementation we do phi(x)*w_train\n # to match array dimensions \n\n\n #Compute design matrix from test data \n phi=design_matrix(x,basis,degree)\n phi_cross=np.linalg.pinv(phi)\n\n # Compute testing weights // just in case we require this variable\n #if(t is not None):\n #w_test=phi_cross.dot(t)\n #w_test=phi_cross.dot(t)\n\n # We want to be able to index into our target vector\n\n #t_est=phi.dot(w_test)\n #if (t is not None):\n # testing_estimate=phi.dot(w_test)\n #testing_estimate=phi.dot(w_test)\n\n # Estimate of our targets according to test data against learned \n # coefficients\n t_est=phi.dot(w)\n #print(\"t_est\",t_est)\n #t_est = None\n\n # We calculate the RMS error as follows\n # Take equation 3.12 of PRML and modify as follows\n # My logic:\n # The equation given in PRML gives the SSE (sum of squares error)\n # By definition the MSE (mean squared error) takes the SSE and divides \n # it by population size, we also preserve the 1/2 constant \n # throughout our calcuations \n # Afterwards we take our MSE and square root it.\n\n # Compute difference between target and estimate\n\n if(t is not None):\n \n diff=t-t_est\n # Square all observations\n diff_squared=np.power(diff,2)\n # Sum up all the observations in our vector\n sig_squared=diff_squared.sum()\n half_sig_squared=0.5*(sig_squared)\n # Calculate population size\n population_size=t.shape[0]\n rmse=np.sqrt(half_sig_squared/population_size)\n err=rmse\n else:\n err=None\n\n #diff=t-t_est\n\n\n # Square all observations \n #diff_squared=np.power(diff,2)\n\n # Sum up all the observations in our vector\n #sig_squared=diff_squared.sum()\n\n #half_sig_squared=0.5*(sig_squared)\n\n # Calculate population size\n #population_size=t.shape[0]\n\n #rmse=np.sqrt(half_sig_squared/population_size)\n #err = rmse\n #print(\"err inside function\",err)\n #err=rmse\n return (t_est, err)",
"def penalized_logistic_regression(y, tx, w, lambda_):\n\tgradient = calculate_gradient(y, tx, w) + 2.0 * lambda_ * w\n\tH = calculate_hessian(y, tx, w) + 2.0 * lambda_\n\t\n\treturn gradient, H",
"def test_double_ended_ols_wls_estimate_synthetic_df_and_db_are_different():\n from dtscalibration import DataStore\n import numpy as np\n\n np.random.seed(0)\n\n cable_len = 100.\n nt = 3\n time = np.arange(nt)\n x = np.linspace(0., cable_len, 8)\n ts_cold = np.ones(nt) * 4. + np.cos(time) * 4\n ts_warm = np.ones(nt) * 20. + -np.sin(time) * 4\n\n C_p = 1324 # 1/2 * E0 * v * K_+/lam_+^4\n eta_pf = np.cos(time) / 10 + 1 # eta_+ (gain factor forward channel)\n eta_pb = np.sin(time) / 10 + 1 # eta_- (gain factor backward channel)\n C_m = 5000.\n eta_mf = np.cos(time + np.pi / 8) / 10 + 1\n eta_mb = np.sin(time + np.pi / 8) / 10 + 1\n dalpha_r = 0.005284\n dalpha_m = 0.004961\n dalpha_p = 0.005607\n gamma = 482.6\n\n temp_real_kelvin = np.zeros((len(x), nt)) + 273.15\n temp_real_kelvin[x < 0.2 * cable_len] += ts_cold[None]\n temp_real_kelvin[x > 0.85 * cable_len] += ts_warm[None]\n temp_real_celsius = temp_real_kelvin - 273.15\n\n st = eta_pf[None] * C_p * np.exp(-dalpha_r * x[:, None]) * \\\n np.exp(-dalpha_p * x[:, None]) * np.exp(gamma / temp_real_kelvin) / \\\n (np.exp(gamma / temp_real_kelvin) - 1)\n ast = eta_mf[None] * C_m * np.exp(-dalpha_r * x[:, None]) * \\\n np.exp(-dalpha_m * x[:, None]) / (np.exp(gamma / temp_real_kelvin) - 1)\n rst = eta_pb[None] * C_p * np.exp(-dalpha_r * (-x[:, None] + cable_len)) * \\\n np.exp(-dalpha_p * (-x[:, None] + cable_len)) * \\\n np.exp(gamma / temp_real_kelvin) / (\n np.exp(gamma / temp_real_kelvin) - 1)\n rast = eta_mb[None] * C_m * np.exp(\n -dalpha_r * (-x[:, None] + cable_len)) * np.exp(\n -dalpha_m * (-x[:, None] + cable_len)) / \\\n (np.exp(gamma / temp_real_kelvin) - 1)\n\n c_f = np.log(eta_mf * C_m / (eta_pf * C_p))\n c_b = np.log(eta_mb * C_m / (eta_pb * C_p))\n\n dalpha = dalpha_p - dalpha_m # \\Delta\\alpha\n alpha_int = cable_len * dalpha\n\n df = c_f + alpha_int / 2\n db = c_b + alpha_int / 2\n i_fw = np.log(st / ast)\n i_bw = np.log(rst / rast)\n\n E_real = (i_bw - i_fw) / 2 + (c_b - c_f) / 2\n\n ds = DataStore({\n 'st': (['x', 'time'], st),\n 'ast': (['x', 'time'], ast),\n 'rst': (['x', 'time'], rst),\n 'rast': (['x', 'time'], rast),\n 'userAcquisitionTimeFW': (['time'], np.ones(nt)),\n 'userAcquisitionTimeBW': (['time'], np.ones(nt)),\n 'cold': (['time'], ts_cold),\n 'warm': (['time'], ts_warm)\n },\n coords={\n 'x': x,\n 'time': time},\n attrs={\n 'isDoubleEnded': '1'})\n\n ds.sections = {\n 'cold': [slice(0., 0.09 * cable_len)],\n 'warm': [slice(0.9 * cable_len, cable_len)]}\n\n real_ans2 = np.concatenate(([gamma], df, db, E_real[:, 0]))\n\n ds.calibration_double_ended(\n st_label='st',\n ast_label='ast',\n rst_label='rst',\n rast_label='rast',\n st_var=1.5,\n ast_var=1.5,\n rst_var=1.,\n rast_var=1.,\n method='wls',\n solver='sparse',\n tmpw_mc_size=1000,\n fix_gamma=(gamma, 0.),\n remove_mc_set_flag=True)\n\n np.testing.assert_allclose(df, ds.df.values)\n np.testing.assert_allclose(db, ds.db.values)\n np.testing.assert_allclose(x * (dalpha_p - dalpha_m),\n ds.alpha.values - ds.alpha.values[0])\n np.testing.assert_allclose(real_ans2, ds.p_val.values)\n np.testing.assert_allclose(temp_real_celsius, ds.TMPF.values, atol=1e-10)\n np.testing.assert_allclose(temp_real_celsius, ds.TMPB.values, atol=1e-10)\n np.testing.assert_allclose(temp_real_celsius, ds.TMPW.values, atol=1e-10)\n pass",
"def penalized_logistic_regression(y, tx, w, lambda_):\n penality = lambda_*np.linalg.norm(w)**2\n diag = np.diag(np.repeat(2*lambda_, len(w)))\n return calculate_loss(y,tx,w) + penality, calculate_gradient(y,tx,w) + lambda_*2*w, calculate_hessian(y,tx,w) + diag",
"def testLambertWWorksElementWise(self, value):\n scipy_wz = _w0(value)\n wz = tfp.math.lambertw(value)\n self.assertAllClose(wz, scipy_wz)\n self.assertEqual(value.shape, wz.shape)",
"def test_grad_computations(self, samples, training_points):\n \n self.add_p_tilda(training = False)\n \n do_updates = OrderedDict()\n \n self.b.set_value(self.b_init)\n \n self.W.set_value(self.W_init)\n \n gradW = theano.shared(np.zeros([self.num_vars,self.num_vars]))\n \n gradb = theano.shared(np.zeros([self.num_vars]))\n \n [gradW, gradb], updates =\\\n theano.scan(lambda i, gradW, gradb: [gradW+ \\\n (1.0-self.batch_size*self.p_tilda[i])\\\n *self.xn_xn_prod(self.x[i,:]),\n gradb+ \\\n (1.0-self.batch_size*self.p_tilda[i])\\\n *self.x[i,:]],\n outputs_info =[gradW, gradb],\n sequences =[T.arange(self.batch_size)])\n \n gradW = gradW[-1]\n \n gradb = gradb[-1]\n \n do_updates.update(updates)\n \n [gradW, gradb], updates = \\\n theano.scan(lambda i, gradW, gradb: [gradW - \\\n self.batch_size*self.p_tilda[self.batch_size+i]*\\\n self.xn_xn_prod(self.x_tilda[i,:]),\n gradb-self.batch_size*self.p_tilda[self.batch_size+i]*\\\n self.x_tilda[i,:]],\n outputs_info =[gradW, gradb],\n sequences =[T.arange(self.num_samples)])\n \n gradW = gradW[-1] /self.batch_size\n \n gradb = gradb[-1] /self.batch_size\n \n gradW = gradW - T.diag(T.diag(gradW)) # no recurrent connections\n \n do_updates.update(updates)\n \n ## ML objective log likelihood (ascend gradient)\n ## the first, more efficient implementation uses the cost\n ## objective which is negative of the log likelihood.\n \n do_updates.update([(self.W, self.W + self.learning_rate*gradW)])\n \n do_updates.update([(self.b, self.b + self.learning_rate*gradb)])\n \n input_dict = {self.x: self.train_inputs[self.minibatch_set,:]}\n \n var_list = [self.x_tilda, self.minibatch_set, self.learning_rate]\n \n test_grads = theano.function(inputs = var_list,\n outputs= [],\n updates= do_updates,\n givens = input_dict,\n on_unused_input='warn')\n \n test_grads(samples, training_points)",
"def test_single_linear_regression_coefficients(single_linear_regression_model):\n print(single_linear_regression_model)\n expected_coefficients = [(0, 151.27), (1, 303.90)]\n no_of_betas = len(single_linear_regression_model.B)\n for n in range(no_of_betas):\n assert single_linear_regression_model.B[n] == pytest.approx(\n expected_coefficients[n][1], 0.001\n )",
"def linear_error(X, y, w):\n\n return np.where(y != np.sign(np.dot(X, w)), 1.0, 0.0).mean()",
"def test_lindblad_pseudorandom(self):\n rng = np.random.default_rng(9848)\n dim = 10\n num_ham = 4\n num_diss = 3\n\n b = 1.0 # bound on size of random terms\n\n # generate random hamiltonian\n randoperators = rng.uniform(low=-b, high=b, size=(num_ham, dim, dim)) + 1j * rng.uniform(\n low=-b, high=b, size=(num_ham, dim, dim)\n )\n rand_ham_ops = Array(randoperators + randoperators.conj().transpose([0, 2, 1]))\n\n # generate random hamiltonian coefficients\n rand_ham_coeffs = rng.uniform(low=-b, high=b, size=(num_ham)) + 1j * rng.uniform(\n low=-b, high=b, size=(num_ham)\n )\n rand_ham_carriers = Array(rng.uniform(low=-b, high=b, size=(num_ham)))\n rand_ham_phases = Array(rng.uniform(low=-b, high=b, size=(num_ham)))\n\n ham_sigs = []\n for coeff, freq, phase in zip(rand_ham_coeffs, rand_ham_carriers, rand_ham_phases):\n ham_sigs.append(Signal(coeff, freq, phase))\n\n ham_sigs = SignalList(ham_sigs)\n\n # generate random dissipators\n rand_diss = Array(\n rng.uniform(low=-b, high=b, size=(num_diss, dim, dim))\n + 1j * rng.uniform(low=-b, high=b, size=(num_diss, dim, dim))\n )\n\n # random dissipator coefficients\n rand_diss_coeffs = rng.uniform(low=-b, high=b, size=(num_diss)) + 1j * rng.uniform(\n low=-b, high=b, size=(num_diss)\n )\n rand_diss_carriers = Array(rng.uniform(low=-b, high=b, size=(num_diss)))\n rand_diss_phases = Array(rng.uniform(low=-b, high=b, size=(num_diss)))\n\n diss_sigs = []\n for coeff, freq, phase in zip(rand_diss_coeffs, rand_diss_carriers, rand_diss_phases):\n diss_sigs.append(Signal(coeff, freq, phase))\n\n diss_sigs = SignalList(diss_sigs)\n\n # random anti-hermitian frame operator\n rand_op = rng.uniform(low=-b, high=b, size=(dim, dim)) + 1j * rng.uniform(\n low=-b, high=b, size=(dim, dim)\n )\n frame_op = Array(rand_op - rand_op.conj().transpose())\n evect = -1j * np.linalg.eigh(1j * frame_op)[1]\n f = lambda x: evect.T.conj() @ x @ evect\n\n lindblad_frame_op = frame_op\n\n # construct model\n hamiltonian = HamiltonianModel(operators=rand_ham_ops, signals=ham_sigs)\n lindblad_model = LindbladModel.from_hamiltonian(\n hamiltonian=hamiltonian, dissipator_operators=rand_diss, dissipator_signals=diss_sigs\n )\n lindblad_model.rotating_frame = lindblad_frame_op\n\n A = Array(\n rng.uniform(low=-b, high=b, size=(dim, dim))\n + 1j * rng.uniform(low=-b, high=b, size=(dim, dim))\n )\n\n t = rng.uniform(low=-b, high=b)\n value = lindblad_model(t, A, in_frame_basis=False)\n value_in_frame_basis = lindblad_model(\n t, lindblad_model.rotating_frame.operator_into_frame_basis(A), in_frame_basis=True\n )\n\n ham_coeffs = np.real(\n rand_ham_coeffs * np.exp(1j * 2 * np.pi * rand_ham_carriers * t + 1j * rand_ham_phases)\n )\n ham = np.tensordot(ham_coeffs, rand_ham_ops, axes=1)\n\n diss_coeffs = np.real(\n rand_diss_coeffs\n * np.exp(1j * 2 * np.pi * rand_diss_carriers * t + 1j * rand_diss_phases)\n )\n\n expected = self._evaluate_lindblad_rhs(\n A, ham, dissipators=rand_diss, dissipator_coeffs=diss_coeffs, frame_op=frame_op, t=t\n )\n\n self.assertAllClose(ham_coeffs, ham_sigs(t))\n self.assertAllClose(diss_coeffs, diss_sigs(t))\n self.assertAllClose(f(rand_diss), lindblad_model._dissipator_operators)\n self.assertAllClose(f(rand_ham_ops), lindblad_model._hamiltonian_operators)\n self.assertAllClose(f(-1j * frame_op), lindblad_model.get_drift(in_frame_basis=True))\n self.assertAllClose(-1j * frame_op, lindblad_model.get_drift(in_frame_basis=False))\n self.assertAllClose(f(-1j * frame_op), lindblad_model._operator_collection.drift)\n self.assertAllClose(expected, value)\n\n 
lindblad_model.evaluation_mode = \"dense_vectorized\"\n vectorized_value = lindblad_model.evaluate_rhs(\n t, A.flatten(order=\"F\"), in_frame_basis=False\n ).reshape((dim, dim), order=\"F\")\n self.assertAllClose(value, vectorized_value)\n\n vec_gen = lindblad_model.evaluate(t, in_frame_basis=False)\n vectorized_value_lmult = (vec_gen @ A.flatten(order=\"F\")).reshape((dim, dim), order=\"F\")\n self.assertAllClose(value, vectorized_value_lmult)\n\n rho_in_frame_basis = lindblad_model.rotating_frame.operator_into_frame_basis(A)\n vectorized_value_lmult_fb = (\n lindblad_model.evaluate(t, in_frame_basis=True) @ rho_in_frame_basis.flatten(order=\"F\")\n ).reshape((dim, dim), order=\"F\")\n self.assertAllClose(vectorized_value_lmult_fb, value_in_frame_basis)\n\n if Dispatch.DEFAULT_BACKEND != \"jax\":\n lindblad_model.evaluation_mode = \"sparse\"\n sparse_value = lindblad_model.evaluate_rhs(t, A, in_frame_basis=False)\n self.assertAllCloseSparse(value, sparse_value)\n\n lindblad_model.evaluation_mode = \"sparse_vectorized\"\n sparse_vectorized_value = lindblad_model.evaluate_rhs(\n t, A.flatten(order=\"F\"), in_frame_basis=False\n ).reshape((dim, dim), order=\"F\")\n self.assertAllCloseSparse(value, sparse_vectorized_value)\n\n sparse_vec_gen = lindblad_model.evaluate(t, in_frame_basis=False)\n sparse_vectorized_value_lmult = (sparse_vec_gen @ A.flatten(order=\"F\")).reshape(\n (dim, dim), order=\"F\"\n )\n self.assertAllCloseSparse(sparse_vectorized_value_lmult, value)",
"def dw_test(error):\n print('dw test', durbin_watson(error, axis=0))",
"def mannwhitneyu(sample_0, sample_1, one_sided=False):\n res = stats.mannwhitneyu(sample_0, sample_1, alternative=\"two-sided\" if not one_sided else \"less\")\n return res.statistic, res.pvalue",
"def testLambertWApproxmiation(self, value):\n exact = _w0(value)\n value = tf.convert_to_tensor(value)\n approx = tfp.math.lambertw_winitzki_approx(value)\n self.assertAllClose(approx, exact, rtol=0.05)",
"def hypoTest2(df, rt):\n from sklearn import preprocessing\n import statsmodels.api as sm\n\n gs = df\n ratings = rt\n\n # limit the time scope to recent 5 years\n testData2 = gs[gs['year']>=2015]\n\n testData2 = testData2[['show','year','month','this_week_gross']]\n\n # calculate avg weekly grosses mean (by show)\n testData2['avg_weekly_gross'] = testData2.groupby('show')['this_week_gross'].transform('mean')\n testData2_1 = pd.merge(testData2, ratings, on='show')\n # select distinct show\n testData2_1 = testData2_1.drop_duplicates('show')\n\n # Select relevant columns\n testData2_1 = testData2_1[['show', 'avg_weekly_gross', 'total_rating']]\n\n testData2_1['ratingLevel'] = 0\n testData2_1.loc[(testData2_1.total_rating > 7), 'ratingLevel'] = 1\n\n # normalize avg_weekly_gross\n mm_scaler = preprocessing.MinMaxScaler()\n mm_scaler.fit(testData2_1[['avg_weekly_gross']])\n testData2_1['norm_gross'] = mm_scaler.transform(testData2_1[['avg_weekly_gross']])\n\n # logistic regression\n X = sm.add_constant(testData2_1['norm_gross'])\n\n logit1 = sm.Logit(testData2_1['ratingLevel'], X)\n\n result1 = logit1.fit()\n\n # summarize the results\n print(result1.summary())\n\n # get the odds\n print()\n print(\"The odds-ratios are as the following:\")\n print()\n print(np.exp(result1.params))",
"def lpaired(x,y):\r\n samples = ''\r\n while samples not in ['i','r','I','R','c','C']:\r\n print '\\nIndependent or related samples, or correlation (i,r,c): ',\r\n samples = raw_input()\r\n\r\n if samples in ['i','I','r','R']:\r\n print '\\nComparing variances ...',\r\n# USE O'BRIEN'S TEST FOR HOMOGENEITY OF VARIANCE, Maxwell & delaney, p.112\r\n r = obrientransform(x,y)\r\n f,p = F_oneway(pstats.colex(r,0),pstats.colex(r,1))\r\n if p<0.05:\r\n vartype='unequal, p='+str(round(p,4))\r\n else:\r\n vartype='equal'\r\n print vartype\r\n if samples in ['i','I']:\r\n if vartype[0]=='e':\r\n t,p = ttest_ind(x,y,0)\r\n print '\\nIndependent samples t-test: ', round(t,4),round(p,4)\r\n else:\r\n if len(x)>20 or len(y)>20:\r\n z,p = ranksums(x,y)\r\n print '\\nRank Sums test (NONparametric, n>20): ', round(z,4),round(p,4)\r\n else:\r\n u,p = mannwhitneyu(x,y)\r\n print '\\nMann-Whitney U-test (NONparametric, ns<20): ', round(u,4),round(p,4)\r\n\r\n else: # RELATED SAMPLES\r\n if vartype[0]=='e':\r\n t,p = ttest_rel(x,y,0)\r\n print '\\nRelated samples t-test: ', round(t,4),round(p,4)\r\n else:\r\n t,p = ranksums(x,y)\r\n print '\\nWilcoxon T-test (NONparametric): ', round(t,4),round(p,4)\r\n else: # CORRELATION ANALYSIS\r\n corrtype = ''\r\n while corrtype not in ['c','C','r','R','d','D']:\r\n print '\\nIs the data Continuous, Ranked, or Dichotomous (c,r,d): ',\r\n corrtype = raw_input()\r\n if corrtype in ['c','C']:\r\n m,b,r,p,see = linregress(x,y)\r\n print '\\nLinear regression for continuous variables ...'\r\n lol = [['Slope','Intercept','r','Prob','SEestimate'],[round(m,4),round(b,4),round(r,4),round(p,4),round(see,4)]]\r\n pstats.printcc(lol)\r\n elif corrtype in ['r','R']:\r\n r,p = spearmanr(x,y)\r\n print '\\nCorrelation for ranked variables ...'\r\n print \"Spearman's r: \",round(r,4),round(p,4)\r\n else: # DICHOTOMOUS\r\n r,p = pointbiserialr(x,y)\r\n print '\\nAssuming x contains a dichotomous variable ...'\r\n print 'Point Biserial r: ',round(r,4),round(p,4)\r\n print '\\n\\n'\r\n return None",
"def test_sd_linear(self):\n z_matrix = np.array(\n [[0.0, 0.0, 1.0],\n [0.1, 0.2, 0.8],\n [0.2, 0.4, 0.6],\n [0.3, 0.7, 0.3],\n [0.6, 0.8, 0.2],\n [0.8, 0.9, 0.1],\n [1.0, 1.0, 0.0]],\n dtype=np.float64)\n obtained_w_vector = mcdm.weigh(z_matrix, \"SD\")\n expected_w_vector = np.array(\n [0.33333333, 0.33333333, 0.33333333],\n dtype=np.float64)\n np.testing.assert_allclose(obtained_w_vector, expected_w_vector)\n self.assertEqual(obtained_w_vector.dtype, expected_w_vector.dtype)",
"def freedom(L=5):\n\n Y = -1\n return Y",
"def logit_deriv(y):\n# if y.any() < 0.0 or y.any() > 1.0:\n# raise Exception\n\n return y*(1-y)",
"def test_LDL_middle_missing(ft_mvar, theta_mvar_diffuse, Yt_mvar, Xt_mvar):\n kf = Filter(ft_mvar, Yt_mvar, Xt_mvar, for_smoother=True)\n kf.init_attr(theta_mvar_diffuse)\n \n Y_t, H_t, D_t, R_t, L_t, L_inv = kf._LDL(3)\n assert kf.n_t[3] == 2\n \n R_t_move = np.array([[3, 1, 2], \n [1, 6, 3], \n [2, 3, 4]])\n L_t_expected, R_t_expected, _ = linalg.ldl(R_t_move) \n L_inv_expected, _ = linalg.lapack.dtrtri(\n L_t_expected, lower=True)\n np.testing.assert_array_equal(L_t, L_t_expected)\n np.testing.assert_array_equal(R_t, R_t_expected)\n\n Y_t_expected = linalg.pinv(L_t_expected).dot(\n np.array([2, 3.2, 0]).reshape(-1, 1))\n np.testing.assert_array_almost_equal(Y_t, Y_t_expected)\n \n H_t_expected = L_inv_expected.dot(\n np.array([1, 2.4, 2]).reshape(-1, 1))\n np.testing.assert_array_almost_equal(H_t, H_t_expected)\n\n expected_partitioned_index = np.array([0, 2, 1])\n np.testing.assert_array_equal(kf.partitioned_index[3], \n expected_partitioned_index)",
"def test_positive_definite2(dist, alpha, divergence):\n assert divergence(dist, dist, alpha, rvs=[0, 1]) == pytest.approx(0)\n assert hellinger_sum(dist, dist, alpha, rvs=[0, 1]) == pytest.approx(1)",
"def calculate_hypothesis_output(self):\n return np.matmul(self.X, self.theta_vector)",
"def test_adf(self):\n\n dftest = adfuller(self.ts_df['y'], autolag='AIC')\n dfoutput = pd.Series(dftest[0:4],\n index=['Test Statistic', 'p-value', '#Lags Used', 'Number of Observations Used'])\n for key, value in dftest[4].items():\n dfoutput['Critical Value (%s)' % key] = value\n print(dfoutput)\n if dftest[0] > dftest[4]['5%']:\n print(\n \"Test statistic greater than critical value at 5% --> series seems to be not stationary. \"\n \"Look at critical values at 1% and 10% too, ideally they also should be less than test statistic.\")\n else:\n print(\n \"Test statistic less than critical value at 5% --> series seems to be stationary. \"\n \"Look at critical values at 1% and 10% too, ideally they also should be greater than test statistic.\")",
"def log_loss_objective(y_true: npt.NDArray, y_pred: npt.NDArray) -> Tuple[npt.NDArray, npt.NDArray]:\n y_pred = sigmoid(y_pred)\n grad = y_pred - y_true\n hess = y_pred * (1.0 - y_pred)\n return grad, hess",
"def wer(d_ref, d_hyp, is_align=False, is_full=False):\n\n total_count = 0\n total_sub = 0\n total_ins = 0\n total_del = 0\n\n for i, name in enumerate(d_hyp):\n if not name in d_ref:\n logger.error(f'cannot find {name} in {ref}')\n exit()\n \n # build the matrix\n ed = editDistance(d_ref[name], d_hyp[name])\n\n # find out the manipulation steps\n step_list = getStepList(d_ref[name], d_hyp[name], ed)\n\n n = len(d_ref[name])\n n_cor = step_list.count('e')\n n_ins = step_list.count('i')\n n_sub = step_list.count('s')\n n_del = step_list.count('d')\n\n # print the result of each sentence in aligned way\n if is_align:\n print(f'Aligned transcription: {name}')\n alignedPrint(step_list, d_ref[name], d_hyp[name])\n \n # print statistics of each sentence\n if is_full:\n wer = float(ed[len(d_ref[name])][len(d_hyp[name])]) / len(d_ref[name]) * 100\n corr = 100 - (n_sub + n_del) / n * 100\n acc = 100 - wer\n e_ins = n_ins / n * 100\n e_sub = n_sub / n * 100\n e_del = n_del / n * 100\n print(f'{name}: % {corr:.2f} ({acc:.2f}) [Sub={e_sub:.2f}, Del={e_del:.2f}, Ins={e_ins:.2f}]')\n \n total_count += n\n total_ins += n_ins\n total_sub += n_sub\n total_del += n_del\n\n e_ins = total_ins / total_count * 100\n e_sub = total_sub / total_count * 100\n e_del = total_del / total_count * 100\n wer = (total_ins + total_sub + total_del ) / total_count * 100\n corr = 100 - (total_sub + total_del ) / total_count * 100\n acc = 100 - wer\n print(\"------------------------- Overall Results -------------------------\")\n print(f\"%Corr={corr:.2f}, Acc={acc:.2f}, WER: {wer:.2f} [Sub={e_sub:.2f}, Del={e_del:.2f}, Ins={e_ins:.2f}]\")",
"def evaluate(y_test, y_hat):\n score = np.sum(y_test==y_hat)/len(y_test)\n return score",
"def test_jacobi_analytical(env_class: mantrap.environment.base.GraphBasedEnvironment.__class__):\n env = env_class(torch.rand(2), ego_type=mantrap.agents.DoubleIntegratorDTAgent)\n env.add_ado(position=torch.rand(2) * 5, goal=torch.rand(2) * 10)\n\n ego_controls = torch.rand((5, 2)) / 10.0\n ego_controls.requires_grad = True\n ego_trajectory = env.ego.unroll_trajectory(controls=ego_controls, dt=env.dt)\n\n # Initialize HJ module and compute partial derivative dx_rel/du_robot using auto-grad.\n module = mantrap.modules.HJReachabilityModule(env=env, t_horizon=5)\n _ = module._constraint_core(ego_trajectory, ado_ids=env.ado_ids, tag=\"test\", enable_auto_grad=True)\n dx_rel_du_auto_grad = []\n for ado_id in env.ado_ids:\n x_rel = module.x_relative[f\"test/{ado_id}\"]\n grad = [torch.autograd.grad(x, ego_controls, retain_graph=True)[0] for x in x_rel]\n dx_rel_du_auto_grad.append(torch.stack(grad).reshape(4, -1))\n dx_rel_du_auto_grad = torch.stack(dx_rel_du_auto_grad)\n\n # Compute the same partial derivative analytically, by calling the `compute_jacobian_analytically()`\n # function. Since we cannot inverse a vector (dJ/dx_rel), we can check whether the jacobian\n # computed using the pre-computed dJ/dx_rel and the auto-grad (!) dx_rel/du results in the same\n # jacobian as the result of `compute_jacobian_analytically()`, which is only the case if\n # dx_rel/du(auto-grad) = dx_rel/du(analytic) since dJ/dx has non-zero elements.\n jacobian_analytical = module.compute_jacobian_analytically(ego_trajectory, grad_wrt=ego_controls,\n ado_ids=env.ado_ids, tag=\"test\")\n dj_dx_rel = []\n for ado_id in env.ado_ids:\n dj_dx_rel.append(module.value_gradient(x=module.x_relative[f\"test/{ado_id}\"]))\n jacobian_auto_grad = np.matmul(np.stack(dj_dx_rel), dx_rel_du_auto_grad)\n\n assert np.allclose(jacobian_analytical, jacobian_auto_grad)",
"def calculateLogJointProbabilities(self, datum):\n logJoint = util.Counter()\n \n \"*** YOUR CODE HERE ***\"\n \n # -- OUR CODE HERE\n \n \n import math\n for label in self.legalLabels:\n sumThing = 0.0\n for pixel in self.conditionalProb[label]:\n if datum[pixel] is 1:\n #assert self.conditionalProb[label][pixel] < 1.0 # -- sanity check that the probability is valid\n sumThing += math.log((self.conditionalProb[label][pixel]*1.0))\n else:\n sumThing+=math.log(1-self.conditionalProb[label][pixel]*1.0)\n logJoint[label] = math.log(self.prior[label]*1.0) + sumThing*1.0\n \n\n \n \n import time\n #print \"logJoint is :: \", logJoint\n #time.sleep(2)\n \n \n # -- uses the conditional probability tables computed in the current iteration\n # -- in train and tune\n \n return logJoint",
"def target(w, z):\n return log_joint(data_dim=data_dim,\n latent_dim=latent_dim,\n num_datapoints=num_datapoints,\n stddv_datapoints=stddv_datapoints,\n w=w, z=z, x=x_train)",
"def test_ppt_distinguishability_yyd_vectors():\n psi_0 = bell(0)\n psi_1 = bell(2)\n psi_2 = bell(3)\n psi_3 = bell(1)\n\n x_1 = np.kron(psi_0, psi_0)\n x_2 = np.kron(psi_1, psi_3)\n x_3 = np.kron(psi_2, psi_3)\n x_4 = np.kron(psi_3, psi_3)\n\n states = [x_1, x_2, x_3, x_4]\n probs = [1 / 4, 1 / 4, 1 / 4, 1 / 4]\n\n primal_res = ppt_distinguishability(states, probs=probs, dist_method=\"min-error\", strategy=True)\n dual_res = ppt_distinguishability(states, probs=probs, dist_method=\"min-error\", strategy=False)\n\n np.testing.assert_equal(np.isclose(primal_res, 7 / 8, atol=0.001), True)\n np.testing.assert_equal(np.isclose(dual_res, 7 / 8, atol=0.001), True)\n\n primal_res = ppt_distinguishability(\n states, probs=probs, dist_method=\"unambiguous\", strategy=True\n )\n dual_res = ppt_distinguishability(\n states, probs=probs, dist_method=\"unambiguous\", strategy=False\n )\n\n np.testing.assert_equal(np.isclose(primal_res, 3 / 4, atol=0.001), True)\n np.testing.assert_equal(np.isclose(dual_res, 3 / 4, atol=0.001), True)",
"def test_double_ended_wls_estimate_synthetic():\n from dtscalibration import DataStore\n\n cable_len = 100.0\n nt = 50\n time = np.arange(nt)\n x = np.linspace(0.0, cable_len, 100)\n ts_cold = np.ones(nt) * 4.0\n ts_warm = np.ones(nt) * 20.0\n\n C_p = 15246\n C_m = 2400.0\n dalpha_r = 0.0005284\n dalpha_m = 0.0004961\n dalpha_p = 0.0005607\n gamma = 482.6\n cold_mask = x < 0.5 * cable_len\n warm_mask = np.invert(cold_mask) # == False\n temp_real = np.ones((len(x), nt))\n temp_real[cold_mask] *= ts_cold + 273.15\n temp_real[warm_mask] *= ts_warm + 273.15\n\n st = (\n C_p\n * np.exp(-(dalpha_r + dalpha_p) * x[:, None])\n * np.exp(gamma / temp_real)\n / (np.exp(gamma / temp_real) - 1)\n )\n ast = (\n C_m\n * np.exp(-(dalpha_r + dalpha_m) * x[:, None])\n / (np.exp(gamma / temp_real) - 1)\n )\n rst = (\n C_p\n * np.exp(-(dalpha_r + dalpha_p) * (-x[:, None] + cable_len))\n * np.exp(gamma / temp_real)\n / (np.exp(gamma / temp_real) - 1)\n )\n rast = (\n C_m\n * np.exp(-(dalpha_r + dalpha_m) * (-x[:, None] + cable_len))\n / (np.exp(gamma / temp_real) - 1)\n )\n\n alpha = np.mean(np.log(rst / rast) - np.log(st / ast), axis=1) / 2\n alpha -= alpha[0] # the first x-index is where to start counting\n\n ds = DataStore(\n {\n \"st\": ([\"x\", \"time\"], st),\n \"ast\": ([\"x\", \"time\"], ast),\n \"rst\": ([\"x\", \"time\"], rst),\n \"rast\": ([\"x\", \"time\"], rast),\n \"userAcquisitionTimeFW\": ([\"time\"], np.ones(nt)),\n \"userAcquisitionTimeBW\": ([\"time\"], np.ones(nt)),\n \"cold\": ([\"time\"], ts_cold),\n \"warm\": ([\"time\"], ts_warm),\n },\n coords={\"x\": x, \"time\": time},\n attrs={\"isDoubleEnded\": \"1\"},\n )\n\n sections = {\n \"cold\": [slice(0.0, 0.4 * cable_len)],\n \"warm\": [slice(0.65 * cable_len, cable_len)],\n }\n\n # WLS\n ds.calibration_double_ended(\n sections=sections,\n st_var=1e-7,\n ast_var=1e-7,\n rst_var=1e-7,\n rast_var=1e-7,\n method=\"wls\",\n solver=\"sparse\",\n )\n\n assert_almost_equal_verbose(ds.gamma.values, gamma, decimal=10)\n assert_almost_equal_verbose(ds.alpha.values, alpha, decimal=8)\n assert_almost_equal_verbose(ds.tmpf.values, temp_real - 273.15, decimal=6)\n assert_almost_equal_verbose(ds.tmpb.values, temp_real - 273.15, decimal=6)\n assert_almost_equal_verbose(ds.tmpw.values, temp_real - 273.15, decimal=6)",
"def regularized_linear_regression(X, y, lambd):\n #####################################################\n # TODO 4: Fill in your code here #\n #####################################################\t\t\n w = None\n xtx = np.dot(X.T, X)\n xtx = np.add(xtx, np.identity(len(xtx)) * 0.1)\n w = np.dot(np.dot(np.linalg.inv(xtx), X.T), y)\n return w",
"def _ols(self, X, y):\n # add bias \n X = self._add_bias(X)\n\n # optimise coefficients\n xTx = np.dot(X.T, X)\n inverse_xTx = np.linalg.inv(xTx)\n xTy = np.dot(X.T, y)\n bhat = np.dot(inverse_xTx, xTy)\n\n # pull out weights and bias\n b = bhat[0]\n w = bhat[1:]\n\n return w, b",
"def test_regression_tieout_2d(self):\n tol = 1.0e-12\n\n intercept, beta = regression_alt.matrix_ols(\n self._indep2, self._dep)\n\n py_intercept, py_beta = regression_alt.bare_bones_ols(\n self._indep2, self._dep)\n\n self.assertAlmostEqual(intercept, py_intercept, delta=tol)\n\n computed = np.array([beta])\n expected = np.array([py_beta])\n close = np.allclose(computed, expected, rtol=0.0, atol=tol)\n\n self.assertTrue(close)",
"def test_mw_linear(self):\n z_matrix = np.array(\n [[0.0, 0.0, 1.0],\n [0.1, 0.2, 0.8],\n [0.2, 0.4, 0.6],\n [0.3, 0.7, 0.3],\n [0.6, 0.8, 0.2],\n [0.8, 0.9, 0.1],\n [1.0, 1.0, 0.0]],\n dtype=np.float64)\n obtained_w_vector = mcdm.weigh(z_matrix, \"MW\")\n expected_w_vector = np.array(\n [0.33333333, 0.33333333, 0.33333333],\n dtype=np.float64)\n np.testing.assert_allclose(obtained_w_vector, expected_w_vector)\n self.assertEqual(obtained_w_vector.dtype, expected_w_vector.dtype)"
] | [
"0.5814578",
"0.55685",
"0.55546004",
"0.54914194",
"0.54667383",
"0.54667383",
"0.54367995",
"0.5427969",
"0.5383164",
"0.53804135",
"0.5377707",
"0.5370084",
"0.53386796",
"0.52805245",
"0.5276071",
"0.527296",
"0.52697283",
"0.52659935",
"0.52651185",
"0.52339625",
"0.52332336",
"0.5228884",
"0.521231",
"0.52087057",
"0.52085096",
"0.52077335",
"0.5201678",
"0.51966614",
"0.5195829",
"0.51945114",
"0.51799774",
"0.5145449",
"0.5128099",
"0.5120333",
"0.51171255",
"0.51082605",
"0.5102831",
"0.5100347",
"0.5097124",
"0.50900614",
"0.50685626",
"0.50626385",
"0.50565016",
"0.5054957",
"0.50503343",
"0.5042214",
"0.50380164",
"0.5035311",
"0.5028674",
"0.49999705",
"0.499611",
"0.49959418",
"0.4994111",
"0.49899894",
"0.49748972",
"0.4971168",
"0.49704605",
"0.49695063",
"0.49616575",
"0.4959304",
"0.49523765",
"0.49512216",
"0.4946937",
"0.4946752",
"0.493449",
"0.4934217",
"0.49333304",
"0.49328914",
"0.4932646",
"0.49284908",
"0.49200892",
"0.49200112",
"0.49193555",
"0.49164674",
"0.49148932",
"0.49109307",
"0.49062276",
"0.49008778",
"0.4897637",
"0.48970506",
"0.48957655",
"0.48949704",
"0.48947972",
"0.48917925",
"0.48809072",
"0.48759118",
"0.48748788",
"0.4873392",
"0.48729742",
"0.48685196",
"0.48679298",
"0.48648143",
"0.4860683",
"0.48592243",
"0.48474422",
"0.48452154",
"0.48393545",
"0.48380935",
"0.48332596",
"0.48318362",
"0.4830855"
] | 0.0 | -1 |
Compute a sequence of Wald tests for terms over multiple columns. This computes joined Wald tests for the hypothesis that all coefficients corresponding to a `term` are zero. `Terms` are defined by the underlying formula or by string matching. | def wald_test_terms(self, skip_single=False, extra_constraints=None,
combine_terms=None): # noqa:E501
result = self
if extra_constraints is None:
extra_constraints = []
if combine_terms is None:
combine_terms = []
design_info = getattr(result.model.data, 'design_info', None)
if design_info is None and extra_constraints is None:
raise ValueError('no constraints, nothing to do')
identity = np.eye(len(result.params))
constraints = []
combined = defaultdict(list)
if design_info is not None:
for term in design_info.terms:
cols = design_info.slice(term)
name = term.name()
constraint_matrix = identity[cols]
# check if in combined
for cname in combine_terms:
if cname in name:
combined[cname].append(constraint_matrix)
k_constraint = constraint_matrix.shape[0]
if skip_single:
if k_constraint == 1:
continue
constraints.append((name, constraint_matrix))
combined_constraints = []
for cname in combine_terms:
combined_constraints.append((cname,
np.vstack(combined[cname])))
else:
# check by exog/params names if there is no formula info
for col, name in enumerate(result.model.exog_names):
constraint_matrix = identity[col]
# check if in combined
for cname in combine_terms:
if cname in name:
combined[cname].append(constraint_matrix)
if skip_single:
continue
constraints.append((name, constraint_matrix))
combined_constraints = []
for cname in combine_terms:
combined_constraints.append((cname,
np.vstack(combined[cname])))
use_t = result.use_t
distribution = ['chi2', 'F'][use_t]
res_wald = []
index = []
for pair in constraints + combined_constraints + extra_constraints:
name, constraint = pair
wt = result.wald_test(constraint)
row = [wt.statistic.item(), wt.pvalue, constraint.shape[0]]
if use_t:
row.append(wt.df_denom)
res_wald.append(row)
index.append(name)
# distribution neutral names
col_names = ['statistic', 'pvalue', 'df_constraint']
if use_t:
col_names.append('df_denom')
# TODO: maybe move DataFrame creation to results class
table = pd.DataFrame(res_wald, index=index, columns=col_names)
res = WaldTestResults(None, distribution, None, table=table)
# TODO: remove temp again, added for testing
res.temp = constraints + combined_constraints + extra_constraints
return res | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_all(any, shard, shard_term_features, qterms):\n tmp = 1\n for t in qterms:\n if t in shard_term_features[shard]:\n cdf = shard_term_features[shard][t].df\n else:\n cdf = 0\n tmp *= cdf/any\n all = tmp * any\n return all",
"def evaluate_terms(terms):\n expr_terms = [x for x in terms]\n\n while expr_terms.count('^') != 0:\n expr_terms = eval_expon(expr_terms)\n\n while MUL_DIV_RE.search(str(expr_terms)) is not None:\n expr_terms = eval_a_op_b(expr_terms, 'md')\n\n while len(expr_terms) != 1:\n expr_terms = eval_a_op_b(expr_terms, 'pm')\n\n return expr_terms[0]",
"def test_ccsd_doubles_a_terms(parthole_drudge):\n\n dr = parthole_drudge\n p = dr.names\n\n a, b, c, d = p.V_dumms[:4]\n i, j, k, l = p.O_dumms[:4]\n u = dr.two_body\n t = IndexedBase('t')\n dr.set_dbbar_base(t, 2)\n\n tau = dr.define_einst(\n IndexedBase('tau')[a, b, i, j],\n Rational(1, 2) * t[a, b, i, j] + t[a, i] * t[b, j]\n )\n\n a_i = dr.define_einst(\n IndexedBase('ai')[k, l, i, j], u[i, c, k, l] * t[c, j]\n )\n\n a_ = dr.define(\n IndexedBase('a')[k, l, i, j],\n u[k, l, i, j] +\n a_i[k, l, i, j] - a_i[k, l, j, i]\n + u[k, l, c, d] * tau[c, d, i, j]\n )\n\n tensor = dr.define_einst(\n IndexedBase('r')[a, b, i, j],\n a_[k, l, i, j] * tau[a, b, k, l]\n )\n targets = [tensor]\n\n eval_seq = optimize(\n targets, substs={p.nv: p.no * 10}, strategy=Strategy.ALL | Strategy.SUM\n )\n assert verify_eval_seq(eval_seq, targets)\n # Here we just assert that the final step is a simple product.\n assert len(eval_seq[-1].rhs_terms) == 1",
"def all_terms(cls, *terms: str) -> \"IFilterPattern\":\n return jsii.sinvoke(cls, \"allTerms\", [*terms])",
"def test_term(self):\n\t\tterm_one = schrodinger.term(0)\n\t\tself.assertEqual(1, term_one(0).numpy())\n\t\tterm_two = schrodinger.term(1)\n\t\tself.assertEqual(0, term_two(0).numpy())",
"def collect_like_terms(term_matrix):\n t = [term[:] for term in term_matrix]\n for i, term in enumerate(t, start=1):\n if i < len(t) - 1:\n for j in range(i+1, len(t)):\n if t[i][1:] == t[j][1:]:\n t[i] = [t[i][0] + t[j][0]] + t[i][1:]\n t[j][0] = 0\n # get rid of 0 terms\n t = [u for u in t if u[0] != 0]\n # get rid of extra variables\n if len(t[0]) > 0:\n for i in reversed(range(len(t[0]))):\n # in reverse so deletion doesn't affect index of subsequent variables\n extra = True\n if len(t) > 0:\n for term in t[1:]:\n try:\n if term[i] != 0:\n extra = False\n except IndexError:\n extra = True\n if extra:\n for term in t:\n try:\n del term[i]\n except IndexError:\n pass\n if t == [[]]:\n return [['constant']]\n return t",
"def calc_tdf(docs):\r\n terms = set()\r\n for doc in docs:\r\n for term in doc:\r\n terms.add(term)\r\n tdf = {}\r\n for term in terms:\r\n doc_count = 0\r\n for doc in docs:\r\n doc_count += 1 if term in doc else 0\r\n tdf[term] = doc_count\r\n return tdf",
"def eval_all_combinations(docs, labels, punct_vals,\n feature_fns, min_freqs):\n# result = []\n\n# for punct_val in punct_vals:\n# tokens = [tokenize(doc, punct_val) for doc in docs]\n# for f in [comb for i in range(len(feature_fns)) for comb in combinations(feature_fns, i+1)]:\n# feature = list(f)\n\n# for min_freq in min_freqs:\n# clf = LogisticRegression()\n# X, vocab = vectorize(tokens, feature, min_freq)\n# accuracy = cross_validation_accuracy(clf, X, labels, 5)\n# result.append(dict(punct= punct_val, features= feature, min_freq= min_freq, accuracy = accuracy))\n\n# return sorted(result, key=lambda x:(-x['accuracy'],-x['min_freq']))\n clf = LogisticRegression()\n result = []\n output = []\n for l in range(1, len(feature_fns)+1):\n for c in combinations(feature_fns,l):\n result.append(c)\n\n for p in punct_vals:\n list_tok = [tokenize(d,p) for d in docs]\n for fl in result:\n for mf in min_freqs:\n dict_output = {}\n matrix,vocab = vectorize(list_tok, fl, mf)\n average_value = cross_validation_accuracy(clf, matrix, labels, 5)\n dict_output['features'] = fl\n dict_output['punct'] = p\n dict_output['accuracy'] = average_value\n dict_output['min_freq'] = mf\n output.append(dict_output)\n\n return sorted(output, key=lambda x: (-x['accuracy'], -x['min_freq']))",
"def test_multiple(self):\n df = self.df.copy()\n for renorm in [True, False]:\n with self.subTest(renorm=renorm):\n out = nan_weighted_compositional_mean(df.values, renorm=renorm)\n if renorm:\n self.assertTrue(np.allclose(np.sum(out, axis=-1), 1.0))",
"def test_null_distribution_wald(self, n_cells: int = 2000, n_genes: int = 100):\n logging.getLogger(\"tensorflow\").setLevel(logging.ERROR)\n logging.getLogger(\"batchglm\").setLevel(logging.WARNING)\n logging.getLogger(\"diffxpy\").setLevel(logging.WARNING)\n\n sim = Simulator(num_observations=n_cells, num_features=n_genes)\n sim.generate_sample_description(num_batches=0, num_conditions=0)\n sim.generate()\n\n random_sample_description = pd.DataFrame({\n \"pseudotime\": np.random.random(size=sim.nobs)\n })\n\n test = de.test.continuous_1d(\n data=sim.X,\n continuous=\"pseudotime\",\n df=3,\n formula_loc=\"~ 1 + pseudotime\",\n formula_scale=\"~ 1\",\n factor_loc_totest=\"pseudotime\",\n test=\"wald\",\n sample_description=random_sample_description,\n quick_scale=True,\n batch_size=None,\n training_strategy=\"DEFAULT\",\n dtype=\"float64\"\n )\n summary = test.summary()\n\n # Compare p-value distribution under null model against uniform distribution.\n pval_h0 = stats.kstest(test.pval, 'uniform').pvalue\n\n logging.getLogger(\"diffxpy\").info('KS-test pvalue for null model match of wald(): %f' % pval_h0)\n assert pval_h0 > 0.05, \"KS-Test failed: pval_h0 is <= 0.05!\"\n\n return True",
"def vectorize(self, terms):\n features = {}\n\n if self.parameters[LexiconFeature.PARAM_ENABLED] == 'false':\n return features\n\n tones = []\n if (self.terms_used == 'all'):\n tones = [self.get_tone(term) for term in terms]\n elif (self.used_terms == 'hashtags_only'):\n tones = [self.get_tone(term) for term in terms\n if len(term) > 0 and term[0] == '#']\n\n if (len(tones) == 0):\n tones.append(0)\n\n for function_name in self.functions:\n if (function_name == 'sum'):\n value = (sum(tones))\n elif (function_name == 'max'):\n value = max(tones)\n elif (function_name == 'min'):\n value = min(tones)\n else:\n raise ValueError(\n \"unexpected function: '{}'\".format(function_name))\n\n feature_name = \"{}_{}\".format(self.get_name(), function_name)\n features[feature_name] = utils.normalize(value)\n\n #\n # Calculate sum of cluster scores\n #\n # for cluster in self.bag_of_clusters_features:\n # cluster_tones = [self.get_cluster_tone(\n # cluster, cluster.get_cluster_id(word))\n # for word in terms if cluster.contains_word(word)]\n # if len(cluster_tones) == 0:\n # cluster_tones.append(0)\n\n # feature_name = \"{}_score_sum\".format(cluster.get_name())\n # value = sum(cluster_tones)\n # features[feature_name] = utils.normalize(value)\n\n return features",
"def findTerms(self, text, terms, scope=50, includeAll=True):\n\t\tlistOfResults = list()\n\t\tlistOfMatchesMain = list()\n\t\tlistOfMatchesSecondary = list()\n\n\t\tappend = listOfResults.append\n\t\treplace\t= str.replace\n\n\t\tkeywordIndices = self.find(text, terms[0])\n\n\t\t# loop through the indices and check for dependencies if terms list has more than 1 term\n\t\tfor indices in keywordIndices:\n\n\t\t\tleading = text[indices[0]-scope:indices[0]]\n\t\t\ttrailing = text[indices[0]:indices[0]+scope]\n\n\t\t\tleading = replace(replace(leading, '\\n', '_'), '\\t', ' ') \n\t\t\ttrailing = replace(replace(trailing, '\\n', '_'), '\\t', ' ') \n\n\t\t\t# if terms list has more than 1 term (i.e., contextual terms), see if present within scope\n\t\t\tif len(terms) > 1:\n\n\t\t\t\t# loop through the contextual terms and check for presence within scope\n\t\t\t\tfor term in terms[1:]:\n\n\t\t\t\t\t# if term in either leading or trailing\n\t\t\t\t\tif (replace(term, '*', '') in leading.lower()) or (replace(term, '*', '') in trailing.lower()):\n\n\t\t\t\t\t\t# if '*' in term, do not add this context\n\t\t\t\t\t\tif '*' in term:\n\t\t\t\t\t\t\tpass\n\n\t\t\t\t\t\t# if '*' not indicated, add this context\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\texcerpt = leading + trailing\n\n\t\t\t\t\t\t\tif excerpt not in listOfResults:\n\t\t\t\t\t\t\t\tif includeAll==True:\n\t\t\t\t\t\t\t\t\tappend(excerpt+'\\t'+text[indices[0]:indices[1]]+'\\t'+term)\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tappend(excerpt)\n\n\t\t\t# if terms list has 1 term, just append the excerpt\n\t\t\telse:\n\n\t\t\t\texcerpt = leading + trailing\n\n\t\t\t\tif excerpt not in listOfResults:\n\t\t\t\t\tif includeAll==True:\n\t\t\t\t\t\tappend(excerpt+'\\t'+text[indices[0]:indices[1]]+'\\t')\n\t\t\t\t\telse:\n\t\t\t\t\t\tappend(excerpt)\n\n\t\treturn listOfResults",
"def compare_css_terms(self, x_inputs, x_samples, full_path):\n \n self.load_model_params(full_path)\n \n data_term = self.get_data_term()\n is_term = self.get_is_term()\n \n diff_var = T.log(is_term - data_term)\n \n self.add_p_tilda()\n \n get_css_diff = theano.function(inputs = [self.x, self.x_tilda],\n outputs = [diff_var, self.p_tilda])\n \n diff_val, p_tilda_vals = get_css_diff(x_inputs, x_samples)\n \n return diff_val, p_tilda_vals",
"def corrected_ttr(n_terms, n_words):\n if n_words == 0:\n return 0\n return n_terms / math.sqrt(2 * n_words)",
"def term_restrictions(data):\n\n term = [\"1st\", \"2nd\", \"3rd\", \"1ST\", \"2ND\", \"3RD\"]\n if data not in term:\n return False\n return True",
"def _score_terms(self, terms_list):\n terms_scores = {}\n total_terms = 0\n\n for terms, coeff, split, ivtidx in terms_list:\n if not terms:\n continue\n # Swap ivtidx name for inverted index definition dict\n ivtidx = self._inverted_indexes[ivtidx]\n if not isinstance(terms, (basestring, list, tuple)):\n raise ValueError, \"Invalid type (%s) for ATTR_INVERTED_INDEX attribute. \" \\\n \"Only sequence, unicode or str allowed.\" % str(type(terms))\n\n if isinstance(terms, (list, tuple)):\n parsed = terms\n else:\n if callable(split):\n parsed = split(terms)\n else:\n parsed = split.split(terms)\n\n for term in parsed:\n if not term or (ivtidx['max'] and len(term) > ivtidx['max']) or \\\n (ivtidx['min'] and len(term) < ivtidx['min']):\n continue\n\n term = str_to_unicode(term)\n lower_term = term.lower()\n\n if ivtidx['ignore'] and lower_term in ivtidx['ignore']:\n continue\n if lower_term not in terms_scores:\n terms_scores[lower_term] = [term, coeff]\n else:\n terms_scores[lower_term][1] += coeff\n total_terms += 1\n\n # Score based on term frequency in document. (Add weight for\n # non-dictionary terms? Or longer terms?)\n for lower_term, score in terms_scores.items():\n terms_scores[lower_term][1] = math.sqrt(terms_scores[lower_term][1] / total_terms)\n return dict(terms_scores.values())",
"def _score_terms(self, terms_list):\n terms_scores = {}\n total_terms = 0\n\n for terms, coeff, split, ivtidx in terms_list:\n if not terms:\n continue\n # Swap ivtidx name for inverted index definition dict\n ivtidx = self._inverted_indexes[ivtidx]\n if not isinstance(terms, (str, list, tuple)):\n raise ValueError(\"Invalid type (%s) for ATTR_INVERTED_INDEX attribute. \" \\\n \"Only sequence, unicode or str allowed.\" % str(type(terms)))\n\n if isinstance(terms, (list, tuple)):\n terms = [tostr(term) for term in terms]\n parsed = terms\n else:\n terms = tostr(terms)\n if callable(split):\n parsed = list(split(terms))\n else:\n parsed = split.split(terms)\n\n for term in parsed:\n if not term or (ivtidx['max'] and len(term) > ivtidx['max']) or \\\n (ivtidx['min'] and len(term) < ivtidx['min']):\n continue\n\n lower_term = term.lower()\n\n if ivtidx['ignore'] and lower_term in ivtidx['ignore']:\n continue\n if lower_term not in terms_scores:\n terms_scores[lower_term] = [term, coeff]\n else:\n terms_scores[lower_term][1] += coeff\n total_terms += 1\n\n # Score based on term frequency in document. (Add weight for\n # non-dictionary terms? Or longer terms?)\n for lower_term, score in terms_scores.items():\n terms_scores[lower_term][1] = math.sqrt(terms_scores[lower_term][1] / total_terms)\n return dict(terms_scores.values())",
"def any_term(cls, *terms: str) -> \"IFilterPattern\":\n return jsii.sinvoke(cls, \"anyTerm\", [*terms])",
"def add_eqns(df):\n\n def lett(col): return alpha[list(df.columns).index(col)]\n for i in df.index:\n row = str(i + 3)\n if df.loc[i, 'Deleted'] != 'Total':\n df.loc[i, 'M/M_Total'] = '=IF(' + lett('Deleted') + row + '<>\"\",0,' + lett('# Molds') + row + '*' + lett('Price/Mold') + row + '+' + lett('Model Price') + row + ')'\n df.loc[i, 'Unit_Total'] = '=IF(' + lett('Deleted') + row + '<>\"\",0,' + lett('# Units') + row + '*' + lett('Price/Unit') + row + ')'\n df.loc[i, 'Line_Total'] = '=IF(' + lett('Deleted') + row + '<>\"\",0,' + 'SUM(' + lett('M/M_Total') + row + ',' + lett('Unit_Total') + row + '))'\n return df",
"def terms(f):\n return dmp_terms(f.rep, f.lev, f.dom)",
"def test(dfa, words):\n for word in words:\n try:\n dfa.test(word)\n except AssertionError as e:\n logging.error('ERROR: %s\\n' % e.message)",
"def test__empty_terms_in_potential():\n\n assert automol.pot.is_nonempty(POT1)\n assert not automol.pot.is_nonempty(POT4)\n\n ref_filt_pot = {\n (0.00000000,): 0.00, (1.04719755,): 3.58,\n (2.09439510,): 0.01, (2.61799388,): 1.75,\n (3.14159265,): 3.59, (3.66519143,): 1.69,\n (4.18879020,): 0.02, (4.71238898,): 1.72,\n (5.23598776,): 3.60\n }\n\n filt_pot = automol.pot.remove_empty_terms(POT3)\n assert numpy.allclose(\n tuple(filt_pot.keys()), tuple(ref_filt_pot.keys()), atol=1.0e-2)\n assert numpy.allclose(\n tuple(filt_pot.values()), tuple(ref_filt_pot.values()), atol=1.0e-2)",
"def test_should_accept_alphanumeric_formulas(self):\n validator = CharCombinationValidator()\n\n for formula in self.correct_formulas:\n self.assertIsNone(validator(formula))",
"def all_terms(f):\n return dmp_all_terms(f.rep, f.lev, f.dom)",
"def test_fwhm(self):\n for i, func in enumerate(self.fwhm_funcs):\n for j, arr1d in enumerate(self.input_arrays):\n res = func(arr1d)\n assert_allclose(res.fwhm, self.answers[i][j], atol=1e-4)",
"def get_relevant_terms(self, to_test_path, min_word_count=0):\n matching_include_path_ids = []\n matching_exclude_path_ids = []\n op_pathes_include = []\n op_pathes_exclude = []\n # all terms without a pattern\n no_restricted_terms_ids =Term.objects.filter(is_active=True).filter(operating_path__isnull=True).values_list('id', flat=True)\n relevant_term_ids = [x for x in no_restricted_terms_ids]\n \n # get all exclude pathes\n op_path_exclude_ids = OperatingPath.objects.filter(is_include=False).values_list('id', flat=True)\n # includes\n op_path_include_ids = OperatingPath.objects.filter(is_include=True).values_list('id', flat=True)\n if op_path_include_ids:\n op_pathes_include = OperatingPath.objects.filter(id__in=op_path_include_ids)\n if op_path_exclude_ids:\n op_pathes_exclude = OperatingPath.objects.filter(id__in=op_path_exclude_ids)\n \n # test exclude pathes\n for exclude_path in op_pathes_exclude:\n if exclude_path.is_matching(to_test_path):\n if exclude_path.id not in matching_exclude_path_ids:\n matching_exclude_path_ids.append(exclude_path.id)\n # test include pathes\n for include_path in op_pathes_include:\n if include_path.is_matching(to_test_path):\n if include_path.id not in matching_include_path_ids:\n matching_include_path_ids.append(include_path.id)\n \n # now mix the buckets\n to_include_term_ids = Term.objects.filter(is_active=True).filter(operating_path__id__in=matching_include_path_ids).values_list('id', flat=True)\n #log.warn(\"include term ids %s\"%(to_include_term_ids) )\n to_exclude_term_ids = Term.objects.filter(is_active=True).filter(operating_path__id__in=matching_exclude_path_ids).values_list('id', flat=True)\n # buckets with term ids are filled\n # now get the term obj to the ids\n positive_term_ids = [x for x in relevant_term_ids]\n positive_term_ids += [x for x in to_include_term_ids]\n final_set = []\n for x in positive_term_ids:\n if x not in to_exclude_term_ids:\n if x not in final_set:\n final_set.append(x)\n \n relevant_terms = Term.objects.filter(word_count__gte = min_word_count).filter(id__in = final_set).exclude(is_active = False) \n relevant_terms = relevant_terms.order_by(\"-word_count\") \n \n # need to be ordered by wordcount longest first\n return relevant_terms",
"def test_ccd_doubles_terms(parthole_drudge):\n\n dr = parthole_drudge\n p = dr.names\n\n a, b, c, d = p.V_dumms[:4]\n i, j, k, l = p.O_dumms[:4]\n u = dr.two_body\n t = IndexedBase('t')\n dr.set_dbbar_base(t, 2)\n\n r = IndexedBase('r')\n tensor = dr.define_einst(\n r[a, b, i, j],\n + t[a, b, l, j] * t[c, d, i, k] * u[k, l, c, d]\n + t[a, d, i, j] * t[b, c, k, l] * u[k, l, c, d]\n - t[a, b, i, l] * t[c, d, k, j] * u[k, l, c, d]\n - t[a, c, k, l] * t[b, d, i, j] * u[k, l, c, d]\n )\n targets = [tensor]\n\n eval_seq = optimize(targets, substs={p.nv: p.no * 10})\n\n assert verify_eval_seq(eval_seq, targets)",
"def _construct_compute_fe_terms(self):\n # setup some symbolic variables for theano to deal with\n xi = T.matrix()\n xo = T.matrix()\n _, hi_zmuv = self._construct_zmuv_samples(xi, 1)\n # construct values to output\n nll = self.nlli[-1]\n kld = self.kld_z.flatten() + self.kld_hi_q2p.flatten()\n # compile theano function for a one-sample free-energy estimate\n fe_term_sample = theano.function(inputs=[ xi, xo ], \\\n outputs=[nll, kld], \\\n givens={self.x_in: xi, \\\n self.x_out: xo, \\\n self.hi_zmuv: hi_zmuv}, \\\n updates=self.scan_updates)\n # construct a wrapper function for multi-sample free-energy estimate\n def fe_term_estimator(XI, XO, sample_count):\n # compute a multi-sample estimate of variational free-energy\n nll_sum = np.zeros((XI.shape[0],))\n kld_sum = np.zeros((XI.shape[0],))\n for i in range(sample_count):\n result = fe_term_sample(XI, XO)\n nll_sum += result[0].ravel()\n kld_sum += result[1].ravel()\n mean_nll = nll_sum / float(sample_count)\n mean_kld = kld_sum / float(sample_count)\n return [mean_nll, mean_kld]\n return fe_term_estimator",
"def find_terms(naf: KafNafParser, words: Sequence[str]) -> Iterable[Cterm]:\n for t in naf.get_terms():\n if t.get_lemma() in words or get_word(naf, t) in words:\n yield t",
"def check_spellings(text):\n\n for word in vocabulary:\n text = correct(word, text, 0.7)\n return text",
"def eval_all_combinations(docs, labels, punct_vals,\n feature_fns, min_freqs):\n ###TODO\n \n # gettting feature's 7 combinations\n feature_comb = [] \n i = 1\n \n while i <= len (feature_fns) :\n comb = combinations(feature_fns,i)\n i += 1 \n \n for c in comb:\n feature_comb.append(c) \n #print(c)\n\n #for option in feature_comb:\n #print(option)\n \n # LogisticRegression object\n \n\n keys = ['punct','features','min_freq','accuracy']\n results = []\n dicts = [] \n feature_dict = {}\n \n # setting on punct,mi_freq and conmbination of feature\n for punct in punct_vals: \n tokens_list = [tokenize(d,punct) for d in docs]\n \n for freq in min_freqs : \n\n for features in feature_comb:\n \n #print('MinFreq = %d Punct = %s fetures = %s'%(freq,punct,features))\n X, vocabulary = vectorize(tokens_list,features,freq)\n clf = LogisticRegression()\n avg_acc = cross_validation_accuracy(clf, X, labels, k=5)\n #print('Avg accuracy = %f'%(avg_acc))\n #print('vocab size =',len(vocabulary)) \n \n result = [punct,features,freq,avg_acc]\n\n feature_dict = dict(zip(keys, result))\n\n dicts.append(feature_dict)\n \n \n # sort dict on accuracy \n dicts.sort(key=lambda x:(-x['accuracy'],-x['min_freq']))\n #print('dicts = ',dicts)\n \n return(dicts)",
"def corrections(vec):\n bonf_all = vec * len(vec)\n bonf_within = vec.groupby(level=0).apply(lambda s: s * len(s))\n\n bh_all = H.bhCorrection(vec)\n bh_within = vec.groupby(level=0).apply(H.bhCorrection).order()\n\n two_step = bh_within * len(vec.groupby(level=0).size())\n q = pd.concat([vec, bh_within, bh_all, bonf_all, bonf_within, two_step],\n keys=['uncorrected', 'bh_within', 'bh_all', 'bonf_all', 'bonf_within',\n 'two_step'], axis=1)\n return q",
"def basic_ttr(n_terms, n_words):\n if n_words == 0:\n return 0\n return n_terms / n_words",
"def test_spelling(self) -> None:\n misstakes: Dict[Word, List[str]] = self.report.spellcheck(\n self.rules.spelling_skip_wordclasses\n )\n for word, corrections in misstakes.items():\n if word.text.lower() in self.rules.forbidden_words:\n continue\n if word.text.lower() in [\n ab[\"word\"] for ab in self.rules.police_abbreviations\n ]:\n continue\n error_text: str = f\"Ordet {word.text} är felstavat.\"\n if corrections:\n error_text += \" Rättningsförslag: \" + \", \".join(corrections) + \".\"\n self.add_error(error_text, word=word)",
"def similarities (self, listOfWords):\n \n # building the query dictionary\n queryDict = collections.defaultdict(int)\n for w in listOfWords:\n queryDict [w] += + 1.0\n \n # normalizing the query\n length = float (len (listOfWords))\n for k in queryDict:\n queryDict [k] /= length\n \n # computing the list of similarities\n sims = []\n for doc in self.documents:\n score = 0.0\n docDict = doc [1]\n for k in queryDict:\n if docDict.has_key (k):\n score += (queryDict [k] / self.corpusDict [k]) + (docDict [k] / self.corpusDict [k])\n sims.append ([doc [0], score])\n \n return sims",
"def eqs_and_deriv(self, _):\n pass",
"def any_term(self, row):\n return any(term in row for term in self.search_terms)",
"def test_calculate_basic_property_terms():\n X = 100\n delta_beta = 1.0\n rho = np.ones(X)\n g = np.ones(X)\n g_plus = np.ones(X) + 1\n g_minus = np.ones(X)\n\n ret = st.calculate_basic_property_terms(delta_beta, rho, g, g_plus, g_minus)\n\n assert np.all(ret[0] == g / rho)\n assert np.all(ret[1] == 0.5)\n assert np.all(ret[2] == 1.0)\n return",
"def count_terms(equat_orig):\n\tterms = 0\n\tfor pow_group in equat_orig:\n\t\tif pow_group:\n\t\t\tfor _ in pow_group:\n\t\t\t\tterms += 1\n\tprint(f'\\033[1;95mTerms in the polynom: \\033[0m{terms}')",
"def score_pmid_cooccurrence(term0_to_pmids, term1_to_pmids, term0_name='term_0', term1_name='term_1', verbose=True):\n all_pmids0 = set.union(*term0_to_pmids.values())\n all_pmids1 = set.union(*term1_to_pmids.values())\n pmids_in_both = all_pmids0 & all_pmids1\n total_pmids = len(pmids_in_both)\n if verbose:\n print('Total articles containing a {}: {}'.format(term0_name, len(all_pmids0)))\n print('Total articles containing a {}: {}'.format(term1_name, len(all_pmids1)))\n print('Total articles containing both a {} and {}: {}'.format(term0_name, term1_name, total_pmids))\n\n term0_to_pmids = term0_to_pmids.copy()\n term1_to_pmids = term1_to_pmids.copy()\n for d in term0_to_pmids, term1_to_pmids:\n for key, value in list(d.items()):\n d[key] = value & pmids_in_both\n if not d[key]:\n del d[key]\n\n if verbose:\n print('\\nAfter removing terms without any cooccurences:')\n print('+ {} {}s remain'.format(len(term0_to_pmids), term0_name))\n print('+ {} {}s remain'.format(len(term1_to_pmids), term1_name))\n\n rows = list()\n for term0, term1 in itertools.product(term0_to_pmids, term1_to_pmids):\n pmids0 = term0_to_pmids[term0]\n pmids1 = term1_to_pmids[term1]\n\n a = len(pmids0 & pmids1)\n b = len(pmids0) - a\n c = len(pmids1) - a\n d = total_pmids - len(pmids0 | pmids1)\n contingency_table = [[a, b], [c, d]]\n\n expected = len(pmids0) * len(pmids1) / total_pmids\n enrichment = a / expected\n\n oddsratio, pvalue = scipy.stats.fisher_exact(contingency_table, alternative='greater')\n rows.append([term0, term1, a, expected, enrichment, oddsratio, pvalue])\n\n columns = [term0_name, term1_name, 'cooccurrence', 'expected', 'enrichment', 'odds_ratio', 'p_fisher']\n df = pd.DataFrame(rows, columns=columns)\n\n if verbose:\n log.info('\\nCooccurrence scores calculated for {} {} -- {} pairs'.format(len(df), term0_name, term1_name))\n return df",
"def with_terms(model: Model, terms: Iterable[Term]):\n program: SWIProgram = model.solver.program\n if isinstance(program, SWIProgram):\n # cdb = ClauseDB(builtins={})\n # for c in terms:\n # cdb.add_statement(c)\n identifiers = list(x[0:2] for x in program.add_program(terms))\n model.solver.cache.invalidate()\n try:\n yield\n finally:\n for type_, idx in identifiers:\n if type_ == \"cl\":\n program.retract_clause(idx)\n elif type_ == \"fa\":\n program.retract_fact(idx)\n else:\n raise NotImplementedError(\n \"with_terms is currently only implemented for ApproximateEngine\"\n )",
"def get_any(shard, qterms, shard_term_features, shard_size):\n d = float(shard_size)\n # first compute the prob for a document in this shard to contain none of the query terms\n tmp = 1\n for t in qterms:\n t = t.strip()\n if t in shard_term_features[shard]:\n cdf = shard_term_features[shard][t].df\n else:\n cdf = 0\n tmp *= 1 - cdf / d\n\n # than compute any\n any = (1 - tmp) * d\n return any",
"def evaluate_polynomials(polynomials: List[Poly]):\n fft = MultiDimNonBinaryFFT(field, root_of_unity, width)\n values = fft.multi_fft(polynomials)\n return values",
"def a_test_couple_terms():\n model = ARIMAX(formula=\"y ~ x1\", data=data, ar=1, ma=1, family=Exponential())\n x = model.fit()\n assert(len(model.latent_variables.z_list) == 4)\n lvs = np.array([i.value for i in model.latent_variables.z_list])\n assert(len(lvs[np.isnan(lvs)]) == 0)",
"def test_forbidden_words(self) -> None:\n pad_open: bool = False\n words: List[Word] = self.report.get_words()\n forbidden_words: List[Word] = []\n last_error: bool = False\n\n for word in words:\n if word.text in self.rules.citation_delimiters:\n pad_open = not pad_open\n continue\n if pad_open:\n continue\n if (word.text in self.rules.forbidden_words) or any(\n [b in self.rules.forbidden_words for b in word.baseform]\n ):\n forbidden_words.append(word)\n last_error = True\n continue\n if last_error:\n last_error = False\n combo = \" \".join([w.text for w in forbidden_words])\n start, _ = self.report.get_word_postion(forbidden_words[0])\n _, end = self.report.get_word_postion(forbidden_words[-1])\n self.add_error(\n f\"Ordet {combo} får endast förekomma i citat.\", position=(start,end)\n )",
"def test_perform_pairwise_tests_multi_comp(self):\r\n # Verified with R's t.test function.\r\n exp = [['foo', 'bar', -6.5999999999999996, 0.0070804795641244006,\r\n 0.021241438692373202, nan, nan], ['foo', 'baz',\r\n -\r\n 9.7979589711327115, 0.00060818494446333643, 0.0018245548333900093,\r\n nan, nan], ['bar', 'baz', -3.0, 0.05766888562243732,\r\n 0.17300665686731195, nan, nan]]\r\n obs = _perform_pairwise_tests(self.labels2, self.dists2, 'two-sided',\r\n 0)\r\n self.compare_multiple_level_array(obs, exp)",
"def test_accept_all_terms_optional(api, account, given_terms):\n api.terms.get_all_terms.return_value = given_terms\n api.terms.accept_terms.reset_mock()\n account.accept_all_terms(optional=True)\n api.terms.get_required_terms.assert_not_called()\n api.terms.get_all_terms.assert_called()\n\n call_count = custom_st.count_terms(given_terms)\n assert api.terms.accept_terms.call_count == call_count",
"def _gen_terms(self, tree):\n for leaf in self._leaves(tree):\n terms = [self._normalize(w) for w, _ in leaf if self._filter(w)]\n # Phrase only\n if len(terms) > 1:\n yield terms",
"def undetermined_coefficients(gensols: List[Symbol], func_coeffs: List[Symbol], gt: Symbol, t: Symbol = t) -> Tuple[Symbol, Procedure]:\n\n Y = Function('Y', real=True)(t)\n\n coeffs = numbered_symbols('A', cls=Dummy)\n coefflist = []\n\n trialset = _undetermined_coefficients_match(gt, t)['trialset']\n\n notneedset = set()\n\n mult = 0\n for i, sol in enumerate(gensols):\n check = sol\n if check in trialset:\n # If an element of the trial function is already part of the\n # homogeneous solution, we need to multiply by sufficient x to\n # make it linearly independent. We also don't need to bother\n # checking for the coefficients on those elements, since we\n # already know it will be 0.\n while True:\n if check*t**mult in trialset:\n mult += 1\n else:\n break\n trialset.add(check*t**mult)\n notneedset.add(check)\n\n newtrialset = trialset - notneedset\n\n # while True:\n # dependent = False\n # for trial in newtrialset:\n # if trial in gensols:\n # dependent = True\n # break\n # if not dependent:\n # break\n # newtrialset = set([t*trial for trial in trialset])\n\n # trialset = trialset.union(newtrialset)\n\n trialfunc = sympy.Number(0)\n for i in newtrialset:\n c = next(coeffs)\n coefflist.append(c)\n trialfunc += c*i\n\n derivatives = []\n\n eqs = 0\n for order, coeff in enumerate(func_coeffs[::-1]):\n deriv = simplify(trialfunc.diff(t, order))\n derivatives.append(\n Eq(Derivative(Y, t, order), deriv, evaluate=False))\n eqs += coeff * deriv\n\n coeffsdict = dict(list(zip(trialset, [0]*(len(trialset) + 1))))\n\n eqs_lhs = eqs\n\n eqs = _mexpand(simplify(eqs - gt).expand())\n\n for i in Add.make_args(eqs):\n s = separatevars(i, dict=True, symbols=[t])\n coeffsdict[s[t]] += s['coeff']\n\n coeffvals = solve(list(coeffsdict.values()), coefflist)\n\n if not coeffvals:\n print(\n \"Could not solve `%s` using the \"\n \"method of undetermined coefficients \"\n \"(unable to solve for coefficients).\" % eqs)\n\n psol = trialfunc.subs(coeffvals)\n\n procedure = Procedure()\n procedure\\\n .text('Find ').latex('Y(t)').text(' that mimics the form of ').latex('g(t)', nl=True)\\\n .eq(Eq(Y, trialfunc, evaluate=False))\\\n .text('Compute successive derivatives of ').latex('Y(t)', nl=True)\\\n .equlist(derivatives)\\\n .text('Plug the derivatives into the LHS and equate coefficients', nl=True)\\\n .equlist([Eq(eqs_lhs, gt, evaluate=False),\n Eq(simplify(eqs_lhs).expand().collect(t), gt, evaluate=False)])\\\n .equarr([Eq(a, 0, evaluate=False) for a in coeffsdict.values()])\\\n .text('Solve for the undetermined coefficients', nl=True)\\\n .equarr([Eq(k, v, evaluate=False)\n for k, v in coeffvals.items() if k != 0] if len(coeffvals) > 0 else [])\\\n .text('Substitute the coefficients to get the particular solution', nl=True)\\\n .eq(Eq(Dummy('y_p'), psol, evaluate=False))\n\n return psol, procedure",
"def test_TRit(self):\n\n prices = self._convert_df_to_list(self.prices)\n currency = self._convert_df_to_list(self.currency)\n\n manual_total = self.list_multiplication(prices, currency)\n manual_calculated = list(self._manual_calculate_formal(manual_total))\n\n self.portfolio._generate_total()\n test_column = self._convert_df_to_list(self.portfolio._df_total)\n\n self.assertAlmostEqual(self._clear_column(manual_calculated),\n self._clear_column(test_column))",
"def _compute_expected_freqs(confmat: Tensor) ->Tensor:\n margin_sum_rows, margin_sum_cols = confmat.sum(1), confmat.sum(0)\n expected_freqs = torch.einsum('r, c -> rc', margin_sum_rows, margin_sum_cols) / confmat.sum()\n return expected_freqs",
"def test_target_zero_order_terms(self):\n obj = self.objective.Target_Terms(numpy.array([.5]*self.ff.np), Order=0)\n self.assertEqual(type(obj),dict)\n self.assertTrue(\"X\" in obj)\n self.assertNotEqual(int(obj[\"X\"]), 0)\n \n self.assertTrue(\"G\" in obj)\n self.assertFalse(obj[\"G\"].any())\n \n self.assertTrue(\"H\" in obj)\n self.assertEqual(obj[\"H\"], numpy.diag([1]*self.ff.np))",
"def a_test2_couple_terms():\n model = ARIMAX(formula=\"y ~ x1 + x2\", data=data, ar=1, ma=1, family=Exponential())\n x = model.fit()\n assert(len(model.latent_variables.z_list) == 5)\n lvs = np.array([i.value for i in model.latent_variables.z_list])\n assert(len(lvs[np.isnan(lvs)]) == 0)",
"def findLinkedTerms(self):\n for key in self.summaryDict.keys(): # v' in the formula\n if self.getCoverFromModalityInDictionnary(self.summaryDict, key) == 0:\n correlation = 0\n else:\n dep = self.getCoverFromModalityInDictionnary(self.summaryFilteredDict,key) / self.getCoverFromModalityInDictionnary(self.summaryDict, key) #cover(v',R')/cover(v'R)\n if dep <= 1:\n correlation = 0\n else:\n correlation = 1 - (1 / dep)\n self.correlationDict[key] = correlation",
"def test_ccsd_singles_terms(parthole_drudge):\n\n dr = parthole_drudge\n p = dr.names\n\n a, b, c = p.V_dumms[:3]\n i, j, k = p.O_dumms[:3]\n u = dr.two_body\n f = dr.fock\n t = IndexedBase('t')\n dr.set_dbbar_base(t, 2)\n\n r = IndexedBase('r')\n tensor = dr.define_einst(\n r[a, i],\n t[a, b, i, j] * u[j, k, b, c] * t[c, k] + t[a, b, i, j] * f[j, b]\n - t[a, j] * t[b, i] * f[j, b]\n - t[a, j] * t[b, i] * t[c, k] * u[j, k, b, c]\n )\n targets = [tensor]\n\n eval_seq = optimize(targets, substs={p.nv: p.no * 10})\n\n assert verify_eval_seq(eval_seq, targets)\n assert len(eval_seq) == 4",
"def compute_terms(Y, latent_means, latent_Sigmas, B1, B2, mu, g1, g2):\n # these are the 's' parameters when nu=e_q, beta=0\n ss = [-mi[-1]*si[-1] for mi, si in zip(latent_means, latent_variances)]\n\n a1 = [-np.trace(Sigma) - np.matmul(mi.T, mi) + np.log(LA.det(Sigma)) for Sigma, mi in zip(latent_Sigmas, latent_means)]\n\n B_times_mean = [np.matmul(B1+B2, mi) for mi in latent_means]\n a2 = [\n np.matmul(\n yi - mu, \n (yi - mu).T\n ).item() - np.matmul(\n yi - mu, \n Bm\n ).item() for yi, Bm in zip(Y, B_times_mean)\n ]\n\n a3_scalars = [SQRT_PI_OVER_2*erfc(si/ROOT2)+si*np.exp(-si**2/2) for si in ss]\n a3 = [\n g1*sc*np.trace(\n np.matmul(\n B1.T, \n np.matmul(B1, Sigma)\n )\n ).item() for Sigma, sc in zip(latent_Sigmas, a3_scalars)\n ]\n\n a4_scalars = [SQRT_PI_OVER_2*(erf(si/ROOT2)+1)-si*np.exp(-si**2/2) for si in ss]\n a4 = np.array([\n g2*sc*np.trace(\n np.matmul(\n B2.T, \n np.matmul(\n B2, \n Sigma\n )\n )\n ).item() for Sigma, sc in zip(latent_Sigmas, a4_scalars)\n ],\n dtype='object'\n )\n\n a5_inner = [\n erfc(si/ROOT2)*np.matmul(\n B1.T, \n B1\n ) + (erf(si/ROOT2)+1)*np.matmul(\n B2.T, \n B2\n ) for si in ss]\n a5 = [\n SQRT_PI_OVER_2*np.matmul(mi.T, np.matmul(Bi, mi)) for mi, Bi in zip(latent_means, a5_inner)\n ]\n\n # convert all list of 1d arrays to lists of floats\n a1 = [element.item()*0.5 for element in a1]\n a3 = [element.item() for element in a3]\n a4 = [element.item() for element in a4]\n a5 = [element.item() for element in a5]\n\n return a1, a2, a3, a4, a5",
"def test_pythagorean_triples(self):\n\n s = space(0)\n for a, b, c in (\n (3, 4, 5),\n (8, 15, 17),\n (33, 56, 65)\n ):\n self.assertTrue(isclose(\n s.hypot(a, b),\n c\n ))\n self.assertTrue(isclose(\n s.leg(a, c),\n b\n ))",
"def test_accept_all_terms_required(api, account, given_terms):\n api.terms.get_required_terms.return_value = given_terms\n api.terms.accept_terms.reset_mock()\n account.accept_all_terms()\n api.terms.get_required_terms.assert_called()\n api.terms.get_all_terms.assert_not_called()\n\n call_count = custom_st.count_terms(given_terms)\n assert api.terms.accept_terms.call_count == call_count",
"def spell_t(tests, verbose=False):\n import time\n start = time.clock()\n good, unknown = 0, 0\n n = len(tests)\n for right, wrong in tests:\n w = en_correct(wrong)[0]\n good += (w == right)\n if w != right:\n unknown += (right not in spell.word_freq_dict)\n if verbose:\n print('en_correct({}) => {}; expected {}'.format(wrong, w, right))\n dt = time.clock() - start\n print('acc: {:.0%}, total num: {}, ({:.0%} unknown), speed: {:.0f} '\n 'words per second'.format(good / n, n, unknown / n, n / dt))",
"def chi_2_test(magnitudes, errors):\n num_obs = magnitudes.shape[0]\n errors_sq = np.square(errors)\n\n m_bar = np.sum(np.divide(magnitudes, errors_sq)) /\\\n np.sum(np.divide(np.ones(num_obs), errors_sq))\n\n chi_2 = np.sum(np.divide(np.square(magnitudes - m_bar), errors_sq))\n return chi_2",
"def render_formula_context(context, words):\n result = []\n started, ended = False, False\n for word in words:\n is_formula = (\n word.sentence_unique == context.sentence_unique and\n context.start <= word.idx <= context.end\n )\n if is_formula and not started:\n result.append('<span class=\"f-page-formula\">')\n started = True\n elif not is_formula and started and not ended:\n result.append('</span>')\n ended = True\n result.append(word.token.word_form)\n return \" \".join(result)",
"def test_fn(df, fn):\n\n y_pred = []\n y_true = []\n\n for key in df.index:\n y_t, *inputs = df.loc[key]\n y_true.append(y_t)\n y_p = fn(*inputs)\n y_pred.append(y_p)\n\n # linear regression without intercept\n c = np.mean(y_true) / np.mean(y_pred)\n y_pred = np.multiply(y_pred, c)\n\n rmse = np.sqrt(np.mean(np.subtract(y_pred, y_true) ** 2))\n return rmse, y_pred, y_true, c",
"def _testit(words):\n w_list = list(words)\n pairs = defaultdict(lambda: [0, 0])\n if not _is_component(w_list):\n return False\n for word in w_list:\n pairs[word[0].lower()][0] += 1\n pairs[word[-1].lower()][1] += 1\n lst = sorted([pair[0] - pair[1] for pair in pairs.values()])\n return all(i == 0 for i in lst[1:-1]) and \\\n lst[-1] <= 1 and sum(lst[::len(lst) - 1]) == 0",
"def test(self, values: Dict[str, Any]) -> Optional[str]:\n reasons = []\n for alt in self.alternatives:\n reason = alt.test(values)\n if reason is None:\n return None\n reasons.append(reason)\n\n return \" AND \".join(reasons)",
"def fits(a, b):\n return all(x & y for x, y in zip(a, b))",
"def _calc_chromatic_term(self):\n LOG.debug(\"Calculating Chromatic Term.\")\n self._check_k_columns([\"K1L\", \"K2L\", \"K2SL\"])\n res = self._results_df\n tw = self.twiss_df\n\n with timeit(lambda t: LOG.debug(\" Time needed: {:f}\".format(t))):\n mask = (tw['K1L'] != 0) | (tw['K2L'] != 0) | (tw['K2SL'] != 0)\n if \"DX\" in tw and \"DY\" in tw:\n LOG.debug(\"Dispersion values found in model. Used for chromatic calculations\")\n sum_term = tw.loc[mask, 'K1L'] - \\\n (tw.loc[mask, 'K2L'] * tw.loc[mask, 'DX']) + \\\n (tw.loc[mask, 'K2SL'] * tw.loc[mask, 'DY'])\n else:\n LOG.info(\"Dispersion values NOT found in model. Using analytic values.\")\n if \"DX\" not in res or \"DY\" not in res:\n self.calc_linear_dispersion()\n sum_term = tw.loc[mask, 'K1L'] - \\\n (tw.loc[mask, 'K2L'] * res.loc[mask, 'DX']) + \\\n (tw.loc[mask, 'K2SL'] * res.loc[mask, 'DY'])\n\n res['CHROMX'] = sum_term * tw.loc[mask, 'BETX']\n res['CHROMY'] = sum_term * tw.loc[mask, 'BETY']\n\n LOG.debug(\"Chromatic Term Calculated.\")",
"def eval_expon(terms):\n pow_dex = terms.index('^')\n if terms[pow_dex + 1] == '-':\n terms[pow_dex + 1] = -1 * terms[pow_dex + 2]\n del terms[pow_dex + 2]\n\n terms[pow_dex - 1] = terms[pow_dex - 1] ** terms[pow_dex + 1]\n\n del terms[pow_dex: pow_dex + 2]\n\n return terms",
"def test_do_check_number_of_terms(self):\n self.assertTrue(self.a.do_check_number_of_terms(self.b))\n self.assertFalse(self.a.do_check_number_of_terms(self.c))",
"def calculate_chi_squared(self):\n chi = 0\n obsVals, expVals = self.calculate_obs_and_exp()\n for i in range(4):\n if expVals[i] != 0:\n chi += (obsVals[i] - expVals[i])**2 / expVals[i]\n return chi",
"def filter_element_and(mt_list, elem_list):\r\n return [mt for mt in mt_list if all(e in mt['pretty_formula'] for e in elem_list)]",
"def a_test_no_terms():\n model = ARIMAX(formula=\"y ~ x1\", data=data, ar=0, ma=0, family=Exponential())\n x = model.fit()\n assert(len(model.latent_variables.z_list) == 2)\n lvs = np.array([i.value for i in model.latent_variables.z_list])\n assert(len(lvs[np.isnan(lvs)]) == 0)",
"def root_ttr(n_terms, n_words):\n if n_words == 0:\n return 0\n return n_terms / math.sqrt(n_words)",
"def filterOptions(self, phrases,columnlist):\n filtered=[]\n for col in columnlist:\n outerFlag = False\n for phrase in phrases:\n innerFlag = True\n for word in phrase:\n if word.lower() not in col.lower(): innerFlag = False #Requires all words of phrase to be in col\n if innerFlag: outerFlag = True #Requires at least one phrase to be in col\n if outerFlag: filtered.append(col)\n if not filtered: filtered = ['']\n return filtered",
"def _corrections(self, *correction_models):\n corrections = 0 if self.time.isscalar else np.zeros(self.time.size)\n for name, correction_func, out_idx, factor in correction_models:\n if name not in self.models:\n continue\n\n correction_cache = self._correction_cache.setdefault(name, dict())\n if self.time.isscalar:\n mjd = self.time.tt.mjd\n if mjd not in correction_cache:\n correction_cache[mjd] = correction_func(mjd)\n corrections += factor * correction_cache[mjd][out_idx]\n else:\n for idx, mjd in enumerate(self.time.tt.mjd):\n if mjd not in correction_cache:\n correction_cache[mjd] = correction_func(mjd)\n\n corrections[idx] += factor * correction_cache[mjd][out_idx]\n\n return corrections",
"def make_function_word_cols(df):\n\tfunction_words = ['a', 'been', 'all', 'but', 'also', 'by', 'an', 'can',\n\t\t\t\t\t 'and','do', 'any', 'down', 'are', 'even', 'had', 'its',\n\t\t\t\t\t 'one','has', 'may', 'only', 'have', 'more', 'or', 'her',\n\t\t\t\t\t 'must','our', 'his', 'my', 'should', 'if', 'no', 'so',\n\t\t\t\t\t 'as', 'at','be', 'in', 'not', 'some', 'every', 'into',\n\t\t\t\t\t 'now', 'such','for', 'is', 'of', 'than', 'the', 'were',\n\t\t\t\t\t 'their', 'what','then', 'when', 'there', 'which', 'things',\n\t\t\t\t\t 'who', 'this','will', 'to', 'with', 'up', 'would', 'upon',\n\t\t\t\t\t 'you', 'from','it', 'on', 'that', 'was']\n\tfw_cols = {}\n\tfor fw in function_words:\n\t\tvalues = df['lemma'] == fw\n\t\tfw_cols[fw] = values\n\tdata = pd.DataFrame.from_dict(fw_cols)\n\tmerged = pd.merge(df, data, left_index = True, right_index = True)\n\treturn merged",
"def test_multiple_without_IS(self):\n df = self.mdf\n fixed_record_idx = 0\n for renorm in [True, False]:\n with self.subTest(renorm=renorm):\n out = standardise_aggregate(\n df, renorm=renorm, fixed_record_idx=fixed_record_idx\n )\n if not renorm:\n self.assertTrue(\n np.isclose(\n out.values, df.iloc[fixed_record_idx, :].values\n ).any()\n )",
"def test_from_formula_bad_formula():\n dummy_df = pd.DataFrame(columns=['SystolicBP', 'Smoke', 'Overwt'])\n\n for formula_i in ['Smoke + Overwt', 'SystolicBP ~']:\n with pytest.raises(AssertionError):\n from_formula(formula_i, dummy_df)\n\n assert from_formula('SystolicBP ~ Smoke + l(Overwt)', dummy_df) is not None",
"def test_kl_divergence(get_distributions):\n for i, dist_a in enumerate(get_distributions):\n for j, dist_b in enumerate(get_distributions):\n kl = kl_divergence(dist_a, dist_b)\n if i == j:\n assert pytest.approx(kl, 0.0001) == 0.0\n else:\n assert kl > 0",
"def query_set_sim(self, en_ids, weights):\n # fielded_weights = self.__get_weights(weights)\n scorer = ScorerMLM(econfig.LUCENE, self.query, {}) # {'field_weights': fielded_weights})\n\n p_t_theta_d = {}\n for t in set(self.query.split()):\n p_t_theta_d[t] = 0\n for en in en_ids:\n lucene_doc_id = scorer.lucene.get_lucene_document_id(en)\n p_t_theta_d[t] += scorer.get_mlm_term_prob(lucene_doc_id, weights, t)\n score = self.nllr(self.query, p_t_theta_d, weights)\n if score is None:\n return 0\n return math.exp(score)",
"def chi_square_analysis(obs_list):\r\n obs = np.array(obs_list)\r\n chi2, p, dof, expected = chi2_contingency(obs)\r\n return chi2, p, dof, expected",
"def a_test2_no_terms():\n model = ARIMAX(formula=\"y ~ x1 + x2\", data=data, ar=0, ma=0, family=Exponential())\n x = model.fit()\n assert(len(model.latent_variables.z_list) == 3)\n lvs = np.array([i.value for i in model.latent_variables.z_list])\n assert(len(lvs[np.isnan(lvs)]) == 0)",
"def _quad_terms(self, padded):\n\n def loop_body(_padded, ii):\n _padded = tf.concat(\n [_padded, _padded[:, ii + 1:self.nlabels + 1] * tf.expand_dims(_padded[:, ii + 1], axis=-1)],\n axis=1)\n return _padded, tf.add(ii, 1)\n\n i = tf.constant(0)\n all_padded = tf.while_loop(lambda _padded, _i: tf.less(_i, self.nlabels),\n loop_body,\n [padded, i],\n shape_invariants=[tf.TensorShape([self.nspec, None]),\n i.get_shape()],\n parallel_iterations=1,\n back_prop=False)\n return all_padded[0]",
"def eterms(tol=0.001):\n from math import e\n\n terms = 1\n approx = 1\n fact = 1\n while abs(approx - e) > tol:\n fact = fact * terms # Factorial of the number of terms\n approx = approx + 1/fact\n terms = terms + 1\n return terms",
"def df_sample_concepts(self):\n return self.abundance_mat_mult(False)",
"def eq_in_evaluate_partial_derivative(self, eq, used_jacobian_vars):\n eq.setdefault('in_evaluate_partial_derivative', []).append(eq['sympy_lhs']\n in [v[0] for v in used_jacobian_vars\n if v[0] is not None])",
"def key_terms(self, fieldname, docs = 10, numterms = 5,\r\n model = classify.Bo1Model, normalize = True):\r\n \r\n docs = min(docs, self.scored_length())\r\n if docs <= 0: return\r\n \r\n doc_reader = self.searcher.doc_reader\r\n fieldnum = self.searcher.fieldname_to_num(fieldname)\r\n \r\n expander = classify.Expander(self.searcher, fieldname, model = model)\r\n for docnum in self.scored_list[:docs]:\r\n expander.add(doc_reader.vector_as(docnum, fieldnum, \"weight\"))\r\n \r\n return expander.expanded_terms(numterms, normalize = normalize)",
"def test_no_multiply():\n with pytest.raises(NotImplementedError):\n SplineTerm(0) * LinearTerm(1)\n\n term_list = SplineTerm(0) + LinearTerm(1)\n with pytest.raises(NotImplementedError):\n term_list * term_list",
"def getExpandedTerms(self,df):\n search_term = df['search_term'].split()\n searchquery=\"\"\n for term in search_term:\n searchquery=searchquery+\" \" + term\n\n # print(searchquery)\n return self.getExpandedQuery(querywords=searchquery,maxNoOfAdditionalWords=1,minSimilarityLevel=0.7)",
"def text_extractor(course_desc,query,doc_term_TF_matrix,terms,vectorizer):\n query = query.lower()\n query_vec = vectorizer.transform(pd.Series(query))\n sentences = sent_tokenize(course_desc)\n sentences_vec = [vectorizer.transform(pd.Series(sentence)) for sentence in sentences]\n\n tfidf_transformer = TfidfTransformer(smooth_idf=True, use_idf=True)\n tfidf_transformer.fit(doc_term_TF_matrix)\n\n tf_idf_desc = tfidf_transformer.transform(query_vec)\n tf_idf_sentences = [tfidf_transformer.transform(sentence) for sentence in sentences_vec]\n\n sim_array = np.zeros(len(sentences_vec)) # array of similarity scores\n\n array_1 = tf_idf_desc\n for i in range(len(sentences_vec)):\n array_2 = tf_idf_sentences[i]\n sim_array[i] = cosine_similarity(array_1, array_2)\n print(course_desc)\n print(\"Most:\",sentences[np.argmax(sim_array)])",
"def test_target_zero_order_terms(self):\n obj = self.objective.Target_Terms(numpy.array([.5]*self.ff.np), Order=0)\n assert isinstance(obj, dict)\n assert \"X\" in obj\n assert \"G\" in obj\n assert \"H\" in obj\n assert int(obj[\"X\"]) != 0\n assert obj[\"G\"].any() == False\n assert (obj[\"H\"] == numpy.diag([1]*self.ff.np)).all()",
"def fit_all(res=0.05, write=False):\n\n subjects = [101, 102, 103, 104, 105, 106, 108, 109, 111, 112, 113, 114, \n 115, 116, 117, 118]\n\n fitted = []\n reward_names = ['acc', 'gl']\n similarities = [None, 'exp', 'gauss', 'rdis']\n for num in subjects:\n for name in reward_names:\n for sim in similarities:\n params, log_L = fit(num, sim, name, res)\n fitted.append(\n params + (log_L, ) + (num, ) + (name, ) + (sim, ))\n \n if write:\n # Ease writing and None substitution\n # with pandas\n ftable = pandas.DataFrame(\n fitted, \n columns=['alpha', 'beta', 'logL', 'sub','reward', 'sim'])\n ftable.to_csv('101_118_rl_params.txt', \n header=True, index=False, na_rep='none')\n\n return fitted",
"def preprocess_terms(df, dataType='testNames'):\n pass",
"def test_calculate_offsets_word_part(self):\n applicable_terms = [('act', 'a')]\n text = \"I am about to act on this transaction.\"\n t = Terms(None)\n matches = t.calculate_offsets(text, applicable_terms)\n self.assertEqual(1, len(matches))\n self.assertEqual(1, len(matches[0][2]))",
"def get_all_terms(self, index, doc_type, doc_id, field):\n\n term_vector = self.es.termvectors(index, doc_type, id=doc_id, field_statistics=False,\n fields=[field], offsets=False, positions=False)\n\n all_terms = term_vector[field][\"terms\"].keys()\n\n return all_terms",
"def all_terms(self, termset=None, phrases=True):\r\n\r\n if termset is None:\r\n termset = set()\r\n self._all_terms(termset, phrases=phrases)\r\n return termset",
"def Q9_test():\n tab_dist = [[0, 4.123105625617661, 4.242640687119285, 4.47213595499958], [4.123105625617661, 0, 4.123105625617661, 7.810249675906654], [4.242640687119285, 4.123105625617661, 0, 5.0990195135927845], [4.47213595499958, 7.810249675906654, 5.0990195135927845, 0]]\n return plus_court(tab_dist) == [0, 1, 2]",
"def query_vectorize(self, query_words: List) -> np.ndarray:\n weights = np.zeros(shape=self.num_terms)\n terms = set(query_words)\n\n for term in terms:\n if term not in self.vector_mapping:\n continue\n else:\n index = self.vector_mapping[term]\n weights[index] = self.tf_idf_weight_query(term, query_words)\n\n return weights",
"def get_is_term(self):\n \n approx_Z = self.add_is_approximation()\n \n return T.sum(T.exp(approx_Z))",
"def test_newton_rhapson(testFunctions, tol, printFlag): \n pass",
"def test_dice_similarity_all_zeros_compiled():\n vector1 = np.array([0, 0, 0, 0])\n vector2 = np.array([1, 1, 1, 1])\n score11 = dice_similarity(vector1, vector1)\n score12 = dice_similarity(vector1, vector2)\n score22 = dice_similarity(vector2, vector2)\n\n assert score11 == score12 == 0.0, \"Expected different score.\"\n assert score22 == 1.0, \"Expected different score.\""
] | [
"0.5098105",
"0.49435574",
"0.49396107",
"0.4900988",
"0.4857589",
"0.4845605",
"0.46524802",
"0.4648548",
"0.4620158",
"0.46167716",
"0.46052644",
"0.45635396",
"0.45390478",
"0.45198467",
"0.45163706",
"0.4515403",
"0.4504055",
"0.45020077",
"0.44826326",
"0.44797707",
"0.44423264",
"0.44348654",
"0.4431736",
"0.44302016",
"0.44154418",
"0.44074002",
"0.44044936",
"0.4403324",
"0.43892953",
"0.43720508",
"0.43360096",
"0.4335875",
"0.43277425",
"0.43074813",
"0.43059337",
"0.4294411",
"0.42890415",
"0.42875084",
"0.42831764",
"0.4271843",
"0.42581242",
"0.424963",
"0.42415082",
"0.42395455",
"0.42252856",
"0.422365",
"0.4214362",
"0.4205684",
"0.41971707",
"0.41956118",
"0.41900542",
"0.41888046",
"0.41875067",
"0.41716525",
"0.41699615",
"0.4161412",
"0.41606104",
"0.41602978",
"0.415511",
"0.41534948",
"0.41525632",
"0.41489175",
"0.41486478",
"0.41451508",
"0.41437206",
"0.41347295",
"0.41343877",
"0.41325685",
"0.41284376",
"0.41258836",
"0.41216406",
"0.41182202",
"0.41116497",
"0.40989837",
"0.40930858",
"0.40894285",
"0.4088474",
"0.40879315",
"0.4085538",
"0.40754995",
"0.4072646",
"0.4072372",
"0.40722167",
"0.40691793",
"0.40677786",
"0.4066715",
"0.40644747",
"0.406197",
"0.4059779",
"0.4058932",
"0.4057983",
"0.40507263",
"0.40504888",
"0.4043425",
"0.40405148",
"0.40368792",
"0.40309054",
"0.4029845",
"0.4028146",
"0.40267217"
] | 0.61136484 | 0 |
Returns the confidence interval of the fitted parameters. | def conf_int(self, alpha=.05, cols=None, method=None):
if method is not None: # pragma: no cover
raise NotImplementedError("`method` argument is not actually "
"supported. Upstream silently ignores "
"it.")
bse = self.bse
if self.use_t:
dist = stats.t
df_resid = getattr(self, 'df_resid_inference', self.df_resid)
q = dist.ppf(1 - alpha / 2, df_resid)
else:
dist = stats.norm
q = dist.ppf(1 - alpha / 2)
if cols is None:
lower = self.params - q * bse
upper = self.params + q * bse
else:
cols = np.asarray(cols)
lower = self.params[cols] - q * bse[cols]
upper = self.params[cols] + q * bse[cols]
return np.asarray(list(zip(lower, upper))) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def confidence(self) -> float:\n return self._confidence",
"def confidence(self):\n return self._confidence",
"def confidence(self):\n return self._confidence",
"def confidence(self) -> float:\n return float(self.class_scores[self.class_num])",
"def calculate_confidence_interval(input_data, confidence_coeficient=0.95):\n error_margin = ExperimentUtil._calculate_error_margin(input_data, confidence_coeficient)\n superior_limit = statistics.mean(input_data) + error_margin\n inferior_limit = statistics.mean(input_data) - error_margin\n return superior_limit, inferior_limit",
"def confidence_intervals(data):\r\n\r\n x_bar = np.nanmean(data) # Mean value\r\n s = np.nanstd(data) # Standard deviation\r\n n = len(data) # Sample size\r\n\r\n lo_conf = x_bar - (1.96 * (s / np.sqrt(n))) # Lower bound of confidence interval\r\n hi_conf = x_bar + (1.96 * (s / np.sqrt(n))) # Upper bound of confidence interval\r\n\r\n conf_range = hi_conf - lo_conf # Size of the 95% confidence interval\r\n\r\n return lo_conf, hi_conf, conf_range",
"def confidence_interval(self):\r\n coh_var = np.zeros((self.input.data.shape[0],\r\n self.input.data.shape[0],\r\n self._L), 'd')\r\n for i in range(self.input.data.shape[0]):\r\n for j in range(i):\r\n if i != j:\r\n coh_var[i, j] = tsu.jackknifed_coh_variance(\r\n self.spectra[i],\r\n self.spectra[j],\r\n self.eigs,\r\n adaptive=self._adaptive\r\n )\r\n\r\n idx = triu_indices(self.input.data.shape[0], 1)\r\n coh_var[idx[0], idx[1], ...] = coh_var[idx[1], idx[0], ...].conj()\r\n\r\n coh_mat_xform = tsu.normalize_coherence(self.coherence,\r\n 2 * self.df - 2)\r\n\r\n lb = coh_mat_xform + dist.t.ppf(self.alpha / 2,\r\n self.df - 1) * np.sqrt(coh_var)\r\n ub = coh_mat_xform + dist.t.ppf(1 - self.alpha / 2,\r\n self.df - 1) * np.sqrt(coh_var)\r\n\r\n # convert this measure with the normalizing function\r\n tsu.normal_coherence_to_unit(lb, 2 * self.df - 2, lb)\r\n tsu.normal_coherence_to_unit(ub, 2 * self.df - 2, ub)\r\n\r\n return ub - lb",
"def confidenceInterval(start,end,confidence):\n\n\tmean = 0.5*(end+start)\n\tstddev = getStdDev(0.5*(end-start), confidence)\n\n\treturn (mean,stddev)",
"def get_min_confidence(self):\n return self.__min_confidence",
"def min_confidence(self) -> float:\n return self._min_confidence",
"def compute_confidence_interval(data):\n a = 1.0 * np.array(data)\n m = np.mean(a)\n std = np.std(a)\n pm = 1.96 * (std / np.sqrt(len(a)))\n return m, pm",
"def conf(self, success, total):\n try:\n sp = success / total\n conf = binom_conf_interval(success, total, interval='jeffreys')\n uperr = conf[1] - sp # 1 sigma confidence above mean\n loerr = sp - conf[0] # 1 sigma confidence below mean\n return sp, uperr, loerr, 0.5*(uperr+loerr)\n except ValueError as e:\n return 0, 0, 0, 0",
"def confidence_at_95tpr(self):\r\n\r\n return self.confidence_at_tpr(0.95)",
"def confidence_at_99tpr(self):\r\n\r\n return self.confidence_at_tpr(0.99)",
"def get_confidence_interval(self,a,b):\n\t\tk_vals,prob_vals = self.tuple_of_probabilities\n\t\tworking_indices = [i for i,v in enumerate(k_vals) if (v >= a and v<= b)]\n\t\tworking_prob_vals = [prob_vals[i] for i in working_indices]\n\t\treturn sum(working_prob_vals)",
"def guess_fit_parameters(self):\n\n def errfcn(pars):\n lnl = -self._lnprob(pars)\n p = list(pars)\n p.append(lnl)\n logging.info(p)\n return lnl if np.isfinite(lnl) else np.sign(lnl) * 9e9\n\n if self.vary_bin_frac:\n initial_pars = [0.5, 0.5]\n bounds_list = [[0.0, 1.0], [0, 0.999]]\n else:\n initial_pars = [0.5]\n bounds_list = [[0, 0.999]]\n out = minimize(errfcn, initial_pars, bounds=bounds_list)\n self.guess_pars = out.x\n return out.x",
"def _confidence_interval_function(xq, cinfo):\n a = cinfo.a.copy()\n a[cinfo.indx] = xq\n\n yfit, _ = cinfo.fit_function(a, pderflg=False)\n if yfit.dtype in ['complex64','complex128']:\n yfit = np.concatenate([yfit.real,yfit.imag])\n wchisqr1 = np.sum(cinfo.ww*(yfit-cinfo.dat)**2)/cinfo.nfree\n \n goal = abs(wchisqr1-cinfo.wchi*cinfo.factor)\n \n return goal",
"def confidenceInterval(model, N = 30):\n predicted_accuracies = [0]*N\n predicted_roc = [0]*N\n for i in tqdm(range(N)):\n X_train, X_test, y_train, y_test = train_test_split(X, y_binary, random_state=i)\n scaler = StandardScaler()\n X_train = scaler.fit_transform(X_train)\n X_test = scaler.transform(X_test)\n model = model.fit(X_train, y_train)\n predicted_accuracies[i] = accuracy_score(model.predict(X_test), y_test)\n predicted_roc[i] = roc_auc_score(model.predict(X_test), y_test)\n r = np.mean(predicted_roc)\n m = np.mean(predicted_accuracies)\n\n variance_roc = np.var(predicted_roc)\n variance_acc = np.var(predicted_accuracies)\n sd_acc = np.sqrt(variance_acc)\n sd_roc = np.sqrt(variance_roc)\n CI_acc = 2*sd_acc\n CI_roc = 2*sd_roc\n return m, CI_acc, r, CI_roc",
"def estimates_conf(self):\n return self._est_L, self._est_R",
"def confidence_at_98tpr(self):\r\n\r\n return self.confidence_at_tpr(0.98)",
"def generate_confidence(self):\n conf_score = np.random.normal(self.speech_conf_mean,\n self.speech_conf_std)\n conf_score = round(conf_score, 2)\n conf_score = max(conf_score, 0.0) # >= 0.\n conf_score = min(conf_score, 1.0) # <= 1.\n return conf_score",
"def get_confidence_interval(self, scores, ci_method='bca', ci_size=0.95, replications=100000, seed_value=None):\n def score(x):\n return np.array([x.mean()])\n data = np.array([float(score) for score in scores])\n if min(data) == max(data):\n return tuple([min(data), max(data)])\n bs = IIDBootstrap(data)\n if seed_value is not None:\n bs.seed(seed_value)\n ci = bs.conf_int(score, replications, method=ci_method, size=ci_size, tail='two')\n return tuple([ci[0][0], ci[1][0]])",
"def confidence_intervals(self, level = 95):\n margin = (100 - level) / 2 # interval is middle level% of vals, so this is margin to either side of it\n try:\n len(self.binom_control)\n len(self.binom_treatment)\n\n except:\n self.binom_distribution()\n\n control = self.binom_control\n treatment = self.binom_treatment\n\n control_upper = np.percentile(a = control, q = level + margin)\n control_lower = np.percentile(a = control, q = margin)\n self.interval_control = {'lower': control_lower, 'upper':control_upper, 'level':level}\n\n treatment_upper = np.percentile(a = treatment, q = level + margin)\n treatment_lower = np.percentile(a = treatment, q = margin)\n self.interval_treatment = {'lower': treatment_lower, 'upper':treatment_upper, 'level':level}\n\n return self.interval_control, self.interval_treatment",
"def confidence_at_995tpr(self):\r\n\r\n return self.confidence_at_tpr(0.995)",
"def ci(self):\n var_assumptions = self.var_assumptions if self.var_assumptions == \"pooled\" else \"unequal\"\n ci_vals = self.comparison.zconfint_diff(self.alpha, self.hypothesis_sm, var_assumptions)\n\n return [ci_vals, self.ci_percents]",
"def _ci(arr, ci=0.95, method=\"bootstrap\", n_bootstraps=2000, random_state=None):\n if method == \"bootstrap\":\n return bootstrap_confidence_interval(\n arr, ci=ci, n_bootstraps=n_bootstraps, random_state=random_state\n )\n else:\n from .parametric import _parametric_ci\n\n return _parametric_ci(arr, ci=ci)",
"def test_conf_interval_normal_method_with_bounds(data):\n df = data[\"df\"]\n new_df = data[\"new_df\"]\n # ``quantile_estimation_method = \"normal_fit\"``\n # with enforced lower limit (``min_admissible_value``)\n ci_model = conf_interval(\n df=df,\n value_col=\"y\",\n residual_col=\"residual\",\n conditional_cols=[\"x\"],\n quantiles=[0.005, 0.025, 0.975, 0.995],\n quantile_estimation_method=\"normal_fit\",\n sample_size_thresh=5,\n small_sample_size_method=\"std_quantiles\",\n small_sample_size_quantile=0.95,\n min_admissible_value=290.0,\n max_admissible_value=None)\n\n pred_df = predict_ci(\n new_df,\n ci_model)\n assert list(pred_df.columns) == [\"x\", \"y_quantile_summary\", ERR_STD_COL], (\n \"pred_df does not have the expected column names\")\n pred_df[\"y_quantile_summary\"] = pred_df[\"y_quantile_summary\"].apply(\n lambda x: tuple(round(e, 2) for e in x))\n assert pred_df[\"y_quantile_summary\"].values[5] == (290.0, 290.25, 292.54, 292.9), (\n \"quantiles are incorrect\")\n assert pred_df[\"y_quantile_summary\"].values[11] == (290.0, 290.0, 290.0, 290.0), (\n \"quantiles are incorrect\")",
"def confidence_interval(data, control_label=None, *args, **kwargs):\n def fn(control, test):\n c_means = CompareMeans(DescrStatsW(test), DescrStatsW(control))\n if _is_proportion(control, test):\n return c_means.zconfint_diff()\n else:\n return c_means.tconfint_diff()\n\n return _apply(data, fn, control_label)",
"def detection_confidence(self):\n return self._detection_confidence",
"def nse_bound(self) -> float:\n nse_ = self.nse()\n nse_c2m_ = nse_ / (2 - nse_)\n\n return nse_c2m_",
"def _lower_confidence_bound(self, NA: int, N: int, alpha: float) -> float:\n return proportion_confint(NA, N, alpha=2 * alpha, method=\"beta\")[0]",
"def test_confidence_intervals(self):\n # Taken from a T-Test table\n\n # Two Tailed\n p, ci = _p_value_and_confidence_intervals(2.228, 10, 'two')\n\n self.assertAlmostEqual(p, .05, delta = .001)\n self.assertTrue(ci.shape == (2, ))\n np.testing.assert_allclose(ci, [-2.228, 2.228], atol=.001)\n\n # Left One-Tailed\n p, ci = _p_value_and_confidence_intervals(1.895, 7, 'left')\n\n self.assertAlmostEqual(p, .05, delta = .001)\n self.assertTrue(ci.shape == (2, ))\n self.assertTrue(np.isinf(ci[0]))\n np.testing.assert_allclose(ci, [-np.inf, 1.895], atol=.001)\n\n # Right One-Tailed\n p, ci = _p_value_and_confidence_intervals(1.761, 14, 'right')\n\n self.assertAlmostEqual(1-p, .05, delta = .001)\n self.assertTrue(ci.shape == (2, ))\n self.assertTrue(np.isinf(ci[1])) \n np.testing.assert_allclose(ci, [-1.761, np.inf], atol=.001)",
"def bootstrap_confidence_interval(\n arr, ci=0.95, n_bootstraps=2000, stat_fun=\"mean\", random_state=None\n):\n if stat_fun == \"mean\":\n\n def stat_fun(x):\n return x.mean(axis=0)\n\n elif stat_fun == \"median\":\n\n def stat_fun(x):\n return np.median(x, axis=0)\n\n elif not callable(stat_fun):\n raise ValueError(\"stat_fun must be 'mean', 'median' or callable.\")\n n_trials = arr.shape[0]\n indices = np.arange(n_trials, dtype=int) # BCA would be cool to have too\n rng = check_random_state(random_state)\n boot_indices = rng.choice(indices, replace=True, size=(n_bootstraps, len(indices)))\n stat = np.array([stat_fun(arr[inds]) for inds in boot_indices])\n ci = (((1 - ci) / 2) * 100, ((1 - ((1 - ci) / 2))) * 100)\n ci_low, ci_up = np.percentile(stat, ci, axis=0)\n return np.array([ci_low, ci_up])",
"def confidence(self):\n\n choices = self.choices\n\n # Get the chi-squared between the top two choices, if more than two choices exist\n if len(choices) >= 2:\n csq = chi_squared(*choices)\n confident = is_confident(csq, len(choices)) if len(choices) <= 10 else None\n else:\n csq = None\n confident = False\n\n return (csq, confident)",
"def get_fit_intercept(self):\n return self.fit_intercept",
"def get_confidence(cls, X, y=None):\n scores = []\n for metric_wrapper, weight in cls.confidence_computation.items():\n scores.append(metric_wrapper.calculate(X) * weight)\n return sum(scores)",
"def confidence(samples, confidence_level):\n mean = scipy.mean(samples)\n sdev = scipy.std(samples)\n n = len(samples)\n df = n - 1\n t = distributions.t.ppf((1+confidence_level)/2.0, df)\n interval = (interval_low, interval_high) = ( mean - t * sdev / math.sqrt(n) , mean + t * sdev / math.sqrt(n) )\n interval_size = interval_high - interval_low\n interval_percentage = interval_size / mean * 100.0\n return (interval, mean, sdev, interval_percentage)",
"def confidence(s, p):\r\n p = Basic.sympify(p)\r\n assert p <= 1\r\n\r\n d = (s.b-s.a)*p / 2\r\n return (s.mean - d, s.mean + d)",
"def conf_int(self, alpha=.05, cols=None, method='default'):\n bse = self.bse\n\n if self.use_t:\n dist = stats.t\n df_resid = getattr(self, 'df_resid_inference', self.df_resid)\n q = dist.ppf(1 - alpha / 2, df_resid)\n else:\n dist = stats.norm\n q = dist.ppf(1 - alpha / 2)\n\n if cols is None:\n lower = self.params - q * bse\n upper = self.params + q * bse\n else:\n cols = np.asarray(cols)\n lower = self.params[cols] - q * bse[cols]\n upper = self.params[cols] + q * bse[cols]\n return np.asarray(lzip(lower, upper))",
"def test_conf_interval_normal_method(data):\n df = data[\"df\"]\n new_df = data[\"new_df\"]\n # ``quantile_estimation_method = \"normal_fit\"``\n ci_model = conf_interval(\n df=df,\n value_col=\"y\",\n residual_col=\"residual\",\n conditional_cols=[\"x\"],\n quantiles=[0.005, 0.025, 0.975, 0.995],\n quantile_estimation_method=\"normal_fit\",\n sample_size_thresh=5,\n small_sample_size_method=\"std_quantiles\",\n small_sample_size_quantile=0.95,\n min_admissible_value=None,\n max_admissible_value=None)\n\n pred_df = predict_ci(\n new_df,\n ci_model)\n assert list(pred_df.columns) == [\"x\", \"y_quantile_summary\", ERR_STD_COL], (\n \"pred_df does not have the expected column names\")\n pred_df[\"y_quantile_summary\"] = pred_df[\"y_quantile_summary\"].apply(\n lambda x: tuple(round(e, 2) for e in x))\n assert pred_df[\"y_quantile_summary\"].values[5] == (289.9, 290.25, 292.54, 292.9), (\n \"quantiles are incorrect\")\n assert pred_df[\"y_quantile_summary\"].values[11] == (-5.14, -4.88, -3.24, -2.98), (\n \"quantiles are incorrect\")",
"def plot_confidence_interval_for_data (model, X):\n preds = np.stack([t.predict(X) for t in model.estimators_], axis=1)\n preds_ds = pd.DataFrame()\n preds_ds['mean'] = preds.mean(axis=1)\n preds_ds['std'] = preds.std(axis=1)\n\n fig = plt.figure(figsize=(15,6))\n my_xticks = ['datapoint ' + str(i+1) for i in list(preds_ds.index)]\n plt.errorbar(x = preds_ds.index, y=preds_ds['mean'], yerr=preds_ds['std'], \n fmt='o', color='blue', ecolor='lightblue', capsize=3)\n plt.title('Confidence Interval for the predicted value')\n plt.xticks(preds_ds.index, my_xticks)\n for i in list(preds_ds.index):\n m, std = round(preds_ds['mean'][i],1), round(preds_ds['std'][i],2)\n s=f' pred={m} \\n std dev= {std}'\n plt.text(x = i, y=preds_ds['mean'][i], s=s ) \n plt.show()",
"def getInterval(self) -> float:\n\t\treturn self[self._bcni]",
"def get_params_bounds(self) -> np.array:\n pass",
"def landmarking_confidence(self):\n return self._landmarking_confidence",
"def getFitErr(self):\n return (self.fitSum2Err)",
"def upper_confidence(self, X):\n x = np.asarray(X).reshape(1, -1)\n mu, sigma = self.gpr.predict(x, return_std=True)\n\n return mu - self.beta * sigma",
"def Tolerance(self):\n\t\treturn self._get_attribute('tolerance')",
"def confidence_values(self) -> List[Union[int, str]]:\n\n return self._confidence_values",
"def compute_interval_limits(bias, acceleration, n_boots, ci=95):\n from scipy.stats import norm\n from numpy import isnan, nan\n\n alpha = _compute_alpha_from_ci(ci)\n\n alpha_low = alpha / 2\n alpha_high = 1 - (alpha / 2)\n\n z_low = norm.ppf(alpha_low)\n z_high = norm.ppf(alpha_high)\n\n kws = {'bias': bias, 'acceleration': acceleration}\n low = _compute_quantile(z_low, **kws)\n high = _compute_quantile(z_high, **kws)\n\n if isnan(low) or isnan(high):\n return low, high\n\n else:\n low = int(norm.cdf(low) * n_boots)\n high = int(norm.cdf(high) * n_boots)\n return low, high",
"def inrse(self) -> float:\n return float(np.sqrt(np.sum(np.square(self._error())) / np.sum(np.square(self.true - np.mean(self.true)))))",
"def mean_confidence_interval(data, confidence=0.95):\n\n a = 1.0 * np.array(data)\n n = len(a)\n m, se = np.mean(a), scipy.stats.sem(a)\n h = se * scipy.stats.t.ppf((1 + confidence) / 2., n-1)\n return m, m-h, m+h",
"def predictions_conf(self):\n return self._pred_L, self._pred_R",
"def param_bounds(self) -> Optional[Sequence[Tuple[float, float]]]:\n return [(-1.0, 1.0)] * len(list(self.params()))",
"def confidence_coefficient( confidence_level, dimensions=1 ):\n return np.sqrt(chi2.ppf(confidence_level, df=dimensions))",
"def get_confidence_interval(\n num_people,\n num_iter=1000000,\n percentile=2.576,\n num_days=365,\n):\n mean = 0.0\n variance = 0.0 # not exactly\n for i in range(1, num_iter + 1):\n x = [randint(1, num_days) for person in range(num_people)]\n x.sort()\n is_consecutive = any(p + 1 == q for (p, q) in zip(x[:-1], x[1:], strict=True))\n is_a_loop = x[0] + num_days - 1 == x[-1]\n is_positive = int(is_consecutive or is_a_loop)\n delta = is_positive - mean\n mean += delta / float(i)\n variance += delta * (is_positive - mean)\n sd = sqrt(variance / float(num_iter - 1))\n lower_bound = mean - percentile * sd / sqrt(num_iter)\n upper_bound = mean + percentile * sd / sqrt(num_iter)\n print(\n \"Number of people: {}\\tLower bound: {:2.5%}\\tUpper bound: {:2.5%}\".format(\n num_people,\n lower_bound,\n upper_bound,\n ),\n )\n return lower_bound, upper_bound",
"def compute_confidence_interval(self) -> bool:\n return False",
"def confidence(s, p):\r\n\r\n if p == 1:\r\n return (-oo, oo)\r\n\r\n assert p <= 1\r\n\r\n # In terms of n*sigma, we have n = sqrt(2)*ierf(p). The inverse\r\n # error function is not yet implemented in SymPy but can easily be\r\n # computed numerically\r\n\r\n from sympy.numerics import Float, secant, evalf\r\n from sympy.numerics.functions2 import erf\r\n p = evalf(p)\r\n # calculate y = ierf(p) by solving erf(y) - p = 0\r\n y = secant(lambda y: erf(y) - p, 0)\r\n t = Real(str(evalf(s.sigma) * Float(2)**0.5 * y))\r\n mu = s.mu.evalf()\r\n return (mu-t, mu+t)",
"def interval(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"interval\")",
"def bootstrap_ci(x, n=300, ci=0.95):\n\n low_per = 100 * (1 - ci) / 2\n high_per = 100 * ci + low_per\n x = removena_numpy(x)\n if not len(x):\n return (np.nan, np.nan)\n bootstrap_samples = choice(a=x, size=(\n len(x), n), replace = True).mean(axis = 0)\n return np.percentile(bootstrap_samples, [low_per, high_per])",
"def _get_model_confidence_mean(self, exog, alpha=0.1):\n\n res = self._model.fit()\n\n y_fit = self._model.predict(res.params, exog=exog)\n\n u_ci = np.empty(y_fit.shape)\n l_ci = np.empty(y_fit.shape)\n\n x_prime_x_inverse = np.linalg.inv(np.dot(self._model.exog.transpose(), self._model.exog))\n\n t_ppf_value = stats.t.ppf(1 - alpha / 2, self._model.df_resid)\n\n for i in range(len(u_ci)):\n leverage = np.dot(exog[i, :], np.dot(x_prime_x_inverse, exog[i, :]))\n\n interval_distance = t_ppf_value * np.sqrt(res.mse_resid * leverage)\n\n u_ci[i] = y_fit[i] + interval_distance\n l_ci[i] = y_fit[i] - interval_distance\n\n return y_fit, l_ci, u_ci",
"def get_threshold(self):\n confs = self.confidence[\"conf\"]\n\n return compute_minimum_kernel_density(confs)",
"def test_conf_interval_normal_method_no_conditionals(data):\n df = data[\"df\"]\n new_df = data[\"new_df\"]\n # ``quantile_estimation_method = \"normal_fit\"``;\n # with no ``conditional_cols``\n ci_model = conf_interval(\n df=df,\n value_col=\"y\",\n residual_col=\"residual\",\n conditional_cols=None,\n quantiles=[0.005, 0.025, 0.975, 0.995],\n quantile_estimation_method=\"normal_fit\",\n sample_size_thresh=5,\n small_sample_size_method=\"std_quantiles\",\n small_sample_size_quantile=0.95,\n min_admissible_value=None,\n max_admissible_value=None)\n\n pred_df = predict_ci(\n new_df,\n ci_model)\n assert list(pred_df.columns) == [\"y_quantile_summary\", ERR_STD_COL], (\n \"pred_df does not have the expected column names\")\n pred_df[\"y_quantile_summary\"] = pred_df[\"y_quantile_summary\"].apply(\n lambda x: tuple(round(e, 2) for e in x))\n assert pred_df[\"y_quantile_summary\"].values[5] == (290.05, 290.37, 292.42, 292.74), (\n \"quantiles are incorrect\")\n assert pred_df[\"y_quantile_summary\"].values[11] == (-5.41, -5.08, -3.04, -2.72), (\n \"quantiles are incorrect\")",
"def compute_credible_interval(vals, weights, confidence: float = 0.95):\n if confidence <= 0.0 or confidence >= 1.0:\n raise ValueError(\n f\"Confidence {confidence} must be in the interval (0.0, 1.0).\"\n )\n alpha_lb = 0.5 * (1.0 - confidence)\n alpha_ub = confidence + alpha_lb\n lb = compute_quantile(vals, weights, alpha_lb)\n ub = compute_quantile(vals, weights, alpha_ub)\n return lb, ub",
"def cci(self) -> float:\n return self._cci",
"def confidence(self, value):\n if not self.can_update():\n self._handle_error(910, [self.type])\n request_data = {'confidence': value}\n return self.tc_requests.update(\n self.api_type, self.api_branch, self.unique_id, request_data, owner=self.owner\n )",
"def tolerance(self):\n return self.params['tolerance']",
"def get_bounds_parameters(self):\n bounds = []\n bounds += self.var_noise.bounds\n bounds += self.mean.bounds\n bounds += self.kernel.get_bounds_parameters()\n\n return bounds",
"def mean_confidence_interval(data, confidence=0.95):\n a = 1.0 * np.array(data)\n n = len(a)\n m, se = np.mean(a), stats.sem(a)\n h = se * stats.t._ppf((1 + confidence) /2., n - 1)\n return m, m - h, m + h",
"def _curvature(self):\n y_eval = self.left_fitx.shape[0] - 10\n left_curverad = (((1 + (2 * self.left_fit[0] * y_eval + self.left_fit[1]) ** 2) ** 1.5) /\n np.absolute(2 * self.left_fit[0]))\n right_curverad = (((1 + (2 * self.right_fit[0] * y_eval + self.right_fit[1]) ** 2) ** 1.5) /\n np.absolute(2 * self.right_fit[0]))\n return left_curverad, right_curverad",
"def __call__(self, **kwargs):\n stddev = self.predictive_distribution.stddev(**kwargs)\n mean = self.predictive_distribution.mean(**kwargs)\n return normal_upper_confidence_bound(\n mean, stddev, exploration=self.exploration)",
"def find_confidence(self, chi2, df):\n chi2_table = self.chi2_table\n nearest_df = round(find_nearest(chi2_table.index, df), 0)\n nearest_chi2 = round(find_nearest(chi2_table.loc[nearest_df], chi2), 6)\n for col in list(chi2_table):\n if nearest_chi2 == round(chi2_table[col][nearest_df], 6):\n # Subtract from one to get confidence.\n confidence = (1.0 - float(col))\n return confidence",
"def test_conf_interval_normal_method_no_small_sample_calc(data):\n df = data[\"df\"]\n new_df = data[\"new_df\"]\n # ``quantile_estimation_method = \"normal_fit\"``;\n # with no small sample size calculation\n ci_model = conf_interval(\n df=df,\n value_col=\"y\",\n residual_col=\"residual\",\n conditional_cols=[\"x\"],\n quantiles=[0.005, 0.025, 0.975, 0.995],\n quantile_estimation_method=\"normal_fit\",\n sample_size_thresh=None,\n small_sample_size_method=None,\n small_sample_size_quantile=None,\n min_admissible_value=None,\n max_admissible_value=None)\n\n pred_df = predict_ci(\n new_df,\n ci_model)\n assert list(pred_df.columns) == [\"x\", \"y_quantile_summary\", ERR_STD_COL], (\n \"pred_df does not have the expected column names\")\n pred_df[\"y_quantile_summary\"] = pred_df[\"y_quantile_summary\"].apply(\n lambda x: tuple(round(e, 2) for e in x))\n assert pred_df[\"y_quantile_summary\"].values[5] == (289.9, 290.25, 292.54, 292.9), (\n \"quantiles are incorrect\")\n assert pred_df[\"y_quantile_summary\"].values[11] == (-5.64, -5.26, -2.86, -2.49), (\n \"quantiles are incorrect\")",
"def interval(self) -> int:\n return pulumi.get(self, \"interval\")",
"def get_ymin(self):\n return self.__ymin",
"def find_confidence(self, t, df):\n t_table = self.t_table\n nearest_df = round(find_nearest(t_table.index, df), 0)\n nearest_t = round(find_nearest(t_table.loc[nearest_df], t), 6)\n for col in list(t_table):\n if nearest_t == round(t_table[col][nearest_df], 6):\n # Subtract from one to get confidence, divide by two to get\n # single section on positive side of distribution.\n confidence = (1.0 - float(col)) / 2.0\n return confidence",
"def confidence_ellipsoid(data: Dict[str, np.ndarray], lambda_: float = 1e-5, delta: float = 0.1, sigma: float = 0.1,\n param_bound: float = 1.0) -> Tuple[np.ndarray, np.ndarray, float]:\n phi = np.array(data[\"features\"])\n y = np.array(data[\"outputs\"])\n g_n_lambda = 1/sigma * np.transpose(phi) @ phi + lambda_ * np.identity(phi.shape[-1])\n theta_n_lambda = np.linalg.inv(g_n_lambda) @ np.transpose(phi) @ y / sigma\n d = theta_n_lambda.shape[0]\n beta_n = np.sqrt(2*np.log(np.sqrt(np.linalg.det(g_n_lambda) / lambda_ ** d) / delta)) + \\\n np.sqrt(lambda_*d) * param_bound\n return theta_n_lambda, g_n_lambda, beta_n",
"def tol(self):\n return self._tol",
"def confidence_at_tpr(self, tpr):\r\n\r\n assert self.validation_confidences is not None\r\n assert tpr > 0\r\n\r\n # true positives are correctly classified examples\r\n if self.sorted_correct_validation_confidences is None:\r\n correct_validation_confidences = self.validation_confidences[numpy.logical_not(self.validation_errors)]\r\n self.sorted_correct_validation_confidences = numpy.sort(numpy.copy(correct_validation_confidences))\r\n # rounding is a hack see tests\r\n cutoff = math.floor(self.sorted_correct_validation_confidences.shape[0] * round((1 - tpr), 2))\r\n assert cutoff >= 0\r\n assert cutoff < self.sorted_correct_validation_confidences.shape[0]\r\n return self.sorted_correct_validation_confidences[cutoff]",
"def test_conf_interval_ecdf_method(data):\n df = data[\"df\"]\n new_df = data[\"new_df\"]\n\n # ``quantile_estimation_method = \"ecdf\"``\n ci_model = conf_interval(\n df=df,\n value_col=\"y\",\n residual_col=\"residual\",\n conditional_cols=[\"x\"],\n quantiles=[0.005, 0.025, 0.975, 0.995],\n quantile_estimation_method=\"ecdf\",\n sample_size_thresh=5,\n small_sample_size_method=\"std_quantiles\",\n small_sample_size_quantile=0.95,\n min_admissible_value=None,\n max_admissible_value=None)\n\n pred_df = predict_ci(\n new_df,\n ci_model)\n\n assert list(pred_df.columns) == [\"x\", \"y_quantile_summary\", ERR_STD_COL], (\n \"pred_df does not have the expected column names\")\n pred_df[\"y_quantile_summary\"] = pred_df[\"y_quantile_summary\"].apply(\n lambda x: tuple(round(e, 2) for e in x))\n pred_df[ERR_STD_COL] = round(pred_df[ERR_STD_COL], 2)\n assert pred_df[\"y_quantile_summary\"].values[5] == (289.32, 289.38, 291.3, 291.34), (\n \"quantiles are incorrect\")\n assert pred_df[\"y_quantile_summary\"].values[11] == (-5.63, -5.56, -4.13, -4.08), (\n \"quantiles are incorrect\")\n expected_stds = [0.29, 0.42, 0.42, 0.42, 0.42, 0.58, 0.58, 0.58, 0.58, 0.58,\n 0.58, 0.42]\n assert list(pred_df[ERR_STD_COL].values) == expected_stds",
"def ci_prop(p, n, conf_level=95):\n # calculate significance level\n alpha = np.round((1 - conf_level / 100), 2)\n # standard error\n std_error = np.sqrt(p * (1 - p) / n)\n # find the z critical value\n z_star = np.round(stats.norm.ppf(1 - alpha / 2), 3)\n # margin of error\n margin_of_error = np.round(z_star * std_error, 2)\n # calculate lower and upper confidence bounds\n lcb = np.round(p - margin_of_error, 2)\n ucb = np.round(p + margin_of_error, 2)\n\n print(\"Margin Of Error: {}\".format(margin_of_error))\n print(\n \"{}% Confidence Interval for Population Proportion: ({}, {})\".format(\n conf_level, lcb, ucb\n )\n )",
"def calc_confidence(self, last_n_matches=None):\n if last_n_matches is None:\n # use all the matches\n last_n_matches = len(self.slopes)\n\n # only start processing once we've gotten a few frames\n if len(self.matches_per_frame) < self.min_frames:\n return\n \n # calculate confidence interval of the slope of the last N matches\n np_slopes = np.array(self.slopes[-last_n_matches:])\n m = np.mean(np_slopes)\n se = scipy.stats.sem(np_slopes)\n h = se * scipy.stats.t.ppf((1+self.confidence)/2.0, len(np_slopes)-1)\n\n # update class variables\n self.confidence_interval = [m-h, m+h]\n self.rho = m",
"def get_bounds(self, parameter_name=None):\n if parameter_name is None:\n return [self.get_bounds(p)\n for p in self.shape_parameters]\n if parameter_name in self.shape_parameters.keys():\n bounds = []\n for ll in self.likelihood_list:\n if parameter_name in ll.shape_parameters.keys():\n bounds.append(ll.get_bounds(parameter_name))\n bounds = np.array(bounds)\n ret= np.max(bounds[:,0]), np.min(bounds[:,1])\n if ret[1] <= ret[0]:\n raise InvalidParameterSpecification(\"lower bound %s higher than upper bound!\" % parameter_name)\n return ret\n\n elif parameter_name.endswith('_rate_multiplier'):\n return 0, float('inf')\n else:\n raise InvalidParameter(\"Non-existing parameter %s\" % parameter_name)",
"def t_confidence_Interval_Difference_Of_Means(xSamples, ySamples, confidence):\n try:\n if len(xSamples) >= 30 or len(ySamples) >= 30:\n raise sampleSizeError(\"Should use normal distribution instead. m or n > 30.\")\n \n if confidence > 1:\n confidence = confidence / 100.0\n print(f\"Converting confidence interval to {confidence}\")\n\n elif type(confidence) != int or type(confidence) != float:\n raise ValueError(\"Confidence Interval must be a numeric value\")\n \n # Find mean and variance for both sample distributions\n n = len(xSamples) \n xBar = sample_mean(xSamples)\n xSampStd = sample_variance(xSamples) ** .5\n \n m = len(ySamples)\n yBar = sample_mean(ySamples)\n ySampStd = sample_variance(ySamples) ** .5\n \n # Find t at alpha/2 and the new distribution's sample size - 2\n # Calculate the sample pooling standard deviation\n tAlpha = (1 + confidence) / 2.0\n t = scipy.stats.t.ppf(tAlpha, (m + n - 2)) \n spsd = ((((n - 1)* (xSampStd**2)) + ((m - 1) * (ySampStd**2)))/(m + n - 2)) ** .5 \n \n # Find the lower and upper bound \n # (X-Y) (+/-) t((spsd * (((1/m)+(1/n)) **.5))\n lowerBound = (xBar - yBar) - t * (spsd * (((1/m)+(1/n)) **.5))\n upperBound = (xBar - yBar) + t * (spsd * (((1/m)+(1/n)) **.5))\n \n return lowerBound, upperBound\n \n except sampleSizeError as inst:\n print(inst.args[0])\n \n except ValueError as inst:\n print(inst.args[0])",
"def get_initial_params(self, x, y, yerr):\n estimated_max = max(y)\n estimated_min = min(y)\n y1 = map(int, y *1000)\n estimated_position = x[ y1.index(min(y1)) ]\n estimated_width = (max(x) - min(x)) / 20.0\n p0 = array([estimated_position, estimated_width, estimated_max, estimated_min])\n return p0",
"def checkpoint_interval(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"checkpoint_interval\")",
"def calc_conf(deviation, tolerance, mape):\n return (1 - ((mape / 100) * (deviation/tolerance))) * 100",
"def get_error(self, params):\n return self.endog - self.predict(params)",
"def rrse(self) -> float:\n return float(np.sqrt(self.rse()))",
"def curvature_max(self):\n return 1.0 / self.radius_min",
"def evaluation_periods(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"evaluation_periods\")",
"def evaluation_periods(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"evaluation_periods\")",
"def evaluation_periods(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"evaluation_periods\")",
"def evaluation_periods(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"evaluation_periods\")",
"def evaluation_periods(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"evaluation_periods\")",
"def evaluation_periods(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"evaluation_periods\")",
"def evaluation_periods(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"evaluation_periods\")",
"def evaluation_periods(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"evaluation_periods\")",
"def evaluation_periods(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"evaluation_periods\")",
"def evaluation_periods(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"evaluation_periods\")",
"def evaluation_periods(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"evaluation_periods\")"
] | [
"0.68219185",
"0.6498113",
"0.6498113",
"0.64727414",
"0.6388635",
"0.6311576",
"0.62752163",
"0.6229275",
"0.6110395",
"0.60803914",
"0.60660297",
"0.60526866",
"0.6051802",
"0.6024401",
"0.60003996",
"0.590071",
"0.5890141",
"0.58519065",
"0.5841124",
"0.58395547",
"0.57960707",
"0.5791978",
"0.5750795",
"0.57146114",
"0.5708533",
"0.5660279",
"0.56027275",
"0.55988663",
"0.55807775",
"0.55748606",
"0.55597264",
"0.55578417",
"0.55289936",
"0.552245",
"0.5505093",
"0.5495861",
"0.54869086",
"0.5465352",
"0.54542834",
"0.5426838",
"0.5420658",
"0.5416277",
"0.54013944",
"0.5345726",
"0.53419733",
"0.53191745",
"0.5315224",
"0.5309324",
"0.53045136",
"0.5269593",
"0.5262822",
"0.525844",
"0.52540034",
"0.5247891",
"0.5237765",
"0.52292114",
"0.5222993",
"0.52174014",
"0.5216627",
"0.52112144",
"0.5207123",
"0.520696",
"0.5199154",
"0.51865184",
"0.5169459",
"0.51675487",
"0.51652",
"0.51576704",
"0.51501733",
"0.5136987",
"0.51354885",
"0.513012",
"0.5130035",
"0.5114717",
"0.5106983",
"0.51045555",
"0.50986856",
"0.5092905",
"0.5090763",
"0.5083654",
"0.50702375",
"0.50678676",
"0.5067709",
"0.5064202",
"0.5061221",
"0.5060258",
"0.5055021",
"0.50491333",
"0.5044849",
"0.50406814",
"0.50406814",
"0.50406814",
"0.50406814",
"0.50406814",
"0.50406814",
"0.50406814",
"0.50406814",
"0.50406814",
"0.50406814",
"0.50406814"
] | 0.51247424 | 73 |
Configure app object blueprints and global variables. | def create_app(config='dev'):
if config == 'dev':
from .conf.config import DevelopmentConfig as dev_config
app = configure_app(Flask(__name__), dev_config)
else:
from .conf.config import ProdConfig
app = configure_app(Flask(__name__), ProdConfig)
# setup flask blueprints
configure_blueprints(app)
return app | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def configure_app(flask_app):\n flask_app.config['RESTPLUS_SWAGGER_UI_DOC_EXPANSION'] = \\\n settings.RESTPLUS_SWAGGER_UI_DOC_EXPANSION\n flask_app.config['RESTPLUS_VALIDATE'] = \\\n settings.RESTPLUS_VALIDATE\n\n flask_app.config['SQLALCHEMY_DATABASE_URI'] = \\\n settings.SQLALCHEMY_DATABASE_URI\n flask_app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = \\\n settings.SQLALCHEMY_TRACK_MODIFICATIONS",
"def _setup(app_obj):",
"def __init__(self, app: Sanic):\n self.configurations = app.config\n\n Configs.__instance = self",
"def init_app(app):\n app.register_blueprint(index_bl)\n app.register_blueprint(main_bl, url_prefix=\"/main\")\n app.register_blueprint(map_bl, url_prefix=\"/map\")\n app.register_blueprint(login_bl, url_prefix=\"/login\")\n app.register_blueprint(prof_bl, url_prefix=\"/profile\")\n app.register_blueprint(average_bl, url_prefix=\"/average\")",
"def initialize_app(app):\n # configure_app(app)\n # log.info(\"> Starting development server at http://%s/api/ <<<<<\" %\n # app.config[\"SERVER_NAME\"])\n\n blueprint_api = Blueprint('api', __name__, url_prefix='/api')\n api.init_app(blueprint_api)\n app.register_blueprint(blueprint_api)\n\n api.add_namespace(task_namespace)\n api.add_namespace(chain_namespace)\n\n Bootstrap(app)\n nav.init_app(app)\n app.register_blueprint(frontend_blueprint)\n app.register_blueprint(processors_blueprint)\n app.register_blueprint(chains_blueprint)\n app.register_blueprint(tasks_blueprint)\n app.register_blueprint(compare_blueprint)\n\n db.init_app(app)\n db.create_all(app=app)\n\n if not os.path.exists(app.config[\"OCRD_BUTLER_RESULTS\"]):\n os.makedirs(app.config[\"OCRD_BUTLER_RESULTS\"])",
"def init_app(self, app):\n self.app = app\n\n self._init_extension()\n\n # Register views\n for view in self._views:\n app.register_blueprint(view.create_blueprint(self))",
"def init_app(app, hive_setting):\n global about\n about = About(app, hive_setting)\n app.register_blueprint(blueprint)",
"def _configure(self):\n Application._configure(self)\n\n return",
"def prepare_app(self):\n self.app = Flask(self.APP_NAME)\n self.app.config.from_object('mmapi.config.Config')\n CORS(self.app, origins=self.app.config['CORS_ACCEPTED_ORIGINS'])\n\n # Map urls with and without a trailing slash to the same endpoint.\n self.app.url_map.strict_slashes = False",
"def init_app(config_object=ProdConfig):\n app = Flask(__name__)\n app.config.from_object(config_object)\n\n # Set up cross-site access to the API\n if app.config['SERVER_CORS']:\n CORS(app, resources={r\"/api/*\": {\"origins\": \"*\"}})\n app.config['CORS_HEADERS'] = 'Content-Type'\n\n # Set up using an external proxy/static server\n if app.config['SERVER_PROXY']:\n app.wsgi_app = ProxyFix(app, x_for=1, x_proto=1, x_host=1)\n else:\n # Internally optimize static file hosting\n app.wsgi_app = WhiteNoise(app.wsgi_app, prefix='static/')\n for static in ('css', 'img', 'js', 'public'):\n app.wsgi_app.add_files('dribdat/static/' + static)\n\n register_extensions(app)\n register_blueprints(app)\n register_oauthhandlers(app)\n register_errorhandlers(app)\n register_filters(app)\n register_loggers(app)\n register_shellcontext(app)\n register_commands(app)\n register_caching(app)\n return app",
"def init_app(self, app):\n\n app.config.setdefault('MYSQL_HOST', 'localhost')\n app.config.setdefault('MYSQL_USER', None)\n app.config.setdefault('MYSQL_PASSWORD', None)\n app.config.setdefault('MYSQL_DB', None)\n app.config.setdefault('MYSQL_PORT', 3306)\n app.config.setdefault('MYSQL_UNIX_SOCKET', None)\n app.config.setdefault('MYSQL_CONNECT_TIMEOUT', 10)\n app.config.setdefault('MYSQL_READ_DEFAULT_FILE', None)\n app.config.setdefault('MYSQL_USE_UNICODE', True)\n app.config.setdefault('MYSQL_CHARSET', 'utf8')\n app.config.setdefault('MYSQL_SQL_MODE', None)\n app.config.setdefault('MYSQL_CURSORCLASS', None)\n\n if hasattr(app, 'teardown_appcontext'):\n app.teardown_appcontext(self.teardown)",
"def init_app(app):\n api.add_namespace(ns)\n app.register_blueprint(bp, url_prefix='/api/v1')",
"def configure(self):\n inject(self.urls, self.names_for(\"urls\"))\n inject(self.models, self.names_for(\"models\"))\n self.load_admin()",
"def setup_application(self):\n pass",
"def create_app(self):\r\n self.app = Flask(__name__, instance_relative_config=True)\r\n\r\n # Init the secret key of the app -it is a must for flask to run\r\n self.app.config.from_mapping(\r\n SECRET_KEY='!ZNeverSayNever116Z!',\r\n MONGODB_SETTINGS= {'host': 'mongodb://localhost/opc_integrity'}\r\n )\r\n initialize_db(self.app)\r\n\r\n\r\n # Init the app with core routes\r\n routes.init_app(self.app)",
"def configure(app):\n api.add_resource(Event, '/event/')\n api.add_resource(EventItem, '/event/<event_id>')\n app.register_blueprint(bp_restapi)",
"def init_app(self, app):\n app.config.setdefault('YAWT_INDEXER_IFC', 'yawtext.whoosh')\n app.config.setdefault('YAWT_INDEXER_WHOOSH_INFO_FIELDS', {})\n app.config.setdefault('YAWT_INDEXER_WHOOSH_FIELDS',\n {'content': TEXT()})",
"def setup(self, app: VisModel):\n self.app = app",
"def setup(app):\r\n app.add_config_value('rosmsg_path_root', [], 'env')\r\n app.add_directive(\"ros_message\", MessageDirective)\r\n app.connect('config-inited', on_config_inited)\r\n return {\r\n 'version': __version__,\r\n }",
"def create_app(config_object):\n app = Flask(__name__)\n app.config.from_object(config_object)\n\n # add blueprint\n from app.api import api_bp\n app.register_blueprint(api_bp, url_prefix='/api/v1/')\n\n # add redis client\n from app.redis_init import redis_client\n redis_client.init_app(app)\n\n # add prometheus middleware\n from app.prometheus_middleware import setup_metrics\n setup_metrics(app)\n\n return app",
"def create_app(config_object):\n\n app = Flask(__name__)\n app.config.from_object(config_object)\n\n from app import api_bp\n app.register_blueprint(api_bp, url_prefix='/api')\n\n return app",
"def configure_blueprints(app):\n\n for blueprint in _blueprints:\n app.register_blueprint(blueprint)",
"def init_app(self, app):\n\n self.app = app\n self.app.apscheduler = self\n\n self._load_config()\n if self.api_enabled:\n self._load_api()",
"def configure(self):\n\n pass",
"def configure_app(self):\n self.app.route('/', callback=self.get_api)",
"def __init__(self, app) -> None:\n super().__init__()\n self._app = app",
"def init_app(self, app):\r\n\r\n app.config.setdefault('REDIS_URLS', {\r\n 'main': 'redis://localhost:6379/0',\r\n 'admin': 'redis://localhost:6379/1',\r\n })\r\n\r\n app.before_request(self.before_request)\r\n\r\n self.app = app",
"def setup(app):\n # Register builder.\n app.add_builder(BeamerBuilder)\n\n # Add setting for allowframebreaks.\n app.add_config_value(\"beamer_allowframebreaks\", True, \"beamer\")\n # Add setting for Beamer theme.\n app.add_config_value(\"beamer_theme\", \"Warsaw\", \"beamer\")\n # Adjust titles upon doctree-resolved.\n app.connect(\"doctree-resolved\", adjust_titles)\n\n return {\n \"version\": \"1.0\",\n \"parallel_read_safe\": True,\n \"parallel_write_safe\": True,\n }",
"def _configure(self):\n pass",
"def configure(self):\n pass",
"def configure(self):\n pass",
"def create_app(config_object):\n app = Flask('song_recommender',static_folder = None)\n app.config.from_object(config_object)\n with app.app_context():\n # Imports\n from .views import view\n\n # REGISTER ROUTES\n app.register_blueprint(view)\n\n return app",
"def init_app(app: object = None) -> None:\n config = get_application_config(app)\n config.setdefault(\n \"FULLTEXT_ENDPOINT\", \"https://fulltext.arxiv.org/fulltext/\"\n )",
"def app(self):\n\n ## set flask specific things that are non-optional\n error = lambda k: 'Fatal: You need to specify a \"flask\" section ' + \\\n 'with an entry like \"'+k+'=...\" in your .ini file'\n try: app_name = self['flask.app']\n except KeyError: raise SystemExit(error('app'))\n try: secret_key = self['flask.secret_key']\n except KeyError: raise SystemExit(error('secret_key'))\n app = Flask(app_name)\n app.secret_key = secret_key\n\n ## set flask specific things that are optional\n if 'flask.template_path' in self:\n app.jinja_loader = FileSystemLoader(self['template_path'])\n if 'flask.before_request' in self:\n before_request = self['flask.before_request']\n before_request = namedAny(before_request)\n app.before_request(before_request)\n if 'flask.after_request' in self:\n after_request = self['flask.after_request']\n after_request = namedAny(after_request)\n app.after_request(after_request)\n\n ## setup views\n try: view_holder = self['corkscrew.views']\n except KeyError:\n error = 'Fatal: could not \"view=<dotpath>\" entry in your .ini file'\n raise SystemExit(error)\n else:\n view_list = namedAny(view_holder)\n [ v(app=app, settings=self) for v in view_list]\n\n return app",
"def init_app(self, app):\n stripe.api_key = app.config['STRIPE_API_KEY']",
"def init_app(self):\n self.app.config.setdefault('MACL_DEFINITION', None)\n self.app.config.setdefault('MACL_CLASS', None)\n self.app.config.setdefault('MACL_ERROR_MESSAGE',\n 'You do not have access to this resource')\n\n self.app.miracle_acl_manager = self\n\n self.load_acl()",
"def configure(self):\r\n pass",
"def init_app(app, hive_setting):\n # global scripting\n # scripting = Scripting(app=app, hive_setting=hive_setting)\n app.register_blueprint(blueprint)",
"def init_app(app, hive_setting):\n # global scripting\n # scripting = Scripting(app=app, hive_setting=hive_setting)\n app.register_blueprint(blueprint)",
"def app(self, app):\n\n self._app = app",
"def setup_app():\n\n # 1 Create Flask application\n app = Flask(\n import_name=__name__,\n template_folder=\"templates\",\n static_folder=\"static\"\n )\n\n # 2 Update the apps configuration\n app = config_selector(app)\n register_error_handlers(app)\n\n cache.init_app(app)\n\n # 3 Set up logger\n setup_logger(app.config)\n LOGGER.info(\"Set up app & logger.\")\n\n # 4 Init clients\n init_clients(app.config)\n\n # 5 Init Daemon\n start_daemon(app.config)\n\n # 6 Register blueprints\n register_blueprints(app)\n Bootstrap(app)\n\n return app",
"def init_application(app, config):\n app.config.from_object(config)\n\n api = Api(app)\n api.add_resource(Build, config.WSPATH)\n api.add_resource(Request, config.WSPATH + '/<request_id>')\n api.add_resource(Result, config.WSPATH + '/<request_id>/result')\n api.add_resource(Image, config.WSPATH + '/<request_id>/result/image')\n api.add_resource(Output, config.WSPATH + '/<request_id>/result/output/<int:output_id>')\n api.add_resource(Log, config.WSPATH + '/<request_id>/result/log')\n\n AgroLogHandler(app).init()\n app.logger.info(\"Flask Application initialized\")",
"def setup_app_config(self, app: Flask) -> Flask:\n app.secret_key = config.get_setting('SECRET_KEY')\n return app",
"def __init__(self, app):\n pass",
"def configure(self):",
"def configure(self):",
"def configure(self):",
"def configure(self):",
"def create_app(configobj=ProdConfig):\n\n app = Flask(__name__)\n app.config.from_object(configobj)\n configure_blueprints(app)\n configure_extensions(app)\n configure_callbacks(app)\n configure_filters(app)\n configure_error_handlers(app)\n return app",
"def configure_app(self, defaults: t.Optional[DefaultConfig]) -> None:\n self.config = Config(defaults)",
"def create_app(config_object='fifa_app.settings'):\n # FLASK APP OBJECT\n app = Flask(__name__, \n template_folder='views/templates',\n static_folder='views/static')\n\n # APP CONFIGS\n app.config.from_object(config_object)\n app.url_map.converters['objectid'] = ObjectIdConverter\n app.json_encoder = MongoJSONEncoder\n\n # LOAD EXTENSIONS\n mongo.init_app(app)\n\n # REGISTER APP ELEMENTS\n register_api(app)\n \n return app",
"def main(global_config, **settings):\n # add settings in here?\n config = Configurator(settings=settings)\n config.include('pyramid_jinja2')\n config.include('.models')\n config.include('.routes')\n config.include('.security')\n config.scan()\n return config.make_wsgi_app()",
"def create_app(self):\n app.config.from_object('config.TestingConfig')\n return app",
"def configure(cls):\n pass",
"def setup(self):\n file_under_test = os.path.join(os.curdir, 'application-core',\n 'app.core.config.xml')\n with open(file_under_test) as f:\n config = f.read()\n self.config = objectify.fromstring(config)",
"def set_config(app):\n # set config from config.py\n app.config.from_object('config')\n\n # override config from secret conf files\n pi_home = os.path.dirname(app.config['ENVPATH']) # /home/pi\n secret_conf_dir = os.path.join(pi_home, 'CONFIG_CHAUDIERE') # /home/pi/CONFIG_CHAUDIERE\n secret_conf_com_file = 'chaudiere_secret_config.py'\n secret_conf_com = secret_conf_dir+'/'+secret_conf_com_file\n try:\n with open(secret_conf_com) as f:\n json_config = json.load(f)\n for conf in ['Common', app.config['ENVNAME']]:\n app.config.update(json_config[conf])\n except IOError as e:\n print('IOError loading conf file (file not existing?): ' + secret_conf_com + str(e))\n except ValueError as e:\n print('ValueError loading JSON : ' + secret_conf_com + ' ' + str(e))\n\n #app.config['USERS_EMAILS'] = list(map(lambda x: x+'@gmail.com', app.config['USERS'])) \n # app.logger.error('test error') # <-- This works !!! ",
"def register_blueprints_on_app(app):\n app.register_blueprint(views.main_pages)\n app.register_blueprint(views.main_api, url_prefix='/api')",
"def create_app(config_object=Config):\n app = Flask(__name__.split('.')[0], static_folder='../client/build/static', template_folder=\"../client/build\")\n\n app.url_map.strict_slashes = False\n app.config.from_object(config_object)\n db.init_app(app)\n cache.init_app(app)\n register_blueprints(app)\n register_error_handler(app)\n \n\n return app",
"def init_settings(self):\n self.app.config.setdefault('SIMPLE_DOMAINS', [])\n self.app.config.setdefault('AWS_ACCESS_KEY_ID', environ.get('AWS_ACCESS_KEY_ID'))\n self.app.config.setdefault('AWS_SECRET_ACCESS_KEY', environ.get('AWS_SECRET_ACCESS_KEY'))\n self.app.config.setdefault('AWS_REGION', environ.get('AWS_REGION', self.DEFAULT_REGION))",
"def main(global_config, **settings):\n config = Configurator(settings=settings)\n config.include('clldmpg')\n config.registry.registerUtility(link_attrs, interfaces.ILinkAttrs)\n home_comp = config.registry.settings['home_comp']\n home_comp.append('software')\n home_comp.append('contribute')\n config.add_settings(home_comp=home_comp)\n config.add_route('software', '/software')\n config.add_route('contribute', '/contribute')\n return config.make_wsgi_app()",
"def initialize(self, application):",
"def main(global_config, **settings):\n config = Configurator(settings=settings)\n\n config.add_directive('add_restful_routes', routing.add_restful_routes)\n set_globals(**settings)\n\n from . import config as global_config\n\n secret = str(uuid.uuid4())\n\n # Beaker include\n config.include('pyramid_beaker')\n\n if global_config.AUTH_ENABLED is True:\n\n authn_policy = AuthTktAuthenticationPolicy(secret,\n callback=model.user_callback, hashalg='sha512', include_ip=global_config.AUTH_INCLUDE_IP)\n authz_policy = ACLAuthorizationPolicy()\n\n config.set_authentication_policy(authn_policy)\n config.set_authorization_policy(authz_policy)\n\n model.make_restful_app()\n routing.make_routes(config)\n config.scan()\n\n return config.make_wsgi_app()",
"def initialize_app(flask_app):\n # Create a blueprint to house the API, swagger can be reached from /api\n # and each of the models from /api/[model]\n blueprint = Blueprint('api', __name__, url_prefix='/api')\n api.init_app(blueprint)\n\n # Configure namespaces per model on the API.\n api.add_namespace(noms_namespace)\n\n flask_app.register_blueprint(blueprint)\n db.init_app(flask_app)\n\n with flask_app.app_context():\n db.create_all()",
"def create_app():\r\n app = Flask(__name__, instance_relative_config=False)\r\n app.config.from_object('config.Config') \r\n \r\n api = Api(app) \r\n \r\n with app.app_context():\r\n from .flights import TicketRoute, FlightRoute\r\n api.add_resource(TicketRoute,\"/api/tickets\")\r\n api.add_resource(FlightRoute,\"/api/flights\")\r\n \r\n \r\n return app",
"def main(global_config, **settings):\n SETTINGS = settings\n config = Configurator(settings=settings,)\n config.include('pyramid_jinja2')\n config.include('.models')\n config.include('.cors')\n config.add_cors_preflight_handler()\n config.include('.routes')\n config.include('.security')\n config.include('..greggo')\n config.add_static_view('static', path='repoll:static')\n config.scan()\n return config.make_wsgi_app()",
"def _init_app(self):\n\n self._app = FastAPI(**self._app_kws)\n\n for rt, kwargs in self._app_routers:\n self._app.include_router(rt, **kwargs)\n\n self._app.dependency_overrides[get_dataset] = lambda: self._obj\n self._app.dependency_overrides[get_cache] = lambda: self.cache\n\n return self._app",
"def init_app(self, app):\n try:\n # Assume this is a blueprint and defer initialization\n if app._got_registered_once is True:\n raise ValueError(\"\"\"Blueprint is already registered with an app.\"\"\")\n app.record(self._deferred_blueprint_init)\n except AttributeError:\n self._init_app(app)\n else:\n self.blueprint = app",
"def create_app(**config_overrides):\n # we want to modify the global app, not a local copy\n global app\n global eventum\n\n app = Flask(__name__)\n\n # Load config then apply overrides\n app.config.from_object('config.flask_config')\n app.config.update(config_overrides)\n\n # Initialize assets\n assets = Environment(app)\n register_scss(assets)\n\n # Eventum\n eventum = Eventum(app)\n\n # Blueprints\n register_blueprints()\n\n return app",
"def configure(self) -> None:",
"def init_config():\n if 'FLASKUTILS_SETTINGS_MODULE' not in os.environ:\n raise ConfigurationError('No settings has been defined')\n\n app.config['BASE_DIR'] = BASE_DIR\n\n # default settings\n for v in dir(default_settings):\n if not v.startswith('_'):\n app.config[v] = getattr(default_settings, v)\n\n app.debug = app.config['DEBUG']\n\n # app settings\n settings = importlib.import_module(\n os.environ['FLASKUTILS_SETTINGS_MODULE'])\n for v in dir(settings):\n if not v.startswith('_'):\n app.config[v] = getattr(settings, v)\n\n def init_urls():\n # Reads urls definition from URLs file and bind routes and views\n urls_module = importlib.import_module(app.config['URLS'])\n for route in urls_module.URLS:\n app.add_url_rule(\n route[0], view_func=route[1].as_view(route[2]))\n\n def init_postgres(testing):\n \"\"\"\n If postgresql url is defined in configuration params a\n scoped session will be created and will be used by\n pgsqlutils\n https://github.com/Riffstation/sqlalchemypostgresutils\n \"\"\"\n if 'POSTGRESQL_DATABASE_URI' in app.config:\n if not testing:\n # not testing will use request context as scope\n # for sqlalchemy Session object\n from flask import _app_ctx_stack\n import pgsqlutils.base as pgbase\n from pgsqlutils.base import get_db_conf, init_db_conn\n from sqlalchemy.orm import sessionmaker, scoped_session\n dbconf = get_db_conf()\n dbconf.DATABASE_URI = app.config['POSTGRESQL_DATABASE_URI']\n # monkey patching to replace default session\n # by a sessing handled by flask\n pgbase.Session = scoped_session(\n sessionmaker(),\n scopefunc=_app_ctx_stack.__ident_func__)\n init_db_conn()\n else:\n # Testing will use current thread as scope for Session\n from pgsqlutils.base import get_db_conf, init_db_conn\n dbconf = get_db_conf()\n dbconf.DATABASE_URI = app.config['POSTGRESQL_DATABASE_URI']\n init_db_conn()\n\n def init_logging():\n \"\"\"\n initialize logger for the app\n \"\"\"\n app.logger.addHandler(logging.StreamHandler())\n log_level = app.config['LOG_LEVEL']\n app.logger.setLevel(getattr(logging, log_level))\n\n init_urls()\n init_postgres(testing)\n init_logging()",
"def init_config(self):\n pass",
"def __init__(self, appname):\n self.exepath = '%s' % (os.path.dirname(os.path.realpath(__file__)))\n self.cnfgfile = '%s/versions.cfg' % self.exepath\n self.static_path = '%s/app/static' % self.exepath\n self.config = ConfigParser.RawConfigParser()",
"def setup(app) -> Dict[str, Any]:\n app.add_config_value(\"uqbar_book_console_setup\", [], \"env\")\n app.add_config_value(\"uqbar_book_console_teardown\", [], \"env\")\n app.add_config_value(\n \"uqbar_book_extensions\", [\"uqbar.book.extensions.GraphExtension\"], \"env\"\n )\n app.add_config_value(\"uqbar_book_strict\", False, \"env\")\n app.add_config_value(\"uqbar_book_use_black\", False, \"env\")\n app.add_config_value(\"uqbar_book_use_cache\", True, \"env\")\n app.add_config_value(\"uqbar_book_block_options\", {}, \"env\")\n app.add_directive(\"book\", UqbarBookDirective)\n app.add_directive(\"book-defaults\", UqbarBookDefaultsDirective)\n app.add_directive(\"book-import\", UqbarBookImportDirective)\n\n for node_class in [uqbar_book_defaults_block, uqbar_book_import_block]:\n app.add_node(\n node_class,\n html=[skip_node, None],\n latex=[skip_node, None],\n text=[skip_node, None],\n )\n app.connect(\"builder-inited\", on_builder_inited)\n app.connect(\"config-inited\", on_config_inited)\n app.connect(\"doctree-read\", on_doctree_read)\n app.connect(\"build-finished\", on_build_finished)\n return {\n \"version\": uqbar.__version__,\n \"parallel_read_safe\": False,\n \"parallel_write_safe\": True,\n }",
"def init_rest(app_):\n\n rest_api = Api(app_)\n rest_api.add_resource(views.rest_resources.AppListResource,\n ActiveConfig.REST_URL_APPS_LIST,\n ActiveConfig.REST_URL_APPS_LIST + '/')\n rest_api.add_resource(views.rest_resources.AppResource,\n ActiveConfig.REST_URL_APPS_ITEM,\n ActiveConfig.REST_URL_APPS,\n ActiveConfig.REST_URL_APPS + '/')",
"def create_app(config_filename=None, config_object=None):\n app = Flask(__name__)\n\n app.config.from_object('psephology.config.default')\n if config_filename is not None:\n app.config.from_pyfile(config_filename)\n if config_object is not None:\n app.config.from_object(config_object)\n\n db.init_app(app)\n migrate.init_app(app, db, render_as_batch=True)\n\n app.register_blueprint(ui)\n app.register_blueprint(api, url_prefix='/api')\n app.cli.add_command(cli)\n\n # Things which should only be present in DEBUG-enabled apps\n app.debug = app.config.get('DEBUG', False)\n if app.debug:\n from flask_debugtoolbar import DebugToolbarExtension\n toolbar = DebugToolbarExtension()\n toolbar.init_app(app)\n\n return app",
"def setup_environ(app, global_conf, app_conf):\n from example.lib.templating import Templating\n templating = Templating(app_conf)\n\n def application(environ, start_response):\n environ['restish.templating'] = templating\n environ['couchish'] = adminish.config.make_couchish_store(app_conf, 'example.model')\n environ['adminish'] = adminish.config.make_adminish_config(environ['couchish'].config.types)\n return app(environ, start_response)\n\n return application",
"def create_app(self):\n app = Flask(__name__)\n\n app.config[\"auth_func\"] = self.auth_func\n app.config[\"hydrator_func\"] = self.hydrator_func\n app.config[\"request_hydrator_func\"] = self.request_hydrator_func\n app.config[\"database_uri\"] = self.database_uri\n app.config[\"hmac_secret\"] = self.hmac_secret\n\n cors = CORS()\n cors.init_app(app, resources={r\"/*\": {\"origins\": self.cors_origins, \"supports_credentials\": True}})\n\n app.register_blueprint(api_v0.bp)\n\n @app.route(\"/\")\n def health_check():\n \"\"\"Can be called by e.g. Kubernetes to verify that the API is up\n\n Returns:\n str: the static string \"Comet-API\", could be anything\n \"\"\"\n return \"Comet-API\"\n\n return app",
"def __init__(self):\n #self._app = Flask(__name__) # imports the named package, in this case this file\n self.__load_config()\n self._app = Flask(__name__.split(\".\")[-1], template_folder = self.template_folder)\n self._app.mongo = db_sync_manager #PyMongo(self._app)\n self._app.db = \"felix_mro\" if self.mro_enabled else \"felix_ro\"\n # Added in order to be able to execute \"before_request\" method\n app = self._app\n\n # Setup debugging for app\n cDebug = self.general_section.get(\"debug\")\n if cDebug: # log all actions on the XML-RPC interface\n def log_request(sender, **extra):\n logger.info(\">>> REQUEST %s:\\n%s\" % (request.path, request.data))\n request_started.connect(log_request, self._app)\n def log_response(sender, response, **extra):\n logger.info(\">>> RESPONSE %s:\\n%s\" % (response.status, response.data))\n request_finished.connect(log_response, self._app)\n\n @app.before_request\n def before_request():\n # \"Attach\" objects within the \"g\" object. This is passed to each view method\n g.mongo = self._app.mongo",
"def main(global_config, **settings):\n\n config = Configurator(settings=settings)\n config.include('pyramid_jinja2')\n config.include('.models')\n config.include('.routes')\n config.scan()\n return config.make_wsgi_app()",
"def init_app(self, app):\n self.__init__(aws_access_key_id=app.config.get(\"SES_AWS_ACCESS_KEY\"),\n aws_secret_access_key=app.config.get(\"SES_AWS_SECRET_KEY\"),\n region=app.config.get(\"SES_REGION\", \"us-east-1\"),\n sender=app.config.get(\"SES_SENDER\", None),\n reply_to=app.config.get(\"SES_REPLY_TO\", None),\n template=app.config.get(\"SES_TEMPLATE\", None),\n template_context=app.config.get(\"SES_TEMPLATE_CONTEXT\", {})\n )",
"def config(self):\n pass",
"def config(self):\n pass",
"def create_app():\n app = Flask(__name__, instance_relative_config=False)\n app.config.from_object('config.Config')\n app.config['TEMPLATES_AUTO_RELOAD'] = True\n\n\n # Initialize Plugins\n db.init_app(app)\n login_manager.login_view = 'auth_bp.login'\n login_manager.init_app(app)\n\n cache.init_app(app)\n sess.init_app(app)\n\n @app.context_processor\n def inject_session():\n return dict(session=sess)\n\n @app.context_processor\n def inject_datetime():\n return dict(dnow=datetime.now())\n\n @app.context_processor\n def check_permissions():\n def check_perms(perm, permset):\n return Permissions.check(perm, permset)\n return dict(check_perms=check_perms)\n\n @app.context_processor\n def lookup_permissions():\n def lookup_perm(perm):\n return Permissions.lookup(perm)\n return dict(lookup_perm=lookup_perm)\n\n app.add_template_global(Permissions.lookups(), 'permissions')\n\n with app.app_context():\n # Include our Routes\n from .main import main_bp\n from .auth import auth_bp\n from .admin import admin_bp\n from .snapshots import snap_bp\n from .geo import geo_bp\n from .ppe import ppe_bp\n from .space import space_bp\n from .staff import staff_bp\n from .trans import trans_bp\n\n # Register Blueprints\n app.register_blueprint(main_bp)\n app.register_blueprint(auth_bp)\n app.register_blueprint(admin_bp)\n app.register_blueprint(snap_bp)\n app.register_blueprint(geo_bp)\n app.register_blueprint(ppe_bp)\n app.register_blueprint(space_bp)\n app.register_blueprint(staff_bp)\n app.register_blueprint(trans_bp)\n\n\n return app",
"def create_app():\n app = Flask(__name__)\n app.register_blueprint(playlists)\n app.register_blueprint(comments)\n return app",
"def init_app(self, app):\n # Avoid double initialization.\n if self._flask_app is app:\n return None\n if self._flask_app is not None:\n raise RuntimeError(\n \"This api has already been registered on a flask application.\"\n )\n\n self._flask_app = app\n\n # Add the url rule.\n app.add_url_rule(\n rule=self._uri + \"/<path:path>\",\n endpoint=\"jsonapi\",\n view_func=self.handle_request,\n methods=[\"get\", \"post\", \"patch\", \"delete\", \"head\"]\n )\n\n # Register the jsonapi extension on the flask application.\n app.extensions = getattr(app, \"extensions\", dict())\n app.extensions[\"jsonapi\"] = self\n\n # Add the api to the jinja environment\n app.jinja_env.globals[\"jsonapi\"] = current_api\n return None",
"def setup_app(app):\n try:\n config_key = ndb.Key('WordListConfig', os.environ['CONFIG_MODEL_ID'])\n app.wordlist_config = config_key.get()\n except:\n print('Cannot load config from Datastore', file=sys.stderr)\n sys.exit(1)",
"def register_blueprints(app):\n app.register_blueprint(hello_world.bp_config.bp)",
"def setup_rest(app: web.Application):\n _logger.debug(\"Setting up %s ...\", __name__)\n\n spec_path: Path = storage_resources.get_path(\"api/v0/openapi.yaml\")\n\n # Connects handlers\n for routes in [\n handlers_health.routes,\n handlers_locations.routes,\n handlers_datasets.routes,\n handlers_files.routes,\n handlers_simcore_s3.routes,\n ]:\n set_default_route_names(routes)\n app.router.add_routes(routes)\n\n _logger.debug(\"routes: %s\", get_named_routes_as_message(app))\n\n # prepare container for upload tasks\n app[UPLOAD_TASKS_KEY] = {}\n\n # Enable error, validation and envelop middleware on API routes\n append_rest_middlewares(app, api_version=f\"/{api_vtag}\")\n\n # Adds swagger doc UI\n setup_swagger(\n app,\n swagger_url=\"/dev/doc\",\n swagger_from_file=f\"{spec_path}\",\n ui_version=3,\n )",
"def create_app():\n app = Flask(__name__, instance_relative_config=False)\n app.config.from_object('config.Config')\n db.init_app(app)\n flask_bcrypt.init_app(app)\n jwt.init_app(app)\n\n with app.app_context():\n # Import Blueprints\n from .routes.users_route import users_bp\n from .routes.messages_route import messages_bp\n\n # REGISTER ROUTES\n app.register_blueprint(users_bp, url_prefix=\"/users\")\n app.register_blueprint(messages_bp, url_prefix=\"/messages\")\n\n\n return app",
"def __init__(self, app, root):\n self.app = app\n self.root = root\n self.config = app.config()\n self._synth_editors = {} # Active synth editors. synth SID used as key",
"def setup(self, app_args):\n raise NotImplementedError",
"def init_app(app):\n app.config.from_object(\"config.DevelopmentConfig\")\n app.teardown_appcontext(close_db)\n app.cli.add_command(init_db_command)\n app.cli.add_command(add_data_db_command)\n app.cli.add_command(clear_db_command)",
"def __init__(self):\n super().__init__()\n\n etc_conf_names = ('app.conf', 'app.local.conf')\n conf_paths = [os.path.join(APP_DIR, 'etc', c) for c in etc_conf_names]\n\n user_config_path = os.path.join(\n os.path.expanduser('~'),\n '.config',\n 'url_manager.conf'\n )\n conf_paths.append(user_config_path)\n\n self.read(conf_paths)\n self.set('DEFAULT', 'app_dir', APP_DIR)",
"def __init__(self) -> None:\n\n self.config_keys = ['APPS_HOST', 'APPS_PORT']\n super().__init__()\n\n self.APPS_HOST = str(self.APPS_HOST)\n \"\"\"Host where the server will be served\"\"\"\n\n self.APPS_PORT = int(self.APPS_PORT)\n \"\"\"Port where the server will be served\"\"\"",
"def create_app():\n app = Flask(__name__)\n\n # app.secret_key = os.urandom(12)\n # jwt_manager = JWTManager()\n # jwt_manager.init_app(app)\n\n CORS(app)\n\n app.register_blueprint(redflag_blueprint, url_prefix=\"/api/v1/red-flags\")\n app.register_blueprint(user_blueprint, url_prefix=\"/api/v1/users\")\n app.register_blueprint(intervention_blueprint, url_prefix=\"/api/v1/interventions\")\n app.register_blueprint(auth_blueprint, url_prefix=\"/api/v1/auth\")\n app.register_blueprint(index_blueprint, url_prefix=\"/api/v1\")\n app.register_blueprint(base_url_blueprint, url_prefix=\"/\")\n app.register_blueprint(media_blueprint, url_prefix=\"/api/v1/files/uploads\")\n # app.register_blueprint(media_edit_blueprint, url_prefix=\"/api/v1/\")\n\n app.register_error_handler(400, bad_request_error)\n app.register_error_handler(404, page_not_found)\n app.register_error_handler(405, method_not_allowed)\n app.register_error_handler(500, internal_server_error)\n\n swagger_ui_blueprint = get_swaggerui_blueprint(SWAGGER_UI_URL, API_URL)\n app.register_blueprint(swagger_ui_blueprint, url_prefix=SWAGGER_UI_URL)\n\n return app",
"def setup(app): # noqa\n # Wee want to override the directives:\n # - 'graph' from sphinx.ext.graphviz extension.\n # - 'uml' from sphinxcontrib.plantuml\n # But Sphinx warns of the override, causing failure if warnings are set\n # to fail documentation build. So, we go down and use docutils registering\n # directly instead.\n\n # app.add_directive('uml', UmlDirective)\n # app.add_directive('graph', GraphDirective)\n # app.add_directive('diagram', DiagramDirective)\n\n from docutils.parsers.rst import directives\n directives.register_directive('uml', UmlDirective)\n directives.register_directive('graph', GraphDirective)\n directives.register_directive('diagram', DiagramDirective)\n\n # Register the config value to allow to set plantweb defaults in conf.py\n app.add_config_value('plantweb_defaults', {}, 'env')\n\n # Register Plantweb defaults setter\n # Note: The str() is because:\n # - In Python 2.7, Sphinx expects a str, not unicode.\n # - In Python 3.4, Sphinx expects a str, not bytes.\n app.connect(str('builder-inited'), builder_inited_handler)",
"def create_app():\n app = Flask(__name__)\n\n app.config.from_pyfile('../settings.py')\n\n app.register_blueprint(layout_bp, url_prefix='/layouts')\n app.register_blueprint(sheet_bp, url_prefix='/sheets')\n app.register_blueprint(user_bp, url_prefix='/users')\n\n db.init_app(app)\n ma.init_app(app)\n migrate.init_app(app)\n login_manager.init_app(app)\n\n return app",
"def main(global_config, **settings):\n config = Configurator(settings=settings)\n config.include('pyramid_jinja2')\n\n # Adding a renderer for custom model objects\n custom_json = JSON()\n model.register_custom_json(custom_json)\n config.add_renderer('json', custom_json)\n\n config.add_static_view('static', 'static', cache_max_age=3600)\n config.add_route('index', '/')\n config.add_route('api_board', '/api/{board}/', request_method='GET')\n config.add_route('api_thread', '/api/{board}/{thread}/', request_method='GET')\n config.add_route('board', '/{board}/', request_method='GET')\n config.add_route('new_thread', '/{board}/', request_method='POST')\n config.add_route('thread', '/{board}/{thread}/', request_method='GET')\n config.add_route('reply', '/{board}/{thread}/', request_method='POST')\n config.scan()\n return config.make_wsgi_app()",
"def add_app(self):\n \n pass",
"def init_app(self, app, config=None):\n if not (config is None or isinstance(config, dict)):\n raise ValueError(\"`config` must be an instance of dict or None\")\n\n base_config = app.config.copy()\n if self.config:\n base_config.update(self.config)\n if config:\n base_config.update(config)\n\n config = base_config\n\n config.setdefault(k_log_path, None)\n config.setdefault(k_log_name, \"\")\n config.setdefault(k_log_rotation, 60 * 60)\n config.setdefault(k_log_format, \"\")\n config.setdefault(k_log_enqueue, True)\n config.setdefault(k_log_serialize, True)\n\n self._set_loguru(app, config)",
"def create_app(config_class=DevConfig):\n\n app = Flask(__name__)\n app.config.from_object(config_class)\n\n # Register Blueprints\n from routes import bp_main\n app.register_blueprint(bp_main)\n\n return app"
] | [
"0.6912322",
"0.6909037",
"0.6776755",
"0.6729488",
"0.67033446",
"0.6680665",
"0.6629941",
"0.6628452",
"0.661505",
"0.65740204",
"0.6563665",
"0.65455115",
"0.6527511",
"0.6517811",
"0.650456",
"0.64928526",
"0.6415426",
"0.6414632",
"0.6411077",
"0.64020663",
"0.6355862",
"0.63493615",
"0.63461065",
"0.63420916",
"0.63365763",
"0.6335241",
"0.6314674",
"0.6305412",
"0.63034743",
"0.62925214",
"0.62925214",
"0.62828743",
"0.62819856",
"0.6277305",
"0.6267545",
"0.62397414",
"0.6235979",
"0.623247",
"0.623247",
"0.6222305",
"0.62125754",
"0.6211702",
"0.61667764",
"0.615878",
"0.61408544",
"0.61408544",
"0.61408544",
"0.61408544",
"0.61342645",
"0.61015254",
"0.6097908",
"0.60916936",
"0.60830176",
"0.6070935",
"0.60276324",
"0.60188377",
"0.6018025",
"0.6005769",
"0.60051626",
"0.6003574",
"0.5999552",
"0.59973085",
"0.5984814",
"0.5982088",
"0.5975086",
"0.59709436",
"0.5970883",
"0.59691674",
"0.59646356",
"0.59598887",
"0.5959184",
"0.5952891",
"0.5951533",
"0.5951184",
"0.59473234",
"0.5945753",
"0.59444237",
"0.593339",
"0.5933123",
"0.5931821",
"0.59308344",
"0.59308344",
"0.5929392",
"0.5929281",
"0.59277123",
"0.59164125",
"0.59164065",
"0.59162074",
"0.59103304",
"0.5907933",
"0.5893184",
"0.58906305",
"0.58865476",
"0.58831745",
"0.5880307",
"0.58801305",
"0.58782566",
"0.5866449",
"0.5863741",
"0.5855763",
"0.58542264"
] | 0.0 | -1 |
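The positive snippet in the record above calls a `configure_blueprints(app)` helper that the record itself never shows. A minimal sketch of such a helper is given below — the blueprint name, the `_blueprints` registry, and the route are hypothetical illustrations, not code from the source repository; the pattern simply iterates a module-level collection of `Blueprint` objects and registers each one on the app:

```python
# Illustrative sketch only: shows a typical configure_blueprints helper,
# matching the pattern used by the record's snippet; names are assumptions.
from flask import Blueprint, Flask

home_bp = Blueprint("home", __name__)  # hypothetical blueprint


@home_bp.route("/")
def index():
    return "ok"


_blueprints = (home_bp,)  # assumed module-level registry of blueprints


def configure_blueprints(app: Flask) -> None:
    """Register every known blueprint on the app object."""
    for bp in _blueprints:
        app.register_blueprint(bp)
```

Kept as a separate helper, the app factory stays short and new blueprints only need to be added to the registry tuple.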
Formats dictated text to camel case. | def camel_case_text(text):
newText = format_camel_case(text)
Text("%(text)s").execute({"text": newText}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def format_camel(text):\r\n\toutput = \"\"\r\n\tfor i,w in enumerate(text.split(\" \")):\r\n\t\tif i > 0:\r\n\t\t\toutput += w[0].upper() + w[1:]\r\n\t\telse:\r\n\t\t\toutput += w\r\n\treturn output",
"def _to_camel_case(text: str) -> str:\n return \"\".join(word.title() for word in text.split(\"_\"))",
"def camel_case_to_readable(text: str) -> str:\n if text == 'id':\n return 'ID'\n return ''.join(' ' + char if char.isupper() else char.strip() for char in text).strip().title()",
"def _camelify(words):\n newText = ''\n for word in words:\n if newText == '':\n newText = word[:1].lower() + word[1:]\n else:\n newText = '%s%s' % (newText, word.capitalize())\n return newText",
"def camel_case(value: str, **kwargs: Any) -> str:\n result = \"\".join(map(str.title, split_words(value)))\n return result[0].lower() + result[1:]",
"def snake_to_camel(text: str) -> str:\n\n data = [\n i.capitalize()\n for i in text.split(\"_\")\n ]\n return \"\".join(data)",
"def camel_case(text, separator='_'):\n return ''.join(map(str.capitalize, text.split(separator)))",
"def underscore_to_camelcase(text):\n text = text.replace('_', ' ').title()\n return text.replace(' ', '')",
"def render_camel(var_words):\n return ''.join([word.capitalize() for word in var_words])",
"def case(text, casingformat='sentence'):\n\n # If the lowercase version of the casing format is 'uppercase'\n if casingformat.lower() == 'uppercase':\n # Return the uppercase version\n return str(text.upper())\n\n # If the lowercase version of the casing format is 'lowercase'\n elif casingformat.lower() == 'lowercase':\n # Return the lowercase version\n return str(text.lower())\n\n # If the lowercase version of the casing format is 'sentence'\n elif casingformat.lower() == 'sentence':\n # Return the sentence case version\n return str(text[0].upper()) + str(text[1:])\n\n # If the lowercase version of the casing format is 'caterpillar'\n elif casingformat.lower() == 'caterpillar':\n # Return the caterpillar case version\n return str(text.lower().replace(\" \", \"_\"))\n\n # Raise a warning\n raise ValueError(\"Invalid text format specified.\")",
"def camel_filter(val):\n titlecase = val.title()\n return re.sub(r\"[\\W^_]\", \"\", titlecase)",
"def convert_underscore_to_camel_case(text):\n words = text.split('_')\n words = [word.capitalize() for word in words]\n return ''.join(words)",
"def CamelCase(text, separator='_'):\n return ''.join(map(str.capitalize, text.split(separator)))",
"def name_camel(self) -> str:\n # We want to use any of the customization applied to name_title\n # so let's just give _name_title with spaces stripped out.\n return self._name_title.replace(' ', '')",
"def convert_to_uppercase(text):\n return text.upper()",
"def convert_pascal_case_to_readable_format(text):\n return \"\".join(list(map(lambda x : \" \" + x.lower() if x.isupper() else x, list(text))))",
"def camelcase(value):\n rest = value.split(\"_\")\n return rest[0] + \"\".join(word.title() for word in rest[1:])",
"def uppercase_text(text):\n newText = format_upper_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})",
"def pascal_case_text(text):\n newText = format_pascal_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})",
"def to_upper(self, text):\n\t\treturn text.upper()",
"def UPPER(text):\n return text.upper()",
"def pascal_case(value: str, **kwargs: Any) -> str:\n return \"\".join(map(str.title, split_words(value)))",
"def __process_word(self, word):\n output = ''\n capitals = self.__capital_positions(word)\n c_index = 0\n\n for c in word:\n if c_index in capitals:\n output += c.upper()\n else:\n output += c.lower()\n\n c_index += 1\n\n return output",
"def proper_title_case(s):\n nocaps = [\"the\"] # This needs to be extended.",
"def capify(text):\n return text[0].upper() + text[1:]",
"def camelcase_to_sentencecase(name: str) -> str:\n return stringcase.sentencecase(stringcase.snakecase(name)).lower()",
"def camel(s):\n return s[0].upper() + s[1:]",
"def to_camelCase(in_str):\n \n if in_str.find(' ') > -1:\n words = in_str.split(' ')\n elif in_str.find('_') > -1:\n words = in_str.split('_')\n else:\n return in_str.lower()\n \n first_word = words[0].lower()\n other_words = ''.join(w.title() for w in words[1:])\n \n return '%s%s' % (first_word, other_words)",
"def snake_to_camel(word):\n return ''.join(x.capitalize() or '_' for x in word.split('_'))",
"def snake_to_camel(word):\n return ''.join(x.capitalize() or '_' for x in word.split('_'))",
"def _camel_case_to_snake_case(text: str) -> str:\n return re.sub(r\"(?<!^)(?=[A-Z])\", \"_\", text).lower()",
"def UCase(text):\n return text.upper()",
"def snake_to_camel(snake_str):\n title_str = snake_str.split('_')\n return ' '.join(title_str).title()",
"def word_capital(text):\n if text and len(text) > 0:\n return ' '.join([s[0].upper() + s[1:] for s in text.split(' ') if len(s) > 0])\n else:\n return text",
"def snake_case_text(text):\n newText = format_snake_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})",
"def convert_camel_case(name):\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', name)\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()",
"def mixed_pascal_case(value: str, **kwargs: Any) -> str:\n return capitalize(mixed_case(value))",
"def test_capitalize(self):\n self.assertEqual(\n minerals_extras.capitalize('mohs scale hardness'),\n 'Mohs Scale Hardness')",
"def invert_capitalization(word):\n if word.islower():\n return word.upper()\n else:\n return word.lower()",
"def _snake_to_camel(name, strict=False):\n if strict:\n name = name.lower()\n terms = name.split('_')\n return terms[0] + ''.join([term.capitalize() for term in terms[1:]])",
"def snake_to_camel_case(value):\n words = value.strip(\"_\").split(\"_\")\n return words[0].lower() + \"\".join([word.capitalize() for word in words[1:]])",
"def snake_case_to_camel_case(s, separator='_'):\n return s.title().replace(separator, '')",
"def camelcase_to_underscore(text):\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', text)\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()",
"def snake_to_camel_case(name: str, initial: bool = False) -> str:\n chunks = name.split('_')\n converted = [s.capitalize() for s in chunks]\n if initial:\n return ''.join(converted)\n else:\n return chunks[0].lower() + ''.join(converted[1:])",
"def convert_camel_case_to_underscore(text):\n words = []\n last_capital_index = 0\n\n for current_letter_index in xrange(1, len(text)):\n if text[current_letter_index].isupper():\n words.append(text[last_capital_index:current_letter_index])\n last_capital_index = current_letter_index\n elif current_letter_index == len(text) - 1:\n words.append(text[last_capital_index:])\n\n return '_'.join(words).lower()",
"def _case_convert_snake_to_camel(token: str) -> str:\n while True:\n try:\n # find next underscore\n underscore_loc = token.index('_')\n except ValueError:\n # converted all underscores\n break\n # is the underscore at the end of the string?\n if underscore_loc == len(token) - 1:\n break\n\n orig = token\n token = f'{orig[:underscore_loc]}{orig[underscore_loc+1].upper()}'\n # is there more after the capital?\n if len(orig) > underscore_loc+2:\n token += f'{orig[underscore_loc+2:]}'\n return token",
"def CamelCase_to_snake_case(text):\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', text)\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()",
"def _transliterate_text(self, _text):\n return _text.upper()",
"def process(self, s):\n # modified for project...\n return s.upper()",
"def CamelCaseToOutputFriendly(string):\n return re.sub('([A-Z]+)', r' \\1', string).strip().lower()",
"def get_casing(word):\n if len(word) == 0:\n return \"other\"\n elif word.isdigit(): # Is a digit\n return \"numeric\"\n elif word.islower(): # All lower case\n return \"allLower\"\n elif word.isupper(): # All upper case\n return \"allUpper\"\n # is a title, initial char upper, then all lower\n elif word[0].isupper():\n return \"initialUpper\"\n\n return \"other\"",
"def underscore_to_camelcase(word, initial_capital=False):\n words = [x.capitalize() or \"_\" for x in word.split(\"_\")]\n if not initial_capital:\n words[0] = words[0].lower()\n\n return \"\".join(words)",
"def to_camelcase(s):\n words = re.split(\"[^a-zA-Z0-9]+\", s)\n return \"\".join(\n w.lower() if i is 0 else w.title() for i, w in enumerate(words))",
"def process_text(text, args):\n if args.uppercase:\n text = convert_to_uppercase(text)\n\n if args.spaces:\n text = add_spaces(text)\n\n if not args.uppercase and not args.spaces:\n text = add_spaces(text)\n\n return text",
"def to_upper_camelcase(name):\n return re.sub(r'(?:\\B_|\\b\\-|^)([a-zA-Z0-9])', lambda l: l.group(1).upper(),\n name)",
"def un_camel_case(text, separator='_'):\n split = re.findall(r'(?:[A-Z][a-z0-9]*|[a-z0.9]+)', text)\n split = map(str.lower, split)\n split = list(split)\n\n words = []\n\n while len(split) > 0:\n word = split[0]\n split = split[1:]\n\n if len(word) == 1:\n while (len(split) > 0) and (len(split[0]) == 1):\n word += split[0]\n split = split[1:]\n\n words.append(word)\n\n return separator.join(words)",
"def name_to_camel_case(name: str) -> str:\n\n s = name.lower().split('-')\n\n if len(name) == 0:\n return name\n\n return s[0] + ''.join(i.capitalize() for i in s[1:])",
"def parse_case_snake_to_camel(snake, upper_first=True):\n\tsnake = snake.split('_')\n\tfirst_part = snake[0]\n\tif upper_first:\n\t\tfirst_part = first_part.title()\n\treturn first_part + ''.join(word.title() for word in snake[1:])",
"def to_upper_snakecase(text: str) -> str:\n\n data = text.replace(\" \", \"_\").upper()\n if data[0].isdigit():\n data = \"_\" + data\n return data",
"def transform_from_camelcase(key):\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', key)\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()",
"def snake_to_camel(string):\n \n camel_case = []\n\n for word in string.split(\"_\"):\n camel_case.append(word.title())\n\n \"\".join(camel_case)",
"def capitalize_text(text, words_list, marker=''):\n for word in words_list:\n text = capitalize_term_re(text, word, marker)\n return text",
"def snake_to_camel_case(snake_str: str) -> str:\n\n words = snake_str.strip(\"_\").split(\"_\")\n return words[0] + \"\".join(word[:1].upper() + word[1:] for word in words[1:])",
"def titleize(phrase):\n nl = phrase.split(' ')\n o=[]\n for i in nl:\n o.append(i[0].upper()+i[1:].lower())\n print(' '.join(o))",
"def LCase(text):\n return text.lower()",
"def to_camel_case(s):\n if s[:1].isupper() and '_' not in s:\n return s\n else:\n return snake_case_to_camel_case(s)",
"def normalize_case(text):\n text = str(text)\n return text.lower()",
"def capitalize(result):\n\treturn result.upper()",
"def snake_to_camel(name):\n if name == \"role_arn\":\n return \"roleARN\"\n temp = name.split(\"_\")\n return temp[0] + \"\".join(ele.title() for ele in temp[1:])",
"def title(value):\n capped = [char for char in string.capwords(value.replace(\"_\", \" \"))]\n\n # If a string also contains some letters after an apostrophe, we should capitalize that\n # letter... (ie: O'ryan's Charm -> O'Ryan's Charm).\n for index, char in enumerate(capped):\n if char is \"'\":\n if index + 1 <= len(capped):\n if capped[index + 2] != ' ':\n capped[index + 1] = capped[index + 1].upper()\n\n return \"\".join(capped)",
"def force_title_case(etl, field_names, **kwargs):\r\n import arcetl\r\n func = functools.partial(\r\n etl.transform, transformation=arcetl.attributes.update_by_function,\r\n function=value.force_title_case, **kwargs\r\n )\r\n tuple(func(field_name=name) for name in field_names)",
"def capitalize(value: str, **kwargs: Any) -> str:\n return value[0].upper() + value[1:]",
"def capitalize(self) -> String:\n pass",
"def transform_to_camelcase(key):\n return Jsonifiable.lower_first(\n ''.join(c.capitalize() or '_' for c in key.split('_')))",
"def tr_upper_to_lower(text):\n out = []\n for ch in text:\n if ch in tr_upper_to_lower_dict:\n out.append(tr_upper_to_lower_dict[ch])\n else:\n out.append(ch.lower())\n \n return \"\".join(out)",
"def titlecase(\n s,\n exceptions=[\"and\", \"in\", \"a\"],\n abbrv=[\"ID\", \"IGSN\", \"CIA\", \"CIW\", \"PIA\", \"SAR\", \"SiTiIndex\", \"WIP\"],\n capitalize_first=True,\n split_on=r\"[\\.\\s_-]+\",\n delim=\"\",\n):\n # Check if abbrv in string, in which case it'll need to be split first?\n words = re.split(split_on, s)\n out = []\n first = words[0]\n if capitalize_first and not (first in abbrv):\n first = first.capitalize()\n\n out.append(first)\n for word in words[1:]:\n if word in exceptions + abbrv:\n pass\n elif word.upper() in abbrv:\n word = word.upper()\n else:\n word = word.capitalize()\n out.append(word)\n return delim.join(out)",
"def _format(text):\n \n if isinstance(text, unicode):\n return text.lower().encode(\"UTF-8\")\n elif isinstance(text, str):\n return text.lower()",
"def snake_to_camel(name):\n return \"\".join([piece.capitalize() for piece in name.split(\"_\")])",
"def toCamelCase(name, perserveSep = False):\n if name == None or name.isspace():\n return \"\"\n\n name = name.strip()\n \n #has no letters at all\n if len([x for x in name if x.isalpha()]) == 0:\n return name\n\n #is all caps\n if name.isupper():\n return name.lower()\n\n nameformatted = \"\"\n markStart = False\n sepChars = list(string.punctuation)\n sepChars += \" \"\n nameChars = list(name)\n i = 0\n while i < len(name):\n c = nameChars[i]\n if c in sepChars:\n if perserveSep:\n nameformatted += c\n i += 1\n continue\n if i+1 < len(name):\n nameChars[i+1] = nameChars[i+1].upper()\n i += 1\n continue\n\n if not markStart:\n markStart = True\n nameformatted += c.lower()\n i += 1\n continue\n\n if i > 0 and nameChars[i-1].isupper():\n nameformatted += c.lower()\n i += 1\n continue\n\n nameformatted += c\n i += 1\n \n return nameformatted",
"def underscored2camel_case(v):\n vlist = v.split('_')\n c = []\n for n, el in enumerate(vlist):\n if el:\n if n == 0:\n c.append(el)\n else:\n c.extend([el[0].upper(), el[1:]])\n return ''.join(c)",
"def pascalcase(string):\n\n return capitalcase(camelcase(string))",
"def lowercase_text(text):\n newText = format_lower_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})",
"def titlecase(input_str):\n return \"\".join([x.title() for x in input_str.split('_')])",
"def snake_to_camel_case(string: str):\n return ''.join(string_component.title() for string_component in string.split('_'))",
"def to_lower(self, text):\n return text.lower()",
"def pascal_case(st) -> str:\n if st.find('-') != -1 or st.find('_') != -1:\n st = ''.join(a.capitalize() for a in re.split('-|_', st))\n return st[:1].upper() + st[1: len(st)]",
"def snake_to_camel(string):\n return \"\".join(word.title() for word in string.split(\"_\"))",
"def apply_capitalization_to_tag(tag: Dict[str, Union[str, List[str]]]) -> str:\n return CAPITALIZATION_TABLE[tag[\"capitalization\"]].apply(tag[\"context\"])",
"def to_camel(string, capitalize):\n\n ret = string.lower()\n words = re.split(\"[\\s_]\", ret)\n words = list(map(lambda x: x.capitalize(), words))\n if not capitalize:\n words[0] = words[0].lower()\n return \"\".join(words)",
"def detect_case(text):\n\n parts = split_by_case(text, 'underscore')\n if not parts:\n # text is collection of underscores\n return 'other'\n\n if not all(part.isalnum() for part in parts):\n # one or more text part contains not alpha-numeric characters\n return 'other'\n\n if len(parts) != 1:\n return 'underscore'\n\n parts = split_by_case(parts[0], 'camel')\n if parts[0][0].isupper(): # check first character\n return 'title'\n\n # first character lower or not letter\n\n if len(parts) == 1:\n return 'mixed'\n\n return 'camel'",
"def camelcase(string):\r\n return ''.join(word.capitalize() for word in string.split('_'))",
"def to_camel(id):\n return ''.join(cap_first(x) for x in id.split('_'))",
"def getCasing(word):\n casing = 'other'\n \n numDigits = 0\n for char in word:\n if char.isdigit():\n numDigits += 1\n \n digitFraction = numDigits / float(len(word))\n \n if word.isdigit(): #Is a digit\n casing = 'numeric'\n elif digitFraction > 0.5:\n casing = 'mainly_numeric'\n elif word.islower(): #All lower case\n casing = 'allLower'\n elif word.isupper(): #All upper case\n casing = 'allUpper'\n elif word[0].isupper(): #is a title, initial char upper, then all lower\n casing = 'initialUpper'\n elif numDigits > 0:\n casing = 'contains_digit'\n \n return casing",
"def transform(text: str) -> str:\n return text.title()",
"def to_lower_camelcase(name):\n return re.sub(r'(?:\\B_|\\b\\-)([a-zA-Z0-9])', lambda l: l.group(1).upper(),\n name)",
"def namecase(name):\n return re.sub(r\"[A-Za-z]+('[A-Za-z]+])?\",\n lambda mo: _namecase.get(mo.group(0).lower(),\n mo.group(0).title()),\n name)",
"def not_capitalized(): # noqa: D416",
"def test_underscore_separated_words_to_camel_case(underscore_words, expected_output):\n assert underscore_separated_words_to_camel_case(underscore_words) == expected_output",
"def process_name(self, name, inverse=False):\n if inverse:\n return name.replace('_', ' ').title()\n return name.lower().replace(' ', '_').replace('.', '')",
"def change_case(word):\n return word.upper() if case == \"upper\" else word.lower()"
] | [
"0.7785462",
"0.7445175",
"0.72581625",
"0.7184089",
"0.71070236",
"0.70822126",
"0.6964225",
"0.69159365",
"0.6845309",
"0.68369484",
"0.683591",
"0.67920893",
"0.6758114",
"0.6714242",
"0.66896963",
"0.6676171",
"0.6648838",
"0.6558678",
"0.6504255",
"0.6491712",
"0.6463216",
"0.64568657",
"0.64397925",
"0.6427154",
"0.64185476",
"0.64045215",
"0.6373759",
"0.6344882",
"0.63438916",
"0.63438916",
"0.63436085",
"0.63179046",
"0.6314209",
"0.62976027",
"0.62931496",
"0.6292751",
"0.62868196",
"0.62687737",
"0.6257756",
"0.62526107",
"0.6208292",
"0.6202899",
"0.61640245",
"0.61631554",
"0.61590433",
"0.6153391",
"0.6152042",
"0.6144733",
"0.614189",
"0.61269164",
"0.6126768",
"0.6121653",
"0.6101522",
"0.6099956",
"0.6087533",
"0.6073741",
"0.60669863",
"0.6064168",
"0.6062552",
"0.6060258",
"0.60565",
"0.6042768",
"0.6042347",
"0.6038588",
"0.6035006",
"0.6030798",
"0.6027725",
"0.6024195",
"0.6019331",
"0.6008242",
"0.60075593",
"0.60057133",
"0.5998001",
"0.59923875",
"0.599091",
"0.5961493",
"0.5960518",
"0.5954495",
"0.59447265",
"0.59297013",
"0.59274465",
"0.5926237",
"0.59256285",
"0.5925381",
"0.5912815",
"0.5899982",
"0.58979577",
"0.5891205",
"0.58777744",
"0.5873214",
"0.58534884",
"0.58364403",
"0.5825223",
"0.5816257",
"0.58083564",
"0.5800665",
"0.5800552",
"0.5800266",
"0.57981503",
"0.5793755"
] | 0.77859265 | 0 |
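The record above pairs the docstring with a `camel_case_text` command that delegates to `format_camel_case(text)`, which is not shown. A minimal sketch of the usual behaviour — first word lowercased, following words capitalized — assuming whitespace-separated dictated words (an illustration, not the helper from the source grammar):

```python
# Illustrative sketch only: a typical camel-case formatter for dictated text.
def format_camel_case(text: str) -> str:
    words = text.split()
    if not words:
        return ""
    # First word stays lower case, every later word gets an initial capital.
    return words[0].lower() + "".join(w.capitalize() for w in words[1:])


print(format_camel_case("hello world example"))  # -> helloWorldExample
```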
Formats n words to the left of the cursor to camel case. Note that word count differs between editors and programming languages. The examples are all from Eclipse/Python. | def camel_case_count(n):
saveText = _get_clipboard_text()
cutText = _select_and_cut_text(n)
if cutText:
endSpace = cutText.endswith(' ')
text = _cleanup_text(cutText)
newText = _camelify(text.split(' '))
if endSpace:
newText = newText + ' '
newText = newText.replace("%", "%%") # Escape any format chars.
Text(newText).execute()
else: # Failed to get text from clipboard.
Key('c-v').execute() # Restore cut out text.
_set_clipboard_text(saveText) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pascal_case_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n text = _cleanup_text(cutText)\n newText = text.title().replace(' ', '')\n if endSpace:\n newText = newText + ' '\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)",
"def snake_case_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n text = _cleanup_text(cutText.lower())\n newText = '_'.join(text.split(' '))\n if endSpace:\n newText = newText + ' '\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)",
"def format_camel(text):\r\n\toutput = \"\"\r\n\tfor i,w in enumerate(text.split(\" \")):\r\n\t\tif i > 0:\r\n\t\t\toutput += w[0].upper() + w[1:]\r\n\t\telse:\r\n\t\t\toutput += w\r\n\treturn output",
"def lowercase_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n newText = cutText.lower()\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)",
"def render_camel(var_words):\n return ''.join([word.capitalize() for word in var_words])",
"def _camelify(words):\n newText = ''\n for word in words:\n if newText == '':\n newText = word[:1].lower() + word[1:]\n else:\n newText = '%s%s' % (newText, word.capitalize())\n return newText",
"def uppercase_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n newText = cutText.upper()\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)",
"def camel_case_text(text):\n newText = format_camel_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})",
"def pascal_case(value: str, **kwargs: Any) -> str:\n return \"\".join(map(str.title, split_words(value)))",
"def to_camelcase(s):\n words = re.split(\"[^a-zA-Z0-9]+\", s)\n return \"\".join(\n w.lower() if i is 0 else w.title() for i, w in enumerate(words))",
"def camel_case(value: str, **kwargs: Any) -> str:\n result = \"\".join(map(str.title, split_words(value)))\n return result[0].lower() + result[1:]",
"def convert_camel_case_to_underscore(text):\n words = []\n last_capital_index = 0\n\n for current_letter_index in xrange(1, len(text)):\n if text[current_letter_index].isupper():\n words.append(text[last_capital_index:current_letter_index])\n last_capital_index = current_letter_index\n elif current_letter_index == len(text) - 1:\n words.append(text[last_capital_index:])\n\n return '_'.join(words).lower()",
"def camelcase(value):\n rest = value.split(\"_\")\n return rest[0] + \"\".join(word.title() for word in rest[1:])",
"def words_capital_letter(self, path, filemoving, parser):\n root = parser.parsing_xml(path, filemoving)\n root_tag = root.tag[0:(root.tag.find('}') + 1)]\n number_of_words_capital_letter = 0\n for i in root.iter(root_tag+'p'):\n if str(type(i.text)) == \"<class 'str'>\":\n number_of_words_capital_letter = number_of_words_capital_letter + sum(list(map(lambda x: x.istitle(), i.text.split())))\n return number_of_words_capital_letter",
"def proper_title_case(s):\n nocaps = [\"the\"] # This needs to be extended.",
"def to_camelCase(in_str):\n \n if in_str.find(' ') > -1:\n words = in_str.split(' ')\n elif in_str.find('_') > -1:\n words = in_str.split('_')\n else:\n return in_str.lower()\n \n first_word = words[0].lower()\n other_words = ''.join(w.title() for w in words[1:])\n \n return '%s%s' % (first_word, other_words)",
"def make_title(words):",
"def wrap_by_word(s, n):\n\ta = s.split()\n\tret = ''\n\tfor i in range(0, len(a), n):\n\t\tret += ' '.join(a[i:i+n]) + '\\n'\n\treturn ret",
"def camel(s):\n return s[0].upper() + s[1:]",
"def _to_camel_case(text: str) -> str:\n return \"\".join(word.title() for word in text.split(\"_\"))",
"def camel_to_spaces(s):\n subbed = _underscorer1.sub(r'\\1 \\2', s)\n return _underscorer2.sub(r'\\1 \\2', subbed).lower()",
"def generate_words(self,N):\n for i in xrange(N):\n prefix = \" \" * self.chainlen\n name = \"\"\n suffix = \"\"\n while True:\n suffix = self.get_suffix(prefix)\n if suffix == \"\\n\" or len(name) > 9:\n break\n else:\n name = name + suffix\n prefix = prefix[1:] + suffix\n yield name.capitalize()",
"def pascal_case(st) -> str:\n if st.find('-') != -1 or st.find('_') != -1:\n st = ''.join(a.capitalize() for a in re.split('-|_', st))\n return st[:1].upper() + st[1: len(st)]",
"def counter(name):\n count_name = list(name)\n counter = 0\n for letter in count_name:\n counter += 1\n\n print(f\"There are {counter} letter in the name {name}.\")\n print(f\"\\tAnd btw... {name} backwards is {name[::-1].lower()}.\")",
"def convert_underscore_to_camel_case(text):\n words = text.split('_')\n words = [word.capitalize() for word in words]\n return ''.join(words)",
"def print_words(words):\n print('Here are the words you entered:')\n for i, word in enumerate(words, start=1):\n print('{}. {}'.format(i, word.title()))",
"def titleize(phrase):\n nl = phrase.split(' ')\n o=[]\n for i in nl:\n o.append(i[0].upper()+i[1:].lower())\n print(' '.join(o))",
"def capitalize_text(text, words_list, marker=''):\n for word in words_list:\n text = capitalize_term_re(text, word, marker)\n return text",
"def underscore_to_camelcase(text):\n text = text.replace('_', ' ').title()\n return text.replace(' ', '')",
"def snake_to_camel(word):\n return ''.join(x.capitalize() or '_' for x in word.split('_'))",
"def snake_to_camel(word):\n return ''.join(x.capitalize() or '_' for x in word.split('_'))",
"def titleize(title):\n titleized = []\n for idx, word in enumerate(title.split()):\n if idx == 0 or word not in ['a', 'of', 'in', 'the', 'v']:\n word = word.capitalize()\n\n titleized.append(word)\n\n return ' '.join(titleized)",
"def ladcased(normal):\r\n\r\n ladified = ''\r\n for i, c in enumerate(normal):\r\n ladified += c.lower() if (i % 2 == 0) else c.upper()\r\n\r\n return ladified",
"def snake_to_camel(text: str) -> str:\n\n data = [\n i.capitalize()\n for i in text.split(\"_\")\n ]\n return \"\".join(data)",
"def _set_number_of_words(self, N):\n self.N_words_to_display = N",
"def pascal_case_text(text):\n newText = format_pascal_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})",
"def word_capital(text):\n if text and len(text) > 0:\n return ' '.join([s[0].upper() + s[1:] for s in text.split(' ') if len(s) > 0])\n else:\n return text",
"def camel_case_to_readable(text: str) -> str:\n if text == 'id':\n return 'ID'\n return ''.join(' ' + char if char.isupper() else char.strip() for char in text).strip().title()",
"def titleize(phrase):\n words = phrase.split(' ')\n results = ''\n for i in range(len(words)):\n word = ''\n for j in range(len(words[i])):\n if(j == 0):\n word = words[i][j].upper() \n else:\n word += words[i][j].lower()\n results += word\n if(i < len(words) - 1):\n results += ' '\n return results",
"def convert_pascal_case_to_readable_format(text):\n return \"\".join(list(map(lambda x : \" \" + x.lower() if x.isupper() else x, list(text))))",
"def __process_word(self, word):\n output = ''\n capitals = self.__capital_positions(word)\n c_index = 0\n\n for c in word:\n if c_index in capitals:\n output += c.upper()\n else:\n output += c.lower()\n\n c_index += 1\n\n return output",
"def camel_filter(val):\n titlecase = val.title()\n return re.sub(r\"[\\W^_]\", \"\", titlecase)",
"def expand_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n cutText = _expand_after_special_chars(cutText)\n reg = re.compile(\n r'([a-zA-Z0-9_\\\"\\'\\)][=\\+\\-\\*/\\%]|[=\\+\\-\\*/\\%][a-zA-Z0-9_\\\"\\'\\(])')\n hit = reg.search(cutText)\n count = 0\n while hit and count < 10:\n cutText = cutText[:hit.start() + 1] + ' ' + \\\n cutText[hit.end() - 1:]\n hit = reg.search(cutText)\n count += 1\n newText = cutText\n if endSpace:\n newText = newText + ' '\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)",
"def camel_case_to_lower_case_underscore(string):\n words = []\n from_char_position = 0\n for current_char_position, char in enumerate(string):\n if char.isupper() and from_char_position < current_char_position:\n words.append(\n string[from_char_position:current_char_position].lower())\n from_char_position = current_char_position\n words.append(string[from_char_position:].lower())\n return '_'.join(words)",
"def test_capitalize_word(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n first line\n line 1\n Line a\n line b\n line c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"3.6\", \"3.6\"),\n after_sel=(\"3.6\", \"3.6\"),\n command_name=\"capitalize-word\",\n )",
"def get_number_of_words_with_capital_letters_and_lowercase(self):\n filename = f'{self.path}/{self.filename}'\n file = open(filename, 'r', encoding='utf-8')\n data = file.read()\n head, sep, tail = data.partition('<binary')\n head = re.sub('\\\\s\\\\s*', ' ', (re.sub('\\\\W|\\\\d', ' ', re.sub('<.*?>', '', head))))\n word_list = head.split()\n\n # upper_cnt = (sum([sum([c.isupper() for c in a]) for a in word_list]))\n # lower_cnt = (sum([sum([c.islower() for c in a]) for a in word_list]))\n\n upper_cnt = sum([a[0].isupper() for a in word_list])\n lower_cnt = (sum([a.islower() for a in word_list]))\n\n sqlite_for_ht.CreateTable.update_table(f_1, self.filename, 'words_with_capital_letters', upper_cnt)\n sqlite_for_ht.CreateTable.update_table(f_1, self.filename, 'words_in_lowercase', lower_cnt)\n print(datetime.now(), '-', 'words_with_capital_letters for', self.filename, 'calculated =', upper_cnt)\n print(datetime.now(), '-', 'words_in_lowercase for', self.filename, 'calculated =', lower_cnt)\n return None",
"def rotate_word(s1, n):\n s2 = ''\n for c in s1:\n i = (ord(c)-97+n) % 26\n ch = chr(i+97)\n s2 = s2 + ch\n return s2",
"def name_camel(self) -> str:\n # We want to use any of the customization applied to name_title\n # so let's just give _name_title with spaces stripped out.\n return self._name_title.replace(' ', '')",
"def upper(value,n):\n return value.upper()[0:n]",
"def space_out_camel_case(camel):\r\n chars = []\r\n\r\n for char in camel:\r\n if len(chars) >= 2 and chars[-1] != ' ':\r\n if char.isupper() and chars[-1].islower():\r\n chars.append(' ')\r\n elif char.islower() and chars[-1].isupper() and chars[-2].isupper():\r\n chars.insert(len(chars) - 1, ' ')\r\n\r\n chars.append(char)\r\n\r\n return ''.join(chars)",
"def camel_to_underscore(name):\n as_list = []\n length = len(name)\n for index, i in enumerate(name):\n if index != 0 and index != length - 1 and i.isupper():\n as_list.append('_%s' % i.lower())\n else:\n as_list.append(i.lower())\n\n return ''.join(as_list)",
"def _snake_to_camel(name, strict=False):\n if strict:\n name = name.lower()\n terms = name.split('_')\n return terms[0] + ''.join([term.capitalize() for term in terms[1:]])",
"def generate_text_owc(model: Dict[str, Set[str]], n: int) -> str:\n # ACCUMULATOR: a list of the randomly-generated words so far\n words_so_far = []\n # We've provided this template as a starting point; you may modify it as necessary.\n words_so_far.append(generate_new_word(model))\n for x in range(0, n-1):\n key = words_so_far[x]\n new_word = generate_next_word(model,key)\n if new_word == \".\":\n words_so_far[x] = words_so_far[x]+'.'\n new_word= generate_new_word(model)\n elif new_word == {}:\n new_word = generate_new_word(model)\n words_so_far.append(new_word)\n\n return str.join(' ', words_so_far)",
"def _case_convert_snake_to_camel(token: str) -> str:\n while True:\n try:\n # find next underscore\n underscore_loc = token.index('_')\n except ValueError:\n # converted all underscores\n break\n # is the underscore at the end of the string?\n if underscore_loc == len(token) - 1:\n break\n\n orig = token\n token = f'{orig[:underscore_loc]}{orig[underscore_loc+1].upper()}'\n # is there more after the capital?\n if len(orig) > underscore_loc+2:\n token += f'{orig[underscore_loc+2:]}'\n return token",
"def capify(text):\n return text[0].upper() + text[1:]",
"def snake_case_text(text):\n newText = format_snake_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})",
"def nth_word(value: str, n: int) -> str:\n return value.split()[n]",
"def format_words(words):\n return sorted(words, key=str.lower)",
"def titlecase(input_str):\n return \"\".join([x.title() for x in input_str.split('_')])",
"def _nth_letter(n):\r\n\treturn string.ascii_lowercase[n % len(string.ascii_lowercase)]",
"def camelCaseFunc():\n unused = 1",
"def display_top_n_words(total_count__of_words, n): # Considering n=10 here as specified in the requirements\n return sorted(total_count__of_words.items(), key=lambda i: i[1], reverse=True)[:n]",
"def camelcase_to_underscore(text):\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', text)\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()",
"def snake_to_camel(snake_str):\n title_str = snake_str.split('_')\n return ' '.join(title_str).title()",
"def underToAllCaps(value): # pragma: no cover\n return ' '.join(map(lambda x: x.title(), value.split('_')))",
"def to_lowercase(self):\n new_words = []\n # new_words = \"\"\n for word in self.words:\n new_word = word.lower()\n new_words.append(new_word)\n # new_word += f\"{new_word} \"\n self.words = new_words\n return self",
"def get_snake_case_from_camel_case(name: str) -> str:\n\n new_chars = []\n for i, char in enumerate(name): \n if i == len(name)-1 or i == 0: \n new_chars.append(char)\n elif char.isupper() and name[i+1].islower():\n new_chars.append('_')\n new_chars.append(char)\n elif char.islower() and name[i+1].isupper(): \n new_chars.append(char)\n new_chars.append('_')\n else: \n new_chars.append(char)\n\n new_name = ''.join(new_chars)\n return new_name.lower().replace('__', '_')",
"def change_title(s):\n\ts = re.sub(r\"[A-Za-z]+('[A-Za-z]+)?\",\n lambda mo: mo.group(0)[0].upper() +\n mo.group(0)[1:].lower(),s)\n\ts = s.split(\" \")\n\tfor i in range(len(s)):\n\t\tif (s[i] in \"Ii Iii Iv Vi Vii Viii Ix Ii: Iii: Iv: Vi: Vii: Viii: Ix:\"):\n\t\t\ts[i] = s[i].upper()\n\treturn \" \".join(s)",
"def snake_to_camel_case(value):\n words = value.strip(\"_\").split(\"_\")\n return words[0].lower() + \"\".join([word.capitalize() for word in words[1:]])",
"def make_display_word(secret_word):\n return ('_ ' * len(secret_word))",
"def underscore_to_camelcase(word, initial_capital=False):\n words = [x.capitalize() or \"_\" for x in word.split(\"_\")]\n if not initial_capital:\n words[0] = words[0].lower()\n\n return \"\".join(words)",
"def capitalize(self) -> String:\n pass",
"def snake_to_camel_case(snake_str: str) -> str:\n\n words = snake_str.strip(\"_\").split(\"_\")\n return words[0] + \"\".join(word[:1].upper() + word[1:] for word in words[1:])",
"def print_upper_words(words):\n for word in words:\n print(word.upper())",
"def snake_to_camel(name):\n return \"\".join([piece.capitalize() for piece in name.split(\"_\")])",
"def camel_to_underscore(name):\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', name)\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()",
"def render_snake(var_words):\n return '_'.join(var_words)",
"def _make_name(words):\n return \" \".join(words)",
"def camel_case(text, separator='_'):\n return ''.join(map(str.capitalize, text.split(separator)))",
"def capitalizeFirst(word):\n return word[0].upper() + word[1:]",
"def snake_to_camel(string):\n \n camel_case = []\n\n for word in string.split(\"_\"):\n camel_case.append(word.title())\n\n \"\".join(camel_case)",
"def to_camel_case(string: str):\n return \"\".join(\n word.title() if idx > 0 else word for idx, word in enumerate(string.split(\"_\"))\n )",
"def display_words(word_list,specifier):\n \n if specifier.lower() == 'score':\n print(\"{:>6s} - {:s}\".format(\"Score\", \"Word\"))\n if len(word_list) < 5:\n for tup in word_list:\n print(\"{:>6d} - {:s}\".format(tup[1], tup[0]))\n else:\n \n for tup in word_list[:5]:\n print(\"{:>6d} - {:s}\".format(tup[1], tup[0]))\n \n \n elif specifier.lower() == 'length':\n print(\"{:>6s} - {:s}\".format(\"Length\", \"Word\"))\n if len(word_list) < 5:\n for tup in word_list:\n print(\"{:>6d} - {:s}\".format(tup[2], tup[0]))\n else:\n \n for tup in word_list[:5]:\n print(\"{:>6d} - {:s}\".format(tup[2], tup[0]))",
"def capitalize1(s):\n return s[:1].upper() + s[1:]",
"def _snake_case(display_name):\n str_re = re.compile('[{0}]'.format(re.escape(string.punctuation)))\n str = str_re.sub(' ', display_name)\n str = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', str)\n str = re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', str).lower()\n return re.sub(' +', '_', str)",
"def shn_abbreviate(word, size=48):\n\n if word:\n if (len(word) > size):\n word = \"%s...\" % word[:size - 4]\n else:\n return word\n else:\n return word",
"def snake_to_camel(string):\n return \"\".join(word.title() for word in string.split(\"_\"))",
"def test_underscore_separated_words_to_camel_case(underscore_words, expected_output):\n assert underscore_separated_words_to_camel_case(underscore_words) == expected_output",
"def print_upper_words(words):\n \n for word in words:\n print(word.upper())",
"def _nice_case(line):\n line_lower = line.lower()\n s = \"\"\n i = 0\n nextCap = 1\n while i < len(line_lower):\n c = line_lower[i]\n if c >= \"a\" and c <= \"z\" and nextCap:\n c = c.upper()\n nextCap = 0\n elif c in \" .,;:\\t-_\":\n nextCap = 1\n s += c\n i += 1\n return s",
"def test20():\n\tdef highlight_word(sentence, word):\n\t\treturn(\" \".join(x) for x in sentence.split())\n\n\tprint(highlight_word(\"Have a nice day\", \"nice\"))\n\tprint(highlight_word(\"Shhh, don't be so loud!\", \"loud\"))\n\tprint(highlight_word(\"Automating with Python is fun\", \"fun\"))",
"def lunderize(title):\n title = title.lower()\n title = title.replace(' ', '_')\n title = title.replace('.', '')\n return title",
"def create_word(char_list):",
"def capwords(s, sep=None):\n if sep is None:\n sep = ' '\n return sep.join(x.capitalize() for x in s.split(sep))\n #return (sep or ' ').join(x.capitalize() for x in s.split(sep))",
"def _truncate_name(orig_str, word_num):\n if not orig_str:\n return orig_str\n tokens = string_utils.tokenizer(orig_str)\n if len(tokens) > word_num:\n orig_str = ' '.join(tokens[:word_num])\n return orig_str",
"def cap_first(word):\n return word[0].upper() + word[1:]",
"def name_to_camel_case(name: str) -> str:\n\n s = name.lower().split('-')\n\n if len(name) == 0:\n return name\n\n return s[0] + ''.join(i.capitalize() for i in s[1:])",
"def camelcase_to_sentencecase(name: str) -> str:\n return stringcase.sentencecase(stringcase.snakecase(name)).lower()",
"def kebab_case(value: str, **kwargs: Any) -> str:\n return \"-\".join(split_words(value))",
"def mixed_snake_case(value: str, **kwargs: Any) -> str:\n return \"_\".join(split_words(value))"
] | [
"0.73462373",
"0.6970347",
"0.6930063",
"0.69166636",
"0.66168064",
"0.6470583",
"0.6462862",
"0.6323668",
"0.6291367",
"0.6219177",
"0.61823606",
"0.61714643",
"0.6143847",
"0.6097175",
"0.60751027",
"0.60740274",
"0.60687244",
"0.6060768",
"0.6060067",
"0.6053927",
"0.5989033",
"0.59860134",
"0.5924931",
"0.59247094",
"0.590119",
"0.58939",
"0.5885327",
"0.58656526",
"0.5856559",
"0.5838679",
"0.5838679",
"0.58227044",
"0.5806614",
"0.5806257",
"0.58026075",
"0.57946134",
"0.5781999",
"0.57679915",
"0.5752248",
"0.5735267",
"0.57333195",
"0.5724431",
"0.57215357",
"0.5717929",
"0.5710111",
"0.57043934",
"0.5694921",
"0.5694581",
"0.5679368",
"0.5675926",
"0.5671545",
"0.56675106",
"0.5653258",
"0.56471866",
"0.56346965",
"0.56319547",
"0.56318694",
"0.56302947",
"0.5625935",
"0.5622464",
"0.5574035",
"0.55730236",
"0.5561114",
"0.55542785",
"0.5552595",
"0.555202",
"0.55518085",
"0.5548536",
"0.55476946",
"0.5537306",
"0.5532685",
"0.55225605",
"0.54957014",
"0.5495512",
"0.5495336",
"0.54930526",
"0.5490292",
"0.5489157",
"0.54688436",
"0.5462376",
"0.5456233",
"0.545376",
"0.54436857",
"0.5442653",
"0.54413676",
"0.54367155",
"0.54211223",
"0.5419202",
"0.541788",
"0.5415548",
"0.5414035",
"0.5413759",
"0.54109144",
"0.54099464",
"0.54071623",
"0.54055566",
"0.54051083",
"0.539584",
"0.53955966",
"0.5390952"
] | 0.7841125 | 0 |
Takes a list of words and returns a string formatted to camel case. | def _camelify(words):
newText = ''
for word in words:
if newText == '':
newText = word[:1].lower() + word[1:]
else:
newText = '%s%s' % (newText, word.capitalize())
return newText | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def render_camel(var_words):\n return ''.join([word.capitalize() for word in var_words])",
"def format_camel(text):\r\n\toutput = \"\"\r\n\tfor i,w in enumerate(text.split(\" \")):\r\n\t\tif i > 0:\r\n\t\t\toutput += w[0].upper() + w[1:]\r\n\t\telse:\r\n\t\t\toutput += w\r\n\treturn output",
"def camel_case(value: str, **kwargs: Any) -> str:\n result = \"\".join(map(str.title, split_words(value)))\n return result[0].lower() + result[1:]",
"def camelcase(value):\n rest = value.split(\"_\")\n return rest[0] + \"\".join(word.title() for word in rest[1:])",
"def snake_to_camel(word):\n return ''.join(x.capitalize() or '_' for x in word.split('_'))",
"def snake_to_camel(word):\n return ''.join(x.capitalize() or '_' for x in word.split('_'))",
"def _to_camel_case(text: str) -> str:\n return \"\".join(word.title() for word in text.split(\"_\"))",
"def correctCasing(words):\n strings = words.split(' ')\n strings = [s[0].upper()+s[1:].lower() for s in strings if s]\n return ' '.join(strings)",
"def to_camelcase(s):\n words = re.split(\"[^a-zA-Z0-9]+\", s)\n return \"\".join(\n w.lower() if i is 0 else w.title() for i, w in enumerate(words))",
"def camel_case(text, separator='_'):\n return ''.join(map(str.capitalize, text.split(separator)))",
"def capitalize_text(text, words_list, marker=''):\n for word in words_list:\n text = capitalize_term_re(text, word, marker)\n return text",
"def to_camelCase(in_str):\n \n if in_str.find(' ') > -1:\n words = in_str.split(' ')\n elif in_str.find('_') > -1:\n words = in_str.split('_')\n else:\n return in_str.lower()\n \n first_word = words[0].lower()\n other_words = ''.join(w.title() for w in words[1:])\n \n return '%s%s' % (first_word, other_words)",
"def camelcase(string):\r\n return ''.join(word.capitalize() for word in string.split('_'))",
"def camel_filter(val):\n titlecase = val.title()\n return re.sub(r\"[\\W^_]\", \"\", titlecase)",
"def snake_to_camel(text: str) -> str:\n\n data = [\n i.capitalize()\n for i in text.split(\"_\")\n ]\n return \"\".join(data)",
"def convert_underscore_to_camel_case(text):\n words = text.split('_')\n words = [word.capitalize() for word in words]\n return ''.join(words)",
"def snake_to_camel(string):\n return \"\".join(word.title() for word in string.split(\"_\"))",
"def camel(s):\n return s[0].upper() + s[1:]",
"def snake_to_camel(string):\n \n camel_case = []\n\n for word in string.split(\"_\"):\n camel_case.append(word.title())\n\n \"\".join(camel_case)",
"def snake_to_camel_case(value):\n words = value.strip(\"_\").split(\"_\")\n return words[0].lower() + \"\".join([word.capitalize() for word in words[1:]])",
"def snake_to_camel(name):\n return \"\".join([piece.capitalize() for piece in name.split(\"_\")])",
"def underscored2camel_case(v):\n vlist = v.split('_')\n c = []\n for n, el in enumerate(vlist):\n if el:\n if n == 0:\n c.append(el)\n else:\n c.extend([el[0].upper(), el[1:]])\n return ''.join(c)",
"def CamelCase(text, separator='_'):\n return ''.join(map(str.capitalize, text.split(separator)))",
"def underscore_to_camelcase(word, initial_capital=False):\n words = [x.capitalize() or \"_\" for x in word.split(\"_\")]\n if not initial_capital:\n words[0] = words[0].lower()\n\n return \"\".join(words)",
"def space_out_camel_case(camel):\r\n chars = []\r\n\r\n for char in camel:\r\n if len(chars) >= 2 and chars[-1] != ' ':\r\n if char.isupper() and chars[-1].islower():\r\n chars.append(' ')\r\n elif char.islower() and chars[-1].isupper() and chars[-2].isupper():\r\n chars.insert(len(chars) - 1, ' ')\r\n\r\n chars.append(char)\r\n\r\n return ''.join(chars)",
"def snake_to_camel_case(snake_str: str) -> str:\n\n words = snake_str.strip(\"_\").split(\"_\")\n return words[0] + \"\".join(word[:1].upper() + word[1:] for word in words[1:])",
"def to_camel_case(string: str):\n return \"\".join(\n word.title() if idx > 0 else word for idx, word in enumerate(string.split(\"_\"))\n )",
"def pascal_case(value: str, **kwargs: Any) -> str:\n return \"\".join(map(str.title, split_words(value)))",
"def underscore_to_camelcase(text):\n text = text.replace('_', ' ').title()\n return text.replace(' ', '')",
"def titlecase(input_str):\n return \"\".join([x.title() for x in input_str.split('_')])",
"def print_upper_words(words):\n for word in words:\n print(word.upper())",
"def snake_to_camel_case(string: str):\n return ''.join(string_component.title() for string_component in string.split('_'))",
"def snake_to_camel(snake_str):\n title_str = snake_str.split('_')\n return ' '.join(title_str).title()",
"def CamelCaseToOutputFriendly(string):\n return re.sub('([A-Z]+)', r' \\1', string).strip().lower()",
"def snake_to_camel(string):\n camel_case = []\n\n for word in string.split('_'):\n camel_case.append(word[0].upper() + word[1:])\n\n return ''.join(camel_case)",
"def print_upper_words(words):\n \n for word in words:\n print(word.upper())",
"def underToAllCaps(value): # pragma: no cover\n return ' '.join(map(lambda x: x.title(), value.split('_')))",
"def to_camel(string, capitalize):\n\n ret = string.lower()\n words = re.split(\"[\\s_]\", ret)\n words = list(map(lambda x: x.capitalize(), words))\n if not capitalize:\n words[0] = words[0].lower()\n return \"\".join(words)",
"def name_to_camel_case(name: str) -> str:\n\n s = name.lower().split('-')\n\n if len(name) == 0:\n return name\n\n return s[0] + ''.join(i.capitalize() for i in s[1:])",
"def format_words(words):\n return sorted(words, key=str.lower)",
"def word_capital(text):\n if text and len(text) > 0:\n return ' '.join([s[0].upper() + s[1:] for s in text.split(' ') if len(s) > 0])\n else:\n return text",
"def ucwords(string):\n erg=[ item.capitalize() for item in string.split( ' ' ) ]\n return ' '.join( erg )",
"def titleize(title):\n titleized = []\n for idx, word in enumerate(title.split()):\n if idx == 0 or word not in ['a', 'of', 'in', 'the', 'v']:\n word = word.capitalize()\n\n titleized.append(word)\n\n return ' '.join(titleized)",
"def __process_word(self, word):\n output = ''\n capitals = self.__capital_positions(word)\n c_index = 0\n\n for c in word:\n if c_index in capitals:\n output += c.upper()\n else:\n output += c.lower()\n\n c_index += 1\n\n return output",
"def camel_case_to_readable(text: str) -> str:\n if text == 'id':\n return 'ID'\n return ''.join(' ' + char if char.isupper() else char.strip() for char in text).strip().title()",
"def un_camel_case(text, separator='_'):\n split = re.findall(r'(?:[A-Z][a-z0-9]*|[a-z0.9]+)', text)\n split = map(str.lower, split)\n split = list(split)\n\n words = []\n\n while len(split) > 0:\n word = split[0]\n split = split[1:]\n\n if len(word) == 1:\n while (len(split) > 0) and (len(split[0]) == 1):\n word += split[0]\n split = split[1:]\n\n words.append(word)\n\n return separator.join(words)",
"def convert_camel_case_to_underscore(text):\n words = []\n last_capital_index = 0\n\n for current_letter_index in xrange(1, len(text)):\n if text[current_letter_index].isupper():\n words.append(text[last_capital_index:current_letter_index])\n last_capital_index = current_letter_index\n elif current_letter_index == len(text) - 1:\n words.append(text[last_capital_index:])\n\n return '_'.join(words).lower()",
"def snake_case_to_camel_case(s, separator='_'):\n return s.title().replace(separator, '')",
"def test_underscore_separated_words_to_camel_case(underscore_words, expected_output):\n assert underscore_separated_words_to_camel_case(underscore_words) == expected_output",
"def camel_case_text(text):\n newText = format_camel_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})",
"def to_upper_camelcase(name):\n return re.sub(r'(?:\\B_|\\b\\-|^)([a-zA-Z0-9])', lambda l: l.group(1).upper(),\n name)",
"def _snake_to_camel(name, strict=False):\n if strict:\n name = name.lower()\n terms = name.split('_')\n return terms[0] + ''.join([term.capitalize() for term in terms[1:]])",
"def _make_name(words):\n return \" \".join(words)",
"def camelcase_to_sentencecase(name: str) -> str:\n return stringcase.sentencecase(stringcase.snakecase(name)).lower()",
"def camel_to_underscore(name):\n as_list = []\n length = len(name)\n for index, i in enumerate(name):\n if index != 0 and index != length - 1 and i.isupper():\n as_list.append('_%s' % i.lower())\n else:\n as_list.append(i.lower())\n\n return ''.join(as_list)",
"def titlecase(\n s,\n exceptions=[\"and\", \"in\", \"a\"],\n abbrv=[\"ID\", \"IGSN\", \"CIA\", \"CIW\", \"PIA\", \"SAR\", \"SiTiIndex\", \"WIP\"],\n capitalize_first=True,\n split_on=r\"[\\.\\s_-]+\",\n delim=\"\",\n):\n # Check if abbrv in string, in which case it'll need to be split first?\n words = re.split(split_on, s)\n out = []\n first = words[0]\n if capitalize_first and not (first in abbrv):\n first = first.capitalize()\n\n out.append(first)\n for word in words[1:]:\n if word in exceptions + abbrv:\n pass\n elif word.upper() in abbrv:\n word = word.upper()\n else:\n word = word.capitalize()\n out.append(word)\n return delim.join(out)",
"def titleize(phrase):\n nl = phrase.split(' ')\n o=[]\n for i in nl:\n o.append(i[0].upper()+i[1:].lower())\n print(' '.join(o))",
"def change_to_uppercase(arg_list):\n result = []\n\n for element_list in arg_list:\n result.append(element_list.upper())\n\n return result",
"def to_camel(id):\n return ''.join(cap_first(x) for x in id.split('_'))",
"def camelize(name):\n return ''.join([bit.capitalize() for bit in name.split('_')])",
"def string_to_camel_case(string:str) -> str:\n\tresult_camel_case = str()\n\tsym = iter(string)\n\tfor symb in sym:\n\t if symb == \"_\":\n\t symbol = next(sym)\n\t symb = \"\" + symbol.upper()\n\t result_camel_case += symb\n\treturn result_camel_case",
"def title(value):\n capped = [char for char in string.capwords(value.replace(\"_\", \" \"))]\n\n # If a string also contains some letters after an apostrophe, we should capitalize that\n # letter... (ie: O'ryan's Charm -> O'Ryan's Charm).\n for index, char in enumerate(capped):\n if char is \"'\":\n if index + 1 <= len(capped):\n if capped[index + 2] != ' ':\n capped[index + 1] = capped[index + 1].upper()\n\n return \"\".join(capped)",
"def capwords(s, sep=None):\n if sep is None:\n sep = ' '\n return sep.join(x.capitalize() for x in s.split(sep))\n #return (sep or ' ').join(x.capitalize() for x in s.split(sep))",
"def convert_camel_case(name):\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', name)\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()",
"def capify(text):\n return text[0].upper() + text[1:]",
"def snake_to_camel(name):\n if name == \"role_arn\":\n return \"roleARN\"\n temp = name.split(\"_\")\n return temp[0] + \"\".join(ele.title() for ele in temp[1:])",
"def titleize(phrase):\n words = phrase.split(' ')\n results = ''\n for i in range(len(words)):\n word = ''\n for j in range(len(words[i])):\n if(j == 0):\n word = words[i][j].upper() \n else:\n word += words[i][j].lower()\n results += word\n if(i < len(words) - 1):\n results += ' '\n return results",
"def snake_to_camel_case(name: str, initial: bool = False) -> str:\n chunks = name.split('_')\n converted = [s.capitalize() for s in chunks]\n if initial:\n return ''.join(converted)\n else:\n return chunks[0].lower() + ''.join(converted[1:])",
"def name_camel(self) -> str:\n # We want to use any of the customization applied to name_title\n # so let's just give _name_title with spaces stripped out.\n return self._name_title.replace(' ', '')",
"def to_lowercase(words):\r\n new_words = []\r\n for word in words:\r\n new_word = word.lower()\r\n new_words.append(new_word)\r\n return new_words",
"def snake_case_to_headless_camel_case(snake_string):\n return ''.join([snake_string.split('_')[0]] +\n list(sub_string.capitalize()\n for sub_string in snake_string.split('_')[1:]))",
"def _case_convert_snake_to_camel(token: str) -> str:\n while True:\n try:\n # find next underscore\n underscore_loc = token.index('_')\n except ValueError:\n # converted all underscores\n break\n # is the underscore at the end of the string?\n if underscore_loc == len(token) - 1:\n break\n\n orig = token\n token = f'{orig[:underscore_loc]}{orig[underscore_loc+1].upper()}'\n # is there more after the capital?\n if len(orig) > underscore_loc+2:\n token += f'{orig[underscore_loc+2:]}'\n return token",
"def titlecase(original: str, delimiter: str = \" \", small_words: list = None) -> str:\n _small_words = [\"of\", \"in\", \"at\", \"to\", \"the\", \"on\", \"an\", \"a\"]\n if small_words:\n _small_words = list(set(_small_words + small_words))\n\n original_splitted = original.split(delimiter)\n result = []\n\n for word in original_splitted:\n word = word.lower()\n if word in _small_words:\n result.append(word)\n else:\n result.append(word.capitalize())\n\n return delimiter.join(result)",
"def to_lower(self, word_list):\n return [word.lower() for word in word_list]",
"def invert_capitalization(word):\n if word.islower():\n return word.upper()\n else:\n return word.lower()",
"def capitalize(a):\n a_arr = numpy.asarray(a)\n return _vec_string(a_arr, a_arr.dtype, 'capitalize')",
"def to_lowercase(words):\n new_words = []\n for word in words:\n new_word = word.lower()\n new_words.append(new_word)\n return new_words",
"def to_lowercase(words):\n new_words = []\n for word in words:\n new_word = word.lower()\n new_words.append(new_word)\n return new_words",
"def to_lowercase(words):\n new_words = []\n for word in words:\n new_word = word.lower()\n new_words.append(new_word)\n return new_words",
"def to_lowercase(words):\n new_words = []\n for word in words:\n new_word = word.lower()\n new_words.append(new_word)\n return new_words",
"def to_lowercase(words):\n new_words = []\n for word in words:\n new_word = word.lower()\n new_words.append(new_word)\n return new_words",
"def to_lowercase(words):\n new_words = []\n for word in words:\n new_word = word.lower()\n new_words.append(new_word)\n return new_words",
"def to_lowercase(words):\n new_words = []\n for word in words:\n new_word = word.lower()\n new_words.append(new_word)\n return new_words",
"def to_lowercase(words: list) -> list:\n new_words = []\n for word in words:\n new_word = word.lower()\n new_words.append(new_word)\n return new_words",
"def title_case(s):\n if not s:\n return None\n\n s = s.lower()\n parts = s.split(' ')\n lower_case = (\n 'a', 'an', 'and', 'as', 'at', 'by', 'for', 'in', 'of', 'on', 'or',\n 'the', 'to', 'with'\n )\n\n parts[0] = parts[0].title()\n parts = map(\n lambda part: part.title() if part not in lower_case else part,\n parts\n )\n\n return ' '.join(parts)",
"def camel_to_spaces(s):\n subbed = _underscorer1.sub(r'\\1 \\2', s)\n return _underscorer2.sub(r'\\1 \\2', subbed).lower()",
"def capify(word, reference):\n new_word = \"\"\n\n # First check whole word before char-by-char\n if reference.islower():\n return word.lower()\n elif reference.isupper():\n return word.upper()\n\n # Char-by-char checks\n for i, c in enumerate(reference):\n if c.isupper():\n new_word += word[i].upper()\n else:\n new_word += word[i]\n return new_word",
"def toLowercase(self, words):\n\t\tnewWords = [word.lower() for word in words]\n\t\treturn newWords",
"def createtext(lst):\n newlst = []\n for item in lst:\n item = item.replace(\"_!\",\"\")\n newlst.append(item)\n text = ' '.join(newlst)\n # Lower-casing\n return text.lower()",
"def camel_case_to_lower_case_underscore(string):\n words = []\n from_char_position = 0\n for current_char_position, char in enumerate(string):\n if char.isupper() and from_char_position < current_char_position:\n words.append(\n string[from_char_position:current_char_position].lower())\n from_char_position = current_char_position\n words.append(string[from_char_position:].lower())\n return '_'.join(words)",
"def to_lower_camelcase(name):\n return re.sub(r'(?:\\B_|\\b\\-)([a-zA-Z0-9])', lambda l: l.group(1).upper(),\n name)",
"def capsentence(value):\n value = value.lower()\n return \". \".join([sentence.capitalize() for sentence in value.split(\". \")])",
"def capitalizeFirst(word):\n return word[0].upper() + word[1:]",
"def to_lowercase(words):\n new_words = []\n for word in words:\n new_word = word.lower()\n new_words.append(new_word)\n return(new_words)",
"def to_camel_case(string: str, first_lower: bool = False) -> str:\n if first_lower:\n first, _, rest = string.partition(\"_\")\n else:\n first, rest = (\"\", string)\n return first.lower() + \"\".join(part.capitalize() for part in rest.split(\"_\"))",
"def pascal_case(st) -> str:\n if st.find('-') != -1 or st.find('_') != -1:\n st = ''.join(a.capitalize() for a in re.split('-|_', st))\n return st[:1].upper() + st[1: len(st)]",
"def split_uppercase(word):\r\n final_word = ''\r\n for i in word:\r\n final_word += ' %s' % i if i.isupper() else i\r\n\r\n return final_word.strip()",
"def _to_lowercase(words):\n new_words = []\n for word in words:\n new_word = word.lower()\n new_words.append(new_word)\n return new_words",
"def camelize(s):\n return ''.join(s.replace('_', ' ').title().split())",
"def ladcased(normal):\r\n\r\n ladified = ''\r\n for i, c in enumerate(normal):\r\n ladified += c.lower() if (i % 2 == 0) else c.upper()\r\n\r\n return ladified"
] | [
"0.8251883",
"0.7825327",
"0.7563968",
"0.7526195",
"0.7506611",
"0.7506611",
"0.7482835",
"0.72373885",
"0.7220596",
"0.7182877",
"0.7105976",
"0.7050961",
"0.70338947",
"0.7015833",
"0.70085067",
"0.6960694",
"0.6957555",
"0.6947957",
"0.69223166",
"0.69102275",
"0.6900598",
"0.6872665",
"0.6864597",
"0.68355256",
"0.68123627",
"0.67573637",
"0.67450446",
"0.6735121",
"0.6732642",
"0.67188543",
"0.67146754",
"0.6711752",
"0.6709082",
"0.6698284",
"0.6692669",
"0.6683966",
"0.66548854",
"0.66398776",
"0.66146594",
"0.6613792",
"0.65854406",
"0.65581757",
"0.653689",
"0.6532509",
"0.6522198",
"0.64857507",
"0.647851",
"0.6470099",
"0.64104176",
"0.6401772",
"0.6398955",
"0.6397854",
"0.63948375",
"0.63830066",
"0.63761455",
"0.6367162",
"0.6359659",
"0.6342194",
"0.6320877",
"0.63095766",
"0.62939125",
"0.62872803",
"0.62781334",
"0.6256351",
"0.6246845",
"0.62421364",
"0.6239363",
"0.6233103",
"0.62208545",
"0.6215896",
"0.6207594",
"0.62034047",
"0.62033105",
"0.6189621",
"0.6189016",
"0.61832154",
"0.61809313",
"0.61809313",
"0.61809313",
"0.61809313",
"0.61809313",
"0.61809313",
"0.61809313",
"0.6166046",
"0.6158963",
"0.6147767",
"0.61454815",
"0.6143803",
"0.61432946",
"0.61411625",
"0.6137508",
"0.61313385",
"0.6130962",
"0.6129702",
"0.61290425",
"0.61192876",
"0.6117619",
"0.6114683",
"0.61024266",
"0.6099238"
] | 0.8511594 | 0 |
Formats dictated text to pascal case. | def pascal_case_text(text):
newText = format_pascal_case(text)
Text("%(text)s").execute({"text": newText}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_pascal_case_to_readable_format(text):\n return \"\".join(list(map(lambda x : \" \" + x.lower() if x.isupper() else x, list(text))))",
"def pascal_case(value: str, **kwargs: Any) -> str:\n return \"\".join(map(str.title, split_words(value)))",
"def mixed_pascal_case(value: str, **kwargs: Any) -> str:\n return capitalize(mixed_case(value))",
"def pascal_case(st) -> str:\n if st.find('-') != -1 or st.find('_') != -1:\n st = ''.join(a.capitalize() for a in re.split('-|_', st))\n return st[:1].upper() + st[1: len(st)]",
"def pascalcase(string):\n\n return capitalcase(camelcase(string))",
"def snake_to_pascal(string):\n return string[0].upper() + re.sub('_([a-z])', lambda match: match.group(1).upper(), string[1:])",
"def format_camel(text):\r\n\toutput = \"\"\r\n\tfor i,w in enumerate(text.split(\" \")):\r\n\t\tif i > 0:\r\n\t\t\toutput += w[0].upper() + w[1:]\r\n\t\telse:\r\n\t\t\toutput += w\r\n\treturn output",
"def snake_case_text(text):\n newText = format_snake_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})",
"def camel_case_text(text):\n newText = format_camel_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})",
"def capify(text):\n return text[0].upper() + text[1:]",
"def proper_title_case(s):\n nocaps = [\"the\"] # This needs to be extended.",
"def LCase(text):\n return text.lower()",
"def UCase(text):\n return text.upper()",
"def __process_word(self, word):\n output = ''\n capitals = self.__capital_positions(word)\n c_index = 0\n\n for c in word:\n if c_index in capitals:\n output += c.upper()\n else:\n output += c.lower()\n\n c_index += 1\n\n return output",
"def preprocess(text):\n return text.lower()",
"def _transliterate_text(self, _text):\n return _text.upper()",
"def _camel_case_to_snake_case(text: str) -> str:\n return re.sub(r\"(?<!^)(?=[A-Z])\", \"_\", text).lower()",
"def camel_case_to_readable(text: str) -> str:\n if text == 'id':\n return 'ID'\n return ''.join(' ' + char if char.isupper() else char.strip() for char in text).strip().title()",
"def snake_to_camel(text: str) -> str:\n\n data = [\n i.capitalize()\n for i in text.split(\"_\")\n ]\n return \"\".join(data)",
"def _format(text):\n \n if isinstance(text, unicode):\n return text.lower().encode(\"UTF-8\")\n elif isinstance(text, str):\n return text.lower()",
"def _to_camel_case(text: str) -> str:\n return \"\".join(word.title() for word in text.split(\"_\"))",
"def titleize(phrase):\n nl = phrase.split(' ')\n o=[]\n for i in nl:\n o.append(i[0].upper()+i[1:].lower())\n print(' '.join(o))",
"def convert_to_uppercase(text):\n return text.upper()",
"def normalize_case(text):\n text = str(text)\n return text.lower()",
"def case(text, casingformat='sentence'):\n\n # If the lowercase version of the casing format is 'uppercase'\n if casingformat.lower() == 'uppercase':\n # Return the uppercase version\n return str(text.upper())\n\n # If the lowercase version of the casing format is 'lowercase'\n elif casingformat.lower() == 'lowercase':\n # Return the lowercase version\n return str(text.lower())\n\n # If the lowercase version of the casing format is 'sentence'\n elif casingformat.lower() == 'sentence':\n # Return the sentence case version\n return str(text[0].upper()) + str(text[1:])\n\n # If the lowercase version of the casing format is 'caterpillar'\n elif casingformat.lower() == 'caterpillar':\n # Return the caterpillar case version\n return str(text.lower().replace(\" \", \"_\"))\n\n # Raise a warning\n raise ValueError(\"Invalid text format specified.\")",
"def to_upper_snakecase(text: str) -> str:\n\n data = text.replace(\" \", \"_\").upper()\n if data[0].isdigit():\n data = \"_\" + data\n return data",
"def _camelify(words):\n newText = ''\n for word in words:\n if newText == '':\n newText = word[:1].lower() + word[1:]\n else:\n newText = '%s%s' % (newText, word.capitalize())\n return newText",
"def process_text(text, args):\n if args.uppercase:\n text = convert_to_uppercase(text)\n\n if args.spaces:\n text = add_spaces(text)\n\n if not args.uppercase and not args.spaces:\n text = add_spaces(text)\n\n return text",
"def UPPER(text):\n return text.upper()",
"def CamelCase_to_snake_case(text):\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', text)\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()",
"def process(self, s):\n # modified for project...\n return s.upper()",
"def LOWER(text):\n return text.lower()",
"def tr_upper_to_lower(text):\n out = []\n for ch in text:\n if ch in tr_upper_to_lower_dict:\n out.append(tr_upper_to_lower_dict[ch])\n else:\n out.append(ch.lower())\n \n return \"\".join(out)",
"def lowercase_text(text):\n newText = format_lower_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})",
"def to_lower(self, text):\n return text.lower()",
"def render_camel(var_words):\n return ''.join([word.capitalize() for word in var_words])",
"def convert_underscore_to_camel_case(text):\n words = text.split('_')\n words = [word.capitalize() for word in words]\n return ''.join(words)",
"def not_capitalized(): # noqa: D416",
"def normalize(text):\n return text.lower().translate(TRANSLATION_TABLE)",
"def pascal_case_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n text = _cleanup_text(cutText)\n newText = text.title().replace(' ', '')\n if endSpace:\n newText = newText + ' '\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)",
"def pascal_to_snake(string):\n return string[0].lower() + re.sub('[A-Z]', lambda match: '_' + match.group(0).lower(), string[1:])",
"def convert_camel_case_to_underscore(text):\n words = []\n last_capital_index = 0\n\n for current_letter_index in xrange(1, len(text)):\n if text[current_letter_index].isupper():\n words.append(text[last_capital_index:current_letter_index])\n last_capital_index = current_letter_index\n elif current_letter_index == len(text) - 1:\n words.append(text[last_capital_index:])\n\n return '_'.join(words).lower()",
"def word_capital(text):\n if text and len(text) > 0:\n return ' '.join([s[0].upper() + s[1:] for s in text.split(' ') if len(s) > 0])\n else:\n return text",
"def titleize(phrase):\n words = phrase.split(' ')\n results = ''\n for i in range(len(words)):\n word = ''\n for j in range(len(words[i])):\n if(j == 0):\n word = words[i][j].upper() \n else:\n word += words[i][j].lower()\n results += word\n if(i < len(words) - 1):\n results += ' '\n return results",
"def get_casing(word):\n if len(word) == 0:\n return \"other\"\n elif word.isdigit(): # Is a digit\n return \"numeric\"\n elif word.islower(): # All lower case\n return \"allLower\"\n elif word.isupper(): # All upper case\n return \"allUpper\"\n # is a title, initial char upper, then all lower\n elif word[0].isupper():\n return \"initialUpper\"\n\n return \"other\"",
"def _snake_case(display_name):\n str_re = re.compile('[{0}]'.format(re.escape(string.punctuation)))\n str = str_re.sub(' ', display_name)\n str = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', str)\n str = re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', str).lower()\n return re.sub(' +', '_', str)",
"def test_capitalize(self):\n self.assertEqual(\n minerals_extras.capitalize('mohs scale hardness'),\n 'Mohs Scale Hardness')",
"def no_caps_and_ponctuation(text):\n return re.sub(r'[^\\w\\s]', '', text).lower()",
"def dec(text):\n formatted = '%s%s.' % (text[0].capitalize(), text[1:len(text)])\n return formatted",
"def underscore_to_camelcase(text):\n text = text.replace('_', ' ').title()\n return text.replace(' ', '')",
"def _lowercase(text: str) -> str:\n return text.lower()",
"def lower(self) -> str:",
"def titlecase(input_str):\n return \"\".join([x.title() for x in input_str.split('_')])",
"def detect_case(text):\n\n parts = split_by_case(text, 'underscore')\n if not parts:\n # text is collection of underscores\n return 'other'\n\n if not all(part.isalnum() for part in parts):\n # one or more text part contains not alpha-numeric characters\n return 'other'\n\n if len(parts) != 1:\n return 'underscore'\n\n parts = split_by_case(parts[0], 'camel')\n if parts[0][0].isupper(): # check first character\n return 'title'\n\n # first character lower or not letter\n\n if len(parts) == 1:\n return 'mixed'\n\n return 'camel'",
"def uppersnakecase(string):\n\n return uppercase(snakecase(string))",
"def to_upper(self, text):\n\t\treturn text.upper()",
"def lower(text):\n text = text.lower()\n return text",
"def camel_case(text, separator='_'):\n return ''.join(map(str.capitalize, text.split(separator)))",
"def words_capital_letter(self, path, filemoving, parser):\n root = parser.parsing_xml(path, filemoving)\n root_tag = root.tag[0:(root.tag.find('}') + 1)]\n number_of_words_capital_letter = 0\n for i in root.iter(root_tag+'p'):\n if str(type(i.text)) == \"<class 'str'>\":\n number_of_words_capital_letter = number_of_words_capital_letter + sum(list(map(lambda x: x.istitle(), i.text.split())))\n return number_of_words_capital_letter",
"def normalize_text(text):\n return normalize_case(normalize_punctuation(text))",
"def invert_capitalization(word):\n if word.islower():\n return word.upper()\n else:\n return word.lower()",
"def uppercase_text(text):\n newText = format_upper_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})",
"def getCasing(word):\n casing = 'other'\n \n numDigits = 0\n for char in word:\n if char.isdigit():\n numDigits += 1\n \n digitFraction = numDigits / float(len(word))\n \n if word.isdigit(): #Is a digit\n casing = 'numeric'\n elif digitFraction > 0.5:\n casing = 'mainly_numeric'\n elif word.islower(): #All lower case\n casing = 'allLower'\n elif word.isupper(): #All upper case\n casing = 'allUpper'\n elif word[0].isupper(): #is a title, initial char upper, then all lower\n casing = 'initialUpper'\n elif numDigits > 0:\n casing = 'contains_digit'\n \n return casing",
"def capitalize(self):\n return asarray(capitalize(self))",
"def CamelCase(text, separator='_'):\n return ''.join(map(str.capitalize, text.split(separator)))",
"def PROPER(text):\n return text.title()",
"def correct_cap(title):\n try:\n fl = fln[title]\n return title\n except:\n #capitalize first letter only\n try:\n fl = fln[title[0].upper() + title[1:]]\n return title[0].upper() + title[1:]\n except:\n #try title case\n try:\n fl = fln[title.title()]\n return title.title()\n except KeyError:\n return \"\"",
"def task_1_fix_names_start_letter(data: DT) -> DT:\n for dic in data:\n if dic.get('name'):\n dic['name'] = dic['name'].capitalize()\n return data",
"def _nice_case(line):\n line_lower = line.lower()\n s = \"\"\n i = 0\n nextCap = 1\n while i < len(line_lower):\n c = line_lower[i]\n if c >= \"a\" and c <= \"z\" and nextCap:\n c = c.upper()\n nextCap = 0\n elif c in \" .,;:\\t-_\":\n nextCap = 1\n s += c\n i += 1\n return s",
"def camelcase_to_underscore(text):\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', text)\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()",
"def clean_cases(text):\n return text.lower()",
"def capitalize(result):\n\treturn result.upper()",
"def preprocess_text(self):\n self.text_received = self.text_received.replace(\" \", \"\").lower()",
"def standardize(text):\n # FIXME regex restricts us to only ascii\n # FIXME move regex compilation outside\n p = re.compile('[^a-zA-Z]')\n retval = p.sub('', text)\n retval = retval.lower()\n return retval",
"def toLowerCase(self) -> None:\n self.text = self.text.lower()",
"def title(value):\n capped = [char for char in string.capwords(value.replace(\"_\", \" \"))]\n\n # If a string also contains some letters after an apostrophe, we should capitalize that\n # letter... (ie: O'ryan's Charm -> O'Ryan's Charm).\n for index, char in enumerate(capped):\n if char is \"'\":\n if index + 1 <= len(capped):\n if capped[index + 2] != ' ':\n capped[index + 1] = capped[index + 1].upper()\n\n return \"\".join(capped)",
"def fix_string_case(text):\n fixed = []\n for i in text:\n if is_case_sensitive(i):\n fixed.append(i)\n else:\n fixed.append(i.lower())\n return ''.join(fixed)",
"def snake_to_camel(word):\n return ''.join(x.capitalize() or '_' for x in word.split('_'))",
"def snake_to_camel(word):\n return ''.join(x.capitalize() or '_' for x in word.split('_'))",
"def pascal_to_snake(s: str) -> str:\n converted = name_pattern.sub('_', s).lower().replace('::', '/')\n\n # We end up with some '/_' so we remove the underscore.\n return converted.replace('/_', '/')",
"def change_title(s):\n\ts = re.sub(r\"[A-Za-z]+('[A-Za-z]+)?\",\n lambda mo: mo.group(0)[0].upper() +\n mo.group(0)[1:].lower(),s)\n\ts = s.split(\" \")\n\tfor i in range(len(s)):\n\t\tif (s[i] in \"Ii Iii Iv Vi Vii Viii Ix Ii: Iii: Iv: Vi: Vii: Viii: Ix:\"):\n\t\t\ts[i] = s[i].upper()\n\treturn \" \".join(s)",
"def asciify(text: str) -> str:\n return \"\".join(\n filter(\n lambda x: x in list(string.ascii_letters) or x.isspace(), \n unidecode.unidecode(text).lower()\n )\n )",
"def make_alphabetic(text):\n text = re.sub(r'[^A-Za-z\\s]', '', text)\n return text.lower()",
"def to_lowercase(text: str) -> str:\n text = text.lower()\n return text",
"def capitalize(self) -> String:\n pass",
"def snakecase_to_sentencecase(name: str) -> str:\n return stringcase.sentencecase(name).lower()",
"def ladcased(normal):\r\n\r\n ladified = ''\r\n for i, c in enumerate(normal):\r\n ladified += c.lower() if (i % 2 == 0) else c.upper()\r\n\r\n return ladified",
"def to_camelCase(in_str):\n \n if in_str.find(' ') > -1:\n words = in_str.split(' ')\n elif in_str.find('_') > -1:\n words = in_str.split('_')\n else:\n return in_str.lower()\n \n first_word = words[0].lower()\n other_words = ''.join(w.title() for w in words[1:])\n \n return '%s%s' % (first_word, other_words)",
"def _format_dict(self, dict):\n\n result = \"\"\n for k, v in dict.items():\n result += \"\\n{0}: {1}\".format(k.capitalize(), v)\n\n return result",
"def keyify(text):\n text = text.lower()\n text = text.strip()\n\n text = text.replace('.', '')\n text = re.sub('[,-]', ' ', text)\n text = re.sub('\\s{2,}', ' ', text)\n\n return text",
"def capitalize_text(text, words_list, marker=''):\n for word in words_list:\n text = capitalize_term_re(text, word, marker)\n return text",
"def titlecase(\n s,\n exceptions=[\"and\", \"in\", \"a\"],\n abbrv=[\"ID\", \"IGSN\", \"CIA\", \"CIW\", \"PIA\", \"SAR\", \"SiTiIndex\", \"WIP\"],\n capitalize_first=True,\n split_on=r\"[\\.\\s_-]+\",\n delim=\"\",\n):\n # Check if abbrv in string, in which case it'll need to be split first?\n words = re.split(split_on, s)\n out = []\n first = words[0]\n if capitalize_first and not (first in abbrv):\n first = first.capitalize()\n\n out.append(first)\n for word in words[1:]:\n if word in exceptions + abbrv:\n pass\n elif word.upper() in abbrv:\n word = word.upper()\n else:\n word = word.capitalize()\n out.append(word)\n return delim.join(out)",
"def encrypt(self, text):\n text = text.upper()\n output = []\n text_list = list(text)\n for letter in text_list:\n output.append(self.atbash_dict.get(letter, letter))\n return ''.join(output)",
"def underToAllCaps(value): # pragma: no cover\n return ' '.join(map(lambda x: x.title(), value.split('_')))",
"def toPascelCase(name, perserveSep = False):\n\n if name == None or name.isspace():\n return \"\"\n\n name = toCamelCase(name, perserveSep)\n rslt = name[0].upper()\n\n i = 1\n while i < len(name):\n rslt += name[i]\n i += 1\n \n return rslt",
"def camel_filter(val):\n titlecase = val.title()\n return re.sub(r\"[\\W^_]\", \"\", titlecase)",
"def capitalize_sentences(text: str) -> str:\n punctuation = ['.', '?','!']\n text = list(text)\n \n for i in range(len(text)-2):\n if i == 0:\n text[i] = text[i].upper()\n if text[i] in punctuation and text[i+1] == ' ':\n text[i+2] = text[i+2].upper()\n \n return ''.join(text)",
"def _capitalize_name(player_name: str) -> str:\n # Remove accents and replace final sigmas with normal ones\n player_name = player_name.translate(\n str.maketrans(\n {\n \"ά\": \"α\",\n \"Ά\": \"α\",\n \"έ\": \"ε\",\n \"Έ\": \"ε\",\n \"ί\": \"ι\",\n \"Ί\": \"ι\",\n \"ή\": \"η\",\n \"Ή\": \"η\",\n \"ύ\": \"υ\",\n \"Ύ\": \"υ\",\n \"ό\": \"ο\",\n \"Ό\": \"o\",\n \"ώ\": \"ω\",\n \"Ώ\": \"ω\",\n \"ς\": \"σ\",\n }\n )\n )\n\n player_name = player_name.upper()\n return player_name",
"def snake_to_camel_case(name: str, initial: bool = False) -> str:\n chunks = name.split('_')\n converted = [s.capitalize() for s in chunks]\n if initial:\n return ''.join(converted)\n else:\n return chunks[0].lower() + ''.join(converted[1:])",
"def titleize(phrase):\n # Python has a `title()` methods for string\n return phrase.title()\n\n # SB Alternative approach\n # return ' '.join([s.capitalize() for s in phrase.split(' ')])"
] | [
"0.8325709",
"0.7577789",
"0.7054179",
"0.6948193",
"0.67188567",
"0.66129065",
"0.64995056",
"0.64025515",
"0.6349463",
"0.633064",
"0.6246584",
"0.6222095",
"0.6209781",
"0.6185109",
"0.61772996",
"0.6169017",
"0.6150109",
"0.6128363",
"0.6115912",
"0.6075849",
"0.60635227",
"0.6053344",
"0.6049605",
"0.60489094",
"0.60287654",
"0.5980917",
"0.5979099",
"0.59762156",
"0.5971847",
"0.5963734",
"0.5949988",
"0.5939137",
"0.59169567",
"0.59105986",
"0.5893016",
"0.5887076",
"0.58780277",
"0.58694065",
"0.58556706",
"0.5829477",
"0.5816906",
"0.5811029",
"0.58069867",
"0.58006835",
"0.57957727",
"0.57945085",
"0.5780057",
"0.577967",
"0.5776894",
"0.57633704",
"0.5731962",
"0.56764936",
"0.5658518",
"0.56557107",
"0.56505847",
"0.56472415",
"0.5643263",
"0.56319475",
"0.56285614",
"0.56120205",
"0.56097",
"0.56004244",
"0.5598289",
"0.5585334",
"0.5580224",
"0.5576608",
"0.55473465",
"0.5547338",
"0.5543868",
"0.55334085",
"0.55312467",
"0.5525957",
"0.5504013",
"0.55033845",
"0.5499373",
"0.54958034",
"0.5494973",
"0.5480892",
"0.5480892",
"0.548009",
"0.54644036",
"0.5462639",
"0.54612595",
"0.54512614",
"0.5433926",
"0.54320806",
"0.5425249",
"0.539704",
"0.53914577",
"0.5382666",
"0.5378177",
"0.5374794",
"0.53729916",
"0.5372486",
"0.5370805",
"0.5361622",
"0.535935",
"0.5359349",
"0.5354904",
"0.5353411"
] | 0.8074431 | 1 |
Formats n words to the left of the cursor to pascal case. Note that word count differs between editors and programming languages. The examples are all from Eclipse/Python. | def pascal_case_count(n):
saveText = _get_clipboard_text()
cutText = _select_and_cut_text(n)
if cutText:
endSpace = cutText.endswith(' ')
text = _cleanup_text(cutText)
newText = text.title().replace(' ', '')
if endSpace:
newText = newText + ' '
newText = newText.replace("%", "%%") # Escape any format chars.
Text(newText).execute()
else: # Failed to get text from clipboard.
Key('c-v').execute() # Restore cut out text.
_set_clipboard_text(saveText) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pascal_case_text(text):\n newText = format_pascal_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})",
"def convert_pascal_case_to_readable_format(text):\n return \"\".join(list(map(lambda x : \" \" + x.lower() if x.isupper() else x, list(text))))",
"def pascal_case(value: str, **kwargs: Any) -> str:\n return \"\".join(map(str.title, split_words(value)))",
"def snake_case_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n text = _cleanup_text(cutText.lower())\n newText = '_'.join(text.split(' '))\n if endSpace:\n newText = newText + ' '\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)",
"def camel_case_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n text = _cleanup_text(cutText)\n newText = _camelify(text.split(' '))\n if endSpace:\n newText = newText + ' '\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)",
"def lowercase_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n newText = cutText.lower()\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)",
"def pascal_case(st) -> str:\n if st.find('-') != -1 or st.find('_') != -1:\n st = ''.join(a.capitalize() for a in re.split('-|_', st))\n return st[:1].upper() + st[1: len(st)]",
"def words_capital_letter(self, path, filemoving, parser):\n root = parser.parsing_xml(path, filemoving)\n root_tag = root.tag[0:(root.tag.find('}') + 1)]\n number_of_words_capital_letter = 0\n for i in root.iter(root_tag+'p'):\n if str(type(i.text)) == \"<class 'str'>\":\n number_of_words_capital_letter = number_of_words_capital_letter + sum(list(map(lambda x: x.istitle(), i.text.split())))\n return number_of_words_capital_letter",
"def snake_to_pascal(string):\n return string[0].upper() + re.sub('_([a-z])', lambda match: match.group(1).upper(), string[1:])",
"def print_words(words):\n print('Here are the words you entered:')\n for i, word in enumerate(words, start=1):\n print('{}. {}'.format(i, word.title()))",
"def wrap_by_word(s, n):\n\ta = s.split()\n\tret = ''\n\tfor i in range(0, len(a), n):\n\t\tret += ' '.join(a[i:i+n]) + '\\n'\n\treturn ret",
"def format_camel(text):\r\n\toutput = \"\"\r\n\tfor i,w in enumerate(text.split(\" \")):\r\n\t\tif i > 0:\r\n\t\t\toutput += w[0].upper() + w[1:]\r\n\t\telse:\r\n\t\t\toutput += w\r\n\treturn output",
"def _set_number_of_words(self, N):\n self.N_words_to_display = N",
"def create_word(char_list):",
"def get_number_of_words_with_capital_letters_and_lowercase(self):\n filename = f'{self.path}/{self.filename}'\n file = open(filename, 'r', encoding='utf-8')\n data = file.read()\n head, sep, tail = data.partition('<binary')\n head = re.sub('\\\\s\\\\s*', ' ', (re.sub('\\\\W|\\\\d', ' ', re.sub('<.*?>', '', head))))\n word_list = head.split()\n\n # upper_cnt = (sum([sum([c.isupper() for c in a]) for a in word_list]))\n # lower_cnt = (sum([sum([c.islower() for c in a]) for a in word_list]))\n\n upper_cnt = sum([a[0].isupper() for a in word_list])\n lower_cnt = (sum([a.islower() for a in word_list]))\n\n sqlite_for_ht.CreateTable.update_table(f_1, self.filename, 'words_with_capital_letters', upper_cnt)\n sqlite_for_ht.CreateTable.update_table(f_1, self.filename, 'words_in_lowercase', lower_cnt)\n print(datetime.now(), '-', 'words_with_capital_letters for', self.filename, 'calculated =', upper_cnt)\n print(datetime.now(), '-', 'words_in_lowercase for', self.filename, 'calculated =', lower_cnt)\n return None",
"def wc(file_):\r\n with open(file_) as f:\r\n file = f.read().strip()\r\n char_nums = len(file)\r\n lines = file.split('\\n')\r\n line_nums = len(lines)\r\n word_nums = 0\r\n for line in lines:\r\n words = line.split()\r\n word_nums += len(words)\r\n return f'{line_nums} {word_nums} {char_nums} {file_}'",
"def make_title(words):",
"def mixed_pascal_case(value: str, **kwargs: Any) -> str:\n return capitalize(mixed_case(value))",
"def uppercase_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n newText = cutText.upper()\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)",
"def pascalcase(string):\n\n return capitalcase(camelcase(string))",
"def _nth_letter(n):\r\n\treturn string.ascii_lowercase[n % len(string.ascii_lowercase)]",
"def get_pad1(n):\n if n < 10:\n return \" \"\n if n < 100:\n return \" \"\n if n < 1000:\n return \" \"\n return \"\"",
"def rotate_word(s1, n):\n s2 = ''\n for c in s1:\n i = (ord(c)-97+n) % 26\n ch = chr(i+97)\n s2 = s2 + ch\n return s2",
"def english(number):\r\n if number == 0:\r\n return 'zero'\r\n word = ''\r\n for step in itertools.count():\r\n number, rest = divmod(number, 1000)\r\n word = format_num(en3(rest), step) + word\r\n if number == 0:\r\n return word.strip()",
"def titleize(phrase):\n words = phrase.split(' ')\n results = ''\n for i in range(len(words)):\n word = ''\n for j in range(len(words[i])):\n if(j == 0):\n word = words[i][j].upper() \n else:\n word += words[i][j].lower()\n results += word\n if(i < len(words) - 1):\n results += ' '\n return results",
"def titleize(phrase):\n nl = phrase.split(' ')\n o=[]\n for i in nl:\n o.append(i[0].upper()+i[1:].lower())\n print(' '.join(o))",
"def render_camel(var_words):\n return ''.join([word.capitalize() for word in var_words])",
"def generate_words(self,N):\n for i in xrange(N):\n prefix = \" \" * self.chainlen\n name = \"\"\n suffix = \"\"\n while True:\n suffix = self.get_suffix(prefix)\n if suffix == \"\\n\" or len(name) > 9:\n break\n else:\n name = name + suffix\n prefix = prefix[1:] + suffix\n yield name.capitalize()",
"def ladcased(normal):\r\n\r\n ladified = ''\r\n for i, c in enumerate(normal):\r\n ladified += c.lower() if (i % 2 == 0) else c.upper()\r\n\r\n return ladified",
"def make(text=input()):\n alp = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'\n box = []\n dic = dict()\n val = 0\n #collect alphabets into list\n for i in alp:\n if i in text:\n dic[i] = text.count(i)\n box.append(i)\n if text.count(i) > val:\n val = text.count(i)\n else:\n val = val\n for i in range(val, 0, -1):\n print(\"%03d \"%i, end=\"\")\n for wow in sorted(dic, key=str.swapcase):\n if dic[wow] >= i:\n print(\"*\", end=\" \")\n else:\n print(\" \", end=\" \")\n print()\n print(\" \", *box, sep=\" \")",
"def proper_title_case(s):\n nocaps = [\"the\"] # This needs to be extended.",
"def pascal_to_snake(string):\n return string[0].lower() + re.sub('[A-Z]', lambda match: '_' + match.group(0).lower(), string[1:])",
"def counter(name):\n count_name = list(name)\n counter = 0\n for letter in count_name:\n counter += 1\n\n print(f\"There are {counter} letter in the name {name}.\")\n print(f\"\\tAnd btw... {name} backwards is {name[::-1].lower()}.\")",
"def render_snake(var_words):\n return '_'.join(var_words)",
"def format_words(words):\n return sorted(words, key=str.lower)",
"def convert_camel_case_to_underscore(text):\n words = []\n last_capital_index = 0\n\n for current_letter_index in xrange(1, len(text)):\n if text[current_letter_index].isupper():\n words.append(text[last_capital_index:current_letter_index])\n last_capital_index = current_letter_index\n elif current_letter_index == len(text) - 1:\n words.append(text[last_capital_index:])\n\n return '_'.join(words).lower()",
"def pretty_print_order(count_list,word):\n\tfor i in range(len(word)):\n\t\tif (count_list[ord(word[i].lower())-ord('a')]) > 0:\n\t\t\tprint(word[i],count_list[ord(word[i].lower())-ord('a')],sep = \": \", end =\"\\n\")\n\t\t\tcount_list[ord(word[i].lower())-ord('a')] = 0",
"def get_first_k_words(text: str, num_words: int) -> str:\n words = text.split()\n if num_words >= len(text):\n return text\n\n return ' '.join(words[:num_words])",
"def nth_word(value: str, n: int) -> str:\n return value.split()[n]",
"def snake_case_text(text):\n newText = format_snake_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})",
"def generate_text_owc(model: Dict[str, Set[str]], n: int) -> str:\n # ACCUMULATOR: a list of the randomly-generated words so far\n words_so_far = []\n # We've provided this template as a starting point; you may modify it as necessary.\n words_so_far.append(generate_new_word(model))\n for x in range(0, n-1):\n key = words_so_far[x]\n new_word = generate_next_word(model,key)\n if new_word == \".\":\n words_so_far[x] = words_so_far[x]+'.'\n new_word= generate_new_word(model)\n elif new_word == {}:\n new_word = generate_new_word(model)\n words_so_far.append(new_word)\n\n return str.join(' ', words_so_far)",
"def display_top_n_words(total_count__of_words, n): # Considering n=10 here as specified in the requirements\n return sorted(total_count__of_words.items(), key=lambda i: i[1], reverse=True)[:n]",
"def frequencyLetterDic(s):\n pass",
"def print_word_frequency_list(my_word_frequency_list):\n for word in my_word_frequency_list:\n print(\"{} {}\".format(word[0].ljust(5), word[1]))\n\n print('\\n')",
"def textJustification(words, maxWidth):\n lines = []\n currWordLen = 0\n temp = []\n\n # split up into different lines.\n\n # ensure everything before gets appended properly\n words.append('a' * maxWidth)\n\n for word in words:\n if len(word) + currWordLen > maxWidth:\n lines.append(temp)\n temp = []\n temp.append(word)\n currWordLen = len(word) + 1 # account for spaces\n else:\n temp.append(word)\n currWordLen += len(word) + 1\n\n res = []\n numLines = len(lines)\n for index, line in enumerate(lines):\n if index == numLines - 1:\n numWords = len(line)\n s = ' '.join(line)\n remainingSpaces = maxWidth - len(s)\n s += ' ' * remainingSpaces\n res.append(s)\n else:\n\n numWords = len(line)\n remainingSpaces = maxWidth - len(''.join(line))\n if numWords - 1 != 0:\n interSpace = remainingSpaces // (numWords - 1)\n remainingSpaces = remainingSpaces - \\\n ((numWords - 1) * interSpace)\n\n i = 0\n while remainingSpaces != 0:\n line[i] += ' '\n i = (i + 1) % (numWords)\n remainingSpaces -= 1\n\n res.append((' ' * interSpace).join(line))\n\n return res",
"def wcount(lines, topn):\n word = ''\n for i in lines:\n if 65<=ord(i) and ord(i)<=90:\n word = word + i \n elif 97<=ord(i) and ord(i)<=122:\n word = word + i\n else:\n word = word + ' ' \n word = word.split()\n #提取不重复的单词\n alreadyknown = []\n for m in word:\n if m not in alreadyknown:\n alreadyknown.append(m)\n #分别数数,排序,建构字典\n empty = []\n final = {}\n final2 = {}\n for j in alreadyknown:\n number = icount(word,j)\n final[j]=number\n final2[str(number)]=j\n empty.append(number)\n empty.sort()\n empty.reverse()\n last_step = empty[:10]\n #通过数字找到对应word\n last_str = ''\n for y in last_step:\n z = final2[str(y)]\n last_str += z + \"\\t\" + str(y) + \"\\n\"\n return last_str",
"def _camelify(words):\n newText = ''\n for word in words:\n if newText == '':\n newText = word[:1].lower() + word[1:]\n else:\n newText = '%s%s' % (newText, word.capitalize())\n return newText",
"def make_display_word(secret_word):\n return ('_ ' * len(secret_word))",
"def profanity_word_handler(word):\n return word[0] + ''.join([settings.CENSOR_PROFANITY_REPLACEMENT_CHARACTER for I in range(len(word)-2)]) + word [-1]",
"def words_lower_case(self, path, filemoving, parser):\n root = parser.parsing_xml(path, filemoving)\n root_tag = root.tag[0:(root.tag.find('}') + 1)]\n number_of_words_in_lower_case = 0\n for i in root.iter(root_tag+'p'):\n if str(type(i.text)) == \"<class 'str'>\":\n number_of_words_in_lower_case = number_of_words_in_lower_case + sum(list(map(lambda x: x.islower(), i.text.split())))\n return number_of_words_in_lower_case",
"def display_words(word_list,specifier):\n \n if specifier.lower() == 'score':\n print(\"{:>6s} - {:s}\".format(\"Score\", \"Word\"))\n if len(word_list) < 5:\n for tup in word_list:\n print(\"{:>6d} - {:s}\".format(tup[1], tup[0]))\n else:\n \n for tup in word_list[:5]:\n print(\"{:>6d} - {:s}\".format(tup[1], tup[0]))\n \n \n elif specifier.lower() == 'length':\n print(\"{:>6s} - {:s}\".format(\"Length\", \"Word\"))\n if len(word_list) < 5:\n for tup in word_list:\n print(\"{:>6d} - {:s}\".format(tup[2], tup[0]))\n else:\n \n for tup in word_list[:5]:\n print(\"{:>6d} - {:s}\".format(tup[2], tup[0]))",
"def capify(text):\n return text[0].upper() + text[1:]",
"def just(s: str) -> str:\n return s.ljust(50, \"_\")",
"def createWordKnown(self):\n return ''.join(['_ ' for m in range(self.wordLen)])",
"def lower(self) -> str:",
"def upper(value,n):\n return value.upper()[0:n]",
"def cap_first(word):\n return word[0].upper() + word[1:]",
"def __process_word(self, word):\n output = ''\n capitals = self.__capital_positions(word)\n c_index = 0\n\n for c in word:\n if c_index in capitals:\n output += c.upper()\n else:\n output += c.lower()\n\n c_index += 1\n\n return output",
"def titleize(title):\n titleized = []\n for idx, word in enumerate(title.split()):\n if idx == 0 or word not in ['a', 'of', 'in', 'the', 'v']:\n word = word.capitalize()\n\n titleized.append(word)\n\n return ' '.join(titleized)",
"def compile_word(word):\n \n result = ''\n for i,ltr in enumerate(word):\n result = str(10**(len(word)-i-1)) + '*' + ltr + result\n if i != len(word)-1:\n result = '+' + result\n\n return result",
"def capitalize_text(text, words_list, marker=''):\n for word in words_list:\n text = capitalize_term_re(text, word, marker)\n return text",
"def daily1(word):\n if len(word) <= 2:\n return f'{word} to za krótkie słowo.'\n else:\n return word[::-1]",
"def left_justify_string(keyword, value):\n return '%s' % keyword .ljust(40, \".\") + \": \" + '%s\\n' % value",
"def to_lowercase(self):\n new_words = []\n # new_words = \"\"\n for word in self.words:\n new_word = word.lower()\n new_words.append(new_word)\n # new_word += f\"{new_word} \"\n self.words = new_words\n return self",
"def add_space(s,n):\n t = \"\"\n for i in xrange(len(s)):\n # Add white space after every n characters.\n if i % n == 0 and i != 0:\n t += ' '\n t += s[i]\n\n return t",
"def make_text(chains, n):\n\n words = []\n\n capital_keys = [key for key in chains.keys() if key[0][0].isupper() and chains[key] != None]\n first_key = choice(capital_keys)\n\n words.extend(list(first_key))\n rand_value = choice(chains[first_key])\n words.append(rand_value)\n\n current_string = \" \".join(words)\n\n i = 1\n while len(current_string) < 140:\n current_string = \" \".join(words)\n new_key = tuple(words[i: i + n])\n if not chains[new_key]:\n break\n else:\n rand_value = choice(chains[new_key])\n words.append(rand_value)\n i += 1\n\n return current_string",
"def s_words(words):\n\t\n\treturn words // 100 / 10",
"def getWordScore(word, n):\n score=0\n for i in range(len(word)):\n addition=SCRABBLE_LETTER_VALUES[word[i]]\n score+=addition*(len(word))\n if len(word)==n:\n score+=50\n return score",
"def rotate_word(s,i):\n word=''\n if abs(i) > 26:\n i=i%26\n for char in s:\n old=ord(char)\n new=old+i\n if old < 65:\n fixed=old\n elif old > 122:\n fixed=old\n elif 90 < old < 97:\n fixed=old\n\telif 65 < old < 90:\n if new > 90:\n fixed=new-26\n elif new < 65:\n fixed=new+26\n else:\n fixed=new\n elif 97 < old < 122:\n if new > 122:\n fixed=new-26\n elif new < 97:\n fixed=new+26\n else:\n fixed=new\n rotated=chr(fixed)\n word=word+rotated\n return word",
"def camel_case_text(text):\n newText = format_camel_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})",
"def show_word(self):\n self.display_word = len(self.chosen_word) * \"_ \"\n Donatello.draw_word(self.display_word)\n return self.display_word",
"def Left(n=1):\n return ESC + str(n) + 'D'",
"def _trans_string(self, n):\r\n return \"%s %d\" % (self.desc, n+1)",
"def expand_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n cutText = _expand_after_special_chars(cutText)\n reg = re.compile(\n r'([a-zA-Z0-9_\\\"\\'\\)][=\\+\\-\\*/\\%]|[=\\+\\-\\*/\\%][a-zA-Z0-9_\\\"\\'\\(])')\n hit = reg.search(cutText)\n count = 0\n while hit and count < 10:\n cutText = cutText[:hit.start() + 1] + ' ' + \\\n cutText[hit.end() - 1:]\n hit = reg.search(cutText)\n count += 1\n newText = cutText\n if endSpace:\n newText = newText + ' '\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)",
"def word_capital(text):\n if text and len(text) > 0:\n return ' '.join([s[0].upper() + s[1:] for s in text.split(' ') if len(s) > 0])\n else:\n return text",
"def Left(text, number):\n return text[:number]",
"def _snake_case(display_name):\n str_re = re.compile('[{0}]'.format(re.escape(string.punctuation)))\n str = str_re.sub(' ', display_name)\n str = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', str)\n str = re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', str).lower()\n return re.sub(' +', '_', str)",
"def capitalizeFirst(word):\n return word[0].upper() + word[1:]",
"def process_game(level, blank):\r\n paragraph = paragraphs[level]\r\n while blank < len(input_list[level]):\r\n word_to_replace = (input_list[level][blank])\r\n list_of_words = paragraph.split(\" \")\r\n index = 0\r\n # Replace the word_to_replace with ___number___\r\n while index < len(list_of_words):\r\n if list_of_words[index] == word_to_replace:\r\n list_of_words[index] = \"___\" + str(blank + 1) + \"___\"\r\n index += 1\r\n\r\n blank += 1\r\n paragraph = \" \".join(list_of_words)\r\n\r\n return paragraph",
"def wc(filename):\n\n\n # holds number of characters, words and lines in the file\n lines = 0\n words = 0\n char = 0\n try: # if the file does not exist\n file = open(filename)\n for line in file: # goes through file\n lines += 1\n wordList = line.split()\n words += len(wordList)\n for el in wordList: # this way I only count characters without \" \"(spaces)\n char += len(el)\n file.close()\n except Exception as e:\n print(e)\n lines, words, char = 404, 404, 404\n\n # prints out only if there was a right file name\n print(f\"{lines} {words} {char} {filename}\")",
"def normalizeTexts(texts):\n fCW = 0\n for item in texts:\n fCW = max(len(item), fCW)\n for counter, item in enumerate(texts):\n texts[counter] = texts[counter].ljust(fCW + 1, '.')\n return (texts, fCW)",
"def primera_palabra_mayuscula(cadena):\n palabras = cadena.split(\" \")\n frase_final = \"\"\n for palabra in palabras: # recorro la palabra separada \n frase_final += palabra.capitalize() + \" \" # agarro la palabra separado y la primera letra la pongo en mayuscula \n return frase_final",
"def test20():\n\tdef highlight_word(sentence, word):\n\t\treturn(\" \".join(x) for x in sentence.split())\n\n\tprint(highlight_word(\"Have a nice day\", \"nice\"))\n\tprint(highlight_word(\"Shhh, don't be so loud!\", \"loud\"))\n\tprint(highlight_word(\"Automating with Python is fun\", \"fun\"))",
"def indent(text, n=4):\n if not text:\n return \"\"\n i = \" \" * n\n return i + text.replace(\"\\n\", \"\\n\" + i)",
"def numLetterCount(maxNum):\n sum = 0\n for num in range(1,maxNum+1):\n sum += len(num2words(num))\n print(str(num) + ' = ' + str(num2words(num)))\n return sum",
"def add_padding(self, text):\n\n for word in text.split(' '):\n # 5 character blocks added straight\n if len(word) == 5:\n self.output += word + ' '\n # calling the helper method to fill the blocks\n elif len(word) < 5:\n self._helper(word)\n # split the block up into 5 letter chunks\n elif len(word) > 5:\n block = ''\n for letter in word:\n block += letter\n if len(block) == 5:\n # append the chunk to output\n self.output += block + ' '\n block = ''\n self._helper(block)\n\n return self.output.upper()",
"def lunderize(title):\n title = title.lower()\n title = title.replace(' ', '_')\n title = title.replace('.', '')\n return title",
"def _(event):\n buffer = event.current_buffer\n buffer.cursor_position += buffer.document.find_next_word_beginning(count=event.arg) or \\\n buffer.document.get_end_of_document_position()",
"def capwords(s, sep=None):\n if sep is None:\n sep = ' '\n return sep.join(x.capitalize() for x in s.split(sep))\n #return (sep or ' ').join(x.capitalize() for x in s.split(sep))",
"def random_text(n):\n start = random.choice(suffix_map.keys())\n for i in range(n):\n suffixes = suffix_map.get(start, None)\n if suffixes == None:\n # if the start isn't in map, we got to the end of the\n # original text, so we have to start again.\n random_text(n-i)\n return\n # choose a random suffix\n word = random.choice(suffixes)\n # Jodesty: *Need to learn how to format text output to fit on terminal screen\n output_words.append(word)\n # Jodesty: *what I have for now\n print word,\n start = shift(start, word)",
"def capitalize1(s):\n return s[:1].upper() + s[1:]",
"def ordinal(n):\n ord_dict = {1: \"st\", 2: \"nd\", 3: \"rd\"}\n return str(n + 1) + ord_dict.get((n + 1) if (n + 1) < 20 else (n + 1) % 10, \"th\")",
"def justify(self, value):\n pos = 0\n upper = value.upper()\n\n if upper == 'L':\n pos = 0\n elif upper == 'C':\n pos = 1\n elif upper == 'R':\n pos = 2\n\n self.write(self.ASCII_ESC, 'a', pos)",
"def wrap(cls, text, first=0, indent=15, maxwidth=75):\n outstr = []\n sentence = []\n if not text:\n return \"\"\n for word in text.split():\n if len(\" \".join(sentence)) + len(word) + first > maxwidth:\n outstr.append(\" \".join(sentence))\n sentence = [\" \" * indent, word]\n first = 0\n else:\n sentence.append(word.strip())\n outstr.append(\" \".join(sentence))\n return \"\\n\".join(outstr)",
"def test_capitalize_word(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n first line\n line 1\n Line a\n line b\n line c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"3.6\", \"3.6\"),\n after_sel=(\"3.6\", \"3.6\"),\n command_name=\"capitalize-word\",\n )",
"def count_words(filename):",
"def test_wrap_word():\n line = \"n\" * 81\n assert wrap_line(line) == \"n\" * 80 + \"\\nn\"",
"def motion_W(input_line, cur, count):\n pos = get_pos(input_line, REGEX_MOTION_UPPERCASE_W, cur, True, count)\n if pos == -1:\n return len(input_line), False, False\n return cur + pos, False, False",
"def count_words():\n paragraph = \"a distinct section of a piece of writing,\"\n # 替换\n paragraph.replace(\",\", \" \").replace(\":\", \" \").replace(\";\", \" \").replace(\".\", \" \").replace(\"?\", \" \")\n words = paragraph.split(\" \")\n nums = {}\n\n for word in words:\n nums[word] = nums[word]+1 if word in nums else 1\n # nums[word] = nums.get(word, 0) + 1\n\n for word, num in nums.items():\n print(word, \": \", num)",
"def get_word_score(word, n=7):\n score = 0\n\n for i in word:\n score += SCRABBLE_LETTER_VALUES[i]\n\n if len(word) == n:\n score += 50\n\n return score"
] | [
"0.6748049",
"0.668678",
"0.66685766",
"0.65173775",
"0.6507821",
"0.64246476",
"0.62588936",
"0.5990105",
"0.5817989",
"0.5803157",
"0.5715056",
"0.5696685",
"0.5679296",
"0.5647844",
"0.56197363",
"0.56024784",
"0.5598668",
"0.5587423",
"0.5575231",
"0.5562862",
"0.5554474",
"0.55531186",
"0.55442256",
"0.55324304",
"0.55222905",
"0.55084115",
"0.5501381",
"0.5492033",
"0.54842985",
"0.548238",
"0.54668915",
"0.5459138",
"0.5440758",
"0.5418102",
"0.5365969",
"0.53655887",
"0.53623444",
"0.53615034",
"0.53614384",
"0.5354993",
"0.5322377",
"0.5320589",
"0.5315331",
"0.52936864",
"0.529263",
"0.5291597",
"0.5288575",
"0.5282354",
"0.5273143",
"0.5264295",
"0.5253977",
"0.5248972",
"0.520848",
"0.5197072",
"0.5193423",
"0.51914424",
"0.5190479",
"0.518807",
"0.5181485",
"0.5178542",
"0.5162234",
"0.514976",
"0.5148958",
"0.51398855",
"0.51315665",
"0.5115011",
"0.5113818",
"0.5113502",
"0.51020795",
"0.5101606",
"0.50992346",
"0.5097045",
"0.50897175",
"0.50853217",
"0.5080188",
"0.50671023",
"0.5059397",
"0.50559723",
"0.5053165",
"0.50468886",
"0.5046173",
"0.5044517",
"0.5040363",
"0.50394833",
"0.5038647",
"0.5023472",
"0.5023201",
"0.5019242",
"0.5018103",
"0.50177133",
"0.50176",
"0.501693",
"0.50167876",
"0.5012704",
"0.50100017",
"0.49986622",
"0.4996608",
"0.49884418",
"0.4987719",
"0.49862316"
] | 0.7799282 | 0 |
Formats dictated text to snake case. | def snake_case_text(text):
newText = format_snake_case(text)
Text("%(text)s").execute({"text": newText}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def CamelCase_to_snake_case(text):\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', text)\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()",
"def _camel_case_to_snake_case(text: str) -> str:\n return re.sub(r\"(?<!^)(?=[A-Z])\", \"_\", text).lower()",
"def snake_case(value: str, **kwargs: Any) -> str:\n return \"_\".join(map(str.lower, split_words(value)))",
"def convert_pascal_case_to_readable_format(text):\n return \"\".join(list(map(lambda x : \" \" + x.lower() if x.isupper() else x, list(text))))",
"def screaming_snake_case(value: str, **kwargs: Any) -> str:\n return snake_case(value, **kwargs).upper()",
"def snakecase_to_sentencecase(name: str) -> str:\n return stringcase.sentencecase(name).lower()",
"def proper_title_case(s):\n nocaps = [\"the\"] # This needs to be extended.",
"def snake_to_camel(text: str) -> str:\n\n data = [\n i.capitalize()\n for i in text.split(\"_\")\n ]\n return \"\".join(data)",
"def _snake_case(display_name):\n str_re = re.compile('[{0}]'.format(re.escape(string.punctuation)))\n str = str_re.sub(' ', display_name)\n str = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', str)\n str = re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', str).lower()\n return re.sub(' +', '_', str)",
"def convert_to_snake_case(string: str) -> str:\n\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', string)\n draft = re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()\n return draft.replace('__', '_')",
"def mixed_snake_case(value: str, **kwargs: Any) -> str:\n return \"_\".join(split_words(value))",
"def pascal_case(value: str, **kwargs: Any) -> str:\n return \"\".join(map(str.title, split_words(value)))",
"def LCase(text):\n return text.lower()",
"def to_upper_snakecase(text: str) -> str:\n\n data = text.replace(\" \", \"_\").upper()\n if data[0].isdigit():\n data = \"_\" + data\n return data",
"def normalize_case(text):\n text = str(text)\n return text.lower()",
"def pascal_case_text(text):\n newText = format_pascal_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})",
"def camel_case_to_readable(text: str) -> str:\n if text == 'id':\n return 'ID'\n return ''.join(' ' + char if char.isupper() else char.strip() for char in text).strip().title()",
"def to_lower(self, text):\n return text.lower()",
"def snakecase(self, given_path):\n filename = os.path.basename(given_path)\n filename = first_cap_re.sub(r'\\1_\\2', filename)\n filename = all_cap_re.sub(r'\\1_\\2', filename).lower()\n return given_path.replace(os.path.basename(given_path), filename)",
"def convert_to_snake_case(camel_case_string):\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', camel_case_string)\n s2 = re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()\n return s2.replace('__', '_')",
"def mixed_pascal_case(value: str, **kwargs: Any) -> str:\n return capitalize(mixed_case(value))",
"def _camel_to_snake(name):\n s1 = re.sub(\"(.)([A-Z][a-z]+)\", r\"\\1_\\2\", name)\n return re.sub(\"([a-z0-9])([A-Z])\", r\"\\1_\\2\", s1).lower()",
"def case(text, casingformat='sentence'):\n\n # If the lowercase version of the casing format is 'uppercase'\n if casingformat.lower() == 'uppercase':\n # Return the uppercase version\n return str(text.upper())\n\n # If the lowercase version of the casing format is 'lowercase'\n elif casingformat.lower() == 'lowercase':\n # Return the lowercase version\n return str(text.lower())\n\n # If the lowercase version of the casing format is 'sentence'\n elif casingformat.lower() == 'sentence':\n # Return the sentence case version\n return str(text[0].upper()) + str(text[1:])\n\n # If the lowercase version of the casing format is 'caterpillar'\n elif casingformat.lower() == 'caterpillar':\n # Return the caterpillar case version\n return str(text.lower().replace(\" \", \"_\"))\n\n # Raise a warning\n raise ValueError(\"Invalid text format specified.\")",
"def lowercase_text(text):\n newText = format_lower_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})",
"def to_snake_case(string):\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', string)).lower()",
"def preprocess(text):\n return text.lower()",
"def LOWER(text):\n return text.lower()",
"def to_snake_case(str):\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', str)\n s2 = re.sub('-', '_', s1)\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s2).lower()",
"def camel_case_text(text):\n newText = format_camel_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})",
"def snakecase(string):\n\n string = re.sub(r\"[\\-\\.\\s]\", '_', str(string))\n if not string:\n return string\n return lowercase(string[0]) + re.sub(r\"[A-Z]\", lambda matched: '_' + lowercase(matched.group(0)), string[1:])",
"def snake_case(string_to_convert):\n return ''.join(['_' + i.lower() if i.isupper()\n else i for i in string_to_convert]).lstrip('_')",
"def camel_to_snake_case(value):\n return re_camel_case.sub(r\"_\\1\", value).strip(\"_\").lower()",
"def snakecase(label, separator='_'):\n subbed = RegexCamelSnake1.sub(r'\\1{sep}\\2'.format(sep=separator), label)\n return RegexCamelSnake2.sub(r'\\1{sep}\\2'.format(sep=separator), subbed).lower()",
"def toLowerCase(self) -> None:\n self.text = self.text.lower()",
"def _lowercase(text: str) -> str:\n return text.lower()",
"def _format(text):\n \n if isinstance(text, unicode):\n return text.lower().encode(\"UTF-8\")\n elif isinstance(text, str):\n return text.lower()",
"def name_to_snake_case(name: str) -> str:\n\n # From COBOL entity\n if '-' in name or name.isupper():\n return name.strip().lower().replace('-', '_')\n\n # From camelCase\n return re.sub(r'(?<!^)(?=[A-Z])', '_', name.strip()).lower()",
"def make_snake_case(string):\n return snake_re.sub(r'_\\1', string).lower()",
"def camel_to_snake(name):\n name = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', name)\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', name).lower()",
"def snake_to_camel_case(value):\n words = value.strip(\"_\").split(\"_\")\n return words[0].lower() + \"\".join([word.capitalize() for word in words[1:]])",
"def camel_to_snake(name: str) -> str:\n name = re.sub(\"(.)([A-Z][a-z]+)\", r\"\\1_\\2\", name)\n return re.sub(\"([a-z0-9])([A-Z])\", r\"\\1_\\2\", name).lower()",
"def _nice_case(line):\n line_lower = line.lower()\n s = \"\"\n i = 0\n nextCap = 1\n while i < len(line_lower):\n c = line_lower[i]\n if c >= \"a\" and c <= \"z\" and nextCap:\n c = c.upper()\n nextCap = 0\n elif c in \" .,;:\\t-_\":\n nextCap = 1\n s += c\n i += 1\n return s",
"def parse_case_snake_to_camel(snake, upper_first=True):\n\tsnake = snake.split('_')\n\tfirst_part = snake[0]\n\tif upper_first:\n\t\tfirst_part = first_part.title()\n\treturn first_part + ''.join(word.title() for word in snake[1:])",
"def camel_to_snake(s):\n subbed = _underscorer1.sub(r'\\1_\\2', s)\n return _underscorer2.sub(r'\\1_\\2', subbed).lower()",
"def parse_case_camel_to_snake(camel):\n\t# requirements = re\n\treturn re.sub('((?<=[a-z0-9])[A-Z]|(?!^)[A-Z](?=[a-z]))', r'_\\1', camel).lower()",
"def snake_to_camel(snake_str):\n title_str = snake_str.split('_')\n return ' '.join(title_str).title()",
"def _snake_to_camel(name, strict=False):\n if strict:\n name = name.lower()\n terms = name.split('_')\n return terms[0] + ''.join([term.capitalize() for term in terms[1:]])",
"def fix_string_case(text):\n fixed = []\n for i in text:\n if is_case_sensitive(i):\n fixed.append(i)\n else:\n fixed.append(i.lower())\n return ''.join(fixed)",
"def camel_to_snake_case(name: str) -> str:\n return CAPITALS.sub(r'_\\1', name).lower().lstrip('_')",
"def snake_case_to_headless_camel_case(snake_string):\n return ''.join([snake_string.split('_')[0]] +\n list(sub_string.capitalize()\n for sub_string in snake_string.split('_')[1:]))",
"def snake_to_camel_case(snake_str: str) -> str:\n\n words = snake_str.strip(\"_\").split(\"_\")\n return words[0] + \"\".join(word[:1].upper() + word[1:] for word in words[1:])",
"def test_snake_to_camel(self):\n\n self.assertEqual(snake_to_camel(None), None)\n self.assertEqual(snake_to_camel(b''), b'')\n self.assertEqual(snake_to_camel(''), '')\n self.assertEqual(snake_to_camel('123'), '123')\n self.assertEqual(snake_to_camel('test'), 'test')\n self.assertEqual(snake_to_camel('TEST'), 'test')\n self.assertEqual(snake_to_camel('abc_def'), 'abcDef')\n self.assertEqual(snake_to_camel('abc__def'), 'abcDef')\n self.assertEqual(snake_to_camel('abc___def'), 'abcDef')\n self.assertEqual(snake_to_camel('_abc_def_'), 'abcDef')\n self.assertEqual(snake_to_camel('__abc_def'), 'abcDef')\n self.assertEqual(snake_to_camel('abc_def__'), 'abcDef')\n self.assertEqual(snake_to_camel('a_bc_def'), 'aBcDef')\n self.assertEqual(snake_to_camel('abc_def_ghi'), 'abcDefGhi')\n self.assertEqual(snake_to_camel('one1_two2_three3'), 'one1Two2Three3')\n self.assertEqual(snake_to_camel('1_one2_two3_three'), '1One2Two3Three')\n self.assertEqual(snake_to_camel('11_twelve13_fourteen15'), '11Twelve13Fourteen15')",
"def lower(self) -> str:",
"def clean_cases(text):\n return text.lower()",
"def lower(text):\n text = text.lower()\n return text",
"def pascal_to_snake(string):\n return string[0].lower() + re.sub('[A-Z]', lambda match: '_' + match.group(0).lower(), string[1:])",
"def uppersnakecase(string):\n\n return uppercase(snakecase(string))",
"def UCase(text):\n return text.upper()",
"def snake_to_camel_case(name: str, initial: bool = False) -> str:\n chunks = name.split('_')\n converted = [s.capitalize() for s in chunks]\n if initial:\n return ''.join(converted)\n else:\n return chunks[0].lower() + ''.join(converted[1:])",
"def test_camel_to_snake(self):\n\n self.assertEqual(camel_to_snake(None), None)\n self.assertEqual(camel_to_snake(b''), b'')\n self.assertEqual(camel_to_snake(''), '')\n self.assertEqual(camel_to_snake('123'), '123')\n self.assertEqual(camel_to_snake('test'), 'test')\n self.assertEqual(camel_to_snake('TEST'), 'test')\n self.assertEqual(camel_to_snake('abcDef'), 'abc_def')\n self.assertEqual(camel_to_snake('AbcDef'), 'abc_def')\n self.assertEqual(camel_to_snake('ABCDef'), 'abc_def')\n self.assertEqual(camel_to_snake('abcDEF'), 'abc_def')\n self.assertEqual(camel_to_snake('aBcDEF'), 'a_bc_def')\n self.assertEqual(camel_to_snake('ABcDEF'), 'a_bc_def')\n self.assertEqual(camel_to_snake('abcDefGhi'), 'abc_def_ghi')\n self.assertEqual(camel_to_snake('abcDEFGhi'), 'abc_def_ghi')\n self.assertEqual(camel_to_snake('One1Two2Three3'), 'one1_two2_three3')\n self.assertEqual(camel_to_snake('1One2Two3Three'), '1_one2_two3_three')\n self.assertEqual(camel_to_snake('11Twelve13Fourteen15'), '11_twelve13_fourteen15')",
"def _to_camel_case(text: str) -> str:\n return \"\".join(word.title() for word in text.split(\"_\"))",
"def spinalcase(string):\n\n return re.sub(r\"_\", \"-\", snakecase(string))",
"def format_camel(text):\r\n\toutput = \"\"\r\n\tfor i,w in enumerate(text.split(\" \")):\r\n\t\tif i > 0:\r\n\t\t\toutput += w[0].upper() + w[1:]\r\n\t\telse:\r\n\t\t\toutput += w\r\n\treturn output",
"def camelcase_to_sentencecase(name: str) -> str:\n return stringcase.sentencecase(stringcase.snakecase(name)).lower()",
"def process(self, s):\n # modified for project...\n return s.upper()",
"def pascal_to_snake(s: str) -> str:\n converted = name_pattern.sub('_', s).lower().replace('::', '/')\n\n # We end up with some '/_' so we remove the underscore.\n return converted.replace('/_', '/')",
"def _camel_to_snake(s):\n return \"_\".join(\n [\n i.lower() for i in _camel_words.split(s)[1::2]\n ]\n )",
"def preprocess_text(self):\n self.text_received = self.text_received.replace(\" \", \"\").lower()",
"def tr_upper_to_lower(text):\n out = []\n for ch in text:\n if ch in tr_upper_to_lower_dict:\n out.append(tr_upper_to_lower_dict[ch])\n else:\n out.append(ch.lower())\n \n return \"\".join(out)",
"def to_snake(string):\n\n return re.sub(r'(?<!^)(?=[A-Z])', '_', string).lower()",
"def detect_case(text):\n\n parts = split_by_case(text, 'underscore')\n if not parts:\n # text is collection of underscores\n return 'other'\n\n if not all(part.isalnum() for part in parts):\n # one or more text part contains not alpha-numeric characters\n return 'other'\n\n if len(parts) != 1:\n return 'underscore'\n\n parts = split_by_case(parts[0], 'camel')\n if parts[0][0].isupper(): # check first character\n return 'title'\n\n # first character lower or not letter\n\n if len(parts) == 1:\n return 'mixed'\n\n return 'camel'",
"def test_snake_to_camel_to_snake(self):\n\n values = (\n None, b'', '',\n '123', 'test', 'abc_def', 'a_bc_def', 'abc_def_ghi',\n 'one1_two2_three3', '1_one2_two3_three', '11_twelve13_fourteen15'\n )\n for value in values:\n self.assertEqual(camel_to_snake(snake_to_camel(value)), value)",
"def camel_to_snake(string):\n return re.sub(r\"(?<!^)(?=[A-Z])\", \"_\", string).lower().replace(\"__\", \"_\")",
"def camel_to_snake(s):\n no_camel = \"\".join([\"_\" + c.lower() if c.isupper() else c for c in s]).lstrip(\"_\")\n return no_camel.replace(\"__\", \"_\")",
"def test_utils_snake_to_camel(self, tcex, input_, expected):\n result = tcex.utils.snake_to_camel(input_)\n assert result == expected, f'Input {input_} result of {result} != {expected}'",
"def test_utils_camel_to_snake(self, tcex, input_, expected):\n result = tcex.utils.camel_to_snake(input_)\n assert result == expected, f'Input {input_} result of {result} != {expected}'",
"def camel_to_snake(name: str) -> str:\n snake1 = _FIRST_CAP_RE.sub(r'\\1_\\2', name)\n return _ALL_CAP_RE.sub(r'\\1_\\2', snake1).lower()",
"def not_capitalized(): # noqa: D416",
"def _case_convert_capital_to_snake(token: str) -> str:\n # split on uppercase characters\n # this split works by inserting a space before each uppercase character, then space-splitting\n components = re.sub(r'([A-Z])', r' \\1', token).split()\n return '_'.join(components).lower()",
"def test_titlecase(self):\n test_pairs = [\n [\n \"Q&A with steve jobs: 'that's what happens in technology'\",\n \"Q&A With Steve Jobs: 'That's What Happens in Technology'\",\n ],\n [\"What is AT&T's problem?\", \"What is AT&T's Problem?\"],\n [\n \"Apple deal with AT&T falls through\",\n \"Apple Deal With AT&T Falls Through\",\n ],\n [\"this v that\", \"This v That\"],\n [\"this v. that\", \"This v. That\"],\n [\"this vs that\", \"This vs That\"],\n [\"this vs. that\", \"This vs. That\"],\n [\n \"The SEC's Apple Probe: What You Need to Know\",\n \"The SEC's Apple Probe: What You Need to Know\",\n ],\n [\n \"'by the Way, small word at the start but within quotes.'\",\n \"'By the Way, Small Word at the Start but Within Quotes.'\",\n ],\n [\n \"Small word at end is nothing to be afraid of\",\n \"Small Word at End is Nothing to Be Afraid Of\",\n ],\n [\n \"Starting Sub-Phrase With a Small Word: a Trick, Perhaps?\",\n \"Starting Sub-Phrase With a Small Word: A Trick, Perhaps?\",\n ],\n [\n \"Sub-Phrase With a Small Word in Quotes: 'a Trick, Perhaps?'\",\n \"Sub-Phrase With a Small Word in Quotes: 'A Trick, Perhaps?'\",\n ],\n [\n 'Sub-Phrase With a Small Word in Quotes: \"a Trick, Perhaps?\"',\n 'Sub-Phrase With a Small Word in Quotes: \"A Trick, Perhaps?\"',\n ],\n ['\"Nothing to Be Afraid of?\"', '\"Nothing to Be Afraid Of?\"'],\n ['\"Nothing to be Afraid Of?\"', '\"Nothing to Be Afraid Of?\"'],\n [\"a thing\", \"A Thing\"],\n [\n \"2lmc Spool: 'gruber on OmniFocus and vapo(u)rware'\",\n \"2lmc Spool: 'Gruber on OmniFocus and Vapo(u)rware'\",\n ],\n [\"this is just an example.com\", \"This is Just an example.com\"],\n [\n \"this is something listed on del.icio.us\",\n \"This is Something Listed on del.icio.us\",\n ],\n [\"iTunes should be unmolested\", \"iTunes Should Be Unmolested\"],\n [\n \"Reading between the lines of steve jobs’s ‘thoughts on music’\",\n # Tests unicode\n \"Reading Between the Lines of Steve Jobs’s ‘Thoughts on Music’\",\n ],\n [\n \"seriously, ‘repair permissions’ is voodoo\", # Tests unicode\n \"Seriously, ‘Repair Permissions’ is Voodoo\",\n ],\n [\n \"generalissimo francisco franco: still dead; kieren McCarthy: \"\n \"still a jackass\",\n \"Generalissimo Francisco Franco: Still Dead; Kieren McCarthy:\"\n \" Still a Jackass\",\n ],\n [\n \"Chapman v. u.s. Postal Service\",\n \"Chapman v. U.S. Postal Service\",\n ],\n [\n \"Spread Spectrum Screening Llc. v. Eastman Kodak Co.\",\n \"Spread Spectrum Screening LLC. v. Eastman Kodak Co.\",\n ],\n [\n \"Consolidated Edison Co. of New York, Inc. v. Entergy Nuclear \"\n \"Indian Point 2, Llc.\",\n \"Consolidated Edison Co. of New York, Inc. v. Entergy Nuclear\"\n \" Indian Point 2, LLC.\",\n ],\n [\n \"Infosint s.a. v. H. Lundbeck A/s\",\n \"Infosint S.A. v. H. Lundbeck A/S\",\n ],\n [\n \"KEVIN O'CONNELL v. KELLY HARRINGTON\",\n \"Kevin O'Connell v. Kelly Harrington\",\n ],\n [\n \"International Union of Painter v. J&r Flooring, Inc\",\n \"International Union of Painter v. J&R Flooring, Inc\",\n ],\n [\n \"DOROTHY L. BIERY, and JERRAMY and ERIN PANKRATZ v. THE UNITED\"\n \" STATES 07-693L And\",\n \"Dorothy L. Biery, and Jerramy and Erin Pankratz v. the \"\n \"United States 07-693l And\",\n ],\n [\"CARVER v. US\", \"Carver v. US\"],\n ]\n\n for pair in test_pairs:\n unicode_string = force_unicode(pair[0])\n self.assertEqual(titlecase(unicode_string, DEBUG=False), pair[1])",
"def camel_to_snake(\n name: str, _re_snake: Pattern[str] = re.compile(\"[a-z][A-Z]\")\n) -> str:\n\n def repl(match: Match[str]) -> str:\n lower: str\n upper: str\n lower, upper = match.group() # type: ignore\n return f\"{lower}_{upper.lower()}\"\n\n return _re_snake.sub(repl, name).lower()",
"def snake_to_spaces(snake_cased_str):\n separator = \"_\"\n components = snake_cased_str.split(separator)\n if components[0] == \"\":\n components = components[1:]\n if components[-1] == \"\":\n components = components[:-1]\n if len(components) > 1:\n spaced_str = components[0].lower()\n for x in components[1:]:\n spaced_str += \" \" + x.lower()\n else:\n spaced_str = components[0]\n return spaced_str",
"def _transform_to_lowercase(self, doc: str):\n processed_tweet = doc.lower()\n return processed_tweet",
"def test_camel_to_snake_to_camel(self):\n\n values = (\n None, b'', '',\n '123', 'test', 'abcDef', 'aBcDef', 'abcDefGhi',\n 'one1Two2Three3', '1One2Two3Three', '11Twelve13Fourteen15'\n )\n for value in values:\n self.assertEqual(snake_to_camel(camel_to_snake(value)), value)",
"def pascal_case(st) -> str:\n if st.find('-') != -1 or st.find('_') != -1:\n st = ''.join(a.capitalize() for a in re.split('-|_', st))\n return st[:1].upper() + st[1: len(st)]",
"def camel_to_snake(column_name):\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', column_name)\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()",
"def _conventionalize(options: dict, what: str, name: str):\n function = _case_mapping[\n options.get(\"naming_conventions\", {}).get(what, \"snake_case\")]\n return function(name)",
"def camel_case_to_snake_case(s, separator='_'):\n return ''.join(separator + c.lower() if c.isupper() else c for c in s).lstrip(separator)",
"def get_snake_case_from_camel_case(name: str) -> str:\n\n new_chars = []\n for i, char in enumerate(name): \n if i == len(name)-1 or i == 0: \n new_chars.append(char)\n elif char.isupper() and name[i+1].islower():\n new_chars.append('_')\n new_chars.append(char)\n elif char.islower() and name[i+1].isupper(): \n new_chars.append(char)\n new_chars.append('_')\n else: \n new_chars.append(char)\n\n new_name = ''.join(new_chars)\n return new_name.lower().replace('__', '_')",
"def lowerCase(self,phrase):\n if(\"normalizeText\" in self._classes):\n return self._normalize.lowerCase(phrase)",
"def snake_to_camel(word):\n return ''.join(x.capitalize() or '_' for x in word.split('_'))",
"def snake_to_camel(word):\n return ''.join(x.capitalize() or '_' for x in word.split('_'))",
"def to_lowercase(text: str) -> str:\n text = text.lower()\n return text",
"def snakecase_name(cls) -> str:\n snake_name = (cls.__name__[0:1] + re.sub(r\"([A-Z])\", r\"_\\1\", cls.__name__[1:])).lower()\n return snake_name",
"def dash_snake_case(string_to_convert):\n return ''.join(['-'+i.lower() if i.isupper()\n else i for i in string_to_convert]).lstrip('-')",
"def string_to_snake_case(string:str) -> str:\n result_snake_case = str()\n for symb in string:\n if symb.isupper():\n symb = '_' + symb.lower()\n result_snake_case += symb\n return result_snake_case",
"def render_snake(var_words):\n return '_'.join(var_words)",
"def _case_convert_snake_to_camel(token: str) -> str:\n while True:\n try:\n # find next underscore\n underscore_loc = token.index('_')\n except ValueError:\n # converted all underscores\n break\n # is the underscore at the end of the string?\n if underscore_loc == len(token) - 1:\n break\n\n orig = token\n token = f'{orig[:underscore_loc]}{orig[underscore_loc+1].upper()}'\n # is there more after the capital?\n if len(orig) > underscore_loc+2:\n token += f'{orig[underscore_loc+2:]}'\n return token",
"def task_1_fix_names_start_letter(data: DT) -> DT:\n for dic in data:\n if dic.get('name'):\n dic['name'] = dic['name'].capitalize()\n return data",
"def snake_case_to_camel_case(s, separator='_'):\n return s.title().replace(separator, '')"
] | [
"0.70693374",
"0.7003121",
"0.69320387",
"0.6745865",
"0.67148775",
"0.6688143",
"0.66141355",
"0.65758014",
"0.65488815",
"0.64430827",
"0.6417229",
"0.6416612",
"0.63867706",
"0.63755894",
"0.6365438",
"0.63618255",
"0.6327051",
"0.6304511",
"0.6293115",
"0.62793934",
"0.6235145",
"0.6223727",
"0.6212002",
"0.61669123",
"0.61632633",
"0.616044",
"0.61486906",
"0.6146765",
"0.6143876",
"0.6121142",
"0.6116997",
"0.6111547",
"0.60523593",
"0.60430956",
"0.6036813",
"0.6036312",
"0.6031201",
"0.60006607",
"0.5984077",
"0.59753966",
"0.5969846",
"0.5967387",
"0.59579754",
"0.59564024",
"0.59455675",
"0.5928065",
"0.5906157",
"0.58906895",
"0.58861166",
"0.5882656",
"0.5876846",
"0.5875467",
"0.58739173",
"0.5857544",
"0.58459246",
"0.5843448",
"0.5836126",
"0.58322316",
"0.58009297",
"0.58001363",
"0.57934946",
"0.5791945",
"0.5791648",
"0.5791568",
"0.5776564",
"0.5775392",
"0.5773525",
"0.57631606",
"0.57606643",
"0.57592",
"0.5750825",
"0.5746058",
"0.57425344",
"0.57411504",
"0.5737561",
"0.5735194",
"0.5734885",
"0.5730473",
"0.5722497",
"0.5718624",
"0.5714137",
"0.5699973",
"0.5691127",
"0.56845456",
"0.5678672",
"0.5665594",
"0.5651257",
"0.5627333",
"0.5618265",
"0.56069297",
"0.56041414",
"0.56041414",
"0.55957013",
"0.5590291",
"0.5577296",
"0.55661947",
"0.5558697",
"0.5555277",
"0.5547956",
"0.5534928"
] | 0.7620087 | 0 |
Formats n words to the left of the cursor to snake case. Note that word count differs between editors and programming languages. The examples are all from Eclipse/Python. | def snake_case_count(n):
saveText = _get_clipboard_text()
cutText = _select_and_cut_text(n)
if cutText:
endSpace = cutText.endswith(' ')
text = _cleanup_text(cutText.lower())
newText = '_'.join(text.split(' '))
if endSpace:
newText = newText + ' '
newText = newText.replace("%", "%%") # Escape any format chars.
Text(newText).execute()
else: # Failed to get text from clipboard.
Key('c-v').execute() # Restore cut out text.
_set_clipboard_text(saveText) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pascal_case_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n text = _cleanup_text(cutText)\n newText = text.title().replace(' ', '')\n if endSpace:\n newText = newText + ' '\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)",
"def lowercase_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n newText = cutText.lower()\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)",
"def camel_case_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n text = _cleanup_text(cutText)\n newText = _camelify(text.split(' '))\n if endSpace:\n newText = newText + ' '\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)",
"def snake_case_text(text):\n newText = format_snake_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})",
"def wrap_by_word(s, n):\n\ta = s.split()\n\tret = ''\n\tfor i in range(0, len(a), n):\n\t\tret += ' '.join(a[i:i+n]) + '\\n'\n\treturn ret",
"def render_snake(var_words):\n return '_'.join(var_words)",
"def uppercase_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n newText = cutText.upper()\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)",
"def snake_case(value: str, **kwargs: Any) -> str:\n return \"_\".join(map(str.lower, split_words(value)))",
"def _set_number_of_words(self, N):\n self.N_words_to_display = N",
"def proper_title_case(s):\n nocaps = [\"the\"] # This needs to be extended.",
"def generate_words(self,N):\n for i in xrange(N):\n prefix = \" \" * self.chainlen\n name = \"\"\n suffix = \"\"\n while True:\n suffix = self.get_suffix(prefix)\n if suffix == \"\\n\" or len(name) > 9:\n break\n else:\n name = name + suffix\n prefix = prefix[1:] + suffix\n yield name.capitalize()",
"def pascal_case(value: str, **kwargs: Any) -> str:\n return \"\".join(map(str.title, split_words(value)))",
"def squash_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n text = _cleanup_text(cutText)\n newText = ''.join(text.split(' '))\n if endSpace:\n newText = newText + ' '\n newText = _expand_after_special_chars(newText)\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)",
"def make_title(words):",
"def mixed_snake_case(value: str, **kwargs: Any) -> str:\n return \"_\".join(split_words(value))",
"def expand_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n cutText = _expand_after_special_chars(cutText)\n reg = re.compile(\n r'([a-zA-Z0-9_\\\"\\'\\)][=\\+\\-\\*/\\%]|[=\\+\\-\\*/\\%][a-zA-Z0-9_\\\"\\'\\(])')\n hit = reg.search(cutText)\n count = 0\n while hit and count < 10:\n cutText = cutText[:hit.start() + 1] + ' ' + \\\n cutText[hit.end() - 1:]\n hit = reg.search(cutText)\n count += 1\n newText = cutText\n if endSpace:\n newText = newText + ' '\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)",
"def _camel_to_snake(s):\n return \"_\".join(\n [\n i.lower() for i in _camel_words.split(s)[1::2]\n ]\n )",
"def generate_text_owc(model: Dict[str, Set[str]], n: int) -> str:\n # ACCUMULATOR: a list of the randomly-generated words so far\n words_so_far = []\n # We've provided this template as a starting point; you may modify it as necessary.\n words_so_far.append(generate_new_word(model))\n for x in range(0, n-1):\n key = words_so_far[x]\n new_word = generate_next_word(model,key)\n if new_word == \".\":\n words_so_far[x] = words_so_far[x]+'.'\n new_word= generate_new_word(model)\n elif new_word == {}:\n new_word = generate_new_word(model)\n words_so_far.append(new_word)\n\n return str.join(' ', words_so_far)",
"def wcount(lines, topn):\n word = ''\n for i in lines:\n if 65<=ord(i) and ord(i)<=90:\n word = word + i \n elif 97<=ord(i) and ord(i)<=122:\n word = word + i\n else:\n word = word + ' ' \n word = word.split()\n #提取不重复的单词\n alreadyknown = []\n for m in word:\n if m not in alreadyknown:\n alreadyknown.append(m)\n #分别数数,排序,建构字典\n empty = []\n final = {}\n final2 = {}\n for j in alreadyknown:\n number = icount(word,j)\n final[j]=number\n final2[str(number)]=j\n empty.append(number)\n empty.sort()\n empty.reverse()\n last_step = empty[:10]\n #通过数字找到对应word\n last_str = ''\n for y in last_step:\n z = final2[str(y)]\n last_str += z + \"\\t\" + str(y) + \"\\n\"\n return last_str",
"def test20():\n\tdef highlight_word(sentence, word):\n\t\treturn(\" \".join(x) for x in sentence.split())\n\n\tprint(highlight_word(\"Have a nice day\", \"nice\"))\n\tprint(highlight_word(\"Shhh, don't be so loud!\", \"loud\"))\n\tprint(highlight_word(\"Automating with Python is fun\", \"fun\"))",
"def random_text(n):\n start = random.choice(suffix_map.keys())\n for i in range(n):\n suffixes = suffix_map.get(start, None)\n if suffixes == None:\n # if the start isn't in map, we got to the end of the\n # original text, so we have to start again.\n random_text(n-i)\n return\n # choose a random suffix\n word = random.choice(suffixes)\n # Jodesty: *Need to learn how to format text output to fit on terminal screen\n output_words.append(word)\n # Jodesty: *what I have for now\n print word,\n start = shift(start, word)",
"def test_downcase_word(self):\n before_b = \"\"\"\\\n XYZZY line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n xyzzy line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"1.4\", \"1.4\"),\n after_sel=(\"1.4\", \"1.4\"),\n command_name=\"downcase-word\",\n )",
"def print_words(words):\n print('Here are the words you entered:')\n for i, word in enumerate(words, start=1):\n print('{}. {}'.format(i, word.title()))",
"def pascal_case_text(text):\n newText = format_pascal_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})",
"def _snake_case(display_name):\n str_re = re.compile('[{0}]'.format(re.escape(string.punctuation)))\n str = str_re.sub(' ', display_name)\n str = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', str)\n str = re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', str).lower()\n return re.sub(' +', '_', str)",
"def rotate_word(s1, n):\n s2 = ''\n for c in s1:\n i = (ord(c)-97+n) % 26\n ch = chr(i+97)\n s2 = s2 + ch\n return s2",
"def CamelCase_to_snake_case(text):\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', text)\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()",
"def nth_word(value: str, n: int) -> str:\n return value.split()[n]",
"def make_text(chains, n):\n\n words = []\n\n capital_keys = [key for key in chains.keys() if key[0][0].isupper() and chains[key] != None]\n first_key = choice(capital_keys)\n\n words.extend(list(first_key))\n rand_value = choice(chains[first_key])\n words.append(rand_value)\n\n current_string = \" \".join(words)\n\n i = 1\n while len(current_string) < 140:\n current_string = \" \".join(words)\n new_key = tuple(words[i: i + n])\n if not chains[new_key]:\n break\n else:\n rand_value = choice(chains[new_key])\n words.append(rand_value)\n i += 1\n\n return current_string",
"def get_number_of_words_with_capital_letters_and_lowercase(self):\n filename = f'{self.path}/{self.filename}'\n file = open(filename, 'r', encoding='utf-8')\n data = file.read()\n head, sep, tail = data.partition('<binary')\n head = re.sub('\\\\s\\\\s*', ' ', (re.sub('\\\\W|\\\\d', ' ', re.sub('<.*?>', '', head))))\n word_list = head.split()\n\n # upper_cnt = (sum([sum([c.isupper() for c in a]) for a in word_list]))\n # lower_cnt = (sum([sum([c.islower() for c in a]) for a in word_list]))\n\n upper_cnt = sum([a[0].isupper() for a in word_list])\n lower_cnt = (sum([a.islower() for a in word_list]))\n\n sqlite_for_ht.CreateTable.update_table(f_1, self.filename, 'words_with_capital_letters', upper_cnt)\n sqlite_for_ht.CreateTable.update_table(f_1, self.filename, 'words_in_lowercase', lower_cnt)\n print(datetime.now(), '-', 'words_with_capital_letters for', self.filename, 'calculated =', upper_cnt)\n print(datetime.now(), '-', 'words_in_lowercase for', self.filename, 'calculated =', lower_cnt)\n return None",
"def random_text(self, n=100):\n # choose a random prefix (not weighted by frequency)\n start = random.choice(list(self.suffix_map.keys()))\n #print(\">>DEBUG | start is\", start)\n \n for i in range(n):\n #print(\">> DEBUG | i is\", n)\n suffixes = self.suffix_map.get(start, None)\n #print(\">> DEBUG | suffixes is\", suffixes)\n if suffixes == None:\n # if the start isn't in map, we got to the end of the\n # original text, so we have to start again.\n #print(\">> DEBUG | start isn't in map\")\n random_text(n-i)\n return\n\n # choose a random suffix\n word = random.choice(suffixes)\n #print(\">> DEBUG | word is\", word)\n print(word, end=' ')\n start = self.shift(start, word)",
"def get_first_k_words(text: str, num_words: int) -> str:\n words = text.split()\n if num_words >= len(text):\n return text\n\n return ' '.join(words[:num_words])",
"def create_word(char_list):",
"def wcount(lines, topn = 10):\n global worddict\n worddict = {}\n # record words each line by each\n linestr = lines.readline().decode() \n while linestr:\n record(linestr)\n linestr = lines.readline().decode()\n \n # sort the worddict to construct a wordlist\n wordlist = sorted(worddict.items(),\\\n key=lambda x:x[1],reverse = True)\n \n # get all words if lenth is less than number\n print(' '*3+'Word'.ljust(30),'Times'.center(10))\n for num in range(min(len(wordlist),topn)):\n print(' '*3+wordlist[num][0].ljust(30),\\\n str(wordlist[num][1]).center(10))",
"def test_wrap_word():\n line = \"n\" * 81\n assert wrap_line(line) == \"n\" * 80 + \"\\nn\"",
"def pascal_to_snake(string):\n return string[0].lower() + re.sub('[A-Z]', lambda match: '_' + match.group(0).lower(), string[1:])",
"def display_top_n_words(total_count__of_words, n): # Considering n=10 here as specified in the requirements\n return sorted(total_count__of_words.items(), key=lambda i: i[1], reverse=True)[:n]",
"def snake_to_spaces(snake_cased_str):\n separator = \"_\"\n components = snake_cased_str.split(separator)\n if components[0] == \"\":\n components = components[1:]\n if components[-1] == \"\":\n components = components[:-1]\n if len(components) > 1:\n spaced_str = components[0].lower()\n for x in components[1:]:\n spaced_str += \" \" + x.lower()\n else:\n spaced_str = components[0]\n return spaced_str",
"def wc(file_):\r\n with open(file_) as f:\r\n file = f.read().strip()\r\n char_nums = len(file)\r\n lines = file.split('\\n')\r\n line_nums = len(lines)\r\n word_nums = 0\r\n for line in lines:\r\n words = line.split()\r\n word_nums += len(words)\r\n return f'{line_nums} {word_nums} {char_nums} {file_}'",
"def camel_to_snake(s):\n subbed = _underscorer1.sub(r'\\1_\\2', s)\n return _underscorer2.sub(r'\\1_\\2', subbed).lower()",
"def _nth_letter(n):\r\n\treturn string.ascii_lowercase[n % len(string.ascii_lowercase)]",
"def display_words(word_list,specifier):\n \n if specifier.lower() == 'score':\n print(\"{:>6s} - {:s}\".format(\"Score\", \"Word\"))\n if len(word_list) < 5:\n for tup in word_list:\n print(\"{:>6d} - {:s}\".format(tup[1], tup[0]))\n else:\n \n for tup in word_list[:5]:\n print(\"{:>6d} - {:s}\".format(tup[1], tup[0]))\n \n \n elif specifier.lower() == 'length':\n print(\"{:>6s} - {:s}\".format(\"Length\", \"Word\"))\n if len(word_list) < 5:\n for tup in word_list:\n print(\"{:>6d} - {:s}\".format(tup[2], tup[0]))\n else:\n \n for tup in word_list[:5]:\n print(\"{:>6d} - {:s}\".format(tup[2], tup[0]))",
"def wcount(lines, topn=10):\n words=lines.lower()\n words=words.replace('.', '')\n words=words.replace(',', ' ')\n words=words.replace('!', ' ')\n words=words.replace('?', ' ')\n words=words.replace(':', ' ')\n words=words.replace('_', ' ')\n words=words.replace('\"', ' ')\n words=words.replace(\"'\", ' ')\n words=words.replace('(', ' ')\n words=words.replace(')', ' ')\n words=words.replace('[', ' ')\n words=words.replace(']', ' ')\n words=words.replace('-', ' ')\n words=words.replace(';', ' ')\n words=words.replace('\"', ' ')\n words=words.replace('*', ' ')\n lst=words.split(' ')\n lst2=list(set(lst))\n lst2.remove('')\n dic={}\n for i in lst2:\n dic[i]=lst.count(i)\n wds=list(dic.keys())\n numbers=list(dic.values())\n numbers2=sorted(numbers, reverse=True)\n for k in range(topn):\n m=numbers.index(numbers2[k])\n print(\"%-15s%-5d\"%(wds[m],numbers2[k]))",
"def nwords(s: str):\n letters = \"ABCDEFGHIJKLMNOPQRSTUVWXYZÄÜÖabcdefghijklmnopqrstuvwxyzüäö\"\n take = 0\n skip = 0\n for i in s:\n if i not in letters:\n skip += 1\n #print(\"S:\", skip)\n else:\n take += 1\n #print(\"t:\", take)\n res = (len(s) - take) + 1\n return res",
"def _camel_to_snake(name):\n s1 = re.sub(\"(.)([A-Z][a-z]+)\", r\"\\1_\\2\", name)\n return re.sub(\"([a-z0-9])([A-Z])\", r\"\\1_\\2\", s1).lower()",
"def make(text=input()):\n alp = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'\n box = []\n dic = dict()\n val = 0\n #collect alphabets into list\n for i in alp:\n if i in text:\n dic[i] = text.count(i)\n box.append(i)\n if text.count(i) > val:\n val = text.count(i)\n else:\n val = val\n for i in range(val, 0, -1):\n print(\"%03d \"%i, end=\"\")\n for wow in sorted(dic, key=str.swapcase):\n if dic[wow] >= i:\n print(\"*\", end=\" \")\n else:\n print(\" \", end=\" \")\n print()\n print(\" \", *box, sep=\" \")",
"def convert_pascal_case_to_readable_format(text):\n return \"\".join(list(map(lambda x : \" \" + x.lower() if x.isupper() else x, list(text))))",
"def _camel_case_to_snake_case(text: str) -> str:\n return re.sub(r\"(?<!^)(?=[A-Z])\", \"_\", text).lower()",
"def test_capitalize_word(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n first line\n line 1\n Line a\n line b\n line c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"3.6\", \"3.6\"),\n after_sel=(\"3.6\", \"3.6\"),\n command_name=\"capitalize-word\",\n )",
"def pascal_case(st) -> str:\n if st.find('-') != -1 or st.find('_') != -1:\n st = ''.join(a.capitalize() for a in re.split('-|_', st))\n return st[:1].upper() + st[1: len(st)]",
"def test_run():\n print count_words(\"cat bat mat cat bat cat\", 3)\n print count_words(\"betty bought a bit of butter but the butter was bitter\", 3)",
"def test_run():\n print count_words(\"cat bat mat cat bat cat\", 3)\n print count_words(\"betty bought a bit of butter but the butter was bitter\", 3)",
"def ladcased(normal):\r\n\r\n ladified = ''\r\n for i, c in enumerate(normal):\r\n ladified += c.lower() if (i % 2 == 0) else c.upper()\r\n\r\n return ladified",
"def clean_5_words(self):\n self.first_word.configure(text=\"\")\n self.second_word.configure(text=\"\")\n self.third_word.configure(text=\"\")\n self.fourth_word.configure(text=\"\")\n self.fifth_word.configure(text=\"\")",
"def test_run():\r\n print(count_words(\"cat bat mat cat bat cat\", 3))\r\n print(count_words(\"betty bought a bit of butter but the butter was bitter\", 3))",
"def camel_to_snake(name):\n name = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', name)\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', name).lower()",
"def _nice_case(line):\n line_lower = line.lower()\n s = \"\"\n i = 0\n nextCap = 1\n while i < len(line_lower):\n c = line_lower[i]\n if c >= \"a\" and c <= \"z\" and nextCap:\n c = c.upper()\n nextCap = 0\n elif c in \" .,;:\\t-_\":\n nextCap = 1\n s += c\n i += 1\n return s",
"def _(event):\n buffer = event.current_buffer\n buffer.cursor_position += buffer.document.find_next_word_beginning(count=event.arg) or \\\n buffer.document.get_end_of_document_position()",
"def format_camel(text):\r\n\toutput = \"\"\r\n\tfor i,w in enumerate(text.split(\" \")):\r\n\t\tif i > 0:\r\n\t\t\toutput += w[0].upper() + w[1:]\r\n\t\telse:\r\n\t\t\toutput += w\r\n\treturn output",
"def snakecase_to_sentencecase(name: str) -> str:\n return stringcase.sentencecase(name).lower()",
"def getWordScore(word, n):\n score=0\n for i in range(len(word)):\n addition=SCRABBLE_LETTER_VALUES[word[i]]\n score+=addition*(len(word))\n if len(word)==n:\n score+=50\n return score",
"def words_capital_letter(self, path, filemoving, parser):\n root = parser.parsing_xml(path, filemoving)\n root_tag = root.tag[0:(root.tag.find('}') + 1)]\n number_of_words_capital_letter = 0\n for i in root.iter(root_tag+'p'):\n if str(type(i.text)) == \"<class 'str'>\":\n number_of_words_capital_letter = number_of_words_capital_letter + sum(list(map(lambda x: x.istitle(), i.text.split())))\n return number_of_words_capital_letter",
"def to_lowercase(self):\n new_words = []\n # new_words = \"\"\n for word in self.words:\n new_word = word.lower()\n new_words.append(new_word)\n # new_word += f\"{new_word} \"\n self.words = new_words\n return self",
"def just(s: str) -> str:\n return s.ljust(50, \"_\")",
"def count_words(s, n):\n\n # TODO: Count the number of occurences of each word in s\n words = s.lower().split()\n dict = {}\n\n for item in words:\n dict[item] = words.count(item)\n\n # TODO: Sort the occurences in descending order (alphabetically in case of ties)\n items = dict.items()\n\n items.sort(key=lambda tup: tup[0])\n items.sort(key=lambda tup: tup[1], reverse=True)\n\n # TODO: Return the top n words as a list of tuples (<word>, <count>)\n return items[:n]",
"def get_word_score(word, n=7):\n score = 0\n\n for i in word:\n score += SCRABBLE_LETTER_VALUES[i]\n\n if len(word) == n:\n score += 50\n\n return score",
"def getWordScore(word, n):\n score = 0\n for letter in word:\n score += SCRABBLE_LETTER_VALUES[letter]\n score *= len(word)\n if len(word) == n:\n score += 50\n return score",
"def longest_words(self, n=10):\n return sorted(set(self.text), key=len, reverse=True)[:n]",
"def counter(name):\n count_name = list(name)\n counter = 0\n for letter in count_name:\n counter += 1\n\n print(f\"There are {counter} letter in the name {name}.\")\n print(f\"\\tAnd btw... {name} backwards is {name[::-1].lower()}.\")",
"def lower(self) -> str:",
"def format_words(words):\n return sorted(words, key=str.lower)",
"def camel_to_snake(s):\n no_camel = \"\".join([\"_\" + c.lower() if c.isupper() else c for c in s]).lstrip(\"_\")\n return no_camel.replace(\"__\", \"_\")",
"def camel_to_snake(column_name):\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', column_name)\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()",
"def wrap(cls, text, first=0, indent=15, maxwidth=75):\n outstr = []\n sentence = []\n if not text:\n return \"\"\n for word in text.split():\n if len(\" \".join(sentence)) + len(word) + first > maxwidth:\n outstr.append(\" \".join(sentence))\n sentence = [\" \" * indent, word]\n first = 0\n else:\n sentence.append(word.strip())\n outstr.append(\" \".join(sentence))\n return \"\\n\".join(outstr)",
"def convert_to_snake_case(string: str) -> str:\n\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', string)\n draft = re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()\n return draft.replace('__', '_')",
"def camel_to_snake(name: str) -> str:\n name = re.sub(\"(.)([A-Z][a-z]+)\", r\"\\1_\\2\", name)\n return re.sub(\"([a-z0-9])([A-Z])\", r\"\\1_\\2\", name).lower()",
"def _select_and_cut_text(wordCount):\n clipboard = Clipboard()\n clipboard.set_system_text('')\n Key('cs-left/3:%s/10, c-x/10' % wordCount).execute()\n return clipboard.get_system_text()",
"def add_space(s,n):\n t = \"\"\n for i in xrange(len(s)):\n # Add white space after every n characters.\n if i % n == 0 and i != 0:\n t += ' '\n t += s[i]\n\n return t",
"def s_words(words):\n\t\n\treturn words // 100 / 10",
"def snake_to_camel(text: str) -> str:\n\n data = [\n i.capitalize()\n for i in text.split(\"_\")\n ]\n return \"\".join(data)",
"def create_word(self):\r\n\r\n template = self.word_constructions.get()\r\n word = \"\"\r\n for c in template:\r\n if c == \"v\":\r\n letter = self.get_letter(100)\r\n else:\r\n letter = self.get_letter(0)\r\n word += letter\r\n\r\n while not any(letter in self.vowels for letter in word):\r\n length = len(word)\r\n if length == 1:\r\n index = 0\r\n elif length == 2:\r\n index = random.randrange(0, 2)\r\n else:\r\n a = len(word) / 2\r\n index = a + random.randrange(-a / 2, a / 2)\r\n word = word[:index] + self.get_letter(100) + word[index + 1:]\r\n\r\n if random.random() > self.capital_chance:\r\n word = word.capitalize()\r\n self.words.append(word)\r\n self.word_count += 1\r\n return word",
"def generateSentences(self, n, maxLength=5):\n\n if n < 1:\n return\n \n string = ''\n\n while n:\n prevWord = random.choice(self.starters)\n newSentence = prevWord + ' '\n sentenceFormed = False\n\n for _ in range(maxLength):\n keyFound = False\n while not keyFound:\n newStuff = ''\n if not prevWord:\n newSentence = ''\n break\n if prevWord in self.model:\n keyFound = True\n newStuff = random.choice(self.model[prevWord])\n else:\n listOfPrevWord = prevWord.split(' ')[::-1]\n listOfPrevWord.pop()\n prevWord = ' '.join(listOfPrevWord[::-1])\n\n if not newStuff:\n break\n\n newSentence += newStuff\n\n if newSentence and newSentence[-1] in '.?!\\'\\\"':\n sentenceFormed = True\n break\n \n newSentence += ' '\n if len(newSentence) < self.overlap:\n prevWord = newStuff.split(' ')\n else:\n prevWord = newStuff.split(' ')[-self.overlap]\n \n if sentenceFormed:\n n -= 1\n string += newSentence + ' ' \n \n return string",
"def convert_camel_case_to_underscore(text):\n words = []\n last_capital_index = 0\n\n for current_letter_index in xrange(1, len(text)):\n if text[current_letter_index].isupper():\n words.append(text[last_capital_index:current_letter_index])\n last_capital_index = current_letter_index\n elif current_letter_index == len(text) - 1:\n words.append(text[last_capital_index:])\n\n return '_'.join(words).lower()",
"def __init__(self, n, sents):\n assert n > 0\n self._n = n\n print(\"Counting...\")\n count = defaultdict(int)\n while n >= 0:\n for sent in sents:\n s = sent[:] # En una oracion auxiliar agrego el item de start y end para contarlos\n s.insert(0, \"<s>\")\n s.append(\"</s>\")\n for i in range(len(s) - n + 1):\n count[tuple(s[i:i + n])] += 1\n n -= 1\n count[()] = count[()] - count[('<s>',)] - count[\n ('</s>',)] # Pero no quiero que <s> y </s> sean considerados por ()\n self._count = count\n print(\"Computing vocabulary...\")\n self._voc = voc = set()\n for sent in sents:\n voc = voc.union(set(sent))\n voc.add('</s>')\n self._voc = voc\n self._V = len(voc) # vocabulary size\n print(\"Done\")",
"def spin_words(sentence):\n\n words = sentence.split()\n words = [word if len(word) < 5 else word[::-1] for word in words]\n return \" \".join(words)",
"def words_before_index(text, idx):\n while text[idx] != ' ':\n idx -= 1\n if idx <= 0:\n return 0\n n_words = len(text[:idx].split(' '))\n return n_words",
"def _(event):\n buffer = event.current_buffer\n buffer.cursor_position += buffer.document.find_previous_word_beginning(count=event.arg) or 0",
"def motion_w(input_line, cur, count):\n pos = get_pos(input_line, REGEX_MOTION_LOWERCASE_W, cur, True, count)\n if pos == -1:\n return len(input_line), False, False\n return cur + pos, False, False",
"def fit_to_width(string, limit):\n\n input_words = string.split()\n i = 0\n line_list = []\n new_str = str()\n\n for word in input_words:\n if i == 0:\n new_str = word\n elif len(new_str+word) < limit:\n new_str = new_str + ' ' + word\n else:\n line_list.append(new_str)\n new_str = word\n if i == (len(input_words)-1):\n line_list.append(new_str)\n i += 1\n\n for string in line_list:\n print(string)",
"def add_padding(self, text):\n\n for word in text.split(' '):\n # 5 character blocks added straight\n if len(word) == 5:\n self.output += word + ' '\n # calling the helper method to fill the blocks\n elif len(word) < 5:\n self._helper(word)\n # split the block up into 5 letter chunks\n elif len(word) > 5:\n block = ''\n for letter in word:\n block += letter\n if len(block) == 5:\n # append the chunk to output\n self.output += block + ' '\n block = ''\n self._helper(block)\n\n return self.output.upper()",
"def make_snake_case(string):\n return snake_re.sub(r'_\\1', string).lower()",
"def to_snake(string):\n\n return re.sub(r'(?<!^)(?=[A-Z])', '_', string).lower()",
"def getWordScore(word, n):\n score = 0\n\n for letters in word:\n if letters in SCRABBLE_LETTER_VALUES:\n score += SCRABBLE_LETTER_VALUES[letters]\n\n if len(word) == n:\n return (score * len(word)) + 50\n else:\n return score * len(word)",
"def test_case_sensitive() -> None:\n tknzr = WsTknzr(is_uncased=False, max_vocab=-1, min_count=0)\n tknzr.build_vocab(['a', 'A'])\n assert tknzr.tk2id == {\n BOS_TK: BOS_TKID,\n EOS_TK: EOS_TKID,\n PAD_TK: PAD_TKID,\n UNK_TK: UNK_TKID,\n 'a': max(BOS_TKID, EOS_TKID, PAD_TKID, UNK_TKID) + 1,\n 'A': max(BOS_TKID, EOS_TKID, PAD_TKID, UNK_TKID) + 2,\n }",
"def cut_in_words(self,linea):\n length = 0\n res = ''\n limit_screen = 30\n for word in linea.split(' '):\n if length + len(word) <= limit_screen:\n new_word = word + ' '\n length += len(new_word)\n else:\n new_word = '\\n' + word + ' '\n length = len(new_word) - 2 #-2 para no tener en cuenta el \\n\n res += new_word\n return res",
"def to_upper_snakecase(text: str) -> str:\n\n data = text.replace(\" \", \"_\").upper()\n if data[0].isdigit():\n data = \"_\" + data\n return data",
"def lowercase_text(text):\n newText = format_lower_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})",
"def random_scrambled(wordlist, n):\n s = random_string(wordlist, n) + \" \"\n shifts = [(i, random.randint(0, 26)) for i in range(len(s)) if s[i-1] == ' ']\n return apply_shifts(s, shifts)[:-1]",
"def camel_case_text(text):\n newText = format_camel_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})",
"def createWordKnown(self):\n return ''.join(['_ ' for m in range(self.wordLen)])"
] | [
"0.7383346",
"0.7166221",
"0.6926944",
"0.6357526",
"0.62876",
"0.6222895",
"0.5993043",
"0.5974094",
"0.5933266",
"0.5884368",
"0.5855512",
"0.58387643",
"0.57972294",
"0.57646835",
"0.574676",
"0.57403195",
"0.57358956",
"0.5729549",
"0.5655932",
"0.5653761",
"0.56503767",
"0.562152",
"0.5605645",
"0.55945045",
"0.55934334",
"0.5572235",
"0.5572001",
"0.5568948",
"0.55586344",
"0.55516106",
"0.555161",
"0.55478644",
"0.5537373",
"0.55283356",
"0.549001",
"0.54693705",
"0.54614466",
"0.545766",
"0.5402482",
"0.5391529",
"0.5390642",
"0.53884226",
"0.5386988",
"0.53813237",
"0.5379771",
"0.53779244",
"0.5373849",
"0.53728896",
"0.5371893",
"0.5355825",
"0.5355291",
"0.5355291",
"0.5350196",
"0.5348093",
"0.53346896",
"0.5334553",
"0.5317101",
"0.53144145",
"0.53133273",
"0.5308259",
"0.5302642",
"0.52947116",
"0.5275352",
"0.527382",
"0.5270969",
"0.5268311",
"0.52674675",
"0.5262189",
"0.5259124",
"0.52581346",
"0.5254268",
"0.52513695",
"0.5242058",
"0.52392244",
"0.5238659",
"0.5231559",
"0.52228606",
"0.5216116",
"0.5215365",
"0.52128434",
"0.520361",
"0.5201684",
"0.5198211",
"0.5188982",
"0.518743",
"0.5185165",
"0.51843536",
"0.51714367",
"0.51662457",
"0.5160899",
"0.51559824",
"0.51516074",
"0.51502764",
"0.5148016",
"0.5146681",
"0.51429045",
"0.5135012",
"0.5132828",
"0.51316255",
"0.5113134"
] | 0.7747876 | 0 |
Formats dictated text with whitespace removed. | def squash_text(text):
newText = format_squash(text)
Text("%(text)s").execute({"text": newText}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reformat_text(self, text):\n xml = BeautifulSoup(text)\n self.remove_header_and_footer(xml)\n self.process_superscripts(xml)\n self.remove_footnotes(xml)\n text = xml.get_text() # Strip XML tags.\n text = self.join_hyphenated_words(text)\n text = self.remove_linebreaks(text)\n return text",
"def formatter(text):\n repl_map = {\n \"degC\": \"$^o$C\",\n \"K\": \"$^o$C\",\n \"month-1\": \"month$^{{-1}}$\",\n \"day-1\": \"day$^{{-1}}$\",\n \"d-1\": \"day$^{{-1}}$\",\n \"decade-1\": \"decade$^{{-1}}$\",\n \"year-1\": \"year$^{{-1}}$\",\n \"rcp85\": \"RCP8.5\",\n \"rcp45\": \"RCP4.5\",\n \"rcp26\": \"RCP2.6\",\n \"RCP85\": \"RCP8.5\",\n \"RCP45\": \"RCP4.5\",\n \"RCP26\": \"RCP2.6\",\n \"cmip5-85\": \"RCP8.5\",\n \"cmip5-60\": \"RCP6.0\",\n \"cmip5-45\": \"RCP4.5\",\n \"cmip5-26\": \"RCP2.6\",\n \"ssp585\": \"SSP5-8.5\",\n \"ssp245\": \"SSP2-4.5\",\n \"ssp126\": \"SSP1-2.6\",\n \"SSP585\": \"SSP5-8.5\",\n \"SSP245\": \"SSP2-4.5\",\n \"SSP126\": \"SSP1-2.6\",\n \"cmip6-85\": \"SSP5-8.5\",\n \"cmip6-70\": \"SSP3-7.0\",\n \"cmip6-60\": \"SSP4-6.0\",\n \"cmip6-34\": \"SSP4-3.4\",\n \"cmip6-45\": \"SSP2-4.5\",\n \"cmip6-26\": \"SSP1-2.6\",\n \"cmip6-19\": \"SSP1-1.9\",\n \"1\": \"%\",\n \"era5\": \"ERA5\",\n \"gpcc025x025_v8\": \"GPCC\",\n \"cru\": \"CRU\",\n \"jra55\": \"JRA55\",\n \"HIGHRESMIP\": \"HighResMIP\",\n \" \": \"\",\n }\n for key, val in repl_map.items():\n if key in text:\n text = text.replace(key, val)\n break\n return text",
"def _format_text(self, text) :\n text_width = self.width - self.current_indent\n indent = \" \"*self.current_indent\n output_text = []\n paragraphs = text.split('\\n')\n for p in paragraphs :\n output_text.append(textwrap.fill(p,\n text_width,\n initial_indent=indent,\n subsequent_indent=indent))\n return '\\n'.join(output_text)",
"def format_text(self):\n\n return \"{}{}{}\".format(self.get_text(),\n Message.format_performers(self.get_performers()),\n Message.format_keywords(self.get_keywords())).strip()",
"def format_text(text):\n\n\ttext = ' '.join(text).lower()\n\ttext = re.sub(r\"[^a-zA-Z.?!]\", \" \", text)\n\ttext = re.sub(r' +', ' ', text)\n\ttext = word_tokenize(text)\n\ttext = pos_tag(text)\n\n\treturn text",
"def complete_opt_format(self, text, *_):\n return [t + \" \" for t in FORMATTERS if t.startswith(text)]",
"def formatOutput(self, storedText, titleMode, internal=False):\n prefix = self.prefix\n suffix = self.suffix\n if titleMode:\n if self.html:\n storedText = self.removeMarkup(storedText)\n if globalref.docRef.formHtml:\n prefix = self.removeMarkup(prefix)\n suffix = self.removeMarkup(suffix)\n else:\n if not self.html:\n storedText = escape(storedText).replace('\\n', '<br />')\n if not globalref.docRef.formHtml:\n prefix = escape(prefix)\n suffix = escape(suffix)\n return u'%s%s%s' % (prefix, storedText, suffix)",
"def format_value(text):\n return text.encode('utf8').replace('\\n', ' ').replace('\\r', ' ')",
"def _text_formatting(bs4_tag):\n return bs4_tag.get_text().replace('\\n', '')",
"def handleFormatText(paragraphContent):\n # We tokenize and remove the stop word\n words = tokenizeWord(paragraphContent) \n \n stemWords = []\n # We loop on each word.\n for word in words:\n stemWord = STEMMER.stem(word)\n \n # Selection on a part of string.\n stemWord = re.sub(\"[*\\'\\.+:,\\`:/]\", '', stemWord)\n if stemWord.isdigit() or len(stemWord) < 2:\n continue\n \n stemWords.append(stemWord)\n my_r_string = stemWords.pop(0)\n for word in stemWords:\n my_r_string += \" \"+str(word)\n return my_r_string",
"def normalize(self, text: str) -> str:",
"def reformat(ctx):\n pass",
"def text_standardization(text_in):\n stand_text = text_in.strip()\n stand_text = ' '.join(stand_text.split())\n stand_text = stand_text.replace(u'(', u'(')\n stand_text = stand_text.replace(u')', u')')\n stand_text = stand_text.replace(u':', u':')\n return stand_text",
"def reformat():\n toolkit.reformat()",
"def normalize(self, what):\n txt = strippedtxt(what, [\"\\002\", \"\\003\"])\n txt = re.sub(\"\\s+\", \" \", what)\n txt = stripcolor(txt)\n txt = txt.replace(\"\\002\", \"*\")\n txt = txt.replace(\"<b>\", \"*\")\n txt = txt.replace(\"</b>\", \"*\")\n txt = txt.replace(\"<i>\", \"\")\n txt = txt.replace(\"</i>\", \"\")\n txt = txt.replace(\"<b>\", \"*\")\n txt = txt.replace(\"</b>\", \"*\")\n txt = txt.replace(\"<i>\", \"\")\n txt = txt.replace(\"</i>\", \"\")\n return txt",
"def process_text(text):\n text = re.sub(r'<@>\\s+|<s>\\s+|</s>\\s+|<p>\\s+|</p>\\s+|\\s+\\,|\\'s|\\'|\\;|\\(|\\)|\\-\\-\\s+|\\s+\\.', '', text)\n text = re.sub(r'\\.\\,', '. ,', text)\n text = re.sub(r'\\,', '', text)\n text = re.sub(r'\\$', '$ ', text)\n text = re.sub(r'\\%', ' %', text)\n text = re.sub(r'\\s\\\"\\s', ' ', text)\n text = re.sub(r'\\.\\s+', '. ', text)\n text = text.lower()\n return text",
"def _format_answer(self, text):\n text = str(text).replace('\\n', ' ')\n answer_width = 70\n pretty_text = '\\n\\t'.join(textwrap.wrap(text, answer_width))\n\n return pretty_text",
"def _cleanup_text(text):\n prefixChars = \"\"\n suffixChars = \"\"\n if text.startswith(\"-\"):\n prefixChars += \"-\"\n if text.startswith(\"_\"):\n prefixChars += \"_\"\n if text.endswith(\"-\"):\n suffixChars += \"-\"\n if text.endswith(\"_\"):\n suffixChars += \"_\"\n text = text.strip()\n text = text.replace('-', ' ')\n text = text.replace('_', ' ')\n text = text.replace(\"'\", ' ')\n text = re.sub('[ \\t\\r\\n]+', ' ', text) # Any whitespaces to one space.\n text = prefixChars + text + suffixChars\n return text",
"def sanitise(text: str):\n # Removes new lines, weird characters and dialogue\n text = \" \" + text + \" \"\n\n lined_text = text.split(\"\\n\")\n text = \"\"\n # Remove dialogue\n for line in lined_text:\n if \":\" in line:\n if line.index(\":\") < 15:\n index = line.index(\":\") + 1\n else:\n index = 0\n else:\n index = 0\n text = text + \"\\n\" + line[index:]\n\n # Lower case everything\n text = text.lower()\n\n text = text.replace(\"'s\", \" is\")\n text = text.replace(\"'ve\", \" have\")\n text = text.replace(\"n't\", \" not\")\n text = text.replace(\"I'm\", \"I am\")\n text = text.replace(\"'re\", \" are\")\n text = text.replace(\"’s\", \" is\")\n text = text.replace(\"’ve\", \" have\")\n text = text.replace(\"n’t\", \" not\")\n text = text.replace(\"I’m\", \"I am\")\n text = text.replace(\"’re\", \" are\")\n\n # Remove weird characters and double spaces\n weird_characters = [\".\", \",\", \"?\", \"!\", \"'\", \"’\", \"\\\"\", \"\\n\", \"\\t\", \"-\", \"/\", \"[\", \"]\", \"(\", \")\", \":\", \"“\", \"”\"]\n for weird_character in weird_characters:\n text = text.replace(weird_character, \" \")\n\n while \" \" in text:\n text = text.replace(\" \", \" \")\n\n return text",
"def __cleanText(self,stripNonAlphaNumeric=False, stripNumbers=False):\n if stripNonAlphaNumeric:\n txt = r1.sub(\" \",self.getRawText() )\n else:\n txt = self.getRawText()\n # clean up white spaces\n txt = r2.sub(\" \",txt)\n if stripNumbers:\n txt = r3.sub(\"\",txt)\n self.graph[\"__txt\"] = txt\n self.graph[\"__scope\"] = (0,len(txt))",
"def remove_formatting(formatted_text):\n return ''.join([formatted_tuple[1] for formatted_tuple in formatted_text]) # pylint: disable=not-an-iterable",
"def adjustText(cls, text):\n\t\t\n\t\t\"\"\"Adjust dates so to transform strings such as '21 August' to 'August\n\t\t 21' and have them recognized by the SCNLP tools\"\"\"\n\t\tmonths = (u'January|February|March|April|May|June|July'\n\t\t\t\t\t'August|September|October|November|December')\n\t\tdates = re.compile('(?P<day>\\d{1,2})\\s+(?P<month>%s)(\\s+(?P<year>(\\d{2,4})))?' % months)\n\t\ttext = dates.sub(cls.normalizeDate, text)\n\t\t# Strip any remaining HTML (WikiExtractor is not perfect)\n\t\thtmlTags = re.compile('<[^>]+>')\n\t\t\n\t\ttext = htmlTags.sub(\"\", text)\n\t\t\n\t\treturn text",
"def reformat_text(text: str) -> str:\n return (\n text.replace(\"<br>\", \"\\n\")\n .replace(\"[/b][b]\", \"\")\n .replace(\"[b]\", \"**\")\n .replace(\"[/b]\", \"**\")\n )",
"def process_text(text, args):\n if args.uppercase:\n text = convert_to_uppercase(text)\n\n if args.spaces:\n text = add_spaces(text)\n\n if not args.uppercase and not args.spaces:\n text = add_spaces(text)\n\n return text",
"def _format_dict(self, dict):\n\n result = \"\"\n for k, v in dict.items():\n result += \"\\n{0}: {1}\".format(k.capitalize(), v)\n\n return result",
"def formatOutput(self, storedText, titleMode, internal=False):\n choices, valid = self.sortedChoices(storedText)\n if valid:\n result = self.sep.join(choices)\n else:\n result = _errorStr\n return TextFormat.formatOutput(self, result, titleMode, internal)",
"def formatted(self) -> str:\r\n ...",
"def format(self, data):\r\n for name, value in sorted(data.items()):\r\n full_text = ': {name} : {value}'.format(\r\n name=name,\r\n value=value,\r\n )\r\n wrapped_text = textwrap.fill(\r\n full_text,\r\n initial_indent='',\r\n subsequent_indent=' ',\r\n width=self.max_width,\r\n )\r\n yield wrapped_text + '\\n'",
"def preprocess(self, text):\r\n return text",
"def markup_text(self, text):\n for moniker, name in S['names'].items():\n text = text.replace('${0}'.format(moniker.split('_')[1]), name)\n return text",
"def process_text(text):\n fix_dict = {'fig.': 'fig', 'fig .': 'fig ', 'Fig.': 'Fig', 'Fig .': 'Fig ',\n 'figure.': 'figure', 'figure .': 'figure ', 'Figure.': 'Fig', 'Figure .': 'Fig ',\n 'et al.': 'et al', 'III': '3', 'II': '2', 'I': '1'}\n\n for old_pattern in fix_dict.keys():\n text = text.replace(old_pattern, fix_dict[old_pattern])\n return text",
"def format_field(self, value, format_spec):\n value = super(FilenameFormatter, self).format_field(value, format_spec)\n if self.lowercase:\n value = value.lower()\n if not self.nonwordchars:\n value = re.sub('[^\\w\\s]+', '', value)\n value = re.sub('\\s+', self.word_delimiter, value)\n return value",
"def get_db_format(text):\n db_text = \"\"\n for t in text.split(\" \"):\n db_text += t.title()\n return db_text",
"def formatOutput(self, storedText, titleMode, internal=False):\n return TextFormat.formatOutput(self, storedText, titleMode, internal)",
"def preprocess_text(text):\n # replace non characers with space and lower case\n temp = re.sub(r\"[/W/D/S.,-]+\", \" \", str(text).lower())\n # merge multiple spaces to a single one\n return re.sub(r\"[ ]+\", \" \", temp)",
"def cleanText(self, stripNonAlphaNumeric=False, stripNumod_byers=False):\n if stripNonAlphaNumeric:\n txt = REG_CLEAN1.sub(\" \", self.getRawText())\n else:\n txt = self.getRawText()\n\n # clean up white spaces\n txt = REG_CLEAN2.sub(\" \", txt)\n if stripNumod_byers:\n txt = REG_CLEAN3.sub(\"\", txt)\n\n self.graph[\"__scope\"] = (0, len(txt))\n self.graph[\"__txt\"] = txt\n if self.getVerbose():\n print(\"cleaned text is now\", self.getText())",
"def _format_output(**values):\r\n return WEATHER_TEXT.format(**values)",
"def _sanitize(text):\n # TODO: any cleanup needed here?\n if text is None:\n return None\n text = text.replace('\\n', ' ')\n return text",
"def preprocess_input(self, text):\n text = re.sub(r\"([^a-zA-Z0-9 -]+ +[^a-zA-Z0-9 -]*|[^a-zA-Z0-9 -]*\" +\n \" +[^a-zA-Z0-9 -]+)\", ' ', text, flags=re.UNICODE)\n text = re.sub(r\"([^a-zA-Z0-9 -]+$|^[^a-zA-Z0-9 -]+)\", '', text)\n text = re.sub(r\"([a-zA-Z0-9 -]+?)([^a-zA-Z0-9 -])([a-zA-Z0-9 -]+?)\",\n r\"\\1'\\3\", text, flags=re.UNICODE)\n text = re.sub(r\"([\\x00-\\x7F -]+?)([^a-zA-Z0-9 -]+)([\\x00-\\x7F -]+?)\",\n r\"\\1'\\3\", text, flags=re.UNICODE).encode(\"utf-8\")\n return re.sub(r\"([^a-zA-Z0-9 \\-\\'])\", '', text, flags=re.UNICODE)",
"def _justifyText(text):\n chunks = text.split()\n line = []\n lineLength = 0\n for chunk in chunks:\n lineLength += len(chunk) + 1\n if lineLength <= 73:\n line.append(chunk)\n continue\n else:\n print('''{:5}{:73}{}'''.format(\"|\", \" \".join(line), \"|\"))\n del line[:]\n line.append(chunk)\n lineLength = len(chunk) + 1\n print('''{:5}{:73}{}'''.format(\"|\", \" \".join(line), \"|\"))",
"def short(text):\n rep = {\n ' *health *center': '',\n ' *health *ceanter': '',\n ' +H[./]*C': '',\n ' *health *post': '',\n ' *heslth *post': '',\n ' *Haelth *Post': '',\n ' *Health *Poat': '',\n ' *hospital': '',\n ' +h[./]*p': '',\n ' {2,}': ''}\n\n return reduce(lambda a, kv: re.sub(*kv, a, flags=re.I), rep.items(), text)",
"def clean_text(self, num='substitute'):\n for i, doc in enumerate(self.documents):\n if num is 'spell':\n doc = doc.replace('0', ' zero ')\n doc = doc.replace('1', ' one ')\n doc = doc.replace('2', ' two ')\n doc = doc.replace('3', ' three ')\n doc = doc.replace('4', ' four ')\n doc = doc.replace('5', ' five ')\n doc = doc.replace('6', ' six ')\n doc = doc.replace('7', ' seven ')\n doc = doc.replace('8', ' eight ')\n doc = doc.replace('9', ' nine ')\n elif num is 'substitute':\n doc = re.sub('(\\\\d+)', ' NUM ', doc)\n elif num is 'remove':\n doc = re.sub('[0-9]', ' ', doc)\n doc = doc.replace('$', ' dollar ')\n doc = doc.lower()\n doc = re.sub('[^a-z]', ' ', doc)\n doc = ' '.join(doc.split())\n self.documents[i] = doc",
"def make_style(self, mixed):\n if isinstance(mixed, dict):\n return ' '.join('%s: %s;' % (k, v) for k, v in mixed.items())\n return str(mixed)",
"def formatText(s, bold=False, underlined=False, negative=False):\n\n if not FORMATTING_AVAILABLE:\n return s\n\n head = \"\"\n if bold: head += \"\\033[1m\"\n if underlined: head += \"\\033[4m\"\n if negative: head += \"\\033[7m\"\n\n return head + s + \"\\033[0m\"",
"def __format__(self, format_spec):\n # This calls the compiled regex stored on ANSIString's class to analyze the format spec.\n # It returns a dictionary.\n format_data = self.re_format.match(format_spec).groupdict()\n clean = self.clean()\n base_output = ANSIString(self.raw())\n align = format_data.get(\"align\", \"<\")\n fill = format_data.get(\"fill\", \" \")\n\n # Need to coerce width into an integer. We can be certain that it's numeric thanks to regex.\n width = format_data.get(\"width\", None)\n if width is None:\n width = len(clean)\n else:\n width = int(width)\n\n if align == \"<\":\n base_output = self.ljust(width, fill)\n elif align == \">\":\n base_output = self.rjust(width, fill)\n elif align == \"^\":\n base_output = self.center(width, fill)\n elif align == \"=\":\n pass\n\n # Return the raw string with ANSI markup, ready to be displayed.\n return base_output.raw()",
"def prepareExplainerText(amount, ranges):\n text = \"\\n\"\n for currKey in amount:\n text += f\"{currKey}: {ranges[currKey]} | {amount[currKey]}\\n\"\n text += \"\\n\\n\"\n return text",
"def format_text(text):\n text = text.replace('to do ', '') \\\n .replace('to ', '') \\\n .replace(' at ', ' on ') \\\n .replace(' a ', ' 1 ') \\\n .replace(' an ', ' 1 ') \\\n .replace('minutes', '60') \\\n .replace('minute', '60') \\\n .replace('seconds', '1') \\\n .replace('second', '1') \\\n .replace('hours', '3600') \\\n .replace('hour', '3600')\n text = text.split(' ')\n pure_text = \"\"\n if text[-3] == 'in':\n for i in range(0, (len(text)-3)):\n pure_text += \" \" + text[i]\n final_text = [pure_text, text[-3], text[-2], text[-1]]\n else:\n for i in range(0, (len(text)-2)):\n pure_text += text[i]\n final_text = [pure_text, text[-2], text[-1]]\n if len(text) < 3:\n pprint(final_text)\n raise Exception(\"Bad remind request\")\n\n return final_text",
"def clean(text):\n new = text.replace(\"\\r\", \"\")\n new = new.replace(\"\\t\", \"\")\n new = new.replace(\"\\n\", \"\")\n new = new.replace(\"- \", \"-\")\n new = new.replace(\" \", \" \")\n return new",
"def edit_google_vision_text(self,text):\n s1=text\n try:\n log_info(\"Correcting google vision text to remove extra spacing\",MODULE_CONTEXT)\n i=0\n while(i<len(text)):\n s1=text\n if text[i] in [\"/\",\"।\",'।' ,':','|',\",\" ,'०',\"]\",\"-\",\")\",\"}\"] and text[i-1]==\" \": \n text=text[:i-1]+text[i:]\n if i > 0 :\n if text[i-1] in [\"-\",\"[\",\"{\",\"/\",\"(\"] and text[i]==\" \":\n text=text[:i]+text[i+1:]\n elif text[i] in [\"-\",\"[\",\"{\",\"/\",\"(\"] and text[i+1]==\" \":\n text=text[:i+1]+text[i+2:]\n i=i+1\n except Exception as e:\n log_exception(\"Exception while correcting google vision text\", MODULE_CONTEXT, e)\n return s1\n return text",
"def clean_text(text: Any) -> str:\n return textwrap.dedent(str(text)).strip()",
"def del_whitespace(selfs, text):\n\t\treturn text.replace(' ', '')",
"def preprocess_text(self):\n self.text_received = self.text_received.replace(\" \", \"\").lower()",
"def normalize_text(text):\n\n text = text.lower().strip().replace(\"\\n\", \" \").replace(\"\\r\", \"\")\n\n text = replace_money_token(text)\n text = replace_urls_token(text)\n text = fix_unicode_quotes(text)\n text = format_large_numbers(text)\n text = pad_punctuation(text)\n return text.strip()",
"def _apply_filters(self, text, tag):\n\n # The order of the filters below is important\n # and should not be changed\n\n # intial_quotes needs to happen at this point so that\n # attribute values introduced later on do not get affected\n text = self.initial_quotes(text)\n text = self.smarty_pants(text)\n text = self.amp(text)\n text = self.caps(text)\n\n return text",
"def desc_cleanser(self, txt):\n # New line issues\n txt = re.sub(r'\\\\n', r' ', txt)\n # Unicode cleanse\n txt = re.sub(r'\\\\u[\\d]{4}', r'', txt)\n # Remaining unicode cleanse\n txt = re.sub(r'\\\\{1,2}\\S+', r' ', txt)\n # Remove remaining non-alphanumeric and spaces\n txt = ''.join([i for i in txt if i.isalnum() or i.isspace() or i in ['.','?','!']])\n # Remove more than a single space\n txt = re.sub(r'\\s+', r' ', txt)\n\n return txt",
"def clean(text):\r\n #clean structure\r\n text = re.sub(r\"(?<!\\|)Amount ?\\| ?Ingredient(?!\\|)\", \"|Amount|Ingredient|\", text)\r\n text = re.sub(r\"----\\|----\\n\\n\", r\"----|----\\n\", text)\r\n text = re.sub(r\"(?<!\\|)----\\|----(?!\\|)\", \"|----|----|\", text)\r\n text = re.sub(\"## Directions\", \"## Cooking Instructions\", text)\r\n\r\n #fractions \r\n for pat, rep in repls:\r\n text = re.sub(pat, rep, text, flags=re.IGNORECASE)\r\n\r\n #links\r\n def fix_link(match):\r\n return \"](../\"+re.sub(\" \", \"-\", fix_title(match.group(1)))+\")\"\r\n text = re.sub(r\"\\]\\((.*?)\\)\", fix_link, text)\r\n \r\n lines = text.split(\"\\n\")\r\n new_text = []\r\n #add spaces to the end of lines\r\n for line in lines:\r\n match = re.search(r\" $\", line)\r\n if match:\r\n new_text.append(line)\r\n else:\r\n new_text.append(line+\" \")\r\n #remove spaces from the end of lines\r\n # for line in lines:\r\n # match = re.search(r\" +$\", line)\r\n # if match:\r\n # new_text.append(line[:-len(match.group(0))])\r\n # else:\r\n # new_text.append(line)\r\n\r\n text = \"\\n\".join(new_text)\r\n\r\n return text",
"def normalize_text(text):\n text = re.sub(r'[ \\t]+', ' ', text)\n text = re.sub(r'\\r', '', text)\n\n # Remove whitespace in the middle of text.\n text = re.sub(r'[ \\t]+\\n', '\\n', text)\n # Remove whitespace at the end of the text.\n text = text.rstrip()\n\n return text",
"def handle_kw_phrases(self, text: str\n ) -> str:\n for kw in self.spaces:\n if kw in text:\n text = text.replace(kw, self.spaces[kw])\n return text",
"def widont(self, tag, text):\n\n approved_tags = ['a','em','span','strong','i','b','p','h1',\n 'h2','h3','h4','h5','h6','li','dt','dd']\n \n # Must be inside an approved tag\n if tag not in approved_tags:\n return text\n \n widont_finder = re.compile(r\"\"\"\n (.*) # Group 1: captures everything except the final whitespace before a word\n \\s+ # The final whitespace before the word\n (\\S) # The actual word\n \\s* # Optional whitespace (which is removed if present)\n \"\"\", re.VERBOSE)\n\n replace_function = lambda match: '%s %s' % match.group(1, 2)\n text = widont_finder.sub(replace_function, text)\n\n return text",
"def preprocess(text):\n text = remove_space(text)\n text = clean_special_punctuations(text)\n text = handle_emojis(text)\n text = clean_number(text)\n text = spacing_punctuation(text)\n text = clean_repeat_words(text)\n text = remove_space(text)\n #text = stop(text)# if changing this, then chnage the dims \n #(not to be done yet as its effecting the embeddings..,we might be\n #loosing words)...\n return text",
"def _strip_text(text):\n text = re.sub(r'[ ,?:]|%s', \"\", text.lower())\n for chr in \"-%\":\n new_text = text.replace(chr, \"\")\n if new_text:\n text = new_text\n return text.lower()",
"def reset_format(self):\n ## Formatters\n self._format_setters(*self.format_set_info)\n self._format_getters(*self.format_get_info)\n self._format_joining_functions()",
"def formatted(s):\n matches = re.findall(_format_re, normalize(s))\n if len(matches) == 1 and matches[0][0] != '':\n return matches[0][0]\n def to_fmt(txt_none, txt_sw, txt_rem, txt_em, txt_a):\n if txt_none != '':\n return FORMAT_NONE, txt_none\n elif txt_sw != '':\n return FORMAT_SW, txt_sw\n elif txt_rem != '':\n return FORMAT_REM, txt_rem\n elif txt_em != '':\n return FORMAT_EM, txt_em\n elif txt_a != '':\n return FORMAT_A, txt_a\n return [to_fmt(*m) for m in matches]",
"def format_body(self):\n mt = deque(str(self.movetext).split(' ') + [])\n out = mt.popleft()\n ll = len(out)\n while True:\n if len(mt) is 0:\n break\n\n n = mt.popleft()\n # If the current line length + space + character is less than\n # 80 chars long\n if ll + len(n) + 1 < 80:\n to_add = \" \" + n\n out += \" \" + n\n ll += len(to_add)\n else:\n out += \"\\n\" + n\n ll = len(n)\n return out + str(self.score)",
"def normalize_text(text,pad_punc='!\"#$%&\\'()*+,-./:;<=>?@[\\\\]^_`{|}~',remove_punc='!\"#$%&\\'()*+,-/:;<=>?@[\\\\]^_`{|}~',remove_number='[0-9]',chars=False):\n punc_spaces = re.compile('([%s])' % re.escape(pad_punc))\n punc = re.compile('[%s]' % re.escape(remove_punc))\n text = text.lower()\n if chars:\n text = re.sub(punc,'',text)\n else:\n text = re.sub('\\.{3,}',' dots',text)\n text = re.sub(punc_spaces, r' \\1 ', text)\n text = re.sub(remove_number,'',text)\n text = re.sub(punc,'',text)\n text = re.sub(r'\\b((?![ai])[a-z])\\b','',text)\n text = re.sub('\\s{2,}', ' ', text)\n text = re.sub('\\n', ' ', text)\n text = re.sub('\\t', ' ', text)\n text=text.strip()\n \n return text",
"def format(space, w_obj, w_format_spec):\n return space.format(w_obj, w_format_spec)",
"def clean_review(self, text):\n text = text.lower() # lowercase capital letters\n\n if self.remove_stopwords:\n text = self.remove_stopwords_f(text, keep_neg_words=True)\n\n text = re.sub('[^a-zA-Z]+', ' ', text) # select only alphabet characters (letters only)\n # text = re.sub('[^a-zA-Z0-9]+', ' ', text) # select only alphanumeric characters (letters & numbers)\n # text = re.sub(r'\\W+', ' ', text) # Select only alphanumeric characters (including greek & underscore)\n\n text = re.sub(' +', ' ', text) # remove extra spaces\n\n if self.apply_normalization:\n text = self.normalize_text(text)\n\n return text",
"async def aesthetic(self, ctx, *, text):\n out = \"\"\n for char in text:\n out += utils.fullwidth_transform.get(char, char)\n await ctx.send(out)",
"def unpreprocess(self, text, desegment=True):\n\n if self.model_name in SEGMENTED_MODELS and desegment:\n text = self.desegment(text)\n\n # removes the spaces around quotation marks ex: i \" ate \" an apple --> i \"ate\" an apple\n # https://stackoverflow.com/a/53436792/5381220\n text = re.sub(white_spaced_double_quotation_regex, '\"' + r\"\\1\" + '\"', text)\n text = re.sub(white_spaced_single_quotation_regex, \"'\" + r\"\\1\" + \"'\", text)\n text = re.sub(white_spaced_back_quotation_regex, \"\\`\" + r\"\\1\" + \"\\`\", text)\n text = re.sub(white_spaced_back_quotation_regex, \"\\—\" + r\"\\1\" + \"\\—\", text)\n\n # during generation, sometimes the models don't put a space after the dot, this handles it\n text = text.replace(\".\", \" . \")\n text = \" \".join(text.split())\n\n # handle decimals\n text = re.sub(r\"(\\d+) \\. (\\d+)\", r\"\\1.\\2\", text)\n text = re.sub(r\"(\\d+) \\, (\\d+)\", r\"\\1,\\2\", text)\n\n text = re.sub(left_and_right_spaced_chars, r\"\\1\", text)\n text = re.sub(left_spaced_chars, r\"\\1\", text)\n text = re.sub(right_spaced_chars, r\"\\1\", text)\n\n return text",
"def _format(text):\n \n if isinstance(text, unicode):\n return text.lower().encode(\"UTF-8\")\n elif isinstance(text, str):\n return text.lower()",
"def removeMultipleSpaces(self) -> None:\n self.text = re.sub('\\s+', ' ', self.text)",
"def format(self) -> str:",
"def stripFormatting(self, s):\n # stripColor has to go first because of some strings, check the tests.\n s = stripColor(s)\n s = stripBold(s)\n s = stripReverse(s)\n s = stripUnderline(s)\n return s.replace('\\x0f', '').replace('\\x0F', '')",
"def preprocess(text):\n text = remove_space(text)\n text = clean_special_punctuations(text)\n text = clean_number(text)\n text = decontracted(text)\n text = correct_spelling(text)\n text = spacing_punctuation(text)\n text = spacing_some_connect_words(text)\n text = clean_repeat_words(text)\n text = remove_space(text)\n text = text.lower()\n return text",
"def _clean(self, texts, no_punc=False):\n result = ''\n sw = self._sw_no_punc_dict if no_punc else self._sw_dict\n for t in texts:\n if t not in sw:\n result += t\n return result",
"def normalize(self, text: str) -> str:\n\n raise NotImplementedError()",
"def pre_process(text: str) -> str:\n text = text.replace('--', '-')\n space_right = '!?:;,.-()*+-/<=>@^_'\n space_both = '-()*+-/<=>@^_'\n\n for punct in space_right:\n text = text.replace(punct, punct + ' ')\n for punct in space_both:\n text = text.replace(punct, ' ' + punct + ' ')\n\n # remove extra space\n text = re.sub(r' +', ' ', text)\n return text",
"def PROPER(text):\n return text.title()",
"def normalize_text(w):\n return str(w, \"utf-8\").lower().replace(\"-\", \"\")",
"def formatEditText(self, storedText):\n for choice in self.splitText(storedText):\n if choice not in self.formatList:\n return (storedText, not storedText)\n return (storedText, True)",
"def keyify(text):\n text = text.lower()\n text = text.strip()\n\n text = text.replace('.', '')\n text = re.sub('[,-]', ' ', text)\n text = re.sub('\\s{2,}', ' ', text)\n\n return text",
"def get_formatted_text(self, n_cols):",
"def normalize_space (text):\n return RE_WS.sub (' ', text.strip ())",
"def post_process_text(self, text):\n\t\treturn text",
"def normalizeSpaces(strText, bDouble=False):\n if bDouble:\n strText = re.sub(r\"[ ]+\", r\" \", strText, flags=re.UNICODE)\n # Remove double spaces from groups\n return re.sub(r\"([(|]) ([|)])\", r\"\\g<1> \\g<2>\", strText, flags=re.UNICODE)\n\n return re.sub(r\"[ ]+\", r\" \", strText, flags=re.UNICODE)",
"def preprocess_nmt(text):\n def no_space(char, prev_char):\n return char in set(',.!?') and prev_char != ' '\n\n # Replace non-breaking space with space, and convert uppercase letters to\n # lowercase ones\n text = text.replace('\\u202f', ' ').replace('\\xa0', ' ').lower()\n # Insert space between words and punctuation marks\n out = [' ' + char if i > 0 and no_space(char, text[i - 1]) else char\n for i, char in enumerate(text)]\n return ''.join(out)",
"def get_text_format(self) -> constants.TextFormatStr:\n return constants.TEXT_FORMAT.inverse[self.textFormat()]",
"def clean_whitespace(text):\n return text\n #return re.sub(r'\\r\\n|\\n', \"\\t\", text)",
"def cleaner(self, w_old):\n w_new = re.sub('[\\(\\)]', '', w_old)\n w_new = re.sub('[^А-Яа-яЁё ]', 'ъ', w_new)\n w_new = re.sub(' ', ' ', w_new)\n return w_new",
"def __format_display(file_contents: str) -> str:\n\n new_file_contents = file_contents\n\n for match in re.finditer(COBOL_FORMAT_DISPLAY_REGEX, file_contents):\n match_str = match_to_str(match)\n\n # Skip \"DISPLAY\" statements within \"IF-ELSE\" blocks\n if re.search(re.compile(r'\\s+ELSE\\s+'), match_str) is not None:\n continue\n\n new_str = match_str.replace('\\n', ' ')\n new_file_contents = new_file_contents.replace(match_str, new_str)\n\n return new_file_contents",
"def formatOutput(self, storedText, titleMode, internal=False):\n try:\n text = GenDate(storedText).dateStr(self.format)\n except GenDateError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)",
"def default_format_transition_label(self, word):\n result = \" \".join(self.format_letter(u) for u in word)\n if result:\n return result\n else:\n return EmptyWordLaTeX",
"def standardize_text(df: pd.DataFrame,\r\n text_field: str,\r\n output_field: str) -> pd.DataFrame:\r\n\r\n # df[output_field] = df[text_field].apply(\r\n # lambda column: emoji.get_emoji_regexp().sub(u'', column)\r\n # )\r\n\r\n df[output_field] = df[text_field].str.replace(\"'m\", ' am')\r\n df[output_field] = df[output_field].str.replace(\"’m\", ' am')\r\n df[output_field] = df[output_field].str.replace(\"´m\", ' am')\r\n\r\n df[output_field] = df[output_field].str.replace(\"'ve\", ' have')\r\n df[output_field] = df[output_field].str.replace(\"’ve\", ' have')\r\n df[output_field] = df[output_field].str.replace(\"´ve\", ' have')\r\n\r\n df[output_field] = df[output_field].str.replace(\"'d\", ' would')\r\n df[output_field] = df[output_field].str.replace(\"’d\", ' would')\r\n df[output_field] = df[output_field].str.replace(\"´d\", ' would')\r\n\r\n df[output_field] = df[output_field].str.replace(\"n't\", ' not')\r\n df[output_field] = df[output_field].str.replace(\"n’t\", ' not')\r\n df[output_field] = df[output_field].str.replace(\"n´t\", ' not')\r\n\r\n df[output_field] = df[output_field].str.replace(\"'ll\", ' will')\r\n df[output_field] = df[output_field].str.replace(\"’ll\", ' will')\r\n df[output_field] = df[output_field].str.replace(\"´ll\", ' will')\r\n\r\n df[output_field] = df[output_field].str.replace(\"'s\", ' is')\r\n df[output_field] = df[output_field].str.replace(\"’\", ' is')\r\n df[output_field] = df[output_field].str.replace(\"´s\", ' is')\r\n\r\n df[output_field] = df[output_field].str.replace('/', ' ')\r\n df[output_field] = df[output_field].str.replace('\\.{2,}', '.')\r\n df[output_field] = df[output_field].str.replace('!{2,}', '!')\r\n df[output_field] = df[output_field].str.replace('\\?{2,}', '?')\r\n df[output_field] = df[output_field].str.replace('€+', '')\r\n df[output_field] = df[output_field].str.replace('[0-9$&~\\\\()[\\]{}<>%\\'\"“”‘’,;…+\\-_=*]+', '')\r\n df[output_field] = df[output_field].str.replace(r'http\\S+', '')\r\n df[output_field] = df[output_field].str.replace(r'http', '')\r\n df[output_field] = df[output_field].str.replace(r'@\\S+', '')\r\n df[output_field] = df[output_field].str.replace(r'@', 'at')\r\n df[output_field] = df[output_field].str.lower()\r\n df[output_field] = df[output_field].astype(str)\r\n\r\n return df",
"def cleaningIndent(text):\n\n text = re.sub(r'^[\\s \\t]+', r'', text)\n text = re.sub(r'[\\s \\t]+$', r'', text)\n text = re.sub(r'[\\r\\n]+', r'\\r\\n', text)\n text = re.sub(r'(<(/p|/h[1-6]|/?div|/head|/l|/?lg|/?body|/?back|/?text|/?front)>)', r'\\1\\r\\n', text, flags=re.DOTALL|re.IGNORECASE)\n text = re.sub(r'([^\\r\\n<>])[\\r\\n]+([^\\r\\n<>])', r'\\1 \\2', text, flags=re.DOTALL|re.IGNORECASE)\n text = re.sub(r'([^>$])\\r\\n *(<seg)', r'\\1 \\2', text, flags=re.DOTALL|re.IGNORECASE)\n text = re.sub(r'(>)[\\r\\n]+([^\\s<>])', r'\\1 \\2', text, flags=re.DOTALL|re.IGNORECASE)\n text = re.sub(r'<p> +', r'<p>', text, flags=re.DOTALL|re.IGNORECASE)\n text = re.sub(r'[\\r\\n]+', r'\\r\\n', text)\n text = re.sub(r' +', r' ', text)\n text = re.sub(r'<p(>| [^>]*>)\\s*</p>', r' ', text)\n return text",
"def CleanText(text):\n\n pretty_issue = text.lower().strip()\n\n quoteless_issue = re.sub('\\'', '', pretty_issue)\n no_punctuation_issue = re.sub('[^\\w\\s]|_+', ' ', quoteless_issue)\n one_space_issue = ' '.join(no_punctuation_issue.split())\n\n return one_space_issue",
"def formatEditText(self, storedText):\n return (storedText, True)",
"def formatEditText(self, storedText):\n return (storedText, True)",
"def parse_text(text):\n return re.sub(r'\\s+', \" \", text)",
"def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text",
"def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text",
"def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text"
] | [
"0.6537464",
"0.64203817",
"0.63955766",
"0.638702",
"0.63303804",
"0.61618394",
"0.60671043",
"0.5989454",
"0.59656113",
"0.5920744",
"0.5918541",
"0.59150285",
"0.58982563",
"0.5888299",
"0.5853104",
"0.58076817",
"0.5796348",
"0.5772249",
"0.57603693",
"0.5750695",
"0.5728735",
"0.56871736",
"0.56815284",
"0.56780237",
"0.5658031",
"0.5657621",
"0.56575316",
"0.5649325",
"0.5608761",
"0.56079715",
"0.5589443",
"0.55830103",
"0.5575074",
"0.5562466",
"0.554759",
"0.5535112",
"0.5520946",
"0.54934335",
"0.5486658",
"0.54848826",
"0.5479937",
"0.5478493",
"0.54772425",
"0.54761344",
"0.5473543",
"0.546983",
"0.5461201",
"0.54031205",
"0.54011685",
"0.5400327",
"0.5399362",
"0.5399123",
"0.53991127",
"0.53988534",
"0.5397083",
"0.53905076",
"0.5385273",
"0.5379875",
"0.5367782",
"0.5364739",
"0.5355991",
"0.5353337",
"0.5345751",
"0.5344812",
"0.53425163",
"0.5336044",
"0.5333163",
"0.5321889",
"0.53201485",
"0.531855",
"0.531383",
"0.5307327",
"0.53007406",
"0.5297083",
"0.5288021",
"0.5280964",
"0.52803206",
"0.5273814",
"0.52705973",
"0.52651614",
"0.5253744",
"0.5247038",
"0.5245314",
"0.52445924",
"0.5243188",
"0.5236869",
"0.523355",
"0.52233714",
"0.52211976",
"0.521751",
"0.52148956",
"0.5214886",
"0.5214039",
"0.5210131",
"0.5208851",
"0.5204924",
"0.5204924",
"0.5202899",
"0.5200072",
"0.5200072",
"0.5200072"
] | 0.0 | -1 |
Formats n words to the left of the cursor with whitespace removed. Excepting spaces immediately after comma, colon and percent chars. | def squash_count(n):
saveText = _get_clipboard_text()
cutText = _select_and_cut_text(n)
if cutText:
endSpace = cutText.endswith(' ')
text = _cleanup_text(cutText)
newText = ''.join(text.split(' '))
if endSpace:
newText = newText + ' '
newText = _expand_after_special_chars(newText)
newText = newText.replace("%", "%%") # Escape any format chars.
Text(newText).execute()
else: # Failed to get text from clipboard.
Key('c-v').execute() # Restore cut out text.
_set_clipboard_text(saveText) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def display_words(word_list,specifier):\n \n if specifier.lower() == 'score':\n print(\"{:>6s} - {:s}\".format(\"Score\", \"Word\"))\n if len(word_list) < 5:\n for tup in word_list:\n print(\"{:>6d} - {:s}\".format(tup[1], tup[0]))\n else:\n \n for tup in word_list[:5]:\n print(\"{:>6d} - {:s}\".format(tup[1], tup[0]))\n \n \n elif specifier.lower() == 'length':\n print(\"{:>6s} - {:s}\".format(\"Length\", \"Word\"))\n if len(word_list) < 5:\n for tup in word_list:\n print(\"{:>6d} - {:s}\".format(tup[2], tup[0]))\n else:\n \n for tup in word_list[:5]:\n print(\"{:>6d} - {:s}\".format(tup[2], tup[0]))",
"def wrap_by_word(s, n):\n\ta = s.split()\n\tret = ''\n\tfor i in range(0, len(a), n):\n\t\tret += ' '.join(a[i:i+n]) + '\\n'\n\treturn ret",
"def clean_5_words(self):\n self.first_word.configure(text=\"\")\n self.second_word.configure(text=\"\")\n self.third_word.configure(text=\"\")\n self.fourth_word.configure(text=\"\")\n self.fifth_word.configure(text=\"\")",
"def html_space(n):\n return \" \" * n",
"def _justifyText(text):\n chunks = text.split()\n line = []\n lineLength = 0\n for chunk in chunks:\n lineLength += len(chunk) + 1\n if lineLength <= 73:\n line.append(chunk)\n continue\n else:\n print('''{:5}{:73}{}'''.format(\"|\", \" \".join(line), \"|\"))\n del line[:]\n line.append(chunk)\n lineLength = len(chunk) + 1\n print('''{:5}{:73}{}'''.format(\"|\", \" \".join(line), \"|\"))",
"def s_words(words):\n\t\n\treturn words // 100 / 10",
"def fill_with_spaces(line: string, width: int) -> string:\n size = len(line)\n spaces_left = width - size\n return line + (' ' * spaces_left)",
"def expand_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n cutText = _expand_after_special_chars(cutText)\n reg = re.compile(\n r'([a-zA-Z0-9_\\\"\\'\\)][=\\+\\-\\*/\\%]|[=\\+\\-\\*/\\%][a-zA-Z0-9_\\\"\\'\\(])')\n hit = reg.search(cutText)\n count = 0\n while hit and count < 10:\n cutText = cutText[:hit.start() + 1] + ' ' + \\\n cutText[hit.end() - 1:]\n hit = reg.search(cutText)\n count += 1\n newText = cutText\n if endSpace:\n newText = newText + ' '\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)",
"def left_justify_string(keyword, value):\n return '%s' % keyword .ljust(40, \".\") + \": \" + '%s\\n' % value",
"def _set_number_of_words(self, N):\n self.N_words_to_display = N",
"def percent_space(self):\n self.custom_space(*[0,0,100,100])",
"def format_body(self):\n mt = deque(str(self.movetext).split(' ') + [])\n out = mt.popleft()\n ll = len(out)\n while True:\n if len(mt) is 0:\n break\n\n n = mt.popleft()\n # If the current line length + space + character is less than\n # 80 chars long\n if ll + len(n) + 1 < 80:\n to_add = \" \" + n\n out += \" \" + n\n ll += len(to_add)\n else:\n out += \"\\n\" + n\n ll = len(n)\n return out + str(self.score)",
"def wrap(text, width):\n retstr = \"\"\n for word in text.split(' '):\n if len(retstr)-retstr.rfind('\\n')-1 + len(word.split('\\n',1)[0]) >= width:\n retstr += ' \\n' + word\n else:\n retstr += ' ' + word\n return retstr",
"def Left(text, number):\n return text[:number]",
"def normalizeTexts(texts):\n fCW = 0\n for item in texts:\n fCW = max(len(item), fCW)\n for counter, item in enumerate(texts):\n texts[counter] = texts[counter].ljust(fCW + 1, '.')\n return (texts, fCW)",
"def print_space(self,text,width,w=1,h=1):\n texlen = len(text)\n if texlen > width:\n text = text[:width]\n self.lesprint(text,width)",
"def print_words(words):\n print('Here are the words you entered:')\n for i, word in enumerate(words, start=1):\n print('{}. {}'.format(i, word.title()))",
"def space(n):\n rstr = \" \" * 4 * n\n return rstr",
"def justify_token(tok, col_width):\n get_len = tools.display_len if PY3 else len\n tok_len = get_len(tok)\n diff_len = tok_len - len(tok) if PY3 else 0\n\n cols = (int(math.ceil(float(tok_len) / col_width))\n if col_width < tok_len + 4 else 1)\n\n if cols > 1:\n return tok.ljust((col_width * cols) + (4 * cols) - diff_len)\n else:\n return tok.ljust(col_width + 4 - diff_len)",
"def get_pad1(n):\n if n < 10:\n return \" \"\n if n < 100:\n return \" \"\n if n < 1000:\n return \" \"\n return \"\"",
"def add_space(s,n):\n t = \"\"\n for i in xrange(len(s)):\n # Add white space after every n characters.\n if i % n == 0 and i != 0:\n t += ' '\n t += s[i]\n\n return t",
"def cut_in_words(self,linea):\n length = 0\n res = ''\n limit_screen = 30\n for word in linea.split(' '):\n if length + len(word) <= limit_screen:\n new_word = word + ' '\n length += len(new_word)\n else:\n new_word = '\\n' + word + ' '\n length = len(new_word) - 2 #-2 para no tener en cuenta el \\n\n res += new_word\n return res",
"def make_title(words):",
"def wrap_onspace(text, width):\n\n if type(text) is str:\n return reduce(lambda line, word, width=width: '%s%s%s' %\n (line,\n ' \\n'[(len(line[line.rfind('\\n')+1:])\n + len(word.split('\\n',1)[0]\n ) >= width)],\n word),\n text.split(' ')\n )\n elif type(text) is list:\n new_text = ''\n counter = 0\n for e in text:\n counter += 1\n new_text += '('+str(counter)+') '+str(e)+\"\\n\"\n #new_text = ''.join(str(e) for e in text)\n return reduce(lambda line, word, width=width: '%s%s%s' %\n (line,\n ' \\n'[(len(line[line.rfind('\\n')+1:])\n + len(word.split('\\n',1)[0]\n ) >= width)],\n word),\n new_text.split(' ')\n )",
"def doormat(n, m):\r\n pad = '-'\r\n filler = '.|.'\r\n middle = 'WELCOME'\r\n mat = []\r\n mid = middle.center(m, pad)\r\n error = 'Oops, try again!'\r\n\r\n try:\r\n for width in range(1, n, 2):\r\n mat.append((filler*width).center(m, pad))\r\n end = list(reversed(mat))\r\n mat.append(mid)\r\n mat = mat + end\r\n return '\\n'.join(mat)\r\n except TypeError:\r\n return error",
"def _format_line_for_sidebar(self, i):\n check = '\\u2714 ' if self.model.line_has_audio(i) else ' '\n return [('check', check), \" {}. {}\".format(i, self.model.get_lines()[i])]",
"def add_padding(self, text):\n\n for word in text.split(' '):\n # 5 character blocks added straight\n if len(word) == 5:\n self.output += word + ' '\n # calling the helper method to fill the blocks\n elif len(word) < 5:\n self._helper(word)\n # split the block up into 5 letter chunks\n elif len(word) > 5:\n block = ''\n for letter in word:\n block += letter\n if len(block) == 5:\n # append the chunk to output\n self.output += block + ' '\n block = ''\n self._helper(block)\n\n return self.output.upper()",
"def wrap(self, text, width = 78, indent1 = 0, indent2 = 0):\n \n new_text = \"\"\n\n current_line = \"\"\n current_line += \" \" * indent1\n\n text = text.replace(\"\\n\", \" \")\n \n words = text.split(\" \")\n\n for word in words:\n if len(word) == 0:\n continue\n\n if len(current_line) + len(word) <= width:\n if len(current_line):\n current_line += \" \" + word\n else:\n current_line = word\n else:\n new_text += current_line + NEWLINE\n current_line = \" \" * indent2 + word\n\n if len(current_line):\n new_text += current_line\n\n return new_text",
"def replace_spaces_with_pluses(self, sample):\r\n changed = list(sample)\r\n for i, c in enumerate(changed):\r\n if(c == ' ' or c ==' ' or c ==' ' or c=='\\n' or c=='\\n\\n'):\r\n changed[i] = '+'\r\n return ''.join(changed)",
"def wrap(text, width):\n return reduce(lambda line, word, width=width: '%s%s%s' %\n (line,\n ' \\n'[(len(line)-line.rfind('\\n')-1\n + len(word.split('\\n',1)[0]\n ) >= width)],\n word),\n text.split(' ')\n )",
"def preprocess_nmt(text):\n def no_space(char, prev_char):\n return char in set(',.!?') and prev_char != ' '\n\n # Replace non-breaking space with space, and convert uppercase letters to\n # lowercase ones\n text = text.replace('\\u202f', ' ').replace('\\xa0', ' ').lower()\n # Insert space between words and punctuation marks\n out = [' ' + char if i > 0 and no_space(char, text[i - 1]) else char\n for i, char in enumerate(text)]\n return ''.join(out)",
"def myformat(table):\n m = 0\n table = sorted(table, key=itemgetter(0))\n for t in table:\n t = str(t)\n if len(t[0]) > m:\n m = len(t[0])\n m += 10\n fstr = \"{0:}\" + m*\" \" + \"{1:}\"\n s = \"\"\n for x in table:\n try:\n a = float(x[0])\n b = float(x[1])\n s += \"{0:.5f}{1:{width}}\".format(a, b, width=m) + \"\\n\"\n except IndexError:\n pass\n return s\n \"\"\"\n out = \"\"\n for pair in table:\n out += str(pair[0]) + 5*\" \" + str(pair[1]) + \"\\n\"\n return out\"\"\"",
"def indent(text, n=4):\n if not text:\n return \"\"\n i = \" \" * n\n return i + text.replace(\"\\n\", \"\\n\" + i)",
"def _padleft(width, s):\n fmt = \"{0:>%ds}\" % width\n return fmt.format(s)",
"def indent(text, first_line=True, n=1, width=4):\n lines = text.split(\"\\n\")\n if not first_line:\n first = lines[0]\n lines = lines[1:]\n\n spaces = \" \" * (width * n)\n lines2 = [spaces + x for x in lines]\n\n if not first_line:\n lines2.insert(0, first)\n\n indented = \"\\n\".join(lines2)\n\n return indented",
"def tab_width(self, pad, *args):\n pad.insert(GUI.INSERT, ' ' * 4)\n return 'break'",
"def print_formatted(n: int):\n adjustment = len(convert_int_to_form(n, 2))\n\n for num in range(1, n+1):\n num_forms = [\n str(num), \n convert_int_to_form(num, 8), \n get_hexa(num),\n convert_int_to_form(num, 2)\n ]\n for i in range(len(num_forms)):\n end_char = ' ' if i != len(num_forms) - 1 else '\\n'\n print(num_forms[i].rjust(adjustment), end = end_char)",
"def dumb_formatter(out, text, left, right = None):\n if right == None:\n (right, line_length) = bigsh.pp.get_terminal_size()\n if right - 20 > left: # XXX needs work\n right = right - 20\n right = min(right, 120)\n\n left_indent = ' ' * left\n out_len = left\n out_line = left_indent\n\n for line in text.split('\\n'):\n if len(line) == 0:\n if out_len > left:\n out.append(out_line)\n out_len = left\n out_line = left_indent\n out.append('')\n elif line[0] == ' ' or line[0] == '\\t': # leading spaces\n if out_len > left:\n out.append(out_line)\n out_len = left\n out_line = left_indent\n out.append( left_indent + line )\n else: # text formatting\n\n for word in line.split():\n sep = ' '\n if word.endswith('.'):\n sep = ' '\n if len(word) + out_len + len(sep) > right:\n if out_len > left:\n out.append(out_line)\n out_len = left + len(word) + len(sep)\n out_line = left_indent + word + sep\n else:\n out_line += word + sep\n out_len += len(sep) + len(word)\n if out_len > left:\n out.append(out_line)",
"def truncate(text, words=25):\n return ' '.join((text).split()[:words])",
"def _format_line(line: str, n: int, padding: int) -> str:\n return ' {dim}{n}{reset}: {line}'.format(dim=Style.DIM,\n n=str(n + 1).zfill(padding),\n line=line,\n reset=Style.RESET_ALL)",
"def insert_spaces(word):\n new_word = \"\"\n for c in word:\n new_word += c + \" \" \n return new_word",
"def wordwrap(value, arg=80):\n\treturn \"\\n\".join(textwrap.wrap(value, int(arg)))",
"def Left(n=1):\n return ESC + str(n) + 'D'",
"def fix_spaces(text):\n # Please print out which line of the above program contains an error. E.g. if the bug is on line 4 then print 4\n # END OF CONTEXT\n print(\"19\")\n # END OF SOLUTION",
"def wrap(cls, text, first=0, indent=15, maxwidth=75):\n outstr = []\n sentence = []\n if not text:\n return \"\"\n for word in text.split():\n if len(\" \".join(sentence)) + len(word) + first > maxwidth:\n outstr.append(\" \".join(sentence))\n sentence = [\" \" * indent, word]\n first = 0\n else:\n sentence.append(word.strip())\n outstr.append(\" \".join(sentence))\n return \"\\n\".join(outstr)",
"def shrink(self, numwords):\n self.desc += \", shrink \" + str(numwords)\n self.filter_words(lambda w: self._index[w] < numwords)",
"def format_words(words):\n return sorted(words, key=str.lower)",
"def find_item_formatting(possible_matches):\n for i in range(len(possible_matches)):\n if i == 0:\n continue\n words = possible_matches[i].split()\n\n #If previous term has same ending (\"Dinner\") as current term, remove it\n if possible_matches[i].split()[-1] == possible_matches[i - 1].split()[-1]:\n #8 = amount of characters taken up by [' during ']\n length = len(possible_matches[i].split()[-1]) + 8\n possible_matches[i - 1] = possible_matches[i - 1][:length*-1]\n\n return possible_matches",
"def format_word_split(txt):\n tt = re.sub(r\"'s\\b\", '', txt).lower() # possessives\n tt = re.sub(r'[\\.\\,\\;\\:\\'\\\"\\(\\)\\&\\%\\*\\+\\[\\]\\=\\?\\!/]', '', tt) # weird stuff\n tt = re.sub(r'[\\-\\s]+', ' ', tt) # hyphen -> space\n tt = re.sub(r' [a-z] ', ' ', tt) # single letter -> space\n tt = re.sub(r' [0-9]* ', ' ', tt) # numbers\n\n tt = re.sub('\\W+', ' ', tt)\n tt = tt.split(\" \")\n\n ret = []\n for elem in tt:\n if elem not in stop_words:\n ret.append(elem)\n\n tt = ' '.join(ret)\n return tt.strip()",
"def hangingIndent(msg, numCols):\r\n\tinsertPoint = numCols\r\n\twhile insertPoint < len(msg):\r\n\t\tmsg = msg[:insertPoint] + \"\".center(INDENT_SIZE) + msg[insertPoint:]\r\n\t\tinsertPoint += numCols\r\n\t\r\n\treturn msg",
"def delete_words(self):\n self.word_1.delete(0, tk.END)\n self.word_2.delete(0, tk.END)\n self.word_3.delete(0, tk.END)\n self.word_4.delete(0, tk.END)\n self.word_5.delete(0, tk.END)",
"def fullwidth(st):\n ret = \"\"\n if not st: return ret\n for c in st:\n i = ord(c)\n if c == \" \":\n ret += chr(0x3000)\n elif 0x21 <= i <= 0x7f:\n ret += chr(i - 0x21 + 0xff01)\n else:\n ret += c\n return ret",
"def print_list_to_columns(words, items_per_row=5):\n row = []\n width = max(map(len, words)) + 2 \n for idx, word in enumerate(words):\n if (idx + 1) % items_per_row == 0:\n print(\"\".join(word.ljust(width) for word in row))\n row = []\n row.append(word)\n # append one last time just in case\n if len(row) > 0:\n print(\"\".join(word.ljust(width) for word in row))",
"def just(s: str) -> str:\n return s.ljust(50, \"_\")",
"def Space(num):\n return String(num, \" \")",
"def test_wrap_2_words():\n w1, w2 = \"n\" * 75, \"n\" * 5\n line = \"%s %s\" % (w1, w2)\n assert wrap_line(line) == \"%s\\n%s\" % (w1, w2)",
"def insert_word(self, frame2, pad, lb, lang, *args):\n # print self.lastinsert\n\n if self.cntlbcall != 0:\n self.cntlbcall -= 1\n word = lb.get(self.cntlbcall).strip('\\n')\n\n coordinates1 = map(int, str(self.lastinsert).split('.'))\n coordinates = str(coordinates1[0]) + '.0'\n r = pad.get(coordinates, str(self.lastinsert))\n pos_space = 0\n\n for i in range(len(r)):\n if r[i] == ' ':\n pos_space = i\n if pos_space != 0:\n pos_space += 1\n coordinates = str(coordinates1[0]) + '.' + str(pos_space)\n pad.delete(coordinates, coordinates + 'lineend')\n pad.insert(self.lastinsert, word)\n coordinates1 = map(int, str(self.lastinsert).split('.'))\n coordinates1[-1] += len(word)\n pad.mark_set(GUI.INSERT, '%d.%d' % (coordinates1[0], coordinates1[1]))\n self.syntax_highlight(pad, lang, GUI.INSERT, 0)\n frame2.pack_forget()\n global FLAG\n FLAG = 1\n self.cntlbcall = 0\n pad.focus_force()\n return \"break\"\n # pad.insert('end',' ')",
"def test_wrap_word():\n line = \"n\" * 81\n assert wrap_line(line) == \"n\" * 80 + \"\\nn\"",
"def reduce_spaces(tweet):\r\n text = tweet.strip()\r\n text = \" \".join(text.split())\r\n return re.sub(' +', ' ', text)",
"def indent(self, n):\n self._ind = max(0, self._ind + n)",
"def masked_word(self):\n for i in range(0,len(self._word)):\n if self._word[i] == ' ':\n self.new_string.append(' ')\n else:\n self.new_string.append('__ ')\n\n return self.print_new_word(self.new_string)",
"def format_text(text):\n\n\ttext = ' '.join(text).lower()\n\ttext = re.sub(r\"[^a-zA-Z.?!]\", \" \", text)\n\ttext = re.sub(r' +', ' ', text)\n\ttext = word_tokenize(text)\n\ttext = pos_tag(text)\n\n\treturn text",
"def test20():\n\tdef highlight_word(sentence, word):\n\t\treturn(\" \".join(x) for x in sentence.split())\n\n\tprint(highlight_word(\"Have a nice day\", \"nice\"))\n\tprint(highlight_word(\"Shhh, don't be so loud!\", \"loud\"))\n\tprint(highlight_word(\"Automating with Python is fun\", \"fun\"))",
"def fixtags(self, text):\n # french spaces, last one Guillemet-left\n # only if there is something before the space\n text = _guillemetLeftPat.sub(ur'\\1 \\2', text)\n # french spaces, Guillemet-right\n text = _guillemetRightPat.sub(ur'\\1 ', text)\n return text",
"def print_word_frequency_list(my_word_frequency_list):\n for word in my_word_frequency_list:\n print(\"{} {}\".format(word[0].ljust(5), word[1]))\n\n print('\\n')",
"def tabing_tool(code):\n for i, line in enumerate(code):\n code[i] = ' '*4 + line\n return code",
"def separator(self, num=1):\n for i in range(num):\n print('-') * 79",
"def align_day_block(day):\n if day == 0:\n return \" \"\n elif day > 0 and day < 10:\n return \" \" + str(day) + \" \"\n\n return \" \" + str(day)",
"def pascal_case_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n text = _cleanup_text(cutText)\n newText = text.title().replace(' ', '')\n if endSpace:\n newText = newText + ' '\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)",
"def ltrim(self, name, start, end):\r\n return self.format_inline('LTRIM', name, start, end)",
"def removeMultipleSpaces(self) -> None:\n self.text = re.sub('\\s+', ' ', self.text)",
"def make_spaces_ascii(artist_str):\n while artist_str.find(' ') != -1:\n artist_str = artist_str.replace(' ', '%20')\n return artist_str",
"def make_display_word(secret_word):\n return ('_ ' * len(secret_word))",
"def Fill(self, line):\n self._blank = True\n for word in line.split():\n if not self._fill:\n self._fill = self._indent[self._level] - 1\n self._out.write(' ' * self._fill)\n n = self._attr.PrintWidth(word)\n if self._fill + n + 1 >= self._width and not self._ignore_width:\n self._out.write('\\n')\n self._fill = self._indent[self._level]\n self._out.write(' ' * self._fill)\n else:\n self._ignore_width = False\n if self._fill:\n self._fill += 1\n self._out.write(' ')\n self._fill += n\n self._out.write(word)",
"def _wrap_text(text: str, words_per_line=20) -> str:\n text = text.replace('\\r', '')\n text = text.replace('\\n', '')\n wordlist = text.split()\n\n result = []\n for i, word in enumerate(wordlist):\n if i % words_per_line == 0 and i != 0:\n result.append('\\n')\n\n result.append(word)\n\n return \" \".join(result)",
"def indent(text, count=1, prefix=\" \"):\n lines = text.split(\"\\n\")\n return \"\\n\".join(\"{}{}\".format(prefix * count, line)\n for line in lines)",
"def _pad_horizontally(self, chars_written):\n if chars_written >= self.width:\n return\n\n extra = self.width - chars_written\n self.buf += ' ' * extra",
"def remove_repeated_spaces(text: str) -> str:\n\n return _repeated_spaces.sub(' ', text)",
"def format(self, sentences):\n empty_fields = '\\t_' * 8\n for i, sentence in enumerate(sentences):\n yield f'# sent_id = {i+1}'\n sent_text = sentence.text.replace(\"\\n\", \" \")\n yield f'# text = {sent_text}'\n for token in sentence.tokens:\n # multiword\n if len(token.words) > 1:\n token_range = f'{token.id[0]}-{token.id[-1]}'\n yield f'{token_range}\\t{token.text + empty_fields}'\n for word in token.words:\n yield f'{word.id}\\t{word.text + empty_fields}'\n else:\n yield f'{token.id[0]}\\t{token.text + empty_fields}'\n yield ''",
"def percent_without_letter(l):\n\treturn len(words_without_letter(l)) / len(word_set)",
"def printResults(listWords):\n width = 0\n for word in listWords:\n if len(word.name) > width:\n width = len(word.name)\n for word in listWords:\n lstring = str(word.listOfLines).replace('[','').replace(']','')\n print '%s: %d times, lines: %s' % (word.name.rjust(width), \n word.occurence, lstring)",
"def fmt(cls, n):\n return ''.join(c for c in n if c in cls.ALLOWED).lower()",
"def display(wordsDictionary):\n noOfWords = 0\n print(\"-\" * 42)\n print(\"| %20s | %15s |\" % (\"WORDS\".center(20), \"FREQUENCY\".center(15)))\n print(\"-\" * 42)\n for word in list(sorted(wordsDictionary.keys())):\n noOfWords += 1\n print(\"| %-20s | %15s |\" % (word, str(wordsDictionary.get(word)).center(15)))\n # Halt every 20 words (configurable)\n if (noOfWords != 0 and noOfWords % 20 == 0):\n print(\"\\n\" * 2)\n input(\"PRESS ENTER TO CONTINUE ... \")\n print(\"\\n\" * 5)\n print(\"-\" * 42)\n print(\"| %20s | %15s |\" % (\"WORDS\".center(20), \"FREQUENCY\".center(15)))\n print(\"-\" * 42)\n print(\"-\" * 42)\n print(\"\\n\" * 2)",
"def removeExtraSpaces(self, words):\n\t\treturn re.sub(r'\\s+', ' ', words.strip()).strip()",
"def advance(self, i):\n sys.stdout.write('\\r')\n sys.stdout.write(\"[%-30s] %d%%\" % ('=' * int(\n ceil(i / self._n * self._length)),\n (i + 1) / self._n * 100))\n sys.stdout.flush()",
"def fit_to_width(string, limit):\n\n input_words = string.split()\n i = 0\n line_list = []\n new_str = str()\n\n for word in input_words:\n if i == 0:\n new_str = word\n elif len(new_str+word) < limit:\n new_str = new_str + ' ' + word\n else:\n line_list.append(new_str)\n new_str = word\n if i == (len(input_words)-1):\n line_list.append(new_str)\n i += 1\n\n for string in line_list:\n print(string)",
"def textJustification(words, maxWidth):\n lines = []\n currWordLen = 0\n temp = []\n\n # split up into different lines.\n\n # ensure everything before gets appended properly\n words.append('a' * maxWidth)\n\n for word in words:\n if len(word) + currWordLen > maxWidth:\n lines.append(temp)\n temp = []\n temp.append(word)\n currWordLen = len(word) + 1 # account for spaces\n else:\n temp.append(word)\n currWordLen += len(word) + 1\n\n res = []\n numLines = len(lines)\n for index, line in enumerate(lines):\n if index == numLines - 1:\n numWords = len(line)\n s = ' '.join(line)\n remainingSpaces = maxWidth - len(s)\n s += ' ' * remainingSpaces\n res.append(s)\n else:\n\n numWords = len(line)\n remainingSpaces = maxWidth - len(''.join(line))\n if numWords - 1 != 0:\n interSpace = remainingSpaces // (numWords - 1)\n remainingSpaces = remainingSpaces - \\\n ((numWords - 1) * interSpace)\n\n i = 0\n while remainingSpaces != 0:\n line[i] += ' '\n i = (i + 1) % (numWords)\n remainingSpaces -= 1\n\n res.append((' ' * interSpace).join(line))\n\n return res",
"def lowercase_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n newText = cutText.lower()\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)",
"def display_top_n_words(total_count__of_words, n): # Considering n=10 here as specified in the requirements\n return sorted(total_count__of_words.items(), key=lambda i: i[1], reverse=True)[:n]",
"def format(prefix, core, sufix, crop, color):\n if color:\n txtgrn = '\\x1b[32m'\n txtrst = '\\x1b[0m'\n else:\n txtgrn = ''\n txtrst = ''\n if len(prefix + core + sufix) <= 50 or not crop:\n return prefix + txtgrn + core + txtrst + sufix\n left = 50\n left -= len(core)\n each = left / 4\n if len(prefix) >= each * 2:\n prefix = prefix[:each] + ' ... ' + prefix[-each:]\n if len(sufix) >= each * 2:\n sufix = sufix[:each] + ' ... ' + sufix[-each:]\n return prefix + txtgrn + core + txtrst + sufix",
"def reformat_search_for_spaces(self):\r\n self.yt_search_key = self.yt_search_key.rstrip().replace(' ', '+')",
"def __format_column_dashes(file_contents: str) -> str:\n\n result_lines = list()\n\n for line in file_contents.splitlines():\n if line.startswith(' ' * 6 + '-'):\n result_lines[-1] += f' {line[7:]}'\n else:\n result_lines.append(line)\n\n return '\\n'.join(result_lines)",
"def add_spaces(text):\n return \" \".join(text)",
"def wrap(self, ind, text, rhs=0):\n l = 79 - ind * self.indSize - rhs\n return textwrap.wrap(text, l)",
"def infer_spaces(s):\n global unfolded\n if s in unfolded:\n return unfolded[s]\n\n # Find the best match for the i first characters, assuming cost has\n # been built for the i-1 first characters.\n # Returns a pair (match_cost, match_length).\n def best_match(i):\n candidates = enumerate(reversed(cost[max(0, i-maxword):i]))\n return min((c + wordcost.get(s[i-k-1:i], 9e999), k+1) for k,c in candidates)\n\n # Build the cost array.\n cost = [0]\n for i in range(1,len(s)+1):\n c,k = best_match(i)\n cost.append(c)\n\n # Backtrack to recover the minimal-cost string.\n out = []\n i = len(s)\n while i>0:\n c,k = best_match(i)\n assert c == cost[i]\n out.append(s[i-k:i])\n i -= k\n \n unfolded[s] = ' '.join(reversed(out))\n return ' '.join(reversed(out))",
"def __init__(self, n, sents):\n assert n > 0\n self._n = n\n print(\"Counting...\")\n count = defaultdict(int)\n while n >= 0:\n for sent in sents:\n s = sent[:] # En una oracion auxiliar agrego el item de start y end para contarlos\n s.insert(0, \"<s>\")\n s.append(\"</s>\")\n for i in range(len(s) - n + 1):\n count[tuple(s[i:i + n])] += 1\n n -= 1\n count[()] = count[()] - count[('<s>',)] - count[\n ('</s>',)] # Pero no quiero que <s> y </s> sean considerados por ()\n self._count = count\n print(\"Computing vocabulary...\")\n self._voc = voc = set()\n for sent in sents:\n voc = voc.union(set(sent))\n voc.add('</s>')\n self._voc = voc\n self._V = len(voc) # vocabulary size\n print(\"Done\")",
"def non_std_words(work):\n dictionary = enchant.Dict(\"en_US\")\n non_std_word = []\n\n for elem in work:\n lyrics = [item for sublist in elem[1] for item in sublist]\n lyrics = [i for i in lyrics if i[0] not in [',', '.', \"'\", '?', '!', '’', '&', '#', ':']]\n word_count = 1\n not_word_count = 1\n for tuples in lyrics:\n if dictionary.check(tuples[0]):\n word_count += 1\n else:\n not_word_count += 1\n\n non_std_word.append((not_word_count/(not_word_count+word_count), elem[0]))\n\n return non_std_word",
"def print_dashes(num: int, dash: str = '#') -> str:\n\n # Gets the terminal width\n num_col = shutil.get_terminal_size((80, 20)).columns\n\n return dashed_line(num if num <= num_col else num_col, dash)",
"def generate_words(self,N):\n for i in xrange(N):\n prefix = \" \" * self.chainlen\n name = \"\"\n suffix = \"\"\n while True:\n suffix = self.get_suffix(prefix)\n if suffix == \"\\n\" or len(name) > 9:\n break\n else:\n name = name + suffix\n prefix = prefix[1:] + suffix\n yield name.capitalize()",
"def part2(entries: tuple) -> str:\n sheet = entries[0]\n fold_instructions = entries[1]\n for direction, axis in fold_instructions:\n sheet = fold(sheet, direction, axis)\n \n max_x = max(p[0] for p in sheet)\n max_y = max(p[1] for p in sheet)\n out = ''\n for y in range(max_y + 1):\n for x in range(max_x + 1):\n out += '#' if (x, y) in sheet else ' '\n out += '\\n'\n return out"
] | [
"0.5716477",
"0.56824267",
"0.56137496",
"0.5555356",
"0.5551132",
"0.54461175",
"0.5431609",
"0.54211974",
"0.54129255",
"0.54071826",
"0.5400933",
"0.53925633",
"0.53710663",
"0.5361933",
"0.5333949",
"0.5318006",
"0.5300602",
"0.5297923",
"0.5276843",
"0.5260407",
"0.5258891",
"0.52396846",
"0.521561",
"0.5208605",
"0.5174413",
"0.5144831",
"0.509811",
"0.5096749",
"0.5088272",
"0.5084104",
"0.5083278",
"0.5079974",
"0.5076997",
"0.5072256",
"0.50627303",
"0.5061776",
"0.5058262",
"0.50272506",
"0.50260305",
"0.50204635",
"0.501459",
"0.49986693",
"0.49922422",
"0.4985619",
"0.49855638",
"0.4976812",
"0.49759695",
"0.49621174",
"0.49613371",
"0.49593896",
"0.49592125",
"0.49540642",
"0.49462414",
"0.49387884",
"0.49379048",
"0.49342835",
"0.4928644",
"0.49103528",
"0.49043044",
"0.4887875",
"0.4886138",
"0.488379",
"0.48734987",
"0.48732018",
"0.486736",
"0.48629403",
"0.48583478",
"0.48577636",
"0.48542157",
"0.48501462",
"0.48491022",
"0.48164797",
"0.4813016",
"0.48028612",
"0.47997203",
"0.4798774",
"0.47968936",
"0.47963554",
"0.4769761",
"0.47655395",
"0.47640428",
"0.4760183",
"0.47451806",
"0.47449625",
"0.47438258",
"0.47433197",
"0.4741665",
"0.47375378",
"0.47369006",
"0.47348619",
"0.47319284",
"0.4729022",
"0.47149733",
"0.47083417",
"0.47062463",
"0.46949917",
"0.46897107",
"0.4689354",
"0.46818113",
"0.4679472"
] | 0.5585617 | 3 |
Formats n words to the left of the cursor by adding whitespace in certain positions. Note that word count differs between editors and programming languages. The examples are all from Eclipse/Python. | def expand_count(n):
saveText = _get_clipboard_text()
cutText = _select_and_cut_text(n)
if cutText:
endSpace = cutText.endswith(' ')
cutText = _expand_after_special_chars(cutText)
reg = re.compile(
r'([a-zA-Z0-9_\"\'\)][=\+\-\*/\%]|[=\+\-\*/\%][a-zA-Z0-9_\"\'\(])')
hit = reg.search(cutText)
count = 0
while hit and count < 10:
cutText = cutText[:hit.start() + 1] + ' ' + \
cutText[hit.end() - 1:]
hit = reg.search(cutText)
count += 1
newText = cutText
if endSpace:
newText = newText + ' '
newText = newText.replace("%", "%%") # Escape any format chars.
Text(newText).execute()
else: # Failed to get text from clipboard.
Key('c-v').execute() # Restore cut out text.
_set_clipboard_text(saveText) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def wrap_by_word(s, n):\n\ta = s.split()\n\tret = ''\n\tfor i in range(0, len(a), n):\n\t\tret += ' '.join(a[i:i+n]) + '\\n'\n\treturn ret",
"def _set_number_of_words(self, N):\n self.N_words_to_display = N",
"def wrap(text, width):\n retstr = \"\"\n for word in text.split(' '):\n if len(retstr)-retstr.rfind('\\n')-1 + len(word.split('\\n',1)[0]) >= width:\n retstr += ' \\n' + word\n else:\n retstr += ' ' + word\n return retstr",
"def indent(text, n=4):\n if not text:\n return \"\"\n i = \" \" * n\n return i + text.replace(\"\\n\", \"\\n\" + i)",
"def add_space(s,n):\n t = \"\"\n for i in xrange(len(s)):\n # Add white space after every n characters.\n if i % n == 0 and i != 0:\n t += ' '\n t += s[i]\n\n return t",
"def html_space(n):\n return \" \" * n",
"def wrap(self, text, width = 78, indent1 = 0, indent2 = 0):\n \n new_text = \"\"\n\n current_line = \"\"\n current_line += \" \" * indent1\n\n text = text.replace(\"\\n\", \" \")\n \n words = text.split(\" \")\n\n for word in words:\n if len(word) == 0:\n continue\n\n if len(current_line) + len(word) <= width:\n if len(current_line):\n current_line += \" \" + word\n else:\n current_line = word\n else:\n new_text += current_line + NEWLINE\n current_line = \" \" * indent2 + word\n\n if len(current_line):\n new_text += current_line\n\n return new_text",
"def words_before_index(text, idx):\n while text[idx] != ' ':\n idx -= 1\n if idx <= 0:\n return 0\n n_words = len(text[:idx].split(' '))\n return n_words",
"def indent(text, first_line=True, n=1, width=4):\n lines = text.split(\"\\n\")\n if not first_line:\n first = lines[0]\n lines = lines[1:]\n\n spaces = \" \" * (width * n)\n lines2 = [spaces + x for x in lines]\n\n if not first_line:\n lines2.insert(0, first)\n\n indented = \"\\n\".join(lines2)\n\n return indented",
"def indent(text, count=1, prefix=\" \"):\n lines = text.split(\"\\n\")\n return \"\\n\".join(\"{}{}\".format(prefix * count, line)\n for line in lines)",
"def wrap(cls, text, first=0, indent=15, maxwidth=75):\n outstr = []\n sentence = []\n if not text:\n return \"\"\n for word in text.split():\n if len(\" \".join(sentence)) + len(word) + first > maxwidth:\n outstr.append(\" \".join(sentence))\n sentence = [\" \" * indent, word]\n first = 0\n else:\n sentence.append(word.strip())\n outstr.append(\" \".join(sentence))\n return \"\\n\".join(outstr)",
"def squash_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n text = _cleanup_text(cutText)\n newText = ''.join(text.split(' '))\n if endSpace:\n newText = newText + ' '\n newText = _expand_after_special_chars(newText)\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)",
"def add_padding(self, text):\n\n for word in text.split(' '):\n # 5 character blocks added straight\n if len(word) == 5:\n self.output += word + ' '\n # calling the helper method to fill the blocks\n elif len(word) < 5:\n self._helper(word)\n # split the block up into 5 letter chunks\n elif len(word) > 5:\n block = ''\n for letter in word:\n block += letter\n if len(block) == 5:\n # append the chunk to output\n self.output += block + ' '\n block = ''\n self._helper(block)\n\n return self.output.upper()",
"def cut_in_words(self,linea):\n length = 0\n res = ''\n limit_screen = 30\n for word in linea.split(' '):\n if length + len(word) <= limit_screen:\n new_word = word + ' '\n length += len(new_word)\n else:\n new_word = '\\n' + word + ' '\n length = len(new_word) - 2 #-2 para no tener en cuenta el \\n\n res += new_word\n return res",
"def insert_spaces(word):\n new_word = \"\"\n for c in word:\n new_word += c + \" \" \n return new_word",
"def wrap_onspace(text, width):\n\n if type(text) is str:\n return reduce(lambda line, word, width=width: '%s%s%s' %\n (line,\n ' \\n'[(len(line[line.rfind('\\n')+1:])\n + len(word.split('\\n',1)[0]\n ) >= width)],\n word),\n text.split(' ')\n )\n elif type(text) is list:\n new_text = ''\n counter = 0\n for e in text:\n counter += 1\n new_text += '('+str(counter)+') '+str(e)+\"\\n\"\n #new_text = ''.join(str(e) for e in text)\n return reduce(lambda line, word, width=width: '%s%s%s' %\n (line,\n ' \\n'[(len(line[line.rfind('\\n')+1:])\n + len(word.split('\\n',1)[0]\n ) >= width)],\n word),\n new_text.split(' ')\n )",
"def print_words(words):\n print('Here are the words you entered:')\n for i, word in enumerate(words, start=1):\n print('{}. {}'.format(i, word.title()))",
"def get_pad1(n):\n if n < 10:\n return \" \"\n if n < 100:\n return \" \"\n if n < 1000:\n return \" \"\n return \"\"",
"def fill_with_spaces(line: string, width: int) -> string:\n size = len(line)\n spaces_left = width - size\n return line + (' ' * spaces_left)",
"def display_words(word_list,specifier):\n \n if specifier.lower() == 'score':\n print(\"{:>6s} - {:s}\".format(\"Score\", \"Word\"))\n if len(word_list) < 5:\n for tup in word_list:\n print(\"{:>6d} - {:s}\".format(tup[1], tup[0]))\n else:\n \n for tup in word_list[:5]:\n print(\"{:>6d} - {:s}\".format(tup[1], tup[0]))\n \n \n elif specifier.lower() == 'length':\n print(\"{:>6s} - {:s}\".format(\"Length\", \"Word\"))\n if len(word_list) < 5:\n for tup in word_list:\n print(\"{:>6d} - {:s}\".format(tup[2], tup[0]))\n else:\n \n for tup in word_list[:5]:\n print(\"{:>6d} - {:s}\".format(tup[2], tup[0]))",
"def test_wrap_word():\n line = \"n\" * 81\n assert wrap_line(line) == \"n\" * 80 + \"\\nn\"",
"def fit_to_width(string, limit):\n\n input_words = string.split()\n i = 0\n line_list = []\n new_str = str()\n\n for word in input_words:\n if i == 0:\n new_str = word\n elif len(new_str+word) < limit:\n new_str = new_str + ' ' + word\n else:\n line_list.append(new_str)\n new_str = word\n if i == (len(input_words)-1):\n line_list.append(new_str)\n i += 1\n\n for string in line_list:\n print(string)",
"def wrap(text, width):\n return reduce(lambda line, word, width=width: '%s%s%s' %\n (line,\n ' \\n'[(len(line)-line.rfind('\\n')-1\n + len(word.split('\\n',1)[0]\n ) >= width)],\n word),\n text.split(' ')\n )",
"def textJustification(words, maxWidth):\n lines = []\n currWordLen = 0\n temp = []\n\n # split up into different lines.\n\n # ensure everything before gets appended properly\n words.append('a' * maxWidth)\n\n for word in words:\n if len(word) + currWordLen > maxWidth:\n lines.append(temp)\n temp = []\n temp.append(word)\n currWordLen = len(word) + 1 # account for spaces\n else:\n temp.append(word)\n currWordLen += len(word) + 1\n\n res = []\n numLines = len(lines)\n for index, line in enumerate(lines):\n if index == numLines - 1:\n numWords = len(line)\n s = ' '.join(line)\n remainingSpaces = maxWidth - len(s)\n s += ' ' * remainingSpaces\n res.append(s)\n else:\n\n numWords = len(line)\n remainingSpaces = maxWidth - len(''.join(line))\n if numWords - 1 != 0:\n interSpace = remainingSpaces // (numWords - 1)\n remainingSpaces = remainingSpaces - \\\n ((numWords - 1) * interSpace)\n\n i = 0\n while remainingSpaces != 0:\n line[i] += ' '\n i = (i + 1) % (numWords)\n remainingSpaces -= 1\n\n res.append((' ' * interSpace).join(line))\n\n return res",
"def _justifyText(text):\n chunks = text.split()\n line = []\n lineLength = 0\n for chunk in chunks:\n lineLength += len(chunk) + 1\n if lineLength <= 73:\n line.append(chunk)\n continue\n else:\n print('''{:5}{:73}{}'''.format(\"|\", \" \".join(line), \"|\"))\n del line[:]\n line.append(chunk)\n lineLength = len(chunk) + 1\n print('''{:5}{:73}{}'''.format(\"|\", \" \".join(line), \"|\"))",
"def wordwrap(value, arg=80):\n\treturn \"\\n\".join(textwrap.wrap(value, int(arg)))",
"def pascal_case_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n text = _cleanup_text(cutText)\n newText = text.title().replace(' ', '')\n if endSpace:\n newText = newText + ' '\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)",
"def clean_5_words(self):\n self.first_word.configure(text=\"\")\n self.second_word.configure(text=\"\")\n self.third_word.configure(text=\"\")\n self.fourth_word.configure(text=\"\")\n self.fifth_word.configure(text=\"\")",
"def WrapWords(textlist, size, joiner='\\n'):\n # \\S{%d}(?!\\s|\\Z) collets the max size for words that are larger than the max\n # (?<=\\S{%d})\\S+ collects the remaining text for overflow words in their own\n # line\n # \\S.{1,%d}(?=\\s|\\Z)) collects all words and spaces up to max size, breaking\n # at the last space\n rval = []\n linelength_re = re.compile(\n r'(\\S{%d}(?!\\s|\\Z)|(?<=\\S{%d})\\S+|\\S.{1,%d}(?=\\s|\\Z))' %\n (size, size, size - 1))\n for index in range(len(textlist)):\n if len(textlist[index]) > size:\n # insert joiner into the string at appropriate places.\n textlist[index] = joiner.join(linelength_re.findall(textlist[index]))\n # avoid empty comment lines\n rval.extend(x.strip() for x in textlist[index].strip().split(joiner) if x)\n return rval",
"def space(n):\n rstr = \" \" * 4 * n\n return rstr",
"def insertNewlines(text, lineLength):\r\n lenCtr = 0\r\n this = []\r\n \r\n # convert entire text to a list of words\r\n words = text.split()\r\n \r\n # get a length of all the word\r\n for each in words[:]:\r\n lenCtr += len(each) + 1 # word length + whitespace\r\n this.append(each) # add word to this line\r\n words.remove(each) # remove word from remaining text\r\n print this,\r\n print words,\r\n print lenCtr\r\n if lenCtr >= lineLength:\r\n # insert \\n when lineLength is reached\r\n this.append('\\n')\r\n break\r\n # if itarable is exhausted, just return entire text\r\n else:\r\n return ' '.join(words)\r\n # remaining text\r\n text = ' '.join(words)\r\n return ' '.join(this) + insertNewlines(text, lineLength)",
"def tab_width(self, pad, *args):\n pad.insert(GUI.INSERT, ' ' * 4)\n return 'break'",
"def create_word(char_list):",
"def indent(self, n):\n self._ind = max(0, self._ind + n)",
"def WordWrap(cmd, linelen=80):\n indent = cmd.index(\"(\")+1\n out = []\n first = True\n x = re.compile(\"^(.{,%d})\\0\" % (linelen-indent,))\n while True:\n if not first:\n out.append(\" \" * indent)\n first = False\n m = x.search(cmd)\n if not m:\n parts = cmd.split(\"\\0\", 1)\n out.append(parts[0]+\"\\n\")\n if len(parts) == 1:\n break\n else:\n cmd = parts[1]\n continue\n out.append(m.group(1)+\"\\n\")\n cmd = cmd[m.end():]\n\n return \"\".join(out).replace(\"\\0\", \" \").rstrip(\"\\n\")",
"def _wrap_text(text: str, words_per_line=20) -> str:\n text = text.replace('\\r', '')\n text = text.replace('\\n', '')\n wordlist = text.split()\n\n result = []\n for i, word in enumerate(wordlist):\n if i % words_per_line == 0 and i != 0:\n result.append('\\n')\n\n result.append(word)\n\n return \" \".join(result)",
"def print_space(self,text,width,w=1,h=1):\n texlen = len(text)\n if texlen > width:\n text = text[:width]\n self.lesprint(text,width)",
"def justify_token(tok, col_width):\n get_len = tools.display_len if PY3 else len\n tok_len = get_len(tok)\n diff_len = tok_len - len(tok) if PY3 else 0\n\n cols = (int(math.ceil(float(tok_len) / col_width))\n if col_width < tok_len + 4 else 1)\n\n if cols > 1:\n return tok.ljust((col_width * cols) + (4 * cols) - diff_len)\n else:\n return tok.ljust(col_width + 4 - diff_len)",
"def insert_word(self, frame2, pad, lb, lang, *args):\n # print self.lastinsert\n\n if self.cntlbcall != 0:\n self.cntlbcall -= 1\n word = lb.get(self.cntlbcall).strip('\\n')\n\n coordinates1 = map(int, str(self.lastinsert).split('.'))\n coordinates = str(coordinates1[0]) + '.0'\n r = pad.get(coordinates, str(self.lastinsert))\n pos_space = 0\n\n for i in range(len(r)):\n if r[i] == ' ':\n pos_space = i\n if pos_space != 0:\n pos_space += 1\n coordinates = str(coordinates1[0]) + '.' + str(pos_space)\n pad.delete(coordinates, coordinates + 'lineend')\n pad.insert(self.lastinsert, word)\n coordinates1 = map(int, str(self.lastinsert).split('.'))\n coordinates1[-1] += len(word)\n pad.mark_set(GUI.INSERT, '%d.%d' % (coordinates1[0], coordinates1[1]))\n self.syntax_highlight(pad, lang, GUI.INSERT, 0)\n frame2.pack_forget()\n global FLAG\n FLAG = 1\n self.cntlbcall = 0\n pad.focus_force()\n return \"break\"\n # pad.insert('end',' ')",
"def lowercase_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n newText = cutText.lower()\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)",
"def nth_word(value: str, n: int) -> str:\n return value.split()[n]",
"def format_body(self):\n mt = deque(str(self.movetext).split(' ') + [])\n out = mt.popleft()\n ll = len(out)\n while True:\n if len(mt) is 0:\n break\n\n n = mt.popleft()\n # If the current line length + space + character is less than\n # 80 chars long\n if ll + len(n) + 1 < 80:\n to_add = \" \" + n\n out += \" \" + n\n ll += len(to_add)\n else:\n out += \"\\n\" + n\n ll = len(n)\n return out + str(self.score)",
"def indent_lines(s, n):\n return \"\\n\".join(map(lambda line: \" \" * n + line, \n s.split('\\n')))",
"def wcount(lines, topn=10):\n words=lines.lower()\n words=words.replace('.', '')\n words=words.replace(',', ' ')\n words=words.replace('!', ' ')\n words=words.replace('?', ' ')\n words=words.replace(':', ' ')\n words=words.replace('_', ' ')\n words=words.replace('\"', ' ')\n words=words.replace(\"'\", ' ')\n words=words.replace('(', ' ')\n words=words.replace(')', ' ')\n words=words.replace('[', ' ')\n words=words.replace(']', ' ')\n words=words.replace('-', ' ')\n words=words.replace(';', ' ')\n words=words.replace('\"', ' ')\n words=words.replace('*', ' ')\n lst=words.split(' ')\n lst2=list(set(lst))\n lst2.remove('')\n dic={}\n for i in lst2:\n dic[i]=lst.count(i)\n wds=list(dic.keys())\n numbers=list(dic.values())\n numbers2=sorted(numbers, reverse=True)\n for k in range(topn):\n m=numbers.index(numbers2[k])\n print(\"%-15s%-5d\"%(wds[m],numbers2[k]))",
"def wrap(text, width=78, indent=0):\n paras = para_sep.split(text.strip())\n\n new_paras = []\n for par in paras:\n words = filter(None, whitespace.split(par))\n\n lines = []\n line = []\n length = indent\n for word in words:\n if length + len(word) <= width:\n line.append(word)\n length += len(word) + 1\n else:\n lines.append(' ' * indent + ' '.join(line))\n line = [word]\n length = len(word) + 1 + indent\n\n lines.append(' ' * indent + ' '.join(line))\n\n new_paras.append('\\n'.join(lines))\n\n return '\\n\\n'.join(new_paras) + '\\n\\n'",
"def __init__(self, n, sents):\n assert n > 0\n self._n = n\n print(\"Counting...\")\n count = defaultdict(int)\n while n >= 0:\n for sent in sents:\n s = sent[:] # En una oracion auxiliar agrego el item de start y end para contarlos\n s.insert(0, \"<s>\")\n s.append(\"</s>\")\n for i in range(len(s) - n + 1):\n count[tuple(s[i:i + n])] += 1\n n -= 1\n count[()] = count[()] - count[('<s>',)] - count[\n ('</s>',)] # Pero no quiero que <s> y </s> sean considerados por ()\n self._count = count\n print(\"Computing vocabulary...\")\n self._voc = voc = set()\n for sent in sents:\n voc = voc.union(set(sent))\n voc.add('</s>')\n self._voc = voc\n self._V = len(voc) # vocabulary size\n print(\"Done\")",
"def display_top_n_words(total_count__of_words, n): # Considering n=10 here as specified in the requirements\n return sorted(total_count__of_words.items(), key=lambda i: i[1], reverse=True)[:n]",
"def wc(file_):\r\n with open(file_) as f:\r\n file = f.read().strip()\r\n char_nums = len(file)\r\n lines = file.split('\\n')\r\n line_nums = len(lines)\r\n word_nums = 0\r\n for line in lines:\r\n words = line.split()\r\n word_nums += len(words)\r\n return f'{line_nums} {word_nums} {char_nums} {file_}'",
"def tabing_tool(code):\n for i, line in enumerate(code):\n code[i] = ' '*4 + line\n return code",
"def Left(text, number):\n return text[:number]",
"def print_word_frequency_list(my_word_frequency_list):\n for word in my_word_frequency_list:\n print(\"{} {}\".format(word[0].ljust(5), word[1]))\n\n print('\\n')",
"def __ingest_whitespace(line, position):\n pos = position\n while line[pos] == ' ':\n pos += 1\n return pos - position",
"def add_spaces(text):\n return \" \".join(text)",
"def hangingIndent(msg, numCols):\r\n\tinsertPoint = numCols\r\n\twhile insertPoint < len(msg):\r\n\t\tmsg = msg[:insertPoint] + \"\".center(INDENT_SIZE) + msg[insertPoint:]\r\n\t\tinsertPoint += numCols\r\n\t\r\n\treturn msg",
"def test_wrap_2_words():\n w1, w2 = \"n\" * 75, \"n\" * 5\n line = \"%s %s\" % (w1, w2)\n assert wrap_line(line) == \"%s\\n%s\" % (w1, w2)",
"def left_justify_string(keyword, value):\n return '%s' % keyword .ljust(40, \".\") + \": \" + '%s\\n' % value",
"def test20():\n\tdef highlight_word(sentence, word):\n\t\treturn(\" \".join(x) for x in sentence.split())\n\n\tprint(highlight_word(\"Have a nice day\", \"nice\"))\n\tprint(highlight_word(\"Shhh, don't be so loud!\", \"loud\"))\n\tprint(highlight_word(\"Automating with Python is fun\", \"fun\"))",
"def get_first_k_words(text: str, num_words: int) -> str:\n words = text.split()\n if num_words >= len(text):\n return text\n\n return ' '.join(words[:num_words])",
"def wcount(lines, topn = 10):\n global worddict\n worddict = {}\n # record words each line by each\n linestr = lines.readline().decode() \n while linestr:\n record(linestr)\n linestr = lines.readline().decode()\n \n # sort the worddict to construct a wordlist\n wordlist = sorted(worddict.items(),\\\n key=lambda x:x[1],reverse = True)\n \n # get all words if lenth is less than number\n print(' '*3+'Word'.ljust(30),'Times'.center(10))\n for num in range(min(len(wordlist),topn)):\n print(' '*3+wordlist[num][0].ljust(30),\\\n str(wordlist[num][1]).center(10))",
"def _(event):\n buffer = event.current_buffer\n buffer.cursor_position += buffer.document.find_next_word_beginning(count=event.arg) or \\\n buffer.document.get_end_of_document_position()",
"def get_pad(self, n):\n pad = \"\"\n for i in range(0, n):\n pad += \" \"\n return pad",
"def longest_words(self, n=10):\n return sorted(set(self.text), key=len, reverse=True)[:n]",
"def fd(self, sent, index, length):\n context = lambda idx, field: sent[index + idx][field] \\\n if index+idx >= 0 and index + idx < length \\\n else \"<s>\" if index+idx < 0 \\\n else \"</s>\"\n\n ## tokens in a 5 token window x_{i-2}..x_{i+2}\n word_unigram_cur = numify(context(0, WORD))\n word_unigram_pre = numify(context(-1, WORD))\n word_unigram_2pre = numify(context(-2, WORD))\n word_unigram_post = numify(context(1, WORD))\n word_unigram_2post = numify(context(2, WORD))\n\n ## token bigrams in a 5 token window\n word_bigram_pre_cur = \"/\".join([word_unigram_pre, word_unigram_cur])\n word_bigram_cur_post = \"/\".join([word_unigram_cur, word_unigram_post])\n\n ## pos in a 5 token window\n pos_cur = context(0, POS)\n pos_pre = context(-1, POS)\n pos_post = context(1, POS)\n pos_2pre = context(-2, POS)\n pos_2post = context(2, POS)\n\n ## pos bigrams in a 3 token window\n pos_bigram_pre_cur = \"/\".join([pos_pre, pos_cur])\n pos_bigram_cur_post = \"/\".join([pos_cur, pos_post])\n #pre_pre_pos_bigram = \"/\".join([pre_pre_pos, pre_pos])\n #post_post_pos_bigram = \"/\".join([post_pos, post_post_pos])\n\n pos_posw_cur = \"/\".join([word_unigram_cur, pos_cur])\n\n ## Word shape features (5 token window)\n shape_istitle_cur = word_unigram_cur.istitle()\n shape_isdigit_cur = context(0, WORD).isdigit()\n shape_isupper_cur = word_unigram_cur.isupper()\n shape_hyphen_cur = \"-\" in word_unigram_cur[1:-1]\n shape_isalnum_cur = context(0, WORD).isalnum()\n #shape_mixedcase_cur = self.mixedcase.match(context(0, WORD)) != None\n\n shape_istitle_pre = word_unigram_pre.istitle()\n shape_isdigit_pre = context(-1, WORD).isdigit()\n shape_isupper_pre = word_unigram_pre.isupper()\n shape_hyphen_pre = \"-\" in word_unigram_pre[1:-1]\n shape_isalnum_pre = context(-1, WORD).isalnum()\n #shape_mixedcase_pre = self.mixedcase.match(context(-1, WORD)) != None\n\n shape_istitle_2pre = word_unigram_2pre.istitle()\n shape_isdigit_2pre = context(-2, WORD).isdigit()\n shape_isupper_2pre = word_unigram_2pre.isupper()\n shape_hyphen_2pre = \"-\" in word_unigram_2pre[1:-1]\n shape_isalnum_2pre = context(-2, WORD).isalnum()\n #shape_mixedcase_2pre = self.mixedcase.match(context(-2, WORD)) != None\n\n shape_istitle_post = word_unigram_post.istitle()\n shape_isdigit_post = context(1, WORD).isdigit()\n shape_isupper_post = word_unigram_post.isupper()\n shape_hypen_post = \"-\" in word_unigram_post[1:-1]\n shape_isalnum_post = context(1, WORD).isalnum()\n #shape_mixedcase_post = self.mixedcase.match(context(1, WORD)) != None\n\n shape_istitle_2post = word_unigram_2post.istitle()\n shape_isdigit_2post = context(2, WORD).isdigit()\n shape_isupper_2post = word_unigram_2post.isupper()\n shape_hypen_2post = \"-\" in word_unigram_2post[1:-1]\n shape_isalnum_2post = context(2, WORD).isalnum()\n #shape_mixedcase_2post = self.mixedcase.match(context(2, WORD)) != None\n\n ## 2-4 suffixes in a 3 token window\n suffix_1_cur = word_unigram_cur[-1:]\n suffix_2_cur = word_unigram_cur[-2:]\n suffix_3_cur = word_unigram_cur[-3:]\n suffix_4_cur = word_unigram_cur[-4:]\n\n suffix_1_pre = word_unigram_pre[-1:]\n suffix_2_pre = word_unigram_pre[-2:]\n suffix_3_pre = word_unigram_pre[-3:]\n suffix_4_pre = word_unigram_pre[-4:]\n\n suffix_1_post = word_unigram_post[-1:]\n suffix_2_post = word_unigram_post[-2:]\n suffix_3_post = word_unigram_post[-3:]\n suffix_4_post = word_unigram_post[-4:]\n\n ## 3-4 prefixes in a 3 token window\n prefix_3_cur = word_unigram_cur[:3]\n prefix_4_cur = word_unigram_cur[:4]\n\n prefix_3_pre = word_unigram_pre[:3]\n 
prefix_4_pre = word_unigram_pre[:4]\n\n prefix_3_post = word_unigram_post[:3]\n prefix_4_post = word_unigram_post[:4]\n\n ## Noun phrase in a 3 token window\n syn_np_cur = context(0, NP)\n syn_npw_cur = \"/\".join([syn_np_cur, word_unigram_cur])\n syn_np_pre = context(-1, NP)\n syn_np_post = context(1, NP)\n\n ## Extract features from local scope\n features = locals()\n del features[\"context\"]\n del features[\"sent\"]\n del features[\"index\"]\n del features[\"length\"]\n del features[\"self\"]\n features = features.items()\n\n features.extend(self.brown_extractor(\"brown_%d_cur\", context(0, WORD)))\n features.extend(self.brown_extractor(\"brown_%d_pre\", context(-1, WORD)))\n features.extend(self.brown_extractor(\"brown_%d_2pre\", context(-2, WORD)))\n features.extend(self.brown_extractor(\"brown_%d_post\", context(1, WORD)))\n features.extend(self.brown_extractor(\"brown_%d_2post\", context(2, WORD))) \n\n return features",
"def watch_pyramid_from_the_side(characters):\n if characters:\n space_count = 0\n count = len(characters)\n out_string = \"\"\n for char in characters:\n out_string += \" \" * space_count + char * (2 * count - 1) +\\\n \" \" * space_count + \"\\n\"\n count -= 1\n space_count += 1\n return out_string[-2::-1]\n else:\n return characters",
"def limit_32_words(string: str) -> str:\n return_str = \"\"\n space_accum = 0\n for c in string:\n if space_accum == 32:\n break\n\n if c == \" \":\n return_str += c\n space_accum += 1\n\n else:\n return_str += c\n\n return return_str",
"def wordwrap(text, width=80):\n if not isinstance(text, (types.StringType,types.UnicodeType)):\n return text\n width = int(width) # ensure we have an int, if this is used as a template filter\n text = re.sub(\" *\\r\\n\", \"\\n\", text) # get rid of DOS line endings\n text = re.sub(\" *\\r\", \"\\n\", text) # get rid of MAC line endings\n text = re.sub(\"( *\\n){3,}\", \"\\n\\n\", text) # get rid of excessive vertical whitespace\n lines = text.split(\"\\n\")\n filled = []\n wrapped = False\n prev_indent = None\n for line in lines:\n line = line.expandtabs().rstrip()\n indent = \" \" * (len(line) - len(line.lstrip()))\n ind = len(indent)\n if wrapped and line.strip() != \"\" and indent == prev_indent:\n line = filled[-1] + \" \" + line.lstrip()\n filled = filled[:-1]\n else:\n wrapped = False\n while (len(line) > width) and (\" \" in line[ind:]):\n linelength = len(line)\n wrapped = True\n breakpoint = line.rfind(\" \",ind,width)\n if breakpoint == -1:\n breakpoint = line.find(\" \", ind)\n filled += [ line[:breakpoint] ]\n line = indent + line[breakpoint+1:]\n if len(line) >= linelength:\n break\n filled += [ line.rstrip() ]\n prev_indent = indent\n return \"\\n\".join(filled)",
"def fix_spaces(text):\n # Please print out which line of the above program contains an error. E.g. if the bug is on line 4 then print 4\n # END OF CONTEXT\n print(\"19\")\n # END OF SOLUTION",
"def s_words(words):\n\t\n\treturn words // 100 / 10",
"def remove_repeated_spaces(text: str) -> str:\n\n return _repeated_spaces.sub(' ', text)",
"def make_title(words):",
"def truncate(text, words=25):\n return ' '.join((text).split()[:words])",
"def lw(max_no, str_obj):\n x = max_no - len(str_obj)\n y = 0\n string = ''\n for y in range(x):\n string = string + ' '\n return string",
"def insertNewlines(text, lineLength):\n def gotoWordEnd(text, lineEnd):\n print \"Text: %s, %r\" % (text, lineEnd)\n if len(text) == lineEnd:\n return lineEnd\n if text[lineEnd] ==' ': # test if its a space\n return lineEnd\n lineEnd += 1\n\n return gotoWordEnd(text, lineEnd)\n\n # Base case: text is empty:\n if not text:\n return ''\n\n # check if we are on a current word.\n lineEnd = lineLength - 1\n print \"lineEnd == %r\" % lineEnd\n\n currentWord = ''\n if len(text) >= lineLength:\n currentWord = text[lineEnd]\n \n # current letter isn't empty. keep skipping forward\n # return lineLengthIndex of after the word ends\n print \"current word == %s\" % currentWord\n if currentWord:\n\n wordEnd = gotoWordEnd(text, lineEnd)\n print \"wordEnd: %r\" % wordEnd\n currentLine = text[:wordEnd] + \"\\n\"\n print \"currentLine: %s \" % currentLine\n text = text[wordEnd + 1:]\n else:\n currentLine = text[:lineLength]\n text = text[lineLength:]\n return currentLine + insertNewlines(text, lineLength)",
"def make_spaces_for_html(indent_num: int) -> str:\r\n from apysc.validation import number_validation\r\n number_validation.validate_integer(integer=indent_num)\r\n number_validation.validate_num_is_gte_zero(num=indent_num)\r\n spaces: str = ' ' * (indent_num * 2)\r\n return spaces",
"def snake_case_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n text = _cleanup_text(cutText.lower())\n newText = '_'.join(text.split(' '))\n if endSpace:\n newText = newText + ' '\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)",
"def _format_line(line: str, n: int, padding: int) -> str:\n return ' {dim}{n}{reset}: {line}'.format(dim=Style.DIM,\n n=str(n + 1).zfill(padding),\n line=line,\n reset=Style.RESET_ALL)",
"def display(wordsDictionary):\n noOfWords = 0\n print(\"-\" * 42)\n print(\"| %20s | %15s |\" % (\"WORDS\".center(20), \"FREQUENCY\".center(15)))\n print(\"-\" * 42)\n for word in list(sorted(wordsDictionary.keys())):\n noOfWords += 1\n print(\"| %-20s | %15s |\" % (word, str(wordsDictionary.get(word)).center(15)))\n # Halt every 20 words (configurable)\n if (noOfWords != 0 and noOfWords % 20 == 0):\n print(\"\\n\" * 2)\n input(\"PRESS ENTER TO CONTINUE ... \")\n print(\"\\n\" * 5)\n print(\"-\" * 42)\n print(\"| %20s | %15s |\" % (\"WORDS\".center(20), \"FREQUENCY\".center(15)))\n print(\"-\" * 42)\n print(\"-\" * 42)\n print(\"\\n\" * 2)",
"def printResults(listWords):\n width = 0\n for word in listWords:\n if len(word.name) > width:\n width = len(word.name)\n for word in listWords:\n lstring = str(word.listOfLines).replace('[','').replace(']','')\n print '%s: %d times, lines: %s' % (word.name.rjust(width), \n word.occurence, lstring)",
"def doormat(n, m):\r\n pad = '-'\r\n filler = '.|.'\r\n middle = 'WELCOME'\r\n mat = []\r\n mid = middle.center(m, pad)\r\n error = 'Oops, try again!'\r\n\r\n try:\r\n for width in range(1, n, 2):\r\n mat.append((filler*width).center(m, pad))\r\n end = list(reversed(mat))\r\n mat.append(mid)\r\n mat = mat + end\r\n return '\\n'.join(mat)\r\n except TypeError:\r\n return error",
"def process_game(level, blank):\r\n paragraph = paragraphs[level]\r\n while blank < len(input_list[level]):\r\n word_to_replace = (input_list[level][blank])\r\n list_of_words = paragraph.split(\" \")\r\n index = 0\r\n # Replace the word_to_replace with ___number___\r\n while index < len(list_of_words):\r\n if list_of_words[index] == word_to_replace:\r\n list_of_words[index] = \"___\" + str(blank + 1) + \"___\"\r\n index += 1\r\n\r\n blank += 1\r\n paragraph = \" \".join(list_of_words)\r\n\r\n return paragraph",
"def wcount(lines, topn):\n word = ''\n for i in lines:\n if 65<=ord(i) and ord(i)<=90:\n word = word + i \n elif 97<=ord(i) and ord(i)<=122:\n word = word + i\n else:\n word = word + ' ' \n word = word.split()\n #提取不重复的单词\n alreadyknown = []\n for m in word:\n if m not in alreadyknown:\n alreadyknown.append(m)\n #分别数数,排序,建构字典\n empty = []\n final = {}\n final2 = {}\n for j in alreadyknown:\n number = icount(word,j)\n final[j]=number\n final2[str(number)]=j\n empty.append(number)\n empty.sort()\n empty.reverse()\n last_step = empty[:10]\n #通过数字找到对应word\n last_str = ''\n for y in last_step:\n z = final2[str(y)]\n last_str += z + \"\\t\" + str(y) + \"\\n\"\n return last_str",
"def add_line(self):\n #N_spaces = message.count(' ') # From both sides\n banner = 40\n words = [self.your_string]\n word_sum = sum(len(i) for i in words)\n margin1 = int(banner/5)*\"=\"\n margi1_len = len(margin1)\n margin2 = (banner-(margi1_len+word_sum+2))*\"=\"\n if len(margin1+self.your_string+margin2) < banner:\n \"\"\"\n Here for simple one line sentence we add\n directly the margins with the self.your_string\n \"\"\"\n print(banner*\"=\")\n print(f\"\"\"{margin1} {self.your_string} {margin2}\"\"\")\n print(banner*\"=\")\n else:\n \"\"\"\n Here we check the number of words and reset the list and\n spaces everytime we go to a new line until the self.your_string\n is finished.\n \"\"\"\n print(banner*\"=\")\n wordsx = self.your_string.split()\n xr = 4 # alignment added later (adjusted manually)\n i = 0\n spacex = 0\n listwords = []\n while i < len(wordsx):\n\n listwords.append(wordsx[i])\n if sum(len(i) for i in listwords) + spacex < banner-xr:\n print(wordsx[i], end = \" \")\n spacex = spacex +1\n\n else:\n print(wordsx[i])\n listwords = []\n spacex = 0\n\n i = i +1\n\n print(\"\\n\", end= \"\")\n print(banner*\"=\")",
"def _pad_horizontally(self, chars_written):\n if chars_written >= self.width:\n return\n\n extra = self.width - chars_written\n self.buf += ' ' * extra",
"def pad_words(words, length):\n diff_len = length - len(words)\n if diff_len <= 0:\n return words\n return words + [\"padding\"] * diff_len",
"def normalizeTexts(texts):\n fCW = 0\n for item in texts:\n fCW = max(len(item), fCW)\n for counter, item in enumerate(texts):\n texts[counter] = texts[counter].ljust(fCW + 1, '.')\n return (texts, fCW)",
"def make_text(sent, begin, end):\n lemmas = [sent.morps[begin].lemma(), ]\n for idx in range(begin+1, end):\n if sent.mid2wid[idx-1] != sent.mid2wid[idx]: # if go over word boundary\n # insert space between words\n lemmas.append(' ')\n lemmas.append(sent.morps[idx].lemma())\n return ''.join(lemmas)",
"def wordwrap():\n file = open(sys.argv[2])\n width = int(sys.argv[1]) \n line = file.readline()\n while line:\n line = line.strip()\n line1,line = wordwrap_on(line,width)\n print line1\n if line=='' :\n line = file.readline()",
"def textwide(s, tf):\r\n width = 350 ## default ok for Arial or Helvetica\r\n if gv[\"font\"] == \"Times-roman\":\r\n width = 330\r\n if gv[\"font\"] == \"Courier\":\r\n width = 390\r\n if gv[\"fontfixed\"] is False:\r\n localfontsize = int(gv[\"fontsize\"]*gv[\"globalscale\"])\r\n else:\r\n localfontsize = int(gv[\"fontsize\"])\r\n return tf*localfontsize * len(s)*width/(1000*(gv[\"fixedUR\"][0] - gv[\"fixedLL\"][0]))",
"def lw2(max_no, str_obj):\n x = max_no - len(str_obj)\n y = 0\n string = ''\n for y in range(x):\n string = string + ' '\n return string",
"def calculate_text_stars(word_counts) -> int:\n if word_counts == []:\n return 3\n words_per_slide = sum(word_counts) / len(word_counts)\n stars = 5 - abs(words_per_slide - 35) / 8\n # print(stars)\n return max(0, min(5, int(stars + 0.5)))",
"def delete_words(self):\n self.word_1.delete(0, tk.END)\n self.word_2.delete(0, tk.END)\n self.word_3.delete(0, tk.END)\n self.word_4.delete(0, tk.END)\n self.word_5.delete(0, tk.END)",
"def split_into_words(context_text):\n doc_tokens = []\n char_to_word_offset = []\n prev_is_whitespace = True\n for c in context_text:\n if is_whitespace(c):\n prev_is_whitespace = True\n else:\n if prev_is_whitespace:\n doc_tokens.append(c)\n else:\n doc_tokens[-1] += c\n prev_is_whitespace = False\n char_to_word_offset.append(len(doc_tokens) - 1)\n return doc_tokens, char_to_word_offset",
"def preprocess_nmt(text):\n def no_space(char, prev_char):\n return char in set(',.!?') and prev_char != ' '\n\n # Replace non-breaking space with space, and convert uppercase letters to\n # lowercase ones\n text = text.replace('\\u202f', ' ').replace('\\xa0', ' ').lower()\n # Insert space between words and punctuation marks\n out = [' ' + char if i > 0 and no_space(char, text[i - 1]) else char\n for i, char in enumerate(text)]\n return ''.join(out)",
"def generate_words(self,N):\n for i in xrange(N):\n prefix = \" \" * self.chainlen\n name = \"\"\n suffix = \"\"\n while True:\n suffix = self.get_suffix(prefix)\n if suffix == \"\\n\" or len(name) > 9:\n break\n else:\n name = name + suffix\n prefix = prefix[1:] + suffix\n yield name.capitalize()",
"def motion_w(input_line, cur, count):\n pos = get_pos(input_line, REGEX_MOTION_LOWERCASE_W, cur, True, count)\n if pos == -1:\n return len(input_line), False, False\n return cur + pos, False, False",
"def format_text(text):\n\n\ttext = ' '.join(text).lower()\n\ttext = re.sub(r\"[^a-zA-Z.?!]\", \" \", text)\n\ttext = re.sub(r' +', ' ', text)\n\ttext = word_tokenize(text)\n\ttext = pos_tag(text)\n\n\treturn text",
"def wrap(self, ind, text, rhs=0):\n l = 79 - ind * self.indSize - rhs\n return textwrap.wrap(text, l)",
"def initialize_words(name, times=1, words_in_token=2):\r\n\twords = name.split(\" \")\r\n\tif words_in_token > len(words):\r\n\t\twords_in_token = len(words)\r\n\tfor i in range(times):\r\n\t\trandnum = random.randint(0, len(words) - words_in_token)\r\n\t\tremoved = \"\"\r\n\t\tfor index in range(words_in_token):\r\n\t\t\tremoved = removed + words.pop(randnum)[0]\r\n\t\twords.insert(randnum, removed)\r\n\treturn \" \".join(words)",
"def createWordKnown(self):\n return ''.join(['_ ' for m in range(self.wordLen)])",
"def _addIndent(self, block, count=1):\n return re.compile(r\"^((?!$))\", re.M).sub(\" \" * count, block)"
] | [
"0.662828",
"0.62945515",
"0.6190352",
"0.6139892",
"0.6126422",
"0.6030799",
"0.6020674",
"0.59485686",
"0.59349114",
"0.5918667",
"0.59178454",
"0.5900272",
"0.5900229",
"0.5873123",
"0.58688444",
"0.58219737",
"0.579922",
"0.579865",
"0.57816565",
"0.5758057",
"0.5754745",
"0.57186604",
"0.56766254",
"0.56553304",
"0.56273043",
"0.5624144",
"0.5623095",
"0.5609918",
"0.55997676",
"0.55996126",
"0.55810076",
"0.5564158",
"0.55578864",
"0.5557325",
"0.5545374",
"0.55341613",
"0.5529829",
"0.552281",
"0.5506853",
"0.54996",
"0.54849386",
"0.5481151",
"0.54790294",
"0.5471954",
"0.54686695",
"0.54650974",
"0.5450399",
"0.54419214",
"0.5440368",
"0.5436357",
"0.5431116",
"0.5428047",
"0.54204726",
"0.5418633",
"0.54148036",
"0.5403048",
"0.53977835",
"0.5393022",
"0.5357409",
"0.5353329",
"0.5341431",
"0.5340798",
"0.53401446",
"0.53353626",
"0.53297067",
"0.52952987",
"0.52936786",
"0.52850574",
"0.52826285",
"0.52753043",
"0.5274783",
"0.52700347",
"0.5265",
"0.5261763",
"0.5258239",
"0.52567047",
"0.52506995",
"0.52482295",
"0.52460164",
"0.52443564",
"0.52401",
"0.5227279",
"0.5226574",
"0.522579",
"0.52208585",
"0.5216725",
"0.52137715",
"0.5208308",
"0.5204233",
"0.51890934",
"0.5188927",
"0.5185748",
"0.51822716",
"0.5159309",
"0.515911",
"0.515533",
"0.5148175",
"0.51469046",
"0.51463765",
"0.51345813"
] | 0.6068492 | 5 |
Formats dictated text to upper case. | def uppercase_text(text):
newText = format_upper_case(text)
Text("%(text)s").execute({"text": newText}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def to_upper(self, text):\n\t\treturn text.upper()",
"def UPPER(text):\n return text.upper()",
"def convert_to_uppercase(text):\n return text.upper()",
"def UCase(text):\n return text.upper()",
"def _transliterate_text(self, _text):\n return _text.upper()",
"def upperCase(self,phrase):\n if(\"normalizeText\" in self._classes):\n return self._normalize.upperCase(phrase)",
"def upper(self, value):\n return self.text(value).upper()",
"def ReturnUpper(text):\n try:\n text = text.upper()\n return text\n except:\n pass",
"def process(self, s):\n # modified for project...\n return s.upper()",
"def tr_upper_to_lower(text):\n out = []\n for ch in text:\n if ch in tr_upper_to_lower_dict:\n out.append(tr_upper_to_lower_dict[ch])\n else:\n out.append(ch.lower())\n \n return \"\".join(out)",
"def _braceUppercase(text):\n for uc in string.uppercase:\n text = text.replace(uc, r'{%s}' % uc)\n return text",
"def capify(text):\n return text[0].upper() + text[1:]",
"def camel_case_text(text):\n newText = format_camel_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})",
"def format_camel(text):\r\n\toutput = \"\"\r\n\tfor i,w in enumerate(text.split(\" \")):\r\n\t\tif i > 0:\r\n\t\t\toutput += w[0].upper() + w[1:]\r\n\t\telse:\r\n\t\t\toutput += w\r\n\treturn output",
"def upper(value):\n return value.upper()",
"def firstUpper(self,phrase):\n if(\"normalizeText\" in self._classes):\n return self._normalize.firstUpper(phrase)",
"def upper(self):\n if not self._upper_string:\n self._upper_string = \"\".join(self.upper_word_list)\n return self._upper_string",
"def upper(value): # Only one argument.\n return value.upper()",
"def __process_word(self, word):\n output = ''\n capitals = self.__capital_positions(word)\n c_index = 0\n\n for c in word:\n if c_index in capitals:\n output += c.upper()\n else:\n output += c.lower()\n\n c_index += 1\n\n return output",
"def camel_case_to_readable(text: str) -> str:\n if text == 'id':\n return 'ID'\n return ''.join(' ' + char if char.isupper() else char.strip() for char in text).strip().title()",
"def uppercase(self):\n names = {}\n for name, value in self._names.items():\n if name is not None:\n names[name] = names[name.upper()] = value\n self._names = names",
"def replace_all_uppercase(text):\n for match in re.finditer(r'([A-Z]+(!|\\.|,)?(.)?){5,}', text):\n text = text.replace(match.group(0), match.group(0).lower())\n return text",
"def invert_capitalization(word):\n if word.islower():\n return word.upper()\n else:\n return word.lower()",
"def case(text, casingformat='sentence'):\n\n # If the lowercase version of the casing format is 'uppercase'\n if casingformat.lower() == 'uppercase':\n # Return the uppercase version\n return str(text.upper())\n\n # If the lowercase version of the casing format is 'lowercase'\n elif casingformat.lower() == 'lowercase':\n # Return the lowercase version\n return str(text.lower())\n\n # If the lowercase version of the casing format is 'sentence'\n elif casingformat.lower() == 'sentence':\n # Return the sentence case version\n return str(text[0].upper()) + str(text[1:])\n\n # If the lowercase version of the casing format is 'caterpillar'\n elif casingformat.lower() == 'caterpillar':\n # Return the caterpillar case version\n return str(text.lower().replace(\" \", \"_\"))\n\n # Raise a warning\n raise ValueError(\"Invalid text format specified.\")",
"def process_text(text, args):\n if args.uppercase:\n text = convert_to_uppercase(text)\n\n if args.spaces:\n text = add_spaces(text)\n\n if not args.uppercase and not args.spaces:\n text = add_spaces(text)\n\n return text",
"def uppercase_name(name):\n return name.upper()",
"def upper(self) -> String:\n pass",
"def convert_pascal_case_to_readable_format(text):\n return \"\".join(list(map(lambda x : \" \" + x.lower() if x.isupper() else x, list(text))))",
"def word_capital(text):\n if text and len(text) > 0:\n return ' '.join([s[0].upper() + s[1:] for s in text.split(' ') if len(s) > 0])\n else:\n return text",
"def upper_list_label(self, upper_case=True):\r\n for item_index in xrange(self.count()):\r\n item_text = str(self.item(item_index).text())\r\n if upper_case:\r\n item_text = item_text.upper()\r\n else:\r\n item_text = item_text.lower()\r\n self.item(item_index).setText(item_text)",
"def to_upper_snakecase(text: str) -> str:\n\n data = text.replace(\" \", \"_\").upper()\n if data[0].isdigit():\n data = \"_\" + data\n return data",
"def apply_capitalization_to_tag(tag: Dict[str, Union[str, List[str]]]) -> str:\n return CAPITALIZATION_TABLE[tag[\"capitalization\"]].apply(tag[\"context\"])",
"def uppercase(self):\n\n file = open(self.filename, 'r')\n new_file = open(self.temp_filename, 'w')\n for line in file:\n for keyword in self.KEYWORDS:\n if keyword in line:\n line = line.replace(keyword, keyword.upper())\n new_file.write(line)\n file.close()\n new_file.close()\n self.overwrite_file()",
"def normalize_case(text):\n text = str(text)\n return text.lower()",
"def raw_upper(self) -> str:\n return self.raw.upper()",
"def capitalize(result):\n\treturn result.upper()",
"def pascal_case_text(text):\n newText = format_pascal_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})",
"def sep_upper(text):\n \n new_text = \"\"\n for letter in text:\n if letter.isupper():\n new_text += \" \" + letter\n else:\n new_text += letter\n \n return new_text",
"def force_uppercase(etl, field_names, **kwargs):\r\n import arcetl\r\n func = functools.partial(\r\n etl.transform, transformation=arcetl.attributes.update_by_function,\r\n function=value.force_uppercase, **kwargs\r\n )\r\n tuple(func(field_name=name) for name in field_names)",
"def test_capitalize(self):\n self.assertEqual(\n minerals_extras.capitalize('mohs scale hardness'),\n 'Mohs Scale Hardness')",
"def uppercase(str):\n \n return str.upper()",
"def print_upper_words(words):\n for word in words:\n print(word.upper())",
"def print_upper_words(words):\n \n for word in words:\n print(word.upper())",
"def _format(text):\n \n if isinstance(text, unicode):\n return text.lower().encode(\"UTF-8\")\n elif isinstance(text, str):\n return text.lower()",
"def capitalize(value: str, **kwargs: Any) -> str:\n return value[0].upper() + value[1:]",
"def not_capitalized(): # noqa: D416",
"def capitolize():\n a_variable.set(a_variable.get().upper())",
"def makeuppercase2(event: db_fn.Event[db_fn.Change]) -> None:\n\n # Only edit data when it is first created.\n if event.data.before is not None:\n return\n\n # Exit when the data is deleted.\n if event.data.after is None:\n return\n\n # Grab the value that was written to the Realtime Database.\n original = event.data.after\n if not hasattr(original, \"upper\"):\n print(f\"Not a string: {event.reference}\")\n return\n\n # Use the Admin SDK to set an \"uppercase\" sibling.\n print(f\"Uppercasing {event.params['pushId']}: {original}\")\n upper = original.upper()\n parent = db.reference(event.reference).parent\n if parent is None:\n print(\"Message can't be root node.\")\n return\n parent.child(\"uppercase\").set(upper)",
"def upper_vowel(s):\n for k, v in REPLACED_MAP.iteritems():\n s = s.replace(k, v)\n return s",
"def capitalize(self) -> String:\n pass",
"def _to_camel_case(text: str) -> str:\n return \"\".join(word.title() for word in text.split(\"_\"))",
"def catch_uppercase_tags(self, tag):\n raw = self.get_starttag_text()\n q = re.search(\"<\\s*(\\w+)\\s*\", raw)\n txt = q.groups()[0]\n for chr in txt:\n if chr.isupper():\n self.errmsg(\"Tags like '<%s>' containing uppercase letters are deprecated\"\n % (txt))\n break",
"def LCase(text):\n return text.lower()",
"def change_case(word):\n return word.upper() if case == \"upper\" else word.lower()",
"def get_casing(word):\n if len(word) == 0:\n return \"other\"\n elif word.isdigit(): # Is a digit\n return \"numeric\"\n elif word.islower(): # All lower case\n return \"allLower\"\n elif word.isupper(): # All upper case\n return \"allUpper\"\n # is a title, initial char upper, then all lower\n elif word[0].isupper():\n return \"initialUpper\"\n\n return \"other\"",
"def upper(self):\n return self._upper",
"def upper(string):\n new_string = '' # Empty string to append to\n for char in string: # Itterate over each character in user's string\n if char.isalpha() and not char.isupper(): # If the character is an alphabet and not already uppercase\n char = (chr(ord(char) - 32)) # Subtract 32 from it's ASCI value to get the uppercase alphabet\n if char.isalnum() or char == ' ': # Preserve spaces, and ignore special characters such as punctuation etc.\n new_string += char # Append capitalized characters and spaces to the new string\n return new_string # return the capitalized string",
"def detectCapitalUse(self, word):\n\n # Check for no upper or all upper\n if all(l.isupper() for l in word) or all(l.islower() for l in word):\n return True\n elif word[0].isupper() and word[1:].islower():\n return True\n else:\n return False",
"def proper_title_case(s):\n nocaps = [\"the\"] # This needs to be extended.",
"def capitalize_text(text, words_list, marker=''):\n for word in words_list:\n text = capitalize_term_re(text, word, marker)\n return text",
"def titleize(phrase):\n nl = phrase.split(' ')\n o=[]\n for i in nl:\n o.append(i[0].upper()+i[1:].lower())\n print(' '.join(o))",
"def mixed_pascal_case(value: str, **kwargs: Any) -> str:\n return capitalize(mixed_case(value))",
"def str_isupper__Rope(space, w_self):\n l = w_self._node.length()\n \n if l == 0:\n return space.w_False\n cased = False\n iter = rope.ItemIterator(w_self._node)\n for idx in range(l):\n c = iter.nextchar()\n if c.islower():\n return space.w_False\n elif not cased and c.isupper():\n cased = True\n return space.newbool(cased)",
"def is_capitalized(word):\n return word[0].isupper()",
"def snake_to_camel(text: str) -> str:\n\n data = [\n i.capitalize()\n for i in text.split(\"_\")\n ]\n return \"\".join(data)",
"def uppercase(string):\n\n return str(string).upper()",
"def setAcceptFirstUppercase(self, value):\n self.setBooleanOption(6, value)",
"def capitalize(self):\n return asarray(capitalize(self))",
"def _camelify(words):\n newText = ''\n for word in words:\n if newText == '':\n newText = word[:1].lower() + word[1:]\n else:\n newText = '%s%s' % (newText, word.capitalize())\n return newText",
"def manage_text(msg):\r\n msg = msg.upper()\r\n msg_final = \"\"\r\n for i in msg:\r\n if i.isalpha():\r\n msg_final += i\r\n return msg_final",
"def CamelCase(text, separator='_'):\n return ''.join(map(str.capitalize, text.split(separator)))",
"def title(value):\n capped = [char for char in string.capwords(value.replace(\"_\", \" \"))]\n\n # If a string also contains some letters after an apostrophe, we should capitalize that\n # letter... (ie: O'ryan's Charm -> O'Ryan's Charm).\n for index, char in enumerate(capped):\n if char is \"'\":\n if index + 1 <= len(capped):\n if capped[index + 2] != ' ':\n capped[index + 1] = capped[index + 1].upper()\n\n return \"\".join(capped)",
"def isupper(self):\n return isupper(self)",
"def PROPER(text):\n return text.title()",
"def remove_all_caps(text):\n return re.sub(r\"(\\b(?:[A-Z]+[a-z]?[A-Z]*|[A-Z]*[a-z]?[A-Z]+)\\b(?:\\s+(?:[A-Z]+[a-z]?[A-Z]*|[A-Z]*[a-z]?[A-Z]+)\\b)*)\",\n ' ', text)",
"def makeuppercase(event: db_fn.Event[Any]) -> None:\n\n # Grab the value that was written to the Realtime Database.\n original = event.data\n if not isinstance(original, str):\n print(f\"Not a string: {event.reference}\")\n return\n\n # Use the Admin SDK to set an \"uppercase\" sibling.\n print(f\"Uppercasing {event.params['pushId']}: {original}\")\n upper = original.upper()\n parent = db.reference(event.reference).parent\n if parent is None:\n print(\"Message can't be root node.\")\n return\n parent.child(\"uppercase\").set(upper)",
"def fix_string_case(text):\n fixed = []\n for i in text:\n if is_case_sensitive(i):\n fixed.append(i)\n else:\n fixed.append(i.lower())\n return ''.join(fixed)",
"def render_camel(var_words):\n return ''.join([word.capitalize() for word in var_words])",
"def uppersnakecase(string):\n\n return uppercase(snakecase(string))",
"def convert_underscore_to_camel_case(text):\n words = text.split('_')\n words = [word.capitalize() for word in words]\n return ''.join(words)",
"def to_upper_case(self, secret_without_spaces: str) -> str:\n\n return secret_without_spaces.upper()",
"def camel_case(text, separator='_'):\n return ''.join(map(str.capitalize, text.split(separator)))",
"def snake_case_text(text):\n newText = format_snake_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})",
"def change_title(s):\n\ts = re.sub(r\"[A-Za-z]+('[A-Za-z]+)?\",\n lambda mo: mo.group(0)[0].upper() +\n mo.group(0)[1:].lower(),s)\n\ts = s.split(\" \")\n\tfor i in range(len(s)):\n\t\tif (s[i] in \"Ii Iii Iv Vi Vii Viii Ix Ii: Iii: Iv: Vi: Vii: Viii: Ix:\"):\n\t\t\ts[i] = s[i].upper()\n\treturn \" \".join(s)",
"def camel_filter(val):\n titlecase = val.title()\n return re.sub(r\"[\\W^_]\", \"\", titlecase)",
"def titleize(phrase):\n words = phrase.split(' ')\n results = ''\n for i in range(len(words)):\n word = ''\n for j in range(len(words[i])):\n if(j == 0):\n word = words[i][j].upper() \n else:\n word += words[i][j].lower()\n results += word\n if(i < len(words) - 1):\n results += ' '\n return results",
"def LOWER(text):\n return text.lower()",
"def setAcceptAllUppercase(self, value):\n self.setBooleanOption(7, value)",
"def replace_word_candidate(self, word):\n capital_flag = word[0].isupper()\n word = word.lower()\n if capital_flag and word in self.teencode_dict:\n return self.replace_teencode(word).capitalize()\n elif word in self.teencode_dict:\n return self.replace_teencode(word)\n\n for couple in self.word_couples:\n for i in range(2):\n if couple[i] == word:\n if i == 0:\n if capital_flag:\n return couple[1].capitalize()\n else:\n return couple[1]\n else:\n if capital_flag:\n return couple[0].capitalize()\n else:\n return couple[0]",
"def lowercase_text(text):\n newText = format_lower_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})",
"def to_upper(df: DataFrame) -> DataFrame:\r\n return df.apply(lambda x: x.str.upper() if x.dtype == 'object' else x)",
"def correct_cap(title):\n try:\n fl = fln[title]\n return title\n except:\n #capitalize first letter only\n try:\n fl = fln[title[0].upper() + title[1:]]\n return title[0].upper() + title[1:]\n except:\n #try title case\n try:\n fl = fln[title.title()]\n return title.title()\n except KeyError:\n return \"\"",
"def get_uppercase(word_dict):\n\n all_keys = word_dict.keys()\n upper_keys = [key for key in all_keys if key[0][0].isupper()]\n\n return upper_keys",
"def test_evaluate_to_upper_expression(self):\n value = self.evaluate_common(\"toupper('Steve')\")\n self.assertTrue(\n value.type_code == edm.SimpleType.String, \"Expected String\")\n self.assertTrue(value.value == ul(\"STEVE\"))\n value = self.evaluate_common(ul(\"toupper('CAF\\xc9')\"))\n self.assertTrue(value.value == ul('CAF\\xc9'))\n value = self.evaluate_common(ul(\"toupper('caf\\xe9')\"))\n self.assertTrue(value.value == ul('CAF\\xc9'))\n try:\n value = self.evaluate_common(\"toupper(3.14F)\")\n self.fail(\"floating upper\")\n except odata.EvaluationError:\n pass\n try:\n value = self.evaluate_common(\"toupper('Steve','John')\")\n self.fail(\"2 parameters\")\n except odata.EvaluationError:\n pass",
"def firstupper(value):\n if not isinstance(value, (str, unicode,)): return value\n else: return mark_safe(value[0].upper() + value[1:])",
"def upperDisplay(self):\n return \"{0:g} ({1})\".format(self._param.toDisplay(self.upper),\n self._param.displayMagnitudeName())",
"def capitalize(self):\n\n file = open(self.filename, 'r')\n new_file = open(self.temp_filename, 'w')\n for line in file:\n new_line = \"\"\n for i in range(len(line)):\n char = line[i]\n if char == '@':\n line = line[:i + 1] + line[i + 1].upper() + line[i + 2:]\n new_line += char\n new_file.write(new_line)\n file.close()\n new_file.close()\n self.overwrite_file()",
"def clean_cases(text):\n return text.lower()",
"def camel_case(value: str, **kwargs: Any) -> str:\n result = \"\".join(map(str.title, split_words(value)))\n return result[0].lower() + result[1:]",
"def pascal_case(value: str, **kwargs: Any) -> str:\n return \"\".join(map(str.title, split_words(value)))"
] | [
"0.7926861",
"0.781187",
"0.7696189",
"0.75935656",
"0.7231077",
"0.7210327",
"0.70803845",
"0.6985208",
"0.6815186",
"0.67244554",
"0.6724301",
"0.66712606",
"0.66581935",
"0.6649381",
"0.664162",
"0.6625295",
"0.66139257",
"0.65889776",
"0.65846056",
"0.6550878",
"0.652874",
"0.649833",
"0.64698744",
"0.64543647",
"0.64520067",
"0.6414484",
"0.64110476",
"0.63691086",
"0.6359784",
"0.63391966",
"0.6282825",
"0.6276823",
"0.62660146",
"0.62263656",
"0.62089825",
"0.6199959",
"0.6164784",
"0.6116129",
"0.60938716",
"0.60632443",
"0.60537976",
"0.6044136",
"0.6009442",
"0.6002953",
"0.59842",
"0.5983304",
"0.59756523",
"0.59707576",
"0.59627223",
"0.59587073",
"0.593549",
"0.5923481",
"0.592091",
"0.59203684",
"0.5914092",
"0.5904428",
"0.58948964",
"0.5891211",
"0.58898365",
"0.58883333",
"0.58633417",
"0.5837636",
"0.5834747",
"0.5816658",
"0.580837",
"0.5802079",
"0.58007306",
"0.5783851",
"0.57838184",
"0.57765377",
"0.5749007",
"0.57426906",
"0.5735418",
"0.57346195",
"0.57248735",
"0.572065",
"0.5712604",
"0.5708128",
"0.56995785",
"0.568822",
"0.5685245",
"0.5683387",
"0.568158",
"0.5680729",
"0.56686854",
"0.56585044",
"0.56530243",
"0.56498915",
"0.5643477",
"0.5637433",
"0.56363636",
"0.5633603",
"0.56290495",
"0.5614147",
"0.56048983",
"0.55983204",
"0.5588556",
"0.5578199",
"0.5575759",
"0.55740225"
] | 0.7757802 | 2 |
Formats n words to the left of the cursor to upper case. Note that word count differs between editors and programming languages. The examples are all from Eclipse/Python. | def uppercase_count(n):
saveText = _get_clipboard_text()
cutText = _select_and_cut_text(n)
if cutText:
newText = cutText.upper()
newText = newText.replace("%", "%%") # Escape any format chars.
Text(newText).execute()
else: # Failed to get text from clipboard.
Key('c-v').execute() # Restore cut out text.
_set_clipboard_text(saveText) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pascal_case_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n text = _cleanup_text(cutText)\n newText = text.title().replace(' ', '')\n if endSpace:\n newText = newText + ' '\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)",
"def camel_case_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n text = _cleanup_text(cutText)\n newText = _camelify(text.split(' '))\n if endSpace:\n newText = newText + ' '\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)",
"def lowercase_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n newText = cutText.lower()\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)",
"def snake_case_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n text = _cleanup_text(cutText.lower())\n newText = '_'.join(text.split(' '))\n if endSpace:\n newText = newText + ' '\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)",
"def upper(value,n):\n return value.upper()[0:n]",
"def wrap_by_word(s, n):\n\ta = s.split()\n\tret = ''\n\tfor i in range(0, len(a), n):\n\t\tret += ' '.join(a[i:i+n]) + '\\n'\n\treturn ret",
"def print_upper_words(words):\n for word in words:\n print(word.upper())",
"def _set_number_of_words(self, N):\n self.N_words_to_display = N",
"def format_camel(text):\r\n\toutput = \"\"\r\n\tfor i,w in enumerate(text.split(\" \")):\r\n\t\tif i > 0:\r\n\t\t\toutput += w[0].upper() + w[1:]\r\n\t\telse:\r\n\t\t\toutput += w\r\n\treturn output",
"def print_upper_words(words):\n \n for word in words:\n print(word.upper())",
"def capitalize_text(text, words_list, marker=''):\n for word in words_list:\n text = capitalize_term_re(text, word, marker)\n return text",
"def titleize(phrase):\n words = phrase.split(' ')\n results = ''\n for i in range(len(words)):\n word = ''\n for j in range(len(words[i])):\n if(j == 0):\n word = words[i][j].upper() \n else:\n word += words[i][j].lower()\n results += word\n if(i < len(words) - 1):\n results += ' '\n return results",
"def get_number_of_words_with_capital_letters_and_lowercase(self):\n filename = f'{self.path}/{self.filename}'\n file = open(filename, 'r', encoding='utf-8')\n data = file.read()\n head, sep, tail = data.partition('<binary')\n head = re.sub('\\\\s\\\\s*', ' ', (re.sub('\\\\W|\\\\d', ' ', re.sub('<.*?>', '', head))))\n word_list = head.split()\n\n # upper_cnt = (sum([sum([c.isupper() for c in a]) for a in word_list]))\n # lower_cnt = (sum([sum([c.islower() for c in a]) for a in word_list]))\n\n upper_cnt = sum([a[0].isupper() for a in word_list])\n lower_cnt = (sum([a.islower() for a in word_list]))\n\n sqlite_for_ht.CreateTable.update_table(f_1, self.filename, 'words_with_capital_letters', upper_cnt)\n sqlite_for_ht.CreateTable.update_table(f_1, self.filename, 'words_in_lowercase', lower_cnt)\n print(datetime.now(), '-', 'words_with_capital_letters for', self.filename, 'calculated =', upper_cnt)\n print(datetime.now(), '-', 'words_in_lowercase for', self.filename, 'calculated =', lower_cnt)\n return None",
"def display_top_n_words(total_count__of_words, n): # Considering n=10 here as specified in the requirements\n return sorted(total_count__of_words.items(), key=lambda i: i[1], reverse=True)[:n]",
"def print_words(words):\n print('Here are the words you entered:')\n for i, word in enumerate(words, start=1):\n print('{}. {}'.format(i, word.title()))",
"def make_title(words):",
"def titleize(phrase):\n nl = phrase.split(' ')\n o=[]\n for i in nl:\n o.append(i[0].upper()+i[1:].lower())\n print(' '.join(o))",
"def expand_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n cutText = _expand_after_special_chars(cutText)\n reg = re.compile(\n r'([a-zA-Z0-9_\\\"\\'\\)][=\\+\\-\\*/\\%]|[=\\+\\-\\*/\\%][a-zA-Z0-9_\\\"\\'\\(])')\n hit = reg.search(cutText)\n count = 0\n while hit and count < 10:\n cutText = cutText[:hit.start() + 1] + ' ' + \\\n cutText[hit.end() - 1:]\n hit = reg.search(cutText)\n count += 1\n newText = cutText\n if endSpace:\n newText = newText + ' '\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)",
"def words_capital_letter(self, path, filemoving, parser):\n root = parser.parsing_xml(path, filemoving)\n root_tag = root.tag[0:(root.tag.find('}') + 1)]\n number_of_words_capital_letter = 0\n for i in root.iter(root_tag+'p'):\n if str(type(i.text)) == \"<class 'str'>\":\n number_of_words_capital_letter = number_of_words_capital_letter + sum(list(map(lambda x: x.istitle(), i.text.split())))\n return number_of_words_capital_letter",
"def wcount(lines, topn):\n word = ''\n for i in lines:\n if 65<=ord(i) and ord(i)<=90:\n word = word + i \n elif 97<=ord(i) and ord(i)<=122:\n word = word + i\n else:\n word = word + ' ' \n word = word.split()\n #提取不重复的单词\n alreadyknown = []\n for m in word:\n if m not in alreadyknown:\n alreadyknown.append(m)\n #分别数数,排序,建构字典\n empty = []\n final = {}\n final2 = {}\n for j in alreadyknown:\n number = icount(word,j)\n final[j]=number\n final2[str(number)]=j\n empty.append(number)\n empty.sort()\n empty.reverse()\n last_step = empty[:10]\n #通过数字找到对应word\n last_str = ''\n for y in last_step:\n z = final2[str(y)]\n last_str += z + \"\\t\" + str(y) + \"\\n\"\n return last_str",
"def test_capitalize_word(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n first line\n line 1\n Line a\n line b\n line c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"3.6\", \"3.6\"),\n after_sel=(\"3.6\", \"3.6\"),\n command_name=\"capitalize-word\",\n )",
"def proper_title_case(s):\n nocaps = [\"the\"] # This needs to be extended.",
"def render_camel(var_words):\n return ''.join([word.capitalize() for word in var_words])",
"def create_word(char_list):",
"def __process_word(self, word):\n output = ''\n capitals = self.__capital_positions(word)\n c_index = 0\n\n for c in word:\n if c_index in capitals:\n output += c.upper()\n else:\n output += c.lower()\n\n c_index += 1\n\n return output",
"def ladcased(normal):\r\n\r\n ladified = ''\r\n for i, c in enumerate(normal):\r\n ladified += c.lower() if (i % 2 == 0) else c.upper()\r\n\r\n return ladified",
"def test_upcase_word(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n first line\n line 1\n LINE a\n line b\n line c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"3.7\", \"3.7\"),\n after_sel=(\"3.7\", \"3.7\"),\n command_name=\"upcase-word\",\n )",
"def nth_word(value: str, n: int) -> str:\n return value.split()[n]",
"def word_capital(text):\n if text and len(text) > 0:\n return ' '.join([s[0].upper() + s[1:] for s in text.split(' ') if len(s) > 0])\n else:\n return text",
"def display_words(word_list,specifier):\n \n if specifier.lower() == 'score':\n print(\"{:>6s} - {:s}\".format(\"Score\", \"Word\"))\n if len(word_list) < 5:\n for tup in word_list:\n print(\"{:>6d} - {:s}\".format(tup[1], tup[0]))\n else:\n \n for tup in word_list[:5]:\n print(\"{:>6d} - {:s}\".format(tup[1], tup[0]))\n \n \n elif specifier.lower() == 'length':\n print(\"{:>6s} - {:s}\".format(\"Length\", \"Word\"))\n if len(word_list) < 5:\n for tup in word_list:\n print(\"{:>6d} - {:s}\".format(tup[2], tup[0]))\n else:\n \n for tup in word_list[:5]:\n print(\"{:>6d} - {:s}\".format(tup[2], tup[0]))",
"def longest_words(self, n=10):\n return sorted(set(self.text), key=len, reverse=True)[:n]",
"def squash_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n text = _cleanup_text(cutText)\n newText = ''.join(text.split(' '))\n if endSpace:\n newText = newText + ' '\n newText = _expand_after_special_chars(newText)\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)",
"def generate_text_owc(model: Dict[str, Set[str]], n: int) -> str:\n # ACCUMULATOR: a list of the randomly-generated words so far\n words_so_far = []\n # We've provided this template as a starting point; you may modify it as necessary.\n words_so_far.append(generate_new_word(model))\n for x in range(0, n-1):\n key = words_so_far[x]\n new_word = generate_next_word(model,key)\n if new_word == \".\":\n words_so_far[x] = words_so_far[x]+'.'\n new_word= generate_new_word(model)\n elif new_word == {}:\n new_word = generate_new_word(model)\n words_so_far.append(new_word)\n\n return str.join(' ', words_so_far)",
"def capify(text):\n return text[0].upper() + text[1:]",
"def pascal_case(value: str, **kwargs: Any) -> str:\n return \"\".join(map(str.title, split_words(value)))",
"def generate_words(self,N):\n for i in xrange(N):\n prefix = \" \" * self.chainlen\n name = \"\"\n suffix = \"\"\n while True:\n suffix = self.get_suffix(prefix)\n if suffix == \"\\n\" or len(name) > 9:\n break\n else:\n name = name + suffix\n prefix = prefix[1:] + suffix\n yield name.capitalize()",
"def clean_5_words(self):\n self.first_word.configure(text=\"\")\n self.second_word.configure(text=\"\")\n self.third_word.configure(text=\"\")\n self.fourth_word.configure(text=\"\")\n self.fifth_word.configure(text=\"\")",
"def counter(name):\n count_name = list(name)\n counter = 0\n for letter in count_name:\n counter += 1\n\n print(f\"There are {counter} letter in the name {name}.\")\n print(f\"\\tAnd btw... {name} backwards is {name[::-1].lower()}.\")",
"def format_words(words):\n return sorted(words, key=str.lower)",
"def count_words(s, n):\n\n # TODO: Count the number of occurences of each word in s\n words = s.lower().split()\n dict = {}\n\n for item in words:\n dict[item] = words.count(item)\n\n # TODO: Sort the occurences in descending order (alphabetically in case of ties)\n items = dict.items()\n\n items.sort(key=lambda tup: tup[0])\n items.sort(key=lambda tup: tup[1], reverse=True)\n\n # TODO: Return the top n words as a list of tuples (<word>, <count>)\n return items[:n]",
"def ucwords(string):\n erg=[ item.capitalize() for item in string.split( ' ' ) ]\n return ' '.join( erg )",
"def test20():\n\tdef highlight_word(sentence, word):\n\t\treturn(\" \".join(x) for x in sentence.split())\n\n\tprint(highlight_word(\"Have a nice day\", \"nice\"))\n\tprint(highlight_word(\"Shhh, don't be so loud!\", \"loud\"))\n\tprint(highlight_word(\"Automating with Python is fun\", \"fun\"))",
"def get_top_n_words(column, n):\r\n frequencies = Counter()\r\n column.str.lower().str.split().apply(frequencies.update)\r\n return frequencies.most_common(n)",
"def UPPER(text):\n return text.upper()",
"def pascal_case_text(text):\n newText = format_pascal_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})",
"def rule_uppercase_i(words):\n for i in range(0, len(words)):\n if words[i].text == 'i':\n words[i].text = 'I'\n return words",
"def shn_abbreviate(word, size=48):\n\n if word:\n if (len(word) > size):\n word = \"%s...\" % word[:size - 4]\n else:\n return word\n else:\n return word",
"def wcount(lines, topn = 10):\n global worddict\n worddict = {}\n # record words each line by each\n linestr = lines.readline().decode() \n while linestr:\n record(linestr)\n linestr = lines.readline().decode()\n \n # sort the worddict to construct a wordlist\n wordlist = sorted(worddict.items(),\\\n key=lambda x:x[1],reverse = True)\n \n # get all words if lenth is less than number\n print(' '*3+'Word'.ljust(30),'Times'.center(10))\n for num in range(min(len(wordlist),topn)):\n print(' '*3+wordlist[num][0].ljust(30),\\\n str(wordlist[num][1]).center(10))",
"def titleize(title):\n titleized = []\n for idx, word in enumerate(title.split()):\n if idx == 0 or word not in ['a', 'of', 'in', 'the', 'v']:\n word = word.capitalize()\n\n titleized.append(word)\n\n return ' '.join(titleized)",
"def rotate_word(s1, n):\n s2 = ''\n for c in s1:\n i = (ord(c)-97+n) % 26\n ch = chr(i+97)\n s2 = s2 + ch\n return s2",
"def make(text=input()):\n alp = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'\n box = []\n dic = dict()\n val = 0\n #collect alphabets into list\n for i in alp:\n if i in text:\n dic[i] = text.count(i)\n box.append(i)\n if text.count(i) > val:\n val = text.count(i)\n else:\n val = val\n for i in range(val, 0, -1):\n print(\"%03d \"%i, end=\"\")\n for wow in sorted(dic, key=str.swapcase):\n if dic[wow] >= i:\n print(\"*\", end=\" \")\n else:\n print(\" \", end=\" \")\n print()\n print(\" \", *box, sep=\" \")",
"def wc(file_):\r\n with open(file_) as f:\r\n file = f.read().strip()\r\n char_nums = len(file)\r\n lines = file.split('\\n')\r\n line_nums = len(lines)\r\n word_nums = 0\r\n for line in lines:\r\n words = line.split()\r\n word_nums += len(words)\r\n return f'{line_nums} {word_nums} {char_nums} {file_}'",
"def _camelify(words):\n newText = ''\n for word in words:\n if newText == '':\n newText = word[:1].lower() + word[1:]\n else:\n newText = '%s%s' % (newText, word.capitalize())\n return newText",
"def wcount(lines, topn=10):\n words=lines.lower()\n words=words.replace('.', '')\n words=words.replace(',', ' ')\n words=words.replace('!', ' ')\n words=words.replace('?', ' ')\n words=words.replace(':', ' ')\n words=words.replace('_', ' ')\n words=words.replace('\"', ' ')\n words=words.replace(\"'\", ' ')\n words=words.replace('(', ' ')\n words=words.replace(')', ' ')\n words=words.replace('[', ' ')\n words=words.replace(']', ' ')\n words=words.replace('-', ' ')\n words=words.replace(';', ' ')\n words=words.replace('\"', ' ')\n words=words.replace('*', ' ')\n lst=words.split(' ')\n lst2=list(set(lst))\n lst2.remove('')\n dic={}\n for i in lst2:\n dic[i]=lst.count(i)\n wds=list(dic.keys())\n numbers=list(dic.values())\n numbers2=sorted(numbers, reverse=True)\n for k in range(topn):\n m=numbers.index(numbers2[k])\n print(\"%-15s%-5d\"%(wds[m],numbers2[k]))",
"def convert_camel_case_to_underscore(text):\n words = []\n last_capital_index = 0\n\n for current_letter_index in xrange(1, len(text)):\n if text[current_letter_index].isupper():\n words.append(text[last_capital_index:current_letter_index])\n last_capital_index = current_letter_index\n elif current_letter_index == len(text) - 1:\n words.append(text[last_capital_index:])\n\n return '_'.join(words).lower()",
"def test_downcase_word(self):\n before_b = \"\"\"\\\n XYZZY line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n xyzzy line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"1.4\", \"1.4\"),\n after_sel=(\"1.4\", \"1.4\"),\n command_name=\"downcase-word\",\n )",
"def remove_morethan2letters(text):\n words = text.split();\n n = len(words)\n for i in range(0,n):\n words[i] = util_func(words[i])\n \n return \" \".join(words)",
"def random_text(n):\n start = random.choice(suffix_map.keys())\n for i in range(n):\n suffixes = suffix_map.get(start, None)\n if suffixes == None:\n # if the start isn't in map, we got to the end of the\n # original text, so we have to start again.\n random_text(n-i)\n return\n # choose a random suffix\n word = random.choice(suffixes)\n # Jodesty: *Need to learn how to format text output to fit on terminal screen\n output_words.append(word)\n # Jodesty: *what I have for now\n print word,\n start = shift(start, word)",
"def convert_pascal_case_to_readable_format(text):\n return \"\".join(list(map(lambda x : \" \" + x.lower() if x.isupper() else x, list(text))))",
"def profanity_word_handler(word):\n return word[0] + ''.join([settings.CENSOR_PROFANITY_REPLACEMENT_CHARACTER for I in range(len(word)-2)]) + word [-1]",
"def change_title(s):\n\ts = re.sub(r\"[A-Za-z]+('[A-Za-z]+)?\",\n lambda mo: mo.group(0)[0].upper() +\n mo.group(0)[1:].lower(),s)\n\ts = s.split(\" \")\n\tfor i in range(len(s)):\n\t\tif (s[i] in \"Ii Iii Iv Vi Vii Viii Ix Ii: Iii: Iv: Vi: Vii: Viii: Ix:\"):\n\t\t\ts[i] = s[i].upper()\n\treturn \" \".join(s)",
"def _nth_letter(n):\r\n\treturn string.ascii_lowercase[n % len(string.ascii_lowercase)]",
"def make_display_word(secret_word):\n return ('_ ' * len(secret_word))",
"def camel_case_text(text):\n newText = format_camel_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})",
"def print_word_frequency_list(my_word_frequency_list):\n for word in my_word_frequency_list:\n print(\"{} {}\".format(word[0].ljust(5), word[1]))\n\n print('\\n')",
"def words(string):\n string = upper(string) # pass the string to the upper function to capitalize it\n string_list = string.split() # split the string by spaces into a list\n \n for i in range(len(string_list)): # Itterate over words in the list\n if len(string_list) >= 1: # Edgecase (if userinput is too short)\n if string_list[i] == 'EASY': # Check if the string \"EASY\" appears in the list\n string_list[i] = 'EZ' # Replace it with EZ\n\n elif string_list[i] == 'NICE': # Check if the string \"NICE\" appears in the list\n string_list[i] = 'NYC' # Replace it with \"NYC\"\n\n elif string_list[i] == 'LATER': # Check if the string \"LATER\" appears in the list\n string_list[i] = 'L8R' # Check if the string \"L8R\" appears in the list\n\n elif string_list[i] == 'THANKS': # Check if the string \"THANKS\" appears in the list\n string_list[i] = 'TY' # Replace it with \"TY\"\n \n new_string = ' '.join(string_list) # Join the list to a string\n return new_string # Return the new string",
"def uppercase_text(text):\n newText = format_upper_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})",
"def capitalize1(s):\n return s[:1].upper() + s[1:]",
"def UCase(text):\n return text.upper()",
"def motion_W(input_line, cur, count):\n pos = get_pos(input_line, REGEX_MOTION_UPPERCASE_W, cur, True, count)\n if pos == -1:\n return len(input_line), False, False\n return cur + pos, False, False",
"def underToAllCaps(value): # pragma: no cover\n return ' '.join(map(lambda x: x.title(), value.split('_')))",
"def getWordScore(word, n):\n score=0\n for i in range(len(word)):\n addition=SCRABBLE_LETTER_VALUES[word[i]]\n score+=addition*(len(word))\n if len(word)==n:\n score+=50\n return score",
"def pascal_case(st) -> str:\n if st.find('-') != -1 or st.find('_') != -1:\n st = ''.join(a.capitalize() for a in re.split('-|_', st))\n return st[:1].upper() + st[1: len(st)]",
"def _truncate_name(orig_str, word_num):\n if not orig_str:\n return orig_str\n tokens = string_utils.tokenizer(orig_str)\n if len(tokens) > word_num:\n orig_str = ' '.join(tokens[:word_num])\n return orig_str",
"def correctCasing(words):\n strings = words.split(' ')\n strings = [s[0].upper()+s[1:].lower() for s in strings if s]\n return ' '.join(strings)",
"def _top_n_words(n, f_name):\n word_dict, idx_dict, word_cnt = _extract_words(f_name)\n print (\"number of words: %d\" % len(word_cnt))\n n = min(len(word_cnt), n)\n np_cnt = np.array(word_cnt)\n idx = np.argpartition(np_cnt, -n)[-n:]\n res = []\n for i in idx:\n res.append((idx_dict[i], np_cnt[i]))\n res.sort(key=lambda t: t[1], reverse=True)\n return res",
"def pretty_print_order(count_list,word):\n\tfor i in range(len(word)):\n\t\tif (count_list[ord(word[i].lower())-ord('a')]) > 0:\n\t\t\tprint(word[i],count_list[ord(word[i].lower())-ord('a')],sep = \": \", end =\"\\n\")\n\t\t\tcount_list[ord(word[i].lower())-ord('a')] = 0",
"def test_run():\n print count_words(\"cat bat mat cat bat cat\", 3)\n print count_words(\"betty bought a bit of butter but the butter was bitter\", 3)",
"def test_run():\n print count_words(\"cat bat mat cat bat cat\", 3)\n print count_words(\"betty bought a bit of butter but the butter was bitter\", 3)",
"def PROPER(text):\n return text.title()",
"def getWordScore(word, n):\n score = 0\n for letter in word:\n score += SCRABBLE_LETTER_VALUES[letter]\n score *= len(word)\n if len(word) == n:\n score += 50\n return score",
"def test_run():\r\n print(count_words(\"cat bat mat cat bat cat\", 3))\r\n print(count_words(\"betty bought a bit of butter but the butter was bitter\", 3))",
"def create_word(self):\r\n\r\n template = self.word_constructions.get()\r\n word = \"\"\r\n for c in template:\r\n if c == \"v\":\r\n letter = self.get_letter(100)\r\n else:\r\n letter = self.get_letter(0)\r\n word += letter\r\n\r\n while not any(letter in self.vowels for letter in word):\r\n length = len(word)\r\n if length == 1:\r\n index = 0\r\n elif length == 2:\r\n index = random.randrange(0, 2)\r\n else:\r\n a = len(word) / 2\r\n index = a + random.randrange(-a / 2, a / 2)\r\n word = word[:index] + self.get_letter(100) + word[index + 1:]\r\n\r\n if random.random() > self.capital_chance:\r\n word = word.capitalize()\r\n self.words.append(word)\r\n self.word_count += 1\r\n return word",
"def update_syllable_count(word, syll_count):\n\n syllables = word.split('-')\n for i in range(1, 4):\n for j in range(len(syllables) - i + 1):\n gram = '-'.join(syllables[j: j + i])\n count = syll_count.setdefault(gram, 0)\n syll_count[gram] = count + 1",
"def most_common_words(n):\n with open(os.path.join('visualization', 'vocab.tsv')) as fd:\n words = fd.readlines()[:n]\n words = [word for word in words]\n save_path = os.path.join('visualization', 'vocab_' + str(n) + '.tsv')\n with open(save_path, 'w') as fd:\n for word in words:\n fd.write(word)",
"def count_words(s, n):\n \n strList = s.split(' ');#to split the string into a list of words\n rList = [];#to store the each word only once\n nList = [];#to store how many times each word has occured\n for i in range(len(strList)):\n if ((strList[i] in rList)==False):\n rList.append(strList[i]);\n nList.append(int(1));\n else:\n for j in range(len(rList)):\n if (strList[i]==rList[j]):\n nList[j]=nList[j]+1;\n \n tList = list();#a new empty tuple list\n for i in range(len(rList)):\n tList.append((rList[i],nList[i]));#construct the tuple list from rList and nList\n \n tList.sort(key=lambda tList: (-tList[1], tList[0]));#sort the tuple list: first by its 2nd element in reverse order \"-\", then sort by its 1st element in non-reverse order, no \"-\"\n \n # for testing\n #for i in tList:\n # print i;\n \n \n # TODO: Count the number of occurences of each word in s\n \n # TODO: Sort the occurences in descending order (alphabetically in case of ties)\n \n # TODO: Return the top n words as a list of tuples (<word>, <count>)\n \n return tList[:n] #return the first n pairs of elements as required",
"def count_words(s, n):\r\n list_of_words=get_listOfWords(s)\r\n res=wrap_with_freq_toList(list_of_words)\r\n res=sortit(res)\r\n top_n=res[0:n]\r\n return top_n\r\n \r\n # TODO: Count the number of occurences of each word in s\r\n # TODO: Sort the occurences in descending order (alphabetically in case of ties)\r\n # TODO: Return the top n words as a list of tuples (<word>, <count>)\r",
"def spin_words(sentence):\n\n words = sentence.split()\n words = [word if len(word) < 5 else word[::-1] for word in words]\n return \" \".join(words)",
"def get_word_score(word, n=7):\n score = 0\n\n for i in word:\n score += SCRABBLE_LETTER_VALUES[i]\n\n if len(word) == n:\n score += 50\n\n return score",
"def too_long_words(word):\n\n # If work is longer than 10 letters, print the word according to these rules\n if len(word) > 10:\n print word[0] + str(len(word[1:-1])) + word[-1]\n\n else:\n print word",
"def get_word(wordlist, args): #{{{\n iters = 0\n while iters < 500:\n if args.lowercase == True:\n word = random.choice(wordlist).strip().lower()\n return word\n elif args.lowercase == False:\n word = random.choice(wordlist).strip().lower().capitalize()\n return word\n\n if args.punctuation == False:\n if len(word) < args.max_length and word.isalpha() == True:\n return word\n iters += 1\n elif args.punctuation == True:\n if len(word) < args.max_length:\n return word\n iters += 1 #}}}",
"def make_text(chains, n):\n\n words = []\n\n capital_keys = [key for key in chains.keys() if key[0][0].isupper() and chains[key] != None]\n first_key = choice(capital_keys)\n\n words.extend(list(first_key))\n rand_value = choice(chains[first_key])\n words.append(rand_value)\n\n current_string = \" \".join(words)\n\n i = 1\n while len(current_string) < 140:\n current_string = \" \".join(words)\n new_key = tuple(words[i: i + n])\n if not chains[new_key]:\n break\n else:\n rand_value = choice(chains[new_key])\n words.append(rand_value)\n i += 1\n\n return current_string",
"def print_top_words(components, feature_names, n_top_words: int = 10):\n for topic_idx, topic in enumerate(components):\n message = \"Topic #%d: \" % topic_idx\n message += \" \".join(\n [feature_names[i] for i in topic.argsort()[: -n_top_words - 1 : -1]]\n )\n print(message)\n print()",
"def capitalize(self) -> String:\n pass",
"async def word_counter_most_common_n(self, ctx, n=10):\n count = n if n <= MAX_COUNT else MAX_COUNT\n word_table = tabulate(self.word_counter.most_common(n), headers=[\"Word\", \"Count\"])\n message = f\"\"\"\n{ctx.author.mention} the most common {n} words are:\n\n```\n{word_table}\n```\n \"\"\"\n\n embed = discord.Embed(description=message)\n await ctx.send(embed=embed)",
"def capwords(s, sep=None):\n if sep is None:\n sep = ' '\n return sep.join(x.capitalize() for x in s.split(sep))\n #return (sep or ' ').join(x.capitalize() for x in s.split(sep))",
"def print_upper_words3(words, must_start_with):\n for word in words:\n for letter in must_start_with:\n if word.startswith(letter):\n print(word.upper())\n break",
"def add_word_count(self):\n self.dataframe['word_count'] = self.dataframe.letter.str.count('\\w+')",
"def just_do_it(text):\n from string import capwords\n return capwords(text)",
"def upper(self) -> String:\n pass"
] | [
"0.7106559",
"0.6925886",
"0.6874002",
"0.66543984",
"0.64259547",
"0.60723484",
"0.6036499",
"0.6025138",
"0.59689647",
"0.5953917",
"0.58584684",
"0.5850111",
"0.58310604",
"0.58229566",
"0.5809339",
"0.57956696",
"0.5791785",
"0.5782367",
"0.57508373",
"0.5699419",
"0.56948966",
"0.5678806",
"0.5673155",
"0.56342334",
"0.5627989",
"0.5619573",
"0.56096303",
"0.5608581",
"0.56008816",
"0.55885905",
"0.5568169",
"0.55665183",
"0.555957",
"0.5552279",
"0.5549417",
"0.5547728",
"0.5528099",
"0.55249125",
"0.5520119",
"0.55184823",
"0.5513075",
"0.55016553",
"0.54855025",
"0.5474407",
"0.5460239",
"0.54600745",
"0.5440337",
"0.5438166",
"0.5435821",
"0.5427597",
"0.5416415",
"0.5403085",
"0.5402018",
"0.54016066",
"0.5397374",
"0.5396128",
"0.53952926",
"0.5373601",
"0.53681797",
"0.5342021",
"0.53367656",
"0.53327394",
"0.53208673",
"0.53107536",
"0.5310501",
"0.5294619",
"0.5290428",
"0.5279179",
"0.52788943",
"0.5275986",
"0.5265402",
"0.52631587",
"0.5261026",
"0.525743",
"0.52566624",
"0.5255572",
"0.5241342",
"0.5227707",
"0.5227707",
"0.5226966",
"0.5226842",
"0.52248096",
"0.5223008",
"0.5208441",
"0.52038264",
"0.5202822",
"0.5199341",
"0.5199136",
"0.519502",
"0.5191567",
"0.5186547",
"0.5185767",
"0.51843095",
"0.5182006",
"0.51768047",
"0.51656216",
"0.5162241",
"0.5160465",
"0.51579064",
"0.5155862"
] | 0.6954752 | 1 |
Formats dictated text to lower case. | def lowercase_text(text):
newText = format_lower_case(text)
Text("%(text)s").execute({"text": newText}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def to_lower(self, text):\n return text.lower()",
"def LOWER(text):\n return text.lower()",
"def LCase(text):\n return text.lower()",
"def lower(text):\n text = text.lower()\n return text",
"def toLowerCase(self) -> None:\n self.text = self.text.lower()",
"def normalize_case(text):\n text = str(text)\n return text.lower()",
"def _lowercase(text: str) -> str:\n return text.lower()",
"def _format(text):\n \n if isinstance(text, unicode):\n return text.lower().encode(\"UTF-8\")\n elif isinstance(text, str):\n return text.lower()",
"def lowerCase(self,phrase):\n if(\"normalizeText\" in self._classes):\n return self._normalize.lowerCase(phrase)",
"def preprocess(text):\n return text.lower()",
"def lower(self) -> str:",
"def lower(self, value):\n return self.text(value).lower()",
"def snake_case_text(text):\n newText = format_snake_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})",
"def to_lowercase(text: str) -> str:\n text = text.lower()\n return text",
"def _transform_to_lowercase(self, doc: str):\n processed_tweet = doc.lower()\n return processed_tweet",
"def lower_case_really():",
"def tr_upper_to_lower(text):\n out = []\n for ch in text:\n if ch in tr_upper_to_lower_dict:\n out.append(tr_upper_to_lower_dict[ch])\n else:\n out.append(ch.lower())\n \n return \"\".join(out)",
"def preprocess_text(self):\n self.text_received = self.text_received.replace(\" \", \"\").lower()",
"def lowercase(raw_text):\n lowercase_text = [text.lower() for text in raw_text]\n return lowercase_text",
"def clean_cases(text):\n return text.lower()",
"def camel_case_text(text):\n newText = format_camel_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})",
"def convert_pascal_case_to_readable_format(text):\n return \"\".join(list(map(lambda x : \" \" + x.lower() if x.isupper() else x, list(text))))",
"def pascal_case_text(text):\n newText = format_pascal_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})",
"def lower(value): # Only one argument.\n return value.lower()",
"def lower(value): # Only one argument.\n return value.lower()",
"def lower(value): # Only one argument.\n return value.lower()",
"def fix_string_case(text):\n fixed = []\n for i in text:\n if is_case_sensitive(i):\n fixed.append(i)\n else:\n fixed.append(i.lower())\n return ''.join(fixed)",
"def lowercase(self, value):\n return value.lower()",
"def lowercase_name(name):\n return name.lower()",
"def _camel_case_to_snake_case(text: str) -> str:\n return re.sub(r\"(?<!^)(?=[A-Z])\", \"_\", text).lower()",
"def lower_caser(self, sentence):\n return sentence.strip().lower()",
"def lower(value): # Only one argument.\n return value.lower()",
"def _lowercase(self, form):\n if form is None or form in ['I', 'OK'] or form.startswith('X-'):\n return form\n return form.lower()",
"def lower(value):\n return value.lower()",
"def proper_title_case(s):\n nocaps = [\"the\"] # This needs to be extended.",
"def lower(self) -> String:\n pass",
"def _lower(dictionary: dict):\n return {key.lower(): value.lower() for key, value in dictionary.items()}",
"def handle_sentence_simple(self, sentence, ctxinfo):\n global text_version\n global moses_version\n global lower_attr\n \n for w in sentence :\n setattr(w, lower_attr, getattr(w, lower_attr).lower())\n self.chain.handle_sentence(sentence, ctxinfo)",
"def lower(self):\n return self._lower",
"def to_lowercase(self):\n new_words = []\n # new_words = \"\"\n for word in self.words:\n new_word = word.lower()\n new_words.append(new_word)\n # new_word += f\"{new_word} \"\n self.words = new_words\n return self",
"def lower(self, text_list):\n return [text.lower() for text in text_list]",
"def UCase(text):\n return text.upper()",
"def case(text, casingformat='sentence'):\n\n # If the lowercase version of the casing format is 'uppercase'\n if casingformat.lower() == 'uppercase':\n # Return the uppercase version\n return str(text.upper())\n\n # If the lowercase version of the casing format is 'lowercase'\n elif casingformat.lower() == 'lowercase':\n # Return the lowercase version\n return str(text.lower())\n\n # If the lowercase version of the casing format is 'sentence'\n elif casingformat.lower() == 'sentence':\n # Return the sentence case version\n return str(text[0].upper()) + str(text[1:])\n\n # If the lowercase version of the casing format is 'caterpillar'\n elif casingformat.lower() == 'caterpillar':\n # Return the caterpillar case version\n return str(text.lower().replace(\" \", \"_\"))\n\n # Raise a warning\n raise ValueError(\"Invalid text format specified.\")",
"def _apply_lowercase(sentence_tokens):\n return [token.lower() for token in sentence_tokens]",
"def lowercase(str):\n \n return str.lower()",
"def camel_case_to_readable(text: str) -> str:\n if text == 'id':\n return 'ID'\n return ''.join(' ' + char if char.isupper() else char.strip() for char in text).strip().title()",
"def lower(self):\n self.method = self.method.lower()\n if self.basis is not None:\n self.basis = self.basis.lower()\n if self.auxiliary_basis is not None:\n self.auxiliary_basis = self.auxiliary_basis.lower()\n if self.dispersion is not None:\n self.dispersion = self.dispersion.lower()\n if self.cabs is not None:\n self.cabs = self.cabs.lower()\n if self.method_type is not None:\n self.method_type = self.method_type.lower()\n if self.software is not None:\n self.software = self.software.lower()\n if isinstance(self.software_version, str):\n self.software_version = self.software_version.lower()\n if self.solvation_method is not None:\n self.solvation_method = self.solvation_method.lower()\n if self.solvent is not None:\n self.solvent = self.solvent.lower()\n\n args = {'keyword': dict(), 'block': dict()}\n\n # 1st level dict, set self.args in place\n if isinstance(self.args, (list, tuple)):\n for arg in self.args:\n if not isinstance(arg, str):\n raise ValueError(f'All entries in the args argument must be strings.\\n'\n f'Got {arg} which is a {type(arg)} in {self.args}.')\n self.args = ' '.join([arg.lower() for arg in self.args])\n if isinstance(self.args, str):\n self.args = {'keyword': {'general': args.lower()}, 'block': dict()}\n elif self.args is not None and not isinstance(args, dict):\n raise ValueError(f'The args argument must be either a string, an iterable or a dictionary.\\n'\n f'Got {self.args} which is a {type(self.args)}.')\n\n # 2nd level dict, set in args, then transfer to self.args\n for key1, val1 in self.args.items():\n args[key1.lower()] = dict()\n if isinstance(val1, dict):\n for key2, val2 in val1.items():\n new_val2 = str(val2) if isinstance(val2, (int, float)) else val2\n if not isinstance(new_val2, str):\n raise ValueError(f'All entries in the args argument must be str, int, or float types.\\n'\n f'Got {new_val2} which is a {type(new_val2)} in {self.args}.')\n args[key1.lower()][key2.lower()] = new_val2.lower()\n elif isinstance(val1, str):\n args[key1.lower()]['general'] = val1.lower()\n elif isinstance(val1, (list, tuple)):\n for v1 in val1:\n if not isinstance(v1, str):\n raise ValueError(f'All entries in the args argument must be strings.\\n'\n f'Got {v1} which is a {type(v1)} in {self.args}.')\n args['keyword']['general'] = ' '.join([v1.lower() for v1 in val1])\n else:\n raise ValueError(f'Values of the args dictionary must be either dictionaries, strings, or lists, '\n f'got {val1} which is a {type(val1)}.')\n\n self.args = args",
"def normalize(text):\n return text.lower().translate(TRANSLATION_TABLE)",
"def setToLowercase(self, value):\n return self._set(toLowercase=value)",
"def test_lower_case():\n assert TextCleaner().transform([[\"TEST\"]])[\"corpus\"][0] == \"test\"",
"def lower_case(value):\n return value.lower()",
"def _lower(self, mapping):\n _mapping = {}\n for k, v in sorted(mapping.items()):\n k = k.lower()\n if k not in _mapping:\n _mapping[k] = v\n return _mapping",
"def setLowercase(self, value):\n return self._set(lowercase=value)",
"def setLowercase(self, value):\n return self._set(lowercase=value)",
"def first_lower(self, s):\n if len(s) == 0:\n return s\n else:\n return s[0].lower() + s[1:]",
"def _case_insensitive(s: str):\n return s.lower()",
"def pascal_case(value: str, **kwargs: Any) -> str:\n return \"\".join(map(str.title, split_words(value)))",
"def snakecase_to_sentencecase(name: str) -> str:\n return stringcase.sentencecase(name).lower()",
"def snake_case(value: str, **kwargs: Any) -> str:\n return \"_\".join(map(str.lower, split_words(value)))",
"def normalize_text(text):\n # mystring.replace('\\n', ' ').replace('\\r', '')\n return text.replace('\\n', ' ').replace('\\r', '').lower()",
"def good_word(self, word):\r\n return word.strip().lower()",
"def standardize(text):\n # FIXME regex restricts us to only ascii\n # FIXME move regex compilation outside\n p = re.compile('[^a-zA-Z]')\n retval = p.sub('', text)\n retval = retval.lower()\n return retval",
"def process(self, s):\n # modified for project...\n return s.upper()",
"def only_lowercase(text):\n\tnot_allowed = string.punctuation + string.whitespace + string.digits\n\ttext2 = [each for each in text if each not in not_allowed]\n\ttext2 = ''.join(text2)\n\treturn text2.lower()",
"def change_case(word):\n return word.upper() if case == \"upper\" else word.lower()",
"def filter_lowercase(self, string):\n newstring = string.lower()\n return newstring",
"def lowercase_on_corpus(text_corpus):\n\n text_corpus[text_column_name] = text_corpus[\n text_column_name].apply(lower_case)\n return text_corpus",
"def islower(self):\n return islower(self)",
"def no_caps_and_ponctuation(text):\n return re.sub(r'[^\\w\\s]', '', text).lower()",
"def SpssLower(*args):\n\tSpssMapToVar(\"lower\", args)\n\t# Does not perform EXECUTE",
"def CamelCase_to_snake_case(text):\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', text)\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()",
"def titleize(phrase):\n nl = phrase.split(' ')\n o=[]\n for i in nl:\n o.append(i[0].upper()+i[1:].lower())\n print(' '.join(o))",
"def lower_with_proper(text):\n\n proper_flags = NOUN_MAP['proper']\n lower_proper = []\n for word in map(str.lower, text.split()):\n #: '@' is a flag for special words\n if word.startswith('@'):\n word = word[1:]\n if any(flag.lower() in word for flag in proper_flags):\n word = word.upper()\n lower_proper.append(word)\n lower_proper = ' '.join(lower_proper)\n return lower_proper",
"def _transliterate_text(self, _text):\n return _text.upper()",
"def str_islower__Rope(space, w_self):\n l = w_self._node.length()\n \n if l == 0:\n return space.w_False\n cased = False\n iter = rope.ItemIterator(w_self._node)\n for idx in range(l):\n c = iter.nextchar()\n if c.isupper():\n return space.w_False\n elif not cased and c.islower():\n cased = True\n return space.newbool(cased)",
"def mixed_pascal_case(value: str, **kwargs: Any) -> str:\n return capitalize(mixed_case(value))",
"def force_title_case(etl, field_names, **kwargs):\r\n import arcetl\r\n func = functools.partial(\r\n etl.transform, transformation=arcetl.attributes.update_by_function,\r\n function=value.force_title_case, **kwargs\r\n )\r\n tuple(func(field_name=name) for name in field_names)",
"def isLowercase(self, config):\n\t\treturn self.LOWERCASE & config",
"def normalize_text(w):\n return str(w, \"utf-8\").lower().replace(\"-\", \"\")",
"def normalize_word(word):\n\n return word.lower()",
"def is_case_sensitive(text):\n return text.lower() in AVRO_CASESENSITIVES",
"def lowercase_well_known_word(text):\n lines = []\n lines_append = lines.append\n for line in text.splitlines(True):\n words = []\n words_append = words.append\n for word in line.split():\n if word in COMMON_WORDS:\n word = word.lower()\n words_append(word)\n lines_append(' '.join(words))\n return '\\n'.join(lines)",
"def normalize_answer(text):\n return ' '.join(re.findall(r\"\\w+\", text)).lower()",
"def replace_contractions(self, text, lower=False):\n\n # replace words with contraction according to the contraction_dict\n if lower:\n contraction_dict = self.contraction_dict_lower\n else:\n contraction_dict = self.contraction_dict\n\n if text.strip() in contraction_dict.keys():\n text = contraction_dict[text.strip()]\n\n # replace words with \"'ve\" to \"have\"\n matches = re.findall(r'\\b\\w+[\\'`´]ve\\b', text)\n if len(matches) != 0:\n text = re.sub(r'[\\'`´]ve\\b', \" have\", text)\n\n # replace words with \"'re\" to \"are\"\n matches = re.findall(r'\\b\\w+[\\'`´]re\\b', text)\n if len(matches) != 0:\n text = re.sub(r'[\\'`´]re\\b', \" are\", text)\n\n # replace words with \"'ll\" to \"will\"\n matches = re.findall(r'\\b\\w+[\\'`´]ll\\b', text)\n if len(matches) != 0:\n text = re.sub(r'[\\'`´]ll\\b', \" will\", text)\n\n # replace words with \"'m\" to \"am\"\n matches = re.findall(r'\\b\\w+[\\'`´]m\\b', text)\n if len(matches) != 0:\n text = re.sub(r'[\\'`´]m\\b', \" am\", text)\n\n # replace words with \"'d\" to \"would\"\n matches = re.findall(r'\\b\\w+[\\'`´]d\\b', text)\n if len(matches) != 0:\n text = re.sub(r'[\\'`´]d\\b', \" would\", text)\n\n # replace all \"'s\" by space\n matches = re.findall(r'\\b\\w+[\\'`´]s\\b', text)\n if len(matches) != 0:\n text = re.sub(r'[\\'`´]s\\b', \" \", text)\n\n return text",
"def normalize_txt(txt):\n return unicodedata.normalize('NFD', txt).encode('ascii', 'ignore').decode('utf-8', 'ignoree').lower()",
"def lower_case(filename):\n return filename.lower()",
"def make_alphabetic(text):\n text = re.sub(r'[^A-Za-z\\s]', '', text)\n return text.lower()",
"def force_lowercase(etl, field_names, **kwargs):\r\n import arcetl\r\n func = functools.partial(\r\n etl.transform, transformation=arcetl.attributes.update_by_function,\r\n function=value.force_lowercase, **kwargs\r\n )\r\n tuple(func(field_name=name) for name in field_names)",
"def __process_word(self, word):\n output = ''\n capitals = self.__capital_positions(word)\n c_index = 0\n\n for c in word:\n if c_index in capitals:\n output += c.upper()\n else:\n output += c.lower()\n\n c_index += 1\n\n return output",
"def preprocess(self, s):\n stripped = re.sub(\"[^\\w\\s]\", \"\", s)\n stripped = re.sub(\"_\", \"\", stripped)\n\n stripped = re.sub(\"\\s+\", \" \", stripped)\n\n stripped = stripped.strip()\n\n return stripped.lower()",
"def process_text(text, args):\n if args.uppercase:\n text = convert_to_uppercase(text)\n\n if args.spaces:\n text = add_spaces(text)\n\n if not args.uppercase and not args.spaces:\n text = add_spaces(text)\n\n return text",
"def _nice_case(line):\n line_lower = line.lower()\n s = \"\"\n i = 0\n nextCap = 1\n while i < len(line_lower):\n c = line_lower[i]\n if c >= \"a\" and c <= \"z\" and nextCap:\n c = c.upper()\n nextCap = 0\n elif c in \" .,;:\\t-_\":\n nextCap = 1\n s += c\n i += 1\n return s",
"def detect_case(text):\n\n parts = split_by_case(text, 'underscore')\n if not parts:\n # text is collection of underscores\n return 'other'\n\n if not all(part.isalnum() for part in parts):\n # one or more text part contains not alpha-numeric characters\n return 'other'\n\n if len(parts) != 1:\n return 'underscore'\n\n parts = split_by_case(parts[0], 'camel')\n if parts[0][0].isupper(): # check first character\n return 'title'\n\n # first character lower or not letter\n\n if len(parts) == 1:\n return 'mixed'\n\n return 'camel'",
"def convert_to_singular(text):\n if inflectengine.singular_noun(text):\n return inflectengine.singular_noun(text).lower()\n else:\n return text.lower()",
"def not_capitalized(): # noqa: D416",
"def add_lowercase_fields(attributes, data):\n for attrib in attributes:\n if attrib['similarity'] == 'EqualIgnoreCase':\n value = data.get(attrib['name'])\n if value is not None:\n data[attrib['name']] = value.lower()\n return data",
"def lowercase(data):\n return np.char.lower(data)",
"def case_sensitive(self):\n\n return True",
"def lowercase(tokens):\n if not lowercase_activated:\n return tokens\n output = [token.lower() for token in tokens]\n return output",
"def asciify(text: str) -> str:\n return \"\".join(\n filter(\n lambda x: x in list(string.ascii_letters) or x.isspace(), \n unidecode.unidecode(text).lower()\n )\n )"
] | [
"0.7950047",
"0.7778725",
"0.76062495",
"0.757439",
"0.7487152",
"0.7474553",
"0.744533",
"0.73600155",
"0.7350697",
"0.7326691",
"0.72125846",
"0.7002865",
"0.6945686",
"0.6917932",
"0.68785036",
"0.68477106",
"0.68410075",
"0.670077",
"0.6686607",
"0.66477966",
"0.6613953",
"0.6600545",
"0.65928",
"0.6540414",
"0.6540414",
"0.6540414",
"0.65396714",
"0.65377384",
"0.6515144",
"0.6505206",
"0.64721936",
"0.6468349",
"0.6435076",
"0.6420497",
"0.6410332",
"0.64072853",
"0.6381104",
"0.63228047",
"0.6315988",
"0.63112664",
"0.6310337",
"0.6308305",
"0.6307691",
"0.6279526",
"0.6276632",
"0.62749517",
"0.6269616",
"0.62628216",
"0.62543315",
"0.6247787",
"0.62370574",
"0.62208617",
"0.6215274",
"0.6215274",
"0.62151116",
"0.62124896",
"0.6201702",
"0.6187491",
"0.61675924",
"0.6163541",
"0.6161756",
"0.61511827",
"0.6150661",
"0.6133462",
"0.61155343",
"0.61139643",
"0.60742015",
"0.6068332",
"0.6067907",
"0.60644126",
"0.6064121",
"0.60402805",
"0.6039794",
"0.60381055",
"0.6034268",
"0.6026109",
"0.60149276",
"0.6008719",
"0.60077393",
"0.60010564",
"0.59969115",
"0.5984189",
"0.59765387",
"0.59735435",
"0.59594345",
"0.5948217",
"0.59380877",
"0.5933364",
"0.5932424",
"0.5932399",
"0.59292054",
"0.59194124",
"0.59157884",
"0.5912727",
"0.5911416",
"0.59024143",
"0.5893509",
"0.58930665",
"0.589278",
"0.58778787"
] | 0.7870158 | 1 |
Formats n words to the left of the cursor to lower case. Note that word count differs between editors and programming languages. The examples are all from Eclipse/Python. | def lowercase_count(n):
saveText = _get_clipboard_text()
cutText = _select_and_cut_text(n)
if cutText:
newText = cutText.lower()
newText = newText.replace("%", "%%") # Escape any format chars.
Text(newText).execute()
else: # Failed to get text from clipboard.
Key('c-v').execute() # Restore cut out text.
_set_clipboard_text(saveText) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pascal_case_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n text = _cleanup_text(cutText)\n newText = text.title().replace(' ', '')\n if endSpace:\n newText = newText + ' '\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)",
"def snake_case_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n text = _cleanup_text(cutText.lower())\n newText = '_'.join(text.split(' '))\n if endSpace:\n newText = newText + ' '\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)",
"def camel_case_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n text = _cleanup_text(cutText)\n newText = _camelify(text.split(' '))\n if endSpace:\n newText = newText + ' '\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)",
"def to_lowercase(self):\n new_words = []\n # new_words = \"\"\n for word in self.words:\n new_word = word.lower()\n new_words.append(new_word)\n # new_word += f\"{new_word} \"\n self.words = new_words\n return self",
"def lower(self) -> str:",
"def words_lower_case(self, path, filemoving, parser):\n root = parser.parsing_xml(path, filemoving)\n root_tag = root.tag[0:(root.tag.find('}') + 1)]\n number_of_words_in_lower_case = 0\n for i in root.iter(root_tag+'p'):\n if str(type(i.text)) == \"<class 'str'>\":\n number_of_words_in_lower_case = number_of_words_in_lower_case + sum(list(map(lambda x: x.islower(), i.text.split())))\n return number_of_words_in_lower_case",
"def wrap_by_word(s, n):\n\ta = s.split()\n\tret = ''\n\tfor i in range(0, len(a), n):\n\t\tret += ' '.join(a[i:i+n]) + '\\n'\n\treturn ret",
"def lowercase_text(text):\n newText = format_lower_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})",
"def _set_number_of_words(self, N):\n self.N_words_to_display = N",
"def get_number_of_words_with_capital_letters_and_lowercase(self):\n filename = f'{self.path}/{self.filename}'\n file = open(filename, 'r', encoding='utf-8')\n data = file.read()\n head, sep, tail = data.partition('<binary')\n head = re.sub('\\\\s\\\\s*', ' ', (re.sub('\\\\W|\\\\d', ' ', re.sub('<.*?>', '', head))))\n word_list = head.split()\n\n # upper_cnt = (sum([sum([c.isupper() for c in a]) for a in word_list]))\n # lower_cnt = (sum([sum([c.islower() for c in a]) for a in word_list]))\n\n upper_cnt = sum([a[0].isupper() for a in word_list])\n lower_cnt = (sum([a.islower() for a in word_list]))\n\n sqlite_for_ht.CreateTable.update_table(f_1, self.filename, 'words_with_capital_letters', upper_cnt)\n sqlite_for_ht.CreateTable.update_table(f_1, self.filename, 'words_in_lowercase', lower_cnt)\n print(datetime.now(), '-', 'words_with_capital_letters for', self.filename, 'calculated =', upper_cnt)\n print(datetime.now(), '-', 'words_in_lowercase for', self.filename, 'calculated =', lower_cnt)\n return None",
"def lowercase_well_known_word(text):\n lines = []\n lines_append = lines.append\n for line in text.splitlines(True):\n words = []\n words_append = words.append\n for word in line.split():\n if word in COMMON_WORDS:\n word = word.lower()\n words_append(word)\n lines_append(' '.join(words))\n return '\\n'.join(lines)",
"def make_title(words):",
"def proper_title_case(s):\n nocaps = [\"the\"] # This needs to be extended.",
"def format_words(words):\n return sorted(words, key=str.lower)",
"def pascal_case(value: str, **kwargs: Any) -> str:\n return \"\".join(map(str.title, split_words(value)))",
"def ladcased(normal):\r\n\r\n ladified = ''\r\n for i, c in enumerate(normal):\r\n ladified += c.lower() if (i % 2 == 0) else c.upper()\r\n\r\n return ladified",
"def print_words(words):\n print('Here are the words you entered:')\n for i, word in enumerate(words, start=1):\n print('{}. {}'.format(i, word.title()))",
"def LCase(text):\n return text.lower()",
"def titleize(phrase):\n nl = phrase.split(' ')\n o=[]\n for i in nl:\n o.append(i[0].upper()+i[1:].lower())\n print(' '.join(o))",
"def titleize(phrase):\n words = phrase.split(' ')\n results = ''\n for i in range(len(words)):\n word = ''\n for j in range(len(words[i])):\n if(j == 0):\n word = words[i][j].upper() \n else:\n word += words[i][j].lower()\n results += word\n if(i < len(words) - 1):\n results += ' '\n return results",
"def snake_case_text(text):\n newText = format_snake_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})",
"def LOWER(text):\n return text.lower()",
"def generate_words(self,N):\n for i in xrange(N):\n prefix = \" \" * self.chainlen\n name = \"\"\n suffix = \"\"\n while True:\n suffix = self.get_suffix(prefix)\n if suffix == \"\\n\" or len(name) > 9:\n break\n else:\n name = name + suffix\n prefix = prefix[1:] + suffix\n yield name.capitalize()",
"def test20():\n\tdef highlight_word(sentence, word):\n\t\treturn(\" \".join(x) for x in sentence.split())\n\n\tprint(highlight_word(\"Have a nice day\", \"nice\"))\n\tprint(highlight_word(\"Shhh, don't be so loud!\", \"loud\"))\n\tprint(highlight_word(\"Automating with Python is fun\", \"fun\"))",
"def wcount(lines, topn):\n word = ''\n for i in lines:\n if 65<=ord(i) and ord(i)<=90:\n word = word + i \n elif 97<=ord(i) and ord(i)<=122:\n word = word + i\n else:\n word = word + ' ' \n word = word.split()\n #提取不重复的单词\n alreadyknown = []\n for m in word:\n if m not in alreadyknown:\n alreadyknown.append(m)\n #分别数数,排序,建构字典\n empty = []\n final = {}\n final2 = {}\n for j in alreadyknown:\n number = icount(word,j)\n final[j]=number\n final2[str(number)]=j\n empty.append(number)\n empty.sort()\n empty.reverse()\n last_step = empty[:10]\n #通过数字找到对应word\n last_str = ''\n for y in last_step:\n z = final2[str(y)]\n last_str += z + \"\\t\" + str(y) + \"\\n\"\n return last_str",
"def _lowercase(text: str) -> str:\n return text.lower()",
"def create_word(char_list):",
"def pascal_case_text(text):\n newText = format_pascal_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})",
"def get_first_k_words(text: str, num_words: int) -> str:\n words = text.split()\n if num_words >= len(text):\n return text\n\n return ' '.join(words[:num_words])",
"def uppercase_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n newText = cutText.upper()\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)",
"def words_capital_letter(self, path, filemoving, parser):\n root = parser.parsing_xml(path, filemoving)\n root_tag = root.tag[0:(root.tag.find('}') + 1)]\n number_of_words_capital_letter = 0\n for i in root.iter(root_tag+'p'):\n if str(type(i.text)) == \"<class 'str'>\":\n number_of_words_capital_letter = number_of_words_capital_letter + sum(list(map(lambda x: x.istitle(), i.text.split())))\n return number_of_words_capital_letter",
"def lower(text):\n text = text.lower()\n return text",
"def display_top_n_words(total_count__of_words, n): # Considering n=10 here as specified in the requirements\n return sorted(total_count__of_words.items(), key=lambda i: i[1], reverse=True)[:n]",
"def lower_case_really():",
"def wc(file_):\r\n with open(file_) as f:\r\n file = f.read().strip()\r\n char_nums = len(file)\r\n lines = file.split('\\n')\r\n line_nums = len(lines)\r\n word_nums = 0\r\n for line in lines:\r\n words = line.split()\r\n word_nums += len(words)\r\n return f'{line_nums} {word_nums} {char_nums} {file_}'",
"def test_downcase_word(self):\n before_b = \"\"\"\\\n XYZZY line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n xyzzy line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"1.4\", \"1.4\"),\n after_sel=(\"1.4\", \"1.4\"),\n command_name=\"downcase-word\",\n )",
"def normalize(w):\n\n nfkd = unicodedata.normalize('NFKD', w)\n return ''.join(x for x in nfkd if unicodedata.category(x)[0] == 'L').lower()",
"def wcount(lines, topn = 10):\n global worddict\n worddict = {}\n # record words each line by each\n linestr = lines.readline().decode() \n while linestr:\n record(linestr)\n linestr = lines.readline().decode()\n \n # sort the worddict to construct a wordlist\n wordlist = sorted(worddict.items(),\\\n key=lambda x:x[1],reverse = True)\n \n # get all words if lenth is less than number\n print(' '*3+'Word'.ljust(30),'Times'.center(10))\n for num in range(min(len(wordlist),topn)):\n print(' '*3+wordlist[num][0].ljust(30),\\\n str(wordlist[num][1]).center(10))",
"def lower(self) -> String:\n pass",
"def get_word(w):\n return ''.join(c for c in w if c.isalpha()).lower()",
"def normalize_word(word):\n\n return word.lower()",
"def generate_text_owc(model: Dict[str, Set[str]], n: int) -> str:\n # ACCUMULATOR: a list of the randomly-generated words so far\n words_so_far = []\n # We've provided this template as a starting point; you may modify it as necessary.\n words_so_far.append(generate_new_word(model))\n for x in range(0, n-1):\n key = words_so_far[x]\n new_word = generate_next_word(model,key)\n if new_word == \".\":\n words_so_far[x] = words_so_far[x]+'.'\n new_word= generate_new_word(model)\n elif new_word == {}:\n new_word = generate_new_word(model)\n words_so_far.append(new_word)\n\n return str.join(' ', words_so_far)",
"def expand_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n cutText = _expand_after_special_chars(cutText)\n reg = re.compile(\n r'([a-zA-Z0-9_\\\"\\'\\)][=\\+\\-\\*/\\%]|[=\\+\\-\\*/\\%][a-zA-Z0-9_\\\"\\'\\(])')\n hit = reg.search(cutText)\n count = 0\n while hit and count < 10:\n cutText = cutText[:hit.start() + 1] + ' ' + \\\n cutText[hit.end() - 1:]\n hit = reg.search(cutText)\n count += 1\n newText = cutText\n if endSpace:\n newText = newText + ' '\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)",
"def convert_pascal_case_to_readable_format(text):\n return \"\".join(list(map(lambda x : \" \" + x.lower() if x.isupper() else x, list(text))))",
"def _(event):\n buffer = event.current_buffer\n buffer.cursor_position += buffer.document.find_next_word_beginning(count=event.arg) or \\\n buffer.document.get_end_of_document_position()",
"def rotate_word(s1, n):\n s2 = ''\n for c in s1:\n i = (ord(c)-97+n) % 26\n ch = chr(i+97)\n s2 = s2 + ch\n return s2",
"def snake_case(value: str, **kwargs: Any) -> str:\n return \"_\".join(map(str.lower, split_words(value)))",
"def wcount(lines, topn=10):\n words=lines.lower()\n words=words.replace('.', '')\n words=words.replace(',', ' ')\n words=words.replace('!', ' ')\n words=words.replace('?', ' ')\n words=words.replace(':', ' ')\n words=words.replace('_', ' ')\n words=words.replace('\"', ' ')\n words=words.replace(\"'\", ' ')\n words=words.replace('(', ' ')\n words=words.replace(')', ' ')\n words=words.replace('[', ' ')\n words=words.replace(']', ' ')\n words=words.replace('-', ' ')\n words=words.replace(';', ' ')\n words=words.replace('\"', ' ')\n words=words.replace('*', ' ')\n lst=words.split(' ')\n lst2=list(set(lst))\n lst2.remove('')\n dic={}\n for i in lst2:\n dic[i]=lst.count(i)\n wds=list(dic.keys())\n numbers=list(dic.values())\n numbers2=sorted(numbers, reverse=True)\n for k in range(topn):\n m=numbers.index(numbers2[k])\n print(\"%-15s%-5d\"%(wds[m],numbers2[k]))",
"def nth_word(value: str, n: int) -> str:\n return value.split()[n]",
"def titleize(title):\n titleized = []\n for idx, word in enumerate(title.split()):\n if idx == 0 or word not in ['a', 'of', 'in', 'the', 'v']:\n word = word.capitalize()\n\n titleized.append(word)\n\n return ' '.join(titleized)",
"def lowercase(tokens):\n if not lowercase_activated:\n return tokens\n output = [token.lower() for token in tokens]\n return output",
"def motion_w(input_line, cur, count):\n pos = get_pos(input_line, REGEX_MOTION_LOWERCASE_W, cur, True, count)\n if pos == -1:\n return len(input_line), False, False\n return cur + pos, False, False",
"def counter(name):\n count_name = list(name)\n counter = 0\n for letter in count_name:\n counter += 1\n\n print(f\"There are {counter} letter in the name {name}.\")\n print(f\"\\tAnd btw... {name} backwards is {name[::-1].lower()}.\")",
"def count_words(s, n):\n\n # TODO: Count the number of occurences of each word in s\n words = s.lower().split()\n dict = {}\n\n for item in words:\n dict[item] = words.count(item)\n\n # TODO: Sort the occurences in descending order (alphabetically in case of ties)\n items = dict.items()\n\n items.sort(key=lambda tup: tup[0])\n items.sort(key=lambda tup: tup[1], reverse=True)\n\n # TODO: Return the top n words as a list of tuples (<word>, <count>)\n return items[:n]",
"def longest_words(self, n=10):\n return sorted(set(self.text), key=len, reverse=True)[:n]",
"def s_words(words):\n\t\n\treturn words // 100 / 10",
"def to_lowercase(words):\r\n new_words = []\r\n for word in words:\r\n new_word = word.lower()\r\n new_words.append(new_word)\r\n return new_words",
"def format_camel(text):\r\n\toutput = \"\"\r\n\tfor i,w in enumerate(text.split(\" \")):\r\n\t\tif i > 0:\r\n\t\t\toutput += w[0].upper() + w[1:]\r\n\t\telse:\r\n\t\t\toutput += w\r\n\treturn output",
"def _to_lowercase(words):\n new_words = []\n for word in words:\n new_word = word.lower()\n new_words.append(new_word)\n return new_words",
"def wordize(lines):\n parser = Parser()\n tokenizer = Tokenizer()\n word_ctr = WordCounter()\n words = []\n for l in lines :\n if (l.rstrip()) :\n statement = parser.parseSentence(l, int(word_ctr))\n token_lists = tokenizer.tokenizeStatement(statement, int(word_ctr))\n for l in token_lists :\n if len(l) > 0 :\n words.append(l)\n word_ctr += 1\n return words",
"def _(event):\n buffer = event.current_buffer\n buffer.cursor_position += buffer.document.find_previous_word_beginning(count=event.arg) or 0",
"def render_snake(var_words):\n return '_'.join(var_words)",
"def _truncate_name(orig_str, word_num):\n if not orig_str:\n return orig_str\n tokens = string_utils.tokenizer(orig_str)\n if len(tokens) > word_num:\n orig_str = ' '.join(tokens[:word_num])\n return orig_str",
"def get_word(wordlist, args): #{{{\n iters = 0\n while iters < 500:\n if args.lowercase == True:\n word = random.choice(wordlist).strip().lower()\n return word\n elif args.lowercase == False:\n word = random.choice(wordlist).strip().lower().capitalize()\n return word\n\n if args.punctuation == False:\n if len(word) < args.max_length and word.isalpha() == True:\n return word\n iters += 1\n elif args.punctuation == True:\n if len(word) < args.max_length:\n return word\n iters += 1 #}}}",
"def lunderize(title):\n title = title.lower()\n title = title.replace(' ', '_')\n title = title.replace('.', '')\n return title",
"def english(number):\r\n if number == 0:\r\n return 'zero'\r\n word = ''\r\n for step in itertools.count():\r\n number, rest = divmod(number, 1000)\r\n word = format_num(en3(rest), step) + word\r\n if number == 0:\r\n return word.strip()",
"def to_lowercase(words):\n new_words = []\n for word in words:\n new_word = word.lower()\n new_words.append(new_word)\n return new_words",
"def to_lowercase(words):\n new_words = []\n for word in words:\n new_word = word.lower()\n new_words.append(new_word)\n return new_words",
"def to_lowercase(words):\n new_words = []\n for word in words:\n new_word = word.lower()\n new_words.append(new_word)\n return new_words",
"def to_lowercase(words):\n new_words = []\n for word in words:\n new_word = word.lower()\n new_words.append(new_word)\n return new_words",
"def to_lowercase(words):\n new_words = []\n for word in words:\n new_word = word.lower()\n new_words.append(new_word)\n return new_words",
"def to_lowercase(words):\n new_words = []\n for word in words:\n new_word = word.lower()\n new_words.append(new_word)\n return new_words",
"def to_lowercase(words):\n new_words = []\n for word in words:\n new_word = word.lower()\n new_words.append(new_word)\n return new_words",
"def _camelify(words):\n newText = ''\n for word in words:\n if newText == '':\n newText = word[:1].lower() + word[1:]\n else:\n newText = '%s%s' % (newText, word.capitalize())\n return newText",
"def preprocess(text):\n return text.lower()",
"def camel_case_to_lower_case_underscore(string):\n words = []\n from_char_position = 0\n for current_char_position, char in enumerate(string):\n if char.isupper() and from_char_position < current_char_position:\n words.append(\n string[from_char_position:current_char_position].lower())\n from_char_position = current_char_position\n words.append(string[from_char_position:].lower())\n return '_'.join(words)",
"def titlecase(original: str, delimiter: str = \" \", small_words: list = None) -> str:\n _small_words = [\"of\", \"in\", \"at\", \"to\", \"the\", \"on\", \"an\", \"a\"]\n if small_words:\n _small_words = list(set(_small_words + small_words))\n\n original_splitted = original.split(delimiter)\n result = []\n\n for word in original_splitted:\n word = word.lower()\n if word in _small_words:\n result.append(word)\n else:\n result.append(word.capitalize())\n\n return delimiter.join(result)",
"def _apply_lowercase(sentence_tokens):\n return [token.lower() for token in sentence_tokens]",
"def to_lowercase(words):\n new_words = []\n for word in words:\n new_word = word.lower()\n new_words.append(new_word)\n return(new_words)",
"def profanity_word_handler(word):\n return word[0] + ''.join([settings.CENSOR_PROFANITY_REPLACEMENT_CHARACTER for I in range(len(word)-2)]) + word [-1]",
"def cap_first(word):\n return word[0].upper() + word[1:]",
"def alnum_prefix(text: str) -> str:\n\n index = 0\n while index < len(text) and text[index].isalnum():\n index += 1\n return text[:index].lower()",
"def change_title(s):\n\ts = re.sub(r\"[A-Za-z]+('[A-Za-z]+)?\",\n lambda mo: mo.group(0)[0].upper() +\n mo.group(0)[1:].lower(),s)\n\ts = s.split(\" \")\n\tfor i in range(len(s)):\n\t\tif (s[i] in \"Ii Iii Iv Vi Vii Viii Ix Ii: Iii: Iv: Vi: Vii: Viii: Ix:\"):\n\t\t\ts[i] = s[i].upper()\n\treturn \" \".join(s)",
"def fit_to_width(string, limit):\n\n input_words = string.split()\n i = 0\n line_list = []\n new_str = str()\n\n for word in input_words:\n if i == 0:\n new_str = word\n elif len(new_str+word) < limit:\n new_str = new_str + ' ' + word\n else:\n line_list.append(new_str)\n new_str = word\n if i == (len(input_words)-1):\n line_list.append(new_str)\n i += 1\n\n for string in line_list:\n print(string)",
"def motion_W(input_line, cur, count):\n pos = get_pos(input_line, REGEX_MOTION_UPPERCASE_W, cur, True, count)\n if pos == -1:\n return len(input_line), False, False\n return cur + pos, False, False",
"def make_lexicon_words_txt(self):\n raise NotImplementedError",
"def display_words(word_list,specifier):\n \n if specifier.lower() == 'score':\n print(\"{:>6s} - {:s}\".format(\"Score\", \"Word\"))\n if len(word_list) < 5:\n for tup in word_list:\n print(\"{:>6d} - {:s}\".format(tup[1], tup[0]))\n else:\n \n for tup in word_list[:5]:\n print(\"{:>6d} - {:s}\".format(tup[1], tup[0]))\n \n \n elif specifier.lower() == 'length':\n print(\"{:>6s} - {:s}\".format(\"Length\", \"Word\"))\n if len(word_list) < 5:\n for tup in word_list:\n print(\"{:>6d} - {:s}\".format(tup[2], tup[0]))\n else:\n \n for tup in word_list[:5]:\n print(\"{:>6d} - {:s}\".format(tup[2], tup[0]))",
"def words(string):\n string = upper(string) # pass the string to the upper function to capitalize it\n string_list = string.split() # split the string by spaces into a list\n \n for i in range(len(string_list)): # Itterate over words in the list\n if len(string_list) >= 1: # Edgecase (if userinput is too short)\n if string_list[i] == 'EASY': # Check if the string \"EASY\" appears in the list\n string_list[i] = 'EZ' # Replace it with EZ\n\n elif string_list[i] == 'NICE': # Check if the string \"NICE\" appears in the list\n string_list[i] = 'NYC' # Replace it with \"NYC\"\n\n elif string_list[i] == 'LATER': # Check if the string \"LATER\" appears in the list\n string_list[i] = 'L8R' # Check if the string \"L8R\" appears in the list\n\n elif string_list[i] == 'THANKS': # Check if the string \"THANKS\" appears in the list\n string_list[i] = 'TY' # Replace it with \"TY\"\n \n new_string = ' '.join(string_list) # Join the list to a string\n return new_string # Return the new string",
"def first_lower(self, s):\n if len(s) == 0:\n return s\n else:\n return s[0].lower() + s[1:]",
"def lowercase_names(topconstruct):\n for vn in query([is_layering([syntax.VAR_NAME])], TreeItem(topconstruct)):\n vn.construct.args[0] = vn.construct.args[0].lower()",
"def count_words(filename):",
"def __process_word(self, word):\n output = ''\n capitals = self.__capital_positions(word)\n c_index = 0\n\n for c in word:\n if c_index in capitals:\n output += c.upper()\n else:\n output += c.lower()\n\n c_index += 1\n\n return output",
"def mixed_snake_case(value: str, **kwargs: Any) -> str:\n return \"_\".join(split_words(value))",
"def get_top_n_words(column, n):\r\n frequencies = Counter()\r\n column.str.lower().str.split().apply(frequencies.update)\r\n return frequencies.most_common(n)",
"def LOWER_START():\n return 7",
"def render_camel(var_words):\n return ''.join([word.capitalize() for word in var_words])",
"def replace_words_fun(self):\n\n cleaned_doc = []\n for word in str(self.doc).split():\n if word.lower() in self.replacement_list.keys():\n cleaned_doc.append(self.replacement_list[word.lower()])\n else:\n cleaned_doc.append(word)\n self.doc = ' '.join(cleaned_doc)",
"def just_do_it(text):\n from string import capwords\n return capwords(text)",
"def pascal_case(st) -> str:\n if st.find('-') != -1 or st.find('_') != -1:\n st = ''.join(a.capitalize() for a in re.split('-|_', st))\n return st[:1].upper() + st[1: len(st)]",
"def createWordKnown(self):\n return ''.join(['_ ' for m in range(self.wordLen)])"
] | [
"0.68059164",
"0.67174524",
"0.6428881",
"0.612785",
"0.60862404",
"0.60718983",
"0.6050795",
"0.59884626",
"0.59352654",
"0.59344274",
"0.5890492",
"0.58382356",
"0.5795074",
"0.5784348",
"0.5748356",
"0.5746969",
"0.5746722",
"0.57407844",
"0.57185817",
"0.5685551",
"0.56584406",
"0.5656949",
"0.5629105",
"0.56139094",
"0.5612925",
"0.56016505",
"0.5589428",
"0.55866903",
"0.55855596",
"0.5556237",
"0.5538861",
"0.5535931",
"0.551938",
"0.54954356",
"0.5461828",
"0.5443356",
"0.54426396",
"0.5429247",
"0.54220736",
"0.5411308",
"0.54066014",
"0.54023874",
"0.5401143",
"0.53999555",
"0.5391621",
"0.53880686",
"0.53867346",
"0.5386275",
"0.5382647",
"0.53761417",
"0.53722",
"0.53686345",
"0.5356988",
"0.53558606",
"0.5354847",
"0.53541327",
"0.53419346",
"0.5340225",
"0.53373384",
"0.532899",
"0.5316457",
"0.5313697",
"0.5313321",
"0.5299253",
"0.5294986",
"0.52883655",
"0.5286499",
"0.5286499",
"0.5286499",
"0.5286499",
"0.5286499",
"0.5286499",
"0.5286499",
"0.52801925",
"0.52728933",
"0.5270792",
"0.52697736",
"0.52682364",
"0.52650964",
"0.52602875",
"0.5260282",
"0.525999",
"0.5233366",
"0.52322924",
"0.5227012",
"0.5223406",
"0.5222728",
"0.521731",
"0.5211889",
"0.5203128",
"0.52029",
"0.52012926",
"0.5199374",
"0.5199163",
"0.51986027",
"0.5197307",
"0.5195328",
"0.51937646",
"0.51932275",
"0.5192399"
] | 0.73799914 | 0 |
Cleans up the text before formatting to camel, pascal or snake case. Removes dashes, underscores, single quotes (apostrophes) and replaces them with a space character. Multiple spaces, tabs or new line characters are collapsed to one space character. Returns the result as a string. | def _cleanup_text(text):
prefixChars = ""
suffixChars = ""
if text.startswith("-"):
prefixChars += "-"
if text.startswith("_"):
prefixChars += "_"
if text.endswith("-"):
suffixChars += "-"
if text.endswith("_"):
suffixChars += "_"
text = text.strip()
text = text.replace('-', ' ')
text = text.replace('_', ' ')
text = text.replace("'", ' ')
text = re.sub('[ \t\r\n]+', ' ', text) # Any whitespaces to one space.
text = prefixChars + text + suffixChars
return text | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clean_review(self, text):\n text = text.lower() # lowercase capital letters\n\n if self.remove_stopwords:\n text = self.remove_stopwords_f(text, keep_neg_words=True)\n\n text = re.sub('[^a-zA-Z]+', ' ', text) # select only alphabet characters (letters only)\n # text = re.sub('[^a-zA-Z0-9]+', ' ', text) # select only alphanumeric characters (letters & numbers)\n # text = re.sub(r'\\W+', ' ', text) # Select only alphanumeric characters (including greek & underscore)\n\n text = re.sub(' +', ' ', text) # remove extra spaces\n\n if self.apply_normalization:\n text = self.normalize_text(text)\n\n return text",
"def detokenize(self, text):\n text = ' ' + text + ' '\n text = self._dash_fixes.sub(r' \\1-\\2 ', text)\n text = self._dash_fixes2.sub(r' \\1-\\2 ', text)\n text = self._currency_or_init_punct.sub(r' \\1', text)\n text = self._noprespace_punct.sub(r'\\1 ', text)\n text = self._contract.sub(r\" \\1'\\2\", text)\n text = self._contractions.sub(r\"\\1\", text)\n text = self._esses.sub(r\"s \", text)\n text = self.moses_detokenizer.detokenize(text.split())\n text = text.strip()\n # capitalize\n if not text:\n return ''\n return text",
"def CleanText(text):\n\n pretty_issue = text.lower().strip()\n\n quoteless_issue = re.sub('\\'', '', pretty_issue)\n no_punctuation_issue = re.sub('[^\\w\\s]|_+', ' ', quoteless_issue)\n one_space_issue = ' '.join(no_punctuation_issue.split())\n\n return one_space_issue",
"def preprocess(self, s):\n stripped = re.sub(\"[^\\w\\s]\", \"\", s)\n stripped = re.sub(\"_\", \"\", stripped)\n\n stripped = re.sub(\"\\s+\", \" \", stripped)\n\n stripped = stripped.strip()\n\n return stripped.lower()",
"def _clean(self, text):\n if len(self.alph) == 26:\n text = sub('[\\n\\t ' + string.punctuation + ']+?', '', text)\n else:\n text = sub('[\\n\\t]+?', '', text)\n\n text = text.lower()\n text = text.encode('ascii', 'ignore').decode()\n return text",
"def desc_cleanser(self, txt):\n # New line issues\n txt = re.sub(r'\\\\n', r' ', txt)\n # Unicode cleanse\n txt = re.sub(r'\\\\u[\\d]{4}', r'', txt)\n # Remaining unicode cleanse\n txt = re.sub(r'\\\\{1,2}\\S+', r' ', txt)\n # Remove remaining non-alphanumeric and spaces\n txt = ''.join([i for i in txt if i.isalnum() or i.isspace() or i in ['.','?','!']])\n # Remove more than a single space\n txt = re.sub(r'\\s+', r' ', txt)\n\n return txt",
"def sanitise(text: str):\n # Removes new lines, weird characters and dialogue\n text = \" \" + text + \" \"\n\n lined_text = text.split(\"\\n\")\n text = \"\"\n # Remove dialogue\n for line in lined_text:\n if \":\" in line:\n if line.index(\":\") < 15:\n index = line.index(\":\") + 1\n else:\n index = 0\n else:\n index = 0\n text = text + \"\\n\" + line[index:]\n\n # Lower case everything\n text = text.lower()\n\n text = text.replace(\"'s\", \" is\")\n text = text.replace(\"'ve\", \" have\")\n text = text.replace(\"n't\", \" not\")\n text = text.replace(\"I'm\", \"I am\")\n text = text.replace(\"'re\", \" are\")\n text = text.replace(\"’s\", \" is\")\n text = text.replace(\"’ve\", \" have\")\n text = text.replace(\"n’t\", \" not\")\n text = text.replace(\"I’m\", \"I am\")\n text = text.replace(\"’re\", \" are\")\n\n # Remove weird characters and double spaces\n weird_characters = [\".\", \",\", \"?\", \"!\", \"'\", \"’\", \"\\\"\", \"\\n\", \"\\t\", \"-\", \"/\", \"[\", \"]\", \"(\", \")\", \":\", \"“\", \"”\"]\n for weird_character in weird_characters:\n text = text.replace(weird_character, \" \")\n\n while \" \" in text:\n text = text.replace(\" \", \" \")\n\n return text",
"def no_caps_and_ponctuation(text):\n return re.sub(r'[^\\w\\s]', '', text).lower()",
"def pre_process(text: str) -> str:\n text = text.replace('--', '-')\n space_right = '!?:;,.-()*+-/<=>@^_'\n space_both = '-()*+-/<=>@^_'\n\n for punct in space_right:\n text = text.replace(punct, punct + ' ')\n for punct in space_both:\n text = text.replace(punct, ' ' + punct + ' ')\n\n # remove extra space\n text = re.sub(r' +', ' ', text)\n return text",
"def _clean_text(text):\n rrb = re.compile(\"-RRB-\")\n lrb = re.compile(\"-LRB-\")\n new_text = re.sub(rrb, \" \", text)\n new_text = re.sub(lrb, \" \", new_text)\n\n punct = re.compile(r'[_?!.,]')\n new_text = re.sub(punct, \" \", new_text)\n\n new_text = str(new_text).lower()\n return new_text",
"def text_cleaning(self, text):\n # remove string formatting '\\n' or '\\t'\n tmp_text = re.sub(r'\\n+', '. ', text)\n tmp_text = re.sub(r'\\t+', '. ', text)\n # remove words with non-ascii characters\n tmp_text = \" \".join([word for word in tmp_text.split() if self.is_ascii(word)])\n # remove email address\n tmp_text = \" \".join([word for word in tmp_text.split() if not word.startswith(\"@\")])\n # remove urls\n tmp_text = re.sub(r'http\\S+', '', tmp_text, flags=re.MULTILINE)\n tmp_text = re.sub(r'www\\S+', '', tmp_text, flags=re.MULTILINE)\n # remove punctuation but . (to split sentences)\n cleaned_text = re.sub('[^A-Za-z.,]+', ' ', tmp_text)\n # lowercase\n cleaned_text = cleaned_text.lower()\n\n return cleaned_text",
"def clean_text(text):\n text = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", text)\n text = re.sub(r\"\\'s\", \" \\'s\", text)\n text = re.sub(r\"\\'ve\", \" \\'ve\", text)\n text = re.sub(r\"n\\'t\", \" n\\'t\", text)\n text = re.sub(r\"\\'re\", \" \\'re\", text)\n text = re.sub(r\"\\'d\", \" \\'d\", text)\n text = re.sub(r\"\\'ll\", \" \\'ll\", text)\n text = re.sub(r\",\", \" , \", text)\n text = re.sub(r\"!\", \" ! \", text)\n text = re.sub(r\"\\(\", \" \\( \", text)\n text = re.sub(r\"\\)\", \" \\) \", text)\n text = re.sub(r\"\\?\", \" \\? \", text)\n text = re.sub(r\"\\s{2,}\", \" \", text)\n return text.strip().lower()",
"def remove_all_caps(text):\n return re.sub(r\"(\\b(?:[A-Z]+[a-z]?[A-Z]*|[A-Z]*[a-z]?[A-Z]+)\\b(?:\\s+(?:[A-Z]+[a-z]?[A-Z]*|[A-Z]*[a-z]?[A-Z]+)\\b)*)\",\n ' ', text)",
"def keyify(text):\n text = text.lower()\n text = text.strip()\n\n text = text.replace('.', '')\n text = re.sub('[,-]', ' ', text)\n text = re.sub('\\s{2,}', ' ', text)\n\n return text",
"def get_clean_text(messy_text: str) -> str:\n new_text = \"\"\n replace = {\n \"*\": \"\\\"\",\n \"!\": \"?\",\n \"/\": ',',\n \"?\": \"!\"\n }\n remove = \"1234567890&@#$%^()_+|><~\"\n pls_do_upper = False\n for l in messy_text:\n if l in replace:\n new_text += replace[l]\n elif l not in remove:\n if pls_do_upper:\n new_text += l.upper()\n else:\n new_text += l\n return new_text",
"def clean(text):\n new = text.replace(\"\\r\", \"\")\n new = new.replace(\"\\t\", \"\")\n new = new.replace(\"\\n\", \"\")\n new = new.replace(\"- \", \"-\")\n new = new.replace(\" \", \" \")\n return new",
"def preprocess_input(self, text):\n text = re.sub(r\"([^a-zA-Z0-9 -]+ +[^a-zA-Z0-9 -]*|[^a-zA-Z0-9 -]*\" +\n \" +[^a-zA-Z0-9 -]+)\", ' ', text, flags=re.UNICODE)\n text = re.sub(r\"([^a-zA-Z0-9 -]+$|^[^a-zA-Z0-9 -]+)\", '', text)\n text = re.sub(r\"([a-zA-Z0-9 -]+?)([^a-zA-Z0-9 -])([a-zA-Z0-9 -]+?)\",\n r\"\\1'\\3\", text, flags=re.UNICODE)\n text = re.sub(r\"([\\x00-\\x7F -]+?)([^a-zA-Z0-9 -]+)([\\x00-\\x7F -]+?)\",\n r\"\\1'\\3\", text, flags=re.UNICODE).encode(\"utf-8\")\n return re.sub(r\"([^a-zA-Z0-9 \\-\\'])\", '', text, flags=re.UNICODE)",
"def standardize(text):\n # FIXME regex restricts us to only ascii\n # FIXME move regex compilation outside\n p = re.compile('[^a-zA-Z]')\n retval = p.sub('', text)\n retval = retval.lower()\n return retval",
"def clean_text(text):\n text = text.lower()\n text = text.replace('\\xa0', ' ')\n text = text.replace('fls.', 'folhas ')\n text = text.replace('fl.', 'folha ')\n text = text.replace('arts.', 'artigos ')\n text = text.replace('art.', 'artigo ')\n text = re_tree_dots.sub('...', text)\n text = re.sub(r'\\.\\.\\.', ' ', text)\n text = re_remove_brackets.sub(' ', text)\n text = re_changehyphen.sub('-', text)\n text = re_remove_html.sub(' ', text)\n text = re_transform_numbers.sub('0', text)\n text = re_transform_url.sub('URL', text)\n text = re_transform_emails.sub('EMAIL', text)\n text = re_quotes_1.sub(r'\\1\"', text)\n text = re_quotes_2.sub(r'\"\\1', text)\n text = re_quotes_3.sub('\"', text)\n text = re.sub('\"', ' ', text)\n text = re_dots.sub('.', text)\n text = re_punctuation.sub(r'\\1', text)\n text = re_hiphen.sub(' - ', text)\n text = re_punkts.sub(r'\\1 \\2 \\3', text)\n text = re_punkts_b.sub(r'\\1 \\2 \\3', text)\n text = re_punkts_c.sub(r'\\1 \\2', text)\n text = re_doublequotes_1.sub('\\\"', text)\n text = re_doublequotes_2.sub('\\'', text)\n text = re_trim.sub(' ', text)\n return text.strip()",
"def preprocess_text(text):\n # replace non characers with space and lower case\n temp = re.sub(r\"[/W/D/S.,-]+\", \" \", str(text).lower())\n # merge multiple spaces to a single one\n return re.sub(r\"[ ]+\", \" \", temp)",
"def sanitize_txt(x):\n return '_'.join(smart_split(x.lower()))",
"def normalize_text(text):\n text = re.sub(r'[ \\t]+', ' ', text)\n text = re.sub(r'\\r', '', text)\n\n # Remove whitespace in the middle of text.\n text = re.sub(r'[ \\t]+\\n', '\\n', text)\n # Remove whitespace at the end of the text.\n text = text.rstrip()\n\n return text",
"def _lowercase_despace_depunctuate(some_str=None):\n some_str = some_str.replace(\" \", \"\")\n some_str = some_str.replace(\"_\", \"\")\n some_str = some_str.replace(\"-\", \"\")\n some_str = some_str.lower()\n return some_str",
"def clean_text(text):\n text = text.lower()\n text = re.sub(r\"i'm\", 'i am', text)\n text = re.sub(r\"he's\", 'he is', text)\n text = re.sub(r\"she's\", 'she is', text)\n text = re.sub(r\"that's\", 'that is', text)\n text = re.sub(r\"what's\", 'what is', text)\n text = re.sub(r\"where's\", 'where is', text)\n text = re.sub(r\"\\'ll\", ' will', text)\n text = re.sub(r\"\\'ve\", ' have', text)\n text = re.sub(r\"\\'re\", ' are', text)\n text = re.sub(r\"\\'d\", ' would', text)\n text = re.sub(r\"won't\", 'will not', text)\n text = re.sub(r\"can't\", 'cannot', text)\n text = re.sub(r\"[-()\\\"#/@;:<>{}+=~|.?,]\", '', text)\n return text",
"def convert_pascal_case_to_readable_format(text):\n return \"\".join(list(map(lambda x : \" \" + x.lower() if x.isupper() else x, list(text))))",
"def clean_text_from_nonbasic_characters(text):\n text = re.sub(r\"([^\\u0000-\\u007F])\", \" \", text)\n text = replace_newline_with_space(text).strip()\n text = text.replace(\"_\", \"\")\n text = clean_text_from_multiple_consecutive_whitespaces(text)\n return text",
"def clean_text_from_nonbasic_characters(text):\n text = re.sub(r\"([^\\u0000-\\u007F])\", \" \", text)\n text = replace_newline_with_space(text).strip()\n text = text.replace(\"_\", \"\")\n text = clean_text_from_multiple_consecutive_whitespaces(text)\n return text",
"def clean_text(text):\n\n\n regex = re.compile('[\\.|\\-|\\,|\\?|\\_|\\:|\\\"|\\)|\\(\\)\\/|\\\\|\\>|\\<]')\n text = text.lower() # Turn everything to lower case\n text = regex.sub(' ', text).strip()\n out = re.sub(' +', ' ', text) # Reduce whitespace down to one\n \n return out",
"def normalize_space(text):\n return re.sub(r\"\\s+\", \" \", text.strip(), flags=re.UNICODE)",
"def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text",
"def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text",
"def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text",
"def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text",
"def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text",
"def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text",
"def preprocess(text):\n text = remove_space(text)\n text = clean_special_punctuations(text)\n text = clean_number(text)\n text = decontracted(text)\n text = correct_spelling(text)\n text = spacing_punctuation(text)\n text = spacing_some_connect_words(text)\n text = clean_repeat_words(text)\n text = remove_space(text)\n text = text.lower()\n return text",
"def _cleanse(text):\n return ''.join([character for character in text\n if character.isalnum()]).lower()",
"def preprocess_text(s: str) -> str:\n s = s.translate(str.maketrans('', '', string.punctuation))\n s = re.sub('\\s+', ' ', s)\n return s.lower()",
"def prepare_for_hashing(text):\n if not text:\n return ''\n return text.translate(CHARS_TO_DELETE).lower()",
"def get_normalised_phrase(self, sentence):\n return re.sub(r'[\\W_ ]+', ' ', sentence).lower()",
"def _strip_text(text):\n text = re.sub(r'[ ,?:]|%s', \"\", text.lower())\n for chr in \"-%\":\n new_text = text.replace(chr, \"\")\n if new_text:\n text = new_text\n return text.lower()",
"def sanitize(str):\n res = str.lower()\n return res.replace(' ', '-')",
"def clean_text(s,stem=False):\n\tret = s.lower()\n\tret = re.sub(r'[^a-z ]',' ',ret)\n\tret = re.sub(r' +',' ',ret).strip()\n\tret = re.sub(r'see more occupations related to this (activity|skill|task)','',ret)\n\tif stem:\n\t\tret = ' '.join( stemmer.stem(word) for word in ret.split(' ') )\n\treturn ret",
"def clean(text):\n\n # removing paragraph numbers\n text = re.sub('[0-9]+.\\t', '', str(text))\n # removing new line characters\n text = re.sub('\\n ', ' ', str(text))\n text = re.sub('\\n', ' ', str(text))\n # removing apostrophes\n text = re.sub(\"'s\", '', str(text))\n # removing hyphens\n text = re.sub(\"-\", '', str(text))\n text = re.sub(\"— \", '', str(text))\n # removing quotation marks\n text = re.sub('\\\"', '', str(text))\n # removing salutations\n text = re.sub(\"Mr\\.\", 'Mr', str(text))\n text = re.sub(\"Mrs\\.\", 'Mrs', str(text))\n # removing any reference to outside text\n text = re.sub(\"[\\(\\[].*?[\\)\\]]\", \"\", str(text))\n\n return text",
"def sanitize(string):\n retval = string.lower()\n retval = re.sub(r\"[^\\w\\s]\", '', retval)\n retval = re.sub(r\"\\s+\", '_', retval)\n return retval",
"def format_camel(text):\r\n\toutput = \"\"\r\n\tfor i,w in enumerate(text.split(\" \")):\r\n\t\tif i > 0:\r\n\t\t\toutput += w[0].upper() + w[1:]\r\n\t\telse:\r\n\t\t\toutput += w\r\n\treturn output",
"def _clean_up(hadith_text: str) -> str:\n punctuations = ''.join([\n # Collected from https://en.wikipedia.org/wiki/Arabic_script_in_Unicode#Punctuation_and_ornaments\n chr(int('060C', 16)), # ARABIC COMMA\n chr(int('060D', 16)), # ARABIC DATE SEPARATOR\n chr(int('060E', 16)), # ARABIC POETIC VERSE SIGN\n chr(int('060F', 16)), # ARABIC SIGN MISRA\n chr(int('061B', 16)), # ARABIC SEMICOLON\n chr(int('061E', 16)), # ARABIC TRIPLE DOT PUNCTUATION MARK\n chr(int('061F', 16)), # ARABIC QUESTION MARK\n chr(int('066D', 16)), # ARABIC FIVE POINTED STAR\n chr(int('06D4', 16)), # ARABIC FULL STOP\n chr(int('06DD', 16)), # ARABIC END OF AYAH\n chr(int('06DE', 16)), # ARABIC START OF RUB EL HIZB\n chr(int('06E9', 16)), # ARABIC PLACE OF SAJDAH\n chr(int('06FD', 16)), # ARABIC SIGN SINDHI AMPERSAND\n chr(int('FD3E', 16)), # Arabic ornate left parenthesis\n chr(int('FD3F', 16)), # Arabic ornate right parenthesis\n ])\n\n # Removing punctuations\n cleaned_text = re.sub('[' + punctuations + ']', ' ', hadith_text)\n\n # Removing any html markup\n cleaned_text = BeautifulSoup(cleaned_text, 'lxml').text\n\n # Removing multiple consecutive whitespaces, including newlines\n cleaned_text = ' '.join(cleaned_text.split())\n\n return cleaned_text",
"def _cleanString(self, s):\n\n s = str(re.sub(r'\\([^)]*\\)', '', s))\n translator = str.maketrans('', '', string.punctuation)\n s = s.translate(translator)\n\n return s.strip().replace(\" \", \"_\").lower()",
"def clean_text(some_text):\n # import re\n some_clean_text = re.sub(r'\\n|\\t', '', some_text) # Remove new line and tabs\n some_clean_text = re.sub(' +', ' ', some_clean_text) # Replace multiple spaces with one space\n return some_clean_text",
"def del_whitespace(selfs, text):\n\t\treturn text.replace(' ', '')",
"def slugify(text):\n concatenated = re.sub('\\s+', '-', text.lower())\n return re.sub('[^A-Za-z0-9_-]', '', concatenated)",
"def _camel_case_to_snake_case(text: str) -> str:\n return re.sub(r\"(?<!^)(?=[A-Z])\", \"_\", text).lower()",
"def clean_text_for_skill_extraction(text):\n multi_space_regex = re.compile(r\"[,;?!()\\\\/]\", re.IGNORECASE)\n text = re.sub(multi_space_regex, ' ', text)\n\n text = clean_text_from_private_unicode(text)\n text = clean_text_from_geometrical_shape_unicode(text)\n\n text = clean_text_from_multiple_consecutive_whitespaces(text)\n\n return text",
"def _clean(text, remove_stopwords=False):\n text = _remove_between_square_brackets(text)\n text = _replace_contractions(text)\n \n words = nltk.word_tokenize(text)\n words = _remove_non_ascii(words)\n words = _to_lowercase(words)\n words = _remove_punctuation(words)\n words = _replace_numbers(words)\n\n if remove_stopwords:\n words = _remove_stopwords(words)\n\n return ' '.join(words)",
"def _normalize_text(s: str) ->str:\n\n def remove_articles(text: str) ->str:\n return re.sub('\\\\b(a|an|the)\\\\b', ' ', text)\n\n def white_space_fix(text: str) ->str:\n return ' '.join(text.split())\n\n def remove_punc(text: str) ->str:\n exclude = set(string.punctuation)\n return ''.join(ch for ch in text if ch not in exclude)\n\n def lower(text: str) ->str:\n return text.lower()\n return white_space_fix(remove_articles(remove_punc(lower(s))))",
"def to_upper_snakecase(text: str) -> str:\n\n data = text.replace(\" \", \"_\").upper()\n if data[0].isdigit():\n data = \"_\" + data\n return data",
"def underscore_to_camelcase(text):\n text = text.replace('_', ' ').title()\n return text.replace(' ', '')",
"def normalize(text):\n return unicodedata.normalize(\"NFKD\", re.sub(r\"\\s+\", \" \", text.lower()))",
"def _clean_text(self, text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xFFFD or _is_control(char):\n continue # pragma: no cover\n if _is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)",
"def _sanitize(self, name, camel_case=False):\n\n if camel_case:\n return self.NO_SPACES.sub('', name.title())\n return self.ALLOW_SPACES.sub('', name)",
"def normalize_text(w):\n return str(w, \"utf-8\").lower().replace(\"-\", \"\")",
"def unscorize(s):\n return s.replace(\" \", \"_\")",
"def normalize_text(s):\n import string, re\n\n def remove_articles(text):\n regex = re.compile(r\"\\b(a|an|the)\\b\", re.UNICODE)\n return re.sub(regex, \" \", text)\n\n def white_space_fix(text):\n return \" \".join(text.split())\n\n def remove_punc(text):\n exclude = set(string.punctuation)\n return \"\".join(ch for ch in text if ch not in exclude)\n\n def lower(text):\n return text.lower()\n\n return white_space_fix(remove_articles(remove_punc(lower(s))))",
"def slugify(text: str) -> str:\n return text.strip().replace(', ', '-').replace(' ', '_').lower()",
"def format_text(text):\n\n\ttext = ' '.join(text).lower()\n\ttext = re.sub(r\"[^a-zA-Z.?!]\", \" \", text)\n\ttext = re.sub(r' +', ' ', text)\n\ttext = word_tokenize(text)\n\ttext = pos_tag(text)\n\n\treturn text",
"def normalize_text(text, lower=True, punctuations=',!?:;', chars_to_remove=r'\\(\\)\\[\\]\\{\\}\\<\\>\\#*\"-',\n char_to_make_whitespace='/'):\n if lower:\n text = text.lower()\n text = text.strip() # remove trailing spaces\n text = re.sub(r'([' + chars_to_remove + '])', '', text) # remove characters\n text = re.sub('([' + punctuations + '])', r' \\1', text) # add space before punctuations\n text = re.sub('([' + char_to_make_whitespace + '])', ' ', text) # replace with space\n text = re.sub(r'\\s+', ' ', text) # remove redundant spaces\n\n # treat points especially, for the model to be able to split sentences:\n text = re.sub(r'(\\. )', r' \\1', text) # add space only before points not part of abbreviations (e.g. U.S.A.)\n text = re.sub(r'\\.\\. \\.', r' ...', text) # join the ruined ellipsis ('...')\n if text[-1] == '.':\n text = text[:-1] + ' .'\n return text",
"def clean_text(data):\r\n data = data.replace('\\n', ' ') #remove new lines\r\n replace_l = [\"'\",'!','/','\\\\','=',',',':', '<','>','?','.','\"',')','(','|','-','#','*','+', '_'] #list of characters to remove\r\n data = data.lower() #Convert all the words to lower case\r\n for i in replace_l:\r\n data = data.replace(i,' ') #replace words with blank character\r\n return data #return clean data\r",
"def cleanSents(row, field):\n\n text = str(row[field]).lower()\n clean_text = re.sub('[^A-Za-z0-9]+', ' ', text).strip()\n return clean_text",
"def camel_case_to_readable(text: str) -> str:\n if text == 'id':\n return 'ID'\n return ''.join(' ' + char if char.isupper() else char.strip() for char in text).strip().title()",
"def normalize_space (text):\n return RE_WS.sub (' ', text.strip ())",
"def normalize_text(text):\n\n text = text.lower().strip().replace(\"\\n\", \" \").replace(\"\\r\", \"\")\n\n text = replace_money_token(text)\n text = replace_urls_token(text)\n text = fix_unicode_quotes(text)\n text = format_large_numbers(text)\n text = pad_punctuation(text)\n return text.strip()",
"def process_text(text, args):\n if args.uppercase:\n text = convert_to_uppercase(text)\n\n if args.spaces:\n text = add_spaces(text)\n\n if not args.uppercase and not args.spaces:\n text = add_spaces(text)\n\n return text",
"def reformat_text(self, text):\n xml = BeautifulSoup(text)\n self.remove_header_and_footer(xml)\n self.process_superscripts(xml)\n self.remove_footnotes(xml)\n text = xml.get_text() # Strip XML tags.\n text = self.join_hyphenated_words(text)\n text = self.remove_linebreaks(text)\n return text",
"def _remove_left_padded_special_chars(self, text: str) -> str:\n pattern = re.compile(\"\\ +[^A-Za-z0-9\\n]\")\n text = re.sub(pattern, \" \", text)\n return text",
"def cleanup_sentence(s):\n return re.sub(\"\\s+\", \" \", s.replace(\"\\t\", \"\").strip())",
"def uglify(text):\n return text.lower().replace(' ', '_')",
"def descorize(s):\n return s.replace(\"_\", \" \")",
"def despace(txt):\n pre, c = splitdefines(txt)\n pre = \"\\n\".join(pre)\n txt = \" \".join(c)\n txt = txt.replace(\"\\t\", \" \")\n txt = re.sub(r\"\\s+\", \" \", txt, flags=re.S)\n txt = re.sub(r\"([a-zA-Z0-9_])\\s+([^a-zA-Z0-9_\\s])\", r\"\\1\\2\", txt, flags=re.S)\n txt = re.sub(r\"([^a-zA-Z0-9_\\s])\\s+([a-zA-Z0-9_])\", r\"\\1\\2\", txt, flags=re.S)\n txt = re.sub(r\"([^a-zA-Z0-9_\\s])\\s+([^a-zA-Z0-9_\\s])\", r\"\\1\\2\", txt, flags=re.S)\n txt = re.sub(r\"([^a-zA-Z0-9_\\s])\\s+([^a-zA-Z0-9_\\s])\", r\"\\1\\2\", txt, flags=re.S)\n return pre + \"\\n\" + txt",
"def _remove_whitespaces(self, text: str) -> str:\n return \" \".join(re.sub(\"\\xa0\", \" \", str(text)).split())",
"def remove_space(text):\n for space in spaces:\n text = text.replace(space, ' ')\n text = text.strip()\n text = re.sub('\\s+', ' ', text)\n return text",
"def remove_space(text):\n for space in spaces:\n text = text.replace(space, ' ')\n text = text.strip()\n text = re.sub('\\s+', ' ', text)\n return text",
"def cleaning_up(self):\n # find all non-letter-no-digit except whitespace and \"-\"\n try:\n pattern = re.compile(\"[a-zA-Z0-9\\\\s\\\\-]\")\n badChars = re.sub(pattern, '', string.printable)\n logging.debug(\"Bad chars: {}\".format(badChars))\n # define translate table\n remap = dict.fromkeys(badChars)\n logging.debug(remap)\n table = str.maketrans(remap)\n result = \"\"\n with open(self.input) as infile:\n lines = (line.strip() for line in infile)\n for line in lines:\n if len(line) == 0:\n continue\n else:\n logging.debug(line)\n result = result + \" \" + line.translate(table)\n # Since the input file only has one line, we can use the following\n # code. For general use, I kept above code.\n # result = line.translate(remap)\n # break;\n except LookupError as e:\n logging.exception(\"Lookup Error: {}\".format(e.strerror))\n except IOError as e:\n logging.exception(\"IO Error: {}\".format(e.strerror))\n except:\n logging.exception(\"Unknown Error\")\n return result.strip()",
"def clean_training_text(txt):\n return re.sub('[^A-Za-z0-9]+', ' ', str(txt)).strip()",
"def normalize_text(text):\n # mystring.replace('\\n', ' ').replace('\\r', '')\n return text.replace('\\n', ' ').replace('\\r', '').lower()",
"def UnCamelCase(text, separator='_'):\n split = re.findall(r'[A-Z][a-z0-9]*', text)\n split = map(str.lower, split)\n split = list(split)\n\n words = []\n\n while len(split) > 0:\n word = split[0]\n split = split[1:]\n\n if len(word) == 1:\n while (len(split) > 0) and (len(split[0]) == 1):\n word += split[0]\n split = split[1:]\n\n words.append(word)\n\n return separator.join(words)",
"def space_out_camel_case(camel):\r\n chars = []\r\n\r\n for char in camel:\r\n if len(chars) >= 2 and chars[-1] != ' ':\r\n if char.isupper() and chars[-1].islower():\r\n chars.append(' ')\r\n elif char.islower() and chars[-1].isupper() and chars[-2].isupper():\r\n chars.insert(len(chars) - 1, ' ')\r\n\r\n chars.append(char)\r\n\r\n return ''.join(chars)",
"def non_letter_removal(text):\n return re.sub('[^a-zA-Z]', ' ', text)",
"def normalize_text(text):\n return normalize_case(normalize_punctuation(text))",
"def remove_hyphens(text):\n return re.sub(r'(\\w+)-(\\w+)-?(\\w)?', r'\\1 \\2 \\3', text)",
"def text_cleaning(self, text): # pylint: disable=no-self-use\n text = text.encode(\"ascii\", \"ignore\").decode(\"ascii\", \"ignore\")\n text = re.sub(r'[^\\x00-\\x7F]', '', text)\n text = text.replace(\"\\n\", \"\")\n text = text.replace(\"\\'\", \"'\")\n text = text.replace(\"\\\\\\\"\", '\\\"')\n text = text.replace(\"&\", \"&\")\n text = text.replace(\""\", '\\\"')\n text = text.replace(\" \", ' ')\n text = text.strip().lstrip().rstrip()\n desc_text = ' '.join(text.split())\n return desc_text",
"def text_prepare(text):\r\n\r\n replace_by_space_re = re.compile('[/(){}\\[\\]\\|@,;]')\r\n good_symbols_re = re.compile('[^0-9a-z #+_]')\r\n stopwords_set = set(stopwords.words('english'))\r\n\r\n text = text.lower()\r\n text = replace_by_space_re.sub(' ', text)\r\n text = good_symbols_re.sub('', text)\r\n text = ' '.join([x for x in text.split() if x and x not in stopwords_set])\r\n\r\n return text.strip()",
"def _prepare_text(body):\n text = body.lower()\n text = text.replace('\\n', ' ')\n regex = re.compile('[^a-z ]')\n return regex.sub('', text)",
"def _to_camel_case(text: str) -> str:\n return \"\".join(word.title() for word in text.split(\"_\"))",
"def preprocess_text(text: str):\n # remove trailing/leading whitespace\n text = text.strip()\n\n # .lower() depends on model so doing this in collate function\n\n # TODO other preprocessing - punctuation/ascii etc.\n text = text.replace(\"\\\\n\", \" \")\n # text = text.replace(\"\\\\'\", \"\\'\")\n # text = text.replace('\\\\\"', \"\\'\")\n text = text.encode('ascii', 'ignore').decode()\n\n return text",
"def clean_whitespace(text):\n return text\n #return re.sub(r'\\r\\n|\\n', \"\\t\", text)",
"def fixString(string):\n string = re.sub(r\"[^A-Z-]\", \"\", string)\n string = string.strip(\"\\n\")\n return string",
"def norm_text(self, text):\n\n # encode to apply utf-8 and decode to remove initial 'b'\n text = str(text.encode('utf-8').decode('utf-8'))\n text = text.lower()\n\n # Clean the text\n text = re.sub(r\"[^A-Za-z0-9^,!.\\/'+-=]\", \" \", text)\n text = re.sub(r\"what's\", \"what is \", text)\n text = re.sub(r\"\\'s\", \" \", text)\n text = re.sub(r\"\\'ve\", \" have \", text)\n text = re.sub(r\"can't\", \"cannot \", text)\n text = re.sub(r\"n't\", \" not \", text)\n text = re.sub(r\"i'm\", \"i am \", text)\n text = re.sub(r\"\\'re\", \" are \", text)\n text = re.sub(r\"\\'d\", \" would \", text)\n text = re.sub(r\"\\'ll\", \" will \", text)\n text = re.sub(r\"\\.\", \" \", text)\n text = re.sub(r\"!\", \" ! \", text)\n text = re.sub(r\"\\/\", \" \", text)\n text = re.sub(r\"\\^\", \" ^ \", text)\n text = re.sub(r\"\\+\", \" + \", text)\n text = re.sub(r\"\\-\", \" - \", text)\n text = re.sub(r\"\\=\", \" = \", text)\n text = re.sub(r\"'\", \" \", text)\n text = re.sub(r\"(\\d+)(k)\", r\"\\g<1>000\", text)\n text = re.sub(r\":\", \" : \", text)\n text = re.sub(r\" e g \", \" eg \", text)\n text = re.sub(r\" b g \", \" bg \", text)\n text = re.sub(r\" u s \", \" american \", text)\n text = re.sub(r\"\\0s\", \"0\", text)\n text = re.sub(r\" 9 11 \", \"911\", text)\n text = re.sub(r\"e - mail\", \"email\", text)\n text = re.sub(r\"j k\", \"jk\", text)\n text = re.sub(r\"\\s{2,}\", \" \", text)\n return text",
"def remove_punctations_fun(self): \n self.doc = re.sub('[^a-zA-Z0-9]', ' ', self.doc)",
"def remove_apostrophes(text: str) -> str:\n apostrophes_re = re.compile(\"'\")\n return apostrophes_re.sub(' ', text)",
"def convert_underscore_to_camel_case(text):\n words = text.split('_')\n words = [word.capitalize() for word in words]\n return ''.join(words)"
] | [
"0.6987627",
"0.69115055",
"0.68299866",
"0.68170524",
"0.6792189",
"0.6779221",
"0.6683785",
"0.6663541",
"0.66497254",
"0.661544",
"0.66122067",
"0.6598049",
"0.65703285",
"0.6562379",
"0.6545204",
"0.6541442",
"0.6536878",
"0.6534535",
"0.6511388",
"0.64796376",
"0.6465856",
"0.64221936",
"0.63998723",
"0.639693",
"0.6344568",
"0.6332369",
"0.6332369",
"0.6328018",
"0.63158756",
"0.6308443",
"0.6308443",
"0.6308443",
"0.6308443",
"0.6308443",
"0.6308443",
"0.62961644",
"0.62828755",
"0.6272979",
"0.62679833",
"0.62613577",
"0.6260696",
"0.6253315",
"0.62456936",
"0.6244142",
"0.62331355",
"0.62297845",
"0.62193704",
"0.62191504",
"0.6206622",
"0.6198456",
"0.6183537",
"0.6173344",
"0.61726576",
"0.6164008",
"0.6154433",
"0.61544096",
"0.6132994",
"0.61290294",
"0.6118313",
"0.61154294",
"0.6112556",
"0.6105239",
"0.61009526",
"0.6097355",
"0.6097216",
"0.60936624",
"0.60932463",
"0.6092058",
"0.60829645",
"0.6081041",
"0.6079503",
"0.607718",
"0.60746014",
"0.60691655",
"0.6064514",
"0.6056049",
"0.60519415",
"0.60517395",
"0.60498965",
"0.6045655",
"0.6045655",
"0.6039783",
"0.6038078",
"0.60374224",
"0.6036983",
"0.6026622",
"0.6025686",
"0.6024091",
"0.60235465",
"0.60202307",
"0.6020034",
"0.6019643",
"0.60102993",
"0.600499",
"0.6002697",
"0.59945834",
"0.5987414",
"0.5986341",
"0.59862375",
"0.59794074"
] | 0.7590578 | 0 |
Returns the text contents of the system clip board. | def _get_clipboard_text():
clipboard = Clipboard()
return clipboard.get_system_text() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getTextFromClipboard(self) -> str:\n cb = self.qtApp.clipboard()\n if cb:\n QtWidgets.QApplication.processEvents()\n return cb.text()\n g.trace('no clipboard!')\n return ''",
"def read_all_screen(self):\n full_text = \"\"\n for ypos in range(self.model_dimensions[\"rows\"]):\n full_text += self.string_get(ypos + 1, 1, self.model_dimensions[\"columns\"])\n return full_text",
"def getText(self):\n if self.app.children:\n return self.app.childActive.source.GetText()\n else:\n return ''",
"def get_console_text(self):\n console_text_api = '/consoleText'\n return self._api_request(self.url + console_text_api)",
"def _get_pad_content(self):\n self.ensure_one()\n return self.pad_get_content(self.description_pad)",
"def text(self):\n text = ''\n for run in self.runs:\n text += run.text\n return text",
"def text_output(self):\n print(self.board)\n print()",
"def getText():",
"def getText():",
"def getText():",
"def getText():",
"def getText():",
"def text(self) -> str:\n return self._impl.get_text()",
"def get_text(self):\n text_element = self.page.find(id=self.text_location)\n return text_element.get_text()",
"def getText(self):\n return _libsbml.TextGlyph_getText(self)",
"def get_text(self):\n\n return self.output['text']",
"def _get_edit_text(hwnd):\n buf_size = win32gui.SendMessage(hwnd, win32con.WM_GETTEXTLENGTH, 0, 0)\n buf_size += 1 # don't forget that null character boys...\n buffer = win32gui.PyMakeBuffer(buf_size)\n # odd, we're telling them how big the text is that they're giving\n # back to us\n win32gui.SendMessage(hwnd, win32con.WM_GETTEXT, buf_size, buffer)\n # don't need the null character now for Python\n return buffer[:buf_size]",
"def get_text(self):\n return self.output.getvalue()",
"def text(self):\n return self.content",
"def get_visible_text(self):\n return self.browser.find_element_by_xpath(\"//body\").text",
"def get_text(self):\n rc = \"\"\n for node in self.node.childNodes:\n if node.nodeType == node.TEXT_NODE:\n rc = rc + node.data\n return rc",
"def get_text(self):\n txt = self.lang.tool.image_to_string(\n self.image,\n lang=self.lang,\n builder=pyocr.builders.TextBuilder()\n )\n return txt",
"def copy_to_clipboard(self, txt):\r\n cmd = 'echo \"' + txt.strip() + '\"|clip'\r\n return subprocess.check_call(cmd, shell=True)",
"def text(self):\n return \"\\n\".join(self.raw_text)",
"def get_content(self):\r\n view = self.window.active_view()\r\n selection = \"\"\r\n for region in view.sel():\r\n # If no selection, use the entire file as the selection\r\n if region.empty():\r\n selection = sublime.Region(0, view.size())\r\n else:\r\n selection = region\r\n return view.substr(selection)",
"def GetText(self):\r\n \r\n return self._text",
"async def getDisplayText(self):\n display_text = await self.director.getItemVariableValue(\n self.item_id, \"DISPLAY_TEXT\"\n )\n return display_text",
"def get_text(self):",
"def currentText(self, toNative=True):\n return self.text(toNative=toNative)",
"def contents(self) -> str:\n _args: list[Arg] = []\n _ctx = self._select(\"contents\", _args)\n return _ctx.execute_sync(str)",
"def clipboard(self, text = None):\n if text == None:\n response = self._fetch_json('/api/clipboard')\n return response['content']\n else:\n postdata = codecs.encode(json.dumps({ 'content': text }), 'utf-8')\n self._urlopen('/api/clipboard', postdata).read()",
"def text(self) -> str:",
"def get_text(data_path):\n\tp = get_full_path(data_path)\n\tf = open(p, 'r')\n\tcontent = f.read()\n\tf.close()\n\treturn content",
"def get_text(self) -> str:\n return self.text",
"def get_text(self) -> str:\n return self._text",
"def get_text(self):\n return self.text",
"def getText(self):\n\t\treturn self.bsource.get_text() # \"no value for 'self' in unbound method call\" pylint error. Still runs. Idk. ",
"def getTextFromSpeak(self):\n raise NotImplementedError",
"def getText(self):\n return self._text",
"def getText(self):\n return self.text",
"def getText(self):\n return self.text",
"def get_message(self):\n message = \"\"\n for line in self.lines:\n message += line.show()\n\n return message",
"def text(self):\n\t\treturn ' '.join([self.write_components[x] for x in self.write_components])",
"def read_from_clipboard():\n\n return pyperclip.paste()",
"def get_text(self):\n return self.res.text",
"def text(self):\n return self._text",
"def text(self):\n return self._text",
"def text(self):\n return self._text",
"def text(self):\n return self._text",
"def text(self):\n return self._text",
"def text(self):\n return self._text",
"def text(self) -> str:\n return self.load().open().read().decode('utf-8')",
"def contents(self) -> str:\n return pulumi.get(self, \"contents\")",
"def raw_text(self):\n return self._raw_text",
"def 取所有项目文本(self): # real signature unknown; restored from __doc__\n return self.GetStrings()",
"def content(self):\n return \"\".join(self.lines)",
"def GetText(self):\r\n\r\n return self._text",
"def text(self):\n # type: () -> str\n return self._text",
"def text(self) -> str:\n return self._text",
"def text(self):\n return self.__text",
"def getText(self):",
"def Text(self):\n return self._text",
"def get_text(self):\n def get_text_for_node(node):\n if node.type == CaptionNode.TEXT:\n return node.content\n if node.type == CaptionNode.BREAK:\n return u'\\n'\n return u''\n text_nodes = [get_text_for_node(node) for node in self.nodes]\n return u''.join(text_nodes).strip()",
"def get_text(self) -> List[str]:\n return self.__texts",
"def read(self):\n return self.browser.text(self)",
"def obtain_text():\n pass",
"def get_text(self):\n return self.row",
"def text(self):\n return self.original.text",
"def __getAsciiString(self):\n lines = []\n horizontalLine = ('-' * (26))\n lines.append(horizontalLine)\n for row in self.board:\n rowLine = '|'\n for col in row:\n if col == -1:\n col = 'O'\n if col == 0:\n col = '-'\n if col == 1:\n col = 'X'\n rowLine = rowLine + ' ' + col.__str__() + ' |'\n lines.append(rowLine)\n lines.append(horizontalLine)\n return '\\n'.join(lines)",
"def get_readme_text(self):\n if self.readme_file:\n readme_text = self.readme_file.read_text(encoding=\"utf-8\")\n else:\n readme_text = \"# foxBMS 2 GUI\"\n return readme_text",
"def text(self) -> str:\n return self.__text",
"def visible_text(self):\n\n return str(self.element().text).strip() if self.exists() else ''",
"def text(self):\n txt = self.web_element.text\n return txt",
"def contents(self) -> str:\n return(self._contents)",
"def getText(self):\n return self.graph.get(\"__txt\", '')",
"def _generateDisplayedText(self, obj, **args ):\n result = self._generateSubstring(obj, **args)\n if result:\n return result\n\n displayedText = self._script.utilities.displayedText(obj)\n if not displayedText:\n return []\n\n return [displayedText]",
"def contentRaw(request):\n paste = Paste.get(request.matchdict['idContent'])\n # TODO type/mime\n return paste.content",
"def plain_text(self) -> str:\n return pulumi.get(self, \"plain_text\")",
"def text(self):\n logger.debug(\"Getting text property\")\n return self.web_element.text",
"def get_text(self):\n try:\n raw_text = self.db.blpop(\"soq_texts\")\n text = json.loads(raw_text[1])\n except Exception as e:\n print(\"An error occurred while reading from text queue:\", e)\n return None\n return text",
"def text(self):\n return self.__r.t.text",
"def get_text(downgrade_titles=False):",
"def text(self, just_text=False):\n lines = []\n for node, data in self.traverse():\n if just_text or data['has_text'] or data['pad']:\n lines += data['text']\n else:\n lines += [data['meta']] + data['title'] + data['text']\n return flatten(lines)",
"def get_PoemText(self):\n return self.text if self.text else \"No Text Yet\\n\"",
"def text(self):\n return self._combo.currentText()",
"def text(self):\n return self.label.text()",
"def current_document_text(self):\n return self.current_document.lines",
"def get_file_content(self):\n return \"\\n\".join(self._vim.current.buffer)",
"def text(self):\n return self.full_text",
"def getRawText(self):\n return self.graph.get(\"__rawTxt\", '')",
"def get_text(self):\n return self.text[:500]",
"def get_text(self):\n return self.get_property('text')",
"def sessionDescription(self):\n return self.textEdit.toHtml()",
"def get_text(self):\n return ''.join(self.result)",
"def get_text(self, course):\r\n return views.progress(self.request, course.id.to_deprecated_string(), self.user.id).content",
"def getText(self):\r\n return \"\"",
"def get_plain_text(self):\n raise NotImplementedError(\"get_plain_text is not implemented\")",
"def current_content(self):\n return self.host.open(self.remote_path, 'rb', use_sudo=self.use_sudo).read()",
"def text_content(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"text_content\")",
"def cmd_get(self):\n return self.text"
] | [
"0.65942186",
"0.6435534",
"0.63300586",
"0.61199254",
"0.6119129",
"0.61073905",
"0.6069073",
"0.5950574",
"0.5950574",
"0.5950574",
"0.5950574",
"0.5950574",
"0.58960754",
"0.58941376",
"0.588573",
"0.5876206",
"0.5874571",
"0.5853478",
"0.58394194",
"0.58292186",
"0.58160985",
"0.58100575",
"0.5805365",
"0.57969093",
"0.5796902",
"0.57928896",
"0.5784233",
"0.5780899",
"0.5770279",
"0.573885",
"0.5732732",
"0.5732032",
"0.57200223",
"0.571296",
"0.5705785",
"0.56743693",
"0.56708515",
"0.5651341",
"0.564577",
"0.5642773",
"0.5642773",
"0.563954",
"0.5602865",
"0.55981326",
"0.55980825",
"0.5578705",
"0.5578705",
"0.5578705",
"0.5578705",
"0.5578705",
"0.5578705",
"0.557299",
"0.5569069",
"0.5568515",
"0.5563491",
"0.5540819",
"0.5538459",
"0.55025876",
"0.54999447",
"0.54995143",
"0.54805773",
"0.54771364",
"0.5471978",
"0.5459917",
"0.545621",
"0.54355764",
"0.54344904",
"0.5430881",
"0.5419185",
"0.541525",
"0.5410124",
"0.54043114",
"0.5402236",
"0.5396876",
"0.53912455",
"0.53847766",
"0.53807926",
"0.5374452",
"0.5365599",
"0.53542715",
"0.534969",
"0.53260714",
"0.5322556",
"0.5308407",
"0.5292118",
"0.5279665",
"0.52740693",
"0.52727103",
"0.5261839",
"0.5258438",
"0.5253154",
"0.5243577",
"0.52412516",
"0.5230082",
"0.5230051",
"0.5219799",
"0.521822",
"0.5213559",
"0.52133334",
"0.52104527"
] | 0.68701446 | 0 |
Selects wordCount number of words to the left of the cursor and cuts them out of the text. Returns the text from the system clip board. | def _select_and_cut_text(wordCount):
clipboard = Clipboard()
clipboard.set_system_text('')
Key('cs-left/3:%s/10, c-x/10' % wordCount).execute()
return clipboard.get_system_text() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cut_in_words(self,linea):\n length = 0\n res = ''\n limit_screen = 30\n for word in linea.split(' '):\n if length + len(word) <= limit_screen:\n new_word = word + ' '\n length += len(new_word)\n else:\n new_word = '\\n' + word + ' '\n length = len(new_word) - 2 #-2 para no tener en cuenta el \\n\n res += new_word\n return res",
"def truncate(text, words=25):\n return ' '.join((text).split()[:words])",
"def getMarked(self):\n if not self.selection.isSelection():\n return u\"\"\n sm1, sm2 = self.selection.order(self.selection.selectionMark,\n self.selection.selectionMark2)\n w1 = sm1[0]\n w2 = sm2[0]\n cx1 = sm1[1]\n cx2 = sm2[1]\n if (w1 == w2):\n return w1.string[cx1:cx2]\n # Get the word fragments at the beginning and end of the selection\n snip1 = w1.string[cx1:]\n snip2 = w2.string[:cx2]\n tl1 = w1.tline\n wx1 = tl1.twords.index(w1)\n tl2 = w2.tline\n wx2 = tl2.twords.index(w2)\n # Start the text string with the format of the first line\n text = tl1.para.getFormat() + snip1\n # then get all intervening words\n if (tl1 == tl2): # only 1 line is involved\n # get words from wx1+1 to wx2-1 (incl.)\n for w in tl1.twords[wx1+1:wx2]:\n text += u\" \" + w.string\n ch = u\" \"\n\n else: # deletion block covers >1 line\n # get words from wx1+1 to end of paragraph\n for w in tl1.twords[wx1+1:]:\n text += u\" \" + w.string\n # get all the intervening lines\n while True:\n para = tl1.para\n tl1 = self.rsubject.nextLine(tl1)\n if (tl1.para == para):\n text += u\" \"\n else:\n text += u\"\\n\" + tl1.para.getFormat()\n if (tl1 == tl2): break\n text += tl1.getText()\n\n ch = u\"\"\n # Add the remaining words in tl2 up to w2-1\n for w in tl2.twords[:wx2]:\n text += ch + w.string\n ch = u\" \"\n\n # Add the fragment of the last marked word\n return text + ch + snip2",
"def copy_text(self):\n self.window.clipboard_clear()\n if self.tab_control.index(\"current\") == 0:\n try:\n self.text = self.textbox.get(\"sel.first\", \"sel.last\")\n except tk.TclError:\n self.text = self.textbox.get(\"1.0\", tk.END)\n self.window.clipboard_append(self.text)\n elif self.tab_control.index(\"current\") == 1:\n self.window.clipboard_append(self.words)",
"def Left(text, number):\n return text[:number]",
"def _(event):\n pos = line.document.find_start_of_previous_word(count=event.arg)\n if pos:\n deleted = line.delete_before_cursor(count=-pos)\n line.set_clipboard(ClipboardData(deleted))",
"def cut_text(value, length): # Only one argument.\n return value[0:length]",
"def get_first_k_words(text: str, num_words: int) -> str:\n words = text.split()\n if num_words >= len(text):\n return text\n\n return ' '.join(words[:num_words])",
"def truncate(self):\n return Truncator(self.content).words(\n self.max_words, self.more_string, html=True)",
"def cut_text(text):\n for phrase in TERMINALS:\n if phrase in text:\n return text[:text.index(phrase)]\n\n SavedSource(label=LABEL, subject='cut_text', body=text).put()\n return text",
"def extract_context(words, mask_index, window):\n total_length = len(words)\n half_window = int(window / 2)\n assert 0 <= mask_index < total_length\n return words[max(0, mask_index - half_window):min(total_length, mask_index + half_window + 1)]",
"def show_word(self):\n self.display_word = len(self.chosen_word) * \"_ \"\n Donatello.draw_word(self.display_word)\n return self.display_word",
"def characters_left(self):\r\n return self.max_chars - len(self.variable.get())",
"def crop_title(self):\n top_line_and_words = self.get_line_and_word_boxes()\n top_line_and_words = [boxes for boxes in self.get_line_and_word_boxes() if boxes.content not in self.ocr_noise]\n if top_line_and_words:\n topleft, bottomright = top_line_and_words[0].position\n print(topleft, bottomright)\n if topleft[1] <= self.title_position:\n self.image = self.image.crop((0, bottomright[1], self.width, self.height))\n self.update_img()\n self.update_size()",
"def pop_word(self, index: int=None) -> Optional[str]:\n if len(self.words) > 0:\n word = self.words.pop() if index is None else self.words.pop(index)\n if word == ' ':\n self.spaces_width -= self.space_width\n else:\n self.width -= self.get_word_width(word)\n return word",
"def sentence(num_words=20, chars=''):\r\n word_list = _Book.get_text().split()\r\n words = ' '.join(_random.choice(word_list) for x in\r\n xrange(num_words))\r\n return (words if not chars else words[:chars])",
"def remove_longer_words(text):\n return \" \".join([word for word in str(text).split() if len(word) <= 12])",
"def words_before_index(text, idx):\n while text[idx] != ' ':\n idx -= 1\n if idx <= 0:\n return 0\n n_words = len(text[:idx].split(' '))\n return n_words",
"def get_word_before(self):\n if self.word_before:\n return self.word_before\n\n lines = self.get_lines()\n row, col = position_from_utf16(lines, self.position)\n line = lines[row]\n start = line[:col]\n\n word_start_match = RE_START_WORD.search(start)\n\n if not word_start_match:\n self.word = \"\"\n self.word_before = \"\"\n else:\n substart = start[: word_start_match.start()].rstrip()\n word_before_match = RE_WORD_BEFORE.findall(substart)[0]\n\n self.word = word_start_match[0]\n self.word_before = word_before_match[0]\n return self.word_before",
"def word_wrap(self):\n textArea = self.get_current()\n if self.wrap.get() == 0:\n textArea.config(wrap='none')\n elif self.wrap.get() == 1:\n textArea.config(wrap='word')",
"def cut_words(value, arg):\n\treturn value.replace(arg, '')",
"def squash_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n text = _cleanup_text(cutText)\n newText = ''.join(text.split(' '))\n if endSpace:\n newText = newText + ' '\n newText = _expand_after_special_chars(newText)\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)",
"def getWordUnderCursor():\n\treturn vim.eval('expand(\"<cword>\")')",
"def count_selected_chars(self, event=None):\n try:\n textArea = self.get_current()\n chars = textArea.count(\"sel.first\", \"sel.last\")\n line_breaks = textArea.count(\"sel.first\", \"sel.last\", \"lines\")\n if line_breaks:\n if line_breaks[0] == 1:\n self.selected_chars_lbl.config(text=f\"{chars[0]} chars, {line_breaks[0]} line break\")\n elif line_breaks[0] > 1:\n self.selected_chars_lbl.config(text=f\"{chars[0]} chars, {line_breaks[0]} line breaks\")\n else:\n if chars[0] == 1:\n self.selected_chars_lbl.config(text=f\"{chars[0]} char selected\")\n else:\n self.selected_chars_lbl.config(text=f\"{chars[0]} chars selected\")\n except:\n self.selected_chars_lbl.config(text=\"--------------\")",
"def displayed_words(self):\n return (len(strip_tags(self.preview).split()) -\n (len(self.more_string.split()) * int(not bool(self.lead))))",
"def findBestShift(wordList, text):\n ### TODO\n max_words = 0\n best_shift = 0\n lis = []\n for i in range(0,26):\n lis = applyShift(text, i).split(' ')\n count = 0\n for j in lis:\n if isWord(wordList, j):\n count += 1\n if count > max_words:\n max_words = count\n best_shift = i\n \n return best_shift",
"def chosen():\n wordList = loadWords()\n w = random.choice(wordList)\n word = w[:-1]\n return word",
"def truncate_description(description):\n if len(description) <= 160 :\n return description\n\n cut_desc = \"\"\n character_counter = 0\n for i, letter in enumerate(description) :\n character_counter += 1\n if character_counter > 160 :\n if letter == ' ' :\n return cut_desc+\"...\"\n else :\n return cut_desc.rsplit(' ',1)[0]+\"...\"\n cut_desc += description[i]\n return cut_desc",
"def _get_clipboard_text():\n clipboard = Clipboard()\n return clipboard.get_system_text()",
"def _get_edit_text(hwnd):\n buf_size = win32gui.SendMessage(hwnd, win32con.WM_GETTEXTLENGTH, 0, 0)\n buf_size += 1 # don't forget that null character boys...\n buffer = win32gui.PyMakeBuffer(buf_size)\n # odd, we're telling them how big the text is that they're giving\n # back to us\n win32gui.SendMessage(hwnd, win32con.WM_GETTEXT, buf_size, buffer)\n # don't need the null character now for Python\n return buffer[:buf_size]",
"def GetSelection(self):\n # STC HELL\n # Translate the UTF8 byte offsets to unicode\n start, end = super(EditraBaseStc, self).GetSelection()\n utf8_txt = self.GetTextUTF8()\n if start != 0:\n start = len(ed_txt.DecodeString(utf8_txt[0:start], 'utf-8'))\n if end != 0:\n end = len(ed_txt.DecodeString(utf8_txt[0:end], 'utf-8'))\n del utf8_txt\n return start, end",
"def find_5_words(self):\n del self.words[:]\n text = self.textbox.get(\"1.0\", tk.END)\n self.file.text = text\n self.words = self.file.find_top_5_words(self.language.get())\n words_count = len(self.words)\n if words_count == 0:\n self.first_word.configure(\"All words are stop words.\")\n if words_count >= 1:\n self.first_word.configure(text=self.words[0])\n if words_count >= 2:\n self.second_word.configure(text=self.words[1])\n if words_count >= 3:\n self.third_word.configure(text=self.words[2])\n if words_count >= 4:\n self.fourth_word.configure(text=self.words[3])\n if words_count == 5:\n self.fifth_word.configure(text=self.words[4])\n msg.showinfo(title=\"top 5 words\", message=\"5 words are found, check: Top 5 words\")",
"def wrapTextFor(player, text):\n return wrapTextAt( text, getUserScreenWidth( player ) )",
"def get_word_window(self, pattern, tokens, constraints):\n split_pattern = pattern.split()\n if len(split_pattern) > 1:\n textsnippets = self.__get_word_window_more_words_help(split_pattern, tokens, constraints)\n else:\n textsnippets = self.__get_word_window_one_word_help(pattern, tokens, constraints)\n print(textsnippets)\n return textsnippets",
"def clip(st,length):\n if len(st) > length:\n return st[:length] + \"...\"\n else:\n return st",
"def _(event):\n buffer = event.current_buffer\n buffer.cursor_position += buffer.document.find_previous_word_beginning(count=event.arg) or 0",
"def delete_words(self):\n self.word_1.delete(0, tk.END)\n self.word_2.delete(0, tk.END)\n self.word_3.delete(0, tk.END)\n self.word_4.delete(0, tk.END)\n self.word_5.delete(0, tk.END)",
"def word_dropout(self, x, l):\n if self.params.word_dropout == 0:\n return x, l\n assert 0 < self.params.word_dropout < 1\n\n # define words to drop\n # eos = self.params.eos_index\n # assert (x[0] == eos).sum() == l.size(0)\n keep = np.random.rand(x.size(0) - 1, x.size(1)) >= self.params.word_dropout\n keep[0] = 1 # do not drop the start sentence symbol\n\n sentences = []\n lengths = []\n for i in range(l.size(0)):\n # assert x[l[i] - 1, i] == eos\n words = x[:l[i] - 1, i].tolist()\n # randomly drop words from the input\n new_s = [w for j, w in enumerate(words) if keep[j, i]]\n # we need to have at least one word in the sentence (more than the start / end sentence symbols)\n if len(new_s) == 1:\n new_s.append(words[np.random.randint(1, len(words))])\n # new_s.append(eos)\n # assert len(new_s) >= 3 and new_s[0] == eos and new_s[-1] == eos\n sentences.append(new_s)\n lengths.append(len(new_s))\n # re-construct input\n l2 = torch.LongTensor(lengths)\n x2 = torch.LongTensor(l2.max(), l2.size(0)).fill_(self.params.pad_index)\n for i in range(l2.size(0)):\n x2[:l2[i], i].copy_(torch.LongTensor(sentences[i]))\n return x2, l2",
"def paste(text_len, copy_len):\n global counter\n counter += 1\n return (text_len + copy_len)",
"def ChopText(dc, text, max_size):\r\n \r\n # first check if the text fits with no problems\r\n x, y, dummy = dc.GetMultiLineTextExtent(text)\r\n \r\n if x <= max_size:\r\n return text\r\n\r\n textLen = len(text)\r\n last_good_length = 0\r\n \r\n for i in xrange(textLen, -1, -1):\r\n s = text[0:i]\r\n s += \"...\"\r\n\r\n x, y = dc.GetTextExtent(s)\r\n last_good_length = i\r\n \r\n if x < max_size:\r\n break\r\n\r\n ret = text[0:last_good_length] + \"...\" \r\n return ret",
"def clean_5_words(self):\n self.first_word.configure(text=\"\")\n self.second_word.configure(text=\"\")\n self.third_word.configure(text=\"\")\n self.fourth_word.configure(text=\"\")\n self.fifth_word.configure(text=\"\")",
"def remaining_words(self):\n return self.total_words - self.displayed_words",
"def substr(self, length):\r\n if self.cur + length > len(self.text):\r\n return self.text[self.cur:]\r\n else:\r\n return self.text[self.cur:self.cur+length]",
"def word_blank(self, x, l):\n if self.params.word_blank == 0:\n return x, l\n assert 0 < self.params.word_blank < 1\n\n # define words to blank\n eos = self.params.eos_index\n assert (x[0] == eos).sum() == l.size(0)\n keep = np.random.rand(x.size(0) - 1, x.size(1)) >= self.params.word_blank\n keep[0] = 1 # do not blank the start sentence symbol\n\n sentences = []\n for i in range(l.size(0)):\n assert x[l[i] - 1, i] == eos\n words = x[:l[i] - 1, i].tolist()\n # randomly blank words from the input\n new_s = [w if keep[j, i] else self.params.mask_index for j, w in enumerate(words)]\n new_s.append(eos)\n assert len(new_s) == l[i] and new_s[0] == eos and new_s[-1] == eos\n sentences.append(new_s)\n # re-construct input\n x2 = torch.LongTensor(l.max(), l.size(0)).fill_(self.params.pad_index)\n for i in range(l.size(0)):\n x2[:l[i], i].copy_(torch.LongTensor(sentences[i]))\n return x2, l",
"def get_words(self, first=10):\n return get_occurences(self.lemmatized_words)[:first]",
"def findBestShift(wordList, text):\n \n maxReal=0\n shift=0\n shiftcpy=0\n while shift<26:\n s=0\n m=applyShift(text, shift)\n \n list1=m.split(' ')\n \n for i in range(0,len(list1)):\n \n y=isWord(wordList, list1[i])\n \n if y==True:\n \n s=s+1\n\n if s>maxReal:\n maxReal=s\n shiftcpy=shift\n\n shift=shift+1\n\n return shiftcpy",
"def words_string(self):\n return ' '.join(self.words).replace(' - ', '')",
"def delete(self):\n if not self.selection.isSelection(): return False\n\n # Save the current text\n self.saveText()\n\n sm1, sm2 = self.selection.order(self.selection.selectionMark,\n self.selection.selectionMark2)\n w1 = sm1[0]\n w2 = sm2[0]\n cx = sm1[1]\n self.edCursor.setPos(w1, cx)\n # Join words before and after selection\n w1.setString(w1.string[:cx] + w2.string[sm2[1]:])\n # Delete all intervening words, and w2\n tl1 = w1.tline\n wx1 = tl1.twords.index(w1)\n tl2 = w2.tline\n wx2 = tl2.twords.index(w2)\n if (tl1 == tl2): # only delete from 1 line\n # delete words from wx1+1 to wx2 (incl.)\n for w in tl1.twords[wx1+1:wx2+1]:\n w.delete()\n del(tl1.twords[wx1+1:wx2+1])\n\n else: # deletion block covers >1 line\n # delete words from wx1+1 to end of paragraph\n for w in tl1.twords[wx1+1:]:\n w.delete()\n del(tl1.twords[wx1+1:])\n # delete all the intervening lines\n while True:\n tl = self.rsubject.nextLine(tl1)\n if (tl == tl2): break\n self.rsubject.deleteTLine(tl)\n\n # Move remaining words after w2 in tl2 to end of tl1\n for w in tl2.twords[wx2+1:]:\n tl1.insert(w)\n del(tl2.twords[wx2+1:])\n # Delete tl2\n self.rsubject.deleteTLine(tl2)\n\n self.selection.clearSelection()\n\n self.rsubject.renderShortened(w1)\n\n self.edCursor.setPos(w1, cx)\n return True",
"def findBestShift(wordList, text):\n max_real_words = 0\n best_shift = 0\n for i in range(26):\n word_list = applyShift(text, i).split(\" \")\n temp_n_words = 0\n for word in word_list:\n if isWord(wordList, word):\n temp_n_words += 1\n if temp_n_words > max_real_words:\n max_real_words = temp_n_words\n best_shift = i\n return best_shift",
"def get_cursor_pos(self):\n return (self.text_maker.pos[0] + 9, self.text_maker.pos[1] + 120 + 8)",
"def copy_selection( self, ):\n try:\n data = self.msg_text.get( \"sel.first\", \"sel.last\" )\n pyperclip.copy( data )\n except Exception as exception: # if no selection\n pass",
"def _(event):\n buffer = event.current_buffer\n buffer.cursor_position += buffer.document.find_next_word_beginning(count=event.arg) or \\\n buffer.document.get_end_of_document_position()",
"def word_under_cursor_pos(self):\n self._vim.command('normal e')\n end = self.cursor()\n self._vim.command('normal b')\n beg = self.cursor()\n return beg, end",
"def get_text(self):\n return self.text[:500]",
"def update_current_word(self):\n self.current_word = self.current_row.pop(0) + \" \"",
"def first_word(text):\n # your code here\n space = ' '\n if space in text:\n find_space = text.find(space)\n first_word = text[0:find_space]\n else:\n symbols_counter = len(text)\n first_word = text[0:symbols_counter]\n return first_word",
"def print_first_word(words):\n word = words.pop(0)\n return word",
"def cut_words(self, doc):\n return [word for word in jieba.cut(doc) if not word in self.stopwords]",
"def clip_tokenize_single(text: str) -> torch.LongTensor:\n return clip.tokenize(text)[0]",
"def unlight(self):\n\n if self.selected_text_file is None:\n return\n if self.selected_text_file[FULLTEXT] is None:\n return\n cursor = self.ui.textBrowser.textCursor()\n try:\n cursor.setPosition(0, QtGui.QTextCursor.MoveMode.MoveAnchor)\n cursor.setPosition(len(self.selected_text_file[FULLTEXT]) - 1, QtGui.QTextCursor.MoveMode.KeepAnchor)\n cursor.setCharFormat(QtGui.QTextCharFormat())\n except Exception as e:\n logger.debug((str(e) + \"\\n unlight, text length\" + str(len(self.ui.textBrowser.toPlainText()))))",
"def wrap(text, width):\n retstr = \"\"\n for word in text.split(' '):\n if len(retstr)-retstr.rfind('\\n')-1 + len(word.split('\\n',1)[0]) >= width:\n retstr += ' \\n' + word\n else:\n retstr += ' ' + word\n return retstr",
"def select_word(options):\n options = list(set(options)) # Remove duplicate words\n selection = []\n if len(options) > 10:\n for n in range(10):\n word = random.choice(options)\n while word in selection:\n word = random.choice(options)\n selection.append(word)\n else:\n selection = options\n # Print selection options\n for n in range(len(selection)):\n index = n + 1\n print(\"{}. {}\".format(index, selection[n]))\n choice = input(\"Choice: \")\n if choice == 'x':\n sys.exit()\n if choice == 'p':\n choice = input(\"Word selection: \")\n word = selection[int(choice) - 1]\n return add_punctuation(word)\n choice = int(choice) - 1\n word = selection[choice]\n return word",
"def longest_words(self, n=10):\n return sorted(set(self.text), key=len, reverse=True)[:n]",
"def get_window_text_w(h_wnd):\n _get_window_text_w = WINDLL.user32.GetWindowTextW\n _get_window_text_w.argtypes = [HWND, LPWSTR, ctypes.c_int]\n _get_window_text_w.restype = ctypes.c_int\n\n n_max_count = 0x1000\n dw_char_size = SIZE_OF(CHAR)\n while 1:\n lp_string = ctypes.create_unicode_buffer('', n_max_count)\n n_count = _get_window_text_w(h_wnd, lp_string, n_max_count)\n if n_count == 0:\n raise ctypes.WinError()\n if n_count < n_max_count - dw_char_size:\n break\n n_max_count += 0x1000\n return lp_string.value",
"def currentSelection(self):\n # Get search items\n items = [i.strip().lower() for i in self._main._select.text().split(' ')]\n items = [i for i in items if i]\n \n prefix = ''\n tags = []\n words = []\n \n # First item can be the prefix\n if items and items[0] in '. % %% %%% ! !! !!! ? ?? ???':\n prefix = items.pop(0)\n \n # Next are either words or tags\n for item in items:\n if item.startswith('#'):\n tags.append(item)\n else:\n words.append(item)\n \n # Done\n return prefix, tags, words",
"def render_word(self, min_length=3, max_length=12):\n while True:\n word = \"\".join(self.render(lambda o: len(o) > 1 and o[-1] == \" \", lambda n: n[0] == \" \"))\n if min_length <= len(word.strip()) <= max_length:\n return word.strip()",
"def textForFind(self, getCurrentWord=True):\n aw = self.activeWindow()\n if aw is None:\n return \"\"\n \n return aw.getSearchText(not getCurrentWord)",
"def display_top_n_words(total_count__of_words, n): # Considering n=10 here as specified in the requirements\n return sorted(total_count__of_words.items(), key=lambda i: i[1], reverse=True)[:n]",
"def wraptext(text, maxlength):\n\n return textwrap.wrap(text, maxlength)",
"def select_word(self, recursive=True): # pragma: no cover\n return self.select_by_ext(self._ms_word_ext, recursive)",
"def getWordAt(self, pos):\n return self.sentence[pos].getWord()",
"def twentychar2():\n with open('words.txt','r') as fd:\n wordList = fd.read().split() # split the word from space \n print ([word for word in wordList if len(word) > 20])",
"def reduced_word(self, index_set = None, positive = True):\n return self.to_dominant_chamber(index_set=index_set,positive=positive,get_direction = True)[1]",
"def truncate_string(text, length):\n word_tokens = word_tokenize(text)\n truncated = word_tokens[:length]\n truncated_text = \" \".join(truncated)\n return truncated_text",
"def print_first_word(words):\r\n word = words.pop(0) #Halla el valor del arreglo en la posicion cero\r\n print word",
"def pickwords(filename):\n\n word = choice(open(filename, 'r').readlines()).rstrip('\\r\\n')\n return word",
"def split_into_words(context_text):\n doc_tokens = []\n char_to_word_offset = []\n prev_is_whitespace = True\n for c in context_text:\n if is_whitespace(c):\n prev_is_whitespace = True\n else:\n if prev_is_whitespace:\n doc_tokens.append(c)\n else:\n doc_tokens[-1] += c\n prev_is_whitespace = False\n char_to_word_offset.append(len(doc_tokens) - 1)\n return doc_tokens, char_to_word_offset",
"def print_random_word_from(words):\n i = random.randint(0, len(words))\n print(\"The selected word has index {} and is {}.\".format(i, words[i]))\n return i",
"def get_context(words, position, window_size=2):\n start = max(0, position - window_size)\n stop = min(position + window_size, len(words))\n context_words = words[start:position] + words[position + 1:stop + 1]\n\n return list(context_words)",
"def topCommonwords(self,value=5):\n out=self.df.withColumn('word', explode(split(col('name'), ' '))) \\\n .withColumn('norm_word',trim(regexp_replace('word','[^a-zA-Z0-9 ]', ''))) \\\n .filter(col('norm_word') !='')\\\n .groupBy('norm_word')\\\n .count()\\\n .sort('count', ascending=False)\\\n .select('norm_word').limit(value)\n out.withColumnRenamed('norm_word','Top english name in pubname').write \\\n .mode(\"overwrite\").csv('{}pubname/'.format(self.target))\n\n return out.rdd.map(lambda l:l.norm_word).collect()",
"def get_content(self):\r\n view = self.window.active_view()\r\n selection = \"\"\r\n for region in view.sel():\r\n # If no selection, use the entire file as the selection\r\n if region.empty():\r\n selection = sublime.Region(0, view.size())\r\n else:\r\n selection = region\r\n return view.substr(selection)",
"def get_word():\n return ' '.join(sys.argv[1:])",
"def pick_word(self):\n self.chosen_word = random.choice(self.words_list)\n return self.chosen_word",
"def __get_sentence_window_one_word(self, pattern, sentences, constraints):\n textsnippets = []\n for ind, sent in enumerate(sentences):\n tokens = self.tokenizer.tokenize(sent)\n for i, token in enumerate(tokens):\n if check_pattern(pattern, token):\n if constraints is not None:\n self.__check_constraints(constraints, (i, i), ind, pattern, sent, sentences, textsnippets, tokens)\n else:\n self.__get_sentence_window_help(i, ind, sentences, textsnippets)\n return textsnippets",
"def get_word_pos_list(self, raw_text):\n raw_text = raw_text.strip()\n word_list = []\n pos_list = []\n # pdb.set_trace()\n seg_list = jieba.posseg.cut(raw_text,HMM=False) # 默认是精确模式\n for word, flag in seg_list:\n # remove the punctuation, we will keep punctuation as prosodic boundary\n if word in ['「', '」', '.', '-' , '', ' ', '。' , '—' , '?', ':', '、', '…',';',',',',','!']:\n continue\n word_list.append(word)\n pos_list.append(flag)\n return word_list, pos_list",
"def cut_in_lines(self,line):\n limit_screen = 30 #caracteres que tiene de ancho la pantalla\n length = 0 #para comparar leineas\n res = ''\n\n for linea in line.split('\\n'):\n if length + len(linea) <= limit_screen:\n new_linea = linea\n length += len(new_linea)\n else:\n if len(linea) > limit_screen:\n linea = self.cut_in_words(linea)\n new_linea = '\\n' + linea\n length = len(new_linea) - 2 #-2 para no tener en cuenta el \\n\n res += new_linea\n return res",
"def keep_position_name(text):\n text = text[3:]\n return text",
"def wordWrap(self):\n return self._wordWrap",
"def last_word(text, include='alphanum_underscore'):\n\n if not text: # Empty string\n return ''\n\n if text[-1].isspace():\n return ''\n else:\n regex = cleanup_regex[include]\n matches = regex.search(text)\n if matches:\n return matches.group(0)\n else:\n return ''",
"def topBorderFor( player ):\n return centerTextAt( \"\", default_display_vars.borderChar_Top, getUserScreenWidth( player ) )",
"def break_words(stuff):\n # triple quotes turn to be the document of ex25 help\n words = stuff.split(' ')\n # listing the parts of breaking\n return words",
"def skip_to_word(self) -> None:\n char = ...\n while self._is_whitespace(char):\n char = self.next_character()",
"def words(self):\n return self.title + self.content",
"def start_word(self):\n return self._start",
"def __remove(self, text, start_index, count):\n\n return text[:start_index] + text[start_index + count:]",
"def boxTextFor(player, text = \"\"):\n pass_text = \"\"\n if isinstance( text, ( str, unicode ) ):\n pass_text = text\n return boxTextAt( pass_text, default_display_vars.borderChar_Left, default_display_vars.borderChar_Right, default_display_vars.boxText_padding, getUserScreenWidth( player ) )",
"def print_first_word(words):\r\n word = words.pop(0)\r\n print word",
"def word(word_time):\n return word_time[0]",
"def copy_to_clipboard(self, txt):\r\n cmd = 'echo \"' + txt.strip() + '\"|clip'\r\n return subprocess.check_call(cmd, shell=True)",
"def print_first_word(words):\r\n word = words.pop(0)\t\r\n print word"
] | [
"0.5991796",
"0.5976473",
"0.5937633",
"0.59360015",
"0.5921066",
"0.56286114",
"0.5511649",
"0.54804397",
"0.54544324",
"0.53912175",
"0.53642124",
"0.5343262",
"0.5335321",
"0.53241056",
"0.5289842",
"0.52851576",
"0.52594143",
"0.5233265",
"0.52057403",
"0.5187018",
"0.51681244",
"0.5141727",
"0.5131182",
"0.5123632",
"0.5094309",
"0.50735956",
"0.50644696",
"0.50620615",
"0.506059",
"0.5060172",
"0.50376475",
"0.500466",
"0.50040424",
"0.5002208",
"0.49961844",
"0.4989933",
"0.4987624",
"0.49816626",
"0.49769822",
"0.4974774",
"0.4968641",
"0.4945164",
"0.49361566",
"0.49218872",
"0.49183035",
"0.49099776",
"0.48943537",
"0.48931497",
"0.48927218",
"0.4890788",
"0.488644",
"0.48858616",
"0.48720104",
"0.48682672",
"0.48662725",
"0.48631153",
"0.4858858",
"0.4857902",
"0.48574847",
"0.48540652",
"0.48443973",
"0.48439035",
"0.483403",
"0.48104835",
"0.48023918",
"0.47926363",
"0.47854725",
"0.47826502",
"0.47681502",
"0.47654006",
"0.47511905",
"0.47493437",
"0.4747671",
"0.4746633",
"0.4731695",
"0.4722829",
"0.47217855",
"0.47081217",
"0.47077778",
"0.4707278",
"0.47051257",
"0.46990177",
"0.4696871",
"0.4696405",
"0.46883816",
"0.4687374",
"0.46855947",
"0.46807003",
"0.4679654",
"0.46787354",
"0.46625146",
"0.4659688",
"0.46575224",
"0.46560362",
"0.46545735",
"0.4653986",
"0.46520156",
"0.46504337",
"0.46493137",
"0.46480492"
] | 0.808763 | 0 |
Sets the system clip board content. | def _set_clipboard_text(text):
clipboard = Clipboard()
clipboard.set_text(text) # Restore previous clipboard text.
clipboard.copy_to_system() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_board(board):",
"def set_clipboard_contents(contents):\n pboard = NSPasteboard.generalPasteboard()\n pboard.clearContents()\n for uti in contents:\n data = nsdata(contents[uti])\n pboard.setData_forType_(data, uti.encode('utf-8'))",
"def set_clipboard(content, *args, **kwargs):\n G.DEVICE.set_clipboard(content, *args, **kwargs)",
"def set_contents(self, contents):\n self.validate_contents(contents)\n #Copy-ing the contents list is necessary because it can be modified \n #by UI elements that are based on this class\n self.contents = copy(contents)\n self.process_contents()\n self.fix_pointers_on_contents_update()",
"def setBoard(self, board):\n\t\tself.gameBoard = board",
"def setContentData(self, content):\n original = content\n if IVersionedObject.providedBy(original):\n content = original.get_editable()\n if content is None:\n self.widgetFactoryFactory = SMIDisplayWidgetFactory\n content = original.get_previewable()\n\n super(SMISubEditForm, self).setContentData(content)",
"def set_content(self, widget):\n\t\tself.content = widget",
"def SetContent(self, window):\n window.SetName(\"content\")\n window.SetBackgroundColour(wx.GetApp().settings.bg_color)\n window.SetForegroundColour(wx.GetApp().settings.fg_color)\n window.SetFont(wx.GetApp().settings.text_font)",
"def set_board(self, path_to_board):\n self.current_board = path_to_board",
"def update_board(self, mpos):\n pass",
"def setContentData(self, content):\n original = content\n if IVersionedObject.providedBy(original):\n content = original.get_editable()\n if content is None:\n self.widgetFactoryFactory = SMIDisplayWidgetFactory\n content = original.get_previewable()\n\n super(SMIEditForm, self).setContentData(content)",
"def set_content(self, widget):\n\t\tpass",
"def place(self, board):\r\n self.board = board",
"def setContent(self, content):\n self.__content = content",
"def set_content(self, content):\n self.content = content",
"def set_content(self, content):\n self.content = content",
"def set_content(self, content):\n \n # clean sizer\n if self._view is not None:\n self.remove_subview(self._view)\n \n # init view\n if isinstance(content, UIView):\n self._view = content\n \n elif isinstance(content, Control):\n self._view = UIView()\n self._view.set_control(content)\n \n elif isinstance(content, Graphics):\n self._view = UIView()\n self._view.set_control(Control(graphics=content))\n \n else:\n message = \"Unknown content type! -> %s\" % type(content)\n raise TypeError(message)\n \n # add to sizer\n self.add_subview(self._view)\n \n # set layout\n self._view.flex = \"WH\"",
"def set_content(self, content):\n\n # pylint: disable=W0201\n self.clear()\n self.content = content",
"def set_content(self, content):\n self.data['content'] = content",
"def _set(self, value):\n assert isinstance(self.world, pixel_worlds.ScreenBasedPixelWorld), \\\n 'Only ScreenBasedPixelWorlds can have the \"screen\" world attribute'\n\n super(ScreenWorldAttribute, self)._set(value)\n\n if self.world._populated:\n self.world.remove_objects(self.world.objects)\n\n objects, width, height = h.world.screen(self.world._screens[value], self.world._legend)\n\n self.world.create_objects(objects)\n else:\n # do nothing, assume the ScreenBasedPixelWorld constructor does all\n # the work\n pass",
"def clipping(self, value):\n\n self._clipping = value\n if self.is_attached:\n self[\"clipping\"] = self._clipping",
"def updateClip(self, clip, text):\n text = handleStr(text)\n\n if clip.content == text:\n return False\n\n SQL = ''' update clippings set content = '%s' where ID = %d ''' % (\n text, clip.id)\n self.__execute__(SQL)\n clip.content = text\n\n return True",
"def _set_board(self) -> None:\n try:\n self.board.set_pin_mode_sonar(self.trig, self.echo, self._callback)\n except Exception:\n print(f'Problem setting up {self.sonar_name}')\n print(f'Set up {self.sonar_name} successfully')",
"def set_item(self, row, col, value):\n self.board[row][col] = value",
"def content(self, content: FrameworkDataImageContentInterface):\n\n self._content = content",
"def board(self, board):\n\n self._board = board",
"def set_scene(self, pixels=[]):\n if not pixels:\n self.clear()",
"def contents(self, text):\n self.app.contents = text",
"def setAllowModifyContents(self,value):\n self.PDFreactorConfiguration.in1[\"allowModifyContents\"] = value",
"def content(self, content):\n\n self._content = content",
"def content(self, content):\n\n self._content = content",
"def set_content(self, content: Any, data: Any):\n super(PnlCell, self).set_content(content, data)\n\n if str(content).startswith(\"-\"):\n self.setForeground(COLOR_SHORT)\n else:\n self.setForeground(COLOR_LONG)",
"def contents(self, contents):\n\n self._contents = contents",
"def SetClipboard(*args, **kwargs):\n return _gdi_.MetaFile_SetClipboard(*args, **kwargs)",
"def update_well(self, row, col, content):\n self.plate.get_well(row, col).composition.update(content)",
"def clear(self):\n self.command(self.LCD_CLEARDISPLAY)\n self._cursor_pos = (0, 0)\n self._content = [[0x20] * self._cols for _ in range(self._rows)]\n self._msleep(2)",
"def contents(self, value: str) -> None:\n self._contents = value",
"def setScaledContents(self, scaled):\n self._scaled_contents = scaled\n self.update()",
"def set_value(self, row, col, value):\n\n #add the value to the appropriate position on the board\n self.CurrentGameBoard[row][col]=value\n #return a new board of the same size with the value added\n return SudokuBoard(self.BoardSize, self.CurrentGameBoard)",
"def replaceClipboardWith(self, s: str) -> None:\n cb = self.qtApp.clipboard()\n if cb:\n # cb.clear() # unnecessary, breaks on some Qt versions\n s = g.toUnicode(s)\n QtWidgets.QApplication.processEvents()\n # Fix #241: QMimeData object error\n cb.setText(s)\n QtWidgets.QApplication.processEvents()\n else:\n g.trace('no clipboard!')",
"def clear(self):\n self.command(_LCD_CLEARDISPLAY)\n self._cursor_pos = (0, 0)\n self._content = [[0x20] * self.cols for _ in range(self.rows)]\n time.sleep(2*MILLISECOND)",
"def update_board(self, position, value):\n self.board[position] = value\n self.render()\n self.events()\n sleep(self.sleep_time)",
"def setContents(self, item):\n if item == None:\n self.pot.a(None, 0)\n else:\n self.pot.a(CraftMagicNumbers.getItem(item.getItemType()), item.getData())\n # PAIL: rename",
"def set_content(self, content):\n if check_data_exist(content) is True:\n self.content = content.text",
"def updateContents(self):\n selSpots = self.treeView.selectionModel().selectedSpots()\n if self.isChildView:\n if len(selSpots) > 1 or self.hideChildView:\n self.hide()\n return\n if not selSpots:\n # use top node childList from tree structure\n selSpots = [globalref.mainControl.activeControl.structure.\n structSpot()]\n elif not selSpots:\n self.hide()\n return\n self.show()\n if not self.isVisible() or self.height() == 0 or self.width() == 0:\n return\n if self.isChildView:\n selSpots = selSpots[0].childSpots()\n self.blockSignals(True)\n if selSpots:\n self.setPlainText('\\n'.join(spot.nodeRef.title(spot) for spot in\n selSpots))\n else:\n self.clear()\n self.blockSignals(False)",
"def initBoard(self):\n pass",
"def __editPaste(self):\n if QApplication.focusWidget() == e5App().getObject(\"Shell\"):\n e5App().getObject(\"Shell\").paste()\n else:\n self.activeWindow().paste()",
"def fill_interface(self):\r\n path = os.path.abspath(os.path.join(self.app.path, \"PySkeletonViewer\", \"images\", \"bg.png\"))\r\n pixmap = QPixmap(path)\r\n pixmap = pixmap.scaled(self.width(), self.height())\r\n palette = QPalette()\r\n palette.setBrush(QPalette.Background, QBrush(pixmap))\r\n self.edit_add.setText(HOST)\r\n self.setPalette(palette)\r\n\r\n self.set_online_status(False)\r\n self.paint_project_button(False)\r\n print \"filled\"",
"def resetBoard(self):\n pass",
"def clipboard(self, data):\n p = subprocess.Popen([\"xclip\", \"-selection\", \"clipboard\"], stdin=subprocess.PIPE)\n p.stdin.write(data.encode())\n p.stdin.close()",
"def set_tile(self, row, col, value):\n # replace with your code\n self.grid[row][col] = value",
"def resetBoard(self):\n\t\tself.board = np.zeros((self.boardSize,self.boardSize))",
"def resetBoard(self):\n\t\tself.board = np.zeros((self.boardSize,self.boardSize))",
"def setClipboardSelection(self, s: str) -> None:\n # Alas, returning s reopens #218.\n return",
"def set_tile(self, row, col, value):\r\n self._board[row][col] = value",
"def reset_board(self):\n\n self.board = np.array(self.initial_board)",
"def model_refresh(self):\n for x in range(self._dim):\n for y in range(self._dim):\n if self._board[x][y]:\n self.canvas.itemconfig(self.rect[y,x], fill=self._secondary_color)\n else:\n self.canvas.itemconfig(self.rect[y,x], fill=self._primary_color)",
"def _set_cli_area(self):\n pass",
"def content(self, content: str):\r\n self._content = content",
"def update(self):\n self.board.update()",
"def system(self, system):\n\n self._system = system",
"def system(self, system):\n\n self._system = system",
"def __setitem__(self, xxx_todo_changeme, value):\n (x, y) = xxx_todo_changeme\n xpos, ypos = self.move(x, y)\n self.canvas.itemconfig(self.box[x][y], fill = self.color[value], outline = self.color[value])\n self.canvas.update()\n self.board[x][y] = value",
"def __editCopy(self):\n if QApplication.focusWidget() == e5App().getObject(\"Shell\"):\n e5App().getObject(\"Shell\").copy()\n else:\n self.activeWindow().copy()",
"def game_system(self, game_system):\n\n self._game_system = game_system",
"def update_content(self):\n raise NotImplementedError",
"def set(self, coord, value):\n layer, row, column = tuple(coord)\n self.validPosition(layer, row, column)\n self._state['visible']['board'][layer][row][column] = value",
"def defaultLoad (self):\n self.srcEditor.setText( \"\" )\n self.srcEditor.setFocus()\n self.setReadOnly( readOnly=False )",
"def set_content(self, text):\n img, size = self._render_as_image(text)\n # Modify the Drawable attributes based on text attributes\n self.image = img\n self.rect.size = size",
"def set_tile(self, row, col, value):\n # replace with your code\n self._grid[row][col] = value;",
"def set_tile(self, row, col, value):\n # replace with your code\n self._grid[row][col] = value",
"def set_tile(self, row, col, value):\n # replace with your code\n self._grid[row][col] = value",
"def setupScreenText(self) :\n\t\t# Create object to show avatar's position on the screen.\n\t\t# Update actual text using setText method on object.\n\t\tself.avPos = showText(0.92)\n\n \t\t# Create object to show a list of visible avatars\n \t\tself.showNumVisible = showText(0.85)\n \t\tself.visList = []\n\n\t\t# Create object for displaying keyboard shortcuts\n\t\tself.helpText = showText(0.78)\n\t\tself.helpText.setText(\"h: for help\")",
"def set_tile(self, row, col, value):\r\n del self.board[row][col]\r\n self.board[row].insert(col,value)\r\n return self.board",
"def set_tile(self, row, col, value):\n # replace with your code\n self._grid_2048[row][col] = value",
"def setup(self):\n curses.curs_set(1)\n curses.noecho()\n curses.cbreak()\n # Keypad disabled until scrolling properly implemented\n # self.stdscr.keypad(True)\n self.stdscr.clear()\n self.stdscr.addstr(\"SecureChat v{}\".format(__version__))\n self.chat_container.box()\n self.chat_win.addstr(\"Welcome to SecureChat!\")\n self.chat_win.scrollok(True)\n self.chat_win.setscrreg(0, self.max_y - 5)\n self.prompt_win.addstr(\"> \")\n self.refresh_all()",
"def set_share_clipboard(self, bEnabled):\n\t\tcall_sdk_function('PrlVmCfg_SetShareClipboard', self.handle, bEnabled)",
"def _update_board(self):\n\n self.game_board.update_board(self.tetrino_set)",
"def _setwin(self, win):\n\t\tself.win = win",
"def pasteSettings(self):\n\n # it does this 4 times because for some reason it would not grab everything one time through. Investigate\n for i in range(4):\n\n tempDir = cmds.internalVar(userTmpDir=True)\n clipboardFile = os.path.normcase(os.path.join(tempDir, \"ART_clipboard.txt\"))\n\n if os.path.exists(clipboardFile):\n # load the data\n json_file = open(clipboardFile)\n data = json.load(json_file)\n json_file.close()\n\n # attempt to paste data if module type is the same\n networkNode = self.returnNetworkNode\n moduleType = cmds.getAttr(networkNode + \".moduleType\")\n if moduleType == data[0][1]:\n\n for each in data:\n attr = each[0]\n value = each[1]\n\n try:\n attrType = str(cmds.getAttr(networkNode + \".\" + attr, type=True))\n\n if attrType != \"string\":\n cmds.setAttr(networkNode + \".\" + attr, lock=False)\n cmds.setAttr(networkNode + \".\" + attr, value, lock=True)\n except:\n pass\n\n else:\n cmds.warning(\"No data in clipboard\")\n\n # relaunch the UI\n self.updateSettingsUI()\n self.applyModuleChanges(self)",
"def reset_board(self):\n self.board = [0, 0, 0, 0, 0, 0, 0, 0, 0]\n self.turn = 0\n\n self.change_button_img_to_null()\n\n #self.Score_Label.grid(row=0,column=1, ipadx=32)\n\n self.player_highlight()\n self.change_button_state('normal')\n self.update_score()",
"def __init__(self,clip_list):\n self.requested_clips=clip_list",
"def text_update(self):\n if self.stext is not None:\n # Get index of non-masked sources :\n # idx = self._select_unmasked()[-1]\n\n # Set masked-sources text to '':\n text = np.array(self.stext)\n # text[np.array(~idx, dtype=bool)] = ''\n\n # Update elements :\n self.stextmesh.text = text\n self.stextmesh.color = self.stextcolor\n self.stextmesh.font_size = self.stextsize\n self.stextmesh.update()",
"def applyPrefs (self):\r\n self.storyPanel.eachWidget(lambda w: w.applyPrefs())\r\n self.storyPanel.Refresh()",
"def reset(self):\n # replace with your code\n self.board = [[0 for dummy_index in range(self.grid_width)] for dummy_inner_index in range(self.grid_height)]",
"async def imgboard(self, ctx, value: bool=None):\n\t\tif value is None:\n\t\t\tv = await self.config.guild(ctx.guild).doImage()\n\t\t\tif v:\n\t\t\t\tawait ctx.send('The board is currently displayed using an image.')\n\t\t\telse:\n\t\t\t\tawait ctx.send('The board is currently displayed using text.')\n\t\telse:\n\t\t\tawait self.config.guild(ctx.guild).doImage.set(value)\n\t\t\tif value:\n\t\t\t\tawait ctx.send('The board will now be displayed using an image.')\n\t\t\telse:\n\t\t\t\tawait ctx.send('The board will now be displayed using text.')",
"def displayBoard(self, ui):\n #self.ui.mainArea.fill(self.ui.color_background)\n self.board = Board(ui, self.player1, self.player2)",
"def set_tile(self, row, col, value):\r\n self.grid[row][col] = value",
"def updateVision(self, *args):\r\n\r\n # Update the list of frame choices and the default frame choice\r\n self._appChoice[\"frame\"] = [choice[0] for choice in self._system[self._appString[\"device\"].get()][self._appString[\"vision\"].get()]]\r\n self._appString[\"frame\"].set(self._appChoice[\"frame\"][0])\r\n\r\n # Delete the old choices fromt the option menu\r\n menu = self._appOption[\"frame\"][\"menu\"]\r\n menu.delete(0, \"end\")\r\n\r\n # Add the new list of choices to the option menu\r\n for string in self._appChoice[\"frame\"]:\r\n menu.add_command(label=string, command=lambda value=string: self._appString[\"frame\"].set(value))",
"def __init__(self, slit_width_xaxis, slit_height_zaxis):\n super(SwScreen, self).__init__()\n self.sw = self.create_instance()\n\n self.set_output_files(fwrite=0, f_angle=0) #write all, TODO: remove\n\n n_screen = 1\n i_screen = np.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n i_abs = np.zeros(10)\n i_slit = np.zeros(10)\n i_stop = np.zeros(10)\n k_slit = np.zeros(10)\n thick = np.zeros(10)\n file_abs = np.array(['', '', '', '', '', '', '', '', '', ''])\n rx_slit = np.zeros(10)\n rz_slit = np.zeros(10)\n sl_dis = np.zeros(10)\n file_src_ext = np.array(['', '', '', '', '', '', '', '', '', ''])\n cx_slit = np.zeros(10)\n cz_slit = np.zeros(10)\n\n i_abs[0] = 0 # NO ABSORPTION\n i_slit[0] = 0 # APERTURING\n i_stop[0] = 0 # SLIT\n k_slit[0] = 0 # RECTANGULAR\n\n rx_slit[0] = slit_width_xaxis\n rz_slit[0] = slit_height_zaxis\n cx_slit[0] = 0.0\n cz_slit[0] = 0.0\n\n self.sw._oe.set_screens(n_screen,\n i_screen,\n i_abs,\n sl_dis,\n i_slit,\n i_stop,\n k_slit,\n thick,\n file_abs,\n rx_slit,\n rz_slit,\n cx_slit,\n cz_slit,\n file_src_ext)",
"def set_tile(self, row, col, value):\n # replace with your code\n if col < self.grid_height and row < self.grid_width:\n self.board[row][col] = value",
"def update(self, ucs):\n try:\n self.content = Ansi(Ansi(ucs).decode_pipe()).wrap(\n self.visible_width).splitlines()\n except AssertionError, err:\n # indeterminate length\n logger = logging.getLogger()\n logger.warn('%s in [%r]', err, ucs)\n self.content = ucs.split('\\r\\n')\n return self.refresh()",
"def set_tile(self, row, col, value):\r\n # replace with your code\r\n self._grid_tile[row][col] = value",
"def set_defaults(self):\n if not self.HAS_DS9: # pragma: no cover\n return\n self.run('frame delete all')\n self.run('wcs degrees')\n if self.disp_parameters['tile']:\n self.run('tile yes')\n else:\n self.run('tile no')\n self.cs = str(self.disp_parameters['lock_image']).lower()\n self.lock()",
"def set_content(self, content: Any, data: Any):\n if content:\n super(EnumCell, self).set_content(content.value, data)",
"def _on_clip_notes_changed(self):\n if liveobj_valid(self._sequencer_clip) and self._can_edit():\n time_start, time_length = self._get_clip_notes_time_range()\n self._clip_notes = self._get_notes_handler(self._sequencer_clip, time_start, self._pitches, time_length)\n else:\n self._clip_notes = []\n self._update_editor_matrix()\n self.notify_notes_changed()",
"def set_tile(self, row, col, value):\n self.grid[row][col] = value",
"def set_tile(self, row, col, value):\n self.grid[row][col] = value",
"def updateUI (self, event = None):\r\n\r\n hasSelection = self.storyPanel.hasSelection()\r\n\r\n canPaste = False\r\n if wx.TheClipboard.Open():\r\n canPaste = wx.TheClipboard.IsSupported(wx.CustomDataFormat(StoryPanel.CLIPBOARD_FORMAT))\r\n wx.TheClipboard.Close()\r\n \r\n # window title\r\n \r\n if self.saveDestination == '':\r\n title = StoryFrame.DEFAULT_TITLE\r\n else:\r\n bits = os.path.splitext(self.saveDestination)\r\n title = os.path.basename(bits[0])\r\n \r\n percent = str(int(round(self.storyPanel.scale * 100)))\r\n dirty = ''\r\n if self.dirty: dirty = ' *'\r\n\r\n self.SetTitle(title + dirty + ' (' + percent + '%) ' + '- ' + self.app.NAME)\r\n \r\n # File menu\r\n \r\n revertItem = self.menus.FindItemById(wx.ID_REVERT_TO_SAVED)\r\n revertItem.Enable(self.saveDestination != '' and self.dirty)\r\n \r\n # Edit menu\r\n \r\n undoItem = self.menus.FindItemById(wx.ID_UNDO)\r\n undoItem.Enable(self.storyPanel.canUndo())\r\n if self.storyPanel.canUndo():\r\n undoItem.SetText('Undo ' + self.storyPanel.undoAction() + '\\tCtrl-Z')\r\n else:\r\n undoItem.SetText(\"Can't Undo\\tCtrl-Z\")\r\n \r\n redoItem = self.menus.FindItemById(wx.ID_REDO)\r\n redoItem.Enable(self.storyPanel.canRedo())\r\n if self.storyPanel.canRedo():\r\n redoItem.SetText('Redo ' + self.storyPanel.redoAction() + '\\tCtrl-Y')\r\n else:\r\n redoItem.SetText(\"Can't Redo\\tCtrl-Y\")\r\n \r\n cutItem = self.menus.FindItemById(wx.ID_CUT)\r\n cutItem.Enable(hasSelection)\r\n copyItem = self.menus.FindItemById(wx.ID_COPY)\r\n copyItem.Enable(hasSelection)\r\n deleteItem = self.menus.FindItemById(wx.ID_DELETE)\r\n deleteItem.Enable(hasSelection) \r\n pasteItem = self.menus.FindItemById(wx.ID_PASTE)\r\n pasteItem.Enable(canPaste)\r\n \r\n findAgainItem = self.menus.FindItemById(StoryFrame.EDIT_FIND_NEXT)\r\n findAgainItem.Enable(self.storyPanel.lastSearchRegexp != None)\r\n \r\n # View menu\r\n \r\n toolbarItem = self.menus.FindItemById(StoryFrame.VIEW_TOOLBAR)\r\n toolbarItem.Check(self.showToolbar)\r\n snapItem = self.menus.FindItemById(StoryFrame.VIEW_SNAP)\r\n snapItem.Check(self.storyPanel.snapping)\r\n \r\n # Story menu\r\n \r\n editItem = self.menus.FindItemById(wx.ID_EDIT)\r\n editItem.Enable(hasSelection)\r\n \r\n editFullscreenItem = self.menus.FindItemById(StoryFrame.STORY_EDIT_FULLSCREEN)\r\n editFullscreenItem.Enable(hasSelection and not self.storyPanel.hasMultipleSelection())\r\n \r\n rebuildItem = self.menus.FindItemById(StoryFrame.STORY_REBUILD)\r\n rebuildItem.Enable(self.buildDestination != '')\r\n \r\n viewLastItem = self.menus.FindItemById(StoryFrame.STORY_VIEW_LAST)\r\n viewLastItem.Enable(self.buildDestination != '')\r\n \r\n # Story format submenu\r\n\r\n for key in self.storyFormats:\r\n self.menus.FindItemById(key).Check(self.target == self.storyFormats[key])",
"def set_piece(self, square, piece):\n self.board[square.row][square.col] = piece"
] | [
"0.5636461",
"0.56241775",
"0.56007797",
"0.55882066",
"0.5579036",
"0.5543145",
"0.5503222",
"0.54816544",
"0.54584825",
"0.5431708",
"0.5427628",
"0.53481144",
"0.5347229",
"0.53063387",
"0.5292606",
"0.5292606",
"0.52838135",
"0.52283597",
"0.519499",
"0.51938814",
"0.51222885",
"0.5115658",
"0.5081506",
"0.5037578",
"0.50186074",
"0.5005838",
"0.5003519",
"0.49945062",
"0.4974308",
"0.4967609",
"0.4967609",
"0.49201757",
"0.4906016",
"0.4895655",
"0.48904523",
"0.48820797",
"0.4819094",
"0.4813305",
"0.47996458",
"0.47658187",
"0.47538483",
"0.4732589",
"0.4732158",
"0.4731565",
"0.47296107",
"0.47191843",
"0.47098967",
"0.4655792",
"0.46550474",
"0.46502942",
"0.4650165",
"0.46388343",
"0.46388343",
"0.4632613",
"0.46235868",
"0.4623522",
"0.46211523",
"0.46121496",
"0.46100277",
"0.46095932",
"0.46053752",
"0.46053752",
"0.4603926",
"0.4601404",
"0.45991313",
"0.45949134",
"0.4589558",
"0.4586315",
"0.45817915",
"0.4578027",
"0.4577375",
"0.4577375",
"0.45688",
"0.45676932",
"0.4567584",
"0.45622727",
"0.45577195",
"0.4551857",
"0.4540812",
"0.4532118",
"0.45305938",
"0.45248216",
"0.45211363",
"0.45192406",
"0.45184228",
"0.4518311",
"0.45160487",
"0.45146996",
"0.45089665",
"0.45022905",
"0.44989073",
"0.4497591",
"0.44961998",
"0.44910285",
"0.44849283",
"0.44802752",
"0.44766474",
"0.44766474",
"0.44707575",
"0.44588834"
] | 0.46337497 | 53 |
Builds the wx Panel | def init_ui(self):
self.panel_sizer = wx.BoxSizer(wx.VERTICAL)
self.figure_bmp = wx.StaticBitmap(self, wx.ID_ANY,
bitmap=self.controller.empty_bitmap(self.bitmap_width,
self.bitmap_height),
pos=wx.DefaultPosition, size=wx.DefaultSize)
self.panel_sizer.Add(self.figure_bmp, ui_defaults.ctrl_pct, wx.CENTER,
ui_defaults.widget_margin)
self.SetSizerAndFit(self.panel_sizer) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_main_panel(self):\n self.panel = wx.Panel(self)\n\n self.init_plot()\n self.canvas = FigCanvas(self.panel, -1, self.fig)\n\n self.control_box = VSControlBox(self.panel, -1, 'Information board')\n\n self.vbox = wx.BoxSizer(wx.VERTICAL)\n self.vbox.Add(self.canvas, 1, wx.LEFT | wx.TOP | wx.GROW)\n self.vbox.Add(self.control_box, 0, wx.ALIGN_LEFT | wx.TOP | wx.EXPAND)\n \n self.panel.SetSizer(self.vbox)\n self.vbox.Fit(self)",
"def define_panel_structure(self):\n w, h = self.parent.GetSize()\n self.vbox = wx.BoxSizer(wx.VERTICAL)\n self.sizer1 = wx.BoxSizer(wx.VERTICAL)\n self.sizer1.SetMinSize(wx.Size(w/13, h*2/5))\n\n self.sizer2 = wx.BoxSizer(wx.VERTICAL)\n self.sizer3 = wx.FlexGridSizer(9, 2, 4, 1)\n self.sizer4 = wx.BoxSizer(wx.VERTICAL)\n self.sizer5 = wx.BoxSizer(wx.VERTICAL)\n\n self.vbox.Add(self.sizer5, 0, wx.EXPAND | wx.ALL, 1)\n self.vbox.Add(self.sizer1, 1, wx.EXPAND | wx.ALL, 0)\n self.vbox.Add(self.sizer2, 0, wx.EXPAND | wx.ALL, 1)\n self.vbox.Add(self.sizer3, 0, wx.EXPAND | wx.ALL, 10)\n # self.vbox.Add(self.sizer4, 0, wx.EXPAND|wx.ALL,5)\n\n self.SetSizer(self.vbox)",
"def create_widget(self, parent, tree):\n widget = wx.Panel(parent)\n sizer = wxSingleWidgetSizer()\n widget.SetSizer(sizer)\n return widget",
"def _create_content(self):\n sizer = wx.BoxSizer(wx.VERTICAL)\n\n btn_size = sppasScrolledPanel.fix_size(64)\n\n btn_back_top = BitmapTextButton(self, name=\"arrow_up\")\n btn_back_top.FocusWidth = 0\n btn_back_top.BorderWidth = 0\n btn_back_top.BitmapColour = self.GetForegroundColour()\n btn_back_top.SetMinSize(wx.Size(btn_size, btn_size))\n\n title = sppasStaticText(self, label=\"Procedure Outcome Report\", name=\"title_text\")\n\n sizer_top = wx.BoxSizer(wx.HORIZONTAL)\n sizer_top.Add(btn_back_top, 0, wx.RIGHT, btn_size // 4)\n sizer_top.Add(title, 1, wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_CENTER_HORIZONTAL)\n sizer.Add(sizer_top, 0, wx.EXPAND)\n\n log_txt = self.__create_log_text()\n sizer.Add(log_txt, 2, wx.EXPAND | wx.LEFT, btn_size // 4)\n\n self.SetSizer(sizer)\n self.SetupScrolling(scroll_x=True, scroll_y=True)",
"def _create_canvas(self, parent):\n # The panel lets us add additional controls.\n panel = wx.Panel(parent, -1, style=wx.CLIP_CHILDREN)\n sizer = wx.BoxSizer(wx.VERTICAL)\n panel.SetSizer(sizer)\n # matplotlib commands to create a canvas\n mpl_control = FigureCanvas(panel, -1, self.value)\n sizer.Add(mpl_control, 1, wx.LEFT | wx.TOP | wx.GROW)\n toolbar = NToolbar(mpl_control)\n sizer.Add(toolbar, 0, wx.EXPAND)\n self.value.canvas.SetMinSize((10,10))\n return panel",
"def build_controls(self):\n controlSizer = wx.BoxSizer(wx.HORIZONTAL)\n \n btnData = [{'bitmap':'player_pause.png', \n 'handler':self.on_pause, 'name':'pause'},\n {'bitmap':'player_stop.png',\n 'handler':self.on_stop, 'name':'stop'}]\n for btn in btnData:\n self.build_btn(btn, controlSizer)\n \n return controlSizer",
"def create(self, parent):\n self.widget = wxBitmapWidget(parent)",
"def init ( self, parent ):\n # Create a panel to hold all of the buttons:\n self.control = panel = wx.Panel( parent, -1 )\n sizer = wx.BoxSizer( wx.VERTICAL )\n \n # Add the standard font control:\n font = self._font = wx.TextCtrl( panel, -1, self.str_value )\n wx.EVT_KILL_FOCUS( font, self.update_object )\n wx.EVT_TEXT_ENTER( panel, font.GetId(), self.update_object )\n sizer.Add( font, 0, wx.EXPAND | wx.BOTTOM, 3 )\n \n # Add all of the font choice controls:\n sizer2 = wx.BoxSizer( wx.HORIZONTAL )\n facenames = all_facenames()\n control = self._facename = wx.Choice( panel, -1, wx.Point( 0, 0 ), \n wx.Size( choice_width( facenames ), 20 ), \n facenames )\n \n sizer2.Add( control, 2, wx.EXPAND )\n wx.EVT_CHOICE( panel, control.GetId(), self.update_object_parts )\n \n control = self._point_size = wx.Choice( panel, -1, \n wx.Point( 0, 0 ), wx.Size( 30, 20 ), \n PointSizes )\n sizer2.Add( control, 1, wx.EXPAND | wx.RIGHT, 3 )\n wx.EVT_CHOICE( panel, control.GetId(), self.update_object_parts ) \n \n sizer.Add( sizer2, 0, wx.EXPAND )\n \n # Set-up the layout:\n panel.SetAutoLayout( True )\n panel.SetSizer( sizer )\n sizer.Fit( panel )",
"def layout(self):\n\t\t\n\t\tself.mainSizer = wx.BoxSizer(wx.VERTICAL)\n\t\tbtnSizer = wx.BoxSizer(wx.HORIZONTAL)\n\t\t\n\t\timg = wx.Image(self.photoMaxSize,self.photoMaxSize)\n\t\tself.imageCtrl = wx.StaticBitmap(self, wx.ID_ANY, \n\t\t\t\t\t\t\t\t\t\t wx.Bitmap(img))\n\t\tself.mainSizer.Add(self.imageCtrl, 0, wx.ALL|wx.CENTER, 5)\n\t\tself.imageLabel = wx.StaticText(self, label=\"\")\n\t\tself.mainSizer.Add(self.imageLabel, 0, wx.ALL|wx.CENTER, 5)\n\t\t\n\t\tbtnData = [(\"Previous\", btnSizer, self.onPrevious),\n\t\t\t\t (\"Slide Show\", btnSizer, self.onSlideShow),\n\t\t\t\t (\"Next\", btnSizer, self.onNext)]\n\t\tfor data in btnData:\n\t\t\tlabel, sizer, handler = data\n\t\t\tself.btnBuilder(label, sizer, handler)\n\t\t\t\n\t\tself.mainSizer.Add(btnSizer, 0, wx.CENTER)\n\t\tself.SetSizer(self.mainSizer)",
"def create_ui(self):\n main_sizer = wx.BoxSizer(wx.VERTICAL)\n\n self.text_ctrl = wx.TextCtrl(self, style=wx.TE_MULTILINE)\n self.text_ctrl.Bind(wx.EVT_TEXT, self.on_text)\n main_sizer.Add(self.text_ctrl, 1, wx.ALL | wx.EXPAND, 5)\n\n self.SetSizer(main_sizer)",
"def __init__(self, parent, id=-1, title=\"\", name=\"\"):\n wx.Frame.__init__(self, parent, id=id, title=title,\n pos=wx.DefaultPosition, size=(1800,830), name=name) #1000,875\n\n self.menubar = wx.MenuBar()\n self.SetMenuBar(self.menubar)\n\n views_menu = wx.Menu()\n self.views_default_id = wx.NewId()\n views_menu.Append(self.views_default_id, \"&Default\\tCtrl-D\",\n \"Activate default view layout.\", wx.ITEM_NORMAL)\n\n self.views_max_image_id = wx.NewId()\n views_menu.Append(self.views_max_image_id, \"&Axial-Sagittal-Coronal View\\tCtrl-M\",\n \"Activate maximum image view size layout.\",\n wx.ITEM_NORMAL)\n\t\t\n self.views_contour_view_id = wx.NewId()\n views_menu.Append(self.views_contour_view_id, \"&Contour Uncertainty view\\tCtrl-C\",\n \"Activate contour uncertainty view size layout.\",\n wx.ITEM_NORMAL)\n\n self.views_voxel_view_id = wx.NewId()\n views_menu.Append(self.views_voxel_view_id, \"&Voxel Uncertainty view\\tCtrl-V\",\n \"Activate voxel uncertainty view size layout.\",\n wx.ITEM_NORMAL)\n\n self.menubar.Append(views_menu, \"&Views\")\n\t\t\n\t\t\n help_menu = wx.Menu()\n help_about_id = wx.NewId()\n help_menu.Append(help_about_id, \"&About\\tCtrl-0\",\n \"Info about application.\", wx.ITEM_NORMAL)\n\t\t\t\t\t\t \n self.menubar.Append(help_menu, \"&Help\")\n\t\t\n # tell FrameManager to manage this frame\n self._mgr = wx.aui.AuiManager()\n self._mgr.SetManagedWindow(self)\n\n self._mgr.AddPane(self._create_patients_pane(), wx.aui.AuiPaneInfo().\n Name(\"patient\").Caption(\"Patient Data\").\n Left().\n BestSize(wx.Size(600,400)).\n MinimizeButton(True).MaximizeButton(True))\n\n\t\t\n self._mgr.AddPane(self._create_controls_pane(), wx.aui.AuiPaneInfo().\n Name(\"control\").Caption(\"Dose Plan Explorer\").\n Bottom().\n BestSize(wx.Size(600,400)).\n MinimizeButton(True).MaximizeButton(True))\n\t\t\t\t\t\t \n self._mgr.AddPane(self._create_axial_slices_pane(), wx.aui.AuiPaneInfo().\n Name(\"axial\").Caption(\"Axial\").\n Center().\n BestSize(wx.Size(400,400)).\n CloseButton(False).MaximizeButton(True))\n\t\t\t\t\t\t \n self._mgr.AddPane(self._create_3D_pane(), wx.aui.AuiPaneInfo().\n Name(\"3dview\").Caption(\"3D Dose Plan\").\n Left().\n BestSize(wx.Size(1000,800)).\n MinimizeButton(True).MaximizeButton(True))\n\n self._mgr.AddPane(self._create_sagittal_slices_pane(), wx.aui.AuiPaneInfo().\n Name(\"sagittal\").Caption(\"Sagittal\").\n Right().\n BestSize(wx.Size(400,400)).\n MinimizeButton(True).MaximizeButton(True))\n\n\n self._mgr.AddPane(self._create_coronal_slices_pane(), wx.aui.AuiPaneInfo().\n Name(\"coronal\").Caption(\"Coronal\").\n Right().\n BestSize(wx.Size(400,400)).\n MinimizeButton(True).MaximizeButton(True))\n\n self._mgr.AddPane(self._create_barplot_pane(), wx.aui.AuiPaneInfo().\n Name(\"overview\").Caption(\"Probability Overview\").\n Bottom().\n BestSize(wx.Size(1000,800)).\n MinimizeButton(True).MaximizeButton(True))\n\t\t\t\t\t\t \n self._mgr.AddPane(self._create_probs_pane(), wx.aui.AuiPaneInfo().\n Name(\"probs\").Caption(\"Probabilities for Dose Plans\").\n Bottom().\n BestSize(wx.Size(1000,800)).\n MinimizeButton(True).MaximizeButton(True))\t\t\t\t \t\t\t\t\t\t \n\t\t\t\t\t\t \n self._mgr.AddPane(self._create_distplot_pane(), wx.aui.AuiPaneInfo().\n Name(\"distplot\").Caption(\"Exploration of dose distribution\").\n Bottom().\n BestSize(wx.Size(1000,800)).\n MinimizeButton(True).MaximizeButton(True))\n\t\t\t\t\t\t \n self._mgr.AddPane(self._create_scatterplot_pane(), wx.aui.AuiPaneInfo().\n Name(\"scatterplot\").Caption(\"Variability 
scatterplot\").\n Bottom().\n BestSize(wx.Size(1000,800)).\n MinimizeButton(True).MaximizeButton(True))\n\t\t\n\n self.SetMinSize(wx.Size(400, 300))\n\n # first we save this default perspective with all panes\n # visible\n self._perspectives = {}\n self._mgr.GetPane(\"doseplan\").Hide()\n self._mgr.GetPane(\"scatterplot\").Hide()\n self._mgr.GetPane(\"3dview\").Hide()\n self._mgr.GetPane(\"distplot\").Hide()\n self._perspectives['default'] = self._mgr.SavePerspective()\n\n #------------- Show maximum image view ------------------#\n\t\t\n\t\t#Show axial,coronal and sagittal panes\n self._mgr.GetPane(\"axial\").Show()\n self._mgr.GetPane(\"coronal\").Show()\n self._mgr.GetPane(\"sagittal\").Show()\n\n\t\t#Hide all the others\t\t\n self._mgr.GetPane(\"patient\").Hide()\t\t\n self._mgr.GetPane(\"control\").Hide()\n self._mgr.GetPane(\"overview\").Hide()\n self._mgr.GetPane(\"probs\").Hide()\n self._mgr.GetPane(\"3dview\").Hide()\n self._mgr.GetPane(\"scatterplot\").Hide()\n self._mgr.GetPane(\"distplot\").Hide()\n # save the perspective again\n self._perspectives['max_image'] = self._mgr.SavePerspective()\n\n #------------- Show contour uncertainty view ------------------#\n\t\t\n\t\t#Show axial, coronal, sagittal, control, overview and probabilities panes\n self._mgr.GetPane(\"axial\").Show()\n self._mgr.GetPane(\"coronal\").Right()\n self._mgr.GetPane(\"coronal\").Show()\n self._mgr.GetPane(\"sagittal\").Right()\n self._mgr.GetPane(\"sagittal\").Show()\n self._mgr.GetPane(\"control\").Show()\n self._mgr.GetPane(\"overview\").Show()\n self._mgr.GetPane(\"probs\").Show()\n\t\t\n\t\t#Hide all the others\n self._mgr.GetPane(\"patient\").Hide()\n self._mgr.GetPane(\"3dview\").Hide()\n self._mgr.GetPane(\"scatterplot\").Hide()\n self._mgr.GetPane(\"distplot\").Hide()\n\t\t\n self._perspectives['contour_view'] = self._mgr.SavePerspective()\n\t\t\n #------------- Show voxel uncertainty view ------------------#\t\t\n\t\t\n\t\t#Show 3dview, scatterplot, axial and distplot panes\n self._mgr.GetPane(\"axial\").Left().Center()\n self._mgr.GetPane(\"axial\").Show()\n self._mgr.GetPane(\"3dview\").Left().Center()\n self._mgr.GetPane(\"3dview\").Show()\n self._mgr.GetPane(\"distplot\").Right()\n self._mgr.GetPane(\"distplot\").Show()\n self._mgr.GetPane(\"scatterplot\").Right()\n self._mgr.GetPane(\"scatterplot\").Show()\n\t\t\n self._mgr.GetPane(\"patient\").Hide()\n self._mgr.GetPane(\"control\").Hide()\n self._mgr.GetPane(\"coronal\").Hide()\n self._mgr.GetPane(\"sagittal\").Hide()\n self._mgr.GetPane(\"probs\").Hide()\n self._mgr.GetPane(\"overview\").Hide()\n\t\t\n self._perspectives['voxel_view'] = self._mgr.SavePerspective()\n\t\t\n # and put back the default perspective / view\n self._mgr.LoadPerspective(self._perspectives['default'])\n\n # finally tell the AUI manager to do everything that we've\n # asked\n self._mgr.Update()\n\t\t\n\t\t#Bind the events\n self.Bind(wx.EVT_MENU, self.onAbout, id = help_about_id)\n\t\n self.patients = {}\n self.contours = {}\n\t\t\n #sns.plt.close(self.figure)\n\t\t\t\t\t\t\n self.CreateStatusBar()\n self.SetStatusText(\"Status information can be find here...\")\n\t\t\n self.new_pat = None\n self.aboutbox = None\n\t\t\n self.tracer = vtk.vtkImageTracerWidget()\n self.tracer.SetCaptureRadius(10.5)\n self.tracer.GetGlyphSource().SetColor(1, 0, 0)\n self.tracer.GetGlyphSource().SetScale(1.0) # set the size of the glyph handle\n\t\t\n # Set the initial rotation of the glyph if desired. The default glyph\n # set internally by the widget is a '+' so rotating 45 deg. 
gives a 'x'\n self.tracer.GetGlyphSource().SetRotationAngle(90.0)\n self.tracer.GetGlyphSource().Modified()\n self.tracer.GetLineProperty().SetColor(1,0,0)\n self.tracer.SetPriority(1)\n self.tracer.AutoCloseOn()\n self.tracer.IsClosed()\n\t\t\n self.dc = None\n self.cursor_dist = None",
"def __init__(self,user,passw):\n self.user = user\n self.passw=passw\n wx.Frame.__init__(self, None, -1, 'Friends', \n size=(500, 350))\n self.panel = ScrolledPanel(self, size = wx.Size( 500, 350 ))\n self.panel.SetupScrolling()\n self.Bind(wx.EVT_CLOSE, self.OnCloseMe)\n self.sizer = wx.FlexGridSizer(5,2,5,5)\n self.buttons=[]\n Publisher().subscribe(self.createButton, \"update\")\n Publisher().subscribe(self.updateButton, \"updatebuttons\")",
"def generate_panel(self):\r\n \r\n self.PanelData = self.RawData.filter(['ID', 'X', 'Z', 'W', 'R', 'β', 'LFP', 'H'], axis=1)",
"def create_widgets(self):\r\n self.create_containers()\r\n self.setup_containers()\r\n self.create_panel_widgets()\r\n self.setup_scrollbar()",
"def __init__(self, parent):\n wx.Panel.__init__(self, parent=parent, id=wx.ID_ANY)\n \n # create the AuiNotebook instance\n self.nb = wx.aui.AuiNotebook(self)\n \n \n #self.param = Data.param\n #self.currentdata = Data.currentdata\n \n \"\"\"\n Each tab on the main gui gets an entry here. Then is added to the \n \"\"\"\n self.datapanel = self.DataPanel.TabPanel(self.nb, wx.ID_ANY)\n self.plotpanel = self.PlotPanel.TabPanel(self.nb, wx.ID_ANY)\n self.parampanel = self.ParamPanel.TabPanel(self.nb, wx.ID_ANY)\n #self.fitcodepanel = wx.py.editor.EditorFrame(self.nb , filename='imports/models/default.py')\n \n \"\"\"\n Set the visable names for the tabs.\n \"\"\" \n self.tabs = [\n (self.datapanel, \"Data\"),\n (self.plotpanel, \"Plot\"),\n (self.parampanel, \"Parameters\")\n #(self.fitcodepanel, \"Fitting Code\")\n ]\n \n \"\"\"\n Add the tabs to the manager and setup the automatic sizer.\n \"\"\" \n for page, label in self.tabs:\n self.nb.AddPage(page, label)\n \n self.sizer = wx.GridSizer()\n self.sizer.Add(self.nb, 1, wx.EXPAND)\n self.SetSizerAndFit(self.sizer)",
"def _create_3D_pane(self):\n\t\t\n panel = wx.Panel(self,-1)\n self.interactor3d = wxVTKRenderWindowInteractor(panel, -1, (600,800))\n self.generate_button = wx.Button(panel, label=\"Generate 3D view\")\n self.text_position = wx.StaticText(panel, -1, \"Dose (Gy) \" , wx.Point(0, 0))\n self.slider_dose3d = wx.Slider(panel, -1, 75, 60, 100, wx.DefaultPosition, wx.Size( 100,-1 ), wx.SL_HORIZONTAL)\n self.spin_dose3d = wx.SpinCtrl(panel, wx.ID_ANY, str(self.slider_dose3d.GetValue()), wx.DefaultPosition, wx.Size( 70,-1 ), wx.SP_ARROW_KEYS, min=0, max=100, initial=11)\t\n\t\t\n button_sizer = wx.BoxSizer(wx.HORIZONTAL)\n button_sizer.AddSpacer(30)\n button_sizer.Add(self.generate_button)\n button_sizer.AddSpacer(30)\n button_sizer.Add(self.text_position)\n button_sizer.Add(self.slider_dose3d)\n button_sizer.Add(self.spin_dose3d)\n\t\t\n listsizer = wx.BoxSizer(wx.VERTICAL)\n listsizer.Add(self.interactor3d, 1, wx.EXPAND|wx.BOTTOM, 7)\n listsizer.Add(button_sizer)\n panel.SetSizer(listsizer)\n listsizer.Fit(panel)\n\t\t\n self._create_orientation_widget(self.interactor3d)\n return panel",
"def create_panel(self):\n return\n # return Panel(self)",
"def __init__(self, parent):\n # Super\n wx.Panel.__init__(self, parent)\n\n # Fig & canvas\n self.__fig = plt.figure()\n self.__canvas = FigureCanvas(self, -1, self.__fig)\n\n # Date format for x axes\n self.__tick_fmt_date = matplotlib.dates.DateFormatter('%d-%b')\n self.__tick_fmt_time = matplotlib.dates.DateFormatter('%H:%M:%S')\n\n # Sizer etc.\n self.__sizer = wx.BoxSizer(wx.VERTICAL)\n self.__sizer.Add(self.__canvas, 1, wx.LEFT | wx.TOP | wx.GROW)\n self.SetSizer(self.__sizer)\n self.Fit()",
"def setup(self, myPorts):\n panel = wx.Panel(self, wx.ID_ANY)\n panel.SetAutoLayout(1)\n self.font_std = wx.Font(12, wx.SWISS, wx.NORMAL, wx.NORMAL)\n\n serialSys_label = wx.StaticText(panel, -1, \"System Available:\")\n serialSys_comboBox = wx.ComboBox(panel, -1, size=(200, 30),\n choices=myPorts, style=wx.CB_READONLY)\n serialSys_comboBox.SetSelection(0)\n serialSys_refresh_Btn = wx.Button(panel, size=(100,30),\n label=\"Refresh\")\n\n network_label = wx.StaticText(panel, -1, \"Choose a Network:\")\n network_comboBox = wx.ComboBox(panel,-1, size=(200,30), choices='',\n style=wx.CB_READONLY)\n network_comboBox.SetSelection(0)\n network_refresh_Btn = wx.Button(panel, size=(100,30), label=\"Refresh\")\n\n password_label = wx.StaticText(panel, -1, \"Password:\")\n password_txtBox = wx.TextCtrl(panel, wx.ID_ANY, '', size=(200,27),\n style=wx.ALIGN_LEFT|wx.TE_PASSWORD)\n\n pushBtn = wx.Button(panel, size=(130,30), label=\"Push\")\n pingBtn = wx.Button(panel, size=(130,30), label=\"Ping\")\n calibrateSysBtn = wx.Button(panel, size=(130,30),label=\"Calibrate\")\n\n serialSys_label.SetFont(self.font_std)\n serialSys_comboBox.SetFont(self.font_std)\n serialSys_refresh_Btn.SetFont(self.font_std)\n network_label.SetFont(self.font_std)\n network_comboBox.SetFont(self.font_std)\n network_refresh_Btn.SetFont(self.font_std)\n password_label.SetFont(self.font_std)\n password_txtBox.SetFont(self.font_std)\n pushBtn.SetFont(self.font_std)\n pingBtn.SetFont(self.font_std)\n calibrateSysBtn.SetFont(self.font_std)\n\n serialCommStaticBox = wx.StaticBox(panel,label=\"Serial\")\n serialCommStaticBox.SetFont(self.font_std)\n serialCommBoxSizer = wx.StaticBoxSizer(serialCommStaticBox,\n wx.HORIZONTAL)\n serialCommBoxSizer.Add(serialSys_label,\n flag=wx.LEFT|wx.TOP|wx.BOTTOM|wx.RIGHT,\n border=5)\n serialCommBoxSizer.Add(serialSys_comboBox,\n flag=wx.LEFT|wx.TOP|wx.BOTTOM|wx.RIGHT,\n border=5)\n serialCommBoxSizer.Add(serialSys_refresh_Btn,\n flag=wx.TOP|wx.BOTTOM|wx.RIGHT|wx.ALIGN_RIGHT,\n border=5)\n\n wirelessGridSizer = wx.GridBagSizer(2,3)\n wirelessGridSizer.Add(network_label, pos=(0,0),\n flag=wx.TOP|wx.ALIGN_LEFT|wx.BOTTOM|wx.LEFT,\n border=5)\n wirelessGridSizer.Add(network_comboBox, pos=(0,1),\n flag=wx.TOP|wx.ALIGN_LEFT|wx.BOTTOM|wx.LEFT,\n border=5)\n wirelessGridSizer.Add(network_refresh_Btn, pos=(0,2),\n flag=wx.TOP|wx.ALIGN_LEFT|wx.BOTTOM|wx.LEFT,\n border=5)\n wirelessGridSizer.Add(password_label, pos=(1,0),\n flag=wx.TOP|wx.ALIGN_LEFT|wx.BOTTOM|wx.LEFT,\n border=5)\n wirelessGridSizer.Add(password_txtBox, pos=(1,1),\n flag=wx.TOP|wx.ALIGN_LEFT|wx.BOTTOM|wx.LEFT,\n border=5)\n\n wirelessGroupBox = wx.StaticBox(panel, label = \"Wireless\")\n wirelessGroupBox.SetFont(self.font_std)\n wirelessBoxsizer = wx.StaticBoxSizer(wirelessGroupBox, wx.HORIZONTAL)\n wirelessBoxsizer.Add(wirelessGridSizer, flag=wx.TOP|wx.LEFT|wx.BOTTOM,\n border=10)\n\n btnStaticBox = wx.StaticBox(panel, label=\"\")\n btnStaticBox.SetFont(self.font_std)\n btnBoxSizer = wx.StaticBoxSizer(btnStaticBox, wx.HORIZONTAL)\n btnBoxSizer.Add(pushBtn, flag=wx.LEFT, border=30)\n btnBoxSizer.Add(pingBtn, flag=wx.LEFT|wx.RIGHT, border=10)\n btnBoxSizer.Add(calibrateSysBtn, flag=wx.RIGHT, border=5)\n\n configDevGridSizer = wx.GridBagSizer(3,5)\n configDevGridSizer.Add(serialCommBoxSizer, pos=(0,0), span=(0,4),\n flag=wx.EXPAND|wx.TOP|wx.ALIGN_LEFT|wx.BOTTOM\n |wx.LEFT, border=10)\n configDevGridSizer.Add(wirelessBoxsizer, pos=(1,0), span=(1,4),\n flag=wx.EXPAND|wx.TOP|wx.ALIGN_LEFT|wx.BOTTOM\n |wx.LEFT, border=10)\n 
configDevGridSizer.Add(btnBoxSizer, pos=(2,0), span=(2,4),\n flag=wx.EXPAND|wx.TOP|wx.ALIGN_LEFT|wx.BOTTOM\n |wx.LEFT, border=10)\n panel.SetSizerAndFit(configDevGridSizer)\n\n self.Bind(wx.EVT_CLOSE,self.close_configuration)\n calibrateSysBtn.Bind(wx.EVT_BUTTON, self.calibrate_device)",
"def __init__(self, parent, id = -1, pos = wx.DefaultPosition,\r\n\t\t\t\t size = wx.DefaultSize, style = wx.TAB_TRAVERSAL|wx.NO_BORDER, name = wx.PanelNameStr):\r\n\t\tNbookPanel.__init__ ( self, parent, id, pos, size, style, name )\r\n\t\tself.Tag = \"ImgView\"\r\n\t\tself.Title = _(\"Image Library\")\r\n\t\tself.icon = wx.ArtProvider_GetBitmap(str(ed_glob.ID_DECA_IMAGES), wx.ART_MENU, wx.Size(16, 16))\r\n\r\n\t\tbSizer = wx.BoxSizer( wx.VERTICAL )\r\n\r\n\t\tself.mtb = aui.AuiToolBar(self, -1, agwStyle=aui.AUI_TB_HORZ_LAYOUT)\r\n\t\tself.mtb.SetToolBitmapSize(wx.Size(16,16))\r\n\t\ttbmp = wx.ArtProvider_GetBitmap(str(ed_glob.ID_ADD), wx.ART_MENU, wx.Size(16, 16))\r\n\t\tself.mtb.AddTool(wx.ID_ADD, '', tbmp, tbmp, wx.ITEM_NORMAL,\r\n\t\t\t\t\t\t_(\"Add image\"), _(\"Import image into the library\"), None)\r\n\t\ttbmp = wx.ArtProvider_GetBitmap(str(ed_glob.ID_REMOVE), wx.ART_MENU, wx.Size(16, 16))\r\n\t\tself.mtb.AddTool(wx.ID_REMOVE, '', tbmp, tbmp, wx.ITEM_NORMAL,\r\n\t\t\t\t\t\t_(\"Remove image\"), _(\"Remove imeage from the library\"), None)\r\n\t\ttbmp = wx.ArtProvider_GetBitmap(str(ed_glob.ID_REFRESH), wx.ART_MENU, wx.Size(16, 16))\r\n\t\tself.mtb.AddTool(wx.ID_REFRESH, '', tbmp, tbmp, wx.ITEM_NORMAL,\r\n\t\t\t\t\t\t_(\"Refresh\"), _(\"Reload library contents\"), None)\r\n\t\tself.mtb.Realize()\r\n\r\n\t\tbSizer.Add( self.mtb, proportion=0, flag=wx.EXPAND, border=5 )\r\n\t\tself.view = wx.ListCtrl( self, wx.ID_ANY, style=wx.LC_ICON|wx.LC_AUTOARRANGE )\r\n\t\t#self.view = libul.UltimateListCtrl( self, agwStyle=wx.LC_ICON|wx.LC_AUTOARRANGE| libul.ULC_AUTOARRANGE)\r\n\t\t#self.view.InsertColumn(0, heading=\"\", width= 220)\r\n\t\t#self.view.InsertColumn(1, heading=\"\", width= 220)\r\n\r\n\t\tbSizer.Add( self.view, proportion=1, flag=wx.EXPAND, border=0 )\r\n\r\n\t\tself.SetSizer( bSizer )\r\n\t\tself.Layout()\r\n\r\n\t\tself.items = []\r\n\r\n\t\tself.Bind(wx.EVT_MENU, self.OnAddImage, id=wx.ID_ADD)\r\n\t\tself.Bind(wx.EVT_MENU, self.OnDelete, id=wx.ID_REMOVE)\r\n\t\tself.Bind(wx.EVT_MENU, self.UpdateView, id=wx.ID_REFRESH)\r\n\t\tself.Bind(wx.EVT_LIST_ITEM_SELECTED, self.OnItemSelected, self.view)\r\n\r\n\t\twx.CallAfter(self.UpdateView)",
"def create_widgets( self ):",
"def make_control_frame(self):\n \n #Generate and place the frame for housing the controls\n control_frame = tk.Frame(self, padx = 0,\n bg=self.default_background)\n control_frame.grid(column=0, row=4, columnspan = 1)\n \n #Generate the time indicator\n time_label = tk.Label(control_frame, text=self.translate('Day')+': ',\n bg=self.default_background)\n time_label.grid(column=0, row=0)\n self.timev = tk.StringVar()\n self.timev.set(str(self.timeSeries[-1]))\n self.time_value_label = tk.Label(control_frame, textvariable=self.timev,\n bg=self.default_background)\n self.time_value_label.grid(column=1, row=0, sticky='W')\n \n #set width of the control dropdowns\n boxwidth = 30\n \n #Generate each of the policy control dropdowns\n index = 0 \n self.policy_option_vars = dict()\n self.option_menus = []\n fontsizelist = []\n for policy in self.PolicyDicts.keys():\n option1_label = self.DynamicLabel(400, control_frame, text=self.translate(policy)+': ',\n bg=self.default_background)\n option1_label.grid(column=0, row=index+1)\n self.option1_label_list.append(option1_label)\n fontsizelist.append(option1_label.font.actual(\"size\"))\n option1_list = []\n for entry in list(self.PolicyDicts[policy].keys()):\n option1_list.append(self.translate(entry))\n \n self.policy_option_vars[policy] = tk.StringVar()\n defaultpolicy = self.translate(self.PolicyDictsInv[policy][self.SD_Map.retrieve_ob(policy).value()])\n \n self.policy_option_vars[policy].set(defaultpolicy)\n self.option_menus.append(tk.OptionMenu(control_frame, self.policy_option_vars[policy], \n *option1_list, \n command=lambda value, policy=policy: self.update_Policy(policy)\n ))\n self.option_menus[-1].config(width=boxwidth, anchor='w',\n bg=self.button_color,\n highlightbackground=self.highlight_color)\n self.option_menus[-1]['menu'].config(bg=self.button_color)\n self.option_menus[-1].grid(column=1, row=index+1, columnspan=2)\n \n index+=1\n \n #Resize the fontsize of all the labels to match that of the smallest\n newsize = min(fontsizelist)\n for policylabel in self.option1_label_list:\n policylabel.font.configure(size=newsize)\n\n \n button_font_sizes = []\n \n #Generate the Next Week simulation button\n run_button = self.DynamicButton(300,control_frame, text=self.translate('Next Week'), \n command = lambda: self.increment_time(),\n bg=self.button_color,\n highlightbackground=self.highlight_color)\n run_button.grid(column=0, row=8, columnspan=1, sticky='E')\n button_font_sizes.append(run_button.font.actual(\"size\"))\n \n #Generate the Run Autonomously button\n automatic_button = self.DynamicButton(300,control_frame, text=self.translate('Run Autonomously'), \n command = lambda: self.automatic_window(),\n bg=self.button_color,\n highlightbackground=self.highlight_color)\n automatic_button.grid(column=1, row=8, columnspan=1)\n button_font_sizes.append(automatic_button.font.actual(\"size\"))\n \n #Generate the Clear Simulation Button\n clear_button = self.DynamicButton(300,control_frame, text = self.translate('Clear Simulation'),\n command = lambda: self.clear_simulation(),\n bg=self.button_color,\n highlightbackground=self.highlight_color,)\n clear_button.grid(column=2, row=8, columnspan=2)\n button_font_sizes.append(clear_button.font.actual(\"size\"))\n \n #Resize all the button texts to match that of the smallest\n new_button_font_size = min(button_font_sizes)\n run_button.font.configure(size=new_button_font_size)\n automatic_button.font.configure(size=new_button_font_size)\n clear_button.font.configure(size=new_button_font_size)\n \n return 
control_frame",
"def create_widgets(self):",
"def OnPanelPaint(self, event):\r\n\r\n dc = wx.PaintDC(self._panel)\r\n rect = self._panel.GetClientRect()\r\n\r\n bmp = wx.EmptyBitmap(rect.width, rect.height)\r\n\r\n mem_dc = wx.MemoryDC()\r\n mem_dc.SelectObject(bmp)\r\n\r\n endColour = wx.SystemSettings_GetColour(wx.SYS_COLOUR_BTNSHADOW)\r\n startColour = LightColour(endColour, 50)\r\n mem_dc.GradientFillLinear(rect, startColour, endColour, wx.SOUTH)\r\n\r\n # Draw the caption title and place the bitmap\r\n # get the bitmap optimal position, and draw it\r\n bmpPt, txtPt = wx.Point(), wx.Point()\r\n bmpPt.y = (rect.height - self._bmp.GetHeight())/2\r\n bmpPt.x = 3\r\n mem_dc.DrawBitmap(self._bmp, bmpPt.x, bmpPt.y, True)\r\n\r\n # get the text position, and draw it\r\n font = wx.SystemSettings_GetFont(wx.SYS_DEFAULT_GUI_FONT)\r\n font.SetWeight(wx.BOLD)\r\n mem_dc.SetFont(font)\r\n fontHeight = mem_dc.GetCharHeight()\r\n \r\n txtPt.x = bmpPt.x + self._bmp.GetWidth() + 4\r\n txtPt.y = (rect.height - fontHeight)/2\r\n mem_dc.SetTextForeground(wx.WHITE)\r\n mem_dc.DrawText(\"Opened tabs:\", txtPt.x, txtPt.y)\r\n mem_dc.SelectObject(wx.NullBitmap)\r\n \r\n dc.DrawBitmap(bmp, 0, 0)",
"def create_panel_widgets(self):\r\n self.panel_widgets[\"create_order_button\"] = tk.Button(\r\n master=self.container_widgets[\"panel_frame\"],\r\n command=lambda: self.create_order(),\r\n text=const.RECORD_WIDGET_CONFIG_VALUES[\"CREATE_ORDER_BUTTON\"][\"text\"],\r\n width=const.RECORD_WIDGET_CONFIG_VALUES[\"CREATE_ORDER_BUTTON\"][\"width\"],\r\n height=const.RECORD_WIDGET_CONFIG_VALUES[\"CREATE_ORDER_BUTTON\"][\"height\"])\r\n self.panel_widgets[\"create_order_button\"].grid(\r\n row=const.RECORD_WIDGET_CONFIG_VALUES[\"CREATE_ORDER_BUTTON\"][\"row\"],\r\n column=const.RECORD_WIDGET_CONFIG_VALUES[\"CREATE_ORDER_BUTTON\"][\"column\"])\r\n\r\n self.panel_widgets[\"show_menu_button\"] = tk.Button(\r\n self.container_widgets[\"panel_frame\"],\r\n command=lambda: MenuWindow(self.databases.menu),\r\n text=const.RECORD_WIDGET_CONFIG_VALUES[\"SHOW_MENU_BUTTON\"][\"text\"],\r\n width=const.RECORD_WIDGET_CONFIG_VALUES[\"SHOW_MENU_BUTTON\"][\"width\"],\r\n height=const.RECORD_WIDGET_CONFIG_VALUES[\"SHOW_MENU_BUTTON\"][\"height\"])\r\n self.panel_widgets[\"show_menu_button\"].grid(\r\n row=const.RECORD_WIDGET_CONFIG_VALUES[\"SHOW_MENU_BUTTON\"][\"row\"],\r\n column=const.RECORD_WIDGET_CONFIG_VALUES[\"SHOW_MENU_BUTTON\"][\"column\"])\r\n\r\n self.panel_widgets[\"show_orders_button\"] = tk.Button(\r\n self.container_widgets[\"panel_frame\"],\r\n command=lambda: self.open_archive(),\r\n text=const.RECORD_WIDGET_CONFIG_VALUES[\"OPEN_ARCHIVE_BUTTON\"][\"text\"],\r\n width=const.RECORD_WIDGET_CONFIG_VALUES[\"OPEN_ARCHIVE_BUTTON\"][\"width\"],\r\n height=const.RECORD_WIDGET_CONFIG_VALUES[\"OPEN_ARCHIVE_BUTTON\"][\"height\"])\r\n\r\n self.panel_widgets[\"show_orders_button\"].grid(\r\n row=const.RECORD_WIDGET_CONFIG_VALUES[\"OPEN_ARCHIVE_BUTTON\"][\"row\"],\r\n column=const.RECORD_WIDGET_CONFIG_VALUES[\"OPEN_ARCHIVE_BUTTON\"][\"column\"])\r\n\r\n self.panel_widgets[\"restore_orders_button\"] = tk.Button(\r\n self.container_widgets[\"panel_frame\"],\r\n command=lambda: self.restore_open_orders(),\r\n text=const.RECORD_WIDGET_CONFIG_VALUES[\"RESTORE_ORDERS_BUTTON\"][\"text\"],\r\n width=const.RECORD_WIDGET_CONFIG_VALUES[\"RESTORE_ORDERS_BUTTON\"][\"width\"],\r\n height=const.RECORD_WIDGET_CONFIG_VALUES[\"RESTORE_ORDERS_BUTTON\"][\"height\"])\r\n\r\n self.panel_widgets[\"restore_orders_button\"].grid(\r\n row=const.RECORD_WIDGET_CONFIG_VALUES[\"RESTORE_ORDERS_BUTTON\"][\"row\"],\r\n column=const.RECORD_WIDGET_CONFIG_VALUES[\"RESTORE_ORDERS_BUTTON\"][\"column\"])",
"def buildmainframe(self):\n self.mainframewidgets=[]\n for x in range(3):\n thislabel = Label(self.mainframe, text=str(x))\n thislabel.grid()\n self.mainframewidgets.append(thislabel)",
"def _initLayout(self):\n\t\tpanel = wx.Panel(self)\n\n\t\t# Create a font object\n\t\tfont = wx.SystemSettings.GetFont(wx.SYS_SYSTEM_FONT)\n\t\tfont.SetPointSize(9)\n\n\t\t# Vertical sizer will contain multiple horizontal sizers as rows\n\t\tvbox = wx.BoxSizer(wx.VERTICAL)\n\n\t\t# First Row: The text we need to categorize\n\t\thbox1 = wx.BoxSizer(wx.HORIZONTAL)\n\t\tst1 = wx.StaticText(panel, label='Text')\n\t\tst1.SetFont(font)\n\t\thbox1.Add(st1, flag=wx.RIGHT, border=8)\n\t\ttc = wx.TextCtrl(panel)\n\t\tself._textControl = tc\n\t\thbox1.Add(tc, proportion=1)\n\t\tvbox.Add(hbox1, flag=wx.EXPAND|wx.LEFT|wx.RIGHT|wx.TOP, border=10)\n\t\t\n\t\t# The existing class assignment\n\t\thboxExisting = wx.BoxSizer(wx.HORIZONTAL)\n\t\tlabel = wx.StaticText(panel, label='Current')\n\t\tlabel.SetFont(font)\n\t\thboxExisting.Add(label, flag=wx.RIGHT, border=8)\n\t\t\n\t\tlabel = wx.StaticText(panel, label=\"(unassigned)\")\n\t\tself._existingClass = \"(unassigned)\"\n\t\tself._existingClassLabel = label\n\t\tlabel.SetFont(font)\n\t\thboxExisting.Add(label, flag=wx.RIGHT, border=8)\n\t\tvbox.Add(hboxExisting, flag=wx.EXPAND|wx.LEFT|wx.RIGHT|wx.TOP, border=10)\n\t\t\n\t\t# Button to keep the current class assignment\n\t\tbutton = wx.Button(panel, label=\"KEEP\", name=\"*KEEP\")\n\t\thboxExisting.Add(button, flag=wx.RIGHT)\n\n\t\t# Button to skip this record, i.e., move to next record without writing this one out\n\t\tbutton = wx.Button(panel, label=\"DELETE\", name=\"*KILL\")\n\t\thboxExisting.Add(button, flag=wx.RIGHT)\n\n\t\tvbox.Add((-1, 10))\n\n\t\t# Buttons for classes that can be assigned to the text\n\t\thbox2 = wx.BoxSizer(wx.HORIZONTAL)\n\t\tst2 = wx.StaticText(panel, label='Reassign to...')\n\t\tst2.SetFont(font)\n\t\thbox2.Add(st2)\n\t\tvbox.Add(hbox2, flag=wx.LEFT | wx.TOP, border=10)\n\n\t\tvbox.Add((-1, 10))\n\n\t\t# Grid of buttons, one for each class label\n\t\thbox3 = wx.GridSizer(8,5,50)\n\t\n\t\tfor label in sorted(labels.LABELS):\n\t\t\tbutton = MyButton(panel, label=label, size=(70, 30), name=label)\n\t\t\thbox3.Add(button)\n\n\t\tvbox.Add(hbox3, flag=wx.LEFT|wx.RIGHT|wx.EXPAND, \n\t\t\tborder=10)\n\t\t\n\t\tpanel.SetSizer(vbox)",
"def build(self):\n with self.set_master(sticky=\"nsew\", row_weights=[1], column_weights=[0, 1], auto_columns=0):\n self.build_category_canvas()\n with self.set_master(sticky=\"nsew\", row_weights=[0, 1, 0], column_weights=[1, 1]):\n self.build_previous_range_button(row=0, column=0)\n self.build_hidden_fields_checkbutton(row=0, column=1)\n with self.set_master(sticky=\"nsew\", row=1, column=0, row_weights=[1], column_weights=[1]):\n self.build_entry_frame()\n with self.set_master(sticky=\"nsew\", row=1, column=1, row_weights=[1], column_weights=[1]):\n self.build_field_frame()\n self.build_next_range_button(row=2, column=0)",
"def InitUI(self, frame):\n\t\timport wx\n\t\tframe.SetSize((200,210))\n\t\t\n\t\tpanel = wx.Panel(frame)\n\t\t\n\t\tvbox = wx.BoxSizer(wx.VERTICAL)\n\t\t\n\t\thbox1 = wx.BoxSizer(wx.HORIZONTAL)\n\t\tstartButton = wx.Button(panel, label='Start Baselining', size=(170, 25))\n\t\thbox1.Add(startButton)\n\t\tvbox.Add(hbox1, flag=wx.LEFT | wx.RIGHT | wx.ALIGN_CENTRE, border=5)\n\t\t\n\t\tvbox.Add((-1, 5))\n\t\t\n\t\thbox2 = wx.BoxSizer(wx.HORIZONTAL)\n\t\tstopButton = wx.Button(panel, label='Stop Baselining', size=(170, 25))\n\t\thbox2.Add(stopButton)\n\t\tvbox.Add(hbox2, flag=wx.LEFT | wx.RIGHT | wx.ALIGN_CENTRE, border=5)\n\t\t\n\t\tvbox.Add((-1, 5))\n\t\t\n\t\thbox6 = wx.BoxSizer(wx.HORIZONTAL)\n\t\tresetButton = wx.Button(panel, label='Reset', size=(170, 25))\n\t\thbox6.Add(resetButton)\n\t\tvbox.Add(hbox6, flag=wx.LEFT | wx.RIGHT | wx.ALIGN_CENTRE, border=5)\n\t\t\n\t\tvbox.Add((-1, 10))\n\t\t\n\t\thbox3 = wx.BoxSizer(wx.HORIZONTAL)\n\t\tself.onOffText = wx.StaticText(panel, label='Off')\n\t\thbox3.Add(self.onOffText)\n\t\tvbox.Add(hbox3, flag=wx.LEFT | wx.RIGHT | wx.ALIGN_CENTRE, border=5)\n\t\t\n\t\tvbox.Add((-1, 20))\n\t\t\n\t\thbox4 = wx.BoxSizer(wx.HORIZONTAL)\n\t\tminText = wx.StaticText(panel, label='Min', style=wx.ALIGN_CENTRE)\n\t\tmaxText = wx.StaticText(panel, label='Max', style=wx.ALIGN_CENTRE)\n\t\thbox4.Add(minText, -1)\n\t\thbox4.Add(maxText, -1)\n\t\tvbox.Add(hbox4, flag=wx.EXPAND | wx.LEFT | wx.RIGHT, border=5)\n\t\t\n\t\tvbox.Add((-1, 5))\n\t\t\n\t\thbox5 = wx.BoxSizer(wx.HORIZONTAL)\n\t\tself.minBox = wx.TextCtrl(panel, style=wx.TE_READONLY)\n\t\tself.maxBox = wx.TextCtrl(panel, style=wx.TE_READONLY)\n\t\tself.minBox.SetBackgroundColour((210,210,210))\n\t\tself.maxBox.SetBackgroundColour((210,210,210))\n\t\thbox5.Add(self.minBox, -1, wx.EXPAND)\n\t\thbox5.Add(self.maxBox, -1, wx.EXPAND)\n\t\tvbox.Add(hbox5, flag=wx.LEFT | wx.RIGHT | wx.EXPAND, border=5)\n\t\t\n\t\tpanel.SetSizer(vbox)\n\t\t\n\t\t# Bind items\n\t\tframe.Bind(wx.EVT_BUTTON, self.OnStartPress, id=startButton.GetId())\n\t\tframe.Bind(wx.EVT_BUTTON, self.OnStopPress, id=stopButton.GetId())\n\t\tframe.Bind(wx.EVT_BUTTON, self.OnResetPress, id=resetButton.GetId())",
"def create_panel(self):\n # Main Frame creation\n frame1 = Frame(self.window)\n frame1.pack(fill=\"both\")\n tablayout = Notebook(frame1)\n \n ##### TRACKER #####\n tab = Frame(tablayout) # creating 1st nested frame\n tab.pack(fill=\"both\")\n table = Frame(tab)\n table.pack(fill=\"both\")\n self.show_table(self.t.timeline[\"week\" + str(self.week)], table) # Grids the week with data\n self.add_buttons(tab, table)\n tablayout.add(tab, text=\"Current Week\") \n \n \n ##### STATS #####\n tab = Frame(tablayout) # creating 2nd nested frame\n tab.pack(fill=\"both\")\n self.stats.create_canvas(tab)\n\n\n # once its packed you can add it to the window object under a title\n tablayout.add(tab, text=\"Statistics\") \n tablayout.pack(fill=\"both\") # once everything is done now you pack the tablayout",
"def __init__(self, title, path, names, devices, network, monitors):\n super().__init__(parent=None, title=title, size=(800, 600))\n\n \"\"\"Initialise variables.\"\"\"\n self.names = names\n self.devices = devices\n self.monitors = monitors\n self.network = network\n # Setting up the file menu\n self.fileMenu = wx.Menu()\n self.fileMenu.Append(102, _(u\"&About\"))\n self.fileMenu.Append(wx.ID_OPEN, _(u\"&Open\"))\n self.fileMenu.Append(103, _(u\"&Quit\"))\n\n # Create the menu bar\n self.menuBar = wx.MenuBar()\n # Adding the \"file menu\" to the menu bar\n self.menuBar.Append(self.fileMenu, _(u\"&File\"))\n # Adding the menu bar to the frame content\n self.SetMenuBar(self.menuBar)\n\n # Canvas for drawing signals\n self.canvas_2d = MyGLCanvas2D(self, devices, monitors)\n self.canvas_3d = MyGLCanvas3D(self, devices, monitors)\n self.canvas_2d.Show()\n self.canvas_3d.Hide()\n\n # Configure the widgets\n self.lblLogWindow = wx.StaticText(self, -1, label = _(u\"Console\"))\n self.logWindow = wx.TextCtrl(self, -1, style = wx.TE_MULTILINE|wx.TE_READONLY, size = (100, 500))\n\n self.lblList = ['2D', '3D'] \n self.render = wx.RadioBox(self, label = _(u\"Render\"), choices = self.lblList, majorDimension = 1, style = wx.RA_SPECIFY_ROWS)\n self.render.SetSelection(0)\n self.state = self.render.GetSelection()\n self.languagelist = [\"English\", \"Chinese\"]\n self.language = wx.RadioBox(self, label = _(u\"Language\"), choices = self.languagelist, majorDimension = 1, style = wx.RA_SPECIFY_ROWS) \n global lang_sel\n self.language.SetSelection(lang_sel)\n self.current_lang = self.language.GetSelection()\n self.text_run = wx.StaticText(self, wx.ID_ANY, _(u\"Cycles to run\"))\n self.text_cont = wx.StaticText(self, wx.ID_ANY, _(u\"Cycles to continue\"))\n self.spin_run = wx.SpinCtrl(self, wx.ID_ANY, \"10\", max=2147483647)\n self.spin_cont = wx.SpinCtrl(self, wx.ID_ANY, \"2\", max=2147483647)\n self.run = wx.Button(self, wx.ID_ANY, _(u\"Run\"))\n self.cont = wx.Button(self, wx.ID_ANY, _(u\"Continue\"))\n self.ResetButton = wx.Button(self, wx.ID_ANY, _(u\"Clear\"))\n self.set_switch = wx.Button(self, wx.ID_ANY, _(u\"Set Switches\"))\n self.select_monitor = wx.Button(self, wx.ID_ANY, _(u\"Monitor\"))\n self.default_position = wx.Button(self, wx.ID_ANY, _(u\"Default Position\"))\n\n # Bind events to widgets\n self.Bind(wx.EVT_MENU, self.on_menu)\n self.render.Bind(wx.EVT_RADIOBOX, self.on_radiobox)\n self.language.Bind(wx.EVT_RADIOBOX, self.on_sel_language)\n self.spin_run.Bind(wx.EVT_SPINCTRL, self.on_spin)\n self.spin_cont.Bind(wx.EVT_SPINCTRL, self.on_spin_cont)\n self.run.Bind(wx.EVT_BUTTON, self.on_run_button)\n self.cont.Bind(wx.EVT_BUTTON, self.on_continue_button)\n self.ResetButton.Bind(wx.EVT_BUTTON, self.on_reset_button)\n self.set_switch.Bind(wx.EVT_BUTTON, self.check_box)\n self.select_monitor.Bind(wx.EVT_BUTTON, self.check_box_monitor)\n self.default_position.Bind(wx.EVT_BUTTON, self.on_default_pos)\n \n # Configure sizers for layout\n self.main_sizer = wx.BoxSizer(wx.HORIZONTAL)\n side_sizer = wx.BoxSizer(wx.VERTICAL)\n\n self.main_sizer.Add(self.canvas_2d, 5, wx.EXPAND | wx.ALL, 5)\n self.main_sizer.Add(side_sizer, 1, wx.ALL, 5)\n\n side_sizer.Add(self.language, 0, wx.TOP, 5)\n side_sizer.Add(self.render, 0, wx.TOP, 5)\n side_sizer.Add(self.text_run, 1, wx.TOP, 10)\n side_sizer.Add(self.spin_run, 1, wx.ALL, 5)\n side_sizer.Add(self.run, 1, wx.EXPAND, 5)\n side_sizer.Add(self.text_cont, 1, wx.TOP, 10)\n side_sizer.Add(self.spin_cont, 1, wx.ALL, 5)\n side_sizer.Add(self.cont, 1, wx.EXPAND, 5)\n 
side_sizer.Add(self.set_switch, 1, wx.EXPAND, 5)\n side_sizer.Add(self.select_monitor, 1, wx.EXPAND, 5)\n side_sizer.Add(self.ResetButton, 1, wx.EXPAND, 5)\n side_sizer.Add(self.default_position, 1, wx.EXPAND, 5)\n side_sizer.Add(self.lblLogWindow, 1, wx.TOP, 5)\n side_sizer.Add(self.logWindow, 1, wx.EXPAND, 5)\n\n self.SetSizeHints(600, 600)\n self.SetSizer(self.main_sizer)\n\n # A modal show will lock out the other windows until it has been dealt with\n # Very useful in some programming tasks to ensure that things happen in an order\n # that the programmer expects, but can be very frustrating to the user if it is\n # used to excess\n self.exitconfirmation = wx.MessageDialog(\n self,\n _(u\"Are you sure you want to quit the simulation? \\n\"),\n _(u\"Confirmation\"),\n wx.YES_NO)\n self.openFileDialog = wx.FileDialog(\n self,\n _(u\"Select Logic Definition File\"),\n \"\",\n \"\",\n _(u\"Logic definition files (*.txt)|*.txt\"),\n wx.FD_OPEN | wx.FD_FILE_MUST_EXIST)\n sys.stdout = self.logWindow",
"def __init__(self,parent=None, id=-1,\r\n pos=wx.DefaultPosition,\r\n title='Hello, wxPython!',\r\n size=wx.DefaultSize): #3图像参数\r\n wx.Frame.__init__(self, parent, id, title, pos, size)\r\n#4 显示图像\r\n panel = wx.Panel(self) #创建画板\r\n button_submit = wx.Button(panel, label=u\"提交\", pos=(400, 50),size=(100, 30)) \r\n button_reset = wx.Button(panel, label=u\"重置\", pos=(400, 100),size=(100, 30)) \r\n button_view = wx.Button(panel, label=u\"查看\", pos=(400, 150),size=(100, 30)) \r\n #temp = image.ConvertToBitmap()\r\n #bmp_size = temp.GetWidth(), temp.GetHeight()\r\n self.bmp = wx.StaticBitmap(parent=panel, pos=(40,40))\r\n\r\n self.tips_ctrl = wx.StaticText(panel, -1, \"Start\", pos=(40, 170))\r\n self.code_ctrl = wx.TextCtrl(panel, -1, \"\", pos=(200, 50),size=(150,30),style=wx.TE_PROCESS_ENTER)\r\n self.code_ctrl.SetFocus()\r\n \r\n #绑定按钮的单击事件\r\n self.Bind(wx.EVT_BUTTON, self.OnSubmit, button_submit)\r\n self.Bind(wx.EVT_BUTTON, self.OnReset, button_reset)\r\n self.Bind(wx.EVT_BUTTON, self.OnView, button_view)\r\n #绑定窗口的关闭事件\r\n self.Bind(wx.EVT_CLOSE, self.OnCloseWindow)\r\n self.Bind(wx.EVT_TEXT_ENTER,self.OnSubmit,self.code_ctrl)\r\n\r\n self.register = business.SohuMailbox()\r\n self.refresh()",
"def _buidUISizer(self):\n flagsR = wx.RIGHT | wx.ALIGN_CENTER_VERTICAL\n mSizer = wx.BoxSizer(wx.HORIZONTAL)\n mSizer.AddSpacer(5)\n gv.iMapPanel = self.mapPanel = gp.PanelMap(self)\n mSizer.Add(self.mapPanel, flag=flagsR, border=2)\n mSizer.AddSpacer(5)\n mSizer.Add(wx.StaticLine(self, wx.ID_ANY, size=(-1, 560),\n style=wx.LI_VERTICAL), flag=flagsR, border=2)\n mSizer.AddSpacer(5)\n gv.iCtrlPanel = gp.PanelCtrl(self)\n mSizer.Add(gv.iCtrlPanel)\n return mSizer",
"def addInputs(self):\n #create a static boxsizer\n static_box = wx.StaticBox(self, label=\"Step 2: Optimize\")\n box_sizer = wx.StaticBoxSizer(static_box, wx.VERTICAL)\n fgs = wx.FlexGridSizer(rows=4, cols=4, vgap=10, hgap=10)\n box_sizer.Add(fgs, proportion=1, flag=wx.EXPAND)\n\n fgs.Add(wx.StaticText(self, label=\"Score Weights\"))\n self.prefWghts1 = wx.lib.intctrl.IntCtrl(self, value=10, \n min=0, max=100, size=(34,22))\n self.prefWghts2 = wx.lib.intctrl.IntCtrl(self, value=10, \n min=0, max=100, size=(34,22))\n self.prefWghts3 = wx.lib.intctrl.IntCtrl(self, value=10, \n min=0, max=100, size=(34,22))\n\n fgs.AddMany([(self.prefWghts1), \n (self.prefWghts2), \n (self.prefWghts3)]) \n\n fgs.Add(wx.StaticText(self, label=\"Preferences\"))\n self.Prefs = wx.lib.intctrl.IntCtrl(self, value=10, \n min=0, max=100, size=(34,22))\n fgs.AddMany([ (self.Prefs),\n (wx.StaticText(self)), \n (wx.StaticText(self)) ])\n\n fgs.Add(wx.StaticText(self, label=\"Excess Capacity\"))\n self.ExcessCap = wx.lib.intctrl.IntCtrl(self, value=10, \n min=0, max=100, size=(34,22))\n fgs.AddMany([ (self.ExcessCap),\n (wx.StaticText(self)), \n (wx.StaticText(self)) ])\n\n fgs.Add(wx.StaticText(self, label=\"Congestion\"))\n self.CongPenalty = wx.lib.intctrl.IntCtrl(self, value=10, \n min=0, max=100, size=(34,22))\n fgs.AddMany([ (self.CongPenalty),\n (wx.StaticText(self)), \n (wx.StaticText(self)) ])\n\n fgs.Add(wx.StaticText(self, label=\"Dept. Fairness\"))\n self.DeptFairness = wx.lib.intctrl.IntCtrl(self, value=10, \n min=0, max=100, size=(34,22))\n fgs.AddMany([ (self.DeptFairness),\n (wx.StaticText(self)), \n (wx.StaticText(self)) ])\n\n fgs.Add(wx.StaticText(self, label=\"Back to Back\"))\n self.Back2Back = wx.lib.intctrl.IntCtrl(self, value=10, \n min=0, max=100, size=(34,22))\n fgs.AddMany([ (self.Back2Back),\n (wx.StaticText(self)), \n (wx.StaticText(self)) ])\n\n self.optimize_btn = wx.Button(self, label=\"Optimize\")\n self.optimize_btn.Bind(wx.EVT_BUTTON, self.onOptimize)\n fgs.Add(self.optimize_btn)\n \n self.SetSizerAndFit(box_sizer)\n self.Disable()\n \n pub.subscribe(self.enable, \"data_loaded\")\n pub.subscribe(self.updateWeights, \"update_weights\")",
"def _create_barplot_pane(self):\n \n panel = wx.Panel(self, -1)\n\t\t\n self.figb = Figure()\n self.axb = self.figb.add_subplot(111)\n\t\t\n self.axb.set_xlabel(\"Isodoses\", fontsize=14, fontweight = 'semibold') #fontsize=24\n self.axb.set_ylabel(\"Probability\", fontsize = 14, fontweight = 'semibold')\n self.axb.set_xlim(68, 93)\n self.axb.set_ylim(0, 1)\n\t\t\n self.canvasb = FigureCanvas(panel, -1, self.figb)\n self.toolbarb = NavigationToolbar(self.canvasb)\n\t\t\n vbox = wx.BoxSizer(wx.VERTICAL)\n vbox.Add(self.canvasb, 1, wx.EXPAND|wx.BOTTOM, 7)\n vbox.Add(self.toolbarb, 0, wx.EXPAND)\n\t\t\n panel.SetSizer(vbox)\n vbox.Fit(panel)\n\t\t\n return panel",
"def __init__(self, IdRange, parent=None):\n\n wx.Frame.__init__(self, parent, -1, 'Settings', size=(300, 500))\n self.SetIcon(wx.Icon('icons/gEcrit.png', wx.BITMAP_TYPE_PNG))\n ConfigBook = wx.Notebook(self)\n\n dflt_text_win = DefaultCodeFr(self, -1)\n\n ConfigPanel = wx.Panel(ConfigBook)\n ConfigPanel2 = wx.Panel(ConfigBook)\n\n ColPal.CollorPaletteWindow(0, IdRange)\n first_sizer = wx.BoxSizer(wx.VERTICAL)\n\n AutosaveBox = wx.CheckBox(ConfigPanel, -1, \"Enable Autosave\", (10,\n 10), (160, -1))\n AutosaveBox.Bind(wx.EVT_CHECKBOX, lambda event: CallChangeOption(event,\n \"Autosave\", AutosaveBox.GetValue(), IdRange))\n\n AutosaveBox.Bind(wx.EVT_CHECKBOX, lambda event: ToggleSpinner(event,\n AutosaveBox.GetValue(), Interval))\n\n Inter_Info = wx.StaticText(ConfigPanel, -1,\n \"Save data each # of characters:\", (20,\n 35))\n\n Interval = wx.SpinCtrl(ConfigPanel, -1, \"\", (20, 60), (90, -1))\n Interval.SetRange(1, 500)\n Interval.SetValue(Config.GetOption(\"Autosave Interval\"))\n Interval.Bind(wx.EVT_SPINCTRL, lambda event: CallChangeOption(event,\n \"Autosave Interval\", Interval.GetValue(), IdRange))\n\n if not Config.GetOption(\"Autosave\"):\n AutosaveBox.SetValue(False)\n Interval.Disable()\n else:\n AutosaveBox.SetValue(True)\n\n RmTrlBox = wx.CheckBox(ConfigPanel,-1,\"Strip Trailing Spaces On Save\",\n pos = (20, 70), size = (-1, -1))\n RmTrlBox.Bind(wx.EVT_CHECKBOX, lambda event: CallChangeOption(event,\n \"StripTrails\", RmTrlBox.GetValue()))\n RmTrlBox.SetValue(Config.GetOption(\"StripTrails\"))\n\n StatusBarBox = wx.CheckBox(ConfigPanel, -1, \"Enable StatusBar\",\n (10, 90), (160, -1))\n StatusBarBox.Bind(wx.EVT_CHECKBOX, lambda event: \\\n CallChangeOption(event, \"StatusBar\",\n StatusBarBox.GetValue(), IdRange))\n\n StatusBarBox.SetValue(Config.GetOption(\"StatusBar\"))\n\n Src_Br_Box = wx.CheckBox(ConfigPanel, -1,\n \"Enable Source Browser\", (10, 115), (-1,\n -1))\n\n Src_Br_Box.Bind(wx.EVT_CHECKBOX, lambda event: CallChangeOption(event,\n \"SourceBrowser\", Src_Br_Box.GetValue(), IdRange))\n\n Src_Br_Box.SetValue(Config.GetOption(\"SourceBrowser\"))\n\n FileTreeBox = wx.CheckBox(ConfigPanel, -1,\n \"Enable File Tree Browser\", (10, 117),\n (-1, -1))\n\n FileTreeBox.Bind(wx.EVT_CHECKBOX, lambda event: CallChangeOption(event,\n \"FileTree\", FileTreeBox.GetValue(), IdRange))\n\n FileTreeBox.SetValue(Config.GetOption(\"FileTree\"))\n\n SpellBox = wx.CheckBox(ConfigPanel, -1, \"Enable Spell Checker\",\n (10, 120), (-1, -1))\n\n SpellBox.Bind(wx.EVT_CHECKBOX, lambda event: CallChangeOption(event,\n \"SpellCheck\", SpellBox.GetValue(), IdRange))\n\n SpellBox.SetValue(Config.GetOption(\"SpellCheck\"))\n\n SpellBox.Bind(wx.EVT_CHECKBOX, lambda event: ToggleSpinner(event,\n SpellBox.GetValue(), SpellSugBox))\n\n SpellSugBox = wx.CheckBox(ConfigPanel, -1,\n \"Show Spell Suggestions\", (10, 120), (-1,\n -1))\n\n SpellSugBox.Bind(wx.EVT_CHECKBOX, lambda event: CallChangeOption(event,\n \"SpellSuggestions\", SpellSugBox.GetValue(),\n IdRange))\n\n SpellSugBox.SetValue(Config.GetOption(\"SpellSuggestions\"))\n\n DfltTextBox = wx.CheckBox(ConfigPanel, -1,\n \"Enable New Document Default Text\", (10,\n 130), (-1, -1))\n\n DfltTextBox.Bind(wx.EVT_CHECKBOX, lambda event: CallChangeOption(event,\n \"DefaultTextAct\", DfltTextBox.GetValue()))\n\n DfltTextBox.Bind(wx.EVT_CHECKBOX, lambda event: ToggleSpinner(event,\n DfltTextBox.GetValue(), DfltTextBtn))\n DfltTextBox.SetValue(Config.GetOption(\"DefaultTextAct\"))\n\n DfltTextBtn = wx.Button(ConfigPanel, -1,\n \"Edit Document Default 
Text\", (50, 135),\n (-1, -1))\n\n DfltTextBtn.Bind(wx.EVT_BUTTON, dflt_text_win.ShowMe)\n DfltTextBtn.Enable(Config.GetOption(\"DefaultTextAct\"))\n\n LogActBox = wx.CheckBox(ConfigPanel, -1, \"Enable Log\", (10, 140),\n (160, -1))\n\n LogActBox.Bind(wx.EVT_CHECKBOX, lambda event: CallChangeOption(event,\n \"ActLog\", LogActBox.GetValue(), IdRange))\n\n LogActBox.SetValue(Config.GetOption(\"ActLog\"))\n\n PalleteButton = wx.Button(ConfigPanel, -1, \"Colour Palette\", pos=\n (10, 220), size=(-1, -1))\n\n PalleteButton.Bind(wx.EVT_BUTTON, ColPal.ShowMe)\n\n DefaultsButton = wx.Button(ConfigPanel, -1, \"Reset to Defaults\",\n pos=(10, 260), size=(-1, -1))\n\n DefaultsButton.Bind(wx.EVT_BUTTON, lambda event: \\\n CallChangeOption(event, \"Defaults\",\n \"Defaults\", IdRange))\n\n DefaultsButton.Bind(wx.EVT_BUTTON, lambda event: \\\n CallChangeColorFile(event, \"Defaults\",\n \"Defaults\"))\n\n ViewButton = wx.Button(ConfigPanel, label=\"View Log\", pos=(10,\n 180), size=(-1, -1))\n\n ViewButton.Bind(wx.EVT_BUTTON, self.viewLog)\n\n EraseButton = wx.Button(ConfigPanel, label=\"Erase Log\", pos=(50,\n 180), size=(-1, -1))\n\n EraseButton.Bind(wx.EVT_BUTTON, Log.EraseLog)\n EraseButton.Bind(wx.EVT_BUTTON, lambda event: ToggleSpinner(event,\n False, EraseButton))\n\n\n OKButton = wx.Button(ConfigPanel, -1, \"OK\", pos=(200, 420), size=\n (80, 40))\n\n OKButton.Bind(wx.EVT_CLOSE, self.HideMe)\n OKButton.Bind(wx.EVT_BUTTON, self.HideMe)\n\n special_sizer = wx.BoxSizer(wx.HORIZONTAL)\n special_sizer.Add(ViewButton, 0)\n special_sizer.Add(EraseButton, 0)\n\n first_sizer.Add(AutosaveBox, 0, wx.EXPAND, wx.ALL, 5)\n first_sizer.Add(Inter_Info, 0, wx.ALL, 5)\n first_sizer.Add(Interval, 0, wx.LEFT, 30)\n first_sizer.Add(RmTrlBox, 0 , wx.EXPAND)\n first_sizer.Add(StatusBarBox, 0, wx.EXPAND, wx.ALL, 5)\n first_sizer.Add(Src_Br_Box, 0, wx.EXPAND, wx.ALL, 5)\n first_sizer.Add(FileTreeBox, 0, wx.EXPAND, wx.ALL, 5)\n first_sizer.Add(SpellBox, 0, wx.EXPAND, wx.ALL, 5)\n first_sizer.Add(SpellSugBox, 0, wx.EXPAND, wx.ALL, 15)\n first_sizer.Add(DfltTextBox, 0, wx.EXPAND)\n first_sizer.Add(DfltTextBtn, 0, wx.LEFT, 30)\n first_sizer.Add(LogActBox, 0, wx.EXPAND, wx.ALL, 5)\n first_sizer.Add(PalleteButton, 0, wx.ALL, 5)\n first_sizer.Add(special_sizer, 0, wx.ALL, 5)\n first_sizer.Add(DefaultsButton, 0)\n ConfigPanel.SetSizer(first_sizer)\n\n second_sizer = wx.BoxSizer(wx.VERTICAL)\n LineNrBox = wx.CheckBox(ConfigPanel2, -1, \"Show Line Numbers\", (10,\n 10), (-1, -1))\n\n LineNrBox.Bind(wx.EVT_CHECKBOX, lambda event: CallChangeOption(event,\n \"LineNumbers\", LineNrBox.GetValue(), IdRange))\n\n LineNrBox.SetValue(Config.GetOption(\"LineNumbers\"))\n\n SyntaxHgBox = wx.CheckBox(ConfigPanel2, -1, \"Syntax Highlight \",\n (10, 35), (-1, -1))\n\n SyntaxHgBox.Bind(wx.EVT_CHECKBOX, lambda event: CallChangeOption(event,\n \"SyntaxHighlight\", SyntaxHgBox.GetValue(),\n IdRange))\n\n SyntaxHgBox.SetValue(Config.GetOption(\"SyntaxHighlight\"))\n\n AutoIdentBox = wx.CheckBox(ConfigPanel2, -1, \"Autoindentation\",\n (10, 60), (-1, -1))\n\n AutoIdentBox.Bind(wx.EVT_CHECKBOX, lambda event: \\\n CallChangeOption(event, \"Autoindentation\",\n AutoIdentBox.GetValue(), IdRange))\n\n AutoIdentBox.Bind(wx.EVT_CHECKBOX, lambda event: ToggleSpinner(event,\n AutoIdentBox.GetValue(), IndentSizeBox))\n\n AutoIdentBox.SetValue(Config.GetOption(\"Autoindentation\"))\n\n IndentSizeBox = wx.SpinCtrl(ConfigPanel2, -1, \"\", (35, 85), (90,\n -1))\n\n IndentSizeBox.SetRange(1, 12)\n IndentSizeBox.SetValue(Config.GetOption(\"IndentSize\"))\n\n 
IndentSizeBox.Bind(wx.EVT_SPINCTRL, lambda event: \\\n CallChangeOption(event, \"IndentSize\",\n IndentSizeBox.GetValue(), IdRange))\n\n if Config.GetOption(\"Autoindentation\") == True:\n IndentSizeBox.Enable()\n else:\n IndentSizeBox.Disable()\n\n IndentationGuidesBox = wx.CheckBox(ConfigPanel2, -1,\n \"Indentation Guides\", (10, 110), (-1, -1))\n\n IndentationGuidesBox.SetValue(Config.GetOption(\"IndetationGuides\"))\n\n IndentationGuidesBox.Bind(wx.EVT_CHECKBOX, lambda event: \\\n CallChangeOption(event,\n \"IndetationGuides\",\n IndentationGuidesBox.GetValue(),\n IdRange))\n\n BackSpaceUnindentBox = wx.CheckBox(ConfigPanel2, -1,\n \"Backspace to Unindent\", (10, 135), (-1, -1))\n BackSpaceUnindentBox.SetValue(Config.GetOption(\"BackSpaceUnindent\"))\n\n BackSpaceUnindentBox.Bind(wx.EVT_CHECKBOX, lambda event: \\\n CallChangeOption(event,\n \"BackSpaceUnindent\",\n BackSpaceUnindentBox.GetValue(),\n IdRange))\n\n WhitespaceBox = wx.CheckBox(ConfigPanel2, -1, \"Show Whitespace\",\n (10, 160), (-1, -1))\n WhitespaceBox.SetValue(Config.GetOption(\"Whitespace\"))\n\n WhitespaceBox.Bind(wx.EVT_CHECKBOX, lambda event: \\\n CallChangeOption(event, \"Whitespace\",\n WhitespaceBox.GetValue(), IdRange))\n\n UseTabsBox = wx.CheckBox(ConfigPanel2, -1, \"Use Tabs\", (10, 185),\n (160, -1))\n UseTabsBox.SetValue(Config.GetOption(\"UseTabs\"))\n\n UseTabsBox.Bind(wx.EVT_CHECKBOX, lambda event: CallChangeOption(event,\n \"UseTabs\", UseTabsBox.GetValue(), IdRange))\n\n CarretInfo = wx.StaticText(ConfigPanel2, -1, 'Carret Width:', (10,\n 210))\n\n CarretWidthSpin = wx.SpinCtrl(ConfigPanel2, -1, \"\", (35, 235), (-1,\n -1))\n CarretWidthSpin.SetRange(1, 20)\n CarretWidthSpin.SetValue(Config.GetOption(\"CarretWidth\"))\n\n CarretWidthSpin.Bind(wx.EVT_SPINCTRL, lambda event: \\\n CallChangeOption(event, \"CarretWidth\",\n CarretWidthSpin.GetValue(), IdRange))\n\n FoldMarkBox = wx.CheckBox(ConfigPanel2, -1, \"Fold Marks\", (10,\n 265), (160, -1))\n\n FoldMarkBox.Bind(wx.EVT_CHECKBOX, lambda event: CallChangeOption(event,\n \"FoldMarks\", FoldMarkBox.GetValue(), IdRange))\n\n FoldMarkBox.SetValue(Config.GetOption(\"FoldMarks\"))\n\n TabInfo = wx.StaticText(ConfigPanel2, -1, \"Tab Width:\", pos=(10,\n 300), size=(-1, -1))\n\n TabWidthBox = wx.SpinCtrl(ConfigPanel2, -1, \"\", pos=(35, 320),\n size=(90, -1))\n\n TabWidthBox.SetValue(Config.GetOption(\"TabWidth\"))\n\n TabWidthBox.Bind(wx.EVT_SPINCTRL, lambda event: CallChangeOption(event,\n \"TabWidth\", TabWidthBox.GetValue(), IdRange))\n\n EdgeLineBox = wx.CheckBox(ConfigPanel2, -1, \"Edge Line\", pos=(10,\n 350), size=(-1, -1))\n EdgeLineBox.SetValue(Config.GetOption(\"EdgeLine\"))\n\n EdgeLineBox.Bind(wx.EVT_CHECKBOX, lambda event: CallChangeOption(event,\n \"EdgeLine\", EdgeLineBox.GetValue(), IdRange))\n\n EdgeLineBox.Bind(wx.EVT_CHECKBOX, lambda event: ToggleSpinner(event,\n EdgeLineBox.GetValue(), EdgeLinePos))\n\n EdgeInfo = wx.StaticText(ConfigPanel2, -1, \"Edge Line Position:\",\n pos=(35, 375), size=(-1, -1))\n\n EdgeLinePos = wx.SpinCtrl(ConfigPanel2, -1, \"\", pos=(35, 400),\n size=(-1, -1))\n EdgeLinePos.SetValue(Config.GetOption(\"EdgeColumn\"))\n\n if Config.GetOption(\"EdgeLine\"):\n EdgeLinePos.Enable()\n else:\n EdgeLinePos.Disable()\n\n EdgeLinePos.Bind(wx.EVT_SPINCTRL, lambda event: CallChangeOption(event,\n \"EdgeColumn\", EdgeLinePos.GetValue(), IdRange))\n\n BraceCompBox = wx.CheckBox(ConfigPanel2,-1,\"Autocomplete Braces\",\n pos=(10,200),size=(-1,-1))\n BraceCompBox.Bind(wx.EVT_CHECKBOX,lambda event: CallChangeOption(\n 
event,\"BraceComp\",BraceCompBox.GetValue(),IdRange))\n BraceCompBox.SetValue(Config.GetOption(\"BraceComp\"))\n\n second_sizer.Add(LineNrBox, 0, wx.EXPAND)\n second_sizer.Add(SyntaxHgBox, 0, wx.EXPAND)\n second_sizer.Add(AutoIdentBox, 0, wx.EXPAND)\n second_sizer.Add(IndentSizeBox, 0, wx.LEFT, 30)\n second_sizer.Add(IndentationGuidesBox, 0, wx.EXPAND)\n second_sizer.Add(BackSpaceUnindentBox, 0, wx.EXPAND)\n second_sizer.Add(WhitespaceBox, 0, wx.EXPAND)\n second_sizer.Add(UseTabsBox, 0, wx.EXPAND, 30)\n second_sizer.Add(CarretInfo, 0, wx.EXPAND)\n second_sizer.Add(CarretWidthSpin, 0, wx.LEFT, 30)\n second_sizer.Add(FoldMarkBox, 0, wx.EXPAND)\n second_sizer.Add(TabInfo, 0, wx.EXPAND)\n second_sizer.Add(TabWidthBox, 0, wx.LEFT, 30)\n second_sizer.Add(EdgeLineBox, 0, wx.EXPAND)\n second_sizer.Add(EdgeInfo, 0, wx.EXPAND)\n second_sizer.Add(EdgeLinePos, 0, wx.LEFT, 30)\n second_sizer.Add(BraceCompBox,0,wx.EXPAND)\n\n ConfigPanel2.SetSizer(second_sizer)\n\n OKButton2 = wx.Button(ConfigPanel2, -1, \"OK\", pos=(200, 420),\n size=(80, 40))\n OKButton2.Bind(wx.EVT_CLOSE, self.HideMe)\n OKButton2.Bind(wx.EVT_BUTTON, self.HideMe)\n\n third_sizer = wx.BoxSizer(wx.VERTICAL)\n\n ConfigPanel3 = wx.Panel(ConfigBook)\n BashBox = wx.CheckBox(ConfigPanel3, -1, \"OS Terminal\", pos=(10,\n 10), size=(-1, -1))\n BashBox.Bind(wx.EVT_CHECKBOX, lambda event: CallChangeOption(event,\n \"BashShell\", BashBox.GetValue(), IdRange))\n\n BashBox.Bind(wx.EVT_CHECKBOX, lambda event: ToggleSpinner(event,\n BashBox.GetValue(), OSPath))\n\n BashBox.SetValue(Config.GetOption(\"BashShell\"))\n\n OSInfo = wx.StaticText(ConfigPanel3, -1, \"OS shell path:\", pos=(10,\n 30), size=(-1, -1))\n\n OSPath = wx.TextCtrl(ConfigPanel3, -1, \"\", pos=(10, 50), size=(250,\n -1))\n OSPath.SetValue(Config.GetOption(\"OSPath\"))\n OSPath.Enable(BashBox.GetValue())\n\n OSApply = wx.Button(ConfigPanel3, -1, \"Apply\", pos=(10, 80),\n size=(-1, -1))\n OSApply.Bind(wx.EVT_BUTTON, lambda event: CallChangeOption(event,\n \"OSPath\", OSPath.GetValue(), IdRange))\n\n PythonBox = wx.CheckBox(ConfigPanel3, -1, \"Python Terminal\", pos=\n (10, 110), size=(-1, -1))\n PythonBox.Bind(wx.EVT_CHECKBOX, lambda event: CallChangeOption(event,\n \"PythonShell\", PythonBox.GetValue(), IdRange))\n\n PythonBox.Bind(wx.EVT_CHECKBOX, lambda event: ToggleSpinner(event,\n PythonBox.GetValue(), PyPath))\n\n PythonBox.SetValue(Config.GetOption(\"PythonShell\"))\n\n PyInfo = wx.StaticText(ConfigPanel3, -1, \"Python shell path:\",\n pos=(10, 130), size=(-1, -1))\n\n PyPath = wx.TextCtrl(ConfigPanel3, -1, \"\", pos=(10, 150), size=(250,\n -1))\n PyPath.SetValue(Config.GetOption(\"PyPath\"))\n PyPath.Enable(PythonBox.GetValue())\n\n PyApply = wx.Button(ConfigPanel3, -1, \"Apply\", pos=(10, 180),\n size=(-1, -1))\n PyApply.Bind(wx.EVT_BUTTON, lambda event: CallChangeOption(event,\n \"PyPath\", PyPath.GetValue(), IdRange))\n\n third_sizer.Add(BashBox, 0, wx.EXPAND, 5)\n third_sizer.Add(OSInfo, 0, wx.EXPAND, 5)\n third_sizer.Add(OSPath, 0, wx.EXPAND, 5)\n third_sizer.Add(OSApply, 0, 5)\n third_sizer.Add(PythonBox, 0, wx.EXPAND, 5)\n third_sizer.Add(PyInfo, 0, wx.EXPAND, 5)\n third_sizer.Add(PyPath, 0, wx.EXPAND, 5)\n third_sizer.Add(PyApply, 0, 5)\n\n ConfigPanel3.SetSizer(third_sizer)\n\n OKButton4 = wx.Button(ConfigPanel3, -1, \"OK\", pos=(200, 420),\n size=(80, 40))\n OKButton4.Bind(wx.EVT_BUTTON, self.HideMe)\n\n ConfigBook.AddPage(ConfigPanel, \"General\")\n ConfigBook.AddPage(ConfigPanel2, \"Editor\")\n ConfigBook.AddPage(ConfigPanel3, \"Terminals\")\n 
self.Bind(wx.EVT_CLOSE, self.HideMe)\n\n self.Hide()\n self.Centre()",
"def __init__(self):\r\n \r\n self.orientation = wx.VERTICAL\r\n self.type = 0\r\n self.rect = wx.Rect()",
"def __init__(self, parent, *args, **kwds):\n \n Lead12Dialog.__init__(self, *args, **kwds)\n self.parent=parent\n sizersize = self.leadI_sizer.GetSize()\n print sizersize\n bigsizer = self.leadII_sizer.GetSize()\n print bigsizer\n self.plotter_I=Plotter(self,(308,162))\n self.plotter_II=Plotter(self,(308,162))\n self.plotter_III=Plotter(self,(308,162))\n self.plotter_aVR=Plotter(self,(308,162))\n self.plotter_aVL=Plotter(self,(308,162))\n self.plotter_aVF=Plotter(self,(308,162))\n self.plotter_V1=Plotter(self,(308,162))\n self.plotter_V2=Plotter(self,(308,162))\n self.plotter_V3=Plotter(self,(308,162))\n self.plotter_V4=Plotter(self,(308,162))\n self.plotter_V5=Plotter(self,(308,162))\n self.plotter_V6=Plotter(self,(308,162))\n \n self.leadI_sizer.Add(self.plotter_I.plotpanel,1, wx.LEFT|wx.RIGHT|wx.BOTTOM|wx.EXPAND, 4)\n self.small_leadII_sizer.Add(self.plotter_II.plotpanel,1, wx.LEFT|wx.RIGHT|wx.BOTTOM|wx.EXPAND, 4)\n self.leadIII_sizer.Add(self.plotter_III.plotpanel,1, wx.LEFT|wx.RIGHT|wx.BOTTOM|wx.EXPAND, 4)\n self.aVR_sizer.Add(self.plotter_aVR.plotpanel,1, wx.LEFT|wx.RIGHT|wx.BOTTOM|wx.EXPAND, 4)\n self.aVL_sizer.Add(self.plotter_aVL.plotpanel,1, wx.LEFT|wx.RIGHT|wx.BOTTOM|wx.EXPAND, 4)\n self.aVF_sizer.Add(self.plotter_aVF.plotpanel,1, wx.LEFT|wx.RIGHT|wx.BOTTOM|wx.EXPAND, 4)\n self.V1_sizer.Add(self.plotter_V1.plotpanel,1, wx.LEFT|wx.RIGHT|wx.BOTTOM|wx.EXPAND, 4)\n self.V2_sizer.Add(self.plotter_V2.plotpanel,1, wx.LEFT|wx.RIGHT|wx.BOTTOM|wx.EXPAND, 4)\n self.V3_sizer.Add(self.plotter_V3.plotpanel,1, wx.LEFT|wx.RIGHT|wx.BOTTOM|wx.EXPAND, 4)\n self.V4_sizer.Add(self.plotter_V4.plotpanel,1, wx.LEFT|wx.RIGHT|wx.BOTTOM|wx.EXPAND, 4)\n self.V5_sizer.Add(self.plotter_V5.plotpanel,1, wx.LEFT|wx.RIGHT|wx.BOTTOM|wx.EXPAND, 4)\n self.V6_sizer.Add(self.plotter_V6.plotpanel,1, wx.LEFT|wx.RIGHT|wx.BOTTOM|wx.EXPAND, 4)\n\n self.plotter_I.plot(self.parent.myECG.ecg_leadI[500:2000])\n self.plotter_II.plot(self.parent.myECG.ecg_leadII[500:2000])\n self.plotter_III.plot(self.parent.myECG.ecg_leadIII[500:2000])\n self.plotter_aVR.plot(self.parent.myECG.ecg_leadaVR[500:2000])\n self.plotter_aVL.plot(self.parent.myECG.ecg_leadaVL[500:2000])\n self.plotter_aVF.plot(self.parent.myECG.ecg_leadaVF[500:2000])\n self.plotter_V1.plot(self.parent.myECG.ecg_leadV1[500:2000])\n self.plotter_V2.plot(self.parent.myECG.ecg_leadV2[500:2000])\n self.plotter_V3.plot(self.parent.myECG.ecg_leadV3[500:2000])\n self.plotter_V4.plot(self.parent.myECG.ecg_leadV4[500:2000])\n self.plotter_V5.plot(self.parent.myECG.ecg_leadV5[500:2000])\n self.plotter_V6.plot(self.parent.myECG.ecg_leadV6[500:2000])\n\n self.plotter_bigII=extendPlotter(self,(1500,162)) \n self.leadII_sizer.Add(self.plotter_bigII.plotpanel,1, wx.LEFT|wx.RIGHT|wx.BOTTOM|wx.EXPAND, 4)\n self.plotter_bigII.extendplot(self.parent.myECG.ecg_leadII[500:6500])",
"def init_control_panel(self):\n # initialize panel as QFrame\n panel = QtGui.QFrame(self)\n panel.setFrameStyle(QtGui.QFrame.StyledPanel)\n\n # set components\n vbox = QtGui.QVBoxLayout(panel)\n vbox.setSpacing(15)\n vbox.addWidget(self.init_summary_panel())\n vbox.addWidget(self.init_edit_panel())\n\n return panel",
"def __init__(self):\r\n\r\n object.__init__(self)\r\n \r\n self.dock_direction = 0\r\n self.dock_layer = 0\r\n self.dock_row = 0\r\n self.size = 0\r\n self.min_size = 0\r\n self.resizable = True\r\n self.fixed = False\r\n self.toolbar = False\r\n self.rect = wx.Rect()\r\n self.panes = []",
"def __init__(self):\n\n # Create the main frame\n EasyFrame.__init__(self, \"Panel Demo - v2\")\n\n # Create the nested frame for the date panel\n data_panel = self.addPanel(row=0, column=0,\n background=\"gray\")\n\n # Create and add widgets to the data panel\n data_panel.addLabel(text=\"Label 1\", row=0, column=0,\n background=\"gray\")\n data_panel.addTextField(text=\"Text1\", row=0, column=1)\n data_panel.addLabel(text=\"Label 2\", row=1, column=0,\n background=\"gray\")\n data_panel.addTextField(text=\"Text2\", row=1, column=1)\n\n # Create nested frame for button panel\n button_panel = self.addPanel(row=1, column=0,\n background=\"black\")\n\n # Create and add buttons to the button panel\n button_panel.addButton(text=\"B1\", row=0, column=0)\n button_panel.addButton(text=\"B2\", row=0, column=1)\n button_panel.addButton(text=\"B3\", row=0, column=2)",
"def btnBuilder(self, label, sizer, handler):\n\t\tself.btns[label]=btn = wx.Button(self, label=label)\n\t\tbtn.Bind(wx.EVT_BUTTON, handler)\n\t\tsizer.Add(btn, 0, wx.ALL|wx.CENTER, 5)",
"def __init__(self, image, parent=None, id=-1,\n pos=wx.DefaultPosition, title='Hello, wxPython!'):\n print('test.py.Frame.init:{},txt_line:{}'.format(image, self.txt_line))\n temp = image.ConvertToBitmap()\n size = temp.GetWidth(), temp.GetHeight()\n wx.Frame.__init__(self, parent, id, title, pos, size)\n self.panel = wx.Panel(self)\n\n self.btn = wx.Button(self.panel, label='完成支付', pos=(0, 325), size=(150, 75))\n self.btn.Bind(event=wx.EVT_BUTTON, handler=self.on_exist)\n btn1 = wx.Button(self.panel, label='取消支付', pos=(150, 325), size=(150, 75), name='cancel')\n btn1.Bind(event=wx.EVT_BUTTON, handler=self.on_cancel)\n\n self.bmp = wx.StaticBitmap(parent=self.panel, bitmap=temp)\n self.SetClientSize(size)\n\n self.txt = wx.StaticText(self.panel, label='', pos=(0, 300), size=(300, 30))\n font = wx.Font(18, wx.DECORATIVE, wx.ITALIC, wx.NORMAL)\n self.txt.SetFont(font)",
"def _build_gui(self):\n box = qt.QHBoxLayout(self)\n box.addWidget(self._but)\n\n lab = self._lab\n Pol = qt.QSizePolicy\n lab.setSizePolicy(Pol.Expanding, Pol.Preferred)\n lab.setFrameStyle(qt.QLabel.Panel)\n box.addWidget(lab)",
"def _build_gui(self):\n box = qt.QHBoxLayout(self)\n box.addWidget(self._but)\n\n lab = self._lab\n Pol = qt.QSizePolicy\n lab.setSizePolicy(Pol.Expanding, Pol.Preferred)\n lab.setFrameStyle(qt.QLabel.Panel)\n box.addWidget(lab)",
"def _InitUI( self, two_axes = False ):\n dpis = wx.ScreenDC().GetPPI()\n size = ( WIDGET_PREF_SIZE[ 0 ] / dpis[ 0 ], WIDGET_PREF_SIZE[ 1 ] / dpis[ 0 ] )\n self.fig = Figure( facecolor = '#ececec', figsize = size, dpi = dpis[ 0 ] )\n\n self._InitAxes()\n# if two_axes:\n# self.ax = self.fig.add_axes([ 0.1, 0.1, 0.85, 0.65 ])\n# self.ax2 = self.ax.twiny()\n# else:\n# self.ax = self.fig.add_subplot( 111 )\n self.canvas = FigureCanvas( self, -1, self.fig )\n self.canvas.SetMinClientSize( wx.Size( 200, 200 ) )\n self.toolbar = NavigationToolbar( self.canvas )\n #self.toolbar.Realize()\n self.toolbar.SetBackgroundColour( wx.Colour( 236, 236, 236, 255 ) )\n self.toolbar.Show( False )\n\n sizer = wx.BoxSizer( wx.VERTICAL )\n sizer.Add( self.toolbar, 0, wx.LEFT | wx.TOP | wx.BOTTOM | wx.EXPAND, 1 )\n sizer.Add( self.canvas, 1, wx.LEFT | wx.TOP | wx.BOTTOM | wx.EXPAND, 1 )\n self.SetSizer( sizer )\n\n self.callbackIds[ 'button_release_event' ] = \\\n self.canvas.mpl_connect( 'button_release_event', self._OnMplMouseRelease )\n self.callbackIds[ 'motion_notify_event' ] = \\\n self.canvas.mpl_connect( 'motion_notify_event', self._OnMplMouseMotion )\n\n self.Bind( wx.EVT_CLOSE, self._OnClose )\n self.Bind( wx.EVT_CONTEXT_MENU, self._OnContextMenu )\n self.Bind( wx.EVT_SIZE, self._OnSize )\n\n self.timer = wx.Timer( self, TIMERID_RESIZE )\n self.Bind( wx.EVT_TIMER, self._OnTimer )",
"def LayoutComponents(self):\n sizer = wx.BoxSizer(wx.VERTICAL)\n\n # Add header\n header = self.FindWindow(\"header\")\n if header is not None:\n sizer.Add(header, 0, wx.EXPAND, 0)\n sizer.Add(self.HorizLine(self), 0, wx.ALL | wx.EXPAND, 0)\n\n # Add content\n content = self.FindWindow(\"content\")\n if content is not None:\n sizer.Add(content, 1, wx.EXPAND, 0)\n else:\n sizer.AddSpacer(1)\n\n # Add action buttons\n actions = self.FindWindow(\"actions\")\n if actions is not None:\n sizer.Add(self.HorizLine(self), 0, wx.ALL | wx.EXPAND, 0)\n # proportion is 0 to ask the sizer to never hide the buttons\n sizer.Add(actions, 0, wx.EXPAND, 0)\n\n # Since Layout doesn't happen until there is a size event, you will\n # sometimes have to force the issue by calling Layout yourself. For\n # example, if a frame is given its size when it is created, and then\n # you add child windows to it, and then a sizer, and finally Show it,\n # then it may not receive another size event (depending on platform)\n # in order to do the initial layout. Simply calling self.Layout from\n # the end of the frame's __init__ method will usually resolve this.\n self.SetSizer(sizer)\n self.Layout()",
"def _create_content(self):\n panel = sppasPanel(self, name=\"content\")\n\n to = wx.StaticText(panel, label=\"To: \")\n self.to_text = wx.StaticText(\n parent=panel,\n label=sg.__contact__)\n\n subject = wx.StaticText(panel, label=MSG_EMAIL_SUBJECT)\n self.subject_text = wx.StaticText(\n parent=panel,\n label=sg.__name__ + \" \" + sg.__version__ + \" - Feedback...\")\n\n body = wx.StaticText(panel, label=MSG_EMAIL_BODY)\n body_style = wx.TAB_TRAVERSAL | wx.TE_BESTWRAP |\\\n wx.TE_MULTILINE | wx.BORDER_STATIC\n self.body_text = sppasTextCtrl(\n parent=panel,\n value=DESCRIBE_TEXT,\n style=body_style)\n self.body_text.SetSelection(0, len(DESCRIBE_TEXT))\n self.body_text.Bind(wx.EVT_CHAR, self._on_char, self.body_text)\n\n grid = wx.FlexGridSizer(4, 2, 5, 5)\n grid.AddGrowableCol(1)\n grid.AddGrowableRow(2)\n\n grid.Add(to, 0, wx.LEFT, 4)\n grid.Add(self.to_text, 0, flag=wx.EXPAND)\n\n grid.Add(subject, 0, wx.LEFT, 4)\n grid.Add(self.subject_text, 0, flag=wx.EXPAND)\n\n grid.Add(body, 0, wx.TOP | wx.LEFT, 4)\n grid.Add(self.body_text, 2, flag=wx.EXPAND)\n\n s = wx.StaticText(panel, label=MSG_EMAIL_SEND_WITH)\n grid.Add(s, 0, wx.LEFT | wx.BOTTOM, 4)\n\n panel.SetAutoLayout(True)\n panel.SetSizer(grid)\n self.SetContent(panel)",
"def _create_distplot_pane(self):\n panel = wx.Panel(self, -1)\n\t\t\n self.fig_violin = Figure()\n self.ax_violin = self.fig_violin.add_subplot(111)\n\t\t\n self.ax_violin.set_xlabel(\"Voxel values\")\n self.ax_violin.set_ylabel(\"Density\")\n self.ax_violin.set_xlim(60, 120)\n self.ax_violin.set_ylim(0, 0.3)\n\t\t\n self.canvas_violin = FigureCanvas(panel, -1, self.fig_violin)\n self.toolbar_violin = NavigationToolbar(self.canvas_violin)\n\t\t\n self.canvas_violin.mpl_connect('pick_event', self.onPickdist)\n\t\t\n vbox = wx.BoxSizer(wx.VERTICAL)\n vbox.Add(self.canvas_violin, 1, wx.EXPAND|wx.BOTTOM, 7)\n vbox.Add(self.toolbar_violin, 0, wx.EXPAND)\n\t\t\n panel.SetSizer(vbox)\n vbox.Fit(panel)\n\t\t\n return panel",
"def createResample(self):\n\t\tbox = wx.StaticBox(self, -1, \"Resample now to\")\n\t\tself.currDimText = u\"Current dataset original dimensions: %d x %d x %d\"\n\t\tself.dimsLbl = wx.StaticText(self, -1, self.currDimText % (0, 0, 0))\n\t\t\n\t\tboxsizer = wx.StaticBoxSizer(box, wx.VERTICAL)\n\t\tpanel = wx.Panel(self, -1)\n\t\tboxsizer.Add(panel, 1)\n\t\t\n\t\tsizer = wx.GridBagSizer()\n\t\t\n\n\t\tself.factorBox = wx.BoxSizer(wx.HORIZONTAL)\n\t\tself.factorLabel = factorLabel = wx.StaticText(panel, -1, \"scale:\")\n\t\t#self.factorBox.Add(factorLabel,0,wx.ALIGN_CENTER_VERTICAL)\n\t\tself.factorX = wx.TextCtrl(panel, -1, \"%.2f\" % 1, size = (50, -1))\n\t\tself.factorBox.Add(self.factorX)\n\t\tx1 = wx.StaticText(panel, -1, \"x\")\n\t\tself.factorBox.Add(x1, 0, wx.ALIGN_CENTER_VERTICAL)\n\t\tself.factorY = wx.TextCtrl(panel, -1, \"%.2f\" % 1, size = (50, -1))\n\t\tself.factorBox.Add(self.factorY, 0, wx.ALIGN_CENTER_VERTICAL)\n\t\tx2 = wx.StaticText(panel, -1, \"x\")\n\t\tself.factorBox.Add(x2, 0, wx.ALIGN_CENTER_VERTICAL)\n\t\tself.factorZ = wx.TextCtrl(panel, -1, \"%.2f\" % 1, size = (50, -1))\n\t\tself.factorBox.Add(self.factorZ, 0, wx.ALIGN_CENTER_VERTICAL)\n\n\t\t\n\t\tself.dimLabel = dimLabel = wx.StaticText(panel, -1, \"dimensions: \")\n\t\tval = UIElements.AcceptedValidator\n\t\tx1 = wx.StaticText(panel, -1, \"x\")\n\t\tx2 = wx.StaticText(panel, -1, \"x\")\n\t\t\n\t\tself.newDimX = wx.TextCtrl(panel, -1, \"512\", validator = val(string.digits), size = (50, -1))\n\t\tself.newDimY = wx.TextCtrl(panel, -1, \"512\", validator = val(string.digits), size = (50, -1))\n\t\tself.newDimZ = wx.TextCtrl(panel, -1, \"25\", validator = val(string.digits), size = (50, -1))\n\t\t\n\t\tself.newDimX.Bind(wx.EVT_TEXT, self.onUpdateDims)\n\t\tself.newDimY.Bind(wx.EVT_TEXT, self.onUpdateDims)\n\t\tself.newDimZ.Bind(wx.EVT_TEXT, self.onUpdateDims)\n\t\t\n\t\tself.factorX.Bind(wx.EVT_TEXT, self.onUpdateFactors)\n\t\tself.factorY.Bind(wx.EVT_TEXT, self.onUpdateFactors)\n\t\tself.factorZ.Bind(wx.EVT_TEXT, self.onUpdateFactors)\n\t\t\n\t\t\n\t\tdimsizer = wx.BoxSizer(wx.HORIZONTAL)\n\t\tdimsizer.Add(self.newDimX, 0, wx.ALIGN_CENTER_VERTICAL)\n\t\tdimsizer.Add(x1, 0, wx.ALIGN_CENTER_VERTICAL)\n\t\tdimsizer.Add(self.newDimY, 0, wx.ALIGN_CENTER_VERTICAL)\n\t\tdimsizer.Add(x2, 0, wx.ALIGN_CENTER_VERTICAL)\n\t\tdimsizer.Add(self.newDimZ, 0, wx.ALIGN_CENTER_VERTICAL)\n\t\t\n\t\thalfSize = wx.RadioButton(panel, -1, \"1/2 original size\")\n\t\thalfSize.SetValue(1)\n\t\thalfSize.Bind(wx.EVT_RADIOBUTTON, self.onSetToHalfSize)\n\t\n\t\tfourthSize = wx.RadioButton(panel, -1, \"1/4 original size\")\n\t\tfourthSize.Bind(wx.EVT_RADIOBUTTON, self.onSetToFourthSize)\n\t\tcustDims = wx.RadioButton(panel, -1, \"custom size\")\n\t\tcustDims.Bind(wx.EVT_RADIOBUTTON, self.onSetToCustDims)\n\t\thalfSizeBox = wx.BoxSizer(wx.HORIZONTAL)\n\t\tfourthSizeBox = wx.BoxSizer(wx.HORIZONTAL)\n\t\t\n\t\tparenLbl = wx.StaticText(panel, -1, \"(\")\n\t\tparenLbl2 = wx.StaticText(panel, -1, \"(\")\n\t\t\n\t\t\n\t\tself.halfResampleZ = wx.CheckBox(panel, -1, \"resample Z)\")\n\t\tself.halfResampleZ.Bind(wx.EVT_CHECKBOX, self.onSetToHalfSize)\n\t\tself.fourthResampleZ = wx.CheckBox(panel, -1, \"resample Z)\")\n\t\tself.fourthResampleZ.Bind(wx.EVT_CHECKBOX, self.onSetToFourthSize)\n\t\thalfSizeBox.Add(parenLbl)\n\t\thalfSizeBox.Add(self.halfResampleZ)\n\t\t\n\t\tfourthSizeBox.Add(parenLbl2)\n\t\tfourthSizeBox.Add(self.fourthResampleZ)\n\t\t\n\t\tcustDimScaleSizer = wx.GridBagSizer()\n\t\t\n\t\tn = 0\n\t\tcustDimScaleSizer.Add(dimLabel, (n, 
0))\n\t\tcustDimScaleSizer.Add(dimsizer, (n, 1))\n\t\tn += 1\n\t\tcustDimScaleSizer.Add(factorLabel, (n, 0))\n\t\tcustDimScaleSizer.Add(self.factorBox, (n, 1), flag = wx.EXPAND | wx.LEFT | wx.RIGHT)\n\t\t\n\t\tn = 0\n\t\tsizer.Add(self.dimsLbl, (n, 0), flag = wx.EXPAND | wx.LEFT | wx.RIGHT, span = (1, 2))\n\t\tn += 1\n\t\t\n\t\tsizer.Add(halfSize, (n, 0))\n\t\tsizer.Add(halfSizeBox, (n, 1))\n\t\tn += 1\n\t\tsizer.Add(fourthSize, (n, 0))\n\t\tsizer.Add(fourthSizeBox, (n, 1))\n\t\tn += 1\n\t\tsizer.Add(custDims, (n, 0))\n\t\tsizer.Add(custDimScaleSizer, (n, 1))\n\t\t\n\t\tpanel.SetSizer(sizer)\n\t\tpanel.SetAutoLayout(1)\n\t\tsizer.Fit(panel)\n\t\tself.dimsPanel = panel\n\t\t\n\t\tself.sizer.Add(self.dimsLbl, (0, 0), flag = wx.ALIGN_CENTRE | wx.EXPAND | wx.LEFT | wx.RIGHT)\n\t\tself.sizer.Add(boxsizer, (1, 0), flag = wx.ALIGN_CENTRE | wx.EXPAND | wx.ALL)\n\n\t\tself.panel = panel",
"def create_base(self):\n if self.debug:\n print(\"Creating base\")\n self.console_panel = ConsolePanel(self.root)\n self.side_panel = SidePanel(self.root, self.populate_main_panel)\n self.side_panel.set_separator(\"word_word\")\n self.main_panel = MainPanel(self.root, action=\"word_word\")",
"def __init__( self, parent, remote):\r\n\r\n PanelExecute.__init__( self, parent )\r\n self.remote = remote\r\n print 'ExecuteControl constructor '\r\n self.timer = wx.Timer(self)\r\n self.Bind(wx.EVT_TIMER, self.timerAnimate, self.timer)\r\n self.animateIdx = 0\r\n self.repeat = False\r\n self.bitmap = Bitmap(FanbotConfig.width,FanbotConfig.height)\r\n self.initFileList('.gif') \r\n self.scaleX = 1\r\n self.scaleY = 1\r\n self.Refresh()",
"def __init__(self, parent, tile_dir=None, start_level=None,\n min_level=None, max_level=None, **kwargs):\n\n # create and initialise the base panel\n wx.Panel.__init__(self, parent=parent, id=wx.ID_ANY, **kwargs)\n self.SetBackgroundColour(pySlip.BackgroundColour)\n\n # get tile info\n self.tiles = pySlip.Tiles(tile_dir)\n self.max_level = max_level\n if max_level is None:\n self.max_level = self.tiles.max_level\n self.min_level = min_level\n if min_level is None:\n self.min_level = self.tiles.min_level\n self.level = start_level\n if start_level is None:\n self.level = self.min_level\n\n self.tile_size_x = self.tiles.tile_size_x\n self.tile_size_y = self.tiles.tile_size_y\n\n # set some internal state\n self.view_width = None # view size in pixels\n self.view_height = None # set on onResize()\n\n self.ppd_x = 0 # pixel_per_degree for current tileset\n self.ppd_y = 0\n\n self.view_offset_x = 0 # pixel offset at left & top of view\n self.view_offset_y = 0\n\n self.view_llon = self.view_rlon = None # view limits\n self.view_tlat = self.view_blat = None\n\n self.was_dragging = False # True if dragging map\n self.move_dx = 0 # drag delta values\n self.move_dy = 0\n self.last_drag_x = None # previous drag position\n self.last_drag_y = None\n\n self.ignore_next_up = False # flag to ignore next UP event\n\n self.is_box_select = False # True if box selection\n self.sbox_1_x = self.sbox_1_y = None # box size\n\n # layer stuff\n self.next_layer_id = 1 # source of unique layer IDs\n self.layer_z_order = [] # layer Z order, contains layer IDs\n self.layer_mapping = {} # maps layer ID to (...layer data...)\n\n # callback to report mouse position in view\n self.mouse_position_callback = None\n\n # callback on right mouse click (right button up event)\n self.rightclick_callback = None\n\n # callback on level change\n self.change_level_callback = None\n\n # bind events\n self.Bind(wx.EVT_SIZE, self.onResize) # widget events\n self.Bind(wx.EVT_PAINT, self.onPaint)\n\n self.Bind(wx.EVT_MOTION, self.onMove) # mouse events\n self.Bind(wx.EVT_LEFT_DOWN, self.onLeftDown)\n self.Bind(wx.EVT_LEFT_DCLICK, self.onLeftDClick)\n self.Bind(wx.EVT_LEFT_UP, self.onLeftUp)\n self.Bind(wx.EVT_RIGHT_DOWN, self.onRightDown)\n self.Bind(wx.EVT_RIGHT_UP, self.onRightUp)\n self.Bind(wx.EVT_MIDDLE_DOWN, self.onMiddleDown)\n self.Bind(wx.EVT_MIDDLE_UP, self.onMiddleUp)\n self.Bind(wx.EVT_MOUSEWHEEL, self.onMouseWheel)\n\n # OK, use the tile level the user wants\n self.use_level(self.level)\n\n # force a resize, which sets up the rest of the state\n self.onResize()",
"def _create_scatterplot_pane(self):\n panel = wx.Panel(self, -1)\n\t\t\n self.fig_scatter = Figure()\n self.ax_scatter = self.fig_scatter.add_subplot(111)\n\t\t\n families = ['serif', 'sans-serif', 'cursive', 'fantasy', 'monospace']\n\t\t\n self.ax_scatter.set_xlabel(\"Mean\") #fontsize = 14, fontweight = 'semibold', name = families[2]\n self.ax_scatter.set_ylabel(\"Standard Deviation\")\n #self.ax_scatter.grid(color='black', alpha=0.5, linestyle='-', linewidth=1.0)\n self.ax_scatter.set_axis_bgcolor((0.8,0.8,0.8))\n #self.ax_scatter.set_ylim(0, 35)\n #self.ax_scatter.set_ylim(0, 90)\n\t\t\n self.canvas_scatter = FigureCanvas(panel, -1, self.fig_scatter)\n self.toolbar_scatter = NavigationToolbar(self.canvas_scatter)\n\t\t\n vbox = wx.BoxSizer(wx.VERTICAL)\n vbox.Add(self.canvas_scatter, 1, wx.EXPAND|wx.BOTTOM, 7)\n vbox.Add(self.toolbar_scatter, 0, wx.EXPAND)\n\t\t\n panel.SetSizer(vbox)\n vbox.Fit(panel)\n\t\t\n return panel",
"def setup(self):\n self.statusbar = self.CreateStatusBar()\n pub.subscribe(self.update_feed,'update_feed') \n\n menubar = wx.MenuBar()\n fileMenu = wx.Menu()\n returnMain = fileMenu.Append(wx.ID_ANY, '&Main Menu',\n \"Return to Application Main Menu\")\n self.Bind(wx.EVT_MENU, self.return_main_menu, returnMain)\n qmi = wx.MenuItem(fileMenu, 110, '&Quit\\tCtrl+Q', \"Quit Application\")\n fileMenu.AppendItem(qmi)\n self.Bind(wx.EVT_MENU, self.quit_application, qmi)\n editMenu = wx.Menu()\n optionsMenu = wx.Menu()\n optionsMenu.Append(wx.ID_ANY,'&Configure Wi-Fi')\n optionsMenu.Append(wx.ID_ANY,'&Calibrate System')\n helpMenu = wx.Menu()\n helpMenu.Append(wx.ID_ANY,'&About')\n menubar.Append(fileMenu, '&File')\n menubar.Append(editMenu, '&Edit')\n menubar.Append(optionsMenu, '&Options')\n menubar.Append(helpMenu, '&Help')\n self.SetMenuBar(menubar)\n\n panel = wx.Panel(self, wx.ID_ANY)\n panel.SetAutoLayout(1)\n font_std = wx.Font(12, wx.SWISS, wx.NORMAL, wx.NORMAL)\n font_stdBold = wx.Font(12, wx.SWISS, wx.NORMAL, wx.BOLD)\n\n measureBtn = wx.Button(panel, size=(100,100),label=\"Measure\")\n str_trgChan = wx.StaticText(panel, -1, \"Channel 1 (Speaker 1: Mic 1)\")\n distanceLabel = wx.StaticText(panel, -1, \"Distance:\")\n self.distance_txtBox = wx.TextCtrl(panel, wx.ID_ANY, '', size=(84,22),\n style=wx.TE_READONLY|wx.ALIGN_RIGHT)\n self.metrics = ['cm','m']\n self.imperials = ['in','ft']\n self.distanceUnitCombobox = wx.ComboBox(panel, -1, size=(80,27),\n choices=self.metrics,\n style=wx.CB_READONLY)\n self.distanceUnitCombobox.SetValue(self.metrics[0])\n self.metricUnitRadioBtn = wx.RadioButton(panel, label=\"Metric\",\n style=wx.RB_GROUP)\n self.metricUnitRadioBtn.SetValue(True)\n self.imperialUnitRadioBtn = wx.RadioButton(panel, label=\"Imperial\")\n propTimeLabel = wx.StaticText(panel, -1, \"Propogation Time:\") \n self.propdelay_txtBox = wx.TextCtrl(panel, wx.ID_ANY, '',size=(84,22),\n style=wx.TE_READONLY\n |wx.ALIGN_RIGHT)\n propdelayUnitLabel = wx.StaticText(panel, -1, \"msec\")\n gainLabel = wx.StaticText(panel, -1, \"Gain:\")\n gainTextBox = wx.TextCtrl(panel, wx.ID_ANY, '',size=(84,22),\n style=wx.TE_READONLY|wx.ALIGN_RIGHT)\n gainUnitLabel = wx.StaticText(panel, -1, '%')\n measureBtn.SetFont(wx.Font(14, wx.SWISS, wx.NORMAL, wx.BOLD))\n str_trgChan.SetFont(font_stdBold)\n distanceLabel.SetFont(font_std)\n self.distance_txtBox.SetFont(font_std)\n self.distanceUnitCombobox.SetFont(font_std)\n self.metricUnitRadioBtn.SetFont(font_std)\n self.imperialUnitRadioBtn.SetFont(font_std)\n propTimeLabel.SetFont(font_std)\n self.propdelay_txtBox.SetFont(font_std)\n propdelayUnitLabel.SetFont(font_std)\n gainLabel.SetFont(font_std)\n gainTextBox.SetFont(font_std)\n gainUnitLabel.SetFont(font_std)\n self.distance_txtBox.SetBackgroundColour(wx.Colour(255,250,250))\n self.propdelay_txtBox.SetBackgroundColour(wx.Colour(255,250,250))\n gainTextBox.SetBackgroundColour(wx.Colour(255,250,250))\n\n trgMeasureGridBag = wx.GridBagSizer(4,8)\n trgMeasureGridBag.Add(str_trgChan, pos=(0, 0), span=(1,8),\n flag=wx.TOP|wx.LEFT|wx.RIGHT|wx.BOTTOM\n |wx.ALIGN_CENTRE, border=5)\n trgMeasureGridBag.Add(measureBtn, pos=(1, 0), span=(4,3),\n flag=wx.TOP|wx.LEFT|wx.Right|wx.BOTTOM, border=5)\n trgMeasureGridBag.Add(distanceLabel, pos=(1, 3), \n flag=wx.TOP|wx.LEFT|wx.BOTTOM, border=5)\n trgMeasureGridBag.Add(self.distance_txtBox, pos=(1, 4),\n flag=wx.TOP|wx.LEFT|wx.BOTTOM, border=5)\n trgMeasureGridBag.Add(self.distanceUnitCombobox, pos=(1, 5),\n flag=wx.TOP|wx.LEFT|wx.BOTTOM, border=5)\n 
trgMeasureGridBag.Add(self.metricUnitRadioBtn, pos=(1, 6),\n flag=wx.TOP|wx.LEFT|wx.BOTTOM, border=5)\n trgMeasureGridBag.Add(self.imperialUnitRadioBtn, pos=(1, 7),\n flag=wx.TOP|wx.LEFT|wx.BOTTOM|wx.Right, border=5)\n trgMeasureGridBag.Add(propTimeLabel, pos=(2, 3),\n flag=wx.TOP|wx.LEFT|wx.BOTTOM, border=5)\n trgMeasureGridBag.Add(self.propdelay_txtBox, pos=(2, 4),\n flag=wx.TOP|wx.LEFT|wx.BOTTOM, border=5)\n trgMeasureGridBag.Add(propdelayUnitLabel, pos=(2, 5),\n flag=wx.TOP|wx.LEFT|wx.BOTTOM, border=5)\n trgMeasureGridBag.Add(gainLabel, pos=(3, 3),\n flag=wx.TOP|wx.LEFT|wx.BOTTOM, border=5)\n trgMeasureGridBag.Add(gainTextBox, pos=(3, 4),\n flag=wx.TOP|wx.LEFT|wx.BOTTOM, border=5)\n trgMeasureGridBag.Add(gainUnitLabel, pos=(3, 5),\n flag=wx.TOP|wx.LEFT|wx.BOTTOM, border=5)\n trgMeasureStaticBox = wx.StaticBox(panel, label= \"Trigger Measurement\")\n trgMeasureStaticBox.SetFont(font_stdBold)\n trgMeasureBoxSizer = wx.StaticBoxSizer(trgMeasureStaticBox,\n wx.HORIZONTAL)\n trgMeasureBoxSizer.Add(trgMeasureGridBag,\n flag=wx.EXPAND|wx.LEFT|wx.TOP\n |wx.BOTTOM|wx.RIGHT,\n border=10)\n self.metricUnitRadioBtn.Bind(wx.EVT_RADIOBUTTON,\n self.change_distance_units)\n self.imperialUnitRadioBtn.Bind(wx.EVT_RADIOBUTTON,\n self.change_distance_units)\n measureBtn.Bind(wx.EVT_BUTTON, self.trig_measure)\n\n systemID = wx.StaticText(panel, -1, 'System ID:')\n sysID_field = wx.StaticText(panel, -1, '--------------------')\n wiFiStatus = wx.StaticText(panel, -1, 'Status:')\n wiFiStatus_field = wx.StaticText(panel, -1, '--------------------')\n wiFiSSID = wx.StaticText(panel, -1, 'Wi-Fi SSID:')\n wiFiSSID_field = wx.StaticText(panel, -1, '--------------------')\n systemID.SetFont(font_stdBold)\n sysID_field.SetFont(font_std)\n wiFiStatus.SetFont(font_stdBold)\n wiFiStatus_field.SetFont(font_std)\n wiFiSSID.SetFont(font_stdBold)\n wiFiSSID_field.SetFont(font_std)\n sysStatusGridSizer = wx.GridBagSizer(3,3)\n sysStatusGridSizer.Add(systemID, pos=(0, 0),\n flag=wx.TOP|wx.ALIGN_LEFT|wx.BOTTOM|wx.LEFT,\n border=5)\n sysStatusGridSizer.Add(sysID_field, pos=(0, 2),\n flag=wx.TOP|wx.ALIGN_LEFT|wx.BOTTOM, border=5)\n sysStatusGridSizer.Add(wiFiStatus, pos=(1,0),\n flag=wx.TOP|wx.ALIGN_LEFT|wx.BOTTOM|wx.LEFT,\n border=5)\n sysStatusGridSizer.Add(wiFiStatus_field, pos=(1,2),\n flag=wx.TOP|wx.ALIGN_LEFT|wx.BOTTOM|wx.RIGHT,\n border=5)\n sysStatusGridSizer.Add(wiFiSSID, pos=(2,0),\n flag=wx.LEFT|wx.TOP|wx.ALIGN_LEFT|wx.BOTTOM,\n border=5)\n sysStatusGridSizer.Add(wiFiSSID_field, pos=(2,2),\n flag=wx.TOP|wx.ALIGN_LEFT|wx.BOTTOM|wx.RIGHT,\n border=5)\n sysStatusStaticBox = wx.StaticBox(panel, label=\"System Status\")\n sysStatusStaticBox.SetFont(font_stdBold)\n sysStatusBoxSizer = wx.StaticBoxSizer(sysStatusStaticBox, wx.HORIZONTAL)\n sysStatusBoxSizer.Add(sysStatusGridSizer,\n flag=wx.EXPAND|wx.LEFT|wx.TOP|wx.BOTTOM|wx.RIGHT,\n border=25) \n\n editPrefBtn = wx.Button(panel, size=(160,27),\n label=\"Edit Preferences\")\n configDeviceBtn = wx.Button(panel, size=(160,27),\n label=\"Configure Device\")\n searchDeviceBtn = wx.Button(panel, size=(160,27),\n label=\"Search for Device\")\n editPrefBtn.SetFont(font_std)\n configDeviceBtn.SetFont(font_std)\n searchDeviceBtn.SetFont(font_std)\n congfiStaticBox = wx.StaticBox(panel, label='')\n congfiStaticBox.SetFont(font_stdBold)\n configBoxSizer = wx.StaticBoxSizer(congfiStaticBox, wx.VERTICAL)\n configBoxSizer.Add(editPrefBtn, flag=wx.EXPAND|wx.LEFT|wx.TOP|wx.RIGHT,\n border=20)\n configBoxSizer.Add(configDeviceBtn,\n flag=wx.EXPAND|wx.LEFT|wx.TOP|wx.BOTTOM|wx.RIGHT,\n 
border=20)\n configBoxSizer.Add(searchDeviceBtn,\n flag=wx.EXPAND|wx.LEFT|wx.BOTTOM|wx.RIGHT,\n border=20)\n editPrefBtn.Bind(wx.EVT_BUTTON, self.open_preferences)\n configDeviceBtn.Bind(wx.EVT_BUTTON, self.open_configuration)\n searchDeviceBtn.Bind(wx.EVT_BUTTON, self.search_device)\n\n self.feed_txtBox = wx.TextCtrl(panel, wx.ID_ANY, '',size=(500,120),\n style=wx.TE_READONLY|wx.ALIGN_LEFT|wx.TE_MULTILINE\n |wx.TE_RICH2)\n self.feed_txtBox.SetFont(font_std)\n self.feed_txtBox.SetBackgroundColour(wx.Colour(255,250,250))\n feedStaticBox = wx.StaticBox(panel, label='')\n feedStaticBox.SetFont(font_stdBold)\n feedBoxSizer = wx.StaticBoxSizer(feedStaticBox, wx.VERTICAL)\n feedBoxSizer.Add(self.feed_txtBox,\n flag=wx.EXPAND|wx.LEFT|wx.TOP|wx.RIGHT, border=1)\n sizer = wx.GridBagSizer(3,2)\n sizer.Add(trgMeasureBoxSizer, pos=(0,0),span=(1,2),\n flag=wx.EXPAND|wx.TOP|wx.LEFT|wx.RIGHT|wx.BOTTOM, border=10)\n sizer.Add(sysStatusBoxSizer, pos=(1,0), span=(1,2),\n flag=wx.EXPAND|wx.TOP|wx.LEFT|wx.RIGHT, border=10)\n sizer.Add(feedBoxSizer, pos=(2,0),span=(1,2),\n flag=wx.EXPAND|wx.TOP|wx.LEFT|wx.RIGHT, border=10)\n panel.SetSizerAndFit(sizer)",
"def __init__(self, parent, primers, seq, id=wx.ID_ANY,\n size=wx.DefaultSize):\n self.PrimerPan = wx.Panel.__init__(self, parent, id, size=size)\n self.fvbox = wx.BoxSizer(wx.VERTICAL)\n self.hbox = wx.BoxSizer(wx.HORIZONTAL)\n self.primers = primers\n radlist = self.MaakRadio()\n self.hbox.Add(self.MaakPrimers(primers[0], 'Forward', radlist[0]),\n 1, wx.ALL | wx.EXPAND)\n self.hbox.Add(self.MaakPrimers(primers[1], 'Reversed', radlist[1]),\n 1, wx.ALL | wx.EXPAND)\n self.fvbox.Add(self.hbox, 1, wx.ALL | wx.EXPAND)\n self.LenText = wx.StaticText(self, id=-1, label='PCR Lengte: ')\n self.seq = seq\n self.fvbox.Add(self.LenText, 0, wx.ALL | wx.ALIGN_CENTRE)\n self.SetSizer(self.fvbox)\n self.PCRLengte(0)",
"def panel(*args, control: bool=True, copy: AnyStr=\"\", createString: bool=True, defineTemplate:\n AnyStr=\"\", docTag: Union[AnyStr, bool]=\"\", editString: bool=True, exists: bool=True,\n init: bool=True, isUnique: bool=True, label: Union[AnyStr, bool]=\"\",\n menuBarRepeatLast: bool=True, menuBarVisible: bool=True, needsInit: bool=True,\n parent: AnyStr=\"\", popupMenuProcedure: Union[Script, bool]=None, replacePanel:\n AnyStr=\"\", tearOff: bool=True, tearOffCopy: AnyStr=\"\", tearOffRestore: bool=True,\n unParent: bool=True, useTemplate: AnyStr=\"\", q=True, query=True, e=True, edit=True,\n **kwargs)->Union[None, Any]:\n pass",
"def Realize(self):\r\n\r\n dc = wx.ClientDC(self)\r\n \r\n if not dc.IsOk():\r\n return False\r\n\r\n horizontal = True\r\n if self._agwStyle & AUI_TB_VERTICAL:\r\n horizontal = False\r\n\r\n # create the new sizer to add toolbar elements to\r\n sizer = wx.BoxSizer((horizontal and [wx.HORIZONTAL] or [wx.VERTICAL])[0])\r\n\r\n # add gripper area\r\n separator_size = self._art.GetElementSize(AUI_TBART_SEPARATOR_SIZE)\r\n gripper_size = self._art.GetElementSize(AUI_TBART_GRIPPER_SIZE)\r\n \r\n if gripper_size > 0 and self._gripper_visible: \r\n if horizontal:\r\n self._gripper_sizer_item = sizer.Add((gripper_size, 1), 0, wx.EXPAND)\r\n else:\r\n self._gripper_sizer_item = sizer.Add((1, gripper_size), 0, wx.EXPAND)\r\n else:\r\n self._gripper_sizer_item = None\r\n \r\n # add \"left\" padding\r\n if self._left_padding > 0:\r\n if horizontal:\r\n sizer.Add((self._left_padding, 1))\r\n else:\r\n sizer.Add((1, self._left_padding))\r\n \r\n count = len(self._items)\r\n for i, item in enumerate(self._items):\r\n \r\n sizer_item = None\r\n kind = item.kind\r\n\r\n if kind == ITEM_LABEL:\r\n \r\n size = self._art.GetLabelSize(dc, self, item)\r\n sizer_item = sizer.Add((size.x + (self._tool_border_padding*2),\r\n size.y + (self._tool_border_padding*2)),\r\n item.proportion,\r\n item.alignment)\r\n if i+1 < count:\r\n sizer.AddSpacer(self._tool_packing)\r\n \r\n\r\n elif kind in [ITEM_CHECK, ITEM_NORMAL, ITEM_RADIO]:\r\n \r\n size = self._art.GetToolSize(dc, self, item)\r\n sizer_item = sizer.Add((size.x + (self._tool_border_padding*2),\r\n size.y + (self._tool_border_padding*2)),\r\n 0,\r\n item.alignment)\r\n # add tool packing\r\n if i+1 < count:\r\n sizer.AddSpacer(self._tool_packing)\r\n\r\n elif kind == ITEM_SEPARATOR:\r\n \r\n if horizontal:\r\n sizer_item = sizer.Add((separator_size, 1), 0, wx.EXPAND)\r\n else:\r\n sizer_item = sizer.Add((1, separator_size), 0, wx.EXPAND)\r\n\r\n # add tool packing\r\n if i+1 < count:\r\n sizer.AddSpacer(self._tool_packing)\r\n\r\n elif kind == ITEM_SPACER:\r\n \r\n if item.proportion > 0:\r\n sizer_item = sizer.AddStretchSpacer(item.proportion)\r\n else:\r\n sizer_item = sizer.Add((item.spacer_pixels, 1))\r\n \r\n elif kind == ITEM_CONTROL:\r\n \r\n vert_sizer = wx.BoxSizer(wx.VERTICAL)\r\n vert_sizer.AddStretchSpacer(1)\r\n ctrl_sizer_item = vert_sizer.Add(item.window, 0, wx.EXPAND)\r\n vert_sizer.AddStretchSpacer(1)\r\n \r\n if self._agwStyle & AUI_TB_TEXT and \\\r\n self._tool_text_orientation == AUI_TBTOOL_TEXT_BOTTOM and \\\r\n item.GetLabel() != \"\":\r\n \r\n s = self.GetLabelSize(item.GetLabel())\r\n vert_sizer.Add((1, s.y))\r\n\r\n sizer_item = sizer.Add(vert_sizer, item.proportion, wx.EXPAND)\r\n min_size = item.min_size\r\n\r\n # proportional items will disappear from the toolbar if\r\n # their min width is not set to something really small\r\n if item.proportion != 0:\r\n min_size.x = 1\r\n \r\n if min_size.IsFullySpecified():\r\n sizer.SetItemMinSize(vert_sizer, min_size)\r\n vert_sizer.SetItemMinSize(item.window, min_size)\r\n \r\n # add tool packing\r\n if i+1 < count:\r\n sizer.AddSpacer(self._tool_packing)\r\n \r\n item.sizer_item = sizer_item\r\n \r\n\r\n # add \"right\" padding\r\n if self._right_padding > 0:\r\n if horizontal:\r\n sizer.Add((self._right_padding, 1))\r\n else:\r\n sizer.Add((1, self._right_padding))\r\n \r\n # add drop down area\r\n self._overflow_sizer_item = None\r\n\r\n if self._agwStyle & AUI_TB_OVERFLOW:\r\n \r\n overflow_size = self._art.GetElementSize(AUI_TBART_OVERFLOW_SIZE)\r\n if overflow_size > 0 and 
self._overflow_visible:\r\n \r\n if horizontal:\r\n self._overflow_sizer_item = sizer.Add((overflow_size, 1), 0, wx.EXPAND)\r\n else:\r\n self._overflow_sizer_item = sizer.Add((1, overflow_size), 0, wx.EXPAND)\r\n \r\n else:\r\n \r\n self._overflow_sizer_item = None\r\n \r\n # the outside sizer helps us apply the \"top\" and \"bottom\" padding\r\n outside_sizer = wx.BoxSizer((horizontal and [wx.VERTICAL] or [wx.HORIZONTAL])[0])\r\n\r\n # add \"top\" padding\r\n if self._top_padding > 0:\r\n \r\n if horizontal:\r\n outside_sizer.Add((1, self._top_padding))\r\n else:\r\n outside_sizer.Add((self._top_padding, 1))\r\n \r\n # add the sizer that contains all of the toolbar elements\r\n outside_sizer.Add(sizer, 1, self._tool_alignment)\r\n\r\n # add \"bottom\" padding\r\n if self._bottom_padding > 0:\r\n \r\n if horizontal:\r\n outside_sizer.Add((1, self._bottom_padding))\r\n else:\r\n outside_sizer.Add((self._bottom_padding, 1))\r\n\r\n del self._sizer # remove old sizer\r\n self._sizer = outside_sizer\r\n self.SetSizer(outside_sizer)\r\n\r\n # calculate the rock-bottom minimum size\r\n for item in self._items:\r\n \r\n if item.sizer_item and item.proportion > 0 and item.min_size.IsFullySpecified():\r\n item.sizer_item.SetMinSize((0, 0))\r\n \r\n self._absolute_min_size = self._sizer.GetMinSize()\r\n\r\n # reset the min sizes to what they were\r\n for item in self._items:\r\n \r\n if item.sizer_item and item.proportion > 0 and item.min_size.IsFullySpecified():\r\n item.sizer_item.SetMinSize(item.min_size)\r\n \r\n # set control size\r\n size = self._sizer.GetMinSize()\r\n self.SetMinSize(size)\r\n self._minWidth = size.x\r\n self._minHeight = size.y\r\n\r\n if self._agwStyle & AUI_TB_NO_AUTORESIZE == 0:\r\n \r\n cur_size = self.GetClientSize()\r\n new_size = self.GetMinSize()\r\n\r\n if new_size != cur_size:\r\n \r\n self.SetClientSize(new_size)\r\n \r\n else:\r\n \r\n self._sizer.SetDimension(0, 0, cur_size.x, cur_size.y)\r\n \r\n else:\r\n \r\n cur_size = self.GetClientSize()\r\n self._sizer.SetDimension(0, 0, cur_size.x, cur_size.y)\r\n \r\n self.Refresh(False)\r\n return True",
"def build(self):\n self.main_layout = MainLayout()\n self.main_layout.settings_panel.load_settings()\n return self.main_layout",
"def set_menus(self):\n #---MENU\n self.menubar = wx.MenuBar() # initialize a menu bar at the top\n\n fileMenu = wx.Menu() # initialize a menu and add items to it\n newItem = fileMenu.Append(wx.ID_NEW, '&New Project', 'New')\n openItem = fileMenu.Append(wx.ID_OPEN, '&Open', 'Open Existing Input File')\n saveItem = fileMenu.Append(wx.ID_SAVE, '&Save', 'Write Project')\n quitItem = fileMenu.Append(wx.ID_EXIT, '&Quit', 'Quit')\n\n #toolMenu = wx.Menu() # initialize another menu and add stuff to it\n #pngItem = toolMenu.Append(wx.ID_ANY, '&Build LaTeX Formulas', 'LaTeX Formulas')\n\n nmlMenu = wx.Menu() # initialize another menu\n\n # puposefully do *not* bind this with anything just yet\n nmlItem = nmlMenu.Append(wx.ID_ANY, '--No File Loaded--', '--No File Loaded--')\n\n # add the menu(s) to the menu bar\n self.menubar.Append(fileMenu, '&File')\n #self.menubar.Append(toolMenu, '&Tools')\n self.menubar.Append(nmlMenu, '&Namelists')\n\n self.nml_menu_index = 1 # index into menubar that returns the Namelist menu\n\n # finalize/build the menubar\n self.SetMenuBar(self.menubar)\n\n # direct GUI what to do when something is selected\n self.Bind(wx.EVT_MENU, self.buttons.OnNew, newItem)\n self.Bind(wx.EVT_MENU, self.buttons.OnOpen, openItem)\n self.Bind(wx.EVT_MENU, self.buttons.OnSave, saveItem)\n self.Bind(wx.EVT_MENU, self.buttons.OnQuit, quitItem)\n\n #self.Bind(wx.EVT_MENU, self.buttons.OnBuildPNGs, pngItem)\n\n #---TOOLBAR\n toolbar = self.CreateToolBar() # build a toolbar\n\n # build tools\n file_dir = os.path.dirname(os.path.abspath(__file__)) # directory of current file\n image_dir = os.path.join(file_dir, 'images')\n ntool = toolbar.AddTool(wx.ID_ANY, 'New', wx.Bitmap(os.path.join(image_dir, 'new_file.png')))\n otool = toolbar.AddTool(wx.ID_ANY, 'Open', wx.Bitmap(os.path.join(image_dir, 'open_folder.png')))\n stool = toolbar.AddTool(wx.ID_ANY, 'Save', wx.Bitmap(os.path.join(image_dir, 'filesave.png')))\n\n # direct GUI what to do when something is selected\n self.Bind(wx.EVT_TOOL, self.buttons.OnNew, ntool)\n self.Bind(wx.EVT_TOOL, self.buttons.OnOpen, otool)\n self.Bind(wx.EVT_TOOL, self.buttons.OnSave, stool)\n\n # finalize/build the toolbar\n toolbar.Realize()",
"def create(self):\n self.panel = pg.rect.Rect(self.position, self.dimensions)",
"def set_config_panel(self, item):\n logging.info(\"Setting config for :{0!s}\".format(item))\n data = None\n try:\n data = self.tree.GetPyData(item)\n except Exception as e:\n print(\"{0!s}:{1!s}\".format(item.get_label(), e))\n\n logging.info(\"Clicked on {0!s}\".format(data))\n self.current_selection = item\n\n main_sizer = self.config_panel.GetSizer()\n if main_sizer:\n widgets = self.config_panel.GetChildren()\n for widget in widgets:\n logging.info(\"Destroying: {0!s}\".format((str(widget))))\n widget.Destroy()\n self.Layout()\n logging.info(\"Removing: MainSizer\")\n self.sizer.Remove(main_sizer)\n\n btn_sizer = wx.StdDialogButtonSizer()\n\n if data is not None:\n logging.info(\"Item has data\")\n\n values = data[\"Config\"]\n gridsizer = wx.FlexGridSizer(rows=len(values), cols=2)\n colsizer = wx.BoxSizer(wx.HORIZONTAL)\n\n self.widgetNames = values\n font = wx.Font(10, wx.SWISS, wx.NORMAL, wx.BOLD)\n\n for key, value in values.iteritems():\n logging.info(\"Parsing: {0!s} {1!s}\".format(key, value))\n\n lbl = wx.StaticText(self.config_panel, label=key)\n lbl.SetFont(font)\n\n # Deal with funky functions\n if hasattr(value, '__call__'):\n value = value()\n\n # LIST VALUES\n if isinstance(value, list):\n default = value[0]\n choices = value[1:]\n input_choice = wx.ComboBox(self.config_panel, value=default,\n choices=choices,\n style=wx.CB_READONLY,\n name=key)\n # STRING VALUES\n elif isinstance(value, (basestring, unicode)):\n input_choice = wx.TextCtrl(\n self.config_panel, value=value, name=key)\n # INTEGER VALUES\n elif isinstance(value, int):\n input_choice = IntCtrl(self.config_panel, value=value, name=key)\n # FLOAT VALUES\n elif isinstance(value, float):\n input_choice = FloatSpin(\n self.config_panel, increment=0.01, value=value, name=key)\n input_choice.SetFormat(\"%f\")\n input_choice.SetDigits(2)\n # DICT VALUES - Assume position or vector\n elif isinstance(value, dict):\n input_choice = wx.FlexGridSizer(rows=len(value), cols=2)\n for k, v in sorted(value.iteritems()):\n i_lbl = wx.StaticText(self.config_panel, label=k)\n i_lbl.SetFont(font)\n\n widget = FloatSpin(\n self.config_panel, increment=0.01, value=v, name=k)\n widget.SetFormat(\"%f\")\n widget.SetDigits(2)\n input_choice.AddMany(\n [(thing, 0, wx.ALL | wx.EXPAND | wx.ALIGN_RIGHT, 5) for thing in (i_lbl, widget)])\n else:\n raise NotImplementedError, \"Value ({0!s}, {1!s}) has not been coped with by set_config_panel\".format(\n str(value),\n type(value)\n )\n gridsizer.AddMany(\n [(thing, 0, wx.ALL | wx.ALIGN_RIGHT, 5) for thing in (lbl, input_choice)])\n\n colsizer.Add(gridsizer, 1, wx.EXPAND)\n\n save_btn = wx.Button(self.config_panel, wx.ID_OK, label=\"Save\")\n save_btn.Bind(wx.EVT_BUTTON, self.on_save)\n btn_sizer.AddButton(save_btn)\n\n update_btn = wx.Button(self.config_panel, wx.ID_ANY, label=\"Update\")\n update_btn.Bind(wx.EVT_BUTTON, self.on_update)\n btn_sizer.AddButton(update_btn)\n\n cancel_btn = wx.Button(self.config_panel, wx.ID_CANCEL)\n btn_sizer.AddButton(cancel_btn)\n btn_sizer.Realize()\n\n main_sizer = wx.BoxSizer(wx.VERTICAL)\n main_sizer.Add(colsizer, 0, wx.EXPAND | wx.ALL | wx.ALIGN_RIGHT)\n main_sizer.Add(btn_sizer, 0, wx.ALL | wx.ALIGN_RIGHT, 5)\n self.config_panel.SetSizer(main_sizer)\n else:\n logging.info(\"Item has no data\")\n\n self.Layout()",
"def __init__(self, parent, id):\n \n # init frame\n wx.Frame.__init__(self, parent, -1, \"Papyrus\", size=(800, 500), style=wx.DEFAULT_FRAME_STYLE | wx.NO_FULL_REPAINT_ON_RESIZE)\n \n # init error handler\n sys.excepthook = self._on_error\n \n # init library\n self._library = None\n \n # set icon\n icons = wx.IconBundle()\n icons.AddIcon(images.APP_ICON_16)\n icons.AddIcon(images.APP_ICON_32)\n icons.AddIcon(images.APP_ICON_48)\n icons.AddIcon(images.APP_ICON_128)\n icons.AddIcon(images.APP_ICON_256)\n self.SetIcons(icons)\n \n # init menu bar\n self._menu_bar = MenuBar()\n if config.SETTINGS['menu_bar_enabled']:\n self.SetMenuBar(self._menu_bar)\n \n # init main ui\n self._make_ui()\n \n # set size\n self.SetSize((config.SETTINGS['app_width'], config.SETTINGS['app_height']))\n self.SetMinSize((800, 500))\n \n # maximize\n if config.SETTINGS['app_maximized']:\n print(config.SETTINGS)\n self.Maximize()\n \n # bind events\n self._bind_events()\n \n # set hot keys\n self.SetAcceleratorTable(wx.AcceleratorTable(ACCELERATORS))\n \n # show frame\n self.Layout()\n self.Centre(wx.BOTH)\n self.Show(True)",
"def __init__(self, parent, dummy_app, title, model):\r\n wx.Frame.__init__(\r\n self, parent, -1,\r\n title,\r\n )\r\n\r\n self.box = wx.GridBagSizer(5, 5)\r\n self.product = wx.StaticText(self, -1, \"\")\r\n self.proc_view = MVCList(\r\n self, -1, style=wx.LC_REPORT,\r\n columns=[\r\n (\"operation\", \"Op.\"),\r\n (\"act\", \"Act\"),\r\n ],\r\n )\r\n self.ops_view = MVCList(\r\n self, -1, style=wx.LC_REPORT,\r\n columns=[\r\n (\"operation\", \"Operation\"),\r\n (\"tm\", \"Time\"),\r\n (\"workplace\", \"Workplace\"),\r\n ],\r\n )\r\n self.proc_count = wx.StaticText(self, -1, \"\")\r\n self.ops_count = wx.StaticText(self, -1, \"\")\r\n\r\n self.box.Add(self.product, (0, 0), (1, 2), flag=wx.EXPAND)\r\n self.box.Add(self.proc_view, (1, 0), flag=wx.EXPAND)\r\n self.box.Add(self.ops_view, (1, 1), flag=wx.EXPAND)\r\n self.box.Add(self.proc_count, (2, 0), flag=wx.EXPAND)\r\n self.box.Add(self.ops_count, (2, 1), flag=wx.EXPAND)\r\n\r\n next_prod = wx.Button(self, -1, \"Next Record\")\r\n add_op = wx.Button(self, -1, \"Add Operation\")\r\n del_selected_op = wx.Button(self, -1, \"Delete Operation\")\r\n\r\n self.box.Add(next_prod, (3, 0))\r\n self.box.Add(add_op, (3, 1))\r\n self.box.Add(del_selected_op, (4, 1))\r\n\r\n self.box.AddGrowableRow(1)\r\n self.box.AddGrowableCol(0)\r\n self.box.AddGrowableCol(1)\r\n self.SetSizerAndFit(self.box)\r\n\r\n self.Bind(wx.EVT_BUTTON, self.on_next, next_prod)\r\n self.Bind(wx.EVT_BUTTON, self.on_add_op, add_op)\r\n self.Bind(wx.EVT_BUTTON, self.on_del_op, del_selected_op)\r\n\r\n self.model = model\r\n self.mapper = hotmodel.Mapper()\r\n self.proc_view.add_routes(self.mapper, \"process\")\r\n self.ops_view.add_routes(self.mapper, \"operations\")\r\n\r\n self.mapper.add_route(\r\n \"process\",\r\n \"\",\r\n lambda m, fqn, event, key: self.update_count(m, self.proc_count),\r\n )\r\n self.mapper.add_route(\r\n \"operations\",\r\n \"\",\r\n lambda m, fqn, event, key: self.update_count(m, self.ops_count),\r\n )\r\n\r\n self.mapper.add_route(\"sn\", \"\", self.on_product,)\r\n self.mapper.add_route(\"article\", \"\", self.on_product,)\r\n self.model.add_listener(self.mapper)\r\n\r\n wx.CallAfter(lambda: self.model.set_product(\"FIRST8\", 1))",
"def buildUI(self):\n\n if cmds.window(\"pyART_AddToCanvasWIN\", exists=True):\n cmds.deleteUI(\"pyART_AddToCanvasWIN\", wnd=True)\n\n # create the main window\n self.mainWin = QtWidgets.QMainWindow(self.pickerUI)\n\n # create the main widget\n self.mainWidget = QtWidgets.QWidget()\n self.mainWin.setCentralWidget(self.mainWidget)\n\n # create the mainLayout\n self.layout = QtWidgets.QVBoxLayout(self.mainWidget)\n\n # load stylesheet\n styleSheetFile = utils.returnNicePath(self.toolsPath, \"Core/Scripts/Interfaces/StyleSheets/animPicker.qss\")\n f = open(styleSheetFile, \"r\")\n self.style = f.read()\n f.close()\n\n self.mainWin.setStyleSheet(self.style)\n\n self.mainWin.setMinimumSize(QtCore.QSize(250, 400))\n self.mainWin.setMaximumSize(QtCore.QSize(250, 400))\n self.mainWin.resize(250, 400)\n\n # set qt object name\n self.mainWin.setObjectName(\"pyART_AddToCanvasWIN\")\n self.mainWin.setWindowTitle(\"Add Module To Canvas\")\n\n # label, listWidget, button\n label = QtWidgets.QLabel(\"Available Modules:\")\n label.setProperty(\"boldFont\", True)\n self.layout.addWidget(label)\n\n self.moduleList = QtWidgets.QListWidget()\n self.moduleList.setMaximumSize(230, 300)\n self.moduleList.setMinimumSize(230, 300)\n self.layout.addWidget(self.moduleList)\n\n # add modules to listWidget\n self.addModulesToList()\n\n # create add button\n button = QtWidgets.QPushButton(\"Add Selected To Canvas\")\n self.layout.addWidget(button)\n button.setObjectName(\"blueButton\")\n button.clicked.connect(self.addSelectedToCanvas)\n\n # show ui\n self.mainWin.show()",
"def layout_button(self):\n # Load Data Button\n self.bt_add = wx.Button(self, wx.NewId(), \"Load Data\",\n size=(BUTTON_WIDTH, -1))\n self.bt_add.SetToolTipString(\"Load data files\")\n wx.EVT_BUTTON(self, self.bt_add.GetId(), self._load_data)\n\n # Delete Data Button\n self.bt_remove = wx.Button(self, wx.NewId(), \"Delete Data\",\n size=(BUTTON_WIDTH, -1))\n self.bt_remove.SetToolTipString(\"Delete data from the application\")\n wx.EVT_BUTTON(self, self.bt_remove.GetId(), self.on_remove)\n\n # Send data to perspective button\n self.bt_import = wx.Button(self, wx.NewId(), \"Send To\",\n size=(BUTTON_WIDTH, -1))\n self.bt_import.SetToolTipString(\"Send Data set to active perspective\")\n wx.EVT_BUTTON(self, self.bt_import.GetId(), self.on_import)\n\n # Choose perspective to be send data to combo box\n self.perspective_cbox = wx.ComboBox(self, -1,\n style=wx.CB_READONLY)\n if not IS_MAC:\n self.perspective_cbox.SetMinSize((BUTTON_WIDTH*1.6, -1))\n wx.EVT_COMBOBOX(self.perspective_cbox, -1,\n self._on_perspective_selection)\n\n # Append data to current Graph Button\n self.bt_append_plot = wx.Button(self, wx.NewId(), \"Append Plot To\",\n size=(BUTTON_WIDTH, -1))\n self.bt_append_plot.SetToolTipString(\n \"Plot the selected data in the active panel\")\n wx.EVT_BUTTON(self, self.bt_append_plot.GetId(), self.on_append_plot)\n\n # Create a new graph and send data to that new graph button\n self.bt_plot = wx.Button(self, wx.NewId(), \"New Plot\",\n size=(BUTTON_WIDTH, -1))\n self.bt_plot.SetToolTipString(\"To trigger plotting\")\n wx.EVT_BUTTON(self, self.bt_plot.GetId(), self.on_plot)\n\n # Freeze current theory button - becomes a data set and stays on graph\n self.bt_freeze = wx.Button(self, wx.NewId(), \"Freeze Theory\",\n size=(BUTTON_WIDTH, -1))\n freeze_tip = \"To trigger freeze a theory: making a copy\\n\"\n freeze_tip += \"of the theory checked to Data box,\\n\"\n freeze_tip += \" so that it can act like a real data set.\"\n self.bt_freeze.SetToolTipString(freeze_tip)\n wx.EVT_BUTTON(self, self.bt_freeze.GetId(), self.on_freeze)\n\n # select plot to send to combo box (blank if no data)\n if sys.platform == 'darwin':\n self.cb_plotpanel = wx.ComboBox(self, -1,\n style=wx.CB_READONLY)\n else:\n self.cb_plotpanel = wx.ComboBox(self, -1,\n style=wx.CB_READONLY | wx.CB_SORT)\n wx.EVT_COMBOBOX(self.cb_plotpanel, -1, self._on_plot_selection)\n self.cb_plotpanel.Disable()\n\n # Help button\n self.bt_help = wx.Button(self, wx.NewId(), \"HELP\",\n size=(BUTTON_WIDTH, -1))\n self.bt_help.SetToolTipString(\"Help for the Data Explorer.\")\n wx.EVT_BUTTON(self, self.bt_help.GetId(), self.on_help)\n\n self.sizer3.AddMany([(self.bt_add),\n ((10, 10)),\n (self.bt_remove),\n ((10, 10)),\n (self.bt_freeze),\n ((10, 10)),\n (self.bt_plot),\n ((10, 10)),\n (self.bt_append_plot),\n (self.cb_plotpanel,\n wx.EXPAND | wx.ADJUST_MINSIZE, 5),\n ((5, 5)),\n ((5, 5)),\n (self.bt_import, 0, wx.EXPAND | wx.RIGHT, 5),\n (self.perspective_cbox,\n wx.EXPAND | wx.ADJUST_MINSIZE, 5),\n ((10, 10)),\n (self.sizer4),\n ((10, 10)),\n (self.bt_help, 0, wx.RIGHT, 5)])\n\n self.sizer3.AddGrowableCol(1, 1)\n self.show_data_button()\n self.enable_remove()\n self.enable_import()\n self.enable_plot()\n self.enable_append()\n self.enable_freeze()\n self.enable_remove_plot()",
"def addToParent(self):\n self.parent.GetSizer().Add(self.fieldLabel, 0,\n wx.ALIGN_CENTER_VERTICAL)\n self.parent.GetSizer().Add(self.textInput, 0,\n wx.ALIGN_CENTER_VERTICAL)\n self.parent.GetSizer().Add(self.browseButton, 0,\n wx.ALIGN_CENTER_VERTICAL)",
"def create_main(self):\n self.frame = wxMediatorMainFrame(self)",
"def build(self):\n self.logging = LoggingView(self.model, 20)\n\n l = [\n self.logging\n ]\n\n w = urwid.Filler(urwid.Pile(l), 'top')\n return w",
"def create_widgets(self):\n # self.var_spherical = IntVar()\n # self.var_3d = IntVar()\n # self.var_spatial_audio = IntVar()\n # self.button_open[\"command\"] = self.action_open\n # self.button_inject[\"command\"] = self.action_inject\n pass",
"def addComponents(self):\n\n self.mainLayout = QVBoxLayout()\n self.setLayout(self.mainLayout)\n # title\n self.lblTitle = QLabel(self.title)\n self.mainLayout.addWidget(self.lblTitle)\n styleTitle = \"\"\"\nfont-size: 20px; \nfont-style:italic; \nfont-weight: bold; \nmargin:auto;\nmargin-bottom: 1px; \n\"\"\"\n self.lblTitle.setStyleSheet(styleTitle)\n\n # controls\n self.widgetControls = QWidget()\n self.layoutControls = QGridLayout()\n # self.layoutControls.setColumnStretch(0, 4)\n # self.layoutControls.setColumnStretch(1, 4)\n # self.layoutControls.setColumnStretch(2, 4)\n\n self.widgetControls.setLayout(self.layoutControls)\n self.mainLayout.addWidget(self.widgetControls)\n\n # buttons\n styleControls = \"\"\"\n width: 60px; \n height: 50px; \n \"\"\"\n self.buttons = []\n for i in range(self.shapeRow):\n self.buttons.append(self.generateColumnButtons())\n\n for i in range(self.shapeRow):\n for j in range(self.shapeColumn):\n self.buttons[i][j].setStyleSheet(styleControls)\n self.layoutControls.addWidget(self.buttons[i][j], i, j)",
"def create_widgets(self):\n\n # tk.Button(win, text=\"Update\", command=self.update).grid(row=1, column=1)\n tkvar = tk.StringVar(win)\n # Dictionary with options\n choices = ('Clear', 'Small Glider', 'Glider', 'Exploder', '10 Cell Row', 'Light Weight Spaceship', 'Tumbler',\n 'Gosper Glider Gu')\n self.combo_input = ttk.Combobox(self.control_area, width=25, values=choices, state='readonly')\n self.combo_input.pack(side=tk.LEFT)\n self.combo_input.current(0)\n self.combo_input.bind(\"<<ComboboxSelected>>\", self.combo_callback)\n\n self.next = tk.Button(self.control_area, text=\"Next\", command=self.next_generation)\n self.next.pack(side=tk.LEFT, padx=3, pady=2)\n self.start = tk.Button(self.control_area, text=\"Start\", command=self.start_game)\n self.start.pack(side=tk.LEFT, padx=3, pady=2)\n\n self.stop = tk.Button(self.control_area, text=\"Stop\", fg=\"red\", command=self.stop_game)\n self.stop.pack(side=tk.LEFT, padx=3, pady=2)\n\n self.stop = tk.Button(self.control_area, text=\"Fast\", fg=\"red\", command=self.stop_game)\n self.stop.pack(side=tk.LEFT, padx=3, pady=2)\n self.gen_label = tk.Label(win, text=\"label\", bg=\"#808080\")\n self.gen_label.grid(row=0, column=1)",
"def hsbSizer(boxArgs,*elements):\r\n return aSizer(wx.StaticBoxSizer(wx.StaticBox(*boxArgs),wx.HORIZONTAL),*elements)",
"def _init(self):\n self.wx_menu = wx.Menu()",
"def createWidget(self):\n figure = Figure(figsize=(4,2), dpi=100)\n \"\"\"Figure size is measured in inches.\"\"\"\n graph = figure.add_subplot(111)\n \"\"\"The default subplot, which creates one row, one column, with index one.\"\"\"\n graph.plot(self.wave_table[0], self.wave_table[1])\n\n canvas = FigureCanvasTkAgg(figure, self.master)\n canvas.draw()\n canvas.get_tk_widget().pack(fill=tk.BOTH, expand=True)",
"def OnRender(self, event):\r\n\r\n # if the frame is about to be deleted, don't bother\r\n if not self._frame or self._frame.IsBeingDeleted():\r\n return\r\n \r\n if not self._frame.GetSizer():\r\n return\r\n\r\n mouse = wx.GetMouseState()\r\n mousePos = wx.Point(mouse.GetX(), mouse.GetY())\r\n point = self._frame.ScreenToClient(mousePos)\r\n art = self._art\r\n\r\n dc = event.GetDC()\r\n \r\n for part in self._uiparts:\r\n \r\n # don't draw hidden pane items or items that aren't windows\r\n if part.sizer_item and ((not part.sizer_item.IsWindow() and \\\r\n not part.sizer_item.IsSpacer() and \\\r\n not part.sizer_item.IsSizer()) or \\\r\n not part.sizer_item.IsShown()):\r\n \r\n continue\r\n \r\n ptype = part.type\r\n \r\n if ptype in [AuiDockUIPart.typeDockSizer, AuiDockUIPart.typePaneSizer]:\r\n art.DrawSash(dc, self._frame, part.orientation, part.rect)\r\n\r\n elif ptype == AuiDockUIPart.typeBackground:\r\n art.DrawBackground(dc, self._frame, part.orientation, part.rect)\r\n\r\n elif ptype == AuiDockUIPart.typeCaption:\r\n art.DrawCaption(dc, self._frame, part.pane.caption, part.rect, part.pane)\r\n\r\n elif ptype == AuiDockUIPart.typeGripper:\r\n art.DrawGripper(dc, self._frame, part.rect, part.pane)\r\n\r\n elif ptype == AuiDockUIPart.typePaneBorder:\r\n art.DrawBorder(dc, self._frame, part.rect, part.pane)\r\n\r\n elif ptype == AuiDockUIPart.typePaneButton: \r\n self.DrawPaneButton(dc, part, point)",
"def __init__(self, parent, id):\n wx.Frame.__init__(self, parent,id,'Frame With Button', size=(300,100))\n panel = wx.Panel(self) # 创建画板\n # 将按钮添加到画板中去\n button = wx.Button(panel, label='Close', pos=(125, 10), size=(50, 50))\n # 绑定按钮单击事件\n self.Bind(wx.EVT_BUTTON, self.OnCloseMe, button)\n # 绑定窗口关闭事件\n self.Bind(wx.EVT_CLOSE, self.OnCloseWindow)",
"def build_ui(self):\n self.ui = UI_procstep.Ui_Form()#.Ui_USGSContactInfoWidgetMain()\n self.ui.setupUi(self)\n self.setup_dragdrop(self)\n\n self.proc_step = RepeatingElement(which='tab',\n tab_label='Step', add_text='Additional Step',\n widget=ProcessStep, remove_text='Remove Step', italic_text='Processing Steps Taken')\n\n #self.proc_step = RepeatingElement(params=params, which='tab', tab_label='Source',)\n self.proc_step.add_another()\n self.ui.widget_procstep.layout().addWidget(self.proc_step)",
"def __init__(self, toolPanel):\r\n self._toolPanel = toolPanel\r\n self._toolSet = toolPanel.createToolSet( \"Snapshots\", resizable = True ) \r\n app = wx.GetApp()\r\n\r\n buttonPanel = self._toolSet.addTool( wx.Panel )\r\n self._takeSnapshotButton = wx.BitmapButton( buttonPanel, bitmap = wx.Bitmap('../data/ui/takeSnapshotButton.png', wx.BITMAP_TYPE_PNG) ) \r\n self._takeSnapshotButton.SetBitmapDisabled( wx.Bitmap('../data/ui/takeSnapshotButtonDisabled.png', wx.BITMAP_TYPE_PNG) )\r\n self._dontRestoreControllerParamsButton = UI.Ext.ToggleBitmapButton( buttonPanel, bitmap = wx.Bitmap('../data/ui/restoreControllerParams.png', wx.BITMAP_TYPE_PNG) )\r\n self._dontRestoreControllerParamsButton.SetBitmapSelected( wx.Bitmap('../data/ui/dontRestoreControllerParams.png', wx.BITMAP_TYPE_PNG) )\r\n self._previousButton = wx.BitmapButton( buttonPanel, bitmap = wx.Bitmap('../data/ui/previousSnapshotButton.png', wx.BITMAP_TYPE_PNG) )\r\n self._previousButton.SetBitmapDisabled( wx.Bitmap('../data/ui/previousSnapshotButtonDisabled.png', wx.BITMAP_TYPE_PNG) )\r\n self._restoreButton = wx.BitmapButton( buttonPanel, bitmap = wx.Bitmap('../data/ui/restoreSnapshotButton.png', wx.BITMAP_TYPE_PNG) )\r\n self._restoreButton.SetBitmapDisabled( wx.Bitmap('../data/ui/restoreSnapshotButtonDisabled.png', wx.BITMAP_TYPE_PNG) )\r\n self._nextButton = wx.BitmapButton( buttonPanel, bitmap = wx.Bitmap('../data/ui/nextSnapshotButton.png', wx.BITMAP_TYPE_PNG) )\r\n self._nextButton.SetBitmapDisabled( wx.Bitmap('../data/ui/nextSnapshotButtonDisabled.png', wx.BITMAP_TYPE_PNG) )\r\n \r\n self._takeSnapshotButton.Bind( wx.EVT_BUTTON, lambda e: app.takeSnapshot() )\r\n self._previousButton.Bind( wx.EVT_BUTTON, lambda e: app.previousSnapshot(self.restoreControllerParams()) )\r\n self._restoreButton.Bind( wx.EVT_BUTTON, lambda e: app.restoreActiveSnapshot(self.restoreControllerParams()) )\r\n self._nextButton.Bind( wx.EVT_BUTTON, lambda e: app.nextSnapshot(self.restoreControllerParams()) )\r\n \r\n self._hBoxButtons = wx.BoxSizer( wx.HORIZONTAL )\r\n self._hBoxButtons.Add(self._takeSnapshotButton)\r\n self._hBoxButtons.AddStretchSpacer(1)\r\n self._hBoxButtons.Add(self._dontRestoreControllerParamsButton)\r\n self._hBoxButtons.Add(self._previousButton)\r\n self._hBoxButtons.Add(self._restoreButton)\r\n self._hBoxButtons.Add(self._nextButton)\r\n \r\n buttonPanel.SetSizerAndFit(self._hBoxButtons)\r\n\r\n self._infoTree = self._toolSet.addTool( UI.InfoTreeBasic, 1, object = app.getSnapshotTree(), desiredHeight = 200, autoVisible = True, onUpdate = self.update )\r\n self._infoTree.Bind( wx.EVT_TREE_ITEM_ACTIVATED , self.selectSnapshot )\r\n\r\n self._activeTreeItemId = None",
"def OnPaint(self, event):\r\n\r\n dc = wx.AutoBufferedPaintDC(self)\r\n cli_rect = wx.RectPS(wx.Point(0, 0), self.GetClientSize())\r\n\r\n horizontal = True\r\n if self._agwStyle & AUI_TB_VERTICAL:\r\n horizontal = False\r\n\r\n if self._agwStyle & AUI_TB_PLAIN_BACKGROUND:\r\n self._art.DrawPlainBackground(dc, self, cli_rect)\r\n else:\r\n self._art.DrawBackground(dc, self, cli_rect, horizontal)\r\n\r\n gripper_size = self._art.GetElementSize(AUI_TBART_GRIPPER_SIZE)\r\n dropdown_size = self._art.GetElementSize(AUI_TBART_OVERFLOW_SIZE)\r\n\r\n # paint the gripper\r\n if gripper_size > 0 and self._gripper_sizer_item:\r\n gripper_rect = wx.Rect(*self._gripper_sizer_item.GetRect())\r\n if horizontal:\r\n gripper_rect.width = gripper_size\r\n else:\r\n gripper_rect.height = gripper_size\r\n \r\n self._art.DrawGripper(dc, self, gripper_rect)\r\n \r\n # calculated how far we can draw items\r\n if horizontal:\r\n last_extent = cli_rect.width\r\n else:\r\n last_extent = cli_rect.height\r\n \r\n if self._overflow_visible:\r\n last_extent -= dropdown_size\r\n\r\n # paint each individual tool\r\n for item in self._items:\r\n\r\n if not item.sizer_item:\r\n continue\r\n\r\n item_rect = wx.Rect(*item.sizer_item.GetRect())\r\n\r\n if (horizontal and item_rect.x + item_rect.width >= last_extent) or \\\r\n (not horizontal and item_rect.y + item_rect.height >= last_extent):\r\n\r\n break\r\n \r\n if item.kind == ITEM_SEPARATOR:\r\n # draw a separator\r\n self._art.DrawSeparator(dc, self, item_rect)\r\n \r\n elif item.kind == ITEM_LABEL:\r\n # draw a text label only\r\n self._art.DrawLabel(dc, self, item, item_rect)\r\n \r\n elif item.kind == ITEM_NORMAL:\r\n # draw a regular button or dropdown button\r\n if not item.dropdown:\r\n self._art.DrawButton(dc, self, item, item_rect)\r\n else:\r\n self._art.DrawDropDownButton(dc, self, item, item_rect)\r\n \r\n elif item.kind == ITEM_CHECK:\r\n # draw a regular toggle button or a dropdown one\r\n if not item.dropdown:\r\n self._art.DrawButton(dc, self, item, item_rect)\r\n else:\r\n self._art.DrawDropDownButton(dc, self, item, item_rect)\r\n\r\n elif item.kind == ITEM_RADIO:\r\n # draw a toggle button\r\n self._art.DrawButton(dc, self, item, item_rect)\r\n \r\n elif item.kind == ITEM_CONTROL:\r\n # draw the control's label\r\n self._art.DrawControlLabel(dc, self, item, item_rect)\r\n \r\n # fire a signal to see if the item wants to be custom-rendered\r\n self.OnCustomRender(dc, item, item_rect)\r\n \r\n # paint the overflow button\r\n if dropdown_size > 0 and self._overflow_sizer_item:\r\n dropdown_rect = self.GetOverflowRect()\r\n self._art.DrawOverflowButton(dc, self, dropdown_rect, self._overflow_state)",
"def vSizer(*elements):\r\n return aSizer(wx.BoxSizer(wx.VERTICAL),*elements)",
"def build_ui(self):\n\n self.frame.columnconfigure(0, pad=20)\n self.frame.columnconfigure(1, pad=20)\n\n\n self.frame.rowconfigure(0, pad=3)\n self.frame.rowconfigure(1, pad=3)\n self.frame.rowconfigure(2, pad=3)\n\n p1_label = Label(self.frame)\n p1_label[\"text\"] = \"Player 1\"\n p1_label.grid(row=0, column=0)\n\n p2_label = Label(self.frame)\n p2_label[\"text\"] = \"Player 2\"\n p2_label.grid(row=0, column=1)\n\n self.lb1 = Listbox(self.frame)\n for script in self.ai_list:\n self.lb1.insert(END, script)\n self.lb1.grid(row=1, column=0)\n\n self.lb1.selection_set(0)\n self.lb1[\"exportselection\"] = 0\n\n self.lb2 = Listbox(self.frame)\n for script in self.ai_list:\n self.lb2.insert(END, script)\n self.lb2.grid(row=1, column=1)\n\n self.lb2.selection_set(0)\n self.lb2[\"exportselection\"] = 0\n\n start_game_button = Button(self.frame)\n start_game_button[\"text\"] = \"Start Game\"\n start_game_button[\"command\"] = self.start_game\n start_game_button.grid(row=2, column=0)\n\n self.check_box = Checkbutton(self.frame, text=\"Draw UI\",variable=self.ui_draw)\n self.check_box.grid(row=2,column=1)\n\n self.frame.pack()",
"def topLayout(self, parent):\n #Top level panel divided in to left/right\n hbox = wx.BoxSizer(wx.HORIZONTAL)\n\n #the left handside panels do not need to size\n left_vbox = wx.BoxSizer(wx.VERTICAL)\n hbox.Add(left_vbox, border = 10) \n \n left_vbox.Add(LoadingPanel(parent, sesModel=self.model), \n flag = wx.EXPAND)\n left_vbox.Add(OptimizationPanel(parent, sesModel=self.model), \n flag = wx.TOP, \n border = 20)\n \n #the Notebook area\n self.notebookPanel = wx.Panel(parent, \n style = wx.RAISED_BORDER)\n\n hbox.Add(self.notebookPanel, \n proportion=1, \n border=10, \n flag = wx.EXPAND | wx.LEFT)\n self.populateNotebook()\n\n parent.SetSizerAndFit(hbox)",
"def Init(self):\r\n\r\n base_colour = GetBaseColour()\r\n darker1_colour = StepColour(base_colour, 85)\r\n darker2_colour = StepColour(base_colour, 75)\r\n darker3_colour = StepColour(base_colour, 60)\r\n darker4_colour = StepColour(base_colour, 40)\r\n\r\n self._background_colour = base_colour\r\n self._background_gradient_colour = StepColour(base_colour, 180)\r\n\r\n isMac = wx.Platform == \"__WXMAC__\"\r\n\r\n if isMac:\r\n self._active_caption_colour = wx.SystemSettings.GetColour(wx.SYS_COLOUR_HIGHLIGHT)\r\n else:\r\n self._active_caption_colour = wx.SystemSettings.GetColour(wx.SYS_COLOUR_ACTIVECAPTION)\r\n\r\n self._active_caption_gradient_colour = LightContrastColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_HIGHLIGHT))\r\n self._active_caption_text_colour = wx.SystemSettings.GetColour(wx.SYS_COLOUR_HIGHLIGHTTEXT)\r\n self._inactive_caption_colour = darker1_colour\r\n self._inactive_caption_gradient_colour = StepColour(base_colour, 97)\r\n self._inactive_caption_text_colour = wx.BLACK\r\n \r\n self._sash_brush = wx.Brush(base_colour)\r\n self._background_brush = wx.Brush(base_colour)\r\n self._border_pen = wx.Pen(darker2_colour)\r\n self._gripper_brush = wx.Brush(base_colour)\r\n self._gripper_pen1 = wx.Pen(darker4_colour)\r\n self._gripper_pen2 = wx.Pen(darker3_colour)\r\n self._gripper_pen3 = wx.WHITE_PEN",
"def create_controls(self):\n\n self.button_frame = tk.LabelFrame(self, text=\"Controls\", padx=5, pady=5)\n self.button_frame.grid(row=0, column=1, padx=5, pady=5, sticky=\"n\")\n self.load_data = tk.Button(\n self.button_frame, text=\"Load Data\", command=self.update_stats\n )\n self.load_data.grid(row=0)\n\n self.print_data = tk.Button(\n self.button_frame, text=\"Print Data\", command=self.print_raw_data,\n )\n self.print_data.grid(row=1)\n\n self.quit = tk.Button(\n self.button_frame, text=\"Quit\", fg=\"red\", command=self.master.destroy\n )\n self.quit.grid(row=2)",
"def create_widget(self):\n pass",
"def init_foxbms_dialog(self):\n # Add icon\n _icon = wx.Icon()\n logo_img = wx.Image(logo_foxbms.GetImage())\n logo_img_size = logo_img.GetSize()\n resized = logo_img_size / 5\n logo_img.Rescale(resized[0], resized[1])\n image = wx.Bitmap(logo_img)\n _icon.CopyFromBitmap(image)\n self.SetIcon(_icon)\n # fill panel\n panel = wx.Panel(self, -1)\n sizer = wx.GridBagSizer(4, 2)\n # logo and heading\n png = wx.Image(logo_foxbms.GetImage()).ConvertToBitmap()\n logo = wx.StaticBitmap(self, -1, png, (5, 5), (png.GetWidth(), png.GetHeight()))\n heading = wx.TextCtrl(\n self,\n -1,\n f\"foxBMS 2 GUI - {__version__}\",\n size=(275, 30),\n style=wx.TE_READONLY | wx.BORDER_NONE,\n )\n # pylint:disable=no-member\n font = wx.Font(20, wx.DECORATIVE, wx.NORMAL, wx.BOLD)\n heading.SetFont(font)\n # copyright hyperlink to foxbms.org\n foxbms_copyright = wx.TextCtrl(\n self,\n -1,\n __copyright__,\n (5, 5),\n size=(275, 30),\n style=wx.TE_READONLY | wx.BORDER_NONE,\n )\n foxbms_url = hl.HyperLinkCtrl(panel, -1, \"foxbms.org\", URL=FOXBMS_URL)\n # License text\n license_html = markdown.markdown(self.license_text, output_format=\"html5\")\n license_ctrl = wx.html.HtmlWindow(\n self,\n -1,\n pos=(5, 5),\n size=(600, 550),\n style=wx.TE_READONLY | wx.TE_MULTILINE | wx.BORDER_NONE,\n )\n license_ctrl.SetPage(license_html)\n # GUI description\n readme_html = markdown.markdown(self.readme_text, output_format=\"html5\")\n description_ctrl = wx.html.HtmlWindow(\n self,\n -1,\n pos=(5, 5),\n size=(600, 400),\n style=wx.TE_READONLY | wx.TE_MULTILINE | wx.BORDER_NONE,\n )\n description_ctrl.SetPage(readme_html)\n # add stuff to sizer\n sizer.Add(logo, pos=(0, 0), flag=wx.ALIGN_CENTER)\n sizer.Add(heading, pos=(0, 1), flag=wx.ALIGN_CENTER)\n sizer.Add(foxbms_copyright, pos=(1, 0), flag=wx.ALIGN_CENTER)\n sizer.Add(foxbms_url, pos=(1, 1), flag=wx.ALIGN_CENTER)\n sizer.Add(license_ctrl, pos=(2, 0), span=(1, 2), flag=wx.EXPAND)\n sizer.Add(description_ctrl, pos=(3, 0), span=(1, 2), flag=wx.EXPAND)\n panel.SetSizerAndFit(sizer)",
"def buildUI(self):\n outside = QtWidgets.QVBoxLayout(self)\n columns = QtWidgets.QHBoxLayout(self)\n layout = QtWidgets.QVBoxLayout(self)\n self.details_layout = QtWidgets.QVBoxLayout(self) #column with edit panel\n self.restriction = QtWidgets.QWidget() #restricts size of details_layout\n self.restriction.setLayout(self.details_layout)\n self.restriction.setFixedWidth(200)\n columns.addLayout(layout)\n columns.addWidget(self.restriction)\n outside.addLayout(columns)\n\n #tab widget\n self.tabwidget = tabsWindow(self) #QtWidgets.QTabWidget(tabsClosable = True, movable = True)\n layout.addWidget(self.tabwidget)\n #add base tab\n self.tabwidget.newTab(name =\"Untitled\", image = \"\")\n\n #add second column with details\n self.updateDetails(\"edit\")\n\n #edit button\n layout_btns = QtWidgets.QHBoxLayout()\n editBtn = QtWidgets.QPushButton(\"Stop Editing\")\n editBtn.clicked.connect(lambda: self.editChange(editBtn))\n layout_btns.addWidget(editBtn)\n\n #save button\n saveBtn = QtWidgets.QPushButton(\"Save\")\n saveBtn.clicked.connect(self.save)\n layout_btns.addWidget(saveBtn)\n\n #load button\n loadBtn = QtWidgets.QPushButton(\"Load\")\n loadBtn.clicked.connect(self.load)\n layout_btns.addWidget(loadBtn)\n\n #close button\n closeBtn = QtWidgets.QPushButton('Close')\n closeBtn.clicked.connect(self.closeEvent)\n layout_btns.addWidget(closeBtn)\n\n outside.addLayout(layout_btns) #add buttons to layout",
"def vsbSizer(boxArgs,*elements):\r\n return aSizer(wx.StaticBoxSizer(wx.StaticBox(*boxArgs),wx.VERTICAL),*elements)",
"def build_gui(self):\n # Build header\n layout = [[sg.Text(f\"Welcome to {self.app_name}\")], [sg.Text('')]]\n\n # Build form\n for (field_name, field) in (self.config.get(\"fields\")).items():\n # By default we will use str as type\n if \"type\" not in field:\n field.update({\"type\": \"str\"})\n\n # Make sure we have a default value\n if \"default\" not in field:\n field.update({\"default\": \"\"})\n\n if field.get(\"type\") == \"str\" or field.get(\"type\") == \"int\":\n layout.append(self.build_string_field(field_name, field))\n elif field.get(\"type\") == \"date\":\n layout.append(self.build_date_field(field_name, field))\n elif field.get(\"type\") == \"list\":\n layout.append(self.build_list_field(field_name, field))\n elif field.get(\"type\") == \"textarea\":\n layout.append(self.build_textarea_field(field_name, field))\n else: # If not identified, just treat it as a str\n layout.append(self.build_string_field(field_name, field))\n\n # Build footer\n layout.append([sg.Text('')])\n layout.append([sg.Text('* Mandatory fields', text_color=\"Red\")])\n layout.append([sg.Button('Build'), sg.Button('Cancel')])\n layout.append([sg.Text('')])\n return layout",
"def viewBuild (self, event = None):\r\n path = 'file://' + urllib.pathname2url(self.buildDestination)\r\n path = path.replace('file://///', 'file:///')\r\n wx.LaunchDefaultBrowser(path)",
"def createWidgets(self):\n raise NotImplementedError",
"def __init__(self, parent, state, position = wx.DefaultPosition):\n ##Set up data.\n self.state = state\n modeName = MODE_LIST[self.state.GetSurface(\"Mode\")]\n wx.Dialog.__init__(self, parent, -1, \"%s Mode Settings\" %(modeName),\n pos = position,\n style = wx.DEFAULT_FRAME_STYLE ^ (wx.RESIZE_BORDER | \n wx.MINIMIZE_BOX |\n wx.MAXIMIZE_BOX)\n | wx.TAB_TRAVERSAL)\n ##Jconf pull-down menu.\n \n self.lblStBox1 = wx.StaticBox(self, -1, \"Programs to launch\" )\n ##Name Server checkbox.\n self.cbNameServer = wx.CheckBox(self, -1, \"Name Server\", wx.DefaultPosition, wx.DefaultSize, 0 )\n self.cbNameServer.SetToolTip(wx.ToolTip(\"Run Name Server at Launch\"))\n ##Conductor checkbox.\n self.cbConductor = wx.CheckBox(self, -1, \"Conductor\", wx.DefaultPosition, wx.DefaultSize, 0 )\n self.cbConductor.SetToolTip(wx.ToolTip(\"Run Conductor at Launch\"))\n ##Xplorer checkbox.\n self.cbXplorer = wx.CheckBox(self, -1, \"Xplorer\", wx.DefaultPosition, wx.DefaultSize, 0 )\n self.cbXplorer.SetToolTip(wx.ToolTip(\"Run Xplorer at Launch\"))\n ##Desktop checkbox.\n self.cbDesktop = wx.CheckBox(self, -1, \"Desktop Mode\", wx.DefaultPosition, wx.DefaultSize, 0 )\n self.cbDesktop.SetToolTip(wx.ToolTip(\"Set Desktop Mode for\" +\n \" Conductor and Xplorer\"))\n \n self.lblStBox2 = wx.StaticBox(self, -1, \"Xplorer Configuration\" )\n ##Xplorer Type radio box.\n self.rbXplorer = wx.RadioBox(self, -1, \"Mode\",\n wx.DefaultPosition, wx.DefaultSize,\n RADIO_XPLORER_LIST, 1, wx.RA_SPECIFY_ROWS)\n self.rbXplorer.SetToolTip(wx.ToolTip(\"Which Xplorer format do you\" +\n \" want to launch?\"))\n ##Cluster button.\n self.bCluster = wx.Button(self, -1, \"Cluster Settings\", wx.DefaultPosition, wx.DefaultSize, 0 )\n self.bCluster.SetToolTip(wx.ToolTip(\"Set the computers and extra\" +\n \" variables in the cluster.\"))\n ##Configuration Choice\n self.chJconf = wx.Choice(self, -1, wx.DefaultPosition, [150,-1])\n self.chJconf.SetToolTip(wx.ToolTip(\"Choose Xplorer's configuration.\"))\n ##Edit Jconf button.\n self.bEditJconf = wx.Button(self, -1, \"Edit Configuration List\", wx.DefaultPosition, wx.DefaultSize, 0 )\n self.bEditJconf.SetToolTip(wx.ToolTip(\"Edit the list of Xplorer\" +\n \" configurations.\")) \n #OK and Cancel button\n if windows:\n self.bOk = wx.Button( self, wx.ID_OK, \"OK\", wx.DefaultPosition, wx.DefaultSize, 0 )\n else:\n self.bOk = wx.Button( self, wx.ID_SAVE, \"Save\", wx.DefaultPosition, wx.DefaultSize, 0 )\n self.bCancel = wx.Button( self, wx.ID_CANCEL, \"Cancel\", wx.DefaultPosition, wx.DefaultSize, 0 )\n \n ##Bind events.\n self.Bind(wx.EVT_LISTBOX, self.Refresh, self.chJconf)\n self.Bind(wx.EVT_CHECKBOX, self.Refresh, self.cbXplorer)\n self.Bind(wx.EVT_RADIOBOX, self.Refresh, self.rbXplorer)\n self.Bind(wx.EVT_CHECKBOX, self.Refresh, self.cbConductor)\n self.Bind(wx.EVT_CHECKBOX, self.Refresh, self.cbDesktop)\n \"\"\"\n self.Bind(wx.EVT_LISTBOX, self.UpdateData, self.chJconf)\n self.Bind(wx.EVT_CHECKBOX, self.UpdateData, self.cbXplorer)\n self.Bind(wx.EVT_RADIOBOX, self.UpdateData, self.rbXplorer)\n self.Bind(wx.EVT_CHECKBOX, self.UpdateData, self.cbConductor)\n self.Bind(wx.EVT_CHECKBOX, self.UpdateData, self.cbDesktop)\n \"\"\"\n self.Bind(wx.EVT_CLOSE, self.OnClose)\n if windows:\n self.Bind(wx.EVT_BUTTON, self.OnOk, id = wx.ID_OK)\n else:\n self.Bind(wx.EVT_BUTTON, self.OnOk, id = wx.ID_SAVE)\n self.Bind(wx.EVT_BUTTON, self.EditJconf, self.bEditJconf)\n self.Bind(wx.EVT_BUTTON, self.EditCluster, self.bCluster)\n \n ##Set sizers.\n vSizerMain = wx.BoxSizer( wx.VERTICAL )\n vSizer1 = 
wx.BoxSizer( wx.VERTICAL )\n svSizer1 = wx.StaticBoxSizer( self.lblStBox1, wx.VERTICAL )\n svSizer1.Add( self.cbNameServer, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5 )\n hSizer1 = wx.BoxSizer( wx.HORIZONTAL )\n hSizer1.Add( self.cbConductor, 0, wx.ALIGN_CENTER|wx.ALL, 5 )\n spacer1 = wx.StaticText(self, -1, \" \", wx.DefaultPosition, wx.DefaultSize, 0 )\n hSizer1.Add( spacer1, 0, wx.ALIGN_CENTER, 5 )\n hSizer1.Add( self.cbDesktop, 0, wx.ALIGN_CENTER|wx.ALL, 5 )\n svSizer1.Add( hSizer1, 0, wx.ALIGN_CENTER_VERTICAL, 5 )\n svSizer1.Add( self.cbXplorer, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5 )\n vSizer1.Add( svSizer1, 0, wx.GROW|wx.ALIGN_CENTER_VERTICAL|wx.TOP, 5 )\n spacer2 = wx.StaticText(self, -1, \"\", wx.DefaultPosition, [10,10], 0 )\n vSizer1.Add( spacer2, 0, wx.ALIGN_CENTER, 5 )\n svSizer2 = wx.StaticBoxSizer( self.lblStBox2, wx.VERTICAL )\n hSizer2 = wx.BoxSizer( wx.HORIZONTAL )\n hSizer2.Add( self.rbXplorer, 0, wx.ALIGN_CENTER|wx.ALL, 5 )\n hSizer2.Add( self.bCluster, 0, wx.ALIGN_CENTER|wx.LEFT|wx.RIGHT|wx.TOP, 5 )\n svSizer2.Add( hSizer2, 0, wx.ALIGN_CENTER_VERTICAL, 5 )\n hSizer3 = wx.BoxSizer( wx.HORIZONTAL )\n hSizer3.Add( self.chJconf, 0, wx.ALIGN_CENTER|wx.ALL, 5 )\n hSizer3.Add( self.bEditJconf, 0, wx.ALIGN_CENTER|wx.ALL, 5 )\n svSizer2.Add( hSizer3, 0, wx.ALIGN_CENTER, 5 )\n vSizer1.Add( svSizer2, 0, wx.GROW|wx.ALIGN_CENTER_VERTICAL, 5 )\n hSizer4 = wx.BoxSizer( wx.HORIZONTAL )\n if windows:\n hSizer4.Add( self.bOk, 0, wx.ALIGN_CENTER|wx.ALL, 5 ) \n hSizer4.Add( self.bCancel, 0, wx.ALIGN_CENTER|wx.LEFT|wx.TOP|wx.BOTTOM, 5 )\n else: \n hSizer4.Add( self.bCancel, 0, wx.ALIGN_CENTER|wx.ALL, 5 ) \n hSizer4.Add( self.bOk, 0, wx.ALIGN_CENTER|wx.LEFT|wx.TOP|wx.BOTTOM, 5 )\n vSizer1.Add( hSizer4, 0, wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.TOP, 5 )\n vSizerMain.Add( vSizer1, 0, wx.ALIGN_CENTER|wx.ALL, 5 ) \n \n vSizerMain.SetSizeHints(self)\n self.SetSizer(vSizerMain)\n #self.CenterOnParent(wx.BOTH)\n ##Set the background color.\n #Style(self)\n if not CLUSTER_ENABLED:\n self.bCluster.Hide()\n ##Set up OK button.\n ##Update Display\n self.React()",
"def create_panel_navigation(self, frame_parent):\r\n panel = ttk.Frame(frame_parent)\r\n\r\n tree = ttk.Treeview(panel, selectmode=\"browse\") # \"browse\" mode limits to one selection only\r\n tree.heading(\"#0\", text=\"Category\")\r\n tree.column(\"#0\", width=130)\r\n #tree.bind(\"<ButtonRelease-1>\", self.on_category_select) # left-button release\r\n tree.bind(\"<<TreeviewSelect>>\", self.on_category_select)\r\n #\r\n tree.insert('', tk.END, text=\"Email\")\r\n tree.insert('', tk.END, text=\"Access Restriction\")\r\n tree.selection_set(tree.get_children()[0]) # select the first item on init\r\n tree.grid(sticky=\"NS\")\r\n\r\n # http://stackoverflow.com/questions/25940217/python-getting-started-with-tk-widget-not-resizing-on-grid\r\n # or you can just do this: tree.pack(fill=tk.BOTH, expand=1)\r\n tree.rowconfigure(0, weight=1)\r\n tree.columnconfigure(0, weight=1)\r\n return panel",
"def make_widgets(self):\n self.mode_select = Selector(**MODE_SELECT_SETTINGS)\n self.bind_keys_to_modes()\n self.layer_select = Selector(**LAYER_SELECT_SETTINGS)\n self.check_boxes = CheckBoxArray(**CHECK_ARRAY_SETTINGS)\n self.check_boxes.bind_key(pg.K_v, self.toggle_layer_visibility)\n self.navs = [Button(**NAV_LEFT), Button(**NAV_RIGHT)]\n self.save_button = Button(**SAVE_BUTTON)\n self.load_button = Button(**LOAD_BUTTON)\n self.new_button = Button(**NEW_BUTTON)\n self.widgets = [self.mode_select, self.layer_select, self.check_boxes,\n self.navs[0], self.navs[1],\n self.save_button, self.load_button, self.new_button]",
"def __init__(self, parent):\r\n\r\n pre = wx.PrePyWindow()\r\n \r\n self._tabs = None\r\n self._rect = wx.Rect(0, 0, 200, 200)\r\n self._tab_ctrl_height = 20\r\n self._tab_rect = wx.Rect() \r\n self._parent = parent\r\n \r\n self.PostCreate(pre)",
"def getPanel(*args, allConfigs: bool=True, allPanels: bool=True, allScriptedTypes: bool=True,\n allTypes: bool=True, atPosition: List[int, int]=None, configWithLabel: AnyStr=\"\",\n containing: AnyStr=\"\", invisiblePanels: bool=True, scriptType: AnyStr=\"\", type:\n AnyStr=\"\", typeOf: AnyStr=\"\", underPointer: bool=True, visiblePanels: bool=True,\n withFocus: bool=True, withLabel: AnyStr=\"\", **kwargs)->List[AnyStr]:\n pass",
"def build_frames(self):\n self.cntrl_frame = tk.PanedWindow(self.root)\n self.cntrl_frame.pack(side = tk.TOP, padx = 1, pady = 1, fill = tk.Y)\n self.info_frame_1 = tk.PanedWindow(self.root)\n self.info_frame_1.pack(side = tk.TOP, padx = 1, pady = 2, fill = tk.Y)",
"def toControls(self,widget):",
"def __init__(self, parent, id, title):\n wx.Frame.__init__(self, parent, id, title, size=(1200, 560))\n self.SetBackgroundColour(wx.Colour(0, 0, 360))\n self.SetIcon(wx.Icon(gv.ICO_PATH))\n gv.iGeoMgr = self.geoMgr = GeoMgr(self)\n gv.iDCPosMgr = DataCenterMgr(self)\n self.SetSizer(self._buidUISizer())"
] | [
"0.7385788",
"0.6923711",
"0.6813496",
"0.66688484",
"0.66683596",
"0.66556764",
"0.65513384",
"0.652836",
"0.645381",
"0.64036214",
"0.6377987",
"0.6332031",
"0.63203603",
"0.6313604",
"0.63063955",
"0.63021123",
"0.6299205",
"0.6261179",
"0.6248059",
"0.6245965",
"0.62254584",
"0.6199167",
"0.616521",
"0.61551",
"0.6142791",
"0.6115495",
"0.60695773",
"0.6037544",
"0.6014396",
"0.5998616",
"0.5978992",
"0.59789145",
"0.59717935",
"0.596864",
"0.5967746",
"0.59672886",
"0.5952658",
"0.5950794",
"0.5934973",
"0.5929627",
"0.5928215",
"0.5927257",
"0.59085447",
"0.5897224",
"0.5897224",
"0.5868143",
"0.5840068",
"0.5832268",
"0.58222014",
"0.5821692",
"0.58179665",
"0.58173275",
"0.5814037",
"0.5803026",
"0.5798637",
"0.577704",
"0.5767804",
"0.57457715",
"0.5738024",
"0.5732161",
"0.5690326",
"0.56884634",
"0.5677711",
"0.5677597",
"0.5674782",
"0.56729054",
"0.56556785",
"0.56309056",
"0.56269103",
"0.56143516",
"0.56066144",
"0.56048274",
"0.55929554",
"0.5591052",
"0.5590417",
"0.5589564",
"0.55645275",
"0.55596596",
"0.555454",
"0.55542487",
"0.5553406",
"0.55479145",
"0.55420226",
"0.5531342",
"0.55268544",
"0.552117",
"0.5510905",
"0.5509751",
"0.5508968",
"0.5504492",
"0.5499614",
"0.54927725",
"0.5490998",
"0.54811555",
"0.5480569",
"0.547068",
"0.5470034",
"0.5469775",
"0.5448143",
"0.5447784"
] | 0.6124254 | 25 |
Generates a plot of the specified data file and sets the ThumbnailPanel's bitmap accordingly | def plot_thumb(self, data_fname):
thumbnail = self.controller.plot_thumb(data_fname, self.bitmap_width, self.bitmap_height)
if thumbnail is not None:
self.figure_bmp.SetBitmap(thumbnail)
else:
self.plot_blank() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _generate_plot(ax, power_data, title, min_db, max_db):\n # only generate plots for the transducers that have data\n if power_data.size <= 0:\n return\n\n ax.set_title(title, fontsize=ZPLSCCPlot.font_size_large)\n return imshow(ax, power_data, interpolation='none', aspect='auto', cmap='jet', vmin=min_db, vmax=max_db)",
"def test_plot_images(self):\n save_file(self.quart.plot_images)",
"def plot(self, job):\n # fill PlotJob with needed data if it doesn't exist\n # Plotter will look for the files it needs relative to the work directory\n # If this fails it will fall back to a baseline location if one was \n # Provided to cmake at the time this file was generated\n if job.dataPath == None :\n job.dataPath = \"Scenarios/\" + job.verificationDirectory + \"/baselines/\"\n \n if job.dataFile == None:\n job.dataFile = job.name + \"Results.zip\"\n \n if job.outputFilename==None:\n job.outputFilename=job.titleOverride+\".jpg\"\n \n if len(job.outputFilename.split(\".\"))==1:\n job.outputFilename+=\".jpg\"\n \n if job.imageWidth==None and job.imageHeight==None:\n job.imageWidth=1600\n job.imageHeight=800\n \n if not os.path.exists(job.dataPath):\n job.dataPath = os.path.join(job.basedir,job.dataPath)\n \n if not os.path.isfile(os.path.join(job.dataPath,job.dataFile)):\n job.dataPath = os.path.join(job.basedir,job.dataPath)\n \n if not job.fontSize:\n job.fontSize=22\n \n if not os.path.exists(os.path.dirname(job.outputDir)):\n os.mkdir(os.path.dirname(job.outputDir))\n \n self.drawgraph(job,os.path.join(job.dataPath,job.dataFile),os.path.join(job.outputDir,job.outputFilename))",
"def save_figure(self, data):\n\n\t\tsizes = np.shape(data)\n\t\tfig = plt.figure()\n\t\tfig.set_size_inches(1, 1. * sizes[0]/sizes[1], forward = False)\n\t\tax = plt.Axes(fig, [0., 0., 1., 1.])\n\t\tax.set_axis_off()\n\t\tfig.add_axes(ax)\n\t\tax.imshow(data, \"gray\")\n\n\t\t#plt.show()\n\t\tself.plotfile = os.path.join('static', 'Figure' + '.png')\n\t\tplt.savefig(self.plotfile, dpi = sizes[1])",
"def plot_chosen_data(main, dataPath):\n error = \"Error \"+errorPath+\"plot_chosen_data: Must choose data of proper format (tiff, jpeg, etc.)\"\n try:\n if dataPath == '':\n main.msg('thinks it has nothing')\n main.msg(error)\n return\n data = mpimg.imread(dataPath)\n imgObj = Img.Img(data, title = os.path.basename(dataPath), filePath = dataPath)\n main.imgObjList.append(imgObj)\n main.horizontalSlider.setMaximum(len(main.imgObjList)-1)\n main.horizontalSlider.setValue(main.horizontalSlider.maximum())\n func.plot_img_obj(main, imgObj)\n except:\n main.msg(error)",
"def make_image(self, frame, filename, **kwds):\n p = plot.plot(frame, **kwds)\n p.save_image(filename)",
"def plotfile(self):\r\n filename = self.locatefile()\r\n if filename == \"\":\r\n print \"\\nNo file was chosen, exiting ...\\n\"\r\n return\r\n else:\r\n print \"\\nXYZ Data file:\\n\" + filename\r\n \r\n print \"\\nReading XYZ data file....\"\r\n xyz = XYZImporter(filename)\r\n geodata = xyz.genericdata\r\n print \"FINISHED reading XYZ data file\"\r\n\r\n # Note PNG is only 8 bit, and so PDF has greater colour\r\n # depth \r\n print \"\\nAbout to render plot ...\"\r\n gp = GridPlotterCustom()\r\n gp.shownulls = False\r\n title = \"Plot of XYZ data file: \" + filename\r\n outfname = (filename.replace('.', '_') +\r\n '_PLOT_custom.pdf')\r\n gp.plotgeodata(geodata, title, outfname)\r\n print \"FINISHED rendering plot to:\\n\" + outfname\r\n print \"\\n\\n\"",
"def generatePlot(data):\n addendum = \"\"\n destination = \"D:\\\\Research\\\\scripts\\\\Results\\\\FullSet1\\\\$FilteredPlots\\\\take 4\\\\\"\n if len(data.detections.smallIncrease) != 0:\n addendum = \"small increases\\\\\"\n if len(data.detections.smallDecrease) != 0:\n addendum = \"small decreases\\\\\"\n if len(data.detections.largeIncrease) != 0:\n addendum = \"large increases\\\\\"\n if len(data.detections.largeDecrease) != 0:\n addendum = \"large decreases\\\\\"\n if addendum == \"\":\n addendum = \"no decreases\\\\\"\n \n plt.figure(1)\n plt.subplot(211)\n #print np.min(data.magdata), np.max(data.magdata)\n axes = plt.gca()\n axes.set_title(\"Year: '{year}, Day: {day}\".format(year=data.calendarDay[:2], day=data.calendarDay[3:] ))\n axes.set_ylim([np.min(data.magdata)-1.2,np.max(data.magdata)+0.25])\n axes.set_ylabel(r'$\\mathbf{B}$ (nT)' )\n\n #plot formatting\n formats = dates.DateFormatter('%H:%M:%S')\n axes.xaxis.set_major_locator(dates.MinuteLocator())\n axes.xaxis.set_major_formatter(formats)\n \n br, = pp.plot(dates.date2num(data.timestamps),[row[0] for row in data.magdata],label='$B_r$')\n bt, = pp.plot(dates.date2num(data.timestamps),[row[1] for row in data.magdata],label='$B_t$')\n bn, = pp.plot(dates.date2num(data.timestamps),[row[2] for row in data.magdata],label='$B_n$')\n b0, = pp.plot(dates.date2num(data.timestamps),[row[3] for row in data.magdata],label='$B_0$')\n print len(data.detections.rotationBoundary)\n if len(data.detections.rotationBoundary) == 1:\n rotation, = pp.plot([dates.date2num(data.detections.rotationBoundary), dates.date2num(data.detections.rotationBoundary)], [np.min(data.magdata)-1,np.max(data.magdata)+0.25], linestyle='--', color = 'm', alpha = 0.4, label='$RB$')\n else:\n for index, value in enumerate(data.detections.rotationBoundary):\n rotation, = pp.plot([dates.date2num(value), dates.date2num(value)], [np.min(data.magdata)-1,np.max(data.magdata)+0.25], linestyle='--', color = 'm', alpha = 0.4, label='$RB$')\n if len(data.detections.rotationBoundary) != 0:\n pp.legend(handles=[br,bt,bn,b0,rotation], loc='lower left', ncol=4, fancybox=True, framealpha=0.5)\n else:\n pp.legend(handles=[br,bt,bn,b0], loc='lower left', ncol=4, fancybox=True, framealpha=0.5)\n\n start, end = axes.get_xlim()\n axes.xaxis.set_ticks(np.arange(start, end, (end-start)/5))\n \n \n\n plt.subplot(212)\n axes2 = plt.gca()\n #plot formatting\n formats = dates.DateFormatter('%H:%M:%S')\n axes2.xaxis.set_major_locator(dates.MinuteLocator())\n axes2.xaxis.set_major_formatter(formats)\n axes2.set_ylabel(r'$\\theta$ (deg)' )\n rotations, = pp.plot(dates.date2num(data.detections.rotationTimeTags),data.detections.rotations)\n #pp.legend(handles=[rotations], loc='lower left', ncol=4, fancybox=True, framealpha=0.5)\n \n\n outplotname = 'Plot ' + str(len(os.listdir(destination+addendum)) + 1)+ ' ' + data.timestamps[0].strftime('%y-%j-%H%M%S') + '.pdf'\n completename = os.path.join(destination+addendum,outplotname)\n plt.savefig(completename, bboxinches='tight')\n plt.clf()\n\n outplotname = 'Plot ' + str(len(os.listdir(destination+'rawdata\\\\'+addendum)) + 1)+ ' ' + data.timestamps[0].strftime('%y-%j-%H%M%S') + ' rawdata.csv'\n completename1 = os.path.join(destination+'rawdata\\\\'+addendum,outplotname)\n generateDataFile(data.rawdata,completename1)\n\n print \"Done generating plot...\"",
"def plot(data, title='Figure', legends=None, axis_x=None, axis_y=None, file_path=None, file_name=None,\n figure_size=(16, 9), has_grid=True, limits_axis_y=None, upper_lower_data=None, limits_axis_x=None,\n verbose=True):\n\n plots = []\n colors = ['steelblue', 'indianred', 'red', 'cyan', 'magenta', 'yellow', 'black', 'gray', 'sienna',\n 'tan', 'plum', 'steelblue', 'lavenderblush', 'pink', 'navajowhite', 'darkorange',\n 'darkslateblue', 'blueviolet', 'slategray', 'indianred', 'olive', 'darksalmon',\n 'blue', 'green', 'red', 'cyan', 'magenta', 'yellow', 'black', 'gray', 'sienna',\n 'tan', 'plum', 'steelblue', 'lavenderblush', 'pink', 'navajowhite', 'darkorange',\n 'darkslateblue', 'blueviolet', 'slategray', 'indianred', 'olive', 'darksalmon',\n 'blue', 'green', 'red', 'cyan', 'magenta', 'yellow', 'black', 'gray', 'sienna',\n 'tan', 'plum', 'steelblue', 'lavenderblush', 'pink', 'navajowhite', 'darkorange',\n 'darkslateblue', 'blueviolet', 'slategray', 'indianred', 'olive', 'darksalmon',\n 'blue', 'green', 'red', 'cyan', 'magenta', 'yellow', 'black', 'gray', 'sienna',\n 'tan', 'plum', 'steelblue', 'lavenderblush', 'pink', 'navajowhite', 'darkorange',\n 'darkslateblue', 'blueviolet', 'slategray', 'indianred', 'olive', 'darksalmon',\n 'blue', 'green', 'red', 'cyan', 'magenta', 'yellow', 'black', 'gray', 'sienna',\n 'tan', 'plum', 'steelblue', 'lavenderblush', 'pink', 'navajowhite', 'darkorange',\n 'darkslateblue', 'blueviolet', 'slategray', 'indianred', 'olive', 'darksalmon',\n 'blue', 'green', 'red', 'cyan', 'magenta', 'yellow', 'black', 'gray', 'sienna',\n 'tan', 'plum', 'steelblue', 'lavenderblush', 'pink', 'navajowhite', 'darkorange',\n 'darkslateblue', 'blueviolet', 'slategray', 'indianred', 'olive', 'darksalmon',\n 'blue', 'green', 'red', 'cyan', 'magenta', 'yellow', 'black', 'gray', 'sienna',\n 'tan', 'plum', 'steelblue', 'lavenderblush', 'pink', 'navajowhite', 'darkorange',\n 'darkslateblue', 'blueviolet', 'slategray', 'indianred', 'olive', 'darksalmon',\n 'blue', 'green', 'red', 'cyan', 'magenta', 'yellow', 'black', 'gray', 'sienna',\n 'tan', 'plum', 'steelblue', 'lavenderblush', 'pink', 'navajowhite', 'darkorange',\n 'darkslateblue', 'blueviolet', 'slategray', 'indianred', 'olive', 'darksalmon',\n 'blue', 'green', 'red', 'cyan', 'magenta', 'yellow', 'black', 'gray', 'sienna',\n 'tan', 'plum', 'steelblue', 'lavenderblush', 'pink', 'navajowhite', 'darkorange',\n 'darkslateblue', 'blueviolet', 'slategray', 'indianred', 'olive', 'darksalmon',\n 'blue', 'green', 'red', 'cyan', 'magenta', 'yellow', 'black', 'gray', 'sienna',\n 'tan', 'plum', 'steelblue', 'lavenderblush', 'pink', 'navajowhite', 'darkorange',\n 'darkslateblue', 'blueviolet', 'slategray', 'indianred', 'olive', 'darksalmon',\n 'blue', 'green', 'red', 'cyan', 'magenta', 'yellow', 'black', 'gray', 'sienna',\n 'tan', 'plum', 'steelblue', 'lavenderblush', 'pink', 'navajowhite', 'darkorange',\n 'darkslateblue', 'blueviolet', 'slategray', 'indianred', 'olive', 'darksalmon']\n\n plt.rcParams['figure.figsize'] = figure_size\n plt.title(title)\n plt.grid(has_grid)\n\n if not (axis_x is None):\n plt.xlabel(axis_x)\n if not (axis_y is None):\n plt.ylabel(axis_y)\n\n for d in range(len(data)):\n current_fig, = plt.plot(data[d][0], data[d][1], color=colors[d])\n if not (upper_lower_data is None):\n plt.fill_between(data[d][0], np.array(upper_lower_data[d][0], dtype=float),\n np.array(upper_lower_data[d][1], dtype=float),\n where=np.array(upper_lower_data[d][0], dtype=float) > np.array(upper_lower_data[d][1],\n dtype=float), alpha=0.5,\n 
interpolate=True)\n\n plots.append(current_fig)\n\n if not (legends is None):\n plt.legend(plots, legends)\n\n if not (limits_axis_y is None):\n plt.ylim(limits_axis_y[:2])\n plt.yticks(np.arange(limits_axis_y[0], limits_axis_y[1] + limits_axis_y[2], limits_axis_y[2]))\n\n if not (limits_axis_x is None):\n plt.xlim(limits_axis_x[:2])\n plt.xticks(np.arange(limits_axis_x[0], limits_axis_x[1] + limits_axis_x[2], limits_axis_x[2]))\n\n if (file_name is None) or (file_path is None):\n plt.show()\n else:\n full_path = path.join(file_path, file_name)\n if not path.isdir(file_path):\n makedirs(file_path)\n plt.savefig(full_path, format='svg')\n plt.close()\n if verbose:\n print('Figure saved at %s successfully.' % full_path)",
"def plot_waveforms(data, name, title, directory_name):\n plt.figure(figsize=(20, 10))\n plt.plot(data)\n plt.title(title)\n plt.savefig('./' + directory_name + '/' + name)\n pass",
"def plot_blank(self):\n self.figure_bmp.SetBitmap(self.controller.plot_blank())",
"def plot_data(self):",
"def plot(self, windowSize='800x600'):\n if not hasattr(self, 'compiled'):\n raise RuntimeError('The object has not compiled yet')\n # create a scrollable window\n _, fm, run = simple_scrollable_window(windowSize)\n count = 0\n img_ref = []\n for key, val in {**self.qubitDict, **self.readoutDict}.items():\n Label(\n fm, text=key + f':{val}', font='Consolas',\n relief='solid', borderwidth=1\n ).grid(row=count, column=0, ipadx=5, ipady=5, sticky='news')\n img_data = self.compiled[val].plot(\n allInOne=False, toByteStream=True, showSizeInfo=False,\n size=[20, 4]\n )\n render = ImageTk.PhotoImage(Image.open(img_data))\n img_ref += [render]\n img = Label(fm, image=render, borderwidth=1, relief='solid')\n img.grid(row=count, column=1, ipadx=5, ipady=5, sticky='news')\n img.image = render\n count += 1\n run()",
"def draw_plot(self, plot_name, file_name, num_of_tests):\n plt.axis([0, num_of_tests, 0, 100])\n plt.title(plot_name)\n plt.xlabel(\"Číslo testu\")\n plt.ylabel(\"Přesnost (%)\")\n plt.legend()\n path = os.getcwd()+file_name+\".png\"\n os.makedirs(os.path.dirname(path), exist_ok=True)\n plt.savefig(path)\n plt.clf()",
"def save_plot_as_image(self):\r\n plt.savefig(ROOT_DIR + '/presentation/images/' + self.folder + '/' + self.generated_image_name + '.png',\r\n bbox_inches='tight')",
"def plot12(self, dataset, ts_string_indices, source_jpg_folder='jpg_images', extension='jpg', rows=3, cols=4,\n outfname='Sample Frames.png', cmap=None, gui_color='green'):\n # Settings ############################################################\n font_label_box = {\n 'color': 'green',\n 'size': 16,\n }\n font_steering = {'family': 'monospace',\n # 'color': 'darkred',\n 'weight': 'normal',\n 'size': 20,\n }\n ROWS = rows\n COLS = cols\n NUM_IMAGES = ROWS * COLS\n\n # Figure ##############################################################\n # figsize = [width, height]\n fig = plt.figure(figsize=PAPER_A3_LAND, facecolor='white')\n fig.suptitle(\"Sample frames, Dataset: {}\".format(dataset.data_folder), fontsize=20)\n\n for i, ts_string_index in enumerate(ts_string_indices):\n rec = dataset.df.loc[ts_string_index]\n\n timestamp_string = rec['datetime'].strftime(\"%D %H:%M:%S.\") + \"{:.2}\".format(\n str(rec['datetime'].microsecond))\n\n if 'steering_pred_signal' in dataset.df.columns:\n this_label = \"{}\\n{:0.2f}/{:0.2f} steering \\n{:0.2f} throttle\".format(timestamp_string,\n rec['steering_signal'],\n rec['steering_pred_signal'],\n rec['throttle_signal'])\n else:\n this_label = \"{}\\n{:0.2f}/ steering \\n{:0.2f} throttle\".format(timestamp_string, rec['steering_signal'],\n rec['throttle_signal'])\n\n ax = fig.add_subplot(ROWS, COLS, i + 1)\n\n # Main Image ##########################################################\n jpg_path = os.path.join(dataset.path_dataset, source_jpg_folder, ts_string_index + '.' + extension)\n assert os.path.exists(jpg_path), \"{} does not exist\".format(jpg_path)\n img = mpl.image.imread(jpg_path)\n ax.imshow(img, cmap=cmap)\n # plt.title(str_label)\n\n # Data box ########################################################\n\n # ax.axes.get_xaxis().set_visible(False)\n # ax.axes.get_yaxis().set_visible(False)\n t = ax.text(5, 25, this_label, color=gui_color, alpha=1)\n # t = plt.text(0.5, 0.5, 'text', transform=ax.transAxes, fontsize=30)\n t.set_bbox(dict(facecolor='white', alpha=0.7, edgecolor='none'))\n\n # Steering widget HUD #################################################\n # Steering HUD: Actual steering signal\n steer_actual = ''.join(['|' if v else '-' for v in dataset.linear_bin(rec['steering_signal'])])\n text_steer = ax.text(80, 105, steer_actual, fontdict=font_steering, horizontalalignment='center',\n verticalalignment='center', color=gui_color)\n # Steering HUD: Predicted steering angle\n if 'steering_pred_signal' in dataset.df.columns:\n steer_pred = ''.join(['◈' if v else ' ' for v in dataset.linear_bin(rec['steering_pred_signal'])])\n text_steer_pred = ax.text(80, 95, steer_pred, fontdict=font_steering, horizontalalignment='center',\n verticalalignment='center', color='red')\n\n outpath = os.path.join(dataset.path_dataset, outfname)\n fig.savefig(outpath)\n logging.debug(\"Wrote Sample Frames figure to {}\".format(outpath))",
"def plot(self):\n # Get data\n #print(self.file_name)\n fig, ax = plb.subplots(1,1,figsize=(18,20))\n for key,value in self.testTrend.items():\n x = np.arange(len(self.data_array))\n y = np.asarray(value)\n plb.plot(x,y, label=key)\n ax.scatter(x, y)\n for i in range(0, len(value)):\n ax.annotate(str(i), (x[i], y[i]))\n # Title\n plb.title(self.file_name)\n # Legend\n plb.legend(bbox_to_anchor=(.05, 1), loc='best', borderaxespad=0.)\n # x ticks\n plb.xticks(np.arange(min(x), max(x) + 1, 2.0))\n #plb.ylim(-250, 1)\n # Show image\n plb.show()",
"def test_plot_save_figure(self):\n pname = os.path.join(\n self.datadir,\n 'monol_testA_E3-50_pds_rebin1.03' + HEN_FILE_EXTENSION)\n hen.plot.main([pname, '--noplot', '--figname',\n os.path.join(self.datadir,\n 'monol_testA_E3-50_pds_rebin1.03.png'),\n '-o', 'dummy.qdp'])",
"def create_preview(name):\n file_type = os.path.splitext(name)[1]\n\n fig = Figure()\n axis = fig.add_subplot(1, 1, 1)\n\n dir = os.path.dirname(os.path.realpath(__file__))\n file = open(dir+'/instances/'+name)\n if file_type == '.csv':\n\n for nodeNo,line in enumerate(file): #enumerate used to obtain line numbers and thus node numbers\n coords = line.rsplit()[0].split(\",\")\n\n x = int(coords[0])\n y = int(coords[1])\n axis.scatter(x, y, c = 'b', label = nodeNo)\n axis.set_title(name)\n axis.text(x+5,y+5, str(nodeNo))\n else:\n file.readline()\n file.readline()\n file.readline()\n no_nodes = int(file.readline().strip().split()[1])\n file.readline()\n file.readline()\n file.readline()\n\n for i in range(0, no_nodes):\n\n coords = file.readline().strip().split()[1:]\n x = float(coords[0])\n y = float(coords[1])\n axis.scatter(x, y, c = 'b', label = i)\n axis.set_title(name)\n axis.text(x,y, str(i))\n\n return fig",
"def plot_and_save_2d(file_name, path_name, raw_data_file, show=False):\n print '-'*23+'PLOT (2d)'+'-'*24\n \n print 'Loading data...',\n data = load_file(path_name+file_name)\n t = data['t']\n \n pic_path = path_name+'pics/'\n if not os.path.exists(pic_path):\n os.makedirs(pic_path)\n print 'done'\n print 'Creating and saving plots...', \n\n # Moment.\n plt.figure(1)\n plt.plot(t, data['dyn']['M'], t, data['static']['M'])\n plt.legend([\"Dynamic\", \"Static\"])\n plt.xlabel('t')\n plt.ylabel('M')\n plt.title('Moment')\n plt.grid()\n plt.savefig('%sM.png' %pic_path)\n\n # Axial force.\n plt.figure(2)\n plt.plot(t, data['dyn']['FY'], t, data['static']['FY'])\n plt.legend([\"Dynamic\", \"Static\"])\n plt.xlabel('t')\n plt.ylabel('Fa')\n plt.title('Fa')\n plt.grid()\n plt.savefig('%sFa.png' %pic_path)\n\n # Transverse force.\n plt.figure(3)\n plt.plot(t, data['dyn']['FZ'], t, data['static']['FZ'])\n plt.legend([\"Dynamic\", \"Static\"])\n plt.xlabel('t')\n plt.ylabel('Ft')\n plt.title('Ft')\n plt.grid()\n plt.savefig('%sFt.png' %pic_path)\n\n # Resultant force.\n plt.figure(4)\n plt.plot(t, np.sqrt(data['dyn']['FY']**2+data['dyn']['FZ']**2),\n t, np.sqrt(data['static']['FY']**2+data['static']['FZ']**2))\n plt.legend([\"Dynamic\", \"Static\"])\n plt.xlabel('t')\n plt.ylabel('Fr')\n plt.title('Fr')\n plt.grid()\n plt.savefig('%sFr.png' %pic_path)\n print 'done'\n\n if show:\n plt.show()",
"def save_fig(ax_data, file_name):\n with open(file_name,'wb') as fid:\n pickle.dump(ax_data, fid)",
"def plotXY(xName,xDataRaw,yName, yDataRaw):\n scanFileHolder = getScanFileHolderXY(xName,xDataRaw,yName, yDataRaw) \n scanFileHolder.plot(xName, yName)\n return scanFileHolder",
"def add_plot(self, img_path, width):\n shutil.copy(img_path, f'{ReportGenerator.TEMP_FOLDER}/{hash(img_path)}.png')\n plot_template = self.templateEnv.get_template(f'{ReportGenerator.COMPONENTS_FOLDER}/plot.html')\n plot_output = plot_template.render(img_path=f'{hash(img_path)}.png', style=f\"'width:{width};'\")\n self.contents.append(plot_output)",
"def show_picture(self, data):\n raise NotImplementedError",
"def making_plot(sample_points_x_y_nonZero, gauge_volume, y_upper_imit, y_lower_limit,\n sample_height=10, sample_width=5., min_color=None, max_color = None):\n if sample_points_x_y_nonZero.size==0:\n print \"the array does not have a non zero gauge volume\"\n\n\n else:\n\n xS, yS=sample_points_x_y_nonZero\n X,Y= np.meshgrid(xS,yS)\n\n gauge_volume=np.array(gauge_volume)\n\n Z = griddata((xS,yS), gauge_volume, (X,Y), method='nearest')\n\n plt.figure()\n # r=plt.contour( X, Y,Z)\n # plt.clabel(r, inline=1, fontsize=10)\n plt.pcolormesh(X, Y, Z, cmap = plt.get_cmap('rainbow'),vmin=min_color, vmax=max_color )\n plt.xlabel('points along sample width (mm)')\n plt.ylabel('points along sample height (mm)')\n plt.ylim(y_lower_limit,y_upper_imit)\n plt.colorbar()\n plt.axhline(y=-sample_height/2., color='r', linestyle='-')\n plt.axhline(y=sample_height/2., color='r', linestyle='-')\n plt.axvline(x=- sample_width/2., color='r', linestyle='-')\n plt.axvline(x= sample_width/2., color='r', linestyle='-')\n # plt.scatter(xS,yS ,marker = 'o', c = 'b', s = 5, zorder = 10)\n plt.savefig(os.path.join(thisdir, '../figures/{sample}.png'.format(sample='gauge_volume')))\n plt.show()",
"def render_plot(filename):\n \n file = f\"{app.config['FILE_UPLOADS']}/{filename}\"\n df = read_dataset(file)\n plot_json = generate_dataset_JSON(df)\n\n min = str(round(df['y'].min(),2))\n max = str(round(df['y'].max(),2))\n mean = str(round(df['y'].mean(),2))\n std = str(round(df['y'].std(),2))\n \n return render_template('render_plot.html', plot_json=plot_json, filename=filename, min=min, max=max, mean=mean, std=std)",
"def real_time_plot(files):\n global len_data, first_iter, colors\n\n for i,F in enumerate(files):\n\n # Load data\n data = pylab.loadtxt(F, delimiter=',', skiprows=1, usecols=(5,6,7))\n\n # Check if new data\n if (len_data!= len(data[:,0])):\n\n # Plot\n label = ntpath.basename(F)\n label = label[0:-4]\n ax.plot(data[:,0], data[:,1], data[:,2], colors[i], label=label)\n\n pyplot.draw()\n\n # Update globals\n len_data = len(data[:,0])\n\n if (first_iter == True):\n ax.legend()\n first_iter = False",
"def show_plot_in_new_figure(data, ylim=(-0.3, 0.3),\n to_save=False, fname=\"extractor_test_results/result.png\"):\n \n plt.figure(figsize = (30,10))\n plt.ylim(ylim)\n plt.plot(list(data), 'b', lw=1)\n plt.grid()\n if show_plots: \n plt.show()\n \n if to_save:\n plt.savefig(fname)",
"def heatmap(filename, data):\n\n fig, ax = ppl.subplots(1)\n ppl.pcolormesh(fig, ax, data, vmin=-0.0016, vmax=0.0016)\n fig.savefig(filename + \".png\")",
"def render(self, chart):\n chart.create_visualization_files(self.__outputpath)",
"def _plotData(self, identity, src, plotFile, outputFile, overlay):\n # determine the max y-values for best overlay placement\n maxima = []\n acceleration = plt.figure()\n\n if self.multiplot:\n plots = len(self.graph[0])\n else:\n # create only one plot\n ax = acceleration.add_subplot(111)\n plots = 1\n\n if identity == 'accel':\n source = self.graph[0]\n identityLong = 'Acceleration'\n elif identity == 'gyro':\n source = self.graph[1]\n identityLong = 'Gyroscope'\n\n for i, accel in enumerate(source):\n j = i + 1 if self.multiplot else 1\n ax = acceleration.add_subplot(plots,1,j)\n ax.set_title('{} of {}'.format(identityLong, plotFile))\n ax.plot(\n src['x']\n , src[accel]\n , color=self.colors[i]\n , label=accel)\n if overlay:\n # place overlay arrows at 1/3 of y-max\n # yOverlay = 0.3 * max(maxima)\n yOverlay = 3 if identity == 'accel' else 200\n legend = []\n for gs in overlay:\n ax.annotate(''\n , xy=(gs['start'],yOverlay)\n , xycoords='data'\n , xytext=(gs['end'],yOverlay)\n , textcoords='data'\n , horizontalalignment='bottom'\n , verticalalignment='center'\n , arrowprops=dict(\n arrowstyle=\"<|-|>\"\n , fc=\"white\"\n , ec=\"black\"\n , alpha=0.5\n )\n ).set_alpha(0.5)\n legend.append(gs['id']+\": \"+gs['start']+\"-\"+gs['end'])\n\n at = AnchoredText(\"Gold Standard:\\n\"+\"\\n\".join(legend),\n prop=dict(size=10), loc=4)\n at.patch.set_boxstyle(\"round\")\n at.patch.set_alpha(0.5)\n ax.add_artist(at)\n\n ax.legend(\n loc=1\n , fontsize=10\n , fancybox=True).get_frame().set_alpha(0.5)\n # plt.show()\n plt.savefig(outputFile, format='svg')",
"def plot(self, plotType):\n # Build plotting data\n self.data_x_axis = []\n self.data_y_axis = []\n for i in range(0, self.csv_data_table.rowCount()):\n value = self.csv_data_table.item(i, self.selected_columns[0]).text()\n self.data_x_axis.append(value)\n value = self.csv_data_table.item(i, self.selected_columns[1]).text()\n self.data_y_axis.append(value)\n\n self.label_x_axis = self.csv_data_table.horizontalHeaderItem(self.selected_columns[0]).text()\n self.label_y_axis = self.csv_data_table.horizontalHeaderItem(self.selected_columns[1]).text()\n\n # Avoid duplication of resources if already allocated\n if self.figure is None:\n self.figure = plt.figure()\n self.canvas = FigureCanvas(self.figure)\n\n # self.plot_frame_horizontal.addStretch()\n self.plot_frame_horizontal.addWidget(self.canvas)\n # self.plot_frame_horizontal.addStretch()\n\n # Ensures only 2 tabs at max are open at a time - file and plot tabs respectively\n if self.tabWidget.count() == 1:\n self.tabWidget.insertTab(1, self.plot_page_tab, \"Plot\")\n\n self.tabWidget.setCurrentIndex(1)\n\n self.plotType = plotType\n\n try:\n for i in range(0, len(self.data_x_axis)):\n if self.data_x_axis[i] == '':\n self.data_x_axis[i] = 0\n if self.data_y_axis[i] == '':\n self.data_y_axis[i] = 0\n\n self.data_x_axis[i] = self.strToNumber(self.data_x_axis[i])\n self.data_y_axis[i] = self.strToNumber(self.data_y_axis[i])\n\n self.data_x_axis = np.array(self.data_x_axis)\n self.data_y_axis = np.array(self.data_y_axis)\n\n print(self.data_x_axis)\n print(self.data_y_axis)\n\n except:\n pass\n print(\"In generic plotting\")\n\n self.drawPlot(self.data_x_axis, self.data_y_axis, self.label_x_axis, self.label_y_axis)",
"def plot_to_file(file_prefix=None, file_suffix=None, **kwargs) -> str:\n file_prefix = file_prefix or 'plot'\n file_suffix = file_suffix or '.png'\n path = tempfile.mktemp(prefix='%s-' % file_prefix, suffix=file_suffix)\n plt.savefig(path, **kwargs)\n plt.close() # Else plt.show() happens automatically [sometimes: with plt.* but not with plotnine...]\n return path",
"def _plot_rawdata(self):\n fig, ax = plt.subplots(1, 1)\n ax.imshow(self.data, origin='top', extent=(0., 360., -90., 90.))\n ax.set_title('Driscoll Healy Grid')\n ax.set_xlabel('longitude')\n ax.set_ylabel('latitude')\n fig.tight_layout(pad=0.5)\n return fig,ax",
"def plot_and_save(data, prefix, name):\n plt.figure()\n plt.hist(data)\n plt.title(name)\n plt.xlabel(\"Value\")\n plt.ylabel(\"Frequency\")\n plt.savefig(prefix + name + '.png')\n plt.close()",
"def gen_plot(fpr, tpr):\n plt.figure()\n plt.xlabel(\"FPR\", fontsize=14)\n plt.ylabel(\"TPR\", fontsize=14)\n plt.title(\"ROC Curve\", fontsize=14)\n plot = plt.plot(fpr, tpr, linewidth=2)\n buf = io.BytesIO()\n plt.savefig(buf, format='jpeg')\n buf.seek(0)\n plt.close()\n # plt.show()\n return buf",
"def setup_figure(self):\n \n # connect ui widgets to measurement/hardware settings or functions\n self.ui.start_pushButton.clicked.connect(self.start)\n self.ui.interrupt_pushButton.clicked.connect(self.interrupt)\n self.settings.save_h5.connect_to_widget(self.ui.save_h5_checkBox)\n self.settings.save_movie.connect_to_widget(self.ui.save_movie_checkBox)\n \n # Set up pyqtgraph graph_layout in the UI\n self.graph_layout=pg.GraphicsLayoutWidget()\n self.ui.plot_groupBox.layout().addWidget(self.graph_layout)\n \n self.aux_graph_layout=pg.GraphicsLayoutWidget()\n self.ui.aux_plot_groupBox.layout().addWidget(self.aux_graph_layout)\n \n self.camera_layout=pg.GraphicsLayoutWidget()\n self.ui.camera_groupBox.layout().addWidget(self.camera_layout)\n\n # Create PlotItem object (a set of axes) \n \n self.plot1 = self.graph_layout.addPlot(row=1,col=1,title=\"Lick\")\n self.plot2 = self.graph_layout.addPlot(row=2,col=1,title=\"breathing\")\n\n # Create PlotDataItem object ( a scatter plot on the axes )\n self.breathing_plot = self.plot2.plot([0])\n self.lick_plot_0 = self.plot1.plot([0])\n self.lick_plot_1 = self.plot1.plot([1]) \n \n self.lick_plot_0.setPen('y')\n self.lick_plot_1.setPen('g')\n \n self.T=np.linspace(0,10,10000)\n self.k=0\n \n self.camera_view=pg.ViewBox()\n self.camera_layout.addItem(self.camera_view)\n self.camera_image=pg.ImageItem()\n self.camera_view.addItem(self.camera_image)",
"def createPlot(self, plotData=None, **kwargs):\n\t\treturn super().createPlot(plotData=plotData, **kwargs)",
"def __init__(self, dataset, training_data):\n super().__init__()\n self.threadpool = QThreadPool()\n self.setFixedSize(1000, 600)\n self.center()\n self.setWindowTitle('GA practice') \n self.show()\n #read the map and training data\n self.map_datalist = dataset.keys()\n self.map_data = dataset \n self.training_datalist = training_data.keys()\n self.training_data = training_data\n\n #creat file choosing area\n self.file_run_creation(self.map_datalist, self.training_datalist)\n \n self.operation_parameter_creation()\n self.ouput_text_creation()\n hbox = QHBoxLayout()\n vbox = QVBoxLayout()\n vbox.addWidget(self.file_run)\n vbox.addWidget(self.operation_type)\n vbox.addWidget(self.text_group_box)\n hbox.addLayout(vbox)\n self.m = PlotCanvas(self.map_data)\n hbox.addWidget(self.m)\n self.setLayout(hbox)",
"def create_image(image_data, title, figure_name):\n #print(figure_name)\n #plt.figure()\n fig_ax = plt.gca()\n image_data = np.array(image_data)\n #image_data[image_data1800] = np.min(image_data)\n #image_data = np.abs(image_data)\n image = fig_ax.imshow(image_data[0:1028, :], cmap='nipy_spectral',\n origin='lower', interpolation='none')\n #image = fig_ax.imshow(np.array(image_data), cmap='nipy_spectral',\n #origin='lower', interpolation='none')\n plt.title(title)\n divider = make_axes_locatable(fig_ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n plt.colorbar(image, cax=cax)\n plt.grid(False)\n plt.savefig(figure_name, dpi=100, bbox_inches=\"tight\")\n #plt.show()\n #plt.pause(0.1)\n #plt.show()\n# plt.draw()\n# plt.pause(0.001)\n# print('OK, Move forward')\n #plt.show(block=False)\n plt.close('all')",
"def plot(self, csvDataset = None):\n for item in self.data_array:\n item.plot()\n # If csvDataset is not None, plots also the file\n csvDataset.plot(sampleName=item.file_name)",
"def handle_as_data_url(view: View, point: int, ext: str, encoded: str):\n\n # create a temporary file\n tmp_file = osp.join(TEMP_DIR, \"tmp_data_image.\" + ext)\n file_hash = int(hashlib.sha1(encoded.encode('utf-8')\n ).hexdigest(), 16) % (10 ** 8)\n name = str(file_hash) + \".\" + ext\n\n # Save downloaded data in the temporary file\n try:\n dst = open(tmp_file, \"wb\")\n dst.write(base64.b64decode(encoded))\n except Exception as e:\n print(e)\n return\n finally:\n dst.close()\n\n real_width, real_height, size = get_image_size(tmp_file)\n width, height = get_dimensions(view, tmp_file)\n size = str(size // 1024) + \"KB\" if size >= 1024 else str(size) + 'B'\n\n def on_navigate(href):\n\n if href == \"save\":\n save(tmp_file, name, \"data_url\")\n elif href == \"save_as\":\n convert(tmp_file, \"data_url\", name)\n else:\n sublime.active_window().open_file(tmp_file)\n\n view.show_popup(\n TEMPLATE % (width, height, ext, encoded, real_width, real_height, size),\n sublime.HIDE_ON_MOUSE_MOVE_AWAY,\n point,\n *view.viewport_extent(),\n on_navigate=on_navigate\n )",
"def RefreshThumbnail(self):\n if not self.property:\n self.bmp = None\n return\n\n path = self.property.DoGetValue()\n\n if not os.path.isfile(path):\n self.bmp = None\n return\n\n image = wx.Image(path)\n image.Rescale(64, 64)\n self.bmp = wx.BitmapFromImage(image)",
"def save_plot(data, title, fname, format='png', cb = True,vmax=None,vmin=None):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n im = ax.imshow(data,interpolation='nearest',vmax=vmax,vmin=vmin)\n if cb == True: \n plt.colorbar(im)\n ax.set_title(title)\n plt.savefig(fname+'.'+format,dpi=100)\n plt.close(fig)",
"def _display_window(self, axes, filename, extra=None):\n\n fn = os.path.join(self._data_directory, filename)\n data = self._rgb2plot(fits.open(fn)[1].data)\n\n # Plot the hubble data in a subplot\n axes.clear()\n axes.imshow(data, cmap=plt.gray(), origin='upper')\n axes.set_xticks([]) \n axes.set_yticks([])\n clow, chigh = np.percentile(data[np.isfinite(data)], (1, 99))\n axes.get_images()[0].set_clim((clow, chigh))\n\n # Display the filename as the title of the axes\n tt = filename.split('/')[-1]\n if extra:\n tt += ' ' + str(extra)\n axes.set_title(tt)\n plt.figure(1).canvas.draw()",
"def create_graphic(X):\n plt.close('all')\n plt.figure(figsize=(12,6))\n sns.set(style='darkgrid', palette='bright')\n for i,j in enumerate(X): \n plt.subplot(2, 3, (i+1))\n plt.text(X[j], 0, X[j], color='black')\n plt.axvline(x=X[j], linestyle='--', c='red')\n sns.distplot(data[j].dropna(), bins=30, kde=False)\n plt.tight_layout()\n img = io.BytesIO()\n plt.savefig(img, format='png')\n img.seek(0)\n graph_url = base64.b64encode(img.getvalue()).decode()\n graph = 'data:image/png;base64,{}'.format(graph_url)\n return graph",
"def loadAndPlot1DMassData(dataFile='movingPointMassData/testPointMassData000.pkl'):\n # Load the data back\n inputDataFile = open(dataFile, \"rb\")\n dataOut = pickle.load(inputDataFile)\n inputDataFile.close()\n # Iterate over the different saved trajectores and plot out the results.\n for i in range(len(dataOut[0])):\n plt.figure(i)\n plt.plot(dataOut[0][i][1],dataOut[0][i][0])\n plt.show()",
"def cb_save(event):\n fig.savefig('sample.univariate_discrete.py.png', dpi=300, format='png', transparent=True)",
"def plot(self, data, mode='image', **kwargs):\n self.config = self.get_defaults(mode)\n self.config.update(kwargs)\n\n for layer_index, layer_data in enumerate(data):\n layer_config = self.config.maybe_index(layer_index)\n layer = Layer(self, mode=mode, index=layer_index, config=layer_config, data=layer_data)\n self.layers.append(layer)\n\n annotations = self.annotate(mode)\n self.annotations.update(annotations)",
"def make_figure(self, traces):\n pass",
"def plot_and_save_3d(file_name, path_name, raw_data_file, show=False):\n print '-'*23+'PLOT (3d)'+'-'*24\n \n print 'Loading force data...', \n data = load_file(path_name+file_name)\n t = data['t']\n dyn = 1.0\n \n pic_path = path_name+'pics/'\n if not os.path.exists(pic_path):\n os.makedirs(pic_path)\n print 'done'\n print 'Creating and saving plots...', \n\n # x-moment\n plt.figure(1)\n plt.plot(t, dyn*data['dyn']['MX'], t, data['static']['MX'])\n plt.legend([\"Dynamic\", \"Static\"])\n plt.xlabel('t')\n plt.ylabel('Mx')\n plt.title('Moment (x)')\n plt.grid()\n plt.savefig('%sMx.png' %pic_path)\n\n # y-moment\n plt.figure(2)\n plt.plot(t, dyn*data['dyn']['MY'], t, data['static']['MY'])\n plt.legend([\"Dynamic\", \"Static\"])\n plt.xlabel('t')\n plt.ylabel('M')\n plt.title('Moment (y)')\n plt.grid()\n plt.savefig('%sMy.png' %pic_path)\n\n # z-moment\n plt.figure(3)\n plt.plot(t, dyn*data['dyn']['MZ'], t, data['static']['MZ'])\n plt.legend([\"Dynamic\", \"Static\"])\n plt.xlabel('t')\n plt.ylabel('Mz')\n plt.title('Moment (z)')\n plt.grid()\n plt.savefig('%sMz.png' %pic_path)\n \n # x-force\n plt.figure(4)\n plt.plot(t, dyn*data['dyn']['FX'], t, data['static']['FX'])\n plt.legend([\"Dynamic\", \"Static\"])\n plt.xlabel('t')\n plt.ylabel('Fx')\n plt.title('Fx')\n plt.grid()\n plt.savefig('%sFx.png' %pic_path)\n\n # y-force\n plt.figure(5)\n plt.plot(t, dyn*data['dyn']['FY'], t, data['static']['FY'])\n plt.legend([\"Dynamic\", \"Static\"])\n plt.xlabel('t')\n plt.ylabel('Fy')\n plt.title('Fy')\n plt.grid()\n plt.savefig('%sFy.png' %pic_path)\n\n # z-force\n plt.figure(6)\n plt.plot(t, dyn*data['dyn']['FZ'], t, data['static']['FZ'])\n plt.legend([\"Dynamic\", \"Static\"])\n plt.xlabel('t')\n plt.ylabel('Fz')\n plt.title('Fz')\n plt.grid()\n plt.savefig('%sFz.png' %pic_path)\n print 'done'\n\n #nice_looking_plots(t, data['dyn'], data['static'])\n\n if show:\n plt.show()",
"def fn_intensitytrace(file_name,folder,data,time,i,x,y):\n import numpy as np\n import matplotlib.pyplot as plt\n\n\n figure_name=file_name+'_intensity_trace'\n for a in range(0,len(data),int(len(data)/2)):\n if i==a:\n x_coord=x[i+1]\n y_coord=y[i+1]\n max_int=np.max(data[i])\n min_int=np.min(data[i])\n #norm_int = [b / max_int for b in data[i]]\n plt.figure()\n #plt.plot(time[0:len(time)-1],norm_int,'g')\n plt.plot(time[0:len(time)-1],data[i],'g')\n plt.xlim(0, 100)\n plt.ylim(min_int, (max_int+100))\n plt.xlabel('Time (s)', fontname='Arial', fontsize=12)\n plt.ylabel('Photon counts (photons)', fontname='Arial', fontsize=12)\n plt.xticks(fontname='Arial',fontsize=12)\n plt.yticks(fontname='Arial', fontsize=12)\n plt.savefig(folder+'/Figures/PDFs'+ '/' + figure_name + '_'+str(x_coord)+','+str(y_coord)+'.pdf', dpi=500)\n plt.savefig(folder+'/Figures/PNGs'+ '/' + figure_name + '_'+str(x_coord)+','+str(y_coord)+'.png', dpi=500)\n\n return (plt.show())",
"def save_plot(self):\r\n\t\t# Generate the plot\r\n\t\tself.generate_plot()\r\n\t\t# Create save directory\r\n\t\tdirectory = self.dir + '/%s/' % str(int(self.universe.init_time))\r\n\t\tif not path_exists(directory):\r\n\t\t\tmakedirs(directory)\r\n\t\t# Save image file\r\n\t\tself.fig.savefig(directory+str(self.universe.time))",
"def plot_preview_png():\n name = request.args.get('prev_instance')\n name = str(name)\n fig = create_preview(name)\n output = io.BytesIO()\n FigureCanvas(fig).print_png(output)\n return Response(output.getvalue(), mimetype='image/png')",
"def write_graph( self ):\n\n #If we are on a logarithmic scale, set the lowest order of magnitude\n if getattr(self,'log_yaxis',False):\n self.ax.set_ylim(ymin=10**float(find_info('log_ymin',self.kw,self.metadata,-1)))\n\n kw = self.kw\n file = self.file\n canvas = self.canvas\n if 'svg' in kw.keys():\n svg = kw['svg']\n else:\n svg = False\n canvas.draw() # **kw )\n if svg:\n renderer = RendererSVG(prefs[width], prefs[height], file)\n canvas.figure.draw(renderer)\n renderer.finalize()\n else:\n size = canvas.get_renderer().get_canvas_width_height()\n # Hack: for some unknown reason in py27 this call is returning floats.\n # Convert it to int coordinates so that PIL doesnt complain.\n size = (int(size[0]),int(size[1]))\n buf = canvas.tostring_argb()\n im = PILImage.fromstring('RGBA', size, buf, 'raw', 'RGBA', 0, 1)\n\n # We must realign the color bands, as matplotlib outputs\n # ARGB and PIL uses RGBA.\n a, r, g, b = im.split()\n im = PILImage.merge( 'RGBA', (r, g, b, a) )\n im.save( file, format = 'PNG' )",
"def visualize(base_path, test_dataset, plot_dir, batch_size=4, ):\n device = torch.device('cuda')\n dataset = HeadDataset(test_dataset,\n base_path,\n dataset_param={},\n train=False)\n batch_iterator = iter(data.DataLoader(dataset, batch_size,\n shuffle=False,\n num_workers=4,\n collate_fn=coco_collate))\n for ind, (images, targets) in enumerate(tqdm(batch_iterator)):\n images = list(img.to(device) for img in images)\n np_images = [(ims.cpu().numpy()*255.).astype(np.uint8) for ims in images]\n gt_boxes = [gt['boxes'].numpy().astype(np.float64) for gt in targets]\n for np_im, gt_box in zip(np_images, gt_boxes):\n plot_images = plot_ims(np_im, [], gt_box)\n imsave(osp.join(plot_dir, str(ind) + '.jpg'), plot_images)",
"def custom_preview_png():\n fig = Figure()\n axis = fig.add_subplot(1, 1, 1)\n coords = request.args.get('custom_coords')\n coords = str(coords)\n coords = coords.split(':')\n coords.pop(len(coords)-1)\n for i in coords:\n i = i.strip(\"()\")\n vals = i.split(\",\")\n x = float(vals[0])\n y = float(vals[1])\n axis.text(x,y, str(i))\n axis.scatter(x, y, c = 'b', label = str(i))\n\n output = io.BytesIO()\n FigureCanvas(fig).print_png(output)\n return Response(output.getvalue(), mimetype='image/png')",
"def render(self):\r\n super().render()\r\n layers, titles, lat, lon = self.make_layers()\r\n plots = []\r\n for i in range(len(layers)):\r\n p = figure(\r\n tools=self.tools, \r\n toolbar_location=self.toolbarLocation, \r\n plot_width=self.width, \r\n plot_height=self.height,\r\n x_range=(np.min(lon), np.max(lon)),\r\n y_range=(np.min(lat), np.max(lat)),\r\n title=titles[i]\r\n )\r\n p.xaxis.axis_label = self.xlabel\r\n p.yaxis.axis_label = self.ylabel\r\n colorMapper = LinearColorMapper(palette=self.cmap, low=self.vmin, high=self.vmax)\r\n p.image(\r\n image=[layers[i]], \r\n color_mapper=colorMapper, \r\n x=np.min(lon), \r\n y=np.min(lat), \r\n dw=np.max(lon)-np.min(lon), \r\n dh=np.max(lat)-np.min(lat)\r\n )\r\n\r\n p.add_tools(HoverTool(\r\n tooltips=[\r\n ('longitude', '$x'),\r\n ('latitude', '$y'),\r\n (self.variable + self.unit, '@image'),\r\n ],\r\n mode='mouse'\r\n )\r\n )\r\n\r\n colorBar = ColorBar(\r\n color_mapper=colorMapper, \r\n ticker=BasicTicker(),\r\n label_standoff=12, \r\n border_line_color=None, \r\n location=(0,0)\r\n )\r\n\r\n p.add_layout(colorBar, 'right')\r\n plots.append(p)\r\n \r\n \r\n if not inline(): output_file(get_figure_dir() + self.variable + \".html\", title=self.variable) \r\n show(column(plots))",
"def plot_files(plot_file_name, files):\n curve_names, metric_sets, set_of_number_of_embeddings = _read_result_pickle(files)\n\n _plot_curves(plot_file_name, curve_names, metric_sets, set_of_number_of_embeddings)",
"def plot_file(filename, params):\n\tarr = None\n\twith open(filename) as filep:\n\t\tarr = json.load(filep)\n\tplot_data(arr, params)",
"def save_figure(self, data, name, rects=None):\n\t\tsizes = np.shape(data)\n\t\tfig = plt.figure()\n\t\tfig.set_size_inches(1, 1. * sizes[0]/sizes[1], forward = False)\n\t\tax = plt.Axes(fig, [0., 0., 1., 1.])\n\t\tax.set_axis_off()\n\t\tfig.add_axes(ax)\n\t\tax.imshow(data, self.HEATMAP)\n\n\t\tif rects:\n\t\t\tfor r in rects:\n\t\t\t\tax.add_patch(r)\n\n\t\tplotfile = os.path.join('static', name + '_' + str(time.time()) + '.png')\n\t\t#plt.show()\n\t\tplt.savefig(plotfile, dpi = sizes[1])\n\n\t\treturn plotfile",
"def Plot(self):\n\n ### Create the path names ###\n folder_string = self.params.folder+\"/plots/\"\n u_string = self.params.folder+\"/plots/u.pdf\"\n p_string = self.params.folder+\"/plots/p.pdf\"\n\n ### Check if folder exists ###\n if not os.path.exists(folder_string): os.makedirs(folder_string)\n\n ### Plot the x component of velocity ###\n plot(self.u_next[0],title=\"Velocity in the x Direction\")\n plt.savefig(u_string)\n plt.figure()\n\n ### Plot the pressure ###\n plot(self.p_next,title=\"Pressure\")\n plt.savefig(p_string)\n plt.show()",
"def gen_plot(fpr, tpr):\n plt.figure()\n plt.xlabel(\"FPR\", fontsize=14)\n plt.ylabel(\"TPR\", fontsize=14)\n plt.title(\"ROC Curve\", fontsize=14)\n plot = plt.plot(fpr, tpr, linewidth=2)\n buf = io.BytesIO()\n plt.savefig(buf, format='jpeg')\n buf.seek(0)\n plt.close()\n return buf",
"def updateplot(self):\n plotfiles = []\n try:\n self.plotter.reset()\n self.plotter.set_xrange(self.xrangemin.value(), self.xrangemax.value())\n self.plotter.set_yrange(self.yrangemin.value(), self.yrangemax.value())\n self.plotter.set_bgirange(self.bgintmin.value(), self.bgintmax.value())\n self.plotter.set_pkrange(self.halphamin.value(), self.halphamax.value())\n for n,pf in enumerate(self.selecteddata):\n tf = os.path.join(self.tempdir, \"tf%d\" % n)\n self.dfparser.writefile(tf, pf)\n plotfiles.append(tf)\n self.plotter.set_plot(plotfiles)\n except datafile.Datafile_error as e:\n self.warningmsg.setText(e.args[0])\n self.plotter.clear()\n except plotter.Plotter_error as e:\n self.warningmsg.setText(e.args[0])\n self.plotter.clear()",
"def generate_plots(type_, data, name, plots_location):\n plt.cla()\n plt.clf()\n plot_type = getattr(sns, type_)\n plot_ = plot_type(data)\n fig = plot_.get_figure()\n fig.savefig('{}/{}_{}.png'.format(plots_location, name, type_))",
"def save_plot(p, file_name, path='../static/images/'):\n p.output_backend = \"svg\"\n export_svgs(p, filename=path + file_name + '.svg')",
"def disImg(data=None,colorbar=False):\n size = np.sqrt(len(data[4:]))\n xmm = data[0]\n ymm = data[1]\n pl.matshow(data[4:].reshape(size,size),fignum=False)\n if colorbar == True:\n pl.colorbar()\n pl.xlim(0,size-1)\n pl.ylim(0,size-1)\n pl.xlabel('Pixels')\n pl.ylabel('Pixels')\n pl.grid(color='yellow')",
"def plotFigures(self, path):\n self.width = 500\n self.height = 400\n\n if self.pathIds[path] == 0:\n self.lineFigurePath = path\n self.lineFigure.load(path)\n self.lineFigure = self.checkFigureSize(self.lineFigure)\n self.lineFigureScene.addPixmap(QtGui.QPixmap.fromImage( self.lineFigure))\n x, y = self.getWidgetPos(self.displayLineFigure)\n w, h = self.getWidgetDims(self.lineFigure)\n self.displayLineFigure.setGeometry(QtCore.QRect(x,y,w,h))\n self.displayLineFigure.fitInView(self.displayLineFigure.sceneRect() , self.ratioOption)\n elif self.pathIds[path] == 1:\n self.barFigurePath = path\n self.barFigure.load(path)\n self.barFigureScene.addPixmap(QtGui.QPixmap.fromImage(self.barFigure))\n self.barFigure = self.checkFigureSize(self.barFigure)\n\n x, y = self.getWidgetPos(self.displayBarFigure)\n w, h = self.getWidgetDims(self.barFigure)\n self.displayBarFigure.setGeometry(QtCore.QRect(x,y,w,h))\n self.displayBarFigure.fitInView(self.barFigureScene.sceneRect(), self.ratioOption)\n else:\n if self.barFigurePath is None:\n self.barFigurePath = path\n self.barFigure.load(path)\n self.barFigure = self.checkFigureSize(self.barFigure)\n self.barFigureScene.addPixmap(QtGui.QPixmap.fromImage( self.barFigure))\n x, y = self.getWidgetPos(self.displayBarFigure)\n w, h = self.getWidgetDims(self.barFigure)\n self.displayBarFigure.setGeometry(QtCore.QRect(x,y,w,h))\n self.displayBarFigure.fitInView(self.barFigureScene.sceneRect() , self.ratioOption)\n elif self.lineFigurePath is None:\n self.lineFigurePath = path\n self.lineFigure.load(path)\n self.lineFigure = self.checkFigureSize(self.lineFigure)\n self.lineFigureScene.addPixmap(QtGui.QPixmap.fromImage( self.lineFigure))\n x, y = self.getWidgetPos(self.displayLineFigure)\n w, h = self.getWidgetDims(self.lineFigure)\n self.displayLineFigure.setGeometry(QtCore.QRect(x,y,w,h))\n self.displayLineFigure.fitInView(self.lineFigureScene.sceneRect (), self.ratioOption)",
"def __init__(self, data, pixscale = 7.77/43):\n self.data = data\n self.pixscale = pixscale",
"def plot(\r\n self,\r\n file_name: str = \"index\",\r\n path: str = \"./\",\r\n template: str = \"default\",\r\n notebook_height: int = 500,\r\n ):\r\n self.notebook_height = notebook_height\r\n\r\n script_path = os.path.dirname(os.path.abspath(__file__))\r\n if template in [\"default\", \"reaction_smiles\", \"smiles\", \"url_image\"]:\r\n template = \"template_\" + template + \".j2\"\r\n else:\r\n script_path = os.path.dirname(template)\r\n\r\n html_path = os.path.join(path, file_name + \".html\")\r\n js_path = os.path.join(path, file_name + \".js\")\r\n jenv = jinja2.Environment(loader=jinja2.FileSystemLoader(script_path))\r\n\r\n has_legend = False\r\n\r\n for _, value in self.scatters.items():\r\n if value[\"has_legend\"]:\r\n has_legend = True\r\n break\r\n\r\n if not self.show_legend:\r\n has_legend = False\r\n\r\n # Drop colormaps before passing them to the document, as they are\r\n # not JSON serializable.\r\n trees_copy = copy.deepcopy(self.trees)\r\n scatters_copy = copy.deepcopy(self.scatters)\r\n\r\n for key, _ in trees_copy.items():\r\n del trees_copy[key][\"colormap\"]\r\n\r\n for key, _ in scatters_copy.items():\r\n del scatters_copy[key][\"colormap\"]\r\n\r\n model = {\r\n \"title\": self.title,\r\n \"file_name\": file_name + \".js\",\r\n \"clear_color\": self.clear_color,\r\n \"view\": self.view,\r\n \"coords\": str(self.coords).lower(),\r\n \"coords_color\": self.coords_color,\r\n \"coords_box\": str(self.coords_box).lower(),\r\n \"coords_ticks\": str(self.coords_ticks).lower(),\r\n \"coords_grid\": str(self.coords_grid).lower(),\r\n \"coords_tick_count\": self.coords_tick_count,\r\n \"coords_tick_length\": self.coords_tick_length,\r\n \"coords_offset\": self.coords_offset,\r\n \"x_title\": self.x_title,\r\n \"y_title\": self.y_title,\r\n \"tree_helpers\": list(trees_copy.values()),\r\n \"point_helpers\": list(scatters_copy.values()),\r\n \"has_legend\": str(has_legend).lower(),\r\n \"legend_title\": self.legend_title,\r\n \"legend_orientation\": self.legend_orientation,\r\n \"alpha_blending\": str(self.alpha_blending).lower(),\r\n \"anti_aliasing\": str(self.anti_aliasing).lower(),\r\n \"style\": self.style,\r\n \"impress\": self.impress,\r\n \"in_notebook\": Faerun.in_notebook(),\r\n \"thumbnail_width\": self.thumbnail_width,\r\n \"thumbnail_fixed\": str(self.thumbnail_fixed).lower(),\r\n }\r\n\r\n if Faerun.in_notebook():\r\n model[\"data\"] = self.create_data()\r\n else:\r\n with open(js_path, \"w\") as f:\r\n f.write(self.create_data())\r\n\r\n output_text = jenv.get_template(template).render(model)\r\n\r\n with open(html_path, \"w\") as result_file:\r\n result_file.write(output_text)\r\n\r\n if Faerun.in_notebook():\r\n display(IFrame(html_path, width=\"100%\", height=self.notebook_height))\r\n display(FileLink(html_path))",
"def _init_plot(self) -> None:\n\n # create a grayscale plot\n out = sys.stdout\n sys.stdout = open(\"/dev/null\", \"w\")\n hdu = self.image_generator.image(self.ra, self.dec)\n self.plot = aplpy.FITSFigure(hdu)\n self.plot.show_grayscale()\n self.plot.set_theme(\"publication\")\n sys.stdout = out\n\n # label for the position angle\n pa_string = \"PA = %.1f\" % self.mode_details.position_angle().to_value(u.deg)\n if self.mode_details.automated_position_angle():\n pa_string += \" (auto)\"\n self.draw_label(0.95, -0.05, pa_string, style=\"italic\", weight=\"bold\")\n\n # label for the title\n if self.title:\n self.draw_label(\n 0.5, 1.03, self.title, style=\"italic\", weight=\"bold\", size=\"large\"\n )\n\n # label for the image source\n self.draw_label(\n -0.05,\n -0.05,\n \"%s\" % self.image_generator.source(),\n style=\"italic\",\n weight=\"bold\",\n )\n\n # grid overlay\n self.plot.add_grid()\n self.plot.grid.set_alpha(0.2)\n self.plot.grid.set_color(\"b\")\n\n # indicate the RSS field of view\n self.draw_circle(self.ra, self.dec, 4.0 * u.arcmin, \"g\")\n self.draw_label(\n 0.79,\n 0.79,\n \"RSS\",\n style=\"italic\",\n weight=\"bold\",\n size=\"large\",\n horizontalalignment=\"left\",\n color=(0, 0, 1),\n )\n\n # indicate the Salticam field of view\n self.draw_circle(self.ra, self.dec, 5.0 * u.arcmin, \"g\")\n self.draw_label(\n 0.86,\n 0.86,\n \"SCAM\",\n style=\"italic\",\n weight=\"bold\",\n size=\"large\",\n horizontalalignment=\"left\",\n color=(0, 0, 1),\n )\n\n # labels for north and east direction\n self.draw_label(\n self.ra,\n self.dec + 4.8 * u.arcmin,\n \"N\",\n style=\"italic\",\n weight=\"bold\",\n size=\"large\",\n color=(0, 0.5, 1),\n )\n self.draw_label(\n self.ra + 4.8 * u.arcmin / np.abs(np.cos(self.dec)),\n self.dec,\n \"E\",\n style=\"italic\",\n weight=\"bold\",\n size=\"large\",\n horizontalalignment=\"right\",\n color=(0, 0.5, 1),\n )\n\n # add cross hairs\n self.draw_centered_line(\n 0 * u.deg,\n 8 * u.arcmin,\n self.ra,\n self.dec,\n color=\"g\",\n linewidth=0.5,\n alpha=1.0,\n )\n self.draw_centered_line(\n 90 * u.deg,\n 8 * u.arcmin,\n self.ra,\n self.dec,\n color=\"g\",\n linewidth=0.5,\n alpha=1.0,\n )\n\n # label for the magnitude range and bandpass\n if self.magnitude_range:\n self._show_magnitudes()\n\n # add mode specific content\n if not self.basic_annotations:\n self.mode_details.annotate_finder_chart(self)",
"def plt_to_img(dummy: any = None, **kwargs) -> PIL.Image.Image:\n return PIL.Image.open(plot_to_file(**kwargs))",
"def plot(title, ylabel,data,toprange,span):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n x = [1,2,3]\n nodata = True\n for key in data.keys():\n if len(data[key]) > 0:\n nodata = False\n ax.plot(x[:len(data[key])],data[key],marker='o',label=key)\n if nodata:\n print data\n print \"none\"\n plt.close()\n return\n plt.xlabel(\"Block Number\")\n plt.ylabel(\"Performance\")\n plt.xticks(x)\n plt.legend()\n plt.axis([0,len(x)+1,0,0.1+toprange])\n plt.title(title)\n if not os.path.isdir(\"analysis\"):\n os.mkdir(\"analysis\")\n if not os.path.isdir(\"analysis/\"+span):\n os.mkdir(\"analysis/\"+span)\n plt.savefig(\"analysis/\"+span+\"/\"+title.replace(\" \",\"\").lower()+\".png\")\n plt.close()",
"def plot(figure_or_data, show_link=True, link_text='Export to plot.ly',\n validate=True, output_type='file', include_plotlyjs=True,\n filename='temp-plot.html', auto_open=True, image=None,\n image_filename='plot_image', image_width=800, image_height=600):\n if output_type not in ['div', 'file']:\n raise ValueError(\n \"`output_type` argument must be 'div' or 'file'. \"\n \"You supplied `\" + output_type + \"``\")\n if not filename.endswith('.html') and output_type == 'file':\n warnings.warn(\n \"Your filename `\" + filename + \"` didn't end with .html. \"\n \"Adding .html to the end of your file.\")\n filename += '.html'\n\n config = {}\n config['showLink'] = show_link\n config['linkText'] = link_text\n\n plot_html, plotdivid, width, height = _plot_html(\n figure_or_data, config, validate,\n '100%', '100%', global_requirejs=False)\n\n resize_script = ''\n if width == '100%' or height == '100%':\n resize_script = (\n ''\n '<script type=\"text/javascript\">'\n 'window.removeEventListener(\"resize\");'\n 'window.addEventListener(\"resize\", function(){{'\n 'Plotly.Plots.resize(document.getElementById(\"{id}\"));}});'\n '</script>'\n ).format(id=plotdivid)\n\n if output_type == 'file':\n with open(filename, 'w') as f:\n if include_plotlyjs:\n plotly_js_script = ''.join([\n '<script type=\"text/javascript\">',\n get_plotlyjs(),\n '</script>',\n ])\n else:\n plotly_js_script = ''\n\n if image:\n if image not in __IMAGE_FORMATS:\n raise ValueError('The image parameter must be one of the '\n 'following: {}'.format(__IMAGE_FORMATS))\n # if the check passes then download script is injected.\n # write the download script:\n script = get_image_download_script('plot')\n script = script.format(format=image,\n width=image_width,\n height=image_height,\n filename=image_filename,\n plot_id=plotdivid)\n else:\n script = ''\n\n f.write(''.join([\n '<html>',\n '<head><meta charset=\"utf-8\" /></head>',\n '<body>',\n plotly_js_script,\n plot_html,\n resize_script,\n script,\n '</body>',\n '</html>']))\n\n url = 'file://' + os.path.abspath(filename)\n if auto_open:\n webbrowser.open(url)\n\n return url\n\n elif output_type == 'div':\n if include_plotlyjs:\n return ''.join([\n '<div>',\n '<script type=\"text/javascript\">',\n get_plotlyjs(),\n '</script>',\n plot_html,\n '</div>'\n ])\n else:\n return plot_html",
"def make_setplot1(d):\n setplot = open('setplot.py', 'w')\n setplot.write('\\n\"\"\" ')\n setplot.write(\"\"\"\nSet up the plot figures, axes, and items to be done for each frame.\n\nThis module is imported by the plotting routines and then the\nfunction setplot is called to set the plot parameters.\n \"\"\")\n setplot.write('\\n\"\"\" ')\n setplot.write(\"\"\"\n\n#--------------------------\ndef setplot(plotdata):\n#--------------------------\n \"\"\")\n setplot.write('\\n \"\"\" ')\n setplot.write(\"\"\"\n Specify what is to be plotted at each frame.\n Input: plotdata, an instance of pyclaw.plotters.data.ClawPlotData.\n Output: a modified version of plotdata.\n \"\"\")\n setplot.write('\\n \"\"\" ')\n setplot.write(\"\"\"\n\n plotdata.clearfigures() # clear any old figures,axes,items data\n\n \"\"\")\n\n # create a figure for each component of q:\n\n for iq in range(d.meqn):\n setplot.write(\"\"\"\n\n # Figure for q[%s]\n plotfigure = plotdata.new_plotfigure(name='q[%s]', figno=%s)\n\n # Set up for axes in this figure:\n plotaxes = plotfigure.new_plotaxes()\n plotaxes.xlimits = 'auto'\n plotaxes.ylimits = 'auto'\n plotaxes.title = 'q[%s]'\n\n # Set up for item on these axes:\n plotitem = plotaxes.new_plotitem(plot_type='1d')\n plotitem.plot_var = %s\n plotitem.plotstyle = '-o'\n plotitem.color = 'b'\n plotitem.show = True # show on plot?\n \"\"\" % (iq,iq,iq,iq,iq))\n\n\n setplot.write(\"\"\"\n\n # Parameters used only when creating html and/or latex hardcopy\n # e.g., via pyclaw.plotters.frametools.printframes:\n\n plotdata.printfigs = True # print figures\n plotdata.print_format = 'png' # file format\n plotdata.print_framenos = 'all' # list of frames to print\n plotdata.print_fignos = 'all' # list of figures to print\n plotdata.html = True # create html files of plots?\n plotdata.html_homelink = '../README.html' # pointer for top of index\n plotdata.latex = True # create latex file of plots?\n plotdata.latex_figsperline = 2 # layout of plots\n plotdata.latex_framesperline = 1 # layout of plots\n plotdata.latex_makepdf = False # also run pdflatex?\n\n return plotdata\n\n \"\"\")\n setplot.close()\n print(\"=== Created setplot.py\")\n # end of make_setplot1",
"def buildPlot(self):\r\n style.use('fivethirtyeight')\r\n self.fig = plt.figure()\r\n self.ax1 = self.fig.add_subplot(1,1,1)\r\n self.ax1.clear()\r\n self.ax1.plot(self.inputValInt,self.inputValInt1)",
"def generate_heatmap(data, labels_dict, file_title, plot_title):\n\n fig = plt.figure()\n ax = sn.heatmap(data,\n linewidths=0.3)\n figure = ax.get_figure()\n\n if labels_dict:\n ax.set_xlabel(labels_dict[\"x\"])\n ax.set_ylabel(labels_dict[\"y\"])\n if plot_title:\n ax.set_title(plot_title)\n\n figure.savefig(file_title)",
"def diagram_plugs(data_no,\r\n data_little,\r\n data_means,\r\n data_great,\r\n data_large_enough,\r\n data_super_large,\r\n er_no, er_little,\r\n er_means,\r\n er_great,\r\n er_large_enough,\r\n er_super_large):\r\n\r\n\r\n plt.bar(range(6), [data_no,\r\n data_little,\r\n data_means,\r\n data_great,\r\n data_large_enough,\r\n data_super_large],\r\n width=0.1, color='black',\r\n yerr=[er_no, er_little, er_means,\r\n er_great, er_large_enough,\r\n er_super_large],\r\n ecolor='black', capsize=10)\r\n\r\n\r\n plt.xticks(range(6), ['non', 'petit', 'moyen',\r\n 'grand', 'assez grand', 'tres grand'])\r\n\r\n\r\n plt.ylabel('Taux de pollution en AQI')\r\n plt.title(\"Taux de pollution selon les bouchons\")\r\n\r\n nouveau = new()\r\n print(nouveau)\r\n plt.savefig(nouveau, transparent=True)\r\n plt.clf()\r\n plt.close()\r\n\r\n shutil.move(nouveau, '/app/static/popo')\r\n\r\n return nouveau",
"def plot(data, layout, file_name):\n offline.plot({'data': data,\n 'layout': layout},\n filename='{}-{}_{}-{}.html'.format(file_name,\n todays_day,\n todays_month,\n currency))",
"def generate_plots(path):\n videos = glob(path + '/*.mkv')\n print(path, len(videos), videos)\n\n if len(videos) == 0:\n return\n else:\n videos = videos[0]\n\n metadata_list = glob(path + '/metadata.txt')\n #print(path, len(metadata_list), metadata_list)\n\n if len(metadata_list) == 0:\n return \n\n P = Preprocessor()\n P.import_video(str(videos))\n P.read_metadata(path)\n P.preprocess()\n Im = P.frames_processed\n if len(Im) == 0:\n print(len(Im))\n return\n\n z_start = P.z_start\n z_end = P.z_end\n\n mean, cov = analyze_image(Im)\n\n window_size = 10\n mean_smoothed = smoothing.mean_moving_average(mean, window_size)\n cov_smoothed = smoothing.cov_moving_average(cov, window_size)\n\n c = CubicFitRotated()\n c.fit(mean=mean_smoothed, cov=cov_smoothed, z_start=z_start, z_end=z_end)\n\n try:\n os.mkdir(path + '/analysis')\n path += '/analysis'\n except OSError:\n pass\n\n\n plots.plot_mean(mean, z_start, z_end).savefig(path + '/beam_center.png')\n plots.plot_beta(cov, z_start, z_end).savefig(path + '/sigma_squared.png')\n\n export.export_mean(mean = mean, filename = path + '/center.csv', z_start = z_start, z_end = z_end)\n export.export_cov(cov = cov, filename = path + '/cov.csv', z_start = z_start, z_end = z_end)\n\n plt.close('all')",
"def addFigure(self,fig,xl,yl,scale):\n img = py.image.load(fig)\n w,h = img.get_size()\n img = py.transform.scale(img,(int(w*scale),int(h*scale)))\n self.figures.append(img)\n self.locs.append((xl,yl))",
"def generate_2D_plot(x, y, labels_dict, file_title, plot_title):\n fig = plt.figure()\n plt.plot(x, y)\n\n if labels_dict:\n plt.xlabel(labels_dict[\"x\"])\n plt.ylabel(labels_dict[\"y\"])\n if plot_title:\n plt.title(plot_title)\n\n plt.savefig(file_title)",
"def render(filename,i):\n print('running render')\n A = np.genfromtxt(filename,skip_header=1,dtype=float,delimiter=',')\n img = np.array(A[i,:],copy=True)\n print(img.shape)\n img = img.reshape(28,28)\n img = 255 - img\n print(img.shape)\n plt.imshow(img, cmap=\"gray\", vmin=0, vmax=255)\n plt.savefig(\"img\" + str(i)+\"render\"+ \".png\")",
"def save(file_name):\n setup()\n plt.savefig(file_name)",
"def test_make_plot_custom(self):\n print(sys._getframe().f_code.co_name)\n try:\n x = np.arange(0,6)*300000\n y = np.arange(0,6)\n pp.make_plot(x,y,plot_type='c',plot_title='test',ylabel='test',xlabel='test',xticks=[0,2,4,6],yticks=[0,2,4,6])\n except Exception as e:\n raise\n plt.close('all')",
"def plot_into_img_widget(img_widget, figure):\n buf = cStringIO.StringIO()\n figure.savefig(buf, format='png', dpi=__dpi__)\n img_widget.value = buf.getvalue()\n buf.close()",
"def create_plot(self, options, row=0, column=0, rowspan=1, columnspan=1):\n print \"ImageWidgetMixin [CurveWidgetMixin] . create_plot\"\n self.plot_widget = BaseImageWidget(self, **options)\n self.plot_layout.addWidget(self.plot_widget,\n row, column, rowspan, columnspan)\n \n # Configuring plot manager\n self.add_plot(self.plot_widget.plot)\n self.add_panel(self.plot_widget.itemlist)\n self.add_panel(self.plot_widget.xcsw)\n self.add_panel(self.plot_widget.ycsw)\n self.add_panel(self.plot_widget.contrast)",
"def handle_as_data_url(view: sublime.View, point: int, ext: str, encoded: str):\n\n need_conversion = False\n # TODO: is this the only case ?\n if ext == \"svg+xml\":\n ext = \"svg\"\n need_conversion = True\n\n # create a temporary file\n temp_img = osp.join(TEMP_DIR, \"tmp_data_image.\" + ext)\n basename = str(int(hashlib.sha1(encoded.encode('utf-8')).hexdigest(), 16) % (10 ** 8))\n name = basename + \".\" + ext\n\n # Save downloaded data in the temporary file\n try:\n img = open(temp_img, \"wb\")\n img.write(base64.b64decode(encoded))\n except Exception as e:\n print(e)\n return\n finally:\n img.close()\n\n if need_conversion:\n ext = \".png\"\n\n conv_file = temp_img\n\n temp_png = osp.splitext(temp_img)[0] + \".png\"\n\n magick(temp_img, temp_png)\n\n with open(temp_png, \"rb\") as png:\n encoded = str(base64.b64encode(png.read()), \"utf-8\")\n\n temp_img = temp_png\n\n def on_navigate(href):\n\n if href == \"save\":\n if need_conversion:\n save(conv_file, name, \"data_url\")\n else:\n save(temp_img, name, \"data_url\")\n elif href == \"save_as\":\n if need_conversion:\n convert(conv_file, \"dat_url\", name)\n else:\n convert(temp_img, \"data_url\", name)\n else:\n sublime.active_window().open_file(temp_img)\n\n width, height, real_width, real_height, size = get_data(view, temp_img)\n\n view.show_popup(\n TEMPLATE % (width, height, ext, encoded, real_width, real_height,\n str(size // 1024) + \"KB\" if size >= 1024 else str(size) + 'B'),\n sublime.HIDE_ON_MOUSE_MOVE_AWAY,\n point,\n *view.viewport_extent(),\n on_navigate=on_navigate\n )",
"def __init__(self, plot_dir: str, file_name_prefix: str = '') -> None:\n super().__init__()\n self.plot_dir = plot_dir\n self.file_name_prefix = file_name_prefix\n\n self.resolution = 0",
"def plot(self, plotType):\n # Build plotting data\n self.data_x_axis = []\n self.data_y_axis = []\n for i in range(0, self.csv_data_table.rowCount()):\n value = self.csv_data_table.item(i, self.selected_columns[0]).text()\n self.data_x_axis.append(value)\n value = self.csv_data_table.item(i, self.selected_columns[1]).text()\n self.data_y_axis.append(value)\n\n self.label_x_axis = self.csv_data_table.horizontalHeaderItem(self.selected_columns[0]).text()\n self.label_y_axis = self.csv_data_table.horizontalHeaderItem(self.selected_columns[1]).text()\n\n # Avoid duplication of resources if already allocated\n if self.figure is None:\n self.figure = plt.figure()\n self.canvas = FigureCanvas(self.figure)\n\n # self.plot_frame_horizontal.addStretch()\n self.plot_frame_horizontal.addWidget(self.canvas)\n # self.plot_frame_horizontal.addStretch()\n\n # Ensures only 2 tabs at max are open at a time - file and plot tabs respectively\n if self.tabWidget.count() == 1:\n self.tabWidget.insertTab(1, self.plot_page_tab, \"Plot\")\n\n self.tabWidget.setCurrentIndex(1)\n\n # Set plot type (1,2,3 => order according to scatter, scatter-line, line)\n self.plotType = plotType\n\n # Convert the data to np arrays if it is purely numerical\n try:\n for i in range(0, len(self.data_x_axis)):\n if self.data_x_axis[i] == '':\n self.data_x_axis[i] = 0\n if self.data_y_axis[i] == '':\n self.data_y_axis[i] = 0\n\n self.data_x_axis[i] = self.coerce_str_to_number(self.data_x_axis[i])\n self.data_y_axis[i] = self.coerce_str_to_number(self.data_y_axis[i])\n\n self.data_x_axis = np.array(self.data_x_axis)\n self.data_y_axis = np.array(self.data_y_axis)\n\n print(self.data_x_axis)\n print(self.data_y_axis)\n\n print(\"In specialized plotting\")\n\n except:\n pass\n # Dont attempt the conversion, directly plot\n print(\"In generic plotting\")\n\n self.draw_plot(self.data_x_axis, self.data_y_axis, self.label_x_axis, self.label_y_axis)",
"def makeDataset(numberOfTrials, data_type):\n\n\tdata_folder = data_type + \"_images\"\n\tlabel_file = os.path.join(dataset_params.data_path, data_type + \"_lables.csv\")\n\n\tutils.create_directory(dataset_params.data_path)\n\tutils.create_directory(os.path.join(dataset_params.data_path, data_folder))\n\n\tallowedRadius = utils.defineShapePerimeter()\n\tcolorsRGB = utils.defineColorValues()\n\tshapeDict = utils.defineShapeSides()\n\tpadding = dataset_params.padding\n\n\tnum = 0\n\toutput_images = [[\"figNum\", \"shape\", \"color\", \"size\", \"background\", \"quadrant\", \"radius\"]]\n\tfor c in dataset_params.colors: # for all 7 foreground colors \n\t\tfor q in dataset_params.quadrants: # for all 4 quadratns \n\t\t\tfor s in dataset_params.shapes: # for all 5 shapes\n\t\t\t\tfor k in dataset_params.sizes: # for all 3 sizes\n\t\t\t\t\tfor b in dataset_params.backgrounds: # for all 3 background colors\n\t\t\t\t\t\tfor i in range(numberOfTrials):\n\t\t\t\t\t\t\tfileName = os.path.join(dataset_params.data_path, data_folder, str(num) + \".png\")\n\t\t\t\t\t\t\tpresentQuadrant = dataset_params.quadrants[q]\n\t\t\t\t\t\t\tradius = random.randint(allowedRadius[s][k][0],allowedRadius[s][k][1])\n\n\t\t\t\t\t\t\tif(presentQuadrant == 3):\n\t\t\t\t\t\t\t\txMin = 128 + padding\n\t\t\t\t\t\t\t\txMax = 255 - radius\n\t\t\t\t\t\t\t\tyMin = 128 + padding\n\t\t\t\t\t\t\t\tyMax = 255 - radius\n\n\t\t\t\t\t\t\telif(presentQuadrant == 2):\n\t\t\t\t\t\t\t\txMin = 0 + radius\n\t\t\t\t\t\t\t\txMax = 128 - padding\n\t\t\t\t\t\t\t\tyMin = 128 + padding\n\t\t\t\t\t\t\t\tyMax = 255 - radius\n\n\t\t\t\t\t\t\telif(presentQuadrant == 1):\n\t\t\t\t\t\t\t\txMin = 0 + radius\n\t\t\t\t\t\t\t\txMax = 128 - padding\n\t\t\t\t\t\t\t\tyMin = 0 + radius\n\t\t\t\t\t\t\t\tyMax = 128 - padding\n\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\txMin = 128 + padding\n\t\t\t\t\t\t\t\txMax = 255 - radius\n\t\t\t\t\t\t\t\tyMin = 0 + radius\n\t\t\t\t\t\t\t\tyMax = 128 - padding\n\n\t\t\t\t\t\t\txCenter = random.randint(xMin, xMax)\n\t\t\t\t\t\t\tyCenter = random.randint(yMin, yMax)\n\t\t\t\t\t\t\tcenter = [xCenter, yCenter]\n\n\t\t\t\t\t\t\tif(s == \"circle\"):\n\t\t\t\t\t\t\t\toutput_images.append([num, \"circle\", c, k, b, presentQuadrant, radius])\n\t\t\t\t\t\t\t\timg = makeCircle(c, radius, center, b, colorsRGB)\n\t\t\t\t\t\t\t\timg = img[:,:,::-1]\n\t\t\t\t\t\t\t\tcv2.imwrite(fileName, img)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tn = shapeDict[s]\n\t\t\t\t\t\t\t\timg = makePolygon(center, n, radius, b, c, colorsRGB)\n\t\t\t\t\t\t\t\timg = img[:,:,::-1]\n\t\t\t\t\t\t\t\tcv2.imwrite(fileName, img)\n\t\t\t\t\t\t\t\toutput_images.append([num, s, c, k, b, presentQuadrant, radius])\n\t\t\t\t\t\t\tnum += 1\n\t\n\tprint(\"Number of image generated\", num)\n\n\tprint(\"Saving \" + data_type + \" data meta information to CSV ......\")\n\tdf = pd.DataFrame(output_images[1:], columns=output_images[0])\n\tdf.to_csv(label_file, index=False)\n\tprint(\"Saved \" + data_type + \" data meta information: \" + data_folder)\n\t\n\n\tprint(\"Saving \" + data_type + \" images data to npz(numpy) compressed file ......\")\n\tmake_npz_file(data_type)\n\tprint(\"Saved \" + data_type + \" images data to npz(numpy) compressed file!\")\n\t\n\treturn None",
"def gen_plot(fpr, tpr):\n plt.figure()\n plt.xlabel(\"FPR\", fontsize = 14)\n plt.ylabel(\"TPR\", fontsize = 14)\n plt.title(\"ROC Curve\", fontsize = 14)\n plot = plt.plot(fpr, tpr, linewidth = 2)\n buf = io.BytesIO()\n plt.savefig(buf, format = 'jpeg')\n buf.seek(0)\n plt.close()\n\n return buf",
"def gen_plot(fpr, tpr):\n plt.figure()\n plt.xlabel(\"FPR\", fontsize = 14)\n plt.ylabel(\"TPR\", fontsize = 14)\n plt.title(\"ROC Curve\", fontsize = 14)\n plot = plt.plot(fpr, tpr, linewidth = 2)\n buf = io.BytesIO()\n plt.savefig(buf, format = 'jpeg')\n buf.seek(0)\n plt.close()\n\n return buf",
"def draw_pic(data2draw, cmp='Greens'):\n t = ['news_yr', 'news_yr + 1', 'news_yr + 2', 'news_yr + 3', 'news_yr + 4']\n colormap = plt.cm.get_cmap(str(cmp))\n color = [colormap(i) for i in np.linspace(0, 1, 12)]\n fig, ax1 = plt.subplots(figsize=(9, 6))\n color_count = 0\n for ylab, sim_values in data2draw.items():\n if len(sim_values) == 5:\n color_count += 1\n line = sim_values\n ax1.plot(t, line, color=color[color_count], label=ylab)\n else:\n raise ValueError('The number of similarity scores should be five.')\n ax1.tick_params(axis='y', labelcolor='k')\n plt.tight_layout()\n plt.legend(loc='upper right', prop={'size': 15})\n\n return ax1",
"def plot_graph(self, graphinfo):\n\n WIDTH = 450\n HEIGHT = WIDTH * 0.55\n opts = []\n\n # Generate outfile name\n if not self.rrdfile:\n self.outfiles[graphinfo.name] = self.SKIPPED\n return\n\n logging.info(\"Plotting %s graph for %s\" % (graphinfo.name, self.node))\n self.outfiles[graphinfo.name] = \"%s/%s_%s_%s.png\" % (self.topdir,\n self.file_prefix,\n self.node,\n graphinfo.name)\n opts = opts + [self.outfiles[graphinfo.name]]\n\n # Generate general image options\n opts = opts + [\"--width\", str(WIDTH),\n \"--height\", str(HEIGHT),\n \"--slope-mode\"]\n\n # Generate title\n if graphinfo.title:\n opts = opts + [\"--title\", \"%s (%s)\" % (graphinfo.title, node)]\n\n # Generate X-axis options\n start, end, step = ds.get_time_info()\n duration = end - start\n mg_step = duration / 10\n bg_step = mg_step / 5\n label_step = mg_step\n if mg_step == 0 or bg_step == 0:\n # This is unlikely to happen, but just to be on the safe side.\n x_grid = \"SECOND:1:SECOND:10:SECOND:10:0:%R\"\n else:\n x_grid = \"SECOND:%s:SECOND:%s:SECOND:%s:0:%%R\" % \\\n (bg_step, mg_step, label_step)\n opts = opts + [\"--start\", str(self.start),\n \"--end\", str(self.end),\n \"--step\", str(self.rrdtool_step),\n \"--x-grid\", x_grid]\n\n # Generate Y-axis options\n if graphinfo.y_axis_label:\n opts = opts + [\"--vertical-label\", graphinfo.y_axis_label]\n if graphinfo.y_axis_min_value == 0 or graphinfo.y_axis_min_value:\n opts = opts + [\"--lower-limit\", str(graphinfo.y_axis_min_value)]\n if graphinfo.y_axis_max_value == 0 or graphinfo.y_axis_max_value:\n opts = opts + [\"--upper-limit\", str(graphinfo.y_axis_max_value)]\n if graphinfo.y_axis_rigid:\n opts = opts + [\"--rigid\"]\n\n # Generate metric parameters\n stack_opt = \"\"\n if graphinfo.stack:\n stack_opt = \":STACK\"\n deflist = []\n cdeflist = []\n arealist = []\n for i in graphinfo.metrics:\n name, name_in_graph, unit_in_graph, color = i\n if unit_in_graph:\n new_unit, rate = unit_in_graph\n newname = \"%s_%s\" % (name, new_unit)\n deflist.append(\"DEF:%s=%s:%s:AVERAGE\" %\n (name, self.rrdfile, name))\n cdeflist.append(\"CDEF:%s=%s,%s,/\" %\n (newname, name, rate))\n arealist.append(\"AREA:%s%s:%s%s\" %\n (newname, color, name_in_graph, stack_opt))\n else:\n deflist.append(\"DEF:%s=%s:%s:AVERAGE\" %\n (name, self.rrdfile, name))\n arealist.append(\"AREA:%s%s:%s%s\" %\n (name, color, name_in_graph, stack_opt))\n opts = opts + deflist + cdeflist + arealist\n\n self.rrdtool_cmd(\"graph\", opts, log_level=logging.DEBUG)",
"def save_image(self):\n # open the dialog and get the selected file\n file_one = QtGui.QFileDialog.getSaveFileName(\n self,\n QtGui.QApplication.translate(\n \"MainWindow\", \n \"Supported formats:\", \n None, \n QtGui.QApplication.UnicodeUTF8)+\n \" emf, eps, pdf, png, ps, raw, rgba, svg, svgz\", \n \"\",\n \"Enhanced MetaFile \\t.emf (*.emf);; \"+\n \"Encapsulated PostScript \\t.eps (*.eps);; \"+\n \"Portable Document Format \\t.pdf (*.pdf);; \"+\n \"Portable Network Graphics \\t.png (*.png);;\"+\n \"PostScript \\t.ps (*.ps);; \"+\n \"RAW image file \\t.raw (*.raw);; \"+\n \"Red Green Blue Alpha \\t.rgba (*.rgba);; \"+\n \"Scalable Vector Graphics \\t.svg (*.svg);; \"+\n \"Scalable Vector Graphics compressed \\t.svgz (*.svgz)\")\n # if a file is selected\n if file_one:\n # update the lineEdit text with the selected filename\n self.textBrowser.append(\n QtGui.QApplication.translate(\n \"MainWindow\", \n \"Saving to \", \n None, \n QtGui.QApplication.UnicodeUTF8\n )+file_one)\n\n plt.clf()\n # adding different plots if required\n if self.checkBox_R.isChecked():\n lines = plt.plot(self.graf_t, self.graf_r)\n plt.setp(lines[0], \n lw=1, \n label=str(\n QtGui.QApplication.translate(\n \"MainWindow\", \n \"Reference\", \n None, \n QtGui.QApplication.UnicodeUTF8)), \n color = 'blue')\n \n if self.checkBox_U.isChecked():\n lines = plt.plot(self.graf_t, self.graf_u)\n plt.setp(lines[0], \n lw=2, \n label=str(\n QtGui.QApplication.translate(\n \"MainWindow\", \n \"Input\", \n None, \n QtGui.QApplication.UnicodeUTF8)), \n color='red')\n \n if self.checkBox_x1.isChecked():\n lines = plt.plot(self.graf_t, self.graf_x1)\n plt.setp(lines[0], \n lw=1, \n label=str(\n QtGui.QApplication.translate(\n \"MainWindow\", \n \"First Int.\", \n None, \n QtGui.QApplication.UnicodeUTF8)), \n color='grey')\n \n if self.checkBox_x0.isChecked():\n lines = plt.plot(self.graf_t, self.graf_x0)\n plt.setp(lines[0], \n lw=2, \n label=str(\n QtGui.QApplication.translate(\n \"MainWindow\", \n \"Second Int.\", \n None, \n QtGui.QApplication.UnicodeUTF8)), \n color='orange')\n\n plt.axis([self.graf_t[0], self.graf_t[-1], -3, 3])\n \n plt.title(str(\n QtGui.QApplication.translate(\n \"MainWindow\", \n \"Double Integrator\", \n None, \n QtGui.QApplication.UnicodeUTF8)))\n \n plt.suptitle(str(\n QtGui.QApplication.translate(\n \"MainWindow\", \n \"Networked and Embedded Control Systems:\", \n None, \n QtGui.QApplication.UnicodeUTF8)))\n \n plt.xlabel(str(\n QtGui.QApplication.translate(\n \"MainWindow\", \n \"t (s)\", \n None, \n QtGui.QApplication.UnicodeUTF8)))\n \n plt.ylabel(str(\n QtGui.QApplication.translate(\n \"MainWindow\", \n \"Voltage (V)\", \n None, \n QtGui.QApplication.UnicodeUTF8)))\n \n plt.legend(bbox_to_anchor=(0, 1, 1, 0), loc=1, ncol=6, mode=\"expand\", borderaxespad=0.)\n \n filename = str(file_one)# + '.svg'\n try:\n plt.savefig(filename, dpi=300)\n except ValueError as e:\n print(e)\n self.textBrowser.append(str(e))\n self.textBrowser.append(\n QtGui.QApplication.translate(\n \"MainWindow\", \n \"Trying to save as .svg\", \n None, \n QtGui.QApplication.UnicodeUTF8))\n \n filename = str(file_one) + '.svg'\n plt.savefig(filename, dpi=300)\n pass\n except ImportError as e:\n print(e)\n self.textBrowser.append(str(e))\n self.textBrowser.append(\n QtGui.QApplication.translate(\n \"MainWindow\", \n \"Trying to save as .svg\", \n None, \n QtGui.QApplication.UnicodeUTF8))\n \n filename = str(file_one) + '.svg'\n plt.savefig(filename, dpi=300)\n pass\n except:\n self.textBrowser.append(\"Error al guardar la 
imatge\")\n \n plt.clf()",
"def scatter(filename, data, lines=[]):\n import matplotlib.pyplot as plot\n plot.figure(random.randint(0, 10000000))\n plot.scatter(data[0], data[1], 20, 'b', 'o')\n plot.title(filename.split('.')[0])\n for line in lines:\n plot.plot([line[0], line[2]], [line[1], line[3]], '-')\n plot.savefig(filename)",
"def plotData(data, xLabel, yLabel, plotTitle, save=False, saveName=None):\n fig, ax = plt.subplots()\n ax.plot(data)\n ax.set(xlabel=xLabel, ylabel=yLabel,\n title=plotTitle)\n ax.grid()\n if save:\n if saveName is not None:\n fig.savefig(saveName)\n else:\n fig.savefig(\"figure\")\n plt.show()",
"def show_data_files(self):\n for idx in self.plot_data:\n self.plot_data[idx].show()",
"def mplimage(request):\n\n fig = Figure()\n buffer = io.BytesIO()\n\n ax = fig.add_subplot(111)\n x = np.arange(-2,1.5,.01)\n y = np.sin(np.exp(2*x))\n ax.plot(x, y)\n\n canvas = FigureCanvas(fig)\n\n # Store image in a string buffer\n canvas.print_png(buffer)\n\n response = HttpResponse(buffer.getvalue(), content_type='image/png')\n # I recommend to add Content-Length for Django\n response['Content-Length'] = str(len(response.content))\n\n # if required clear the figure for reuse\n fig.clear()\n\n return response"
] | [
"0.6426788",
"0.63435924",
"0.6265439",
"0.62604624",
"0.62195396",
"0.62031156",
"0.6172043",
"0.613385",
"0.61045796",
"0.60914683",
"0.60585433",
"0.6013517",
"0.5999623",
"0.5993755",
"0.5926441",
"0.5907435",
"0.58943826",
"0.58767754",
"0.5841144",
"0.5835282",
"0.5827787",
"0.5816113",
"0.5791753",
"0.57793593",
"0.5775896",
"0.57556903",
"0.57362264",
"0.5727105",
"0.5723166",
"0.57099086",
"0.5705483",
"0.5689757",
"0.5687561",
"0.5673505",
"0.5654661",
"0.5653618",
"0.5651122",
"0.5635057",
"0.5632946",
"0.5632047",
"0.56208575",
"0.56108737",
"0.56091094",
"0.56084996",
"0.5604577",
"0.560386",
"0.5602429",
"0.5587369",
"0.5583726",
"0.5580632",
"0.55709153",
"0.5570365",
"0.556973",
"0.55687",
"0.5563591",
"0.5556603",
"0.555607",
"0.5555184",
"0.5554766",
"0.555447",
"0.55497074",
"0.5542511",
"0.55374104",
"0.5535068",
"0.5531663",
"0.5528226",
"0.5524468",
"0.5521703",
"0.55146474",
"0.5510746",
"0.55106735",
"0.55105233",
"0.55075794",
"0.5506393",
"0.54994494",
"0.5490774",
"0.5490149",
"0.5490077",
"0.5489714",
"0.548577",
"0.5483684",
"0.5483241",
"0.5474614",
"0.54715306",
"0.54693",
"0.54675174",
"0.54674304",
"0.5465027",
"0.5461434",
"0.54613024",
"0.5458096",
"0.5457563",
"0.5457563",
"0.54478395",
"0.54464823",
"0.5445472",
"0.54443675",
"0.54322624",
"0.54299706",
"0.5424721"
] | 0.7840498 | 0 |
Sets the ThumbnailPanel's bitmap to a placeholder bitmap when thumbnails are disabled | def plot_blank(self):
self.figure_bmp.SetBitmap(self.controller.plot_blank()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def MakeDisabledBitmap(original):\r\n \r\n img = original.ConvertToImage()\r\n return wx.BitmapFromImage(img.ConvertToGreyscale())",
"def SetDisabledBitmap(self, bmp):\r\n \r\n self.disabled_bitmap = bmp",
"def RefreshThumbnail(self):\n if not self.property:\n self.bmp = None\n return\n\n path = self.property.DoGetValue()\n\n if not os.path.isfile(path):\n self.bmp = None\n return\n\n image = wx.Image(path)\n image.Rescale(64, 64)\n self.bmp = wx.BitmapFromImage(image)",
"def plot_thumb(self, data_fname):\n thumbnail = self.controller.plot_thumb(data_fname, self.bitmap_width, self.bitmap_height)\n if thumbnail is not None:\n self.figure_bmp.SetBitmap(thumbnail)\n else:\n self.plot_blank()",
"def _create_placeholder(self, thumbnail_size):\n logger.debug(\"Creating placeholder. thumbnail_size: %s\", thumbnail_size)\n placeholder = Image.new(\"RGB\", (thumbnail_size, thumbnail_size))\n draw = ImageDraw.Draw(placeholder)\n draw.rectangle(((0, 0), (thumbnail_size, thumbnail_size)), outline=\"#E5E5E5\", width=1)\n placeholder = np.array(placeholder)\n self._previewcache[\"placeholder\"] = placeholder\n logger.debug(\"Created placeholder. shape: %s\", placeholder.shape)",
"def SetBitmapDisabled(self, bitmap):\n\n self.bmpDisabled = bitmap",
"def clear_thumbnails(self):",
"def setBlackFrame(self):\r\n frame = ImageTk.PhotoImage(Image.fromarray(np.array([[0]*self.w]*self.h)))\r\n self.player.config(image=frame)\r\n self.player.image = frame",
"def clear_thumbnail(self):\n from anima.ui import utils\n utils.clear_thumbnail(self.thumbnail_graphics_view)",
"def setThumbnailImage(*args):",
"def SetToolDisabledBitmap(self, tool_id, bitmap):\r\n \r\n tool = self.FindTool(tool_id)\r\n if tool:\r\n tool.disabled_bitmap = bitmap",
"def thumbnail(self, thumbnail):\n self._thumbnail = thumbnail",
"def set_thumbnail(self, **kwargs):\n self.thumbnail_url = kwargs.get('url')",
"def _blankimage():\n img = TK.PhotoImage(width=1, height=1)\n img.blank()\n return img",
"def thumbnail_url(self):\n return None",
"def GetDisabledBitmap(self):\r\n \r\n return self.GetRotatedBitmap(True)",
"def thumbnail_url_if_set(self):\n return self.thumbnail.url if self.thumbnail else self.file.url",
"def thumbnail_url_if_set(self):\n progress_url = settings.GALLERY_VIDEO_THUMBNAIL_PROGRESS_URL\n return self.thumbnail.url if self.thumbnail else progress_url",
"def set_use_pictures(self, bEnabled):\n\t\tcall_sdk_function('PrlVmCfg_SetUsePictures', self.handle, bEnabled)",
"def thumbnails(self, value):\n self._thumbnails = value",
"def _set(self, thumbnail_name, thumbnail):\n raise NotImplementedError",
"def ConvertToDisabled(*args, **kwargs):\n return _gdi_.Bitmap_ConvertToDisabled(*args, **kwargs)",
"def SetBitmap(self, bmp):\r\n \r\n self.bitmap = bmp",
"def GetBitmapDisabled(self):\n\n return self.bmpDisabled",
"def MakeDisabledBitmap(bitmap):\r\n\r\n anImage = bitmap.ConvertToImage() \r\n factor = 0.7 # 0 < f < 1. Higher Is Grayer\r\n \r\n if anImage.HasMask():\r\n maskColour = (anImage.GetMaskRed(), anImage.GetMaskGreen(), anImage.GetMaskBlue())\r\n else:\r\n maskColour = None\r\n \r\n data = map(ord, list(anImage.GetData()))\r\n\r\n for i in range(0, len(data), 3):\r\n \r\n pixel = (data[i], data[i+1], data[i+2])\r\n pixel = MakeGray(pixel, factor, maskColour)\r\n\r\n for x in range(3):\r\n data[i+x] = pixel[x]\r\n\r\n anImage.SetData(''.join(map(chr, data)))\r\n \r\n return anImage.ConvertToBitmap()",
"def create_full_pic(self):\n self.create_half_pic()\n mirror_update(self.flag)",
"def SetHoverBitmap(self, bmp):\r\n \r\n self.hover_bitmap = bmp",
"def set_image(self, path):\r\n \r\n image = self._load_image(path)\r\n self.image_raw = image\r\n self.image = ImageTk.PhotoImage(image)\r\n self.image_panel.configure(image=self.image)",
"def __showPixmap(self, pixmap):\n scene = QGraphicsScene(self.imagePreview)\n if pixmap.isNull():\n self.imagePreview.setBackgroundBrush(\n self.__imagePreviewStandardBackground)\n scene.addText(self.tr(\"Preview not available.\"))\n else:\n self.imagePreview.setBackgroundBrush(QBrush(self.__tilePixmap))\n scene.addPixmap(pixmap)\n self.imagePreview.setScene(scene)",
"def init_ui(self):\n self.panel_sizer = wx.BoxSizer(wx.VERTICAL)\n self.figure_bmp = wx.StaticBitmap(self, wx.ID_ANY,\n bitmap=self.controller.empty_bitmap(self.bitmap_width,\n self.bitmap_height),\n pos=wx.DefaultPosition, size=wx.DefaultSize)\n self.panel_sizer.Add(self.figure_bmp, ui_defaults.ctrl_pct, wx.CENTER,\n ui_defaults.widget_margin)\n self.SetSizerAndFit(self.panel_sizer)",
"def thumb64(self, value: str) -> None:\n self.thumb = Image.decode64(value)",
"def content_library_images_none(self, content_library_images_none):\n\n self._content_library_images_none = content_library_images_none",
"def SetBitmapLabel(self, bitmap, createOthers=True):\n\n self.bmpLabel = bitmap\n if bitmap is not None and createOthers:\n image = bitmap.ConvertToImage()\n imageutils.grayOut(image)\n self.SetBitmapDisabled(wx.Bitmap(image))",
"def set_image(self, image_URL, bkg = None):\r\n\r\n self.image = self.image = pygame.image.load(image_URL).convert()\r\n if not bkg == None:\r\n # Set our transparent color\r\n self.image.set_colorkey(white)\r\n self.rect = self.image.get_rect()\r\n if self.drawable:\r\n self.set_drawable()",
"def buttonPress(self):\n if self.inPlay and not self.shown:\n self.configure(image = Tile.images[0])",
"def get_placeholder_img(self):\n pars = {\n 'width': self.max_width,\n 'height': self.max_height\n }\n out = {\n 'placeholder': True,\n 'width': self.max_width,\n 'height': self.max_height,\n 'url': photos_settings.DEBUG_PLACEHOLDER_PROVIDER_TEMPLATE % pars\n }\n return out",
"def _EmptyBitmapRGBA(*args, **kwargs):\n return _gdi_._EmptyBitmapRGBA(*args, **kwargs)",
"def RescaleScreenShot(bmp, thumbnail_size=200):\r\n\r\n bmpW, bmpH = bmp.GetWidth(), bmp.GetHeight()\r\n img = bmp.ConvertToImage()\r\n\r\n newW, newH = bmpW, bmpH\r\n \r\n if bmpW > bmpH:\r\n if bmpW > thumbnail_size:\r\n ratio = bmpW/float(thumbnail_size)\r\n newW, newH = int(bmpW/ratio), int(bmpH/ratio)\r\n img.Rescale(newW, newH, wx.IMAGE_QUALITY_HIGH)\r\n else:\r\n if bmpH > thumbnail_size:\r\n ratio = bmpH/float(thumbnail_size)\r\n newW, newH = int(bmpW/ratio), int(bmpH/ratio)\r\n img.Rescale(newW, newH, wx.IMAGE_QUALITY_HIGH)\r\n\r\n newBmp = img.ConvertToBitmap()\r\n otherBmp = wx.EmptyBitmap(newW+5, newH+5) \r\n\r\n memDC = wx.MemoryDC()\r\n memDC.SelectObject(otherBmp)\r\n memDC.SetBackground(wx.WHITE_BRUSH)\r\n memDC.Clear()\r\n \r\n memDC.SetPen(wx.TRANSPARENT_PEN)\r\n\r\n pos = 0\r\n for i in xrange(5, 0, -1):\r\n brush = wx.Brush(wx.Colour(50*i, 50*i, 50*i))\r\n memDC.SetBrush(brush)\r\n memDC.DrawRoundedRectangle(0, 0, newW+5-pos, newH+5-pos, 2)\r\n pos += 1\r\n\r\n memDC.DrawBitmap(newBmp, 0, 0, True)\r\n \r\n # Select the Bitmap out of the memory DC by selecting a new\r\n # uninitialized Bitmap\r\n memDC.SelectObject(wx.NullBitmap)\r\n\r\n return otherBmp",
"def change_button_img_to_null(self, null_img=None):\n null_img = self.null_img\n self.button1.configure(image=null_img)\n self.button2.configure(image=null_img)\n self.button3.configure(image=null_img)\n\n self.button4.configure(image=null_img)\n self.button5.configure(image=null_img)\n self.button6.configure(image=null_img)\n\n self.button7.configure(image=null_img)\n self.button8.configure(image=null_img)\n self.button9.configure(image=null_img)",
"def set_invalidated(self):\n # GTK Settings for evogtk\n self.set_property('image',self.__errorimg)",
"def get_preview(self, res, res_timeout):\n\n self.qimage = QImage()\n url = (\n \"http://koordinates-tiles-d.global.ssl.fastly.net\"\n \"/services/tiles/v4/thumbnail/layer={0},style=auto/{1}.png\".format(\n self.object_id, res\n )\n )\n try:\n img_data = urllib.request.urlopen(url, timeout=res_timeout).read()\n except URLError:\n return False\n except timeout:\n return False\n self.qimage.loadFromData(img_data)\n if res == \"300x200\":\n self.dlg.uLabelImgPreview.setPixmap(QPixmap(self.qimage))\n else:\n self.dlg.uLabelImgPreview.setPixmap(\n QPixmap(self.qimage).scaledToHeight(200)\n )\n return True",
"def turn_squeeze_image_off(self):\n self.squeeze_image = False",
"def prepare_thumbnail_url(self, object):\n if object.media is not None:\n return os.path.join(settings.MEDIA_URL, object.media.media_thumb_file.name)\n else:\n return ''",
"def GET_link_thumb(self, *a, **kw):\r\n return \"nothing to see here.\"",
"def create(self, parent):\n self.widget = wxBitmapWidget(parent)",
"def set_image(self, image):\n bmp = image.as_wxBitmap() if image is not None else None\n self.widget.SetBitmap(bmp)\n # Emit a size hint updated event if the size hint has actually\n # changed. This is an optimization so that a constraints update\n # only occurs when the size hint has actually changed. This \n # logic must be implemented here so that the label has been\n # updated before the new size hint is computed. Placing this\n # logic on the shell object would not guarantee that the label\n # has been updated at the time the change handler is called.\n cached = self._cached_size_hint\n hint = self._cached_size_hint = self.size_hint()\n if cached != hint:\n self.shell_obj.size_hint_updated()",
"def EmptyBitmap(*args, **kwargs):\n val = _gdi_.new_EmptyBitmap(*args, **kwargs)\n return val",
"def provider_pre_save(sender, instance: DataProvider, **kwargs):\n if instance.preview_url:\n try:\n # First check to see if this DataProvider should update the thumbnail\n # This should only be needed if it is a new entry, or the preview_url has changed,\n is_thumbnail_fresh = True\n try:\n provider = sender.objects.get(uid=instance.uid)\n except sender.DoesNotExist:\n is_thumbnail_fresh = False\n else:\n # The last preview url doesn't match the current or we still don't have a thumbnail.\n if instance.preview_url != provider.preview_url or instance.thumbnail is None:\n is_thumbnail_fresh = False\n\n if not is_thumbnail_fresh:\n provider_image_dir = get_provider_image_dir(instance.uid)\n make_dirs(provider_image_dir)\n # Return a file system path to the image.\n filepath = save_thumbnail(\n instance.preview_url,\n os.path.join(provider_image_dir, f\"{get_provider_thumbnail_name(instance.slug)}.jpg\"),\n )\n\n if instance.thumbnail:\n instance.thumbnail.delete()\n instance.thumbnail = MapImageSnapshot.objects.create(file=str(filepath))\n instance.save()\n except Exception as e:\n # Catch exceptions broadly and log them, we do not want to prevent saving provider's if\n # a thumbnail creation error occurs.\n logger.error(f\"Could not save thumbnail for DataProvider: {instance.slug}\")\n logger.exception(e)",
"def __ne__(*args, **kwargs):\n return _gdi_.Bitmap___ne__(*args, **kwargs)",
"def screenshot_disabled(self, screenshot_disabled):\n\n self._screenshot_disabled = screenshot_disabled",
"def _prepareImage(self):\n painter = QPainter(self)\n if len(self.thumbs) == 0:\n return\n destwidth = self.width()\n division = len(self.thumbs)\n NF = division\n slit_width = destwidth // division + 1\n if slit_width < self.minwidth:\n slit_width = self.minwidth\n division = destwidth // slit_width - 1\n for slit in range(division):\n point = QPoint(slit*destwidth // division,0)\n i = slit*NF // division\n thumb = self.transformer(self.thumbs[i])\n w = thumb.width()\n h = thumb.height()\n if w > slit_width:\n w0 = (w-slit_width)//2\n cropped = thumb.copy(w0,0,slit_width,h)\n painter.drawImage(point, cropped)\n else:\n painter.drawImage(point, thumb)",
"def __init__(self, width, height):\n self._image = tk.PhotoImage(master=root, width = width, height = height)\n self.fill((0,0,0))",
"def resize_profile_pic(sender, instance, **kwargs):\n profile_pic = instance.profile_picture\n if profile_pic.name != \"default.png\":\n img = Image.open(profile_pic.path)\n if img.height > 300 or img.width > 300:\n output_size = (300, 300)\n img.thumbnail(output_size)\n img.save(profile_pic.path)",
"def create_thumbnail(self, target, format=None):",
"def CreateBitmap(self, notebook, page, button_state, tabArt):\r\n\r\n control = page.control\r\n memory = wx.MemoryDC(wx.EmptyBitmap(1, 1))\r\n\r\n tab_size, x_extent = tabArt.GetTabSize(memory, notebook, page.caption, page.bitmap, page.active,\r\n button_state, control)\r\n \r\n tab_width, tab_height = tab_size\r\n rect = wx.Rect(0, 0, tab_width, tab_height)\r\n\r\n bitmap = wx.EmptyBitmap(tab_width+1, tab_height+1)\r\n memory.SelectObject(bitmap)\r\n\r\n if wx.Platform == \"__WXMAC__\":\r\n memory.SetBackground(wx.TRANSPARENT_BRUSH)\r\n else:\r\n memory.SetBackground(wx.Brush(self._backgroundColour))\r\n \r\n memory.SetBackgroundMode(wx.TRANSPARENT)\r\n memory.Clear()\r\n\r\n paint_control = wx.Platform != \"__WXMAC__\"\r\n tabArt.DrawTab(memory, notebook, page, rect, button_state, paint_control=paint_control)\r\n \r\n memory.SetBrush(wx.TRANSPARENT_BRUSH)\r\n memory.SetPen(wx.BLACK_PEN)\r\n memory.DrawRoundedRectangle(0, 0, tab_width+1, tab_height+1, 2)\r\n\r\n memory.SelectObject(wx.NullBitmap)\r\n \r\n # Gtk and Windows unfortunatly don't do so well with transparent\r\n # drawing so this hack corrects the image to have a transparent\r\n # background.\r\n if wx.Platform != '__WXMAC__':\r\n timg = bitmap.ConvertToImage()\r\n if not timg.HasAlpha():\r\n timg.InitAlpha()\r\n for y in xrange(timg.GetHeight()):\r\n for x in xrange(timg.GetWidth()):\r\n pix = wx.Colour(timg.GetRed(x, y),\r\n timg.GetGreen(x, y),\r\n timg.GetBlue(x, y))\r\n if pix == self._backgroundColour:\r\n timg.SetAlpha(x, y, 0)\r\n bitmap = timg.ConvertToBitmap()\r\n return bitmap",
"def set_background_image(self, imagename):\n self.background.image = ui.get_image(imagename, '/home/pi/music/images/')",
"def _set_thumbnail(self, instance=None, **kwargs):\n warn('This setter is deprecated in favor of _set_variations.', DeprecationWarning)\n if getattr(instance, self.name):\n filename = self.generate_filename(instance,\n os.path.basename(getattr(instance, self.name).path))\n variation = getattr(self, 'thumbnail')\n thumbnail_filename = self._get_variation_filename(variation, filename)\n thumbnail_field = VariationField(thumbnail_filename)\n setattr(getattr(instance, self.name), 'thumbnail', thumbnail_field)",
"def initImg(self):\n self.img = Image.new('RGBA',(self.width,self.height),color='#' + getConfigPart(self.theme,\"bg\"))\n self.draw = ImageDraw.Draw(self.img)",
"def upd_preview(self):\n\n if self.data_type != \"layer\":\n self.dlg.uLabelImgPreview.clear()\n self.dlg.uLabelImgPreview.setText(\"No preview available\")\n return\n\n if self.get_preview(\"300x200\", 0.5):\n return\n if self.get_preview(\"150x100\", 5):\n return\n\n self.dlg.uLabelImgPreview.clear()\n self.dlg.uLabelImgPreview.setText(\"No preview available\")",
"def set_from_original(self):\n self.image = self.orig_image\n self.update_img()\n self.update_size()",
"def get_blank_img(self):\n if photos_settings.DEBUG:\n return self.get_placeholder_img()\n\n out = {\n 'blank': True,\n 'width': self.max_width,\n 'height': self.max_height,\n 'url': photos_settings.EMPTY_IMAGE_SITE_PREFIX + 'img/empty/%s.png' % (self.name),\n }\n return out",
"def test_default_thumbnail(self):\n\n for size in self.article.sizes.keys():\n assert self.article.default_thumbnail(size) is not None",
"def __init__(self, imagen, padre=None, id=-1, pos=wx.DefaultPosition, titulo=\"Hola solo programadores\"):\n temp = imagen.ConvertToBitmap() # Convierte la imagen a bitmap\n size = temp.GetWidth(), temp.GetHeight() # Obtiene el tamaño de la imagen\n wx.Frame.__init__(self, padre, id, titulo, pos, size) # Inicializa la clase padre de la ventana, que es wx.Frame\n self.bmp = wx.StaticBitmap(self, -1, temp) # Inicializa el atributo self.bmp con una instancia de un control wx.StaticBitmap que contiene el bitmap creado antes",
"def set_image(self):\r\n return loader.GFX['loadgamebox']",
"def SetBitmap(self, bitmap):\n self._bitmap = bitmap\n self.Refresh()",
"def setUp(self) -> None:\r\n self.bitmap = CompressedBitmap([True, False, False, True], 4)\r\n self.un_bitmap = CompressedBitmap([True, False], 5)\r\n self.empty_bitmap = CompressedBitmap([], 0)",
"def _getGUIImage(self): \n # read the system of your computer\n\n image = ImagePIL.fromarray(self.cv_image)\n\n size = round(image.size[0]/2), round(image.size[1]/2)\n\n image.thumbnail(size, ImagePIL.ANTIALIAS)\n image = ImageTkPIL.PhotoImage(image)\n # self.panel = tki.Label(image=image)\n self.panel.config(image=image)\n self.panel.image = image",
"def make_thumbnail(self):\n # https://gist.github.com/valberg/2429288\n\n # make sure image data is set\n if not self.image_data:\n return False\n\n if self.proxy_data:\n return True\n\n # Create a resized version of the image\n image = Image.open(self.image_data)\n image.thumbnail(THUMBNAIL_SIZE, Image.BICUBIC)\n\n # Save the thumbnail to in-memory 'file'\n temp_thumb = BytesIO()\n image.save(temp_thumb, 'jpeg')\n temp_thumb.seek(0) # rewinds the file\n\n # Save image to a SimpleUploadFile which can be saved\n # into ImageField\n # TODO figure out how to pass base image's UUID before\n # image is committed to DB\n basename = os.path.basename(self.image_data.name)\n uuidname = os.path.splitext(basename)[0]\n suf = SimpleUploadedFile(uuidname,\n temp_thumb.read(), content_type='image/jpeg')\n thumb_filename = '{}_thumb.jpeg'.format(suf.name)\n\n # set save=False, or else it will infinite loop\n self.proxy_data.save(thumb_filename,\n suf,\n save=False)\n\n # Also store the real dimensions for the Pillow thumbnail\n self.proxy_width, self.proxy_height = image.size\n\n temp_thumb.close()\n\n return True",
"def OnUpdateUIImage(self, event):\n index = GK_SHAPE_TYPE.index(\"image\")\n if self.m_style_ctrl.GetSelection() == GK_SHAPE_TYPE.index(\"image\"):\n event.Enable(True)\n else:\n event.Enable(False)",
"def _clear(self):\n\n self.image = Image.new(\"RGB\", (self._width, self._height), self._color)",
"async def request_thumbnail(self) -> None:\n await self.vivintskyapi.request_camera_thumbnail(\n self.alarm_panel.id, self.alarm_panel.partition_id, self.id\n )",
"def SetUniformBitmapSize(self, size):\r\n\r\n self._requested_bmp_size = wx.Size(*size)\r\n\r\n # if window is already initialized, recalculate the tab height\r\n if self._dummy_wnd:\r\n self.UpdateTabCtrlHeight()",
"def load_image_i(self, img_tk):\n\n self.p2_label_img.configure(image=img_tk)\n self.p2_label_img.image = img_tk",
"def for_tests_only():\n root = tk.Tk()\n panel = tk.Frame(root)\n panel.pack(expand=tk.YES, fill=tk.BOTH)\n lbl = ShowGif(panel)\n lbl.place(bordermode='outside', x=135, y=500)\n lbl.show('..\\\\PicUi\\\\100x100.gif')\n root.mainloop()",
"def thumbnail(self, size, resample=BICUBIC):\r\n # preserve aspect ratio\r\n x, y = self.size\r\n if x > size[0]:\r\n y = int(max(y * size[0] / x, 1))\r\n x = int(size[0])\r\n if y > size[1]:\r\n x = int(max(x * size[1] / y, 1))\r\n y = int(size[1])\r\n size = x, y\r\n if size == self.size:\r\n return\r\n self.draft(None, size)\r\n self._instance = self.resize(size, resample, image=self._instance)\r\n self.readonly = 0\r\n self.pyaccess = None",
"def turn_squeeze_image_on(self):\n self.squeeze_image = True",
"def small_image(self):\n pass",
"def clear(self):\n self.display(Image.new(self.mode, self.size))",
"def SetDefaultPaneBitmaps(self, isMac):\r\n\r\n if isMac:\r\n self._inactive_close_bitmap = DrawMACCloseButton(wx.WHITE, self._inactive_caption_colour)\r\n self._active_close_bitmap = DrawMACCloseButton(wx.WHITE, self._active_caption_colour)\r\n else:\r\n self._inactive_close_bitmap = BitmapFromBits(close_bits, 16, 16, self._inactive_caption_text_colour)\r\n self._active_close_bitmap = BitmapFromBits(close_bits, 16, 16, self._active_caption_text_colour)\r\n \r\n if isMac:\r\n self._inactive_maximize_bitmap = BitmapFromBits(max_bits, 16, 16, wx.WHITE)\r\n self._active_maximize_bitmap = BitmapFromBits(max_bits, 16, 16, wx.WHITE)\r\n else:\r\n self._inactive_maximize_bitmap = BitmapFromBits(max_bits, 16, 16, self._inactive_caption_text_colour)\r\n self._active_maximize_bitmap = BitmapFromBits(max_bits, 16, 16, self._active_caption_text_colour)\r\n\r\n if isMac:\r\n self._inactive_restore_bitmap = BitmapFromBits(restore_bits, 16, 16, wx.WHITE)\r\n self._active_restore_bitmap = BitmapFromBits(restore_bits, 16, 16, wx.WHITE)\r\n else:\r\n self._inactive_restore_bitmap = BitmapFromBits(restore_bits, 16, 16, self._inactive_caption_text_colour)\r\n self._active_restore_bitmap = BitmapFromBits(restore_bits, 16, 16, self._active_caption_text_colour)\r\n\r\n if isMac:\r\n self._inactive_minimize_bitmap = BitmapFromBits(minimize_bits, 16, 16, wx.WHITE)\r\n self._active_minimize_bitmap = BitmapFromBits(minimize_bits, 16, 16, wx.WHITE)\r\n else:\r\n self._inactive_minimize_bitmap = BitmapFromBits(minimize_bits, 16, 16, self._inactive_caption_text_colour)\r\n self._active_minimize_bitmap = BitmapFromBits(minimize_bits, 16, 16, self._active_caption_text_colour)\r\n\r\n self._inactive_pin_bitmap = BitmapFromBits(pin_bits, 16, 16, self._inactive_caption_text_colour)\r\n self._active_pin_bitmap = BitmapFromBits(pin_bits, 16, 16, self._active_caption_text_colour)\r\n\r\n self._custom_pane_bitmaps = False",
"def thumbnail_images(self, thumbnail_images):\n\n self._thumbnail_images = thumbnail_images",
"def initialise(self):\r\n self.set_image(\"wall.png\")\r\n return self",
"def SetCustomPaneBitmap(self, bmp, button, active, maximize=False):\r\n\r\n if bmp.GetWidth() > 16 or bmp.GetHeight() > 16:\r\n raise Exception(\"The input bitmap is too big\")\r\n\r\n if button == AUI_BUTTON_CLOSE:\r\n if active:\r\n self._active_close_bitmap = bmp\r\n else:\r\n self._inactive_close_bitmap = bmp\r\n\r\n if wx.Platform == \"__WXMAC__\":\r\n self._custom_pane_bitmaps = True \r\n\r\n elif button == AUI_BUTTON_PIN:\r\n if active:\r\n self._active_pin_bitmap = bmp\r\n else:\r\n self._inactive_pin_bitmap = bmp\r\n\r\n elif button == AUI_BUTTON_MAXIMIZE_RESTORE:\r\n if maximize:\r\n if active:\r\n self._active_maximize_bitmap = bmp\r\n else:\r\n self._inactive_maximize_bitmap = bmp\r\n else:\r\n if active:\r\n self._active_restore_bitmap = bmp\r\n else:\r\n self._inactive_restore_bitmap = bmp\r\n\r\n elif button == AUI_BUTTON_MINIMIZE:\r\n if active:\r\n self._active_minimize_bitmap = bmp\r\n else:\r\n self._inactive_minimize_bitmap = bmp",
"def set_image(self, image):\n pm = image.as_QPixmap() if image is not None else None\n self.widget.setPixmap(pm)\n # Emit a size hint updated event if the size hint has actually\n # changed. This is an optimization so that a constraints update\n # only occurs when the size hint has actually changed. This \n # logic must be implemented here so that the control has been\n # updated before the new size hint is computed. Placing this\n # logic on the shell object would not guarantee that the control\n # has been updated at the time the change handler is called.\n cached = self._cached_size_hint\n hint = self._cached_size_hint = self.size_hint()\n if cached != hint:\n self.shell_obj.size_hint_updated()",
"def widget_image(w):\n copy = w.image.copy()\n # Blit extra images onto copy\n for img in map(lambda x: w._images[x], w._extra_images):\n if img._show:\n copy.blit(img.image, img.rect)\n # Blend transparent surface when fading and blit to screen.\n if w._fade is not None:\n transparent = pygame.surface.Surface(w.rect.size, SRCALPHA)\n transparent.fill((255,255,255, w._fade))\n copy.blit(transparent, (0,0), special_flags=BLEND_RGBA_MULT)\n return copy",
"def media_image_url(self):\n\n if self._table.active_track:\n return self._table.active_track.get_thumbnail_url(Track.ThumbnailSize.LARGE)\n\n return super().media_image_url",
"def setFlag(self):\n if self.inPlay and not self.shown:\n self.flag = not(self.flag)\n image_index = 11 if self.flag else 10\n self.configure(image = Tile.images[image_index])\n return 1 if self.flag else -1\n return 0",
"def NotebookPreview(self, thumbnail_size=200):\r\n\r\n if wx.Platform == \"__WXMAC__\":\r\n return False\r\n\r\n tabCtrl = self.GetActiveTabCtrl()\r\n activePage = tabCtrl.GetActivePage()\r\n pages = tabCtrl.GetPages()\r\n\r\n pageStatus, pageText = [], []\r\n\r\n for indx, page in enumerate(pages):\r\n\r\n pageStatus.append(page.enabled)\r\n\r\n if not page.enabled:\r\n continue\r\n \r\n self.SetSelectionToPage(page) \r\n pageText.append(page.caption)\r\n\r\n rect = page.window.GetScreenRect()\r\n bmp = RescaleScreenShot(TakeScreenShot(rect), thumbnail_size)\r\n\r\n page.enabled = False\r\n if indx == 0:\r\n il = wx.ImageList(bmp.GetWidth(), bmp.GetHeight(), True)\r\n\r\n il.Add(bmp) \r\n\r\n # create the list control\r\n listCtrl = wx.ListCtrl(self, style=wx.LC_ICON|wx.LC_AUTOARRANGE|wx.LC_HRULES|wx.LC_VRULES,\r\n name=\"__fake__page__\")\r\n\r\n # assign the image list to it\r\n listCtrl.AssignImageList(il, wx.IMAGE_LIST_NORMAL)\r\n listCtrl.__previousStatus = [activePage, pageStatus]\r\n\r\n # create some items for the list\r\n for indx, text in enumerate(pageText):\r\n listCtrl.InsertImageStringItem(10000, text, indx)\r\n \r\n self.AddPage(listCtrl, \"AuiNotebook Preview\", True, bitmap=auinotebook_preview.GetBitmap(), disabled_bitmap=wx.NullBitmap)\r\n return True",
"def set_default_image(self, image):\n raise NotImplementedError",
"def OnPaint(self, event=None):\r\n dc = wx.PaintDC(self)\r\n dc.SetBackground(wx.MEDIUM_GREY_BRUSH)\r\n if self.scaled:\r\n if self.GetSizeTuple() != self.oldSize:\r\n self.Rescale()\r\n panelWidth,panelHeight = self.GetSizeTuple()\r\n xPos = max(0,(panelWidth - self.scaled.GetWidth())/2)\r\n yPos = max(0,(panelHeight - self.scaled.GetHeight())/2)\r\n dc.Clear()\r\n dc.DrawBitmap(self.scaled,xPos,yPos,False)\r\n else:\r\n dc.Clear()\r\n #dc.SetPen(wx.Pen(\"BLACK\", 1))\r\n #dc.SetBrush(wx.TRANSPARENT_BRUSH)\r\n #(width,height) = self.GetSize()\r\n #dc.DrawRectangle(0,0,width,height)\r",
"def SetBitmap(self, bitmap):\n self._bitmap = bitmap\n if self._bitmap.Ok():\n self.SetSize(self._bitmap.GetWidth(), self._bitmap.GetHeight())",
"def OnPanelEraseBg(self, event):\r\n\r\n pass",
"def SetToolBitmap(self, tool_id, bitmap):\r\n \r\n tool = self.FindTool(tool_id)\r\n if tool:\r\n tool.bitmap = bitmap",
"def get_thumbnail_url():",
"def build_filler_images(self):",
"def update_image(self, surface):\n self.ui_widget.update_image(surface=surface)",
"def thumbnail(self):\n return self._thumbnail",
"def display(self):\n\t\tself.imgDisplay.set_from_pixbuf(self.getVisible())\n\t\tgc.collect()",
"def clear_tiles(self):\n for y in range(Settings.SIZE_Y):\n for x in range(Settings.SIZE_X):\n self.__tile_grid[y][x].configure(\n image=self.__marker_images[MarkerType.NONE])",
"def thumbnail(self):\n return self.get_thumbnail_url()",
"def setPhotoDisplay(self):\n if self.imageDirectoryObj:\n self.imf = self.imageDirectoryObj.images[ self.currentPhotoIndex ]\n photo_pixmap = QPixmap( self.imf.file_path )\n self.scaled_photo = photo_pixmap.scaled(self.photoDisplay.size(), QtCore.Qt.KeepAspectRatio)\n self.photoDisplay.setPixmap( self.scaled_photo )"
] | [
"0.659299",
"0.65669173",
"0.6550721",
"0.6123886",
"0.6098775",
"0.6092194",
"0.60567355",
"0.6041891",
"0.5903169",
"0.5866036",
"0.5741572",
"0.57384914",
"0.5658414",
"0.5480358",
"0.5475113",
"0.5414041",
"0.54001987",
"0.53946155",
"0.5358883",
"0.5336738",
"0.5332489",
"0.52908415",
"0.528474",
"0.52475524",
"0.52440804",
"0.5217962",
"0.51954037",
"0.5173527",
"0.51360786",
"0.5129816",
"0.5117524",
"0.5114172",
"0.511154",
"0.5101918",
"0.50770575",
"0.50583404",
"0.5050324",
"0.50434667",
"0.5030127",
"0.50246847",
"0.50197476",
"0.5011554",
"0.4978469",
"0.49727044",
"0.49717903",
"0.49682897",
"0.49639556",
"0.49610925",
"0.49600086",
"0.4957256",
"0.49565917",
"0.495602",
"0.49459037",
"0.4944342",
"0.4936042",
"0.4924051",
"0.49207643",
"0.49123222",
"0.49038985",
"0.4897228",
"0.48833027",
"0.48780835",
"0.48716405",
"0.48589012",
"0.48570126",
"0.48552635",
"0.48533514",
"0.4851801",
"0.48508164",
"0.48477513",
"0.48421612",
"0.48410988",
"0.48364478",
"0.48327574",
"0.48306543",
"0.4827999",
"0.48246032",
"0.48221093",
"0.4820302",
"0.4817888",
"0.48081055",
"0.4805593",
"0.47926146",
"0.4776884",
"0.47711062",
"0.47693056",
"0.47637567",
"0.47599506",
"0.47464696",
"0.47361892",
"0.47324464",
"0.4723766",
"0.4716118",
"0.47155362",
"0.47152466",
"0.47118694",
"0.4710246",
"0.47088388",
"0.47045323",
"0.46920508"
] | 0.56492853 | 13 |
Given the u, v parameters, return a point on the cross-section. u is the angle around the cross-section curve; for example, for a circle, theta = 2 pi u. v is the parameter along the extrusion path. It is used for varying the cross-section at different parts of the path. Return a Vector where the x and y coordinates are in the plane of the cross-section. The z coordinate should normally be 0.0, but it doesn't have to be ;) | def position(self, u, v):
raise NotImplementedError | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def angle_between_vectors(self, u, v):\n vec1_unit = self.get_unit_vector(u)\n vec2_unit = self.get_unit_vector(v)\n return np.arccos(np.clip(np.dot(vec1_unit, vec2_unit), -1.0, 1.0)) * (180/math.pi)",
"def angle_between_vectors_degrees(u, v):\n a = np.dot(u, v)\n b = np.linalg.norm(u)\n c = np.linalg.norm(v)\n d = a / (b* c)\n if d > 1:\n d = 1\n if d < -1:\n d = -1\n e = acos(d)\n f = np.degrees(e)\n return f",
"def angle_between_vectors(u, v):\r\n mag_u = math.sqrt(u[0]**2 + u[1]**2 + u[2]**2)\r\n mag_v = math.sqrt(v[0]**2 + v[1]**2 + v[2]**2)\r\n dot_prod = u[0] * v[0] + u[1] * v[1] + u[2] * v[2]\r\n return math.acos(dot_prod/(mag_u*mag_v))",
"def angle_between_vectors_degrees(u, v):\n return np.degrees(\n math.acos(np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v))))",
"def angles_vectors(u, v):\n a = angle_smallest_vectors(u, v)\n return a, pi * 2 - a",
"def geochart(u, v):\n cv = torch.cos(v)\n cu = torch.cos(u)\n sv = torch.sin(v)\n su = torch.sin(u)\n return torch.stack((cv*su, sv*su, cu)).T",
"def cross(u,v):\n u1, u2, u3 = u\n v1, v2, v3 = v\n return np.array([u2*v3 - u3*v2,\n u3*v1 - u1*v3,\n u1*v2 - u2*v1], dtype=u.dtype)",
"def project_vector(u, v):\n u_np = np.array([u.get_x(), u.get_y()])\n v_np = np.array([v.get_x(), v.get_y()])\n proj = (np.dot(u_np, v_np) / np.dot(v_np, v_np)) * v_np\n return Point(proj[0], proj[1])",
"def vector_angle(v):\n assert len(v) == 2\n x, y = v\n return np.arctan2(y, x)",
"def cross_vectors(u, v):\n return [u[1] * v[2] - u[2] * v[1],\n u[2] * v[0] - u[0] * v[2],\n u[0] * v[1] - u[1] * v[0]]",
"def signed_angle(self, u, v):\n return atan2(u.x * v.y - u.y * v.x, u.x * v.x + u.y * v.y)",
"def signed_angle(self, u, v):\n return atan2(u.x * v.y - u.y * v.x, u.x * v.x + u.y * v.y)",
"def angles_vectors_degrees(u, v):\n a = angle_smallest_vectors_degrees(u, v)\n return a, 360. - a",
"def compute_angle_v2v(v1, v2, v3=None):\n\n alpha = math.acos(dot_product(v1, v2) / (vlength(v1)*vlength(v2)))\n if v3 is not None:\n cross = cross_product(v2, v1)\n if dot_product(cross,v3) > 0.0:\n return 2*math.pi-alpha\n\n return alpha",
"def calc_angle(v1, v2, v3):\n v1 = v1 - v2\n v3 = v3 - v2\n return v1.angle(v3)",
"def get_angle(v1, v2):\n return np.arccos(np.dot(v1, v2))",
"def angle_smallest_vectors(u, v):\n a = dot_vectors(u, v) / (length_vector(u) * length_vector(v))\n a = max(min(a, 1), -1)\n return acos(a)",
"def proyZm1(u, v, t1):\n den = u ** 2 + v ** 2 + 4\n x = u - t1 * (u - 4 * u / den)\n y = v - t1 * (v - 4 * v / den)\n z = -1 - t1 * (-2 + 8 / den)\n return (x, y, z)",
"def calc_theta(U, V, quad, h_length, radians):\n import numpy as np\n theta = np.arcsin(U / h_length)\n import numpy as np\n if quad == 1:\n theta = theta\n elif quad == 2:\n theta = -theta + np.pi / 2\n elif quad - - 3:\n theta = np.pi / 2 + theta + np.pi\n elif quad == 4:\n theta = 3 * np.pi / 2\n theta = 2 * np.pi - theta\n if not radians:\n theta = theta * 180 / np.pi\n\n return (theta)",
"def get_angle(pt1,pt2,pt3):\r\n a = float(get_distance(pt1,pt2))\r\n b = float(get_distance(pt2,pt3))\r\n c = float(get_distance(pt1,pt3))\r\n angle = np.arccos((a**2 + b**2 - c**2)/(2*a*b)) # Law of Cosines \r\n \r\n return angle",
"def vector_angle(v1, v2):\n cos_theta = np.dot(v1, v2) / np.linalg.norm(v1) / np.linalg.norm(v2)\n # Clip ensures that cos_theta is within -1 to 1 by rounding say -1.000001 to -1 to fix numerical issues\n angle = np.arccos(np.clip(cos_theta, -1, 1))\n\n return angle",
"def vector_polar(v):\n return vector_mag(v), vector_angle(v)",
"def calculate_concordance_correlation_coefficient(u, v):\n a = 2 * np.mean((u - np.mean(u)) * (v - np.mean(v)))\n b = (\n np.mean(np.square(u - np.mean(u)))\n + np.mean(np.square(v - np.mean(v)))\n + np.square(np.mean(u) - np.mean(v))\n )\n ccc = a / b\n return ccc",
"def dexpinv(self, u, v, _=None):\n A, a = np.split(u, 2)\n B, b = np.split(v, 2)\n alpha = np.linalg.norm(A)\n rho = np.inner(A, a)\n if np.isclose(alpha, 0):\n return v\n c1 = (\n B\n - 0.5 * np.cross(A, B)\n + self._dexpinv_helper_1(alpha) * np.cross(A, np.cross(A, B))\n )\n c2 = (\n b\n - 0.5 * (np.cross(a, B) + np.cross(A, b))\n + self._dexpinv_helper_2(alpha, rho) * np.cross(A, np.cross(A, B))\n + self._dexpinv_helper_1(alpha)\n * (\n np.cross(a, np.cross(A, B))\n + np.cross(A, np.cross(a, B))\n + np.cross(A, np.cross(A, b))\n )\n )\n return np.hstack((c1, c2))",
"def get_angle(v1,v2) :\n\n if (np.linalg.norm(v1)*np.linalg.norm(v2)) != 0 : \n cosangle = np.dot(v1,v2)/(np.linalg.norm(v1)*np.linalg.norm(v2))\n cosangle = np.maximum(-1,np.minimum(1, cosangle))\n angle = np.arccos(cosangle) \n if np.cross(v1,v2) < 0 :\n angle = 2*np.pi - angle \n return angle\n return None",
"def find_perpendicular_vector(vt):\n x, y = vt\n return np.array([y, -x])",
"def angles_points(a, b, c):\n u = subtract_vectors(b, a)\n v = subtract_vectors(c, a)\n return angles_vectors(u, v)",
"def angle(v,w):\n cosx = dot_product(v,w) / (length(v) * length(w))\n #det = determinant(A,B)\n rad = math.acos(cosx) # in radians\n return rad\n #return rad*180/math.pi # returns degrees",
"def cosine_distance(u, v):\n #print u,v\n return numpy.dot(u, v) / (math.sqrt(numpy.dot(u, u)) * math.sqrt(numpy.dot(v, v)))",
"def get_intersect_angle(self, p0, p1, p2):\n u, v = p1-p0, p2-p0\n costheta = u.dot(v) / math.sqrt(u.dot(u) * v.dot(v))\n return math.degrees(math.acos(costheta))",
"def deriv(self, t, x, u):\n \n # unpack some variables\n theta = x[2]\n v = u[0]\n vdiff = u[1]\n\n return np.r_[\n v * cos(theta), \n v * sin(theta), \n vdiff / self.w\n ]",
"def _angle(u, v, w, d='+'):\n vu = np.arctan2(u[1] - v[1], u[0] - v[0])\n vw = np.arctan2(w[1] - v[1], w[0] - v[0])\n phi = vw - vu\n if phi < 0:\n phi += 2 * np.pi\n if d == '-':\n phi = 2 * np.pi - phi\n return np.round(phi, 6)",
"def angle_2D(v):\n len_v=(v[0]**2+v[1]**2)**(0.5)\n if len_v==0:\n return 0\n ret = math.acos(v[0]/len_v)\n if v[1]<0:\n ret=6.283185307179586-ret\n return ret",
"def pcosine(u, v):\n\n # validate vectors like scipy does\n u = ssd._validate_vector(u)\n v = ssd._validate_vector(v)\n\n dist = 1. - np.abs(np.dot(u, v) / (linalg.norm(u) * linalg.norm(v)))\n\n return dist",
"def solid_angle_tetrahedron(va: Vector, vb: Vector, vc: Vector) -> float:\n a = va.return_magnitude()\n b = vb.return_magnitude()\n c = vc.return_magnitude()\n\n num = va.dot(vb.cross(vc))\n den = a*b*c + a*vb.dot(vc) + b*va.dot(vc) + c*va.dot(vb)\n\n return 2*atan2(num, den)",
"def cos_sim(u, v):\n return np.vdot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v))",
"def angleBetween(v1, v2):\n v1_u = unitVector(v1)\n v2_u = unitVector(v2)\n return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))",
"def angle_between(v1, v2):\n v1_u = unit_vector(v1)\n v2_u = unit_vector(v2)\n return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))",
"def rotate(v: vect2d, angle: float) -> vect2d:\n vector = ((v.x * math.cos(angle) - v.y * math.sin(angle)),\n (v.x * math.sin(angle) + v.x * math.cos(angle)))\n return vector",
"def cross2(u, v, w):\n return dot2(u, w) * v - dot2(u, v) * w",
"def cosine_distance(u, v):\n return numpy.dot(u, v) / (math.sqrt(numpy.dot(u, u)) * math.sqrt(numpy.dot(v, v)))",
"def angle_between(u, v, n=None):\n if n is None:\n return np.arctan2(np.linalg.norm(np.cross(u, v)), np.dot(u, v))\n else:\n return np.arctan2(np.dot(n, np.cross(u, v)), np.dot(u, v))",
"def Rotation(v, theta):\n\n v = np.array(v)\n if v.shape != (3,) or abs(v.dot(v) - 1.0) > 1e-8 or not np.all(np.isreal(v)):\n raise ValueError('Rotation vector v should be a 3D real unit vector.')\n\n return np.cos(theta/2) * Identity() - 1j * np.sin(theta/2) * (\n v[0] * PauliX() + v[1] * PauliY() + v[2] * PauliZ())",
"def intersect_triangle(v1, v2, v3, pos):\n #calc normal from two edge vectors v2-v1 and v3-v1\n nVec = cross(subtract(v2, v1), subtract(v3, v1))\n #equation of plane: Ax + By + Cz = kVal where A,B,C are components of normal. x,y,z for point v1 to find kVal\n kVal = dot(nVec,v1)\n #return y val i.e. y = (kVal - Ax - Cz)/B\n return (kVal - nVec[0]*pos[0] - nVec[2]*pos[2])/nVec[1]",
"def angle_between(v1, v2):\n v1_u = unit_vector(v1)\n v2_u = unit_vector(v2)\n\n #takes out if vectors are 1 or -1 (basically if they're the same direction)\n angle = math.degrees(np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0)))\n return angle",
"def intersect_triangle(v1, v2, v3, pos):\r\n #calc normal from two edge vectors v2-v1 and v3-v1\r\n nVec = cross(subtract(v2, v1), subtract(v3, v1))\r\n #equation of plane: Ax + By + Cz = kVal where A,B,C are components of normal. x,y,z for point v1 to find kVal\r\n kVal = dot(nVec,v1)\r\n #return y val i.e. y = (kVal - Ax - Cz)/B\r\n return (kVal - nVec[0]*pos[0] - nVec[2]*pos[2])/nVec[1]",
"def proyZ1(u, v, t2):\n den = u ** 2 + v ** 2 + 4\n x = u - t2 * (u - 4 * u / den)\n y = v - t2 * (v - 4 * v / den)\n z = 1 - t2 * (2 - 8 / den)\n return (x, y, z)",
"def vector_angle_finder(vect_1, vect_2):\n theta = np.arccos(np.dot(vect_1, vect_2) / (magnitude_vect(vect_1) * magnitude_vect(vect_2)))\n angle = theta * 180 / math.pi\n return angle",
"def cross_z(self):\n return Vector((self.v.y, -self.v.x))",
"def get_torsional(a, b, c, d):\n \n # Compute 3 vectors connecting the four points\n ba = b - a\n cb = c - b\n dc = d - c\n \n # Compute the normal vector to each plane\n u_A = cross(ba, cb)\n u_B = cross(cb, dc)\n\n #Measure the angle between the two normal vectors\n u_A_mod = mod(u_A)\n u_B_mod = mod(u_B)\n val = dot(u_A, u_B) / (u_A_mod * u_B_mod)\n # better fix?\n if val > 1:\n val = 1\n elif val < -1:\n val = -1\n tor_rad = np.arccos(val)\n \n # compute the sign\n sign = dot(u_A, dc)\n if sign > 0:\n return tor_rad\n else:\n return -tor_rad",
"def angle(v1, v2, acute=True):\n angle = np.arccos(np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2)))\n if acute == True:\n return angle\n else:\n return 2 * np.pi - angle",
"def angle(self, v1, v2):\r\n cosang = np.dot(v1, v2)\r\n sinang = np.linalg.norm(np.cross(v1, v2))\r\n return np.arctan2(sinang, cosang)",
"def arccurv(x, y):\n curv = curvature(x, y)\n steps = np.sqrt(np.diff(x, axis=0)**2 + np.diff(y, axis=0)**2)[:-1]\n arc = np.cumsum(steps)\n return arc, curv",
"def angle_between(v1, v2):\n return np.arccos(np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2)))",
"def vector_component(u, v):\n x = dot_vectors(u, v) / length_vector_sqrd(v)\n return scale_vector(v, x)",
"def get_angle(a, b, c):\n\n ba = a - b\n cb = c - b\n\n ba_mod = mod(ba)\n cb_mod = mod(cb)\n val = dot(ba, cb) / (ba_mod * cb_mod)\n # better fix?\n if val > 1:\n val = 1\n elif val < -1:\n val = -1\n\n return np.arccos(val)",
"def angle_between(v2, v1):\n v1_u = unit_vector(v1)\n v2_u = unit_vector(v2)\n result = np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))\n if np.isnan(result):\n if abs(v1_u + v2_u) < .5 * (abs(v1_u) + abs(v2_u)):\n return np.pi\n else:\n return 0.0\n if Left( [v2[1],v2[3]], [0,0], [v1[1],v1[3]] ):\n return 2*np.pi - result\n return result",
"def angle_smallest_vectors_degrees(u, v):\n return degrees(angle_smallest_vectors)",
"def get_arc_3D(v1, v2, points_per_radian=100, radius=1):\n\n # v1 and w become the x, y axes of the great circle\n v1_3D = ang_to_vec_coords(v1, radius=radius)\n v2_3D = ang_to_vec_coords(v2, radius=radius)\n w_axis_3D = np.cross(np.cross(v1_3D, v2_3D), v1_3D)\n # make w a vector of proper radius\n w_len = np.sqrt(square_distance([0,0,0], w_axis_3D))\n w_3D = w_axis_3D * (radius / w_len) \n arc_len = np.arccos(np.dot(v1_3D, v2_3D))\n num_points = arc_len * points_per_radian\n t = np.linspace(0, arc_len, num_points)\n u, cos_t = np.meshgrid(v1_3D, np.cos(t))\n w, sin_t = np.meshgrid(w_3D, np.sin(t))\n arc_points = u*cos_t + w*sin_t\n return arc_points",
"def perpendicular_vector(v):\n if v[1] == 0 and v[2] == 0:\n if v[0] == 0:\n raise ValueError(\"zero vector\")\n else:\n return np.cross(v, [0, 1, 0])\n return np.cross(v, [1, 0, 0])",
"def angleBetweenVectors(v1, v2):\n v2Size = vectorLength(v2)\n if not v2Size:\n theta = 0.0\n else:\n theta = math.acos(dotProduct(v1, v2) / v2Size)\n return theta",
"def _angle(self, a, b, c):\n divid = (a ** 2 + b ** 2 - c ** 2)\n divis = (2 * a * b)\n if (divis) > 0:\n result = float(divid) / divis\n if result <= 1.0 and result >= -1.0:\n return acos(result)\n return 0\n else:\n return 0",
"def intersect(v, p):\n\t# Solve for t: 100 = 4 * (px + t * vx)^2 + (py + t * vy)^2\n\t# Note: 4 * px^2 + py^2 - 100 = 0\n\tt = sum([c*vi*pi for (c,vi,pi) in zip((-8.0, -2.0),v,p)]) \n\tt /= sum([c * vi**2 for (c,vi) in zip((4.0,1.0),v)])\n\treturn [pi + vi * t for (pi, vi) in zip(p, v)]",
"def outer_product(u: Vector3D, v: Vector3D):\n cx = u.y * v.z - u.z * v.y\n cy = u.z * v.x - u.x * v.z\n cz = u.x * v.y - u.y * v.x\n return Vector3D(cx, cy, cz, coordinate_system='cartesian')",
"def compute_angle(v1, v2):\n cosang = np.dot(v1, v2)\n sinang = la.norm(np.cross(v1, v2))\n angle = np.arctan2(sinang, cosang)\n return angle",
"def angle_between(v1, v2):\n v = np.array(v1)\n w = np.array(v2)\n\n norm_v = norm(v)\n norm_w = norm(w)\n\n cos_angle = np.around(np.dot(v, w) / norm_v / norm_w, PRECISION)\n\n if not -1 <= cos_angle <= 1:\n return None\n else:\n return np.around(np.arccos(cos_angle) * 360 / 2 / np.pi, PRECISION)",
"def calcul_angle(point1, point2, point3):\n \n x1,y1,z1=point1\n x2,y2,z2=point2\n x3,y3,z3=point3\n \n vec1=[x1-x2, y1-y2, z1-z2]\n vec2=[x3-x2, y3-y2, z3-z2]\n\n return calcul_angle_vector(vec1, vec2)",
"def getAngle(v1,v2,prec=1E-6):\n \n return(math.acos((np.dot(v1,v2))/np.linalg.norm(v1)/np.linalg.norm(v2)))",
"def get_angle(a: Keypoint, b: Keypoint, c: Keypoint) -> float:\n # get a vector with origin in (0,0) from points a and b by substracting Point a from Point b\n vector_a = keypoint_to_vector(a, b)\n vector_c = keypoint_to_vector(c, b)\n # https://de.wikipedia.org/wiki/Skalarprodukt => winkel phi = arccos(...)\n phi = np.arccos(np.dot(vector_a, vector_c) / (np.linalg.norm(vector_a) * np.linalg.norm(vector_c)))\n angle_left_opening = np.cross(vector_a, vector_c) < 0\n return phi if angle_left_opening else -phi",
"def get_uvcircle(Grid):\n \n# center of circulation\n loc=-67.5;lac=41.5; \n dx=(Grid['lonc']-loc)*Grid['coslatc']\n dy=(Grid['latc']-lac)\n di=np.sqrt(dx*dx+dy*dy)\n an=np.angle(dx+1j*dy)\n# velocity is linearly increasing with distance \n# 0.1 m/s at 1 deg distance away from center \n# cyclonic gyre \n u=-0.1*di*np.sin(an)\n v= 0.1*di*np.cos(an)\n# adjust the velocity so that the rotation will be perfect \n# on lon-lat plane\n u=u*Grid['coslatc']/np.cos(lac*np.pi/180) \n \n return u,v",
"def get_vertical_vector(q):\n P0, P1, P2, P3 = q\n P0_up = copy.deepcopy(P0)\n P0_up.depth = P0_up.depth - 1.0\n p0 = Vector.fromPoint(P0) # fromPoint converts to ECEF\n p1 = Vector.fromPoint(P0_up)\n v1 = (p1 - p0).norm()\n return v1",
"def get_curvature(self, u):\n\n # Compute the curve derivatives\n u = np.asarray(u)\n dC, ddC = self.compute_nurbs_derivatives(self.P, self.W, self.p, self.U, u, up_to_order=2)[[1, 2], ...]\n\n # Compute the curvature\n if self.ndim == 2:\n dC = np.concatenate((dC, np.zeros((1, np.asarray(u).size))), axis=0)\n ddC = np.concatenate((ddC, np.zeros((1, np.asarray(u).size))), axis=0)\n numerator = np.sum(np.cross(ddC, dC, axisa=0, axisb=0, axisc=0) ** 2, axis=0) ** (1 / 2)\n denominator = (np.sum(dC ** 2, axis=0)) ** (3 / 2)\n curvature = (numerator / denominator)\n\n elif self.ndim == 3:\n numerator = np.sum(np.cross(ddC, dC, axisa=0, axisb=0, axisc=0) ** 2, axis=0) ** (1 / 2)\n denominator = (np.sum(dC ** 2, axis=0)) ** (3 / 2)\n curvature = numerator / denominator\n\n else: raise Exception(\"The number of dimensions must be 2 or 3\")\n\n return curvature",
"def point_at(self, u, v, world=True):\n u = u * pi\n v = v * PI2\n x = self.radius * cos(u) * sin(v)\n y = self.radius * sin(u) * sin(v)\n z = self.radius * cos(v)\n point = Point(x, y, z)\n if world:\n point.transform(self.transformation)\n return point",
"def xy_rotation(vector,theta):\r\n R = np.array([[np.cos(theta), -np.sin(theta),0],\r\n [np.sin(theta), np.cos(theta),0],\r\n [0,0,1]\r\n ])\r\n return np.dot(R,vector)",
"def vc_tang_u(Xcp,Ycp,Zcp,gamma_t=-1,R=1,polar_out=True,epsilon=0):\n EPSILON_AXIS=1e-7; # relative threshold for using axis formula\n\n # --- Main corpus\n Xcp=np.asarray(Xcp)\n Ycp=np.asarray(Ycp)\n Zcp=np.asarray(Zcp)\n if Xcp.shape==(0,):\n if polar_out:\n return np.array([]), np.array([])\n else:\n return np.array([]),np.array([]),np.array([]),\n\n r = np.sqrt(Xcp ** 2 + Ycp ** 2)\n z = Zcp\n ur = np.full(r.shape,np.nan)\n uz = np.full(r.shape,np.nan)\n # Enforcing axis formula : v_z=-Gamma/(2) *( 1 + z / sqrt(R^2+z^2))\n Iz = r < (EPSILON_AXIS * R)\n ur[Iz] = 0\n uz[Iz] = gamma_t/2 * (1 + z[Iz] / np.sqrt(z[Iz]** 2 + R**2))\n\n # --- From this point on, variables have the size of ~Iz..\n bnIz = np.logical_not(Iz)\n r = r[bnIz]\n z = z[bnIz]\n\n # Eliptic integrals\n if epsilon==0:\n k_2 = 4 * r * R / ((R + r)**2 + z**2)\n k0_2 = 4 * r * R/ ((R + r)**2 )\n else:\n epsilon2= r*0+epsilon**2\n epsilon2[z<0]=0 # No regularization when z<0 # TODO\n k_2 = 4 * r * R / ((R + r)**2 + z**2 + epsilon2)\n k0_2 = 4 * r * R/ ((R + r)**2 + epsilon2)\n k = np.sqrt(k_2)\n EE = ellipe(k_2)\n KK = ellipk(k_2)\n # PI = ellippi(k0_2,k_2)\n PI = ellipticPiCarlson(k0_2,k_2)\n # --- Special values\n PI[PI==np.inf]==0\n PI[r==R]=0 ; # when r==R, PI=0 TODO, check\n KK[KK==np.inf]=0 ; # when r==R, K=0 TODO, check\n # ---\n ur[bnIz] = -gamma_t/(2*np.pi) * np.multiply(np.sqrt(R/r) , np.multiply((2-k_2)/k,KK) - np.multiply(2.0/k, EE))\n # Term 1 has a singularity at r=R, # T1 = (R-r + np.abs(R-r))/(2*np.abs(R-r))\n T1=np.zeros(r.shape) \n T1[r==R] = 1/2\n T1[r<R] = 1\n if (epsilon!=0):\n # TODO, more work needed on regularization\n epsilon2= r*0+epsilon**2\n b=z>=0\n T1[b]=1/2*(1 + (R-r[b])*np.sqrt(1+epsilon2[b]/(R+r[b])**2)/np.sqrt((R-r[b])**2 +epsilon2[b]))\n uz[bnIz] = gamma_t/2*( T1 + np.multiply(np.multiply(z,k) / (2 * np.pi * np.sqrt(r * R)),(KK + np.multiply((R - r)/(R + r),PI))))\n \n if polar_out:\n return ur,uz\n else:\n psi = np.arctan2(Ycp,Xcp) ;\n ux=ur*np.cos(psi)\n uy=ur*np.sin(psi)\n return ux,uy,uz",
"def d(u, v):\r\n\tdiff = u-v\r\n\treturn diff.dot(diff)",
"def parametrized_circle(point_a, point_b, point_c, theta):\n radius, center = shortest_line_to_point(point_a, point_b, point_c)\n # print'center, radius \\n', center, radius\n center_axis = np.subtract(point_a, point_b)\n # print 'center axis %s , radius %s, center %s' % (center_axis, radius, center)\n # center_axis dot <1,1,z> = 0 returns perp vector\n in_plane = norm_vect(np.subtract(point_c, center))\n perp_1 = np.cross(center_axis, in_plane)\n perp_2 = np.cross(center_axis, perp_1)\n # print 'perp dick', perp_1, perp_2\n # norm perpendicular vectors\n perp_1 = norm_vect(perp_1)\n perp_2 = norm_vect(perp_2)\n if -1e-6 > np.dot(perp_1, perp_2) > 1e-6 or -1e-6 > (np.dot(perp_1, center_axis)) > 1e-6 or \\\n -1e-6 > np.dot(perp_2, center_axis) > 1e-6:\n print 'not perpendicular'\n # print np.dot(perp_1, perp_2), np.dot(perp_1, center_axis), np.dot(perp_2, center_axis)\n x = center[0] + (radius * math.cos(theta) * perp_2[0]) + (radius * math.sin(theta) * perp_1[0])\n y = center[1] + (radius * math.cos(theta) * perp_2[1]) + (radius * math.sin(theta) * perp_1[1])\n z = center[2] + (radius * math.cos(theta) * perp_2[2]) + (radius * math.sin(theta) * perp_1[2])\n return [x, y, z]",
"def angle_between_vectors(x, y):\n first_step = abs(x[0] * y[0] + x[1] * y[1] + x[2] * y[2]) / (\n np.sqrt(x[0]**2 + x[1]**2 + x[2]**2) *\n np.sqrt(y[0]**2 + y[1]**2 + y[2]**2))\n second_step = np.arccos(first_step)\n return (second_step)",
"def cross(self, v):\n if (len(self.mV) != 3) or (len(v) != 3):\n raise IndexError('Cross product is only for 2 3-vectors.')\n\n (x1, y1, z1) = (self.mV[0], self.mV[1], self.mV[2])\n (x2, y2, z2) = (v[0], v[1], v[2])\n x = y1 * z2 - y2 * z1\n y = z1 * x2 - z2 * x1\n z = x1 * y2 - x2 * y1\n return Vector(x, y, z)",
"def plane_point_side_v3(p: np.ndarray, v: np.ndarray) -> Any:\n return p[:3].dot(v) + p[3]",
"def casadi_ode(self, t, x, u, w):\n v = u[0]\n thetadot = u[1]\n theta = x[2]\n f = cs.vertcat(v * cs.cos(theta), v * cs.sin(theta), thetadot)\n if self.use_nonlinear_noise_model:\n w_vec = cs.vertcat(self.scale[0]*(cs.cos(theta) * w[0] - cs.sin(theta) * w[1]),\n self.scale[1]*(cs.sin(theta) * w[0] + cs.cos(theta) * w[1]),\n self.scale[2]*(v * w[2]))\n else:\n w_vec = w\n return f + w_vec",
"def vector_diff_angle(v, w):\n if len(v) == 2:\n return vector_angle(v) - vector_angle(w)\n else:\n # TODO: see http://www.euclideanspace.com/maths/algebra/\n # vectors/angleBetween/\n raise NotImplementedError()",
"def angle_between_vectors(vect_ref, vect):\n\n c = np.dot(vect_ref.T, vect) / (np.linalg.norm(vect_ref) * np.linalg.norm(vect))\n angle = np.arccos(np.clip(c, -1, 1))\n\n return angle",
"def angle(v1, v2):\n return acos(np.clip(v1.dot(v2) / (length(v1) * length(v2)), -1.0, 1.0))",
"def arccurv_i(x, y, l=0):\n if l==0:\n l = len(x)\n interp_coords = ia.interparc(l, x, y)\n x_i = interp_coords[:,0]\n y_i = interp_coords[:,1]\n # Calculate curvature. \n curv = curvature(x_i, y_i)\n steps = np.sqrt(np.diff(x_i, axis=0)**2 + np.diff(y_i, axis=0)**2)[:-1]\n arc = np.cumsum(steps)\n return arc, curv",
"def cross(v1: Vector, v2: Vector) -> Vector: # Function is fucked TODO\n if len(v1.coords) != 3 or len(v2.coords) != 3:\n raise ValueError(\"Vectors have to be 3 fucking D, nøøb\")\n x = v1.y * v2.z - v1.z * v2.y\n y = v1.z * v2.x - v1.x * v2.z\n z = v1.x * v2.y - v1.y * v2.x\n return Vector(x, y, z)",
"def find_angle(p1, p2, p3):\n\n BAx = p1[0] - p2[0]\n BAy = p1[1] - p2[1]\n\n BCx = p3[0] - p2[0]\n BCy = p3[1] - p2[1]\n\n a = [BAx, BAy]\n b = [BCx, BCy]\n a_mag = np.linalg.norm(a)\n b_mag = np.linalg.norm(b)\n\n theta = np.arccos(np.dot(a, b) / (a_mag * b_mag))\n\n return math.degrees(theta)",
"def compute_viewpoint(self, box):\n x, y, z = self.compute_ray(box)\n theta = math.degrees(math.atan2(z, x))\n phi = math.degrees(math.atan2(y, math.hypot(x, z)))\n return theta, phi",
"def get_angle(a, b, c):\n\n # Law of cosines:\n # C = acos((a^2 + b^2 - c^2) / (2ab))\n return math.acos((a * a + b * b - c * c) / (2 * a * b))",
"def cross(v1: Vec2, v2: Vec2) -> float:\n return v1.x * v2.x + v1.y * v2.y",
"def angle_of_vector(vector):\n z = complex(*vector[:2])\n if z == 0:\n return 0\n return np.angle(complex(*vector[:2]))",
"def cartesian2spherical(v):\n theta = np.arcsin(v[2]) \n phi = np.arctan2(v[1], v[0])\n \n return [theta, phi]",
"def cross_z(self):\n return self.v.cross(Vector((0, 0, 1)))",
"def acc(x: float, v: float, t: float) -> float:\n return -k*v - np.sin(x) + c*np.cos(omega*t)",
"def get_circle(a, b, c):\n vec = [a[0]**2 + a[1]**2, b[0]**2 + b[1]**2, c[0]**2 + c[1]**2]\n x_mat = [vec, [a[1], b[1], c[1]], [1]*3]\n y_mat = [vec, [a[0], b[0], c[0]], [1]*3]\n d_mat = [[a[0], b[0], c[0]], [a[1], b[1], c[1]], [1] * 3]\n d = 2 * det(d_mat)\n x = 1 / d * det(x_mat)\n y = -1 / d * det(y_mat)\n center = [x, y]\n #r = norm(center - a)\n r = norm([center[0]-a[0], center[1]-a[1]])\n return center, r",
"def angle(v1: Vector, v2: Vector) -> float:\n return math.degrees(math.acos((v1 * v2) / (v1.length() * v2.length())))",
"def calculate_vector_angle(vector_1, vector_2):\n dot = dot_product(vector_1, vector_2)\n cos_angle = float(dot / (two_norm(vector_1) * two_norm(vector_2)))\n # Buffer for floating point errors\n if 1.2 > cos_angle > 1:\n cos_angle = 1\n elif -1.2 < cos_angle < -1:\n cos_angle = -1\n elif -1.2 > cos_angle or 1.2 < cos_angle:\n raise KeypointError(\"Ratio for angle is outside of the domain.\")\n if cos_angle > 0:\n multiplier = 1\n else:\n multiplier = -1\n angle_of_interest = (180 - math.degrees(math.acos(cos_angle))) * multiplier\n return angle_of_interest",
"def uVectNorm(x1,y1,z1, # P\n x2,y2,z2, # Q\n x3,y3,z3): # R\n p1 = np.array([x1,y1,z1])\n p2 = np.array([x2,y2,z2])\n p3 = np.array([x3,y3,z3])\n\n v1 = p3-p1\n v2 = p2-p1\n\n cp = np.cross(v1,v2)\n a,b,c = cp\n\n d = np.dot(cp, p3)\n\n print(a,b,c)",
"def z_to_vector(vector):\n norm = np.linalg.norm(vector)\n if norm == 0:\n return np.identity(3)\n v = np.array(vector) / norm\n phi = np.arccos(v[2])\n if any(v[:2]):\n #projection of vector to unit circle\n axis_proj = v[:2] / np.linalg.norm(v[:2])\n theta = np.arccos(axis_proj[0])\n if axis_proj[1] < 0:\n theta = -theta\n else:\n theta = 0\n phi_down = np.array([\n [np.cos(phi), 0, np.sin(phi)],\n [0, 1, 0],\n [-np.sin(phi), 0, np.cos(phi)]\n ])\n return np.dot(rotation_about_z(theta), phi_down)",
"def arc_points_between_vectors(x, y, z, v1, v2, angle, nb_points):\n arc_origin = np.array([x, y, z])\n arc_points = []\n for t in np.linspace(0, 1, nb_points):\n # slerp formula (https://en.wikipedia.org/wiki/Slerp) between v1 vector and v2 vector\n arc_points.append(\n sin((1 - t) * angle) / sin(angle) * v1 + sin(t * angle) / sin(angle) * v2 + arc_origin)\n\n return np.array(arc_points)",
"def ncross2(u, v):\n return sq2(u) * sq2(v) - dot2(u, v) ** 2"
] | [
"0.7025404",
"0.68344337",
"0.6752673",
"0.66578203",
"0.64704037",
"0.646253",
"0.6455052",
"0.63939893",
"0.6362043",
"0.6336837",
"0.6326872",
"0.6326872",
"0.63046926",
"0.62465835",
"0.62256324",
"0.62221944",
"0.6217011",
"0.61493486",
"0.6130122",
"0.61051524",
"0.60816574",
"0.6061429",
"0.6042997",
"0.6018802",
"0.60052407",
"0.60010254",
"0.5981722",
"0.595335",
"0.5947444",
"0.5930505",
"0.5904048",
"0.59002566",
"0.5886499",
"0.58804196",
"0.5878642",
"0.5863893",
"0.5859671",
"0.58583516",
"0.5833852",
"0.58176714",
"0.58030427",
"0.5802732",
"0.5787054",
"0.57826877",
"0.5777993",
"0.5773641",
"0.5771174",
"0.5738647",
"0.57352656",
"0.57249194",
"0.57227486",
"0.5720505",
"0.5716297",
"0.5714798",
"0.57091296",
"0.5704022",
"0.5701588",
"0.5697037",
"0.5683566",
"0.5673669",
"0.56729895",
"0.56664455",
"0.5661221",
"0.56601584",
"0.56600237",
"0.565384",
"0.565093",
"0.5639183",
"0.56360257",
"0.56248796",
"0.56030434",
"0.5600239",
"0.55955493",
"0.5594697",
"0.5588833",
"0.55879796",
"0.55876464",
"0.55810547",
"0.55801815",
"0.5576641",
"0.55598485",
"0.5557113",
"0.5548671",
"0.55476314",
"0.5539031",
"0.5511856",
"0.55099404",
"0.5507609",
"0.55030936",
"0.54997677",
"0.54965127",
"0.5491359",
"0.54902637",
"0.5488865",
"0.5488193",
"0.5482291",
"0.5468106",
"0.5463422",
"0.545605",
"0.54504365",
"0.54382586"
] | 0.0 | -1 |
a is the frequency in the x direction, b is the frequency in the y direction | def __init__(self, a, b):
self.a = a
self.b = b | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def freq_response(b, a=1., n_freqs=1024, sides='onesided'):\r\n # transitioning to scipy freqz\r\n real_n = n_freqs // 2 + 1 if sides == 'onesided' else n_freqs\r\n return sig.freqz(b, a=a, worN=real_n, whole=sides != 'onesided')",
"def freqz(b, a=1, worN=None):\n if worN is None or isinstance(worN, int):\n N = worN or 512\n ws = [mp.pi * mp.mpf(j) / N for j in range(N)]\n else:\n ws = worN\n\n # This assumes b and a contain real values.\n try:\n len(b)\n except TypeError:\n b = [b]\n b = [mp.mpf(t) for t in b]\n try:\n len(a)\n except TypeError:\n a = [a]\n a = [mp.mpf(t) for t in a]\n\n h = []\n for wk in ws:\n z = mp.exp(-1j * wk)\n hk = mp.polyval(b[::-1], z) / mp.polyval(a[::-1], z)\n h.append(hk)\n\n return ws, h",
"def iir_firstorder_coef_to_freq(b = 0.8, a = 0.2):\n f = np.arccos( (a**2/2 + a - 1) / (a - 1))\n return f",
"def freq():",
"def f_two_sample(a, b, tails=None):\r\n dfn, dfd, F = f_value(a, b)\r\n if tails == 'low':\r\n return dfn, dfd, F, f_low(dfn, dfd, F)\r\n elif tails == 'high':\r\n return dfn, dfd, F, f_high(dfn, dfd, F)\r\n else:\r\n if var(a) >= var(b):\r\n side = 'right'\r\n else:\r\n side = 'left'\r\n return dfn, dfd, F, fprob(dfn, dfd, F, side=side)",
"def cosine_sim_counters(a, b):\n union_ab = sorted((a | b).keys())\n veca = np.array([a[element] if element in a else 0 for element in union_ab])\n vecb = np.array([b[element] if element in b else 0 for element in union_ab])\n return np.dot(veca, vecb) / (np.linalg.norm(veca) * np.linalg.norm(vecb))",
"def weight_term_frequencies(a_in,b_in):\n plus_value = 2\n a_out = {}\n b_out = {} \n #print \"in weight term frequencies\" \n for(term, val) in a_in.items():\n try:\n b_val = b_in[term]\n a_out[term] = float(val)/float(val+b_val+plus_value)\n except KeyError:\n a_out[term] = float(val)/(float(val + plus_value)) \n \n for(term, val) in b_in.items():\n try:\n a_val = a_in[term]\n b_out[term] = float(val)/float(val+a_val+plus_value)\n except KeyError:\n b_out[term] = float(val)/(float(val + plus_value)) \n \n return (a_out,b_out)",
"def Calc(self, a, b, size):\n self.eq = lambda x: (60000/((b-a)/size*x+a))\n points = []\n names = [str(self.offset)]\n points.append(0)\n for j in range(1, int(size)):\n points.append(integrate.quad(self.eq,0,j)[0])\n names.append(str(points[-1]+self.offset))\n self.beatstr = ' '.join(names)\n return points",
"def probability(s, a, b):\r\n return s.cdf(b) - s.cdf(a)",
"def weight_symmetry(a, b):\n return 1 - (np.abs(a - b) / (a + b))",
"def ab2rhotheta(a, b):\n \"\"\" also : y - ax - b = 0 \"\"\"\n \"\"\" y*sin(theta) + x*cos(theta) - rho = 0 \"\"\"\n #print(\"a: %f b: %f\" % (a, b))\n theta = math.atan(a) + math.pi/2.0\n rho = b*math.sin(theta)\n #print(\"a: %f b: %f rho: %f theta: %f\" % (a, b, rho, theta))\n return (rho, theta)",
"def __bsa(self, a, b):\n try:\n if a + 1 == b:\n if a == 0:\n p_ab = q_ab = mpz(1)\n else:\n p_ab = mpz((6 * a -5) * (2 * a - 1) * (6 * a - 1))\n q_ab = mpz(a * a * a * self.C3_24)\n t_ab = p_ab * (self.A + self.B * a)\n if a & 1:\n t_ab *= -1\n else:\n m = (a + b) // 2\n p_am, q_am, t_am = self.__bsa(a, m)\n p_mb, q_mb, t_mb = self.__bsa(m, b)\n p_ab = p_am * p_mb\n q_ab = q_am * q_mb\n t_ab = q_mb * t_am + p_am * t_mb\n return [p_ab, q_ab, t_ab]\n except Exception as e:\n raise",
"def weight_term_frequencies_one(a_in,b_in):\n plus_value = 2\n a_out = {}\n for(term, val) in a_in.items():\n try:\n b_val = b_in[term]\n a_out[term] = float(val)/float(val+b_val+plus_value)\n except KeyError:\n a_out[term] = float(val)/(float(val + plus_value)) \n return a_out",
"def test_f_two_sample(self):\r\n\r\n # The expected values in this test are obtained through R.\r\n # In R the F test is var.test(x,y) different alternative hypotheses\r\n # can be specified (two sided, less, or greater).\r\n # The vectors are random samples from a particular normal distribution\r\n #(mean and sd specified).\r\n\r\n # a: 50 elem, mean=0 sd=1\r\n a = [-0.70701689, -1.24788845, -1.65516470, 0.10443876, -0.48526915,\r\n -0.71820656, -1.02603596, 0.03975982, -2.23404324, -0.21509363,\r\n 0.08438468, -0.01970062, -0.67907971, -0.89853667, 1.11137131,\r\n 0.05960496, -1.51172084, -0.79733957, -1.60040659, 0.80530639,\r\n -0.81715836, -0.69233474, 0.95750665, 0.99576429, -1.61340216,\r\n -0.43572590, -1.50862327, 0.92847551, -0.68382338, -1.12523522,\r\n -0.09147488, 0.66756023, -0.87277588, -1.36539039, -0.11748707,\r\n -1.63632578, -0.31343078, -0.28176086, 0.33854483, -0.51785630,\r\n 2.25360559, -0.80761191, 1.18983499, 0.57080342, -1.44601700,\r\n -0.53906955, -0.01975266, -1.37147915, -0.31537616, 0.26877544]\r\n\r\n # b: 50 elem, mean=0, sd=1.2\r\n b = [\r\n 0.081418743, 0.276571612, -\r\n 1.864316504, 0.675213612, -0.769202643,\r\n 0.140372825, -1.426250184, 0.058617884, -\r\n 0.819287409, -0.007701916,\r\n -0.782722020, -\r\n 0.285891593, 0.661980419, 0.383225191, 0.622444946,\r\n -0.192446150, 0.297150571, 0.408896059, -\r\n 0.167359383, -0.552381362,\r\n 0.982168338, 1.439730446, 1.967616101, -\r\n 0.579607307, 1.095590943,\r\n 0.240591302, -1.566937143, -\r\n 0.199091349, -1.232983905, 0.362378169,\r\n 1.166061081, -0.604676222, -\r\n 0.536560206, -0.303117595, 1.519222792,\r\n -0.319146503, 2.206220810, -\r\n 0.566351124, -0.720397392, -0.452001377,\r\n 0.250890097, 0.320685395, -\r\n 1.014632725, -3.010346273, -1.703955054,\r\n 0.592587381, -1.237451255, 0.172243366, -0.452641122, -0.982148581]\r\n\r\n # c: 60 elem, mean=5, sd=1\r\n c = [4.654329, 5.242129, 6.272640, 5.781779, 4.391241, 3.800752,\r\n 4.559463, 4.318922, 3.243020, 5.121280, 4.126385, 5.541131,\r\n 4.777480, 5.646913, 6.972584, 3.817172, 6.128700, 4.731467,\r\n 6.762068, 5.082983, 5.298511, 5.491125, 4.532369, 4.265552,\r\n 5.697317, 5.509730, 2.935704, 4.507456, 3.786794, 5.548383,\r\n 3.674487, 5.536556, 5.297847, 2.439642, 4.759836, 5.114649,\r\n 5.986774, 4.517485, 4.579208, 4.579374, 2.502890, 5.190955,\r\n 5.983194, 6.766645, 4.905079, 4.214273, 3.950364, 6.262393,\r\n 8.122084, 6.330007, 4.767943, 5.194029, 3.503136, 6.039079,\r\n 4.485647, 6.116235, 6.302268, 3.596693, 5.743316, 6.860152]\r\n\r\n # d: 30 elem, mean=0, sd =0.05\r\n d = [\r\n 0.104517366, 0.023039678, 0.005579091, 0.052928250, 0.020724823,\r\n -0.060823243, -0.019000890, -\r\n 0.064133996, -0.016321594, -0.008898334,\r\n -0.027626992, -0.051946186, 0.085269587, -\r\n 0.031190678, 0.065172938,\r\n -0.054628573, 0.019257306, -\r\n 0.032427056, -0.058767356, 0.030927400,\r\n 0.052247357, -\r\n 0.042954937, 0.031842104, 0.094130522, -0.024828465,\r\n 0.011320453, -0.016195062, 0.015631245, -0.050335598, -0.031658335]\r\n\r\n a, b, c, d = map(array, [a, b, c, d])\r\n self.assertEqual(map(len, [a, b, c, d]), [50, 50, 60, 30])\r\n\r\n # allowed error. 
This big, because results from R\r\n # are rounded at 4 decimals\r\n error = 1e-4\r\n\r\n self.assertFloatEqual(f_two_sample(a, a), (49, 49, 1, 1), eps=error)\r\n self.assertFloatEqual(f_two_sample(a, b), (49, 49, 0.8575, 0.5925),\r\n eps=error)\r\n self.assertFloatEqual(f_two_sample(b, a), (49, 49, 1.1662, 0.5925),\r\n eps=error)\r\n self.assertFloatEqual(f_two_sample(a, b, tails='low'),\r\n (49, 49, 0.8575, 0.2963), eps=error)\r\n self.assertFloatEqual(f_two_sample(a, b, tails='high'),\r\n (49, 49, 0.8575, 0.7037), eps=error)\r\n self.assertFloatEqual(f_two_sample(a, c),\r\n (49, 59, 0.6587, 0.1345), eps=error)\r\n # p value very small, so first check df's and F value\r\n self.assertFloatEqualAbs(f_two_sample(d, a, tails='low')[0:3],\r\n (29, 49, 0.0028), eps=error)\r\n assert f_two_sample(d, a, tails='low')[3] < 2.2e-16 # p value\r",
"def _heuristic(a, b):\n return (b[0] - a[0]) ** 2 + (b[1] - a[1]) ** 2",
"def convolve(a, b):\r\n\r\n # --->>> Put your code here.\r\n \r\n # offset for next step\r\n offset = a.offset + b.offset;\r\n \r\n # init the distri_list\r\n distri_list = []\r\n \r\n # calculate several distributions according to the number of a\r\n for a_value in a.values:\r\n distri_value = []\r\n for b_value in b.values:\r\n distri_value.append(a_value*b_value)\r\n distri_list.append(Distribution(offset, distri_value))\r\n offset += 1\r\n \r\n return Distribution.sum(distri_list)",
"def number_density(self, a, b):\n\n if no_mpmath & (self.alpha <= 0):\n msg = ('mpmath packlage must be installed in order',\n 'to perform this calculation for alpha<=0.')\n raise ValueError(msg)\n \n if not isinstance(a, float):\n msg = ('`a` argument must be a float.')\n raise ValueError(msg)\n if not isinstance(b, float):\n msg = ('`b` argument must be a float.')\n raise ValueError(msg)\n\n a = a / self.x0\n b = b / self.x0\n\n l = float(gammainc(self.alpha + 1, a))\n r = float(gammainc(self.alpha + 1, b))\n return (l - r)*self.phi0",
"def stavenga1993_band_calculation(\n x: np.ndarray, a: Union[float, np.ndarray], b: Union[float, np.ndarray]\n) -> np.ndarray:\n return np.exp(-a * x**2 * (1 + b * x + 3 / 8 * (b * x) ** 2))",
"def length(a, b):\n return sqrt((a[0] - b[0]) * (a[0] - b[0]) + (a[1] - b[1]) * (a[1] - b[1]))",
"def aitemfreq(a):\r\n scores = pstats.aunique(a)\r\n scores = N.sort(scores)\r\n freq = N.zeros(len(scores))\r\n for i in range(len(scores)):\r\n freq[i] = N.add.reduce(N.equal(a,scores[i]))\r\n return N.array(pstats.aabut(scores, freq))",
"def bspe(a, b):\n if b-a == 1:\n return MPZ_ONE, MPZ(b)\n m = (a+b)//2\n p1, q1 = bspe(a, m)\n p2, q2 = bspe(m, b)\n return p1*q2+p2, q1*q2",
"def f_value(a, b):\r\n if not any(a) or not any(b) or len(a) <= 1 or len(b) <= 1:\r\n raise ValueError(\"Vectors should contain more than 1 element\")\r\n F = var(a) / var(b)\r\n dfn = len(a) - 1\r\n dfd = len(b) - 1\r\n return dfn, dfd, F",
"def number_density(self, a, b):\n\n if no_mpmath & (self.alpha <= 0):\n msg = ('mpmath packlage must be installed in order',\n 'to perform this calculation for alpha<=0.')\n raise ValueError(msg)\n \n if not isinstance(a, float):\n msg = ('`a` argument must be a float.')\n raise ValueError(msg)\n if not isinstance(b, float):\n msg = ('`b` argument must be a float.')\n raise ValueError(msg)\n\n x0 = 10**(self.x0)\n a = 10**(a) / x0\n b = 10**(b) / x0\n l = float(gammainc(self.alpha + 1, a))\n r = float(gammainc(self.alpha + 1, b))\n return (l - r)*self.phi0",
"def compute_f2a_f2b(self):\n try:\n self.Frac2a = self.data['AB Counts'].sum(\n ) / self.data['A Counts'].sum()\n self.Frac2b = self.data['AB Counts'].sum(\n ) / self.data['B Counts'].sum()\n except BaseException:\n print(\"Ran into error computing AB/A and AB/B. Continuing\")",
"def interpolate(a, b):\n x = 1\n i = 1\n f = b[0]\n while i < n:\n b = b*(x-a[i])\n i += 1\n f += (b[i] - f(a[i]))/a[i]) * b\n return f",
"def distance(a, b):\n ax, ay = a\n bx, by = b\n dx = bx - ax\n dy = by - ay\n return (abs(dx) + abs(dy) + abs(dx - dy)) / 2",
"def preprocess(a, b):\r\n m = len(a)\r\n n = len(b)\r\n p1 = m + n - 1 # The maximum length of the filtered signal\r\n # Find p such that p >= p1 is a power of 2\r\n p = int(np.power(2, np.ceil(np.log2(p1))))\r\n a = np.append(a, np.zeros(p - a.size))\r\n b = np.append(b, np.zeros(p - b.size))\r\n return a, b",
"def dist(a,b):\n dist = 0.\n for i in range(len(a)):\n dist += (b[i]-a[i])**2.\n\n dist = dist**.5\n return dist",
"def analyze2(ys, freqs, ts):",
"def get_chi2(a, b):\n off = (a - b)**2\n return np.sqrt(np.sum(off)) / b.sum()",
"def bins_match (a, b):\n return 0 == (\n np.sum ((a.xbins - b.xbins)**2)\n + np.sum ((a.ybins - b.ybins)**2) )",
"def extended_euclidean_algorithm(a, b):\n if a == 0: return b, 0, 1\n else:\n g, y, x = extended_euclidean_algorithm(b % a, a)\n return g, x - (b // a) * y, y",
"def adem_2(a, b):\r\n if b == 0:\r\n return {(a,) : 1}\r\n if a == 0:\r\n return {(b,) : 1}\r\n if a >= 2*b:\r\n return {(a, b) : 1}\r\n result = {}\r\n for j in range(1 + a//2):\r\n if combinatorics.binomial_2(b-j-1, a-2*j) == 1:\r\n if j == 0:\r\n result[(a+b,)] = 1\r\n else:\r\n result[(a+b-j, j)] = 1\r\n return result",
"def egcd(a, b):\n x0, x1, y0, y1 = 0, 1, 1, 0\n while a != 0:\n q, b, a = b // a, a, b % a\n y0, y1 = y1, y0 - q * y1\n x0, x1 = x1, x0 - q * x1\n return b, x0, y0",
"def regress(a, b):\n\t\n\tif a.size != b.size:\n\t\traise RuntimeError(\"a and b must be the same size\")\n\t\n\tabar = a.mean()\n\tbbar = b.mean()\n\n\tb1 = sum( (a - abar)*b )/sum( (a - abar)**2 )\n\tb0 = bbar - b1 * abar\n\t\n\tsst = sum( (b - bbar)**2 )\n\tbhat = b0 + b1*a\n\tsse = sum( (b - bhat)**2 )\n\n\tr2 = 1 - (sse/sst)\n\n\treturn (b0, b1, r2)",
"def dist2D(a, b):\n return ((a[0]-b[0])**2 + (a[1]-b[1])**2)**0.5",
"def dist(a, b):\n return np.sum((a-b)**2.0)**.5",
"def map_minus_one_to_one(x, a, b):\n assert b > a\n s = 2./(b - a)\n t = (a+b)/(a-b)\n y = s*x + t\n y[y>1] = 1\n y[y<-1] = -1\n return y",
"def fast_hist(a, b, n):\n # print(n)\n # print(b.max())\n k = (a >= 0) & (a < n)\n\n\n # a = np.floor(a)\n # a = a.astype(np.int)\n # print(a.max())\n # print(a.dtype)\n # print(a.shape)\n # print(type(a))\n\n return np.bincount((n * a[k].astype(int) + b[k]).astype(int), minlength=n ** 2).reshape(n, n)",
"def f(x, a, d1, d2):\n A = 10*a\n D1 = 10*d1\n D2 = 10*d2\n y = e * (frequency) * (1e9) * ( np.exp(-np.exp(-A*x+D1)) + np.exp(-np.exp(-A*x+D2)) + N)\n return y",
"def eucl_alg(a, b):\n if a == 0:\n return b, 0, 1\n else:\n g, x, y = eucl_alg(b % a, a)\n return g, y - (b // a) * x, x",
"def entropy(a, b):\n with mp.extradps(5):\n a, b = _validate_a_b(a, b)\n return (_fun.logbeta(a, b)\n - (a - 1)*mp.psi(0, a)\n - (b - 1)*mp.psi(0, b)\n + (a + b - 2)*mp.psi(0, a + b))",
"def division(self, x,y,a,b):\n real = (a*x + b*y)/(a*a + b*b)\n img = (a*y - b*x)/(a*a + b*b)\n return real, img",
"def number_density(self, a, b):\n\n if no_mpmath & (self.alpha <= 0):\n msg = ('mpmath packlage must be installed in order',\n 'to perform this calculation for alpha<=0.')\n raise ValueError(msg)\n \n if not isinstance(a, float):\n msg = ('`a` argument must be a float.')\n raise ValueError(msg)\n if not isinstance(b, float):\n msg = ('`b` argument must be a float.')\n raise ValueError(msg)\n\n x0 = 10**(-0.4*self.M0)\n a = 10**(-0.4*a) / x0\n b = 10**(-0.4*b) / x0\n l = float(gammainc(self.alpha + 1, a))\n r = float(gammainc(self.alpha + 1, b))\n return (l - r)*self.phi0",
"def dist(a,b): # compute distance between two points a & b\n return mag(sub(a,b))",
"def frequencyofints(a ,memeff = True) :\n\tif memeff :\n\t\ta.sort()\n\t\t#get unique list of values:\n\t\tu = np.unique(a) \n\n\t\tfreq = np.zeros(len(u), dtype = int) \n\t\tj = 0 \n\t\tfor i,ue in enumerate(u) :\n\t\t\twhile u[i] == a[j] :\n\t\t\t\tfreq[i] += 1\n\t\t\t\tif j == len(a) - 1:\n\t\t\t\t\tbreak\n\t\t\t\tj += 1 \n\t\treturn u , freq\t\t\t\n\t\t\n\telse:\n\t\tx = np.bincount(a) \n\t\tvals = np.nonzero(x) [0]\n\t\tfreq = x[vals]\n\n\treturn vals, freq",
"def cosin_sim_pairs(a, b):\n wordsA = set(a.keys())\n wordsB = set(b.keys())\n inter = wordsA.intersection(wordsB)\n if(len(inter) == 0):\n return 0.0\n aa, bb, ab = 0, 0, 0\n for k in inter:\n aa += a[k] ** 2\n bb += b[k] ** 2\n ab += a[k] * b[k]\n for k in wordsA - inter:\n aa += a[k] ** 2\n for k in wordsB - inter:\n bb += b[k] ** 2\n return ab / float(math.sqrt(aa) * math.sqrt(bb))",
"def dist(a, b):\n x0, y0 = a # Destructuring assignment\n x1, y1 = b\n\n return math.sqrt((x1 - x0)**2 + (y1 - y0)**2)",
"def _histogram_intersection_distance(a, b):\n # branching version\n #return np.vstack((a, b)).min(axis=0).sum()\n\n # Non-branching version\n # noinspection PyUnresolvedReferences\n return (a + b - np.abs(a - b)).sum() * 0.5",
"def _get_a(b):\n\t\tbval = (b - 6.0)\n\t\t_a = np.exp(-1.6805 - 1.7139*bval + 0.8155*bval**2 - 0.6667*bval**3)*r0\n\t\treturn _a",
"def directed_Hausdorff_hyperbox(b1,b2): \n return max(0,np.max(np.hstack((b1.u-b2.u,b2.l-b1.l))))",
"def calculate(self, b):\n self.n_steps = self.n_steps + 1\n self.length = b.length\n self.natoms = b.natoms\n for i in range(0,self.natoms-1):\n for j in range(i+1,self.natoms):\n rij = (b.atoms[i].xyz - b.atoms[j].xyz)\n rij = rij - self.pbc_correction(rij)\n mag_rij = la.norm(rij)\n bin_no = int(round(mag_rij/self.dr))\n if bin_no <= self.n_max:\n self.gr[bin_no] = self.gr[bin_no] + 1",
"def normalise(a, b):\n dy = b[1] - a[1]\n dx = b[0] - a[0]\n vector = (dy ** 2 + dx ** 2) ** 0.5\n # Normalise, round and cast to int\n dx = int(round(dx / vector))\n dy = int(round(dy / vector))\n \n return (dx, dy)",
"def dist(a, b):\n return math.sqrt(pow(a[0] - b[0], 2) + pow(a[1] - b[1], 2))",
"def heuristic_2(a: str, b: str) -> float:\n # generate term-document matrices\n if get_intro(a) == \"\" or get_intro(b) == \"\":\n return 2\n else:\n corpus = [get_intro(a), get_intro(b)]\n vect = TfidfVectorizer()\n mat = vect.fit_transform(corpus)\n # return cosine similarity\n return abs(1 - cosine_similarity(mat[0:1], mat)[0][1]) * 2",
"def angle(a,b):\n return acos(np.dot(a,b)/np.linalg.norm(a)/np.linalg.norm(b))",
"def hamming_dist(a_b, b_b):\n return sum(bin(a_b[n] ^ b_b[n]).count('1') for n in range(len(a_b)))",
"def map_zero_one(x, a, b):\n assert b > a\n s = 1./(b - a)\n t = a/(a-b)\n y = s*x + t\n y[y>1] = 1\n y[y<0] = 0\n return y",
"def gamma(x1, x2):\r\n gamma1 = math.exp(a / (1 + a * x1/(b * x2)) ** 2.0) \r\n gamma2 = math.exp(b / (1 + b * x2/(a * x1)) ** 2.0)\t\t\r\n return gamma1, gamma2",
"def get_offset(a, b):\r\n\tdiff_y = b[0] - a[0]\r\n\tdiff_x = b[1] - a[1]\r\n\toffset_x = 0\r\n\toffset_y = 0\r\n\r\n\tif diff_y > 0:\r\n\t\toffset_y = 1\r\n\telif diff_y < 0:\r\n\t\toffset_y = -1\r\n\r\n\tif diff_x > 0:\r\n\t\toffset_x = 1\r\n\telif diff_x < 0:\r\n\t\toffset_x = -1\r\n\r\n\treturn (offset_x, offset_y)",
"def get_score(self, a, b):\n ### FILL IN ###",
"def lcmu(a, b):\n return (abs(a)*abs(b))//gcdi(a, b)",
"def freq(self) -> int:",
"def arctan2(a, b):",
"def derive_count(freq1: typing.List[int], freq2: typing.List[int]) -> int:\n count = 0\n for i in range(26):\n count += min(freq1[i], freq2[i])\n return count",
"def distance(a,b):\n return np.sqrt( (x(a)-x(b))**2 + (y(a)-y(b))**2 )",
"def _maping(x,y,l,a):\n newx = (x**2 *(l* ((x**2 + y**2)**(a/2) - 1) + 2) - l * y**2 *((x**2 + y**2)**(a/2) - 1))/(x**2 + y**2) \n newy = (2 * x* y *(l* ((x**2 + y**2)**(a/2) - 1) + 1))/(x**2 + y**2)\n return newx, newy",
"def intersection(a, b):\n x = max(a[0],b[0])\n y = max(a[1],b[1])\n w = min(a[2],b[2]) - x\n h = min(a[3],b[3]) - y\n \n if h<0 or w<0 :\n return 0\n \n return h*w",
"def subs_at_point(a,b,x,y):\n\tmax_i = x + y\n\tsubs = 0\n\t# Counts left -> right, top -> bottom\n\tfor j in range(b, x+y):\n\t\t# stops when it can't go down anymore\n\t\tif not valid(a,j,x,y): return subs\n\t\tfor i in range(a,max_i):\n\t\t\tif valid(i,j,x,y): subs += 1\n\t\t\telse:\n\t\t\t\tmax_i = min(max_i, i)\n\t\t\t\tbreak",
"def extendedGcd(a, b):\n if b == 0:\n return a, 1, 0\n\n x1 = 0\n x2 = 1\n y1 = 1\n y2 = 0\n\n while b != 0:\n q = a // b\n r = a - q * b\n x = x2 - q * x1\n y = y2 - q * y1\n\n a = b\n b = r\n x2 = x1\n x1 = x\n y2 = y1\n y1 = y\n\n if a < 0:\n return -a, -x2, -y2\n\n return a, x2, y2",
"def aks_2samp (data1,data2):\r\n j1 = 0 # N.zeros(data1.shape[1:]) TRIED TO MAKE THIS UFUNC-LIKE\r\n j2 = 0 # N.zeros(data2.shape[1:])\r\n fn1 = 0.0 # N.zeros(data1.shape[1:],N.float_)\r\n fn2 = 0.0 # N.zeros(data2.shape[1:],N.float_)\r\n n1 = data1.shape[0]\r\n n2 = data2.shape[0]\r\n en1 = n1*1\r\n en2 = n2*1\r\n d = N.zeros(data1.shape[1:],N.float_)\r\n data1 = N.sort(data1,0)\r\n data2 = N.sort(data2,0)\r\n while j1 < n1 and j2 < n2:\r\n d1=data1[j1]\r\n d2=data2[j2]\r\n if d1 <= d2:\r\n fn1 = (j1)/float(en1)\r\n j1 = j1 + 1\r\n if d2 <= d1:\r\n fn2 = (j2)/float(en2)\r\n j2 = j2 + 1\r\n dt = (fn2-fn1)\r\n if abs(dt) > abs(d):\r\n d = dt\r\n# try:\r\n en = math.sqrt(en1*en2/float(en1+en2))\r\n prob = aksprob((en+0.12+0.11/en)*N.fabs(d))\r\n# except:\r\n# prob = 1.0\r\n return d, prob",
"def f1(x, a, b):\n #return x**43 - b*x**42 + x**7 - x**6 * a + 84*x - 42 * b - 42 * a\n return (x**42 + 42)/(x-a) + (x**6 + 42)/(x-b)",
"def rmsd(a,b):\n\tDi=[]\n\tfor i in range(len(a)):\n\t\tD=((a[i][0]-b[i][0])**2+(a[i][1]-b[i][1])**2+(a[i][2]-b[i][2])**2)\n\t\tDi.append(D)\n\tRMSD=sqrt(0.01*fsum(Di))\n\treturn RMSD",
"def nucross(a, b):\n ev = a / np.linalg.norm(a)\n return np.linalg.norm(np.cross(ev, b))",
"def ilerp(a, b, t):\n return (t - a) / (b - a)",
"def soustraction(a,b):\n bina = [int(x) for x in bin(a)[2:]]\n binb = [int(x) for x in bin(b)[2:]]\n while len(bina) >= len(binb):\n binb = [0]+binb\n while len(bina) < len(binb)-1:\n bina = [0]+bina\n bina.reverse()\n binb.reverse()\n n = len(bina)+len(binb)\n na = len(bina)\n q = QuantumRegister(n+1, 'q')\n circ = QuantumCircuit(q)\n for i in range(na):\n if bina[i]:\n circ.x(q[i])\n for i in range(len(binb)):\n if binb[i]:\n circ.x(q[na+i])\n sub(circ, q, [q[i] for i in range(len(bina))], [q[i+na] for i in range(len(binb)-1)], q[n], q[na+len(binb)-1])\n circ_m = measure(circ, q, [i for i in range(na, n)])\n return circ_m",
"def test_baseline_sinusoidal(self, b0, x0, a, freq):\n x = np.linspace(-1, 1, 100)\n y = a * np.cos(2 * np.pi * freq * (x - x0)) + b0\n\n b0_guess = guess.constant_sinusoidal_offset(y)\n\n self.assertAlmostEqual(b0, b0_guess, delta=0.1)",
"def spearman(a, b):\n # Note: the use of enumerate(), below, replaces the construct [ (i, list[i]) for i in range(len(list)) ]\n av = [ x[0]+1 for x in sorted(enumerate(a), key=lambda x: x[1]) ]\n bv = [ x[0]+1 for x in sorted(enumerate(b), key=lambda x: x[1]) ]\n\n return pearson(av, bv)",
"def sinh(a):",
"def aulc(x, y):\n assert len(x) == len(y)\n a = 0\n # # x_min = 10 * 60 # average duration of random architecture\n x_min = 0\n x_max = x.max()\n for i in range(0,len(x)-1):\n if x[i] >= x_min:\n a += (x[i+1] - x[i])/(x_max-x_min) * y[i] \n return round(a, 3)",
"def d_ucross(a, b):\n ev = a / np.linalg.norm(a)\n return np.dot(d_unit_vector(a), d_cross(ev, b))",
"def angle_diff(self,a,b):\n self.a = self.angle_normalize(a)\n self.b = self.angle_normalize(b)\n self.d1 = a-b\n self.d2 = 2*math.pi - math.fabs(self.d1)\n if self.d1 > 0:\n self.d2 *= -1.0\n if math.fabs(self.d1) < math.fabs(self.d2):\n return self.d1\n else:\n return self.d2",
"def angle_diff(self,a,b):\n self.a = self.angle_normalize(a)\n self.b = self.angle_normalize(b)\n self.d1 = a-b\n self.d2 = 2*math.pi - math.fabs(self.d1)\n if self.d1 > 0:\n self.d2 *= -1.0\n if math.fabs(self.d1) < math.fabs(self.d2):\n return self.d1\n else:\n return self.d2",
"def sf(x, a, b):\n with mp.extradps(5):\n a, b = _validate_a_b(a, b)\n if x < 0:\n return mp.one\n if x > 1:\n return mp.zero\n return mp.betainc(a, b, x1=x, x2=1, regularized=True)",
"def index_to_single_index(a,b, resolution):\n return a*resolution+b",
"def extended_euclidean_algorithm(a, b):\n s, old_s = 0, 1\n t, old_t = 1, 0\n r, old_r = b, a\n\n while r != 0:\n quotient = old_r // r\n old_r, r = r, old_r - quotient * r\n old_s, s = s, old_s - quotient * s\n old_t, t = t, old_t - quotient * t\n\n return old_r, old_s, old_t",
"def apply(self, y, a):\n return 1 / 2.0 * np.linalg.norm(y - a) ** 2",
"def freqz_resp_list(b,a=np.array([1]),mode = 'dB',fs=1.0,Npts = 1024,fsize=(6,4)):\r\n if type(b) == list:\r\n # We have a list of filters\r\n N_filt = len(b)\r\n f = np.arange(0,Npts)/(2.0*Npts)\r\n for n in range(N_filt):\r\n w,H = signal.freqz(b[n],a[n],2*np.pi*f)\r\n if n == 0:\r\n plt.figure(figsize=fsize)\r\n if mode.lower() == 'db':\r\n plt.plot(f*fs,20*np.log10(np.abs(H)))\r\n if n == N_filt-1:\r\n plt.xlabel('Frequency (Hz)')\r\n plt.ylabel('Gain (dB)')\r\n plt.title('Frequency Response - Magnitude')\r\n\r\n elif mode.lower() == 'phase':\r\n plt.plot(f*fs,np.angle(H))\r\n if n == N_filt-1:\r\n plt.xlabel('Frequency (Hz)')\r\n plt.ylabel('Phase (rad)')\r\n plt.title('Frequency Response - Phase')\r\n\r\n elif (mode.lower() == 'groupdelay_s') or (mode.lower() == 'groupdelay_t'):\r\n \"\"\"\r\n Notes\r\n -----\r\n\r\n Since this calculation involves finding the derivative of the\r\n phase response, care must be taken at phase wrapping points \r\n and when the phase jumps by +/-pi, which occurs when the \r\n amplitude response changes sign. Since the amplitude response\r\n is zero when the sign changes, the jumps do not alter the group \r\n delay results.\r\n \"\"\"\r\n theta = np.unwrap(np.angle(H))\r\n # Since theta for an FIR filter is likely to have many pi phase\r\n # jumps too, we unwrap a second time 2*theta and divide by 2\r\n theta2 = np.unwrap(2*theta)/2.\r\n theta_dif = np.diff(theta2)\r\n f_diff = np.diff(f)\r\n Tg = -np.diff(theta2)/np.diff(w)\r\n # For gain almost zero set groupdelay = 0\r\n idx = pylab.find(20*np.log10(H[:-1]) < -400)\r\n Tg[idx] = np.zeros(len(idx))\r\n max_Tg = np.max(Tg)\r\n #print(max_Tg)\r\n if mode.lower() == 'groupdelay_t':\r\n max_Tg /= fs\r\n plt.plot(f[:-1]*fs,Tg/fs)\r\n plt.ylim([0,1.2*max_Tg])\r\n else:\r\n plt.plot(f[:-1]*fs,Tg)\r\n plt.ylim([0,1.2*max_Tg])\r\n if n == N_filt-1:\r\n plt.xlabel('Frequency (Hz)')\r\n if mode.lower() == 'groupdelay_t':\r\n plt.ylabel('Group Delay (s)')\r\n else:\r\n plt.ylabel('Group Delay (samples)')\r\n plt.title('Frequency Response - Group Delay')\r\n else:\r\n s1 = 'Error, mode must be \"dB\", \"phase, '\r\n s2 = '\"groupdelay_s\", or \"groupdelay_t\"'\r\n print(s1 + s2)",
"def distance(a, b):\n return math.sqrt((b[0]-a[0])**2 + (b[1]-a[1])**2)",
"def bhatt_distance(a, b):\n return -np.log(np.dot(b**.5, a**.5))",
"def distance(a,b): \r\n return math.sqrt((a[0] - b[0])**2 + (a[1] - b[1])**2)",
"def add_edge_length(self, a, b):\n return tuple(sum(x) for x in zip(a, b))",
"def arclength(f, a, b, tol=1e-6):\n nsteps = 1 # number of steps to compute\n oldlength = 1.0e20\n length = 1.0e10\n while abs(oldlength - length) >= tol:\n nsteps *= 2\n fx1 = f(a)\n xdel = (b - a) / nsteps # space between x-values\n oldlength = length\n length = 0\n for i in range(1, nsteps + 1):\n fx0 = fx1 # previous function value\n fx1 = f(a + i * (b - a) / nsteps) # new function value\n length += hypot(xdel, fx1 - fx0) # length of small line segment\n return length",
"def l2(x, y):\n return np.sum(((x - y) ** 2))",
"def chamfer_mean(a,b):\n #tf.print(a)\n #tf.print(a[0,0,:])\n #tf.print(b[0,0,:])\n M = pairwise_distances(a, b)\n if len(M.shape) == 2:\n M = tf.expand_dims(M,0) #[np.newaxis, :, :]\n #return tf.keras.backend.sum(tf.reduce_sum(tf.reduce_min(M, 1), 1) + tf.reduce_sum(tf.reduce_min(M, 2), 1))\n c=tf.reduce_mean(tf.reduce_min(M, 1), 1) + tf.reduce_mean(tf.reduce_min(M, 2), 1)\n #tf.print(tf.reduce_sum(c))\n return c",
"def comes_before(a, b):\r\n \r\n if a.freq < b.freq:\r\n return True \r\n elif a.freq == b.freq:\r\n if a.char < b.char:\r\n return True\r\n else:\r\n return False\r\n else:\r\n if a.freq > b.freq:\r\n return False",
"def box_iou(a, b):\n w_intsec = np.maximum (0, (np.minimum(a[1], b[1]) - np.maximum(a[0], b[0])))\n h_intsec = np.maximum (0, (np.minimum(a[3], b[3]) - np.maximum(a[2], b[2])))\n s_intsec = w_intsec * h_intsec\n s_a = (a[1] - a[0])*(a[3] - a[2])\n s_b = (b[1] - b[0])*(b[3] - b[2])\n return float(s_intsec)/(s_a + s_b -s_intsec)",
"def _dist(a, b):\n return torch.pow(a - b, 2).sum(-1)",
"def magnitude(point_a, point_b):\n vector = np.subtract(point_a, point_b)\n total = 0\n for i in vector:\n total += i**2\n mag = total ** .5\n return mag",
"def egim_hesapla(x1, y1, x2, y2):\n\tsonuc = (y2 - y1) / (x2 - x1)\n\tprint float(sonuc)",
"def chamfer_sum(a,b):\n #tf.print(a)\n #tf.print(a[0,0,:])\n #tf.print(b[0,0,:])\n M = pairwise_distances(a, b)\n if len(M.shape) == 2:\n M = tf.expand_dims(M,0) #[np.newaxis, :, :]\n #return tf.keras.backend.sum(tf.reduce_sum(tf.reduce_min(M, 1), 1) + tf.reduce_sum(tf.reduce_min(M, 2), 1))\n c=tf.reduce_sum(tf.reduce_min(M, 1), 1) + tf.reduce_sum(tf.reduce_min(M, 2), 1)\n #tf.print(tf.reduce_sum(c))\n return c"
] | [
"0.6110915",
"0.5992405",
"0.59816706",
"0.5890807",
"0.5879636",
"0.57552236",
"0.57190615",
"0.56921566",
"0.56823105",
"0.56598777",
"0.56512135",
"0.5644057",
"0.5637544",
"0.5629808",
"0.5591739",
"0.5586866",
"0.55634403",
"0.5552246",
"0.5550721",
"0.55504066",
"0.55495656",
"0.55364233",
"0.5535896",
"0.55280095",
"0.55156577",
"0.5515017",
"0.55120724",
"0.548159",
"0.54765767",
"0.54694265",
"0.5461369",
"0.5453457",
"0.54404384",
"0.5440091",
"0.5439374",
"0.5435323",
"0.5430319",
"0.54289657",
"0.54246044",
"0.54225415",
"0.54211456",
"0.5413984",
"0.54083323",
"0.5407179",
"0.5403493",
"0.53947455",
"0.5392257",
"0.53795135",
"0.5376266",
"0.5363134",
"0.5362939",
"0.53459877",
"0.5345439",
"0.5340273",
"0.5333071",
"0.53299206",
"0.53237617",
"0.5323354",
"0.53233373",
"0.5316807",
"0.5315225",
"0.5306972",
"0.53066707",
"0.530607",
"0.5305027",
"0.5304923",
"0.53007245",
"0.52973235",
"0.5294246",
"0.52941763",
"0.52900565",
"0.5289903",
"0.5286932",
"0.52869046",
"0.5286248",
"0.52859616",
"0.5280486",
"0.5280353",
"0.5278825",
"0.5273467",
"0.52714777",
"0.52676994",
"0.52676994",
"0.52644783",
"0.5263107",
"0.5252553",
"0.5250875",
"0.52388746",
"0.5238448",
"0.52347994",
"0.52347976",
"0.5216335",
"0.5209268",
"0.51861006",
"0.5185772",
"0.51851374",
"0.51685584",
"0.51582396",
"0.5157404",
"0.5155086",
"0.51545805"
] | 0.0 | -1 |
Uploads a new transaction to Rex | def get(self):
args = single_parser.parse_args()
n1 = args.n
m1 = args.m
r = summation(n1, m1)
print(r)
return {"add": r} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def post_transaction():\n tx_dict = encode_transaction(\"gautham=awesome\") \n print(tx_dict)\n\n tendermint_host = 'localhost'\n tendermint_port = 26657\n endpoint = 'http://{}:{}/'.format(tendermint_host, tendermint_port)\n\n payload = {\n 'method': 'broadcast_tx_commit',\n 'jsonrpc': '2.0',\n #'params': [encode_transaction(tx_dict)],\n 'params': [tx_dict],\n 'id': str(uuid4())\n }\n # TODO: handle connection errors!\n print(payload)\n return requests.post(endpoint, json=payload)",
"def new_transaction():\n\n data = request.get_json()\n\n if not data:\n return \"No transation data passed\", 400\n\n required = ['sender', 'recipient', 'amount']\n\n if not (list(data.keys()) == required):\n return 'Missing Value', 400\n \n block_index = blockchain.add_transaction(data['sender'], data['recipient'], data['amount'])\n response = {'message':f'Adding the transaction to block at index: {block_index}'}\n\n return jsonify(response), 201",
"def create_transaction(conn, transaction):\n sql = ''' INSERT INTO transactions(date, value, currency, desc, categ)\n VALUES(?, ?, ?, ?, ?) '''\n cur = conn.cursor()\n cur.execute(sql, transaction)",
"def op_transfer(cls, op, tx_idx, num, date):\n result = cls._validated(op, tx_idx, num, date)\n if not result:\n return\n\n record, author_id, permlink = result\n\n # add payment record and return post id\n sql = \\\n\"\"\"\nINSERT INTO hive_payments(block_num, tx_idx, post_id, from_account, to_account, amount, token) SELECT\n bn, tx, hp.id, fa, ta, am, tkn\nFROM\n( \n SELECT bn, tx, hpd.id, auth_id, fa, ta, am, tkn\n FROM (VALUES (:_block_num, :_tx_idx, :_permlink, :_author_id , :_from_account , :_to_account , :_amount, :_token)) \n AS v(bn, tx, perm, auth_id, fa, ta, am, tkn) \n JOIN hive_permlink_data hpd\n ON v.perm = hpd.permlink\n) as vv(bn, tx, hpd_id, auth_id, fa, ta, am, tkn )\nJOIN hive_posts hp\nON hp.author_id=vv.auth_id AND hp.permlink_id=vv.hpd_id\nRETURNING post_id\n\"\"\"\n\n post_id = DB.query_one(sql, \n _block_num=record['block_num'], \n _tx_idx=record['tx_idx'], \n _permlink=permlink, \n _author_id=author_id,\n _from_account=record['from_account'],\n _to_account=record['to_account'],\n _amount=record['amount'],\n _token=record['token']\n )\n\n amount = record['amount']\n if not isinstance(amount, float):\n amount = float(amount)\n\n if amount != 0.0 and post_id is not None:\n # update post record\n sql = \"UPDATE hive_posts SET promoted = promoted + :val WHERE id = :id\"\n DB.query(sql, val=amount, id=post_id)",
"def transaction(self, transaction):\n # Allow for a list of blocks..\n transaction = utils.request_type(transaction)\n\n res = r.get(self.url + self.tx_info + str(transaction))\n return self.execute(res)",
"def add_transaction():\n index = blockchain.add_transaction(request.form['sender'], request.form['receiver'], request.form['amount'])\n response = {'message': \"Transaction will be added to Block #{0}\".format(index)}\n return jsonify(response), 200",
"def send_to_db(ck_transactions):\n db = DDDB()\n\n db.add_orders(ck_transactions)",
"def submit(request, session, **kwargs):\n\n from ..models import (\n FacilityTransaction,\n Allocation,\n FollowupRequest,\n Instrument,\n )\n\n instrument = (\n Instrument.query_records_accessible_by(request.requester)\n .join(Allocation)\n .join(FollowupRequest)\n .filter(FollowupRequest.id == request.id)\n .first()\n )\n\n name = request.obj.tns_name\n if name is None:\n request.status = 'No TNS name'\n else:\n try:\n lc = Table.read(\n f\"{lightcurve_url}/lc_{name}_cleaned\",\n format='ascii',\n header_start=1,\n )\n\n if 'BTJD' not in list(lc.columns):\n request.status = f\"TESS alert {name} could not be ingested: {lightcurve_url}/lc_{name}_cleaned\"\n else:\n IOLoop.current().run_in_executor(\n None,\n lambda: commit_photometry(\n lc, request.id, instrument.id, request.requester.id\n ),\n )\n\n except FileNotFoundError:\n request.status = f\"TESS alert {name} not found.\"\n except Exception:\n request.status = f\"TESS alert {name} could not be ingested: {lightcurve_url}/lc_{name}_cleaned\"\n\n transaction = FacilityTransaction(\n request=None,\n response=None,\n followup_request=request,\n initiator_id=request.last_modified_by_id,\n )\n\n session.add(transaction)",
"def Add_File(self,tx,filename,newcontents):\n if tx != self.tx:\n raise InvalidTransaction(tx)\n fullname = os.path.join(self.home,filename)\n h = win32_txf.CreateFileTransacted(fullname,transaction = tx,\n desired_access = win32_txf.const.GENERIC_WRITE,\n creation_disposition = win32_txf.const.CREATE_ALWAYS)\n #TODO handle partial writes\n win32_txf.WriteFile(h,newcontents)\n win32_txf.CloseHandle(h)",
"def Insert_in_Transaction(db, params):\r\n \r\n try: \r\n db.Transaction.insert_one(\r\n {\r\n \"Open time\": params[0], \r\n \"High\": params[1], \r\n \"Low\": params[2], \r\n \"Open\": params[3], \r\n \"Close\": params[4], \r\n \"Volume\": params[5], \r\n \"Quote asset volume\": params[6], \r\n \"Weighted average\": params[7]\r\n }\r\n )\r\n \r\n except Exception as e:\r\n print(e)",
"def CreateTransaction(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)",
"def enter_transaction():\n _state.transactions = get_transactions() + 1",
"def record_transaction(self) -> None:\n Menu.prompt_record_transaction()\n tx_data = Transaction.prompt_record_tx()\n new_tx = Transaction.generate_new_tx(tx_data)\n\n # Convert the user budget category int input to the enum\n budget_category_int = new_tx.budget_category\n budget_category = BudgetManager.category_mapping[budget_category_int]\n\n # Retrieve the budget object using the enum as the key\n budget = self.user.budget_manager.budget_dict[budget_category]\n\n # Validate the transaction before proceeding\n validated_tx, error_msg = self.validate_transaction_record(new_tx,\n budget)\n if not validated_tx:\n print(\"\\n[red]Warning:[/red] Unable to record transaction!\")\n print(error_msg)\n print(f\"{self.user.account}\\n\")\n print(budget)\n return\n\n # User has successfully recorded a transaction\n budget.add_amount_spent(new_tx.tx_amount)\n self.user.account.add_amount_spent(new_tx.tx_amount)\n self.user.tx_manager.add_transaction(new_tx)\n self.user.update_lock_status()\n print(\"\\nSuccessfully recorded the following transaction:\")\n print(new_tx)\n print(\"\\nTransaction has been recorded under the following budget \"\n \"category:\")\n print(budget)\n\n self.user.check_and_issue_user_warnings(budget)",
"def add_new_transfer(self):\n # << Get New File >>\n result = self.helper.helper_get_new_request()\n gLogger.info(result)\n if not result:\n return False\n\n # << Add New Transfer >>\n return self.helper.helper_add_transfer(result)",
"def transaction(self):\n if not self._tr:\n trname = b'%s\\n%s' % (self.source, urlutil.hidepassword(self.url))\n self._tr = self.repo.transaction(trname)\n self._tr.hookargs[b'source'] = self.source\n self._tr.hookargs[b'url'] = self.url\n return self._tr",
"def post_transaction(self, seqnum):\n logging.warn(\"Posting transaction \" + seqnum)\n return self.__post(\"post/seqnum=\" + seqnum, \"\").text",
"def transaction_action(self):\n # trigger scene signal\n self.scene().node_transaction.emit(self.metadata)",
"def add_transaction(self):\r\n pattern = re.compile('\\d+(\\.\\d+)?')\r\n match = re.search(pattern, self.difference_box.get())\r\n if match:\r\n try:\r\n datetime.strptime(self.dateandtime_box.get(), '%Y-%m-%d %H:%M:%S')\r\n self.cursor.execute(\r\n \"\"\"INSERT INTO transactions(Difference, DateAndTime, TransactionStatus) VALUES (?,?,?)\"\"\",\r\n (self.difference_box.get(), self.dateandtime_box.get(), \"Successful\",))\r\n self.db.commit()\r\n self.create_transaction_window.destroy()\r\n FinancesFrame.update_table(self)\r\n except ValueError:\r\n messagebox.showinfo(\"Error\", \"Transaction format incorrect\")\r\n else:\r\n messagebox.showinfo(\"Error\", \"Difference is invalid format\")",
"def __create_transaction(self):\n log.debug(\"Displaying __create_transaction\")\n # Make the admin select an user\n user = self.__user_select()\n # Allow the cancellation of the operation\n if isinstance(user, CancelSignal):\n return\n # Create an inline keyboard with a single cancel button\n cancel = telegram.InlineKeyboardMarkup([[telegram.InlineKeyboardButton(self.loc.get(\"menu_all_cancel\"),\n callback_data=\"cmd_cancel\")]])\n # Request from the user the amount of money to be credited manually\n self.bot.send_message(self.chat.id, self.loc.get(\"ask_credit\"), reply_markup=cancel)\n # Wait for an answer\n reply = self.__wait_for_regex(r\"(-? ?[0-9]{1,3}(?:[.,][0-9]{1,2})?)\", cancellable=True)\n # Allow the cancellation of the operation\n if isinstance(reply, CancelSignal):\n return\n # Convert the reply to a price object\n price = self.Price(reply)\n # Ask the user for notes\n self.bot.send_message(self.chat.id, self.loc.get(\"ask_transaction_notes\"), reply_markup=cancel)\n # Wait for an answer\n reply = self.__wait_for_regex(r\"(.*)\", cancellable=True)\n # Allow the cancellation of the operation\n if isinstance(reply, CancelSignal):\n return\n # Create a new transaction\n transaction = db.Transaction(user=user,\n value=int(price),\n provider=\"Manual\",\n notes=reply)\n self.session.add(transaction)\n # Change the user credit\n user.recalculate_credit()\n # Commit the changes\n self.session.commit()\n # Notify the user of the credit/debit\n self.bot.send_message(user.user_id,\n self.loc.get(\"notification_transaction_created\",\n transaction=transaction.text(w=self)))\n # Notify the admin of the success\n self.bot.send_message(self.chat.id, self.loc.get(\"success_transaction_created\",\n transaction=transaction.text(w=self)))",
"def startTransaction(self) -> int:\n ...",
"def start_transaction(self) -> None:\n pass",
"def add_transaction(self):\r\n transactionvariable = self.transactionvariable.get()\r\n transactionvariable = (ast.literal_eval(transactionvariable)[0]) # converts to tuple\r\n pattern = re.compile('\\d+(\\.\\d+)?')\r\n match = re.search(pattern, self.difference_box.get())\r\n if self.difference_box.get() == \"\":\r\n pass\r\n else:\r\n if match: \r\n self.cursor.execute(\"\"\"UPDATE transactions SET Difference = ? WHERE TransactionID = ?\"\"\",\r\n (self.difference_box.get(), transactionvariable,))\r\n else:\r\n messagebox.showinfo(\"Error\", \"Transaction incorrect format (+/-DD)\")\r\n\r\n if self.dateandtime_box.get() == \"\":\r\n pass\r\n else:\r\n try:\r\n datetime.strptime(self.dateandtime_box.get(), '%Y-%m-%d %H:%M:%S') \r\n self.cursor.execute(\"\"\"UPDATE transactions SET DateAndTime = ? WHERE TransactionID = ?\"\"\",\r\n (self.dateandtime_box.get(), transactionvariable,))\r\n except ValueError:\r\n messagebox.showinfo(\"Error\", \"Date and time incorrect format (YYYY-MM-DD HH:MM:SS)\")\r\n\r\n self.db.commit()\r\n self.edit_transaction_window.destroy()\r\n FinancesFrame.update_table(self)",
"def post(self):\n Transacoes.insert_transacao(api.payload)\n return {\"msg\": \"Transacao created.\"}, 201",
"def commit_transaction(self) -> None:\n pass",
"def commit_purchase(self, purchaseid, transferid):\n return self.request(\n 'put',\n safeformat('purchases/{:int}', purchaseid),\n json.dumps({'transferid': transferid})\n )",
"def upload(det_file):\n db = DatabaseSession()\n\n try:\n LOG.info(f\"Copying REDCap DET records from {det_file.name}\")\n\n row_count = db.copy_from_ndjson((\"receiving\", \"redcap_det\", \"document\"), det_file)\n\n LOG.info(f\"Received {row_count:,} DET records\")\n LOG.info(\"Committing all changes\")\n db.commit()\n\n except:\n LOG.info(\"Rolling back all changes; the database will not be modified\")\n db.rollback()\n raise",
"def add_transaction(self, block, transaction):\n cmd = \"\"\"INSERT INTO %s(%s, %s, %s, %s, %s, %s)\n VALUES(?,?,?,?,?,?);\"\"\" %(TABLE_TRANSACTIONS,\n COL_TRANSACTION_BLOCK,\n COL_TRANSACTION_SENDER,\n COL_TRANSACTION_RECEIVER,\n COL_TRANSACTION_AMOUNT,\n COL_TRANSACTION_SUB_TIME,\n COL_TRANSACTION_VER_TIME)\n self.__dbcursor.execute(cmd, (block, transaction.sender,\n transaction.receiver,\n transaction.amount,\n transaction.submitted_time,\n transaction.verified_time))",
"def _store_transaction(account, transaction):\n tr_tx = transaction['tx']\n meta = transaction.get('meta', {})\n\n if meta.get('TransactionResult') != 'tesSUCCESS':\n return\n\n amount = meta.get('delivered_amount') or tr_tx.get('Amount', {})\n\n is_unprocessed = (\n tr_tx['TransactionType'] == 'Payment' and\n tr_tx['Destination'] == account and\n isinstance(amount, dict) and\n not Transaction.objects.filter(hash=tr_tx['hash'])\n )\n if is_unprocessed:\n logger.info(\n format_log_message(\n 'Saving transaction: %s', transaction\n )\n )\n\n transaction_object = Transaction.objects.create(\n account=tr_tx['Account'],\n hash=tr_tx['hash'],\n destination=account,\n ledger_index=tr_tx['ledger_index'],\n destination_tag=tr_tx.get('DestinationTag'),\n source_tag=tr_tx.get('SourceTag'),\n status=Transaction.RECEIVED,\n currency=amount['currency'],\n issuer=amount['issuer'],\n value=amount['value']\n )\n\n logger.info(\n format_log_message(\n \"Transaction saved: %s\", transaction_object\n )\n )",
"def transaction():\n data = jsonpickle.decode(request.get_data())\n address = data[\"address\"]\n amount = int(data[\"amount\"])\n keyname = data[\"keyname\"]\n\n pkplus, pkminus = wallet.keys(keyname)\n\n my_balance = p2p.query(\"/balance\", address=pkplus)[\"balance\"]\n if my_balance < amount:\n abort(404, description=\"Not enough funds.\")\n\n my_utxo = p2p.query(\"/find-utxos\", address=pkplus, amount=amount)[\"utxos\"]\n rem = sum(utxo.amount for utxo in my_utxo) - amount\n address_amount = [(address, amount)]\n\n assert rem >= 0\n\n if rem > 0:\n address_amount.append((pkplus, rem))\n\n tx = build_transaction(my_utxo, address_amount, pkminus)\n try:\n p2p.broadcast(\"/transaction-pool\", transaction=tx)\n return SUCCESSFUL_PATCH\n except UnsuccessfulPatch:\n payload = jsonpickle.encode(\n {\"message\": \"Transaction wasn't accepted by the network.\"})\n return payload, 420, {\"ContentType\": \"application/json\"}",
"def transaction_add(request, form_class=TransactionForm, template_name='budget/transactions/add.html'):\n if request.POST:\n form = form_class(request.POST)\n \n if form.is_valid():\n transaction = form.save()\n return HttpResponseRedirect(reverse('budget_transaction_list'))\n else:\n form = form_class()\n return render_to_response(template_name, {\n 'form': form,\n }, context_instance=RequestContext(request))",
"def push_tx(self, crypto, tx_hex):\n raise NotImplementedError(\n \"This service does not support pushing transactions to the network. \"\n \"Or rather it has no defined 'push_tx' method.\"\n )",
"def add_transaction(self,transaction):\n if type(transaction) != PoWGenericTransaction:\n raise Exception('TYPEERROR','transaction should be type of \"PoWGenericTransaction\" but got {}'.format(type(transaction)))\n if not transaction.is_validation_passed():\n print 'The transaction is not valid. Skipped...'\n return\n self.transactions.append(transaction)",
"def add_transaction(self, bitfinex_id, bitfinex_currency, bitfinex_timestamp, bitfinex_price, bitfinex_amount):\n\n new_transaction = Transaction(bitfinex_id=bitfinex_id,\n bitfinex_currency=bitfinex_currency,\n bitfinex_timestamp=bitfinex_timestamp,\n bitfinex_price=bitfinex_price,\n bitfinex_amount=bitfinex_amount,\n languages=languages,\n skills=skills)\n\n self.session.add(new_transaction)\n self.session.commit()\n\n return new_transaction.id",
"def insert_to_db(self) -> None:\n query = '''INSERT INTO ESLReceipts(Transaction_Number, Date, Description, Memo,\n Amount_Debit, Amount_Credit, Balance, Check_Number, \n Fees, Card_Type, Is_Payment, Is_Transaction, User_id)\n VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?);'''\n self.db.commit(query, values=self.to_tuple())\n\n if self.is_transaction \\\n and self.transaction is not None \\\n and not self.transaction.exists_in_db():\n self.transaction.insert_to_db()",
"def add_UI_transaction(account):\n\t_day = read_day()\n\t_amount = read_amount()\n\t_type = read_type()\n\tadd_transaction(_day, _amount, _type, account)",
"def submit_textarea(): \n sender = request.form[\"sender\"]\n receiver = request.form[\"receiver\"]\n amount = request.form[\"amount\"]\n post_object = {\n \n 'sender': sender,\n 'receiver': receiver,\n 'amount': amount\n }\n\n # Submit a transaction\n new_tx_address = \"{}/new_transaction\".format(CONNECTED_NODE_ADDRESS)\n\n requests.post(new_tx_address,\n json=post_object,\n headers={'Content-type': 'application/json'})\n\n return redirect('/transaction')",
"async def new_tx(request: Request) -> dict:\n peer = request.client.host\n tx = await request.json()\n tx = Transaction(**tx)\n chain.mempool.put_nowait(tx)\n return {\"sender\": peer, \"receipt\": tx.receipt()}",
"def new_transaction(self, sender, recipient, amount):\n self.current_transactions.append({\n 'sender': sender,\n 'recipient': recipient,\n 'amount': amount\n })\n return self.last_block['index']+1",
"def recTrans(self,NoSh,BorS,Price,TS):\n self.TL.append(Transaction(NoSh,BorS,Price,TS))\n self.Price=Price",
"def send_tx(self, tx):\n if sys.version_info >= (3, 0):\n tx = tx.encode('ascii')\n tx_b64 = base64.b64encode(tx)\n self.__rpc_client.call(\"Babble.SubmitTx\", [tx_b64], expect_reply=True)",
"def new_transaction(self, sender, recipient, amount):\n self.current_transactions.append({\n 'sender': sender,\n 'recipient': recipient,\n 'amount': amount\n })\n return self.last_block['index'] + 1",
"def commit(self):",
"def record_transaction(self, amount=None, transaction_date=None, paid=False):\n if transaction_date is None:\n transaction_date = timezone.now()\n\n if amount is None:\n amount = self.plan_cost.cost\n SubscriptionTransaction = swapper.load_model(\n \"subscriptions_api\", \"SubscriptionTransaction\"\n )\n return SubscriptionTransaction.objects.create(\n user=self.user,\n subscription=self, # A transaction should link to is subscription\n date_transaction=transaction_date,\n amount=amount,\n paid=paid\n )",
"def Add_File(self,txn,filename,newcontents):\n opid = self.new_opid()\n fullname = os.path.join(self.home,filename)\n #if not self.tx.dir_exists(os.path.dirname(fullname)):\n # raise OSError(errno.ENOENT,\"No directory: %r\"%os.path.dirname(fullname))\n xaction = ReplaceAll_Operation(fullname,newcontents,opid)\n self._add_operation(txn,xaction)",
"def __transactions_file(self):\n log.debug(\"Generating __transaction_file\")\n # Retrieve all the transactions\n transactions = self.session.query(db.Transaction).order_by(db.Transaction.transaction_id.asc()).all()\n # Create the file if it doesn't exists\n try:\n with open(f\"transactions_{self.chat.id}.csv\", \"x\"):\n pass\n except IOError:\n pass\n # Write on the previously created file\n with open(f\"transactions_{self.chat.id}.csv\", \"w\") as file:\n # Write an header line\n file.write(f\"UserID;\"\n f\"TransactionValue;\"\n f\"TransactionNotes;\"\n f\"Provider;\"\n f\"ChargeID;\"\n f\"SpecifiedName;\"\n f\"SpecifiedPhone;\"\n f\"SpecifiedEmail;\"\n f\"Refunded?\\n\")\n # For each transaction; write a new line on file\n for transaction in transactions:\n file.write(f\"{transaction.user_id if transaction.user_id is not None else ''};\"\n f\"{transaction.value if transaction.value is not None else ''};\"\n f\"{transaction.notes if transaction.notes is not None else ''};\"\n f\"{transaction.provider if transaction.provider is not None else ''};\"\n f\"{transaction.provider_charge_id if transaction.provider_charge_id is not None else ''};\"\n f\"{transaction.payment_name if transaction.payment_name is not None else ''};\"\n f\"{transaction.payment_phone if transaction.payment_phone is not None else ''};\"\n f\"{transaction.payment_email if transaction.payment_email is not None else ''};\"\n f\"{transaction.refunded if transaction.refunded is not None else ''}\\n\")\n # Describe the file to the user\n self.bot.send_message(self.chat.id, self.loc.get(\"csv_caption\"))\n # Reopen the file for reading\n with open(f\"transactions_{self.chat.id}.csv\") as file:\n # Send the file via a manual request to Telegram\n requests.post(f\"https://api.telegram.org/bot{self.cfg.telegram['token']}/sendDocument\",\n files={\"document\": file},\n params={\"chat_id\": self.chat.id,\n \"parse_mode\": \"HTML\"})\n # Delete the created file\n os.remove(f\"transactions_{self.chat.id}.csv\")",
"def add_transaction(self, tx_json):\n recv_tx = Transaction.from_json(tx_json)\n if not recv_tx.verify():\n raise Exception(\"New transaction failed signature verification.\")\n with self.all_tx_lock:\n if tx_json in self._all_transactions:\n print(f\"{self.name} - Transaction already exist in pool.\")\n return\n self._all_transactions.add(tx_json)",
"def commit(self, transaction):\n raise NotImplementedError",
"def submit_transaction(self, sender_address, recipient_address, stock, quanitity, signature):\n print(\"self.transactions=\", len(self.transactions))\n\n transaction = OrderedDict({\n 'sender_address': sender_address,\n 'recipient_address': recipient_address,\n 'stock': stock,\n 'quantity': quanitity\n })\n\n verified = self.verify_signature(sender_address, signature, transaction)\n if verified:\n self.transactions.append(transaction)\n print('Added tranasaction successfully (len={})'.format(len(self.transactions)))\n self.mine()\n return len(self.chain) + 1\n else:\n raise Exception(\"Failed to add transaction to blockchain\")",
"def post(self):\n\n upload_files = self.get_uploads('file')\n blob_info = upload_files[0]\n self.redirect('/?upload_info=%s' % urllib.quote(blob_info.filename))",
"def makeNewTx(self):\n new_tx = self.makeTx() # ABSTRACT - Make a new tx.\n logging.info(\"New tx (%d) created by miner %d\" % (new_tx.id, self.id))\n self.changed_last_step = True\n self.handleNewTx(new_tx, self.id)\n self.checkAllTx()",
"def create_transaction(self, xml):\n logging.info(xml)\n seqnum = self.__post(\"import/return_seq=true\", xml).text\n logging.warn(\"Transaction created with id \" + seqnum)\n return seqnum",
"def new_transaction(self, sender, recipient, amount):\n self.current_transactions.append({\n 'sender':sender,\n 'recipient':recipient,\n 'amount':amount\n })\n\n return self.last_block['index']+1",
"def sign_trx(self, signture):\n self.trx_signature = signture",
"def push_tx(tx, network='testnet', fee=False):\n\n if network in ['testnet', 'main']:\n if network is 'testnet':\n if fee:\n url = 'http://tbtc.blockr.io/api/v1/tx/push'\n else:\n url = 'https://api.blockcypher.com/v1/btc/test3/txs/push'\n elif network is 'main':\n if fee:\n url = 'http://btc.blockr.io/api/v1/tx/push'\n else:\n url = 'https://api.blockcypher.com/v1/btc/main/txs/push'\n\n if fee:\n data = {'hex': tx}\n else:\n data = {'tx': tx}\n\n response = post(url, data=json.dumps(data))\n else:\n response = 'Bad network'\n\n r_code = response.status_code\n r_reason = response.reason\n\n if r_code is 200:\n # blockr server\n pushed_tx = json.loads(response.content)\n tx_hash = str(pushed_tx['data'])\n elif r_code is 201:\n # blockcyper server\n pushed_tx = json.loads(response.content)\n tx_hash = str(pushed_tx['tx']['hash'])\n else:\n tx_hash = None\n\n return r_code, r_reason, tx_hash",
"def test_upload_binary(self):\n uploadFile = os.path.join(testdatadir, \"upload.data.gz\")\n r = gracedb.writeFile(eventId, uploadFile)\n self.assertEqual(r.status, 201) # CREATED",
"def test_upload(self):\n\n\n username,userpass = self.testdata.find_account_for('toolsubmitter')\n\n self.utils.account.login_as(username,userpass)\n\n self.contribtool.upload(TOOLNAME,TOOLFILEDATA,username,userpass)",
"def submit_transaction():\n data = request.get_json()\n\n # Create candidate transaction object\n try:\n tx = Transaction.from_dict(data['transaction'])\n except (KeyError, TypeError):\n response = dict(message='Improper transaction json provided.')\n status_code = 400\n return jsonify(response), status_code\n\n statuses = []\n # Broadcast if needed and turn off broadcasting for other nodes\n if request.args.get('broadcast', type=int, default=0):\n for node_ in node.network:\n if not node_['id'] == node.node_id:\n response = requests.post(\n node_['ip'] + '/transactions/submit?broadcast=0',\n json=dict(\n transaction=data['transaction'],\n signature=data['signature']\n )\n )\n statuses.append(response.status_code)\n\n if not response.status_code == 200:\n response = dict(message='Transaction rejected by the network.')\n return jsonify(response), 202\n\n # Validate transaction as-is\n val_result = validate_transaction_document(tx)\n if isinstance(val_result, str):\n response = dict(message=val_result)\n status_code = 400\n return jsonify(response), status_code\n\n # Verify signature\n # defined in backend/utils\n sign_result = verify_signature(tx, data['signature'])\n if isinstance(sign_result, str):\n response = dict(message=sign_result)\n status_code = 400\n return jsonify(response), status_code\n\n # Add transaction to local blockchain\n node.blkchain.add_transaction(tx)\n myurl = node.network[node.node_id]['ip']\n url = myurl + '/blockchain/mine_block'\n mine_resp = requests.get(url=url)\n if mine_resp.status_code == 200:\n block_dict = mine_resp.json()\n add_resp = requests.post(url=myurl + '/blockchain/add_block?\\\n broadcast=1', json=block_dict)\n # run consensus \n requests.get(url=myurl+'/blockchain/consensus')\n\n response = dict(message='Transaction added.')\n\n return jsonify(response), 200",
"def submission_old():\n response.headers[\"Content-Type\"] = \"text/xml\"\n xml = str(request.post_vars.xml_submission_file.value)\n if len(xml) == 0:\n raise HTTP(400, \"Need some xml!\")\n importxml(db, xml)\n r = HTTP(201, \"Saved.\")\n r.headers[\"Location\"] = request.env.http_host\n raise r",
"def new_transaction(self, sender, recipient, amount):\n self.current_transactions.append({\n 'sender': sender,\n 'recipient': recipient,\n 'amount': amount,\n })\n\n return self.last_block['index'] + 1",
"def create_transactions(self, new_transactions):\n if new_transactions is None or len(new_transactions) == 0:\n # TODO: Create proper exception for this\n raise pynab.exceptions.PynabError\n\n path = f\"{pynab.Pynab._base_url}/budgets/{self.budget_id}/transactions\"\n data = {\n \"transactions\": [\n new_transaction.__dict__ for new_transaction in new_transactions\n ]\n }\n print(data)\n response = self.session.post(path, json=data)\n return pynab.factory.PynabFactory.parse(response.json(), self.budget_id)",
"def new_transaction(self, sender, recipient, amount):\n self.pending_transactions.append({\n 'sender' : sender,\n 'recipient' : recipient,\n 'amount' : amount\n })\n return self.last_block['index'] + 1",
"def make_transaction():\n account_id = request.json['account_id']\n aux_account = [account for account in accounts if account['id'] == account_id]\n if len(aux_account) == 0:\n abort(404)\n account_balance = Decimal(aux_account[0].get('balance')).quantize(Decimal('0.00'))\n transaction = request.json['transaction']\n transaction_amount = Decimal(abs(request.json['amount'])).quantize(Decimal('0.00'))\n\n if not request.json:\n abort(400)\n if transaction not in ['withdrawal', 'deposit']:\n abort(400, f'Invalid transaction name: {transaction}')\n if transaction == 'withdrawal':\n transaction_amount = transaction_amount*-1\n\n # the user can't withdraw more than the account has\n validation_sum = (account_balance + transaction_amount).quantize(Decimal('.01'), rounding=ROUND_DOWN)\n if validation_sum >= 0:\n for real_account in accounts:\n if real_account.get('id') == account_id:\n real_account['balance'] = round(float(validation_sum),2)\n else:\n abort(400, {'error':'Not enough funds for this transaction'})\n\n return json.dumps({f'{transaction.capitalize()} Done. New balance': str(validation_sum)}, ensure_ascii=False), 200",
"def save_transaction(**kwargs):\n if not 'user_id' in kwargs:\n raise AttributeError(\"Cannot create a transaction without user_id\")\n\n\n return History.create(\n user_id=kwargs['user_id'],\n from_curr=kwargs['currencyFrom'],\n to_curr=kwargs['currencyTo'],\n amount=kwargs['amountTo'],\n address_in=kwargs['payinAddress'],\n address_out=kwargs['payoutAddress'],\n extraid=kwargs['payinExtraId'],\n transaction_id=kwargs['id'],\n exchange_status=kwargs['status'],\n )",
"def new_transaction(self, sender, recipient, amount):\n\n self.current_transactions.append({\n 'sender': sender,\n 'recipient': recipient,\n 'amount': amount,\n })\n\n return self.last_block['index'] + 1",
"def edit_UI_transaction(account):\n\t_day = read_day()\n\t_amount = read_amount()\n\t_type = read_type()\n\ttransaction_at = transaction_exists(_day, _amount, _type, account)\n\tif (transaction_at != -1):\n\t\tprint('Actualizare tranzactie...')\n\t\t_day = read_day()\n\t\t_amount = read_amount()\n\t\t_type = read_type()\n\t\tedit_transaction(transaction_at, _day, _amount, _type, account)\n\t\tprint('Tranzactie actualizata.')\n\telse:\n\t\tprint('Tranzactie inexistenta.')",
"def insert_retailTransactionRecord(cursor, retailTransactionObject):\n last_row_id = -1;\n query = \"\"\"\n INSERT INTO transactions(\n transaction_id,\n unit_id,\n workstation_id,\n sequence_id,\n begin_date_time,\n end_date_time,\n currency,\n quantity,\n extended_amount,\n net_amount,\n training_mode_flag\n )\n VALUES(\n '{0.id}',\n {0.unit_id},\n {0.workstation_id},\n {0.sequence_id},\n '{0.start_dtime}',\n '{0.end_dtime}',\n '{0.currency}',\n {0.quantity},\n {0.extended_amount},\n {0.transaction_net_amount},\n {0.training_mode_flag}\n )\n \"\"\".format(retailTransactionObject)\n try:\n cursor.execute(query)\n last_row_id = cursor.lastrowid\n except DBError as err:\n print(err)\n finally:\n return last_row_id",
"def create_transaction():\n data = request.get_json()\n response = None\n status_code = None\n\n # Proposed transaction document validity checks\n if balance() < (data['amount']):\n response = dict(message='Your balance is not enough to complete transaction')\n status_code = 400\n elif not (\n any(node_['public_key'] == data['sender_address'] for node_ in node.network) and\n any(node_['public_key'] == data['recipient_address'] for node_ in node.network) and\n isinstance((data['amount']), (int, float))\n ):\n response = dict(message='Please make sure the proposed transaction is valid.')\n status_code = 400\n\n if response and status_code:\n return jsonify(response), status_code\n\n transaction_id = str(uuid4())\n\n # Use as many utxos as necessary to create the new transaction inputs\n sender_address = data['sender_address']\n sum_ = 0\n tx_inputs = []\n for utxo in node.blkchain.utxos[sender_address]:\n if sum_ >= (data['amount']):\n break\n elif not node.blkchain.transaction_unconfirmed(utxo):\n sum_ += utxo.amount\n tx_inputs.append(TransactionInput.from_output(utxo))\n\n # Create 2 transaction outputs, one for the transfer and one for the sender's change\n tx_outputs = [\n TransactionOutput(\n transaction_id=transaction_id,\n recipient_address=data['recipient_address'],\n amount=(data['amount'])\n ),\n TransactionOutput(\n transaction_id=transaction_id,\n recipient_address=data['sender_address'],\n amount=sum_ - (data['amount'])\n )\n ]\n\n # Actual transaction object:\n tx = Transaction(\n sender_address=data['sender_address'],\n recipient_address=data['recipient_address'],\n amount=(data['amount']),\n transaction_inputs=tx_inputs,\n transaction_outputs=tx_outputs,\n transaction_id=transaction_id\n )\n\n response = tx.to_dict()\n return jsonify(response), 200",
"def transaction(user_id):\n # Check if the login_required is disabled `in case of testing`\n if not app.config['LOGIN_DISABLED']:\n # If login required, check if the user is authenticated\n # Redirect the user to login page if he is not authenticated\n if not current_user.is_authenticated:\n flash(\"You are not logged in!\", 'error')\n return redirect(url_for('login'))\n # Run the transaction in the background\n executor.submit(transaction_run)\n trans_form = TransactionForm()\n # Send transaction request if validation success\n if trans_form.validate_on_submit():\n currency_amount = trans_form.currency_amount.data\n currency_Type = trans_form.currency_Type.data\n target_user = trans_form.target_user.data\n # Add transaction to DB\n # `transaction_run` will decide if the transaction will success or not\n transaction = Transaction(currency_amount=currency_amount,\n currency_Type=currency_Type,\n target_user=target_user,\n user_id=user_id)\n db.session.add(transaction)\n db.session.commit()\n # Get the transaction account for the target user\n target_tran = Transaction.query.filter_by(user_id=target_user).first()\n # Check if the target user does not has a transaction account\n # Create one for him in order to be able to recieve the transfared money\n if not target_tran:\n target_transaction = Transaction(user_id=target_user)\n db.session.add(target_transaction)\n db.session.commit()\n\n flash('Transaction request sent successfully.', 'success')\n return redirect(url_for('mainPage', user_id=user_id))\n return render_template(\"transaction.html\",\n form=trans_form,\n user_id=user_id\n )",
"def add_transaction(recipient,sender=owner,amount=1.0):\n #transaction ={\n # 'sender':sender,\n # 'recipient':recipient,\n # 'amount':amount\n #}\n transaction=OrderedDict([('sender',sender),('recipient',recipient),('amount',amount)])\n if verify_transaction(transaction):\n open_transactions.append(transaction)\n participants.add(sender)\n participants.add(recipient)\n save_data()\n return True\n else:\n return False",
"def transferfunds(self):",
"def blockchain_set_tx_detail(transaction):\n info_endpoint = \"address/%s?format=json\" % transaction.to_address\n try:\n info = json.loads(util.call_api(info_endpoint))\n except:\n return\n\n transaction.txid = info['txs'][0]['hash']\n transaction.amount_paid = round(info['total_received'] * SATOSHI, 8)\n\n if transaction.amount_paid >= transaction.amount_btc:\n transaction.status = Transaction.STATUS_CONFIRMED\n send_webhook.apply_async(kwargs={'transaction_id': transaction.id})\n\n transaction.save()",
"def new_transaction(self, sender, recipient, amount):\n self.current_transactions.append(\n {\n 'sender': sender,\n 'recipient': recipient,\n 'amount': amount\n }\n )\n\n return self.last_block['index'] + 1",
"def submit_textarea():\n print(\"--- submit ---\")\n post_content = request.form[\"content\"]\n author = request.form[\"author\"]\n\n post_object = {\n 'author': author,\n 'content': post_content,\n }\n\n # Submit a transaction\n new_tx_address = \"{}/new_transaction\".format(BASE_URL)\n\n requests.post(new_tx_address,\n json=post_object,\n headers={'Content-type': 'application/json'})\n\n return redirect('/')",
"def transaction(request, user_id, type):\n\tif type == 1:\n\t\tname = request.POST['expName']\n\t\tdate = request.POST['expDate']\n\t\tamount = request.POST['amount']\n\t\ttransact_type = request.POST.get('transactions')\n\t\tt_from = request.POST.get('t_from')\n\t\trep = 0\n\t\tif 'repeat' in request.POST:\n\t\t\trep = int(request.POST.get('repeat_transaction'))\n\t\tif (transact_type == '1'):\n\t\t\tt_to = request.POST.get('t_to')\n\t\t\tt1 = Incomes(user_inc = Users.objects.get(pk=user_id), inc_name=name, cat_inc= Categories.objects.get(user_cat = Users.objects.get(pk=user_id), cat_name=\"Transaction\", cat_for=1), wal_inc=Wallets.objects.get(pk=t_from), inc_date=date, inc_amount=amount, inc_rep=rep)\n\t\t\tif t1 is not None:\n\t\t\t\tt1.save()\n\t\t\tt2 = Expenses(user_exp = Users.objects.get(pk=user_id), exp_name=name, cat_exp= Categories.objects.get(user_cat = Users.objects.get(pk=user_id), cat_name=\"Transaction\", cat_for=0), wal_exp=Wallets.objects.get(pk=t_to), exp_date=date, exp_amount=amount, exp_rep=rep)\n\t\t\tif t2 is not None:\n\t\t\t\tt2.save()\n\t\telif(transact_type == '2'):\n\t\t\texcat = request.POST.get('excat1')\n\t\t\tloaned = \"\"\n\t\t\tif 'loan' in request.POST:\n\t\t\t\tloaned = request.POST['loaned']\n\t\t\tt = Expenses(user_exp = Users.objects.get(pk=user_id), exp_name=name, cat_exp= Categories.objects.get(pk=excat), wal_exp=Wallets.objects.get(pk=t_from), exp_date=date, exp_amount=amount, exp_rep=rep, exp_loan=True, exp_loan_to=loaned)\n\t\t\tif t is not None:\n\t\t\t\tt.save()\n\t\t\tu = Users.objects.get(pk=user_id)\n\t\t\tif u is not None:\n\t\t\t\tu.balance -= int(amount)\n\t\t\t\tu.save()\n\t\telif(transact_type == '3'):\n\t\t\tincat = request.POST.get('incat1')\n\t\t\tindebt = \"\"\n\t\t\tif 'debt' in request.POST:\n\t\t\t\tindebt = request.POST['indebt']\n\t\t\tt = Incomes(user_inc = Users.objects.get(pk=user_id), inc_name=name, cat_inc= Categories.objects.get(pk=incat), wal_inc=Wallets.objects.get(pk=t_from), inc_date=date, inc_amount=amount, inc_rep=rep, inc_debt=True, inc_debt_to=indebt)\n\t\t\tif t is not None:\n\t\t\t\tt.save()\n\t\t\tu = Users.objects.get(pk=user_id)\n\t\t\tif u is not None:\n\t\t\t\tu.balance += int(amount)\n\t\t\t\tu.save()\n\telif type == 2:\n\t\tname = request.POST['expName']\n\t\tdate = request.POST['expDate']\n\t\tamount = request.POST['amount']\n\t\tt_from = request.POST.get('exp_wal')\n\t\trep = 0\n\t\tif 'repeat' in request.POST:\n\t\t\trep = int(request.POST.get('repeatit'))\n\t\texcat = request.POST.get('exp_cat')\n\t\tloaned = \"\"\n\t\tif 'loan' in request.POST:\n\t\t\tloaned = request.POST['loaned']\n\t\tt = Expenses(user_exp = Users.objects.get(pk=user_id), exp_name=name, cat_exp= Categories.objects.get(pk=excat), wal_exp=Wallets.objects.get(pk=t_from), exp_date=date, exp_amount=amount, exp_rep=rep, exp_loan=True, exp_loan_to=loaned)\n\t\tif t is not None:\n\t\t\tt.save()\n\t\tu = Users.objects.get(pk=user_id)\n\t\tif u is not None:\n\t\t\tu.balance -= int(amount)\n\t\t\tu.save()\n\telif type == 3:\n\t\tname = request.POST['incName']\n\t\tdate = request.POST['incDate']\n\t\tamount = request.POST['amount']\n\t\tt_from = request.POST.get('inc_wal')\n\t\trep = 0\n\t\tif 'repeat' in request.POST:\n\t\t\trep = int(request.POST.get('repeatit2'))\n\t\tincat = request.POST.get('inc_cat')\n\t\tindebt = \"\"\n\t\tif 'debt' in request.POST:\n\t\t\tindebt = request.POST['indebt']\n\t\tt = Incomes(user_inc = Users.objects.get(pk=user_id), inc_name=name, cat_inc= Categories.objects.get(pk=incat), 
wal_inc=Wallets.objects.get(pk=t_from), inc_date=date, inc_amount=amount, inc_rep=rep, inc_debt=True, inc_debt_to=indebt)\n\t\tif t is not None:\n\t\t\tt.save()\n\t\tu = Users.objects.get(pk=user_id)\n\t\tif u is not None:\n\t\t\tu.balance += int(amount)\n\t\t\tu.save()",
"def transfer_money(request):\n source = Account.objects.get(pk=int(request.POST.get('source-id', False)))\n destination = Account.objects.get(pk=int(request.POST.get('destination-id', False)))\n amount = float(request.POST.get('amount', False))\n enough_cash = source.available_cash >= amount\n if enough_cash:\n source.available_cash -= amount\n source.save()\n destination.available_cash += amount\n destination.save()\n messages.success(request, 'OK 200: Transfer successfully executed.')\n else:\n messages.error(request, f'Error 400: Tried to transfer {amount} from {source.name}, but only had {source.available_cash} available.')\n \n transaction = Transaction(description=f\"Transfer from {source.name} to {destination.name}.\", success=enough_cash, cash_amount=amount, source_account=source, \n destination_account=destination)\n transaction.save()\n\n return redirect('overview')",
"def upload(self, upload_request):\n raise NotImplementedError",
"def new_transaction(self, sender, recipient, amount):\n\n new_trans = Transaction(sender=sender,\n recipient=recipient,\n amount=amount)\n\n new_trans.save()\n\n return self.last_block['id'] + 1",
"def final_step_customer(Xaction_type, Xcredit_type, Xcredit_file, Xusers_account):\n ####################################################\n if Xaction_type == \"deposit\" and Xcredit_type == \"savings\":\n #deposit the money into the account.\n amt_entered = amount_entered()\n Xusers_account.deposit_savings(amt_entered)\n #add record to the file.\n add_this_line = \"\" + date.today().strftime(\"%m-%d-%Y\") +\"\\\\t\" + Xaction_type + \"\\\\t\" + str(amt_entered) + \"\\\\t\" + str(Xusers_account.get_sav_bal())\n #append line to file\n append_new_line(Xcredit_file, add_this_line)\n\n if Xaction_type == \"deposit\" and Xcredit_type == \"current\":\n #deposit the money into the account.\n amt_entered = amount_entered()\n Xusers_account.deposit_current(amt_entered)\n #add record to the file.\n add_this_line = \"\" + date.today().strftime(\"%m-%d-%Y\") +\"\\\\t\" + Xaction_type + \"\\\\t\" + str(amt_entered) + \"\\\\t\" + str(Xusers_account.get_cur_bal())\n #append line to file\n append_new_line(Xcredit_file, add_this_line)\n\n if Xaction_type == \"withdraw\" and Xcredit_type == \"savings\":\n amt_entered = amount_entered()\n #check if funds is sufficient\n if amt_entered > Xusers_account.get_sav_bal():\n print(\"Insufficient funds.\")\n else: #withdraw the money from the account.\n Xusers_account.withdraw_savings(amt_entered)\n #add record to the file.\n add_this_line = \"\" + date.today().strftime(\"%m-%d-%Y\") +\"\\\\t\" + Xaction_type + \"\\\\t\" + str(amt_entered) + \"\\\\t\" + str(Xusers_account.get_sav_bal())\n #append line to file\n append_new_line(Xcredit_file, add_this_line)\n\n if Xaction_type == \"withdraw\" and Xcredit_type == \"current\":\n amt_entered = amount_entered()\n #check if funds is sufficient\n if amt_entered > Xusers_account.get_cur_bal():\n print(\"Insufficient funds.\")\n else: #withdraw the money from the account.\n Xusers_account.withdraw_current(amt_entered)\n #add record to the file.\n add_this_line = \"\" + date.today().strftime(\"%m-%d-%Y\") +\"\\\\t\" + Xaction_type + \"\\\\t\" + str(amt_entered) + \"\\\\t\" + str(Xusers_account.get_cur_bal())\n #append line to file\n append_new_line(Xcredit_file, add_this_line)\n\n if Xaction_type == \"balance\" and Xcredit_type == \"savings\":\n print(\"savings total is #\" + f'{users_account.get_sav_bal():,}')\n\n if Xaction_type == \"balance\" and Xcredit_type == \"current\":\n print(\"current total is #\" + f'{users_account.get_cur_bal():,}')\n\n if Xaction_type == \"history\" and Xcredit_type == \"savings\":\n #print necessary information from the file\n print_history(Xcredit_file)\n\n if Xaction_type == \"history\" and Xcredit_type == \"current\":\n #print necessary information from the file\n print_history(Xcredit_file)",
"def create_incoming_transaction(transaction: IncomingTransactionCreate, db: Session = Depends(get_db), auth_user: User=Depends(manager)):\n try:\n transaction = transaction_service.create(db, auth_user, transaction)\n return transaction\n except ItensNotFound as err:\n\t raise HTTPException(status_code=404, detail=f\"Os seguintes produtos não foram encontrados no sistema: {str(err)}\")\n except ProductsNotFound as err:\n raise HTTPException(status_code=400, detail=\"A movimentação a ser registrada deve conter no minimo um produto.\")\n except ProviderNotFound as err:\n\t raise HTTPException(status_code=404, detail=f\"O fornecedor informado não foi encontrado: {str(err)}\")\n except InvalidStockQuantity as err:\n products_missing = transaction_service.make_response(db, str(err))\n raise HTTPException(status_code=400, detail={\n \"message\": \"A quantidade informada para os seguintes produtos deve ser maior do que zero.\",\n \"products_missing\": products_missing\n })",
"async def transfer_asset(request):\n \n required_fields = ['label', 'source', 'target' , 'amount' ,'resource' ]\n common.validate_fields(required_fields, request.json)\n\n transfer = _create_transfer_dict(request)\n sender = _create_transfer_participant(request.json, transfer)\n signer = await common.get_signer(request)\n\n # print(\"transfer =======> \", transfer)\n # print(\"sender =========> \", sender)\n\n batches, batch_id = transaction_creation.transfer_asset(\n txn_key = signer,\n batch_key = request.app.config.SIGNER,\n identifier = transfer['id'],\n label = transfer.get('label'),\n sender = sender,\n amount = transfer['amount'])\n\n # print(\"batches =========> \", batches)\n\n await messaging.send(\n request.app.config.VAL_CONN,\n request.app.config.TIMEOUT,\n batches)\n\n await messaging.check_batch_status(request.app.config.VAL_CONN, batch_id)\n\n return response.json({\"transfer\" : \"asad\"})",
"def submit_textarea():\n\n post_content = request.form[\"content\"]\n author = request.form[\"author\"]\n\n post_object = {\n 'author': author,\n 'content': post_content\n }\n\n # Submit a tx\n new_tx_address = f\"{CONNECTED_NODE_ADDRESS}/new_transaction\"\n\n request.post(new_tx_address,\n json=post_object, \n headers={'Content-type': 'application/json'})\n # return to homepage\n return redirect('/')",
"def recordTransaction(self, loop, transaction):\n\n a = {}\n a['time'] = transaction.transactionTime\n a['atm'] = transaction.transactionATM.atmID\n a['transaction'] = transaction.transactionType\n a['cash'] = transaction.transactionATM.atmCash\n a['status'] = transaction.transactionStatus\n self._atmDict[loop] = a\n\n c = {}\n c['time'] = transaction.transactionTime\n c['client'] = transaction.transactionCard.cardAccount.accountClient.clientID\n c['account'] = transaction.transactionCard.cardAccount.accountNumber\n c['transaction'] = transaction.transactionType\n c['balance'] = transaction.transactionCard.cardAccount.accountBalance\n c['status'] = transaction.transactionStatus\n self._clientDict[loop] = c\n\n t = {}\n t['time'] = transaction.transactionTime\n t['transaction'] = transaction.transactionType\n t['amount'] = transaction.transactionAmount\n t['status'] = transaction.transactionStatus\n self._transactionDict[loop] = t",
"def transfer(file_obj):",
"def tests_ti_file_add_action(self):\n file = cast(File, self.ti_helper.create_indicator())\n indicator_data = {\n 'confidence': randint(0, 100),\n 'ip': self.ti_helper.rand_ip(),\n 'owner': self.owner,\n 'rating': randint(0, 5),\n }\n target = self.ti.address(**indicator_data)\n target.create()\n response = file.add_action('traffic', target)\n assert response.ok\n target.delete()",
"def record_transaction(self, transaction: Transaction) -> bool:\n if self._locked:\n print('Failed to record transaction! Your account has been locked!'\n )\n return False\n\n if transaction.amount > self.bank_balance:\n print('Failed to record transaction! Not enough balance!')\n return False\n\n budget = self.budget_manager.get_budget(transaction.budget_category)\n if budget.locked:\n print('Failed to record transaction! This budget has been locked!')\n return False\n\n self.transactions.append(transaction)\n self.bank_balance -= transaction.amount\n budget.amount_spent += transaction.amount\n self._warn_and_lock_if_needed(transaction)\n return True",
"def add_transfer(self, transfer, run):\n transfer = Transfer(transfer)\n target = transfer.target\n external = not(transfer.direction.startswith('vos'))\n # External data transfers\n if external:\n # Check transfer uri\n checks = check_uri(transfer.target, self.sm, ignoreExist = True)\n if checks['container']: raise VOSpaceError(500, \"Data cannot be uploaded to a container\")\n if not checks['exists']:\n if transfer.direction in ['pushToVoSpace', 'pullToVoSpace']:\n node = DataNode()\n # Reserved URI\n if transfer.target.endswith(AUTO):\n uri = generate_uri(transfer.target)\n transfer.target = uri\n else:\n uri = transfer.target\n node.set_uri(uri)\n location = get_location(uri)\n self.sm.create_node(node.tostring(), uri, NODETYPES[node.TYPE], location = location)\n else:\n raise VOSpaceError(409, \"A Node does not exist with the requested URI.\", summary = NODE_NOT_FOUND)\n # Verify view\n print transfer.view.uri\n if transfer.view.uri != DEFAULT_VIEW and transfer.view.uri not in SERVICE_VIEWS and ANY_VIEW not in SERVICE_VIEWS: raise VOSpaceError(500, \"Service does not support the requested View\", summary = VIEW_NOT_SUPPORTED)\n # Negotiate protocols\n transfer.set_protocols(self._negotiate_protocols(transfer.protocols, transfer.direction))\n # Accept job\n method = self._get_handler(transfer)\n jobid = self.jm.add_job(transfer.tostring(), 'transferDetails', method, run)\n if external:\n for protocol in transfer.protocols:\n self.sm.register_transfer(jobid, protocol.endpoint)\n self.sm.register_details(jobid, transfer.tostring())\n return jobid",
"def post(self):\n blob_key = self.request.get(\"blobkey\")\n\n database_creation.run(blob_key)",
"def new_transaction(self, sender, recipient, amount):\n\n\t\tself.current_transactions.append({\n\t\t\t'sender': sender,\n\t\t\t'recipient': recipient,\n\t\t\t'amount': amount,\t\t\n\t\t})\n\t\t\n\t\treturn self.last_block()['index'] + 1",
"def transfer_create(self, volume_id, name=None):\n name = name or self.generate_random_name()\n aname = \"cinder_v%s.transfer_create\" % self.version\n with atomic.ActionTimer(self, aname):\n return self._get_client().transfers.create(volume_id, name=name)",
"def _do_commit(self):",
"def track_transaction(self, transaction, items):\n trans = Transaction()\n trans.order_id = transaction.get('order_id', None)\n trans.total = transaction.get('total', None)\n trans.tax = transaction.get('tax', None)\n trans.affiliation = transaction.get('affiliation', None)\n trans.shipping = transaction.get('shipping', None)\n trans.city = transaction.get('city', None)\n trans.state = transaction.get('state', None)\n trans.country = transaction.get('country', None)\n\n for item in items:\n gitem = gaItem()\n gitem.sku = item.get('sku', None)\n gitem.name = item.get('name', None)\n gitem.variation = item.get('variation', None)\n gitem.price = item.get('price', None)\n gitem.quantity = item.get('quantity', 1)\n trans.add_item(gitem)\n\n self.ga_tracker.track_transaction(transaction=trans,session=self.ga_session,visitor=self.ga_visitor)",
"def transaction(id=None):\n\n if request.method == 'GET':\n if id:\n data = LineItem.query.get_or_404(id).to_dict()\n return jsonify(data)\n else:\n return bad_request('no id provided')\n\n # Create a new transaction\n elif request.method == 'POST':\n data = request.get_json() or {}\n # check for required fields\n if 'amount' not in data or\\\n 'location' not in data or\\\n 'category_id' not in data or\\\n 'date' not in data:\n return bad_request('missing data')\n\n date = datetime.strptime(data['date'], \"%m/%d/%Y\")\n lineitem = LineItem(data['amount'], date, data['location'],\n data['description'], data['category_id'])\n db.session.add(lineitem)\n db.session.commit()\n response = jsonify(lineitem.to_dict())\n response.status_code = 201\n response.headers['location'] = url_for('api.transaction',\n id=lineitem.id)\n return response\n # Edit a transaction\n elif request.method == 'PUT':\n data = request.get_json() or {}\n # is an id specified in either way?\n if 'id' in data:\n if id is None:\n if data['id'] != id:\n return bad_request('two different ids specified')\n else:\n id = int(data['id'])\n elif id is None:\n return bad_request('no id specified')\n\n item = LineItem.query.get_or_404(id)\n item.from_dict(data)\n db.session.commit()\n return jsonify(item.to_dict())\n # Delete\n elif request.method == 'DELETE':\n lineitem = LineItem.query.get_or_404(id)\n db.session.delete(lineitem)\n db.session.commit()\n return '', 204\n else:\n return bad_request('Operation not supported')",
"def upload(self, cr, ads_manager):\n if self.data['order']['articles']:\n res = super(ads_sales_order, self).upload(cr, ads_manager)\n if self.browse_record and self.file_name:\n self.browse_record.write({'ads_file_name': self.file_name})\n return res\n else:\n return False",
"def insertTranscription(dbConnection, realtimefactor, transcription, duration, dbID):\n try:\n cursor = dbConnection.cursor()\n cursor.execute(\"UPDATE transcriptions SET realtimefactor = '\" + realtimefactor + \"', transcription = '\" + transcription + \"', datetranscribed = now(), duration = '\" + duration + \"' WHERE id = '\" + str(dbID) + \"';\")\n dbConnection.commit()\n cursor.close()\n return True\n except Exception as e:\n Tools.writeException(\"uploadTranscriptionData\", e)\n return False",
"def add_transaction(self, date, payee_id, description, amount):\n # [todo] - implement error handling and parameter checking pre-execution\n\n # open a cursor\n cur = self.get_cursor()\n\n self.reset_auto_increment('transactions')\n\n # add transaction with required values\n stmt = \"INSERT INTO transactions \" + \\\n \"VALUES ('0', \" + \\\n \"'{0}-{1}-{2}', \".format(date.year, date.month, date.day) + \\\n \"'{0}', '{1}', \".format(payee_id, description) + \\\n \"'{0}')\".format(amount)\n\n cur.execute(stmt)\n\n # close the cursor\n self.close_cursor()",
"def send_commit(): \r\n \r\n commit_id = request.args[0]\r\n r_commit = db.logs_commit[commit_id]\r\n \r\n # Create a new send record\r\n send_id = db.logs_send.insert( datetime = request.utcnow,\r\n inventory_store_id = r_commit.inventory_store_id,\r\n to_inventory_store_id = r_commit.for_inventory_store_id\r\n )\r\n \r\n #Only select items which are in the warehouse\r\n commit_items = db( (db.logs_commit_item.logs_commit_id == commit_id) & \\\r\n (db.logs_commit_item.logs_req_item_id == db.logs_req_item.id) & \\\r\n (db.logs_req_item.item_id == db.inventory_store_item.item_id) & \\\r\n (db.logs_commit_item.deleted == False) & \\\r\n (db.logs_req_item.deleted == False) & \\\r\n (db.inventory_store_item.deleted == False)\r\n ).select( db.inventory_store_item.id,\r\n db.logs_commit_item.quantity,\r\n db.logs_commit_item.item_packet_id,\r\n ) \r\n \r\n for commit_item in commit_items: \r\n send_item_id = db.logs_send_item.insert( logs_send_id = send_id,\r\n store_item_id = commit_item.inventory_store_item.id,\r\n quantity = commit_item.logs_commit_item.quantity,\r\n item_packet_id = commit_item.logs_commit_item.item_packet_id \r\n ) \r\n \r\n # Redirect to send\r\n redirect(URL(r = request,\r\n c = \"logs\",\r\n f = \"send\",\r\n args = [send_id]\r\n )\r\n )",
"def submit(self):\n data = self.getFSNDataDict()\n if data != []:\n MOSES.addToPiggyBank(data, self.user_id, self.password)",
"def create_transaction(self, sender, recipient, amount):\n\n self.current_transactions.append({\n 'sender': sender,\n 'recipient': recipient,\n 'amount': amount,\n })\n return self.last_block['index'] + 1",
"def send_transaction(self, signd_txn):\n return self.web3.eth.send_raw_transaction(signd_txn.rawTransaction).hex()",
"def attach(request, id):\n transaction = get_object_or_404(Transaction, pk=id, user=request.user)\n if request.method == 'POST':\n form = AttachForm(data=request.POST, files=request.FILES)\n if form.is_valid():\n a = form.save(commit=False)\n a.transaction = transaction\n a.save()\n return redirect(transaction)\n else:\n form = AttachForm()\n return render(request, 'pages/form.html', {\n 'title': \"Attach File\",\n 'description': \"You are about to attach a file to {}.\".format(transaction),\n 'breadcrumbs': [transaction.account, transaction],\n 'form': form,\n })",
"def add_income(transaction):\n conn = create_connection(database)\n\n sql = ''' UPDATE card\n SET balance = balance + ?\n WHERE number = ?'''\n\n with conn:\n cur = conn.cursor()\n cur.execute(sql, transaction)\n conn.commit()"
] | [
"0.61232543",
"0.6110544",
"0.608616",
"0.6082285",
"0.607676",
"0.6025124",
"0.59454113",
"0.5938837",
"0.5938707",
"0.5918176",
"0.58792144",
"0.5847983",
"0.5846506",
"0.5797633",
"0.57951856",
"0.5787983",
"0.57702297",
"0.5753735",
"0.5752439",
"0.5735202",
"0.5712338",
"0.56774163",
"0.5671007",
"0.5662342",
"0.5646987",
"0.56437975",
"0.5641894",
"0.56352246",
"0.56232667",
"0.56156826",
"0.56136644",
"0.5610049",
"0.5591521",
"0.55842453",
"0.5581955",
"0.55487025",
"0.5533956",
"0.55333704",
"0.55120265",
"0.5509401",
"0.5504947",
"0.5492087",
"0.54787576",
"0.5459165",
"0.5452937",
"0.5436747",
"0.5436525",
"0.54352367",
"0.5434418",
"0.54299587",
"0.5429394",
"0.5424614",
"0.54183686",
"0.54149425",
"0.54106534",
"0.54052067",
"0.54025966",
"0.5390085",
"0.53867006",
"0.53865635",
"0.5383226",
"0.5376493",
"0.53759927",
"0.53753066",
"0.53725445",
"0.53712827",
"0.53712785",
"0.53456855",
"0.5343066",
"0.53388405",
"0.53337616",
"0.5324898",
"0.52801985",
"0.5277503",
"0.5276349",
"0.5276016",
"0.5275903",
"0.52696294",
"0.5266498",
"0.5261438",
"0.5248539",
"0.5237997",
"0.5229876",
"0.52247787",
"0.52236754",
"0.52229005",
"0.52224195",
"0.5218959",
"0.5209219",
"0.51941025",
"0.51928616",
"0.5192469",
"0.5183288",
"0.5181278",
"0.5179197",
"0.51784223",
"0.5177237",
"0.51756746",
"0.516385",
"0.5163013",
"0.516061"
] | 0.0 | -1 |
Check whether this command is allowed to be run in the current device state. | def check_allowed(self):
if self.state_model.op_state in [
DevState.FAULT,
DevState.UNKNOWN,
DevState.ON,
]:
tango.Except.throw_exception(
f"Disable() is not allowed in current state {self.state_model.op_state}",
"Failed to invoke Disable command on SdpMasterLeafNode.",
"SdpMasterLeafNode.Disable() ",
tango.ErrSeverity.ERR,
)
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_allowed(self):\n if self.state_model.op_state in [\n DevState.FAULT,\n DevState.UNKNOWN,\n DevState.DISABLE,\n ]:\n return False\n\n return True",
"def check_device_state(self):",
"def check_subsystem_commands(self):\n self.communications.check_controls()\n self.__check_video()\n self.__check_picture()\n self.__check_ping()\n self.__check_motion()",
"def is_telescope_on_allowed(self):\n handler = self.get_command_object(\"TelescopeOn\")\n return handler.check_allowed()",
"def can_run(self):\n return True",
"def check_commands(self):\n self.check_subsystem_commands()\n self._select_mode()",
"def is_Scan_allowed(self):\n handler = self.get_command_object(\"Scan\")\n return handler.check_allowed()",
"async def can_run(self, ctx: Context) -> bool:\n\n if not self.enabled:\n raise DisabledCommand(f'{self.name} command is disabled')\n\n original = ctx.command\n ctx.command = self\n\n try:\n if not await ctx.bot.can_run(ctx):\n raise CheckFailure(f'The global check functions for command {self.qualified_name} failed.')\n\n cog = self.cog\n if cog is not None:\n local_check = Cog._get_overridden_method(cog.cog_check)\n if local_check is not None:\n ret = await guilded.utils.maybe_coroutine(local_check, ctx)\n if not ret:\n return False\n\n predicates = self.checks\n if not predicates:\n # since we have no checks, then we just return True.\n return True\n\n return await guilded.utils.async_all(predicate(ctx) for predicate in predicates) # type: ignore\n finally:\n ctx.command = original",
"def IsAllowed(self):\r\n\r\n return self.notify.IsAllowed()",
"def IsAllowed(self):\r\n\r\n return self.notify.IsAllowed()",
"def IsAllowed(self):\r\n\r\n return self.notify.IsAllowed()",
"def check_commands(self):\n pass",
"def can_edit(self):\n return self.state not in (\n 'scanning', 'resulted', 'cancelled', 'aborted')",
"def can_run(self):\n\t\treturn self._start is None",
"def canAct(self) -> bool:\n return self.cooldown < 1",
"def system_valid(self):\n return self.udev.devices_exist",
"def is_Slew_allowed(self):\n handler = self.get_command_object(\"Slew\")\n return handler.check_allowed()",
"def __is_active(self, command):\n return True",
"def authorized(self) -> bool:\n\n return (\n self.activated\n or self.on_screen\n or self.on_file\n or (\n bool(PyFunceble.storage.CONFIGURATION)\n and bool(PyFunceble.storage.CONFIGURATION.debug.active)\n )\n )",
"async def permission_valid_check(cls):\n pass",
"async def __local_check(self, ctx):\n if not isinstance(ctx.channel, discord.TextChannel):\n raise InvalidChannelCheck(ctx.command)\n me = ctx.me.guild_permissions\n perms = (me.manage_messages, me.manage_nicknames, me.ban_members, me.kick_members)\n if not all(perms):\n raise BotPermissionsCheck(ctx.command)\n else:\n return True",
"def is_controlled(self):\n return QtCore.QThread.currentThread() is self.thread",
"def available(self):\n return self._adb_available and self._dev_emu and (self._is_root\n or self._is_su)",
"def is_allowed(self) -> bool:\n return self.effect == ALLOW_ACCESS",
"def can_act(self) -> bool:\n return self.cooldown < 1",
"def can_act(self) -> bool:\n return self.cooldown < 1",
"def check_command(self):\n return self.process is not None and self.process.poll() is None",
"def available_on_system(cls):\n return (cls.reason_to_be_disabled() is None)",
"def can(self):\n return self._can",
"def can(self):\n return self._can",
"def checkIfEnabled(self):\n\n # Reload the command file to check for new commands\n importlib.reload(BotSettings)\n matches = BotSettings.config['commands']\n\n # Check for the match and if it is there return the value that goes with the command\n for key in matches:\n key.strip(\"!\")\n if key == self.command:\n return matches.get(key)\n\n # If reached the command does not exist\n return False",
"def is_telescope_standby_allowed(self):\n handler = self.get_command_object(\"TelescopeStandby\")\n return handler.check_allowed()",
"def non_root_available(self):\n return self._adb_available and self._dev_emu",
"def assumed_state(self):\n return self._command_state is False",
"def is_in_cmd(self):\r\n return self.select_cmd is not None",
"async def should_handle(self):\n local_controller = self.controller\n self.selected_pools = local_controller.pools.ready.idle\n return (\n local_controller.can_upgrade(ZERGLINGATTACKSPEED, RESEARCH_ZERGLINGADRENALGLANDS, self.selected_pools)\n and local_controller.hives\n )",
"def is_Disable_allowed(self):\n handler = self.get_command_object(\"Disable\")\n return handler.check_allowed()",
"def permissive(self) -> bool:\n return self._permissive",
"def is_in_controlled(self):\n return threadprop.current_controller(require_controller=False) is self",
"def can(self, unused_perm):\n return False",
"def _is_device_active(self):\n return self.power_mode == STATE_ON",
"def can_act(self, **kwargs):\n return True",
"def check_enable_mode(self, *args, **kwargs):\n pass",
"def check_connectivity(self):\n r = self.run_cmd(\"get-state\")\n return r.startswith(\"device\")",
"def is_valid(self):\n return self.is_active",
"def is_valid(self):\n if not self.__usb_if:\n return False\n return self.__usb_if.is_valid()\n #end is_valid()",
"def _is_current_intent_protected(self):\n return self._can_be_protected(self._current_intent_start, self._current_intent_end)",
"def __bool__(self):\n return any(self.smask)",
"def is_available(self) -> bool:\n return (\n len(self._gpu_ids) > 1\n and \"TORCHELASTIC_RUN_ID\"\n not in os.environ # If otx is executed by torchrun, then otx multi gpu interface is disabled.\n )",
"def has_permission(self, request):\n return request.user.is_active",
"def has_permission(self, request):\n return request.user.is_active",
"def has_permission(self, request):\n return request.user.is_active",
"def _should_run_now(self):\n # Assumes the unit/all values will have values.\n if not len(self._device_values.keys()) > 0:\n return False\n return not len(self._needed_devices) > 0",
"def is_controlled(self):\n return False if self._remote_controller == \"\" else True",
"def is_valid(self, user=None) -> bool:\n permissions = self.permissions_class(commands=self.permissions)\n conditions = self.conditions_class(commands=self.conditions)\n return (permissions.execute(self.state, user) and\n conditions.execute(self.state))",
"def check_state(self):\n pass",
"def should_poll(self):\n return self._command_state is not None",
"def has_permission(self, request):\n\t\treturn request.user.is_active",
"def available(self):\n return True if self._device.status == \"AVAILABLE\" else False",
"def valid_for_send(self, app):\n return (\n (self.to is not None) and\n (self.next_hop is not None) and\n (self.source is not None) and\n (self.command is not None) and\n (self.handler is not None) and\n (self.kind is not None) and\n (self.time_to_live is not None) and\n (self.time_to_live >= app.tick)\n )",
"def is_valid(self):\n if not self.__usb_dev:\n return False\n if isinstance(self.__usb_dev, MpUsbApi) and (self.__handle_write != -1) and (self.__handle_read != -1):\n return True\n if isinstance(self.__usb_dev, usb.core.Device):\n return True\n return False\n #end valid()",
"def is_running(self):\n\t\treturn self in _running",
"async def control_checks(self, ctx):\n server_id = ctx.message.server.id\n requester = ctx.message.author\n #silently drop if not in voice\n if not self.in_voice(server_id):\n return False\n #refuse if user not in the same channel\n if not self.user_in_channel(server_id, requester):\n vcname = self.get_server_dict(server_id)['voice'].channel.name\n await ctx.bot.send_message(ctx.message.channel, \"You can't control me outside of {}.\".format(vcname))\n return False\n return True",
"def can_enable(self, silent: bool = False) -> bool:\n if self.is_access_expired():\n token = self.cfg.machine_token['machineToken']\n contract_client = contract.UAContractClient(self.cfg)\n contract_client.request_resource_machine_access(\n token, self.name)\n if not self.contract_status() == ContractStatus.ENTITLED:\n if not silent:\n print(status.MESSAGE_UNENTITLED_TMPL.format(title=self.title))\n return False\n application_status, _ = self.application_status()\n if application_status != status.ApplicationStatus.DISABLED:\n if not silent:\n print(status.MESSAGE_ALREADY_ENABLED_TMPL.format(\n title=self.title))\n return False\n applicability_status, details = self.applicability_status()\n if applicability_status == status.ApplicabilityStatus.INAPPLICABLE:\n if not silent:\n print(details)\n return False\n return True",
"def cog_check(self, ctx):\n return ctx.author.guild_permissions.administrator",
"def allowSecretChat(self):\n assert self.notify.debugStateCall(self, 'loginFSM', 'gameFSM')\n return (self.secretChatAllowed or \\\n (self.productName == \"Terra-DMC\" and self.isBlue() and self.secretChatAllowed))",
"def can_make_action(self) -> bool:\n return not(self.has_pending_action or self.is_dead())",
"def cog_check(self, ctx):\r\n return ctx.author.guild_permissions.administrator",
"def cog_check(self, ctx):\r\n return ctx.author.guild_permissions.administrator",
"def is_telescope_off_allowed(self):\n handler = self.get_command_object(\"TelescopeOff\")\n return handler.check_allowed()",
"def __call__(self):\n status = self.os.popen('circusctl status validator').read().strip()\n\n if status == 'active':\n return True\n elif status == 'stopped':\n return False",
"def perms_check(self, ctx):\r\n\t\tcommand = ctx.invoked_with\r\n\t\ttry:\r\n\t\t\tif config.cfg[\"main\"][\"perms\"][command] in [x.id for x in ctx.author.roles]:\r\n\t\t\t\treturn True\r\n\t\t\treturn False\r\n\t\texcept KeyError:\r\n\t\t\tif config.cfg[\"main\"][\"perms\"][\"global\"] in [x.id for x in ctx.author.roles]:\r\n\t\t\t\treturn True\r\n\t\t\treturn False",
"def can_activate(self):\n if self.video_library.get_number_of_video_clips() == 0:\n return False\n else:\n return True",
"def has_commands(self) -> bool:\n return len(self.commands) > 0",
"def has_oam_cli(self):\n try:\n r = self.scripts.get_oam_status()\n except Exception:\n r = False\n return bool(r)",
"def is_insufficient_permissions(self):\n return self._tag == 'insufficient_permissions'",
"def check(self):\n return True",
"def is_enabled(command):\n if command not in Controller.commands:\n return False\n return Controller.commands[command][2]",
"def run_command_check(self):\n pass",
"def check(self):\n\n if not self.target.ok():\n return False\n\n if not self.progid.ok():\n return False\n\n if not self.prinapp.ok():\n return False\n\n if not self.observers.ok():\n return False\n\n return True",
"def safety_check(self):\n rs = moveit_msgs.msg.RobotState()\n current_joint_angles = self._limb.joint_angles()\n for joint in current_joint_angles:\n rs.joint_state.name.append(joint)\n rs.joint_state.position.append(current_joint_angles[joint])\n result = self._sv.get_state_validity(rs, self._moveit_group)\n return result.valid",
"def still_active(pid: int, cmd: str) -> bool:\n os_cmd = get_command_for_pid(pid)\n return cmd in os_cmd",
"def permit_required(self):\n return \"permission\" in self.description.lower()",
"def available(self) -> bool:\n return self._ctrl.connected()",
"def is_on(self):\n if self.is_update_locked():\n return self.graceful_state\n if self._state['action'] == 1 and self._state['state'] == 2:\n return True\n return False",
"def validate_command(command):\n return command in list(VALID_COMMANDS.keys())",
"def check_connected(self):\n return\\\n (self.setup is not None) and\\\n (self.design is not None) and\\\n (self.project is not None) and\\\n (self.desktop is not None) and\\\n (self.app is not None)",
"def check_channel_exec_request(self, channel, command):\n return False",
"def is_Track_allowed(self):\n handler = self.get_command_object(\"Track\")\n return handler.check_allowed()",
"def check_config_mode(self):\n return False",
"def available(self):\n return self._state is not None",
"def available(self):\n return self._state is not None",
"def is_running(self):\n status = self.get_status_response()\n return ((status[1] & 2) == 2)\n #end is_running()",
"def privileged(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"privileged\")",
"def privileged(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"privileged\")",
"def can_take_damage(self):\n result = True\n if self.side_effects[\"shield\"] > 0:\n result = False\n return result",
"async def should_handle(self):\n local_controller = self.controller\n cavern = local_controller.caverns\n if local_controller.hives and not cavern:\n return False\n if not local_controller.can_train(HYDRALISK, local_controller.hydradens.ready):\n return False\n if local_controller.pits.ready and not local_controller.hives and not await BuildHive.morphing_lairs(self):\n return False\n if cavern.ready:\n return len(local_controller.ultralisks) * 2.75 > len(local_controller.hydras)\n return not local_controller.floating_buildings_bm",
"def is_available_while_running(cls) -> bool:\n\n return True",
"def _can_login(self):\n return all([self.user.is_active, self.status, self.status_detail == \"active\"])",
"async def locked(self):\n return not \"not\" in await self.ask(\"locked\")"
] | [
"0.7648626",
"0.7175639",
"0.6919901",
"0.69176394",
"0.68736464",
"0.68403673",
"0.682443",
"0.6819756",
"0.6730472",
"0.6730472",
"0.6730472",
"0.66860986",
"0.66518617",
"0.6585563",
"0.6568026",
"0.6549128",
"0.6543349",
"0.65116644",
"0.6509165",
"0.6509131",
"0.65001637",
"0.6488481",
"0.6467491",
"0.6443953",
"0.6431574",
"0.6431574",
"0.6399017",
"0.63965964",
"0.6390708",
"0.6390708",
"0.63833493",
"0.6359321",
"0.63589054",
"0.6353716",
"0.6349663",
"0.6323602",
"0.6320985",
"0.6305885",
"0.63056",
"0.62972546",
"0.62907934",
"0.6276974",
"0.6270419",
"0.6270063",
"0.62693155",
"0.62658036",
"0.6253764",
"0.6247202",
"0.6236914",
"0.62023646",
"0.62023646",
"0.62023646",
"0.62003535",
"0.6200187",
"0.61916983",
"0.6177075",
"0.6174419",
"0.61699086",
"0.61663747",
"0.61596894",
"0.6155612",
"0.61443204",
"0.6138113",
"0.6134744",
"0.6129942",
"0.6120675",
"0.6116211",
"0.6112188",
"0.6112188",
"0.6103003",
"0.610262",
"0.6100717",
"0.6093408",
"0.60742575",
"0.6072694",
"0.60658216",
"0.60607976",
"0.6053442",
"0.6048997",
"0.6043894",
"0.60371417",
"0.60243195",
"0.6019638",
"0.6015296",
"0.60121393",
"0.60051525",
"0.59889543",
"0.5987213",
"0.5986464",
"0.59850246",
"0.598479",
"0.598479",
"0.5978419",
"0.59756285",
"0.59756285",
"0.59714794",
"0.59706736",
"0.5966248",
"0.5953492",
"0.5947602"
] | 0.69579506 | 2 |
Callback function immediately executed when the asynchronously invoked command returns. Checks whether the Disable command has been successfully invoked on the SDP Master. | def disable_cmd_ended_cb(self, event):
this_server = TangoServerHelper.get_instance()
if event.err:
log_msg = (
f"{const.ERR_INVOKING_CMD}{event.cmd_name}\n{event.errors}"
)
self.logger.error(log_msg)
this_server.write_attr("activityMessage", log_msg, False)
else:
log_msg = f"{const.STR_COMMAND}{event.cmd_name}{const.STR_INVOKE_SUCCESS}"
self.logger.info(log_msg)
this_server.write_attr("activityMessage", log_msg, False) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def do(self):\n this_server = TangoServerHelper.get_instance()\n try:\n sdp_master_ln_fqdn = \"\"\n property_val = this_server.read_property(\"SdpMasterFQDN\")[0]\n sdp_master_ln_fqdn = sdp_master_ln_fqdn.join(property_val)\n sdp_mln_client_obj = TangoClient(sdp_master_ln_fqdn)\n sdp_mln_client_obj.send_command_async(\n const.CMD_Disable, None, self.disable_cmd_ended_cb\n )\n self.logger.debug(const.STR_DISABLE_CMS_SUCCESS)\n this_server.write_attr(\n \"activityMessage\", const.STR_DISABLE_CMS_SUCCESS, False\n )\n\n except DevFailed as dev_failed:\n self.logger.exception(dev_failed)\n log_msg = f\"{const.ERR_DISABLE_CMD_FAIL}{dev_failed}\"\n tango.Except.re_throw_exception(\n dev_failed,\n const.ERR_INVOKING_CMD,\n log_msg,\n \"SdpMasterLeafNode.DisableCommand()\",\n tango.ErrSeverity.ERR,\n )",
"def disable(self):\n\t\tresponse = self.client.post(self._endpoint + \"/disable\")\n\t\treturn bool(response.json[\"success\"])",
"def EnableAsyncConfSlavePortStatusDelete(self):\n\t\treturn self._get_attribute('enableAsyncConfSlavePortStatusDelete')",
"def is_Disable_allowed(self):\n handler = self.get_command_object(\"Disable\")\n return handler.check_allowed()",
"def _isdisable(self):\n return self.dp.state()==PyTango.DevState.DISABLE",
"def disable():\n if _status_apf():\n return __apf_cmd(\"-f\")",
"def check_disabled(self):\n return None",
"def disable(self):\n try:\n self.bus.open(self.BUS_NUMBER)\n self.write(AntennaDeployerCommand.DISARM_ANTS, 0x00)\n self.bus.close()\n return True\n except:\n return False",
"def EnableAsyncConfSlavePacketInNoMatching(self):\n\t\treturn self._get_attribute('enableAsyncConfSlavePacketInNoMatching')",
"async def async_turn_off(self):\n path = \"/queue/simple\"\n param = \".id\"\n value = None\n for uid in self._ctrl.data[\"queue\"]:\n if self._ctrl.data[\"queue\"][uid][\"name\"] == f\"{self._data['name']}\":\n value = self._ctrl.data[\"queue\"][uid][\".id\"]\n\n mod_param = \"disabled\"\n mod_value = True\n self._ctrl.set_value(path, param, value, mod_param, mod_value)\n await self._ctrl.async_update()",
"def check_allowed(self):\n if self.state_model.op_state in [\n DevState.FAULT,\n DevState.UNKNOWN,\n DevState.ON,\n ]:\n tango.Except.throw_exception(\n f\"Disable() is not allowed in current state {self.state_model.op_state}\",\n \"Failed to invoke Disable command on SdpMasterLeafNode.\",\n \"SdpMasterLeafNode.Disable() \",\n tango.ErrSeverity.ERR,\n )\n return True",
"async def async_turn_off(self):",
"def disable_status_check(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"disable_status_check\")",
"def EnableAsyncConfMasterPortStatusDelete(self):\n\t\treturn self._get_attribute('enableAsyncConfMasterPortStatusDelete')",
"def disable(self):\n self.error_code = 'DISABLED'\n self.running = False",
"def on_disable(self) -> None:\n self._cancel_automation()",
"def EnableAsyncConfSlavePortStatusModify(self):\n\t\treturn self._get_attribute('enableAsyncConfSlavePortStatusModify')",
"def disabled(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"disabled\")",
"def test_yes_option_disabled(\n self, wait_tx_settled_mock, confirm_mock, do_transfer_mock\n ):\n password_option = self.get_password_args(self.PASSWORD)\n self.invoke(\n \"transfer\",\n self.LEDGER_ID,\n self.get_address(self.LEDGER_ID, self.PASSWORD),\n \"100000\",\n \"100\",\n *password_option,\n )\n confirm_mock.assert_called_once()",
"def can_be_disabled(self) -> bool:\n return True",
"async def async_turn_off(self, **kwargs):\n try:\n if await self._api.set_relay_state(self._dev_id, \"off\"):\n self._is_on = False\n self.async_write_ha_state()\n except Smile.PlugwiseError:\n _LOGGER.error(\"Error while communicating to device\")",
"def should_trigger(self, previous_result, *_args, **_kwargs):\n return self.extension.config.get('enabled', True)",
"def on_disable(self) -> None:\n self._on_stop_cycle({})",
"async def async_turn_off(self, **kwargs: Any) -> None:\n _LOGGER.debug(\"tried to switch off %s\", self.name)\n try:\n await self.hass.async_add_executor_job(\n self.device.appliance.set_setting,\n BSH_POWER_STATE,\n self.device.power_off_state,\n )\n except HomeConnectError as err:\n _LOGGER.error(\"Error while trying to turn off device: %s\", err)\n self._state = True\n self.async_entity_update()",
"async def async_turn_off(self, **kwargs):\n if self.is_on == True:\n await self.async_call_service(self._cfg.get('turn_off_service'))\n self._state = False",
"def disable_run_command(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"disable_run_command\")",
"def __bool__(self):\n return self.wait(0)",
"def disable(self):\n return self.enable(False)",
"def disable_status_check(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"disable_status_check\")",
"def disable_status_check(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"disable_status_check\")",
"async def async_turn_off(self, **kwargs: Any) -> None:\n await self.async_publish(\n self._config[CONF_COMMAND_TOPIC],\n self._config[CONF_PAYLOAD_OFF],\n self._config[CONF_QOS],\n self._config[CONF_RETAIN],\n self._config[CONF_ENCODING],\n )\n if self._optimistic:\n # Optimistically assume that switch has changed state.\n self._attr_is_on = False\n self.async_write_ha_state()",
"def stopAsync(self):\n return internals.blpapi_ProviderSession_stopAsync(self.__handle) == 0",
"def disabled(name):\n return not enabled(name)",
"def disabled(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"disabled\")",
"def disabled(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"disabled\")",
"async def async_turn_off(self, **kwargs):\n try:\n state_off = await self._api.set_relay_state(\n self._dev_id, self._members, \"off\"\n )\n if state_off:\n self._is_on = False\n self.async_write_ha_state()\n except PlugwiseException:\n _LOGGER.error(\"Error while communicating to device\")",
"def disable_detector(self):\n detector_id = self.list_detector()\n if detector_id:\n try:\n response = self.client.update_detector(DetectorId=detector_id, Enable=False)\n print(detector_id, 'has been disabled')\n return True\n except ClientError as e:\n print(e.response['Error']['Code'])\n return False\n else:\n print('no detector has been found.')\n return False",
"def wait_all_ports_admin_disabled(self):\n pass",
"def reason_to_be_disabled(cls):\n # Assume by default the given decoder is always enabled.\n return None",
"def wait_for_bluetooth_disconnection(self, timeout=60):\n result = True\n apollo_status = self.dut.get_bt_status()\n self.logger.info('Waiting for the disconnection.')\n time.sleep(1)\n ini_time = time.time()\n while len(apollo_status) != len(\n [s for s in apollo_status.values() if s == 'FALSE']):\n apollo_status = self.dut.get_bt_status()\n if (time.time() - ini_time) > timeout:\n self.logger.warning('Timeout waiting for the disconnection.')\n return False\n time.sleep(1)\n return result",
"def EnableAsyncConfMasterPacketInNoMatching(self):\n\t\treturn self._get_attribute('enableAsyncConfMasterPacketInNoMatching')",
"async def async_unlock(self, **kwargs: Any) -> None:\n if not await self._node.secure_unlock():\n raise HomeAssistantError(f\"Unable to unlock device {self._node.address}\")",
"async def async_turn_off(self, **kwargs: Any) -> None:\n try:\n result = await self.hass.async_add_executor_job(\n self.coordinator.ezviz_client.sound_alarm, self._serial, 1\n )\n\n except (HTTPError, PyEzvizError) as err:\n raise HomeAssistantError(\n f\"Failed to turn siren off for {self.name}\"\n ) from err\n\n if result:\n if self._delay_listener is not None:\n self._delay_listener()\n self._delay_listener = None\n\n self._attr_is_on = False\n self.async_write_ha_state()",
"async def async_turn_off(self, **kwargs: Any) -> None:\n await self.entity_description.set_command(self, False)",
"def disable_radio(self):\n self.acquire_response(b'AT*R0')",
"def setDelayStatus(self, channel, isEnabled, unitCode=0):\n resp = self.XAPCommand('DELAYSEL', channel, (1 if isEnabled else 0), unitCode=unitCode)\n return bool(int(resp))",
"def is_disabled(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"is_disabled\")",
"def disable(func):\n return func",
"def EnableAsyncConfMasterPortStatusModify(self):\n\t\treturn self._get_attribute('enableAsyncConfMasterPortStatusModify')",
"def disable(*args, value: bool=True, **kwargs)->None:\n pass",
"def get_disabled_switch(self):\n return self.disabled",
"def Disable(self):\n handler = self.get_command_object(\"Disable\")\n handler()",
"def on(self):\n status = self.dev.ctrl_transfer(0x40, 0x01, 0x0001, 0xa0, [])\n if status == 0:\n self.ev.set()\n return (status == 0)",
"def on_disable(self) -> None:\n self._cancel_notification_cycle()",
"def _thread_check_stop_event(self):\n self._require_controller_modes(['running_as_thread','running_as_blocking_call'])\n return self.thread.check_stop_event()",
"async def async_turn_off(self) -> None:\n if CONF_POWER_COMMAND_TOPIC in self._config:\n mqtt_payload = self._command_templates[CONF_POWER_COMMAND_TEMPLATE](\n self._config[CONF_PAYLOAD_OFF]\n )\n await self._publish(CONF_POWER_COMMAND_TOPIC, mqtt_payload)\n if self._optimistic:\n self._attr_hvac_mode = HVACMode.OFF\n self.async_write_ha_state()\n return\n # Fall back to default behavior without power command topic\n await super().async_turn_off()",
"def reject_waiting_call(self) -> None:",
"def disable(self) -> None:",
"def _led_disable():\n # type: () -> None\n GPIO.output(LED_nOE, GPIO.HIGH)",
"async def async_turn_off(self):\n path = \"/ip/firewall/nat\"\n param = \".id\"\n value = None\n for uid in self._ctrl.data[\"nat\"]:\n if (\n self._ctrl.data[\"nat\"][uid][\"name\"]\n == f\"{self._data['protocol']}:{self._data['dst-port']}\"\n ):\n value = self._ctrl.data[\"nat\"][uid][\".id\"]\n\n mod_param = \"disabled\"\n mod_value = True\n self._ctrl.set_value(path, param, value, mod_param, mod_value)\n await self._ctrl.async_update()",
"def test_verify_state_of_a_device_when_disconnected_from_the_device():",
"def EnableAsyncConfSlaveFlowRemovedHardTimeOut(self):\n\t\treturn self._get_attribute('enableAsyncConfSlaveFlowRemovedHardTimeOut')",
"def is_disabled(self):\n if self.needs_group and not self._conv.groups.keys():\n return \"This action needs a contact group.\"\n\n if self.needs_running and not self._conv.running():\n return \"This action needs a running conversation.\"\n\n return self.check_disabled()",
"def disable_call_waiting_per_call(self, dtmf_code: str) -> None:",
"def is_telescope_off_allowed(self):\n handler = self.get_command_object(\"TelescopeOff\")\n return handler.check_allowed()",
"def is_disabled(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"is_disabled\")",
"def disable(self) -> Awaitable[Dict]:\n return self.client.send(\"Security.disable\", {})",
"def should_slow_down(self, credential):\n raise NotImplementedError()",
"async def async_turn_off(self, **kwargs):\n await self.data.set_appliance_state(self.appliance_id, False)\n return True",
"def is_enabled(self):",
"async def async_turn_off(self):\n path = \"/interface\"\n param = \"default-name\"\n if \"-\" in self._data[\"port-mac-address\"]:\n param = \"name\"\n value = self._data[param]\n mod_param = \"disabled\"\n mod_value = True\n self._ctrl.set_value(path, param, value, mod_param, mod_value)\n\n if self._data[\"poe-out\"] == \"auto-on\":\n path = \"/interface/ethernet\"\n self._ctrl.set_value(path, param, value, \"poe-out\", \"off\")\n\n await self._ctrl.async_update()",
"def _wait_lift_cmd(xbee):\n shared.status['command'] = 'STDBY'\n util.log_info(\"%s Standby, awaiting 'LIFT'.\" % shared.AGENT_ID)\n \n wait_count = 0\n while True:\n time.sleep(.1)\n wait_count = wait_count + 1\n \n if shared.status['command'] == 'LIFT':\n comm.xbee_broadcast(xbee, \"IFO,%s cleared for takeoff.\" % shared.AGENT_ID)\n util.log_info(\"'LIFT' received! Taking off!\")\n return True\n \n elif shared.status['command'] == 'EXIT':\n comm.xbee_broadcast(xbee, \"IFO,%s abort takeoff.\" % shared.AGENT_ID)\n util.log_info(\"'EXIT' received. Abort takeoff.\")\n return False\n \n elif wait_count >= 100:\n wait_count = 0\n comm.xbee_broadcast(xbee,\"IFO,%s standby. Alt: %.2f m.\" % (shared.AGENT_ID, shared.des_alt))",
"def EnableAsyncConfSlavePortStatusAdd(self):\n\t\treturn self._get_attribute('enableAsyncConfSlavePortStatusAdd')",
"def disable(self) -> Awaitable[Dict]:\n return self.client.send(\"Database.disable\", {})",
"def disabled(self) -> bool:\n return pulumi.get(self, \"disabled\")",
"def disabled(self) -> bool:\n return pulumi.get(self, \"disabled\")",
"def getDelayStatus(self, channel, unitCode=0):\n resp = self.XAPCommand('DELAYSEL', channel, unitCode=unitCode)\n return bool(int(resp))",
"async def _cmdf_pmenable(self, substr, msg, privilege_level):\n enabled_str = None\n if utils.str_says_true(substr) or (len(substr) == 0):\n self._pm_msg_isenabled = True\n enabled_str = \"enabled.\"\n else:\n self._pm_msg_isenabled = False\n enabled_str = \"disabled.\"\n self._save_settings()\n\n buf = \"PM greetings is now \" + enabled_str\n await self._client.send_msg(msg, buf)\n return",
"def should_poll(self):\n return self._command_state is not None",
"def mock_api_stage_success_en_disable_schedule() -> List[bytes]:\n return create_standard_packets_list(DUMMY_DISABLE_ENABLE_SCHEDULE_RESPONSE)",
"def status_callback():\n if args['retire_idle']:\n return False\n\n return True",
"def check_command(self):\n return self.process is not None and self.process.poll() is None",
"def is_call_waiting(self) -> bool:",
"async def power_off(self):\n ...",
"def __disconnect_action(self, json_object):\n\n # parse out the action_port & slot values\n action_port = self.__get_params_for_action(json_object)\n if not action_port:\n return False, 'Unable to parse the required parameters for the disconnect action'\n\n if not self.on(int(action_port)):\n return False, 'Unable to set RF switch port {} off during off action'.format(action_port)\n\n return True, # don't forget the ',' character when no error string is being passed back in the tuple",
"def _evict(self):\n\n # Wait til the MDS is believed by the mon to be available for commands\n try:\n self._wait_for_ready()\n except self.GidGone:\n return True\n\n # Then send it an evict\n ret = errno.ETIMEDOUT\n while ret == errno.ETIMEDOUT:\n log.debug(\"mds_command: {0}, {1}\".format(\n \"%s\" % self.gid, [\"session\", \"evict\"] + self._client_spec\n ))\n ret, outb, outs = self._volume_client.fs.mds_command(\n \"%s\" % self.gid,\n [json.dumps({\n \"prefix\": \"session evict\",\n \"filters\": self._client_spec\n })], \"\")\n log.debug(\"mds_command: complete {0} {1}\".format(ret, outs))\n\n # If we get a clean response, great, it's gone from that rank.\n if ret == 0:\n return True\n elif ret == errno.ETIMEDOUT:\n # Oh no, the MDS went laggy (that's how libcephfs knows to emit this error)\n self._mds_map = self._volume_client._rados_command(\"mds dump\", {})\n try:\n self._wait_for_ready()\n except self.GidGone:\n return True\n else:\n raise ClusterError(\"Sending evict to mds.{0}\".format(self.gid), ret, outs)",
"async def lockdown(self, ctx, action=None):\r\n try:\r\n if not action:\r\n return await ctx.send(\"Lockdown command:\\n*;lockdown [on/off]*\")\r\n if action.lower() == 'on':\r\n msg = await ctx.send(\"Locking down the channel...\")\r\n for x in ctx.guild.members:\r\n await ctx.channel.set_permissions(x, send_messages=False)\r\n return await msg.edit(content=\"The channel has been successfully locked down. :lock: \")\r\n elif action.lower() == 'off':\r\n msg = await ctx.send(\"Unlocking the channel...\")\r\n for x in ctx.guild.members:\r\n await ctx.channel.set_permissions(x, send_messages=True)\r\n return await msg.edit(content=\"The channel has been successfully unlocked. :unlock: \")\r\n else:\r\n return await ctx.send(\"Lockdown command:\\n*;lockdown [on/off]*\")\r\n except discord.Forbidden:\r\n await ctx.send(\"I need to have the permission: Manage Server\")",
"async def async_turn_off(self, **kwargs: Any) -> None:\n await self.hass.services.async_call(\n SWITCH_DOMAIN,\n SERVICE_TURN_OFF,\n {ATTR_ENTITY_ID: self._switch_entity_id},\n blocking=True,\n context=self._context,\n )",
"def is_enabled(command):\n if command not in Controller.commands:\n return False\n return Controller.commands[command][2]",
"def checkIfEnabled(self):\n\n # Reload the command file to check for new commands\n importlib.reload(BotSettings)\n matches = BotSettings.config['commands']\n\n # Check for the match and if it is there return the value that goes with the command\n for key in matches:\n key.strip(\"!\")\n if key == self.command:\n return matches.get(key)\n\n # If reached the command does not exist\n return False",
"def bt_stop_discovery(self):\n is_stop_discovery = False\n try:\n is_bluetooth_off = self.bt_radio('off')\n if is_bluetooth_off:\n logger.debug(\"Bluetooth discovery Stoped {}\".format(\n self.phone_info.bluetooth_name))\n is_stop_discovery = True\n else:\n logger.debug(\"Bluetooth discovery completed {}\".format(\n self.phone_info.bluetooth_name))\n is_stop_discovery = False\n except Exception as e:\n logger.error(\"Turn OFF Bluetooth Button is not Visible\")\n logger.error(repr(e))\n return is_stop_discovery",
"async def async_turn_off(self, **kwargs: Any) -> None:\n self._attr_is_on = await self.relay.set_active(False)\n self.async_write_ha_state()",
"def command_done(self):\n return self.read(\"*OPC?\") == \"1\"",
"def poll(self):\n return False",
"def _get_enable_peer_as_check(self):\n return self.__enable_peer_as_check",
"async def async_turn_off(self):\n await self.async_mute_volume(True)",
"async def ssh_unlock(self) -> bool:\n # pause logic\n if not self.running.is_set():\n self.add_to_output(\"Paused...\")\n await self.running.wait()\n\n # have to outsource this to another program\n proc = await asyncio.create_subprocess_shell(\n f'{os.path.join(os.getcwd(), \"files\", \"asicseer_installer.exe\")} -p -f {self.ip} root',\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE)\n # get stdout of the unlock\n stdout, stderr = await proc.communicate()\n # check if the webUI password needs to be reset\n print(stderr)\n if str(stdout).find(\"webUI\") != -1:\n # tell the user to reset the webUI password\n self.add_to_output(\"SSH unlock failed, please reset miner with reset button...\")\n # ssh unlock failed\n return False\n else:\n # tell the user the SSH unlock worked\n self.add_to_output(\"SSH unlock success...\")\n await asyncio.sleep(3)\n # ssh is unlocked\n return True",
"def disable(self):",
"def __disable__(self) -> None:\n pass",
"async def async_turn_off(self):\n await self.local_meural.send_key_suspend()"
] | [
"0.7076065",
"0.6165494",
"0.60460854",
"0.6037636",
"0.5963541",
"0.59550655",
"0.5929981",
"0.58966804",
"0.5892812",
"0.5856831",
"0.5800519",
"0.5799643",
"0.57865214",
"0.5756381",
"0.57319176",
"0.5654577",
"0.5590112",
"0.5557854",
"0.5550717",
"0.5546746",
"0.55359083",
"0.55180615",
"0.5514339",
"0.5487976",
"0.5481882",
"0.54572755",
"0.54565614",
"0.5439319",
"0.5428926",
"0.5428926",
"0.5419235",
"0.5413272",
"0.54004997",
"0.53986806",
"0.53986806",
"0.539586",
"0.5393176",
"0.53885293",
"0.5388226",
"0.53790563",
"0.53742725",
"0.53693026",
"0.536032",
"0.5351943",
"0.5346785",
"0.53430504",
"0.53333455",
"0.5329564",
"0.5327604",
"0.5324986",
"0.5320665",
"0.53206646",
"0.5312378",
"0.5301881",
"0.5301638",
"0.5299461",
"0.52871555",
"0.5280132",
"0.52793276",
"0.5277344",
"0.52773196",
"0.52727354",
"0.5267164",
"0.5266225",
"0.52571553",
"0.5240465",
"0.5239867",
"0.5239424",
"0.5234401",
"0.52314955",
"0.52131605",
"0.52056664",
"0.51971674",
"0.5191584",
"0.5185482",
"0.5185482",
"0.5182645",
"0.5181017",
"0.5175777",
"0.5169534",
"0.5160602",
"0.5160488",
"0.51579106",
"0.51565176",
"0.5155431",
"0.5144289",
"0.5141021",
"0.5133352",
"0.5129333",
"0.5126829",
"0.51255554",
"0.5124752",
"0.5123028",
"0.5122836",
"0.5122263",
"0.5116678",
"0.51149565",
"0.51092565",
"0.51086813",
"0.5107929"
] | 0.5901317 | 7 |
Method to invoke Disable command on SDP Master. | def do(self):
this_server = TangoServerHelper.get_instance()
try:
sdp_master_ln_fqdn = ""
property_val = this_server.read_property("SdpMasterFQDN")[0]
sdp_master_ln_fqdn = sdp_master_ln_fqdn.join(property_val)
sdp_mln_client_obj = TangoClient(sdp_master_ln_fqdn)
sdp_mln_client_obj.send_command_async(
const.CMD_Disable, None, self.disable_cmd_ended_cb
)
self.logger.debug(const.STR_DISABLE_CMS_SUCCESS)
this_server.write_attr(
"activityMessage", const.STR_DISABLE_CMS_SUCCESS, False
)
except DevFailed as dev_failed:
self.logger.exception(dev_failed)
log_msg = f"{const.ERR_DISABLE_CMD_FAIL}{dev_failed}"
tango.Except.re_throw_exception(
dev_failed,
const.ERR_INVOKING_CMD,
log_msg,
"SdpMasterLeafNode.DisableCommand()",
tango.ErrSeverity.ERR,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def Disable(self):\n handler = self.get_command_object(\"Disable\")\n handler()",
"def cmd_disable(self, app_name=None):\n rc = self.socket_command_with_project('disable', app_name)\n return rc",
"def disable(self, sid):\n return",
"def disable(self):\n logging.debug(\"Disabling switch %s\" % self.name)\n self.disabled = True",
"def _disable(self):\n self.enabled = False",
"def on_disable(self) -> None:\n self._cancel_automation()",
"def on_disable(self) -> None:\n self._on_stop_cycle({})",
"def disable(self):",
"async def disable(self, **kwargs) -> None: # pylint: disable=unused-argument\r\n await self.set_ena(False)",
"def disable(self):\n self.error_code = 'DISABLED'\n self.running = False",
"async def disable(self, ctx):\n\n server = ctx.message.server\n\n settings = self.bot.dota_ticker_settings.get(server.id)\n\n if settings is not None:\n settings['enabled'] = False\n await self.bot.dota_ticker_settings.put(server.id, settings)\n\n await self.bot.say('The match ticker has been disabled on {0.name}.'.format(server))",
"def disable(self) -> None:",
"def disable(self):\n try:\n self.bus.open(self.BUS_NUMBER)\n self.write(AntennaDeployerCommand.DISARM_ANTS, 0x00)\n self.bus.close()\n return True\n except:\n return False",
"def turn_off(self, **kwargs: Any) -> None:\n if (\n DPCODE_LIGHT in self.tuya_device.status\n and DPCODE_SWITCH not in self.tuya_device.status\n ):\n commands = [{\"code\": DPCODE_LIGHT, \"value\": False}]\n else:\n commands = [{\"code\": DPCODE_SWITCH, \"value\": False}]\n self._send_command(commands)",
"def _doDisableRegulation(self):\n self._cmdRegulOff()",
"def bdev_nvme_disable_controller(client, name, cntlid):\n\n params = {'name': name}\n\n if cntlid is not None:\n params['cntlid'] = cntlid\n\n return client.call('bdev_nvme_disable_controller', params)",
"def disable(self):\n self.enabled = False",
"def disable_radio(self):\n self.acquire_response(b'AT*R0')",
"async def async_turn_off(self):\n path = \"/ip/firewall/nat\"\n param = \".id\"\n value = None\n for uid in self._ctrl.data[\"nat\"]:\n if (\n self._ctrl.data[\"nat\"][uid][\"name\"]\n == f\"{self._data['protocol']}:{self._data['dst-port']}\"\n ):\n value = self._ctrl.data[\"nat\"][uid][\".id\"]\n\n mod_param = \"disabled\"\n mod_value = True\n self._ctrl.set_value(path, param, value, mod_param, mod_value)\n await self._ctrl.async_update()",
"async def disable(self, ctx):\n await self.config.guild(ctx.guild).auto.set(True)\n await ctx.send(_(\"Automatic voicechannel creation disabled.\"))",
"def disable(self) -> Awaitable[Dict]:\n return self.client.send(\"Security.disable\", {})",
"def disable():\n configdb = ConfigDBConnector()\n configdb.connect()\n tunnel_info = {}\n tunnel_info['FLEX_COUNTER_STATUS'] = DISABLE\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"TUNNEL\", tunnel_info)",
"def disable(self):\n self._enabled = False",
"def disable_relays(self):\n #ensure clock low and data high\n self.e.clear_bit(7)\n self.e.set_bit(5)\n time.sleep(0.01)\n\n #pulse the clock line\n self.e.set_bit(7)\n time.sleep(0.01)\n self.e.clear_bit(7)\n\n #clear the data line\n self.e.clear_bit(5)",
"def disable(self):\n pass",
"def disable(self):\n return self.enable(False)",
"def disable():\n configdb = ConfigDBConnector()\n configdb.connect()\n port_info = {}\n port_info['FLEX_COUNTER_STATUS'] = 'disable'\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"PORT\", port_info)",
"def disable():\n configdb = ConfigDBConnector()\n configdb.connect()\n port_info = {}\n port_info['FLEX_COUNTER_STATUS'] = DISABLE\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", PORT_BUFFER_DROP, port_info)",
"def disable(self):\n if not self.labExperiment:\n super().disable()\n else:\n self.zero()\n self.connection.query('close_dm')\n print(\"'BM1k' is now disbaled\")",
"def disable():\n if _status_apf():\n return __apf_cmd(\"-f\")",
"def turn_off(self):\n self._state = False\n if(self._device['type'] == '_DT-PLUG' or self._device['type'] == '_THIMR'):\n self._send_cmd(self._device, 'cmd=ctrl&devices={[' + self._device[\"sid\"] + ']}&op={\"cmd\":5,\"op\":0 }', 5)\n if(self._device['type'] == '_REALY2' or self._device['type'] == '_REALY4'): \n self._send_cmd(self._device, 'cmd=ctrl&devices={[' + self._device[\"sid\"] + ']}&op={\"cmd\":5,\"'+ self._data_key +'\":0 }', 5)",
"async def async_turn_off(self, **kwargs: Any) -> None:\n await self.entity_description.set_command(self, False)",
"def on_disable(self) -> None:\n self._cancel_notification_cycle()",
"def disable(self):\n\t\tresponse = self.client.post(self._endpoint + \"/disable\")\n\t\treturn bool(response.json[\"success\"])",
"def disable(self):\n self._disable_monitor()\n self._pinger.stop()",
"def disable(self):\n raise NotImplementedError",
"def disable(self) -> Awaitable[Dict]:\n return self.client.send(\"Database.disable\", {})",
"async def async_turn_off(self, **kwargs):\n try:\n if await self._api.set_relay_state(self._dev_id, \"off\"):\n self._is_on = False\n self.async_write_ha_state()\n except Smile.PlugwiseError:\n _LOGGER.error(\"Error while communicating to device\")",
"async def async_turn_off(self):\n path = \"/queue/simple\"\n param = \".id\"\n value = None\n for uid in self._ctrl.data[\"queue\"]:\n if self._ctrl.data[\"queue\"][uid][\"name\"] == f\"{self._data['name']}\":\n value = self._ctrl.data[\"queue\"][uid][\".id\"]\n\n mod_param = \"disabled\"\n mod_value = True\n self._ctrl.set_value(path, param, value, mod_param, mod_value)\n await self._ctrl.async_update()",
"def __disable__(self) -> None:\n pass",
"def disable(ctx):\n config_db = ConfigDBConnector()\n config_db.connect()\n config_db.mod_entry(\"NAT_GLOBAL\", \"Values\", {\"admin_mode\": \"disabled\"})",
"def disable_mute(self):\n self.mute = False",
"def _led_disable():\n # type: () -> None\n GPIO.output(LED_nOE, GPIO.HIGH)",
"async def async_turn_off(self):\n path = \"/interface\"\n param = \"default-name\"\n if \"-\" in self._data[\"port-mac-address\"]:\n param = \"name\"\n value = self._data[param]\n mod_param = \"disabled\"\n mod_value = True\n self._ctrl.set_value(path, param, value, mod_param, mod_value)\n\n if self._data[\"poe-out\"] == \"auto-on\":\n path = \"/interface/ethernet\"\n self._ctrl.set_value(path, param, value, \"poe-out\", \"off\")\n\n await self._ctrl.async_update()",
"def disable(self):\n self.registrar.unregister_service(\"say\", namespace=__name__)",
"async def async_turn_off(self) -> None:\n if CONF_POWER_COMMAND_TOPIC in self._config:\n mqtt_payload = self._command_templates[CONF_POWER_COMMAND_TEMPLATE](\n self._config[CONF_PAYLOAD_OFF]\n )\n await self._publish(CONF_POWER_COMMAND_TOPIC, mqtt_payload)\n if self._optimistic:\n self._attr_hvac_mode = HVACMode.OFF\n self.async_write_ha_state()\n return\n # Fall back to default behavior without power command topic\n await super().async_turn_off()",
"def powerOff(self):\n self._sendCommand(self.SONY_CMD_ExtBackupCommunicator_ForcePowerOff, bufferSize=0)",
"def disable(self) -> None:\n if self.active_mode is not None:\n logger.info(\"Disabling '%s'\", self.active_mode.MODE_NAME)\n self.active_mode.on_disable()\n\n self.active_mode = None",
"def disable_everything(self):\n zhinst.utils.disable_everything(self.daq, self.device_id)\n self.log.info(\"Disabled everything.\")",
"async def async_turn_off(self, **kwargs):\n try:\n state_off = await self._api.set_relay_state(\n self._dev_id, self._members, \"off\"\n )\n if state_off:\n self._is_on = False\n self.async_write_ha_state()\n except PlugwiseException:\n _LOGGER.error(\"Error while communicating to device\")",
"def disable():\n ret = _LIB.oled_click_disable()\n if ret < 0:\n raise Exception(\"oled click disable failed\")",
"def disable(*args, value: bool=True, **kwargs)->None:\n pass",
"async def async_turn_off(self, **kwargs: Any) -> None:\n await self._client.turn_off(self._device_port)",
"def disable():\n request = dict(id='gbn')\n _gbn_disable(request)",
"def disable():\n configdb = ConfigDBConnector()\n configdb.connect()\n queue_info = {}\n queue_info['FLEX_COUNTER_STATUS'] = 'disable'\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"QUEUE\", queue_info)",
"def disable_pulse_modulation(self):\n self.write(\":SOUR:PULM:STAT OFF\")",
"def disable(ctx):\n\n port_info = {}\n port_info['FLEX_COUNTER_STATUS'] = DISABLE\n ctx.obj.mod_entry(\"FLEX_COUNTER_TABLE\", PG_DROP, port_info)",
"def device_bypass(self, device_ids, enable):\n return self._device_action(device_ids, \"BYPASS\", self._action_toggle(enable))",
"def disable(service_name: str, print_action: bool = True):\n\n if print_action:\n print_log_status(3, f\"Disabling `{service_name}`\")\n \n run_command(f\"sudo systemctl disable {service_name}\")",
"def disable(self):\n self.enabled = False\n self.__store(self)",
"def port_disable(self, port_num: int) -> None:\n raise NotImplementedError",
"def Disable(self, request, global_params=None):\n config = self.GetMethodConfig('Disable')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def Disable(self, request, global_params=None):\n config = self.GetMethodConfig('Disable')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def Disable(self, request, global_params=None):\n config = self.GetMethodConfig('Disable')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def set_disabled_switch(self, disabled):\n self.disabled = disabled",
"def disable(self, name: str):\n self._get_backend().disable_alarm(name)",
"def disable(self):\n self.rx.threadKill()\n self.tx.threadKill()\n time.sleep(1)\n self.fisica.close()",
"def disable(self):\n super().disable()",
"def disable():\n configdb = ConfigDBConnector()\n configdb.connect()\n rif_info = {}\n rif_info['FLEX_COUNTER_STATUS'] = 'disable'\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"RIF\", rif_info)",
"def swo_disable(self, port_mask):\n res = self._dll.JLINKARM_SWO_DisableTarget(port_mask)\n if res != 0:\n raise errors.JLinkException(res)\n return None",
"async def async_turn_off(self, **kwargs: Any) -> None:\n self._attr_is_on = await self.relay.set_active(False)\n self.async_write_ha_state()",
"def disable_output(self):\n\n self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 7, 0)\n self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 4, 0)\n self.__bus.write_byte_data(\n self.__rtcaddress, self.CONTROL, self.__rtcconfig)\n return",
"def syslog_remote_disable(handle, name):\n\n mo = handle.query_dn(\"sys/svc-ext/syslog/client-\" + name)\n if mo:\n mo.admin_state = \"disabled\"\n handle.add_mo(mo, modify_present=True)\n handle.commit()\n else:\n raise ValueError(\"Syslog Mo is not available.\")",
"async def async_turn_off(self):\n data_cmd = _command(COMMAND_POWER_OFF)\n await self._async_send_command(data_cmd)",
"def turn_off(self, **kwargs: Any) -> None:\n self._device.power_on = False\n _LOGGER.debug(\"Turn off light %s\", self._device.ip)",
"def disable_receiver(self):\n self.set_receiver(False)",
"async def disable_digital_reporting(self, pin):\n port = pin // 8\n command = [PrivateConstants.REPORT_DIGITAL + port,\n PrivateConstants.REPORTING_DISABLE]\n await self._send_command(command)",
"def disable(self, subsystem=False):\n self.__dict__[\"enabled\"] = False\n\n if subsystem:\n self.subsystem.disable()",
"async def async_turn_off(self):\n await self.async_mute_volume(True)",
"def turn_off(self, **kwargs):\n self.smartplug.turn_off()",
"def disable(self):\n self.write(\":OUTPUT OFF;\")",
"def disable(version_manager, request):\n version_manager.is_disabled = True\n version_manager.save()\n return version_manager",
"def disable(func):\n return func",
"def setOff(self, command):\r\n self.setDriver('ST', 0)",
"def disable_service(self, **kwargs):\n put_body = json.dumps(kwargs)\n resp, body = self.put('os-services/disable', put_body)\n body = json.loads(body)\n self.validate_response(schema.disable_service, resp, body)\n return rest_client.ResponseBody(resp, body)",
"async def async_turn_off(self, **kwargs: Any) -> None:\n await self.hass.services.async_call(\n SWITCH_DOMAIN,\n SERVICE_TURN_OFF,\n {ATTR_ENTITY_ID: self._switch_entity_id},\n blocking=True,\n context=self._context,\n )",
"def turn_off_modem(self):\n if self.is_power_on():\n self._logger.debug(\"Switching modem off...\")\n self.set_pin()\n GPIO.cleanup()\n # give modem some time to log out\n time.sleep(5)\n else:\n self._logger.debug(\"GSM modem is already OFF...\")",
"async def power_off(self):\n ...",
"def disable(self):\n logging.debug(\"Restoring sudoers configuration...\")\n command = (\"sed -i -e '/{mark}/,+1d' \" \"{filename}\").format(\n mark=self.MARK, filename=self.SUDOERS\n )\n Command(command, verbose=False).run()",
"def disable():\n ret = _LIB.led_matrix_click_disable()\n if ret < 0:\n raise Exception(\"led matrix click disable failed\")",
"async def disable(self, ctx, function: typing.Union[CommandConverter, PluginConverter, GalaxyConverter],\n *channels: discord.TextChannel):\n channels = channels or (ctx.channel, )\n await ctx.guild_profile.permissions.disable_function(function, channels)\n # noinspection PyUnresolvedReferences\n await ctx.send_line(f\"{ctx.emotes.web_emotion.galka} {function.name} has been disabled in specified channels.\")",
"def TelescopeOff(self):\n handler = self.get_command_object(\"TelescopeOff\")\n handler()",
"def disable_server(self, server):\n log.info(\"Disabling %s in netscaler\", server)\n return self.post(\"server?action=disable\", {\"server\": {\"name\": server}}, content_type=self.content_type(\"server\"))",
"async def async_turn_off(self, **kwargs: Any) -> None:\n await self.async_publish(\n self._config[CONF_COMMAND_TOPIC],\n self._config[CONF_PAYLOAD_OFF],\n self._config[CONF_QOS],\n self._config[CONF_RETAIN],\n self._config[CONF_ENCODING],\n )\n if self._optimistic:\n # Optimistically assume that switch has changed state.\n self._attr_is_on = False\n self.async_write_ha_state()",
"def turn_off(self, **kwargs):\n self.robot.pause_cleaning()\n time.sleep(1)\n self.robot.send_to_base()",
"def disable(self):\r\n self.update(enabled=False)",
"def _disable(self):\n self.debug_log(\"Disabling...\")\n self._unregister_handlers()",
"def disable(self, index):\n self._action(index, StateVariable.enable, missingok=False, value=False)",
"def turn_off(self, **kwargs):\n self._send_command(\"turn_off\")",
"def disable(self):\n self.direction = None # remove direction\n self.state['enabled'] = False # reset states\n self.state['return'] = False\n self.return_path = None # remove path\n if self.state['blue']:\n self.stop_blue_state(resume_audio=False)\n self.image, _ = self.norm_images.get_image() # reset image\n self.sound_manager.stop()"
] | [
"0.72543144",
"0.682667",
"0.6537114",
"0.6527344",
"0.6507283",
"0.6505277",
"0.64890575",
"0.64763004",
"0.6459875",
"0.6419226",
"0.63770306",
"0.637017",
"0.6363984",
"0.63474035",
"0.62944055",
"0.6292118",
"0.6276801",
"0.627648",
"0.627209",
"0.62699884",
"0.62642103",
"0.62637043",
"0.62545663",
"0.6235732",
"0.62094367",
"0.6187441",
"0.6183761",
"0.6175728",
"0.6165915",
"0.6165217",
"0.6160995",
"0.61572284",
"0.6152616",
"0.6148288",
"0.6148008",
"0.6147495",
"0.6132269",
"0.6112198",
"0.6079803",
"0.6078318",
"0.60769695",
"0.60505867",
"0.60428524",
"0.60320586",
"0.6026174",
"0.6025532",
"0.6017446",
"0.60036784",
"0.6001091",
"0.5988958",
"0.595217",
"0.5946767",
"0.59303546",
"0.59260654",
"0.5924847",
"0.59222233",
"0.59176505",
"0.590445",
"0.59025764",
"0.5882805",
"0.58773017",
"0.58732194",
"0.58732194",
"0.58732194",
"0.58624315",
"0.58383375",
"0.58337903",
"0.5823711",
"0.5820192",
"0.58192635",
"0.58186704",
"0.58121103",
"0.58106005",
"0.5799586",
"0.5794832",
"0.5789671",
"0.57872033",
"0.5784474",
"0.57819486",
"0.5768169",
"0.576502",
"0.5764607",
"0.5759869",
"0.5758704",
"0.57539624",
"0.5752904",
"0.5751539",
"0.5748728",
"0.5745375",
"0.5742271",
"0.57371616",
"0.5734403",
"0.57303387",
"0.57292044",
"0.5724035",
"0.57197165",
"0.57179284",
"0.5711057",
"0.5708427",
"0.57017213"
] | 0.795008 | 0 |
Creates a named tuple from a dictionary. | def namedtuple_from_dict(obj):
if isinstance(obj, dict):
fields = sorted(obj.keys())
namedtuple_type = namedtuple(typename='Config',
field_names=fields,
rename=True)
field_value_pairs = OrderedDict(
(str(field), Config.namedtuple_from_dict(obj[field]))
for field in fields)
try:
return namedtuple_type(**field_value_pairs)
except TypeError:
# Cannot create namedtuple instance so fallback to dict (invalid attribute names)
return dict(**field_value_pairs)
elif isinstance(obj, (list, set, tuple, frozenset)):
return [Config.namedtuple_from_dict(item) for item in obj]
else:
return obj | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_tuple(in_dict,tupname='values'):\n the_tup = namedtuple(tupname, in_dict.keys())\n the_tup = the_tup(**in_dict)\n return the_tup",
"def ntuple_from_dict(d):\n return namedtuple('TupleFromDict', d.keys())(**d)",
"def make_tuple(name, **kwargs):\n tuple_class = collections.namedtuple(typename=name, field_names=kwargs.keys())\n return tuple_class(**kwargs)",
"def MakeTuple(name, **kwargs):\n tuple_class = collections.namedtuple(\n typename=name,\n field_names=kwargs.keys(),\n )\n return tuple_class(**kwargs)",
"def to_namedtuple(d, classname='struct'):\n if not isinstance(d, dict):\n raise ValueError(\"Can only convert dicts into namedtuple\")\n for k,v in d.iteritems():\n if isinstance(v, dict):\n d[k] = to_namedtuple(v)\n return namedtuple(classname, d.keys())(**d)",
"def get_dict_as_tuple(d):\n for k, v in six.iteritems(d):\n return k, v\n return None",
"def _tupleize(d):\n return [(key, value) for key, value in d.items()]",
"def get_namedtuple(name, field_names, values=None):\n if values is None:\n values = range(len(field_names))\n field_names = [f.replace(' ', '_') for f in field_names]\n return _namedtuple(name, field_names)(*values)",
"def tuple_namer(name,tupl):\n tupl_templ = collections.namedtuple(name, 'battery status neighbour')\n named = tupl_templ(battery = tupl[0], status = tupl[1], neighbour = tupl[2])\n return named",
"def namedtuple(*args, **kwargs):\n cls = _namedtuple(*args, **kwargs)\n _NamedTupleABC.register(cls)\n return cls",
"def dict2tuple(d):\n items = list(d.items())\n items.sort()\n return tuple(items)",
"def convert_dict_to_tuple(d):\n return tuple(sorted(d.items(), key=lambda item: item[0]))",
"def core_named_tuple(user_kwargs: dict, disdat_kwargs: dict) -> NamedTuple('Output', [('user_kwargs', dict),\n ('disdat_kwargs', dict)]):\n import collections\n Output = collections.namedtuple('Output', ['user_kwargs', 'disdat_kwargs'])\n return Output(user_kwargs, disdat_kwargs)",
"def dictToTuple(dic):\n tup = []\n for i in range(len(dic)):\n tup.append(dic[i].items())\n return tup",
"def load_state_dict(\n self,\n state_dict: Mapping[str, Any],\n *args,\n **kwargs,\n ) -> NamedTuple:\n return super().load_state_dict(state_dict, *args)",
"def namedtuple(typename, field_names, verbose=False, rename=False):\r\n\r\n # Parse and validate the field names. Validation serves two purposes,\r\n # generating informative error messages and preventing template injection attacks.\r\n if isinstance(field_names, basestring):\r\n field_names = field_names.replace(',', ' ').split() # names separated by whitespace and/or commas\r\n field_names = tuple(map(str, field_names))\r\n if rename:\r\n names = list(field_names)\r\n seen = set()\r\n for i, name in enumerate(names):\r\n if (not all(c.isalnum() or c=='_' for c in name) or _iskeyword(name)\r\n or not name or name[0].isdigit() or name.startswith('_')\r\n or name in seen):\r\n names[i] = '_%d' % i\r\n seen.add(name)\r\n field_names = tuple(names)\r\n for name in (typename,) + field_names:\r\n if not all(c.isalnum() or c=='_' for c in name):\r\n raise ValueError('Type names and field names can only contain alphanumeric characters and underscores: %r' % name)\r\n if _iskeyword(name):\r\n raise ValueError('Type names and field names cannot be a keyword: %r' % name)\r\n if name[0].isdigit():\r\n raise ValueError('Type names and field names cannot start with a number: %r' % name)\r\n seen_names = set()\r\n for name in field_names:\r\n if name.startswith('_') and not rename:\r\n raise ValueError('Field names cannot start with an underscore: %r' % name)\r\n if name in seen_names:\r\n raise ValueError('Encountered duplicate field name: %r' % name)\r\n seen_names.add(name)\r\n\r\n # Create and fill-in the class template\r\n numfields = len(field_names)\r\n argtxt = repr(field_names).replace(\"'\", \"\")[1:-1] # tuple repr without parens or quotes\r\n reprtxt = ', '.join('%s=%%r' % name for name in field_names)\r\n template = '''class %(typename)s(tuple):\r\n '%(typename)s(%(argtxt)s)' \\n\r\n __slots__ = () \\n\r\n _fields = %(field_names)r \\n\r\n def __new__(_cls, %(argtxt)s):\r\n 'Create new instance of %(typename)s(%(argtxt)s)'\r\n return _tuple.__new__(_cls, (%(argtxt)s)) \\n\r\n @classmethod\r\n def _make(cls, iterable, new=tuple.__new__, len=len):\r\n 'Make a new %(typename)s object from a sequence or iterable'\r\n result = new(cls, iterable)\r\n if len(result) != %(numfields)d:\r\n raise TypeError('Expected %(numfields)d arguments, got %%d' %% len(result))\r\n return result \\n\r\n def __repr__(self):\r\n 'Return a nicely formatted representation string'\r\n return '%(typename)s(%(reprtxt)s)' %% self \\n\r\n def _asdict(self):\r\n 'Return a new OrderedDict which maps field names to their values'\r\n return OrderedDict(zip(self._fields, self)) \\n\r\n def _replace(_self, **kwds):\r\n 'Return a new %(typename)s object replacing specified fields with new values'\r\n result = _self._make(map(kwds.pop, %(field_names)r, _self))\r\n if kwds:\r\n raise ValueError('Got unexpected field names: %%r' %% kwds.keys())\r\n return result \\n\r\n def __getnewargs__(self):\r\n 'Return self as a plain tuple. 
Used by copy and pickle.'\r\n return tuple(self) \\n\\n''' % locals()\r\n for i, name in enumerate(field_names):\r\n template += \" %s = _property(_itemgetter(%d), doc='Alias for field number %d')\\n\" % (name, i, i)\r\n if verbose:\r\n print template\r\n\r\n # Execute the template string in a temporary namespace and\r\n # support tracing utilities by setting a value for frame.f_globals['__name__']\r\n namespace = dict(_itemgetter=_itemgetter, __name__='namedtuple_%s' % typename,\r\n OrderedDict=OrderedDict, _property=property, _tuple=tuple)\r\n try:\r\n exec template in namespace\r\n except SyntaxError, e:\r\n raise SyntaxError(e.message + ':\\n' + template)\r\n result = namespace[typename]\r\n\r\n # For pickling to work, the __module__ variable needs to be set to the frame\r\n # where the named tuple is created. Bypass this step in enviroments where\r\n # sys._getframe is not defined (Jython for example) or sys._getframe is not\r\n # defined for arguments greater than 0 (IronPython).\r\n try:\r\n result.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__')\r\n except (AttributeError, ValueError):\r\n pass\r\n\r\n return result",
"def named_tuples_from_csv(read_file, delimiter=','):\n with open(read_file, 'rb') as f:\n reader = csv_unicode.UnicodeDictReader(f, delimiter=delimiter)\n DataTuple = namedtuple('DataTuple', ','.join(reader.fieldnames))\n return [DataTuple(**row) for row in reader]",
"def generate_from_descriptor(self, tuple_descriptor):\r\n t = Tuple()\r\n d = {}\r\n t.set_data(d)\r\n for alias in tuple_descriptor.aliases:\r\n fields = self.__tuple_descriptor.get_descriptor(alias).underlying_fields\r\n for field in fields:\r\n setattr(t, field, getattr(self, field))\r\n setattr(t, alias, getattr(self, alias))\r\n t.set_tuple_descriptor(tuple_descriptor)\r\n return t",
"def expand_tuple_keys(dictionary):\n data = {}\n for k, v in dictionary.items():\n if isinstance(k, tuple):\n for x in k:\n data[x] = v\n else:\n data[k] = v\n return data",
"def create_namedtuple():\n Thing = namedtuple(\"MyCoolTuple\", \"field1 field2\")\n obj = Thing(1, 2)\n # Throws AttributeError\n #obj.random = \"Whoa\"\n print(Thing)\n print(obj)",
"def dict_to_tuples(input_dict: Dict[str, str]) -> List[Tuple[str, str]]:\n return [(key, input_dict[key]) for key in sorted(input_dict.keys())]",
"def namedtuple(clz: Type) -> Type[NamedTuple]:\n if not is_dataclass(clz):\n raise TypeError(\"{} is not a dataclass.\".format(clz.__name__))\n\n flds: Tuple[Field] = fields(clz)\n flds_by_name: Dict[str, Field] = {f.name: f for f in flds}\n nt_name: str = clz.__name__ + \"NamedTuple\"\n\n defaults: Optional[List[Any]] = None\n field_names: List[str] = [\n f.name\n for f in flds\n if f.default is not None or f.default_factory is not None\n ]\n for fld in field_names:\n if defaults is None:\n defaults = []\n field: Field = flds_by_name[fld]\n default_value: Optional[Any] = field.default\n if default_value is None:\n default_value = field.default_factory()\n\n defaults.append(default_value)\n\n field_names.extend(\n [\n f.name\n for f in flds\n if f.default is None and f.default_factory is None\n ]\n )\n\n return real_namedtuple(nt_name, field_names, defaults=defaults)",
"def get_named_tuple_object(instance: Any) -> Tuple[object, Type[object]]:\n clz: Type = namedtuple(instance.__class__)\n instance_data = asdict(instance)\n for key, value in instance_data.items():\n if is_dataclass(value):\n obj, _ = get_named_tuple_object(value)\n instance_data[key] = obj\n\n # generics currently unsupported, maybe in later versions\n\n new_instance: clz = clz(**instance_data)\n return (new_instance, clz)",
"def kwargs_to_tuple(d):\n if isinstance(d, list):\n return tuple(kwargs_to_tuple(v) for v in sorted(d))\n elif isinstance(d, dict):\n return tuple(\n (k, kwargs_to_tuple(v))\n for k, v in sorted(d.items())\n if k not in [\"result_format\", \"include_config\", \"catch_exceptions\", \"meta\"]\n )\n return d",
"def tupleToDict(tup):\n dic = []\n for i in range(len(tup)):\n dic.append(dict(tup[i]))\n return dic",
"def to_kv(k: str, v: Union[int, float]) -> Tuple[str, float]:\n my_tuple = (k, v*v)\n\n return my_tuple",
"def namedtuple(*args, **kwargs):\n nt_class = _invisiblenamedtuple(*args, **kwargs)\n # put it on the global scale so it can be tupled correctly\n globals()[nt_class.__name__] = nt_class\n return nt_class",
"def test_output_named_tuple_vs_dictionary_2():\n assert oldest_person_age == oldest_person_age_d, \"Max age cannot be different for Named Tuple and Dictionary list\"",
"def test_output_named_tuple_vs_dictionary_6():\n assert delta2 > delta1, \"Dictionary cannot be faster than named tuple\"",
"def map_generate_tuple(*args):\n key, func, arg = args[0][0], args[0][1], args[0][2]\n return (key, func(*arg))",
"def to_kv(k: str, v: typing.Union[int, float]) -> typing.Tuple[str, float]:\n return k, v * v",
"def getT(x):\n \n return tuple([(k,v) for (k,v) in x.items() if k != 'f'])",
"def deserialize_tuple(d):\n return tuple(d['items'])",
"def to_tuple(self) -> Tuple[Any]:\n return tuple(self[k] for k in self.keys())",
"def to_tuple(self) -> Tuple[Any]:\n return tuple(self[k] for k in self.keys())",
"def term_name_key(name_tuple: tuple[str, list[float]]) -> tuple[str, str]:\n t_type, atoms = name_tuple[0].split('(', 1)\n atoms = atoms[:-1] # Delete the last closing parenthesis\n t_type_words = t_type.rstrip('0123456789') # In case of polyterm, delete the numbers in the end\n match = re.match(r\"([a-zA-Z]+)([0-9]+)\", t_type, re.I)\n t_type_numbers = int(match.groups()[-1]) if match else ''\n return t_type_words, atoms, t_type_numbers",
"def from_literal(tup):\n\n def expand(vals):\n return [from_literal(x) for x in vals]\n\n def union(vals):\n if not isinstance(vals, tuple):\n vals = (vals,)\n v = expand(vals)\n return frozenset(v)\n\n if not isinstance(tup, tuple):\n return ('prim', tup)\n elif isinstance(tup[0], str):\n tag, *vals = tup\n if tag == 'prim':\n return tup\n elif tag == 'tuple':\n params = tuple(expand(vals))\n return (tag, params)\n elif tag == 'map':\n k, v = vals\n return (tag, (union(k), union(v)))\n else:\n vals, = vals # pylint: disable=self-assigning-variable\n return (tag, union(vals))\n else:\n return tuple(expand(tup))",
"def pkgTupleFromHeader(hdr):\n \n name = hdr['name']\n\n # RPMTAG_SOURCEPACKAGE: RPMTAG_SOURCERPM is not necessarily there for\n # e.g. gpg-pubkeys imported with older rpm versions\n # http://lists.baseurl.org/pipermail/yum/2009-January/022275.html\n if hdr[rpm.RPMTAG_SOURCERPM] or hdr[rpm.RPMTAG_SOURCEPACKAGE] != 1:\n arch = hdr['arch']\n else:\n arch = 'src'\n \n ver = hdr['version']\n rel = hdr['release']\n epoch = hdr['epoch']\n if epoch is None:\n epoch = '0'\n pkgtuple = (name, arch, epoch, ver, rel)\n return pkgtuple",
"def parse_tuple(value):\n match = re.match(r'(\\w+)=(\\w+)\\((.*?)\\)', value)\n assert match, \"could not parse '%s'\" % value\n return match.group(1), eval(match.group(2))(match.group(3))",
"def test_Stock_output_named_tuple_vs_dictionary_1():\n assert Stock_tuple[0][0] == Stock_list_dict[0]['name'], \"Name is not getting stored properly\"",
"def _convert_to_tuple(r):\n if not r:\n return r\n else:\n return (r[\"token\"], r[\"value\"], r[\"code\"], r[\"address\"],)",
"def convert_dict2tuple(value):\n if isinstance(value, dict):\n for _keys in value:\n value[_keys] = convert_dict2tuple(value[_keys])\n return tuple(sorted(value.items()))\n return value",
"def tuple_multi_string(dictionary, sep=','):\n for key, value in dictionary.items():\n value_split = value.split(sep)\n\n if len(value_split) == 1 or len(value_split) == 0:\n pass\n else:\n dictionary[key] = tuple(value_split)\n\n return dictionary",
"def tuple_to_nx(t: tuple) -> tuple:\n return (t[0], t[1], {'capacity': t[2]})",
"def from_dict(values) -> \"TensorDescriptor\":\n return TensorDescriptor(**values)",
"def make_key(*values, **kwargs):\n if len(kwargs) == 0:\n key = tuple(v.key for v in values)\n else:\n res = [v.key for v in values]\n for k, v in sorted(kwargs.items()):\n if isinstance(v, (int, float, str)):\n res.append(k)\n res.append(v)\n else:\n raise TypeError(\n f\"Type {type(v)} is not yet supported, \"\n f\"v={v} and parameter {k!r}.\")\n key = tuple(res)\n return key",
"def from_dict(cls, _dict: Dict) -> 'Attribute':\n args = {}\n if 'name' in _dict:\n args['name'] = _dict.get('name')\n if 'value' in _dict:\n args['value'] = _dict.get('value')\n return cls(**args)",
"def from_dict(\n cls, d: typing.Mapping[str, typing.Union[str, float]]\n ) -> \"VLEPoint\":\n\n composition = Composition(p=d[\"composition\"], type=d[\"composition_type\"])\n return cls(\n composition=composition,\n pressures=(d[\"first_component_pressure\"], d[\"second_component_pressure\"]),\n temperature=d[\"temperature\"],\n )",
"def _from_dict(cls, d):\n confidence = d.get(\"confidence\", None)\n constant = d.get(\"constant\", False)\n tags = d.get(\"tags\", None)\n return cls(\n d[\"name\"],\n d[\"value\"],\n confidence=confidence,\n constant=constant,\n tags=tags,\n )",
"def from_dict(cls, tag_dict):\n return cls(tag_dict.get('tag_type'), tag_dict.get('value'))",
"def from_dict(cls, d):\n return cls(**d)",
"def namedtuple_with_defaults(typename,\n field_names,\n default_values=(),\n units=None):\n T = collections.namedtuple(typename, field_names)\n T.__new__.__defaults__ = (None, ) * len(T._fields)\n if isinstance(default_values, collections.abc.Mapping):\n prototype = T(**default_values)\n else:\n prototype = T(*default_values)\n T.__new__.__defaults__ = tuple(prototype)\n if units is None:\n T.units = ('-') * len(T._fields)\n else:\n T.units = units\n return T",
"def create(cls, dictionary):\n return cls(**dictionary)",
"def create(cls, dictionary):\n return cls(**dictionary)",
"def from_tuples(\n cls,\n values: Iterable[tuple[str, str | dt.DataType]],\n ) -> Schema:\n return cls(dict(values))",
"def valid_tuple(obj):\r\n try:\r\n assert isinstance(obj, tuple)\r\n assert isinstance(obj[0], str)\r\n assert isinstance(obj[1], str)\r\n except:\r\n raise Invalid(\"{} is not a valid key tuple\".format(obj))\r\n return obj",
"def _tuples_to_dict(self, tuples):\n d = {}\n for key, value in tuples:\n d[key] = value\n return d",
"def namedtuple_with_two_defaults(typename, field_names, default_values=('', '')):\n T = collections.namedtuple(typename, field_names)\n T.__new__.__defaults__ = default_values\n return T",
"def _entuple(r):\n return tuple(getattr(r, n) for n in r.__names__)",
"def _verify_named_tuple(named_tuple):\n\n if not bool(\n isclass(named_tuple)\n and issubclass(named_tuple, tuple)\n and callable(named_tuple)\n and hasattr(named_tuple, \"_fields\")\n ):\n raise TypeError(\n \"named_tuple parameter should be a tuple subclass created \"\n \"by the collections.namedtuple factory function, or a \"\n \"subclass of typing.NamedTuple.\"\n )",
"def selectRandomFromDict(ddata):\n\tdkeys = list(ddata.keys())\n\tdk = selectRandomFromList(dkeys)\n\tel = (dk, ddata[dk])\n\treturn el",
"def tuple(self, arg: SeField[Any]) -> str:\n if is_bare_tuple(arg.type):\n return arg.varname\n elif is_variable_tuple(arg.type):\n earg = arg[0]\n earg.name = \"v\"\n return f\"tuple({self.render(earg)} for v in {arg.varname})\"\n else:\n rvalues = []\n for i, _ in enumerate(type_args(arg.type)):\n r = arg[i]\n r.name = f\"{arg.varname}[{i}]\"\n rvalues.append(self.render(r))\n return f\"({', '.join(rvalues)},)\" # trailing , is required for single element tuples",
"def dict_to_obj(data):\n if not isinstance(data, dict):\n raise TypeError(\"Expected dict, got '%s' instead\" % type(data))\n \n class _tmp(object):\n pass\n \n obj = namedtuple(\"OBJ\", list(data.keys()))\n \n o = obj(**data)\n \n return o",
"def tokenize_key_value_pair(kv_pair):\n key, value = kv_pair.strip().split('\\t')\n key = tuple(key.strip().split())\n value = tuple(value.strip().split())\n return (key, value)",
"def convert_arg((arg, attrs, mode, typ, name)):\n iorname = name\n return iorname, (arg, attrs, mode, typ, name)",
"def to_tuple(waypoint):\n if isinstance(waypoint, dict):\n return (waypoint[\"lat\"], waypoint[\"lng\"])\n else:\n return waypoint",
"def add_make_tuple(self, input_names, name=None):\n return self._build_op('make_tuple', input_names, name=name)",
"def get_section_tuple(c_dict, section_name=''):\n\n subsections = [get_section_tuple(c_dict[ss], ss) for ss in c_dict.sections]\n section_tuple = (section_name, subsections, c_dict.scalars)\n return section_tuple",
"def flatten(keyValuePair):\n\n key = keyValuePair[0]\n value = keyValuePair[1]\n\n # create a flat tuple\n tupes = tuple([key] + list(value))\n\n return tupes",
"def expandable_from_tuple(tuple_, field_descriptions):\n result = types.Expandable(tuple_[0])\n for (name, _), value in zip(field_descriptions, tuple_[1:]):\n result.__dict__[name] = value\n return result",
"def key(cls, *args, **kwargs):\n\n items = [cls]\n if args:\n items.append(tuple(args))\n if kwargs:\n items.append(FrozenDict(kwargs))\n return tuple(items)",
"def get_random_key_from_dict(d: dict) -> [Union[str, int, tuple]]:\n return random.choice(list(d.keys()))",
"def get_tup(self):\n\n tup = (\n self.key,\n self.headers,\n self.proxies,\n self.link,\n self.delta,\n self.size,\n self.status,\n datetime.now(),\n )\n return tup",
"def is_namedtuple(obj):\n return isinstance(obj, tuple) and hasattr(obj, '_asdict')",
"def __to_tuple(self):\n return (self.bucket, self.name)",
"def double_ret(\n super_cool_arg, *, other_very_cool_arg: Optional[Dict[str, Any]] = None\n) -> Tuple[str, Tuple]:\n if other_very_cool_arg is None:\n other_very_cool_arg = {}\n return (\n str(super_cool_arg),\n tuple(other_very_cool_arg.keys(), other_very_cool_arg.values()),\n )",
"def _tuple_to_dict(tpl):\n if isinstance(tpl, list):\n d = []\n for e in tpl:\n d.append(_tuple_to_dict(e))\n elif isinstance(tpl, tuple):\n d = {}\n if isinstance(tpl[0], tuple):\n # tuple has some child tuple\n for e in tpl:\n d[e[0]] = _tuple_to_dict(e[1])\n elif isinstance(tpl[0], list):\n # list member should be processed recursively\n d = _tuple_to_dict(tpl[0])\n else:\n if len(tpl) == 2:\n # single tuple node\n d[tpl[0]] = _tuple_to_dict(tpl[1])\n else:\n raise Exception(tpl)\n else:\n # value or dict\n d = tpl\n return d",
"def from_dict(cls, d):\n return loadd(d, cls)",
"def ttuple(name, *types):\n\n sort = z3.Datatype(name)\n sort.declare(name, *[(fname, typ._z3_sort) for fname, typ in types])\n sort = sort.create()\n fields = {\"_z3_sort\" : sort}\n for fname, typ in types:\n code = \"\"\"\\\n@property\ndef %s(self):\n return wrap(self._z3_sort.%s(self._v))\"\"\" % (fname, fname)\n locals_dict = {}\n exec code in globals(), locals_dict\n fields[fname] = locals_dict[fname]\n return type(name, (STupleBase, SymbolicVal), fields)",
"def dict_factory(cursor, row):\n fields = [column[0] for column in cursor.description]\n return {key: value for key, value in zip(fields, row)}",
"def serialize_tuple(result):\n serialized = {}\n for key, value in result._asdict().items():\n resource, field = key.split('.')\n serialized[resource] = dict(**{field: value}, **serialized.get(resource, {}))\n\n return serialized",
"def csvToTuples(keyValuePair):\n\n keys = keyValuePair.split(', ')\n n = len(keys)\n value = int(keys.pop(n - 1))\n\n return (tuple(keys),value)",
"def split_name_and_attrs(cls, tag_name):\n # transform `key=\"value\"` to `(key, value)`\n def to_pair(s):\n removed_quote = ''.join(ch for ch in s if ch not in ('\"', \"'\"))\n return removed_quote.split('=')\n\n name_parts = tag_name.split()\n name = name_parts[0]\n raw_attrs = [w for w in name_parts if \"=\" in w]\n tag_attrs = dict(to_pair(w) for w in raw_attrs)\n return name, tag_attrs",
"def namedtuples2dicts(namedtuples):\n return {k: dict(v._asdict()) for k, v in namedtuples.items()}",
"def test_Stock_output_named_tuple_vs_dictionary_2():\n assert Stock_tuple[0][1] == Stock_list_dict[0][\"symbol\"], \"symbol is not getting stored properly\"",
"def from_tuple(cls, tpl):\n obj = cls()\n obj._x = tpl[0]\n obj._y = tpl[1]\n obj._z = tpl[2]\n return obj",
"def from_dict(cls, dikt) -> 'TokenEKey':\n return util.deserialize_model(dikt, cls)",
"def rename_dictionary_key(entry, dict_map):\n if isinstance(entry, dict):\n for old_key, new_key in dict_map.items():\n entry[new_key] = entry.pop(old_key)\n return entry\n elif isinstance(entry, list):\n return [\n dict(\n (dict_map[old_key], value)\n if old_key in dict_map else (old_key, value)\n for old_key, value in item.items()\n )\n for item in entry\n ]\n elif isinstance(entry, tuple):\n return tuple(\n tuple(\n (dict_map[value[0]], value[1])\n if value[0] in dict_map else value\n for value in item\n )\n for item in entry\n )",
"def str_tuple(item):\n return \"{}:{}\".format(item[0], item[1])",
"def to_kv(k: str, v: Union[int, float]) -> Tuple[str, float]:\n return (k, v**2)",
"def from_dict(cls, dikt) -> 'CompoundInfoIdentifiers':\n return util.deserialize_model(dikt, cls)",
"def parse(result):\n return {make_tuple(key[4:]): val for key, val in result.items()}",
"def test_Stock_output_named_tuple_vs_dictionary_7():\n assert Stock_tuple[0][6] == Stock_list_dict[0][\"weight\"], \"weight is not getting stored properly\"",
"def from_dict(cls, dictionary: Dict[str, Any]):\n return cls(**dictionary)",
"def from_tuple(cls, t):\n return cls(t[0], t[1])",
"def make_tuple(tuple_like):\n tuple_like = (\n tuple_like\n if isinstance(tuple_like, (list, tuple))\n else (tuple_like, tuple_like)\n )\n return tuple_like",
"def _key_vals(dict_):\n return [(key, val) for key, val in dict_.iteritems()]",
"def _prefixed(nt: namedtuple, prefix):\n result = {}\n for key, value in nt._asdict().items():\n result[prefix + key] = value\n return result",
"def customAritcleESDecoder(articleDict):\n return namedtuple(\"ArticleES\", articleDict.keys())(*articleDict.value())\n # namedtuple是一个函数,相当于执行函数返回函数的返回值",
"def from_tuples(\n cls,\n values: Iterable[tuple[str, str | dt.DataType]],\n ) -> Schema:\n pairs = list(values)\n if len(pairs) == 0:\n return cls({})\n\n names, types = zip(*pairs)\n\n # validate unique field names\n name_locs = {v: i for i, v in enumerate(names)}\n if len(name_locs) < len(names):\n duplicate_names = list(names)\n for v in name_locs:\n duplicate_names.remove(v)\n raise IntegrityError(f\"Duplicate column name(s): {duplicate_names}\")\n\n # construct the schema\n return cls(dict(zip(names, types)))"
] | [
"0.8427131",
"0.79982275",
"0.69924045",
"0.6788516",
"0.66534394",
"0.64902914",
"0.64544564",
"0.63769853",
"0.63622445",
"0.63100547",
"0.6171877",
"0.6161274",
"0.60242426",
"0.6023281",
"0.59786564",
"0.59320253",
"0.5883411",
"0.5876747",
"0.5843632",
"0.58232385",
"0.5811465",
"0.5803018",
"0.57801217",
"0.57160914",
"0.5713107",
"0.55356354",
"0.54730135",
"0.5382091",
"0.5373956",
"0.5339474",
"0.5320358",
"0.53171957",
"0.5307198",
"0.53031605",
"0.53031605",
"0.52994245",
"0.5247899",
"0.52459043",
"0.5230446",
"0.522707",
"0.5210404",
"0.5203593",
"0.52018607",
"0.51921546",
"0.519107",
"0.51746595",
"0.5152384",
"0.51513666",
"0.5150745",
"0.51269406",
"0.511158",
"0.50976664",
"0.50656766",
"0.50656766",
"0.50621694",
"0.5049523",
"0.50474393",
"0.5046773",
"0.5043695",
"0.5033216",
"0.501213",
"0.50103825",
"0.49978986",
"0.49952233",
"0.4988475",
"0.49726653",
"0.49720952",
"0.4971228",
"0.49584392",
"0.49448198",
"0.4944336",
"0.49425817",
"0.4941011",
"0.49382412",
"0.49364483",
"0.4934501",
"0.4933693",
"0.49214458",
"0.49166086",
"0.49155277",
"0.49115705",
"0.4911077",
"0.49005672",
"0.4891882",
"0.48857385",
"0.48630565",
"0.48630244",
"0.48573193",
"0.4852742",
"0.48479068",
"0.48472053",
"0.4839867",
"0.48377582",
"0.48278147",
"0.4819256",
"0.48192072",
"0.481891",
"0.48187557",
"0.4817653",
"0.48115"
] | 0.5780806 | 22 |
Returns whether the current instance is an edge server in cross-silo FL. | def is_edge_server() -> bool:
return Config().args.port is not None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_edge_site(self) -> bool:\n return self.config.edge",
"def is_connected_to(self, receiver: SkupperSite) -> bool:\n return receiver in self.connected_sites",
"def isEdge(self,x,y):\n\t\treturn y in self._dictOut[x]",
"def isEdge(self,x,y):\n\t\treturn y in self._dict[x]",
"def isEdge(self, x, y):\n return y in self._dictOut[x]",
"def isEdge(self,x,y):\n\t\treturn self._matr[x][y]",
"def has_edge(self, otherNode):\n\t\t\treturn otherNode in self.edges",
"def isEdge(self,x,y):\r\n return self.matr[x][y]",
"def has_edge(self, v1, v2):\n\n return v1 in self.get_reachables(v2[0], v2[1])",
"def is_edge(self):\n if self._row == 0 or self._row == 9 or self._column == 0 or self._column == 9:\n # check that the edge is not actually a corner square\n if not self.is_corner():\n # If not a corner and in a border row return True\n return True\n\n return False",
"def is_peered_with(self, other: SkupperSite) -> bool:\n if not self.cluster.peering:\n return False\n\n for c in self.cluster.peering.connections:\n if (\n isinstance(\n c,\n (\n ClusterPeeringConnectionClusterRequesterV1,\n ClusterPeeringConnectionClusterAccepterV1,\n ),\n )\n ) and c.cluster.name == other.cluster.name:\n return True\n return False",
"def isEdge(self, x, y):\n if y in self.parseX() or x in self.parseX():\n return y in self.dictOut[x]\n else :\n print(\"verteces not found\")",
"def is_adjacent(self, remote_host_name):\n # Check if a topology is defined, otherwise use fully connected\n if self.topology is None:\n return True\n\n if self.name in self.topology:\n if remote_host_name in self.topology[self.name]:\n return True\n else:\n return False\n else:\n logging.warning(\n \"Node {} is not in the specified topology and is therefore \"\n \"assumed to have no neighbors\".format(self.name)\n )\n return False",
"def connected(self):\n return self.izx.connected and self.ezx.connected",
"def IsWire(self, *args):\n return _BRepAlgo.BRepAlgo_EdgeConnector_IsWire(self, *args)",
"def is_connected(self):\n if self.V < 1:\n raise ValueError(\"empty graph\")\n if self.V < 2:\n return True\n if self.E == 0:\n return False\n cc = self.cc()\n return int(cc.max() == 0)",
"def has_neighbor(self):\n if self.cur_neighbor is None:\n return False\n if self.cur_neighbor['app_feat'] is None:\n return False\n return True",
"def is_cross_onap_link(self, logical_link):\n for relationship in logical_link[\"relationship-list\"][\"relationship\"]:\n if relationship[\"related-to\"] == \"ext-aai-network\":\n return True\n return False",
"def is_router(self):\n # @todo: Rewrite\n return self.address_set.count() > 1",
"def is_island(self, sites: Iterable[SkupperSite]) -> bool:\n # Neither incoming nor outgoing connections\n return (\n not self.has_incoming_connections(sites)\n and not self.connected_sites\n and not self.delete\n )",
"def has_edges(self):\n\n return len(self._edges) > 0",
"def is_bipartite(self):\n return True",
"def are_connected(self, node1, node2):\n return bool( self.get_edge(node1, node2) )",
"def has_bond_crossing(self):\n return self.count_bond_collisions() > 0",
"def is_connected(self):\n connected = False\n self.state = self.mesh.state()\n if self.state in (STATE_CHILD, STATE_ROUTER, STATE_LEADER, STATE_LEADER_SINGLE):\n connected = True\n return connected",
"def is_central_server() -> bool:\n return hasattr(Config().algorithm,\n 'cross_silo') and Config().args.port is None",
"def is_esi_node():\n\n # Fetch ACME logger and write debug message\n log = logging.getLogger(\"ACME\")\n log.debug(\"Test if hostname matches the pattern 'esi-sv*'\")\n return socket.gethostname().startswith(\"esi-sv\") and os.path.isdir(\"/cs\")",
"def is_bipartite(self):\n return self._.bipartite",
"def is_connected(self) -> bool:",
"def is_connected(self):\n return self.is_connected",
"def on_dedicated(self):\n\n return self.is_valid_platform() and self['MODE'] == 'enterprise'",
"def is_on(self) -> bool:\n val = bool(self._cluster_handler.cluster.get(self._zcl_attribute))\n return (not val) if self.inverted else val",
"def has_incoming_connections(self, sites: Iterable[SkupperSite]) -> bool:\n return any(other.is_connected_to(self) for other in sites)",
"def is_connected(self) -> bool:\n for node in self.nodes.values():\n if node.is_connected:\n return True\n return False",
"def is_connected(self) -> bool:\n return False if self._snitun is None else self._snitun.is_connected",
"def check_edges(self):\n screen_rect = self.screen.get_rect()\n if self.rect.right >= screen_rect.right:\n return True\n elif self.rect.left <= screen_rect.left:\n return True",
"def is_edge(self, v, w):\n return self.op_norm(v[0], w[0]) == (v[1] + w[1]) and (self.variant.is_bipartite() or v != w)",
"def is_on(self):\n return self._get_state() == ServerState.ON",
"def is_connected(self):\n vs = self.vertices()\n visited = self.bfs(vs[0])\n return len(visited) == len(vs)",
"def containsEdge(self, e):\n return any(e.nvt in [self.vertices[i-2], self.vertices[i]] and self.vertices[i-1] == e.pvt for i in range(len(self.vertices)))",
"def containsEdge(self, v1, v2):\n for e in self.edges:\n if (e.pvt, e.nvt) in [(v1, v2), (v2, v1)]:\n return True\n return False",
"def is_connected(self, line):\n return any(ext in (line.start, line.end) \\\n for ext in (self.start, self.end))",
"def contains_edge(self, node, other_node):\n return \\\n {node.get_name(), other_node.get_name()} in \\\n list([\n {edge.get_first_incident_node().get_name(), edge.get_second_incident_node().get_name()}\n for edge in self.get_edges()\n ]) # return true if there exists an edge between the input nodes and false otherwise",
"def is_virtual_network_host():\n return False",
"def isClientMultiplexingInterface(self):\n adaptation = self.getServerAdaptationFunction()\n if adaptation == None:\n return False # no adaptatation underneath\n else:\n clientcount = adaptation.getClientCount() # max. number of clients; None means unlimited\n return (clientcount != 1)",
"def isconnected(self) -> bool:",
"def is_connected(self):\n return True",
"def is_connected(self):\n return self.factory.is_connected",
"def is_connected(self):\n return self._ws is not None",
"def is_connected(self):\n return False",
"def check_edges(self):\n\t\tscreen_rect = self.screen.get_rect()\n\t\tif self.rect.right >= screen_rect.right:\n\t\t\treturn True\n\t\telif self.rect.left <= 0:\n\t\t\treturn True",
"def is_connected(self):\n if self.server: return True\n return False",
"def is_connected(self, node1, node2):\r\n\r\n return node1 in self.graph and node2 in self.graph[node1]",
"def has_hit_edge(self):\n return self.will_hit_edge(self._direction)",
"def is_connected(self):\n return self.connected",
"def check_edges(self):\n screen_rect = self.screen.get_rect()\n if self.rect.right >= screen_rect.right or self.rect.left <= 0:\n return True",
"def is_neighbour(self, other, diagonal):\n return other in self.neighbours(diagonal)",
"def isConnected(self):\n return False",
"def is_multigraph(self):\n # TO DO: Call coloring algorithm\n return True",
"def is_connected(self):\n return self.connector and self.connector.state == \"connected\"",
"def is_bipartite(self):\n # TO DO: Call coloring algorithm\n return False",
"def __has_multiple_edges(self):\n return \\\n len(\n list(\n [\n tuple((edge.get_first_incident_node().get_name(), edge.get_second_incident_node().get_name()))\n for edge in self.get_edges()\n ] # the length of the list which allows duplicates...\n )\n ) != \\\n len(\n set(\n {\n tuple((edge.get_first_incident_node().get_name(), edge.get_second_incident_node().get_name()))\n for edge in self.get_edges()\n } # ...should equal the length of the set that does not allow duplicates\n )\n ) # return True if the two data structures are equal in size and False otherwise",
"def is_remote(self):\n if socket.gethostbyname(socket.gethostname()).startswith('10.7'):\n return False\n else:\n return True",
"def is_network_node():\n return config.NODE_IP == config.NETWORK_NODE_IP",
"def is_multigraph(self):\n # TO DO: Call coloring algorithm\n return False",
"def is_connected(self):\n return self.connector and self.connector.state == 'connected'",
"def listening(self):\n return self._server is not None",
"def is_connected(self) -> bool:\n pass",
"def is_compatible(self, e2):\n\n return (self.type == TypeEdge.HOLE and e2.type == TypeEdge.HEAD) or (self.type == TypeEdge.HEAD and e2.type == TypeEdge.HOLE) \\\n or self.type == TypeEdge.UNDEFINED or e2.type == TypeEdge.UNDEFINED",
"def check_edges(self):\r\n screen_rect = self.screen.get_rect()\r\n if self.rect.right >= screen_rect.right:\r\n return True\r\n elif self.rect.left <= 0:\r\n return True",
"def failover_target(self) -> bool:\n return pulumi.get(self, \"failover_target\")",
"def node_is_edge(self, node: MazeCell) -> bool:\n return node.x == 0 or node.x == self._ncols - 1 or node.y == 0 or node.y == self._nrows - 1",
"def has(self, server):\n return (server in self.servers)",
"def independent(self) -> bool:\n parent = self._parent()\n if parent is None:\n return True\n connections = parent._graph.connections\n path = self._path\n lp = len(path)\n for con in connections:\n if con[\"type\"] == \"connection\":\n if con[\"target\"][:lp] == path:\n return False\n return True",
"def is_on(self) -> bool:\n if self._on_off_cluster_handler.on_off is None:\n return False\n return self._on_off_cluster_handler.on_off",
"def is_connected(self, node1, node2):\r\n\r\n return node1 in self._graph and node2 in self._graph[node1]",
"def isconnected(self) -> bool:\n ...",
"def is_lite_mode(ctx: commands.Context) -> bool:\n if is_private(ctx.message.channel):\n for g in ctx.bot.get_user_guilds(ctx.message.author.id):\n if g.id not in config.lite_servers:\n return False\n else:\n return ctx.message.guild in config.lite_servers",
"def has_firewall_component(server):\r\n if server['status'] != 'no_edit':\r\n return True\r\n\r\n return False",
"def is_connected(self) -> bool:\n\n return self.send(self.cmd.GET_SYSTEMLINE) == self.cmd.DEFAULT_SYSTEM_LINE",
"def isInCluster(self):\n logger.debug(\"Checking if %s is a part of cluster\" % self)\n role = self.getClusterRole()\n return role is not None and role != \"DISABLED\"",
"def is_connected(self):\n return \"_connection\" in self.__dict__",
"def is_connected(self):\n return self._proxy.get(\"is_connected\", \"filterwheel\")",
"def is_connected(self):\n return self._current_protocol is not None",
"def check_edges(self):\n if self.rect.right >= self.screen_rect.right or self.rect.left <= 0:\n return True",
"def local_network_check():\n return (\n network.show_active() in LOCAL_BLOCKCHAIN_ENVINROMENTS\n or network.show_active() in FORKED_LOCAL_ENVIRONMENTS\n )",
"def isInternal(self):\n\n\t\t# TODO optimization do we really need to look at the host attributes?\n\t\t# maybe we can just use the global attribute (faster)\n\t\tfe = self.newdb.getFrontendName()\n\t\tnetwork = self.newdb.getHostAttr(fe, 'Kickstart_PrivateNetwork')\n\t\tnetmask = self.newdb.getHostAttr(fe, 'Kickstart_PrivateNetmask')\n\n\t\t# Test based on our client's IP address.\n\t\twork = string.split(network, '.')\n\t\tmask = string.split(netmask, '.')\n\t\tip = string.split(self.clientList[-1], '.')\n\n\t\tfor i in range(0, len(ip)):\n\t\t\ta = int(ip[i]) & int(mask[i])\n\t\t\tb = int(work[i]) & int(mask[i])\n\n\t\t\tif a != b:\n\t\t\t\treturn 0\n\n\t\treturn 1",
"def CheckIfWiredConnecting(self):\n if self.wired.connecting_thread:\n return self.wired.connecting_thread.is_connecting\n else:\n return False",
"def can_failover(self):\n return self._can_failover",
"def is_local_client(self):\n return self.msg.is_local_client",
"def is_remote(client):\n if client == Client.ORIGIN:\n return is_origin_remote()\n elif client == Client.TARGET:\n return is_target_remote()\n elif client == Client.LOCAL:\n return False\n else:\n return False",
"def is_connected(object_one, object_two):\n\n for vert_one in object_one.Vertexes:\n for vert_two in object_two.Vertexes:\n if (vert_one.X == vert_two.X) and (vert_one.y == vert_two.y):\n return True\n\n return False",
"def is_client(self) -> bool:\n return self.zone.SharedRoomID and not self.zone.MasterMode",
"def has_2D(self):\n\t\tif self.have_fastas is False:\n\t\t\tself._extract_fastas_from_fast5()\n\t\t\tself.have_fastas = True\n\n\t\tif self.fastas.get('twodirections') is not None:\n\t\t\treturn True\n\t\treturn False",
"def isOnInteriorSide(self, v):\n n = self.normalVect()\n return n.dotProduct(vector(self.vertices[0]) - vector(v)) > 0",
"def is_connected(self) -> bool:\n return hasattr(_app_ctx_stack.top, \"zodb_connection\")",
"def door_in_edge(self, edge: list) -> bool:\n doors = self.get_interior_doors()\n room1 = self.get_rooms()[edge[0]]\n room2 = self.get_rooms()[edge[1]]\n for i in range(len(doors)):\n if utils.door_room_relation(doors[i], room1) and utils.door_room_relation(doors[i], room2):\n return True\n return False",
"def is_connected(self) -> bool:\n return (\n self._last_seen is not None\n and (dt_util.utcnow() - self._last_seen)\n < self._router.consider_home_interval\n )",
"def is_target_remote():\n return sync_mode in (SyncMode.SENDER, SyncMode.PROXY, SyncMode.DUMP_REMOTE,\n SyncMode.IMPORT_REMOTE, SyncMode.SYNC_REMOTE)",
"def active_failover_detect_host_now_follower(self):\n self.check_that_instance_is_alive()\n lfs = self.get_log_file()\n if lfs.find(\"resilientsingle up and running as follower\") >= 0:\n self.is_master = False\n return True\n return False"
] | [
"0.7256668",
"0.65036786",
"0.6496777",
"0.6456621",
"0.64283705",
"0.63906115",
"0.6335268",
"0.63215107",
"0.6301984",
"0.6286371",
"0.62700874",
"0.6203978",
"0.61531115",
"0.6063415",
"0.60367393",
"0.60330695",
"0.5981147",
"0.5979535",
"0.5975428",
"0.5965382",
"0.59630185",
"0.59559524",
"0.59321404",
"0.5926251",
"0.5923715",
"0.59187716",
"0.5913594",
"0.59066206",
"0.58825046",
"0.58815366",
"0.58783615",
"0.58636224",
"0.5861533",
"0.58536744",
"0.5844471",
"0.5824599",
"0.577288",
"0.57644206",
"0.5759425",
"0.57580507",
"0.57579243",
"0.5751948",
"0.5748963",
"0.57462925",
"0.5745294",
"0.5741416",
"0.5739905",
"0.5738596",
"0.5727371",
"0.57106704",
"0.57072204",
"0.57042164",
"0.57035565",
"0.5697792",
"0.56925756",
"0.56918645",
"0.5690224",
"0.5679996",
"0.56788963",
"0.56760144",
"0.56712073",
"0.5667384",
"0.5666401",
"0.56631726",
"0.56577677",
"0.56537735",
"0.56493396",
"0.56399965",
"0.5637796",
"0.5637789",
"0.5632978",
"0.56311744",
"0.5628199",
"0.5627773",
"0.56267005",
"0.5620002",
"0.5616226",
"0.5611316",
"0.5608288",
"0.55980587",
"0.5586811",
"0.5577355",
"0.55728596",
"0.55707264",
"0.5568968",
"0.5564354",
"0.55567706",
"0.55555683",
"0.5554548",
"0.5549894",
"0.554688",
"0.55363464",
"0.55349076",
"0.55287164",
"0.5528049",
"0.55264604",
"0.55256456",
"0.5523123",
"0.55213284",
"0.5517402"
] | 0.7117317 | 1 |
Returns whether the current instance is a central server in cross-silo FL. | def is_central_server() -> bool:
return hasattr(Config().algorithm,
'cross_silo') and Config().args.port is None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_remote(self):\n if socket.gethostbyname(socket.gethostname()).startswith('10.7'):\n return False\n else:\n return True",
"def is_on(self) -> bool:\n val = bool(self._cluster_handler.cluster.get(self._zcl_attribute))\n return (not val) if self.inverted else val",
"def central_server_alive(cls, timeout=1):\n central_server_address, _ = cls.get_central_address()\n\n try:\n requests.get(central_server_address, timeout=timeout, verify=False)\n except (Timeout, ConnectionError):\n return False\n\n return True",
"def is_connected(self):\n if self.server: return True\n return False",
"def is_local_client(self):\n return self.msg.is_local_client",
"def isInCluster(self):\n logger.debug(\"Checking if %s is a part of cluster\" % self)\n role = self.getClusterRole()\n return role is not None and role != \"DISABLED\"",
"def is_origin_remote():\n return sync_mode in (SyncMode.RECEIVER, SyncMode.PROXY, SyncMode.DUMP_REMOTE,\n SyncMode.IMPORT_REMOTE, SyncMode.SYNC_REMOTE)",
"def _checkTorcsServer(self):\n isRunning = False\n if self.torcsServerProcess is not None:\n if self.torcsServerProcess.poll() is None:\n isRunning = True\n return isRunning",
"def has_upstream_server(self) -> bool:\n return True if self.host is not None else False",
"def is_client(self) -> bool:\n return self.zone.SharedRoomID and not self.zone.MasterMode",
"def local(self):\n return self.hostname == \"localhost\" and self.user is None and self.ssh_args is None",
"def isClientMultiplexingInterface(self):\n adaptation = self.getServerAdaptationFunction()\n if adaptation == None:\n return False # no adaptatation underneath\n else:\n clientcount = adaptation.getClientCount() # max. number of clients; None means unlimited\n return (clientcount != 1)",
"def is_host(self):\n return self.host",
"def is_remote(self):\n return False",
"def has(self, server):\n return (server in self.servers)",
"def on_internal_cluster(self) -> bool:\n return self.cluster.internal or False",
"def is_remote(client):\n if client == Client.ORIGIN:\n return is_origin_remote()\n elif client == Client.TARGET:\n return is_target_remote()\n elif client == Client.LOCAL:\n return False\n else:\n return False",
"def am_I_master(self, ipdict):\n hostname = socket.gethostname()\n ip_address = socket.gethostbyname(hostname)\n return ipdict.get(ip_address).is_master",
"def isMaster(self):\n logger.debug(\"Checking if %s is Cloudera Master\" % self)\n is_master = self.getClusterRole()\n logger.debug(\"Is %s master: %s\" % (self, is_master))\n return is_master",
"def mmo_is_configsrv(self, mmo_connection):\n return True if \"configsvr\" in str(mmo_connection[\"admin\"].command(\"getCmdLineOpts\")[\"parsed\"]) else False",
"def is_master(self):\n return MPControl.is_master",
"def check_connection_to_db(self):\n try:\n self._client.admin.command('ismaster')\n return True\n except Exception:\n return False",
"def on_public_cluster(self) -> bool:\n return not self.on_private_cluster",
"def is_connected(self) -> bool:\n return False if self._snitun is None else self._snitun.is_connected",
"def is_target_remote():\n return sync_mode in (SyncMode.SENDER, SyncMode.PROXY, SyncMode.DUMP_REMOTE,\n SyncMode.IMPORT_REMOTE, SyncMode.SYNC_REMOTE)",
"def is_on(self):\n return self._get_state() == ServerState.ON",
"def is_esi_node():\n\n # Fetch ACME logger and write debug message\n log = logging.getLogger(\"ACME\")\n log.debug(\"Test if hostname matches the pattern 'esi-sv*'\")\n return socket.gethostname().startswith(\"esi-sv\") and os.path.isdir(\"/cs\")",
"def is_lite_mode(ctx: commands.Context) -> bool:\n if is_private(ctx.message.channel):\n for g in ctx.bot.get_user_guilds(ctx.message.author.id):\n if g.id not in config.lite_servers:\n return False\n else:\n return ctx.message.guild in config.lite_servers",
"def is_client(self):\n if not hasattr(self, '_is_client'):\n self._is_client = hasattr(self, 'client')\n return self._is_client",
"def is_connected(self) -> bool:\n\n return self.send(self.cmd.GET_SYSTEMLINE) == self.cmd.DEFAULT_SYSTEM_LINE",
"def _is_self(self, ip, port):\n import socket as sk\n self_ip = sk.gethostbyname(sk.gethostname())\n self_port = self.config['API_PORT']\n return str(self_ip) == ip and self_port == port",
"def remote(self):\r\n return self._url.scheme in ('http', 'https')",
"def is_connected_to(self, receiver: SkupperSite) -> bool:\n return receiver in self.connected_sites",
"def CheckIfConnecting(self):\n if self.CheckIfWiredConnecting() or self.CheckIfWirelessConnecting():\n return True\n else:\n return False",
"def on_dedicated(self):\n\n return self.is_valid_platform() and self['MODE'] == 'enterprise'",
"def isSciServerComputeEnvironment():\n if os.path.isfile(\"/home/idies/keystone.token\"):\n return True\n else:\n return False",
"def is_server_default(self):\n ...",
"def isup(self):\n if self.cloudserver:\n # print self.cloudserver.status\n if self.cloudserver.status in (\"ACTIVE\",):\n return True\n \n return False",
"def is_ncar_host():\n hostname = socket.getfqdn()\n \n return any([re.compile(ncar_host).search(hostname) \n for ncar_host in ['cheyenne', 'casper', 'hobart']])",
"def remote_publishing_master():\n return remote_publishing() and hasattr(settings, 'NEWS_REMOTE_ROLE') \\\n and settings.NEWS_REMOTE_ROLE is 'MASTER'",
"def has_firewall_component(server):\r\n if server['status'] != 'no_edit':\r\n return True\r\n\r\n return False",
"def is_remote(self): # -> Any | bool:\n ...",
"def isInternal(self):\n\n\t\t# TODO optimization do we really need to look at the host attributes?\n\t\t# maybe we can just use the global attribute (faster)\n\t\tfe = self.newdb.getFrontendName()\n\t\tnetwork = self.newdb.getHostAttr(fe, 'Kickstart_PrivateNetwork')\n\t\tnetmask = self.newdb.getHostAttr(fe, 'Kickstart_PrivateNetmask')\n\n\t\t# Test based on our client's IP address.\n\t\twork = string.split(network, '.')\n\t\tmask = string.split(netmask, '.')\n\t\tip = string.split(self.clientList[-1], '.')\n\n\t\tfor i in range(0, len(ip)):\n\t\t\ta = int(ip[i]) & int(mask[i])\n\t\t\tb = int(work[i]) & int(mask[i])\n\n\t\t\tif a != b:\n\t\t\t\treturn 0\n\n\t\treturn 1",
"def is_master(self):\n return self._is_master",
"def local_network_check():\n return (\n network.show_active() in LOCAL_BLOCKCHAIN_ENVINROMENTS\n or network.show_active() in FORKED_LOCAL_ENVIRONMENTS\n )",
"def is_connected(self):\n return self.hub.is_connected and self.client.is_running",
"def is_remote(self):\n raise NotImplementedError()",
"def isStation(self) -> bool:\n return self.station",
"def _check_chassis_cluster(handle):\n out = handle.cli(command=\"show chassis cluster status\").response()\n return False if re.search(\"Chassis cluster is not enabled\", str(out)) else True",
"def has_client(self):\n \n return len(self._clients) > 0",
"def has_client(self):\n \n return len(self._clients) > 0",
"def CheckIfWiredConnecting(self):\n if self.wired.connecting_thread:\n return self.wired.connecting_thread.is_connecting\n else:\n return False",
"def is_cluster_leader(self):\n return self.leader == 'self'",
"def is_connected(self):\n return self.factory.is_connected",
"def isConnected(self):\n return self.__cooperationClient.hasConnections()",
"def listening(self):\n return self._server is not None",
"def is_connected(self):\n return self._proxy.get(\"is_connected\", \"filterwheel\")",
"def is_remote(self):\n\t\treturn bool(call_sdk_function('PrlVmDev_IsRemote', self.handle))",
"def is_native(self):\n this_contact_list = self.contactstate()\n return (self.nativeclist == this_contact_list)",
"def _ServerIsRunning( self ):\n return utils.ProcessIsRunning( self._gocode_handle )",
"def is_connected(self):\n return self.is_connected",
"def is_connected(self):\n return self._ws is not None",
"def is_master(self) -> bool:\n return self.zone.SharedRoomID and self.zone.MasterMode",
"def is_on(self) -> bool:\n if self._on_off_cluster_handler.on_off is None:\n return False\n return self._on_off_cluster_handler.on_off",
"def test_central(self):\n self.assertTrue(self.cs.isCentral)",
"def is_connected(self):\n return self._current_protocol is not None",
"def is_cups_server(rm):\n try:\n s = socket.socket()\n s.settimeout(0.3)\n s.connect((rm, 631))\n s.close()\n\n return True\n except (socket.error, socket.timeout):\n return False",
"def is_connected(self):\n return True",
"def server_flag(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"server_flag\")",
"def is_server_running(self, shut_off_is_down: bool = False) -> bool:\n out = self.cloud_cli.run_cloud_cmd(\n f\"compute --project={self.project} instances describe --zone={self.zone} {self.name} --format=json\")\n try:\n out = json.loads(out.strip())\n except json.JSONDecodeError:\n return False\n return True",
"def is_standalone(self) -> bool:\n if not self.name or self.fallback:\n return True\n\n return False",
"def is_mgr():\n if get_cluster_vendor() == \"sgi\":\n return sgi_cluster.is_sac()\n elif get_cluster_vendor() == \"ibm\": \n return ibm_cluster.is_xcat_mgr()\n\n return False",
"def is_network_node():\n return config.NODE_IP == config.NETWORK_NODE_IP",
"def is_connected(self):\n return self.connector and self.connector.state == \"connected\"",
"def is_scm(self):\n if self.server_params[-1].scm_class.value == \"dcpm\":\n return True\n return False",
"def owserver_running():\n for proc in psutil.process_iter():\n if 'owserver' in proc.name():\n return True\n return False",
"def is_center(self):\n if self.pupils_located:\n return self.is_right() is not True and self.is_left() is not True",
"def is_connected(self):\n return self.connected",
"def is_connected(self):\n return self.connector and self.connector.state == 'connected'",
"def is_connected(self):\n if not self._host:\n if self._database:\n return True\n else:\n return False\n else:\n return self._factory.is_connected()",
"def is_on(self) -> bool:\n return self._smart_system.is_ws_connected",
"def is_connected(self) -> bool:\n\n try:\n return self.send(self.cmd.GET_SYSTEMMODEL) == self.cmd.C815_SYSTEMMODEL\n except PLConnectionError:\n return False",
"def status_check(self):\n try:\n client = self.connect()\n client.sys.is_initialized() # make an actual network connection\n return True\n except:\n return False",
"def is_node_master(self) -> bool:\n self._assert_local_rank_set()\n return self.local_rank == 0",
"def check_corleone_config():\n try:\n# Checking for neo4j is obsolete because there won't be such service\n# Lionfish is taking over neo4j (no REST console)\n# neo4j_host = du.get_configuration('neo4j', 'host')\n# neo4j_port = du.get_configuration('neo4j', 'port')\n lionfish_host = du.get_configuration('lionfish', 'host')\n lionfish_port = du.get_configuration('lionfish', 'port')\n except Exception as error:\n print unicode(error)\n return False\n# Again: obsolete\n# if not neo4j_host or not neo4j_port or not lionfish_host \\\n# or not lionfish_port:\n\n if not lionfish_port or not lionfish_host:\n return False\n return True",
"def CheckIfWirelessConnecting(self):\n if self.wifi.connecting_thread:\n return self.wifi.connecting_thread.is_connecting\n else:\n return False",
"def host_network(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"host_network\")",
"def host_network(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"host_network\")",
"def isSciServerComputeEnvironment():\n if os.path.isfile(KeystoneTokenPath):\n return True\n else:\n return False",
"def is_connected(self):\n return \"_connection\" in self.__dict__",
"def is_vnc_server_started(self):\n\t\treturn bool(call_sdk_function('PrlVmInfo_IsVncServerStarted', self.handle))",
"def is_connected(self) -> bool:\n return hasattr(_app_ctx_stack.top, \"zodb_connection\")",
"def is_connected(self, test=False):\n return self._server.is_connected()",
"def remote_publishing_slave():\n return remote_publishing() and hasattr(settings, 'NEWS_REMOTE_ROLE') \\\n and settings.NEWS_REMOTE_ROLE is 'SLAVE'",
"def _is_sshd_server_running(self, timeout=1):\n try:\n self.ssh_client.connect(timeout=timeout)\n self.ssh_client.close()\n return True\n except Exception:\n return False",
"def is_virtual_network_host():\n return False",
"def is_secured_cluster(self, services):\n return services and \"cluster-env\" in services[\"configurations\"] and\\\n \"security_enabled\" in services[\"configurations\"][\"cluster-env\"][\"properties\"] and\\\n services[\"configurations\"][\"cluster-env\"][\"properties\"][\"security_enabled\"].lower() == \"true\"",
"def is_peered_with(self, other: SkupperSite) -> bool:\n if not self.cluster.peering:\n return False\n\n for c in self.cluster.peering.connections:\n if (\n isinstance(\n c,\n (\n ClusterPeeringConnectionClusterRequesterV1,\n ClusterPeeringConnectionClusterAccepterV1,\n ),\n )\n ) and c.cluster.name == other.cluster.name:\n return True\n return False",
"def isLocal(self, connectionInfo):\n return False",
"def isConnected(self):\n if self._session is None:\n return False\n return self._session.isalive() is True"
] | [
"0.66080767",
"0.65418833",
"0.6524869",
"0.6519794",
"0.6491021",
"0.64296925",
"0.639115",
"0.63826996",
"0.6380103",
"0.6369597",
"0.63640034",
"0.6328281",
"0.6286668",
"0.62676233",
"0.6239149",
"0.6202125",
"0.61955136",
"0.61894524",
"0.61597776",
"0.6134595",
"0.61243284",
"0.61220825",
"0.61082244",
"0.6102957",
"0.6099582",
"0.6092593",
"0.60785675",
"0.6070335",
"0.6064259",
"0.6063599",
"0.60358137",
"0.6024761",
"0.60236007",
"0.6018265",
"0.600995",
"0.60037667",
"0.60026723",
"0.59867996",
"0.59779155",
"0.59752727",
"0.59530586",
"0.5951935",
"0.59444445",
"0.59310937",
"0.59254587",
"0.59069264",
"0.5904617",
"0.58959895",
"0.58835965",
"0.5878824",
"0.5878824",
"0.5868247",
"0.58599436",
"0.585966",
"0.58582455",
"0.58545125",
"0.5848427",
"0.5838904",
"0.58314973",
"0.58237714",
"0.5822583",
"0.5819301",
"0.5819252",
"0.5809582",
"0.57935673",
"0.5792857",
"0.57673645",
"0.5764089",
"0.57595867",
"0.5759314",
"0.57547045",
"0.57524544",
"0.57430285",
"0.57416564",
"0.57302594",
"0.57243735",
"0.5719036",
"0.57181764",
"0.57153744",
"0.57137865",
"0.5705121",
"0.5704",
"0.5703532",
"0.56984705",
"0.56946063",
"0.5690415",
"0.5688561",
"0.5688561",
"0.5685936",
"0.56852174",
"0.56848323",
"0.5684163",
"0.5682926",
"0.5681869",
"0.5679943",
"0.5676494",
"0.5675995",
"0.56757027",
"0.56698716",
"0.5667334"
] | 0.83141166 | 0 |
Returns the device to be used for training. | def device() -> str:
import torch
if torch.cuda.is_available() and torch.cuda.device_count() > 0:
if hasattr(Config().trainer,
'parallelized') and Config().trainer.parallelized:
device = 'cuda'
else:
device = 'cuda:' + str(
random.randint(0,
torch.cuda.device_count() - 1))
else:
device = 'cpu'
return device | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def device(self) -> torch.device:\n for param in self.parameters():\n return param.device\n return get_device(\"cpu\")",
"def get_device():\n import torch\n\n if torch.cuda.is_available():\n return torch.device('cuda')\n return torch.device('cpu')",
"def device():\n return torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')",
"def device(self):\n hw = self.hw()\n if hw: return hw.device()",
"def device(self):\n return torch.cuda.current_device()",
"def device(self):\n return next(self.parameters()).device",
"def device(self):\n return next(self.parameters()).device",
"def device(self):\n return next(self.parameters()).device",
"def device(self):\n return next(self.parameters()).device",
"def device(self):\n return next(self.parameters()).device",
"def get_default_device():\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n return device",
"def device():\n return G.DEVICE",
"def device(self) -> torch.device:\n return self._device",
"def device(self):\n return self._vars[0].device",
"def get_default_device():\n if torch.cuda.is_available():\n return torch.device('cuda')\n else:\n return torch.device('cpu')",
"def get_default_device():\n if torch.cuda.is_available():\n return torch.device('cuda')\n else:\n return torch.device('cpu')",
"def get_default_device():\n if torch.cuda.is_available():\n return torch.device('cuda')\n else:\n return torch.device('cpu')",
"def get_default_device():\n if torch.cuda.is_available():\n return torch.device('cuda')\n else:\n return torch.device('cpu')",
"def get_default_device():\n if torch.cuda.is_available():\n return torch.device('cuda:0')\n else:\n return torch.device('cpu')",
"def get_device(self):\n raise NotImplementedError()",
"def _create_device(self):\n return torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")",
"def device(self):\n return self._tensor.device",
"def setup_device(self, conf: DictConfig) -> device:\n device = torch.device(conf.runner.device) if torch.cuda.is_available() else torch.device('cpu')\n\n return device",
"def get_device(model):\n\tif next(model.parameters()).is_cuda:\n\t\treturn 'cuda:{}'.format(torch.cuda.current_device())\n\telse:\n\t\treturn 'cpu'",
"def device(self):\n\n\t\treturn self._device",
"def get_device(i=0):\n if torch.cuda.is_available():\n return torch.device(\"cuda:%d\" % i)\n else:\n return torch.device(\"cpu\")",
"def get_device(self) -> str:\n pass",
"def device(self):\n return self._device",
"def get_device(model):\n p = next(model.parameters())\n return p.device",
"def get_device(l):\n if not l.device:\n l.device = find_device()\n setup_device(l.device)\n return l.device",
"def finddevice():\n\n return next((device for device in [\"xpu\"] if hasattr(torch, device) and getattr(torch, device).is_available()), None)",
"def get_default_device():\n global _default_device\n\n if _default_device is None:\n import wgpu.backends.rs # noqa\n\n adapter = wgpu.request_adapter(canvas=None, power_preference=\"high-performance\")\n _default_device = adapter.request_device()\n return _default_device",
"def device(self) -> str:\n return self._device",
"def get_device(self):\n return self.parent.get_device()",
"def get_device(args: dict) -> torch.device:\n\n if is_distributed(args):\n device = torch.device(\"cuda\", args.local_rank)\n else:\n if torch.cuda.is_available():\n device = torch.device(\"cuda\", 0)\n else:\n device = torch.device(\"cpu\")\n return device",
"def get_default_device():\n return MXNET_DEFAULT_DEVICE",
"def get_device(arn=None):\n pass",
"def _get_device():\n return context.get_context('device_target')",
"def get_device(arguments):\n\n # Checks if the GPU is available to be used and sets the .\n if arguments.gpu and torch.cuda.is_available():\n return torch.device(f\"cuda:{torch.cuda.device_count() - 1}\")\n\n # Sets the device to CPU.\n else:\n return torch.device(\"cpu\")",
"def device(self):\n return get_device(self.module_to_quantize)",
"def get_available_device():\n if torch.cuda.is_available():\n free_mem, device_idx = 0.0, 0\n for d in range(torch.cuda.device_count()):\n mem = torch.cuda.get_device_properties(d).total_memory - torch.cuda.memory_allocated(d)\n if mem > free_mem:\n device_idx = d\n free_mem = mem\n return torch.device(f'cuda:{device_idx}')\n else:\n return torch.device('cpu')",
"def main_device(self):\n return self._main_device",
"def global_device():\n global _DEVICE\n return _DEVICE",
"def model_device(model):\n # Source: https://discuss.pytorch.org/t/how-to-check-if-model-is-on-cuda/180\n try:\n return str(next(model.parameters()).device)\n except StopIteration:\n # Model has no parameters\n pass\n return 'cpu'",
"def device_class(self):\r\n return self._device_class",
"def get_device():\n c_dev = ct.c_int(0)\n safe_call(backend.get().af_get_device(ct.pointer(c_dev)))\n return c_dev.value",
"def device_class(self):\n return None",
"def device_class(self):\n return None",
"def _get_device(self, n_gpu_use):\n n_gpu = torch.cuda.device_count()\n if n_gpu_use > 0 and n_gpu == 0:\n self.logger.warning(\"Warning: There\\'s no GPU available on this machine,\"\n \"training will be performed on CPU.\")\n n_gpu_use = 0\n if n_gpu_use > n_gpu:\n self.logger.warning(f\"Warning: The number of GPU\\'s configured to use is {n_gpu_use}, \"\n f\"but only {n_gpu} are available on this machine.\")\n n_gpu_use = n_gpu\n device = torch.device('cuda:0' if n_gpu_use > 0 else 'cpu')\n list_ids = list(range(n_gpu_use))\n self.logger.info(f'Using device: {device}, {list_ids}')\n return device, list_ids",
"def device(self):\n return self.share.device",
"def device_class(self):\n return self._device_class",
"def device_class(self):\n return self._device_class",
"def device_class(self):\n return self._device_class",
"def device_class(self):\n return self._device_class",
"def device_class(self):\n return self._device_class",
"def get_device_of(self, tensor):\n if not tensor.is_cuda:\n return -1\n else:\n return tensor.get_device()",
"def _get_current_device() -> Device | None:\n if task_runtime.has_environment():\n return task_runtime.get_current_devices()[0]\n return None",
"def return_free_GPU():\r\n if torch.cuda.is_available():\r\n gpu_num = torch.cuda.device_count()\r\n device = torch.device('cuda:{}'.format(gpu_num-1))\r\n print('Using GPU:[{}]/[{}] for training...'.format(gpu_num-1,gpu_num-1))\r\n return device\r\n \r\n raise ValueError('GPU not available for training. Check CUDA env with function \"check_cuda_env\"')",
"def device_class(self):\r\n return self._sensor_cfg[3]",
"def device_class(self):\r\n return self._sensor_cfg[3]",
"def device(self):\n # return self.inv_threshold.device\n return self.threshold.device",
"def device_num(self) -> str:\n return pulumi.get(self, \"device_num\")",
"def setup_device(n_gpus: int) -> object:\n if n_gpus >= 1 and torch.cuda.is_available():\n LOG.info('\\n CUDA is available! using GPU...')\n return torch.device('cuda')\n else:\n LOG.info('\\n Using CPU...')\n return torch.device('cpu')",
"def device(deviceid):\n\n # Torch device\n # pylint: disable=E1101\n return deviceid if isinstance(deviceid, torch.device) else torch.device(Models.reference(deviceid))",
"def device(self):\n return self.broker.device(**{\"JobDetailID\": self.JobDetailID})",
"def device_class(self):\n return SENSOR_DEFAULT_DEVICE_CLASS",
"def magma_getdevice():\n\n dev = c_int_type()\n _libmagma.magma_getdevice(ctypes.byref(dev))\n return dev.value",
"def GetGPU():\n return option['device_id']",
"def device_class(self):\n return self._device_type",
"def device_class(self):\n return self.type",
"def device_class(self):\n return self.type",
"def device_class(self):\n return self.type",
"def device_class(self):\n return SENSOR_TYPES[self.sensor][3].get(\"device_class\")",
"def device_class(self):\n return DEVICE_CLASSES.get(self.sensor_data[\"model\"])",
"def set_device(in_arg): \n \n return torch.device(\"cuda\" if torch.cuda.is_available() and in_arg.gpu == 1 else \"cpu\")",
"def get_device_of(tensor: torch.Tensor) -> int:\n if not tensor.is_cuda:\n return -1\n else:\n return tensor.get_device()",
"def device(self) -> SmartNvmeDevice:\n return self._device",
"def device_type(self):\r\n return self._arm.device_type",
"def _next_device(self):\n if self._num_gpus == 0:\n return ''\n dev = '/gpu:%d' % self._cur_gpu\n if self._num_gpus > 1:\n self._cur_gpu = (self._cur_gpu + 1) % (self._num_gpus-1)\n return dev",
"def device(request):\n d = request.param()\n\n # enable GPU error checking\n if isinstance(d, hoomd.device.GPU):\n d.gpu_error_checking = True\n\n return d",
"def device_class(self) -> str | None:\n return self._get_sensor_type()[2]",
"def device(self):\n return self.broker.device(**{\"DeviceRouteID\": self.DeviceRouteID})",
"def device_class(self):\n return SENSOR_TYPES[self._type][3] if self._type in SENSOR_TYPES else None",
"def device(self):\n return self.broker.device(**{\"VirtualNetworkMemberID\": self.VirtualNetworkMemberID})",
"def device_path(self):\n return self._engine.device_path()",
"def get_module_device(module: nn.Module) -> torch.device:\n try:\n next(module.parameters())\n except StopIteration as e:\n raise ValueError('The input module should contain parameters.') from e\n\n if next(module.parameters()).is_cuda:\n return next(module.parameters()).get_device()\n\n return torch.device('cpu')",
"def device_class(self):\n if self.sensor_class in DEVICE_CLASSES:\n return self.sensor_class\n return None",
"def device_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"device_name\")",
"def device_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"device_name\")",
"def device_class(self):\n # type: () -> string_types\n return self._device_class",
"def get_device(link):\n device = Device(\"\",0,0,0,0,0)\n device.link = link\n return device.identify()",
"def get_loop_device():\n return \"/dev/%s%s\" % (\"loop\", os.getuid())",
"def CUDA_VISIBLE_DEVICES(self):\n return self._CUDA_VISIBLE_DEVICES",
"def device_config(self):\n\t\ttry:\n\t\t\treturn self._dev\n\t\texcept:\n\t\t\treturn 0",
"def device_class(self):\n return self._sensor_type",
"def deviceid(gpu):\n\n # Return if this is already a torch device\n # pylint: disable=E1101\n if isinstance(gpu, torch.device):\n return gpu\n\n # Always return -1 if gpu is None or an accelerator device is unavailable\n if gpu is None or not Models.hasaccelerator():\n return -1\n\n # Default to device 0 if gpu is True and not otherwise specified\n if isinstance(gpu, bool):\n return 0 if gpu else -1\n\n # Return gpu as device id if gpu flag is an int\n return int(gpu)",
"def device_class(self):\n return SENSOR_TYPES[self._type][1]",
"def device_class(self):\n return SENSOR_TYPES[self._type][1]",
"def create_gpu_device_if_present():\n d = dpctl.SyclDevice(\"gpu,cpu\")\n print(\"Selected \" + (\"GPU\" if d.is_gpu else \"CPU\") + \" device\")",
"def try_gpu(i=0): #@save\n if len(tf.config.experimental.list_physical_devices('GPU')) >= i + 1:\n return tf.device(f'/GPU:{i}')\n return tf.device('/CPU:0')"
] | [
"0.8276948",
"0.8204713",
"0.81753296",
"0.80565923",
"0.79779685",
"0.79452527",
"0.79452527",
"0.79452527",
"0.79452527",
"0.79452527",
"0.78916585",
"0.78675044",
"0.7835675",
"0.77565765",
"0.77542937",
"0.77542937",
"0.77542937",
"0.77542937",
"0.7713507",
"0.7709037",
"0.7634034",
"0.76193523",
"0.7597322",
"0.7537676",
"0.75102514",
"0.7479524",
"0.7453658",
"0.7396959",
"0.73892164",
"0.7281287",
"0.72776663",
"0.7273611",
"0.7250147",
"0.72495735",
"0.7205435",
"0.72048366",
"0.72021776",
"0.7171577",
"0.70809144",
"0.7067394",
"0.7052181",
"0.70504457",
"0.7049886",
"0.70478153",
"0.7033034",
"0.70127153",
"0.6994459",
"0.6994459",
"0.6973132",
"0.6964583",
"0.69379425",
"0.69379425",
"0.69379425",
"0.69379425",
"0.69379425",
"0.6917063",
"0.687466",
"0.6866811",
"0.6852704",
"0.6852704",
"0.68497366",
"0.6814676",
"0.6809965",
"0.68092674",
"0.6796217",
"0.67630464",
"0.6754054",
"0.67525125",
"0.67382526",
"0.6733138",
"0.6733138",
"0.6733138",
"0.672693",
"0.67164236",
"0.6701289",
"0.6695209",
"0.6674128",
"0.66634333",
"0.6624879",
"0.6616878",
"0.6614751",
"0.6607334",
"0.6583377",
"0.6580983",
"0.6578641",
"0.6563631",
"0.65335524",
"0.65295887",
"0.65295887",
"0.65192884",
"0.64422303",
"0.6439622",
"0.6432497",
"0.64286494",
"0.64257956",
"0.64196575",
"0.6413054",
"0.6413054",
"0.6392007",
"0.63783145"
] | 0.8220512 | 1 |
Check if the hardware and OS support data parallelism. | def is_parallel() -> bool:
import torch
return hasattr(Config().trainer, 'parallelized') and Config(
).trainer.parallelized and torch.cuda.is_available(
) and torch.distributed.is_available(
) and torch.cuda.device_count() > 1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parallel_safe(self):\n return True",
"def is_multiprocessing_problematic():\n # Handling numpy linked against accelerate.\n config_info = str([value for key, value in\n np.__config__.__dict__.items()\n if key.endswith(\"_info\")]).lower()\n\n if \"accelerate\" in config_info or \"veclib\" in config_info:\n return True\n elif \"openblas\" in config_info:\n # Most openBLAS can only operate with one thread...\n os.environ[\"OPENBLAS_NUM_THREADS\"] = \"1\"\n else:\n return False",
"def parallel_safe(self):\n\n return True",
"def is_available(self) -> bool:\n return (\n len(self._gpu_ids) > 1\n and \"TORCHELASTIC_RUN_ID\"\n not in os.environ # If otx is executed by torchrun, then otx multi gpu interface is disabled.\n )",
"def can_use_omp_threads(self, omp_threads):\n return self.cores_per_node >= omp_threads",
"def DataAvailable(self) -> bool:",
"def is_system_ready_for_benchmarking():\n\n # check if scaling_governor is set to 'performance' for all cpu cores\n cpu_governors = glob.glob('/sys/devices/system/cpu/cpu*/cpufreq/scaling_governor')\n if not cpu_governors:\n logger.error('no scaling_governor found. Do you run on a Linux System?')\n return False\n for governor in sorted(cpu_governors):\n with open(governor, 'r') as f:\n line = f.read().splitlines()[0]\n logger.debug('%s is set to \\\"%s\\\"', governor, line)\n if line != 'performance':\n logger.warning('please set all scaling_governor to \\\"performance\\\" (using \"sudo ./ondemand.sh start\")')\n return False\n\n return True",
"def get_is_data_available(self):\n return self._data_available",
"def is_available():",
"def check_multiprocessing():\n\n try:\n import multiprocessing\n except ImportError:\n return False\n return True",
"def _workers_available(self) -> bool:\n total_compute_power = sum(self.client.nthreads().values())\n if len(self.futures) < total_compute_power:\n return True\n return False",
"def is_xpu_available():\n xpu_count = int(os.getenv(\"FLAGS_selected_xpus\", \"-1\"))\n if xpu_count < 0:\n return False\n\n if _HAS_FLUID:\n from paddle import fluid\n if not fluid.is_compiled_with_xpu():\n logger.warning(\"Found non-empty XPU_VISIBLE_DEVICES. \\\n But PARL found that Paddle was not complied with XPU, which may cause issues. \\\n Thus PARL will not use XPU.\")\n return False\n if _HAS_PADDLE:\n import paddle\n if not paddle.is_compiled_with_xpu():\n logger.warning(\"Found non-empty XPU_VISIBLE_DEVICES. \\\n But PARL found that Paddle was not complied with XPU, which may cause issues. \\\n Thus PARL will not use XPU.\")\n return False\n return True",
"def check_cpu_constrained():\n return psutil.cpu_percent(1) > 75",
"def has_data_flow(self) -> bool:\n return self.data_flow_steps is not None",
"def _schedTest(self):\n if not self._hasSlices(): # There are no migratory tasks, so let's check utilization\n return self.util() <= 1.0\n else:\n return self._qpa()",
"def available(self) -> bool:\n return (\n super().available\n and self.coordinator.data is not None\n and self.module_id in self.coordinator.data\n and self.data_id in self.coordinator.data[self.module_id]\n )",
"def detect_available():\n global _CUDA_AVAILABLE\n if _CUDA_AVAILABLE is not None: return _CUDA_AVAILABLE\n _CUDA_AVAILABLE = shell.run('{} -c \"import torch;print(torch.cuda.is_available())\"'.format(sys.executable)).strip('\\n') == 'True'\n return _CUDA_AVAILABLE",
"def is_gpu_available():\n ret = get_gpu_count() > 0\n if _HAS_PADDLE:\n import paddle\n if ret is True and not paddle.is_compiled_with_cuda():\n logger.warning(\"Found non-empty CUDA_VISIBLE_DEVICES. \\\n But PARL found that Paddle was not complied with CUDA, which may cause issues. \\\n Thus PARL will not use GPU.\")\n return False\n if _HAS_FLUID:\n from paddle import fluid\n if ret is True and not fluid.is_compiled_with_cuda():\n logger.warning(\"Found non-empty CUDA_VISIBLE_DEVICES. \\\n But PARL found that Paddle was not complied with CUDA, which may cause issues. \\\n Thus PARL will not use GPU.\")\n return False\n return ret",
"def supports_prefetch(self):\n return (hasattr(self.base_dataset, 'supports_prefetch') and\n self.base_dataset.supports_prefetch) or \\\n (hasattr(self.auxiliary_targets, 'supports_prefetch') and self.auxiliary_targets.supports_prefetch)",
"def check_cuda():\n if OS_VERSION[0] == \"Linux\":\n check_cuda_linux()\n elif OS_VERSION[0] == \"Windows\":\n check_cuda_windows()",
"def check_cpu_usage():\n usage = psutil.cpu_percent(1)\n return usage < 73",
"def is_multigpu_child_process():\n return (dist.is_initialized() or \"TORCHELASTIC_RUN_ID\" in os.environ) and os.environ[\"LOCAL_RANK\"] != \"0\"",
"def is_available() -> bool:\n # This function never throws and returns 0 if driver is missing or can't\n # be initialized\n return device_count() > 0",
"def data_available(self):\n return (self.status & 0x08) != 0",
"def check_if_sufficient_memory():\n percent_memory = psutil.virtual_memory().percent\n if percent_memory > 75:\n raise ValueError('Please use a device with more CPU ram or a smaller dataset')",
"def is_distributed() -> NotImplementedError:\n raise NotImplementedError()",
"def is_distributed() -> int:\n return collective.is_distributed()",
"def check_for_data():\n if not (os.path.exists(ep.get_test_data_path()) or os.path.exists(ep.get_dbn_weight_path())):\n return False\n return True",
"def can_run_experiment(self, info, device):\n nb_qubit_max = self.backends[device]['nq']\n nb_qubit_needed = info['nq']\n return nb_qubit_needed <= nb_qubit_max, nb_qubit_max, nb_qubit_needed",
"def gpu_availability():\n # assume if using tensorflow-gpu, then Nvidia GPU is available\n if is_built_with_cuda():\n return len(tf.config.list_physical_devices(\"GPU\")) > 0\n else:\n return False",
"def has_data_flow(self) -> bool:\n return self.graph_count and not self.data_flow_null_count",
"def hardwareConcurrency(self):\n return 1",
"async def _async_has_devices(opp: OpenPeerPower) -> bool:\n # TODO Check if there are any devices that can be discovered in the network.\n devices = await opp.async_add_executor_job(my_pypi_dependency.discover)\n return len(devices) > 0",
"def is_gpu_available() -> bool:\n return torch.cuda.is_available()",
"def hasaccelerator():\n\n return torch.cuda.is_available() or torch.backends.mps.is_available() or bool(Models.finddevice())",
"def data_available(dataset_name=None):\r\n for file_list in data_resources[dataset_name]['files']:\r\n for file in file_list:\r\n if not os.path.exists(os.path.join(data_path, dataset_name, file)):\r\n return False\r\n return True",
"def is_free(self) -> tuple:\n if self.running_procs >= self.procs_no:\n return (False, None)\n if self.gpus:\n for gpu in self.gpus:\n if self.gpu_running_procs[gpu] < self.per_gpu[gpu]:\n return (True, gpu)\n return (False, None)\n return (True, None)",
"def has_datamask(self):\n return self.datamask is not None",
"def is_busy(self):\n threads = len(self.executor._threads)\n if threads == 0:\n return False\n\n capacity = self.executor._work_queue.qsize() / float(threads)\n if capacity > 2:\n return True\n elif capacity < 1:\n return False\n else:\n return capacity > (random.random() + 1)",
"def parallelism_per_kpu(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"parallelism_per_kpu\")",
"def is_distributed(self) -> bool:\n return self.size > 1",
"def is_available_while_running(cls) -> bool:\n\n return True",
"def does_support_multiobjective() -> bool:\n multioutput = False\n return multioutput",
"def verify_support():\n ostype, majorrelease, _ = get_os_release_data()\n if ostype not in _supported_os:\n _logger.info('OS type %s is not supported.', ostype)\n return False\n if majorrelease not in _supported_release:\n _logger.info('OS %s %s is not supported', ostype, majorrelease)\n return False\n return True",
"def checkCUDAisAvailable():\n # some possible lib names \n libnames = ('libcuda.so', 'libcuda.dylib', 'cuda.dll')\n libsOk = True\n for libname in libnames:\n try:\n cuda = ctypes.CDLL(libname)\n except OSError:\n continue\n else:\n break\n else:\n libsOk = False\n return libsOk",
"def can_use_mpi_pool():\n return ALLOW_SPAWN or ALREADY_RUNNING_AS_MPI",
"def check_cpu_usage():\n usage = psutil.cpu_percent(1)\n return usage < 75",
"def data_ready(self) -> bool:\n data_ready = ctypes.c_uint8()\n Utils.check(VL53L1X_C_LIBRARY.VL53L1_GetMeasurementDataReady(self.dev, byref(data_ready)))\n return data_ready.value != 0",
"def available(self) -> bool:\n return super().available and bool(self.data)",
"def parallelism(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"parallelism\")",
"def data_ready(self):\n data_ready = len(self.barcode) > 0\n data_ready &= self.price > 0\n data_ready &= len(self.description) > 0\n return data_ready",
"def check_supported_features(self):",
"def isOnNao():\n szCpuInfo = \"/proc/cpuinfo\";\n if not os.path.exists( szCpuInfo ): # already done by the getFileContents\n return False;\n szAllFile = getFileContents( szCpuInfo, bQuiet = True );\n if( szAllFile.find( \"Geode\" ) == -1 and szAllFile.find( \"Intel(R) Atom(TM)\" ) == -1 ):\n return False;\n return True;",
"def is_available(self) -> bool:\n raise NotImplementedError",
"def _migration_supported(self):\n if self.compute_cnt > 1:\n return True\n return False",
"def is_available(self) -> bool:\n raise NotImplementedError() # pragma: nocover",
"def isParallelMS(vis):\n \n msTool = mstool()\n if not msTool.open(vis):\n raise ValueError, \"Unable to open MS %s,\" % vis\n rtnVal = msTool.ismultims() and \\\n isinstance(msTool.getreferencedtables(), list)\n\n msTool.close()\n return rtnVal",
"def num_dataload_workers() -> int:\n return 4 if common_util.is_linux() else 0",
"def check_Data(self):\r\n \r\n if self._target_data is None:\r\n self.processData()",
"def isSupportedData(self, data, info):\n return True",
"def is_low_core_system(ihost, dba):\n cpu_list = dba.icpu_get_by_ihost(ihost['uuid'])\n number_physical_cores = 0\n for cpu in cpu_list:\n if int(cpu['thread']) == 0:\n number_physical_cores += 1\n return number_physical_cores <= constants.NUMBER_CORES_XEOND",
"def is_concurrent(self):\n return self.concurrent",
"def available(self) -> bool:\n raise NotImplementedError",
"def _validate_resources(self):\n resources = self.options.resources\n\n for key in ['num_machines', 'num_mpiprocs_per_machine', 'tot_num_mpiprocs']:\n if key in resources and resources[key] != 1:\n raise exceptions.FeatureNotAvailable(\n f'Cannot set resource `{key}` to value `{resources[key]}` for `{self.__class__.__name__}`: '\n 'parallelization is not supported, only a value of `1` is accepted.'\n )",
"def supports_prefetch(self):\r\n return False",
"def may_data_parallel(model):\n if torch.cuda.device_count() > 1:\n model = TransparentDataParallel(model)\n return model",
"def _check_hardware_control(self):\n if self._under_hardware_control:\n v_input = self._ai_client.get_ai_voltage(self._hwc_ai_channel, max_range=10) #CHeck status of hwc voltage input\n v_input = v_input[-1]\n if self._is_stabilizing:\n if v_input < self._hwc_thresh:\n self.stop()\n else:\n if v_input > self._hwc_thresh:\n self.start()",
"def is_available(self):\n raise NotImplementedError",
"def checkCpu(self):\n cpu = self.getCpu()\n err_msg = []\n task_result = device_status = 0\n\n if cpu is None:\n err_msg.append('Get CPU info failed')\n task_result = device_status = 1\n else:\n # 以后可扩展告警条件\n pass\n return cpu, err_msg, task_result, device_status",
"def is_computing(self):\n raise NotImplementedError",
"def _check_all_systems_ready(self):\n raise NotImplementedError()",
"def requiresData():\n return True",
"def available():\n if \"MojoPipeline\" not in h2o.cluster().list_core_extensions():\n print(\"Cannot use MOJO Pipelines - runtime was not found.\")\n return False\n else:\n return True",
"def is_supported(self) -> bool:\n\n # TODO logging ?\n # TODO ICMP error if ttl is zero\n return self._version == 4 and self._ihl >= 5 and self._ttl != 0",
"def Check_Communications(self):\n self.serial_status = False\n try:\n self.serial_status = self.ser.isOpen()\n except Exception as e:\n print \"No communication to stage serial bus. Exception of type %s and args = \\n\"%type(e).__name__, e.args \n self.serial_status = False\n self.encoder_status = False\n try:\n self.encoder_status = True\n for i in range(3):\n value = self.fd_channel[i].read(3)+b'\\x00' \n # read the 24 bit register (3 bytes) and add a fourth byte \n # to make it an integer.\n signed_value = struct.unpack(\"=I\", value)[0] \n if signed_value < 0 or signed_value > 2**24:\n self.encoder_status = False\n break\n except Exception as e:\n print \"No communication to optical encoders. Exception of type %s and args = \\n\"%type(e).__name__, e.args \n self.encoder_status = False\n self.comm_status = self.serial_status and self.encoder_status\n return",
"def check_cpu_usage():\n usage = psutil.cpu_percent(1)\n print(\"DEBUG:usage:{}\".format(usage))\n return usage < 75",
"def available_on_system(cls):\n return (cls.reason_to_be_disabled() is None)",
"def has_omp(self):\n return hasattr(self, \"omp_env\") and bool(getattr(self, \"omp_env\"))",
"def other_threads_are_active():\n return len(fake_threads) >= 2",
"def test_cpu_logical_cores_value(self):\n \n cpu_logical_cores = get_cpu_information()[3]\n \n # Check to make sure the number of logical cores is 8\n self.assertEqual(cpu_logical_cores, 8)",
"def is_available() -> bool:\n return HAVE_RLE",
"def evaluate_hardware_support(self):\n return hardware.HardwareSupport.SERVICE_PROVIDER",
"def ComputeEAvailable(self):\r\n pass",
"def available(self):\n\t\t\treturn False",
"def available(self):\n\t\t\treturn False",
"def available(self):\n\t\t\treturn False",
"def available(self) -> bool:\n return True",
"def available(self) -> bool:\n return True",
"def has_cuda_context():\n init_once()\n if not nvmlInitialized:\n return False\n for index in range(device_get_count()):\n handle = pynvml.nvmlDeviceGetHandleByIndex(index)\n if hasattr(pynvml, \"nvmlDeviceGetComputeRunningProcesses_v2\"):\n running_processes = pynvml.nvmlDeviceGetComputeRunningProcesses_v2(handle)\n else:\n running_processes = pynvml.nvmlDeviceGetComputeRunningProcesses(handle)\n for proc in running_processes:\n if os.getpid() == proc.pid:\n return index\n return False",
"def check_cpu_for_proc(std_output):\n res = False\n std_output = std_output.split('\\n')\n for curline in std_output:\n if curline :\n data = curline.split()\n if float(data[2]) >= float(CPU_MIN_VAL_FOR_PROC):\n res = True\n return res",
"def check_platform():\n system = platform.system()\n distro = platform.platform()\n is_raspberry_pi = False\n try:\n info = open(\"/proc/cpuinfo\").read()\n except FileNotFoundError:\n is_raspberry_pi = False\n else:\n # bcm2708: Raspberry Pi 1\n # bcm2709: Raspberry Pi 2\n # bcm2710: Raspberry Pi 3\n is_raspberry_pi = 'BCM27' in info or 'ODROID' in info\n\n return system == \"Linux\" and (\n os.path.isfile('/proc/device-tree/hat/uuid') or\n 'boot2docker' in distro.lower() or\n is_raspberry_pi or\n os.path.isfile('/sys/hypervisor/uuid') or\n os.path.isdir('/var/lib/digitalocean')\n )",
"def is_available(self):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)",
"def _should_run_now(self):\n # Assumes the unit/all values will have values.\n if not len(self._device_values.keys()) > 0:\n return False\n return not len(self._needed_devices) > 0",
"def available(self):\n\t\traise NotImplementedError",
"def _check_open_blas():\n if np.__config__.get_info('openblas_info') and os.environ.get('OPENBLAS_NUM_THREADS') != '1':\n log.warn(\"OpenBLAS detected. Its highly recommend to set the environment variable \"\n \"'export OPENBLAS_NUM_THREADS=1' to disable its internal multithreading\")",
"def _check_all_systems_ready(self):\n self._check_all_sensors_ready()\n return True",
"def _check_all_systems_ready(self):\n self._check_all_sensors_ready()\n return True",
"def available(self):\n\t\t\treturn True",
"def available(self):\n\t\t\treturn True",
"def available(self):\n\t\t\treturn True"
] | [
"0.63122064",
"0.6217546",
"0.6193051",
"0.61780185",
"0.61725044",
"0.61503655",
"0.6127145",
"0.6078269",
"0.6058447",
"0.603029",
"0.6027548",
"0.59874004",
"0.5955343",
"0.5927671",
"0.58607745",
"0.58542687",
"0.58361876",
"0.58319396",
"0.58313346",
"0.58165216",
"0.58047605",
"0.5789315",
"0.57813597",
"0.576614",
"0.57457197",
"0.5716463",
"0.5712461",
"0.5698685",
"0.569107",
"0.56755656",
"0.5672325",
"0.56503505",
"0.5647756",
"0.5613471",
"0.56095976",
"0.5596012",
"0.5595269",
"0.55877846",
"0.55699676",
"0.5565921",
"0.55366886",
"0.5535863",
"0.55237997",
"0.5523364",
"0.5521317",
"0.55189556",
"0.5513541",
"0.55103135",
"0.5505134",
"0.54929805",
"0.5489641",
"0.5482765",
"0.5456972",
"0.54463154",
"0.5441153",
"0.5439282",
"0.54317796",
"0.54279286",
"0.5419183",
"0.5417818",
"0.54156554",
"0.5414749",
"0.5405343",
"0.53969455",
"0.53950053",
"0.5394399",
"0.53896874",
"0.5388635",
"0.5385589",
"0.5381537",
"0.5379241",
"0.5370217",
"0.5366312",
"0.5365927",
"0.5364994",
"0.53581434",
"0.5353077",
"0.53472173",
"0.5344048",
"0.53360146",
"0.5308837",
"0.53060997",
"0.52962255",
"0.5284879",
"0.5284879",
"0.5284879",
"0.52820176",
"0.52820176",
"0.528046",
"0.52664626",
"0.5250024",
"0.5241325",
"0.52376425",
"0.52341616",
"0.52219874",
"0.5215771",
"0.5215771",
"0.5213993",
"0.5213993",
"0.5213993"
] | 0.66771305 | 0 |
Responsible for converting a LogRecord to a string. | def format(self, record):
return '[{}] {}'.format(QBShFormatter.LEVEL_DICT[record.levelname], record.getMessage()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def format(self, record: LogRecord) -> str:\n json_record: Dict = self.json_record(record.getMessage(), record)\n mutated_record: Dict = self.mutate_json_record(json_record)\n mutated_record = mutated_record if mutated_record is not None else json_record\n\n return self.to_json(mutated_record)",
"def format(self, record):\n # type: (LogRecord) -> str\n try:\n return str(getattr(self, record.levelname)(record))\n except AttributeError as err:\n raise RuntimeError('Unknown record level (name: %s)' % record.levelname) from err",
"def makePickle(self, record: logging.LogRecord) -> bytes:\n return (self.format(record)).encode(encoding='utf-8')",
"def format(self, record: logging.LogRecord) -> str:\n return filter_datum(self.fields, self.REDACTION,\n super().format(record), self.SEPARATOR)",
"def format(self, record):\n extra = {\n \"message\": record.getMessage(),\n \"time\": self.formatTime(record, self.datefmt),\n \"msecs\": record.msecs,\n \"name\": record.name,\n \"level\": record.levelname,\n }\n\n keys = filter(self.filterer, record.__dict__)\n extra.update({k: record.__dict__[k] for k in keys})\n return str(CustomEncoder().encode(extra))",
"def _records_to_string(records):\n parts = []\n for record in records:\n parts.append('\\n'.join(f'{k}: {v}' for k, v in record.items()))\n return '\\n\\n'.join(parts) + '\\n'",
"def format(self, record: LogRecord) -> str:\n record.asctime = datetime.datetime.utcnow().strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\")\n\n message = record.getMessage()\n if record.exc_info:\n eno = record.exc_info\n stacktrace = \"\".join(traceback.format_exception(None, eno[1], eno[2]))\n message += f\" excp: {stacktrace}\"\n if record.stack_info:\n stack = self.formatStack(record.stack_info)\n message += f\" trace: {stack}\"\n\n log_output = {\n \"tool\": type(self.checker).__name__,\n \"type\": \"infrastructure\",\n \"severity\": record.levelname,\n \"severityLevel\": max(0, record.levelno // 10 - 1),\n \"timestamp\": record.asctime,\n \"module\": record.module,\n \"function\": record.funcName,\n \"flag\": self.checker.flag,\n \"flagIndex\": self.checker.flag_idx,\n \"runId\": self.checker.run_id,\n \"roundId\": self.checker.round,\n \"relatedRoundId\": self.checker.flag_round,\n \"message\": message,\n \"teamName\": self.checker.team,\n \"teamId\": self.checker.team_id,\n \"serviceName\": self.checker.service_name,\n \"method\": self.checker.method,\n }\n\n return LOGGING_PREFIX + json.dumps(log_output)",
"def get_formatted_record(self, record_format: str = None) -> str:\n if record_format:\n return record_format.format_map(defaultdict(str, **self.dict_values))\n raise RecordFormatError(\"Format string must be set\")",
"def format(self, record):\n row = [self.formatTime(record, self.datefmt), record.name, record.levelname]\n keys = filter(self.filterer, record.__dict__)\n extra = [record.__dict__[k] for k in keys]\n\n self.writer.writerow(row + extra + [record.getMessage()])\n data = self.output.getvalue()\n self.output.truncate(0)\n self.output.seek(0)\n return data.strip()",
"def _log_str(self):\n return (\n \"[name: {}, id: {}]\"\n .format(self._raw['Name'] if self._raw else \"<not retrieved>\", self._id)\n )",
"def format(self, record: logging.LogRecord = None) -> str:\n # s = super().format(record)\n s = None\n e = {}\n e['id'] = uuid.uuid4().hex\n e['message'] = record.getMessage()\n # log.warning('record.message: %r', record.getMessage())\n # log.warning('record.args: %r', record.args)\n e['created'] = record.created\n e['priority'] = record.levelname\n e['args'] = record.args\n e['source_code'] = {}\n e['source_code']['pathname'] = record.pathname\n e['source_code']['funcName'] = record.funcName\n e['source_code']['lineno'] = record.lineno\n ctx = record.args.get(PIPELINE_CONTEXT_KEY, None)\n if ctx:\n e[PIPELINE_CONTEXT_KEY] = ctx.toDict()\n # use array enclosure a[] to mainain the log file\n # yaml compliant as new events are appended\n # - event1:\n # - event2:\n # - ...\n a = [e]\n s = yaml.dump(a)\n return s",
"def to_json(self, record: Mapping[str, Any]) -> str:\n return self.json_lib.dumps(record, cls=ObjectEncoder)",
"def to_str(self) -> str:",
"def telemetry_to_string(self, telemetry):\n _log_line = \"%s,%s,%d,%.5f,%.5f,%.1f,%.1f,%s,%.3f\\n\" % (\n telemetry['datetime'],\n telemetry['id'],\n telemetry['frame'],\n telemetry['lat'],\n telemetry['lon'],\n telemetry['alt'],\n telemetry['temp'],\n telemetry['type'],\n telemetry['freq_float'])\n\n # TODO: Add Aux data, if it exists.\n\n return _log_line",
"def to_record(self, val):\n while len(val) < self.length:\n val.append(self.record_class())\n return ''.join([v.to_record() for v in val])",
"def format(self, record):\n log_fmt = self.FORMATS.get(record.levelno)\n return BaseFormatter(log_fmt).format(record)",
"def encode(record: Tuple[MeasureInput, MeasureResult]) -> str:\n return dump_record_to_string(*record)",
"def write(self, record):\n for _, value in record.items():\n self.stringbuffer.append(repr(value))",
"def makePickle(self, record):\n return self.format(record) + \"\\n\"",
"def format(self, record):\n mappings = {\n 'asctime': create_timestamp,\n 'message': lambda r: r.msg,\n }\n\n formatters = self.parse()\n\n log_record = {}\n for formatter in formatters:\n try:\n log_record[formatter] = mappings[formatter](record)\n except KeyError:\n log_record[formatter] = record.__dict__[formatter]\n\n return json.dumps(log_record)",
"def write_record(self, record, buffer_cap=10000):\n if record is not None:\n # We need to enforce the correct encoding for both versions of python\n if sys.version_info[0] < 3:\n if isinstance(record, list):\n self.__queue.append([str(s).decode('utf-8') if isinstance(s, unicode) is False else s for s in record])\n else:\n self.__queue.append([str(s).decode('utf-8') if isinstance(s, unicode) is False else s for s in record.values()])\n else:\n if isinstance(record, list):\n self.__queue.append([s.decode('utf-8') if isinstance(s, bytes) else str(s) for s in record])\n else:\n self.__queue.append(s.decode('utf-8') if isinstance(s, bytes) else str(s) for s in record.values())\n if len(self.__queue) > buffer_cap:\n self.flush_record()\n return \"\"",
"def makePickle(self, record):\n ei = record.exc_info\n if ei:\n # just to get traceback text into record.exc_text ...\n dummy = self.format(record)\n # See issue #14436: If msg or args are objects, they may not be\n # available on the receiving end. So we convert the msg % args\n # to a string, save it as msg and zap the args.\n d = dict(record.__dict__)\n d['msg'] = record.msg # This line has been changed\n d['args'] = None\n d['exc_info'] = None\n # Issue #25685: delete 'message' if present: redundant with 'msg'\n d.pop('message', None)\n s = pickle.dumps(d, 1)\n slen = struct.pack(\">L\", len(s))\n return slen + s",
"def __str__(self):\n return str(self.LOG_TYPES[self.name])",
"def format(self, record):\n record.message = indent_string(record.getMessage())\n if \"%(asctime)\" in self._fmt:\n record.asctime = self.formatTime(record, self.datefmt)\n s = self._fmt % record.__dict__\n if record.exc_info:\n # Cache the traceback text to avoid converting it multiple times\n # (it's constant anyway)\n if not record.exc_text:\n record.exc_text = self.formatException(record.exc_info)\n if record.exc_text:\n if s[-1:] != \"\\n\":\n s = s + \"\\n\"\n s = \"{0} Exception:\\n {1}\".format(s, indent_string(record.exc_text))\n return s",
"def _tostr(obj): # pragma: no cover\n return obj if isinstance(obj, str) else obj.decode()",
"def mapLogRecord(self, record):\n newrec = record.__dict__\n for p in self.params:\n newrec[p] = self.params[p]\n maxParamLength = 4000\n # truncate and clean the message from non-UTF-8 characters\n try:\n newrec['msg'] = newrec['msg'][:maxParamLength].decode('utf-8', 'ignore').encode('utf-8')\n except Exception:\n pass\n try:\n newrec['message'] = newrec['message'][:maxParamLength].decode('utf-8', 'ignore').encode('utf-8')\n except Exception:\n pass\n return newrec",
"def _convert_to_str(self, data):\n raise NotImplementedError()",
"def raw_recordval(record, key):\n if key in record:\n return str(record[key]).strip()\n return \"\"",
"def format(self, record):\n message = {\n \"time\": datetime.utcfromtimestamp(record.created).isoformat(),\n \"level\": record.levelname,\n \"name\": record.name,\n \"message\": record.getMessage(),\n \"process\": record.process,\n \"thread\": record.threadName,\n \"hostname\": self.hostname,\n \"filename\": record.filename,\n \"function\": record.funcName,\n \"lineNo\": record.lineno,\n }\n\n if record.exc_info:\n message[\n \"exception\"\n ] = f\"{record.exc_info[0].__name__}: {record.exc_info[1]}\"\n message[\"traceback\"] = traceback.format_exc()\n\n return json.dumps(message, ensure_ascii=False)",
"def log_message(self, build_id, record):\n # Todo: provide \"shortcut\" methods to convert the traceback\n # (from exc_info) to a serializable object, and to clean\n # up the record object for decent serialization in the\n # database.\n pass",
"def encode_record(record):\n return json.dumps(record)",
"def to_string(self):\r\n return self.__str__()",
"def __str__(self) -> str:\n # stringifying a field as its field adds some convenience for cases where we need the field\n # name\n return cast(str, self._resolve_field_name(\"\"))",
"def _tostr(t):\n\treturn t.__unicode__()",
"def __str__(self):\n buf = StringIO()\n self.write_to(buf)\n return buf.getvalue()",
"def prepare(self, record: LogRecord):\n # The format operation gets traceback text into record.exc_text\n # (if there's exception data), and also returns the formatted\n # message. We can then use this to replace the original\n # msg + args, as these might be unpickleable. We also zap the\n # exc_info and exc_text attributes, as they are no longer\n # needed and, if not None, will typically not be pickleable.\n\n # Not nedded, since we use tblib\n # msg = self.format(record)\n # # bpo-35726: make copy of record to avoid affecting other handlers in the chain.\n # record = copy.copy(record)\n # record.message = msg\n # record.msg = msg\n # record.args = None\n # record.exc_info = None\n # record.exc_text = None\n return ['log_msg', record]",
"def get_log_string(self):\n\n\t\tresult = json.dumps(self.data, sort_keys=True)\n\n\t\tif self.intrusion is not None and self.intrusion != \"\":\n\t\t\tresult += \",{}\".format(self.intrusion)\n\n\t\treturn result",
"def test_str(self):\n log = self.log\n\n self.assertEqual(str(log), self.log_raw['name'])",
"def cee_dict_to_rsyslog(cls, cee_dict):\n structured_data = cee_dict.get('native')\n if structured_data is not None:\n structured_data = cls.sd_dict_to_syslog_str(structured_data)\n\n log = ('<{pri}>{ver} {time} {host} {app} {pid} {msgid} {sd} '\n '{msg}').format(\n pri=cee_dict.get('pri'),\n time=cee_dict.get('time', '-'),\n ver=cee_dict.get('ver'),\n host=cee_dict.get('host', '-'),\n app=cee_dict.get('pname', '-'),\n pid=cee_dict.get('pid', '-'),\n msgid=cee_dict.get('msgid', '-'),\n sd=structured_data or '-',\n msg=cee_dict.get('msg'))\n\n return b'{length} {syslog}'.format(length=len(log), syslog=log)",
"def convert_trans_to_string(self, transaction):\r\n #note, repr will not work because it doesn't remove curly brackets and colons\r\n record_list = []\r\n for mode, trans in transaction.iteritems():\r\n record_list.append(str(\"mode: \" + mode + \" \"))\r\n for product,quantity in trans.iteritems():\r\n record_list.append(str(product + \":\"))\r\n record_list.append(str(quantity) + \" \")\r\n \r\n record_string = \"\".join(record_list) + \"\\n\"\r\n return record_string",
"def as_str(self):\n return self.as_type(str)",
"def get_str(self, time_zone=None, verbose=False, csv=False):\n time_str_nice = self.time.to_readable(time_zone)\n raw_time_str = str(self.time)\n ch_name = self.template.get_full_name()\n fmt_str = self.template.get_format_str()\n if self.val_obj is None:\n ch_val = \"EMPTY CH OBJ\"\n elif fmt_str:\n ch_val = fmt_str % (self.val_obj.val)\n else:\n ch_val = str(self.val_obj.val)\n\n if verbose and csv:\n return \"%s,%s,%s,%d,%s\" % (\n time_str_nice,\n raw_time_str,\n ch_name,\n self.id,\n ch_val,\n )\n elif verbose and not csv:\n return \"%s: %s (%d) %s %s\" % (\n time_str_nice,\n ch_name,\n self.id,\n raw_time_str,\n ch_val,\n )\n elif not verbose and csv:\n return \"{},{},{}\".format(time_str_nice, ch_name, ch_val)\n else:\n return \"{}: {} = {}\".format(time_str_nice, ch_name, ch_val)",
"def toStr(self, protoObj):\n return text_format.MessageToString(protoObj)",
"def toStr(self, protoObj):\n return text_format.MessageToString(protoObj)",
"def format(self, record):\n data = dict()\n\n data[\"category\"] = record.name\n data[\"timestamp\"] = datetime.datetime.utcnow()\\\n .replace(tzinfo=utc)\\\n .strftime('%Y-%m-%dT%H:%M:%SZ')\n data[\"level\"] = record.levelname\n data[\"message\"] = record.msg\n data[\"threadName\"] = record.threadName\n data[\"hostname\"] = self.hostname\n \n return data",
"def qrelEntry2Str(qrelEntry):\n return genQrelStr(qrelEntry.queryId, qrelEntry.docId, qrelEntry.relGrade)",
"def _type_str(self):\n try:\n record_name = RECORD_TYPES[self.type]\n return '{0} record'.format(record_name)\n except KeyError:\n return '{0} type 0x{1:04x}'.format(self.__class__.__name__,\n self.type)",
"def __str__(self):\n return bytes_to_string(self._bytes)",
"def get_str(self, name):\n return str(self.field(name).toString())",
"def serialize(self):\n return self.record",
"def str_payload(self):\n self._payload_to_str()\n return self._str_payload",
"def serialize(self, pid, record, links_factory=None):\n record = self.process_dict(self.transform_record(pid, record, links_factory))\n\n return self._format_csv([record])",
"def format(self, record):\n data = {}\n\n data[\"category\"] = record.name\n data[\"timestamp\"] = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')\n data[\"level\"] = record.levelname\n data[\"message\"] = record.msg\n data[\"threadName\"] = record.threadName\n return json.dumps(data)",
"def __build_message_to_print_in_log(log: LogModel) -> Optional[str]:\n\n if log is None:\n return None\n\n log_level_name: str = LogHelper.get_log_level_name(log.log_level)\n message: str = \\\n f'{log.creation_date} |->\\t[{log_level_name}]\\t{log.message}\\t\\t[Line: {log.line_number}]\\t[{log.filename}]'\n\n return message",
"def as_str(self) -> str:\n if isinstance(self.data, str):\n return self.data\n elif isinstance(self.data, bytes):\n return self.data.decode()\n else:\n return bytes(self.data).decode()",
"def formatter(record):\n\n lines = record[\"message\"].splitlines()\n prefix = (\n \"{time:YY-MM-DD HH:mm:ss.S} | {level.name:<8} | \"\n + \"{file}.{function}:{line} - \".format(**record)\n )\n indented = (\n lines[0] + \"\\n\" + \"\\n\".join(\" \" * len(prefix) + line for line in lines[1:])\n )\n record[\"message\"] = indented.strip()\n return (\n \"<g>{time:YY-MM-DD HH:mm:ss.S}</> | <lvl>{level.name:<8}</> | \"\n + \"<e>{file}.{function}:{line}</> - <lvl>{message}\\n</>{exception}\"\n )",
"def bytes_to_str(self, data):\n if isinstance(data, str):\n return data\n return data.decode(\"utf-8\")",
"def toString(self) -> unicode:\n ...",
"def toString(self) -> unicode:\n ...",
"def __str__(self):\n outstr = self._field1\n return outstr",
"def __str__(self):\n return \"{0} {1} {2} {3}\".format(self.get_domain(), self.get_record_type(), self.get_value(), self.get_ttl())",
"def ts2str(self,ts):\n dttime = datetime.datetime.fromtimestamp(ts)\n return self.format_datetime(dttime)",
"def format(self, record):\n message = record.getMessage()\n asctime = self.formatTime(record, self.datefmt)\n name = yellow(record.name)\n\n s = \"%(timestamp)s %(levelname)s %(name)s \" % {\n \"timestamp\": green(\"%s,%03d\" % (asctime, record.msecs), bold=True),\n \"levelname\": self.LEVELS[record.levelname],\n \"name\": name,\n }\n\n if \"\\n\" in message:\n indent_length = len(re_color_codes.sub(\"\", s))\n message = message.replace(\"\\n\", \"\\n\" + \" \" * indent_length)\n\n s += message\n return s",
"def get_string_value(self, obj, field):\n return smart_unicode(field.value_to_string(obj))",
"def to_string(obj, attr):\n val = getattr(obj, attr)\n if val is None:\n return ''\n if type(val) is str:\n return val\n elif type(val) is int:\n return str(val)\n elif type(val) is float:\n return \"{:.02f}\".format(val)\n elif hasattr(val, 'strftime'):\n return val.strftime(\"%Y-%m-%d\")\n return val.decode()",
"def to_str(self):\n return pformat(self.to_dict())",
"def to_str(self):\n return pformat(self.to_dict())",
"def to_str(self):\n return pformat(self.to_dict())",
"def to_str(self):\n return pformat(self.to_dict())",
"def to_str(self):\n return pformat(self.to_dict())",
"def to_str(self):\n return pformat(self.to_dict())",
"def to_str(self):\n return pformat(self.to_dict())",
"def to_str(self):\n return pformat(self.to_dict())",
"def to_str(self):\n return pformat(self.to_dict())",
"def to_str(self):\n return pformat(self.to_dict())",
"def to_str(self):\n return pformat(self.to_dict())",
"def to_str(self):\n return pformat(self.to_dict())",
"def to_str(self):\n return pformat(self.to_dict())",
"def to_str(self):\n return pformat(self.to_dict())",
"def to_str(self):\n return pformat(self.to_dict())",
"def to_str(self):\n return pformat(self.to_dict())",
"def to_str(self):\n return pformat(self.to_dict())",
"def to_str(self):\n return pformat(self.to_dict())",
"def to_str(self):\n return pformat(self.to_dict())",
"def to_str(self):\n return pformat(self.to_dict())",
"def to_str(self):\n return pformat(self.to_dict())",
"def to_str(self):\n return pformat(self.to_dict())",
"def to_str(self):\n return pformat(self.to_dict())",
"def to_str(self):\n return pformat(self.to_dict())",
"def to_str(self):\n return pformat(self.to_dict())",
"def to_str(self):\n return pformat(self.to_dict())",
"def to_str(self):\n return pformat(self.to_dict())",
"def to_str(self):\n return pformat(self.to_dict())",
"def to_str(self):\n return pformat(self.to_dict())",
"def to_str(self):\n return pformat(self.to_dict())",
"def to_str(self):\n return pformat(self.to_dict())",
"def to_str(self):\n return pformat(self.to_dict())",
"def to_str(self):\n return pformat(self.to_dict())",
"def to_str(self):\n return pformat(self.to_dict())",
"def to_str(self):\n return pformat(self.to_dict())"
] | [
"0.7008044",
"0.69639575",
"0.69169354",
"0.6403992",
"0.6396035",
"0.63660896",
"0.6323886",
"0.61869884",
"0.6149548",
"0.6103099",
"0.6052202",
"0.60219127",
"0.5971884",
"0.5971298",
"0.59584796",
"0.59510505",
"0.59124696",
"0.58543694",
"0.5848523",
"0.584811",
"0.58463186",
"0.57882416",
"0.57852817",
"0.5750084",
"0.57151425",
"0.56566584",
"0.5644273",
"0.5624602",
"0.5603908",
"0.5596489",
"0.55944777",
"0.55782723",
"0.5568114",
"0.556362",
"0.55576056",
"0.55555946",
"0.5544302",
"0.5535786",
"0.55303144",
"0.5521798",
"0.55029124",
"0.54929113",
"0.54898024",
"0.54898024",
"0.5486767",
"0.5485397",
"0.5480836",
"0.54790914",
"0.54761356",
"0.54461104",
"0.54245895",
"0.54158413",
"0.5408533",
"0.5398161",
"0.5397343",
"0.5380026",
"0.5370746",
"0.53595895",
"0.53595895",
"0.5358259",
"0.53579146",
"0.53532565",
"0.5347904",
"0.5340609",
"0.5325421",
"0.5294313",
"0.5294313",
"0.5294313",
"0.5294313",
"0.5294313",
"0.5294313",
"0.5294313",
"0.5294313",
"0.5294313",
"0.5294313",
"0.5294313",
"0.5294313",
"0.5294313",
"0.5294313",
"0.5294313",
"0.5294313",
"0.5294313",
"0.5294313",
"0.5294313",
"0.5294313",
"0.5294313",
"0.5294313",
"0.5294313",
"0.5294313",
"0.5294313",
"0.5294313",
"0.5294313",
"0.5294313",
"0.5294313",
"0.5294313",
"0.5294313",
"0.5294313",
"0.5294313",
"0.5294313",
"0.5294313"
] | 0.57463837 | 24 |
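The formatter record above turns a LogRecord into "[<level tag>] <message>" through a class-level LEVEL_DICT whose contents are not included in the record. A minimal self-contained sketch of the same pattern, with an invented level-to-tag mapping standing in for the real QBShFormatter.LEVEL_DICT:

import logging

class QBShFormatter(logging.Formatter):
    # Hypothetical mapping; the actual LEVEL_DICT is not shown in the record above.
    LEVEL_DICT = {"DEBUG": "D", "INFO": "I", "WARNING": "W", "ERROR": "E", "CRITICAL": "C"}

    def format(self, record):
        # Render the LogRecord as "[<level tag>] <message>".
        return '[{}] {}'.format(QBShFormatter.LEVEL_DICT[record.levelname], record.getMessage())

logger = logging.getLogger("demo")
handler = logging.StreamHandler()
handler.setFormatter(QBShFormatter())
logger.addHandler(handler)
logger.error("something broke")  # emits: [E] something broke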
Return a command that starts an skvbc replica when passed to subprocess.Popen. The replica is started with a short view change timeout. Note each argument is an element in a list. | def start_replica_cmd_prefix(builddir, replica_id, config):
statusTimerMilli = "500"
path_to_s3_config = os.path.join(builddir, "test_s3_config_prefix.txt")
if replica_id >= config.n and replica_id < config.n + config.num_ro_replicas:
bucket = "blockchain-" + ''.join(random.choice('0123456789abcdefghijklmnopqrstuvwxyz') for i in range(6))
with open(path_to_s3_config, "w") as f:
f.write("# test configuration for S3-compatible storage\n"
"s3-bucket-name:" + bucket + "\n"
"s3-access-key: concordbft\n"
"s3-protocol: HTTP\n"
"s3-url: 127.0.0.1:9000\n"
"s3-secret-key: concordbft\n"
"s3-path-prefix: concord")
os.makedirs(os.path.join(MINIO_DATA_DIR, "data", bucket)) # create new bucket for this run
ro_params = [ "--s3-config-file",
path_to_s3_config
]
path = os.path.join(builddir, "tests", "simpleKVBC", "TesterReplica", "skvbc_replica")
ret = [path,
"-k", KEY_FILE_PREFIX,
"-i", str(replica_id),
"-s", statusTimerMilli,
"-V", os.getenv('BLOCK_CHAIN_VERSION', default="1"),
"-l", os.path.join(builddir, "tests", "simpleKVBC", "scripts", "logging.properties")
]
if replica_id < config.n:
ret += ["--key-exchange-on-start", "--publish-master-key-on-startup"]
if replica_id >= config.n and replica_id < config.n + config.num_ro_replicas:
ret.extend(ro_params)
return ret | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def start_replica_cmd(builddir, replica_id):\n statusTimerMilli = \"500\"\n viewChangeTimeoutMilli = \"10000\"\n path = os.path.join(builddir, \"tests\", \"simpleKVBC\", \"TesterReplica\", \"skvbc_replica\")\n return [path,\n \"-k\", KEY_FILE_PREFIX,\n \"-i\", str(replica_id),\n \"-s\", statusTimerMilli,\n \"-v\", viewChangeTimeoutMilli,\n \"-p\"]",
"def start_replica_cmd(builddir, replica_id):\n statusTimerMilli = \"500\"\n view_change_timeout_milli = \"10000\"\n path = os.path.join(builddir, \"tests\", \"simpleKVBC\", \"TesterReplica\", \"skvbc_replica\")\n return [path,\n \"-k\", KEY_FILE_PREFIX,\n \"-i\", str(replica_id),\n \"-s\", statusTimerMilli,\n \"-v\", view_change_timeout_milli,\n \"-e\", str(True),\n \"--diagnostics-port\", f\"{replica_diagnostic_server_port(replica_id)}\"\n ]",
"def __launch__(self,config,command=None,**kwargs):\n if command is None:\n command = ['sleep 30;','qsub']\n return SampleQsubProcess.__launch__(self,config,command=command,**kwargs)",
"def open_nxview():\n nxview_procc = Popen([\"nxView\"])\n assert nxview_procc is not None, \"Could not start nxlib subprocess\"\n time.sleep(10) # wait for nxview\n return nxview_procc",
"def startCommand(self):\n commandLine = \"su - %s -c \\\"%s/startservers \\\" \" % (self.runAsUser, self.boHome)\n return self.submitCommand(commandLine)",
"def connect_subproc():\n return factory.connect_subproc([sys.executable, \"-u\", SERVER_FILE, \"-q\", \"-m\", \"stdio\"], \n SlaveService)",
"def launch_cvd(instance_name, zone, sig_server_addr, sig_server_port, use_user_disk=True):\n\n cuttlefish_dir = '/usr/local/share/cuttlefish'\n user_data_dir = '/mnt/user_data'\n\n launch_command = f'gcloud compute ssh --zone={zone} {instance_name} -- '\n\n if use_user_disk:\n launch_command += f'HOME={user_data_dir} \\\n ANDROID_HOST_OUT={cuttlefish_dir} \\\n ANDROID_PRODUCT_OUT={cuttlefish_dir} '\n else:\n launch_command += f'HOME={cuttlefish_dir} '\n\n launch_command += f'{cuttlefish_dir}/bin/launch_cvd \\\n --start_webrtc --daemon \\\n --webrtc_sig_server_addr={sig_server_addr} \\\n --webrtc_sig_server_port={sig_server_port} \\\n --start_webrtc_sig_server=false \\\n --webrtc_device_id={instance_name} \\\n --report_anonymous_usage_stats=y'\n\n os.system(launch_command)\n\n print(f'Launched cuttlefish on {instance_name} at {sig_server_addr}:{sig_server_port}')",
"def start_instance(tcserver_dir, instance_name=\"instance1\"):\n print(\"Starting up a tcServer instance...\")\n\n pushdir(tcserver_dir)\n subprocess.call([\"./tcruntime-ctl.sh\", instance_name, \"start\"])\n popdir()",
"def popenCLIExecutable(command, **kwargs):\n\n cliExecutable = command[0]\n\n # hack (at least, this does not scale to other module sources):\n # detect Slicer modules and run through wrapper script setting up\n # appropriate runtime environment\n ma = re_slicerSubPath.search(cliExecutable)\n if ma:\n wrapper = os.path.join(cliExecutable[:ma.start()], 'Slicer')\n if sys.platform.startswith('win'):\n wrapper += '.exe'\n if os.path.exists(wrapper):\n command = [wrapper, '--launcher-no-splash', '--launch'] + command\n\n return subprocess.Popen(command, **kwargs)",
"def Start(self):\n\n\n\n assert not self._process, 'Start() can only be called once'\n self._process = subprocess.Popen(self._args)",
"def do_startstcv(self, args):\n if not self._assert_login():\n return\n\n vm_image = None\n ttl_minutes = 60\n socket = False\n desc = None\n instances = 2\n host = None\n cores = 1\n memory = None\n vlan = None\n ntp_server = None\n license_server = None\n share = True\n static_ip = None\n netmask = None\n gateway = None\n external = False\n\n if args:\n args = args.split()\n missing_arg = 'missing value after'\n while args:\n arg = args.pop(0)\n if arg in ('-i', '--image'):\n if not args:\n print(missing_arg, arg, file=sys.stderr)\n return\n vm_image = args.pop(0)\n elif arg in ('-t', '--ttl'):\n if not args:\n print(missing_arg, arg, file=sys.stderr)\n return\n ttl_minutes = int(args.pop(0))\n elif arg in ('-d', '--desc'):\n if not args:\n print(missing_arg, arg, file=sys.stderr)\n return\n desc = args.pop(0)\n elif arg in ('-n', '--number'):\n if not args:\n print(missing_arg, arg, file=sys.stderr)\n return\n instances = int(args.pop(0))\n elif arg in ('-h', '--host'):\n if not args:\n print(missing_arg, arg, file=sys.stderr)\n return\n host = args.pop(0)\n elif arg in ('-c', '--cores'):\n if not args:\n print(missing_arg, arg, file=sys.stderr)\n return\n cores = int(args.pop(0))\n elif arg in ('-m', '--memory'):\n if not args:\n print(missing_arg, arg, file=sys.stderr)\n return\n memory = int(args.pop(0))\n elif arg == '--socket':\n socket = True\n elif arg == '--vlan':\n if not args:\n print(missing_arg, arg, file=sys.stderr)\n return\n vlan = int(args.pop(0))\n elif arg == '--ntp':\n if not args:\n print(missing_arg, arg, file=sys.stderr)\n return\n ntp_server = args.pop(0)\n elif arg == '--license':\n if not args:\n print(missing_arg, arg, file=sys.stderr)\n return\n license_server = args.pop(0)\n elif arg == '--noshare':\n share = False\n elif arg == '--staticip':\n if not args:\n print(missing_arg, arg, file=sys.stderr)\n return\n static_ip = args.pop(0)\n elif arg == '--netmask':\n if not args:\n print(missing_arg, arg, file=sys.stderr)\n return\n netmask = args.pop(0)\n elif arg == '--gateway':\n if not args:\n print(missing_arg, arg, file=sys.stderr)\n return\n gateway = args.pop(0)\n else:\n print('unrecognized option:', arg, file=sys.stderr)\n return\n\n if not vm_image:\n builds = self._qm.get_available_stc_builds()\n if not builds:\n print('unable to find latest build', file=sys.stderr)\n return\n vm_image = '#' + builds[0]\n\n try:\n vm_ids = self._qm.start_stc_vm(\n self._user, vm_image, ttl_minutes, socket, desc, instances,\n host, share, vlan, memory, cores, external, ntp_server,\n license_server, static_ip, netmask, gateway)\n except Exception as e:\n print('ERROR:', e, file=sys.stderr)\n return\n\n print('Started new vm instances of', vm_image)\n print('\\n'.join(vm_ids))",
"def launch(**kwargs):\n logger.info('launch dream command')\n launch_gui()",
"def launch_vrouter_instance(self):\n # Add code to start vrouter\n if (self.vr_args['vtest_only']):\n self.logger.info(\"VTEST_ONLY_RETURN \" +\n str(self.vr_args['vtest_only']))\n return\n cpid = os.fork()\n if cpid == 0:\n vrouter_cmd_args = [\"taskset\", self.vr_args['taskset'],\n self.vr_args['vrouter_path'], \"--no-daemon\",\n \"--no-huge\", \"--vr_packet_sz\", \"2048\"]\n if self.vr_args['dpdk_args']:\n for dpdk_arg in self.vr_args['dpdk_args'].split(' '):\n vrouter_cmd_args.append(dpdk_arg)\n vrouter_cmd_args.extend([\"--vr_socket_dir\",\n self.vr_args['socket_dir']])\n os.execvp(\"taskset\", vrouter_cmd_args)\n else:\n self.logger.info(\n \"Running cmd - taskset %s %s --no-daemon --no-huge \"\n \"--vr_packet_sz 2048 --vr_socket_dir %s\" %\n (self.vr_args['taskset'],\n self.vr_args['vrouter_path'],\n self.vr_args['socket_dir']))\n self.logger.info(\"pid = \" + str(cpid))\n self.pid = cpid\n count = 0\n ret = 0\n while (count < 10):\n cmd = \"lsof \" + self.vr_args['socket_dir'] +\\\n \"/dpdk_netlink | wc -l\"\n self.logger.info(\"Running cmd - {}\".format(cmd))\n try:\n ret = subprocess.check_output(cmd, shell=True)\n # check if the netlink is up using the ret value\n if (ret == \"2\\n\"):\n break\n else:\n time.sleep(1)\n count += 1\n except Exception as e:\n self.logger.error(e)\n time.sleep(1)\n count += 1\n if (ret != \"2\\n\"):\n self.logger.error(\"Failed to bringup vrouter\")\n return -1\n else:\n return 0",
"def _powershell(args):\n\n def _repl(src, address, port):\n return src.strip().replace(\"__IP__\", address).replace(\"__PORT__\", str(port))\n\n pscmds = [\n _repl(i, args.address, args.port) for i in PSCMDS.splitlines() if i.strip()\n ]\n\n remote_commands = [\n f\"socat TCP4:{args.address}:{args.port} EXEC:'cmd.exe',pipes\",\n f\"socat TCP4:{args.address}:{args.port} EXEC:'powershell.exe',pipes\",\n f\"ncat.exe {args.address} {args.port} -e powershell.exe\",\n f\"ncat.exe {args.address} {args.port} -e cmd.exe\",\n ]\n\n for i in pscmds:\n remote_commands.append(f'powershell -nop -c \"{i}\"')\n remote_commands.append(f\"powershell -nop -e {powershell_base64_encode(i)}\")\n\n lcmd = f'rlwrap socat tcp-listen:\"{args.port}\" STDOUT'\n _print_remote(remote_commands)\n _print_local(lcmd)\n os.system(lcmd)",
"def StartCmd(args, cwd=None, shell=False, env=None):\n _ValidateAndLogCommand(args, cwd, shell)\n return Popen(\n args,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n shell=shell,\n cwd=cwd,\n env=env)",
"def launchsvn(s, show=False, pretend=False, **kwargs):\n username = password = configdir = \"\"\n if opts.get(\"username\", None):\n username = \"--username=\" + opts[\"username\"]\n if opts.get(\"password\", None):\n password = \"--password=\" + opts[\"password\"]\n if opts.get(\"config-dir\", None):\n configdir = \"--config-dir=\" + opts[\"config-dir\"]\n cmd = ' '.join(filter(None, [opts[\"svn\"], \"--non-interactive\",\n username, password, configdir, s]))\n if show or opts[\"verbose\"] >= 2:\n print(cmd)\n if pretend:\n return None\n return launch(cmd, **kwargs)",
"def simplerun(args, options):\n try:\n cutoff = args.index('--')\n cmdline = ' '.join(args[cutoff + 1:])\n except ValueError:\n cmdline = ' '.join(args)\n\n print(\"Running command: '%s'\" % cmdline)\n\n thermos_task = ThermosTaskWrapper(Task(\n name=options.name,\n resources=Resources(cpu=1.0, ram=256 * 1024 * 1024, disk=0),\n processes=[Process(name=options.name, cmdline=cmdline)]))\n\n really_run(thermos_task,\n options.root,\n tempfile.mkdtemp(),\n task_id=options.task_id,\n user=options.user,\n prebound_ports=options.prebound_ports,\n chroot=False,\n daemon=options.daemon)",
"def startup():\n\n # Earlier versions of traffic_ctl do not support\n # \"server start\", so we prefer traffic_line here.\n if _TRAFFICLINE:\n cmd = _traffic_line(\"-U\")\n else:\n cmd = _traffic_ctl(\"server\", \"start\")\n\n _subprocess(cmd)\n return _statuscmd()",
"def _start_server():\n args = [sys.executable] + sys.argv\n args.insert(args.index('wserver'), 'server')\n args.remove('wserver')\n pid = os.spawnv(os.P_NOWAIT, sys.executable, args)\n return pid",
"def cvmfsStart(reponame = None):\n if reponame == None:\n reponame = _getRepoName()\n\n rc = subprocess.call([\"cvmfs_server\", \"transaction\", reponame])\n if rc != 0:\n raise RuntimeError(\"Could not start CVMFS transaction\")",
"def spawn(self):\n self._proc = subprocess.Popen(\n self._args, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n )",
"def launch_vm(self):\r\n self._print(\"Starting VM\")\r\n options = [self.vboxheadless,'-startvm',self.vm_name]\r\n options.extend(self.vboxheadless_start_options)\r\n self.popen = subprocess.Popen(options)\r\n# result = process.wait()\r\n result = \"(other thread)\"\r\n self._print(\"Started %s\" % result)",
"def __init__(self):\n super(DaosServer.ServerStartSubCommand, self).__init__(\n \"/run/daos_server/start/*\", \"start\")\n self.port = FormattedParameter(\"-p {}\")\n self.storage = FormattedParameter(\"-s {}\")\n self.modules = FormattedParameter(\"-m {}\")\n self.targets = FormattedParameter(\"-t {}\")\n self.xshelpernr = FormattedParameter(\"-x {}\")\n self.firstcore = FormattedParameter(\"-f {}\")\n self.group = FormattedParameter(\"-g {}\")\n self.sock_dir = FormattedParameter(\"-d {}\")\n self.insecure = FormattedParameter(\"-i\", True)\n self.recreate = FormattedParameter(\"--recreate-superblocks\", False)",
"def execute_slice(args):\n\n scad_file, height, output_file, openscad = args\n height_param = \"-DLAYER_HEIGHT={0}\".format(height)\n\n try:\n LOG.info(\"Starting slice at height {0}\".format(height))\n subprocess.check_call([openscad,\n height_param,\n \"-o\", output_file,\n scad_file])\n LOG.info(\"Completed slice at height {0}\".format(height))\n\n except subprocess.CalledProcessError as cpe:\n LOG.error(str(cpe))",
"def select_index(index, clientdir=DEFAULT_CLIENTDIR):\n return subprocess.run(['devpi', 'use', '--clientdir', clientdir, index])",
"def call_scons(build_options, extra_option_str):\n cmd_line = \"scons VERBOSE=\" + VERBOSE\n for key in build_options:\n cmd_line += \" \" + key + \"=\" + str(build_options[key])\n\n cmd_line += \" \" + str(extra_option_str)\n\n print (\"Running : \" + cmd_line)\n sys.stdout.flush()\n exit_code = subprocess.Popen([cmd_line], shell=True).wait()\n if exit_code != 0:\n exit(exit_code)",
"def connect_subproc(args, service=VoidService, config={}):\n from subprocess import Popen, PIPE\n proc = Popen(args, stdin=PIPE, stdout=PIPE)\n conn = connect_pipes(proc.stdout, proc.stdin, service=service, config=config)\n conn.proc = proc # just so you can have control over the process\n return conn",
"def open(self):\n self._lock.acquire()\n try:\n self._relaypid, self._portoffset = self._check_tcprelay()\n logger.debug(\n \"PIGGYBACK TCPRELAY\"\n \"PID: {0} PORT: {1}\".format(self._relaypid,\n self._portoffset))\n except AttributeError:\n # TODO: tcprelays might want to close when test is over???\n self._portoffset = get_available_portoffset()\n command = \"/usr/local/bin/tcprelay --portoffset {0} \" \\\n \"--locationid {1} rsync telnet \" \\\n \"ssh > /tmp/tcprelay.{1}.log 2>&1\" \\\n \" &\".format(self._portoffset, self.locationid_param)\n logger.debug(\"SPAWNING TCPRELAY - {0}\".format(command))\n child = subprocess.Popen([\"bash\", \"-c\", command], close_fds=True)\n time.sleep(0.5)\n try:\n self._relaypid, self._portoffset = self._check_tcprelay()\n except AttributeError:\n logger.error(\n \"FAILED to SPAWN TCPRELAY - CMD {0} \"\n \"OUTPUT: {1} ERROR: {2} RC: {3}\".format(command,\n child.stdout,\n child.stderr,\n child.returncode))\n finally:\n self._lock.release()",
"def start_client(self):\n if self.client is not None:\n return\n\n # Arguments for the client\n browser = self.vim.vars.get('markdown_composer_browser')\n open_browser = (\n self.vim.vars.get('markdown_composer_open_browser', 1) == 1)\n syntax_theme = self.vim.vars.get('markdown_composer_syntax_theme')\n current_buffer = '\\n'.join(self.vim.current.buffer)\n\n plugin_root = Path(__file__).parents[3]\n args = ['cargo', 'run', '--release', '--']\n if browser:\n args.append('--browser=%s' % browser)\n\n if not open_browser:\n args.append('--no-browser')\n\n if syntax_theme:\n args.append('--highlight-theme=%s' % syntax_theme)\n\n args.append('--working-directory=%s' % os.getcwd())\n\n if os.path.isfile(self.vim.current.buffer.name):\n args.append(self.vim.current.buffer.name)\n\n self.client = subprocess.Popen(args,\n bufsize=0,\n cwd=str(plugin_root),\n stdout=subprocess.PIPE,\n stdin=subprocess.PIPE)",
"def InvocationStart(builder):\n return Start(builder)",
"def start(self):\r\n return self.start_subprocess()",
"def launch_app(item,terminal=False):\n try:\n if terminal:\n subprocess.Popen(['/bin/terminal',item])\n else:\n subprocess.Popen(shlex.split(item))\n except:\n pass",
"def _launch_autotest(control='control', dryrun=False, verbose=True):\n logging.info('Launching autotest...')\n autotest_bin_d = os.path.join(os.getcwd(),'kvm-test/bin/autotest')\n control_path = os.path.join(os.getcwd(), 'kvm-test/tests/kvm/')\n control = os.path.join(control_path, control)\n kvm_config = os.path.join(control_path, \"kvm_config.py\")\n state = control + '.state'\n\n if dryrun:\n os.system(\"%s | grep shortname\" %kvm_config)\n else:\n os.system('rm -rf %s' % state)\n start_cmd = \"%s %s\" % (autotest_bin_d, control)\n if verbose:\n start_cmd += \" --verbose\"\n os.system(start_cmd)",
"def start_ds9(ds9_name):\n os.system('ds9 -title {} &'.format(ds9_name))",
"def start(self):\n if self.isRunning():\n raise Exception('DhcpClientAlreadyStarted')\n cmd = ['sudo', self._slave_dhcp_client_path, '-i', self._ifname, '-A', '-S']\n if self._logger is not None:\n self._logger.debug('Running command ' + str(cmd))\n #self._slave_dhcp_client_proc = robot.libraries.Process.Process()\n #self._slave_dhcp_client_proc.start_process('sudo', self._slave_dhcp_client_path, '-i', self._ifname, '-A', '-S')\n self._slave_dhcp_client_proc = subprocess.Popen(cmd)#, stdout=open(os.devnull, 'wb'), stderr=subprocess.STDOUT)\n self._slave_dhcp_client_pid = self._slave_dhcp_client_proc.pid\n self.addSlavePid(self._slave_dhcp_client_proc.pid) # Add the PID of the child to the list of subprocesses (note: we get sudo's PID here, not the slave PID, that we will get later on via D-Bus (see RemoteDhcpClientControl.getPid())",
"def call(*args, **kwargs):\n return Popen(*args, **kwargs).wait()",
"def call_command_line(string, **kwargs):\n return subprocess.run(string.split(\" \"), **kwargs)",
"def start_vnc_server(self, nReserved = 0):\n\t\treturn Job(SDK.PrlVm_StartVncServer(self.handle, nReserved)[0])",
"def launch(config_list):\n p = PyRosLaunch(config_list)\n p.start()\n p.spin()",
"def select_server(server, clientdir=DEFAULT_CLIENTDIR):\n return subprocess.run(['devpi', 'use', '--clientdir', clientdir, server])",
"def _start_server_cmd(cls, address='localhost:44818',\n tags=(('SENSOR1', 'INT'), ('ACTUATOR1', 'INT'))):\n\n CMD = sys.executable + ' -m cpppo.server.enip '\n PRINT_STDOUT = '--no-print '\n HTTP = '--web %s:80 ' % address[0:address.find(':')]\n # print 'DEBUG: enip _start_server_cmd HTTP: ', HTTP\n ADDRESS = '--address ' + address + ' '\n TAGS = EnipProtocol._tuple_to_cpppo_tags(tags)\n\n if sys.platform.startswith('linux'):\n SHELL = '/bin/bash -c '\n LOG = '--log logs/protocols_tests_enip_server '\n else:\n raise OSError\n\n cmd = shlex.split(\n CMD +\n PRINT_STDOUT +\n LOG +\n ADDRESS +\n TAGS\n )\n print('DEBUG enip _start_server cmd: ', cmd)\n\n return cmd",
"def cmd(commandLine, choice, verbose = False):\n\tif verbose:\n stdout=None\n\telse:\n stdout=subprocess.PIPE\n\n\tlCmd = shlex.split(commandLine)\n\ttry:\n\t run = subprocess.call(lCmd, \n\t\t\t shell=choice,\n stdout=stdout,\n\t\t\t stderr=subprocess.PIPE)\n\texcept subprocess.CalledProcessError as err:\n\t sys.stderr.write(str(err))",
"def start(self, _=False):\n if not self._stop:\n self._current_execution += 1\n flags = self.flags\n if '--write' not in flags:\n flags.extend(['--write', self.writepath])\n if '--output-format' not in flags:\n flags.extend(['--output-format', 'csv'])\n line = [\"airodump-ng\"] + flags + self.arguments + [self.interface]\n self._proc = Popen(line, bufsize=0,\n env={'PATH': os.environ['PATH']},\n stderr=DEVNULL, stdin=DEVNULL, stdout=DEVNULL)\n os.system('stty sane')\n\n time.sleep(5)\n watcher = threading.Thread(target=self.watch_process)\n watcher.start()",
"def scontrol(cmd):\n cmd = 'scontrol {}'.format(cmd)\n cmd = shlex.split(cmd)\n subprocess.call(cmd)",
"def cmd_start(self, app_name=None):\n rc = self.socket_command_with_project('start', app_name)\n return rc",
"def callLineage(commands_list, vcfFile, snpScheme = \"/n/data1/hms/dbmi/farhat/Jerry/fast-lineage-caller-vcf/snp_schemes/coll.tsv\", outputFile = None):\n if outputFile == None:\n outputFile = genLinFileName(vcfFile)\n lineageCommand = \"/n/data1/hms/dbmi/farhat/Jerry/fast-lineage-caller-vcf/bin/fast-lineage-caller-vcf.py {vcf} {scheme} > {out}\".format(vcf = vcfFile, \n scheme = snpScheme, out = outputFile) \n commands_list.append(lineageCommand)",
"def bounce_cluster():\n\n if _TRAFFICCTL:\n cmd = _traffic_ctl(\"cluster\", \"restart\")\n else:\n cmd = _traffic_line(\"-B\")\n\n return _subprocess(cmd)",
"def view_darwin(filepath, *, quiet: bool) -> None:\n cmd = ['open', filepath]\n log.debug('view: %r', cmd)\n kwargs = {'stderr': subprocess.DEVNULL} if quiet else {}\n subprocess.Popen(cmd, **kwargs)",
"def start_service(service_name):\n subprocess.run([SUPERVISOR_CMD, \"start\", service_name])",
"def __init__(self, senna_path, executable):\n self.senna_path = senna_path\n self.p = sp.Popen(['blabla', '-path', senna_path],\n executable=os.path.join(senna_path, executable),\n stdin=sp.PIPE,\n stdout=sp.PIPE)",
"def start_process():\n global command, process\n\n def on_data(data):\n data = data.decode().strip()\n print('{}'.format(data))\n\n cmd = command.split(' ')\n\n if process:\n process.terminate()\n\n process = MySubprocess(cmd, -1, functools.partial(on_data), None, None)",
"def cli():\n ...",
"def call(seq):\n return subprocess.Popen(seq,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT).communicate()[0]",
"def callSubprocess(args, test=False):\n print(Fore.MAGENTA),\n for arg in args: \n print arg,\n print(Fore.WHITE)\n if not test: \n subprocess.call(args)",
"def run_server(ctx, ram, yourkit, dry_run, minecraft_version, yourkit_delay, yourkit_modes):\n ctx.jvm = ctx.parent.jvm # This should auto-inherit -_-\n try:\n ctx.minecraft_version = MinecraftVersion(minecraft_version)\n except ValueError:\n raise ClickException(f\"Invalid minecraft version: {minecraft_version!r}\")\n if ctx.invoked_subcommand is None:\n print()\n print(\"No command specified!\")\n click.echo(ctx.get_help())",
"def pn_cli(module):\n username = module.params['pn_cliusername']\n password = module.params['pn_clipassword']\n\n if username and password:\n cli = '/usr/bin/cli --quiet --user %s:%s ' % (username, password)\n else:\n cli = '/usr/bin/cli --quiet '\n\n return cli",
"def pn_cli(module):\n username = module.params['pn_cliusername']\n password = module.params['pn_clipassword']\n\n if username and password:\n cli = '/usr/bin/cli --quiet --user %s:%s ' % (username, password)\n else:\n cli = '/usr/bin/cli --quiet '\n\n return cli",
"def spawn_dev_appserver(self, args, open_ports=False, **kwargs):\n cmd = [\n sys.executable,\n os.path.join(self._gae_sdk, 'dev_appserver.py'),\n '--application', self.app_id,\n '--skip_sdk_update_check=yes',\n '--require_indexes=yes',\n ] + self.module_yamls\n if self.dispatch_yaml:\n cmd += [self.dispatch_yaml]\n cmd += args\n if open_ports:\n cmd.extend(('--host', '0.0.0.0', '--admin_host', '0.0.0.0'))\n if self._verbose:\n cmd.extend(('--log_level', 'debug'))\n return subprocess.Popen(cmd, cwd=self.app_dir, **kwargs)",
"def run_current_selection(self):\n import subprocess\n name, data, _ = self.matches_copy[self.selected_item]\n try:\n needs_term = data[\"Terminal\"].lower() == \"true\"\n except KeyError:\n needs_term = False\n if needs_term:\n with open(os.devnull, \"w\") as devnull:\n subprocess.call([\"nohup\", \"gnome-terminal\", \"-e\",\n data[\"command\"]],\n stdout=devnull,\n stderr=devnull)\n else:\n with open(os.devnull, \"w\") as devnull:\n cmdlist = [\"nohup\"]\n cmdlist.extend(data[\"Exec\"].split())\n subprocess.Popen(cmdlist,\n #stdout=devnull,\n #stderr=devnull\n )\n quit()",
"def Run_command_window(argument):\n if os.name == 'posix':\n argument = argument.replace(\".exe\",\"\")\n os.system(argument)\n\n else:\n startupinfo = subprocess.STARTUPINFO()\n startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW\n\n process = subprocess.Popen(argument, startupinfo=startupinfo, stderr=subprocess.PIPE, stdout=subprocess.PIPE)\n process.wait()\n\n return()",
"def Spawn(proc):\n proc.start()\n return proc",
"def start(self):\n\t\treturn Job(SDK.PrlVm_Start(self.handle)[0])",
"def portkill_main(args=sys.argv[1:]):\n # Probably should use optparse or some such.\n kw = {}\n if '-v' in args:\n kw['verbose'] = True\n args = [a for a in args if a != '-v']\n if '-s' in args:\n index = args.index('-s')\n kw['sleeptime'] = args[index + 1]\n args = args[:index] + args[index+2:]\n portkill(*args, **kw)\n return 0",
"def StartVm(cmd, timeout):\n\n vm = TimedSubprocess(timeout)\n if vm.Popen(cmd):\n logging.info('VM process was successfully started.')\n if vm.Wait():\n logging.error('VM process has terminated with errors.')\n return False\n else:\n logging.info('VM process has terminated without errors.')\n return True\n else:\n logging.error('VM process was not started.')\n return False",
"def start_session(self):\r\n print('Executing code by running main.run()...')\r\n print('This will open a tmux session...')\r\n print('Detach by pressing CTRL + B and then D')\r\n\r\n # Connect with SSH-PubKey and execute tmux script\r\n subprocess.run(\r\n ['ssh',\r\n '-i', self.ssh_key,\r\n '-o', 'StrictHostKeyChecking=no',\r\n 'robot@{}'.format(self.settings['ip']),\r\n '-t', 'robolab-tmux'\r\n ])\r\n\r\n print('Done.')",
"def start_storescp():\n args = [which('storescp'), '--ignore', '11112']\n return subprocess.Popen(args)",
"def spawn(*args):\n # Adapted from ranger.ext.spawn\n process = Popen(args, stdout=PIPE, shell=True)\n stdout, stderr = process.communicate()\n return stdout.decode('utf-8')",
"def cli(argv):\n import getopt\n class BadUsage(Exception): pass\n\n try:\n opts, args = getopt.getopt(argv[1:], 'gp:r:h:s:', \n ['gui', 'port=', 'repository=', 'script-alias='])\n for opt, val in opts:\n if opt in ('-g', '--gui'):\n options.start_gui = 1\n elif opt in ('-r', '--repository'):\n if options.repositories: # option may be used more than once:\n num = len(options.repositories.keys())+1\n symbolic_name = \"Repository\"+str(num)\n options.repositories[symbolic_name] = val\n else:\n options.repositories[\"Development\"] = val\n elif opt in ('-p', '--port'):\n try:\n options.port = int(val)\n except ValueError:\n raise BadUsage\n elif opt in ('-h', '--host'):\n options.host = val\n elif opt in ('-s', '--script-alias'):\n options.script_alias = \\\n string.join(filter(None, string.split(val, '/')), '/')\n if options.start_gui:\n gui(options.host, options.port)\n return\n elif options.port:\n def ready(server):\n print 'server ready at %s%s' % (server.url,\n options.script_alias)\n serve(options.host, options.port, ready)\n return\n raise BadUsage\n except (getopt.error, BadUsage):\n cmd = os.path.basename(sys.argv[0])\n port = options.port\n host = options.host\n script_alias = options.script_alias\n print \"\"\"ViewVC standalone - a simple standalone HTTP-Server\n\nUsage: %(cmd)s [OPTIONS]\n\nAvailable Options:\n\n-h <host>, --host=<host>:\n Start the HTTP server listening on <host>. You need to provide\n the hostname if you want to access the standalone server from a\n remote machine. [default: %(host)s]\n\n-p <port>, --port=<port>:\n Start an HTTP server on the given port. [default: %(port)d]\n\n-r <path>, --repository=<path>:\n Specify a path for a CVS repository. Repository definitions are\n typically read from the viewvc.conf file, if available. This\n option may be used more than once.\n\n-s <path>, --script-alias=<path>:\n Specify the ScriptAlias, the artificial path location that at\n which ViewVC appears to be located. For example, if your\n ScriptAlias is \"cgi-bin/viewvc\", then ViewVC will appear to be\n accessible at the URL \"http://%(host)s:%(port)s/cgi-bin/viewvc\".\n [default: %(script_alias)s]\n \n-g, --gui:\n Pop up a graphical interface for serving and testing ViewVC.\n NOTE: this requires a valid X11 display connection.\n\"\"\" % locals()",
"def test_launch_traj(self, capsys):\n args = self.args.copy()\n args[\"traj_file\"] = str(PATH_DATA / \"2POPC.xtc\")\n args[\"out_file\"] = \"out.txt\"\n args[\"prefix_traj_ouput\"] = \"basename\"\n args[\"begin\"] = 0\n args[\"end\"] = 10000\n UI.launch(**args)\n captured = capsys.readouterr().out\n assert \"Results written to out.txt\" in captured\n assert \"Dealing with frame 10 at 10000.0 ps.\" in captured\n assert \"Writing new pdb with hydrogens.\" in captured\n assert \"Writing trajectory with hydrogens in xtc file.\" in captured",
"def startServer(self):\n self._ensurePasswordFile()\n \n args = ['-localhost',\n '-userdir', self.vncUserDir,\n '-xauthority', self.xauthFile,\n '-socat']\n\n #Note: perl is needed because when executing within an egg\n # vncserver will not be marked executable.\n command = [_Paths.perl, _Paths.vncserver] + args + self.extraArgs\n status, output, error = runCommand(command,\n waitChildren=False)\n\n # Check correct status returned\n if status != 0:\n raise RuntimeError('Launching vncserver failed.\\nStatus %d\\nstdout: %s\\nstderr: %s' %\n (status, output, error))\n\n # parse message on stderr\n m = _Regexps.desktop.search(error)\n if m:\n display = m.group(1)\n if self.verbose:\n print \"started server on %s\" % display\n if self.testDisplay(display):\n return display\n raise RuntimeError(\"display just started on %s is not responding\"\n % display)\n\n else:\n raise RuntimeError(\"apparently launched vncserver \" + \n \"but could not determine display name\")",
"def __init__(self, name=None, start=True, *args, **kwargs):\n name = \"VM_TEMPL_2\" if name is None else name\n super(CliVM, self).__init__(name=name, start=start, *args, **kwargs)\n\n self.add_proc(rift.vcs.DtsPerfTasklet(), mode_active=False)\n self.add_proc(RedisServer(), mode_active=False) \n if not start:\n self.add_tasklet(rift.vcs.uAgentTasklet(), mode_active=False)",
"def do_cmd(gargs, args, _):\n\n request = {\n \"desc\": args.desc,\n \"owner\": args.owner\n }\n\n if args.ssid:\n\n request[\"wifi_props\"] = {\n \"bssid_type\": args.ssid_type,\n \"ssid\": args.ssid\n }\n\n if args.plmnid:\n\n plmnid = PLMNID(args.plmnid)\n\n request[\"lte_props\"] = {\n \"plmnid\": plmnid.to_str()\n }\n\n headers = command.get_headers(gargs)\n\n url = '/api/v1/projects'\n response, _ = command.connect(gargs, ('POST', url), 201, request,\n headers=headers)\n\n location = response.headers['Location']\n tokens = location.split(\"/\")\n project_id = tokens[-1]\n\n print(project_id)",
"def launch(*args):\n if len(args) == 1:\n version_name = args[0]\n if not NAME_REGEX.match(version_name):\n print 'Invalid version name'\n return\n else:\n print 'Incorrect arguments'\n return\n\n result = subprocess.call('cp /etc/localtime docker/build/anbardari', shell=True)\n if result:\n return\n\n for container_settings in DOCKER_SETTINGS_DICT['containers']:\n _deploy_container(version_name, container_settings, os.path.join(SHARED_DIR_OUTSIDE_CONTAINER, version_name))",
"def _start_dummy_server(self):\r\n dummy_executable = os.path.join(__here__, 'tests', 'dummy_xsct.tcl')\r\n start_command = 'tclsh {}'.format(dummy_executable)\r\n logger.info('Starting xsct server: %s', start_command)\r\n stdout = None\r\n self._xsct_server = subprocess.Popen(start_command, stdout=stdout)\r\n logger.info('xsct started with PID: %d', self._xsct_server.pid)",
"def snap_run(args):\n\n ## TODO: link snap num to host name\n\n logger.debug(\"ETCD config file: \"+args.etcd_config_file)\n etcd_params = read_yaml(args.etcd_config_file)\n logger.debug(\"CORR config file: \"+args.corr_config_file)\n logger.debug(\"HOST SNAP: \"+args.host_snap)\n logger.debug(\"SNAP NUMBER: \"+str(args.snap_num))\n\n delay_params = read_yaml(args.delay_config_file)\n delays = (np.asarray(delay_params['cal_solutions']['delays']).ravel())[(args.snap_num-1)*6:(args.snap_num)*6]\n ants = np.asarray(delay_params['cal_solutions']['antenna_order'])\n \n logger.info(\"snap.py.snap_run() creatting process to handle snap: {}\".format(args.host_snap))\n my_snap = dsaX_snap.dsaX_snap(args.host_snap,args.corr_config_file,number=args.snap_num)\n\n etcd_host, etcd_port = parse_endpoint(etcd_params['endpoints'])\n logger.info(\"snap.py.snap_run() etcd host={}, etcd port={}\".format(etcd_host, etcd_port))\n etcd = etcd3.client(host=etcd_host, port=etcd_port)\n watch_ids = []\n keym = '/mon/snap/' + str(args.snap_num) + '/armed_mjd'\n keym3 = '/mon/snap/' + str(args.snap_num) + '/delays'\n keym4 = '/mon/snap/' + str(args.snap_num) + '/antenna_order'\n keym2 = '/mon/snap/' + str(args.snap_num)\n\n \n # add watch on cmd for snapnum\n cmd = etcd_params['snap_command'] + str(args.snap_num)\n logger.info('snap.py.snap_run() watch cmd= {}'.format(cmd))\n\n\n watch_id = etcd.add_watch_callback(cmd, process_command(my_snap,etcd,keym,keym2,delays,ants,keym3,keym4))\n watch_ids.append(watch_id)\n\n # add watch on cmd for snap 0\n cmd = etcd_params['snap_command'] + str(0)\n logger.info('snap.py.snap_run() watch cmd= {}'.format(cmd))\n watch_id = etcd.add_watch_callback(cmd, process_command(my_snap,etcd,keym,keym2,delays,ants,keym3,keym4))\n watch_ids.append(watch_id)\n\n \n # main loop\n while True:\n \n #md = my_snap.get_monitor_data()\n #if md!=-1:\n # etcd.put(key, md)\n sleep(10)",
"def cmd_port(args):",
"def start_subprocess(self):\r\n errmsg = ('\\n\\nPlease install GNU Octave and put it in your path\\n')\r\n ON_POSIX = 'posix' in sys.builtin_module_names\r\n if self.use_pty:\r\n master, slave = pty.openpty()\r\n self.wfid, self.rfid = master, master\r\n rpipe, wpipe = slave, slave\r\n else:\r\n self.rfid, wpipe = os.pipe()\r\n rpipe, self.wfid = os.pipe()\r\n kwargs = dict(close_fds=ON_POSIX, bufsize=0, stdin=rpipe,\r\n stderr=wpipe, stdout=wpipe)\r\n if os.name == 'nt':\r\n startupinfo = subprocess.STARTUPINFO()\r\n startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW\r\n kwargs['startupinfo'] = startupinfo\r\n try:\r\n proc = subprocess.Popen(['octave', '-q', '--braindead'],\r\n **kwargs)\r\n except OSError: # pragma: no cover\r\n raise Oct2PyError(errmsg)\r\n else:\r\n self.reader = _Reader(self.rfid, self.read_queue)\r\n return proc",
"def server_cmd(command):\n\tp = Popen([COMMAND_SCRIPT, command], stderr=PIPE)\n\tret = p.wait()\n\tif ret:\n\t\tout, err = p.communicate()\n\t\traise OSError(command, ret, err.read().strip())\n\treturn",
"def run_server(instance):\n cpu = ['9', '10'][instance] # on which cpu\n server_delay = [0, slow][instance]\n args = {\n 'bin': slow_receiver_exp,\n 'cpu': cpu,\n 'count_queue': count_queue,\n 'sysmod': 'bess' if sysmod == 'bess-bp' else sysmod,\n 'mode': 'server',\n 'inst': instance,\n 'delay': server_delay,\n 'source_ip': _server_ips[instance],\n 'bidi': 'false'\n }\n if PORT_TYPE == PMD:\n vdev = ['virtio_user0,path=/tmp/ex_vhost0.sock,queues='+str(count_queue),\n 'virtio_user2,path=/tmp/ex_vhost2.sock,queues='+str(count_queue)][instance]\n prefix = 'slow_receiver_server_{}'.format(instance)\n args['vdev'] = vdev\n args['file-prefix'] = prefix\n cmd = ('sudo {bin} --no-pci --lcores=\"{cpu}\" --file-prefix={file-prefix} '\n '--vdev=\"{vdev}\" --socket-mem=128 -- '\n 'bidi={bidi} {source_ip} {count_queue} {sysmod} {mode} {delay}').format(**args)\n else:\n vdev = ['ex_vhost0','ex_vhost2'][instance]\n prefix = 'bessd-dpdk-prefix'\n args['vdev'] = vdev\n args['file-prefix'] = prefix\n cmd = ('sudo {bin} --no-pci --lcores=\"{cpu}\" --file-prefix={file-prefix} '\n '--proc-type=secondary --socket-mem=128 -- '\n 'bidi={bidi} vport={vdev} {source_ip} {count_queue} '\n '{sysmod} {mode} {delay}').format(**args)\n\n print(\"=\" * 32)\n print(\" \" * 13 + \"server\")\n print(cmd)\n print(\"=\" * 32, end='\\n\\n')\n # Run in background\n if not DIRECT_OUTPUT:\n p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n else:\n p = subprocess.Popen(cmd, shell=True)\n return p",
"def launch_example_cluster_cmd(*args, **kwargs):\n return launch_example_cluster(*args, **kwargs)",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():"
] | [
"0.78137475",
"0.7717148",
"0.5588287",
"0.54520285",
"0.543603",
"0.5400916",
"0.5332327",
"0.52927345",
"0.51990217",
"0.515756",
"0.50810075",
"0.5065432",
"0.5031213",
"0.50212044",
"0.50108457",
"0.5000161",
"0.4948063",
"0.4895872",
"0.48936856",
"0.48688263",
"0.48572204",
"0.48547882",
"0.48514485",
"0.48503274",
"0.48495066",
"0.48461458",
"0.4818393",
"0.48109594",
"0.4806708",
"0.48036346",
"0.48013398",
"0.4794929",
"0.47798568",
"0.4775612",
"0.4773824",
"0.47537377",
"0.4753704",
"0.47527754",
"0.47372794",
"0.473468",
"0.47247955",
"0.47204077",
"0.4719227",
"0.47174427",
"0.47161424",
"0.4712353",
"0.47032624",
"0.4701548",
"0.47002497",
"0.46993774",
"0.46959025",
"0.46930787",
"0.46809772",
"0.4674613",
"0.46735618",
"0.4670251",
"0.4670251",
"0.46677452",
"0.46636268",
"0.4662945",
"0.4658445",
"0.46532595",
"0.4652954",
"0.46469295",
"0.46468416",
"0.46448722",
"0.46392146",
"0.46331978",
"0.4629107",
"0.46284217",
"0.4626926",
"0.46194163",
"0.46164036",
"0.4616262",
"0.46147123",
"0.4607692",
"0.46072114",
"0.4600493",
"0.45980936",
"0.45967478",
"0.45966062",
"0.45966062",
"0.45966062",
"0.45966062",
"0.45966062",
"0.45966062",
"0.45966062",
"0.45966062",
"0.45966062",
"0.45966062",
"0.45966062",
"0.45966062",
"0.45966062",
"0.45966062",
"0.45966062",
"0.45966062",
"0.45966062",
"0.45966062",
"0.45966062",
"0.45966062"
] | 0.5777458 | 2 |
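The replica-command record above returns an argv list with one element per flag or value, which is the form subprocess.Popen expects when no shell is used. A short usage sketch; the path and flags below are invented stand-ins for whatever start_replica_cmd_prefix(builddir, replica_id, config) actually returns:

import subprocess

# Hypothetical argv list; a real one comes from start_replica_cmd_prefix().
cmd = ["/tmp/build/tests/simpleKVBC/TesterReplica/skvbc_replica",
       "-k", "replica_keys_", "-i", "0", "-s", "500"]

# One list element per argument, so no shell quoting or word splitting is involved.
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# ... exercise the replica, then shut it down:
proc.kill()
proc.wait()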
generator for primes below threshold | def primes_below_thresh(thresh):
primes_lookup = {n: True for n in range(2, thresh)}
for n in range(2, thresh):
if primes_lookup[n]:
for tick_off in range(n+n, thresh, n):
primes_lookup[tick_off] = False
return sorted((n for n, is_prime in primes_lookup.items() if is_prime)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def gen_primes():\n\n n = 1\n while True:\n while not isPrime(n):\n n += 1\n\n yield n\n n += 1",
"def primes():\n yield 2\n found_primes = [2]\n a = 3\n while True:\n for p in found_primes:\n if p**2 > a:\n found_primes.append(a)\n yield a\n a += 2\n break\n elif a % p == 0:\n a += 2\n break",
"def create_primes(threshold):\n if threshold == 2:\n return [2]\n\n elif threshold < 2:\n return []\n\n numbers = list(range(3, threshold + 1, 2))\n root_of_threshold = threshold**0.5\n half = int((threshold + 1) / 2 - 1)\n idx = 0\n counter = 3\n while counter <= root_of_threshold:\n if numbers[idx]:\n idy = int((counter * counter - 3) / 2)\n numbers[idy] = 0\n while idy < half:\n numbers[idy] = 0\n idy += counter\n idx += 1\n counter = 2 * idx + 3\n return [2] + [number for number in numbers if number]",
"def get_primes_over(limit):\n candidate = 1000000\n count = 0\n while count < limit:\n if is_prime(candidate):\n yield candidate\n count += 1\n candidate += 1\n else:\n candidate += 1",
"def primes(m):\n if m <= 2:\n return ()\n sieve = [True] * m\n for i in sixn(m):\n if sieve[i]:\n yield i\n for mult in range(i * i, m, i):\n sieve[mult] = False",
"def primes():\n yield 2\n candidate = 3\n while True:\n for i in range(3, int(sqrt(candidate)) + 1, 2):\n if (candidate % i) == 0:\n break\n else:\n yield candidate\n candidate += 2",
"def primes():\n yield 2\n candidate = 3\n while True:\n for i in range(3, int(sqrt(candidate)) + 1, 2):\n if (candidate % i) == 0:\n break\n else:\n yield candidate\n candidate += 2",
"def get_primes(lower: int, upper: int) -> typing.Generator[int, None, None]:\r\n for num in range(lower, upper + 1):\r\n if num > 1:\r\n for i in range(2, int(math.sqrt(num)) + 1):\r\n if num % i == 0:\r\n break\r\n else:\r\n yield num",
"def primes(max_number_of_primes) -> iter:\n number_primes = count(1)\n prime = prime_generator()\n while next(number_primes) <= max_number_of_primes:\n yield next(prime)",
"def get_primes_in(self, grange):\n for n in grange:\n if self.is_prime(n):\n yield n",
"def primes():\n yield 2\n found = []\n for i in itertools.count(start=3, step=2):\n for p in found:\n if i % p == 0:\n break\n else:\n yield i\n found.append(i)",
"def list_primes(limit):\n sieve = [False]*2 + [True] * (limit-2)\n n = 2\n while n <= sqrt(limit):\n if sieve[n]:\n yield n\n for m in xrange(n**2, limit, n): # multiples\n sieve[m] = False # mark multiples as non prime\n n += 1\n while n < limit:\n if sieve[n]:\n yield n\n n += 1",
"def gen_primes(limit=10000):\n\n candidates = set(range(2, limit))\n primes = []\n\n while len(candidates) > 0:\n prime = min(candidates)\n primes.append(prime)\n for number in range(prime, limit, prime):\n candidates.discard(number)\n\n return primes",
"def primes(lim):\n limsqrt = ceil(sqrt(lim))\n s = [ True ] * (lim + 1)\n for i in range(2, ceil(sqrt(lim))):\n if s[i]:\n k = 0\n while True:\n l = i * i + k * i\n if l > lim: break\n k += 1\n s[l] = False\n return [i for i in range(2, lim + 1) if s[i]]",
"def primes(n):\n sieve = [True]*n\n for p in range(2, n):\n if sieve[p]:\n yield p\n for i in range(p*p, n, p):\n sieve[i] = False",
"def get_primes_in(limit):\n range_limit = np.arange(limit)\n prime_mask = np.ones(limit, dtype=bool)\n prime_mask[0:2] = False\n for i in range_limit[:int(np.sqrt(limit))+1]:\n if prime_mask[i]:\n prime_mask[2*i::i] = False\n return range_limit[prime_mask]",
"def gen_primes(N):\n primes = set()\n for n in range(2, N):\n if all(n % p > 0 for p in primes):\n primes.add(n)\n yield n",
"def primes():\n yield 1\n primes = []\n for n in itertools.count(2):\n if not any(n % p == 0 for p in primes):\n # No divisor found among previous primes\n yield n\n primes.append(n)",
"def sieve(limit):\n primes = []\n\n s = xrange(2, limit + 1)\n while len(s) != 0:\n primes.append(s[0])\n s = [n for n in s if (n % s[0]) != 0]\n\n return primes",
"def prime_numbers(limit):\n primes = [2, 3, 5]\n for p in primes:\n yield p\n n = 5\n count = 3\n last_idx = -1\n sqrd_prime = 0\n while count <= limit:\n n += 2\n if n > sqrd_prime:\n last_idx += 1\n sqrd_prime = primes[last_idx] ** 2\n is_prime = True\n for i in range(1, last_idx + 1):\n p = primes[i]\n if n % p == 0:\n is_prime = False\n break\n if is_prime:\n count += 1\n primes.append(n)\n yield n",
"def gen_primes():\n\tyield 2\n\tyield 3\n\tprime_list = [2, 3]\n\twhile 1:\n\t\tnext = prime_list[-1] + 2\n\t\ti = 0\n\t\twhile i < len(prime_list):\n\t\t\tif next%prime_list[i] == 0:\n\t\t\t\tnext+=2\n\t\t\t\ti=0\n\t\t\telse:\n\t\t\t\ti+=1\n\t\tprime_list.append(next)\n\t\tyield next",
"def eratosthenes(limit):\n if isinstance(limit, (int, float)) and limit == int(limit):\n limit = int(limit)\n else:\n raise ValueError\n primes = []\n mask = [1]*(limit+1)\n for i in range(2, limit+1):\n if mask[i]:\n primes.append(i)\n for j in range(i*i, limit+1, i):\n mask[j] = 0\n return np.asarray(primes)",
"def primes(n):\n return [i for i in xrange(1, n + 1) if mr_prime(i)]",
"def prime_numbers(upto):\n sieve = BitArray(upto + 1, 1)\n for number in xrange(2, upto + 1):\n if not sieve[number]:\n continue\n yield number\n for multiple in xrange(number ** 2, upto + 1, number):\n sieve[multiple] = 0\n return",
"def primes(n):\n\tsieve = [True] * n\n\tyield 2\n\tfor i in xrange(3,int(n**0.5)+1,2):\n\t\tif sieve[i]:\n\t\t\tyield i\n\t\t\tsieve[i*i::2*i] = [False]*((n-i*i-1)/(2*i)+1)\n\tfor i in xrange(i+2,n,2):\n\t\tif sieve[i]: yield i",
"def generate_primes(L):\n # We need to compute the Bound of the factor set.\n i = 0\n list_p = []\n for p in prime_sieve():\n i += 1\n list_p.append(p)\n if i >= L:\n break\n return list_p",
"def primeGen(n):\n primes = [2, 3, 5, 7, 11]\n if n in xrange(1, len(primes) + 1):\n return primes[:n]\n else:\n banlist = []\n count = 6\n while count <= n:\n Next = (primes[-2] + primes[-1]) - primes[-3]\n if not is_prime(Next):\n count -= 1\n banlist.append(Next)\n count += 1\n primes.append(Next)\n filterout(banlist, primes)\n return primes",
"def prime_generator():\r\n for i in itertools.count(start=1):\r\n for j in ((6 * i) - 1, (6 * i) + 1):\r\n if is_prime(j): yield(j)",
"def get_primes(self, startnum=2):\n i = startnum\n while True:\n if self.is_prime(i):\n yield i\n i += 1",
"def problem077():\n\n cond = lambda n: num_prime_sum_ways(n) > 5000\n ans = next(filter(cond, itertools.count(2)))\n return ans",
"def prime_generator() -> int:\n \n #Start with the first prime.\n counter = count(2)\n candidate = next(counter)\n cache: list = [candidate]\n yield candidate\n \n # Set a flag.\n divisible = False\n while True:\n candidate = next(counter)\n # Check if the candidate is prime.\n for number in cache:\n # If number is greater than the squareroot of candidate, we are done.\n if number * number > candidate:\n break\n # If number divides candidate, candidate is not prime.\n if candidate % number == 0:\n divisible = True\n break\n # If is is prime, add it to the list.\n if not divisible:\n cache.append(candidate)\n yield candidate\n # Reset the flag.\n divisible = False",
"def sieve(self, upto_num):\n max_cur_known = self.max_known_number()\n \n num_new = upto_num - max_cur_known\n #All new numbers are primes until they are crossed off\n self.number_list.extend(array.array('b', [1])*num_new)\n \n for marker_num in range(2, maths.floor(maths.sqrt(upto_num)) + 1):\n #For efficiency only use prime marked numbers\n if not self.is_prime(marker_num):\n continue\n \n min_x = max(max_cur_known // marker_num + 1, marker_num)\n max_x = upto_num // marker_num\n \n for x in range(min_x, max_x + 1):\n self.number_list[marker_num*x] = 0 # Non-prime",
"def eratosthenes_npo(limit):\n if isinstance(limit, (int, float)):\n limit = int(limit)\n else:\n raise ValueError\n mask = np.ones(limit//2, dtype=np.bool)\n for i in range(3, int(limit**0.5)+1, 2):\n if mask[i//2]:\n mask[i*i//2::i] = False\n return np.r_[2, 2*np.nonzero(mask)[0][1::]+1]",
"def primes(upper_bound):\n global cache\n lower_bound = 2\n prime_set = new_primes(upper_bound, cache, lower_bound)\n prime_set.update(cache)\n cache = prime_set\n\n return prime_set",
"def test_prime_10(self):\n\t self.assertTrue(prime_generator(10), [2, 3, 5, 7])",
"def test_15(self):\n\t self.assertTrue(prime_generator(15), [2, 3, 5, 7, 11, 13])",
"def sieve(n):\n global primes; lower = len(primes)\n if n+1 > lower:\n primes += [True, False] * ((n-lower)/2+1)\n for i in xrange(3, int(math.sqrt(n)+1), 2):\n if primes[i]:\n for j in xrange(3*i, n+1, 2*i):\n if j >= lower:\n primes[j] = False\n return [i for i, is_prime in enumerate(primes) if is_prime]",
"def prime_gen():\n for i in memo_primes: yield i\n x = memo_primes[-1] + 1\n \n while True:\n if prime_with(x, memo_primes):\n yield x\n memo_primes.append(x)\n x += 1",
"def xprimes(step=1000):\n\n\tif step % 2:\n\t\traise ValueError(\"step is not even\")\n\n\tprimes = [2]\n\tmultiples = [4] # least multiple of prime at index i in primes not yet marked\n\tlower = 2\n\tupper = 4\n\t\n\twhile True:\n\t\t\n\t\t# non-prime numbers will live here\n\t\tnums = set()\n\t\t\n\t\tfor i, p in enumerate(primes):\n\n\t\t\t# You've marked everything worth marking (for now)\n\t\t\tif p * p > upper:\n\t\t\t\tbreak\n\t\t\t# Pick up marking where you left off\n\t\t\tm = multiples[i]\n\t\t\t\n\t\t\t# Do some marking\n\t\t\twhile m < upper: # upper is even, cannot be prime\n\t\t\t\tnums.add(m)\n\t\t\t\tm += p\n\t\t\t\n\t\t\t# Left off on this multiple (save for later)\n\t\t\tmultiples[i] = m\n\t\t\n\t\t# Collect primes between lower and upper\n\t\tfor i in xrange(lower, upper): # upper is even, cannot be prime\n\t\t\tif i not in nums:\n\t\t\t\tyield i\n\t\t\t\tprimes.append(i)\n\t\t\t\tmultiples.append(i + i) # 2 * i (i is a new prime)\n\t\t\n\t\t# Got all the primes in this interval; move it up\n\t\tlower = upper + 1\n\t\tupper += min(upper, step)",
"def solution(n: int = 2000000) -> int:\n\n return sum(takewhile(lambda x: x < n, prime_generator()))",
"def gen_primes():\n\n # Maps composites to primes witnessing their compositeness.\n # This is memory efficient, as the sieve is not \"run forward\"\n # indefinitely, but only as long as required by the current number\n # being tested\n\n D = {}\n\n # The runing integer that is checked for primeness\n q = 2\n\n while True:\n if q not in D:\n # q is a new prime.\n # Yield it and mark its first multiple that isn't\n # already marked in previous iterations\n\n yield q\n D[q * q] = [q]\n else:\n # q is composite. D[q] is the list of primes that\n # divide it. Since we've reached q, we no longer\n # need it in the map, but we'll mark the next multiples\n # of its witnesses to prepare for larger numbers\n\n for p in D[q]:\n D.setdefault(p + q, []).append(p)\n del D[q]\n\n q += 1",
"def primeIterator(no = 0,lessThan = None ):\r\n \r\n prmd = {2:1,3:2}\r\n sqrtn = 2\r\n l = 1\r\n count = 0\r\n #or (no==-1 and not lessThan) l < no or:\r\n print(\"no\", no)\r\n while ((no!=0 and count < no) or ( (no==0) and (lessThan and l<lessThan ) or (not lessThan ) ))and (l<4) :\r\n if l in prmd:\r\n count += 1\r\n yield l\r\n l+=1\r\n l=5\r\n add = 2\r\n \r\n while (no!=0 and count < no) or ( (no==0) and ( (lessThan and l<lessThan ) or (not lessThan )) ) : #check only 6n-1 and 6n+1\r\n if l > sqrtn**2:\r\n sqrtn = l**0.5\r\n for i in prmd:\r\n if i > sqrtn:\r\n prmd[l] = len(prmd)\r\n add = 2 if add==4 else 2\r\n count +=1\r\n yield l\r\n break\r\n if l%i ==0 : \r\n break\r\n l+=add",
"def gen_primes():\n\n # Maps composites (=non-primes) to primes witnessing their compositeness.\n # This is memory efficient, as the sieve is not \"run forward\" indefinitely,\n # but only as long as required by the current number being tested.\n D = {}\n\n q = 1 # the running integer that is checked for primeness\n while (q := q+1):\n if q not in D:\n # q is a new prime. Yield it and mark its first multiple that is\n # not already marked in previous iterations\n yield q\n D[q*q] = [q]\n else:\n # q is composite. D[q] is the list of primes that divide it. Since\n # we have reached q, we no longer need it in the map, but we will\n # mark the next multiples of its witnesses to prepare for larger\n # numbers\n for p in D[q]:\n D.setdefault(p+q, []).append(p)\n del D[q]",
"def list_primes(number):\n sieve = [True] * (number // 2)\n for i in range(3, int(number ** 0.5) + 1, 2):\n if sieve[i // 2]:\n sieve[i * i // 2::i] = [False] * ((number - i * i - 1) // (2 * i) + 1)\n return [2] + [2 * i + 1 for i in range(1, number // 2) if sieve[i]]",
"def gen_prime():\n\n n = 100\n if n == 2:\n return [2]\n elif n < 2:\n return []\n s = range(3, n + 1, 2)\n mroot = n ** 0.5\n half = (n + 1) / 2 - 1\n i = 0\n m = 3\n while m <= mroot:\n if s[i]:\n j = (m * m - 3) / 2\n s[j] = 0\n while j < half:\n s[j] = 0\n j += m\n i = i + 1\n m = 2 * i + 3\n primes = [2] + [x for x in s if x]\n return (primes[random.randint(1, len(primes) - 1)])",
"def getPrime(bits):\n\twhile(True) :\n\t\t# on continue a tirer des nombres tant que l'on n'a pas trouve de nombre premier\n\t\tp = getrandbits(bits)\n\t\tif(miller_rabin(p,100)) :\n\t\t\treturn p",
"def prime_generator():\n i = 0 # prime numbers counter\n num = 0 # current number\n while True:\n num += 1\n if is_prime(num):\n i += 1\n yield i, num",
"def generarPrimo(self, bits):\n while True:\n p = primes.bigppr(bits)\n if p & 3 == 3:\n return p",
"def ple(self,x):\r\n for i in self.dp: # initial conditions: self.dp = [2,3]\r\n if i > x:\r\n raise StopIteration\r\n yield i\r\n \r\n for i in range(self.lkdp+2,x+1,2): # skip even no's. Future: skip 5's\r\n if not self.firstdiv(i):\r\n self.dp.append(i)\r\n self.lkdp = i\r\n if i in self.sp:\r\n self.sp.remove(i)\r\n yield i\r\n \r\n raise StopIteration\r\n\r\n self.pbe = self.ple # Alternate name for pbe (\"primes less than or equal to)\r",
"def primesfrom2to(max):\n sieve = numpy.ones(max // 3 + (max % 6 == 2), dtype=numpy.bool)\n for i in range(1, int(max ** 0.5) // 3 + 1):\n if sieve[i]:\n k = 3 * i + 1 | 1\n sieve[k * k // 3::2 * k] = False\n sieve[k * (k - 2 * (i & 1) + 4) // 3::2 * k] = False\n return numpy.r_[2, 3, ((3 * numpy.nonzero(sieve)[0][1:] + 1) | 1)]",
"def generateRandomPrime(bits):\n getRandomT = lambda: random.getrandbits(bits) | 1 << bits | 1\n p = getRandomT()\n for i in itertools.count(1):\n if primeChecker(p):\n assert (type(p)==int or type(p)==long), \"Prime generated isn't a prime\"\n return p\n else:\n if i % (bits * 2) == 0:\n p = getRandomT()\n else:\n p += 2 # Add 2 since we are only interested in odd numbers",
"def primes_less(n):\n test_nums = list(range(3, int(floor(sqrt(n))), 2))\n prime_flags = [True] * ((n - 2) // 2)\n for a in test_nums:\n next_div = a**2\n while next_div < n:\n prime_flags[(next_div-3)//2] = False\n next_div += 2*a\n return [2] + [2*i + 3 for i, flag in enumerate(prime_flags) if flag]",
"def prime_generator() -> Iterator[int]:\n\n num = 2\n while True:\n if is_prime(num):\n yield num\n num += 1",
"def sieve(upper=10**5):\n nums = [True] * (upper + 1)\n nums[0] = False\n nums[1] = False\n for i in range(2, upper + 1):\n if not nums[i]: continue\n for j in range(i * 2, upper + 1, i):\n nums[j] = False\n return nums",
"def Pollard_pm1(n, primes, max_B=1000000):\n B = 10\n g = 1\n while B < max_B and g < n:\n a = randint(2, n - 2)\n g = gcd(a, n)\n if g != 1:\n return g\n for p in primes:\n if p >= B:\n break\n pd = 1 # p^d\n while pd * p <= B:\n pd *= p\n a = powmod(a, pd, n)\n g = gcd(a - 1, n)\n if g != 1 and g != n:\n return g\n B *= 2\n return 1",
"def generate_primes():\n # David Eppstein, UC Irvine, 28 Feb 2002\n # Source : http://code.activestate.com/recipes/117119/\n yield 2\n\n D = {} # map composite integers to primes witnessing their compositeness\n for q in count(start=3, step=2):\n if q not in D:\n yield q # not marked composite, must be prime\n D[q*q] = [q] # first multiple of q not already marked\n else:\n for p in D[q]: # move each witness to its next multiple\n D.setdefault(2*p+q,[]).append(p)\n del D[q] # no longer need D[q], free memory",
"def prime_generator(num):\n prime_list = [i for i in range(1,num+1,2) if prime_checker(i)]\n\n if num > 1:\n prime_list.insert(0,2)\n\n return prime_list",
"def primes(n):\n sieve = [True] * n\n for i in xrange(3,int(n**0.5)+1,2):\n if sieve[i]:\n sieve[i*i::2*i]=[False]*((n-i*i-1)/(2*i)+1)\n return [2] + [i for i in xrange(3,n,2) if sieve[i]]",
"def _gen_prime(self, n_bits):\n n = gmpy2.mpz(prng.getrandbits(n_bits))\n return gmpy2.next_prime(n)",
"def generatePrimesSieve(count):\n\tif count < 1:\n\t\treturn None\n\n\tsieve = itertools.count(3, 2)\n\tlastPrime = 2\n\tfor i in xrange(1, count):\n\t\tlastPrime = sieve.next()\n\t\tprint lastPrime\n\t\tsieve = filterPrime(sieve, lastPrime)\n\treturn lastPrime",
"def test_primes_under_10(self):\n self.assertEqual(sieve(10), [2, 3, 5, 7])\n self.assertEqual(sieve(100), [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31,\n 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97])",
"def primes(n):\n sieve = [True] * n\n for i in xrange(3,int(n**0.5)+1,2):\n if sieve[i]:\n sieve[i*i::2*i]=[False]*((n-i*i-1)/(2*i)+1)\n return [2] + [i for i in xrange(3,n,2) if sieve[i]]",
"def primes(n):\n sieve = [True] * n\n for i in xrange(3,int(n**0.5)+1,2):\n if sieve[i]:\n sieve[i*i::2*i]=[False]*((n-i*i-1)/(2*i)+1)\n return [2] + [i for i in xrange(3,n,2) if sieve[i]]",
"def eliminate_non_primes(primes_dict, highest):\r\n for i in range(2, highest):\r\n if primes_dict[i] == True:\r\n loop =1\r\n for j in range(i**2, highest, i*loop):\r\n primes_dict[j]=False\r\n loop = loop+1\r\n \r\n primes = []\r\n for i in range(2, highest):\r\n if primes_dict[i] == True:\r\n primes.append(i)\r\n \r\n return primes",
"def getPrimes(limit): \n a = range(2,int(sqrt(limit)+1))\n isPrime = [True]*limit\n for n in a:\n if isPrime[n]:\n # for all primes, each multiple of prime from prime*prime to the end must not be prime\n for i in xrange(n*n, limit, n): \n isPrime[i] = False\n primes = [i for i in xrange(2,len(isPrime)) if isPrime[i]]\n return primes",
"def primes(n):\n sieve = [True] * n\n for i in range(3, int(n ** 0.5) + 1, 2):\n if sieve[i]:\n sieve[i * i::2 * i] = [False] * int(((n - i * i - 1) // (2 * i) + 1))\n return [2] + [i for i in range(3, n, 2) if sieve[i]]",
"def primes(n):\n sieve = [True] * n\n for i in range(3,int(n**0.5)+1,2):\n if sieve[i]:\n sieve[i*i::2*i]=[False]*int(((n-i*i-1)/(2*i)+1))\n return [2] + [i for i in range(3,n,2) if sieve[i]]",
"def gen_primes():\n # Maps composites to primes witnessing their compositeness.\n # This is memory efficient, as the sieve is not \"run forward\"\n # indefinitely, but only as long as required by the current\n # number being tested.\n\n D = {}\n\n # The running integer that's checked for primeness\n\n q = 2\n\n while True:\n if q not in D:\n # q is a new prime.\n # Yield it and mark its first multiple that isn't\n # already marked in previous iterations\n yield q\n D[q * q] = [q]\n else:\n # q is composite. D[q] is the list of primes that\n # divide it. Since we've reached q, we no longer\n # need it in the map, but we'll mark the next\n # multiples of its witnesses to prepare for larger\n # numbers\n for p in D[q]:\n D.setdefault(p + q, []).append(p)\n del D[q]\n\n q += 1",
"def gen_primes():\n D = defaultdict(list)\n q = 2\n while True:\n if q not in D:\n\n yield q \n D[q * q] = [q]\n else:\n for p in D[q]:\n D[p + q].append(p)\n del D[q]\n q += 1",
"def test_primes_under_1000000(self):\n self.assertEqual(len(sieve(100)), 25)\n self.assertEqual(len(sieve(1000)), 168)\n self.assertEqual(len(sieve(10000)), 1229)\n self.assertEqual(len(sieve(100000)), 9592)\n self.assertEqual(len(sieve(1000000)), 78498)",
"def primes(n):\n sieve = [True] * n\n for i in range(3, int(n**0.5)+1,2):\n if sieve[i]:\n sieve[i*i::2*i]=[False]*((n-i*i-1)/(2*i)+1)\n return [2] + [i for i in range(3,n,2) if sieve[i]]",
"def genPrimes(n):\n assert n>1\n p = gen_eratosthenes()\n prime_list = []\n prime_list.append(next(p))\n while n > prime_list[len(prime_list)-1]: #while input is less than the last term in the prime list\n prime_list.append(next(p)) #adds next term from generator\n if n < prime_list[len(prime_list)-1]: #deletes last term\n del prime_list[len(prime_list)-1]\n #print(prime_list) #for testing only\n return prime_list",
"def eratosthenes_mem(limit):\n if isinstance(limit, (int, float)) and limit == int(limit):\n limit = int(limit)\n else:\n raise ValueError\n primes = [2]\n multiples = [2]\n limit += 1\n for candidate in range(3, limit):\n if candidate not in multiples:\n primes.append(candidate)\n multiples.append(2*candidate)\n for i, m in enumerate(multiples):\n if m <= candidate:\n multiples[i] += primes[i]\n return np.asarray(primes)",
"def make_sieve(upper):\n\n if upper <= 0:\n return []\n\n sieve = [True for i in range(upper + 1)]\n limit = math.floor(math.sqrt(upper))\n sieve[0], sieve[1] = False, False\n\n for i in range(2, limit + 1):\n if sieve[i]:\n for j in range(i * 2, upper + 1, i):\n sieve[j] = False\n\n primes = []\n for num, is_prime in enumerate(sieve):\n if is_prime:\n primes.append(num)\n\n return primes",
"def Primes():\n candidate = 1\n _primes_so_far = [2] # first prime, only even prime\n yield _primes_so_far[-1]\n while True:\n candidate += 2 # check odds only from now on\n for prev in _primes_so_far:\n if prev**2 > candidate:\n yield candidate\n _primes_so_far.append(candidate)\n break\n if not divmod(candidate, prev)[1]: # no remainder!\n break # done looping",
"def test_prime_12(self):\n\t self.assertTrue(prime_generator(12), [2, 3, 5, 7, 11])",
"def croft():\n # Copied from:\n # https://code.google.com/p/pyprimes/source/browse/src/pyprimes.py\n # Implementation is based on erat3 from here:\n # http://stackoverflow.com/q/2211990\n # and this website:\n # http://www.primesdemystified.com/\n # Memory usage increases roughly linearly with the number of primes seen.\n # dict ``roots`` stores an entry x:p for every prime p.\n for p in (2, 3, 5):\n yield p\n roots = {9: 3, 25: 5} # Map d**2 -> d.\n primeroots = frozenset((1, 7, 11, 13, 17, 19, 23, 29))\n selectors = (1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0)\n for q in compress(\n # Iterate over prime candidates 7, 9, 11, 13, ...\n islice(count(7), 0, None, 2),\n # Mask out those that can't possibly be prime.\n cycle(selectors)\n ):\n # Using dict membership testing instead of pop gives a\n # 5-10% speedup over the first three million primes.\n if q in roots:\n p = roots[q]\n del roots[q]\n x = q + 2*p\n while x in roots or (x % 30) not in primeroots:\n x += 2*p\n roots[x] = p\n else:\n roots[q*q] = q\n yield q",
"def find_good_prime(num_bits=512):\n candidate = 1\n\n while not good_prime(candidate):\n candidate = random.getrandbits(num_bits)\n\n return candidate",
"def eratosthenes_sieve(n):\r\n\tnumbers = [True for i in range(n + 1)]\r\n\t\r\n\tp = 2\r\n\twhile (p**2 <= n):\r\n\t\tif numbers[p]:\r\n\t\t\tfor i in range(p**2, n + 1, p):\r\n\t\t\t\tnumbers[i] = False\r\n\t\tp += 1\r\n\t\t\r\n\tprimes = compress(range(2, n + 1),numbers[2:])\r\n\treturn list(primes)",
"def rwh_primes1(n):\n # http://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188\n sieve = [True] * int((n/2))\n for i in range(3,int(n**0.5)+1,2):\n if sieve[int(i/2)]:\n sieve[int(i*i/2)::i] = [False] * int(((n-i*i-1)/(2*i)+1))\n return [2] + [2*i+1 for i in range(1,int(n/2)) if sieve[int(i)]]",
"def sieve(n: int) -> Generator[int, None, None]:\n primes, p = [i for i in range(2, n + 1)], 2\n while p**2 < n:\n for i in primes:\n if i % p == 0 and i != p:\n primes.remove(i)\n p += 1\n yield from primes",
"def primesfrom2to(n):\r\n # http://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188\r\n sieve = np.ones(n/3 + (n%6==2), dtype=np.bool)\r\n sieve[0] = False\r\n for i in xrange(int(n**0.5)/3+1):\r\n if sieve[i]:\r\n k=3*i+1|1\r\n sieve[ ((k*k)/3) ::2*k] = False\r\n sieve[(k*k+4*k-2*k*(i&1))/3::2*k] = False\r\n return np.r_[2,3,((3*np.nonzero(sieve)[0]+1)|1)]",
"def get_prime_array(high):\n\n # Array of pre-generated primes less than high\n primes = []\n\n with open(\"../pre_generated_primes/primes-to-100k.txt\") as f:\n for line in f:\n hundred = [int(i) for i in line.split()]\n primes.extend(hundred)\n\n if (high > 100000):\n with open(\"../pre_generated_primes/primes-to-200k.txt\") as f2:\n for line in f2:\n two_hundred = [int(i) for i in line.split()]\n primes.extend(two_hundred)\n\n if (high > 200000):\n with open(\"../pre_generated_primes/primes-to-300k.txt\") as f:\n for line in f:\n three_hundred = [int(i) for i in line.split()]\n primes.extend(three_hundred)\n\n if (high > 300000):\n with open(\"../pre_generated_primes/primes-to-400k.txt\") as f:\n for line in f:\n four_hundred = [int(i) for i in line.split()]\n primes.extend(four_hundred)\n\n if (high > 400000):\n with open(\"../pre_generated_primes/primes-to-500k.txt\") as f:\n for line in f:\n five_hundred = [int(i) for i in line.split()]\n primes.extend(five_hundred)\n\n for x in reversed(range(0, len(primes))):\n if primes[x] > high:\n primes.pop(x)\n else:\n break\n\n return primes",
"def get_primes_by_limit_number(self, limit_number):\n if int(limit_number) < 2:\n print \"this method needs number >= 2\"\n return []\n ret = []\n prime = self._generate_prime()\n next = prime.next()\n while next <= limit_number:\n ret.append(next)\n next = prime.next()\n return ret",
"def primesfrom2to(n):\n # http://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188\n sieve = np.ones(n/3 + (n%6==2), dtype=np.bool)\n sieve[0] = False\n for i in xrange(int(n**0.5)/3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[ ((k*k)/3) ::2*k] = False\n sieve[(k*k+4*k-2*k*(i&1))/3::2*k] = False\n return np.r_[2,3,((3*np.nonzero(sieve)[0]+1)|1)]",
"def sieve(upto):\n return list(prime_numbers(upto))",
"def primes(n_max: int = 100) -> List[int]:\n if n_max < 2:\n raise ValueError\n\n t = list(range(2, n_max + 1))\n for i in t:\n for j in (k for k in t if k > i):\n if j % i == 0:\n t.remove(j)\n\n return sorted(t)",
"def primes(count):\n\n prime_list = []\n num = 2\n\n while count > 0:\n\n if prime_checker(num):\n prime_list.append(num)\n count -= 1\n num += 1\n\n return prime_list",
"def primes():\n D = {} # map composite integers to primes witnessing their compositeness\n q = 2 # first integer to test for primality\n while True:\n if q not in D:\n yield q # not marked composite, must be prime\n D[q*q] = [q] # first multiple of q not already marked\n else:\n for p in D[q]: # move each witness to its next multiple\n D.setdefault(p+q,[]).append(p)\n del D[q] # no longer need D[q], free memory\n q += 1",
"def primesfrom2to(n):\n # https://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188\n sieve = np.ones(n//3 + (n%6==2), dtype=np.bool)\n sieve[0] = False\n for i in range(int(n**0.5)//3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[ ((k*k)//3) ::2*k] = False\n sieve[(k*k+4*k-2*k*(i&1))//3::2*k] = False\n return np.r_[2,3,((3*np.nonzero(sieve)[0]+1)|1)]",
"def generate_large_prime(bit_size=1024):\n while True:\n p = random.randint(2**(bit_size-1), 2**bit_size)\n if is_prime(p):\n return p",
"def gen_num(lim=10000):\n n = 1\n yield 2\n yield 3\n while 6 * n + 1 <= lim:\n yield 6 * n - 1\n yield 6 * n + 1\n n += 1",
"def primes(n, DEBUG=False):\n\n return [x[0] for x in enumerate(_sieve(n, DEBUG=DEBUG)[0:n+1]) if x[1]]",
"def comprobar_primo(num):\n primo = True\n for i in range(2, num):\n if num%i == 0:\n primo = False\n return primo",
"def primes_below(n):\n L, M = [2], [x for x in range(3, int(n), 2)]\n if n <= 2:\n print('There are no primes below 2')\n return None\n for i in range(3, int(n), 2):\n if M[i // 2 - 1] != 0 and is_prime(i):\n L.append(i)\n for j in range(i, int(n), 2 * i):\n M[j // 2 - 1] = 0\n return L",
"def primes(count):\n\n prime_numbers = [2]\n next_num = 3 \n\n def is_prime(next_num):\n if next_num % 2 == 0:\n return False \n \n for i in range(3, next_num, 2):\n if next_num % i == 0:\n return False \n return True \n\n while count > len(prime_numbers): \n if is_prime(next_num): \n prime_numbers.append(next_num)\n next_num += 1\n\n return prime_numbers",
"def primes1(n):\n sieve = [True] * (n//2)\n for i in range(3,int(n**0.5)+1,2):\n if sieve[i//2]:\n sieve[i*i//2::i] = [False] * ((n-i*i-1)//(2*i)+1)\n return [2] + [2*i+1 for i in range(1,n//2) if sieve[i]]",
"def primes_list(n):\n count = 0\n if n <= 7:\n p_list = [2, 3, 5, 7, 11, 13, 17]\n return p_list[:n]\n else:\n upper_bound = int(n * log(n) + n * log(log(n)))\n return primes(upper_bound)[:n]",
"def main():\n prime = gen_prime(1, 100000)\n print(prime)",
"def sieve(max):\n\tprimes = [False]*max\n\tfor i in range(2, int(math.sqrt(len(primes)))):\n\t\tif primes[i] == False:\n\t\t\tfor j in range(i*i, max, i):\n\t\t\t\tprimes[j] = True\n\tcount = 0\n\tprint(\"Prime numbers under \", max, \":\", sep='')\n\tfor j in range(2, max):\n\t\tif primes[j] == False:\n\t\t\tcount += 1\n\t\t\tif count % 20 == 0:\n\t\t\t\tprint(j)\n\t\t\telse:\n\t\t\t\tprint(j, end='\\t')\n\tprint()"
] | [
"0.74480325",
"0.7377035",
"0.73052454",
"0.72586906",
"0.7133222",
"0.7117662",
"0.7117662",
"0.71107304",
"0.7063913",
"0.70566493",
"0.70468926",
"0.70215964",
"0.697506",
"0.69364065",
"0.69047195",
"0.6880698",
"0.68761224",
"0.6849385",
"0.68255585",
"0.68095154",
"0.68074274",
"0.6780208",
"0.67631376",
"0.6742597",
"0.6725584",
"0.670229",
"0.66511273",
"0.66363466",
"0.6591886",
"0.6555842",
"0.65543365",
"0.6552345",
"0.65418947",
"0.654065",
"0.65340245",
"0.65337515",
"0.65229154",
"0.6522334",
"0.6518083",
"0.65049154",
"0.6504055",
"0.65014774",
"0.6495336",
"0.6484293",
"0.64708596",
"0.64627093",
"0.646043",
"0.64382535",
"0.6429422",
"0.6427964",
"0.6425804",
"0.6424077",
"0.6417274",
"0.6415226",
"0.6400787",
"0.6395306",
"0.6391443",
"0.6390489",
"0.6385073",
"0.636301",
"0.6360134",
"0.6357776",
"0.6357776",
"0.6357363",
"0.63511217",
"0.6345418",
"0.63410395",
"0.6339803",
"0.6325084",
"0.63235015",
"0.6321938",
"0.6301682",
"0.62999123",
"0.62866867",
"0.6279539",
"0.62760067",
"0.6275142",
"0.62715656",
"0.62713194",
"0.62530226",
"0.6246412",
"0.62449294",
"0.62438965",
"0.6240926",
"0.6239803",
"0.62392354",
"0.6236273",
"0.62350583",
"0.6234823",
"0.6221238",
"0.6212223",
"0.62037563",
"0.61988765",
"0.61878616",
"0.6179828",
"0.6177438",
"0.61763555",
"0.61717826",
"0.6166157",
"0.6159594"
] | 0.6985148 | 12 |
return True if the number_str can be truncated from both left and right and always be prime, e.g. 3797 | def is_left_right_truncatable(number_str, prime_str_set):
l = len(number_str)
    # every left- and right-truncation (including the full number) must be prime
for i in range(l):
if number_str[i:] not in prime_str_set or number_str[:l-i] not in prime_str_set:
return False
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_truncatable(number: int):\n\n str_number = str(number)\n index = 0\n\n # Left shift:\n while index < len(str_number):\n if not is_prime(int(str_number[index:])):\n return False\n\n index += 1\n\n # Right shift:\n index = len(str_number)\n while index > 0:\n if not is_prime(int(str_number[:index])):\n return False\n\n index -= 1\n\n return True",
"def is_truncatable(nb):\n nb = str(nb)\n if is_prime(int(nb)):\n for i in range(1, len(nb)):\n if not is_prime(int(nb[i:])) or not is_prime(int(nb[:len(nb)-i])):\n return False\n return True\n else:\n return False",
"def substring_divisible(number):\n string = str(number)\n for offset in xrange(1, len(string)-2):\n substring = string[offset:offset+3]\n # print '%s / %d' % (substring, PRIMES[offset-1])\n if int(substring) % PRIMES[offset-1]:\n return False\n return True",
"def is_prime_by_python(num):\n if num == 2:\n return True\n elif num % 2 == 0 or num <= 1:\n # even or smaller then one\n return False\n else:\n res = True\n partial_num_range = int(num / 4) + 1\n\n for i in range(1, partial_num_range):\n if num % (2 * i + 1) == 0:\n res = False\n break\n return res",
"def is_prime(number):\n #for i in range(2, ceil(sqrt(number))):\n for i in range(2, number):\n if number % i == 0:\n return False\n return True",
"def is_number_palindrome(number, digits, start):\n number = str((number // 10**start) % 10**digits).zfill(digits)\n return is_palindrome(number)",
"def check_number(self):\n digits = self.number\n _sum = 0\n alt = False\n ix = []\n for x in str(digits):\n ix.append(int(x))\n for d in reversed(ix):\n assert 0 <= d <= 9\n if alt:\n d *= 2\n if d > 9:\n d -= 9\n _sum += d\n alt = not alt\n return (_sum % 10) == 0",
"def _is_prime(self, num):\n if num == 2:\n return True\n if num < 2 or num % 2 == 0:\n return False\n for n in range(3, int(num ** 0.5) + 2, 2):\n if num % n == 0:\n return False\n return True",
"def isprime(number):\n\n if number == 1:\n return False\n for i in range(2, int(number**0.5) + 1):\n if number % i == 0:\n return False\n return True",
"def is_prime(number):\n number = int(number)\n\n if number < 2:\n return False\n if number < 4:\n return True\n if number % 2 == 0:\n return False\n for d in range(3, number // 2, 2):\n if number % d == 0:\n return False\n return True",
"def isprime(number: int) -> bool:\n for i in range(2, int(number ** 0.5) + 1):\n if number % i == 0:\n return False\n return True",
"def is_circular_prime(n):\r\n\r\n # pdb.set_trace()\r\n s = str(n)\r\n for i in xrange(len(s)):\r\n if not is_prime(n):\r\n return False\r\n s = s[1:] + s[0]\r\n n = int(s)\r\n\r\n return True",
"def is_prime(number: int):\n\n for index in range(2, (number//2) + 1):\n if number%index == 0:\n return False\n return True",
"def check_number(number):\n digits = str(number)\n if len(digits) != 6:\n return False\n\n double = False\n last = '0'\n for digit in digits:\n if digit < last:\n return False\n\n if digit == last:\n double = True\n\n last = digit\n\n return double",
"def is_prime(num):\n if is_even(num) and num != 2 or num == 1:\n return False\n\n for dd in range(3, int(mt.sqrt(num)) + 1):\n if num % dd == 0:\n return False\n\n return True",
"def istele(number):\n if number[:3] == '140':\n return True\n return False",
"def is_prime(num):\n import math\n\n\n if num % 2 == 0 and num > 2:\n return False\n for i in range(3, int(math.sqrt(num))+1, 2):\n if num % i == 0:\n return False\n return True",
"def get_prime_digits_for_one(a: int) -> bool:\r\n b = a\r\n c = 0\r\n c1 = 0\r\n while b > 0:\r\n c1 += 1\r\n n = b % 10\r\n if isprime(n):\r\n c += 1\r\n b = b // 10\r\n if c == c1:\r\n return True\r\n else:\r\n return False",
"def is_prime(number: int) -> bool:\n\n if number % 2 == 0 and number > 2:\n return False\n return all(number % i for i in range(3, int(math.sqrt(number)) + 1, 2))",
"def is_prime(num):\n if not isinstance(num, int):\n return False\n if num <= 1:\n return False\n if num == 2 or num == 3:\n return True\n if num % 6 in [0, 2, 3, 4]:\n return False\n div_max = int(math.sqrt(num))\n for div in range(5, div_max + 1, 2):\n if num % div == 0:\n return False\n return True",
"def is_prime(number):\n\tif number < 0:\n\t\treturn False\n\tif number < 4:\n\t\treturn True\n\t#start with number 2, iterate up until up to half the number is reached\n\tfor x in range(2, int(number/2)+1):\n\t\tif number%x == 0:\n\t\t\treturn False\n\treturn True",
"def is_prime_number(number_):\n flag = 0\n for values in range(2, number_//2):\n if number_ % values == 0:\n flag += 1\n if flag == 1:\n return True\n else:\n return False",
"def checkPerfectNumber(self, num: int) -> bool:\n if num <= 0:\n return False\n s = 0\n for i in range(1, int(math.sqrt(num) + 1)):\n if i != num:\n res = num % i\n if res == 0:\n s += i\n divisor = num // i\n if divisor != num:\n s += divisor\n if s > num:\n return False\n return s == num",
"def is_prime(number):\n\tif number < 4:\n\t\treturn True\n\t#start with number 2, iterate up until up to half the number is reached\n\tfor x in range(2, int(number/2)+1):\n\t\tif number%x == 0:\n\t\t\treturn False\n\treturn True",
"def is_prime(n):\n return mr_prime(n)",
"def isprime(n=936):\n if n < 3: return False\n for i in range(2, n):\n if n % i == 0:\n return False\n return True",
"def is_prime(number):\n if number <= 1:\n return False\n\n max_element = int(math.ceil(math.sqrt(number)))\n # iterate through all elements from 2 through sqrt(n)\n for element in range(2,max_element + 1):\n if number % element == 0:\n return False\n\n return True",
"def is_prime(number):\n if number <=3:\n return True\n \n for i in range(2, number):\n if number % i == 0:\n return False\n \n return True",
"def is_prime(number):\n if number == 2:\n return True\n\n if number <= 1 or number % 2 == 0:\n return False\n\n # check to see if number has any odd factors\n for x in range(3, int(number ** 0.5) + 1, 2):\n if number % x == 0:\n return False\n return True",
"def is_armstrong_number(number: int) -> bool:\n\n str_number = f\"{number}\"\n return sum(pow(int(x), len(str_number)) for x in str_number) == number",
"def is_prime(num):\n\tif num is 1:\n\t\treturn False\n\tif num % 2 is 0:\n\t\treturn num is 2\n\n\tdivision = 3\n\twhile (division * division) <= num:\n\t\tif num % division is 0:\n\t\t\treturn False\n\t\tdivision += 2\n\treturn True",
"def slug_is_numerical(slug):\r\n try:\r\n float(slug)\r\n except ValueError:\r\n return False\r\n\r\n return True",
"def is_prime(num):\n if num < 2:\n return False\n elif num == 2:\n return True\n\n for i in range(2, int(num**(1/2))+1):\n if num % i == 0:\n return False\n\n return True",
"def is_prime(num: int) -> bool:\n if num < 2:\n return False\n low_primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73,\n 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163,\n 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251,\n 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349,\n 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443,\n 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557,\n 563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647,\n 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757,\n 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863,\n 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983,\n 991, 997]\n if num in low_primes:\n return True\n for prime in low_primes:\n if num % prime == 0:\n return False\n return rabin_miller(num)",
"def is_prime(self):\n pass",
"def rotate(num):\n\tflag, num, length = (1, str(num), len(str(num)))\n\n\tfor x in xrange(0, length):\n\t\tif x < length:\n\t\t\tnew_num = int(num[x+1:] + num[0:x+1])\n\t\t\tif is_prime(new_num):\n\t\t\t\tflag *= 2\n\t\t\telse:\n\t\t\t\tflag = 0\n\n\treturn(False, True)[flag >= 2]",
"def is_issn(val):\n try:\n val = val.replace(\"-\", \"\").replace(\" \", \"\").upper()\n if len(val) != 8:\n return False\n r = sum([(8 - i) * (_convert_x_to_10(x)) for i, x in enumerate(val)])\n return not (r % 11)\n except ValueError:\n return False",
"def is_armstrong_number(number: int) -> bool:\n result = 0\n num_str = str(number)\n for i in num_str:\n result += int(i) ** len(num_str)\n return result == number",
"def is_emirp(n) -> bool:\r\n if not is_prime(n):\r\n return False\r\n if not is_palindromic_number(n):\r\n return is_prime(int(str(n)[::-1]))\r\n return False",
"def is_prime(num: int) -> bool:\n return factorial(num - 1) % num != 0",
"def is_prime(num):\n # 2 is prime; exclude\n if num == 2: \n return True\n \n # exclude all other even numbers and numbers less than 2\n if num % 2 == 0 or num < 2:\n return False\n \n # Only need to count up to the the square root of num\n sqrt = int(num ** 0.5 +1) # int rounds down; correct by +1\n \n # Loop through all odd numbers\n for i in range(3, sqrt, 2):\n if num % i == 0:\n return False\n return True",
"def concats_divisible_by_divisors(num_string: str) -> bool:\n \n # check if all concatenated numbers are divisible by divisors in DIVISORS\n for i in range(divisor_count):\n # form number by concatenating digits of num_string\n indices = DIGIT_INDICES[i]\n concat_num = int(num_string[indices[0]-1:indices[1]])\n \n # check if concatenated number is divisible by its divisor in DIVISORS\n if concat_num % DIVISORS[i] != 0:\n return False\n \n return True",
"def good_prime(p):\n return p % 4 == 3 and probablyPrime(p, accuracy=100)",
"def isprime(n):\r\n\treturn is_prime(n)",
"def isprime(n):\n\treturn is_prime(n)",
"def prime_checker(num):\n if num <= 0:\n return \"Error: num must be a positive nonzero integer\"\n elif num <= 3:\n return num > 1\n elif num % 2 == 0 or num % 3 == 0:\n return False\n else:\n k = 5\n while k * k < num:\n if (num % k == 0) or (num % (k+2) == 0):\n return False\n k += 6\n return True",
"def is_prime(num):\n for i in range(2, num):\n if num % i == 0:\n return False\n return True",
"def is_prime(n):\n if n <= 1:\n return False\n if n < 4:\n return True\n if n % 2 == 0:\n return False\n if n < 9:\n return True\n if n % 3 == 0:\n return False\n\n limit = int(math.floor(math.sqrt(n)))\n i = 5\n while i <= limit:\n if n % i == 0:\n return False\n if n % (i + 2) == 0:\n return False\n i += 6\n return True",
"def is_prime(number):\n if number == 0 or number == 1:\n return False\n\n isprime = True\n for test in range(2, int(math.sqrt(number) + 1)): # +1 since we have to test up to the square root value\n if number % test == 0:\n isprime = False\n break\n return isprime",
"def is_prime(number):\n\t\n\tif number < 2: return False\n\telif number == 2: return True\n\telif number % 2 == 0: return False\n\telse:\n\t\tfor x in range(2, number):\n\t\t\tif number % x == 0:\n\t\t\t\treturn False\n\t\telse:\n\t\t\treturn True",
"def is_prime(num):\n\tsquare_root = int(math.ceil(math.sqrt(num)))\n\tfor n in range(2, square_root+1):\n\t\tif num % n == 0:\n\t\t\tif num != n:\n\t\t\t\treturn False\n\n\treturn True",
"def isprime(checknumber):\n isprime = 0\n if checknumber % 2 == 0:\n if checknumber != 2:\n return False\n else:\n x = 3\n while x <= int(math.sqrt(checknumber)):\n if checknumber % x == 0:\n return False\n x += 2\n return True",
"def is_nine_pandigital(number):\n digits = str(number)\n return bool(len(digits) == len(ALL_NINE) and set(digits) == ALL_NINE)",
"def is_prime(num):\n if num < 2:\n return False\n\n for i in range(2, num):\n if num % i == 0:\n return True",
"def is_prime(n):\n k = 2\n while n % k != 0:\n k += 1\n if k < n:\n return False\n else:\n return True",
"def is_prime(num):\n\n if num == 2:\n return True\n for i in range(2, num):\n if num % i == 0:\n return False\n return True",
"def is_prime(num):\n for x in range(2, num + 1):\n if num % x == 0:\n return False\n return True",
"def is_prime(num1):\n num2 = 2\n while num2 < num1:\n if num1 % num2 == 0:\n return False\n num2 += 1\n return True",
"def test_if_it_includes_a_number_if_the_number_is(self):\n self.assertNotIn(16, prime_numbers(16))",
"def is_superprime(x: int) -> bool:\n if x <= 0:\n return False\n\n while x:\n if is_prime(x) == False:\n return False\n x //= 10\n return True",
"def is_prime(num):\n if num == 0 or num == 1:\n return False\n for x in range(2, num):\n if num % x == 0:\n return False\n else:\n return True",
"def is_simple_number(x: int):\n divisor = 2\n while divisor < x:\n if x % divisor == 0:\n return False\n divisor += 1\n return True",
"def _is_safe_size(n):\n n = int(n)\n\n if n == 0:\n return True\n\n # Divide by 3 until you can't, then by 5 until you can't\n for c in (3, 5):\n while n % c == 0:\n n //= c\n\n # Return True if the remainder is a power of 2\n return not n & (n-1)",
"def prime_with(x, s):\n for i in s:\n if x % i == 0:\n return False\n return True",
"def is_prime(my_number):\n\n # let's assume the number is prime; we'll revise that\n prime = True\n\n # check all integers from 2 to my_number, not included\n # Or, actually, to half of my_number. No number will have a factor greater than half of itself.\n # For example, if my_number equals to 24, it only makes sense to check numbers up to 12\n for divisor in range(2, my_number//2+1):\n\n # calculate the remainder\n remainder = my_number % divisor\n\n # display the number, divisor and result (comment that out for faster computation)\n print(f\"{my_number}%{divisor} = {remainder}\")\n\n if remainder == 0:\n # if so, the divisor is a factor of my_number, so my_number is not prime\n prime = False\n break # one such number is enough - we don't need to check further\n\n # the return part: give back a True for a prime number, False for a composite number\n if prime:\n return True\n else:\n return False",
"def is_prime(num):\r\n if num == 0 or num == 1:\r\n return False\r\n for i in range(2, num):\r\n if num % i == 0:\r\n return False\r\n else:\r\n return True",
"def is_prime(num):\n for n in range(2, num):\n if num % n == 0:\n return False\n\n else:\n return True",
"def check_if_armstrong_number(number):\n sum = 0\n number_as_string = str(number)\n digits_number = len(number_as_string)\n for character in number_as_string:\n sum += int(character) ** digits_number\n\n return sum == number",
"def isNumeric(string, needHexPrefix):\n return (True)",
"def is_desc(x):\n while x > 9:\n if x % 10 > x // 10 % 10:\n return False\n x = x // 10\n return True",
"def is_prime(num):\n\n assert num >= 0, \"Num should be a positive integer!\"\n\n if num < 2:\n return False\n\n if num == 2:\n return True\n\n if num % 2 == 0:\n return False\n\n n = 3\n while n * n <= num:\n if num % n == 0:\n return False\n n += 2\n\n return True",
"def isprime(x):\n if x <= 1: return False \n if x % 2 == 0: return x == 2\n for k in range(3, int(sqrt(x))+1, 2): \n if x % k == 0: return False\n return True",
"def isnum(self, x):\n\n return x in '1234567890.-'",
"def primenumber(x):\n if x >= 2:\n for y in range(2,x):\n if not (x % y):\n return False\n else:\n return False\n return True",
"def isprime(n):\n\n if n % 2 == 0:\n return False\n\n # else take square root and iterate over all uneven (step 2) numbers\n sqrt_n = int(math.floor(math.sqrt(n)))\n for i in range(3, sqrt_n + 1, 2):\n if n % i == 0:\n return False\n\n return True",
"def is_valid(n):\n\tif type(n) == int:\n\t\tn = str(n)\n\tfor index, c in enumerate(n):\n\t\tif index == 0:\n\t\t\tcontinue\n\t\tif n[index - 1] > n[index]:\n\t\t\treturn False\n\treturn True",
"def isprime(n):\n if n == 2: return True\n if n == 3: return True\n if n % 2 == 0: return False\n if n % 3 == 0: return False\n i = 5\n w = 2\n while i * i <= n:\n if n % i == 0:\n return False\n i += w\n w = 6 - w\n return True",
"def is_valid(isbn: str) -> bool:\n no_dashes = isbn.replace('-', '')\n\n nums = []\n\n valid_num = False\n\n if no_dashes:\n for char in no_dashes:\n if char == 'X':\n nums.append(10)\n elif char != 'X' and char.isalpha() or len(no_dashes) < 10 or len(no_dashes) > 10:\n break\n elif 'X' in no_dashes and no_dashes[-1] != 'X':\n break\n else:\n nums.append(int(char))\n\n char = 0\n value = 0\n\n if nums and len(nums) == 10:\n for n in range(10, 0, -1):\n value += (n * nums[char])\n char += 1\n valid_num = (value % 11 == 0)\n\n return valid_num",
"def is_prime(number, divisor):\r\n \r\n #Set recursion to terminate if the divisor is more than half palindromic number\r\n if divisor <= number**(1/2):\r\n if number%divisor == 0:\r\n return reference(number+1) #If a palindromic number is not a prime, find the\r\n #next palindrome.\r\n else:\r\n return is_prime(number, divisor + 1) #If number is not divisible by the divisor,\r\n #increase the divisor and recurse.\r\n #If a palindromic number is prime, print the number and fine the next palindromic\r\n #number.\r\n else:\r\n print(number)\r\n return reference(number+1)",
"def isPalendrome(number):\n\t\n\tnum = str(number)\n\ti \t= 0\n\tj \t= len(num) - 1\n\tmid = len(num) // 2\n\n\t#print(mid)\n\t\n\t# While i and j are not in the middle\n\twhile( i != mid):\n\t\t#print(i,j,sep=\"\\t\")\n\t\t#print(num[i],num[j], sep=\"\\t\")\n\t\tif(num[i] != num[j]):\n\t\t\treturn(False)\n\t\telse:\n\t\t\ti = i + 1\n\t\t\tj = j - 1\n\n\treturn(True)",
"def is_prime(n):\n \n if n < 2:\n return False\n elif n == 2 or n == 3 or n == 5:\n return True\n elif n % 2 == 0 or n % 3 == 0 or n % 5 == 0:\n return False\n \n i = 6\n sqrt_n = int(math.ceil(math.sqrt(n)))\n \n while i <= sqrt_n + 1:\n if n % (i - 1) == 0 or n % (i + 1) == 0:\n return False\n i += 6\n return True",
"def meets_criteria2(num):\n output = True\n if not exactly_two_same_digits(num):\n output = False\n if not digits_increase(num):\n output = False\n return output",
"def is_palindrome(a):\n\tmax = a\n\tmin = 0\n\twhile max > 0:\n\t\tmin = (min * 10 + max % 10)\n\t\tmax /= 10\n\treturn min == a",
"def isNumber(string):\r\n for char in string:\r\n charNum = ord(char)\r\n if (charNum < 48 or charNum > 57):\r\n return False\r\n return True",
"def is_prime(n):\n\tb = 2\n\twhile b <= math.sqrt(n):\n\t\tif n % b == 0:\n\t\t\treturn False\n\t\tb += 1\n\treturn True",
"def isprime(n):\n if n % 2 == 0:return False\n return all(n % i for i in range(3, int(n**0.5) + 1, 2))",
"def test_prime(n):\n if SIEVE[n]:\n return True\n else:\n return False",
"def check_hole_number(n):\n if n // 10 == 0:\n return True\n\n # The \\ symbol just allows me to continue this line of code on a new line.\n # It's only included to make sure all the code stays on the page\n return ((n // 10) % 10) < (n % 10) and ((n // 10) % 10) < ((n // 100) % 10) \\\n and check_hole_number(n // 100)",
"def isprime(n):\n if n == 2:\n return True\n if n == 3:\n return True\n if n % 2 == 0:\n return False\n if n % 3 == 0:\n return False\n\n i = 5\n w = 2\n\n while i ** 2 <= n:\n if n % i == 0:\n return False\n\n i += w\n w = 6 - w\n\n return True",
"def isReversible(n): \n if n % 10 == 0:\n return False\n s = n + reverseNum(n)\n while s > 0:\n digit = s % 10\n if not digit in [1,3,5,7,9]:\n return False\n s //= 10\n return True",
"def prime_checker(num):\n\n assert num > 0\n\n if num < 2:\n return False\n\n if num == 2:\n return True\n\n if num % 2 == 0:\n return False\n\n n = 3\n\n while n * n <= num:\n\n if num % n == 0:\n return False\n\n else:\n num += 2\n\n return True",
"def test_with_10_prime_numbers(self):\n numbers = [3,5,7,11,13,17,19,23,29,31]\n for number in numbers:\n self.assertFalse(has_divisors(number, int(math.sqrt(number) // 1) + 1), \"Number {} is a prime number.\".format(number))",
"def isprime(n):\n if n < 2:\n return False\n if n == 2:\n return True\n if n == 3:\n return True\n if n % 2 == 0:\n return False\n if n % 3 == 0:\n return False\n i = 5\n w = 2\n\n while i * i <= n:\n if n % i == 0:\n return False\n i += w\n w = 6 - w\n return True",
"def is_key(number):\n res = False\n if is_integer(number):\n if int(number) > 0:\n res = True\n return res",
"def is_hilbert_number(n):\n return n > 0 and n % 4 == 1",
"def is_prime(n):\r\n if n in (2, 3, 5, 7, 11, 13, 17, 19): return(True)\r\n if (n<=1 or n%2==0 or n%3==0): return(False)\r\n # determine upper limit of test range =>\r\n ulimit = (int(math.ceil(math.sqrt(n)))+1)\r\n return(not any(n%k==0 for k in range(3, ulimit, 2)))",
"def isprime(n):\n if n == 1:\n return False\n if n == 2:\n return True\n if n == 3:\n return True\n if n % 2 == 0:\n return False\n if n % 3 == 0:\n return False\n\n i = 5\n w = 2\n\n while i * i <= n:\n if n % i == 0:\n return False\n\n i += w\n w = 6 - w\n\n return True",
"def isprime(n):\n if n == 2:\n return True\n if n == 3:\n return True\n if n % 2 == 0:\n return False\n if n % 3 == 0:\n return False\n\n i = 5\n w = 2\n\n while i * i <= n:\n if n % i == 0:\n return False\n\n i += w\n w = 6 - w\n\n return True",
"def verify(isbn):\n\n isbn = isbn.replace(\"-\", \"\")\n if not verify_format(isbn):\n return False\n\n isbn_sum = 0\n for digit, i in zip(isbn, range(10, 0, -1)):\n if digit == \"X\":\n isbn_sum += 10 * i\n else:\n isbn_sum += int(digit) * i\n\n return isbn_sum % 11 == 0",
"def isprime(n):\n # make sure n is a positive integer\n n = abs(int(n))\n # 0 and 1 are not primes\n if n < 2:\n return False\n # 2 is the only even prime number\n if n == 2:\n return True\n # all other even numbers are not primes\n if not n & 1:\n return False\n # range starts with 3 and only needs to go up the squareroot of n\n # for all odd numbers\n for x in range(3, int(int(n ** 0.5) ** 0.5) + 1, 2):\n if n % x == 0:\n return False\n return True"
] | [
"0.8172049",
"0.72789836",
"0.69700235",
"0.6308689",
"0.61999017",
"0.6155211",
"0.6115153",
"0.6111989",
"0.6109884",
"0.6073659",
"0.6044184",
"0.6039048",
"0.60375977",
"0.60341597",
"0.60208774",
"0.6020748",
"0.601937",
"0.60004914",
"0.5998233",
"0.5982962",
"0.5968794",
"0.5953393",
"0.5939892",
"0.5923705",
"0.59115493",
"0.5902841",
"0.58870685",
"0.58655393",
"0.58621526",
"0.5861683",
"0.58558744",
"0.5844468",
"0.5837657",
"0.5822066",
"0.58186305",
"0.5818204",
"0.5798421",
"0.5765915",
"0.57650745",
"0.5750868",
"0.5749601",
"0.5749177",
"0.57447547",
"0.5744117",
"0.57357854",
"0.5716475",
"0.57086897",
"0.5703048",
"0.5688915",
"0.5687471",
"0.5677576",
"0.56609476",
"0.5658205",
"0.56555724",
"0.56435925",
"0.56358886",
"0.5633941",
"0.56318796",
"0.5626376",
"0.56179655",
"0.561599",
"0.561217",
"0.5606507",
"0.5603754",
"0.559233",
"0.55897737",
"0.5589365",
"0.55759716",
"0.55757225",
"0.556983",
"0.5567095",
"0.5555472",
"0.55532724",
"0.5552689",
"0.5541587",
"0.5538141",
"0.55270785",
"0.5526173",
"0.55249065",
"0.5513289",
"0.5507124",
"0.5502616",
"0.5500747",
"0.549334",
"0.54885334",
"0.5486117",
"0.5476941",
"0.5476427",
"0.5474347",
"0.546908",
"0.5468135",
"0.54652864",
"0.5464619",
"0.5463837",
"0.5461316",
"0.5459574",
"0.54401064",
"0.5439637",
"0.5435493",
"0.5434779"
] | 0.7736553 | 1 |
Determine fixed modifications in case the reference shift is not at zero. Needs localization. | def determine_fixed_mods_nonzero(reference, locmod_df, data):
utils.internal('Localizations for %s: %s', reference, locmod_df.at[reference, 'localization'])
loc = get_fix_mod_from_l10n(reference, locmod_df)
label = reference
data_dict = data.ms_stats().copy()
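    # if the reference shift yields no localization-based fixed mod, fall back to the most populous remaining shift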
while loc is None:
del data_dict[label]
label = max(data_dict, key=lambda k: data_dict[k][1])
loc = get_fix_mod_from_l10n(label, locmod_df)
logger.debug('No luck. Trying %s. Got %s', label, loc)
if not data_dict:
break
return loc | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def determine_fixed_mods_zero(aastat_result, data, params_dict):\n fix_mod_zero_thresh = params_dict['fix_mod_zero_thresh']\n min_fix_mod_pep_count_factor = params_dict['min_fix_mod_pep_count_factor']\n\n fix_mod_dict = {}\n reference = utils.mass_format(0)\n aa_rel = aastat_result[reference][2]\n utils.internal('aa_rel:\\n%s', aa_rel)\n candidates = aa_rel[aa_rel < fix_mod_zero_thresh].index\n logger.debug('Fixed mod candidates: %s', candidates)\n for i in candidates:\n candidate_label = get_fixed_mod_raw(i, data, params_dict)\n if candidate_label != reference:\n # number of peptides with `i` at shift `candidate label` must be higher than ...\n count_cand = data.peptides(candidate_label).str.contains(i).sum()\n # number of peptides with `i` at shift `reference` by a factor of `min_fix_mod_pep_count_factor`\n count_ref = data.peptides(reference).str.contains(i).sum()\n # peptide count at candidate shift over # of peptides at reference\n est_ratio = count_cand / data.ms_stats()[reference][1]\n logger.debug('Peptides with %s: ~%d at %s, ~%d at %s. Estimated pct: %f',\n i, count_ref, reference, count_cand, candidate_label, est_ratio)\n if aastat_result[candidate_label][2][i] > fix_mod_zero_thresh and (\n est_ratio * 100 > fix_mod_zero_thresh * min_fix_mod_pep_count_factor):\n fix_mod_dict[i] = candidate_label\n else:\n logger.debug('Could not find %s anywhere. Can\\'t fix.', i)\n else:\n logger.debug('Reference shift is the best for %s.', i)\n return fix_mod_dict",
"def change_nochange(reference_dataframe, allow_offset=0):\r\n\r\n def changed(x, default=False, offset=0):\r\n if len(x) == 1:\r\n return default\r\n elif x[0] == (x[1]-offset):\r\n return False\r\n else:\r\n return True\r\n\r\n def valid_matches(df, shift, mask):\r\n return df.RefChg & \\\r\n df.MapChg.shift(periods=shift, fill_value=False) & \\\r\n mask & \\\r\n mask.shift(periods=shift, fill_value=False)\r\n\r\n def get_change_window(series, index, offset):\r\n window = [index - offset, index + offset + 1]\r\n for w, s in zip([[0, 1], [1, 0]], [[0, offset], [offset, 0]]):\r\n slc0 = slice(*window)\r\n slc1 = slice(*[window[i] + s[i] for i in range(len(window))])\r\n while series[slc1].sum() > series[slc0].sum():\r\n window = [window[i] + w[i] for i in range(len(window))]\r\n slc0 = slice(*window)\r\n slc1 = slice(*[window[i] + s[i] for i in range(len(window))])\r\n return slice(*window)\r\n\r\n df = reference_dataframe.copy()\r\n df = df.sort_values(['plotid', 'image_year']).reset_index()\r\n\r\n # Rolling window to find changes in land cover class, plot id, or jumps in year\r\n ref_chg = df.Reference.rolling(2, min_periods=1).apply(\r\n lambda x: changed(x), raw=True).astype(np.bool)\r\n map_chg = df.LC_Primary.rolling(2, min_periods=1).apply(\r\n lambda x: changed(x), raw=True).astype(np.bool)\r\n plt_chg = df.plotid.rolling(2, min_periods=1).apply(\r\n lambda x: changed(x, default=True), raw=True).to_numpy(dtype=np.bool)\r\n year_chg_not_one = df.image_year.rolling(2, min_periods=1).apply(\r\n lambda x: changed(x, offset=1), raw=True).to_numpy(dtype=np.bool)\r\n\r\n # Potentially 'valid' data points for change/no-change are defined as follows:\r\n # a) The 'plotid' did not change (the initial observations cannot be a change)\r\n # b) The change in 'image_year' cannot be more than one (missing years are unknowns)\r\n # c) The current and previous reference class cannot be a 0 (invalid value)\r\n\r\n df.loc[:, 'Valid'] = ~plt_chg & ~year_chg_not_one & ~(df.Reference.values == 0)\r\n df.loc[1:, 'Valid'] = df.Valid.values[1:] & ~(df.Reference.values[:-1] == 0)\r\n\r\n # ---- Initialize new columns ---- #\r\n\r\n df.loc[:, 'RefChg'] = ref_chg & df['Valid'].values # Valid reference changes\r\n df.loc[:, 'MapChg'] = map_chg & df['Valid'].values # Valid map changes, not shifted yet\r\n\r\n df.loc[:, 'MapChgYear'] = df['image_year'] * df['MapChg'] # Year of map change or zero\r\n\r\n # There will be some invalid entries here, but they will be filtered out later\r\n df['RefChgFromTo'] = (df.Reference.astype(np.int16) * 100) + df.Reference\r\n df.loc[1:, 'RefChgFromTo'] = (df.Reference[:-1].astype(np.int16).values * 100) + df.Reference[1:].values\r\n df['MapChgFromTo'] = (df.LC_Primary.astype(np.int16) * 100) + df.LC_Primary\r\n df.loc[1:, 'MapChgFromTo'] = (df.LC_Primary[:-1].astype(np.int16).values * 100) + df.LC_Primary[1:].values\r\n\r\n mutable = df.Valid.copy() # Track which things are OK to change\r\n\r\n # ---- End of initialization ---- #\r\n\r\n # Find map changes that can be matched to those in the reference data set in other years, within tolerance\r\n if allow_offset:\r\n print('Adjusting changes...')\r\n change_indices = df[df.MapChg.values].index\r\n for change_index in change_indices:\r\n mask = df.plotid == df.loc[change_index, 'plotid'] # Only consider the same plotid\r\n change_compare = []\r\n window = get_change_window(df.MapChg | df.RefChg, change_index, allow_offset)\r\n for shift in range(-allow_offset, allow_offset + 1):\r\n change_compare.append((valid_matches(df, 
shift, mutable & mask)[window].sum(), shift))\r\n # Sort by decreasing total matches, then increasing shift amount\r\n change_compare.sort(key=lambda x: (-x[0], abs(x[1])))\r\n for changes in change_compare:\r\n n_changes, offset = changes\r\n if n_changes:\r\n matches = valid_matches(df, offset, mutable & mask)\r\n # Shift will only affect valid matches, or where the valid matches started from, for that window\r\n shift_mask = (matches | matches.shift(periods=-offset, fill_value=False)) & \\\r\n df.index.isin(df[window].index)\r\n # Update MapChg, MapChgYear, MapChgFromTo\r\n df.loc[shift_mask, 'MapChg'] = \\\r\n (df.MapChg & shift_mask).shift(\r\n periods=offset, fill_value=False)[shift_mask].values\r\n df.loc[shift_mask, 'MapChgYear'] = \\\r\n (df.MapChgYear * shift_mask.astype(np.int16)).shift(\r\n periods=offset, fill_value=0)[shift_mask].values\r\n df.loc[shift_mask, 'MapChgFromTo'] = \\\r\n (df.MapChgFromTo * shift_mask.astype(np.int16)).shift(\r\n periods=offset, fill_value=101)[shift_mask].values\r\n # These matches will not be changed again\r\n mutable[matches & df.index.isin(df[window].index)] = False\r\n\r\n # Fixing the change codes after moving stuff around above\r\n print('Adjusting change codes...')\r\n for i in df[df.MapChg.values].index:\r\n need_new_lc = True\r\n new_lc = 0\r\n for j in range(i, max(df.index) + 1):\r\n if plt_chg[j]:\r\n break\r\n # If we've just jumped years, we don't know the LC\r\n if year_chg_not_one[j]:\r\n need_new_lc = True\r\n # If we need LC, take it from LC_Primary if nonzero\r\n if need_new_lc and df.loc[j, 'LC_Primary']:\r\n new_lc = df.loc[j, 'LC_Primary']\r\n need_new_lc = False\r\n # If there's been a change, take the new LC from the change code\r\n if df.loc[j, 'MapChg']:\r\n new_lc = df.loc[j, 'MapChgFromTo'] % 10\r\n need_new_lc = False\r\n # Update non-change locations with LC code if possible.\r\n if (not need_new_lc) and (not df.loc[j, 'MapChg']) and (df.loc[j, 'LC_Primary']):\r\n df.loc[j, 'MapChgFromTo'] = (new_lc * 100) + new_lc\r\n\r\n # Check for leapfrogging. The code does not prevent this.\r\n print('Final checks...')\r\n for plot in np.unique(df[df.MapChg.values].plotid):\r\n masked_arr = df[(df.plotid == plot) & (df.MapChgYear > 0)].MapChgYear.values\r\n if not all(masked_arr[i] <= masked_arr[i + 1] for i in range(len(masked_arr) - 1)):\r\n raise Exception('Warning! Leapfrog change year in plot: {}'.format(plot))\r\n\r\n # Switch from True/False values to strings for clarity\r\n chg = {True: 'Chg', False: 'NoChg'}\r\n df['RefChg'] = df.RefChg.apply(lambda x: chg[x])\r\n df['MapChg'] = df.MapChg.apply(lambda x: chg[x])\r\n\r\n # Get rid of the invalid data points, those don't count for change or no-change.\r\n df.drop(df[~df.Valid].index, inplace=True)\r\n\r\n return df",
"def fixed(self):\n return self.f_fixed().m_fixed()",
"def mod_mask(self):\n # Check the *_masq values\n self.__log.debug(\"Checking the *_masq arrays\")\n # Retrieve the kid boxes\n masq_names = np.unique([\"{}_masq\".format(item[1]) for item in self.list_detector])\n self.__check_attributes(masq_names, read_missing=False)\n # Check that they are all the same\n warnings.warn(\"Temporary fix to int8\")\n masqs = [getattr(self, masq).astype(np.int8) for masq in masq_names]\n\n if np.any(np.std(masqs, axis=0) != 0):\n self.__log.error(\"*_masq is varying -- Please check : {}\".format(pprint_list(masq_names, \"_masq\")))\n\n # AB private comm) main_flag should be the bitwise_or of all boxes\n # Well not exactly....\n # cast into 8 bit, is more than enough, only 3 bits used anyway...\n masq = np.bitwise_or.reduce(masqs, axis=0).astype(np.int8)\n\n # AB (#CONCERTO_DAQ January 11 13:02)\n # _flag_balayage_en_cours & _flag_blanking_synthe\n # Ainsi on aura la modulation en bit0 et 1 et le flag blanking en bit\n # AB (#CONCERTO_DAQ February 11 11:07)\n # bit 1 & 2 code the modulation as a signed integer -1 0 1 : 11 00 01 ie 3 0 1\n # bit 3 is a blanking bit, which does not exist for KISS, but should not be taken into account for CONCERTO\n\n # Thus as a temporary fix, let's clear the 3rd bit, actually a bad idea...\n # self.__log.warning(\"Temporary fix : clearing the 3rd bit of masq\")\n # masq = masq & ~(1 << 2)\n\n return masq",
"def pre_modify(self):\n return 0",
"def _only_fixed(o, d):\n if d[\"fixed\"]:\n return (\"value\", \"fixed\")\n else:\n return (\"fixed\",)",
"def test_correct_backward_order1(self):\r\n coeffs, shifts = finite_diff_coeffs(1, 1, \"backward\")\r\n assert np.allclose(coeffs, [1, -1])\r\n assert np.allclose(shifts, [0, -1])",
"def _fixed_indicies(self):\n fixed_inds = self.constraints == 'fixed'\n return fixed_inds",
"def test_correct_forward_order1(self):\r\n coeffs, shifts = finite_diff_coeffs(1, 1, \"forward\")\r\n assert np.allclose(coeffs, [-1, 1])\r\n assert np.allclose(shifts, [0, 1])",
"def check_fixedblock(self):\n print('This will read the fixed block then display changes as they')\n print('occur. Typically the most common change is the incrementing')\n print('of the data pointer, which happens whenever readings are saved')\n print('to the station memory. For example, if the logging interval')\n print('is set to 5 minutes, the fixed block should change at least')\n print('every 5 minutes.')\n raw_fixed = self.station.get_raw_fixed_block()\n while True:\n new_fixed = self.station.get_raw_fixed_block(unbuffered=True)\n for ptr in range(len(new_fixed)):\n if new_fixed[ptr] != raw_fixed[ptr]:\n print(datetime.datetime.now().strftime('%H:%M:%S'), end=' ')\n print(' %04x (%d) %02x -> %02x' % (\n ptr, ptr, raw_fixed[ptr], new_fixed[ptr]))\n raw_fixed = new_fixed\n time.sleep(0.5)",
"def test_correct_forward_order2(self):\r\n coeffs, shifts = finite_diff_coeffs(1, 2, \"forward\")\r\n assert np.allclose(coeffs, [-1.5, 2, -0.5])\r\n assert np.allclose(shifts, [0, 1, 2])",
"def test_shift_ruptures_no_shift(midday):\n shift_mask, shift_amounts = time.shifts_ruptures(\n midday, midday\n )\n assert not shift_mask.any()\n assert_series_equal(\n shift_amounts,\n pd.Series(0, index=midday.index, dtype='int64'),\n check_names=False\n )",
"def test_fix_mask(self):\n fixable_mask = mapreader.Map(os.path.join(tests.TEST_DATA_PATH, 'segmentations', 'test_fixable_mask.map'))\n self.assertFalse(fixable_mask.is_mask)\n fixable_mask.fix_mask()\n self.assertTrue(fixable_mask.is_mask)",
"def find_shift(ref, img):\n im0 = prepare(ref)\n im1 = prepare(img)\n shift, error, diffphase = register_translation(im0, im1, 100)\n\n return shift",
"def test_correct_center_order2(self):\r\n coeffs, shifts = finite_diff_coeffs(1, 2, \"center\")\r\n assert np.allclose(coeffs, [-0.5, 0.5])\r\n assert np.allclose(shifts, [-1, 1])",
"def _set_fixed(o, d):\n if d:\n o.fix()\n else:\n o.unfix()",
"def test_correct_second_derivative_center_order4(self):\r\n coeffs, shifts = finite_diff_coeffs(2, 4, \"center\")\r\n assert np.allclose(coeffs, [-2.5, 4 / 3, 4 / 3, -1 / 12, -1 / 12])\r\n assert np.allclose(shifts, [0, -1, 1, -2, 2])",
"def getTranslation(fracs):\n \n \n \n # Determine whether the shift needs to be from inf to 0 \n # or from -inf to 0\n \n # Along all x fractionals\n if abs(max(fracs[0]))>=abs(min(fracs[0])):\n minX = min([x for x in fracs[0] if x>0])\n else:\n minX = min([x for x in fracs[0] if x<0])\n \n # Along all y fractionals\n if abs(max(fracs[1]))>=abs(min(fracs[1])):\n minY = min([x for x in fracs[1] if x>0])\n else:\n minY = min([x for x in fracs[1] if x<0])\n \n # Along all z fractionals\n # Need to consider all atoms lying in a single\n # plane (e.g. graphene), thus the final \"else\"\n # statement\n if abs(max(fracs[2]))>abs(min(fracs[2])):\n minZ = min([x for x in fracs[2] if x>0])\n elif abs(max(fracs[2]))<abs(min(fracs[2])):\n minZ = min([x for x in fracs[2] if x<0])\n else:\n minZ = max(fracs[2])\n\n shift_vector = np.array([minX,minY,minZ])\n \n return(shift_vector)",
"def test_explicit_fixed_effects_without_mask(tmp_path):\n shapes, rk = ((7, 8, 7, 15), (7, 8, 7, 16)), 3\n _, fmri_data, design_matrices =\\\n write_fake_fmri_data_and_design(shapes, rk, file_path=tmp_path)\n contrast = np.eye(rk)[1]\n\n # session 1\n multi_session_model = FirstLevelModel().fit(\n fmri_data[0], design_matrices=design_matrices[:1])\n dic1 = multi_session_model.compute_contrast(\n contrast, output_type='all')\n\n # session 2\n multi_session_model.fit(\n fmri_data[1], design_matrices=design_matrices[1:])\n dic2 = multi_session_model.compute_contrast(\n contrast, output_type='all')\n\n # fixed effects model\n multi_session_model.fit(\n fmri_data, design_matrices=design_matrices)\n fixed_fx_dic = multi_session_model.compute_contrast(\n contrast, output_type='all')\n\n contrasts = [dic1['effect_size'], dic2['effect_size']]\n variance = [dic1['effect_variance'], dic2['effect_variance']]\n\n # test without mask variable\n (\n fixed_fx_contrast,\n fixed_fx_variance,\n fixed_fx_stat,\n ) = compute_fixed_effects(contrasts, variance)\n assert_almost_equal(\n get_data(fixed_fx_contrast),\n get_data(fixed_fx_dic['effect_size']))\n assert_almost_equal(\n get_data(fixed_fx_variance),\n get_data(fixed_fx_dic['effect_variance']))\n assert_almost_equal(\n get_data(fixed_fx_stat), get_data(fixed_fx_dic['stat']))",
"def test_findBugfixes(self):\n bugfixes = self.builder._findChanges(\n self.project, self.builder._BUGFIX)\n self.assertEquals(\n bugfixes,\n [(23, 'Broken stuff was fixed.')])",
"def test_modified_schwefel(self):\n fun = get_problem('modified_schwefel', self.dimension, -100, 100)\n self.assertAlmostEqual(fun(self.array10), 6.9448853328785844, delta=350)",
"def test_correct_second_derivative_forward_order1(self):\r\n coeffs, shifts = finite_diff_coeffs(2, 1, \"forward\")\r\n assert np.allclose(coeffs, [1, -2, 1])\r\n assert np.allclose(shifts, [0, 1, 2])",
"def test_adjust_offsets_short(self):\n tool = pybedtools.BedTool(\"chr15 91512755 91512836 ENSG00000198901_1_147 0 -\", from_string=True)\n offsets = {\"ENSG00000198901_1_147\" : 10}\n results = adjust_offsets(tool, offsets)",
"def fixed(self):\n for i in range(15):\n self.factors[i].fixed()\n self.transition.fixed()",
"def mask_fixed(self):\n ns = len(self)-1\n # mask fixed entries\n self.mask[0,0,0] = True\n self.mask[0,0,-1] = True\n self.mask[0,-1,0] = True\n self.mask[-1,0,0] = True\n # mask entries with i+j+k > ns\n for ii in range(len(self)):\n for jj in range(len(self)):\n for kk in range(len(self)):\n if ii+jj+kk > ns:\n self.mask[ii,jj,kk] = True\n \n # mask fA = 0 and fB = 0\n for ii in range(len(self)):\n self.mask[ii,ns-ii,0] = True\n self.mask[ii,0,ns-ii] = True\n\n self.mask[0,:,0] = True\n self.mask[0,0,:] = True\n return self",
"def on_fees_change(origin_matrix, changes_on_fees):\n new_fees = origin_matrix[FEES_IDX]\n for idx in range(len(origin_matrix[0])):\n if changes_on_fees[idx] != None:\n new_fees[idx] = changes_on_fees[idx]\n return new_fees",
"def rmap_check_modifications(old_rmap, new_rmap, old_ref, new_ref, expected=(\"add\",)):\n diffs = diff.mapping_diffs(old_rmap, new_rmap)\n as_expected = True\n for difference in diffs:\n actual = diff.diff_action(difference)\n if actual in expected:\n pass # white-list so it will fail when expected is bogus.\n else:\n log.error(\"Expected one of\", repr(expected), \"but got\", repr(actual),\n \"from change\", repr(difference))\n as_expected = False\n with open(old_rmap) as pfile:\n old_count = len([line for line in pfile.readlines() if os.path.basename(old_ref) in line])\n with open(new_rmap) as pfile:\n new_count = len([line for line in pfile.readlines() if os.path.basename(new_ref) in line])\n if \"replace\" in expected and old_count != new_count:\n log.error(\"Replacement COUNT DIFFERENCE replacing\", repr(old_ref), \"with\", repr(new_ref), \"in\", repr(old_rmap),\n old_count, \"vs.\", new_count)\n as_expected = False\n return as_expected",
"def _get_init_controls(self):\n\n u_perf_0 = None\n k_fb_perf_0 = None\n k_fb_lqr = self.get_lqr_feedback()\n\n if self.do_shift_solution and self.n_fail == 0:\n if self.n_safe > 1:\n k_fb_safe = np.copy(self.k_fb_safe_all)\n\n # Shift the safe controls\n k_ff_safe = np.copy(self.k_ff_safe)\n\n u_0 = k_ff_safe[0, :]\n\n if self.n_safe > self.r and self.n_perf > self.n_safe: # the first control after the shared controls\n k_ff_perf = np.copy(self.k_ff_perf)\n k_ff_r_last = (k_ff_perf[0, :] + k_ff_safe[self.r - 1,\n :]) / 2 # mean of first perf ctrl and safe ctrl after shared\n else:\n k_ff_r_last = k_ff_safe[-1, :] # just the last safe control\n\n k_ff_safe_new = np.vstack((k_ff_safe[1:self.r, :], k_ff_r_last))\n\n if self.n_safe > self.r + 1:\n k_ff_safe_new = np.vstack((k_ff_safe_new, k_ff_safe[self.r:, :]))\n else:\n u_0 = self.u_apply\n k_ff_safe_new = np.array([])\n\n if self.n_perf - self.r > 0:\n k_ff_perf = np.copy(self.k_ff_perf)\n k_ff_perf_new = np.vstack((k_ff_perf[1:, :], k_ff_perf[-1, :]))\n\n if self.perf_has_fb:\n k_fb_perf_0 = np.copy(self.k_fb_perf_0)\n else:\n k_fb_perf_0 = np.array([])\n else:\n k_ff_perf_new = np.array([])\n k_fb_perf_0 = np.array([])\n else:\n k_fb_safe = np.empty((self.n_safe - 1, self.n_s * self.n_u))\n for i in range(self.n_safe - 1):\n k_fb_safe[i] = cas_reshape(k_fb_lqr, (1, -1))\n\n k_ff_safe_new = np.zeros((self.n_safe - 1, self.n_u))\n u_0 = np.zeros((self.n_u, 1))\n\n k_ff_perf_new = np.array([])\n if self.n_perf > 1:\n k_ff_perf_new = np.zeros((self.n_perf - self.r, self.n_u))\n\n if self.perf_has_fb:\n k_fb_perf_0 = k_fb_lqr\n else:\n k_fb_perf_0 = np.array([])\n\n if self.n_safe > 1:\n k_fb_safe_new = np.vstack((k_fb_safe[1:, :], k_fb_safe[-1, :]))\n\n else:\n k_fb_safe_new = np.array([])\n\n return u_0, k_ff_safe_new, k_fb_safe, k_ff_perf_new, k_fb_perf_0",
"def m_fixed(self):\n self.mx_free = self.my_free = self.mz_free = False\n return self",
"def _make_determine_wants_func(ref_mutator):\n\n def determine_wants(old_refs):\n refs = {k.decode(\"UTF-8\"): v.decode(\"UTF-8\") for k, v in old_refs.items()}\n new_refs = ref_mutator(refs)\n new_refs = {k.encode(\"UTF-8\"): v.encode(\"UTF-8\") for k, v in new_refs.items()}\n new_refs.update(old_refs) # Make sure we don't delete/modify anything.\n return new_refs\n\n return determine_wants",
"def test_fix(self):\n self.check_data.side_effect = lambda: self.fixed_cube\n with patch('esmvalcore.cmor._fixes.fix.Fix.get_fixes',\n return_value=[self.mock_fix]) as mock_get_fixes:\n with patch('esmvalcore.cmor.fix._get_cmor_checker',\n return_value=self.checker):\n cube_returned = fix_data(\n self.cube,\n short_name='short_name',\n project='project',\n dataset='model',\n mip='mip',\n session=sentinel.session,\n )\n self.checker.assert_called_once_with(self.intermediate_cube)\n self.check_data.assert_called_once_with()\n assert cube_returned is not self.cube\n assert cube_returned is not self.intermediate_cube\n assert cube_returned is self.fixed_cube\n mock_get_fixes.assert_called_once_with(\n **self.expected_get_fixes_call\n )",
"def test_fixups():\n binary: MachO = cast(MachO, cle.Loader(str(TEST_BASE / \"tests\" / \"aarch64\" / \"dyld_ios15.macho\")).main_object)\n expected = {\n 0x100008100: 0x100007A40,\n 0x1000081E0: 0x1000072B0,\n 0x1000081E8: 0x1000072DC,\n 0x1000081F0: 0x1000072E4,\n 0x1000081F8: 0x100007310,\n 0x100008200: 0x100007350,\n 0x100008208: 0x10000735C,\n 0x100008210: 0x10000738C,\n 0x100008218: 0x1000073E8,\n 0x100008238: 0x1000081E0,\n 0x100008248: 0x100007A40,\n 0x1000082A0: 0x100007AFC,\n 0x1000082D8: 0x10000C0E8,\n 0x10000C018: 0x100007B90,\n 0x10000C060: 0x100007B90,\n 0x10000C068: 0x100007998,\n 0x10000C090: 0x100007C2A,\n 0x10000C0D0: 0x10000C000,\n 0x10000C0D8: 0x100007210,\n 0x10000C0E8: 0x10000C0B0,\n 0x10000C108: 0x10000C04A,\n 0x10000C128: 0x1000079F0,\n }\n\n actual = {r.rebased_addr: r.value for r in binary.relocs if isinstance(r, MachOChainedFixup)}\n assert actual == expected",
"def test_shift_ruptures_positive_shift(midday):\n shifted = _shift_between(\n midday, 60,\n start='2020-01-01',\n end='2020-02-29'\n )\n expected_shift_mask = pd.Series(False, index=midday.index)\n expected_shift_mask['2020-01-01':'2020-02-29'] = True\n shift_mask, shift_amounts = time.shifts_ruptures(shifted, midday)\n assert_series_equal(shift_mask, expected_shift_mask, check_names=False)\n assert_series_equal(\n shift_amounts,\n pd.Series(60, index=shifted.index, dtype='int64'),\n check_names=False\n )",
"def test_allow_effect_during_refractory(self):\n np.random.seed(6564)\n f = 0.5\n self.syn_dense.W = np.random.randn(self.M, self.N)\n self.syn_dense.f_nmda = f\n self.syn_dense.change_during_ref = True\n\n self.T.active_state = False\n\n sim = simulation.Simulation(self.G, self.T, self.syn_dense, dt=self.dt)\n sim.run(self.t_max)\n\n self.assertGreater(np.linalg.norm(self.T.i_ampa), 0.1)\n self.assertGreater(np.linalg.norm(self.T.i_nmda), 0.1)",
"def test_fix_data(self):\n cube = self.fix.fix_data(self.cube)\n np.testing.assert_allclose(cube.data[0], 1.0)\n np.testing.assert_allclose(cube.data[2], 2.0)\n assert not np.ma.is_masked(cube.data[0])\n assert np.ma.is_masked(cube.data[1])\n assert not np.ma.is_masked(cube.data[2])",
"def _check_redshift(self, red):\n if np.min(np.abs(red - self.zout)) > 0.01:\n return 0\n return 1",
"def detect_fpMod():\n import logging\n log = logging.getLogger(__name__)\n log.debug('Setting fpMod')\n\n if z3.is_true(z3.simplify(z3.FPVal(3, z3.Float32()) % 2 < 0)):\n log.debug('Correct fpRem detected')\n fpMod.__code__ = fpMod_using_fpRem.__code__\n else:\n log.debug('fpRem = fpMod')\n fpMod.__code__ = fpRem_trampoline.__code__",
"def propagatePeakAssignments(peaks, refPeak=None, cleanNonRef=False,\n tolerances=None, warnUnalias=False):\n\n if refPeak:\n peaksIn = [refPeak, ]\n else:\n peaksIn = peaks\n \n if not tolerances:\n tolerances = []\n \n dimResonances = {}\n resonanceDims = {}\n for peak in peaksIn:\n for i, peakDim in enumerate(peak.sortedPeakDims()):\n dataDim = peakDim.dataDim\n expDimRef = dataDim.expDim.findFirstExpDimRef()\n \n if not expDimRef:\n continue\n \n key = expDimRef.isotopeCodes\n if dimResonances.get(key) is None:\n dimResonances[key] = []\n \n if peakDim.peakDimContribs:\n # could be in different spectra\n \n for contrib in peakDim.peakDimContribs:\n resonance = contrib.resonance\n \n dimResonances[key].append(resonance)\n if resonanceDims.get(resonance) is None:\n resonanceDims[resonance] = []\n \n if i not in resonanceDims[resonance]:\n resonanceDims[resonance].append(i)\n\n if refPeak and cleanNonRef:\n for peak in peaks:\n if peak is refPeak:\n continue\n \n for peakDim in peak.peakDims:\n clearPeakDim(peakDim)\n\n shiftRanges = {}\n for peak in peaks:\n if peak is refPeak:\n continue\n\n for i, peakDim in enumerate(peak.sortedPeakDims()):\n dataDimRef = peakDim.dataDimRef\n \n if dataDimRef:\n dataDim = dataDimRef.dataDim\n \n if dataDim not in shiftRanges:\n shiftMin, shiftMax = getDataDimFullShiftRange(dataDim)\n shiftRanges[dataDim] = (shiftMin, shiftMax)\n else:\n shiftMin, shiftMax = shiftRanges[dataDim]\n \n if i < len(tolerances):\n tolerance = tolerances[i]\n else:\n tolerance = getAnalysisDataDim(dataDim).assignTolerance\n \n key = dataDimRef.expDimRef.isotopeCodes\n pValue = peakDim.realValue\n\n extantResonances = []\n for contrib in peakDim.peakDimContribs:\n if contrib.peakDimComponent:\n continue\n extantResonances.append(contrib.resonance)\n \n assignResonances = []\n closeResonances = []\n for resonance in dimResonances[key]:\n if resonance not in extantResonances:\n shiftList = peak.peakList.dataSource.experiment.shiftList\n shift = resonance.findFirstShift(parentList=shiftList)\n \n if shift:\n # Could result in unaliasing the peak\n\n sValue = shift.value\n # Only assign if within known bounds\n if not (shiftMin < sValue < shiftMax): # Inside, not on edge\n continue\n \n assignResonances.append(resonance)\n \n if abs(sValue-pValue) <= tolerance:\n closeResonances.append(resonance)\n \n elif i in resonanceDims.get(resonance, []):\n # No shift so only propagate across the same dim numbers\n assignResonances.append(resonance)\n \n # Can't have both aliased and unaliased resonances: go for the\n # unaliased/close ppm ones in preference \n \n if closeResonances:\n for resonance in closeResonances:\n assignResToDim(peakDim, resonance, tolerance=tolerance,\n doWarning=False)\n \n elif not extantResonances:\n # Don't risk aliasing changes if already assigned\n # warn for aliasing changes\n for resonance in assignResonances:\n assignResToDim(peakDim, resonance, tolerance=tolerance,\n doWarning=warnUnalias)",
"def shiftDetector(frame):\n \n norm = frame/np.max(frame)#(2**16)\n anchorCol = norm[:,int((frame.shape[1])/2)]\n shifts = [np.argmax(signal.correlate(norm[:,i],anchorCol,mode='same'))-int((frame.shape[0])/2) for i in range(frame.shape[1])]\n \n return shifts",
"def test_center_of_coordinates_shift():\n # print sys._getframe().f_code.co_name\n # c = commons()\n\n pdb_inp = iotbx.pdb.input(source_info=None, lines=test_pdb_str_2)\n ncs_obj_phil = ncs.input(\n hierarchy=pdb_inp.construct_hierarchy())\n ncs_restraints_group_list = ncs_obj_phil.get_ncs_restraints_group_list()\n\n # ncs_restraints_group_list = c.ncs_restraints_group_list\n xrs = pdb_inp.xray_structure_simple()\n shifts = ncs_restraints_group_list.get_ncs_groups_centers(\n sites_cart = xrs.sites_cart())\n\n xyz = pdb_inp.atoms().extract_xyz()\n center_of_coor = (flex.vec3_double([xyz.sum()]) * (1/xyz.size())).round(8)\n # test shifts\n t1 = shifts[0].round(8)\n t2 = shifts[1].round(8)\n d1 = flex.sqrt((center_of_coor-t1).dot()).min_max_mean().as_tuple()\n d2 = flex.sqrt((center_of_coor-t2).dot()).min_max_mean().as_tuple()\n assert (d1 == d2)\n\n # test shift to center\n new_nrg = ncs_restraints_group_list.shift_translation_to_center(shifts = shifts)\n expected = (22.63275, 5.54625, 2.9375)\n assert (new_nrg[0].copies[0].t.round(5)).elems == expected\n # back to original coordinates system\n old_nrg = new_nrg.shift_translation_back_to_place(shifts=shifts)\n expected = (old_nrg[0].copies[0].t.round(5)).elems\n result = (ncs_restraints_group_list[0].copies[0].t.round(5)).elems\n assert result == expected",
"def checkIfAllowedToModify(self):\n\n oldBytes = b''\n testFileName = self.MAPSTUDIO + self.inputFiles[0] + '.msb'\n\n with open(testFileName, 'rb') as oldf:\n oldBytes = oldf.read()\n\n # Try writing something to the file\n\n try:\n with open(testFileName, 'wb') as outf:\n outf.write(b'TESTINGIFICANWRITEINTOTHISFILE')\n except:\n return False\n\n # Because apparently for _some_ reason it doesn't throw an error sometimes(?) so we confirm if the file was actually modified\n\n newBytes = b''\n with open(testFileName, 'rb') as oldf:\n newBytes = oldf.read()\n\n if (oldBytes == newBytes):\n return False\n\n # Restore the file to normal\n\n with open(testFileName, 'wb') as outf:\n outf.write(oldBytes)\n\n oldBytes = b''\n newBytes = b''\n\n return True",
"def test_unfixable_mask(self):\n unfixable_mask = mapreader.Map(os.path.join(tests.TEST_DATA_PATH, 'segmentations', 'test_unfixable_mask.map'))\n self.assertFalse(unfixable_mask.is_mask)\n with self.assertRaises(ValueError):\n unfixable_mask.fix_mask()\n self.assertFalse(unfixable_mask.is_mask)",
"def shiftDetector(frame, onh_info=None):\n norm = frame/np.max(frame)#(2**16)\n anchorCol = norm[:,int((frame.shape[1])/2)]\n shifts = [np.argmax(signal.correlate(norm[:,i],anchorCol,mode='same'))-int((frame.shape[0])/2) for i in range(frame.shape[1])]\n \n return shifts",
"def test_fix(self):\n self.check_metadata.side_effect = lambda: self.fixed_cube\n with patch('esmvalcore.cmor._fixes.fix.Fix.get_fixes',\n return_value=[self.mock_fix]) as mock_get_fixes:\n with patch('esmvalcore.cmor.fix._get_cmor_checker',\n return_value=self.checker):\n cube_returned = fix_metadata(\n cubes=[self.cube],\n short_name='short_name',\n project='project',\n dataset='model',\n mip='mip',\n session=sentinel.session,\n )[0]\n self.checker.assert_called_once_with(self.intermediate_cube)\n self.check_metadata.assert_called_once_with()\n assert cube_returned is not self.cube\n assert cube_returned is not self.intermediate_cube\n assert cube_returned is self.fixed_cube\n mock_get_fixes.assert_called_once_with(\n **self.expected_get_fixes_call\n )",
"def f_fixed(self):\n self.fx_free = self.fy_free = self.fz_free = False\n return self",
"def test_resync():\n np.random.seed(12)\n raw, beh, events, corrupted_indices = \\\n pd_parser.simulate_pd_data(prop_corrupted=0.)\n pd = raw._data[0]\n exclude_shift_i = np.round(raw.info['sfreq'] * exclude_shift).astype(int)\n candidates = _find_pd_candidates(\n pd, max_len=max_len, baseline=baseline,\n zscore=zscore, max_flip_i=max_flip_i, sfreq=raw.info['sfreq'])[0]\n beh_events = beh['time'] * raw.info['sfreq']\n offsets = (2 * resync * np.random.random(beh_events.size) - 1\n ) * raw.info['sfreq']\n beh_events += offsets\n beh_events -= beh_events[0]\n beh_events_adjusted, alignment, best_events = _find_best_alignment(\n beh_events, candidates, exclude_shift, resync, raw.info['sfreq'])\n errors = beh_events_adjusted - best_events + alignment\n resync_exclusions = np.where(abs(errors) > exclude_shift_i)[0]\n idx = resync_exclusions[0]\n correct = (best_events[idx], f'{idx}\\nrecovered (not excluded)')\n assert len(resync_exclusions) > 0\n # test exclude ambiguous\n pd_events = _exclude_ambiguous_events(\n beh_events_adjusted, alignment, best_events, pd, candidates,\n exclude_shift, max_len, raw.info['sfreq'], recover, zscore)\n assert np.isnan(pd_events[resync_exclusions]).all()\n assert np.isnan(pd_events[np.isnan(best_events)]).all()\n with mock.patch('builtins.input', return_value='y'):\n found = _recover_event(\n idx, pd, beh_events_adjusted[idx] + alignment, 2 * resync, zscore,\n max_len, raw.info['sfreq'])\n assert abs(found[0] - correct[0]) < 2\n assert found[1] == correct[1]",
"def _shift(self, s):\n start_pos = self._relative_head_pos()\n l = 1 + 2 * self.shift_length\n shift = int(s * l - 0.000000001) - int(l / 2)\n for s in range(abs(shift)):\n if shift > 0:\n if self.head_pos == len(self.memory) - 1 and len(self.memory) < self.max_memory:\n self.memory = np.concatenate((self.memory, np.zeros((1, self.memory_unit_size))), 0)\n self.head_pos += 1\n else:\n self.head_pos = (self.head_pos + 1) % self.max_memory\n else:\n if self.head_pos == 0 and len(self.memory) < self.max_memory:\n self.memory = np.concatenate((np.zeros((1, self.memory_unit_size)), self.memory), 0)\n self.left_expands += 1\n else:\n self.head_pos = (self.head_pos - 1) % self.max_memory\n if self.history is not None:\n self.history[\"loc\"][-1].append((start_pos, 0.1))\n return np.sign(shift)",
"def determine_if_clade_differs(\n self,\n plain_rf: int,\n tip_names_zero: list,\n tip_names_one: list\n ) -> int:\n if set(tip_names_zero) != set(tip_names_one):\n plain_rf +=1\n \n return plain_rf",
"def find_change(now: int, history: List[int]) -> Union[int, None]:\n if now is None or history is None:\n return None\n index = 7\n if len(history) < 7:\n index = len(history)\n return history[-index] - now",
"def reusability(self):\n self._reusability = -0.25 * self.DCC + 0.25 * self.CAMC + 0.5 * self.CIS + 0.5 * self.DSC\n return round(self._reusability, 5)",
"def _fixed_masks_arg(mask):\n return [\"NULL\", mask]",
"def _modify_all_notes(self):\n return self._modify_notes_in_time(TimeStep(0.0, MAX_CLIP_LENGTH), self._clip_notes, self._length_offset)",
"def get_shift(self, ra, dec):\n\n if not self.__dict__.has_key('wcs'):\n # set up a fake WCS to do the transformation\n # likely the details do not matter much\n wcs=pywcs.WCS(naxis=2)\n wcs.wcs.ctype=['RA---SIN','DEC--SIN']\n wcs.wcs.crval=[n.degrees(self.ra0),n.degrees(self.dec0)]\n wcs.wcs.crpix=[2049,2049]\n wcs.wcs.cdelt=[-1.0/60,1.0/60]\n \n observer=ephem.Observer()\n observer.long=n.radians(self.long)\n observer.lat=n.radians(self.lat)\n observer.epoch=ephem.J2000\n J0 = ephem.julian_date(0) \n observer.date=self.time[0]-J0\n \n body=ephem.FixedBody()\n body._ra=self.ra0\n body._dec=self.dec0\n body._epoch=ephem.J2000\n body.compute(observer)\n \n LST=observer.sidereal_time()\n HA=LST-self.ra0\n _dec=self.dec0\n _lat=n.radians(self.lat)\n # this calculation comes from Steve Ord's fixhdr.c\n parallactic_angle=n.arctan2(n.sin(HA)*n.cos(_lat),\n n.sin(_lat)*n.cos(_dec)-n.sin(_dec)*n.cos(_lat)*n.cos(HA))\n\n cosz=n.sin(_lat)*n.sin(_dec)+n.cos(_lat)*n.cos(_dec)*n.cos(HA)\n z=n.arccos(cosz)\n sinz=n.sin(z)\n tanz=sinz/cosz\n \n PV2_1=tanz*n.sin(parallactic_angle)\n PV2_2=tanz*n.cos(parallactic_angle)\n\n wcs.wcs.set_pv([(2,1,PV2_1),(2,2,PV2_2)])\n self.wcs=wcs\n \n if isinstance(ra,n.ndarray):\n sky=n.vstack((ra,dec)).T\n else:\n sky=n.array([[ra,dec]])\n pix=self.wcs.wcs_sky2pix(sky,0)\n if isinstance(ra,n.ndarray):\n x=pix[:,0]\n y=pix[:,1]\n else:\n x=pix[0,0]\n y=pix[0,1]\n dx=x-(self.wcs.wcs.crpix[0]-1)\n dy=y-(self.wcs.wcs.crpix[1]-1)\n dl=n.radians(dx*self.wcs.wcs.cdelt[0])\n dm=n.radians(dy*self.wcs.wcs.cdelt[1])\n return dl,dm",
"def test_adjust_offsets(self):\n \n offsets = {\"ENSMUSG00000051951_1_147\" : 10, \n \"ENSG00000198901_2_52\" : 10 ,\n \"ENSG00000198901_3_239\" : 10, \n \"ENSG00000198901_4_85\" : 10 ,\n \"ENSG00000198901_5_47\" : 10 ,\n \"ENSG00000198901_6_119\" : 10 ,\n \"ENSG00000198901_7_58\" : 10 ,\n \"ENSG00000198901_8_588\" : 10 ,\n \"ENSG00000198901_10_92\" : 10 ,\n \"ENSG00000198901_11_59\" : 10 ,\n \"ENSG00000198901_12_196\" : 10 ,\n \"ENSG00000198901_13_36\" : 10 ,\n\n }\n bedtool = pybedtools.BedTool(clipper.test_file(\"clip_analysis_test_peak_results.bed\"))\n \n \n results = adjust_offsets(bedtool, offsets)\n \n true_results = ((3206126, 3206130),\n (91513660, 91513664),\n (91517394, 91517398),\n (91517935, 91517939),\n (91522404, 91522408),\n (91523607, 91523611),\n (91524250, 91524254),\n (91525137, 91525141),\n (91527347, 91527351),\n (91527937, 91527941),\n (91528034, 91528038),\n (91537658, 91537662),\n )\n for result, true_result in zip(results, true_results):\n self.assertEqual(int(result[6]), true_result[0])\n self.assertEqual(int(result[7]), true_result[1])",
"def mask_percentage(self):\n return 100 - self.tissue_percentage",
"def set_correction(self, matrix=[[1, 0], [0, 1]], shift=[0, 0], meta=None,\n **kwargs):\n # compute the matrix for the scale and rotation correction\n shift = (np.asarray(shift) - np.dot(self._wcslin.wcs.crpix, matrix) +\n self._wcslin.wcs.crpix)\n\n matrix = inv(matrix).T\n\n cwcs = self._wcs.deepcopy()\n\n # estimate step for numerical differentiation. We need a step\n # large enough to avoid rounding errors and small enough to get a\n # better precision for numerical differentiation.\n # TODO: The logic below should be revised at a later time so that it\n # better takes into account the two competing requirements.\n crpix1, crpix2 = self._wcs.wcs.crpix\n hx = max(1.0, min(20.0, (crpix1 - 1.0) / 100.0,\n (self._wcs.pixel_shape[0] - crpix1) / 100.0))\n hy = max(1.0, min(20.0, (crpix2 - 1.0) / 100.0,\n (self._wcs.pixel_shape[1] - crpix2) / 100.0))\n\n # compute new CRVAL for the image WCS:\n crpixinref = self._wcslin.wcs_world2pix(\n self._wcs.wcs_pix2world([self._wcs.wcs.crpix], 1), 1)\n crpixinref = np.dot(crpixinref - shift, matrix.T).astype(np.float64)\n self._wcs.wcs.crval = self._wcslin.wcs_pix2world(crpixinref, 1)[0]\n self._wcs.wcs.set()\n\n # approximation for CD matrix of the image WCS:\n (U, u) = _linearize(cwcs, self._wcs, self._wcslin, self._wcs.wcs.crpix,\n matrix, shift, hx=hx, hy=hy)\n self._wcs.wcs.cd = np.dot(self._wcs.wcs.cd.astype(np.longdouble),\n U).astype(np.float64)\n self._wcs.wcs.set()\n\n # save linear transformation info to the meta attribute:\n super().set_correction(matrix=matrix, shift=shift, meta=meta, **kwargs)",
"def test_reflectance_ref(fluxd, wfb, f_sun, ref):\n\n xsec = 6.648e5 * u.km**2\n\n with vega_fluxd.set({'V': u.Quantity(3.589e-9, 'erg/(s cm2 AA)')}):\n with solar_fluxd.set({wfb: f_sun}):\n r = fluxd.to('1/sr', reflectance(wfb, cross_section=xsec))\n assert r.unit == u.sr**-1\n assert np.isclose(r.value, ref)",
"def modify_cand():\n if col_i + 1 < len(lastrow):\n return (lastrow[col_i + 1] +\n diff(left_elem, right_elem, key=key + [left_i],\n minimal=minimal, verbose=False))",
"def eval_is_fixed(leaf, eval_from_search):\n check_val = nn_evaluate.evaluate(leaf)\n if leaf.side_to_move() == Side.B:\n check_val *= -1\n check_val = min(max(check_val/1000, -1), 1)\n if abs(check_val - eval_from_search) > .0008:\n if abs(eval_from_search) != 0 and abs(eval_from_search) != 1:\n print(\"fixed.. searchval:\", eval_from_search, \"nnval:\", check_val, \"fen\", leaf.fen())\n return True\n return False",
"def _cmp_cflw_m1(self, problem, cflw_e):\n import pdb\n mu = {t:{o:{} for o in list(cflw_e.keys())} for t in self.periods}\n for i, tree in list(problem.trees.items()):\n for path in tree.paths():\n j = tuple(n.data('acode') for n in path)\n for o in list(cflw_e.keys()):\n _mu = path[-1].data(o)\n for t in self.periods:\n mu[t][o][i, j] = _mu[t] if t in _mu else 0.\n for t in self.periods:\n for o, e in list(cflw_e.items()):\n #pdb.set_trace()\n if t in e[0]:\n mu_lb = {'x_%i' % hash((i, j)):(mu[t][o][i, j] - (1 - e[0][t]) * mu[e[1]][o][i, j]) for i, j in mu[t][o]}\n mu_ub = {'x_%i' % hash((i, j)):(mu[t][o][i, j] - (1 + e[0][t]) * mu[e[1]][o][i, j]) for i, j in mu[t][o]}\n problem.add_constraint(name='flw-lb_%03d_%s' % (t, o), coeffs=mu_lb, sense=opt.SENSE_GEQ, rhs=0.)\n problem.add_constraint(name='flw-ub_%03d_%s' % (t, o), coeffs=mu_ub, sense=opt.SENSE_LEQ, rhs=0.)",
"def bits(delta):\n delta = asanyarray(delta)\n distance = absolute(delta)\n distance += 1\n return log2(distance)",
"def test_transform_update():\n pdb_inp = iotbx.pdb.input(source_info=None, lines=pdb_answer_0)\n ncs_obj = ncs.input(hierarchy=pdb_inp.construct_hierarchy())\n pdb_inp = iotbx.pdb.input(lines=pdb_answer_0,source_info=None)\n nrgl = ncs_obj.get_ncs_restraints_group_list()\n asu_site_cart = pdb_inp.atoms().extract_xyz()\n # reference matrices\n r1 = nrgl[0].copies[0].r\n t1 = nrgl[0].copies[0].t\n r2 = nrgl[0].copies[1].r\n t2 = nrgl[0].copies[1].t\n # modify matrices in the ncs group list\n nrgl[0].copies[0].r = r1 + r2\n nrgl[0].copies[0].t = t1 + t2\n nrgl[0].copies[1].r = r1 + r2\n nrgl[0].copies[1].t = t1 + t2\n nrgl.recalculate_ncs_transforms(asu_site_cart)\n # Get the updated values\n r1_n = nrgl[0].copies[0].r\n t1_n = nrgl[0].copies[0].t\n r2_n = nrgl[0].copies[1].r\n t2_n = nrgl[0].copies[1].t\n #\n assert approx_equal(r1, r1_n, eps=0.001)\n assert approx_equal(t1, t1_n, eps=0.1)\n assert approx_equal(r2, r2_n, eps=0.001)\n assert approx_equal(t2, t2_n, eps=0.1)",
"def robust_reference(self):\n raw = self.raw.copy()\n raw._data = removeTrend(raw.get_data(), sample_rate=self.sfreq)\n\n # Determine unusable channels and remove them from the reference channels\n noisy_detector = NoisyChannels(raw, do_detrend=False)\n noisy_detector.find_all_bads(ransac=self.ransac)\n self.noisy_channels_original = {\n \"bad_by_nan\": noisy_detector.bad_by_nan,\n \"bad_by_flat\": noisy_detector.bad_by_flat,\n \"bad_by_deviation\": noisy_detector.bad_by_deviation,\n \"bad_by_hf_noise\": noisy_detector.bad_by_hf_noise,\n \"bad_by_correlation\": noisy_detector.bad_by_correlation,\n \"bad_by_ransac\": noisy_detector.bad_by_ransac,\n \"bad_all\": noisy_detector.get_bads(),\n }\n self.noisy_channels = self.noisy_channels_original.copy()\n logger.info(\"Bad channels: {}\".format(self.noisy_channels))\n\n self.unusable_channels = _union(\n noisy_detector.bad_by_nan, noisy_detector.bad_by_flat\n )\n # unusable_channels = _union(unusable_channels, noisy_detector.bad_by_SNR)\n self.reference_channels = _set_diff(\n self.reference_channels, self.unusable_channels\n )\n\n # Get initial estimate of the reference by the specified method\n signal = raw.get_data() * 1e6\n self.reference_signal = (\n np.nanmedian(raw.get_data(picks=self.reference_channels), axis=0) * 1e6\n )\n reference_index = [\n self.ch_names_eeg.index(ch) for ch in self.reference_channels\n ]\n signal_tmp = self.remove_reference(\n signal, self.reference_signal, reference_index\n )\n\n # Remove reference from signal, iteratively interpolating bad channels\n raw_tmp = raw.copy()\n\n iterations = 0\n noisy_channels_old = []\n max_iteration_num = 4\n\n while True:\n raw_tmp._data = signal_tmp * 1e-6\n noisy_detector = NoisyChannels(raw_tmp)\n noisy_detector.find_all_bads(ransac=self.ransac)\n self.noisy_channels[\"bad_by_nan\"] = _union(\n self.noisy_channels[\"bad_by_nan\"], noisy_detector.bad_by_nan\n )\n self.noisy_channels[\"bad_by_flat\"] = _union(\n self.noisy_channels[\"bad_by_flat\"], noisy_detector.bad_by_flat\n )\n self.noisy_channels[\"bad_by_deviation\"] = _union(\n self.noisy_channels[\"bad_by_deviation\"], noisy_detector.bad_by_deviation\n )\n self.noisy_channels[\"bad_by_hf_noise\"] = _union(\n self.noisy_channels[\"bad_by_hf_noise\"], noisy_detector.bad_by_hf_noise\n )\n self.noisy_channels[\"bad_by_correlation\"] = _union(\n self.noisy_channels[\"bad_by_correlation\"],\n noisy_detector.bad_by_correlation,\n )\n self.noisy_channels[\"bad_by_ransac\"] = _union(\n self.noisy_channels[\"bad_by_ransac\"], noisy_detector.bad_by_ransac\n )\n self.noisy_channels[\"bad_all\"] = _union(\n self.noisy_channels[\"bad_all\"], noisy_detector.get_bads()\n )\n logger.info(\"Bad channels: {}\".format(self.noisy_channels))\n\n if (\n iterations > 1\n and (\n not self.noisy_channels[\"bad_all\"]\n or set(self.noisy_channels[\"bad_all\"]) == set(noisy_channels_old)\n )\n or iterations > max_iteration_num\n ):\n break\n noisy_channels_old = self.noisy_channels[\"bad_all\"].copy()\n\n if raw_tmp.info[\"nchan\"] - len(self.noisy_channels[\"bad_all\"]) < 2:\n raise ValueError(\n \"RobustReference:TooManyBad \"\n \"Could not perform a robust reference -- not enough good channels\"\n )\n\n if self.noisy_channels[\"bad_all\"]:\n raw_tmp._data = signal * 1e-6\n raw_tmp.info[\"bads\"] = self.noisy_channels[\"bad_all\"]\n raw_tmp.interpolate_bads()\n signal_tmp = raw_tmp.get_data() * 1e6\n else:\n signal_tmp = signal\n self.reference_signal = (\n np.nanmean(raw_tmp.get_data(picks=self.reference_channels), axis=0)\n * 1e6\n )\n\n 
signal_tmp = self.remove_reference(\n signal, self.reference_signal, reference_index\n )\n iterations = iterations + 1\n logger.info(\"Iterations: {}\".format(iterations))\n\n logger.info(\"Robust reference done\")\n return self.noisy_channels, self.reference_signal",
"def test_value_change(self):\n before = self.data.diffusion_data[:, :, 0, 0]\n after = module_05.run_module(self.data).diffusion_data[:, :, 0, 0]\n self.assertFalse(np.all(before == after))",
"def check_offset(self):\n\n for d in range(self.n_dmps):\n if abs(self.y0[d] - self.goal[d]) < 1e-4:\n self.goal[d] += 1e-4",
"def mask_reopenings(self, d_min=90, n_extra=0, print_out=True):\n total_cms = self.ActiveCMs\n diff_cms = np.zeros_like(total_cms)\n diff_cms[:, :, 1:] = total_cms[:, :, 1:] - total_cms[:, :, :-1]\n rs, ds = np.nonzero(np.any(diff_cms < 0, axis=1))\n nnz = rs.size\n\n for nz_i in range(nnz):\n if (ds[nz_i] + 3) > d_min and ds[nz_i] + 3 < len(self.Ds):\n if print_out:\n print(f\"Masking {self.Rs[rs[nz_i]]} from {self.Ds[ds[nz_i] + 3]}\")\n self.NewCases[rs[nz_i], ds[nz_i] + 3 - n_extra:].mask = True\n self.NewDeaths[rs[nz_i], ds[nz_i] + 11 - n_extra:].mask = True",
"def loss_comparison(warp_fname, offpar_fname, moving_rmli_fname,\n fixed_rmli_fname, moved_fname, crop_center, crop_size,\n reg_weight, ncc_win, debug=False):\n rg_crop = crop_size[0]\n az_crop = crop_size[1]\n rg_cen = crop_center[0]\n az_cen = crop_center[1]\n\n # Import voxelmorph with pytorch backend\n os.environ['VXM_BACKEND'] = 'pytorch'\n import voxelmorph as vxm\n\n # Read the voxelmorph warp file\n warp_file = np.load(warp_fname)\n warp = warp_file['offs']\n warp = warp[np.newaxis, :, :, :]\n\n # Read moved scene\n moved_file = np.load(moved_fname)\n moved = moved_file['scene']\n moved = moved[np.newaxis, np.newaxis, :, :]\n\n # Read, crop, and scale the fixed RMLI\n fixed_rmli = gx.MLI(fixed_rmli_fname,\n par=gx.MLI_Par(fixed_rmli_fname + '.par'))\n rmli_dim = fixed_rmli.dim\n fixed_full = fixed_rmli.array\n fixed = fixed_full[rg_cen - rg_crop // 2:rg_cen + rg_crop // 2,\n az_cen - az_crop // 2:az_cen + az_crop // 2]\n fixed = scale_rmli(fixed)\n fixed = fixed[np.newaxis, np.newaxis, :, :]\n\n # Read, crop, and scale the moving RMLI\n moving_rmli = gx.MLI(moving_rmli_fname,\n par=gx.MLI_Par(moving_rmli_fname + '.par'))\n moving_full = moving_rmli.array\n moving = moving_full[rg_cen - rg_crop // 2:rg_cen + rg_crop // 2,\n az_cen - az_crop // 2:az_cen + az_crop // 2]\n moving = scale_rmli(moving)\n moving = moving[np.newaxis, np.newaxis, :, :]\n\n # Read in the Gamma offsets\n # Scale the Gamma offsets to be the same size as the original data that\n # was cropped to feed into voxelmorph.\n offs_basename, _ = os.path.splitext(os.path.basename(offpar_fname))\n offs_fname = os.path.join(os.path.dirname(offpar_fname), offs_basename + '.offs')\n offpar = gx.OFF_Par(offpar_fname)\n offs_dim = (offpar['offset_estimation_range_samples'],\n offpar['offset_estimation_azimuth_samples'])\n gx_offs = gx.readBin(offs_fname, offs_dim, _dtype='complex64')\n zoom_factor = (rmli_dim[0] / offs_dim[0], rmli_dim[1] / offs_dim[1])\n multilook = (fixed_rmli.par['range_looks'],\n fixed_rmli.par['azimuth_looks'])\n gamma_rg_offs = scipy.ndimage.zoom(np.real(gx_offs), zoom_factor)\n gamma_az_offs = scipy.ndimage.zoom(np.imag(gx_offs), zoom_factor)\n gamma_rg_offs = gamma_rg_offs[rg_cen - rg_crop // 2:rg_cen + rg_crop // 2,\n az_cen - az_crop // 2:az_cen + az_crop // 2]\n gamma_rg_offs /= multilook[0]\n gamma_az_offs = gamma_az_offs[rg_cen - rg_crop // 2:rg_cen + rg_crop // 2,\n az_cen - az_crop // 2:az_cen + az_crop // 2]\n gamma_az_offs /= multilook[1]\n gamma_warp = np.stack((gamma_rg_offs, gamma_az_offs), axis=0)\n gamma_warp = gamma_warp[np.newaxis, :, :, :]\n\n # Create a moved image with the gamma offsets\n transformer = vxm.layers.SpatialTransformer(crop_size)\n gamma_moved = transformer(torch.from_numpy(moving).float(),\n torch.from_numpy(gamma_warp).float())\n\n # Prepare ncc loss with square window\n ndims = len(list(fixed.shape)) - 2\n assert ndims in [1, 2, 3], \"volumes should be 1 to 3 dimensions. 
found: %d\" % ndims\n ncc_win_sq = [ncc_win] * ndims # Build a square window\n ncc = vxm.losses.NCC(ncc_win_sq, cuda=False)\n\n # Now we have all the data, compute the losses\n loss_sim_vxm = ncc.loss(torch.from_numpy(fixed).float(),\n torch.from_numpy(moved).float())\n loss_sim_gamma = ncc.loss(torch.from_numpy(fixed).float(), gamma_moved)\n\n grad = vxm.losses.Grad(penalty='l2')\n loss_smooth_vxm = grad.loss(None, torch.from_numpy(warp).float())\n loss_smooth_gamma = grad.loss(None, torch.from_numpy(gamma_warp).float())\n\n loss_total_vxm = loss_sim_vxm + (reg_weight * loss_smooth_vxm)\n loss_total_gamma = loss_sim_gamma + (reg_weight * loss_smooth_gamma)\n\n # Print everything\n print('Lambda: {}\\n'.format(reg_weight))\n print('Voxelmorph:\\nSimilarity loss: {}\\nSmoothness loss: {}\\n'\n 'Total: {}\\n'.format(loss_sim_vxm, loss_smooth_vxm, loss_total_vxm))\n print('Gamma:\\nSimilarity loss: {}\\nSmoothness loss: {}\\n'\n 'Total: {}\\n'.format(loss_sim_gamma, loss_smooth_gamma, loss_total_gamma))\n\n if debug:\n plt.figure()\n plt.imshow(moved[0, 0, :, :])\n plt.title('moved')\n plt.colorbar()\n plt.figure()\n plt.imshow(gamma_moved[0, 0, :, :])\n plt.title('gamma_moved')\n plt.colorbar()\n plt.figure()\n plt.imshow(fixed[0, 0, :, :])\n plt.title('fixed')\n plt.colorbar()\n plt.figure()\n plt.imshow(warp[0, 0, :, :])\n plt.title('warp_rg')\n plt.colorbar()\n plt.figure()\n plt.imshow(warp[0, 1, :, :])\n plt.title('warp_az')\n plt.colorbar()\n plt.figure()\n plt.imshow(gamma_warp[0, 0, :, :])\n plt.title('gamma_warp_rg')\n plt.colorbar()\n plt.figure()\n plt.imshow(gamma_warp[0, 1, :, :])\n plt.title('gamma_warp_az')\n plt.colorbar()\n plt.show()",
"def switchToPositiveStrandCoordinates( options, data ):\n for c in data.mafBlocksByChrom:\n for m in data.mafBlocksByChrom[ c ]:\n if m.refStart > m.refEnd:\n m.refStart, m.refEnd = m.refEnd, m.refStart\n m.refStrand *= -1\n m.hplStart, m.hplEnd = m.hplStart, m.hplEnd # this is now left-right draw order\n # sanity check\n if m.refStart > data.chrLengthsByChrom[ c ] or m.refEnd > data.chrLengthsByChrom[ c ]:\n sys.stderr.write( 'file %s has maf block on chr %s with '\n 'bounds [%d - %d] which are beyond featLen (%d)\\n' %\n ( options.maf, m.refChr, m.refStart, m.refEnd, data.chrLengthsByChrom[ c ] ))\n sys.exit( 1 )",
"def test_zero_clumper():\n mask0 = np.array([0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0], dtype=bool)\n expected_clump0 = [\n np.array([0], dtype=bool),\n np.array([0, 0, 0], dtype=bool),\n np.array([0, 0], dtype=bool),\n np.array([0, 0, 0], dtype=bool),\n np.array([0], dtype=bool),\n ]\n\n clumps0 = org_zeros_clumper(mask0)\n clumps0_corr = correct_zeros_clumper(mask0)\n # For mask0 both methods work.\n for i, __ in enumerate(clumps0):\n assert np.all(clumps0[i] == expected_clump0[i])\n assert np.all(clumps0_corr[i] == expected_clump0[i])\n\n clumps0_inverted = org_zeros_clumper(~mask0)\n clumps0_corr_inverted = correct_zeros_clumper(~mask0)\n # Check number of clumps if inverted\n # Should change since mask0 has uneven group numbers. 5 False, 4 True.\n assert not (len(clumps0_inverted) != len(clumps0)) # This should not be the case\n assert len(clumps0_corr_inverted) != len(clumps0_corr)\n\n # This mask starts with group of 1s so fails for original code.\n mask1 = np.array([1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0], dtype=bool)\n expected_clump1 = [\n np.array([0, 0, 0], dtype=bool),\n np.array([0, 0], dtype=bool),\n np.array([0, 0, 0], dtype=bool),\n np.array([0], dtype=bool),\n ]\n clumps1 = org_zeros_clumper(mask1)\n clumps1_corr = correct_zeros_clumper(mask1)\n\n for i, __ in enumerate(clumps1):\n assert not (np.all(clumps1[i] == expected_clump1[i])) # Failed original case\n assert np.all(clumps1_corr[i] == expected_clump1[i])\n\n # Testing corner cases\n masked_zeros = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=bool)\n expected_zeros = [masked_zeros]\n all_zeros_clumped = correct_zeros_clumper(masked_zeros)\n assert len(expected_zeros) == len(all_zeros_clumped)\n for i, __ in enumerate(all_zeros_clumped):\n assert np.all(all_zeros_clumped[i] == expected_zeros[i])\n\n masked_ones = np.array([1, 1, 1, 1, 1, 1], dtype=bool)\n expected_ones = []\n all_ones_clumped = correct_zeros_clumper(masked_ones)\n assert len(expected_ones) == len(all_ones_clumped)\n assert all_ones_clumped == []",
"def check_change_power_spectrum(test_knotpos, test_knotval, matpow):\n #Get the modified power spectrum\n kval = matpow[:,0]\n newpk = lyasimulation.change_power_spectrum_knots(test_knotpos, test_knotval, matpow)\n #Check the kvalues are still the same for comparison to the transfer function\n assert np.all([k in newpk[:,0] for k in kval])\n #Build interpolators for the new power spectrum\n #Only interpolate a subset of Pk for speed\n newpkint = build_restrict_interp(newpk, test_knotpos[0]/3., test_knotpos[-1]*3)\n #Build interpolator for old power spectrum\n pkint = build_restrict_interp(matpow, test_knotpos[0]/3., test_knotpos[-1]*3)\n #Build interpolator for knots\n ext_knotpos = np.concatenate([[kval[0],],test_knotpos, [kval[-1],]])\n ext_knotval = np.concatenate([[test_knotval[0],],test_knotval, [test_knotval[-1],]])\n knotint = interp.interp1d(ext_knotpos, ext_knotval, kind='linear')\n #Check that the interpolator works\n assert np.all(np.abs(knotint(test_knotpos) / test_knotval-1) < 1e-5)\n lg_knotpos = np.log(test_knotpos)\n #Check modification worked at the knot values\n assert np.all(np.abs(np.exp(newpkint(lg_knotpos)) / (np.exp(pkint(lg_knotpos)) * test_knotval) - 1) < 1e-3)\n #Pick some random k values distributed uniformly in log space\n krand = (lg_knotpos[-1]-lg_knotpos[0]+0.2)*np.random.random(250)+lg_knotpos[0]-0.1\n #Check that the modification was accurate at random positions\n #print(np.max(np.abs(np.exp(newpkint(krand)) / (np.exp(pkint(krand)) * knotint(np.exp(krand))) - 1)))\n assert np.all(np.abs(np.exp(newpkint(krand)) / (np.exp(pkint(krand)) * knotint(np.exp(krand))) - 1) < 0.01)",
"def test_explicit_fixed_effects(tmp_path):\n shapes, rk = ((7, 8, 7, 15), (7, 8, 7, 16)), 3\n mask, fmri_data, design_matrices =\\\n write_fake_fmri_data_and_design(shapes, rk, file_path=tmp_path)\n contrast = np.eye(rk)[1]\n\n # session 1\n multi_session_model = FirstLevelModel(mask_img=mask).fit(\n fmri_data[0], design_matrices=design_matrices[:1])\n dic1 = multi_session_model.compute_contrast(\n contrast, output_type='all')\n\n # session 2\n multi_session_model.fit(\n fmri_data[1], design_matrices=design_matrices[1:])\n dic2 = multi_session_model.compute_contrast(\n contrast, output_type='all')\n\n # fixed effects model\n multi_session_model.fit(\n fmri_data, design_matrices=design_matrices)\n fixed_fx_dic = multi_session_model.compute_contrast(\n contrast, output_type='all')\n\n contrasts = [dic1['effect_size'], dic2['effect_size']]\n variance = [dic1['effect_variance'], dic2['effect_variance']]\n\n (\n fixed_fx_contrast,\n fixed_fx_variance,\n fixed_fx_stat,\n ) = compute_fixed_effects(contrasts, variance, mask)\n\n assert_almost_equal(\n get_data(fixed_fx_contrast),\n get_data(fixed_fx_dic['effect_size']))\n assert_almost_equal(\n get_data(fixed_fx_variance),\n get_data(fixed_fx_dic['effect_variance']))\n assert_almost_equal(\n get_data(fixed_fx_stat), get_data(fixed_fx_dic['stat']))\n\n # ensure that using unbalanced effects size and variance images\n # raises an error\n with pytest.raises(ValueError):\n compute_fixed_effects(contrasts * 2, variance, mask)",
"def ctrl_update_flags(self, flags, old_dst, old_src, new_val, sub_op):\n of_cond = self.create_overflow_condition(old_dst, old_src, new_val, sub_op)\n cf_cond = self.create_carry_condition(new_val, sub_op)\n\n valid_flags = {'C': cf_cond is True,\n 'Z': new_val & 0xFF == 0,\n 'V': of_cond is True,\n 'N': ((new_val & 0x80) != 0)}\n\n for flag in flags:\n self.set_flag(flag, valid_flags[flag])",
"def verifyShiftFile(self):\n if self['refimage'] and fu.findFile(self['refimage']):\n return True\n else: return False",
"def calculate_correction(filedic):\n lanczos_cera = xr.open_mfdataset(filedic['lanczos(CERA)'], combine='by_coords')\n lanczos_noaa = xr.open_mfdataset(filedic['lanczos(20CR)'], combine='by_coords')\n return lanczos_noaa.drop('number').squeeze() - lanczos_cera.drop('number').squeeze()",
"def test_read_0_1_smirff(self):\n ForceField(\n get_data_file_path(\n \"test_forcefields/smirff99Frosst_reference_0_1_spec.offxml\"\n )\n )",
"def compare_shift(workshift: tb.Workshift, delta: int) -> int:\n return workshift.duration - delta",
"def Wang_SSM_New(data, do_padding = False, endianness='little'): \n \n if do_padding: \n M = MD4_get_words(MD4_pad_data(data), endianness) \n else: \n if len(data) < 64:\n data += b'\\x00'*(64-len(data)) \n M = MD4_get_words(data, endianness) \n\n A, B, C, D = MD4_get_IVs(M)\n \n jj = 0 # Constant...we're just doing the 1st round corrections.\n for ii in range(4):\n \n corrections = Wang_Rules[4*ii]\n A[ii+1] = phi(jj, A[ii], B[ii], C[ii], D[ii], M[W[jj][4*ii+0]], S[jj][0])\n A[ii+1] = apply_corrections(A[ii+1], B[ii], corrections)\n M[W[jj][4*ii+0]] = un_phi(jj, A[ii], B[ii], C[ii], D[ii], A[ii+1], S[jj][0])\n \n corrections = Wang_Rules[4*ii+1]\n D[ii+1] = phi(jj, D[ii], A[ii+1], B[ii], C[ii], M[W[jj][4*ii+1]], S[jj][1])\n D[ii+1] = apply_corrections(D[ii+1], A[ii+1], corrections)\n M[W[jj][4*ii+1]] = un_phi(jj, D[ii], A[ii+1], B[ii], C[ii], D[ii+1], S[jj][1])\n \n corrections = Wang_Rules[4*ii+2] \n C[ii+1] = phi(jj, C[ii], D[ii+1], A[ii+1], B[ii], M[W[jj][4*ii+2]], S[jj][2])\n C[ii+1] = apply_corrections(C[ii+1], D[ii+1], corrections)\n M[W[jj][4*ii+2]] = un_phi(jj, C[ii], D[ii+1], A[ii+1], B[ii], C[ii+1], S[jj][2])\n \n corrections = Wang_Rules[4*ii+3]\n B[ii+1] = phi(jj, B[ii], C[ii+1], D[ii+1], A[ii+1], M[W[jj][4*ii+3]], S[jj][3])\n B[ii+1] = apply_corrections(B[ii+1], C[ii+1], corrections)\n M[W[jj][4*ii+3]] = un_phi(jj, B[ii], C[ii+1], D[ii+1], A[ii+1], B[ii+1], S[jj][3])\n \n new_data = MD4_get_data(M, endianness)\n \n return(new_data)",
"def check_for_edit(self, force):\n if force:\n self._manipulations = {\"bri\": 0, \"con\": 0, \"sat\": 0}\n return 0\n elif self._manipulations != {\"bri\": 0, \"con\": 0, \"sat\": 0}:\n self._app[\"statusbar\"].message(\n \"Image has been edited, add ! to force\", \"warning\")\n return 1\n return 0",
"def flux_recal(data, z0, zref):\n\tf_obs = data\n\tz0 = z0\n\tz1 = zref\n\tDa0 = Test_model.angular_diameter_distance( z0 ).value\n\tDa1 = Test_model.angular_diameter_distance( z1 ).value\n\tf_ref = f_obs * (1 + z0)**4 * Da0**2 / ( (1 + z1)**4 * Da1**2 )\n\treturn f_ref",
"def jumpfix(df, meas, threashold=0.005, return_jump=False):\r\n df1 = df.copy(deep=True)\r\n df1['delta' + meas] = df1.loc[:, meas].diff()\r\n jump = df1[abs(df1['delta' + meas]) > threashold]\r\n jump['cumul'] = jump.loc[:, 'delta' + meas].cumsum()\r\n df1['newVal'] = df1.loc[:, meas]\r\n\r\n for i in range(len(jump)):\r\n jt = jump.index[i]\r\n ja = jump['cumul'][i]\r\n df1.loc[jt:, 'newVal'] = df1[meas].apply(lambda x: x - ja, 1)\r\n df1[meas] = df1['newVal']\r\n if return_jump:\r\n print(jump)\r\n return df1, jump\r\n else:\r\n return df1",
"def water_correction_energies(fname, se_h2o_hof, se_h_hof, ref_h2o_ener,\n se_au=False, ref_au=True):\n check_for_keys(fname, REFEK, NATMK, SEEK)\n with h5.File(fname, 'r') as ifi:\n # This calculates the reference heat of formation\n # Note the reference is assumed to be in eH\n correction = ifi[REFEK][:] - ((ifi[NATMK][:]//3) * ref_h2o_ener)\n if ref_au:\n correction *= 627.509\n if se_au:\n correction -= (ifi[SEEK][:] - se_h_hof - (ifi[NATMK][:]//3) * se_h2o_hof) * 627.509\n else:\n correction -= (ifi[SEEK][:] - se_h_hof - (ifi[NATMK][:]//3) * se_h2o_hof)\n return correction",
"def test_modified_14_factors(self, force_field):\n top = Molecule.from_smiles(\"CCCC\").to_topology()\n default_14 = copy.deepcopy(force_field)\n e_mod_14 = copy.deepcopy(force_field)\n vdw_mod_14 = copy.deepcopy(force_field)\n\n e_mod_14[\"Electrostatics\"].scale14 = 0.66\n assert e_mod_14[\"Electrostatics\"].scale14 == 0.66\n\n vdw_mod_14[\"vdW\"].scale14 = 0.777\n assert vdw_mod_14[\"vdW\"].scale14 == 0.777\n\n default_omm_sys = default_14.create_openmm_system(top)\n e_mod_omm_sys = e_mod_14.create_openmm_system(top)\n vdw_mod_omm_sys = vdw_mod_14.create_openmm_system(top)\n\n for omm_sys, expected_vdw_14, expected_coul_14 in [\n [default_omm_sys, 0.5, 0.833333],\n [e_mod_omm_sys, 0.5, 0.66],\n [vdw_mod_omm_sys, 0.777, 0.833333],\n ]:\n found_coul_14, found_vdw_14 = get_14_scaling_factors(omm_sys)\n\n np.testing.assert_almost_equal(\n actual=found_vdw_14,\n desired=expected_vdw_14,\n decimal=10,\n err_msg=\"vdW 1-4 scaling factors do not match\",\n )\n\n np.testing.assert_almost_equal(\n actual=found_coul_14,\n desired=expected_coul_14,\n decimal=10,\n err_msg=\"Electrostatics 1-4 scaling factors do not match\",\n )",
"def check_change(self, state_variables):\n for control in self.__control_list:\n if control[0] != 'control':\n\t\t\t\t# sum of values of state variables of interest in the previous and the current interval of time\n sum1 = np.matmul(control[1], state_variables[:,0])\n sum2 = np.matmul(control[1], state_variables[:,1])\n\n if (np.sign(sum1 - control[2]) != np.sign(sum2 - control[2])):\n self.__active_control = control\n return True\t\n return False",
"def fix_seq(self, fixed_seq):\n self.wc.fix_seq(wc(fixed_seq))",
"def replace_lowest_one_with_zero(x):\n return x & (x-1)",
"def fluxes_increments_to_actual(example_dict):\n\n edge_heights_m_agl = get_grid_cell_edges(example_dict[HEIGHTS_KEY])\n grid_cell_widths_metres = get_grid_cell_widths(edge_heights_m_agl)\n\n num_examples = len(example_dict[VALID_TIMES_KEY])\n num_heights = len(example_dict[HEIGHTS_KEY])\n\n grid_cell_width_matrix_metres = numpy.reshape(\n grid_cell_widths_metres, (1, num_heights)\n )\n grid_cell_width_matrix_metres = numpy.repeat(\n grid_cell_width_matrix_metres, repeats=num_examples, axis=0\n )\n\n down_flux_increment_matrix_w_m03 = get_field_from_dict(\n example_dict=example_dict, field_name=SHORTWAVE_DOWN_FLUX_INC_NAME\n )\n up_flux_increment_matrix_w_m03 = get_field_from_dict(\n example_dict=example_dict, field_name=SHORTWAVE_UP_FLUX_INC_NAME\n )\n\n down_flux_matrix_w_m02 = numpy.cumsum(\n down_flux_increment_matrix_w_m03 * grid_cell_width_matrix_metres,\n axis=1\n )\n up_flux_matrix_w_m02 = numpy.cumsum(\n up_flux_increment_matrix_w_m03 * grid_cell_width_matrix_metres,\n axis=1\n )\n\n down_flux_matrix_w_m02 = numpy.maximum(down_flux_matrix_w_m02, 0.)\n up_flux_matrix_w_m02 = numpy.maximum(up_flux_matrix_w_m02, 0.)\n\n vector_target_names = example_dict[VECTOR_TARGET_NAMES_KEY]\n found_down_flux = SHORTWAVE_DOWN_FLUX_NAME in vector_target_names\n found_up_flux = SHORTWAVE_UP_FLUX_NAME in vector_target_names\n\n if not found_down_flux:\n vector_target_names.append(SHORTWAVE_DOWN_FLUX_NAME)\n if not found_up_flux:\n vector_target_names.append(SHORTWAVE_UP_FLUX_NAME)\n\n down_flux_index = vector_target_names.index(SHORTWAVE_DOWN_FLUX_NAME)\n up_flux_index = vector_target_names.index(SHORTWAVE_UP_FLUX_NAME)\n example_dict[VECTOR_TARGET_NAMES_KEY] = vector_target_names\n\n if found_down_flux:\n example_dict[VECTOR_TARGET_VALS_KEY][..., down_flux_index] = (\n down_flux_matrix_w_m02\n )\n else:\n example_dict[VECTOR_TARGET_VALS_KEY] = numpy.insert(\n example_dict[VECTOR_TARGET_VALS_KEY],\n obj=down_flux_index, values=down_flux_matrix_w_m02, axis=-1\n )\n\n if found_up_flux:\n example_dict[VECTOR_TARGET_VALS_KEY][..., up_flux_index] = (\n up_flux_matrix_w_m02\n )\n else:\n example_dict[VECTOR_TARGET_VALS_KEY] = numpy.insert(\n example_dict[VECTOR_TARGET_VALS_KEY],\n obj=up_flux_index, values=up_flux_matrix_w_m02, axis=-1\n )\n\n return example_dict",
"def _modify_step_notes(self, steps):\n notes = self._clip_notes\n self._nudge_offset = self._limited_nudge_offset(steps, notes, self._nudge_offset)\n for step in steps:\n time_step = self._time_step(self.get_step_start_time(step))\n notes = self._modify_notes_in_time(time_step, notes, self._length_offset)\n\n return notes",
"def absolute_momentum(self, prices, lookback, long_only=False):\n returns = prices.pct_change(periods=lookback).fillna(0)\n long_signal = (returns > 0).applymap(self.bool_converter)\n short_signal = -(returns < 0).applymap(self.bool_converter)\n if long_only == True:\n signal = long_signal\n else:\n signal = long_signal + short_signal\n return signal",
"def test_read_0_1_smirnoff(self):\n ForceField(\n get_data_file_path(\n \"test_forcefields/smirnoff99Frosst_reference_0_1_spec.offxml\"\n )\n )",
"def check_limits(self):\n\n #Find the relative position of each leg vs. its \"zero\" position\n relpos = self.fixed_plate - self.fixed_plate_zero\n\n for leg in range(3):\n #Check that the leg is within allowable \"safe zone\"\n #Use the position of the leg (relative to 0) to find the index in the \"safe zone\" matrix\n i_x = nearest_index(self.leg_safe_xaxis, relpos[COORD_X, leg])\n i_z = nearest_index(self.leg_safe_zaxis, relpos[COORD_Z, leg])\n #Look up in the safe zone.\n self.leg_fault[leg] = (not self.leg_safe_zone[leg, i_x, i_z])\n\n if (not all(np.isreal(self.fixed_plate[:, leg]))) or any(np.isnan(self.fixed_plate[:, leg])):\n #A complex or NaN value = the angle found for the leg was invalid, meaning that the\n #leg would have to be longer to reach the desired position.\n self.leg_fault[leg] = True",
"def alignFromFiducials(self, mute=True, shift_markers=True, logfile_residual=''):\n from math import sqrt\n import scipy.optimize\n from pytom.reconstruction.tiltAlignmentFunctions import markerResidual, refMarkerResidualForTiltImage as refResidual\n\n self.sum_called = 0\n print('Shift Markers: ', shift_markers)\n self.optimizeMarkerPositions = shift_markers\n self.irefmark = self.TiltSeries_._TiltAlignmentParas.irefmark\n self.ireftilt = numpy.argwhere( self.TiltSeries_._projIndices.astype(int) == self.TiltSeries_._TiltAlignmentParas.ireftilt)[0][0]\n print('reftilt: ', self.ireftilt, self.TiltSeries_._TiltAlignmentParas.ireftilt, self._ntilt)\n # self._alignmentTransXOrig = numpy.array(self._alignmentTransX)\n # self._alignmentTransYOrig = numpy.array(self._alignmentTransY)\n scoringFunction = self.alignmentScore\n\n self.q = [.001,]*len(self._alignmentTransX)\n\n if self.TiltSeries_._TiltAlignmentParas.optimizer == 'fmin':\n optimizer = scipy.optimize.fmin\n if not mute:\n print(\"using scipy fmin optimizer\")\n elif self.TiltSeries_._TiltAlignmentParas.optimizer == 'fmin_slsqp':\n optimizer = scipy.optimize.fmin_slsqp\n if not mute:\n print(\"using scipy fmin_slsqp (Sequential Least SQuares Programming) optimizer\")\n elif self.TiltSeries_._TiltAlignmentParas.optimizer == 'fmin_cg':\n optimizer = scipy.optimize.fmin_cg\n if not mute:\n print(\"using scipy fmin_cg (conjugate gradients) optimizer\")\n elif self.TiltSeries_._TiltAlignmentParas.optimizer == 'leastsq':\n optimizer = scipy.optimize.leastsq\n if not mute:\n print(\"using scipy leastsq optimizer - optimize matrix instead of scalar function\")\n self.TiltSeries_._TiltAlignmentParas.leastsq = True\n elif self.TiltSeries_._TiltAlignmentParas.optimizer == 'fmin_powell':\n optimizer = scipy.optimize.fmin_powell\n if not mute:\n print(\"using scipy fmin_powell optimizer\")\n else:\n if not mute:\n print((\"optimizer \" + str(self.TiltSeries_._TiltAlignmentParas.optimizer) +\n \" not known\"))\n # first update alignment from projections\n self.getMarkersFromTiltSeries(self.TiltSeries_)\n self.getTranslationsFromTiltSeries(self.TiltSeries_)\n self.getRotationsFromTiltSeries(self.TiltSeries_)\n self.getMagnificationsFromTiltSeries(self.TiltSeries_)\n optimizableVariables0 = self.getOptimizableVariables(self.TiltSeries_._TiltAlignmentParas)\n\n # alignment score before optimization\n score = markerResidual(self.TiltSeries_._TiltAlignmentParas.cent,\n Markers_=self._Markers,\n cTilt=self._cTilt, sTilt=self._sTilt, ireftilt=self.ireftilt,\n transX=self._alignmentTransX, transY=self._alignmentTransY,\n rotInPlane=self._alignmentRotations, tiltangles=self._tiltAngles,\n isoMag=self._alignmentMagnifications, dBeam=self._alignmentBeamTilt,\n dMagnFocus=None, dRotFocus=None, equationSet=False, irefmark=self.irefmark)\n\n if not mute:\n print(( \"Alignment score before optimization (square root of residual): \"\n + str(sqrt(score)) ))\n\n # optimize scoring function\n if ((self.TiltSeries_._TiltAlignmentParas.optimizer == 'fmin') or\n (self.TiltSeries_._TiltAlignmentParas.optimizer == 'fmin_powell')):\n optimizableVariables = optimizer(scoringFunction, optimizableVariables0,\n xtol=0.000001, ftol=0.000001,\n maxiter=self.TiltSeries_._TiltAlignmentParas.maxIter, maxfun=None)\n elif self.TiltSeries_._TiltAlignmentParas.optimizer == 'fmin_cg':\n optimizableVariables = optimizer(scoringFunction, optimizableVariables0,\n gtol=0.0000001,\n maxiter=self.TiltSeries_._TiltAlignmentParas.maxIter)\n elif self.TiltSeries_._TiltAlignmentParas.optimizer == 
'fmin_slsqp':\n optimizableVariables = optimizer(scoringFunction, optimizableVariables0,\n iter=self.TiltSeries_._TiltAlignmentParas.maxIter, acc=1e-08)\n elif self.TiltSeries_._TiltAlignmentParas.optimizer == 'leastsq':\n optimizableVariables, success = optimizer(scoringFunction, optimizableVariables0,\n maxfev=self.TiltSeries_._TiltAlignmentParas.maxIter*10, epsfcn=0.0,\n factor=10)\n\n score = markerResidual(self.TiltSeries_._TiltAlignmentParas.cent,\n Markers_=self._Markers,\n cTilt=self._cTilt, sTilt=self._sTilt,\n transX=self._alignmentTransX, transY=self._alignmentTransY, ireftilt=self.ireftilt,\n rotInPlane=self._alignmentRotations, irefmark=self.irefmark, tiltangles=self._tiltAngles,\n isoMag=self._alignmentMagnifications, dBeam=self._alignmentBeamTilt,\n dMagnFocus=None, dRotFocus=None, equationSet=False, logfile_residual=logfile_residual)\n\n self.setOptimizableVariables(self.TiltSeries_._TiltAlignmentParas, optimizableVariables)\n\n # finally set values in tilt series\n self.setMarkersInTiltSeries(self.TiltSeries_)\n self.setTranslationsInTiltSeries(self.TiltSeries_)\n self.setRotationsInTiltSeries(self.TiltSeries_)\n self.setMagnificationsInTiltSeries(self.TiltSeries_)\n\n\n if not mute:\n print(\"Alignment Score after optimization: \" + str(sqrt(score)))\n\n\n errors = numpy.zeros((len(self._cTilt)))\n for i in range(len(self._cTilt)):\n errors[i] = refResidual(self.TiltSeries_._TiltAlignmentParas.cent,\n Marker=self._Markers[self.TiltSeries_._TiltAlignmentParas.irefmark],\n cTilt=self._cTilt, sTilt=self._sTilt, transX=self._alignmentTransX,\n transY=self._alignmentTransY, rotInPlane=self._alignmentRotations, iproj=i,\n ireftilt=self.ireftilt,\n isoMag=self._alignmentMagnifications, dBeam=self._alignmentBeamTilt,\n dMagnFocus=None, dRotFocus=None, equationSet=False)\n errorRef = markerResidual(self.TiltSeries_._TiltAlignmentParas.cent,\n Markers_=self._Markers,\n cTilt=self._cTilt, sTilt=self._sTilt,\n transX=self._alignmentTransX, transY=self._alignmentTransY, ireftilt=self.ireftilt,\n rotInPlane=self._alignmentRotations, irefmark=self.irefmark,\n tiltangles=self._tiltAngles,\n isoMag=self._alignmentMagnifications, dBeam=self._alignmentBeamTilt,\n dMagnFocus=None, dRotFocus=None, equationSet=False,\n logfile_residual=logfile_residual, verbose=True, errorRef=True)\n print(\"Error score refmarker: \", errorRef)\n\n\n\n\n # out = open('scores.txt', 'w')\n # for n, s in enumerate(scoresIt):\n # out.write(f'{n} {s}\\n')\n # out.close()\n\n return sqrt(score)",
"def _calculate_correction(self, telid):\n return 1",
"def test_nondefault_nonbonded_cutoff(self):\n topology = Molecule.from_smiles(\"[#18]\").to_topology()\n topology.box_vectors = [3, 3, 3] * unit.nanometer\n\n force_field = ForceField()\n\n vdw_handler = vdWHandler(version=0.4)\n vdw_handler.cutoff = 7.89 * unit.angstrom\n vdw_handler.scale14 = 1.0\n\n vdw_handler.add_parameter(\n {\n \"smirks\": \"[#18:1]\",\n \"epsilon\": 1.0 * unit.kilojoules_per_mole,\n \"sigma\": 1.0 * unit.angstrom,\n }\n )\n force_field.register_parameter_handler(vdw_handler)\n\n electrostatics_handler = ElectrostaticsHandler(version=0.3)\n electrostatics_handler.cutoff = 7.89 * unit.angstrom\n electrostatics_handler.periodic_potential = \"PME\"\n force_field.register_parameter_handler(electrostatics_handler)\n\n library_charges = LibraryChargeHandler(version=0.3)\n library_charges.add_parameter(\n {\n \"smirks\": \"[#18:1]\",\n \"charge1\": 0.0 * unit.elementary_charge,\n }\n )\n force_field.register_parameter_handler(library_charges)\n\n system = force_field.create_openmm_system(topology)\n\n found_cutoff = (\n system.getForce(0).getCutoffDistance().value_in_unit(openmm_unit.angstrom)\n )\n\n assert abs(found_cutoff - 7.89) < 1e-6",
"def correctMisalign(img, marker, center, compus, scope=100):\n\n markerCenter = np.asarray(marker.shape)//2\n guide = np.asarray([center, compus])\n landmark = np.zeros(guide.shape)\n \n #To run template matching to finder markers\n result = cv2.matchTemplate(img, marker, 0)\n result = (1-result/np.max(result))*255\n M = np.float32([\n [1, 0, markerCenter[1]] ,\n [0, 1, markerCenter[0]] ])\n resultPadded = cv2.warpAffine(result, M, (width, height))\n \n mask = np.zeros(resultPadded.shape)\n\n for i in range(0, len(guide)):\n mask[:] = 0\n mask_xfr = max(0, guide[i,1]-(scope+markerCenter[0]))\n mask_xto = min(width, guide[i,1]+(scope+markerCenter[0]))\n mask_yfr = max(0, guide[i,0]-(scope+markerCenter[1]))\n mask_yto = min(width, guide[i,0]+(scope+markerCenter[1]))\n mask[mask_xfr:mask_xto, mask_yfr:mask_yto] = 255\n min_val, max_val, min_loc, landmark[i,:] = \\\n cv2.minMaxLoc(np.multiply(resultPadded, mask))\n \n #To shift image\n shift = guide[0] - landmark[0] \n M = np.float32([\n [1, 0, shift[0]] ,\n [0, 1, shift[1]] ])\n imgShifted = cv2.warpAffine(img, M, (width, height))\n \n #To rescale & rotate image\n radius = np.linalg.norm(landmark[1,:] - landmark[0,:])\n scale = np.linalg.norm(guide[1,:] - guide[0,:])/radius\n cos = (landmark[1,0]-landmark[0,0])/radius\n theta = np.arccos(cos) / (2 * np.pi) * 360\n M = cv2.getRotationMatrix2D((guide[0,0],guide[0,1]),-theta,scale)\n imgModified = cv2.warpAffine(imgShifted,M,(width,height))\n return imgModified\n\n #}}}",
"def set_correction(self, matrix=[[1, 0], [0, 1]], shift=[0, 0], meta=None,\n **kwargs):\n frms = self._wcs.available_frames\n\n # if original WCS did not have tangent-plane corrections, create\n # new correction and add it to the WCs pipeline:\n if self._tpcorr is None:\n self._tpcorr = TPCorr(\n v2ref=self._wcsinfo['v2_ref'] / 3600.0,\n v3ref=self._wcsinfo['v3_ref'] / 3600.0,\n roll=self._wcsinfo['roll_ref'],\n matrix=matrix,\n shift=shift,\n name='tangent-plane linear correction'\n )\n idx_v2v3 = frms.index(self._v23name)\n pipeline = deepcopy(self._wcs.pipeline)\n pf, pt = pipeline[idx_v2v3]\n pipeline[idx_v2v3] = (pf, deepcopy(self._tpcorr))\n frm_v2v3corr = deepcopy(pf)\n frm_v2v3corr.name = 'v2v3corr'\n pipeline.insert(idx_v2v3 + 1, (frm_v2v3corr, pt))\n self._wcs = gwcs.WCS(pipeline, name=self._owcs.name)\n self._v23name = 'v2v3corr'\n\n else:\n # combine old and new corrections into a single one and replace\n # old transformation with the combined correction transformation:\n tpcorr2 = self._tpcorr.__class__(\n v2ref=self._tpcorr.v2ref, v3ref=self._tpcorr.v3ref,\n roll=self._tpcorr.roll, matrix=matrix, shift=shift,\n name='tangent-plane linear correction'\n )\n\n self._tpcorr = tpcorr2.combine(tpcorr2, self._tpcorr)\n\n idx_v2v3 = frms.index(self._v23name)\n pipeline = deepcopy(self._wcs.pipeline)\n pipeline[idx_v2v3 - 1] = (pipeline[idx_v2v3 - 1][0],\n deepcopy(self._tpcorr))\n self._wcs = gwcs.WCS(pipeline, name=self._owcs.name)\n\n # reset definitions of the transformations from detector/world\n # coordinates to the tangent plane:\n self._update_transformations()\n\n # save linear transformation info to the meta attribute:\n super().set_correction(matrix=matrix, shift=shift, meta=meta, **kwargs)",
"def testChangeRef(self):\n self.ccr.id = 'blahblahblah'\n\n self.assertEqual(\n {'blahblahblah': [self.ccr], 'oldId': [self.ccr_bad]},\n cdl_convert.ColorCorrectionRef.members\n )",
"def res_required(self):\n v = self[22]\n return (v & 0b1) != 0",
"def check_cflcushion(delt=0.1, cfl_cushion_upper=0.5, cfl_cushion_lower=0.1, code_dt_max=0.1, nstep=100):\n \n # Define some characteristic delta t's as log10()\n vec_cfl_dt_discrete = [-1., -2., -3., -3., -3., -3., -2., -3., -1., -1] \n vec_code_dt = [delt]; changes_in_delt = []\n print(0.1/0.22)\n print(0.1, 0.1/0.22*0.5)\n \n # Construct a continues vector of time steps\n vec_cfl_dt = []\n for i in range(len(vec_cfl_dt_discrete)-1):\n vec_cfl_dt += list(vec_cfl_dt_discrete[i] + np.array(range(nstep))/nstep*(vec_cfl_dt_discrete[i+1]-vec_cfl_dt_discrete[i]))\n vec_cfl_dt = 10**np.array(vec_cfl_dt) \n vec_step = range(len(vec_cfl_dt))\n \n # Mimic the CFL decrease condition\n for i, cfl_dt in enumerate(vec_cfl_dt):\n if (vec_code_dt[-1] > cfl_dt*cfl_cushion_upper):\n print(10**((np.log10(cfl_cushion_upper)+np.log10(cfl_cushion_lower))/2))\n vec_code_dt.append(cfl_dt*10**((np.log10(cfl_cushion_upper)+np.log10(cfl_cushion_lower))/2))\n changes_in_delt.append(i)\n print()\n print(f\"DECREASE! Because {vec_code_dt[-2]:6.2e} > {cfl_dt*cfl_cushion_upper:6.2e}\")\n print(f\" {cfl_dt*cfl_cushion_upper:6.2e} = cfl_dt*cfl_cushion_upper\")\n print(f\" {cfl_dt:6.2e} = cfl_dt\")\n print(f\" {vec_code_dt[-2]:6.2e} = code_dt\") \n print(f\" ==> code_dt = {vec_code_dt[-1]}\")\n elif (vec_code_dt[-1] < np.min([cfl_dt*cfl_cushion_lower, code_dt_max])):\n vec_code_dt.append(np.min([cfl_dt*10**((np.log10(cfl_cushion_upper)+np.log10(cfl_cushion_lower))/2), code_dt_max]))\n changes_in_delt.append(i)\n print()\n print(f\"INCREASE! Because {vec_code_dt[-2]:6.2e} < {np.min([cfl_dt*cfl_cushion_lower, code_dt_max]):6.2e}\")\n print(f\" {cfl_dt*cfl_cushion_lower:6.2e} = cfl_dt*cfl_cushion/delt_adjust\")\n print(f\" {cfl_dt:6.2e} = cfl_dt\")\n print(f\" {vec_code_dt[-2]:6.2e} = code_dt\") \n print(f\" ==> code_dt = {vec_code_dt[-1]}\")\n else:\n vec_code_dt.append(vec_code_dt[-1])\n \n # Create a figure\n fig = plt.figure(figsize=(18, 9)); fig.set_tight_layout(False)\n grid_specifications = gridspec.GridSpec(1,1)\n grid_specifications.update(top=0.98, left=0.05, right=0.95, bottom=0.06, wspace=0.35, hspace=0.45)\n ax = plt.subplot(grid_specifications[0])\n \n # Plot dt(istep)\n ax.plot(vec_step, vec_cfl_dt, color='black', label='CFL dt')\n ax.plot(vec_step, vec_cfl_dt*cfl_cushion_upper, color='black', alpha=0.5, label='CFL dt*CFL cushion upper')\n ax.plot(vec_step, vec_cfl_dt*cfl_cushion_lower, color='black', alpha=0.2, label='CFL dt*CFL cushion lower')\n ax.plot(vec_step, vec_code_dt[1:], color='maroon', label='code dt')\n \n # Highlight the changes \n if False:\n for change in changes_in_delt:\n ax.axvline(x=change, color='maroon', alpha=0.5, zorder=1)\n \n # Show figure\n ax.set_yscale('log')\n ax.autoscale()\n ax.legend(labelspacing=0.0, handlelength=1, shadow=True)\n plt.show()\n return",
"def fixed_in(self):\n fixed_in = self.fixed_artifact()\n fix_available_in = fixed_in.version if fixed_in and fixed_in.version != 'None' else None\n\n # NOTE: semver version format indicates a range where package\n # is vulnerable (as opposed to a value where anythng < value\n # is vulnerable, and the fix itself is known to exist), so we prepend a 'not' to indicate 'fix is available, if not in semver range'\n if fixed_in and fixed_in.version_format in ['semver']:\n # Github Advisories can add the real version where there is a fix if any.\n metadata = fixed_in.fix_metadata or {}\n first_patched_version = metadata.get('first_patched_version')\n if first_patched_version:\n return first_patched_version\n\n if fix_available_in and fixed_in.fix_metadata and fixed_in.fix_metadata.get('fix_exists', False):\n fix_available_in = \"! {}\".format(fix_available_in)\n else:\n fix_available_in = None\n\n return fix_available_in",
"def _handle_offsets_changed(self, master, slave, track_offset_delta, scene_offset_delta):\n track_target_offset = self._get_target_track_offset(master, slave, track_offset_delta)\n if track_target_offset < 0 or not self._can_link_tracks:\n track_target_offset = slave.track_offset_method()\n scene_target_offset = self._get_target_scene_offset(master, slave, scene_offset_delta)\n if scene_target_offset < 0:\n scene_target_offset = slave.scene_offset_method()\n if track_target_offset == slave.track_offset_method() and scene_target_offset == slave.scene_offset_method():\n return\n slave.set_offsets(track_target_offset, scene_target_offset)"
] | [
"0.61892533",
"0.6004834",
"0.5786946",
"0.5511628",
"0.54182035",
"0.5341413",
"0.52645195",
"0.5253012",
"0.5237",
"0.5225602",
"0.52148324",
"0.5205912",
"0.5189934",
"0.5129188",
"0.51038384",
"0.51002914",
"0.5086002",
"0.50812876",
"0.49967453",
"0.4983229",
"0.49150836",
"0.48954305",
"0.48794034",
"0.48509634",
"0.48450327",
"0.48201448",
"0.48180535",
"0.47885323",
"0.4787207",
"0.47792074",
"0.47737932",
"0.4772512",
"0.47632852",
"0.47526696",
"0.4746087",
"0.47178486",
"0.47141775",
"0.4710843",
"0.47101405",
"0.47066614",
"0.47066417",
"0.47065708",
"0.46972305",
"0.46931607",
"0.4683457",
"0.46699983",
"0.46693894",
"0.46657148",
"0.46625748",
"0.46570554",
"0.46565473",
"0.46485388",
"0.46442",
"0.46375224",
"0.46370137",
"0.46328297",
"0.46299043",
"0.46287698",
"0.46246806",
"0.4624022",
"0.46215153",
"0.4619811",
"0.46147954",
"0.4608131",
"0.46047541",
"0.46045905",
"0.46027827",
"0.46019384",
"0.46003145",
"0.45991632",
"0.4594096",
"0.4591859",
"0.4584954",
"0.45726413",
"0.4569874",
"0.4565497",
"0.45620802",
"0.4561637",
"0.4561054",
"0.45607448",
"0.45558712",
"0.45551696",
"0.45451578",
"0.45442796",
"0.4543911",
"0.45414332",
"0.4540367",
"0.45294932",
"0.4525408",
"0.45241478",
"0.45230994",
"0.45144162",
"0.45122868",
"0.4510297",
"0.45070246",
"0.45056173",
"0.45020926",
"0.4501063",
"0.449671",
"0.4494305"
] | 0.59622836 | 2 |
Determine fixed modifications in case the reference shift is at zero. Does not need localization. | def determine_fixed_mods_zero(aastat_result, data, params_dict):
    fix_mod_zero_thresh = params_dict['fix_mod_zero_thresh']
    min_fix_mod_pep_count_factor = params_dict['min_fix_mod_pep_count_factor']
    fix_mod_dict = {}
    reference = utils.mass_format(0)
    aa_rel = aastat_result[reference][2]
    utils.internal('aa_rel:\n%s', aa_rel)
    candidates = aa_rel[aa_rel < fix_mod_zero_thresh].index
    logger.debug('Fixed mod candidates: %s', candidates)
    for i in candidates:
        candidate_label = get_fixed_mod_raw(i, data, params_dict)
        if candidate_label != reference:
            # number of peptides with `i` at shift `candidate label` must be higher than ...
            count_cand = data.peptides(candidate_label).str.contains(i).sum()
            # number of peptides with `i` at shift `reference` by a factor of `min_fix_mod_pep_count_factor`
            count_ref = data.peptides(reference).str.contains(i).sum()
            # peptide count at candidate shift over # of peptides at reference
            est_ratio = count_cand / data.ms_stats()[reference][1]
            logger.debug('Peptides with %s: ~%d at %s, ~%d at %s. Estimated pct: %f',
                         i, count_ref, reference, count_cand, candidate_label, est_ratio)
            if aastat_result[candidate_label][2][i] > fix_mod_zero_thresh and (
                    est_ratio * 100 > fix_mod_zero_thresh * min_fix_mod_pep_count_factor):
                fix_mod_dict[i] = candidate_label
            else:
                logger.debug('Could not find %s anywhere. Can\'t fix.', i)
        else:
            logger.debug('Reference shift is the best for %s.', i)
    return fix_mod_dict | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def determine_fixed_mods_nonzero(reference, locmod_df, data):\n utils.internal('Localizations for %s: %s', reference, locmod_df.at[reference, 'localization'])\n loc = get_fix_mod_from_l10n(reference, locmod_df)\n label = reference\n data_dict = data.ms_stats().copy()\n while loc is None:\n del data_dict[label]\n label = max(data_dict, key=lambda k: data_dict[k][1])\n loc = get_fix_mod_from_l10n(label, locmod_df)\n logger.debug('No luck. Trying %s. Got %s', label, loc)\n if not data_dict:\n break\n return loc",
"def fixed(self):\n return self.f_fixed().m_fixed()",
"def change_nochange(reference_dataframe, allow_offset=0):\r\n\r\n def changed(x, default=False, offset=0):\r\n if len(x) == 1:\r\n return default\r\n elif x[0] == (x[1]-offset):\r\n return False\r\n else:\r\n return True\r\n\r\n def valid_matches(df, shift, mask):\r\n return df.RefChg & \\\r\n df.MapChg.shift(periods=shift, fill_value=False) & \\\r\n mask & \\\r\n mask.shift(periods=shift, fill_value=False)\r\n\r\n def get_change_window(series, index, offset):\r\n window = [index - offset, index + offset + 1]\r\n for w, s in zip([[0, 1], [1, 0]], [[0, offset], [offset, 0]]):\r\n slc0 = slice(*window)\r\n slc1 = slice(*[window[i] + s[i] for i in range(len(window))])\r\n while series[slc1].sum() > series[slc0].sum():\r\n window = [window[i] + w[i] for i in range(len(window))]\r\n slc0 = slice(*window)\r\n slc1 = slice(*[window[i] + s[i] for i in range(len(window))])\r\n return slice(*window)\r\n\r\n df = reference_dataframe.copy()\r\n df = df.sort_values(['plotid', 'image_year']).reset_index()\r\n\r\n # Rolling window to find changes in land cover class, plot id, or jumps in year\r\n ref_chg = df.Reference.rolling(2, min_periods=1).apply(\r\n lambda x: changed(x), raw=True).astype(np.bool)\r\n map_chg = df.LC_Primary.rolling(2, min_periods=1).apply(\r\n lambda x: changed(x), raw=True).astype(np.bool)\r\n plt_chg = df.plotid.rolling(2, min_periods=1).apply(\r\n lambda x: changed(x, default=True), raw=True).to_numpy(dtype=np.bool)\r\n year_chg_not_one = df.image_year.rolling(2, min_periods=1).apply(\r\n lambda x: changed(x, offset=1), raw=True).to_numpy(dtype=np.bool)\r\n\r\n # Potentially 'valid' data points for change/no-change are defined as follows:\r\n # a) The 'plotid' did not change (the initial observations cannot be a change)\r\n # b) The change in 'image_year' cannot be more than one (missing years are unknowns)\r\n # c) The current and previous reference class cannot be a 0 (invalid value)\r\n\r\n df.loc[:, 'Valid'] = ~plt_chg & ~year_chg_not_one & ~(df.Reference.values == 0)\r\n df.loc[1:, 'Valid'] = df.Valid.values[1:] & ~(df.Reference.values[:-1] == 0)\r\n\r\n # ---- Initialize new columns ---- #\r\n\r\n df.loc[:, 'RefChg'] = ref_chg & df['Valid'].values # Valid reference changes\r\n df.loc[:, 'MapChg'] = map_chg & df['Valid'].values # Valid map changes, not shifted yet\r\n\r\n df.loc[:, 'MapChgYear'] = df['image_year'] * df['MapChg'] # Year of map change or zero\r\n\r\n # There will be some invalid entries here, but they will be filtered out later\r\n df['RefChgFromTo'] = (df.Reference.astype(np.int16) * 100) + df.Reference\r\n df.loc[1:, 'RefChgFromTo'] = (df.Reference[:-1].astype(np.int16).values * 100) + df.Reference[1:].values\r\n df['MapChgFromTo'] = (df.LC_Primary.astype(np.int16) * 100) + df.LC_Primary\r\n df.loc[1:, 'MapChgFromTo'] = (df.LC_Primary[:-1].astype(np.int16).values * 100) + df.LC_Primary[1:].values\r\n\r\n mutable = df.Valid.copy() # Track which things are OK to change\r\n\r\n # ---- End of initialization ---- #\r\n\r\n # Find map changes that can be matched to those in the reference data set in other years, within tolerance\r\n if allow_offset:\r\n print('Adjusting changes...')\r\n change_indices = df[df.MapChg.values].index\r\n for change_index in change_indices:\r\n mask = df.plotid == df.loc[change_index, 'plotid'] # Only consider the same plotid\r\n change_compare = []\r\n window = get_change_window(df.MapChg | df.RefChg, change_index, allow_offset)\r\n for shift in range(-allow_offset, allow_offset + 1):\r\n change_compare.append((valid_matches(df, 
shift, mutable & mask)[window].sum(), shift))\r\n # Sort by decreasing total matches, then increasing shift amount\r\n change_compare.sort(key=lambda x: (-x[0], abs(x[1])))\r\n for changes in change_compare:\r\n n_changes, offset = changes\r\n if n_changes:\r\n matches = valid_matches(df, offset, mutable & mask)\r\n # Shift will only affect valid matches, or where the valid matches started from, for that window\r\n shift_mask = (matches | matches.shift(periods=-offset, fill_value=False)) & \\\r\n df.index.isin(df[window].index)\r\n # Update MapChg, MapChgYear, MapChgFromTo\r\n df.loc[shift_mask, 'MapChg'] = \\\r\n (df.MapChg & shift_mask).shift(\r\n periods=offset, fill_value=False)[shift_mask].values\r\n df.loc[shift_mask, 'MapChgYear'] = \\\r\n (df.MapChgYear * shift_mask.astype(np.int16)).shift(\r\n periods=offset, fill_value=0)[shift_mask].values\r\n df.loc[shift_mask, 'MapChgFromTo'] = \\\r\n (df.MapChgFromTo * shift_mask.astype(np.int16)).shift(\r\n periods=offset, fill_value=101)[shift_mask].values\r\n # These matches will not be changed again\r\n mutable[matches & df.index.isin(df[window].index)] = False\r\n\r\n # Fixing the change codes after moving stuff around above\r\n print('Adjusting change codes...')\r\n for i in df[df.MapChg.values].index:\r\n need_new_lc = True\r\n new_lc = 0\r\n for j in range(i, max(df.index) + 1):\r\n if plt_chg[j]:\r\n break\r\n # If we've just jumped years, we don't know the LC\r\n if year_chg_not_one[j]:\r\n need_new_lc = True\r\n # If we need LC, take it from LC_Primary if nonzero\r\n if need_new_lc and df.loc[j, 'LC_Primary']:\r\n new_lc = df.loc[j, 'LC_Primary']\r\n need_new_lc = False\r\n # If there's been a change, take the new LC from the change code\r\n if df.loc[j, 'MapChg']:\r\n new_lc = df.loc[j, 'MapChgFromTo'] % 10\r\n need_new_lc = False\r\n # Update non-change locations with LC code if possible.\r\n if (not need_new_lc) and (not df.loc[j, 'MapChg']) and (df.loc[j, 'LC_Primary']):\r\n df.loc[j, 'MapChgFromTo'] = (new_lc * 100) + new_lc\r\n\r\n # Check for leapfrogging. The code does not prevent this.\r\n print('Final checks...')\r\n for plot in np.unique(df[df.MapChg.values].plotid):\r\n masked_arr = df[(df.plotid == plot) & (df.MapChgYear > 0)].MapChgYear.values\r\n if not all(masked_arr[i] <= masked_arr[i + 1] for i in range(len(masked_arr) - 1)):\r\n raise Exception('Warning! Leapfrog change year in plot: {}'.format(plot))\r\n\r\n # Switch from True/False values to strings for clarity\r\n chg = {True: 'Chg', False: 'NoChg'}\r\n df['RefChg'] = df.RefChg.apply(lambda x: chg[x])\r\n df['MapChg'] = df.MapChg.apply(lambda x: chg[x])\r\n\r\n # Get rid of the invalid data points, those don't count for change or no-change.\r\n df.drop(df[~df.Valid].index, inplace=True)\r\n\r\n return df",
"def pre_modify(self):\n return 0",
"def _only_fixed(o, d):\n if d[\"fixed\"]:\n return (\"value\", \"fixed\")\n else:\n return (\"fixed\",)",
"def _fixed_indicies(self):\n fixed_inds = self.constraints == 'fixed'\n return fixed_inds",
"def mod_mask(self):\n # Check the *_masq values\n self.__log.debug(\"Checking the *_masq arrays\")\n # Retrieve the kid boxes\n masq_names = np.unique([\"{}_masq\".format(item[1]) for item in self.list_detector])\n self.__check_attributes(masq_names, read_missing=False)\n # Check that they are all the same\n warnings.warn(\"Temporary fix to int8\")\n masqs = [getattr(self, masq).astype(np.int8) for masq in masq_names]\n\n if np.any(np.std(masqs, axis=0) != 0):\n self.__log.error(\"*_masq is varying -- Please check : {}\".format(pprint_list(masq_names, \"_masq\")))\n\n # AB private comm) main_flag should be the bitwise_or of all boxes\n # Well not exactly....\n # cast into 8 bit, is more than enough, only 3 bits used anyway...\n masq = np.bitwise_or.reduce(masqs, axis=0).astype(np.int8)\n\n # AB (#CONCERTO_DAQ January 11 13:02)\n # _flag_balayage_en_cours & _flag_blanking_synthe\n # Ainsi on aura la modulation en bit0 et 1 et le flag blanking en bit\n # AB (#CONCERTO_DAQ February 11 11:07)\n # bit 1 & 2 code the modulation as a signed integer -1 0 1 : 11 00 01 ie 3 0 1\n # bit 3 is a blanking bit, which does not exist for KISS, but should not be taken into account for CONCERTO\n\n # Thus as a temporary fix, let's clear the 3rd bit, actually a bad idea...\n # self.__log.warning(\"Temporary fix : clearing the 3rd bit of masq\")\n # masq = masq & ~(1 << 2)\n\n return masq",
"def test_correct_backward_order1(self):\r\n coeffs, shifts = finite_diff_coeffs(1, 1, \"backward\")\r\n assert np.allclose(coeffs, [1, -1])\r\n assert np.allclose(shifts, [0, -1])",
"def test_correct_forward_order1(self):\r\n coeffs, shifts = finite_diff_coeffs(1, 1, \"forward\")\r\n assert np.allclose(coeffs, [-1, 1])\r\n assert np.allclose(shifts, [0, 1])",
"def _set_fixed(o, d):\n if d:\n o.fix()\n else:\n o.unfix()",
"def test_explicit_fixed_effects_without_mask(tmp_path):\n shapes, rk = ((7, 8, 7, 15), (7, 8, 7, 16)), 3\n _, fmri_data, design_matrices =\\\n write_fake_fmri_data_and_design(shapes, rk, file_path=tmp_path)\n contrast = np.eye(rk)[1]\n\n # session 1\n multi_session_model = FirstLevelModel().fit(\n fmri_data[0], design_matrices=design_matrices[:1])\n dic1 = multi_session_model.compute_contrast(\n contrast, output_type='all')\n\n # session 2\n multi_session_model.fit(\n fmri_data[1], design_matrices=design_matrices[1:])\n dic2 = multi_session_model.compute_contrast(\n contrast, output_type='all')\n\n # fixed effects model\n multi_session_model.fit(\n fmri_data, design_matrices=design_matrices)\n fixed_fx_dic = multi_session_model.compute_contrast(\n contrast, output_type='all')\n\n contrasts = [dic1['effect_size'], dic2['effect_size']]\n variance = [dic1['effect_variance'], dic2['effect_variance']]\n\n # test without mask variable\n (\n fixed_fx_contrast,\n fixed_fx_variance,\n fixed_fx_stat,\n ) = compute_fixed_effects(contrasts, variance)\n assert_almost_equal(\n get_data(fixed_fx_contrast),\n get_data(fixed_fx_dic['effect_size']))\n assert_almost_equal(\n get_data(fixed_fx_variance),\n get_data(fixed_fx_dic['effect_variance']))\n assert_almost_equal(\n get_data(fixed_fx_stat), get_data(fixed_fx_dic['stat']))",
"def test_fix_mask(self):\n fixable_mask = mapreader.Map(os.path.join(tests.TEST_DATA_PATH, 'segmentations', 'test_fixable_mask.map'))\n self.assertFalse(fixable_mask.is_mask)\n fixable_mask.fix_mask()\n self.assertTrue(fixable_mask.is_mask)",
"def test_correct_forward_order2(self):\r\n coeffs, shifts = finite_diff_coeffs(1, 2, \"forward\")\r\n assert np.allclose(coeffs, [-1.5, 2, -0.5])\r\n assert np.allclose(shifts, [0, 1, 2])",
"def test_correct_center_order2(self):\r\n coeffs, shifts = finite_diff_coeffs(1, 2, \"center\")\r\n assert np.allclose(coeffs, [-0.5, 0.5])\r\n assert np.allclose(shifts, [-1, 1])",
"def check_fixedblock(self):\n print('This will read the fixed block then display changes as they')\n print('occur. Typically the most common change is the incrementing')\n print('of the data pointer, which happens whenever readings are saved')\n print('to the station memory. For example, if the logging interval')\n print('is set to 5 minutes, the fixed block should change at least')\n print('every 5 minutes.')\n raw_fixed = self.station.get_raw_fixed_block()\n while True:\n new_fixed = self.station.get_raw_fixed_block(unbuffered=True)\n for ptr in range(len(new_fixed)):\n if new_fixed[ptr] != raw_fixed[ptr]:\n print(datetime.datetime.now().strftime('%H:%M:%S'), end=' ')\n print(' %04x (%d) %02x -> %02x' % (\n ptr, ptr, raw_fixed[ptr], new_fixed[ptr]))\n raw_fixed = new_fixed\n time.sleep(0.5)",
"def getTranslation(fracs):\n \n \n \n # Determine whether the shift needs to be from inf to 0 \n # or from -inf to 0\n \n # Along all x fractionals\n if abs(max(fracs[0]))>=abs(min(fracs[0])):\n minX = min([x for x in fracs[0] if x>0])\n else:\n minX = min([x for x in fracs[0] if x<0])\n \n # Along all y fractionals\n if abs(max(fracs[1]))>=abs(min(fracs[1])):\n minY = min([x for x in fracs[1] if x>0])\n else:\n minY = min([x for x in fracs[1] if x<0])\n \n # Along all z fractionals\n # Need to consider all atoms lying in a single\n # plane (e.g. graphene), thus the final \"else\"\n # statement\n if abs(max(fracs[2]))>abs(min(fracs[2])):\n minZ = min([x for x in fracs[2] if x>0])\n elif abs(max(fracs[2]))<abs(min(fracs[2])):\n minZ = min([x for x in fracs[2] if x<0])\n else:\n minZ = max(fracs[2])\n\n shift_vector = np.array([minX,minY,minZ])\n \n return(shift_vector)",
"def test_shift_ruptures_no_shift(midday):\n shift_mask, shift_amounts = time.shifts_ruptures(\n midday, midday\n )\n assert not shift_mask.any()\n assert_series_equal(\n shift_amounts,\n pd.Series(0, index=midday.index, dtype='int64'),\n check_names=False\n )",
"def test_correct_second_derivative_center_order4(self):\r\n coeffs, shifts = finite_diff_coeffs(2, 4, \"center\")\r\n assert np.allclose(coeffs, [-2.5, 4 / 3, 4 / 3, -1 / 12, -1 / 12])\r\n assert np.allclose(shifts, [0, -1, 1, -2, 2])",
"def fixed(self):\n for i in range(15):\n self.factors[i].fixed()\n self.transition.fixed()",
"def m_fixed(self):\n self.mx_free = self.my_free = self.mz_free = False\n return self",
"def mask_fixed(self):\n ns = len(self)-1\n # mask fixed entries\n self.mask[0,0,0] = True\n self.mask[0,0,-1] = True\n self.mask[0,-1,0] = True\n self.mask[-1,0,0] = True\n # mask entries with i+j+k > ns\n for ii in range(len(self)):\n for jj in range(len(self)):\n for kk in range(len(self)):\n if ii+jj+kk > ns:\n self.mask[ii,jj,kk] = True\n \n # mask fA = 0 and fB = 0\n for ii in range(len(self)):\n self.mask[ii,ns-ii,0] = True\n self.mask[ii,0,ns-ii] = True\n\n self.mask[0,:,0] = True\n self.mask[0,0,:] = True\n return self",
"def find_shift(ref, img):\n im0 = prepare(ref)\n im1 = prepare(img)\n shift, error, diffphase = register_translation(im0, im1, 100)\n\n return shift",
"def test_adjust_offsets_short(self):\n tool = pybedtools.BedTool(\"chr15 91512755 91512836 ENSG00000198901_1_147 0 -\", from_string=True)\n offsets = {\"ENSG00000198901_1_147\" : 10}\n results = adjust_offsets(tool, offsets)",
"def test_correct_second_derivative_forward_order1(self):\r\n coeffs, shifts = finite_diff_coeffs(2, 1, \"forward\")\r\n assert np.allclose(coeffs, [1, -2, 1])\r\n assert np.allclose(shifts, [0, 1, 2])",
"def test_modified_schwefel(self):\n fun = get_problem('modified_schwefel', self.dimension, -100, 100)\n self.assertAlmostEqual(fun(self.array10), 6.9448853328785844, delta=350)",
"def f_fixed(self):\n self.fx_free = self.fy_free = self.fz_free = False\n return self",
"def test_findBugfixes(self):\n bugfixes = self.builder._findChanges(\n self.project, self.builder._BUGFIX)\n self.assertEquals(\n bugfixes,\n [(23, 'Broken stuff was fixed.')])",
"def on_fees_change(origin_matrix, changes_on_fees):\n new_fees = origin_matrix[FEES_IDX]\n for idx in range(len(origin_matrix[0])):\n if changes_on_fees[idx] != None:\n new_fees[idx] = changes_on_fees[idx]\n return new_fees",
"def test_fixups():\n binary: MachO = cast(MachO, cle.Loader(str(TEST_BASE / \"tests\" / \"aarch64\" / \"dyld_ios15.macho\")).main_object)\n expected = {\n 0x100008100: 0x100007A40,\n 0x1000081E0: 0x1000072B0,\n 0x1000081E8: 0x1000072DC,\n 0x1000081F0: 0x1000072E4,\n 0x1000081F8: 0x100007310,\n 0x100008200: 0x100007350,\n 0x100008208: 0x10000735C,\n 0x100008210: 0x10000738C,\n 0x100008218: 0x1000073E8,\n 0x100008238: 0x1000081E0,\n 0x100008248: 0x100007A40,\n 0x1000082A0: 0x100007AFC,\n 0x1000082D8: 0x10000C0E8,\n 0x10000C018: 0x100007B90,\n 0x10000C060: 0x100007B90,\n 0x10000C068: 0x100007998,\n 0x10000C090: 0x100007C2A,\n 0x10000C0D0: 0x10000C000,\n 0x10000C0D8: 0x100007210,\n 0x10000C0E8: 0x10000C0B0,\n 0x10000C108: 0x10000C04A,\n 0x10000C128: 0x1000079F0,\n }\n\n actual = {r.rebased_addr: r.value for r in binary.relocs if isinstance(r, MachOChainedFixup)}\n assert actual == expected",
"def test_fix(self):\n self.check_data.side_effect = lambda: self.fixed_cube\n with patch('esmvalcore.cmor._fixes.fix.Fix.get_fixes',\n return_value=[self.mock_fix]) as mock_get_fixes:\n with patch('esmvalcore.cmor.fix._get_cmor_checker',\n return_value=self.checker):\n cube_returned = fix_data(\n self.cube,\n short_name='short_name',\n project='project',\n dataset='model',\n mip='mip',\n session=sentinel.session,\n )\n self.checker.assert_called_once_with(self.intermediate_cube)\n self.check_data.assert_called_once_with()\n assert cube_returned is not self.cube\n assert cube_returned is not self.intermediate_cube\n assert cube_returned is self.fixed_cube\n mock_get_fixes.assert_called_once_with(\n **self.expected_get_fixes_call\n )",
"def determine_if_clade_differs(\n self,\n plain_rf: int,\n tip_names_zero: list,\n tip_names_one: list\n ) -> int:\n if set(tip_names_zero) != set(tip_names_one):\n plain_rf +=1\n \n return plain_rf",
"def test_fix_data(self):\n cube = self.fix.fix_data(self.cube)\n np.testing.assert_allclose(cube.data[0], 1.0)\n np.testing.assert_allclose(cube.data[2], 2.0)\n assert not np.ma.is_masked(cube.data[0])\n assert np.ma.is_masked(cube.data[1])\n assert not np.ma.is_masked(cube.data[2])",
"def _make_determine_wants_func(ref_mutator):\n\n def determine_wants(old_refs):\n refs = {k.decode(\"UTF-8\"): v.decode(\"UTF-8\") for k, v in old_refs.items()}\n new_refs = ref_mutator(refs)\n new_refs = {k.encode(\"UTF-8\"): v.encode(\"UTF-8\") for k, v in new_refs.items()}\n new_refs.update(old_refs) # Make sure we don't delete/modify anything.\n return new_refs\n\n return determine_wants",
"def test_explicit_fixed_effects(tmp_path):\n shapes, rk = ((7, 8, 7, 15), (7, 8, 7, 16)), 3\n mask, fmri_data, design_matrices =\\\n write_fake_fmri_data_and_design(shapes, rk, file_path=tmp_path)\n contrast = np.eye(rk)[1]\n\n # session 1\n multi_session_model = FirstLevelModel(mask_img=mask).fit(\n fmri_data[0], design_matrices=design_matrices[:1])\n dic1 = multi_session_model.compute_contrast(\n contrast, output_type='all')\n\n # session 2\n multi_session_model.fit(\n fmri_data[1], design_matrices=design_matrices[1:])\n dic2 = multi_session_model.compute_contrast(\n contrast, output_type='all')\n\n # fixed effects model\n multi_session_model.fit(\n fmri_data, design_matrices=design_matrices)\n fixed_fx_dic = multi_session_model.compute_contrast(\n contrast, output_type='all')\n\n contrasts = [dic1['effect_size'], dic2['effect_size']]\n variance = [dic1['effect_variance'], dic2['effect_variance']]\n\n (\n fixed_fx_contrast,\n fixed_fx_variance,\n fixed_fx_stat,\n ) = compute_fixed_effects(contrasts, variance, mask)\n\n assert_almost_equal(\n get_data(fixed_fx_contrast),\n get_data(fixed_fx_dic['effect_size']))\n assert_almost_equal(\n get_data(fixed_fx_variance),\n get_data(fixed_fx_dic['effect_variance']))\n assert_almost_equal(\n get_data(fixed_fx_stat), get_data(fixed_fx_dic['stat']))\n\n # ensure that using unbalanced effects size and variance images\n # raises an error\n with pytest.raises(ValueError):\n compute_fixed_effects(contrasts * 2, variance, mask)",
"def _get_init_controls(self):\n\n u_perf_0 = None\n k_fb_perf_0 = None\n k_fb_lqr = self.get_lqr_feedback()\n\n if self.do_shift_solution and self.n_fail == 0:\n if self.n_safe > 1:\n k_fb_safe = np.copy(self.k_fb_safe_all)\n\n # Shift the safe controls\n k_ff_safe = np.copy(self.k_ff_safe)\n\n u_0 = k_ff_safe[0, :]\n\n if self.n_safe > self.r and self.n_perf > self.n_safe: # the first control after the shared controls\n k_ff_perf = np.copy(self.k_ff_perf)\n k_ff_r_last = (k_ff_perf[0, :] + k_ff_safe[self.r - 1,\n :]) / 2 # mean of first perf ctrl and safe ctrl after shared\n else:\n k_ff_r_last = k_ff_safe[-1, :] # just the last safe control\n\n k_ff_safe_new = np.vstack((k_ff_safe[1:self.r, :], k_ff_r_last))\n\n if self.n_safe > self.r + 1:\n k_ff_safe_new = np.vstack((k_ff_safe_new, k_ff_safe[self.r:, :]))\n else:\n u_0 = self.u_apply\n k_ff_safe_new = np.array([])\n\n if self.n_perf - self.r > 0:\n k_ff_perf = np.copy(self.k_ff_perf)\n k_ff_perf_new = np.vstack((k_ff_perf[1:, :], k_ff_perf[-1, :]))\n\n if self.perf_has_fb:\n k_fb_perf_0 = np.copy(self.k_fb_perf_0)\n else:\n k_fb_perf_0 = np.array([])\n else:\n k_ff_perf_new = np.array([])\n k_fb_perf_0 = np.array([])\n else:\n k_fb_safe = np.empty((self.n_safe - 1, self.n_s * self.n_u))\n for i in range(self.n_safe - 1):\n k_fb_safe[i] = cas_reshape(k_fb_lqr, (1, -1))\n\n k_ff_safe_new = np.zeros((self.n_safe - 1, self.n_u))\n u_0 = np.zeros((self.n_u, 1))\n\n k_ff_perf_new = np.array([])\n if self.n_perf > 1:\n k_ff_perf_new = np.zeros((self.n_perf - self.r, self.n_u))\n\n if self.perf_has_fb:\n k_fb_perf_0 = k_fb_lqr\n else:\n k_fb_perf_0 = np.array([])\n\n if self.n_safe > 1:\n k_fb_safe_new = np.vstack((k_fb_safe[1:, :], k_fb_safe[-1, :]))\n\n else:\n k_fb_safe_new = np.array([])\n\n return u_0, k_ff_safe_new, k_fb_safe, k_ff_perf_new, k_fb_perf_0",
"def test_zero_clumper():\n mask0 = np.array([0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0], dtype=bool)\n expected_clump0 = [\n np.array([0], dtype=bool),\n np.array([0, 0, 0], dtype=bool),\n np.array([0, 0], dtype=bool),\n np.array([0, 0, 0], dtype=bool),\n np.array([0], dtype=bool),\n ]\n\n clumps0 = org_zeros_clumper(mask0)\n clumps0_corr = correct_zeros_clumper(mask0)\n # For mask0 both methods work.\n for i, __ in enumerate(clumps0):\n assert np.all(clumps0[i] == expected_clump0[i])\n assert np.all(clumps0_corr[i] == expected_clump0[i])\n\n clumps0_inverted = org_zeros_clumper(~mask0)\n clumps0_corr_inverted = correct_zeros_clumper(~mask0)\n # Check number of clumps if inverted\n # Should change since mask0 has uneven group numbers. 5 False, 4 True.\n assert not (len(clumps0_inverted) != len(clumps0)) # This should not be the case\n assert len(clumps0_corr_inverted) != len(clumps0_corr)\n\n # This mask starts with group of 1s so fails for original code.\n mask1 = np.array([1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0], dtype=bool)\n expected_clump1 = [\n np.array([0, 0, 0], dtype=bool),\n np.array([0, 0], dtype=bool),\n np.array([0, 0, 0], dtype=bool),\n np.array([0], dtype=bool),\n ]\n clumps1 = org_zeros_clumper(mask1)\n clumps1_corr = correct_zeros_clumper(mask1)\n\n for i, __ in enumerate(clumps1):\n assert not (np.all(clumps1[i] == expected_clump1[i])) # Failed original case\n assert np.all(clumps1_corr[i] == expected_clump1[i])\n\n # Testing corner cases\n masked_zeros = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=bool)\n expected_zeros = [masked_zeros]\n all_zeros_clumped = correct_zeros_clumper(masked_zeros)\n assert len(expected_zeros) == len(all_zeros_clumped)\n for i, __ in enumerate(all_zeros_clumped):\n assert np.all(all_zeros_clumped[i] == expected_zeros[i])\n\n masked_ones = np.array([1, 1, 1, 1, 1, 1], dtype=bool)\n expected_ones = []\n all_ones_clumped = correct_zeros_clumper(masked_ones)\n assert len(expected_ones) == len(all_ones_clumped)\n assert all_ones_clumped == []",
"def eval_is_fixed(leaf, eval_from_search):\n check_val = nn_evaluate.evaluate(leaf)\n if leaf.side_to_move() == Side.B:\n check_val *= -1\n check_val = min(max(check_val/1000, -1), 1)\n if abs(check_val - eval_from_search) > .0008:\n if abs(eval_from_search) != 0 and abs(eval_from_search) != 1:\n print(\"fixed.. searchval:\", eval_from_search, \"nnval:\", check_val, \"fen\", leaf.fen())\n return True\n return False",
"def replace_lowest_one_with_zero(x):\n return x & (x-1)",
"def test_allow_effect_during_refractory(self):\n np.random.seed(6564)\n f = 0.5\n self.syn_dense.W = np.random.randn(self.M, self.N)\n self.syn_dense.f_nmda = f\n self.syn_dense.change_during_ref = True\n\n self.T.active_state = False\n\n sim = simulation.Simulation(self.G, self.T, self.syn_dense, dt=self.dt)\n sim.run(self.t_max)\n\n self.assertGreater(np.linalg.norm(self.T.i_ampa), 0.1)\n self.assertGreater(np.linalg.norm(self.T.i_nmda), 0.1)",
"def test_fix(self):\n self.check_metadata.side_effect = lambda: self.fixed_cube\n with patch('esmvalcore.cmor._fixes.fix.Fix.get_fixes',\n return_value=[self.mock_fix]) as mock_get_fixes:\n with patch('esmvalcore.cmor.fix._get_cmor_checker',\n return_value=self.checker):\n cube_returned = fix_metadata(\n cubes=[self.cube],\n short_name='short_name',\n project='project',\n dataset='model',\n mip='mip',\n session=sentinel.session,\n )[0]\n self.checker.assert_called_once_with(self.intermediate_cube)\n self.check_metadata.assert_called_once_with()\n assert cube_returned is not self.cube\n assert cube_returned is not self.intermediate_cube\n assert cube_returned is self.fixed_cube\n mock_get_fixes.assert_called_once_with(\n **self.expected_get_fixes_call\n )",
"def detect_fpMod():\n import logging\n log = logging.getLogger(__name__)\n log.debug('Setting fpMod')\n\n if z3.is_true(z3.simplify(z3.FPVal(3, z3.Float32()) % 2 < 0)):\n log.debug('Correct fpRem detected')\n fpMod.__code__ = fpMod_using_fpRem.__code__\n else:\n log.debug('fpRem = fpMod')\n fpMod.__code__ = fpRem_trampoline.__code__",
"def test_unfixable_mask(self):\n unfixable_mask = mapreader.Map(os.path.join(tests.TEST_DATA_PATH, 'segmentations', 'test_unfixable_mask.map'))\n self.assertFalse(unfixable_mask.is_mask)\n with self.assertRaises(ValueError):\n unfixable_mask.fix_mask()\n self.assertFalse(unfixable_mask.is_mask)",
"def calculate_correction(filedic):\n lanczos_cera = xr.open_mfdataset(filedic['lanczos(CERA)'], combine='by_coords')\n lanczos_noaa = xr.open_mfdataset(filedic['lanczos(20CR)'], combine='by_coords')\n return lanczos_noaa.drop('number').squeeze() - lanczos_cera.drop('number').squeeze()",
"def flux_recal(data, z0, zref):\n\tf_obs = data\n\tz0 = z0\n\tz1 = zref\n\tDa0 = Test_model.angular_diameter_distance( z0 ).value\n\tDa1 = Test_model.angular_diameter_distance( z1 ).value\n\tf_ref = f_obs * (1 + z0)**4 * Da0**2 / ( (1 + z1)**4 * Da1**2 )\n\treturn f_ref",
"def _fixed_masks_arg(mask):\n return [\"NULL\", mask]",
"def test_center_of_coordinates_shift():\n # print sys._getframe().f_code.co_name\n # c = commons()\n\n pdb_inp = iotbx.pdb.input(source_info=None, lines=test_pdb_str_2)\n ncs_obj_phil = ncs.input(\n hierarchy=pdb_inp.construct_hierarchy())\n ncs_restraints_group_list = ncs_obj_phil.get_ncs_restraints_group_list()\n\n # ncs_restraints_group_list = c.ncs_restraints_group_list\n xrs = pdb_inp.xray_structure_simple()\n shifts = ncs_restraints_group_list.get_ncs_groups_centers(\n sites_cart = xrs.sites_cart())\n\n xyz = pdb_inp.atoms().extract_xyz()\n center_of_coor = (flex.vec3_double([xyz.sum()]) * (1/xyz.size())).round(8)\n # test shifts\n t1 = shifts[0].round(8)\n t2 = shifts[1].round(8)\n d1 = flex.sqrt((center_of_coor-t1).dot()).min_max_mean().as_tuple()\n d2 = flex.sqrt((center_of_coor-t2).dot()).min_max_mean().as_tuple()\n assert (d1 == d2)\n\n # test shift to center\n new_nrg = ncs_restraints_group_list.shift_translation_to_center(shifts = shifts)\n expected = (22.63275, 5.54625, 2.9375)\n assert (new_nrg[0].copies[0].t.round(5)).elems == expected\n # back to original coordinates system\n old_nrg = new_nrg.shift_translation_back_to_place(shifts=shifts)\n expected = (old_nrg[0].copies[0].t.round(5)).elems\n result = (ncs_restraints_group_list[0].copies[0].t.round(5)).elems\n assert result == expected",
"def find_change(now: int, history: List[int]) -> Union[int, None]:\n if now is None or history is None:\n return None\n index = 7\n if len(history) < 7:\n index = len(history)\n return history[-index] - now",
"def absolute_momentum(self, prices, lookback, long_only=False):\n returns = prices.pct_change(periods=lookback).fillna(0)\n long_signal = (returns > 0).applymap(self.bool_converter)\n short_signal = -(returns < 0).applymap(self.bool_converter)\n if long_only == True:\n signal = long_signal\n else:\n signal = long_signal + short_signal\n return signal",
"def mask_percentage(self):\n return 100 - self.tissue_percentage",
"def get_shift(self, ra, dec):\n\n if not self.__dict__.has_key('wcs'):\n # set up a fake WCS to do the transformation\n # likely the details do not matter much\n wcs=pywcs.WCS(naxis=2)\n wcs.wcs.ctype=['RA---SIN','DEC--SIN']\n wcs.wcs.crval=[n.degrees(self.ra0),n.degrees(self.dec0)]\n wcs.wcs.crpix=[2049,2049]\n wcs.wcs.cdelt=[-1.0/60,1.0/60]\n \n observer=ephem.Observer()\n observer.long=n.radians(self.long)\n observer.lat=n.radians(self.lat)\n observer.epoch=ephem.J2000\n J0 = ephem.julian_date(0) \n observer.date=self.time[0]-J0\n \n body=ephem.FixedBody()\n body._ra=self.ra0\n body._dec=self.dec0\n body._epoch=ephem.J2000\n body.compute(observer)\n \n LST=observer.sidereal_time()\n HA=LST-self.ra0\n _dec=self.dec0\n _lat=n.radians(self.lat)\n # this calculation comes from Steve Ord's fixhdr.c\n parallactic_angle=n.arctan2(n.sin(HA)*n.cos(_lat),\n n.sin(_lat)*n.cos(_dec)-n.sin(_dec)*n.cos(_lat)*n.cos(HA))\n\n cosz=n.sin(_lat)*n.sin(_dec)+n.cos(_lat)*n.cos(_dec)*n.cos(HA)\n z=n.arccos(cosz)\n sinz=n.sin(z)\n tanz=sinz/cosz\n \n PV2_1=tanz*n.sin(parallactic_angle)\n PV2_2=tanz*n.cos(parallactic_angle)\n\n wcs.wcs.set_pv([(2,1,PV2_1),(2,2,PV2_2)])\n self.wcs=wcs\n \n if isinstance(ra,n.ndarray):\n sky=n.vstack((ra,dec)).T\n else:\n sky=n.array([[ra,dec]])\n pix=self.wcs.wcs_sky2pix(sky,0)\n if isinstance(ra,n.ndarray):\n x=pix[:,0]\n y=pix[:,1]\n else:\n x=pix[0,0]\n y=pix[0,1]\n dx=x-(self.wcs.wcs.crpix[0]-1)\n dy=y-(self.wcs.wcs.crpix[1]-1)\n dl=n.radians(dx*self.wcs.wcs.cdelt[0])\n dm=n.radians(dy*self.wcs.wcs.cdelt[1])\n return dl,dm",
"def test_reflectance_ref(fluxd, wfb, f_sun, ref):\n\n xsec = 6.648e5 * u.km**2\n\n with vega_fluxd.set({'V': u.Quantity(3.589e-9, 'erg/(s cm2 AA)')}):\n with solar_fluxd.set({wfb: f_sun}):\n r = fluxd.to('1/sr', reflectance(wfb, cross_section=xsec))\n assert r.unit == u.sr**-1\n assert np.isclose(r.value, ref)",
"def test_transform_update():\n pdb_inp = iotbx.pdb.input(source_info=None, lines=pdb_answer_0)\n ncs_obj = ncs.input(hierarchy=pdb_inp.construct_hierarchy())\n pdb_inp = iotbx.pdb.input(lines=pdb_answer_0,source_info=None)\n nrgl = ncs_obj.get_ncs_restraints_group_list()\n asu_site_cart = pdb_inp.atoms().extract_xyz()\n # reference matrices\n r1 = nrgl[0].copies[0].r\n t1 = nrgl[0].copies[0].t\n r2 = nrgl[0].copies[1].r\n t2 = nrgl[0].copies[1].t\n # modify matrices in the ncs group list\n nrgl[0].copies[0].r = r1 + r2\n nrgl[0].copies[0].t = t1 + t2\n nrgl[0].copies[1].r = r1 + r2\n nrgl[0].copies[1].t = t1 + t2\n nrgl.recalculate_ncs_transforms(asu_site_cart)\n # Get the updated values\n r1_n = nrgl[0].copies[0].r\n t1_n = nrgl[0].copies[0].t\n r2_n = nrgl[0].copies[1].r\n t2_n = nrgl[0].copies[1].t\n #\n assert approx_equal(r1, r1_n, eps=0.001)\n assert approx_equal(t1, t1_n, eps=0.1)\n assert approx_equal(r2, r2_n, eps=0.001)\n assert approx_equal(t2, t2_n, eps=0.1)",
"def check_offset(self):\n\n for d in range(self.n_dmps):\n if abs(self.y0[d] - self.goal[d]) < 1e-4:\n self.goal[d] += 1e-4",
"def reusability(self):\n self._reusability = -0.25 * self.DCC + 0.25 * self.CAMC + 0.5 * self.CIS + 0.5 * self.DSC\n return round(self._reusability, 5)",
"def test_shift_ruptures_positive_shift(midday):\n shifted = _shift_between(\n midday, 60,\n start='2020-01-01',\n end='2020-02-29'\n )\n expected_shift_mask = pd.Series(False, index=midday.index)\n expected_shift_mask['2020-01-01':'2020-02-29'] = True\n shift_mask, shift_amounts = time.shifts_ruptures(shifted, midday)\n assert_series_equal(shift_mask, expected_shift_mask, check_names=False)\n assert_series_equal(\n shift_amounts,\n pd.Series(60, index=shifted.index, dtype='int64'),\n check_names=False\n )",
"def _modify_all_notes(self):\n return self._modify_notes_in_time(TimeStep(0.0, MAX_CLIP_LENGTH), self._clip_notes, self._length_offset)",
"def preferred_rep(self):\n # reducing coefficients mod torsion\n if self.torsion != 'free':\n for key, value in self.items():\n self[key] = value % self.torsion\n\n # removing key:value pairs with value = 0\n zeros = [k for k, v in self.items() if not v]\n for key in zeros:\n del self[key]",
"def test_set_vec_to_zero(self):\n self.init()\n set_to_zero_by_ptr(self.f64_1)\n set_to_zero_by_ref(self.ff64_1)\n set_to_zero_by_ref(self.i32_1)\n set_to_zero_by_ref(self.fi32_1)\n set_to_zero_by_ref(self.i64_1)\n set_to_zero_by_ref(self.fi64_1)\n set_to_zero_by_ref(self.f32_1)\n assert np.all(self.f64_1 == 0.)\n assert np.all(self.ff64_1 == 0.)\n assert np.all(self.i32_1 == 0)\n assert np.all(self.fi32_1 == 0)\n assert np.all(self.i64_1 == 0)\n assert np.all(self.fi64_1 == 0)\n assert np.all(self.f32_1 == 0.)\n assert np.all(self.ff32_1 == 0.)",
"def test_adjust_offsets(self):\n \n offsets = {\"ENSMUSG00000051951_1_147\" : 10, \n \"ENSG00000198901_2_52\" : 10 ,\n \"ENSG00000198901_3_239\" : 10, \n \"ENSG00000198901_4_85\" : 10 ,\n \"ENSG00000198901_5_47\" : 10 ,\n \"ENSG00000198901_6_119\" : 10 ,\n \"ENSG00000198901_7_58\" : 10 ,\n \"ENSG00000198901_8_588\" : 10 ,\n \"ENSG00000198901_10_92\" : 10 ,\n \"ENSG00000198901_11_59\" : 10 ,\n \"ENSG00000198901_12_196\" : 10 ,\n \"ENSG00000198901_13_36\" : 10 ,\n\n }\n bedtool = pybedtools.BedTool(clipper.test_file(\"clip_analysis_test_peak_results.bed\"))\n \n \n results = adjust_offsets(bedtool, offsets)\n \n true_results = ((3206126, 3206130),\n (91513660, 91513664),\n (91517394, 91517398),\n (91517935, 91517939),\n (91522404, 91522408),\n (91523607, 91523611),\n (91524250, 91524254),\n (91525137, 91525141),\n (91527347, 91527351),\n (91527937, 91527941),\n (91528034, 91528038),\n (91537658, 91537662),\n )\n for result, true_result in zip(results, true_results):\n self.assertEqual(int(result[6]), true_result[0])\n self.assertEqual(int(result[7]), true_result[1])",
"def robust_reference(self):\n raw = self.raw.copy()\n raw._data = removeTrend(raw.get_data(), sample_rate=self.sfreq)\n\n # Determine unusable channels and remove them from the reference channels\n noisy_detector = NoisyChannels(raw, do_detrend=False)\n noisy_detector.find_all_bads(ransac=self.ransac)\n self.noisy_channels_original = {\n \"bad_by_nan\": noisy_detector.bad_by_nan,\n \"bad_by_flat\": noisy_detector.bad_by_flat,\n \"bad_by_deviation\": noisy_detector.bad_by_deviation,\n \"bad_by_hf_noise\": noisy_detector.bad_by_hf_noise,\n \"bad_by_correlation\": noisy_detector.bad_by_correlation,\n \"bad_by_ransac\": noisy_detector.bad_by_ransac,\n \"bad_all\": noisy_detector.get_bads(),\n }\n self.noisy_channels = self.noisy_channels_original.copy()\n logger.info(\"Bad channels: {}\".format(self.noisy_channels))\n\n self.unusable_channels = _union(\n noisy_detector.bad_by_nan, noisy_detector.bad_by_flat\n )\n # unusable_channels = _union(unusable_channels, noisy_detector.bad_by_SNR)\n self.reference_channels = _set_diff(\n self.reference_channels, self.unusable_channels\n )\n\n # Get initial estimate of the reference by the specified method\n signal = raw.get_data() * 1e6\n self.reference_signal = (\n np.nanmedian(raw.get_data(picks=self.reference_channels), axis=0) * 1e6\n )\n reference_index = [\n self.ch_names_eeg.index(ch) for ch in self.reference_channels\n ]\n signal_tmp = self.remove_reference(\n signal, self.reference_signal, reference_index\n )\n\n # Remove reference from signal, iteratively interpolating bad channels\n raw_tmp = raw.copy()\n\n iterations = 0\n noisy_channels_old = []\n max_iteration_num = 4\n\n while True:\n raw_tmp._data = signal_tmp * 1e-6\n noisy_detector = NoisyChannels(raw_tmp)\n noisy_detector.find_all_bads(ransac=self.ransac)\n self.noisy_channels[\"bad_by_nan\"] = _union(\n self.noisy_channels[\"bad_by_nan\"], noisy_detector.bad_by_nan\n )\n self.noisy_channels[\"bad_by_flat\"] = _union(\n self.noisy_channels[\"bad_by_flat\"], noisy_detector.bad_by_flat\n )\n self.noisy_channels[\"bad_by_deviation\"] = _union(\n self.noisy_channels[\"bad_by_deviation\"], noisy_detector.bad_by_deviation\n )\n self.noisy_channels[\"bad_by_hf_noise\"] = _union(\n self.noisy_channels[\"bad_by_hf_noise\"], noisy_detector.bad_by_hf_noise\n )\n self.noisy_channels[\"bad_by_correlation\"] = _union(\n self.noisy_channels[\"bad_by_correlation\"],\n noisy_detector.bad_by_correlation,\n )\n self.noisy_channels[\"bad_by_ransac\"] = _union(\n self.noisy_channels[\"bad_by_ransac\"], noisy_detector.bad_by_ransac\n )\n self.noisy_channels[\"bad_all\"] = _union(\n self.noisy_channels[\"bad_all\"], noisy_detector.get_bads()\n )\n logger.info(\"Bad channels: {}\".format(self.noisy_channels))\n\n if (\n iterations > 1\n and (\n not self.noisy_channels[\"bad_all\"]\n or set(self.noisy_channels[\"bad_all\"]) == set(noisy_channels_old)\n )\n or iterations > max_iteration_num\n ):\n break\n noisy_channels_old = self.noisy_channels[\"bad_all\"].copy()\n\n if raw_tmp.info[\"nchan\"] - len(self.noisy_channels[\"bad_all\"]) < 2:\n raise ValueError(\n \"RobustReference:TooManyBad \"\n \"Could not perform a robust reference -- not enough good channels\"\n )\n\n if self.noisy_channels[\"bad_all\"]:\n raw_tmp._data = signal * 1e-6\n raw_tmp.info[\"bads\"] = self.noisy_channels[\"bad_all\"]\n raw_tmp.interpolate_bads()\n signal_tmp = raw_tmp.get_data() * 1e6\n else:\n signal_tmp = signal\n self.reference_signal = (\n np.nanmean(raw_tmp.get_data(picks=self.reference_channels), axis=0)\n * 1e6\n )\n\n 
signal_tmp = self.remove_reference(\n signal, self.reference_signal, reference_index\n )\n iterations = iterations + 1\n logger.info(\"Iterations: {}\".format(iterations))\n\n logger.info(\"Robust reference done\")\n return self.noisy_channels, self.reference_signal",
"def fixed_point(is_zero, plus, minus, f, x):\n\n @memo_Y\n def _fixed_point(fixed_point_fun):\n def __fixed_point(collected, new):\n diff = minus(new, collected)\n if is_zero(diff):\n return collected\n return fixed_point_fun(plus(collected, diff), f(diff))\n return __fixed_point\n\n return _fixed_point(x, f(x))",
"def rmap_check_modifications(old_rmap, new_rmap, old_ref, new_ref, expected=(\"add\",)):\n diffs = diff.mapping_diffs(old_rmap, new_rmap)\n as_expected = True\n for difference in diffs:\n actual = diff.diff_action(difference)\n if actual in expected:\n pass # white-list so it will fail when expected is bogus.\n else:\n log.error(\"Expected one of\", repr(expected), \"but got\", repr(actual),\n \"from change\", repr(difference))\n as_expected = False\n with open(old_rmap) as pfile:\n old_count = len([line for line in pfile.readlines() if os.path.basename(old_ref) in line])\n with open(new_rmap) as pfile:\n new_count = len([line for line in pfile.readlines() if os.path.basename(new_ref) in line])\n if \"replace\" in expected and old_count != new_count:\n log.error(\"Replacement COUNT DIFFERENCE replacing\", repr(old_ref), \"with\", repr(new_ref), \"in\", repr(old_rmap),\n old_count, \"vs.\", new_count)\n as_expected = False\n return as_expected",
"def test_resync():\n np.random.seed(12)\n raw, beh, events, corrupted_indices = \\\n pd_parser.simulate_pd_data(prop_corrupted=0.)\n pd = raw._data[0]\n exclude_shift_i = np.round(raw.info['sfreq'] * exclude_shift).astype(int)\n candidates = _find_pd_candidates(\n pd, max_len=max_len, baseline=baseline,\n zscore=zscore, max_flip_i=max_flip_i, sfreq=raw.info['sfreq'])[0]\n beh_events = beh['time'] * raw.info['sfreq']\n offsets = (2 * resync * np.random.random(beh_events.size) - 1\n ) * raw.info['sfreq']\n beh_events += offsets\n beh_events -= beh_events[0]\n beh_events_adjusted, alignment, best_events = _find_best_alignment(\n beh_events, candidates, exclude_shift, resync, raw.info['sfreq'])\n errors = beh_events_adjusted - best_events + alignment\n resync_exclusions = np.where(abs(errors) > exclude_shift_i)[0]\n idx = resync_exclusions[0]\n correct = (best_events[idx], f'{idx}\\nrecovered (not excluded)')\n assert len(resync_exclusions) > 0\n # test exclude ambiguous\n pd_events = _exclude_ambiguous_events(\n beh_events_adjusted, alignment, best_events, pd, candidates,\n exclude_shift, max_len, raw.info['sfreq'], recover, zscore)\n assert np.isnan(pd_events[resync_exclusions]).all()\n assert np.isnan(pd_events[np.isnan(best_events)]).all()\n with mock.patch('builtins.input', return_value='y'):\n found = _recover_event(\n idx, pd, beh_events_adjusted[idx] + alignment, 2 * resync, zscore,\n max_len, raw.info['sfreq'])\n assert abs(found[0] - correct[0]) < 2\n assert found[1] == correct[1]",
"def calc_generation_wind_proposed (self):\n if self.comp_specs['proposed capacity'] != UNKNOWN:\n self.load_offset_proposed = \\\n self.comp_specs['proposed capacity']\n self.generation_wind_proposed = \\\n self.comp_specs['proposed generation']\n\n if self.generation_wind_proposed == UNKNOWN:\n self.generation_wind_proposed = self.load_offset_proposed *\\\n float(self.comp_specs\\\n ['capacity factor'])*\\\n constants.hours_per_year\n\n return\n\n self.load_offset_proposed = 0\n\n offset = self.average_load*\\\n (self.comp_specs['percent generation to offset'] / 100.0)\n #~ print self.forecast.generation['generation hydro'].sum()\n\n # removed on purpose\n #~ hydro = \\\n #~ self.forecast.generation['generation hydro'].fillna(0).sum()\n #~ if hydro > 0:\n #~ offset *= 2\n\n # existing very variable RE\n existing_RE = \\\n int(float(self.cd['wind capacity'])) + \\\n int(float(self.cd['solar capacity']))\n\n if existing_RE < (round(offset/25) * 25): # ???\n #~ print \"True\"\n self.load_offset_proposed = round(offset/25) * 25 - existing_RE\n\n\n\n # not needed for now\n #~ self.total_wind_generation = self.generation_load_proposed + \\\n #~ int(self.comp_specs['wind capacity'])\n\n self.generation_wind_proposed = self.load_offset_proposed * \\\n float(self.comp_specs['capacity factor'])*\\\n constants.hours_per_year\n #~ print 'self.load_offset_proposed',self.load_offset_proposed\n #~ print 'self.generation_wind_proposed',self.generation_wind_proposed",
"def fixed_in(self):\n fixed_in = self.fixed_artifact()\n fix_available_in = fixed_in.version if fixed_in and fixed_in.version != 'None' else None\n\n # NOTE: semver version format indicates a range where package\n # is vulnerable (as opposed to a value where anythng < value\n # is vulnerable, and the fix itself is known to exist), so we prepend a 'not' to indicate 'fix is available, if not in semver range'\n if fixed_in and fixed_in.version_format in ['semver']:\n # Github Advisories can add the real version where there is a fix if any.\n metadata = fixed_in.fix_metadata or {}\n first_patched_version = metadata.get('first_patched_version')\n if first_patched_version:\n return first_patched_version\n\n if fix_available_in and fixed_in.fix_metadata and fixed_in.fix_metadata.get('fix_exists', False):\n fix_available_in = \"! {}\".format(fix_available_in)\n else:\n fix_available_in = None\n\n return fix_available_in",
"def testNullWarpExposure(self, interpLength=10):\n originalExposure = afwImage.ExposureF(originalExposurePath)\n originalExposure.getInfo().setId(10313423)\n originalExposure.getInfo().setVisitInfo(makeVisitInfo())\n originalFilterLabel = afwImage.FilterLabel(band=\"i\")\n originalPhotoCalib = afwImage.PhotoCalib(1.0e5, 1.0e3)\n originalExposure.setFilter(originalFilterLabel)\n originalExposure.setPhotoCalib(originalPhotoCalib)\n afwWarpedExposure = afwImage.ExposureF(\n originalExposure.getBBox(),\n originalExposure.getWcs())\n warpingControl = afwMath.WarpingControl(\n \"lanczos4\", \"\", 0, interpLength)\n afwMath.warpExposure(\n afwWarpedExposure, originalExposure, warpingControl)\n if SAVE_FITS_FILES:\n afwWarpedExposure.writeFits(\"afwWarpedExposureNull.fits\")\n\n self.assertEqual(afwWarpedExposure.getFilter().bandLabel,\n originalFilterLabel.bandLabel)\n self.assertEqual(afwWarpedExposure.getPhotoCalib(), originalPhotoCalib)\n self.assertEqual(afwWarpedExposure.getInfo().getVisitInfo(),\n originalExposure.getInfo().getVisitInfo())\n\n afwWarpedMaskedImage = afwWarpedExposure.getMaskedImage()\n afwWarpedMask = afwWarpedMaskedImage.getMask()\n noDataBitMask = afwWarpedMask.getPlaneBitMask(\"NO_DATA\")\n\n # compare all non-DATA pixels of image and variance, but relax specs a bit\n # because of minor noise introduced by bad pixels\n noDataMaskArr = afwWarpedMaskedImage.mask.array & noDataBitMask\n msg = \"afw null-warped MaskedImage (all pixels, relaxed tolerance)\"\n self.assertMaskedImagesAlmostEqual(afwWarpedMaskedImage, originalExposure.getMaskedImage(),\n doMask=False, skipMask=noDataMaskArr, atol=1e-5, msg=msg)\n\n # compare good pixels (mask=0) of image, mask and variance using full\n # tolerance\n msg = \"afw null-warped MaskedImage (good pixels, max tolerance)\"\n self.assertMaskedImagesAlmostEqual(afwWarpedMaskedImage, originalExposure.getMaskedImage(),\n skipMask=afwWarpedMask, msg=msg)",
"def fluxes_increments_to_actual(example_dict):\n\n edge_heights_m_agl = get_grid_cell_edges(example_dict[HEIGHTS_KEY])\n grid_cell_widths_metres = get_grid_cell_widths(edge_heights_m_agl)\n\n num_examples = len(example_dict[VALID_TIMES_KEY])\n num_heights = len(example_dict[HEIGHTS_KEY])\n\n grid_cell_width_matrix_metres = numpy.reshape(\n grid_cell_widths_metres, (1, num_heights)\n )\n grid_cell_width_matrix_metres = numpy.repeat(\n grid_cell_width_matrix_metres, repeats=num_examples, axis=0\n )\n\n down_flux_increment_matrix_w_m03 = get_field_from_dict(\n example_dict=example_dict, field_name=SHORTWAVE_DOWN_FLUX_INC_NAME\n )\n up_flux_increment_matrix_w_m03 = get_field_from_dict(\n example_dict=example_dict, field_name=SHORTWAVE_UP_FLUX_INC_NAME\n )\n\n down_flux_matrix_w_m02 = numpy.cumsum(\n down_flux_increment_matrix_w_m03 * grid_cell_width_matrix_metres,\n axis=1\n )\n up_flux_matrix_w_m02 = numpy.cumsum(\n up_flux_increment_matrix_w_m03 * grid_cell_width_matrix_metres,\n axis=1\n )\n\n down_flux_matrix_w_m02 = numpy.maximum(down_flux_matrix_w_m02, 0.)\n up_flux_matrix_w_m02 = numpy.maximum(up_flux_matrix_w_m02, 0.)\n\n vector_target_names = example_dict[VECTOR_TARGET_NAMES_KEY]\n found_down_flux = SHORTWAVE_DOWN_FLUX_NAME in vector_target_names\n found_up_flux = SHORTWAVE_UP_FLUX_NAME in vector_target_names\n\n if not found_down_flux:\n vector_target_names.append(SHORTWAVE_DOWN_FLUX_NAME)\n if not found_up_flux:\n vector_target_names.append(SHORTWAVE_UP_FLUX_NAME)\n\n down_flux_index = vector_target_names.index(SHORTWAVE_DOWN_FLUX_NAME)\n up_flux_index = vector_target_names.index(SHORTWAVE_UP_FLUX_NAME)\n example_dict[VECTOR_TARGET_NAMES_KEY] = vector_target_names\n\n if found_down_flux:\n example_dict[VECTOR_TARGET_VALS_KEY][..., down_flux_index] = (\n down_flux_matrix_w_m02\n )\n else:\n example_dict[VECTOR_TARGET_VALS_KEY] = numpy.insert(\n example_dict[VECTOR_TARGET_VALS_KEY],\n obj=down_flux_index, values=down_flux_matrix_w_m02, axis=-1\n )\n\n if found_up_flux:\n example_dict[VECTOR_TARGET_VALS_KEY][..., up_flux_index] = (\n up_flux_matrix_w_m02\n )\n else:\n example_dict[VECTOR_TARGET_VALS_KEY] = numpy.insert(\n example_dict[VECTOR_TARGET_VALS_KEY],\n obj=up_flux_index, values=up_flux_matrix_w_m02, axis=-1\n )\n\n return example_dict",
"def non_specificity(self):\n result = 0\n for focal, value in self.items():\n if focal.cardinal > 0:\n result += value * math.log(focal.cardinal, 2)\n return round(result, 6)",
"def test_nondefault_nonbonded_cutoff(self):\n topology = Molecule.from_smiles(\"[#18]\").to_topology()\n topology.box_vectors = [3, 3, 3] * unit.nanometer\n\n force_field = ForceField()\n\n vdw_handler = vdWHandler(version=0.4)\n vdw_handler.cutoff = 7.89 * unit.angstrom\n vdw_handler.scale14 = 1.0\n\n vdw_handler.add_parameter(\n {\n \"smirks\": \"[#18:1]\",\n \"epsilon\": 1.0 * unit.kilojoules_per_mole,\n \"sigma\": 1.0 * unit.angstrom,\n }\n )\n force_field.register_parameter_handler(vdw_handler)\n\n electrostatics_handler = ElectrostaticsHandler(version=0.3)\n electrostatics_handler.cutoff = 7.89 * unit.angstrom\n electrostatics_handler.periodic_potential = \"PME\"\n force_field.register_parameter_handler(electrostatics_handler)\n\n library_charges = LibraryChargeHandler(version=0.3)\n library_charges.add_parameter(\n {\n \"smirks\": \"[#18:1]\",\n \"charge1\": 0.0 * unit.elementary_charge,\n }\n )\n force_field.register_parameter_handler(library_charges)\n\n system = force_field.create_openmm_system(topology)\n\n found_cutoff = (\n system.getForce(0).getCutoffDistance().value_in_unit(openmm_unit.angstrom)\n )\n\n assert abs(found_cutoff - 7.89) < 1e-6",
"def checkIfAllowedToModify(self):\n\n oldBytes = b''\n testFileName = self.MAPSTUDIO + self.inputFiles[0] + '.msb'\n\n with open(testFileName, 'rb') as oldf:\n oldBytes = oldf.read()\n\n # Try writing something to the file\n\n try:\n with open(testFileName, 'wb') as outf:\n outf.write(b'TESTINGIFICANWRITEINTOTHISFILE')\n except:\n return False\n\n # Because apparently for _some_ reason it doesn't throw an error sometimes(?) so we confirm if the file was actually modified\n\n newBytes = b''\n with open(testFileName, 'rb') as oldf:\n newBytes = oldf.read()\n\n if (oldBytes == newBytes):\n return False\n\n # Restore the file to normal\n\n with open(testFileName, 'wb') as outf:\n outf.write(oldBytes)\n\n oldBytes = b''\n newBytes = b''\n\n return True",
"def test_value_change(self):\n before = self.data.diffusion_data[:, :, 0, 0]\n after = module_05.run_module(self.data).diffusion_data[:, :, 0, 0]\n self.assertFalse(np.all(before == after))",
"def set_correction(self, matrix=[[1, 0], [0, 1]], shift=[0, 0], meta=None,\n **kwargs):\n # compute the matrix for the scale and rotation correction\n shift = (np.asarray(shift) - np.dot(self._wcslin.wcs.crpix, matrix) +\n self._wcslin.wcs.crpix)\n\n matrix = inv(matrix).T\n\n cwcs = self._wcs.deepcopy()\n\n # estimate step for numerical differentiation. We need a step\n # large enough to avoid rounding errors and small enough to get a\n # better precision for numerical differentiation.\n # TODO: The logic below should be revised at a later time so that it\n # better takes into account the two competing requirements.\n crpix1, crpix2 = self._wcs.wcs.crpix\n hx = max(1.0, min(20.0, (crpix1 - 1.0) / 100.0,\n (self._wcs.pixel_shape[0] - crpix1) / 100.0))\n hy = max(1.0, min(20.0, (crpix2 - 1.0) / 100.0,\n (self._wcs.pixel_shape[1] - crpix2) / 100.0))\n\n # compute new CRVAL for the image WCS:\n crpixinref = self._wcslin.wcs_world2pix(\n self._wcs.wcs_pix2world([self._wcs.wcs.crpix], 1), 1)\n crpixinref = np.dot(crpixinref - shift, matrix.T).astype(np.float64)\n self._wcs.wcs.crval = self._wcslin.wcs_pix2world(crpixinref, 1)[0]\n self._wcs.wcs.set()\n\n # approximation for CD matrix of the image WCS:\n (U, u) = _linearize(cwcs, self._wcs, self._wcslin, self._wcs.wcs.crpix,\n matrix, shift, hx=hx, hy=hy)\n self._wcs.wcs.cd = np.dot(self._wcs.wcs.cd.astype(np.longdouble),\n U).astype(np.float64)\n self._wcs.wcs.set()\n\n # save linear transformation info to the meta attribute:\n super().set_correction(matrix=matrix, shift=shift, meta=meta, **kwargs)",
"def test07(self):\n model = self.setup_model02()\n model.x[1].fix(1)\n wts = StoreSpec.isfixed()\n to_json(model, fname=self.fname, human_read=True, wts=wts)\n model.g.deactivate()\n model.x[1].setlb(-4)\n model.x[1].unfix()\n model.x[2].fix(6)\n from_json(model, fname=self.fname, wts=wts)\n assert value(model.x[1]) == 1\n assert model.x[1].lb == -4\n assert value(model.x[2]) == 6\n assert model.x[1].fixed\n assert not model.x[2].fixed\n assert not model.g.active",
"def propagatePeakAssignments(peaks, refPeak=None, cleanNonRef=False,\n tolerances=None, warnUnalias=False):\n\n if refPeak:\n peaksIn = [refPeak, ]\n else:\n peaksIn = peaks\n \n if not tolerances:\n tolerances = []\n \n dimResonances = {}\n resonanceDims = {}\n for peak in peaksIn:\n for i, peakDim in enumerate(peak.sortedPeakDims()):\n dataDim = peakDim.dataDim\n expDimRef = dataDim.expDim.findFirstExpDimRef()\n \n if not expDimRef:\n continue\n \n key = expDimRef.isotopeCodes\n if dimResonances.get(key) is None:\n dimResonances[key] = []\n \n if peakDim.peakDimContribs:\n # could be in different spectra\n \n for contrib in peakDim.peakDimContribs:\n resonance = contrib.resonance\n \n dimResonances[key].append(resonance)\n if resonanceDims.get(resonance) is None:\n resonanceDims[resonance] = []\n \n if i not in resonanceDims[resonance]:\n resonanceDims[resonance].append(i)\n\n if refPeak and cleanNonRef:\n for peak in peaks:\n if peak is refPeak:\n continue\n \n for peakDim in peak.peakDims:\n clearPeakDim(peakDim)\n\n shiftRanges = {}\n for peak in peaks:\n if peak is refPeak:\n continue\n\n for i, peakDim in enumerate(peak.sortedPeakDims()):\n dataDimRef = peakDim.dataDimRef\n \n if dataDimRef:\n dataDim = dataDimRef.dataDim\n \n if dataDim not in shiftRanges:\n shiftMin, shiftMax = getDataDimFullShiftRange(dataDim)\n shiftRanges[dataDim] = (shiftMin, shiftMax)\n else:\n shiftMin, shiftMax = shiftRanges[dataDim]\n \n if i < len(tolerances):\n tolerance = tolerances[i]\n else:\n tolerance = getAnalysisDataDim(dataDim).assignTolerance\n \n key = dataDimRef.expDimRef.isotopeCodes\n pValue = peakDim.realValue\n\n extantResonances = []\n for contrib in peakDim.peakDimContribs:\n if contrib.peakDimComponent:\n continue\n extantResonances.append(contrib.resonance)\n \n assignResonances = []\n closeResonances = []\n for resonance in dimResonances[key]:\n if resonance not in extantResonances:\n shiftList = peak.peakList.dataSource.experiment.shiftList\n shift = resonance.findFirstShift(parentList=shiftList)\n \n if shift:\n # Could result in unaliasing the peak\n\n sValue = shift.value\n # Only assign if within known bounds\n if not (shiftMin < sValue < shiftMax): # Inside, not on edge\n continue\n \n assignResonances.append(resonance)\n \n if abs(sValue-pValue) <= tolerance:\n closeResonances.append(resonance)\n \n elif i in resonanceDims.get(resonance, []):\n # No shift so only propagate across the same dim numbers\n assignResonances.append(resonance)\n \n # Can't have both aliased and unaliased resonances: go for the\n # unaliased/close ppm ones in preference \n \n if closeResonances:\n for resonance in closeResonances:\n assignResToDim(peakDim, resonance, tolerance=tolerance,\n doWarning=False)\n \n elif not extantResonances:\n # Don't risk aliasing changes if already assigned\n # warn for aliasing changes\n for resonance in assignResonances:\n assignResToDim(peakDim, resonance, tolerance=tolerance,\n doWarning=warnUnalias)",
"def test_set_mat_to_zero(self):\n self.init()\n set_to_zero_by_ref(self.ff64_2)\n assert np.all(self.ff64_2 == 0)\n # Set to ones, then test set_to_zero_by_ptr also works\n self.ff64_2 += 1\n assert np.all(self.ff64_2)\n set_to_zero_by_ptr(self.ff64_2)\n assert np.all(self.ff64_2 == 0)\n # Repeat for other matrix types\n # These ones will make internal copies, so will run slower.\n set_to_zero_by_ref(self.i64_2)\n set_to_zero_by_ref(self.fi64_2)\n set_to_zero_by_ref(self.f64_2)\n assert np.all(self.i64_2 == 0)\n assert np.all(self.fi64_2 == 0)\n assert np.all(self.f64_2 == 0)\n self.i64_2 += 1\n self.fi64_2 += 1\n self.f64_2 += 1\n assert np.all(self.i64_2)\n assert np.all(self.fi64_2)\n assert np.all(self.f64_2)\n set_to_zero_by_ptr(self.i64_2)\n set_to_zero_by_ptr(self.fi64_2)\n set_to_zero_by_ptr(self.f64_2)\n assert np.all(self.i64_2 == 0)\n assert np.all(self.fi64_2 == 0)\n assert np.all(self.f64_2 == 0)",
"def test_modified_14_factors(self, force_field):\n top = Molecule.from_smiles(\"CCCC\").to_topology()\n default_14 = copy.deepcopy(force_field)\n e_mod_14 = copy.deepcopy(force_field)\n vdw_mod_14 = copy.deepcopy(force_field)\n\n e_mod_14[\"Electrostatics\"].scale14 = 0.66\n assert e_mod_14[\"Electrostatics\"].scale14 == 0.66\n\n vdw_mod_14[\"vdW\"].scale14 = 0.777\n assert vdw_mod_14[\"vdW\"].scale14 == 0.777\n\n default_omm_sys = default_14.create_openmm_system(top)\n e_mod_omm_sys = e_mod_14.create_openmm_system(top)\n vdw_mod_omm_sys = vdw_mod_14.create_openmm_system(top)\n\n for omm_sys, expected_vdw_14, expected_coul_14 in [\n [default_omm_sys, 0.5, 0.833333],\n [e_mod_omm_sys, 0.5, 0.66],\n [vdw_mod_omm_sys, 0.777, 0.833333],\n ]:\n found_coul_14, found_vdw_14 = get_14_scaling_factors(omm_sys)\n\n np.testing.assert_almost_equal(\n actual=found_vdw_14,\n desired=expected_vdw_14,\n decimal=10,\n err_msg=\"vdW 1-4 scaling factors do not match\",\n )\n\n np.testing.assert_almost_equal(\n actual=found_coul_14,\n desired=expected_coul_14,\n decimal=10,\n err_msg=\"Electrostatics 1-4 scaling factors do not match\",\n )",
"def fixed_constant_pressure(self, general_transmissibility):\n #print('Setting boundary conditions of local problem {}'.format(self.coarse_volume))\n correct_volumes_group_1 = np.array([0,1,2,3])\n correct_volumes_group_2 = np.array([12,13,14,15])\n transmissibility = copy.deepcopy(general_transmissibility)\n volumes_group_1 = correct_volumes_group_1\n volumes_group_2 = correct_volumes_group_2\n transmissibility[volumes_group_1] = 0\n transmissibility[volumes_group_2] = 0\n transmissibility[volumes_group_1, volumes_group_1] = 1\n transmissibility[volumes_group_2, volumes_group_2] = 1\n source = lil_matrix((int(self.number_volumes_local_problem), 1), dtype = 'float')\n source[volumes_group_1] = 1\n source[volumes_group_2] = 0\n\n #print('Fixed constant pressure boundary condition applied')\n\n return transmissibility, source, correct_volumes_group_1",
"def test_read_0_1_smirff(self):\n ForceField(\n get_data_file_path(\n \"test_forcefields/smirff99Frosst_reference_0_1_spec.offxml\"\n )\n )",
"def check_limits(self):\n\n #Find the relative position of each leg vs. its \"zero\" position\n relpos = self.fixed_plate - self.fixed_plate_zero\n\n for leg in range(3):\n #Check that the leg is within allowable \"safe zone\"\n #Use the position of the leg (relative to 0) to find the index in the \"safe zone\" matrix\n i_x = nearest_index(self.leg_safe_xaxis, relpos[COORD_X, leg])\n i_z = nearest_index(self.leg_safe_zaxis, relpos[COORD_Z, leg])\n #Look up in the safe zone.\n self.leg_fault[leg] = (not self.leg_safe_zone[leg, i_x, i_z])\n\n if (not all(np.isreal(self.fixed_plate[:, leg]))) or any(np.isnan(self.fixed_plate[:, leg])):\n #A complex or NaN value = the angle found for the leg was invalid, meaning that the\n #leg would have to be longer to reach the desired position.\n self.leg_fault[leg] = True",
"def _calculate_correction(self, telid):\n return 1",
"def _shift(self, s):\n start_pos = self._relative_head_pos()\n l = 1 + 2 * self.shift_length\n shift = int(s * l - 0.000000001) - int(l / 2)\n for s in range(abs(shift)):\n if shift > 0:\n if self.head_pos == len(self.memory) - 1 and len(self.memory) < self.max_memory:\n self.memory = np.concatenate((self.memory, np.zeros((1, self.memory_unit_size))), 0)\n self.head_pos += 1\n else:\n self.head_pos = (self.head_pos + 1) % self.max_memory\n else:\n if self.head_pos == 0 and len(self.memory) < self.max_memory:\n self.memory = np.concatenate((np.zeros((1, self.memory_unit_size)), self.memory), 0)\n self.left_expands += 1\n else:\n self.head_pos = (self.head_pos - 1) % self.max_memory\n if self.history is not None:\n self.history[\"loc\"][-1].append((start_pos, 0.1))\n return np.sign(shift)",
"def _cmp_cflw_m1(self, problem, cflw_e):\n import pdb\n mu = {t:{o:{} for o in list(cflw_e.keys())} for t in self.periods}\n for i, tree in list(problem.trees.items()):\n for path in tree.paths():\n j = tuple(n.data('acode') for n in path)\n for o in list(cflw_e.keys()):\n _mu = path[-1].data(o)\n for t in self.periods:\n mu[t][o][i, j] = _mu[t] if t in _mu else 0.\n for t in self.periods:\n for o, e in list(cflw_e.items()):\n #pdb.set_trace()\n if t in e[0]:\n mu_lb = {'x_%i' % hash((i, j)):(mu[t][o][i, j] - (1 - e[0][t]) * mu[e[1]][o][i, j]) for i, j in mu[t][o]}\n mu_ub = {'x_%i' % hash((i, j)):(mu[t][o][i, j] - (1 + e[0][t]) * mu[e[1]][o][i, j]) for i, j in mu[t][o]}\n problem.add_constraint(name='flw-lb_%03d_%s' % (t, o), coeffs=mu_lb, sense=opt.SENSE_GEQ, rhs=0.)\n problem.add_constraint(name='flw-ub_%03d_%s' % (t, o), coeffs=mu_ub, sense=opt.SENSE_LEQ, rhs=0.)",
"def shiftDetector(frame, onh_info=None):\n norm = frame/np.max(frame)#(2**16)\n anchorCol = norm[:,int((frame.shape[1])/2)]\n shifts = [np.argmax(signal.correlate(norm[:,i],anchorCol,mode='same'))-int((frame.shape[0])/2) for i in range(frame.shape[1])]\n \n return shifts",
"def test_by_ref_non_contiguous(self):\n self.init()\n corners = self.ff64_2[::2,::2]\n assert not corners.flags['OWNDATA']\n set_to_zero_by_ref(corners)\n assert np.all(self.ff64_2 == np.array([[0,1,0],[3,4,5],[0,7,0]]))",
"def _check_redshift(self, red):\n if np.min(np.abs(red - self.zout)) > 0.01:\n return 0\n return 1",
"def fix_neg(df: pd.DataFrame, roi: str,\n columns: list = ['cases', 'deaths', 'recover'],\n plot: bool = False) -> pd.DataFrame:\n for c in columns:\n cum = 'cum_%s' % c\n new = 'new_%s' % c\n before = df[cum].copy()\n non_zeros = df[df[new] > 0].index\n has_negs = before.diff().min() < 0\n if len(non_zeros) and has_negs:\n first_non_zero = non_zeros[0]\n maxx = df.loc[first_non_zero, cum].max()\n # Find the bad entries and null the corresponding\n # cumulative column, which are:\n # 1) Cumulative columns which are zero after previously\n # being non-zero\n bad = df.loc[first_non_zero:, cum] == 0\n df.loc[bad[bad].index, cum] = None\n # 2) New daily columns which are negative\n bad = df.loc[first_non_zero:, new] < 0\n df.loc[bad[bad].index, cum] = None\n # Protect against 0 null final value which screws up interpolator\n if np.isnan(df.loc[df.index[-1], cum]):\n df.loc[df.index[-1], cum] = maxx\n # Then run a loop which:\n while True:\n # Interpolates the cumulative column nulls to have\n # monotonic growth\n after = df[cum].interpolate('pchip')\n diff = after.diff()\n if diff.min() < 0:\n # If there are still negative first-differences at this\n # point, increase the corresponding cumulative values by 1.\n neg_index = diff[diff < 0].index\n df.loc[neg_index, cum] += 1\n else:\n break\n # Then repeat\n if plot:\n plt.figure()\n plt.plot(df.index, before, label='raw')\n plt.plot(df.index, after, label='fixed')\n r = np.corrcoef(before, after)[0, 1]\n plt.title(\"%s %s Raw vs Fixed R=%.5g\" % (roi, c, r))\n plt.legend()\n else:\n after = before\n # Make sure the first differences are now all non-negative\n assert after.diff().min() >= 0\n # Replace the values\n df[new] = df[cum].diff().fillna(0).astype(int).values\n return df",
"def loss_comparison(warp_fname, offpar_fname, moving_rmli_fname,\n fixed_rmli_fname, moved_fname, crop_center, crop_size,\n reg_weight, ncc_win, debug=False):\n rg_crop = crop_size[0]\n az_crop = crop_size[1]\n rg_cen = crop_center[0]\n az_cen = crop_center[1]\n\n # Import voxelmorph with pytorch backend\n os.environ['VXM_BACKEND'] = 'pytorch'\n import voxelmorph as vxm\n\n # Read the voxelmorph warp file\n warp_file = np.load(warp_fname)\n warp = warp_file['offs']\n warp = warp[np.newaxis, :, :, :]\n\n # Read moved scene\n moved_file = np.load(moved_fname)\n moved = moved_file['scene']\n moved = moved[np.newaxis, np.newaxis, :, :]\n\n # Read, crop, and scale the fixed RMLI\n fixed_rmli = gx.MLI(fixed_rmli_fname,\n par=gx.MLI_Par(fixed_rmli_fname + '.par'))\n rmli_dim = fixed_rmli.dim\n fixed_full = fixed_rmli.array\n fixed = fixed_full[rg_cen - rg_crop // 2:rg_cen + rg_crop // 2,\n az_cen - az_crop // 2:az_cen + az_crop // 2]\n fixed = scale_rmli(fixed)\n fixed = fixed[np.newaxis, np.newaxis, :, :]\n\n # Read, crop, and scale the moving RMLI\n moving_rmli = gx.MLI(moving_rmli_fname,\n par=gx.MLI_Par(moving_rmli_fname + '.par'))\n moving_full = moving_rmli.array\n moving = moving_full[rg_cen - rg_crop // 2:rg_cen + rg_crop // 2,\n az_cen - az_crop // 2:az_cen + az_crop // 2]\n moving = scale_rmli(moving)\n moving = moving[np.newaxis, np.newaxis, :, :]\n\n # Read in the Gamma offsets\n # Scale the Gamma offsets to be the same size as the original data that\n # was cropped to feed into voxelmorph.\n offs_basename, _ = os.path.splitext(os.path.basename(offpar_fname))\n offs_fname = os.path.join(os.path.dirname(offpar_fname), offs_basename + '.offs')\n offpar = gx.OFF_Par(offpar_fname)\n offs_dim = (offpar['offset_estimation_range_samples'],\n offpar['offset_estimation_azimuth_samples'])\n gx_offs = gx.readBin(offs_fname, offs_dim, _dtype='complex64')\n zoom_factor = (rmli_dim[0] / offs_dim[0], rmli_dim[1] / offs_dim[1])\n multilook = (fixed_rmli.par['range_looks'],\n fixed_rmli.par['azimuth_looks'])\n gamma_rg_offs = scipy.ndimage.zoom(np.real(gx_offs), zoom_factor)\n gamma_az_offs = scipy.ndimage.zoom(np.imag(gx_offs), zoom_factor)\n gamma_rg_offs = gamma_rg_offs[rg_cen - rg_crop // 2:rg_cen + rg_crop // 2,\n az_cen - az_crop // 2:az_cen + az_crop // 2]\n gamma_rg_offs /= multilook[0]\n gamma_az_offs = gamma_az_offs[rg_cen - rg_crop // 2:rg_cen + rg_crop // 2,\n az_cen - az_crop // 2:az_cen + az_crop // 2]\n gamma_az_offs /= multilook[1]\n gamma_warp = np.stack((gamma_rg_offs, gamma_az_offs), axis=0)\n gamma_warp = gamma_warp[np.newaxis, :, :, :]\n\n # Create a moved image with the gamma offsets\n transformer = vxm.layers.SpatialTransformer(crop_size)\n gamma_moved = transformer(torch.from_numpy(moving).float(),\n torch.from_numpy(gamma_warp).float())\n\n # Prepare ncc loss with square window\n ndims = len(list(fixed.shape)) - 2\n assert ndims in [1, 2, 3], \"volumes should be 1 to 3 dimensions. 
found: %d\" % ndims\n ncc_win_sq = [ncc_win] * ndims # Build a square window\n ncc = vxm.losses.NCC(ncc_win_sq, cuda=False)\n\n # Now we have all the data, compute the losses\n loss_sim_vxm = ncc.loss(torch.from_numpy(fixed).float(),\n torch.from_numpy(moved).float())\n loss_sim_gamma = ncc.loss(torch.from_numpy(fixed).float(), gamma_moved)\n\n grad = vxm.losses.Grad(penalty='l2')\n loss_smooth_vxm = grad.loss(None, torch.from_numpy(warp).float())\n loss_smooth_gamma = grad.loss(None, torch.from_numpy(gamma_warp).float())\n\n loss_total_vxm = loss_sim_vxm + (reg_weight * loss_smooth_vxm)\n loss_total_gamma = loss_sim_gamma + (reg_weight * loss_smooth_gamma)\n\n # Print everything\n print('Lambda: {}\\n'.format(reg_weight))\n print('Voxelmorph:\\nSimilarity loss: {}\\nSmoothness loss: {}\\n'\n 'Total: {}\\n'.format(loss_sim_vxm, loss_smooth_vxm, loss_total_vxm))\n print('Gamma:\\nSimilarity loss: {}\\nSmoothness loss: {}\\n'\n 'Total: {}\\n'.format(loss_sim_gamma, loss_smooth_gamma, loss_total_gamma))\n\n if debug:\n plt.figure()\n plt.imshow(moved[0, 0, :, :])\n plt.title('moved')\n plt.colorbar()\n plt.figure()\n plt.imshow(gamma_moved[0, 0, :, :])\n plt.title('gamma_moved')\n plt.colorbar()\n plt.figure()\n plt.imshow(fixed[0, 0, :, :])\n plt.title('fixed')\n plt.colorbar()\n plt.figure()\n plt.imshow(warp[0, 0, :, :])\n plt.title('warp_rg')\n plt.colorbar()\n plt.figure()\n plt.imshow(warp[0, 1, :, :])\n plt.title('warp_az')\n plt.colorbar()\n plt.figure()\n plt.imshow(gamma_warp[0, 0, :, :])\n plt.title('gamma_warp_rg')\n plt.colorbar()\n plt.figure()\n plt.imshow(gamma_warp[0, 1, :, :])\n plt.title('gamma_warp_az')\n plt.colorbar()\n plt.show()",
"def shiftDetector(frame):\n \n norm = frame/np.max(frame)#(2**16)\n anchorCol = norm[:,int((frame.shape[1])/2)]\n shifts = [np.argmax(signal.correlate(norm[:,i],anchorCol,mode='same'))-int((frame.shape[0])/2) for i in range(frame.shape[1])]\n \n return shifts",
"def water_correction_energies(fname, se_h2o_hof, se_h_hof, ref_h2o_ener,\n se_au=False, ref_au=True):\n check_for_keys(fname, REFEK, NATMK, SEEK)\n with h5.File(fname, 'r') as ifi:\n # This calculates the reference heat of formation\n # Note the reference is assumed to be in eH\n correction = ifi[REFEK][:] - ((ifi[NATMK][:]//3) * ref_h2o_ener)\n if ref_au:\n correction *= 627.509\n if se_au:\n correction -= (ifi[SEEK][:] - se_h_hof - (ifi[NATMK][:]//3) * se_h2o_hof) * 627.509\n else:\n correction -= (ifi[SEEK][:] - se_h_hof - (ifi[NATMK][:]//3) * se_h2o_hof)\n return correction",
"def _free_indicies(self):\n return np.logical_not(self._fixed_indicies)",
"def define_ufl_neumann_bcs_diff(self):\n\n if hasattr(self, 'ufl_neumann_bcs_diff'):\n return None\n\n # Note that \"is not\" only works for integers 0 to 255.\n neum_not_zero = self.ufl_neumann_bcs is not 0\n disp_not_zero = self.displacement is not 0\n\n if neum_not_zero and disp_not_zero:\n self.ufl_neumann_bcs_du = dlf.derivative(self.ufl_neumann_bcs,\n self.displacement,\n self.trial_vector)\n else:\n self.ufl_neumann_bcs_du = 0\n\n return None",
"def test_read_0_1_smirnoff(self):\n ForceField(\n get_data_file_path(\n \"test_forcefields/smirnoff99Frosst_reference_0_1_spec.offxml\"\n )\n )",
"def check_for_edit(self, force):\n if force:\n self._manipulations = {\"bri\": 0, \"con\": 0, \"sat\": 0}\n return 0\n elif self._manipulations != {\"bri\": 0, \"con\": 0, \"sat\": 0}:\n self._app[\"statusbar\"].message(\n \"Image has been edited, add ! to force\", \"warning\")\n return 1\n return 0",
"def modify_cand():\n if col_i + 1 < len(lastrow):\n return (lastrow[col_i + 1] +\n diff(left_elem, right_elem, key=key + [left_i],\n minimal=minimal, verbose=False))",
"def fix_seq(self, fixed_seq):\n self.wc.fix_seq(wc(fixed_seq))",
"def calc_base_eff_and_infl(level):\n return 2 + (level - 1)",
"def _handle_offsets_changed(self, master, slave, track_offset_delta, scene_offset_delta):\n track_target_offset = self._get_target_track_offset(master, slave, track_offset_delta)\n if track_target_offset < 0 or not self._can_link_tracks:\n track_target_offset = slave.track_offset_method()\n scene_target_offset = self._get_target_scene_offset(master, slave, scene_offset_delta)\n if scene_target_offset < 0:\n scene_target_offset = slave.scene_offset_method()\n if track_target_offset == slave.track_offset_method() and scene_target_offset == slave.scene_offset_method():\n return\n slave.set_offsets(track_target_offset, scene_target_offset)",
"def test_nofix(self):\n self.check_data.side_effect = lambda: self.cube\n with patch('esmvalcore.cmor._fixes.fix.Fix.get_fixes',\n return_value=[]) as mock_get_fixes:\n with patch('esmvalcore.cmor.fix._get_cmor_checker',\n return_value=self.checker):\n cube_returned = fix_data(\n self.cube,\n short_name='short_name',\n project='project',\n dataset='model',\n mip='mip',\n session=sentinel.session,\n )\n self.checker.assert_called_once_with(self.cube)\n self.check_data.assert_called_once_with()\n assert cube_returned is self.cube\n assert cube_returned is not self.intermediate_cube\n assert cube_returned is not self.fixed_cube\n mock_get_fixes.assert_called_once_with(\n **self.expected_get_fixes_call\n )",
"def switchToPositiveStrandCoordinates( options, data ):\n for c in data.mafBlocksByChrom:\n for m in data.mafBlocksByChrom[ c ]:\n if m.refStart > m.refEnd:\n m.refStart, m.refEnd = m.refEnd, m.refStart\n m.refStrand *= -1\n m.hplStart, m.hplEnd = m.hplStart, m.hplEnd # this is now left-right draw order\n # sanity check\n if m.refStart > data.chrLengthsByChrom[ c ] or m.refEnd > data.chrLengthsByChrom[ c ]:\n sys.stderr.write( 'file %s has maf block on chr %s with '\n 'bounds [%d - %d] which are beyond featLen (%d)\\n' %\n ( options.maf, m.refChr, m.refStart, m.refEnd, data.chrLengthsByChrom[ c ] ))\n sys.exit( 1 )",
"def bits(delta):\n delta = asanyarray(delta)\n distance = absolute(delta)\n distance += 1\n return log2(distance)"
] | [
"0.6245811",
"0.5993922",
"0.58981633",
"0.54384756",
"0.5420952",
"0.5370132",
"0.5319682",
"0.52733696",
"0.52633834",
"0.5207875",
"0.5174516",
"0.5168301",
"0.5162118",
"0.5160096",
"0.5155298",
"0.5127173",
"0.5121005",
"0.50911295",
"0.49932376",
"0.4955329",
"0.49097493",
"0.49048755",
"0.4889362",
"0.4889008",
"0.4873095",
"0.48622525",
"0.4823693",
"0.4778005",
"0.4771348",
"0.47635102",
"0.47587073",
"0.47395226",
"0.4735539",
"0.47348043",
"0.47337103",
"0.47230202",
"0.47080818",
"0.46882048",
"0.4687877",
"0.46855977",
"0.46763062",
"0.46700603",
"0.46648687",
"0.4659112",
"0.4650623",
"0.46371737",
"0.46350074",
"0.46283996",
"0.46229118",
"0.46155876",
"0.46092123",
"0.46066102",
"0.46062088",
"0.46061453",
"0.46025017",
"0.46006545",
"0.4591929",
"0.45850167",
"0.45810196",
"0.4574848",
"0.4571448",
"0.45713332",
"0.45676067",
"0.4566225",
"0.45653886",
"0.45612714",
"0.45593148",
"0.45590815",
"0.4558727",
"0.45558852",
"0.45548275",
"0.4551274",
"0.45493937",
"0.45442668",
"0.45432207",
"0.4539474",
"0.4538086",
"0.45297748",
"0.45292252",
"0.45287365",
"0.45274174",
"0.45252487",
"0.45250073",
"0.45240974",
"0.45216405",
"0.4514807",
"0.45126498",
"0.4512371",
"0.4511696",
"0.4511663",
"0.45092526",
"0.45021892",
"0.44994932",
"0.44933483",
"0.44836566",
"0.44760218",
"0.44718868",
"0.44686204",
"0.4464701",
"0.44564942"
] | 0.63415945 | 0 |
Generates points used as input to a hotspot detection algorithm. With probability hotspot_weight (20%), a point is drawn from the hotspot; otherwise, it is drawn from the base field. The location of the hotspot changes every 1000 points generated. | def generate(
stream_name, field, hotspot_size, hotspot_weight, batch_size, kinesis_client):
points_generated = 0
hotspot = None
while True:
if points_generated % 1000 == 0:
hotspot = get_hotspot(field, hotspot_size)
records = [
get_record(field, hotspot, hotspot_weight) for _ in range(batch_size)]
points_generated += len(records)
pprint(records)
kinesis_client.put_records(StreamName=stream_name, Records=records)
time.sleep(0.1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_points(num_points):\n for i in xrange(0, num_points):\n pass",
"def __get_random_hotspot(self):\n x_min = self.occupancy_map.info.origin.position.x\n x_max = x_min + self.occupancy_map.info.width * self.occupancy_map.info.resolution\n y_min = self.occupancy_map.info.origin.position.y\n y_max = y_min + self.occupancy_map.info.height * \\\n self.occupancy_map.info.resolution\n # This might bes a bit strange, but we have the following problem:\n # some simulators need a square version of the same map. A square version\n # will have other x_max or y_max and thus the random hotspots will be different.\n # TO prevent this, we will always take only the max value of either x_max or y_max.\n # This will be the same for the square version and the not-square version (of the same map).\n max_value = max(x_max, y_max)\n\n # search for a not occupied position\n while True:\n # previously: x = random.uniform(x_min, x_max) # see problem description above\n x = random.uniform(x_min, max_value)\n # previously: y = random.uniform(y_min, y_max) # see problem description above\n y = random.uniform(y_min, max_value)\n # due to the workaround for the problem above, it can be that the value is out\n # of map for the not square map version. We need to skip this (the square\n # map version will skip it due to occupied cell...):\n if x <= x_max and y <= y_max:\n cell_x = min(int(\n (x - x_min) / self.occupancy_map.info.resolution), self.occupancy_map.info.width - 1)\n cell_y = min(int(\n (y - y_min) / self.occupancy_map.info.resolution), self.occupancy_map.info.height - 1)\n if not self.__cell_is_occupied(cell_x, cell_y):\n break\n spread = random.uniform(0.5, 1.0)\n return (x, y, spread)",
"def generate_nearby_hotspot_graph(self, k_rings: int, hotspot_data, hex_list, format='torch'):\n\n g = nx.Graph()\n ring_hexes = list(h3.k_ring(hotspot_data[self.address]['location_hex'], k_rings))\n ring_hexes.append(hotspot_data[self.address]['location_hex'])\n\n hotspots_in_ring = []\n witnesses_in_ring = []\n\n for ring_hex in ring_hexes:\n try:\n hotspots_in_hex, n_hotspots, elev = hex_list[ring_hex]['hotspots'], hex_list[ring_hex]['n_hotspots'], \\\n hex_list[ring_hex]['elev']\n except KeyError:\n hotspots_in_hex = list_hotspots_in_hex(ring_hex)\n hex_list[ring_hex] = {\n 'hotspots': hotspots_in_hex,\n 'n_hotspots': len(hotspots_in_hex),\n 'elev': get_elevation_of_coords(h3.h3_to_geo(ring_hex))\n }\n\n hotspots_in_ring += hex_list[ring_hex]['hotspots']\n\n pos_dict = {}\n g.add_node(self.address, pos=(hotspot_data[self.address]['lng'], hotspot_data[self.address]['lat']),\n elev=hotspot_data[self.address]['elev'], node_class=1)\n pos_dict[self.address] = g.nodes[self.address]['pos']\n\n for witness_address in hotspot_data[self.address]['witnesses']:\n if witness_address in hotspots_in_ring:\n node_class = 1\n else:\n node_class = 0\n try:\n existing_node = g.nodes[witness_address]\n except KeyError:\n try:\n g.add_node(witness_address,\n pos=(hotspot_data[witness_address]['lng'], hotspot_data[witness_address]['lat']),\n elev=hotspot_data[witness_address]['elev'], node_class=node_class)\n except KeyError:\n details = get_hotspot_details(witness_address)\n g.add_node(witness_address,\n pos=(details['lng'], details['lat']),\n elev=get_elevation_of_coords(h3.h3_to_geo(details['location_hex'])),\n # rewards=get_hotspot_rewards(details['address'], 5))\n node_class=node_class)\n pos_dict[witness_address] = g.nodes[witness_address]['pos']\n dist = math.sqrt((g.nodes[witness_address]['pos'][0] - g.nodes[self.address]['pos'][0]) ** 2 + (\n g.nodes[witness_address]['pos'][0] - g.nodes[self.address]['pos'][0]) ** 2)\n g.add_edge(self.address, witness_address, dist=dist)\n\n if format == 'torch':\n gt = from_networkx(g)\n\n data = Data(x=gt.elev.reshape((gt.num_nodes, 1)), edge_index=gt.edge_index,\n pos=gt.pos.reshape((gt.num_nodes, 2)),\n y=gt.node_class, edge_attr=gt.dist, num_classes=2)\n self.torch_nearby_graph = data\n return data\n else:\n self.networkx_nearby_graph = g\n return g",
"def __generate_distribution_grid(self) -> np.ndarray:\n\n x_min = self.occupancy_map.info.origin.position.x\n x_max = x_min + self.occupancy_map.info.width * self.occupancy_map.info.resolution\n y_min = self.occupancy_map.info.origin.position.y\n y_max = y_min + self.occupancy_map.info.height * \\\n self.occupancy_map.info.resolution\n\n # CREATE HOTSPOTS IF WANTED:\n if self.create_random_hotspots:\n if self.use_uniform_distr:\n rospy.logerr(\"*** WRAPPER MESSAGE ***\\n\\n\\t\" + rospy.get_caller_id()\n + \":\\n\\tIt does not make sense to create random hotspots if distribution should be uniform.\"\n + \"\\n\\tThe node will go on with uniform distribution. You can change this in the launch parameters.\\n\")\n self.number_of_random_hotspots = 1 # still needed to get the right edges\n elif self.number_of_random_hotspots <= 0:\n rospy.logerr(\"*** WRAPPER MESSAGE ***\\n\\n\\t\" + rospy.get_caller_id()\n + \":\\n\\tThe number of wanted hotspots is 0 or below. This is not possible.\"\n + \"\\n\\tThe node will go on with 1 random hotspot. You can change this in the launch parameters.\\n\")\n self.number_of_random_hotspots = 1\n self.hotspots = []\n for _ in range(self.number_of_random_hotspots):\n x, y, spread = self.__get_random_hotspot()\n self.hotspots.append([x, y, spread])\n\n if len(self.hotspots) < 1:\n if not self.use_uniform_distr:\n rospy.logerr(\"*** WRAPPER MESSAGE ***\\n\\n\\t\" + rospy.get_caller_id()\n + \":\\n\\tIf no hotspots should be generated randomly (and no uniform distribution),\\n\\tyou need to specify hotspots.\"\n + \"\\n\\tThe node will go on with 1 random hotspot. You can change this in the launch parameters.\\n\")\n x, y, spread = self.__get_random_hotspot()\n self.hotspots = [[x, y, spread]]\n\n hotspots_string = \"\\n\\tDirt generator - HOTSPOTS information:\"\n if self.use_uniform_distr:\n hotspots_string += \"\\n\\n\\tThe dirt distribution was set to be uniform. Hotspots do not exist.\"\n else:\n if self.create_random_hotspots:\n hotspots_string += \"\\n\\n\\tThe dirt distribution is based on the \" + str(len(\n self.hotspots)) + \" randomly created hotspot(s) [real world position in m]:\"\n else:\n hotspots_string += \"\\n\\tThe dirt distribution is based on the \" + \\\n str(len(self.hotspots)) + \\\n \" specified hotspot(s) [real world position in m]:\"\n for index, hotspot in enumerate(self.hotspots):\n hotspots_string += \"\\n\\t\\tHotspot %d: x=%.2f, y=%.2f, spread=%.2f\" % (\n index + 1, hotspot[0], hotspot[1], hotspot[2])\n print(hotspots_string + \"\\n\")\n\n # CREATING SEVERAL NORMAL DISTRIBUTIONS:\n # creating for each hotspot one normal distribution and then combine them to one for X and one for Y\n # for X:\n normal_distributions_attr_X = []\n for spot in self.hotspots:\n # each with [mean_x, std. deviation]\n normal_distributions_attr_X.append([spot[0], spot[2]])\n X_distributions = []\n for attributes in normal_distributions_attr_X:\n distr = stats.truncnorm((x_min - attributes[0]) / attributes[1], (\n x_max - attributes[0]) / attributes[1], loc=attributes[0], scale=attributes[1])\n X_distributions.append(distr.rvs(self.points_per_distr))\n X = np.concatenate(X_distributions)\n # to ensure that the distribution and later the grid is full sized (at a point at each end for full axis):\n X = np.append(X, [x_min, x_max])\n # for Y:\n normal_distributions_attr_Y = []\n for spot in self.hotspots:\n # each with [mean_y, std. 
deviation]\n normal_distributions_attr_Y.append([spot[1], spot[2]])\n Y_distributions = []\n for attributes in normal_distributions_attr_Y:\n distr = stats.truncnorm((y_min - attributes[0]) / attributes[1], (\n y_max - attributes[0]) / attributes[1], loc=attributes[0], scale=attributes[1])\n Y_distributions.append(distr.rvs(self.points_per_distr))\n Y = np.concatenate(Y_distributions)\n # to ensure that the distribution and later the grid is full sized (at a point at each end for full axis):\n Y = np.append(Y, [y_min, y_max])\n\n # # Plotting of the two separate distributions (rather for testing)\n # fig, ax = plt.subplots(2, sharex=True)\n # ax[0].hist(X, bins=self.occupancy_map.info.width)\n # ax[1].hist(Y, bins=self.occupancy_map.info.height)\n # plt.show() # will block the remaining process!\n\n # COMBINING TWO DISTRIBUTIONS TO A BIVARIATE DISTRIBUTION:\n H, xedges, yedges = np.histogram2d(\n X, Y, bins=[self.occupancy_map.info.width, self.occupancy_map.info.height], density=False)\n H = H.astype('int32')\n # inverse H, otherwise x and y would be the other way around (now, x are the columns, and y are rows, with row-major ordered)\n H = H.T\n # H starts with its inital element in (x_min, y_min) and then row-major ordered (see above): each row is one y and in there are all the column-x for this y\n\n if not self.strictly_hotspots:\n # if all cells should have at least a very small chance, no cell should have 0, which is why we add 1 to all of them\n H = H + 1\n\n if self.use_uniform_distr:\n H = np.ones((self.occupancy_map.info.height,\n self.occupancy_map.info.width), dtype=int)\n\n # already kick out all static occupied cells (walls, etc.) and replace them with -1\n for (cell_y, cell_x), _ in np.ndenumerate(H):\n if self.__cell_is_occupied(cell_x, cell_y):\n H[cell_y][cell_x] = -1\n\n if self.enable_printing:\n self.__print_probability_grid(H)\n\n # Creating image\n max_tick_number = 10\n x_axis_res = self.occupancy_map.info.resolution\n while (x_max - x_min) / x_axis_res > max_tick_number:\n x_axis_res *= 2\n x_axis = np.arange(\n x_min, x_max + self.occupancy_map.info.resolution, x_axis_res)\n y_axis_res = self.occupancy_map.info.resolution\n while (y_max - y_min) / y_axis_res > max_tick_number:\n y_axis_res *= 2\n y_axis = np.arange(\n y_min, y_max + self.occupancy_map.info.resolution, y_axis_res)\n\n figure_height = 10\n resize_factor_height = figure_height / self.occupancy_map.info.height\n fontsize = 26\n font_factor = 1.0 + (0.2 / resize_factor_height - 1.0) / 8\n\n fig = plt.figure(figsize=(2 * self.occupancy_map.info.width *\n resize_factor_height, self.occupancy_map.info.height * resize_factor_height))\n title = \"Dirt probability distribution in perspective of the initial map image\"\n fig.suptitle(title, fontsize=fontsize * font_factor, fontweight=\"bold\")\n\n viridis = cm.get_cmap('autumn_r', 256)\n newcolors = viridis(np.linspace(0, 1, 256))\n black = np.array([0, 0, 0, 1])\n white = np.array([1, 1, 1, 1])\n newcolors[:1, :] = black\n newcolors[1:20, :] = white\n cmap = colors.ListedColormap(newcolors)\n\n # actual bins/edges:\n ax = fig.add_subplot(1, 2, 1, aspect=\"equal\")\n ax.set_title(\"Actual cells\", size=(fontsize - 2)\n * font_factor, fontweight=\"bold\")\n mesh_X, mesh_Y = np.meshgrid(xedges, yedges)\n ax.pcolormesh(mesh_X, mesh_Y, H, cmap=cmap,\n edgecolors=\"k\", linewidths=0.5)\n ax.set_xlabel(\"X axis [m]\", fontsize=(fontsize - 4) * font_factor)\n ax.set_ylabel(\"Y axis [m]\", fontsize=(fontsize - 4) * font_factor)\n ax.set_xticks(x_axis)\n 
ax.set_yticks(y_axis)\n\n plt.tick_params(axis='both', which='major',\n labelsize=(fontsize - 6) * font_factor)\n\n # interpolated:\n ax = fig.add_subplot(1, 2, 2, aspect=\"equal\",\n xlim=xedges[[0, -1]], ylim=yedges[[0, -1]])\n ax.set_title(\"Interpolated\", size=(fontsize - 2)\n * font_factor, fontweight=\"bold\")\n interpolated_image = NonUniformImage(\n ax, interpolation=\"bilinear\", cmap=cmap)\n xcenters = (xedges[:-1] + xedges[1:]) / 2\n ycenters = (yedges[:-1] + yedges[1:]) / 2\n interpolated_image.set_data(xcenters, ycenters, H)\n ax.images.append(interpolated_image)\n ax.set_xlabel(\"X axis [m]\", fontsize=(fontsize - 4) * font_factor)\n ax.set_ylabel(\"Y axis [m]\", fontsize=(fontsize - 4) * font_factor)\n ax.set_xticks(x_axis)\n ax.set_yticks(y_axis)\n\n plt.tick_params(axis='both', which='major',\n labelsize=(fontsize - 6) * font_factor)\n\n # get path to the output file\n rospack = rospkg.RosPack()\n pkg_path = rospack.get_path(self.output_pkg)\n image_path = pkg_path + \"/\" + self.distribution_folder + \\\n \"/\" + self.distribution_image\n\n plt.savefig(image_path)\n\n if self.enable_plotting:\n warning = \"Do not close this window until the end. Otherwise the generator node could crash!\"\n fig = plt.gcf()\n fig.canvas.set_window_title(warning)\n # show it (without blocking the remaining process)\n plt.ion()\n # plt.draw()\n plt.pause(0.001)\n # but cannot be closed without crashing this node!\n\n # each cell should have the probability of itself regarding the final distribution:\n # the occupied cells will have -1 and we do not want negative probabilities. So, we replace them with 0\n for (cell_y, cell_x), cell_value in np.ndenumerate(H):\n if cell_value < 0:\n H[cell_y][cell_x] = 0\n prob_grid = H / np.sum(H)\n\n rospy.loginfo(\n f\"*** WRAPPER MESSAGE ***\\n\\n\\t'{rospy.get_caller_id()}' has created the final probability distribution.\\n\"\n + \"\\tAn image of it was saved to (will be overridden each time!):\\n\"\n + image_path + \"\\n\")\n\n return prob_grid",
"def __generate_point_based_on_prob(self) -> Point:\n possible = False\n while not possible:\n # make the random decision based on a distribution (hot spots / different probabilities)\n prob_list = self.probability_distribution_grid.flatten()\n selected_index = np.random.choice(\n np.arange(0, len(prob_list)), p=prob_list)\n\n # get the indices of the cell (from the one array index)\n # width is the number of cells in x directions (it starts with cell 0/0) and is needed due to row-major order\n cell_x = int(selected_index % self.occupancy_map.info.width)\n cell_y = int(selected_index / self.occupancy_map.info.width)\n\n # get the real world coordinates (which represents the center of the cell)\n x = self.occupancy_map.info.origin.position.x + \\\n (cell_x + 0.5) * self.occupancy_map.info.resolution\n y = self.occupancy_map.info.origin.position.y + \\\n (cell_y + 0.5) * self.occupancy_map.info.resolution\n\n # Check if the actual cell is free of STATIC obstacles (not occupied)\n if not self.__cell_is_occupied(cell_x, cell_y):\n # Check for not occupied neighbors (the robot needs some space the reach it)\n if not self.__has_occupied_neighbors(cell_x, cell_y):\n # If actual spawning of dirt is enabled, then it should also be ensured that no other dirt object is already\n # at this position, because spawning a model in the same location of an already existing model can lead to problems\n if not self.prevent_duplicates or not self.__check_for_duplicates(Point(x, y, 0.0)):\n possible = True\n else:\n rospy.loginfo(\"*** WRAPPER MESSAGE ***\\n\\n\\tGenerated dirt at (%.2f | %.2f) was refused due to already \"\n \"active dirt at this position.\\n\\tGenerating next one...\\n\" % (x, y))\n else:\n rospy.loginfo(\"*** WRAPPER MESSAGE ***\\n\\n\\tGenerated dirt at (%.2f | %.2f) was refused due to occupied neighbor \"\n \"cells.\\n\\tGenerating next one...\\n\" % (x, y))\n else:\n rospy.loginfo(\"*** WRAPPER MESSAGE ***\\n\\n\\tGenerated dirt at (%.2f | %.2f) was refused due to occupied cell.\"\n \"\\n\\tGenerating next one...\\n\" % (x, y))\n return Point(x=x, y=y, z=0.0)",
"def generate_graph(self, calculate_rewards=False):\n g = nx.Graph()\n positions = {}\n\n # create nodes for all hotspots in list\n for hotspot in self.hotspot_list:\n if calculate_rewards:\n hotspot['rewards'] = get_hotspot_rewards(hotspot['address'], 5)\n else:\n hotspot['rewards'] = 0\n coords = (hotspot['lng'], hotspot['lat'])\n g.add_node(hotspot['address'], pos=coords, lat=hotspot['lat'], lng=hotspot['lng'],\n location_hex=hotspot['location_hex'], name=hotspot['name'], rewards=hotspot['rewards'],\n gain=hotspot['gain'], elevation=hotspot['elevation'])\n positions[hotspot['address']] = coords\n\n # create edges representing hotspots that witness each other\n i = 0\n for hotspot in self.hotspot_list:\n witness_list = list_witnesses_for_hotspot(hotspot['address'])\n hotspot['num_witnesses'] = len(witness_list)\n g.nodes[hotspot['address']]['num_witnesses'] = hotspot['num_witnesses']\n for witness in witness_list:\n\n # make sure that all witnesses are also in this subset\n if not any(witness['address'] == h['address'] for h in self.hotspot_list):\n continue\n\n # calculate distance between witness and challengee\n dist = math.sqrt((hotspot['lat'] - witness['lat']) ** 2 + (hotspot['lng'] - witness['lng']) ** 2)\n g.add_edge(hotspot['address'], witness['address'], weight=dist)\n\n i += 1\n if i % 10 == 0:\n print(f\"{str(i)} out of {str(len(self.hotspot_list))} hotspots complete...\")\n\n self.g = g\n self.positions = positions\n return g",
"def make_pnts(start_point, end_point, path, cellsize):\n pnts = misc.generateSamples(tran = [start_point, end_point], path = path)\n \n #Only do sampling at DEM with >3m resolution \n if cellsize[0] >= 5:\n pnts = misc.selectSamplePts([start_point, end_point], pnts, cellsize[0])\n return pnts",
"def get_boundary_points(model,\n x,\n y_onehot,\n batch_size=64,\n pipeline=['pgd'],\n search_range=['local', 'l2', 0.3, None, 100],\n clamp=[0, 1],\n backend='pytorch',\n device='cuda:0',\n **kwargs):\n\n bd = None\n dis2cls_bd = np.zeros(x.shape[0]) + 1e16\n if 'pgd' in pipeline:\n print(\">>> Start PGD Attack <<<\", end='\\n', flush=True)\n if backend == 'tf.keras':\n fmodel = fb.TensorFlowModel(model, bounds=(clamp[0], clamp[1]))\n x = tf.constant(x, dtype=tf.float32)\n y_onehot = tf.constant(y_onehot, dtype=tf.int32)\n if isinstance(search_range[2], float):\n if search_range[1] == 'l2':\n attack = fb.attacks.L2PGD(\n rel_stepsize=search_range[3] if search_range[3]\n is not None else 2 * search_range[2] / search_range[4],\n steps=search_range[4])\n else:\n attack = fb.attacks.LinfPGD(\n rel_stepsize=search_range[3] if search_range[3]\n is not None else 2 * search_range[2] / search_range[4],\n steps=search_range[4])\n\n boundary_points = []\n success = 0\n for i in trange(0, x.shape[0], batch_size):\n batch_x = x[i:i + batch_size]\n batch_y = y_onehot[i:i + batch_size]\n\n _, batch_boundary_points, batch_success = attack(\n fmodel,\n batch_x,\n tf.argmax(batch_y, -1),\n epsilons=[search_range[2]])\n\n boundary_points.append(\n batch_boundary_points[0])\n success += np.sum(batch_success)\n\n boundary_points = tf.concat(boundary_points, axis=0)\n success /= x.shape[0]\n\n print(\n f\">>> Attacking with EPS={search_range[2]} (norm={search_range[1]}), Success Rate={success} <<<\"\n )\n\n elif isinstance(search_range[2], (list, np.ndarray)):\n boundary_points = []\n success = 0.\n for i in trange(0, x.shape[0], batch_size):\n\n batch_x = x[i:i + batch_size]\n batch_y = y_onehot[i:i + batch_size]\n\n batch_boundary_points = None\n batch_success = None\n\n for eps in search_range[2]:\n if search_range[1] == 'l2':\n attack = fb.attacks.L2PGD(\n rel_stepsize=search_range[3] if search_range[3]\n is not None else 2 * eps / search_range[4],\n steps=search_range[4])\n else:\n attack = fb.attacks.LinfPGD(\n rel_stepsize=search_range[3] if search_range[3]\n is not None else 2 * eps / search_range[4],\n steps=search_range[4])\n\n _, c_boundary_points, c_success = attack(\n fmodel,\n batch_x,\n tf.argmax(batch_y, -1),\n epsilons=[eps])\n c_boundary_points = c_boundary_points[0].numpy()\n c_success = tf.cast(c_success[0], tf.int32).numpy()\n\n print(\n f\">>> Attacking with EPS={eps} (norm={search_range[1]}), Success Rate={tf.reduce_mean(tf.cast(c_success, tf.float32))} <<<\"\n )\n\n if batch_boundary_points is None:\n batch_boundary_points = c_boundary_points\n batch_success = c_success\n else:\n for i in range(batch_boundary_points.shape[0]):\n if batch_success[i] == 0 and c_success[i] == 1:\n batch_boundary_points[\n i] = c_boundary_points[i]\n batch_success[i] = c_success[i]\n\n boundary_points.append(batch_boundary_points)\n success += np.sum(batch_success)\n\n boundary_points = tf.concat(boundary_points, axis=0)\n success /= x.shape[0]\n\n else:\n raise TypeError(\n f\"Expecting eps as float or list, but got {type(search_range[3])}\"\n )\n\n y_pred = np.argmax(\n model.predict(boundary_points, batch_size=batch_size), -1)\n\n x = x.numpy()\n y_onehot = y_onehot.numpy()\n boundary_points = boundary_points.numpy()\n\n elif backend == 'pytorch':\n model.eval()\n x, y_onehot, model = to_device(x, y_onehot, model, device)\n fmodel = fb.PyTorchModel(model, bounds=(clamp[0], clamp[1]))\n\n model = PytorchModel(model)\n if isinstance(search_range[2], float):\n if search_range[1] == 'l2':\n attack = 
fb.attacks.L2PGD(\n rel_stepsize=search_range[3] if search_range[3]\n is not None else 2 * search_range[2] / search_range[4],\n steps=search_range[4])\n else:\n attack = fb.attacks.LinfPGD(\n rel_stepsize=search_range[3] if search_range[3]\n is not None else 2 * search_range[2] / search_range[4],\n steps=search_range[4])\n\n boundary_points = []\n success = 0\n for i in trange(0, x.shape[0], batch_size):\n batch_x = x[i:i + batch_size]\n batch_y = y_onehot[i:i + batch_size]\n\n _, batch_boundary_points, batch_success = attack(\n fmodel,\n batch_x,\n torch.argmax(batch_y, -1),\n epsilons=[search_range[2]])\n\n boundary_points.append(\n batch_boundary_points[0].unsqueeze(0))\n success += torch.sum(batch_success.detach())\n\n boundary_points = torch.cat(boundary_points, dim=0)\n success /= x.shape[0]\n\n print(\n f\">>> Attacking with EPS={search_range[2]} (norm={search_range[1]}), Success Rate={success.cpu().numpy()} <<<\"\n )\n\n elif isinstance(search_range[2], (list, np.ndarray)):\n boundary_points = []\n success = 0.\n for i in trange(0, x.shape[0], batch_size):\n\n batch_x = x[i:i + batch_size]\n batch_y = y_onehot[i:i + batch_size]\n\n batch_boundary_points = None\n batch_success = None\n\n for eps in search_range[2]:\n if search_range[1] == 'l2':\n attack = fb.attacks.L2PGD(\n rel_stepsize=search_range[3] if search_range[3]\n is not None else 2 * eps / search_range[4],\n steps=search_range[4])\n else:\n attack = fb.attacks.LinfPGD(\n rel_stepsize=search_range[3] if search_range[3]\n is not None else 2 * eps / search_range[4],\n steps=search_range[4])\n _, c_boundary_points, c_success = attack(\n fmodel,\n batch_x,\n torch.argmax(batch_y, -1),\n epsilons=[eps])\n c_boundary_points = c_boundary_points[0]\n c_success = c_success.squeeze(0)\n\n print(\n f\">>> Attacking with EPS={eps} (norm={search_range[1]}), Success Rate={c_success.detach().cpu().numpy().mean()} <<<\"\n )\n\n if batch_boundary_points is None:\n batch_boundary_points = c_boundary_points.detach(\n ).cpu()\n batch_success = c_success.detach().cpu()\n else:\n for i in range(batch_boundary_points.shape[0]):\n if batch_success[i] == 0 and c_success[i] == 1:\n batch_boundary_points[\n i] = c_boundary_points[i]\n batch_success[i] = c_success[i]\n\n boundary_points.append(batch_boundary_points)\n success += torch.sum(batch_success.detach()).float()\n\n boundary_points = torch.cat(boundary_points, dim=0)\n success /= x.shape[0]\n\n else:\n raise TypeError(\n f\"Expecting eps as float or list, but got {type(search_range[3])}\"\n )\n\n torch.cuda.empty_cache()\n y_pred = model(boundary_points,\n batch_size=batch_size,\n training=False,\n device=device)\n\n x = x.detach().cpu().numpy()\n y_onehot = y_onehot.detach().cpu().numpy()\n y_pred = y_pred.numpy()\n boundary_points = boundary_points.detach().cpu().numpy()\n \n else:\n raise ValueError(f\"Unknow backend: {backend}\")\n\n bd, dis2cls_bd = take_closer_bd(x, np.argmax(y_onehot, -1), bd,\n dis2cls_bd, boundary_points,\n np.argmax(y_pred, -1))\n\n if 'cw' in pipeline:\n print(\">>> Start CW Attack <<<\", end='\\n', flush=True)\n\n if backend == 'tf.keras':\n fmodel = fb.TensorFlowModel(model, bounds=(clamp[0], clamp[1]))\n x = tf.constant(x, dtype=tf.float32)\n y_onehot = tf.constant(y_onehot, dtype=tf.int32)\n\n attack = fb.attacks.L2CarliniWagnerAttack(\n stepsize=search_range[3] if search_range[3] is not None else\n 2 * search_range[2] / search_range[4],\n steps=search_range[4])\n\n boundary_points = []\n success = 0.\n for i in trange(0, x.shape[0], batch_size):\n batch_x = 
x[i:i + batch_size]\n batch_y = y_onehot[i:i + batch_size]\n\n _, batch_boundary_points, batch_success = attack(\n fmodel,\n batch_x,\n tf.argmax(batch_y, -1),\n epsilons=[search_range[2]])\n boundary_points.append(batch_boundary_points[0])\n success += tf.reduce_sum(tf.cast(batch_success, tf.int32))\n\n boundary_points = tf.concat(boundary_points, axis=0)\n success /= x.shape[0]\n\n print(\n f\">>> Attacking with EPS={search_range[2]} (norm={search_range[1]}), Success Rate={success} <<<\"\n )\n\n y_pred = np.argmax(\n model.predict(boundary_points, batch_size=batch_size), -1)\n\n x = x.numpy()\n y_onehot = y_onehot.numpy()\n boundary_points = boundary_points.numpy()\n\n elif backend == 'pytorch':\n model.eval()\n x, y_onehot, model = to_device(x, y_onehot, model, device)\n fmodel = fb.PyTorchModel(model, bounds=(clamp[0], clamp[1]))\n\n model = PytorchModel(model)\n attack = fb.attacks.L2CarliniWagnerAttack(\n stepsize=search_range[3] if search_range[3] is not None else\n 2 * search_range[2] / search_range[4],\n steps=search_range[4])\n\n boundary_points = []\n success = 0.\n for i in trange(0, x.shape[0], batch_size):\n batch_x = x[i:i + batch_size]\n batch_y = y_onehot[i:i + batch_size]\n\n _, batch_boundary_points, batch_success = attack(\n fmodel,\n batch_x,\n torch.argmax(batch_y, -1),\n epsilons=[search_range[2]])\n boundary_points.append(batch_boundary_points[0])\n success += torch.sum(batch_success.detach())\n\n boundary_points = torch.cat(boundary_points, dim=0)\n success /= x.shape[0]\n\n print(\n f\">>> Attacking with EPS={search_range[2]} (norm={search_range[1]}), Success Rate={success.cpu().numpy()} <<<\"\n )\n\n y_pred = model(boundary_points,\n batch_size=batch_size,\n training=False,\n device=device)\n\n x = x.detach().cpu().numpy()\n y_onehot = y_onehot.detach().cpu().numpy()\n y_pred = y_pred.detach().cpu().numpy()\n boundary_points = boundary_points.detach().cpu().numpy()\n\n else:\n raise ValueError(f\"Unknow backend: {backend}\")\n\n bd, dis2cls_bd = take_closer_bd(x, np.argmax(y_onehot, -1), bd,\n dis2cls_bd, boundary_points,\n np.argmax(y_pred, -1))\n\n return convert_to_numpy(bd), dis2cls_bd",
"def generate_probe_positions_of_spikes(base_folder, binary_data_filename, number_of_channels_in_binary_file,\n used_spikes_indices=None, position_mult=2.25, threshold=0.1):\n # Load the required data from the kilosort folder\n channel_map = np.load(os.path.join(base_folder, 'channel_map.npy'))\n active_channel_map = np.squeeze(channel_map, axis=1)\n channel_positions = np.load(os.path.join(base_folder, 'channel_positions.npy'))\n\n spike_templates = np.load(os.path.join(base_folder, ct.SPIKE_TEMPLATES_FILENAME))\n templates = np.load(os.path.join(base_folder, ct.TEMPLATES_FILENAME))\n\n data_raw = np.memmap(os.path.join(base_folder, binary_data_filename),\n dtype=np.int16, mode='r')\n\n number_of_timepoints_in_raw = int(data_raw.shape[0] / number_of_channels_in_binary_file)\n data_raw_kilosorted = np.reshape(data_raw, (number_of_channels_in_binary_file, number_of_timepoints_in_raw), order='F')\n\n spike_times = np.squeeze(np.load(os.path.join(base_folder, ct.SPIKE_TIMES_FILENAME)).astype(np.int))\n\n time_points = 50\n if used_spikes_indices is None:\n used_spikes_indices = np.arange(0, len(spike_times))\n\n # Run the loop over all spikes to get the positions\n counter = 0\n weighted_average_postions = np.empty((len(used_spikes_indices), 2))\n spike_distance_on_probe = np.empty(len(used_spikes_indices))\n for spike_index in np.arange(0, len(used_spikes_indices)):\n spike_raw_data = data_raw_kilosorted[active_channel_map,\n (spike_times[used_spikes_indices[spike_index]]-time_points):\n (spike_times[used_spikes_indices[spike_index]]+time_points)]\n template = templates[spike_templates[used_spikes_indices[spike_index]], :, :].squeeze()\n relevant_channels = _get_relevant_channels_over_median_peaks(threshold, template)\n\n spike_raw_data_median_over_time = np.median(spike_raw_data, axis=1)\n peaks_to_median = spike_raw_data_median_over_time - spike_raw_data.min(axis=1)\n peaks_to_median = peaks_to_median[relevant_channels]\n\n relevant_channels_sorted = [v for (k, v) in sorted(zip(peaks_to_median, relevant_channels), reverse=True)]\n\n peaks_to_median_sorted = sorted(peaks_to_median, reverse=True)\n peaks_to_median_sorted.append(np.median(spike_raw_data_median_over_time[relevant_channels]))\n\n weights = _normalize(peaks_to_median_sorted)[:-1]\n relevant_channels_positions = channel_positions[relevant_channels_sorted]\n\n pos_x = relevant_channels_positions[0, 0]\n pos_y = relevant_channels_positions[0, 1]\n\n new_pos_x = pos_x - np.mean(((pos_x - relevant_channels_positions[:, 0]) * weights)[1:])\n new_pos_y = pos_y - np.mean(((pos_y - relevant_channels_positions[:, 1]) * weights)[1:])\n weighted_average_postions[spike_index, :] = [new_pos_x, new_pos_y]\n spike_distance_on_probe[spike_index] = np.sqrt(np.power(new_pos_x, 2) + np.power(new_pos_y, 2))\n\n counter += 1\n if counter % 5000 == 0:\n print('Completed ' + str(counter) + ' spikes')\n weighted_average_postions = weighted_average_postions * position_mult\n\n # sort according to position on probe\n spike_indices_sorted_by_probe_distance = np.array([b[0] for b in sorted(enumerate(spike_distance_on_probe),\n key=lambda dist: dist[1])])\n spike_distances_on_probe_sorted = np.array([b[1] for b in sorted(enumerate(spike_distance_on_probe),\n key=lambda dist: dist[1])])\n\n np.save(os.path.join(base_folder, ct.WEIGHTED_SPIKE_POSITIONS_FILENAME), weighted_average_postions)\n\n return weighted_average_postions, spike_distance_on_probe, \\\n spike_indices_sorted_by_probe_distance, spike_distances_on_probe_sorted",
"def create_points(number): \n\n # generate x and y coordinates:\n x = np.random.permutation(2*number)[:number] - number\n y = np.random.permutation(2*number)[:number] - number\n\n points = [ { 0 : float(x[i]), 1 : float(y[i]), \"index\" : i} for i in range(len(x)) ]\n\n return points\n\n # generate points as coordinate pairs of floats.\n # return zip(map(float,x),map(float,y))",
"def hotp(self, counter=4):\n return generate_hotp(self.secret, counter)",
"def generate(self, x, **kwargs):\n # TODO Consider computing attack for a batch of samples at a time (no for loop)\n # Parse and save attack-specific parameters\n assert self.set_params(**kwargs)\n clip_min, clip_max = self.classifier.clip_values\n\n x_adv = np.copy(x)\n dims = list(x.shape[1:])\n preds = self.classifier.predict(x_adv, logits=False)\n tol = 1e-10\n\n for ind, val in enumerate(x_adv):\n d = np.random.randn(*dims)\n\n for _ in range(self.max_iter):\n d = self._normalize(d)\n preds_new = self.classifier.predict((val + d)[None, ...], logits=False)\n\n from scipy.stats import entropy\n kl_div1 = entropy(preds[ind], preds_new[0])\n\n # TODO remove for loop\n d_new = np.zeros_like(d)\n array_iter = np.nditer(d, op_flags=['readwrite'], flags=['multi_index'])\n for x in array_iter:\n x[...] += self.finite_diff\n preds_new = self.classifier.predict((val + d)[None, ...], logits=False)\n kl_div2 = entropy(preds[ind], preds_new[0])\n d_new[array_iter.multi_index] = (kl_div2 - kl_div1) / (self.finite_diff + tol)\n x[...] -= self.finite_diff\n d = d_new\n\n # Apply perturbation and clip\n val = np.clip(val + self.eps * self._normalize(d), clip_min, clip_max)\n x_adv[ind] = val\n\n return x_adv",
"def gen_placecells(self, min_spread=0.2):\r\n\r\n N = None\r\n num_tries = 1000 # a limit on the number of attempts to place a new placecell\r\n\r\n # assign random x,y locations to each neuron\r\n locations = [self.random_location()]\r\n while True:\r\n # generate a random new point\r\n new_loc = self.random_location()\r\n\r\n # check that the point isn't too close to previous points\r\n count = 0\r\n while min([self.calc_dist(new_loc, l) for l in locations]) < min_spread and count < num_tries:\r\n new_loc = self.random_location()\r\n count += 1\r\n\r\n # add the new point\r\n locations += [new_loc]\r\n\r\n if (N == None and count >= num_tries) or len(locations) == N:\r\n # stop when required number of place cells built (if N specified),\r\n # or when world has been decently filled\r\n break\r\n\r\n return locations",
"def generate_random_points(\n start: Float,\n end: Float,\n limit: Integer\n) -> List[Point]:\n\n return [\n Point(x=random.uniform(start, end), y=random.uniform(start, end))\n for _ in range(limit)\n ]",
"def generate_suggestions(self):\n\n track_table = tasks.get_kdbg(self.obj_vm).PoolTrackTable\n\n for pair in self.distance:\n table_base = obj.Object(\"address\", \n offset = track_table - pair[0], \n vm = self.obj_vm)\n\n table_size = obj.Object(\"address\", \n offset = track_table - pair[1], \n vm = self.obj_vm)\n\n if table_size != 0 and self.obj_vm.is_valid_address(table_base):\n break\n\n debug.debug(\"Distance Map: {0}\".format(repr(self.distance)))\n debug.debug(\"PoolTrackTable: {0:#x}\".format(track_table))\n debug.debug(\"PoolBigPageTable: {0:#x} => {1:#x}\".format(table_base.obj_offset, table_base))\n debug.debug(\"PoolBigPageTableSize: {0:#x} => {1:#x}\".format(table_size.obj_offset, table_size))\n yield table_base, table_size",
"def hotspots(name, output, dimensions, hotspots):\n HotspotsDatasetProcessor(name=name,\n output_filename=output,\n dimensions=dimensions,\n hotspot_count=hotspots).process()",
"def add_points(grid, num_points):\n \n for i in range(num_points):\n # Coord for crit point\n rand_x = random.randint(0, GRID_WIDTH - 1)\n rand_y = random.randint(0, GRID_HEIGHT - 1)\n \n # Set value of crit point\n elev = (MAX_HEIGHT - MIN_HEIGHT) * random.random() + MIN_HEIGHT\n grid[rand_x][rand_y] = elev * PEAK_HEIGHT\n \n return grid",
"def paint_a_picture():\n # Make a training set (many random i,j coord and an x by y box around that coord to start with)\n # Throw it into the net\n # Test how it does for some random coordinate inputs\n pass",
"def prep_pointcloud(input_dict,\n root_path,\n voxel_generator,\n target_assigner,\n db_sampler=None,\n max_voxels=20000,\n max_sweeps=10,\n remove_outside_points=False,\n training=True,\n create_targets=True,\n shuffle_points=False,\n remove_unknown=False,\n gt_rotation_noise=(-np.pi / 3, np.pi / 3),\n gt_loc_noise_std=(1.0, 1.0, 1.0),\n global_rotation_noise=(-np.pi / 4, np.pi / 4),\n global_scaling_noise=(0.95, 1.05),\n global_random_rot_range=(0.78, 2.35),\n global_translate_noise_std=(0, 0, 0),\n num_point_features=4,\n anchor_area_threshold=1,\n gt_points_drop=0.0,\n gt_drop_max_keep=10,\n remove_points_after_sample=True,\n anchor_cache=None,\n remove_environment=False,\n random_crop=False,\n reference_detections=None,\n out_size_factor=2,\n use_group_id=False,\n multi_gpu=False,\n min_points_in_gt=-1,\n random_flip_x=True,\n random_flip_y=True,\n sample_importance=1.0,\n out_dtype=np.float32):\n t = time.time()\n class_names = target_assigner.classes\n points = input_dict[\"lidar\"][\"points\"]\n indices = input_dict[\"lidar\"][\"indices\"]\n origins = input_dict[\"lidar\"][\"origins\"]\n if training:\n anno_dict = input_dict[\"lidar\"][\"annotations\"]\n gt_dict = {\n \"gt_boxes\": anno_dict[\"boxes\"],\n \"gt_names\": anno_dict[\"names\"],\n \"gt_importance\": np.ones([anno_dict[\"boxes\"].shape[0]], dtype=anno_dict[\"boxes\"].dtype),\n }\n if \"difficulty\" not in anno_dict:\n difficulty = np.zeros([anno_dict[\"boxes\"].shape[0]],\n dtype=np.int32)\n gt_dict[\"difficulty\"] = difficulty\n else:\n gt_dict[\"difficulty\"] = anno_dict[\"difficulty\"]\n if use_group_id and \"group_ids\" in anno_dict:\n group_ids = anno_dict[\"group_ids\"]\n gt_dict[\"group_ids\"] = group_ids\n calib = None\n if \"calib\" in input_dict:\n calib = input_dict[\"calib\"]\n\n # # Disable these two since we do not do this for NuScenes\n # if reference_detections is not None:\n # assert calib is not None and \"image\" in input_dict\n # C, R, T = box_np_ops.projection_matrix_to_CRT_kitti(P2)\n # frustums = box_np_ops.get_frustum_v2(reference_detections, C)\n # frustums -= T\n # frustums = np.einsum('ij, akj->aki', np.linalg.inv(R), frustums)\n # frustums = box_np_ops.camera_to_lidar(frustums, rect, Trv2c)\n # surfaces = box_np_ops.corner_to_surfaces_3d_jit(frustums)\n # masks = points_in_convex_polygon_3d_jit(points, surfaces)\n # points = points[masks.any(-1)]\n # if remove_outside_points:\n # assert calib is not None\n # image_shape = input_dict[\"image\"][\"image_shape\"]\n # points = box_np_ops.remove_outside_points(\n # points, calib[\"rect\"], calib[\"Trv2c\"], calib[\"P2\"], image_shape)\n\n # # Very interesting attempt\n # # I have tried the same and found it doesn't really work\n # if remove_environment is True and training:\n # selected = kitti.keep_arrays_by_name(gt_names, target_assigner.classes)\n # _dict_select(gt_dict, selected)\n # masks = box_np_ops.points_in_rbbox(points, gt_dict[\"gt_boxes\"])\n # points = points[masks.any(-1)]\n\n metrics = {}\n\n point_indices_to_remove = None\n if training:\n \"\"\"\n boxes_lidar = gt_dict[\"gt_boxes\"]\n bev_map = simplevis.nuscene_vis(points, boxes_lidar)\n cv2.imshow('pre-noise', bev_map)\n \"\"\"\n selected = kitti.drop_arrays_by_name(gt_dict[\"gt_names\"], [\"Denture\"])\n _dict_select(gt_dict, selected)\n if remove_unknown:\n remove_mask = gt_dict[\"difficulty\"] == -1\n \"\"\"\n gt_boxes_remove = gt_boxes[remove_mask]\n gt_boxes_remove[:, 3:6] += 0.25\n points = prep.remove_points_in_boxes(points, gt_boxes_remove)\n \"\"\"\n keep_mask = 
np.logical_not(remove_mask)\n _dict_select(gt_dict, keep_mask)\n gt_dict.pop(\"difficulty\")\n\n # This part is interesting - we will need to do the same\n if min_points_in_gt > 0:\n # points_count_rbbox takes 10ms with 10 sweeps nuscenes data\n point_counts = box_np_ops.points_count_rbbox(points, gt_dict[\"gt_boxes\"])\n mask = point_counts >= min_points_in_gt\n _dict_select(gt_dict, mask)\n\n gt_boxes_mask = np.array(\n [n in class_names for n in gt_dict[\"gt_names\"]], dtype=np.bool_)\n\n if db_sampler is not None:\n group_ids = None\n if \"group_ids\" in gt_dict:\n group_ids = gt_dict[\"group_ids\"]\n\n sampled_dict = db_sampler.sample_all(\n root_path,\n gt_dict[\"gt_boxes\"],\n gt_dict[\"gt_names\"],\n num_point_features,\n random_crop,\n gt_group_ids=group_ids,\n calib=calib)\n\n if sampled_dict is not None:\n sampled_gt_names = sampled_dict[\"gt_names\"]\n sampled_gt_boxes = sampled_dict[\"gt_boxes\"]\n sampled_points = sampled_dict[\"points\"]\n sampled_gt_masks = sampled_dict[\"gt_masks\"]\n gt_dict[\"gt_names\"] = np.concatenate(\n [gt_dict[\"gt_names\"], sampled_gt_names], axis=0)\n gt_dict[\"gt_boxes\"] = np.concatenate(\n [gt_dict[\"gt_boxes\"], sampled_gt_boxes])\n gt_boxes_mask = np.concatenate(\n [gt_boxes_mask, sampled_gt_masks], axis=0)\n sampled_gt_importance = np.full(\n [sampled_gt_boxes.shape[0]], sample_importance,\n dtype=sampled_gt_boxes.dtype)\n gt_dict[\"gt_importance\"] = np.concatenate(\n [gt_dict[\"gt_importance\"], sampled_gt_importance])\n\n if group_ids is not None:\n sampled_group_ids = sampled_dict[\"group_ids\"]\n gt_dict[\"group_ids\"] = np.concatenate(\n [gt_dict[\"group_ids\"], sampled_group_ids])\n\n # # Commented out because we have a new way of removing points\n # if remove_points_after_sample:\n # masks = box_np_ops.points_in_rbbox(points, sampled_gt_boxes)\n # point_indices_to_remove = np.flatnonzero(masks.any(-1))\n # # # Delay this process so we can use the full point cloud\n # # # when we do the ray stopping algorithm\n # # points = points[np.logical_not(masks.any(-1))]\n\n # # Paste objects behind so that we don't have to update indices\n # points = np.concatenate([sampled_points, points], axis=0)\n points = np.concatenate([points, sampled_points], axis=0)\n\n pc_range = voxel_generator.point_cloud_range\n group_ids = None\n if \"group_ids\" in gt_dict:\n group_ids = gt_dict[\"group_ids\"]\n\n # # Disable this one for now (not used in PointPillars anyways)\n # prep.noise_per_object_v3_(\n # gt_dict[\"gt_boxes\"],\n # points,\n # gt_boxes_mask,\n # rotation_perturb=gt_rotation_noise,\n # center_noise_std=gt_loc_noise_std,\n # global_random_rot_range=global_random_rot_range,\n # group_ids=group_ids,\n # num_try=100)\n\n # should remove unrelated objects after noise per object\n # for k, v in gt_dict.items():\n # print(k, v.shape)\n _dict_select(gt_dict, gt_boxes_mask)\n gt_classes = np.array(\n [class_names.index(n) + 1 for n in gt_dict[\"gt_names\"]],\n dtype=np.int32)\n gt_dict[\"gt_classes\"] = gt_classes\n gt_dict[\"gt_boxes\"], points, origins = prep.random_flip(\n gt_dict[\"gt_boxes\"], points, origins, 0.5, random_flip_x, random_flip_y)\n gt_dict[\"gt_boxes\"], points, origins = prep.global_rotation_v2(\n gt_dict[\"gt_boxes\"], points, origins, *global_rotation_noise)\n gt_dict[\"gt_boxes\"], points, origins = prep.global_scaling_v2(\n gt_dict[\"gt_boxes\"], points, origins, *global_scaling_noise)\n prep.global_translate_(\n gt_dict[\"gt_boxes\"], points, origins, global_translate_noise_std)\n bv_range = 
voxel_generator.point_cloud_range[[0, 1, 3, 4]]\n mask = prep.filter_gt_box_outside_range_by_center(gt_dict[\"gt_boxes\"], bv_range)\n _dict_select(gt_dict, mask)\n\n # limit rad to [-pi, pi]\n gt_dict[\"gt_boxes\"][:, 6] = box_np_ops.limit_period(\n gt_dict[\"gt_boxes\"][:, 6], offset=0.5, period=2 * np.pi)\n\n # boxes_lidar = gt_dict[\"gt_boxes\"]\n # bev_map = simplevis.nuscene_vis(points, boxes_lidar)\n # cv2.imshow('post-noise', bev_map)\n # cv2.waitKey(0)\n\n # # Disable this for now (not used in PointPillars anyways)\n # if shuffle_points:\n # # shuffle is a little slow.\n # np.random.shuffle(points)\n\n # [0, -40, -3, 70.4, 40, 1]\n voxel_size = voxel_generator.voxel_size\n pc_range = voxel_generator.point_cloud_range\n grid_size = voxel_generator.grid_size\n\n # organize points into lists based on timestamps\n time_stamps = points[indices[:-1], -1] # counting on the fact we do not miss points from any intermediate time_stamps\n time_stamps = (time_stamps[:-1]+time_stamps[1:])/2\n time_stamps = [-1000.0] + time_stamps.tolist() + [1000.0] # add boundaries\n time_stamps = np.array(time_stamps)\n\n # # LL_OCCUPIED, LL_FREE = 0.85, -0.4\n # lo_occupied = np.log(0.7 / (1 - 0.7))\n # lo_free = np.log(0.4 / (1 - 0.4))\n\n # is there are additional points (from database sampling)\n num_original = indices[-1]\n if len(points) > num_original:\n # split data into two half (indexed and un-indexed)\n original_points, sampled_points = points[:num_original], points[num_original:]\n # compute occupancy and masks\n # visibility, original_mask, sampled_mask = mapping.compute_visibility_and_masks(\n # original_points, sampled_points, origins, time_stamps, pc_range, min(voxel_size)\n # )\n logodds, original_mask, sampled_mask = mapping.compute_logodds_and_masks(\n original_points, sampled_points, origins, time_stamps, pc_range, min(voxel_size) # , lo_occupied, lo_free\n )\n # apply visible mask\n points = np.concatenate((original_points[original_mask], sampled_points[sampled_mask]))\n else:\n # visibility = mapping.compute_visibility(\n # points, origins, time_stamps, pc_range, min(voxel_size)\n # )\n logodds = mapping.compute_logodds(\n points, origins, time_stamps, pc_range, min(voxel_size) #, lo_occupied, lo_free\n )\n\n # T = len(time_stamps)-1\n # visibility = visibility.reshape(T, -1)\n # if T < (1 + max_sweeps):\n # visibility = np.pad(visibility, ((0, (1+max_sweeps)-T), (0,0)), 'edge')\n\n # with open(f'./utils/mapping/examples/{time.time()}.pkl', 'wb') as f:\n # ##\n # pickle.dump(original_points, f)\n # pickle.dump(sampled_points, f)\n # pickle.dump(origins, f)\n # pickle.dump(time_stamps, f)\n # pickle.dump(pc_range, f)\n # pickle.dump(voxel_size, f)\n # ##\n # pickle.dump(occupancy, f)\n # pickle.dump(original_mask, f)\n # pickle.dump(sampled_mask, f)\n\n if training:\n if min_points_in_gt > 0:\n # points_count_rbbox takes 10ms with 10 sweeps nuscenes data\n point_counts = box_np_ops.points_count_rbbox(points, gt_dict[\"gt_boxes\"])\n mask = point_counts >= min_points_in_gt\n _dict_select(gt_dict, mask)\n\n # [352, 400]\n t1 = time.time()\n if not multi_gpu:\n res = voxel_generator.generate(\n points, max_voxels)\n voxels = res[\"voxels\"]\n coordinates = res[\"coordinates\"]\n num_points = res[\"num_points_per_voxel\"]\n num_voxels = np.array([voxels.shape[0]], dtype=np.int64)\n else:\n res = voxel_generator.generate_multi_gpu(\n points, max_voxels)\n voxels = res[\"voxels\"]\n coordinates = res[\"coordinates\"]\n num_points = res[\"num_points_per_voxel\"]\n num_voxels = 
np.array([res[\"voxel_num\"]], dtype=np.int64)\n metrics[\"voxel_gene_time\"] = time.time() - t1\n example = {\n 'voxels': voxels,\n # 'visibility': visibility,\n 'logodds': logodds, \n 'num_points': num_points,\n 'coordinates': coordinates,\n \"num_voxels\": num_voxels,\n \"metrics\": metrics,\n }\n if calib is not None:\n example[\"calib\"] = calib\n feature_map_size = grid_size[:2] // out_size_factor\n feature_map_size = [*feature_map_size, 1][::-1]\n # print(f'feature_map_size in prep_pointcloud(): {feature_map_size}')\n if anchor_cache is not None:\n # print('having anchor cache')\n anchors = anchor_cache[\"anchors\"]\n anchors_bv = anchor_cache[\"anchors_bv\"]\n anchors_dict = anchor_cache[\"anchors_dict\"]\n matched_thresholds = anchor_cache[\"matched_thresholds\"]\n unmatched_thresholds = anchor_cache[\"unmatched_thresholds\"]\n\n else:\n # print('NOT having anchor cache')\n ret = target_assigner.generate_anchors(feature_map_size)\n anchors = ret[\"anchors\"]\n anchors = anchors.reshape([-1, target_assigner.box_ndim])\n anchors_dict = target_assigner.generate_anchors_dict(feature_map_size)\n anchors_bv = box_np_ops.rbbox2d_to_near_bbox(\n anchors[:, [0, 1, 3, 4, 6]])\n matched_thresholds = ret[\"matched_thresholds\"]\n unmatched_thresholds = ret[\"unmatched_thresholds\"]\n # print(f'anchors.shape: {anchors.shape}')\n\n example[\"anchors\"] = anchors\n anchors_mask = None\n if anchor_area_threshold >= 0:\n # slow with high resolution. recommend disable this forever.\n coors = coordinates\n dense_voxel_map = box_np_ops.sparse_sum_for_anchors_mask(\n coors, tuple(grid_size[::-1][1:]))\n dense_voxel_map = dense_voxel_map.cumsum(0)\n dense_voxel_map = dense_voxel_map.cumsum(1)\n anchors_area = box_np_ops.fused_get_anchors_area(\n dense_voxel_map, anchors_bv, voxel_size, pc_range, grid_size)\n anchors_mask = anchors_area > anchor_area_threshold\n # example['anchors_mask'] = anchors_mask.astype(np.uint8)\n example['anchors_mask'] = anchors_mask\n # print(\"prep time\", time.time() - t)\n metrics[\"prep_time\"] = time.time() - t\n if not training:\n return example\n example[\"gt_names\"] = gt_dict[\"gt_names\"]\n # voxel_labels = box_np_ops.assign_label_to_voxel(gt_boxes, coordinates,\n # voxel_size, coors_range)\n if create_targets:\n t1 = time.time()\n targets_dict = target_assigner.assign(\n anchors,\n anchors_dict,\n gt_dict[\"gt_boxes\"],\n anchors_mask,\n gt_classes=gt_dict[\"gt_classes\"],\n gt_names=gt_dict[\"gt_names\"],\n matched_thresholds=matched_thresholds,\n unmatched_thresholds=unmatched_thresholds,\n importance=gt_dict[\"gt_importance\"])\n\n \"\"\"\n boxes_lidar = gt_dict[\"gt_boxes\"]\n bev_map = simplevis.nuscene_vis(points, boxes_lidar, gt_dict[\"gt_names\"])\n assigned_anchors = anchors[targets_dict['labels'] > 0]\n ignored_anchors = anchors[targets_dict['labels'] == -1]\n bev_map = simplevis.draw_box_in_bev(bev_map, [-50, -50, 3, 50, 50, 1], ignored_anchors, [128, 128, 128], 2)\n bev_map = simplevis.draw_box_in_bev(bev_map, [-50, -50, 3, 50, 50, 1], assigned_anchors, [255, 0, 0])\n cv2.imshow('anchors', bev_map)\n cv2.waitKey(0)\n\n boxes_lidar = gt_dict[\"gt_boxes\"]\n pp_map = np.zeros(grid_size[:2], dtype=np.float32)\n voxels_max = np.max(voxels[:, :, 2], axis=1, keepdims=False)\n voxels_min = np.min(voxels[:, :, 2], axis=1, keepdims=False)\n voxels_height = voxels_max - voxels_min\n voxels_height = np.minimum(voxels_height, 4)\n # sns.distplot(voxels_height)\n # plt.show()\n pp_map[coordinates[:, 1], coordinates[:, 2]] = voxels_height / 4\n pp_map = (pp_map * 
255).astype(np.uint8)\n pp_map = cv2.cvtColor(pp_map, cv2.COLOR_GRAY2RGB)\n pp_map = simplevis.draw_box_in_bev(pp_map, [-50, -50, 3, 50, 50, 1], boxes_lidar, [128, 0, 128], 1)\n cv2.imshow('heights', pp_map)\n cv2.waitKey(0)\n \"\"\"\n example.update({\n 'labels': targets_dict['labels'],\n 'reg_targets': targets_dict['bbox_targets'],\n # 'reg_weights': targets_dict['bbox_outside_weights'],\n 'importance': targets_dict['importance'],\n })\n return example",
"def generator(self, random, args):\r\n locations = [i for i in range(len(self.weights))]\r\n random.shuffle(locations)\r\n return locations",
"def gpbandits(model, data, iters=10, kernel='se', cl=0.1, v=0.0, num_samples=500, verbose=True, best_model_log=False):\n\n num_dims = model.num_dims # number of hyperparameter dimensions\n\n # initial model evaluation\n points = model.encode()[np.newaxis,:]\n scores = np.array([model.train_test_cv(data)])\n\n # best model and corresponding value at each iteration\n if best_model_log:\n best_point_tmp = []\n best_point_tmp.append(points[0,:])\n\n # print update\n if verbose:\n print(\"Iteration: %03d | Score: %.06e\" %(0, scores[0]))\n #print(\"Iteration: %03d | Design Point: %f | Score: %.06e\" %(0,points[0,:] scores[0]))\n\n # loop\n for i in range(iters):\n\n # sample num_Samples random points from [0,1)^num_dims\n candidates = sample(num_dims, num_samples)\n\n # find GP posterior\n A = formK(candidates, candidates, kernel, cl)\n B = formK(points, points, kernel, cl) + v*np.eye(points.shape[0])\n C = formK(candidates, points, kernel, cl)\n tmp = C.dot(np.linalg.inv(B))\n mu = tmp.dot(scores)\n Sigma = A - tmp.dot(C.T)\n var = np.diagonal(Sigma) + np.finfo(float).eps\n sig = np.sqrt(var)\n\n # choose new point with best expected improvement\n exp_imp = expected_improvement(scores.min(), mu, sig)\n best_idx = np.argmax(exp_imp)\n best_point = candidates[best_idx]\n\n # set hyperparameters with best sampled point\n model.decode(best_point)\n\n # return re-encoded point\n new_point = model.encode()\n\n # evaluate model\n new_score = model.train_test_cv(data)\n\n # append to points/scores lists\n points = np.vstack((points, best_point)) # use best_point, not re-encoded new_point to break discrete symmetries\n scores = np.append(scores, new_score)\n\n # save progress\n save_checkpoint(points, scores)\n\n # print update\n if verbose:\n print(\"Iteration: %03d | Score: %.06e\" %(i+1, new_score))\n #print(\"Iteration: %03d | Design Point: %f | Score: %.06e\" %(i+1, best_point, new_score))\n\n if best_model_log:\n ind = np.argmin(scores)\n best_point_tmp.append(points[ind])\n\n\n\n # return best model\n ind = np.argmin(scores)\n best_overall_point = points[ind]\n model.decode(best_overall_point)\n\n if not best_model_log:\n return model\n else:\n return model, best_point_tmp",
"def generate(random, lower, upper, count=1):\n if count > 1:\n points = []\n\n for x in range(lower.x, upper.x):\n for y in range(lower.y, upper.y):\n points.append(Point(x, y)) # REFACTOR: Not very efficient\n\n return random.sample(points, count)\n else:\n return Point(random.randrange(lower.x, upper.x), random.randrange(lower.y, upper.y))",
"def handle_guess(self, newPointList):\n if not self.ready_to_publish:\n return False\n assert isinstance(newPointList, list)\n assert isinstance(newPointList[0], Point)\n\n # Find the limits of the input data.\n xmax = -1.0e6 ; ymax = -1.0e6\n xmin = 1.0e6 ; ymin = 1.0e6\n for pt in newPointList:\n xmax = max(xmax, pt.point[0])\n ymax = max(ymax, pt.point[1])\n xmin = min(xmin, pt.point[0])\n ymin = min(ymin, pt.point[1])\n\n # Shrink the map to accommodate the relevant area\n self.mapData.sample((xmin,ymin), (xmax,ymax))\n\n # Cruise through the map looking for empty cells next to occupied\n # ones. These will be the likely cells when a bump is encountered.\n #\n # Because of the possibility of bumping an object that isn't on the\n # map, any empty map cell is possible. Therefore, we run through\n # the map, packing the map data into a list of Point objects, since\n # that's what the perfesser wants for input. While we're running\n # through, we keep a separate list of empty cells next to full ones.\n wallPointList = []\n emptyPointList = []\n for xi in range(self.mapData.ogS.info.width):\n for yi in range(self.mapData.ogS.info.height):\n if self.mapData.mapArrayS[xi,yi] < 50:\n p = Point()\n p.point = self.mapData.transform((xi, yi))\n emptyPointList.append(p)\n if (self.occupiedNeighbor(xi, yi)):\n newP = Point()\n newP.point = (p.point[0] + np.random.normal(0.0, self.mapData.ogS.info.resolution/3.0),\n p.point[1] + np.random.normal(0.0, self.mapData.ogS.info.resolution/3.0),\n p.point[2])\n\n wallPointList.append(newP)\n\n # Using the wallError, sample the two lists together to get a roughly\n # correct distribution of points to feed to the perfesser.\n self.mapPointList = []\n for i in range(self.nPoints):\n if i < self.wallError * self.nPoints:\n self.mapPointList.append(random.choice(wallPointList))\n else:\n self.mapPointList.append(random.choice(emptyPointList))\n\n self.guesser.newPoints(self.mapPointList)\n\n pts = Belief()\n pts.points = newPointList\n self.guesser.update(pts)\n\n self.pointList = self.guesser.outPoints()\n self.ready_to_publish = False\n return True",
"def generate(self):\n self.generate_points()\n self.generate_edges()",
"def create_points(self):\n v1 = 0.0\n v2 = 0.5\n v3 = 0.25\n v4 = 0.2 # only used for hexgrid\n\n points = []\n\n points.append((v1, v1, v1)) # 0\n points.append((v2, v1, v1)) # 1\n points.append((v2, v2, v1)) # 2\n points.append((v1, v2, v1)) # 3\n\n points.append((v1, v1, v2)) # 4\n points.append((v2, v1, v2)) # 5\n points.append((v2, v2, v2)) # 6\n points.append((v1, v2, v2)) # 7\n\n points.append((v3, v1, v1)) # 8\n points.append((v2, v3, v1)) # 9\n points.append((v3, v2, v1)) # 10\n points.append((v1, v3, v1)) # 11\n\n points.append((v1, v1, v3)) # 12\n points.append((v2, v1, v3)) # 13\n points.append((v2, v2, v3)) # 14\n points.append((v1, v2, v3)) # 15\n\n points.append((v3, v1, v2)) # 16\n points.append((v2, v3, v2)) # 17\n points.append((v3, v2, v2)) # 18\n points.append((v1, v3, v2)) # 19\n\n points.append((v4, v1, v1)) # 20\n points.append((v1, v4, v1)) # 21\n points.append((v1, v1, v4)) # 22\n\n return points",
"def run_example(num_points_to_sample=1000, verbose=True, **kwargs):\n\n exp = Experiment([[1, 52], [0, 6], [1, 52]]) # 2D experiment, we build a tensor product domain\n # Bootstrap with some known or already sampled point(s)\n exp.historical_data.append_sample_points([\n SamplePoint([26, 2, 46], get_fitness([26, 2, 35]), 0.5), # Iterables of the form [point, f_val, f_var] are also allowed\n ])\n # Sample num_points_to_sample points\n for i in range(num_points_to_sample):\n # Use MOE to determine what is the point with highest Expected Improvement to use next\n next_point_to_sample = map(round, gp_next_points(exp, **kwargs)[0]) # in [A, X, B] form, rounded integers\n value_of_next_point = get_fitness(next_point_to_sample)\n\n if verbose:\n if in_results(next_point_to_sample):\n print '***', \"Sampled f({0:s}) = {1:.18E}\".format(str(next_point_to_sample), value_of_next_point), '***'\n else:\n print \"Sampled f({0:s}) = {1:.18E}\".format(str(next_point_to_sample), value_of_next_point)\n\n bank[i,0:3] = next_point_to_sample\n bank[i,3] = value_of_next_point\n # Add the information about the point to the experiment historical data to inform the GP\n exp.historical_data.append_sample_points([SamplePoint(next_point_to_sample, value_of_next_point, 0.01)]) # We can add some noise",
"def generateData(numPoints,x,y):\n\tfor i in range(0,numPoints):\n\t\tif (i % 2 == 0):\n\t\t\tx.append(random.normalvariate(25, 15))\n\t\t\ty.append(random.normalvariate(25, 15))\n\t\t\t \n\t\t\t\n\t\telse:\n\t\t\tx.append(random.normalvariate(75, 15))\n\t\t\ty.append(random.normalvariate(75, 15))",
"def synthetic_gen(self):\r\n logging.debug('generating synthetic map...')\r\n data = self.realData\r\n unit = Params.unitGrid\r\n x_min = np.floor(Params.LOW[0] / unit) * unit\r\n x_max = np.ceil(Params.HIGH[0] / unit) * unit\r\n y_min = np.floor(Params.LOW[1] / unit) * unit\r\n y_max = np.ceil(Params.HIGH[1] / unit) * unit\r\n\r\n x_CELL = int(np.rint((x_max - x_min) / unit))\r\n y_CELL = int(np.rint((y_max - y_min) / unit))\r\n\r\n self.root.n_box = np.array([[x_min, y_min], [x_max, y_max]])\r\n\r\n self.mapp = np.zeros((x_CELL, y_CELL)) - 1 # ## initialize every cell with -1\r\n for i in range(Params.NDATA): # ## populate the map\r\n point = data[:, i]\r\n cell_x = int(np.floor((point[0] - x_min) / unit))\r\n cell_y = int(np.floor((point[1] - y_min) / unit))\r\n if self.mapp[cell_x, cell_y] != -1:\r\n self.mapp[cell_x, cell_y] += 1\r\n else:\r\n self.mapp[cell_x, cell_y] = 1\r\n\r\n for i in range(x_CELL): # ## perturb the counts\r\n for j in range(y_CELL):\r\n if self.mapp[i, j] != -1:\r\n self.mapp[i, j] += np.rint(self.differ.getNoise(1, 0.5 * self.param.Eps))\r\n else:\r\n self.mapp[i, j] = np.rint(self.differ.getNoise(1, 0.5 * self.param.Eps))\r\n # if noisy count is negative, ignore the noise and generate no points\r\n if self.mapp[i, j] < 0:\r\n self.mapp[i, j] = 0",
"def generate(self, start_point: int) -> None:\n self.map_points = [start_point for i in range(self.map_length)]\n current_segment = 0\n\n while current_segment <= self.map_length - c.SAFE_EXCESS:\n how_much_segments_to_side = random.randrange(c.MIN_SEGMENTS_TO_SIDE, c.MAX_SEGMENTS_TO_SIDE, 1)\n max_curvature = math.log(self.score.get_score() + 1, c.MAX_CURVATURE_COEFFICIENT)\n if max_curvature > c.MAX_CURVATURE:\n max_curvature = c.MAX_CURVATURE\n curvature = round(random.uniform(-max_curvature, max_curvature), 1)\n\n for j in range(current_segment, current_segment + how_much_segments_to_side):\n if current_segment + how_much_segments_to_side <= self.map_length:\n while not (\n c.MAX_LEFT_DEVIATION_OF_ROAD < self.map_points[\n j] + curvature < c.MAX_RIGHT_DEVIATION_OF_ROAD):\n curvature = round(random.uniform(-max_curvature, max_curvature), 1)\n self.map_points[j + 1] = self.map_points[j] + curvature\n\n current_segment += how_much_segments_to_side",
"def find_knn_hyperparams():\n n_neighbors = np.arange(5, 10)\n ps = np.arange(1, 10)\n results = []\n\n for p in ps:\n result = []\n for _ in range(10):\n data = FaceDataset(\"embeddings/known\", n=50)\n train_data, train_labels = data.train()\n test_data, test_labels = data.test()\n accs = []\n for n in n_neighbors:\n clf = KNeighborsClassifier(n_neighbors=n, weights=\"distance\", p=p)\n clf, _ = train(clf, train_data, train_labels)\n acc, _ = test(clf, test_data, test_labels)\n accs.append(acc)\n result.append(accs)\n result = np.mean(result, axis=0)\n results.append(result)\n\n plots = []\n for i in range(len(ps)):\n p = plotly.graph_objs.Scatter(x=n_neighbors, y=results[i], name=\"p={}\".format(ps[i]))\n plots.append(p)\n\n plotly.offline.plot(plots, filename=\"knn.html\")\n print(\"C={}\".format(n_neighbors[np.argmax(results)]))",
"def main():\n size = WINDOW_SIZE\n num_points = NUM_OF_POINTS\n\n # initialize the pygame system\n pg.init()\n\n # set the screen size and get teh window handle\n screen = pg.display.set_mode((size, size))\n pg.display.set_caption(\"TSP with Kohonen rings\")\n\n # for ever do...\n while 1 == 1:\n # initialize a new KOHONENRING instance\n my_sofm = KOHONENRING(2, num_points * 3, 0.99, 0.99999, 0.01)\n\n # generate random points in [0.0, 1.0] as our \"towns\"\n points = np.random.rand(num_points, 2)\n\n # reset the tick counter\n tick = 0\n\n # while not reached MAX_STEP\n while tick < MAX_STEP:\n # get the all teh weights from the ring\n weights = my_sofm.get_weights()\n\n # do we need to draw now?\n if tick % DRAW_STEP == 0:\n # fill all white\n screen.fill((255, 255, 255))\n\n # for each weight in all the weights\n for it_x in range(weights.shape[0]):\n # mark the start of the line\n pos_start = weights[it_x] * size\n\n # are we the last one?\n if it_x < weights.shape[0] - 1:\n # mark the end of the line and draw\n pos_stop1 = weights[it_x + 1] * size\n pg.draw.line(screen, (0, 0, 0), pos_start, pos_stop1)\n else:\n # mark the first neuron as the end and draw -> this creates a ring\n pos_stop1 = weights[0] * size\n pg.draw.line(screen, (0, 0, 0), pos_start, pos_stop1)\n\n # draw the neuron as circle\n pg.draw.circle(screen, (0, 0, 255), (int(pos_start[0]), int(pos_start[1])), 2)\n\n # for each point in the list of \"towns\"\n for point in points:\n # scale the position according to the window and draw\n pos_circle = point * size\n pg.draw.circle(screen, (255, 0, 0), (int(pos_circle[0]), int(pos_circle[1])), 4)\n # make the changes visible on the screen\n pg.display.update()\n\n # perform a training step with one random \"town\" from the point list\n my_sofm.train(points[np.random.randint(0, num_points)])\n\n # increment the tick counter\n tick += 1\n\n # handle the events to see if someone wants to close the window\n for event in pg.event.get():\n if event.type == pg.QUIT:\n pg.quit()\n return",
"def random_points_ascending_hillclimber(house, all_houses, waters, total_value_map):\n total_value_map_NEW = total_value_map\n\n # check in welke range het huis geplaats kan worden, niet kijkend naar water of andere \n rangex = MAXIMUM_WIDTH - house.width\n rangey = MAXIMUM_HEIGHT - house.length\n\n for x in range(100):\n # maak random x en y coördinaat\n randomizex = rangex * random()\n randomizey = rangey * random()\n\n # bewaar oude locaties\n tempx = house.bottom_left[0]\n tempy = house.bottom_left[1]\n \n # verander locatie\n bottom_left = (randomizex,randomizey)\n house.location(bottom_left)\n\n # als je je huis op nieuwe locatie kan plaatsen\n if place_house(house, all_houses, waters) == True:\n # bereken nieuw waarde map, waarin huis is verplaatst\n total_value_map_temp = 0\n for item in all_houses.values():\n for house in item:\n house.extra_meters()\n total_value_map_temp += house.totalprice()\n\n # als waarde met nieuwe locatie hoger is, verander deze\n if total_value_map_NEW < total_value_map_temp:\n total_value_map_NEW = total_value_map_temp\n # als waarde niet hoger is verander naar oude locatie en bereken weer totale waarde map\n else:\n bottom_left = (tempx,tempy)\n house.location(bottom_left)\n if place_house(house, all_houses, waters) == True:\n for item in all_houses.values():\n for houses in item:\n houses.extra_meters()\n houses.totalprice()\n # als huis niet geplaats kan worden, verander naar oude locatie en bereken weer totale waarde map\n else:\n bottom_left = (tempx,tempy)\n house.location(bottom_left)\n if place_house(house, all_houses, waters) == True:\n for item in all_houses.values():\n for houses in item:\n houses.extra_meters()\n houses.totalprice()\n\n return all_houses, total_value_map_NEW",
"def _place_nodes(self, i, j, step, max_nodes):\n points = []\n for k in range(max_nodes):\n while(True):\n t = Point(random.randint(i,i+step), random.randint(j,j+step)) \n if all([point.get_distance(t) > self.min_distance for point in points]):\n points.append(t)\n break\n \n for point in points:\n n=Node(self.counter, point)\n self.nodes.append(n)\n self.counter+=1",
"def get_pairs(feature_size=256, window_size=31):\n # Approach proposed by the professor\n # std = 1 / 5 * window_size\n # point_pairs2 = np.int32(np.random.randn(4, feature_size) * std)\n\n # Generate random point pairs\n # Using the approach G II: Gaussian(0, 1/25 * window_size^2)\n std = 0\n dev = 1 / 25 * (window_size * window_size)\n point_pairs = np.int32(np.random.normal(std, dev, (4, feature_size)))\n # Make sure the points are inside the window (patch)\n half_window_size = window_size // 2 -1\n brief_points = np.maximum(-half_window_size, np.minimum(point_pairs, half_window_size))\n return brief_points",
"def points_generator(self):\n rows, cols = self.game.board.board_size\n points = [Point(i, j) for i, j in product(range(rows), range(cols))]\n for point in points:\n yield point",
"def pointPerClass(classMap):\n rand1 = 100 * pcr.uniform(pcr.boolean(classMap)) \n rand2 = 100 * pcr.uniform(pcr.boolean(classMap))\n rand3 = 100 * pcr.uniform(pcr.boolean(classMap))\n \n randomMap = pcr.scalar(classMap) * rand1 * rand2 * rand3\n pointMap = pcr.ifthen(randomMap == pcr.areaminimum(randomMap, classMap), classMap)\n nrPointsPerClass = pcr.areatotal(pcr.scalar(pcr.boolean(pointMap)), classMap)\n assert pcr.cellvalue(pcr.mapmaximum(nrPointsPerClass), 0)[0] == 1\n return pointMap",
"def get_hit_points(min, max):\n return random.randint(min, max)",
"def generate_points(self, userdata):\n # TODO: generate along multiple axes\n x, y, _ = userdata.initial_point\n x, y = 0, 0\n\n i = 0\n for _ in range(self.num_points):\n point = PointStamped()\n point.header.frame_id = \"map\"\n point.point.x = x + i * self.spacing\n point.point.y = y\n point.point.z = 0.0\n\n i += 1\n\n yield point",
"def point_sample_fine_grained_features(features_list, feature_scales, boxes, point_coords):\n cat_boxes = Boxes.cat(boxes)\n num_boxes = [len(b) for b in boxes]\n\n point_coords_wrt_image = get_point_coords_wrt_image(cat_boxes.tensor, point_coords)\n split_point_coords_wrt_image = torch.split(point_coords_wrt_image, num_boxes)\n\n point_features = []\n for idx_img, point_coords_wrt_image_per_image in enumerate(split_point_coords_wrt_image):\n point_features_per_image = []\n for idx_feature, feature_map in enumerate(features_list):\n h, w = feature_map.shape[-2:]\n scale = torch.tensor([w, h], device=feature_map.device) / feature_scales[idx_feature]\n point_coords_scaled = point_coords_wrt_image_per_image / scale\n point_features_per_image.append(\n point_sample(\n feature_map[idx_img].unsqueeze(0),\n point_coords_scaled.unsqueeze(0),\n align_corners=False,\n )\n .squeeze(0)\n .transpose(1, 0)\n )\n point_features.append(cat(point_features_per_image, dim=1))\n\n return cat(point_features, dim=0), point_coords_wrt_image",
"def _createPoints(self):\n self.doc2quest = self._docMapping()\n\n self.unigram, self.bigram = invertedIndex(self.documents)\n self.points = [dataPoint(key, self) for key in self.questions.keys()]",
"def weightGenerate(self):\n\t\tfor i in range(0, self.numberOfInput):\n\t\t\tself.weight.append(random.random()-0.5)",
"def create_training_set_blending():\n stime = time.time()\n indexes = []\n y_train = np.zeros((nb_places * nb_pis,))\n x_train = np.zeros((nb_places * nb_pis, nb_clfs))\n for i, place_id in enumerate(place_ids):\n # 1. Get the relevance ratings for the place (y).\n ratings = get_relevance_ratings(place_id, connection=c_study, cursor=cur_study)\n if len(ratings) == 0: # filter the ratings.\n continue\n r = [np.mean(ratings[pi_id]['ratings']) if pi_id in ratings else 0 for pi_id in pis_ids]\n for k, pi_id in enumerate(pis_ids):\n cl = 0\n if r[k] >= 4:\n cl = 1\n y_train[i * nb_pis + k] = cl # int(np.ceil(r[k]))\n indexes.append((place_id, pi_id))\n\n # 2. Get the predictions from the models for the place (x).\n for j, clf in enumerate(clfs):\n predictions = clf._get_prediction(place_id)\n p = [predictions[pi_id]['score'] if pi_id in predictions else 0 for pi_id in pis_ids]\n for k in range(nb_pis):\n x_train[i * nb_pis + k, j] = p[k]\n print(\"[.] Done with x_train: %s, y_train: %s, indexes: %s (%.2f)\" % (x_train.shape, y_train.shape, len(indexes), time.time()-stime))\n return x_train, y_train, indexes",
"def attack(self, x, y_p, **kwargs):\n inputs = []\n outputs = []\n\n # Create the initial random perturbation\n device_name = '/gpu:0'\n self.model.set_device(device_name)\n with tf.device(device_name):\n with tf.variable_scope('init_rand'):\n if self.rand_init:\n eta = tf.random_uniform(tf.shape(x), -self.eps, self.eps)\n eta = clip_eta(eta, self.ord, self.eps)\n eta = tf.stop_gradient(eta)\n else:\n eta = tf.zeros_like(x)\n\n # TODO: Break the graph only nGPU times instead of nb_iter times.\n # The current implementation by the time an adversarial example is\n # used for training, the weights of the model have changed nb_iter\n # times. This can cause slower convergence compared to the single GPU\n # adversarial training.\n for i in range(self.nb_iter):\n # Create the graph for i'th step of attack\n inputs += [OrderedDict()]\n outputs += [OrderedDict()]\n device_name = x.device\n self.model.set_device(device_name)\n with tf.device(device_name):\n with tf.variable_scope('step%d' % i):\n if i > 0:\n # Clone the variables to separate the graph of 2 GPUs\n x = clone_variable('x', x)\n y_p = clone_variable('y_p', y_p)\n eta = clone_variable('eta', eta)\n\n inputs[i]['x'] = x\n inputs[i]['y_p'] = y_p\n outputs[i]['x'] = x\n outputs[i]['y_p'] = y_p\n inputs[i]['eta'] = eta\n\n eta = self.attack_single_step(x, eta, y_p)\n\n if i < self.nb_iter-1:\n outputs[i]['eta'] = eta\n else:\n # adv_x, not eta is the output of the last step\n adv_x = x + eta\n if (self.clip_min is not None and self.clip_max is not None):\n adv_x = tf.clip_by_value(adv_x, self.clip_min, self.clip_max)\n adv_x = tf.stop_gradient(adv_x, name='adv_x')\n outputs[i]['adv_x'] = adv_x\n\n return inputs, outputs",
"def generate_keypoint(cls, keypoint):\n batch_size = keypoint.shape[0]\n\n joints = np.zeros(shape=[batch_size, 3, 14])\n\n for i in range(batch_size):\n for j in range(14):\n coords = cv.minMaxLoc(keypoint[i, :, :, j])\n joints[i, 0, j] = coords[3][0]\n joints[i, 1, j] = coords[3][1]\n joints[i, 2, j] = coords[1]\n return joints",
"def create_points_using_atmospheric_model(x_source_list, y_source_list, side_length, number_of_maps):\n pollution_maps = {}\n number_of_sources = len(x_source_list)\n\n x = 5\n\n for map in range(0, number_of_maps): # loops through for each map\n\n pollution_values = gaussian_atmospheric_dispersion_model(x_source_list[0], y_source_list[0],\n side_length) # Creates map of all the pollution simulations\n for i in range(1, len(x_source_list)):\n pollution_values += gaussian_atmospheric_dispersion_model(x_source_list[i], y_source_list[i],\n side_length) # adds additional sources to the pollution map\n label_index = 0\n point_map = {}\n\n for i in range(0, side_length): # assigns pollution values to points\n y = 5\n for j in range(0, side_length):\n point_map[label_index] = Point(label_index, pollution_values[i][j], x, y)\n label_index += 1\n y += 10\n x += 10\n pollution_maps[map] = point_map\n\n return pollution_maps",
"def __init__(self, num_points = 5000):\n self.num_points = num_points\n\n #all walks start at 0.0\n self.x_values = [0]\n self.y_values = [0]",
"def samplepoint(x,u):\n return point(x)",
"def _buildWeights(self):\r\n # Compute the spatial tree\r\n kd = spatial.cKDTree(self.XYin)\r\n \r\n # Perform query on all of the points in the grid\r\n dist,self.ind=kd.query(self.XYout,distance_upper_bound=self.maxdist,k=self.NNear)\r\n \r\n self.Nc = np.size(self.ind,axis=0)\r\n print '%d interpolation points.'%self.Nc\r\n # Now loop through and get the weights for each point\r\n self.W = np.zeros((self.NNear,self.Nc))\r\n\r\n # Print percentages\r\n p0=0\r\n pstep=5\r\n for ii in range(0,self.Nc):\r\n \r\n if self.verbose:\r\n pfinish = float(ii)/float(self.Nc)*100.0\r\n if pfinish> p0:\r\n print '%3.1f %% complete...'%pfinish\r\n p0+=pstep\r\n \r\n W = self.getWeights(dist[ii,:],self.XYin[self.ind[ii,:],0],self.XYin[self.ind[ii,:],1])\r\n self.W[:,ii] = W.T",
"def _generate_training_batch(ground_truth_data, representation_function,\n batch_size, num_points, random_state):\n points = None # Dimensionality depends on the representation function.\n labels = np.zeros(num_points, dtype=np.int64)\n for i in range(num_points):\n labels[i], feature_vector = _generate_training_sample(\n ground_truth_data, representation_function, batch_size, random_state)\n if points is None:\n points = np.zeros((num_points, feature_vector.shape[0]))\n points[i, :] = feature_vector\n return points, labels",
"def get_regular_points(self, npoints=2500, device=\"gpu0\"):\n if not self.npoints == npoints:\n self.npoints = npoints\n vertices, faces = self.generate_square(np.sqrt(npoints))\n self.mesh = pymesh.form_mesh(vertices=vertices, faces=faces) # 10k vertices\n self.vertex = torch.from_numpy(self.mesh.vertices).to(device).float()\n self.num_vertex = self.vertex.size(0)\n self.vertex = self.vertex.transpose(0,1).contiguous().unsqueeze(0)\n\n return Variable(self.vertex[:, :2].contiguous().to(device))",
"def run_example(num_points_to_sample=20, verbose=True, **kwargs):\n exp = Experiment([[0, 2], [0, 4]]) # 2D experiment, we build a tensor product domain\n # Bootstrap with some known or already sampled point(s)\n exp.historical_data.append_sample_points([\n SamplePoint([0, 0], function_to_minimize([0, 0]), 0.05), # Iterables of the form [point, f_val, f_var] are also allowed\n ])\n\n # Sample num_points_to_sample points\n for _ in range(num_points_to_sample):\n # Use MOE to determine what is the point with highest Expected Improvement to use next\n next_point_to_sample = gp_next_points(exp, **kwargs)[0] # By default we only ask for one point\n # Sample the point from our objective function, we can replace this with any function\n value_of_next_point = function_to_minimize(next_point_to_sample)\n\n if verbose:\n print \"Sampled f({0:s}) = {1:.18E}\".format(str(next_point_to_sample), value_of_next_point)\n\n # Add the information about the point to the experiment historical data to inform the GP\n exp.historical_data.append_sample_points([SamplePoint(next_point_to_sample, value_of_next_point, 0.01)]) # We can add some noise",
"def __new_position(self):\n iterables = [range(self.size_x), range(self.size_y)]\n points = [] # Save all points in size.\n for point in itertools.product(*iterables):\n points.append(point)\n\n current_points = [] # Save used points.\n for object in self.objects:\n if (object.x, object.y) not in current_points:\n current_points.append((object.x, object.y))\n\n for point in current_points:\n points.remove(point) # Remove all used points.\n\n location = np.random.choice(a=range(len(points)), replace=False)\n return points[location]",
"def get_topology(init_height, map_size, max_height):\n num_features = math.ceil(map_size[0] / FEATURE_SIZE)\n generators = [create_valley, create_hill, create_plateau]\n previous = [random.randrange(len(generators)), random.randrange(len(generators))]\n feature_points = []\n for i in range(num_features):\n while True:\n idx = random.randrange(len(generators))\n # do not repeat topology more than once\n if previous.count(idx) != 2:\n break\n new_points = generators[idx](map_size[1], init_height, max_height, FEATURE_SIZE)\n for idp in range(len(new_points)):\n # as the feature points are generated in local coordinates, shift them on the x axis to the correct part\n # of the terrain.\n new_points[idp] = (new_points[idp][0] + i * FEATURE_SIZE, new_points[idp][1])\n feature_points.extend(new_points)\n previous.pop(0)\n previous.append(idx)\n\n return feature_points",
"def generate(self):\n t_0 = time()\n\n if self.random_towers:\n self.towers = np.random.rand(self.number_towers, 2)\n else:\n step = np.ceil(np.sqrt(self.number_towers)).astype('int')\n\n if step ** 2 != self.number_towers:\n self.number_towers = step ** 2\n print(f'WARNING: number of towers changed to {self.number_towers}')\n\n X, Y = np.mgrid[0:1:step * 1j, 0:1:step * 1j]\n positions = np.vstack([X.ravel(), Y.ravel()])\n self.towers = positions.swapaxes(1, 0)\n\n self.towers_manager = TowersManager(self.towers, self.vel_friction)\n\n self.distances = self.towers_manager.generate_distances()\n self.print(f'Took {time() - t_0} to create distrances matrix')\n\n t = time()\n self.probabilities = self.generate_probabilities()\n self.print(f'Took {time() - t} to create probabilities matrix')\n\n t = time()\n self.traces = self.generate_weighted_users_traces()\n self.print(f'Took {time() - t} to create user traces')\n\n t = time()\n self.aggregated_data = self.generate_aggregate_data()\n self.print(f'Took {time() - t} to build aggregated data')\n\n self.print(f'Took {time() - t_0} to generate all')",
"def calc_points_park(self):\n be = ['_'] * 8\n be += self.b[ 0: 5]\n be += ['_'] * 2\n be += self.b[ 5:10]\n be += ['_'] * 2\n be += self.b[10:15]\n be += ['_'] * 2\n be += self.b[15:20]\n be += ['_'] * 8\n cnt_PG = 0\n cnt_P = 0\n points = 0\n vptab_park = (0, 2, 4, 7, 11)\n for i in range(8, 34):\n if be[i] == 'P' or be[i] == 'G':\n cnt_PG += 1\n if be[i] == 'P':\n cnt_P += 1\n neigh_tower_office = 0\n if be[i - 1] == 'T' or be[i - 1] == 'O':\n neigh_tower_office += 1\n if be[i + 1] == 'T' or be[i + 1] == 'O':\n neigh_tower_office += 1\n if be[i - 7] == 'T' or be[i - 7] == 'O':\n neigh_tower_office += 1\n if be[i + 7] == 'T' or be[i + 7] == 'O':\n neigh_tower_office += 1\n points += vptab_park[neigh_tower_office]\n if 'park' in args.exp:\n points += cnt_PG\n if 'repr' in args.exp:\n recycle_energy = max(self.energy - self.energy_used, 0)\n points += recycle_energy\n else:\n penalty_energy = max(self.energy - self.energy_used - cnt_P, 0)\n points -= penalty_energy\n return points",
"def create_points_with_random_pollution_1d(length, mean, std):\n new_map = {}\n x = 5\n for i in range(0, length):\n new_map[i] = (Point(i, np.random.normal(mean, std), x))\n x = x + 10\n return new_map",
"def newPoint(x, y, weighting, n):\n currX=x\n currY=y\n\n closest = np.argmax(weighting)\n closest_tuple = np.unravel_index(closest, (n+1, n+1))\n closestX, closestY = closest_tuple\n \n # Set x value\n if closestX == x:\n x = closestX\n elif closestX >= x+1:\n x = x+1\n elif closestX <= x-1:\n x = x-1\n\n # Set y value\n if closestY == y:\n y = closestY\n elif closestY >= y+1:\n y = y+1\n elif closestY <= y-1:\n y = y-1\n\n try:\n if weighting[x,y]==0:\n # top left corner\n if x==currX-1 and y==currY+1:\n if math.sqrt((currX-closestX)**2+(closestY-currY+1)**2)<=math.sqrt((currX-1-closestX)**2+(closestY-currY)**2) and weighting[x,y+1]!=0:\n x=currX\n y=currY+1\n elif weighting[x-1,y]!=0:\n x=currX-1\n y=currY\n else:\n if weighting[x+1,y+1]!=0:\n x=currX+1\n y=currY+1\n elif weighting[x-1,y-1]!=0:\n x=currX-1\n y=currY-1\n # top middle\n if x==currX and y==currY+1:\n if weighting[x-1,y+1]!=0:\n x=currX-1\n y=currY+1\n elif weighting[x+1,y+1]!=0:\n x=currX+1\n y=currY+1\n else:\n if weighting[x-1,y]!=0:\n x=currX-1\n y=currY\n elif weighting[x+1,y-1]!=0:\n x=currX+1\n y=currY\n # top right\n if x==currX+1 and y==currY+1:\n if math.sqrt((closestX-currX)**2+(closestY-currY+1)**2)<=math.sqrt((closestX-currX+1)**2+(closestY-currY)**2) and weighting[x,y+1]!=0:\n x=currX\n y=currY+1\n elif weighting[x+1,y]!=0:\n x=currX+1\n y=currY\n else:\n if weighting[x-1,y-1]!=0:\n x=currX-1\n y=currY-1\n elif weighting[x-1,y+1]!=0:\n x=currX-1\n y=currY+1\n # middle left\n if x==currX-1 and y==currY:\n if weighting[x-1,y+1]!=0:\n x=currX-1\n y=currY+1\n elif weighting[x-1,y-1]!=0:\n x=currX-1\n y=currY-1\n else:\n if weighting[x,y+1]!=0:\n x=currX\n y=currY+1\n elif weighting[x,y-1]!=0:\n x=currX\n y=currY-1\n # middle RIGHT\n if x==currX+1 and y==currY:\n if weighting[x+1,y-1]!=0:\n x=currX+1\n y=currY-1\n elif weighting[x+1,y+1]!=0:\n x=currX+1\n y=currY+1\n else:\n if weighting[x,y-1]!=0:\n x=currX\n y=currY-1\n elif weighting[x,y+1]!=0:\n x=currX\n y=currY+1\n # bottom left corner\n if x==currX-1 and y==currY-1:\n if math.sqrt((currX+1-closestX)**2+(currY-closestY)**2)<=math.sqrt((currX-closestX)**2+(currY-1-closestY)**2) and weighting[x-1,y]!=0:\n x=currX-1\n y=currY\n elif weighting[x,y-1]!=0:\n x=currX\n y=currY-1\n else:\n if weighting[x-1,y+1]!=0:\n x=currX-1\n y=currY+1\n elif weighting[x+1,y-1]!=0:\n x=currX+1\n y=currY-1\n # bottom middle\n if x==currX and y==currY-1:\n if weighting[x+1,y-1]!=0:\n x=currX+1\n y=currY-1\n elif weighting[x-1,y-1]!=0:\n x=currX-1\n y=currY-1\n else:\n if weighting[x-1,y]!=0:\n x=currX-1\n y=currY\n elif weighting[x+1,y]!=0:\n x=currX+1\n y=currY\n # bottom right\n if x==currX+1 and y==currY-1:\n if math.sqrt((closestX-currX)**2+(currY-1-closestY)**2)<=math.sqrt((closestX-currX+1)**2+(currY-closestY)**2) and weighting[x,y-1]!=0:\n x=currX\n y=currY-1\n elif weighting[x+1,y]!=0:\n x=currX+1\n y=currY\n else:\n if weighting[x-1,y-1]!=0:\n x=currX-1\n y=currY-1\n elif weighting[x+1,y+1]!=0:\n x=currX+1\n y=currY+1\n except:\n x=x\n y=y\n \n return x,y",
"def mutate_point_poly(mutated_genome):\n seed = random.randint(0,7)\n index = random.randint(0,max(0,len(mutated_genome)-1))\n if len(mutated_genome[index][2]) < 3: seed = 0\n if seed == 0:\n insert_point(mutated_genome,index)\n elif seed == 1:\n remove_point(mutated_genome,index)\n elif seed == 2:\n switch_points(mutated_genome,index)\n elif seed == 3:\n shuffle_points(mutated_genome,index)\n elif seed == 4:\n move_point(mutated_genome,index)\n elif seed == 5:\n shift_point(mutated_genome,index)\n elif seed == 6:\n increment_point(mutated_genome,index)\n else: #seed == 7:\n decrement_point(mutated_genome,index)",
"def hcgps(data_src, min_supp=MIN_SUPPORT, max_iteration=MAX_ITERATIONS, step_size=STEP_SIZE, return_gps=False):\n # Prepare data set\n d_set = DataGP(data_src, min_supp)\n d_set.init_attributes()\n attr_keys = [GI(x[0], x[1].decode()).as_string() for x in d_set.valid_bins[:, 0]]\n\n if d_set.no_bins:\n return []\n\n # Parameters\n it_count = 0\n var_min = 0\n counter = 0\n var_max = int(''.join(['1'] * len(attr_keys)), 2)\n eval_count = 0\n\n # Empty Individual Template\n best_sol = structure()\n candidate = structure()\n\n # Best Cost of Iteration\n best_costs = np.empty(max_iteration)\n best_patterns = []\n str_best_gps = list()\n str_iter = ''\n str_eval = ''\n repeated = 0\n\n # generate an initial point\n best_sol.position = None\n # candidate.position = None\n if best_sol.position is None:\n best_sol.position = np.random.uniform(var_min, var_max, N_VAR)\n # evaluate the initial point\n apply_bound(best_sol, var_min, var_max)\n best_sol.cost = costfxn(best_sol.position, attr_keys, d_set)\n\n # run the hill climb\n while counter < max_iteration:\n # while eval_count < max_evaluations:\n # take a step\n candidate.position = None\n if candidate.position is None:\n candidate.position = best_sol.position + (random.randrange(var_min, var_max) * step_size)\n apply_bound(candidate, var_min, var_max)\n candidate.cost = costfxn(candidate.position, attr_keys, d_set)\n\n if candidate.cost < best_sol.cost:\n best_sol = candidate.deepcopy()\n eval_count += 1\n str_eval += \"{}: {} \\n\".format(eval_count, best_sol.cost)\n\n best_gp = validategp(d_set, decodegp(attr_keys, best_sol.position))\n \"\"\":type best_gp: GP\"\"\"\n is_present = isduplicate(best_gp, best_patterns)\n is_sub = amcheck(best_patterns, best_gp, subset=True)\n if is_present or is_sub:\n repeated += 1\n else:\n if best_gp.support >= min_supp:\n best_patterns.append(best_gp)\n str_best_gps.append(best_gp.print(d_set.titles))\n\n try:\n # Show Iteration Information\n # Store Best Cost\n best_costs[it_count] = best_sol.cost\n str_iter += \"{}: {} \\n\".format(it_count, best_sol.cost)\n except IndexError:\n pass\n it_count += 1\n\n if max_iteration == 1:\n counter = repeated\n else:\n counter = it_count\n # Output\n out = json.dumps({\"Algorithm\": \"LS-GRAD\", \"Best Patterns\": str_best_gps, \"Iterations\": it_count})\n \"\"\":type out: object\"\"\"\n if return_gps:\n return out, best_patterns\n else:\n return out",
"def __init__(self, points, knn=16, **kwargs):\n super(FPSKNNGrouper, self).__init__()\n self.points = points # points number of Farthest Points Sampling\n self.knn = knn # number of k neighbors",
"def calc_points_tower(self):\n points = 0\n cnt_tower = 0\n vptab_tower = (0, 1, 3, 6, 10, 15)\n for i in range(20):\n if self.b[i] == 'T':\n points += vptab_tower[self.f[i]]\n cnt_tower += 1\n if 'poli' in args.exp:\n points += max(self.f)\n if 'scho' in args.exp:\n points += cnt_tower\n return points",
"def get_coords_gt_cost(expert_env, parent_dir, gen=False, n_waypoints=10000):\n\t# Step 1: Generate ground truth data, sampling uniformly from 7D angle space\n\tif gen == True:\n\t\twaypts = np.random.uniform(size=(n_waypoints, 7), low=0, high=np.pi*2)\n\t\t# Transform to 97D\n\t\traw_waypts = []\n\t\tfor waypt in waypts:\n\t\t\traw_waypts.append(expert_env.raw_features(waypt))\n\t\traw_waypts = np.array(raw_waypts)\n\n\telse:\n\t\t# load coordinates above the table\n\t\tdata_file = parent_dir + '/data/gtdata/data_table.npz'\n\t\tnpzfile = np.load(data_file)\n\t\traw_waypts = npzfile['x']\n\n\t# generate gt_labels\n\tfeat_idx = list(np.arange(expert_env.num_features))\n\tfeatures = [[0.0 for _ in range(len(raw_waypts))] for _ in range(0, len(expert_env.feature_list))]\n\tfor index in range(len(raw_waypts)):\n\t\tfor feat in range(len(feat_idx)):\n\t\t\tfeatures[feat][index] = expert_env.featurize_single(raw_waypts[index,:7], feat_idx[feat])\n\n\tfeatures = np.array(features).T\n\tgt_cost = np.matmul(features, np.array(expert_env.weights).reshape(-1,1))\n\n\treturn raw_waypts, gt_cost",
"def hotspot_data(self, num_timestamps=5, remove_outliers=True):\n while True:\n video_id = random.choice(self.video_ids)\n hotspot_func = self.hotspot_function(video_id)\n thumbnails = [th for th in self.video_thumbnails(video_id)]\n\n # The beginning of the video usually has too many views.\n while remove_outliers and thumbnails and thumbnails[0][1] < 20:\n del thumbnails[0]\n\n if hotspot_func is None or len(thumbnails) < num_timestamps:\n continue\n\n while len(thumbnails) > num_timestamps:\n del thumbnails[random.randrange(len(thumbnails))]\n _, timestamps = zip(*thumbnails)\n yield list(thumbnails), [float(hotspot_func(t)) for t in timestamps]",
"def generate_random_scatter(x_range, w, b, k):\n\tx_1 = []\n\ty_1 = []\n\tx_2 = []\n\ty_2 = []\n\tfor i in range(k):\n\t\txx = random.random() * (x_range[1] - x_range[0]) + x_range[0]\n\t\tx_1.append(xx)\n\t\tamplitude = random.randint(4, 15)\n\t\tyy = w * xx + b + amplitude\n\t\ty_1.append(yy)\n\n\t\txx = random.random() * (x_range[1] - x_range[0]) + x_range[0]\n\t\tx_2.append(xx)\n\t\tamplitude = random.randint(4, 15)\n\t\tyy = w * xx + b - amplitude\n\t\ty_2.append(yy)\n\treturn x_1, y_1, x_2, y_2",
"def subsample_hog_features(self, img, clf, nsteps, hog_img_features):\n bbox_coords = []; heatmap_coords = []\n nxsteps, nysteps = nsteps\n for xb in range(nxsteps):\n for yb in range(nysteps):\n ypos = yb*self.cells_per_step\n xpos = xb*self.cells_per_step\n # extract image patch via defining in image space\n xleft, ytop = (xpos*self.pix_per_cell, ypos*self.pix_per_cell)\n clr_trf_patch = img[ytop:ytop+self.window, xleft:xleft+self.window]\n patch_img = cvu.resize_image(clr_trf_patch, size=(64,64))\n # Get color features for a given patch\n spatial_features = cvf.bin_spatial(patch_img, size=self.spatial_size)\n hist_features = cvf.color_histogram(patch_img, nbins=self.hist_bins)\n # Get HOG Features for given patch: patches have discontinuties at edges: different gradient\n hog_features = [ hog_it[ypos:ypos+self.nblocks_per_window, xpos:xpos+self.nblocks_per_window].ravel()\n for hog_it in (hog_img_features) ]\n hog_features = np.hstack((hog_features))\n # concatenate features and perform classifier prediction\n patch_features = np.hstack((spatial_features, hist_features, hog_features)).reshape(1,-1)\n patch_features_sc = self.scaler.transform(patch_features)\n # perform prediction, check for positive instance detection\n #yhat = clf.predict(patch_features_sc)\n yhat = clf.predict_proba(patch_features_sc)\n pred_pos = yhat[:, 1] > 0.95\n if pred_pos:\n bbox_coord, heatmap_coord = self.create_coords(xleft, ytop)\n bbox_coords.append((bbox_coord))\n heatmap_coords.append((heatmap_coord))\n\n return bbox_coords, heatmap_coords",
"def build_one_point_lookup_table(self, **kwargs):\n galaxy_table = kwargs['input_galaxy_table']\n prim_galprop_bins = kwargs['prim_galprop_bins']\n\n self.one_point_lookup_table = np.zeros(\n len(prim_galprop_bins)+1, dtype=object)\n\n binned_prim_galprop = np.digitize(\n galaxy_table[self.prim_galprop_key], \n self.prim_galprop_bins)\n\n for i in range(len(self.one_point_lookup_table)):\n idx_bini = np.where(binned_prim_galprop == i)[0]\n if model_helpers.custom_len(idx_bini) > self.minimum_sampling:\n gals_bini = galaxy_table[idx_bini]\n abcissa = np.arange(len(gals_bini))/float(len(gals_bini)-1)\n ordinates = np.sort(gals_bini[self.galprop_key])\n self.one_point_lookup_table[i] = (\n model_helpers.custom_spline(abcissa, ordinates, k=2)\n )\n\n # For all empty lookup tables, fill them with the nearest lookup table\n unfilled_lookup_table_idx = np.where(\n self.one_point_lookup_table == 0)[0]\n filled_lookup_table_idx = np.where(\n self.one_point_lookup_table != 0)[0]\n\n if len(unfilled_lookup_table_idx) > 0:\n msg = (\"When building the one-point lookup table from input_galaxy_table, \" + \n \"there were some bins of prim_galprop_bins that contained fewer than \" + \n str(self.minimum_sampling)+ \" galaxies. In such cases, the lookup table \" + \n \"of the nearest sufficiently populated bin will be chosen.\")\n warn(msg)\n for idx in unfilled_lookup_table_idx:\n closest_filled_idx_idx = array_utils.find_idx_nearest_val(\n filled_lookup_table_idx, idx)\n closest_filled_idx = filled_lookup_table_idx[closest_filled_idx_idx]\n self.one_point_lookup_table[idx] = (\n self.one_point_lookup_table[closest_filled_idx])",
"def geo_sampler(self):\n state = np.zeros(self.K+1, dtype=int)\n\n # adds high one according to probability\n high_one = np.random.binomial(1, self.high_one_prob)\n if high_one:\n state = self.get_high_one(state)\n\n # pick the p in Geometric(p), where p is randomly chosen from predefined list of ps\n ps = self.geo_ps\n p_idx = np.random.randint(low=0, high=len(ps))\n p = ps[p_idx]\n for i in range(max(1000, int(1/(100000*self.weights[0])))):\n # get pieces at different levels, highest level = self.geo_high\n assert self.K+1 < 30, \"K too high, cannot use geo sampler\"\n levels = np.random.geometric(p, int(1.0/self.weights[0])) - 1\n idxs = np.where(levels < self.geo_high)\n levels = levels[idxs]\n\n # bin the levels into the same place which also sorts them from 0 to K\n # counts created separately to ensure correct shape\n tmp = np.bincount(levels)\n counts = np.zeros(self.K + 1)\n counts[:len(tmp)] = tmp\n\n # add levels to state with lowest levels going first\n for l in range(self.K + 1):\n max_pieces = (self.initial_potential -\n self.potential(state))/self.weights[l]\n max_pieces = int(np.min([counts[l], max_pieces]))\n state[l] += max_pieces\n\n # checks potential to break\n if self.potential(state) >= self.initial_potential - max(1e-8, self.weights[0]):\n break\n # checks potential to break\n if self.potential(state) >= self.initial_potential - max(1e-8, self.weights[0]):\n break\n\n return state",
"def generate_point(width, height):\n x = random.randrange(0 - OFFSET, width + OFFSET, 1)\n y = random.randrange(0 - OFFSET, height + OFFSET, 1)\n return (x, y)",
"def generate_samples(self):\n self.analytic_probability()",
"def _generate(self, feature_map_shape_list, **params):\n pass",
"def generate_waypoint(lowest_crime_index, points_dict_data, segmented_points):\n\n # passes in something like waypoints_dict_data is [{dictn,}, ... ,{dictw}]\n # points is [(pointn, pointn), ... ,(pointw, pointw)]\n print \"inside generate_waypoint\"\n print \"This is points_dict_data\", points_dict_data\n\n # do a for loop to see if we find the waypoint data that matches\n print \"this is points_dict_data\", points_dict_data\n for point_data in points_dict_data:\n print \"this is point_data\", point_data\n if lowest_crime_index in point_data.values():\n # store the waypoint coords\n segmented_points[0]['data']['waypoints'].append({\n 'location': {'lat': point_data['point'][0],\n 'lng': point_data['point'][1]},\n 'stopover': False # b/c not stop on the route, a recalc\n })\n # returns nothing, just appends stuff into segmented_points",
"def numberOfPoints(self):\n return 20000",
"def conditional_sample(p, y, temperature, key):\n tol = 1e-7\n p = np.clip(p, tol, 1 - tol)\n\n v = random.uniform(key, shape=y.shape)\n v_prime = (v * p + (1 - p)) * y + (v * (1 - p)) * (1 - y)\n v_prime = np.clip(v_prime, tol, 1 - tol)\n\n logit_v = logit(v_prime)\n logit_p = logit(p)\n return nn.sigmoid((logit_p + logit_v) / (temperature + tol))",
"def generate_point(self):\n x = random.uniform(0.0, 9999.9)\n y = random.uniform(0.0, 9999.9)\n random_point = Point(x, y)\n assert isinstance(random_point, Point)\n return random_point",
"def get_dots(self):\n logging.debug('Generate dots to draw')\n gc = self.coordinates\n coords = []\n zmin = ymin = xmin = self.fmin = 999999\n self.fmax = 0\n for line in gc:\n temp = [None, None, None, None] # X, Y, Z, Feedrate\n for c in line:\n if c.startswith('X'):\n temp[0] = float(c[1:])\n xmin = min(xmin, temp[0])\n elif c.startswith('Y'):\n temp[1] = float(c[1:])\n ymin = min(ymin, temp[1])\n elif c.startswith('Z'):\n temp[2] = float(c[1:])\n zmin = min(zmin, temp[2])\n elif c.startswith('F'):\n temp[3] = int(float(c[1:]))\n self.fmin = min(self.fmin, temp[3])\n self.fmax = max(self.fmax, temp[3])\n if ((temp[0] is not None) or (temp[1] is not None) or\n (temp[2] is not None) or (temp[3] is not None)):\n if coords:\n if temp[0] is None:\n temp[0] = coords[-1][0]\n if temp[1] is None:\n temp[1] = coords[-1][1]\n if temp[2] is None:\n temp[2] = coords[-1][2]\n if temp[3] is None:\n temp[3] = coords[-1][3]\n coords.append(temp)\n\n if (self.fmin == 999999) or (self.fmax == 0):\n raise GcodeError('Please check feedrate')\n if (xmin == ymin == zmin == 999999):\n raise GcodeError('Please check coordinates')\n if xmin == 999999:\n xmin = 0\n if ymin == 999999:\n ymin = 0\n if zmin == 999999:\n zmin = 0\n\n for i in coords: # if something is still 0\n if i[0] is None:\n i[0] = xmin\n if i[1] is None:\n i[1] = ymin\n if i[2] is None:\n i[2] = zmin\n if i[3] is None:\n i[3] = self.fmin\n i[0] -= xmin\n i[1] -= ymin\n i[2] -= zmin\n i[3] -= self.fmin\n\n self.fmax -= self.fmin\n self.colors_list = grad(MIN_COLOR, MAX_COLOR, self.fmax+1)\n\n dots = []\n for i in range(len(coords)):\n temp = []\n if i != len(coords)-1:\n temp = self.getColorLine(coords[i], coords[i+1])\n if temp:\n dots.extend(temp)\n\n return dots",
"def create_fit_model(features_df):\n model = NearestNeighbors(n_neighbors=10,\n n_jobs=-1)\n knn_spotify = model.fit(features_df)\n return knn_spotify",
"def random_point(self, n_samples=1, bound=1.0):\n samples = self._iterate_over_factors(\n \"random_point\", {\"n_samples\": n_samples, \"bound\": bound}\n )\n return samples",
"def _gen(self, datapoint_params) -> RecordThunkIter:\n\n def entrance(datapoints, params_list) -> RecordThunkIter:\n \"\"\"\n Return a generator iter througth an effectful generator\n iter through data will cause a network connection.\n \"\"\"\n size = len(params_list)\n effectful = zip(datapoints, params_list)\n\n def g():\n \"\"\" SIDE EFFECTFUL \"\"\"\n data, param = next(effectful)\n return ((MakeDict.make_spot_record(record, param)\n for record in data)\n if data is not None\n else iter([]))\n\n for _ in range(size):\n yield g\n\n datapoints = map(self._datapoint, datapoint_params)\n return entrance(datapoints, datapoint_params)",
"def mutate_point_wline(mutated_genome):\n seed = random.randint(0,7)\n index = random.randint(0,max(0,len(mutated_genome)-1))\n if len(mutated_genome[index][2]) < 1: seed = 0\n if seed == 0:\n insert_point_wline(mutated_genome,index)\n elif seed == 1:\n remove_point_wline(mutated_genome,index)\n elif seed == 2:\n switch_points_wline(mutated_genome,index)\n elif seed == 3:\n shuffle_points_wline(mutated_genome,index)\n elif seed == 4:\n move_point_wline(mutated_genome,index)\n elif seed == 5:\n shift_point_wline(mutated_genome,index)\n elif seed == 6:\n increment_point_wline(mutated_genome,index)\n else: #seed == 7:\n decrement_point_wline(mutated_genome,index)",
"def make_smiley_training_set(num_points=0, delta=0.05):\n log.out.info(\"Generating happy data.\")\n # Select coordinates to do an XOR like operation on\n coords = []\n bools = []\n x_min = 0.0\n x_max = 1.0\n y_min = 0.0\n y_max = 1.0\n for i in range(num_points):\n # Add num_points randomly\n coord_point = np.random.random(2)\n coord_point[0] = coord_point[0] * (x_max - x_min) + x_min\n coord_point[1] = coord_point[1] * (y_max - y_min) + y_min\n coords.append(coord_point)\n\n # Assign an xor boolean value to the coordinates\n for coord_point in coords:\n x = coord_point[0]\n y = coord_point[1]\n if (abs(x - 0.65) < delta) & (abs(y - 0.65) < (0.05+delta)):\n bools.append(True)\n elif (abs(x - 0.35) < delta) & (abs(y - 0.65) < (0.05+delta)):\n bools.append(True)\n elif ((x > 0.2) & (x < 0.8) &\n (abs(y - ((1.5 * (x - 0.5))**2 + 0.25)) < delta)):\n bools.append(True)\n else:\n bools.append(False)\n\n # Build training vectors\n train_in = None\n train_out = None\n for i, coord in enumerate(coords):\n # Need to initialize the arrays\n if i == 0:\n train_in = np.array([coord])\n train_out = np.array([[bools[i]]])\n else:\n train_in = np.append(train_in, np.array([coord]), axis=0)\n train_out = np.append(train_out, np.array([[bools[i]]]), axis=1)\n\n train_out = train_out.T\n return train_in, train_out",
"def generation(self,rounds):\n a = []\n b = []\n for i in range(rounds):\n self.fight()\n c = self.avgFitness()\n a.append(c[0])\n b.append(c[1])\n self.sort()\n self.cull()\n self.rePop()\n self.refresh()\n self.fight()\n self.sort()\n print self\n plt.scatter([x for x in range(len(a))],a,color = \"red\")\n plt.scatter([x for x in range(len(b))],b,color = \"green\")\n plt.show()",
"def generation(self,rounds):\n a = []\n b = []\n for i in range(rounds):\n self.fight()\n c = self.avgFitness()\n a.append(c[0])\n b.append(c[1])\n self.sort()\n self.cull()\n self.rePop()\n self.refresh()\n self.fight()\n self.sort()\n print self\n plt.scatter([x for x in range(len(a))],a,color = \"red\")\n plt.scatter([x for x in range(len(b))],b,color = \"green\")\n plt.show()",
"def kohonen():\n# plb.close('all')\n \n dim = 28*28\n data_range = 255.0\n \n # load in data and labels \n data = np.array(np.loadtxt('data.txt'))\n labels = np.loadtxt('labels.txt')\n\n # select 4 digits \n name = \"Stettler\"\n targetdigits = name2digits(name) # assign the four digits that should be used\n print(targetdigits) # output the digits that were selected\n\n # this selects all data vectors that corresponds to one of the four digits\n data = data[np.logical_or.reduce([labels==x for x in targetdigits]),:]\n \n dy, dx = data.shape\n \n #set the size of the Kohonen map. In this case it will be 6 X 6\n size_k = 6\n \n #set the width of the neighborhood via the width of the gaussian that\n #describes it\n sigma = 2.0\n \n #initialise the centers randomly\n centers = np.random.rand(size_k**2, dim) * data_range\n \n #build a neighborhood matrix\n neighbor = np.arange(size_k**2).reshape((size_k, size_k))\n\n #set the learning rate\n eta = 0.9 # HERE YOU HAVE TO SET YOUR OWN LEARNING RATE\n \n #set the maximal iteration count\n tmax = 5000 # this might or might not work; use your own convergence criterion\n \n #set the random order in which the datapoints should be presented\n i_random = np.arange(tmax) % dy\n np.random.shuffle(i_random)\n \n for t, i in enumerate(i_random):\n som_step(centers, data[i,:],neighbor,eta,sigma)\n\n # for visualization, you can use this:\n for i in range(size_k**2):\n plb.subplot(size_k,size_k,i)\n \n plb.imshow(np.reshape(centers[i,:], [28, 28]),interpolation='bilinear')\n plb.axis('off')\n \n # leave the window open at the end of the loop\n plb.show()\n plb.draw()",
"def train(self, input_vects):\n \n #Training iterations\n for iter_no in range(self._n_iterations):\n print(iter_no)\n if (iter_no % 1==0) & (iter_no>0) :\n \n self.map_plot(iter_no)\n centroid_grid = [[] for i in range(self._m)]\n self._weightages = list(self._sess.run(self._weightage_vects))\n self._locations = list(self._sess.run(self._location_vects))\n \n for i, loc in enumerate(self._locations):\n centroid_grid[loc[0]].append(self._weightages[i])\n self._centroid_grid = centroid_grid \n \n #Train with each vector one by one\n for input_vect in input_vects:\n self._sess.run(self._training_op,\n feed_dict={self._vect_input: input_vect,\n self._iter_input: iter_no})\n print(iter_no)\n self.map_plot(iter_no) \n self._trained = True\n gif.build_gif(imgs, saveto='exoplaneta005s6 .gif')",
"def create_random_point(x0,y0,distance): \n r = distance/ 111300\n u = np.random.uniform(0,1)\n v = np.random.uniform(0,1)\n w = r * np.sqrt(u)\n t = 2 * np.pi * v\n x = w * np.cos(t)\n x1 = x / np.cos(y0)\n y = w * np.sin(t)\n return (x0+x1, y0 +y)",
"def sample_pixel(i, j, image, labels, eps, beta):\n probs = []\n neighbour_pxls = neighbours(image, i, j)\n for label in labels:\n pixel = Pixel(i, j, label)\n pxl_cost = pixel_cost(pixel, image[i, j], eps)\n neighbours_costs = []\n for neighbour in neighbour_pxls:\n neighbours_costs += [edge_cost(pixel, neighbour, beta)]\n\n probs.append(np.exp(-pxl_cost - np.sum(neighbours_costs)))\n\n probs = probs / np.sum(probs)\n sampled_pixel = np.random.choice(labels, p=probs)\n return sampled_pixel",
"def generate_random_input(n, p, fileName):\n\n\tmax_x = 1000\n\tL = []\n\tH = []\n\tE = []\n\tx = [] #non negative x-coordinate of vertices\n\tfor i in range(n):\n\t\tL.append('location' + str(i))\n\t\trand = round(random.random() * max_x) + 1\n\t\twhile rand in x:\n\t\t\trand = round(random.random() * max_x) + 1\n\t\tx.append(rand)\n\tfor i in range(n):\n\t\tif random.random() < p and len(H) < n / 2: #vertex is a home with probability p\n\t\t\tH.append(i)\n\tfor i in range(n):\n\t\tE.append([])\n\t\tfor j in range(0, i):\n\t\t\tE[i].append(abs(x[i] - x[j])) #E[i][j] = absolute value of difference in x-coordinates of vertex i and vertex j as weight to ensure triangular inequality\n\t\tE[i].append('x') #no self-edges\n\tfor i in range(n):\n\t\tfor j in range(i+1, n):\n\t\t\tE[i].append(E[j][i])\n\tstarting_index = int((random.random() * (len(L) - 1)) // 1)\n\ts = L[starting_index]\n\tprint_input(L, E, H, s, fileName)",
"def sample_attack(self, eps, num_samples = 100):\n #Repeat x num_sample times\n n, d = self.x.shape\n x_ext = tf.keras.backend.repeat(self.x, num_samples)\n big_shape = tf.shape(x_ext)\n x_ext = tf.reshape(x_ext, [-1, d])\n n, num_classes = self.y.shape\n y_ext = tf.keras.backend.repeat(self.y, num_samples)\n y_ext = tf.reshape(y_ext, [-1, num_classes])\n\n #Perturb x_ext\n x_pert = x_ext + tf.random.uniform(tf.shape(x_ext), minval = -eps, maxval = eps)\n\n #Get loss for x_pert\n activations, predictions = model(x_pert, self.hidden_sizes, self.num_classes, self.sigma)\n loss_vector_ext = tf.nn.softmax_cross_entropy_with_logits(logits=predictions, labels=y_ext)\n\n #Reshape into desired shapes\n loss_vector_ext = tf.reshape(loss_vector_ext, [-1, num_samples])\n x_three_dim = tf.reshape(x_ext, big_shape)\n\n #Perform argmax to get indices\n best_indices = tf.argmax(loss_vector_ext, axis = 1, output_type = tf.dtypes.int32)\n n = tf.shape(self.x)[0]\n row_idx = tf.range(n)\n extract_idx = tf.stack([row_idx, best_indices], axis = 1)\n\n #Return X_adv, loss_adv, acc_adv\n x_adv = tf.gather_nd(x_three_dim, extract_idx)\n\n #Sample a bunch of points around X\n return x_adv",
"def sample_points(self, network, input_helpers, sample_along=None):\n pre_points = []\n post_points = []\n if not sample_along:\n x_samples = np.linspace(self.min_x, self.max_x, 25)\n y_samples = np.linspace(self.min_y, self.max_y, 25)\n for x, y in itertools.product(x_samples, y_samples):\n rho = np.sqrt(x**2 + y**2)\n theta = np.arctan2(y, x)\n inputs = np.array([rho, theta, self.intruder_heading,\n self.own_velocity, self.intruder_velocity])\n pre_points.append(inputs)\n label = np.argmax(\n network.compute(input_helpers[\"process\"](inputs))[0])\n post_points.append(label)\n else:\n low, high = sample_along\n n_samples = 15\n points = [low + (float(i) / (n_samples - 1))*(high - low)\n for i in range(n_samples)]\n post_points = np.argmax(network.compute(points), axis=1)\n pre_points = [input_helpers[\"reset\"](point) for point in points]\n return np.array([pre_points, post_points])",
"def generate_points(octrees, pyramids, exsum):\n return _C.ops.spc.GeneratePoints(octrees.contiguous(),\n pyramids.contiguous(),\n exsum.contiguous())",
"def __sample(self):\n # xvals are \"east\" vals and yvals are \"north\" vals on the map\n xvals = np.random.uniform(self._xmin, self._xmax, self._num_samples)\n yvals = np.random.uniform(self._ymin, self._ymax, self._num_samples)\n if self._target_altitude is None:\n zvals = np.random.uniform(self._zmin, self._zmax, self._num_samples)\n else:\n zvals = np.full(self._num_samples, self._target_altitude, dtype=float)\n \n samples = list(zip(xvals, yvals, zvals))\n\n pts = []\n for s in samples:\n in_collision = False\n idxs = list(self._obstacles_tree.query_radius(\n np.array([s[0], s[1]]).reshape(1, -1), r=self._max_poly_xy)[0])\n \n if len(idxs) > 0:\n for ind in idxs: \n p = self._polygons[int(ind)]\n if p.contains(s) and p.height >= s[2]:\n in_collision = True\n\n if not in_collision:\n pts.append(s)\n \n return pts",
"def generateIndividual(self, numElemMax, actualMap):\n\n numElem = np.random.randint(1,numElemMax+1)\n for i in range(numElem):\n s1 = np.random.randint(0, np.shape(actualMap.mapPoints)[0])\n s2 = np.random.randint(0, np.shape(actualMap.mapPoints)[1])\n while(actualMap.mapPoints[s1, s2].is_wall == True):\n # print(\"Probowano wstawic sprinklera na sciane (%d, %d)\" % (s1, s2))\n s1 = np.random.randint(0, np.shape(actualMap.mapPoints)[0])\n s2 = np.random.randint(0, np.shape(actualMap.mapPoints)[1])\n self.sprinklers.append(Sprinkler((s1, s2), self.radius))\n print(\"liczba sprinklerow: %d\" % (self.getSprinklersAmmount()))",
"def generate_base_points(num_points, domain_size, density_map=None,\n reflect=True):\n\n def _try_points(num_points, prob):\n prob = np.atleast_3d(prob)\n prob = np.array(prob)/np.amax(prob) # Ensure prob is normalized\n base_pts = []\n N = 0\n while N < num_points:\n pt = np.random.rand(3) # Generate a point\n # Test whether to keep it or not\n [indx, indy, indz] = np.floor(pt*np.shape(prob)).astype(int)\n if np.random.rand(1) <= prob[indx][indy][indz]:\n base_pts.append(pt)\n N += 1\n base_pts = np.array(base_pts)\n return base_pts\n\n if len(domain_size) == 1: # Spherical\n domain_size = np.array(domain_size)\n r = domain_size[0]\n if density_map is None:\n # Make an image of a sphere filled with ones and use _try_points\n density_map = np.ones([41, 41, 41])\n density_map[20, 20, 20] = 0\n density_map = spim.distance_transform_edt(density_map) < 20\n base_pts = _try_points(num_points, density_map)\n # Convert to spherical coordinates\n X, Y, Z = np.array(base_pts - [0.5, 0.5, 0.5]).T\n r = 2*np.sqrt(X**2 + Y**2 + Z**2)*domain_size[0]\n theta = 2*np.arctan(Y/X)\n phi = 2*np.arctan(np.sqrt(X**2 + Y**2)/Z)\n # Trim points outside the domain (from improper prob images)\n inds = r <= domain_size[0]\n [r, theta, phi] = [r[inds], theta[inds], phi[inds]]\n # Reflect base points across perimeter\n if reflect:\n r, theta, phi = reflect_base_points(np.vstack((r, theta, phi)),\n domain_size)\n # Convert to Cartesean coordinates\n X, Y, Z = from_spherical(r, theta, phi)\n base_pts = np.vstack([X, Y, Z]).T\n\n elif len(domain_size) == 2: # Cylindrical or Disk\n domain_size = np.array(domain_size)\n if density_map is None:\n density_map = np.ones([41, 41, 41])\n density_map[20, 20, :] = 0\n if domain_size[1] == 0: # Disk\n density_map = density_map[:, :, 0]\n density_map = spim.distance_transform_edt(density_map) < 20\n base_pts = _try_points(num_points, density_map)\n # Convert to cylindrical coordinates\n X, Y, Z = np.array(base_pts - [0.5, 0.5, 0]).T # Center on z-axis\n r = 2*np.sqrt(X**2 + Y**2)*domain_size[0]\n theta = 2*np.arctan(Y/X)\n z = Z*domain_size[1]\n # Trim points outside the domain (from improper prob images)\n inds = r <= domain_size[0]\n [r, theta, z] = [r[inds], theta[inds], z[inds]]\n inds = ~((z > domain_size[1]) + (z < 0))\n [r, theta, z] = [r[inds], theta[inds], z[inds]]\n if reflect:\n r, theta, z = reflect_base_points(np.vstack([r, theta, z]),\n domain_size)\n # Convert to Cartesean coordinates\n X, Y, Z = from_cylindrical(r, theta, z)\n base_pts = np.vstack([X, Y, Z]).T\n\n elif len(domain_size) == 3: # Cube or square\n if density_map is None:\n density_map = np.ones([41, 41, 41])\n if domain_size[2] == 0:\n density_map = density_map[:, :, 0]\n base_pts = _try_points(num_points, density_map)\n base_pts = base_pts*domain_size\n if reflect:\n base_pts = reflect_base_points(base_pts, domain_size)\n\n return base_pts",
"def _sample_points(self,\n spatial_dims: Sequence[int],\n gt_data_for_label: Dict[str, tf.Tensor],\n sampler: Union[point_sampler_lib.PointSampler,\n point_sampler_lib.PointSampler3D],\n params: Dict[str, Any] = None) -> Dict[str, tf.Tensor]:\n # Sample points for each data sample in a batch.\n output = []\n if self._num_point_dim == 2:\n sdf_map = gt_data_for_label['sdf_map']\n batch_size = sdf_map.shape[0]\n for i_sample in range(batch_size):\n output.append(sampler(spatial_dims, sdf_map[i_sample, ...]))\n elif self._num_point_dim == 3:\n batch_size = gt_data_for_label['grid_samples'].shape[0]\n for i_sample in range(batch_size):\n gt_data_for_label_i = {}\n for key in gt_data_for_label:\n gt_data_for_label_i[key] = gt_data_for_label[key][i_sample, ...]\n output.append(sampler(spatial_dims, gt_data_for_label_i, params))\n\n # Batch outputs and merge into one dict\n points_data = {}\n for key in output[0].keys():\n if key.startswith('points/'):\n points_data[key] = tf.stack([output[i][key] for i in range(batch_size)],\n axis=0)\n # Tensor with shape [batch_size, num_point, num_point_dim].\n\n points_data['points_sdf_gt/' + key[7:]] = tf.stack(\n [output[i]['points_sdf_gt/' + key[7:]] for i in range(batch_size)],\n axis=0) # Tensor with shape [batch_size, num_point, 1].\n\n elif key.startswith('points_symmetry/'):\n points_data[key] = tf.stack([output[i][key] for i in range(batch_size)],\n axis=0)\n # Tensor with shape [batch_size, num_point, num_point_dim].\n\n key_dist = key.replace('points_symmetry/', 'points_symmetry_dist/')\n points_data[key_dist] = tf.stack(\n [output[i][key_dist] for i in range(batch_size)],\n axis=0) # Tensor with shape [batch_size, num_point, 1].\n\n elif key == 'points_consistency':\n points_data[key] = tf.stack([output[i][key] for i in range(batch_size)],\n axis=0)\n # Tensor with shape [batch_size, num_point, num_point_dim].\n\n points_data[key + '_dist'] = tf.stack(\n [output[i][key + '_dist'] for i in range(batch_size)],\n axis=0) # Tensor with shape [batch_size, num_point, 1].\n\n elif key == 'mask_for_point':\n points_data[key] = tf.stack([output[i][key] for i in range(batch_size)],\n axis=0)\n # Tensor with shape [batch_size, dim_h * dim_w] or [batch_size,\n # dim_d * dim_h * dim_w]\n\n return points_data",
"def make_spots(self, spots):\n dummy_na_parameters = [0,0,1,0]\n if len(spots[0]) == 4:\n for x in spots:\n x.extend(dummy_na_parameters) #if the spots are missing NA information, add it\n # for x in spots:\n # x[3] = I_cal(x[3])\n spots = np.array(spots)\n assert spots.shape[1]==8, \"Spots are 8 elements long - your array must be (n,8)\"\n self.set_uniform(0, np.reshape(spots,spots.shape[0]*spots.shape[1]))\n self.set_uniform(1, spots.shape[0])",
"def update(self, round, npc, cheat_labels=None):\n with torch.no_grad():\n batch_size = 100\n ANs_num = self.get_ANs_num(round)\n features = npc.memory\n\n for start in range(0, self.samples_num, batch_size):\n end = start + batch_size\n end = min(end, self.samples_num)\n\n preds = F.softmax(npc(features[start:end], None), 1)\n self.entropy[start:end] = -(preds * preds.log()).sum(1)\n\n # get the anchor list and instance list according to the computed\n # entropy\n self.anchor_indexes = self.entropy.topk(ANs_num, largest=False)[1]\n self.instance_indexes = (torch.ones_like(self.position)\n .scatter_(0, self.anchor_indexes, 0)\n .nonzero().view(-1))\n anchor_entropy = self.entropy.index_select(0, self.anchor_indexes)\n instance_entropy = self.entropy.index_select(0, self.instance_indexes)\n\n # get position\n # if the anchor sample x whose index is i while position is j, then\n # sample x_i is the j-th anchor sample at current round\n # if the instance sample x whose index is i while position is j, then\n # sample x_i is the (-j-1)-th instance sample at current round\n\n instance_cnt = 0\n for i in range(self.samples_num):\n\n # for anchor samples\n if (i == self.anchor_indexes).any():\n self.position[i] = (self.anchor_indexes == i).max(0)[1]\n continue\n # for instance samples\n instance_cnt -= 1\n self.position[i] = instance_cnt\n\n anchor_features = features.index_select(0, self.anchor_indexes)\n self.neighbours = (torch.LongTensor(ANs_num, self.ANs_size)\n .to('cuda'))\n for start in range(0, ANs_num, batch_size):\n\n end = start + batch_size\n end = min(end, ANs_num)\n\n sims = torch.mm(anchor_features[start:end], features.t())\n sims.scatter_(1, self.anchor_indexes[start:end].view(-1, 1), -1.)\n _, self.neighbours[start:end] = (\n sims.topk(self.ANs_size, largest=True, dim=1))\n\n # if cheat labels is provided, then compute consistency\n if cheat_labels is None:\n return 0.\n anchor_label = cheat_labels.index_select(0, self.anchor_indexes)\n neighbour_label = cheat_labels.index_select(0,\n self.neighbours.view(-1)).view_as(self.neighbours)\n self.consistency = ((anchor_label.view(-1, 1) == neighbour_label)\n .float().mean())\n\n return self.consistency",
"def generate_curves(self, seed=None):\n num_context = tf.random_uniform(\n shape=[], minval=3, maxval=self._max_num_context, dtype=tf.int32, seed=seed)\n\n # If we are testing we want to have more targets and have them evenly\n # distributed in order to plot the function.\n if self._testing:\n num_target = 400\n num_total_points = num_target\n x_values = tf.tile(\n tf.expand_dims(tf.range(-2., 2., 1. / 100, dtype=tf.float32), axis=0),\n [self._batch_size, 1])\n x_values = tf.expand_dims(x_values, axis=-1)\n # During training the number of target points and their x-positions are\n # selected at random\n else:\n num_target = tf.random_uniform(shape=(), minval=0, \n maxval=self._max_num_context - num_context,\n dtype=tf.int32, seed=seed)\n num_total_points = num_context + num_target\n x_values = tf.random_uniform(\n [self._batch_size, num_total_points, self._x_size], -2, 2, seed=seed)\n \n def w(x, x_min=-2, x_max=2):\n weight_vals = tf.stack([ [1/(i+1) if j <= i else 0 for j in range(self._num_gammas)] for i in range(self._num_gammas)])\n \n bucketsize = (x_max-x_min)/self._num_gammas\n buckets = (x-x_min)/bucketsize\n buckets = tf.reshape(buckets,[-1])\n \n mapped = tf.expand_dims(tf.expand_dims(tf.map_fn(lambda x: weight_vals[tf.cast(x,tf.int32)], buckets),-2),-2)\n\n return mapped \n\n # Set kernel parameters\n # Either choose a set of random parameters for the mini-batch\n if self._random_kernel_parameters:\n gammas = 3.14*tf.random_uniform([self._num_gammas, self._batch_size], 0.1, 2)\n gammas = tf.expand_dims(tf.expand_dims(gammas,-1),-1)\n # Or use the same fixed parameters for all mini-batches\n else:\n gammas = 3.14*tf.linspace(0.1,2,self._num_gammas)\n print(gammas)\n #gammas = tf.broadcast_to(gammas,[self._num_gammas, self._batch_size])\n gammas = tf.reshape(tf.tile(gammas,tf.constant([self._batch_size])),[self._num_gammas, self._batch_size])\n gammas = tf.expand_dims(tf.expand_dims(gammas,-1),-1)\n\n weights = w(x_values)\n \n weights = tf.reshape(weights, [self._batch_size, num_total_points,self._x_size,self._num_gammas])\n weights = tf.transpose(weights,[3,0,1,2])\n \n gammas = tf.broadcast_to(gammas,[self._num_gammas, self._batch_size, num_total_points, self._x_size])\n x_values_bcast = tf.expand_dims(x_values, 0)\n x_values_bcast = tf.broadcast_to(x_values_bcast,[self._num_gammas, self._batch_size, num_total_points, self._x_size])\n \n out = tf.math.multiply(gammas,x_values_bcast)\n out = tf.math.multiply(weights,tf.sin(out))\n out = tf.reduce_sum(out,axis=0)\n \n y_values = out\n y_values += tf.random.normal((self._batch_size,num_total_points,self._y_size),stddev = self._epsilon, seed=seed)\n\n if self._testing:\n # Select the targets\n target_x = x_values\n target_y = y_values\n\n # Select the observations\n idx = tf.random_shuffle(tf.range(num_target), seed=seed)\n context_x = tf.gather(x_values, idx[:num_context], axis=1)\n context_y = tf.gather(y_values, idx[:num_context], axis=1)\n\n else:\n # Select the targets which will consist of the context points as well as\n # some new target points\n target_x = x_values[:, :num_target + num_context, :]\n target_y = y_values[:, :num_target + num_context, :]\n\n # Select the observations\n context_x = x_values[:, :num_context, :]\n context_y = y_values[:, :num_context, :]\n\n query = ((context_x, context_y), target_x)\n\n return NPRegressionDescription(\n query=query,\n target_y=target_y,\n num_total_points=tf.shape(target_x)[1],\n num_context_points=num_context)",
"def create_point_cloud(self):\n pixels = []\n colors = []\n my_pixels = []\n for j in range(self.height):\n for i in range(self.width):\n depth = self.depth[j, i]\n pixels.append(\n [i * depth, j * depth, depth]\n )\n my_pixels.append(\n [i, j, 1]\n )\n # make rgb with flip()\n colors.append(np.flip(self.bgr[j, i, :]))\n # colors.append(self.bgr[j, i, :])\n self.my_pixels = my_pixels\n pixels = np.array(pixels)\n\n # project pixels to camera space\n self.xyz_points = self.intrinsics_inv @ np.transpose(pixels)\n self.color_points = colors\n\n # now add 1s to the points for homogenous coordinates\n num_points = self.get_num_xyz_points()\n ones = np.ones((1, num_points))\n self.xyzw_points = np.concatenate((self.xyz_points, ones), axis=0)\n\n self.scene = None\n self.camera_pose = None\n self.nm = None\n self.nl = None\n self.nc = None\n self.create_mesh()",
"def make_points(self, npts, xbest, sigma, subset=None, proj_fun=None,\n merit=candidate_merit_weighted_distance):\n\n new_points = np.zeros((npts, self.data.dim))\n\n # Figure out what we need to generate\n npoints = np.zeros((self.nstrats,), dtype=int)\n for i in range(npts):\n npoints[self.cycle[self.current_strat]] += 1\n self.current_strat = (self.current_strat + 1) % len(self.cycle)\n\n # Now generate the points from one strategy at the time\n count = 0\n for i in range(self.nstrats):\n if npoints[i] > 0:\n new_points[count:count+npoints[i], :] = \\\n self.sampling_strategies[i].make_points(npts=npoints[i], xbest=xbest,\n sigma=sigma, subset=subset,\n proj_fun=proj_fun,\n merit=merit)\n\n count += npoints[i]\n # Update list of proposed points\n for j in range(self.nstrats):\n if j != i:\n self.sampling_strategies[j].proposed_points = \\\n self.sampling_strategies[i].proposed_points\n\n return new_points",
"def gendata(params,xmin,xmax,npts=4000):\n F = lorentzian.ForwardFactory\n def gensample(F, xmin, xmax):\n from numpy import arange\n import random\n a = arange(xmin, xmax, (xmax-xmin)/200.)\n ymin = 0\n ymax = F(a).max()\n while 1:\n t1 = random.random() * (xmax-xmin) + xmin\n t2 = random.random() * (ymax-ymin) + ymin\n t3 = F(t1)\n if t2 < t3:\n return t1\n fwd = F(params)\n return array([gensample(fwd, xmin,xmax) for i in xrange(npts)])"
] | [
"0.5980552",
"0.5892994",
"0.5780709",
"0.5760391",
"0.5661503",
"0.56367826",
"0.5512635",
"0.5436439",
"0.5356636",
"0.53310627",
"0.53305316",
"0.53082323",
"0.52768755",
"0.5271147",
"0.52537584",
"0.52499676",
"0.5233892",
"0.5219804",
"0.5211902",
"0.52063584",
"0.5202607",
"0.5202411",
"0.51892394",
"0.51585954",
"0.51514417",
"0.5132772",
"0.5129288",
"0.512153",
"0.5117197",
"0.5114698",
"0.5095789",
"0.5095664",
"0.5089035",
"0.5076398",
"0.5076369",
"0.50624967",
"0.5056477",
"0.50489223",
"0.5042242",
"0.502434",
"0.50202554",
"0.5012801",
"0.50125843",
"0.50042033",
"0.5004163",
"0.49987015",
"0.49978894",
"0.49968487",
"0.4985377",
"0.4982058",
"0.49813238",
"0.4973179",
"0.49708226",
"0.49647775",
"0.4958927",
"0.49588925",
"0.4955938",
"0.4955599",
"0.49444616",
"0.49383244",
"0.49286908",
"0.49200046",
"0.4914115",
"0.4900483",
"0.48971063",
"0.48961663",
"0.48752",
"0.48675317",
"0.4865516",
"0.4863454",
"0.486245",
"0.48593456",
"0.4859031",
"0.48561105",
"0.48553556",
"0.48543444",
"0.4854332",
"0.48539656",
"0.48327172",
"0.48262814",
"0.4823196",
"0.4823196",
"0.48214388",
"0.4818872",
"0.48184532",
"0.4817894",
"0.48159632",
"0.48139632",
"0.48112342",
"0.4809832",
"0.48095345",
"0.4808452",
"0.48082215",
"0.4807051",
"0.48000598",
"0.47895917",
"0.47880584",
"0.47855303",
"0.47763914",
"0.47719222"
] | 0.5625842 | 6 |
r"""Compute the Einstein radius for a given isotropic velocity dispersion assuming a singular isothermal sphere (SIS) mass profile | def approximate_theta_E_for_SIS(vel_disp_iso, z_lens, z_src, cosmo):
lens_cosmo = LensCosmo(z_lens, z_src, cosmo=cosmo)
theta_E_SIS = lens_cosmo.sis_sigma_v2theta_E(vel_disp_iso)
return theta_E_SIS | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _calculate_residual_sphere(parameters, x_values, y_values, z_values):\n #extract the parameters\n x_centre, y_centre, z_centre, radius = parameters\n\n #use numpy's sqrt function here, which works by element on arrays\n distance_from_centre = numpy.sqrt((x_values - x_centre)**2 +\n (y_values - y_centre)**2 +\n (z_values - z_centre)**2)\n\n return distance_from_centre - radius",
"def sphere_sre(solution):\n a = 0\n bias = 0.2\n x = solution.get_x()\n x1 = x[:10]\n x2 = x[10:]\n value1 = sum([(i-bias)*(i-bias) for i in x1])\n value2 = 1/len(x) * sum([(i-bias)*(i-bias) for i in x2])\n return value1 + value2",
"def ISE_loop(mu, s, DIMENSION=2):\n total = 0\n for i in range(len(mu)):\n for j in range(len(mu)):\n dist_sq = np.sum((mu[i]-mu[j])**2)\n total += (i != j)*(1/(s*s*2*np.pi))**(0.5*DIMENSION)*np.exp(-dist_sq/(2*s*s))\n return (2*total/len(mu)/(len(mu)-1))",
"def effective_radius(self, n):\n\n er2 = 5.0 * self.sa / n\n er = np.sqrt(er2)\n\n return er",
"def estimate_radius(self):\n red = self.T[:,:,0] # empirically, the most reliable channel\n\n eye_radius = red.sum(axis=1).max() / 2\n return eye_radius",
"def getSphereRadius(self):\n return 1.5",
"def Iq(q, second_moment, adsorbed_amount, density_shell, radius,\n volfraction, sld_shell, sld_solvent):\n with errstate(divide='ignore'):\n aa = ((sld_shell - sld_solvent)/density_shell * adsorbed_amount) / q\n bb = q * second_moment\n #scale by 10^-2 for units conversion to cm^-1\n inten = 6.0e-02 * pi * volfraction * aa**2 * exp(-bb**2) / radius\n return inten",
"def get_circumsphere(S):\n\n U = S[1:] - S[0]\n B = numpy.sqrt(numpy.square(U).sum(axis=1))\n U /= B[:, None]\n B /= 2\n C = numpy.dot(numpy.linalg.solve(numpy.inner(U, U), B), U)\n r2 = numpy.square(C).sum()\n C += S[0]\n return C, r2",
"def sphere(\n network,\n pore_diameter='pore.diameter'\n):\n return 4/3*_pi*(network[pore_diameter]/2)**3",
"def ellipse_ellipticity(S):\n return 1/2 * np.arcsin(S[..., 3]/S[..., 0])",
"def asphericity(Rnm_eg):\n num = (Rnm_eg[0] - Rnm_eg[2])**2 + (Rnm_eg[1] - Rnm_eg[2])**2 + (Rnm_eg[0] - Rnm_eg[1])**2\n dem = 2*(Rnm_eg[0] + Rnm_eg[1] + Rnm_eg[2])**2\n Asphere = num/dem\n return Asphere",
"def asphericity(Rnm_eg):\n num = (Rnm_eg[0] - Rnm_eg[2])**2 + (Rnm_eg[1] - Rnm_eg[2])**2 + (Rnm_eg[0] - Rnm_eg[1])**2\n dem = 2*(Rnm_eg[0] + Rnm_eg[1] + Rnm_eg[2])**2\n Asphere = num/dem\n return Asphere",
"def velocity_dispersion_from(\r\n self, redshift_0: float, redshift_1: float, einstein_radius: float\r\n ) -> float:\r\n const = constants.c.to(\"kpc / s\")\r\n\r\n angular_diameter_distance_to_redshift_0_kpc = (\r\n self.angular_diameter_distance_to_earth_in_kpc_from(redshift=redshift_1)\r\n )\r\n\r\n angular_diameter_distance_to_redshift_1_kpc = (\r\n self.angular_diameter_distance_to_earth_in_kpc_from(redshift=redshift_1)\r\n )\r\n\r\n angular_diameter_distance_between_redshifts_kpc = (\r\n self.angular_diameter_distance_between_redshifts_in_kpc_from(\r\n redshift_0=redshift_0, redshift_1=redshift_1\r\n )\r\n )\r\n\r\n kpc_per_arcsec = self.kpc_per_arcsec_from(redshift=redshift_0)\r\n\r\n einstein_radius_kpc = einstein_radius * kpc_per_arcsec\r\n\r\n velocity_dispersion_kpc = const * np.sqrt(\r\n (einstein_radius_kpc * angular_diameter_distance_to_redshift_1_kpc)\r\n / (\r\n 4\r\n * np.pi\r\n * angular_diameter_distance_to_redshift_0_kpc\r\n * angular_diameter_distance_between_redshifts_kpc\r\n )\r\n )\r\n\r\n return velocity_dispersion_kpc.to(\"km/s\").value",
"def approx_sun_position_ECI(MJD):\n import math\n JD = MJD + 2400000.5\n OplusW = 282.94\n T = (JD - 2451545.0) / 36525\n\n M = math.radians(357.5256 + 35999.049 * T)\n\n long = math.radians(OplusW + math.degrees(M) + 6892 / 3600 * math.sin(M) + 72 / 3600 * math.sin(2*M))\n r_mag = (149.619 - 2.499 * math.cos(M) - 0.021 * math.cos(2*M)) * 10**6\n\n epsilon = math.radians(23.43929111)\n r_vec = (r_mag * math.cos(long), r_mag * math.sin(long) * math.cos(epsilon), r_mag * math.sin(long) * math.sin(epsilon))\n\n return r_vec",
"def sphere_cart()\ndef simulator(nparticles, ninteractions, vacradius, vesradius):\n for i in range(nparticles):\n #neutron = neutron_func(i)\n energy = 14E6\n phi = calc_phi()\n theta = calc_theta()\n xneut = 0\n yneut = 0\n zneut = 0\n d = collision_distance(phi, theta, xneut, zneut)\n r = -np.log(random.random(seed))/sigma_t(energy)\n j = 0\n while (j <= ninteractions)\n xneut = sphere_cart(scatter(energy, A)[0:2])",
"def radial_distance(x_i, y_i, z_i, x_j, y_j, z_j, box_length):\n delta_x = min(((x_i - x_j) % box_length), ((x_j - x_i) % box_length))\n delta_y = min(((y_i - y_j) % box_length), ((y_j - y_i) % box_length))\n delta_z = min(((z_i - z_j) % box_length), ((z_j - z_i) % box_length))\n return np.sqrt(delta_x ** 2 + delta_y ** 2 + delta_z ** 2)",
"def calc_length_distortion_on_ellipsoid(self, lon, lat):\n\n # get the subgrid\n sg, _, _ = self.lonlat2xy(lon, lat)\n\n lon0 = self.subgrids[str(sg)].core.projection.osr_spref.GetProjParm('central_meridian')\n lat0 = self.subgrids[str(sg)].core.projection.osr_spref.GetProjParm('latitude_of_origin')\n\n # get spherical distance and azimuth between projection centre and point of interest\n geod = Geodesic.WGS84\n gi = geod.Inverse(lat0, lon0, lat, lon)\n c1 = gi['s12']\n az1 = gi['azi1']\n\n # apply equation for distortion in direction perpendicular to the radius, k:\n # k = c/geod.a / np.sin(c/geod.a)\n k = c1 / geod.a / np.sin(c1 / geod.a)\n\n return k",
"def big_psi(sun_pos, sat_3d_pos):\n return np.arccos(np.dot(sun_pos.T, sat_3d_pos) / (vector_magnitude(sun_pos[0], sun_pos[1], sun_pos[2]) * vector_magnitude(sat_3d_pos[0], sat_3d_pos[1], sat_3d_pos[2])))",
"def Keldysh_Parameter(omega,Uion,E):\n\treturn omega*np.sqrt(2.0*Uion)/E",
"def find_radius(mass,delta_m,eta,xi,mue,pp_factor):\n\n #range of radii; reason in detail under step 9 of report\n r_low = 0.01*Rsun # MKS\n r_high = 3*Rsun # MKS\n \n radius = brentq(lum_difference, r_low, r_high, xtol=1.0e-4, args = (mass,delta_m,eta,xi,mue,pp_factor))\n return radius",
"def calcul_v_sphere(r):\n volume = 4/3 * math.pi * (r ** 3)\n return volume",
"def welch_stetson_I(magnitudes, errors):\n num_obs = magnitudes.shape[0]\n\n if num_obs % 2 == 1:\n magnitudes = magnitudes[:-1]\n errors = errors[:-1]\n num_obs -= 1\n\n evens = np.arange(0, num_obs, 2)\n odds = np.arange(1, num_obs, 2)\n\n b = magnitudes[evens]\n v = magnitudes[odds]\n\n b_err = magnitudes[evens]\n v_err = magnitudes[odds]\n\n mean = np.mean(magnitudes)\n\n d = (b - mean) / b_err\n e = (v - mean) / v_err\n stetson_I = np.sqrt(1 / (num_obs * (num_obs - 1))) * np.sum(d * e)\n\n return stetson_I",
"def wheels_radius_INV(ds):\n wr = ds[0]\n wl = ds[1]\n V = ds[2]\n Nsample = len(wr)\n H = np.zeros((Nsample,2))\n H[:,0] = wr*0.5\n H[:,1] = wl*0.5 \n X = np.dot(np.linalg.pinv(H),V) #X=rayons estimés\n Rl_est, Rr_est = X[1], X[0]\n return Rr_est, Rl_est",
"def boringInterlude (radiusIn):\n\n\n import math\n volIn = (4/3) * math.pi * (radiusIn ** 3)\n vol = volIn/ 1728\n return vol",
"def eggleton_roche_radius(self):\n return self.eggleton_roche_over_separation() * self.separation()",
"def calculate_esi_index(radius, mass, temperature):\n density = Calculator.calculate_average_density(radius, mass)\n escape_velocity = Calculator.calculate_escape_velocity(radius, mass)\n\n factors = [\n (radius, 6.3781e6, 0.57/4),\n (density, 5513, 1.07/4),\n (escape_velocity, 11200, 0.70/4),\n (temperature, 288, 5.58/4)\n ]\n res = [(1 - abs(x - y)/abs(x + y)) ** z for x, y, z in factors]\n return functools.reduce(operator.mul, res)",
"def compute_in_radius(self, boids_in_radius):\r\n \r\n avg_velocity = Vector(*np.zeros(2))\r\n center_of_mass = Vector(*np.zeros(2))\r\n avg_vector = Vector(*np.zeros(2))\r\n total = 0\r\n for boid in boids_in_radius:\r\n avg_velocity += boid.velocity # calculating average direction \r\n center_of_mass += boid.position # calculating center of mass\r\n total += 1\r\n distance = np.linalg.norm(boid.position - self.position)\r\n \r\n if self.position != boid.position:\r\n diff = self.position - boid.position\r\n diff /= distance # scaling with the distance in order to avoid closer boids with greater force \r\n avg_vector += diff # calculating repulsive force vector\r\n \r\n return avg_velocity, center_of_mass, avg_vector, total",
"def hardSphereRadius(self):\n\n return self.__hardSphereRadius",
"def _calculate_anisoplatanism_error(self):\n\n self.sigma_anisoplatanism = np.sqrt((self.science_object_separation/self.isoplanatic_angle)**(5/3))*(self.parameter_wavelength /(2*np.pi))",
"def _template_sphere_disc(dim, outer_radius, inner_radius):\n rmax = np.array(outer_radius, ndmin=1)\n rmin = np.array(inner_radius, ndmin=1)\n ind = 2 * rmax - 1\n coord = np.indices((ind * np.ones(dim, dtype=int)))\n coord = coord - (ind - 1)/2\n x = coord[0, :]\n y = coord[1, :]\n if dim == 2:\n img = (x ** 2 + y ** 2) < rmax ** 2\n elif dim == 3:\n z = coord[2, :]\n img = (x ** 2 + y ** 2 + z ** 2) < rmax ** 2\n if rmin[0] != 0:\n if dim == 2:\n img_min = (x ** 2 + y ** 2) > rmin ** 2\n elif dim == 3:\n img_min = (x ** 2 + y ** 2 + z ** 2) > rmin ** 2\n img = img * img_min\n return img",
"def Radius(self, *args):\n return _Bnd.Bnd_Sphere_Radius(self, *args)",
"def wpe(nev):\n return np.sqrt(nev*eV2J**2/(me*epsilon));",
"def inradius(self) -> npt.NDArray[np.float_]:\n return dist(self.center, cast(Segment, self.edges[0]).midpoint)",
"def sphere(indiv):\n return sum([ x ** 2 for x in indiv])",
"def irs_method(state):\n\n # First, importing all variables from the dictionary 'state'\n theta_ein2cm = state['theta_ein2cm']\n beta_boundary = state['beta_boundary']\n beta_res = state['beta_res']\n epsilon = state['epsilon']\n mu_h = state['mu_h']\n mu_v = state['mu_v']\n m = state['m']\n zeta = state['zeta']\n max_memory = state['max_memory']\n rays_per_pixel = state['rays_per_pixel']\n\n pixel2cm = theta_ein2cm * beta_boundary / beta_res # size of 1 pixel in cm in the source plane\n print('The physical size of 1 pixel is ' + str(beta_boundary / beta_res) + ' Einstein radii\\nor ' + str(\n np.format_float_scientific(pixel2cm, 2)) + ' cm in the source plane\\n')\n\n theta_boundaries = [epsilon * mu_h * beta_boundary / 2,\n epsilon * mu_v * beta_boundary / 2]\n # The number of images to draw in IRS method, assuming an ellipse in the image plane\n num_of_img = int((beta_res * epsilon) ** 2 * mu_v * mu_h * rays_per_pixel)\n print('A total of ' + str(num_of_img) + ' images for IRS method')\n state['num_of_img'] = num_of_img\n print(str(num_of_img / beta_res ** 2) + ' rays per source plane pixels')\n # The area in (Einstein-radii)^2 that each ray uniquely occupies\n s_ray = (epsilon ** 2 * mu_h * mu_v * beta_boundary ** 2) / num_of_img\n\n l_tmp = int(max_memory / m.shape[0] * 10 ** 9 / 8) # the maximum number of images to vector-compute\n n_runs = max(int(num_of_img / l_tmp), 1) # the number of sub arrays to vector-compute\n print('Max memory for array: ' + str(l_tmp * m.shape[0] * 8 / 10 ** 9) + 'GB')\n mu_grid = np.zeros((beta_res, beta_res)) # this will save the total number of rays per cell in the source plane\n start_time = time.time()\n theta = []\n beta = []\n num_cores = multiprocessing.cpu_count()\n print(str(num_cores) + ' active CPU cores')\n # starting the parallel routine, the variable mu_grid_temp_array is just a placeholder.\n mu_grid_temp_array = Parallel(n_jobs=num_cores, require='sharedmem')\\\n (delayed(parallel_irs)(i,mu_grid,l_tmp,n_runs,s_ray,theta_boundaries,start_time,state) for i in range(n_runs))\n\n if n_runs * l_tmp < num_of_img: # if some values are left\n # Drawing images locations\n theta = random_image_draw(int(num_of_img - n_runs * l_tmp), theta_boundaries[0], theta_boundaries[1])\n # Calculating locations of sources and corresponding magnitudes\n beta = af.img2src(theta, m, zeta, state)\n # Binning sources magnification\n beta_grid_h, beta_grid_v, mu_grid_temp = af.mag_binning(beta, s_ray, beta_boundary, beta_res)\n mu_grid += mu_grid_temp\n print('Finished shooting in ' + str(time.time() - start_time) + 's')\n else:\n print('Finished shooting in ' + str(time.time() - start_time) + 's')\n beta = np.ones(2, 2) # Just so that the next line can run smoothly and return beta_grid_h and beta_grid_v\n beta_grid_h, beta_grid_v, mu_grid_temp = af.mag_binning(beta, s_ray, beta_boundary, beta_res)\n\n return beta_grid_h, beta_grid_v, mu_grid",
"def sphvol(r):\n return (4./3.)*np.pi*(r**3.)",
"def J_over_JUV_outside_slab(tau, tau_SF):\n # if not np.all(np.abs(tau) >= 0.5*tau_SF):\n # raise ValueError(\"optical depth must be larger than or equal to tau_SF/2\")\n \n return 0.5/tau_SF*(expn(2,tau - 0.5*tau_SF) - expn(2,tau + 0.5*tau_SF))",
"def getHardSphereRadius(self):\n\n if self.__hardSphereRadius is not None:\n return self.__hardSphereRadius\n return self.__scatteringRadius",
"def circum(radius, places):\n return 2 * pi * radius",
"def surfaceIntSphere(r: float) -> float:\n return 4.0 * np.pi * r * r",
"def sphrad(vol):\n return (3.*vol/(4.*np.pi))**(1./3.)",
"def M_s_evol(redshift, richness):\n\treturn function_eq10( redshift, richness, M_s_z[0], M_s_z[1], M_s_z[2] )",
"def _vrms2(x, y, inc_deg,\n surf_lum, sigma_lum, qobs_lum,\n surf_pot, sigma_pot, qobs_pot,\n beta, tensor, sigmaPsf, normPsf,\n pixSize, pixAng, step, nrad, nang):\n # Axisymmetric deprojection of both luminous and total mass.\n # See equation (12)-(14) of Cappellari (2008)\n #\n inc = np.radians(inc_deg)\n\n qintr_lum = qobs_lum**2 - np.cos(inc)**2\n if np.any(qintr_lum <= 0):\n raise RuntimeError('Inclination too low q < 0')\n qintr_lum = np.sqrt(qintr_lum)/np.sin(inc)\n if np.any(qintr_lum < 0.05):\n raise RuntimeError('q < 0.05 components')\n dens_lum = surf_lum*qobs_lum / (sigma_lum*qintr_lum*np.sqrt(2*np.pi))\n\n qintr_pot = qobs_pot**2 - np.cos(inc)**2\n if np.any(qintr_pot <= 0):\n raise RuntimeError('Inclination too low q < 0')\n qintr_pot = np.sqrt(qintr_pot)/np.sin(inc)\n if np.any(qintr_pot < 0.05):\n raise RuntimeError('q < 0.05 components')\n dens_pot = surf_pot*qobs_pot / (sigma_pot*qintr_pot*np.sqrt(2*np.pi))\n\n # Define parameters of polar grid for interpolation\n #\n w = sigma_lum < np.max(np.abs(x)) # Characteristic MGE axial ratio in observed range\n\n if w.sum() < 3:\n qmed = np.median(qobs_lum)\n else:\n qmed = np.median(qobs_lum[w])\n\n rell = np.sqrt(x**2 + (y/qmed)**2) # Elliptical radius of input (x, y)\n\n psfConvolution = (np.max(sigmaPsf) > 0) and (pixSize > 0)\n\n # Kernel step is 1/4 of largest value between sigma(min) and 1/2 pixel side.\n # Kernel half size is the sum of 3*sigma(max) and 1/2 pixel diagonal.\n #\n if (nrad*nang > x.size) and (not psfConvolution): # Just calculate values\n\n xPol = x\n yPol = y\n\n else: # Interpolate values on polar grid\n\n if psfConvolution: # PSF convolution\n if step == 0:\n step = max(pixSize/2., np.min(sigmaPsf))/4.\n mx = 3*np.max(sigmaPsf) + pixSize/np.sqrt(2)\n else: # No convolution\n step = np.min(rell.clip(1)) # Minimum radius of 1pc\n mx = 0\n\n # Make linear grid in log of elliptical radius RAD and eccentric anomaly ANG\n # See Appendix A\n #\n rmax = np.max(rell) + mx # Major axis of ellipse containing all data + convolution\n logRad = np.linspace(np.log(step), np.log(rmax), nrad) # Linear grid in np.log(rell)\n ang = np.linspace(0, np.pi/2, nang) # Linear grid in eccentric anomaly\n radGrid, angGrid = map(np.ravel, np.meshgrid(np.exp(logRad), ang))\n xPol = radGrid*np.cos(angGrid)\n yPol = radGrid*np.sin(angGrid) * qmed\n\n # The model Vrms computation is only performed on the polar grid\n # which is then used to interpolate the values at any other location\n #\n wm2Pol = np.empty_like(xPol)\n mgePol = np.empty_like(xPol)\n for j in range(xPol.size):\n wm2Pol[j] = quadva(_integrand, [0., 1.],\n args=(dens_lum, sigma_lum, qintr_lum,\n dens_pot, sigma_pot, qintr_pot,\n xPol[j], yPol[j], inc, beta, tensor))[0]\n mgePol[j] = np.sum(surf_lum * np.exp(-0.5/sigma_lum**2 *\n (xPol[j]**2 + (yPol[j]/qobs_lum)**2)))\n\n\n if psfConvolution: # PSF convolution\n\n nx = np.ceil(rmax/step)\n ny = np.ceil(rmax*qmed/step)\n x1 = np.linspace(-nx, nx, 2*nx)*step\n y1 = np.linspace(-ny, ny, 2*ny)*step\n xCar, yCar = np.meshgrid(x1, y1) # Cartesian grid for convolution\n\n # Interpolate MGE model and Vrms over cartesian grid\n #\n r1 = 0.5*np.log(xCar**2 + (yCar/qmed)**2) # Log elliptical radius of cartesian grid\n e1 = np.arctan2(np.abs(yCar/qmed), np.abs(xCar)) # Eccentric anomaly of cartesian grid\n\n wm2Car = bilinear_interpolate(logRad, ang, wm2Pol.reshape(nang, nrad), r1, e1)\n mgeCar = bilinear_interpolate(logRad, ang, mgePol.reshape(nang, nrad), r1, e1)\n\n nk = np.ceil(mx/step)\n kgrid = np.linspace(-nk, nk, 
2*nk)*step\n xgrid, ygrid = np.meshgrid(kgrid, kgrid) # Kernel is square\n if pixAng != 0:\n xgrid, ygrid = rotate_points(xgrid, ygrid, pixAng)\n\n # Compute kernel with equation (A6) of Cappellari (2008).\n # Normaliztion is irrelevant here as it cancels out.\n #\n kernel = np.zeros_like(xgrid)\n dx = pixSize/2\n sp = np.sqrt(2)*sigmaPsf\n for j in range(len(sigmaPsf)):\n kernel += normPsf[j] \\\n * (special.erf((dx-xgrid)/sp[j]) + special.erf((dx+xgrid)/sp[j])) \\\n * (special.erf((dx-ygrid)/sp[j]) + special.erf((dx+ygrid)/sp[j]))\n kernel /= np.sum(kernel)\n\n # Seeing and aperture convolution with equation (A3)\n #\n muCar = signal.fftconvolve(wm2Car, kernel, mode='same') \\\n / signal.fftconvolve(mgeCar, kernel, mode='same')\n\n # Interpolate convolved image at observed apertures.\n # Aperture integration was already included in the kernel.\n #\n mu = bilinear_interpolate(x1, y1, muCar, x, y)\n\n else: # No PSF convolution\n\n muPol = wm2Pol/mgePol\n\n if nrad*nang > x.size: # Just returns values\n mu = muPol\n else: # Interpolate values\n r1 = 0.5*np.log(x**2 + (y/qmed)**2) # Log elliptical radius of input (x,y)\n e1 = np.arctan2(np.abs(y/qmed), np.abs(x)) # Eccentric anomaly of input (x,y)\n mu = bilinear_interpolate(logRad, ang, muPol.reshape(nang, nrad), r1, e1)\n\n return mu",
"def radius_square(self):\n try: \n return self._radius_2\n except AttributeError:\n center = self.center()\n self._radius_2 = max( (v.vector() - center).dot_product(\n v.vector() - center) for v in\n self.vertex_generator() )\n return self._radius_2",
"def fluxonium_potential(self):\n return -0.5*(self.Ej * ((1+self.d)*cos(self.phis - 2. * pi * self.phi - 2. * pi * self.phiL) + (1-self.d)*cos(self.phis-2. * pi * self.phiL))) + self.El/2. * (self.phis) ** 2\n #return -0.5*(self.Ej * cos(self.phis - 2. * pi * self.phi) + self.Ej * cos(self.phis)) + self.El/2. * (self.phis-self.phiL)** 2",
"def _c_numeric(self, rij):\n radial_fun = np.zeros((self.lmax+1, self.nmax))\n radial_fun[0,1] = 1.0\n\n #Get local references to these variables so that we don't need `self`\n #all over in the overbasis calculation below.\n alpha = self.alpha\n rb = self.rb \n for n in range(1, self.nmax+1):\n argbess = 2*alpha*rb[n-1]*rij\n ep = np.exp(-alpha*(rij + rb[n-1])**2)\n em = np.exp(-alpha*(rij - rb[n-1])**2)\n #In the loops below, msb prefix refers to modified spherical bessel.\n for l in range(self.lmax+1):\n if l == 0:\n if argbess == 0.0:\n msb_fi_ki_l = np.exp(-alpha*(rb[n-1]**2 + rij**2))\n else:\n #msb_fi_ki_lm = cosh(arg_bess)/arg_bess\n #msb_fi_ki_l = sinh(arg_bess)/arg_bess\n msb_fi_ki_lm = 0.5 * (em + ep) / argbess\n msb_fi_ki_l = 0.5 * (em - ep) / argbess\n else:\n if argbess == 0.0:\n msb_fi_ki_l = 0.0\n else:\n msb_fi_ki_lmm = msb_fi_ki_lm\n msb_fi_ki_lm = msb_fi_ki_l\n msb_fi_ki_l = msb_fi_ki_lmm-(2*l-1)*msb_fi_ki_lm/argbess\n\n radial_fun[l,n-1] = msb_fi_ki_l #* rb[n-1]\n fc = fcut(rij, self.rcut, self.trans_width)\n return np.dot(radial_fun, self.transformbasis)*fc",
"def rS_rhoS_c(self, m, z):\n Rvir = self.U.rVir(m, z)\n # concentration parameter\n #c = 10./(1.+z) * (m / self.m_nonlin)**(-0.2) # from Takada & Jain 2002\n c = 9./(1.+z) * (m / self.m_nonlin)**(-0.13) # Takada & Jain 2003\n # scale radius\n RS = Rvir / c # in Mpc/h\n # normalize the mass within rVir to be mVir\n rhoS = m / (4.*np.pi*RS**3)\n rhoS /= np.log(1.+c) - c/(1.+c) # (Msun/h) / (Mpc/h)^3\n return RS, rhoS, c",
"def rS_rhoS_c(self, m, z):\n Rvir = self.U.rVir(m, z)\n # concentration parameter\n #c = 10./(1.+z) * (m / self.m_nonlin)**(-0.2) # from Takada & Jain 2002\n c = 9./(1.+z) * (m / self.m_nonlin)**(-0.13) # Takada & Jain 2003\n # scale radius\n RS = Rvir / c # in Mpc/h\n # normalize the mass within rVir to be mVir\n rhoS = m / (4.*np.pi*RS**3)\n rhoS /= np.log(1.+c) - c/(1.+c) # (Msun/h) / (Mpc/h)^3\n return RS, rhoS, c",
"def Calc_axe_spheroid(r,c):\n return np.sqrt((r**3)/c)",
"def radius(self):\n return sqrt(self.radius_square())",
"def omega(self, mass: float) -> float:\n return np.sqrt(self.spring_constant / mass)",
"def insphere(network,\n geometry,\n **kwargs):\n import warnings\n try:\n import pulp as pu\n Np = geometry.num_pores()\n value = _sp.zeros(Np)\n pore_map = geometry.map_pores(geometry.pores(),geometry._net)\n for geom_pore,net_pore in pore_map:\n net_throats = geometry._net.find_neighbor_throats(net_pore)\n geom_throats = geometry._net.map_throats(net_throats,geometry)[:,1]\n verts = geometry['throat.offset_vertices'][geom_throats]\n if len(verts) > 1:\n try:\n pts = np.vstack((i for i in verts if len(i)>0))\n except ValueError:\n pts = []\n if len(pts) > 4:\n \"Work out central point to use as initial guess\"\n c0 = np.mean(pts,axis=0)\n \"Compute convex hull to find points lying on the hull in order\"\n hull = ConvexHull(pts, qhull_options='QJ Pp')\n \"For each simplex making up the hull collect the end points\"\n A = pts[hull.simplices[:,0]]\n B = pts[hull.simplices[:,1]]\n C = pts[hull.simplices[:,2]]\n #I = np.array([[0,1],[-1,0]])\n \"Normal of the simplices\"\n #N = np.dot((B-A),I)\n N = np.cross((B-A),(C-A),axis=1)\n #L = np.sqrt(np.sum(np.square(N),axis=1))\n \"Normalize the normal vector\"\n L = np.linalg.norm(N,axis=1)\n F = np.vstack((L,L,L)).T\n N /= F\n \"If normals point out of hull change sign to point in\"\n pointing_out = (np.sum((A-c0)*N,axis=1)>0)\n N[pointing_out]*= -1\n \"Define Linear Program Variables\"\n \"The centre of the incircle adjustment\"\n cx = pu.LpVariable(\"cx\",None,None,pu.LpContinuous)\n cy = pu.LpVariable(\"cy\",None,None,pu.LpContinuous)\n cz = pu.LpVariable(\"cz\",None,None,pu.LpContinuous)\n \"Radius of the incircle\"\n R = pu.LpVariable(\"R\",0,None,pu.LpContinuous)\n \"Slack variables for shortest distance between centre and simplices\" \n S = pu.LpVariable.dict(\"SlackVariable\",range(len(A)),0,None,pu.LpContinuous)\n \"Set up LP problem\"\n prob = pu.LpProblem(\"FindInRadius\",pu.LpMaximize)\n \"Objective Function\"\n prob += R\n for i in range(len(A)):\n \" Ni.(C-Ai)-Si = 0\"\n prob += N[i][0]*(c0[0]+cx) + N[i][1]*(c0[1]+cy) + N[i][2]*(c0[2]+cz)- N[i][0]*A[i][0] - N[i][1]*A[i][1] - N[i][2]*A[i][2]- S[i] == 0\n \"Si >= R\"\n prob += S[i] >= R\n \"Solve the LP\"\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n prob.solve()\n \"As the radius is the objective function we can get it from the objective or as R.value()\"\n rad = prob.objective.value()\n #cen = c0 + np.array([cx.value(),cy.value(),cz.value()])\n value[geom_pore]=rad*2\n \n \n return value\n except ImportError:\n print(\"Cannot use insphere method without installing pulp package\")",
"def ci_OLS(OLSMod):\r\n if hasattr(OLSMod, 'xtx'):\r\n xtx = OLSMod.xtx # (array) k x k projection matrix (includes constant)\r\n elif hasattr(OLSMod, 'hth'):\r\n xtx = OLSMod.hth # (array) k x k projection matrix (includes constant)\r\n diag = np.diagonal(xtx)\r\n scale = xtx/diag \r\n eigval = np.linalg.eigvals(scale)\r\n max_eigval = max(eigval)\r\n min_eigval = min(eigval)\r\n ci_result = sqrt(max_eigval/min_eigval)\r\n \r\n return ci_result",
"def arg_per(self):\n h_mom = self.sp_ang_mom()\n n_vec = np.cross([0, 0, 1], h_mom)\n e_vec = self.get_ecc()\n if np.linalg.norm(e_vec) == 0:\n small_omega = 0.0\n else:\n small_omega = np.arccos(np.dot(n_vec, e_vec) /\n (np.linalg.norm(n_vec) *\n np.linalg.norm(e_vec)))\n if e_vec[2] < 0:\n small_omega = 2*np.pi - small_omega\n return small_omega",
"def calc_Jrad_pp(s, num):\n \n from scipy import interpolate\n \n rsp = s.read_starpar(num, force_override=False)\n zpa = s.read_zprof('whole')\n \n domain = s.domain\n u = s.u\n par = s.par\n dz = s.domain['dx'][2]\n sigmad = dict()\n for f in ('LW','PE'):\n sigmad[f] = par['opacity']['sigma_dust_{0:s}0'.format(f)]*par['problem']['Z_dust']\n\n dz_cgs = dz*u.length.cgs.value\n LxLy = (s.domain['Lx'][0]*s.domain['Lx'][1])*u.pc**2\n\n # Cell centers (z plus center)\n zpc = zpa.z.data\n zmc = np.flipud(zpc)\n # Cell edges\n zpe = zpc + 0.5*domain['dx'][2]\n zme = zmc + 0.5*domain['dx'][2]\n\n # zprofile\n zp = zpa.sel(time=rsp['time'], method='nearest')\n\n zstar = rsp['sp_src']['x3']\n S4pi = dict()\n Jrad = dict()\n for f in ('LW','PE'):\n taup = sigmad[f]*np.cumsum(zp['d'].data)*dz_cgs\n taum = sigmad[f]*np.cumsum(np.flipud(zp['d'].data))*dz_cgs\n\n # interpolation function\n fp = interpolate.interp1d(zpe, taup)\n fm = interpolate.interp1d(zme, taum)\n\n # Surface density of luminosity over 4pi in cgs units\n S4pi[f] = rsp['sp_src'][f'L_{f}']/LxLy/(4.0*np.pi)*(1.0*au.Lsun/au.pc**2).cgs.value\n\n # plane-parallel approximation\n Jrad[f] = []\n for z_ in zpe:\n Jrad[f].append(calc_Jrad(z_, S4pi[f], zstar, fp, fm, dz))\n\n Jrad[f] = np.array(Jrad[f])\n \n return Jrad, zpc",
"def isi_calc(self):\n arg = erfinv(0.8)*1.0E6/(self.speedup*self.br_nominal)\n print('arg: ', arg)\n\n # calculate center eye opening with no additional impairments\n self.isi_center = 2.0*erf(arg/self.tc) - self.l_1 # column Z\n\n # calculate center eye opening with residual DJ (DJ - DCD)\n self.isi_dj_center = (erf(arg*(1.0+self.dj_ui)/self.tc) + erf(arg*(1.0-self.dj_ui)/self.tc) - self.l_1) # column AD\n\n # calculate eye closing induced by interferometric effects from link end reflections\n mean_reflection = math.pow(10.0,0.05*(self.rx_reflection + self.tx_reflection)) # cell AB5\n er_lin = math.pow(10.0,0.1*self.er_dB_min) # cell AB7\n\n\n arg1 = np.sqrt(2.0*er_lin*self.isi_dj_center*(er_lin-1.0) + (er_lin+1.0)*self.l_1)\n print('arg1: ', arg1)\n arg2 = np.divide(arg1,self.isi_dj_center)\n arg3 = (2.0*self.ref_nf*np.power(10.0,-0.1*self.chil)*mean_reflection)\n self.isi_reflection = self.l_1-np.multiply(arg2,arg3)\n\n # calculate center eye opening with both residual DJ and reflection degradations included\n self.isi_dj_refl_closed = np.multiply(self.isi_dj_center, self.isi_reflection) # column AA\n print('isi_dj_refl_closed (AA) : ', self.isi_dj_refl_closed)\n \n # calculate eye opening at the corners with no additional impairments\n eff_rx_eye = 2.0*(0.5-self.X2)*self.speedup\n self.isi_corners = (erf(arg*(1.0+eff_rx_eye)/self.tc) + erf(arg*(1.0-eff_rx_eye)/self.tc) - self.l_1) # column AB\n\n # calculate eye opening at the corners with residual DJ impairment\n self.isi_dj_corners = (erf(arg*(1.0+eff_rx_eye+self.dj_ui)/self.tc) + erf(arg*(1.0-eff_rx_eye-self.dj_ui)/self.tc) - self.l_1) # column AC\n self.isi_tp4_rx = (erf(arg*(1.0+eff_rx_eye)/self.rx_1090_rise) + erf(arg*(1.0-eff_rx_eye)/self.rx_1090_rise) - 1) # cell AG5\n\n # end of GbE10.isi_calcdef isi_calc(self):",
"def radial6(self) -> float:\n return self.distortion_coefficients[2]",
"def analysis_function_ensquared(system, wavelength_idx, surface, config, px, py, box_size):\n\n det_pix = 15e-3 # Size of the detector pixel [mm]\n\n # Set Current Configuration\n system.MCE.SetCurrentConfiguration(config)\n\n # First of all, we need to find the Surface Number for the IMAGE SLICER \"Image Plane\"\n N_surfaces = system.LDE.NumberOfSurfaces\n surface_names = {} # A dictionary of surface number -> surface comment\n for k in np.arange(1, N_surfaces):\n surface_names[k] = system.LDE.GetSurfaceAt(k).Comment\n # find the Slicer surface number\n try:\n slicer_num = list(surface_names.keys())[list(surface_names.values()).index('Image Plane')]\n except ValueError:\n slicer_num = list(surface_names.keys())[list(surface_names.values()).index('IFU SRM FP')]\n slicer_surface = slicer_num\n # slicer = system.LDE.GetSurfaceAt(slicer_num)\n\n # Get the Field Points for that configuration\n sysField = system.SystemData.Fields\n N_fields = sysField.NumberOfFields\n N_waves = len(wavelength_idx)\n\n X_MAX = np.max([np.abs(sysField.GetField(i + 1).X) for i in range(N_fields)])\n Y_MAX = np.max([np.abs(sysField.GetField(i + 1).Y) for i in range(N_fields)])\n\n # Use the Field Point at the centre of the Slice\n fx, fy = sysField.GetField(2).X, sysField.GetField(2).Y\n hx, hy = fx / X_MAX, fy / Y_MAX # Normalized field coordinates (hx, hy)\n obj_xy = np.array([fx, fy])\n\n N_pupil = px.shape[0] # Number of rays in the Pupil for a given field point and wavelength\n N_rays = N_waves * N_pupil\n\n EE = np.empty(N_waves)\n sli_foc_xy = np.empty((N_waves, 2))\n det_foc_xy = np.empty((N_waves, 2))\n\n slicer_xy = np.empty((N_waves, N_pupil, 2))\n slicer_xy[:] = np.nan\n detector_xy = np.empty((N_waves, N_pupil, 2))\n detector_xy[:] = np.nan\n\n # (1) Run the raytrace up to the IMAGE SLICER\n raytrace = system.Tools.OpenBatchRayTrace()\n # remember to specify the surface to which you are tracing!\n rays_slicer = raytrace.CreateNormUnpol(N_rays, constants.RaysType_Real, slicer_surface)\n\n # Loop over all wavelengths\n for i_wave, wave_idx in enumerate(wavelength_idx):\n\n for (p_x, p_y) in zip(px, py): # Add the ray to the RayTrace\n rays_slicer.AddRay(wave_idx, hx, hy, p_x, p_y, constants.OPDMode_None)\n\n CastTo(raytrace, 'ISystemTool').RunAndWaitForCompletion()\n rays_slicer.StartReadingResults()\n checksum_slicer = 0\n for k in range(N_rays): # Get Raytrace results at the Image Slicer\n i_wave = k // N_pupil\n j_pupil = k % N_pupil\n # print(i_wave, j_pupil)\n output = rays_slicer.ReadNextResult()\n if output[2] == 0 and output[3] == 0:\n slicer_xy[i_wave, j_pupil, 0] = output[4]\n slicer_xy[i_wave, j_pupil, 1] = output[5]\n checksum_slicer += 1\n # this might have to change. 
We assume no vignetting should occur before the slicer\n # but for the MC this might happen\n if output[2] == 0 and output[3] != 0:\n vignetting_code = output[3]\n vignetting_surface = system.LDE.GetSurfaceAt(vignetting_code).Comment\n print(\"\\nConfig #%d\" % config)\n print(\"Vignetting at surface #%d: %s\" % (vignetting_code, vignetting_surface))\n\n if checksum_slicer < N_rays:\n raise ValueError('Some rays were lost before the Image Slicer')\n\n rays_slicer.ClearData()\n\n # Count how many rays fall inside a +- 1 mm window in Y, wrt the centroid\n slicer_cent_x = np.nanmean(slicer_xy[:, :, 0], axis=1)\n slicer_cent_y = np.nanmean(slicer_xy[:, :, 1], axis=1)\n sli_foc_xy[:, 0] = slicer_cent_x\n sli_foc_xy[:, 1] = slicer_cent_y\n\n # print(slicer_cent_y)\n below_slicer = slicer_xy[:, :, 1] < slicer_cent_y[:, np.newaxis] + 1.0 * box_size / 2\n above_slicer = slicer_xy[:, :, 1] > slicer_cent_y[:, np.newaxis] - 1.0 * box_size / 2\n inside_slicer = (np.logical_and(below_slicer, above_slicer))\n # print(inside_slicer[0, :10])\n\n # Now, for each wavelength, we calculate which rays fulfil the Image Slicer conditions\n index_valid_slicer = [np.argwhere(inside_slicer[i, :] == True)[:, 0] for i in range(N_waves)]\n # print(index_valid_slicer[1][:10])\n # print(index_valid_slicer[2][:10])\n\n # (2) Run the raytrace up to the DETECTOR\n # For speed, we re-use the same Raytrace, just define new rays!\n # raytrace_det = system.Tools.OpenBatchRayTrace()\n # Detector is always the last surface\n detector_surface = system.LDE.NumberOfSurfaces - 1\n rays_detector = raytrace.CreateNormUnpol(N_rays, constants.RaysType_Real, detector_surface)\n # Loop over all wavelengths\n for i_wave, wave_idx in enumerate(wavelength_idx):\n for (p_x, p_y) in zip(px, py):\n rays_detector.AddRay(wave_idx, hx, hy, p_x, p_y, constants.OPDMode_None)\n\n CastTo(raytrace, 'ISystemTool').RunAndWaitForCompletion()\n\n rays_detector.StartReadingResults()\n checksum_detector = 0\n # index_valid_detector = [] # Valid means they make it to the detector even if vignetted at the Slicer\n vignetted = []\n index_vignetted = []\n index_valid_detector = np.empty((N_waves, N_pupil))\n index_valid_detector[:] = np.nan\n for k in range(N_rays): # Get Raytrace results at the Detector\n i_wave = k // N_pupil\n j_pupil = k % N_pupil\n output = rays_detector.ReadNextResult()\n if output[2] == 0 and output[3] == 0: # ErrorCode & VignetteCode\n detector_xy[i_wave, j_pupil, 0] = output[4]\n detector_xy[i_wave, j_pupil, 1] = output[5]\n checksum_detector += 1\n index_valid_detector[i_wave, j_pupil] = j_pupil\n\n elif output[2] == 0 and output[3] != 0:\n # Some rays are vignetted\n vignetted.append([output[4], output[5]])\n detector_xy[i_wave, j_pupil, 0] = output[4]\n detector_xy[i_wave, j_pupil, 1] = output[5]\n checksum_detector += 1\n index_valid_detector[i_wave, j_pupil] = j_pupil\n index_vignetted.append(k)\n\n # index_valid_detector = np.array(index_valid_detector)\n # # print(index_valid_detector.shape)\n # # print(index_valid_detector)\n # index_valid_detector = index_valid_detector.reshape((N_waves, N_pupil))\n # # print(index_valid_detector.shape)\n\n rays_detector.ClearData()\n CastTo(raytrace, 'ISystemTool').Close()\n\n # (3) Calculate the ENSQUARED ENERGY\n # We only count the rays that where inside the slicer to begin with and the ones that make it to the detector\n for i_wave in range(N_waves):\n valid_both = []\n for k in range(N_pupil):\n # print(index_valid_detector[i_wave])\n if k in index_valid_slicer[i_wave] and k in 
index_valid_detector[i_wave]:\n valid_both.append(k)\n\n valid_det_x = detector_xy[i_wave, :, 0][valid_both]\n valid_det_y = detector_xy[i_wave, :, 1][valid_both]\n\n # Now, out of the VALID rays, we calculate which detector rays fall inside a 2x pixel box along X\n dcx = np.mean(valid_det_x) # Detector Centroid X\n dcy = np.mean(valid_det_y)\n det_foc_xy[i_wave] = [dcx, dcy]\n\n left_detector = valid_det_x < dcx + det_pix * box_size / 2\n right_detector = valid_det_x > dcx - det_pix * box_size / 2\n inside_detector = (np.logical_and(left_detector, right_detector))\n total_detector = np.sum(inside_detector)\n ensq = total_detector / N_pupil\n # print(ensq)\n EE[i_wave] = ensq * 0.98\n\n # SHOW THIS in the methodology\n\n # fig, axes = plt.subplots(2, N_waves)\n # colors = cm.Reds(np.linspace(0.5, 1, N_waves))\n # for j in range(N_waves):\n # ax1 = axes[0][j]\n # scy = sli_foc_xy[j, 1]\n # scx = sli_foc_xy[j, 0]\n # ax1.axhline(y=scy + 1.0 * box_size / 2, color='black', linestyle='--')\n # ax1.axhline(y=scy - 1.0 * box_size / 2, color='black', linestyle='--')\n # ax1.scatter(slicer_xy[j, :, 0], slicer_xy[j, :, 1], s=3, color=colors[j])\n # ax1.scatter(sli_foc_xy[j, 0], sli_foc_xy[j, 1], s=3, color='black')\n # wavelength = system.SystemData.Wavelengths.GetWavelength(wavelength_idx[j]).Wavelength\n # ax1.set_title(\"IMG SLI | %.3f $\\mu$m\" % wavelength)\n # ax1.set_aspect('equal')\n # ax1.get_yaxis().set_visible(False)\n # ax1.get_xaxis().set_visible(False)\n #\n # p = 1.2\n # ax1.set_xlim([scx - p * box_size / 2, scx + p * box_size / 2])\n # ax1.set_ylim([scy - p * box_size / 2, scy + p * box_size / 2])\n #\n # ax2 = axes[1][j]\n # dcx = det_foc_xy[j, 0]\n # dcy = det_foc_xy[j, 1]\n # ax2.scatter(detector_xy[j, :, 0], detector_xy[j, :, 1], s=3, color=colors[j])\n # ax2.scatter(det_foc_xy[j, 0], det_foc_xy[j, 1], s=3, color='black')\n # ax2.axvline(x=dcx + det_pix * box_size / 2, color='black', linestyle='--')\n # ax2.axvline(x=dcx - det_pix * box_size / 2, color='black', linestyle='--')\n # ax2.set_title(\"DET | %.3f $\\mu$m\" % wavelength)\n # ax2.set_aspect('equal')\n # ax2.get_yaxis().set_visible(False)\n # ax2.get_xaxis().set_visible(False)\n # ax2.set_xlim([dcx - p * det_pix * box_size / 2, dcx + p * det_pix * box_size / 2])\n # ax2.set_ylim([dcy - p * det_pix * box_size / 2, dcy + p * det_pix * box_size / 2])\n #\n #\n # plt.show()\n\n return EE, obj_xy, sli_foc_xy, det_foc_xy",
"def __poprzeczna_s(self, sph_func, R):\n q = self.omega / self.c\n r = q * R.r\n a = self.l * ( self.l + 1 )\n return (-a * sph_func(self.l, r) *\n vsh1(self.m, self.l, R.theta, R.phi) / r -\n (sph_func(self.l, r, derivative=True) +\n sph_func(self.l, r) / r\n ) * vsh2(self.m, self.l, R.theta, R.phi)\n ) / np.sqrt(a) / q",
"def dp_radius(self, s, survey='SPIRE_500'):\n shape = np.array(s[survey].shape)\n cosPA, sinPA = np.cos(s['PA_RAD']), np.sin(s['PA_RAD'])\n cosINCL = s['cosINCL']\n w = s[survey + '_WCS']\n xcm, ycm = s['RA_RAD'], s['DEC_RAD']\n dp_coords = np.zeros([shape[0], shape[1], 2])\n # Original coordinate is (y, x)\n # :1 --> x, RA --> the one needed to be divided by cos(incl)\n # :0 --> y, Dec\n dp_coords[:, :, 0], dp_coords[:, :, 1] = \\\n np.meshgrid(np.arange(shape[1]), np.arange(shape[0]))\n # Now, value inside dp_coords is (x, y)\n # :0 --> x, RA --> the one needed to be divided by cos(incl)\n # :1 --> y, Dec\n for i in range(shape[0]):\n dp_coords[i] = Angle(w.wcs_pix2world(dp_coords[i], 1) * u.deg).rad\n dp_coords[:, :, 0] = 0.5 * (dp_coords[:, :, 0] - xcm) * \\\n (np.cos(dp_coords[:, :, 1]) + np.cos(ycm))\n dp_coords[:, :, 1] -= ycm\n # Now, dp_coords is (dx, dy) in the original coordinate\n # cosPA*dy-sinPA*dx is new y\n # cosPA*dx+sinPA*dy is new x\n if survey[:5] == 'GALEX':\n return np.sqrt((cosPA * dp_coords[:, :, 1] +\n sinPA * dp_coords[:, :, 0])**2 +\n ((cosPA * dp_coords[:, :, 0] -\n sinPA * dp_coords[:, :, 1]))**2) * \\\n s['DIST_MPC'] * 1.0E3 # Radius in kpc\n else:\n return np.sqrt((cosPA * dp_coords[:, :, 1] +\n sinPA * dp_coords[:, :, 0])**2 +\n ((cosPA * dp_coords[:, :, 0] -\n sinPA * dp_coords[:, :, 1]) / cosINCL)**2) * \\\n s['DIST_MPC'] * 1.0E3 # Radius in kpc",
"def SNR(self, flux_sky, n_pix_star, flux_star, gain, ron):\n SNR = (gain*flux_star/sqrt(gain*flux_star + n_pix_star*gain*flux_sky + n_pix_star*ron**2)) \n return SNR",
"def energy_function(self):\n E = 0\n for i in range(len(self.config)):\n for j in range(len(self.config)):\n s = self.config[i,j]\n #Calculate the impact of neighboring particle pairs\n neighbors = (self.config[(i+1)%L, j] +\n self.config[i, (j+1)%L] + \n self.config[(i-1)%L, j] + \n self.config[i, (j-1)%L])\n E += -J*s*neighbors\n #fix for extra neighbors\n return E/4",
"def moment_of_inertia(self):\r\n # update the coordinates of the mesh\r\n self.get_coords()\r\n\r\n # take the cross product of the position and rotation vectors, giving\r\n # ||r||*||omega||*sin(theta), with theta the angle between r and omega.\r\n # note that the minimum distance to the rotation axis is\r\n # ||r||*sin(theta).\r\n A1 = project(cross(self.r, self.omega), self.V)\r\n\r\n # compute ||omega||^2\r\n ommag = project(dot(self.omega, self.omega), self.Q)\r\n\r\n # compute ||A1||^2, so this is ||r||^2*||omega||^2*sin(theta)^2, which\r\n # is also d^2*||omega||^2\r\n A2 = project(dot(A1, A1), self.Q)\r\n\r\n # divide out ||omega||^2 to get d^2\r\n d2 = project(abs(A2/ommag), self.Q)\r\n\r\n # compute and return the integral of d^2*rho over the body\r\n return(assemble(self.rho*d2*dx), \"MoIs\")",
"def E(z, omega_m, omega_l):\n return 1 / np.sqrt(omega_m * (1 + z) ** 3 + omega_l)",
"def semidiameter(radius, distance):\n\n return np.arcsin(radius / distance)",
"def psi_enstrophy(\n Tau, # SGS; (6,64,64,64)\n h = False, # spatial step size\n flag = True): # spectral flag; default is gradient tool\n #---------------------------------------------------------------------#\n # Default variables #\n #---------------------------------------------------------------------#\n if h is False:\n Pi = np.pi\n N = 64\n h = (2.0*Pi)/N\n #---------------------------------------------------------------------#\n # Preallocation variables #\n #---------------------------------------------------------------------#\n dim = np.shape(Tau)[1]\n Psi = np.zeros((9, dim, dim, dim))\n #---------------------------------------------------------------------#\n # Calculating psi using spectral methods #\n #---------------------------------------------------------------------#\n if flag is False:\n kspec = np.fft.fftfreq(dim) * dim\n Kfield = np.array(np.meshgrid(kspec, kspec, kspec, indexing='ij'))\n #-----------------------------------------------------------------#\n # Psi_{11} #\n #-----------------------------------------------------------------#\n Psi[0] = np.fft.ifftn(1j*Kfield[1]*np.fft.fftn(Tau[2])).real -\\\n np.fft.ifftn(1j*Kfield[0]*np.fft.fftn(Tau[1])).real\n #-----------------------------------------------------------------#\n # Psi_{12} #\n #-----------------------------------------------------------------#\n Psi[1] = np.fft.ifftn(1j*Kfield[1]*np.fft.fftn(Tau[4])).real -\\\n np.fft.ifftn(1j*Kfield[0]*np.fft.fftn(Tau[3])).real\n #-----------------------------------------------------------------#\n # Psi_{13} #\n #-----------------------------------------------------------------#\n Psi[2] = np.fft.ifftn(1j*Kfield[1]*np.fft.fftn(Tau[5])).real -\\\n np.fft.ifftn(1j*Kfield[0]*np.fft.fftn(Tau[4])).real\n #-----------------------------------------------------------------#\n # Psi_{21} #\n #-----------------------------------------------------------------#\n Psi[3] = np.fft.ifftn(1j*Kfield[0]*np.fft.fftn(Tau[0])).real -\\\n np.fft.ifftn(1j*Kfield[2]*np.fft.fftn(Tau[2])).real\n #-----------------------------------------------------------------#\n # Psi_{22} #\n #-----------------------------------------------------------------#\n Psi[4] = np.fft.ifftn(1j*Kfield[0]*np.fft.fftn(Tau[1])).real -\\\n np.fft.ifftn(1j*Kfield[2]*np.fft.fftn(Tau[4])).real\n #-----------------------------------------------------------------#\n # Psi_{23} #\n #-----------------------------------------------------------------#\n Psi[5] = np.fft.ifftn(1j*Kfield[0]*np.fft.fftn(Tau[2])).real -\\\n np.fft.ifftn(1j*Kfield[2]*np.fft.fftn(Tau[5])).real\n #-----------------------------------------------------------------#\n # Psi_{31} #\n #-----------------------------------------------------------------#\n Psi[6] = np.fft.ifftn(1j*Kfield[2]*np.fft.fftn(Tau[1])).real -\\\n np.fft.ifftn(1j*Kfield[1]*np.fft.fftn(Tau[0])).real\n #-----------------------------------------------------------------#\n # Psi_{32} #\n #-----------------------------------------------------------------#\n Psi[7] = np.fft.ifftn(1j*Kfield[2]*np.fft.fftn(Tau[3])).real -\\\n np.fft.ifftn(1j*Kfield[1]*np.fft.fftn(Tau[1])).real\n #-----------------------------------------------------------------#\n # Psi_{33} #\n #-----------------------------------------------------------------#\n Psi[8] = np.fft.ifftn(1j*Kfield[2]*np.fft.fftn(Tau[4])).real -\\\n np.fft.ifftn(1j*Kfield[1]*np.fft.fftn(Tau[2])).real\n #---------------------------------------------------------------------#\n # Calculating psi using gradient tool #\n 
#---------------------------------------------------------------------#\n else:\n #-----------------------------------------------------------------#\n # Psi_{11} #\n #-----------------------------------------------------------------#\n Psi[0] = np.gradient(Tau[2],h, edge_order=2)[1] -\\\n np.gradient(Tau[1], h, edge_order=2)[0]\n #-----------------------------------------------------------------#\n # Psi_{12} #\n #-----------------------------------------------------------------#\n Psi[1] = np.gradient(Tau[4],h, edge_order=2)[1] -\\\n np.gradient(Tau[3], h, edge_order=2)[0]\n #-----------------------------------------------------------------#\n # Psi_{13} #\n #-----------------------------------------------------------------#\n Psi[2] = np.gradient(Tau[5],h, edge_order=2)[1] -\\\n np.gradient(Tau[4], h, edge_order=2)[0]\n #-----------------------------------------------------------------#\n # Psi_{21} #\n #-----------------------------------------------------------------#\n Psi[3] = np.gradient(Tau[0],h, edge_order=2)[0] -\\\n np.gradient(Tau[2], h, edge_order=2)[2]\n #-----------------------------------------------------------------#\n # Psi_{22} #\n #-----------------------------------------------------------------#\n Psi[4] = np.gradient(Tau[1],h, edge_order=2)[0] -\\\n np.gradient(Tau[4], h, edge_order=2)[2]\n #-----------------------------------------------------------------#\n # Psi_{23} #\n #-----------------------------------------------------------------#\n Psi[5] = np.gradient(Tau[2],h, edge_order=2)[0] -\\\n np.gradient(Tau[5], h, edge_order=2)[2]\n #-----------------------------------------------------------------#\n # Psi_{31} #\n #-----------------------------------------------------------------#\n Psi[6] = np.gradient(Tau[1],h, edge_order=2)[2] -\\\n np.gradient(Tau[0], h, edge_order=2)[1]\n #-----------------------------------------------------------------#\n # Psi_{32} #\n #-----------------------------------------------------------------#\n Psi[7] = np.gradient(Tau[3],h, edge_order=2)[2] -\\\n np.gradient(Tau[1], h, edge_order=2)[1]\n #-----------------------------------------------------------------#\n # Psi_{33} #\n #-----------------------------------------------------------------#\n Psi[8] = np.gradient(Tau[4],h, edge_order=2)[2] -\\\n np.gradient(Tau[2], h, edge_order=2)[1]\n\n return Psi",
"def drag(s):\n\n r = np.linalg.norm(s[0:3])\n v_atm = we*np.array([-s[1],s[0],0]) # calculate velocity of atmosphere\n v_rel = s[3:6] - v_atm\n\n rs = Re*(1-(ee*s[2]/r)**2) # calculate radius of surface\n h = r-rs\n p = 0.6*np.exp(-(h-175)*(29.4-0.012*h)/915) # in kg/km^3\n coeff = 3.36131e-9 # in km^2/kg\n acc = -p*coeff*np.linalg.norm(v_rel)*v_rel\n\n return acc",
"def einstein_coeff(mol_data):\n\n if not isinstance(mol_data, Phys):\n raise ValueError('mol_data must be a `sbpy.data.phys` instance.')\n\n temp = mol_data['Temperature'][0]\n lgint = mol_data['lgint300'][0]\n part300 = mol_data['partfn300'][0]\n partition = mol_data['partfn'][0]\n eup_J = mol_data['eup_j'][0]\n elo_J = mol_data['elo_J'][0]\n df = mol_data['degfr'][0]\n t_freq = mol_data['t_freq'][0]\n gu = mol_data['dgup'][0]\n\n h = con.h.to('J*s') # Planck constant\n\n k = con.k_B.to('J/K') # Boltzmann constant\n\n intl = mol_data['lgint'][0]\n\n if (h*t_freq/(k*temp)).decompose().value and \\\n (h*t_freq/(k*300*u.K)).decompose().value < 1:\n\n au = (lgint*t_freq\n * (part300/gu)*np.exp(eup_J / (k*300*u.K))*(1.748e-9)).value\n\n else:\n\n au = (intl*(t_freq)**2 *\n (partition/gu)*(np.exp(-(elo_J/(k*temp)).value) -\n np.exp(-(eup_J/(k*temp)).value))**(-1)\n * (2.7964e-16)).value\n\n au = au / u.s\n\n return au",
"def calc_length_distortion(self, x, y):\n\n # get the major axis of the used Earth ellipsoid\n ellaxis = Geodesic.WGS84.a\n\n # get the centre of the subgrid's projection\n fe = self.core.projection.osr_spref.GetProjParm('false_easting')\n fn = self.core.projection.osr_spref.GetProjParm('false_northing')\n\n # create the distances to the projection centre\n dists = np.sqrt((np.array(x) - fe)**2 + (np.array(y) - fn)**2)\n\n # apply equation for distortion in direction perpendicular to the radius, k:\n # k = c/geod.a / np.sin(c/geod.a)\n #\n # is it just about the distance to the centre (c), and as are equally long\n # on the ellipsoid and on the projected plane (the core of of AEQD!)\n k = dists / ellaxis / np.sin(dists / ellaxis)\n\n return k",
"def term_1(\n omega1, # vorticity-1\n omega2, # vorticity-2\n omega3, # vorticity-3\n enst, # enstrophy\n nu_sgs, # turbulent viscosity\n h = True): # spatial step size\n #---------------------------------------------------------------------#\n # Setting default values #\n #---------------------------------------------------------------------#\n if h is True:\n h = 2.0*np.pi/64.0\n #---------------------------------------------------------------------#\n # Preallocating space #\n #---------------------------------------------------------------------#\n term = np.zeros((64,64,64))\n #---------------------------------------------------------------------#\n # Enstrophy term #\n #---------------------------------------------------------------------#\n term += np.gradient(\\\n np.gradient(enst, h, edge_order=2)[2], h, edge_order=2)[2]\n term += np.gradient(\\\n np.gradient(enst, h, edge_order=2)[1], h, edge_order=2)[1]\n term += np.gradient(\\\n np.gradient(enst, h, edge_order=2)[0], h, edge_order=2)[0]\n #---------------------------------------------------------------------#\n # Dissipation #\n #---------------------------------------------------------------------#\n omega1_grad = np.gradient(omega1, h, edge_order=2)\n omega2_grad = np.gradient(omega2, h, edge_order=2)\n omega3_grad = np.gradient(omega3, h, edge_order=2)\n term -= np.square(omega1_grad[2])\n term -= np.square(omega1_grad[1])\n term -= np.square(omega1_grad[0])\n term -= np.square(omega2_grad[2])\n term -= np.square(omega2_grad[1])\n term -= np.square(omega2_grad[0])\n term -= np.square(omega3_grad[2])\n term -= np.square(omega3_grad[1])\n term -= np.square(omega3_grad[0])\n #---------------------------------------------------------------------#\n # Applying the subgrid stress #\n #---------------------------------------------------------------------#\n term *= nu_sgs\n\n return term",
"def sommerfeld_number(self):\n modified_s = self.modified_sommerfeld_number()\n return (modified_s / np.pi) * (self.radius_stator * 2 / self.length) ** 2",
"def inertia(mus):\n pos, negs, zeros = cluster_eignvalues(mus)\n\n return len(zeros) + min(len(pos), len(negs))",
"def radius_orbit_eccentric(time: float, axis_semimajor: float, period: float, \n eccentricity: float, argument_periastron: float, t0: float, inclination: float, \n longitude_node_ascending: float, latitude_ecliptic: float = None, \n longitude_ecliptic: float = None, ecliptic: bool = False):\n\n incl_in = inclination * d2r\n asc_in = longitude_node_ascending * d2r\n\n so, co = np.sin(asc_in), np.cos(asc_in)\n si, ci = np.sin(incl_in), np.cos(incl_in)\n\n ma = anomaly_mean(time, period, t0)\n ea = anomaly_eccentric(ma, eccentricity)\n ta = anomaly_true(ea, eccentricity)\n tot = np.mod(argument_periastron + ta, 360.) * d2r\n\n # compute length of vector.\n r_mag = axis_semimajor * (1 - eccentricity**2) / (1 + eccentricity * np.cos(ta * d2r))\n\n st, ct = np.sin(tot), np.cos(tot)\n\n X = -ap * (so * ct + co * ci * st)\n Y = ap * (co * ct - so * ci * st)\n Z = ap * (-si * st)\n\n # determine which basis to use, output results. \n if ecliptic and all([latitude_ecliptic, longitude_ecliptic]):\n sb, cb = np.sin(ecl_b * d2r), np.cos(ecl_b * d2r)\n sl, cl = np.sin(ecl_l * d2r), np.cos(ecl_l * d2r)\n Xecl = X * sl - Y * cl * sb - Z * cl * cb\n Yecl = -(X * cl + Y * sl * sb + Z * sl * cb)\n Zecl = Y * cb - Z * sb\n X = Xecl\n Y = Yecl\n Z = Zecl\n\n elif ecliptic:\n print(\"WARNING: ecliptic basis is desired but one or both ecliptic coordinates are not set!\")\n print(\"... returning vector with basis relative to plane of sky ...\")\n\n # now, return vector.\n vector = np.array([X, Y, Z])\n\n return vector",
"def testSphereRadius(self):\n sp = nigel.SphereSelection(nb, radius=10)\n self.assertEqual(sp.n, 9)",
"def ps(self, sigma, z):\n \n delta_c = self.params[0]\n \n return sqrt(2.e0/pi) * (delta_c/sigma) * exp( (-delta_c*delta_c)/(2.e0*sigma*sigma) )",
"def omega_sun_snodgrass90(lat):\n return differential_rotation(lat, 14.71, -2.39, -1.78)",
"def kelvin_effect(pres, surft, temp, mw_ba, dcell):\n volm = mw_ba/1e3 # approximation: using density 1000 kg/m3\n return pres*exp(-4*surft*volm/(dcell*gas_constant*temp))",
"def coriolis(self, lat):\n return 2. * self.omega * np.sin(np.deg2rad(lat))",
"def E2V(E):\n# for energy in mev returns velocity in m/s\n return sqrt(E/5.227e-6)",
"def calcualte_ellipse_radii(guess, eccentricity = 0, perimeter = 2 * np.pi*1):\n return fsolve(ellipse_radii_test, guess, args = (eccentricity, perimeter))",
"def SetRSoftSF(self, etol_rad = None, mus = None, Ls = None, \\\n radial_Xs = None, radial_Ys = None, etol_ang = None, \\\n xis = None, lambdas = None, zetas = None, angular_Xs = None, \\\n angular_Ys = None, angular_Zs = None):\n # Initializes global cutoff radius\n Rc_global = 0\n\n # Checks if any radial inputs used. If so, if any parameters are \n # not None then throws an error assuming the user is confused. \n # Checks all inputs are valid.\n if any(v is None for v in [etol_rad, mus, Ls, radial_Xs, radial_Ys]):\n if any(v is not None for v in (etol_rad, mus, Ls, radial_Xs, \\\n radial_Ys)):\n print('ERROR: If radial structure functions are used, must ')\n print(' supply etol_rad, mus, Ls, radial_Xs, radial_Ys ')\n print(' to SetRSoftSF')\n sys.exit(-1)\n else:\n \n # Marks that it contains radial structure functions\n self.__containsRadial = True \n\n # Initializes radial structure function variables\n if etol_rad > 0 and etol_rad < 1: \n self.etol_radial = etol_rad\n else:\n print('ERROR: 0 < etol_rad < 1 used in SetRSoftSF')\n sys.exit(-1)\n if any(len(mus) != len(arr) for arr in (Ls, radial_Xs, \\\n radial_Ys)):\n print('ERROR: Length of mus, radial_Xs, and radial_Ys in ')\n print(' SetRSoftSF must be equal')\n sys.exit(-1)\n self.mus = mus\n self.Ls = Ls \n if np.all(np.mod(radial_Xs,1)==0):\n self.radial_Xs = radial_Xs.astype(int)\n else:\n print('ERROR: radial_Xs used in SetRSoftSF must be integers')\n sys.exit(-1)\n if np.all(np.mod(radial_Ys,1)==0):\n self.radial_Ys = radial_Ys.astype(int)\n else:\n print('ERROR: radial_Ys used in SetRSoftSF must be integers')\n sys.exit(-1)\n\n # Outputs radial cut-off radii\n print('Calculating radial cutoff...')\n Rc_max = 0.0\n for SF in range(len(mus)):\n mu = mus[SF]\n L = Ls[SF]\n X = radial_Xs[SF]\n Y = radial_Ys[SF]\n Rc = mu+L*sqrt(log(1/etol_rad))\n print(' mu='+str(mu)+', L='+str(L)+', X='+str(X)+', Y='+\\\n str(Y)+' --> Rc='+str(Rc))\n if Rc > Rc_max:\n Rc_max = Rc \n print('Rc_radial='+str(Rc_max))\n print(' ')\n print('--------------------------------------------------------')\n if Rc_max > Rc_global:\n Rc_global = Rc_max\n\n # Checks if any angular inputs used. If so, if any parameters are \n # not None then throws an error assuming the user is confused. 
\n # Checks all inputs are valid.\n if any(v is None for v in [etol_ang, xis, lambdas, angular_Xs, \n angular_Ys, angular_Zs]):\n if any(v is not None for v in (etol_ang, xis, lambdas, zetas, \\\n angular_Xs, angular_Ys, angular_Zs)):\n print('ERROR: If angular structure functions are used, must ')\n print(' supply etol_ang, xis, lambdas, zetas, angular_Xs,')\n print(' angular_Ys, angular_Zs')\n print(' to SetRSoftSF')\n sys.exit(-1)\n else:\n\n # Marks that contains angular structure functions\n self.__containsAngular = True \n\n # Initializes angular structure function variables\n if etol_ang > 0 and etol_ang < 1: \n self.etol_angular = etol_ang\n else:\n print('ERROR: 0 < etol_ang < 1 used in SetRSoftSF')\n sys.exit(-1)\n if any(len(xis) != len(arr) for arr in (lambdas, zetas, \\\n angular_Xs, angular_Ys, angular_Zs)):\n print('ERROR: Length of xis, zetas, angular_Xs, angular_Ys, ')\n print(' and angular_Zs in SetRSoftSF must be equal')\n sys.exit(-1)\n self.xis = xis\n if np.all(np.abs(lambdas)==1):\n self.lambdas = lambdas\n else:\n print('ERROR: lambdas used in SetRSoftSF must be +/-1')\n sys.exit(-1)\n if np.all(np.mod(zetas,1)==0):\n self.zetas = zetas.astype(int)\n else:\n print('ERROR: angular_Xs used in SetRSoftSF must be integers')\n sys.exit(-1)\n if np.all(np.mod(angular_Xs,1)==0):\n self.angular_Xs = angular_Xs.astype(int)\n else:\n print('ERROR: angular_Xs used in SetRSoftSF must be integers')\n sys.exit(-1)\n if np.all(np.mod(angular_Ys,1)==0):\n self.angular_Ys = angular_Ys.astype(int)\n else:\n print('ERROR: angular_Ys used in SetRSoftSF must be integers')\n sys.exit(-1)\n if np.all(np.mod(angular_Zs,1)==0):\n self.angular_Zs = angular_Zs.astype(int)\n else:\n print('ERROR: angular_Zs used in SetRSoftSF must be integers')\n sys.exit(-1)\n\n # Outputs radial cut-off radii\n print('Calculating angular cutoff...')\n Rc_max = 0.0\n for SF in range(len(xis)):\n xi = xis[SF]\n l = lambdas[SF]\n zeta = zetas[SF]\n X = angular_Xs[SF]\n Y = angular_Ys[SF]\n Z = angular_Zs[SF]\n if l==1:\n Rc = xi*sqrt(2.0*log(1.0/etol_ang)/3.0)\n else:\n Rc = xi*sqrt(log(1.0/etol_ang)/2.0)\n print(' xi='+str(xi)+', lambda='+str(l)+', zeta='+str(zeta)+\\\n ', X='+str(X)+', Y='+str(Y)+', Z='+str(Z)+' --> Rc='+str(Rc))\n if Rc > Rc_max:\n Rc_max = Rc \n print('Rc_angular='+str(Rc_max))\n print(' ')\n print('--------------------------------------------------------')\n if Rc_max > Rc_global:\n Rc_global = Rc_max\n\n # Sets structure functions into netCDF file\n self.__SetSFParams()\n\n print('Rc='+str(Rc_global))",
"def J_over_JUV_inside_slab(tau, tau_SF):\n # if not np.all(np.abs(tau) <= 0.5*tau_SF):\n # raise ValueError(\"tau must be smaller than or equal to tau_SF/2\")\n\n return 0.5/tau_SF*(2.0 - expn(2,0.5*tau_SF - tau) - expn(2,0.5*tau_SF + tau))",
"def _compute_e_S1S2(self, x):\r\n psi = x[PSI_IDX]\r\n return np.array([-np.sin(psi), np.cos(psi)])",
"def nsphere_volume(n, r):\n return math.pi ** (n / 2) * (r ** n) / gamma(n / 2 + 1)",
"def velocity_calc(mass, MoS):\r\n\tif bool(eval(input('Do you change the Kinetic Energy section? (y/n) '))):\r\n\t\tKEmax = float(input('Kenetic Energy Limit: '))\r\n\t\tsafety_margin = float(input('Margin of Safety: '))\r\n\telse:\r\n\t\tKEmax = 75 #ft-lbf\r\n\t\tsafety_margin = MoS\r\n\r\n\tKE_limit = KEmax / (safety_margin + 1)\r\n\t\r\n\treturn math.sqrt((2*(KE_limit)*conversion)/mass)",
"def sphere(self, x):\r\n # return np.random.rand(1)[0]**0 * sum(x**2) + 1 * np.random.rand(1)[0]\r\n return sum((x+0)**2)",
"def get_pi1(mol, atom, coord):\n\n omega = np.identity(2)\n spin_j = np.einsum(\"ij,kl->ikjl\", omega, omega)\n\n pi_py = -mol.intor(\"int2e_ip1\")[coord] #minus sign due to pyscf definition\n\n j1_spatial = np.zeros((pi_py.shape[0],pi_py.shape[0],pi_py.shape[0],\n pi_py.shape[0]))\n\n for i in range(pi_py.shape[0]):\n\n lambda_i = int(i in range(mol.aoslice_by_atom()[atom][2],\n mol.aoslice_by_atom()[atom][3]))\n\n for j in range(pi_py.shape[0]):\n\n lambda_j = int(j in range(mol.aoslice_by_atom()[atom][2],\n mol.aoslice_by_atom()[atom][3]))\n\n for k in range(pi_py.shape[0]):\n\n lambda_k = int(k in range(mol.aoslice_by_atom()[atom][2],\n mol.aoslice_by_atom()[atom][3]))\n\n for l in range(pi_py.shape[0]):\n\n lambda_l = int(l in range(mol.aoslice_by_atom()[atom][2],\n mol.aoslice_by_atom()[atom][3]))\n\n j1_spatial[i][j][k][l] += (pi_py[i][j][k][l] * lambda_i\n + pi_py[j][i][k][l] * lambda_j\n + pi_py[k][l][i][j] * lambda_k\n + pi_py[l][k][i][j] * lambda_l)\n\n j1_spatial = np.einsum(\"abcd->acbd\", j1_spatial,\n optimize='optimal') #convert to physicists\n j1 = np.kron(spin_j, j1_spatial)\n k1 = np.einsum(\"ijkl->ijlk\", j1,\n optimize='optimal') #physicists notation\n\n pi1 = j1 - k1\n\n return pi1",
"def test_euclidean_unit_spheres(self):\n \n s1_ref = 6.28318530717958647692528676655867\n v2_ref = 3.14159265358979323846264338327933\n s2_ref = 12.5663706143591729538505735331173\n v3_ref = 4.18879020478639098461685784437218\n\n s = space(curvature=0)\n\n self.assertTrue(isclose(\n s.sphere_s1(1),\n s1_ref\n ))\n self.assertTrue(isclose(\n s.inv_sphere_s1(s1_ref),\n 1\n ))\n self.assertTrue(isclose(\n s.sphere_v2(1),\n v2_ref\n ))\n self.assertTrue(isclose(\n s.inv_sphere_v2(v2_ref),\n 1\n ))\n self.assertTrue(isclose(\n s.sphere_s2(1),\n s2_ref\n ))\n self.assertTrue(isclose(\n s.inv_sphere_s2(s2_ref),\n 1\n ))\n self.assertTrue(isclose(\n s.sphere_v3(1),\n v3_ref\n ))\n self.assertTrue(isclose(\n s.inv_sphere_v3(v3_ref),\n 1\n ))",
"def sphere_volume(r):\n return (4/3) * 3.14159 * r**3",
"def parallel_radius(self, lat):\n\n return EARTH_RADIUS * lat.cos()",
"def evolve_satellite(t, included_physics, halo_gas_density, galaxy_velocity, galaxy_gas_density, rho_DM, M_o, R_o, physics_kwargs={}, RPS_KH_exclusive = False):\n # included physics is going to be a list of the physics \"modules\" to evovel\n # right now, options should be 'KH' and 'RPS'\n\n physics_kwargs_copy = copy.deepcopy(physics_kwargs)\n # do a check of input parameters. Are they functions or constants?\n \n if not hasattr(halo_gas_density, '__call__'):\n halo_gas_density = lambda x : halo_gas_density\n \n if not hasattr(galaxy_velocity, '__call__'):\n galaxy_velocity = lambda x : galaxy_velocity\n \n # galaxy gas density should be function of radius in galaxy!!!\n if not hasattr(galaxy_gas_density, '__call__'):\n galaxy_gas_density = lambda x : galaxy_gas_density # constant!\n \n # assume KH and RPS are off unless in list of included physics\n KH_const = 0.0; RPS_const = 0.0\n \n if 'KH' in included_physics:\n KH_const = 1.0\n\n if not 'KH' in physics_kwargs_copy.keys(): # bookkeeping if off\n physics_kwargs_copy['KH'] = {}\n \n if 'RPS' in included_physics:\n RPS_const = 1.0\n \n if not 'RPS' in physics_kwargs_copy.keys(): # bookkeeping if off\n physics_kwargs_copy['RPS'] = {}\n \n # if alpha is contained in physcis kwargs... strip it to be \n # used in the RPS condition function call, as it is not used in the\n # RPS mass loss rate calculation\n if 'alpha' in physics_kwargs_copy['RPS'].keys():\n alpha = physics_kwargs_copy['RPS']['alpha']\n physics_kwargs_copy['RPS'].pop('alpha',None)\n else:\n alpha = 1.0\n \n # need to come up with some way to make a function on the fly... constants is fine\n # but if this gets complicated then.... yaa.....\n \n ode_function = lambda y, t, A, B:\\\n A * _KH_evolution(y, t, halo_gas_density, galaxy_velocity,\n galaxy_gas_density, **physics_kwargs_copy['KH'])+\\\n B * _RPS_evolution(y, t, halo_gas_density, galaxy_velocity,\n galaxy_gas_density,\n galaxy_gas_density(0.0), **physics_kwargs_copy['RPS'])\n \n # write a loop here... solve step by step\n M = np.zeros(np.size(t)); R = np.zeros(np.size(t))\n M[0] = M_o; R[0] = R_o\n keep_looping = True; i = 0; turn_KH_off = 1.0 \n while (i < np.size(t) - 1) and keep_looping:\n \n # check if ram pressure stripping occurs\n if 'RPS' in included_physics:\n # integrate and test around the current radius\n# rps_cond = _RPS_condition(np.linspace(0.9999*R[i],1.0001*R[i],5), rho_DM, galaxy_gas_density, \n# halo_gas_density(t[i]), galaxy_velocity(t[i]), alpha=alpha)\n\n rps_cond = _RPS_condition(R[i], rho_DM, galaxy_gas_density, halo_gas_density(t[i]),\n galaxy_velocity(t[i]), alpha=alpha)\n \n # if RPS is valid at current radius, use it... otherwise set to zero\n if rps_cond > 0:\n RPS_const = 1.0\n else:\n RPS_const = 0.0 \n \n if RPS_KH_exclusive and RPS_const == 1.0: # turn KH off\n turn_KH_off = 0.0\n elif RPS_KH_exclusive and RPS_const == 0.0: # turn KH on\n turn_KH_off = 1.0\n else: # else just keep it the same\n turn_KH_off = KH_const \n \n ode_function_args = (KH_const * turn_KH_off, RPS_const,)\n \n \n \n soln = integrate.odeint(ode_function, [M[i],R[i]], t[i:i+2], \n args=ode_function_args,\n mxhnil=0, ixpr=False)\n M[i+1] = soln[1,0]; R[i+1] = soln[1,1]\n \n i = i + 1\n \n simple_check = M[i] + ode_function([M[i],R[i]], t[i], *ode_function_args)[0] * (t[i] - t[i-1])\n \n if M[i] <= 0.0 or R[i] <= 0.0 or simple_check <= 0.0:\n M[i] = 0.0; R[i] = 0.0\n keep_looping = False\n \n \n return M, R",
"def squared_radial_component(x, tol=0.01):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n result = (2 * x * np.cos(x) + (x * x - 2) * np.sin(x)) / (x ** 3)\n x_near_zero = (x < tol) & (x > -tol)\n return np.where(x_near_zero, 1./3, result)",
"def bessellsuniv(x, y, z1, z2, lab, rzero):\n nlab = len(lab)\n omega = np.zeros(nlab, dtype=np.complex_)\n za, zb, N = circle_line_intersection(z1, z2, x + y * 1j, rzero * abs(lab[0]))\n if N > 0:\n for n in range(nlab):\n omega[n] = bessellsuni(x, y, za, zb, lab[n])\n return omega",
"def kramers_kronig_hs(self, I_EELS,\n N_ZLP=None,\n iterations=1,\n n=None,\n t=None,\n delta=0.5, correct_S_s=False):\n output = {}\n # Constants and units\n me = 511.06\n\n\n e0 = self.e0\n beta = self.beta\n\n eaxis = self.deltaE[self.deltaE > 0] # axis.axis.copy()\n S_E = I_EELS[self.deltaE > 0]\n y = I_EELS[self.deltaE > 0]\n l = len(eaxis)\n i0 = N_ZLP\n\n # Kinetic definitions\n ke = e0 * (1 + e0 / 2. / me) / (1 + e0 / me) ** 2 #m0 v**2\n tgt = e0 * (2 * me + e0) / (me + e0)\n rk0 = 2590 * (1 + e0 / me) * np.sqrt(2 * ke / me) #me c**2 / (hbar c) gamma sqrt(2Ekin /(me c**2))\n \n for io in range(iterations):\n # Calculation of the ELF by normalization of the SSD\n # We start by the \"angular corrections\"\n Im = y / (np.log(1 + (beta * tgt / eaxis) ** 2)) / self.ddeltaE # axis.scale\n if n is None and t is None:\n raise ValueError(\"The thickness and the refractive index are \"\n \"not defined. Please provide one of them.\")\n elif n is not None and t is not None:\n raise ValueError(\"Please provide the refractive index OR the \"\n \"thickness information, not both\")\n elif n is not None:\n # normalize using the refractive index.\n K = np.sum(Im / eaxis) * self.ddeltaE\n K = K / (np.pi / 2) / (1 - 1. / n ** 2)\n te = (332.5 * K * ke / i0)\n \n Im = Im / K\n\n # Kramers Kronig Transform:\n # We calculate KKT(Im(-1/epsilon))=1+Re(1/epsilon) with FFT\n # Follows: D W Johnson 1975 J. Phys. A: Math. Gen. 8 490\n # Use an optimal FFT size to speed up the calculation, and\n # make it double the closest upper value to workaround the\n # wrap-around problem.\n esize = next_fast_len(2 * l) # 2**math.floor(math.log2(l)+1)*4\n q = -2 * np.fft.fft(Im, esize).imag / esize #TODO : min twee?????\n\n q[:l] *= -1\n q = np.fft.fft(q)\n # Final touch, we have Re(1/eps)\n Re = q[:l].real + 1 #TODO: plus 1???\n # Egerton does this to correct the wrap-around problem, but in our\n # case this is not necessary because we compute the fft on an\n # extended and padded spectrum to avoid this problem.\n # Re=real(q)\n # Tail correction\n # vm=Re[axis.size-1]\n # Re[:(axis.size-1)]=Re[:(axis.size-1)]+1-(0.5*vm*((axis.size-1) /\n # (axis.size*2-arange(0,axis.size-1)))**2)\n # Re[axis.size:]=1+(0.5*vm*((axis.size-1) /\n # (axis.size+arange(0,axis.size)))**2)\n\n # Epsilon appears:\n # We calculate the real and imaginary parts of the CDF\n e1 = Re / (Re ** 2 + Im ** 2)\n e2 = Im / (Re ** 2 + Im ** 2)\n\n if iterations > 0 and N_ZLP is not None: #TODO: loop weghalen.\n # Surface losses correction:\n # Calculates the surface ELF from a vaccumm border effect\n # A simulated surface plasmon is subtracted from the ELF\n Srfelf = 4 * e2 / ((e1 + 1) ** 2 + e2 ** 2) - Im\n adep = (tgt / (eaxis + delta) *\n np.arctan(beta * tgt / eaxis) -\n beta / 1000. /\n (beta ** 2 + eaxis ** 2. / tgt ** 2))\n Srfint = 2000 * K * adep * Srfelf / rk0 / te * self.ddeltaE # axis.scale\n if correct_S_s == True:\n print(\"correcting S_s\")\n Srfint[Srfint < 0] = 0\n Srfint[Srfint > S_E] = S_E[Srfint > S_E]\n y = S_E - Srfint\n _logger.debug('Iteration number: %d / %d', io + 1, iterations)\n\n eps = (e1 + e2 * 1j)\n del y\n del I_EELS\n if 'thickness' in output:\n # As above,prevent errors if the signal is a single spectrum\n output['thickness'] = te\n\n return eps, te, Srfint",
"def spin_energy(self, i, S):\n iplus1 = (i + 1) % self.N\n iminus1 = (i - 1) % self.N\n return - self.J * np.dot(spinlib.get_spin_at_index(self.s0,iplus1)+ \\\n spinlib.get_spin_at_index(self.s0,iminus1),\n S)",
"def _degree_of_polarization(S):\n if S[0] == 0:\n return 0\n return np.sqrt(S[1]**2+S[2]**2+S[3]**2)/S[0]",
"def residual(S):\n rho = seawater.density(T, S, Pa)\n return (rho_1 - rho)",
"def inv_p_error(q,s,v, dq,ds,dv):\n return np.sqrt( (ds**2*(q-v)**2 + dv**2*(q+s-1)**2 + dq**2*(v+s-1)**2)/(v+s-1)**4 )",
"def sphere_volume(sphere_radius):\n return (4 / 3 * np.pi * sphere_radius**3)",
"def modified_sommerfeld_number(self):\n return (\n self.radius_stator * 2 * self.omega * self.viscosity * (self.length ** 3)\n ) / (8 * self.load * (self.radial_clearance ** 2))"
] | [
"0.59292984",
"0.5824991",
"0.5817226",
"0.58086616",
"0.57849306",
"0.57736725",
"0.5735249",
"0.57111436",
"0.5704036",
"0.56862265",
"0.5664737",
"0.5664737",
"0.56512725",
"0.5550405",
"0.5496292",
"0.5456463",
"0.5426158",
"0.54168355",
"0.53976965",
"0.5365244",
"0.5362637",
"0.5339783",
"0.5337699",
"0.5287755",
"0.52820945",
"0.5281513",
"0.52810335",
"0.52762115",
"0.52234447",
"0.52072275",
"0.5206206",
"0.5203669",
"0.5198666",
"0.5195853",
"0.5190186",
"0.51899636",
"0.5186796",
"0.5179275",
"0.51719314",
"0.5170612",
"0.51643693",
"0.51545334",
"0.51487154",
"0.5145391",
"0.514022",
"0.51376915",
"0.5134779",
"0.5134779",
"0.51284266",
"0.51247364",
"0.5122091",
"0.51213276",
"0.5117607",
"0.5109711",
"0.5109687",
"0.50990206",
"0.50902766",
"0.50892276",
"0.5074603",
"0.5072448",
"0.50693154",
"0.5066227",
"0.50649446",
"0.5062949",
"0.50607145",
"0.50577176",
"0.50575626",
"0.50556976",
"0.5055455",
"0.5054388",
"0.50539356",
"0.50523657",
"0.5052202",
"0.5047056",
"0.50459",
"0.5045113",
"0.50403386",
"0.50355846",
"0.5034309",
"0.50277",
"0.5024398",
"0.50217706",
"0.5021711",
"0.5021097",
"0.5012025",
"0.50060076",
"0.50030774",
"0.50015646",
"0.5001367",
"0.50013345",
"0.500015",
"0.49984482",
"0.49954084",
"0.499077",
"0.49877423",
"0.49816883",
"0.49769342",
"0.4964241",
"0.49597764",
"0.49595323"
] | 0.6008606 | 0 |
Evaluate the V-band luminosity L_V expected from the Faber-Jackson (FJ) relation for a given velocity dispersion | def get_luminosity(self, vel_disp):
log_L_V = self.slope*np.log10(vel_disp) + self.intercept
return log_L_V | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_vcond(lambdam, taum):\n return 2 * lambdam / taum",
"def V_hipass(V, R_S, C, L, R_L, f):\n # current in circuit\n I = V/(R_S + Z_hipass(C, L, R_L, f))\n # voltage across circuit\n V_out = V - I*R_S\n I_L = V_out/Z_high(C, R_L, f) # current through load branch\n V_L = I_L*R_L # voltage across load\n return V_L",
"def LJ(r, epsilon, sigma, x, y):\n A=((x/y)**(x/(x-y))/((x/y)-1))\n\n\n V=A*epsilon*((sigma/r)**x-(sigma/r)**y) #-4*Epsilon*((Sigma/Rc)**12-(Sigma/Rc)**6)\n\n return V",
"def calc_elv_spectra(self, red, comp, src):\n if ((src in red.data.keys())\n & (src in red.data.keys())):\n # check that the wavelenth grids are identical\n delt_wave = red.data[src].waves - comp.data[src].waves\n if np.sum(np.absolute(delt_wave)) > 0.01*u.micron:\n warnings.warn(\"wavelength grids not equal for %s\" % src,\n UserWarning)\n else:\n # reference band\n red_V = red.data['BAND'].get_band_mag('V')\n comp_V = comp.data['BAND'].get_band_mag('V')\n\n # setup the needed variables\n self.waves[src] = red.data[src].waves\n n_waves = len(self.waves[src])\n self.exts[src] = np.zeros(n_waves)\n self.uncs[src] = np.zeros(n_waves)\n self.npts[src] = np.zeros(n_waves)\n\n # only compute the extinction for good, positive fluxes\n print(comp.data[src].npts)\n print(comp.data[src].fluxes)\n indxs, = np.where((red.data[src].npts > 0)\n & (comp.data[src].npts > 0)\n & (red.data[src].fluxes.value > 0)\n & (comp.data[src].fluxes.value > 0))\n self.exts[src][indxs] = \\\n (-2.5*np.log10(red.data[src].fluxes[indxs]\n / comp.data[src].fluxes[indxs])\n + (comp_V[0] - red_V[0]))\n self.uncs[src][indxs] = np.sqrt(\n np.square(_flux_unc_as_mags(red.data[src].fluxes[indxs],\n red.data[src].uncs[indxs]))\n + np.square(_flux_unc_as_mags(comp.data[src].fluxes[indxs],\n comp.data[src].uncs[indxs]))\n + np.square(red_V[1])\n + np.square(comp_V[1]))\n self.npts[src][indxs] = np.full(len(indxs), 1)",
"def Luminosity(self, z, f=1., dnu=1000.):\n ld = self.Luminosity_Distance(z)\n ld2 = ld * ld\n lum = f * self.Jy2CGS * dnu * self.MHz2Hz * 4 * np.pi * ld2\n return lum",
"def V_bandpass(V, R_S, C, L, R_L, f):\n # current in circuit\n I = V/(R_S + Z_bandpass(C, L, R_L, f))\n # voltage across circuit\n V_out = V - I*R_S\n return V_out",
"def Luminosity(self):\n try:\n L = (self.E*self.Weight).sum()\n N = self.E.count()\n except:\n L = self.E.sum()\n N = self.E.count()\n return L, L/np.sqrt(N)",
"def calc_lhv(self):\n hf = {}\n hf['hydrogen'] = 0\n hf['methane'] = -74.85\n hf['ethane'] = -84.68\n hf['propane'] = -103.8\n hf['butane'] = -124.51\n hf['O2'] = 0\n hf['CO2'] = -393.5\n # water (gaseous)\n hf['H2O'] = -241.8\n\n lhv = 0\n\n for f, x in self.fuel.val.items():\n molar_masses[f] = CP.PropsSI('M', f)\n fl = set(list(hf.keys())).intersection(\n set([a.replace(' ', '') for a in CP.get_aliases(f)]))\n if len(fl) == 0:\n continue\n\n if list(fl)[0] in self.fuels():\n structure = fluid_structure(f)\n\n n = {}\n for el in ['C', 'H', 'O']:\n if el in structure:\n n[el] = structure[el]\n else:\n n[el] = 0\n\n lhv += (-(n['H'] / 2 * hf['H2O'] + n['C'] * hf['CO2'] -\n ((n['C'] + n['H'] / 4) * hf['O2'] +\n hf[list(fl)[0]])) / molar_masses[f] * 1000) * x\n\n return lhv",
"def velocity(n_core, q, beta_invariant, material_dispersion=None):\n c = scipy.constants.speed_of_light\n if material_dispersion is None:\n A = 2 / c / (2 + q)\n B = q * n_core**2 / c / (2 + q)\n else:\n N1 = n_core + material_dispersion\n y = 2 * n_core / N1\n A = 2 * N1 / n_core * (1 + 0.25 * y) / c / (q + 2)\n B = q * n_core**2 * A - 1 / 4 / c * N1 * n_core * y\n\n return A * beta_invariant + B / beta_invariant",
"def LotkaVolterra_Dynamics(self):\n LV_c = self.toConceptual(self.state) # (nF, nR)\n LV_c = LV_c.mul((1 - LV_c) + self.LV_inhM.mm(LV_c))\n LV_s = self.toNeural(LV_c)\n\n return LV_c, LV_s",
"def compute_Flocal(config):\n \n vlow = config['vlow']\n vhigh = config['vhigh']\n vdef = config['vdef']\n lo_restfreq = config[\"DOPPLERTRACKFREQ\"]\n\n velocity = (vlow + vhigh) * 0.5\n vd = Vdef()\n vd.compute_local_frame_with_vdef(vdef, velocity,\n lo_restfreq, velocity)\n # this better be the same as vlow since i sent in the avg\n cur_vhigh = vd.get_vhigh()\n cur_vlow = vd.get_vlow()\n if cur_vhigh != cur_vlow:\n \"PANIC: How can the avg velocities differ!!!!!\"\n \n return cur_vhigh",
"def _fv(self):\n return self.beta * (self.x ** self.c)",
"def cie_luv(self):\n K = Fraction(29, 3) ** 3\n e = Fraction(6, 29) ** 3\n XYZ = self.cie_xyz\n yr = XYZ[1] / D65[1]\n L = 116 * yr ** Fraction(1, 3) - 16 if yr > e else K * yr\n u = 13 * L * (U(*XYZ) - U(*D65))\n v = 13 * L * (V(*XYZ) - V(*D65))\n return (L, u, v)",
"def width_v_v_v(model: SingleRhNeutrinoModel, genv: Generation):\n mx = model.mx\n u = 0.5 * np.tan(2 * model.theta)\n w = parameters.GF**2 * mx**5 / (768 * np.pi**3) * u**2\n pre = 2 if genv == model.gen else 1.0\n return pre * w",
"def __rho2v(self, vm, beta, rhoc, w, rho):\n if rho < 0:\n return float(vm)\n elif rho <= rhoc:\n return float(vm - vm * rho / beta)\n else:\n rhom = rhoc - (vm * rhoc - vm * (rhoc ** 2) / beta) / w\n # print('rho {0}; rhoc {1}'.format(rho, rhoc))\n return float(w * (rho - rhom) / rho)",
"def dLJverlet(x,r2,R1,R2):\r\n rc = (2**(1/6))*((R1+R2)/(2))\r\n sig_int = (R1+R2)/(2) #JV: This is the sigma of the interaction (in the system units). We don't need to divide by sigma because we are already working with reduced units\r\n\r\n #JV: Because we are working on reduced units (from the values of the Argon gas)\r\n # we want need to divide our radius by the radius of the Argon gas\r\n\r\n #JV: See LJverlet() for more explanation on the truncation\r\n if((r2**(1/2))>rc):\r\n value = 0\r\n else:\r\n value = ((48.*x)/(r2))*(((((sig_int**2)*1.)/r2)**6) - ((((sig_int**2)*0.5)/r2)**3))\r\n\r\n return value",
"def test_lfc_inversion():\n levels = np.array([963., 789., 782.3, 754.8, 728.1, 727., 700.,\n 571., 450., 300., 248.]) * units.mbar\n temperatures = np.array([25.4, 18.4, 17.8, 15.4, 12.9, 12.8,\n 10., -3.9, -16.3, -41.1, -51.5]) * units.celsius\n dewpoints = np.array([20.4, 0.4, -0.5, -4.3, -8., -8.2, -9.,\n -23.9, -33.3, -54.1, -63.5]) * units.celsius\n lfc_pressure, lfc_temp = lfc(levels, temperatures, dewpoints)\n assert_almost_equal(lfc_pressure, 705.8806 * units.mbar, 2)\n assert_almost_equal(lfc_temp, 10.6232 * units.celsius, 2)",
"def __q2v_ff(self, vm, beta, q):\n return float((vm * beta - np.sqrt(np.power(vm * beta, 2) - 4 * vm * beta * q)) / (2 * vm))",
"def get_voltage(self):\n summary = \" \".join(self.get_summary().split())\n pattern = '\\$.... .. (.*?) .*? .*? .*? .*? . .*? .*? . . . .*?'\n voltage = float(re.findall(pattern,summary).pop())\n return voltage",
"def do_test_values(self, vel=numpy.array((3e6, 4e4, 1e2)), bf=numpy.array((0, 0, -2)),\n ef=numpy.array((0, 0, 1e6)), charge=4*e_chg):\n res = sim.lorentz(vel, ef, bf, charge)\n exp = charge*(ef + numpy.cross(vel, bf))\n nptest.assert_allclose(res, exp)",
"def get_fermi_velocities():\n\n vr = Vasprun('vasprun.xml')\n # eigenvalues = vr.eigenvalues\n bs = vr.get_band_structure()\n bands = bs.bands\n kpoints = bs.kpoints\n efermi = bs.efermi\n h_bar = 6.582e-16 # eV*s\n\n fermi_bands = []\n for spin in bands:\n for i in range(len(bands[spin])):\n if max(bands[spin][i]) > efermi > min(bands[spin][i]):\n fermi_bands.append(bands[spin][i])\n\n fermi_velocities = []\n for band in fermi_bands:\n for i in range(len(band)-1):\n if (band[i] < efermi < band[i+1]) or (band[i] > efermi > band[i+1]):\n dk = np.sqrt((kpoints[i+1].cart_coords[0]\n - kpoints[i].cart_coords[0])**2\n + (kpoints[i+1].cart_coords[1]\n - kpoints[i].cart_coords[1])**2)\n v_f = abs((band[i+1] - band[i]) / (h_bar * dk))\n fermi_velocities.append(v_f)\n\n return fermi_velocities # Values are in Angst./s",
"def test_filt_vegamag(self):\n sun = Sun.from_builtin('E490_2014')\n V = get_bandpass('johnson v')\n wave, fluxd = sun.filt(V, unit=JMmag)\n assert np.isclose(fluxd.value, -26.75, atol=0.006)",
"def build_rhs():\n\n def div(\n coeff_rho,\n momentum_x,\n momentum_y,\n momentum_z,\n ):\n \"\"\"Computes the divergence of the velocity field.\"\"\"\n # Compute the fourth order derivative of the pressure for the face\n # velocity correction.\n p_corr = (\n states['p']\n if self._params.enable_rhie_chow_correction else states['dp'])\n d4p_dx4 = self._kernel_op.apply_kernel_op_x(p_corr, 'k4d2x')\n d4p_dy4 = self._kernel_op.apply_kernel_op_y(p_corr, 'k4d2y')\n d4p_dz4 = self._kernel_op.apply_kernel_op_z(p_corr, 'k4d2z',\n 'k4d2zsh')\n\n # Compute velocity gradient based on interpolated values on cell faces.\n coeff_x = dt / (4. * coeff_rho * dx**2)\n du = self._kernel_op.apply_kernel_op_x(momentum_x, 'kDx')\n du_dx = [\n du_i / (2. * dx) + coeff_x * d4p_dx4_i\n for du_i, d4p_dx4_i in zip(du, d4p_dx4)\n ]\n\n coeff_y = dt / (4. * coeff_rho * dy**2)\n dv = self._kernel_op.apply_kernel_op_y(momentum_y, 'kDy')\n dv_dy = [\n dv_i / (2. * dy) + coeff_y * d4p_dy4_i\n for dv_i, d4p_dy4_i in zip(dv, d4p_dy4)\n ]\n\n coeff_z = dt / (4. * coeff_rho * dz**2)\n dw = self._kernel_op.apply_kernel_op_z(momentum_z, 'kDz', 'kDzsh')\n dw_dz = [\n dw_i / (2. * dz) + coeff_z * d4p_dz4_i\n for dw_i, d4p_dz4_i in zip(dw, d4p_dz4)\n ]\n\n return [\n du_dx_i + dv_dy_i + dw_dz_i\n for du_dx_i, dv_dy_i, dw_dz_i in zip(du_dx, dv_dy, dw_dz)\n ]\n\n def add_factor(\n v,\n factor,\n ):\n return [factor * v_i for v_i in v]\n\n b_terms = {\n _B_TERM_SOURCE_RHO: add_factor(src_rho, inv_dt),\n }\n if isinstance(rho_info, ConstantDensityInfo):\n b_terms.update({\n _B_TERM_DIV:\n add_factor(\n div(rho_info.rho, states['u'], states['v'], states['w']),\n inv_dt * rho_info.rho),\n _B_TERM_DRHO_DT: [\n tf.zeros_like(src_rho_i) for src_rho_i in src_rho\n ],\n })\n\n elif isinstance(rho_info, VariableDensityInfo):\n b_terms.update({\n _B_TERM_DIV:\n add_factor(\n div(1.0, states['rho_u'], states['rho_v'], states['rho_w']),\n inv_dt),\n _B_TERM_DRHO_DT:\n add_factor(rho_info.drho_dt, inv_dt),\n })\n\n else:\n raise ValueError('`rho_info` has to be either `ConstantDensityInfo` or '\n '`VariableDensityInfo`.')\n\n # pylint: disable=g-complex-comprehension\n return [(div_i + drho_dt_i - src_rho_i)\n for div_i, drho_dt_i, src_rho_i in zip(\n b_terms[_B_TERM_DIV],\n b_terms[_B_TERM_DRHO_DT],\n b_terms[_B_TERM_SOURCE_RHO],\n )], b_terms\n # pylint: enable=g-complex-comprehension",
"def get_lsf(wave_obs, sigma_v, speclib=\"miles\", zred=0.0, **extras):\n # filter out some places where sdss reports zero dispersion\n good = sigma_v > 0\n wave_obs, sigma_v = wave_obs[good], sigma_v[good]\n wave_rest = wave_obs / (1 + zred)\n\n # Get the library velocity resolution function at the corresponding\n # *rest-frame* wavelength\n if speclib == \"miles\":\n miles_fwhm_aa = 2.54\n sigma_v_lib = lightspeed * miles_fwhm_aa / 2.355 / wave_rest\n # Restrict to regions where MILES is used\n good = (wave_rest > 3525.0) & (wave_rest < 7500)\n elif speclib == \"c3k_a\":\n R_c3k = 3000\n sigma_v_lib = lightspeed / (R_c3k * 2.355)\n # Restrict to regions where C3K is used\n good = (wave_rest > 2750.0) & (wave_rest < 9100.0)\n else:\n sigma_v_lib = sigma_v\n good = slice(None)\n raise ValueError(\"speclib of type {} not supported\".format(speclib))\n\n # Get the quadrature difference\n # (Zero and negative values are skipped by FSPS)\n dsv = np.sqrt(np.clip(sigma_v**2 - sigma_v_lib**2, 0, np.inf))\n\n # return the broadening of the rest-frame library spectra required to match\n # the obserrved frame instrumental lsf\n return wave_rest[good], dsv[good]",
"def fLinear(Vc1,Vc2,Vc3,Vk,Vw,Va,Vf,Pc1,Pc2,Pc3,Pk,Pw,Pa,Pf):\n#\n# 1. Normalise volumetric components:\n#\t-----------------------------------\n\tSum=abs(Vc1)+abs(Vc2)+abs(Vc3)+abs(Vk)+abs(Vw)+abs(Va)+abs(Vf)\n\tVc1=abs(Vc1)/Sum\n\tVc2=abs(Vc2)/Sum\n\tVc3=abs(Vc3)/Sum\n\tVk=abs(Vk)/Sum\n\tVw=abs(Vw)/Sum\n\tVa=abs(Va)/Sum\n\tVf=abs(Vf)/Sum\n#\n#\t2. Compute liear response function:\n#\t-----------------------------------\n\tLrf=Vc1*Pc1+Vc2*Pc2+Vc3*Pc3+Vk*Pk+Vw*Pw+Va*Pa+Vf*Pf\n#\n# 3. Output result:\n#\t-----------------\n\treturn Lrf",
"def w_dispersion(q,v=1):\r\n # parameters for two-fluid hydrodynamic model from [1]\r\n Vol = np.sqrt(3)/2 * 4.63**2; # unit cell volume in graphene\r\n wr1= 4.08 / HARTREE; # Pi-electrons [eV]\r\n n1 = 2/Vol;\r\n wr2= 13.06 / HARTREE; # Sigma-electrons [eV]\r\n n2 = 6/Vol;\r\n \r\n # resonance frequencies\r\n w12 = wr1**2; # we neglect the acoustic velocity s=0\r\n w22 = wr2**2;\r\n\r\n # generalized plasma frequencies\r\n Q12 = 2*np.pi*n1*q * v ; # effective Omega_nu^2\r\n Q22 = 2*np.pi*n2*q * v ;\r\n\r\n # dispersion formula (17) in [1]\r\n A = 0.5*(w12 + Q12 + w22 + Q22);\r\n B = np.sqrt( 0.25*( w12 + Q12 - w22 - Q22 )**2 + Q12 * Q22 );\r\n\r\n return np.asarray([np.sqrt(A-B), np.sqrt(A+B)]);",
"def calcLorentzGammaFromVelocity(self,direction):\n if direction not in self.v.order: \n raise CoordinateVector(\"The direction, \"+str(direction)+ \" needs to be one of \" +\",\".join(self.x.order) + \" to calculated the lorentz gamma.\")\n speed_light = constants.physical_constants[\"speed of light in vacuum\"][0]#m/sec by default\n return math.sqrt(1 /(1 - (getattr(self.v,direction)/speed_light)**2))",
"def beta_fct(M_p, F_xuv, R_p):\n\n M_EARTH= const.M_earth.cgs.value\n R_EARTH = const.R_earth.cgs.value\n\n if (type(F_xuv) == float) or (type(F_xuv) == np.float64):\n # if F_xuv is single value\n grav_pot = -const.G.cgs.value * (M_p*M_EARTH) / (R_p*R_EARTH)\n log_beta = max(0.0, -0.185 * np.log10(-grav_pot)\n \t\t\t\t\t+ 0.021 * np.log10(F_xuv) + 2.42)\n beta = 10**log_beta\n return beta\n\n elif len(F_xuv) > 1:\n # if F_xuv is a list\n betas = []\n for i in range(len(F_xuv)):\n grav_pot_i = -const.G.cgs.value \\\n * (M_p[i]*M_EARTH) / (R_p[i]*R_EARTH)\n log_beta_i = max(0.0, -0.185 * np.log10(-grav_pot_i)\n \t\t\t\t\t + 0.021 * np.log10(F_xuv[i]) + 2.42)\n beta_i = 10**log_beta_i\n betas.append(beta_i)\n betas = np.array(betas)\n return betas",
"def velocity_field(xt,yt,x0,y0,velf,dia,tsr,solidity):\n rad = dia/2.\n rot = tsr*velf/rad\n\n # Calculating EMG distribution parameters\n loc,spr,skw,scl = vorticity(tsr,solidity)\n \n # Translating the turbine position\n x0t = x0 - xt\n y0t = y0 - yt\n \n # Integration of the vorticity profile using Fortran code (vorticity.f90; _vortrun.so)\n vel_vs = dblquad(_vortmodel.integrand,0.,35.*dia,lambda x: -4.*dia,lambda x: 4.*dia, args=(x0t,y0t,dia,loc[0],loc[1],loc[2],spr[0],spr[1],skw[0],skw[1],scl[0],scl[1],scl[2]))\n \n # Calculating velocity deficit\n vel = (vel_vs[0]*(rot))/(2.*pi)\n vel = (vel + velf)/velf # normalization of velocity\n \n return vel",
"def V_lopass(V, R_S, C, L, R_L, f):\n # current in circuit\n I = V/(R_S + Z_lopass(C, L, R_L, f))\n # voltage across circuit\n V_out = V - I*R_S\n I_C = V_out/Xcap(C, f)\n I_L = V_out/Z_low(L, R_L, f)\n V_L = I_L*R_L\n return V_L",
"def test_velocity(self):\n sol = Mader(p_cj=3.0e11, d_cj=8.0e5, gamma=3.0, u_piston=0.0)\n # r must contain 2 elements, otherwise the density and pressure are nan\n r = np.array([0.7, 0.8])\n t = 6.25e-6\n solrt = sol(r, t)\n np.testing.assert_allclose(solrt.velocity[0], 144000.0)",
"def fluxRatio_fromVmag(Vmag):\n fluxRatio = 10.**(-0.4*Vmag)\n return fluxRatio",
"def algoRTLE(self, V, d, f, rpm, flute):\n return math.pow(100, (1/0.15)) * math.pow(V, (-1/0.15)) * math.pow((d*25.4), -1) * math.pow(((f/(rpm * flute)) *\n 25.4), (-0.1/0.15))",
"def _bf_vel(self, z, t, dx, mincorr, Vmin, Vmax, Lc, dfracmin, dfracmax,\n minR2):\n displacement = self._dx_mat(z, dx, mincorr)\n duration = self._dt_mat(t)\n displacement = self._clean_dx_mat(displacement, duration,\n Vmin, Vmax, Lc, dfracmin, dfracmax)\n\n Vc = self._vregress(displacement, duration, minR2)\n return Vc",
"def detect_velocity(image):\n nonlocal prev, v_last\n curr_bgr = cv.warpPerspective(image, M, (160, 120))\n curr = cv.cvtColor(curr_bgr, cv.COLOR_BGR2GRAY)\n\n if prev is None:\n prev = curr\n v_last = 0.0\n return v_last, curr_bgr, np.zeros_like(image)\n\n flow = cv.calcOpticalFlowFarneback(\n prev, # Previous image\n curr, # Current image\n None, # Computed flow image that has the same size oas prev and type CV_32FC2.\n 0.5, # Specifies the image scale (<1) to build pyramids for each image.\n 3, # Number of pyramid layers including the initial image.\n 15, # winsize, averaging windows size.\n 3, # iterations, number of iterations the algorithm does at each pyramid level.\n 5, # standard deviation of the Gaussian that is used to smooth derivative\n 1.5,\n 0)\n\n mag, ang = cv.cartToPolar(flow[..., 0], flow[..., 1])\n\n v = mag * np.sin(ang)\n\n ######################\n ## Histogram for mag\n ar = np.arange(-20.0, 20.0, 0.50, dtype=np.float)\n his = np.histogram(v, bins=ar)\n\n for i, n in enumerate(his[0]):\n bgr = (255, 255, 0)\n if his[1][i] < 0:\n bgr = (0, 255, 255)\n\n #print('[{}] {} - {}'.format(i, n, his[1][i]))\n cv.rectangle( image, #curr_bgr,\n (i*2, HEIGHT),\n (i*2, HEIGHT - int(n / 10)),\n bgr, #(0, 255, 255),\n cv.FILLED)\n\n hsv = np.zeros_like(image)\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 1] = 255\n hsv[..., 2] = cv.normalize(np.abs(v), None, 0, 255, cv.NORM_MINMAX)\n hsv_bgr = cv.cvtColor(hsv, cv.COLOR_HSV2BGR)\n ##\n ######################\n\n v_abs = np.absolute(v)\n v = v[v_abs >= np.percentile(v_abs, VELOCITY_CUTOFF_PCT)]\n\n v_max = v_last + MAX_ACC\n v_min = v_last - MAX_ACC\n v = np.clip(v, v_min, v_max)\n if v.size > 0:\n v_avg = v.mean()\n else:\n if v_last > 0:\n v_avg = max(v_last - MAX_ACC, 0)\n elif v_last < 0:\n v_avg = min(v_last + MAX_ACC, 0)\n else:\n v_avg = 0\n\n prev = curr\n v_last = v_avg\n return v_last, curr_bgr, hsv_bgr",
"def particle_LJV(R,N,D):\n b = np.zeros(N)\n for i in range(N):\n x = R[0,np.arange(N)!=i]-R[0,i]\n y = R[1,np.arange(N)!=i]-R[1,i]\n z = R[2,np.arange(N)!=i]-R[2,i]\n [x,y,z] = minimal_image(x,y,z,D)\n c = np.stack((x,y,z))\n r = np.sqrt(np.sum(c**2,0))\n b[i] = np.sum(4*((1/r)**12-(1/r)**6))\n Uv = np.sum(b)\n return Uv",
"def vel(z, c = cp.cc.c_light_cm_s/1e5):\n # return z*c/(1+z)\n return c*((1+z)**2-1)/((1+z)**2+1)",
"def calc_V(A):\n return 1. / calc_rV(A)",
"def get_luminosity(self):\n\n if self.no_dist is False and self.no_flux is False:\n\n dist = self.distance\n snu = self.snu_at_1GHz\n lum = lumin(dist, snu)\n\n self.lum = lum\n else:\n self.lum = -1 # use -1 to indicate unknown luminosity\n return self.lum",
"def feller(self):\n return 2 * self.kappa_y * self.mean_v - self.eta_y**2 > 0",
"def get_vsolar(self):\n return self.read_register(4098, 1, 3)",
"def v_multiplier(self):\n return (4./3)*np.pi*(self.bins[:, 1]/2)**3",
"def compute_llfr(fwd, volume=np.array([3.3, 1.45 , 6, 0.3, 0.4]), va=0.0):\n weight = volume / volume.sum()\n fwds_incl_va = fwd.copy()\n fwds_incl_va[0] = fwds_incl_va[0] + va / 10000.0\n llfr = fwds_incl_va * weight\n return np.array([llfr.sum()])",
"def V_magJupiter_2(alpha,a_p,d):\n V = 5.*np.log10(a_p*d) - 9.428 - 2.5*np.log10(1.0 - 1.507*(alpha/180.) - 0.363*(alpha/180.)**2. - 0.062*(alpha/180.)**3.+ 2.809*(alpha/180.)**4. - 1.876*(alpha/180.)**5.)\n return V",
"def test_el_lfc_equals_lcl():\n levels = np.array([912., 905.3, 874.4, 850., 815.1, 786.6, 759.1, 748.,\n 732.3, 700., 654.8, 606.8, 562.4, 501.8, 500., 482.,\n 400., 393.3, 317.1, 307., 300., 252.7, 250., 200.,\n 199.3, 197., 190., 172., 156.6, 150., 122.9, 112.,\n 106.2, 100.]) * units.mbar\n temperatures = np.array([29.4, 28.7, 25.2, 22.4, 19.4, 16.8, 14.3,\n 13.2, 12.6, 11.4, 7.1, 2.2, -2.7, -10.1,\n -10.3, -12.4, -23.3, -24.4, -38., -40.1, -41.1,\n -49.8, -50.3, -59.1, -59.1, -59.3, -59.7, -56.3,\n -56.9, -57.1, -59.1, -60.1, -58.6, -56.9]) * units.celsius\n dewpoints = np.array([18.4, 18.1, 16.6, 15.4, 13.2, 11.4, 9.6, 8.8, 0.,\n -18.6, -22.9, -27.8, -32.7, -40.1, -40.3, -42.4, -53.3,\n -54.4, -68., -70.1, -70., -70., -70., -70., -70., -70.,\n -70., -70., -70., -70., -70., -70., -70., -70.]) * units.celsius\n el_pressure, el_temperature = el(levels, temperatures, dewpoints)\n assert_almost_equal(el_pressure, 175.7663 * units.mbar, 3)\n assert_almost_equal(el_temperature, -57.03994 * units.degC, 3)",
"def calibV(self):\n # clear buffer in case of errors\n self.flushInput()\n \n if (self.model == 'GDS'):\n self.write(':CHAN'+str(ch)+':SCAL?\\n')\n # returns V/div, turn it into multiplicative factor\n # between digitizer and actual volts\n vmult = float(self.readline()) * 10./255.\n # GDS includes vertical offset in the data returned.\n voff = 0.\n elif (self.model == 'TDS'):\n self.write('WFMPre:YMUlt?\\n')\n # formula I am using later is from TDS manual, so this\n # is straightforward.\n vmult = float(self.readline())\n self.write('WFMPre:YOFf?\\n')\n voff = float(self.readline())\n \n # clear buffer in case of errors\n self.flushInput()\n\n return (vmult, voff)",
"def get_vlb(self):\n vlb = 0\n\n # First term\n # E[LN p(g | \\gamma)]\n E_ln_g = self.expected_log_g()\n vlb += Dirichlet(self.gamma[None, None, :]).negentropy(E_ln_g=E_ln_g).sum()\n\n # Second term\n # E[LN q(g | \\tilde{gamma})]\n vlb -= Dirichlet(self.mf_gamma).negentropy().sum()\n\n return vlb",
"def get_V(self):\n if not self.gpu:\n self.rho[...] = conj_square(self.psi)\n self.fourier_grid[...] = fft.rfftn(self.rho)\n ft_inv_laplace(self.fourier_grid)\n self.fourier_grid *= 4*np.pi*G\n self.V[...] = fft.irfftn(self.fourier_grid)\n self.V[...] += self.lam*self.rho**2\n else:\n self.g_conj_square(self.g_psi, self.g_rho)\n cufft.cufftExecD2Z(self.rho_plan, self.g_rho.ptr, self.g_fourier.ptr)\n self.g_fourier /= self.psi.shape[0]**3\n self.g_pot_func(self.g_fourier, np.float64(4*np.pi*G/self.N), np.int64(self.fourier_grid.shape[0]), np.int64(self.fourier_grid.shape[1]), np.int64(self.fourier_grid.shape[2]), block=(8,8,8), grid=tuple([(i+7)/8 for i in self.psi_hat.shape]))\n cufft.cufftExecZ2D(self.inv_plan, self.g_fourier.ptr, self.g_V.ptr)\n self.g_V += self.lam*self.g_rho**2",
"def E2V(E):\n# for energy in mev returns velocity in m/s\n return sqrt(E/5.227e-6)",
"def _VRF(self) -> array:\n pass",
"def fv(X,Y,dx,dy,r2,i,append,L,N,U,dt,close_list,Nlist,vel_verlet_on,R,menu,submenu,n1,grid,G,wallcount,X2):\r\n\r\n \"\"\"JV: append is a boolean. If it's true, adds the energy to our list, if it isn't, it doesn't.\r\n We do that because in some cases we will call the algorithm more times than the actual step number (and\r\n we only want to sum the value T/dt times), this is needed in the velocity-Verlet algorithm, that we call the fv()\r\n function one more time than needed just to start the loop.\"\"\"\r\n\r\n# L = self.param[2]\r\n#\r\n# N = self.particles.size\r\n\r\n #For computing all the distances I use a trick with the meshgrid function,\r\n #see the documentation on how this works if you dont see it.\r\n\r\n \"\"\"JV: X is an array that contains each position, mx is an nxn array that each column is the position of one particle (so it's a matrix\r\n that has n X rows) and mxt is the same but tranposed (so it's a matrix of n X columns)\"\"\"\r\n\r\n \"\"\"\r\n UPDATE: This block of code is commented because now it's done in a loop inside solve_verlet() (due to Numba...).\r\n Looks a little bit messy but if Numba allowed me to call the np.meshgrid() function we would do this here. Sorry, but I like to keep the comment to remind me that.\r\n \"\"\"\r\n # MX, MXT = np.meshgrid(X,X,copy=False)\r\n # MY, MYT = np.meshgrid(Y,Y,copy=False)\r\n\r\n #JV: So dx is a nxn simetric array with 0 in the diagonal, and each position is the corresponding distance between the particles,\r\n # so the position [1,2] is the distance between partcle 1 and 2 (x1-x2), and so on\r\n # dx = MXT - MX\r\n # dx = dx\r\n\r\n # dy = MYT - MY\r\n # dy = dy\r\n\r\n # r2 = np.square(dx)+np.square(dy)\r\n\r\n # if(menu == \"Free!\"):\r\n # #JV: We do this to get the actual distance in the case of the \"Free!\" simulation, in which there is no elastic collision between the particles and the boundaries\r\n # dx_v2 = (np.abs(dx.copy())-1*L)\r\n # r2_v2 = dx_v2**2+dy**2\r\n # dx = np.where(r2 > r2_v2,dx_v2*np.sign(dx),dx)\r\n # r2 = np.where(r2 > r2_v2,r2_v2,r2)\r\n # dy_v2 = (np.abs(dy.copy())-1*L)\r\n # r2_v2 = dx**2+dy_v2**2\r\n # dy = np.where(r2 > r2_v2,dy_v2*np.sign(dy),dy)\r\n # r2 = np.where(r2 > r2_v2,r2_v2,r2)\r\n # r2_v2 = dx_v2**2+dy_v2**2\r\n # dx = np.where(r2 > r2_v2,dx_v2*np.sign(dx),dx)\r\n # dy = np.where(r2 > r2_v2,dy_v2*np.sign(dy),dy)\r\n # r2 = np.where(r2 > r2_v2,r2_v2,r2)\r\n\r\n dUx = 0.\r\n dUy = 0.\r\n utot = np.zeros((N))\r\n f = np.zeros((N,2))\r\n\r\n for j in range(0,N):\r\n dUx = 0.\r\n dUy = 0.\r\n u = 0.\r\n\r\n #JV: we now calculate the force with only the Nlist closest particles\r\n for k in range(0,Nlist):\r\n c = int(close_list[j][k])\r\n\r\n #In the force computation we include the LJ and the walls (JV: in the verlet case). 
I truncate the interaction at self.R units of lenght,\r\n #I also avoid distances close to 0 (which only should affect the diagonal in the matrix of distances)\r\n #All these conditions are included using the numpy.where function.\r\n #If you want to include more forces you only need to add terms to these lines.\r\n\r\n if(vel_verlet_on == True):\r\n if((r2[j,c] < 4*max(R[j],R[c])) and (r2[j,c] > 10**(-2))):\r\n dUx = dUx + dLJverlet(dx[j,c],r2[j,c],R[j],R[c])\r\n dUy = dUy + dLJverlet(dy[j,c],r2[j,c],R[j],R[c])\r\n # print(dUx,dUy,dx[j,c],r2[j,c],R[j],R[c])\r\n#JV: COMMENTED PART BECAUSE NUMBA HAS PROBLEMS WITH THIS BLOCK OF CODE THAT DOES THE CALCULATION IN THE VERLET ALGORITHM, NOW IT ONLY WORKS WITH THE VELOCITY VERLET, TO FIX\"\r\n# else:\r\n# if((r2[j,c] < 4*max(R[j],R[c])) and (r2[j,c] > 10**(-2))):\r\n# dUx = dUx + dLJverlet(dx[j,c],r2[j,c],R[j],R[c]) - dwalls([X[j],Y[j]],param)\r\n# dUy = dUy + dLJverlet(dy[j,c],r2[j,c],R[j],R[c]) - dwalls([X[j],Y[j]],param)\r\n\r\n #JV: We add the energy in the corresponding array in both cases, remember that the verlet algorithm will include the energy from the walls\r\n # and that will be visible in fluctuations on the energy\r\n if(vel_verlet_on == True):\r\n if((r2[j,c] < 2*max(R[j],R[c])) and (r2[j,c] > 10**(-2))):\r\n u = u + LJverlet(r2[j,c],R[c],R[j])\r\n# else:\r\n# u = u + walls([X[j],Y[j]])#JV: TO CHANGE; NOW ONLY WORKS WITH VEL_VERLET_ON\r\n# else:\r\n# if((r2[j,c] < 2*max(R[j],R[c])) and (r2[j,c] > 10**(-2))):\r\n# u = u + LJverlet(r2[j,c],R[c],R[j],param)\r\n#\r\n# if((X[j]**2+Y[j]**2) > (0.8*L)**2):\r\n# u = u + walls([X[j],Y[j]],param)\r\n #JV: COMMENTED FOR NOW\r\n\r\n #JV: If the argument it's True, we will append the energy to our corresponding array\r\n if(append == True):\r\n utot[j] = u\r\n\r\n f[j,:] = f[j,:]+np.array([dUx,dUy])\r\n\r\n if(append == True):\r\n U[int(i)] = np.sum(utot) #JV: Finally, we add the total energy so we have the global energy in a step of time\r\n\r\n return f",
"def V_magMercury(alpha,a_p,d):\n V = 5.*np.log10(a_p*d) - 0.613 + 6.3280e-02*alpha - 1.6336e-03*alpha**2. + 3.3644e-05*alpha**3. - 3.4265e-07*alpha**4. + 1.6893e-09*alpha**5. - 3.0334e-12*alpha**6.\n return V",
"def calculate_luminosity(\n spec_fname, distance, wavelength_column=0,\n wavelength_unit=u.angstrom, flux_column=1,\n flux_unit=u.Unit('erg / (Angstrom cm2 s)')):\n #BAD STYLE change to parse quantity\n distance = u.Unit(distance)\n\n wavelength, flux = np.loadtxt(spec_fname, usecols=(wavelength_column, flux_column), unpack=True)\n\n flux_density = np.trapz(flux, wavelength) * (flux_unit * wavelength_unit)\n luminosity = (flux_density * 4 * np.pi * distance**2).to('erg/s')\n\n return luminosity.value, wavelength.min(), wavelength.max()",
"def _vtmaxEq(vT,R,diskdf):\n #Calculate a bunch of stuff that we need\n if diskdf._beta == 0.:\n E= vT**2./2.+sc.log(R)\n xE= sc.exp(E-.5)\n OE= xE**-1.\n LCE= xE\n dxEdvT= xE*vT\n else: #non-flat rotation curve\n E= vT**2./2.+1./2./diskdf._beta*R**(2.*diskdf._beta)\n xE= (2.*E/(1.+1./diskdf._beta))**(1./2./diskdf._beta)\n OE= xE**(diskdf._beta-1.)\n LCE= xE**(diskdf._beta+1.)\n dxEdvT= xE/2./diskdf._beta/E*vT\n L= R*vT\n sigma2xE= diskdf._surfaceSigmaProfile.sigma2(xE,log=False)\n return OE*R/sigma2xE+\\\n (diskdf._surfaceSigmaProfile.surfacemassDerivative(xE,log=True)\\\n -(1.+OE*(L-LCE)/sigma2xE)*diskdf._surfaceSigmaProfile.sigma2Derivative(xE,log=True)\\\n +(L-LCE)/sigma2xE*(diskdf._beta-1.)*xE**(diskdf._beta-2.)\\\n -OE*(diskdf._beta+1.)/sigma2xE*xE**diskdf._beta)\\\n *dxEdvT",
"def read_voltage(self):\n self.write(':FETC?')\n msg = self.read()\n #print ('dmm msg = ', msg)\n v = msg.split(',')[0].rstrip('NVDC').strip()\n if v[-1] == 'R':\n return float(v[:-1])\n else:\n return float(v)",
"def get_vlb(self):\n raise NotImplementedError()\n vlb = 0\n\n # First term\n # E[LN p(g | \\gamma)]\n E_ln_g = self.expected_log_g()\n vlb += Dirichlet(self.gamma[None, None, :]).negentropy(E_ln_g=E_ln_g).sum()\n\n # Second term\n # E[LN q(g | \\tilde{gamma})]\n vlb -= Dirichlet(self.mf_gamma).negentropy().sum()\n\n return vlb",
"def ccm_unred(wave, flux, ebv, r_v=\"\"):\n import numpy as np\n wave = np.array(wave, float)\n flux = np.array(flux, float)\n \n if wave.size != flux.size: raise TypeError, 'ERROR - wave and flux vectors must be the same size'\n \n if not bool(r_v): r_v = 3.1\n \n x = 10000.0/wave\n npts = wave.size\n a = np.zeros(npts, float)\n b = np.zeros(npts, float)\n \n ###############################\n #Infrared\n \n good = np.where( (x > 0.3) & (x < 1.1) )\n a[good] = 0.574 * x[good]**(1.61)\n b[good] = -0.527 * x[good]**(1.61)\n \n ###############################\n # Optical & Near IR\n \n good = np.where( (x >= 1.1) & (x < 3.3) )\n y = x[good] - 1.82\n \n c1 = np.array([ 1.0 , 0.104, -0.609, 0.701, 1.137, \\\n -1.718, -0.827, 1.647, -0.505 ])\n c2 = np.array([ 0.0, 1.952, 2.908, -3.989, -7.985, \\\n 11.102, 5.491, -10.805, 3.347 ] )\n \n a[good] = np.polyval(c1[::-1], y)\n b[good] = np.polyval(c2[::-1], y)\n \n ###############################\n # Mid-UV\n \n good = np.where( (x >= 3.3) & (x < 8) )\n y = x[good]\n F_a = np.zeros(np.size(good),float)\n F_b = np.zeros(np.size(good),float)\n good1 = np.where( y > 5.9 )\n \n if np.size(good1) > 0:\n y1 = y[good1] - 5.9\n F_a[ good1] = -0.04473 * y1**2 - 0.009779 * y1**3\n F_b[ good1] = 0.2130 * y1**2 + 0.1207 * y1**3\n \n a[good] = 1.752 - 0.316*y - (0.104 / ( (y-4.67)**2 + 0.341 )) + F_a\n b[good] = -3.090 + 1.825*y + (1.206 / ( (y-4.62)**2 + 0.263 )) + F_b\n \n ###############################\n # Far-UV\n \n good = np.where( (x >= 8) & (x <= 11) )\n y = x[good] - 8.0\n c1 = [ -1.073, -0.628, 0.137, -0.070 ]\n c2 = [ 13.670, 4.257, -0.420, 0.374 ]\n a[good] = np.polyval(c1[::-1], y)\n b[good] = np.polyval(c2[::-1], y)\n \n # Applying Extinction Correction\n \n a_v = r_v * ebv\n a_lambda = a_v * (a + b/r_v)\n \n funred = flux * 10.0**(0.4*a_lambda) \n \n return funred",
"def fla (mva, vnom):\r\n x=mva*1000000\r\n y=(vnom*1000)\r\n z=round(x/y,3)\r\n return z",
"def comp_vel(p1=database['K+'], p2=database['pi+'], p3=database['p+'], pmin=0, pmax=80):\r\n p_range = np.linspace(pmin, pmax, 1000)\r\n m1 = p1.mass\r\n m2 = p2.mass\r\n m3 = p3.mass\r\n v1, v2, v3 = [], [], []\r\n for p in p_range:\r\n v1.append(c*beta(p, m1))\r\n v2.append(c*beta(p, m2))\r\n v3.append(c*beta(p, m3))\r\n fig = plt.figure(figsize=[10, 5])\r\n ax = fig.add_subplot(1, 1, 1)\r\n p1_name = r'K$^+$'\r\n p2_name = r'$\\pi^+$'\r\n p3_name = r'p$^+$'\r\n ax.plot(p_range, v1, 'r', label=p1_name)\r\n ax.plot(p_range, v2, 'b', label=p2_name)\r\n ax.plot(p_range, v3, 'g', label=p3_name)\r\n ax.set_xlabel('p / GeV', fontsize=20)\r\n ax.set_ylabel(r'v / $ms^{-1}$', fontsize=20)\r\n ax.axvline(75, color='k', label='p = 75 GeV')\r\n ax.set_xticks(np.arange(pmin, pmax+1, 1))\r\n ax.set_xticklabels(np.arange(pmin, pmax+1, 1))\r\n ax.grid()\r\n ax.minorticks_on()\r\n ax.set_xlim(pmin, pmax)\r\n# ax.set_ylim(np.min(v1+v2))\r\n ax.legend(fontsize=20)\r\n plt.show\r\n return",
"def loadLuminosityFunction(self):\n\n tab = np.genfromtxt(self.fname[0], skip_header=self.skip_header)\n if not self.evolve:\n self.luminosity_function = np.zeros((tab.shape[0], self.nbands, self.nzbins))\n\n else:\n self.luminosity_function = np.zeros((tab.shape[0], self.nbands, 1))\n\n if self.ecol is not None:\n self.ye = np.zeros(self.luminosity_function.shape)\n imult = 1\n else:\n self.ye = None\n imult = 2\n\n self.magmean = tab[:,self.xcol]\n\n if self.nzbins==1:\n for i in range(self.nzbins):\n for j in range(self.nbands):\n self.luminosity_function[:,j,i] = tab[:,self.ycol]\n if self.ecol is not None:\n self.ye[:,j,i] = tab[:,self.ecol]\n else:\n if not self.evolve:\n assert((tab.shape[1]-1)==self.nzbins)\n for i in range(self.nzbins):\n for j in range(self.nbands):\n self.luminosity_function[:,j,i] = tab[:,i*imult+self.ycol]\n if self.ecol is not None:\n self.ye[:,j,i] = tab[:,i*imult+self.ecol]\n else:\n for j in range(self.nbands):\n self.luminosity_function[:,j,0] = tab[:,self.ycol]\n if self.ecol is not None:\n self.ye[:,j,0] = tab[:,self.ecol]\n\n self.xmean = self.magmean\n self.y = self.luminosity_function",
"def LJ(epsilon,sigma,r):\n P1=(sigma/r)**12\n P2=(sigma/r)**6\n return 4*epsilon*(P1-P2)",
"def rvs(self):\n return float(self.interp(random.rand()))",
"def test_vic_linear(self):\n z_matrix = np.array(\n [[0.0, 0.0, 1.0],\n [0.1, 0.2, 0.8],\n [0.2, 0.4, 0.6],\n [0.3, 0.7, 0.3],\n [0.6, 0.8, 0.2],\n [0.8, 0.9, 0.1],\n [1.0, 1.0, 0.0]],\n dtype=np.float64)\n obtained_w_vector = mcdm.weigh(z_matrix, \"VIC\")\n expected_w_vector = np.array(\n [0.33817571, 0.33091215, 0.33091215],\n dtype=np.float64)\n np.testing.assert_allclose(obtained_w_vector, expected_w_vector)\n self.assertEqual(obtained_w_vector.dtype, expected_w_vector.dtype)",
"def vol(x):\r\n return pi*(topdia(x)/2000.)**2 * length (x)",
"def V_magMars_2(alpha,a_p,d):\n V = 5.*np.log10(a_p*d) - 0.367 - 0.02573*alpha + 0.0003445*alpha**2. + 0. + 0. #L(λe) + L(Ls)\n return V",
"def width_v_rho(model: SingleRhNeutrinoModel):\n params = _neutrino_vector_meson_constants[\"rho0\"]\n k, g = params[\"k\"], params[\"g\"]\n return _width_v_hv(model, MRHO, k, g)",
"def define_ufl_velocity_equation(self):\n\n if hasattr(self, 'f1'):\n return None\n\n if self.config['material']['type'] == 'viscous':\n self.f1 = 0\n return None\n\n if not self.config['formulation']['time']['unsteady']:\n self.f1 = 0\n return None\n\n theta = self.config['formulation']['time']['theta']\n dt = self.config['formulation']['time']['dt']\n f1 = self.displacement - self.displacement0 \\\n - dt*(theta*self.velocity + (1.0 - theta)*self.velocity0)\n\n self.f1 = dlf.dot(self.test_vector, f1)*dlf.dx\n\n return None",
"def test_jv():\n import time\n t1 = time.time()\n\n v_list = [ 3.3, 4, 1.9, 0, 9.2, -7.1 ]\n x_list = [ 0, 1.01, 0.2, 3.3, 5.9, 77. ]\n vals1 = [ galsim.bessel.jv(v,x) for v,x in zip(v_list,x_list) ]\n print 'x = ',x_list\n print 'vals1 = ',vals1\n\n try:\n import scipy.special\n vals2 = [ scipy.special.jv(v,x) for v,x in zip(v_list,x_list) ]\n print 'vals2 = ',vals2\n np.testing.assert_almost_equal(\n vals1, vals2, 8, \"bessel.jv disagrees with scipy.special.jv\")\n except ImportError:\n print 'Unable to import scipy. Skipping scipy tests of jv.'\n\n # These values are what scipy returns. Check against these, so not require scipy.\n vals2 = [ 0.0,\n 0.0025745895535573995,\n 0.0068656051839294848,\n -0.34429626039888467,\n 0.015134049434950021,\n 0.087784805831697565\n ]\n np.testing.assert_almost_equal(\n vals1, vals2, 8, \"bessel.jv disagrees with reference values\")\n\n t2 = time.time()\n print 'time for %s = %.2f'%(funcname(),t2-t1)",
"def testDensityCenterVelocity(self):\n known_dcv = np.array([0.24275266063732542, 0.25474645145914782, 0.32455563530545328])\n np.testing.assert_allclose(nb.dc_vel, known_dcv)",
"def luminosity(r,T,autoDebug=True):\n\t#-----------BEGIN ERROR CHECKING----------\n\tif autoDebug:\n\t\tsam.type_check(r, sam.TYPES_math, \"r\")\n\t\tsam.type_check(T, sam.TYPES_math, \"T\")\n\t\tsam.value_check(r,.0,\">\",\"r\")\n\t\tsam.value_check(T,.0,\">\",\"T\")\n\t#-----------END ERROR CHECKING----------\n\n\tL = 4 * sam.CONSTANT_pi * r**2 * sam.CONSTANT_SB* T**4\n\treturn L",
"def integrate_verlet(X, V, iparams, blist, sp):\n T, E = np.zeros(sp.Nt), np.zeros(sp.Nt)\n N = len(X)\n Vtemp = np.zeros(X.shape)\n Fnew = np.zeros(X.shape)\n ti = time.time()\n\n F = force_list(X, V, iparams, blist, \\\n sp.L, sp.gamma, sp.kT, sp.dt, sp.rc)\n T[0] = temperature(V)\n E[0] = tot_KE(V) + tot_PE(X, iparams, blist, sp.rc)\n save_xyzmatrix(\"Dump/dump_%i.xyz\" % 0, blist, X)\n\n for i in range(1, sp.Nt):\n # 1. GW formulation DIVERGES TWICE AS MUCH AS 3rd!\n# X = X + V * sp.dt + F * sp.dt**2 / 2.0\n# Vtemp = V + F * sp.dt / 2.0\n# Fnew = force_list(X, Vtemp, iparams, blist, \\\n# sp.L, sp.gamma, sp.kT, sp.dt, sp.rc)\n# V = Vtemp + (F + Fnew) * sp.dt / 2\n# F = Fnew\n\n # 2. Wiki formulation without half-step velocity\n X = X + V * sp.dt + F * sp.dt**2 / 2.0\n Fnew = force_list(X, V, iparams, blist, \\\n sp.L, sp.gamma, sp.kT, sp.dt, sp.rc)\n V = V + (F + Fnew) * sp.dt / 2.0\n F = Fnew\n\n X = X % sp.L\n\n KE = tot_KE(V)\n E[i] = KE + tot_PE(X, iparams, blist, sp.rc)\n T[i] = KE / (3.0 / 2.0 * N)\n tf = time.time()\n if (i+1) % sp.thermo == 0:\n save_xyzmatrix(\"Dump/dump_%i.xyz\" % (i+1), blist, X)\n print(\"Step: %i | t: %.3f | T: %.5f | E: %.3e | Time: %.2f\" % \\\n (i+1, i * sp.dt, T[i], E[i], tf - ti))\n return T, E",
"def V2E(V):\n# for v in m/s returns energy in meV\n return 5.227e-6*V*V",
"def VerletHope2(r, v, beta,dt,R_dust,M_dust):\n # Deceptively simple (read about Velocity Verlet on wikipedia)\n r_new = r + v*dt + calculate_acceleration2(r,v,beta,omega,R_dust,M_dust)*dt**2/2\n v_new = v + (calculate_acceleration2(r,v,beta,omega,R_dust,M_dust) + calculate_acceleration2(r_new,v,beta,omega,R_dust,M_dust))/2 * dt\n \n return (r_new, v_new)",
"def test_velocity_vs_current(self):\n t, x_n, x_p = self.t, self.x_n, self.x_p\n\n beta_n = self.model.param.beta_n\n beta_n = self.param.evaluate(beta_n)\n beta_p = self.model.param.beta_p\n beta_p = self.param.evaluate(beta_p)\n\n np.testing.assert_array_almost_equal(\n self.v_box(t, x_n), beta_n * self.i_e(t, x_n)\n )\n np.testing.assert_array_almost_equal(\n self.v_box(t, x_p), beta_p * self.i_e(t, x_p)\n )",
"def test_lfc_equals_lcl():\n levels = np.array([912., 905.3, 874.4, 850., 815.1, 786.6, 759.1,\n 748., 732.2, 700., 654.8]) * units.mbar\n temperatures = np.array([29.4, 28.7, 25.2, 22.4, 19.4, 16.8,\n 14.0, 13.2, 12.6, 11.4, 7.1]) * units.celsius\n dewpoints = np.array([18.4, 18.1, 16.6, 15.4, 13.2, 11.4, 9.6,\n 8.8, 0., -18.6, -22.9]) * units.celsius\n lfc_pressure, lfc_temp = lfc(levels, temperatures, dewpoints)\n assert_almost_equal(lfc_pressure, 777.0786 * units.mbar, 2)\n assert_almost_equal(lfc_temp, 15.8714 * units.celsius, 2)",
"def define_ufl_equations_diff(self):\n\n # Derivatives of velocity integration equation.\n if self.f1 != 0:\n self.df1_du = dlf.derivative(self.f1, self.displacement, self.trial_vector)\n self.df1_dv = dlf.derivative(self.f1, self.velocity, self.trial_vector)\n else:\n self.df1_du = 0\n self.df1_dv = 0\n self.df1_dp = 0 # This is always zero.\n\n # Derivatives of momentum equation.\n if self.displacement != 0:\n self.df2_du = dlf.derivative(self.f2, self.displacement, self.trial_vector)\n else:\n self.df2_du = 0\n\n if self.velocity != 0:\n self.df2_dv = dlf.derivative(self.f2, self.velocity, self.trial_vector)\n else:\n self.df2_dv = 0\n\n if self.pressure != 0:\n self.df2_dp = dlf.derivative(self.f2, self.pressure, self.trial_scalar)\n else:\n self.df2_dp = 0\n\n # Derivatives of incompressibility equation.\n if self.f3 != 0:\n if self.displacement != 0:\n self.df3_du = dlf.derivative(self.f3, self.displacement, self.trial_vector)\n else:\n self.df3_du = 0\n\n if self.velocity != 0:\n self.df3_dv = dlf.derivative(self.f3, self.velocity, self.trial_vector)\n else:\n self.df3_dv = 0\n\n self.df3_dp = dlf.derivative(self.f3, self.pressure, self.trial_scalar)\n else:\n self.df3_du = 0\n self.df3_dv = 0\n self.df3_dp = 0\n\n return None",
"def compute_desired_velocity(self):\n mask_red = (self.image_red == 255) \\\n *(self.image_green == 0) \\\n *(self.image_blue == 0)\n ind_red = sp.where( mask_red )\n phi = sp.ones(self.image_red.shape)\n phi[ind_red] = 0\n phi = sp.ma.MaskedArray(phi, mask=self.mask)\n numpy.set_printoptions(threshold=sys.maxsize)\n self.door_distance = skfmm.distance(phi, dx=self.pixel_size)\n tmp_dist = self.door_distance.filled(9999)\n grad = sp.gradient(tmp_dist,edge_order=2)\n grad_X = -grad[1]/self.pixel_size\n grad_Y = -grad[0]/self.pixel_size\n norm = sp.sqrt(grad_X**2+grad_Y**2)\n norm = (norm>0)*norm+(norm==0)*0.001\n self.desired_velocity_X = self.vmax * (grad_X/norm)\n self.desired_velocity_Y = self.vmax * (grad_Y/norm)\n '''plt.subplot(1,2,1)\n plt.imshow(self.desired_velocity_X, cmap='hot', interpolation='nearest')\n plt.gca().invert_yaxis()\n plt.colorbar()\n plt.subplot(1,2,2)\n plt.imshow(self.desired_velocity_X, cmap='hot', interpolation='nearest')\n plt.gca().invert_yaxis()\n plt.colorbar()\n plt.show()'''\n return self.door_distance, self.desired_velocity_X, self.desired_velocity_Y",
"def width_l_rho(model: SingleRhNeutrinoModel):\n ml = _lepton_masses[model.gen]\n params = _neutrino_vector_meson_constants[\"rho\"]\n g = params[\"g\"]\n return _width_ell_hv(model, ml, MRHO, g, VUD)",
"def calc_Lr(rho,mld,f,g=9.8,po=1027.):\n n2ml=np.ndarray(len(rho[1,:-1]))\n for i in range(len(rho[1,:-1])):\n n2ml[i]=-(g/po)*((rho[15,i]-rho[np.int8(mld[i])+15,i])/mld[i])\n Lr=(np.sqrt(n2ml)*mld[:-1])/f\n\n return Lr",
"def calc_VPD(t_celsius, rel_humidity):\n # according to Licor LI-6400 manual pg 14-10\n # and Buck AL (1981). New equations for computing vapor pressure and\n # enhancement factor. J Appl Meteor 20:1527-1532\n vp_sat = 0.61365 * math.exp((17.502 * t_celsius) / (240.97 + t_celsius))\n\n vp_air = vp_sat * rel_humidity\n return vp_sat - vp_air # or vp_sat * (1 - rel_humidity)",
"def V(E, g, gl):\n num = 0\n den = 0\n for i in range(len(E)):\n num += E[i][0]*g[i][0] + E[i][1]*g[i][1]\n den += g[i][0] + g[i][1] + gl\n return num / den",
"def calculate_v4_L(v4_Psi2, v4_Psi2_err, vn_array):\n dN = real(vn_array[:, 0])\n Q4 = dN*vn_array[:, 4]\n nev = len(dN)\n\n N2_weight = dN*(dN - 1.)\n Q4_2 = abs(Q4)**2. - dN\n\n v4_Psi4_sq = mean(Q4_2)/mean(N2_weight)\n v4_Psi4_sq_err = std(Q4_2)/mean(N2_weight)/sqrt(nev)\n\n v4_L = sqrt(v4_Psi4_sq - v4_Psi2**2.)\n v4_L_err = (sqrt(v4_Psi4_sq_err**2. + (2.*v4_Psi2*v4_Psi2_err)**2.)\n /(2.*v4_L))\n return(v4_L, v4_L_err)",
"def uL( wavelen, **kwargs ):\n Cc = C.c * 1e6 # speed of light um s^-1\n\n Gamma = kwargs.get('Gamma', 2*np.pi *5.9e6 ) # linewidth s^-1\n lambda0 = kwargs.get('lambda0', 0.671 ) # transition wavelength in microns \n \n omega0 = 2*np.pi*Cc / lambda0\n omegaL = 2*np.pi*Cc / wavelen\n intensity = 1.0 \n depthJ = (intensity)* -3*np.pi* Cc**2*Gamma / ( 2*omega0**3) * \\\n ( 1/(omega0 - omegaL ) + 1/(omega0 + omegaL ) ) # Joule\n depthuK = depthJ / C.k *1e6 # C.k is Boltzmann's constant\n return depthuK",
"def _microstrip_v_with_Lk(wire_width, dielectric_thickness, eps_r, Lk_per_sq):\n L_m, C_m = _microstrip_LC_per_meter(wire_width,\n dielectric_thickness,\n eps_r)\n Lk_m = Lk_per_sq * (1.0/wire_width)\n v = 1 / sqrt((L_m+Lk_m) * C_m)\n return v",
"def v(self):\n\n # TODO This translation formula works, but needs simplified.\n\n # PWM duration can go from 0 to 4095 with 4095 representing max rpm\n# print(\"MuleBot.v MuleBot.dcMotorPWMDurationLeft:\", MuleBot.dcMotorPWMDurationLeft)\n speed_percentage = float(MuleBot.dcMotorPWMDurationLeft) / 4095.0\n# print(\"speed_percentage: \", speed_percentage)\n\n rpm = speed_percentage * self.motorMaxRPM\n# print(\"rpm: \", rpm)\n\n secondsPerMinute = 60\n revs_per_second = rpm / secondsPerMinute\n# print(\"--revs_per_second\", revs_per_second)\n\n inches_per_rev = 2.0 * math.pi * MuleBot.WHEEL_RADIUS\n INCHES_PER_METER = 39.3701\n meters_per_rev = inches_per_rev / INCHES_PER_METER\n# print(\"--meters_per_rev\", meters_per_rev)\n\n meters_per_second = meters_per_rev * revs_per_second\n\n# print(\"--meters_per_second: \", meters_per_second)\n return meters_per_second",
"def test_lfc_kelvin():\n pressure = np.array([959., 779.2, 751.3, 724.3, 700., 269.]) * units.mbar\n temperature = (np.array([22.2, 14.6, 12., 9.4, 7., -49.]\n ) + 273.15) * units.kelvin\n dewpoint = (np.array([19., -11.2, -10.8, -10.4, -10., -53.2]\n ) + 273.15) * units.kelvin\n lfc_pressure, lfc_temp = lfc(pressure, temperature, dewpoint)\n assert_almost_equal(lfc_pressure, 727.371 * units.mbar, 2)\n assert_almost_equal(lfc_temp, 9.705 * units.degC, 2)\n assert lfc_temp.units == temperature.units",
"def V(x,nw):\n V = 0\n pList, iList = getLists(nw)\n #print(\"pList : {}\".format(pList))\n #print(\"iList : {}\".format(iList))\n \n if (checkValue(x,iList)):\n V = -300/Eh\n elif (x in pList):\n V = -150/Eh\n return V",
"def approximate_nonlinear_vector_field_radial(dataset_path, L, epsilon):\n\n file_X0 = \"nonlinear_vectorfield_data_x0.txt\"\n names_X0 = ['X0_x', 'X0_y']\n data_X0 = pd.read_csv(dataset_path / file_X0, sep=' ', names=names_X0).to_numpy()\n\n names_X1 = ['X1_x', 'X1_y']\n file_X1 = \"nonlinear_vectorfield_data_x1.txt\"\n data_X1 = pd.read_csv(dataset_path / file_X1, sep=' ', names=names_X1).to_numpy()\n\n \"\"\"\n Following block calculates the values of phi_l's for each point in dataset of X0\n and form the corresponding phi_X matrix with the given value of L.\n \"\"\"\n phi = np.empty([2000, L])\n for l in range(L):\n phi_l = np.exp(-np.square(np.linalg.norm(data_X0 - data_X0[l],\n axis=1)) / epsilon ** 2)\n phi[:, l] = phi_l\n\n \"\"\"\n The following block performs the approximation of the vector field.\n \"\"\"\n V = (data_X1 - data_X0) / 0.1\n approx_func_Ct = np.linalg.inv(phi.T @ phi) @ phi.T @ V\n final = phi @ approx_func_Ct\n plt.scatter(final[:, 0], final[:, 1], c='green',\n label='approximated f(x)_hat values')\n plt.show()\n\n \"\"\"\n The following code plots the approximated vector field and the phase portrait.\n \"\"\"\n x, y = np.meshgrid(np.linspace(-5, 5, 10), np.linspace(-5, 5, 10))\n u, v = np.zeros((10, 10)), np.zeros((10, 10))\n for i in range(0, 10):\n for j in range(0, 10):\n u[i, j] = final.T[0, i]\n v[i, j] = final.T[1, j]\n plt.quiver(x, y, u, v)\n plt.streamplot(x, y, u, v)\n plt.title(\"Approximated Vector field\")\n plt.show()\n\n \"\"\"\n The following code calculates the MSE for the dataset X1 and the final values.\n \"\"\"\n MSE = np.square(data_X1 - final).mean()\n print(MSE)",
"def _vmomentsurfaceIntegrand(vR,vT,R,df,logSigmaR,logsigmaR2,sigmaR1,gamma,\n n,m):\n E,L= _vRpvTpRToEL(vR,vT,R,df._beta,sigmaR1,gamma,df._dftype)\n return vR**n*vT**m*df.eval(E,L,logSigmaR,logsigmaR2)*2.*nu.pi/df._gamma #correct",
"def BetaVelocity(self):\n return np.linalg.norm(self.velocity) / const.speed_of_light",
"def get_voltage(self):\n status = self.get_status_response()\n volts = status[20] + (status[21] * 0x100) + (status[22] * 0x10000) + (status[23] * 0x1000000)\n volts = float(volts)\n volts /= (1000.0 * 1000.0)\n return volts\n #end get_voltage",
"def calc_elv(self, redstar, compstar):\n self.type = 'elv'\n self.red_file = redstar.file\n self.comp_file = compstar.file\n for cursrc in _poss_datasources:\n if cursrc == 'BAND':\n self.calc_elv_bands(redstar, compstar)\n else:\n self.calc_elv_spectra(redstar, compstar, cursrc)",
"def kelvin_effect(pres, surft, temp, mw_ba, dcell):\n volm = mw_ba/1e3 # approximation: using density 1000 kg/m3\n return pres*exp(-4*surft*volm/(dcell*gas_constant*temp))",
"def vel(self,M):\n v_peri = np.sqrt((HohmannTransfer.G*M)*((2/self.r1)-(2/(self.r1+self.r2))))\n \n v_aphe = np.sqrt((HohmannTransfer.G*M)*((2/self.r2)-(2/(self.r1+self.r2))))\n \n return v_peri, v_aphe",
"def radio_lumfn(L, _params):\n print _params\n # Number density as a function of sfr, dn/dlog(sfr)\n sfr = L * 5.52e-29 # erg/s/Hz, Bell (2003), Eq. 6\n dndlogsfr_sfms, dndlogsfr_pass = g.sfr_fn(hm, sfr, z=0., params=_params)\n #phi = dndlogsfr_sfms #+ dndlogsfr_pass\n return dndlogsfr_sfms, dndlogsfr_pass",
"def velocity_field(xt,yt,x0,y0,Vinf,dia,rot,chord,B,param=None,veltype='all',integration='simp',m=220,n=200):\n rad = dia/2.\n tsr = rad*fabs(rot)/Vinf\n solidity = (chord*B)/rad\n\n # Translating the turbine position\n x0t = x0 - xt\n y0t = y0 - yt\n\n coef0,coef1,coef2,coef3,coef4,coef5,coef6,coef7,coef8,coef9 = coef_val()\n\n # Calculating EMG distribution parameters (based on polynomial surface fitting)\n if param is None:\n loc1 = _parameterval(tsr,solidity,coef0)\n loc2 = _parameterval(tsr,solidity,coef1)\n loc3 = _parameterval(tsr,solidity,coef2)\n spr1 = _parameterval(tsr,solidity,coef3)\n spr2 = _parameterval(tsr,solidity,coef4)\n skw1 = _parameterval(tsr,solidity,coef5)\n skw2 = _parameterval(tsr,solidity,coef6)\n scl1 = _parameterval(tsr,solidity,coef7)\n scl2 = _parameterval(tsr,solidity,coef8)\n scl3 = _parameterval(tsr,solidity,coef9)\n\n else:\n # Reading in EMG distribution parameters\n loc1 = param[0]\n loc2 = param[1]\n loc3 = param[2]\n spr1 = param[3]\n spr2 = param[4]\n skw1 = param[5]\n skw2 = param[6]\n scl1 = param[7]\n scl2 = param[8]\n scl3 = param[9]\n\n ###################################\n if veltype == 'vort':\n # VORTICITY CALCULATION (NO INTEGRATION)\n if x0t < 0.:\n vel = 0.\n else:\n vel = _vawtwake.vorticitystrength(x0t,y0t,dia,loc1,loc2,loc3,spr1,spr2,skw1,skw2,scl1,scl2,scl3)/rot\n ###################################\n else:\n # Integration of the vorticity profile to calculate velocity\n if integration == 'simp':\n # SIMPSON'S RULE INTEGRATION (must use polynomial surface coefficients from VAWTPolySurfaceCoef.csv)\n inte = 1 # Simpson's Rule\n # inte = 2 # Trapezoidal Rule (optional ability of the code-- faster but less accurate)\n\n if param is not None:\n print \"**** Using polynomial surface coefficients from VAWTPolySurfaceCoef.csv for Simpson's rule integration ****\"\n\n vel_xs,vel_ys = _vawtwake.vel_field(xt,yt,x0,y0,dia,rot,chord,B,Vinf,coef0,coef1,coef2,coef3,coef4,coef5,coef6,coef7,coef8,coef9,m,n,inte)\n\n if veltype == 'all':\n vel = sqrt((vel_xs*Vinf + Vinf)**2 + (vel_ys*Vinf)**2)/Vinf\n elif veltype == 'x':\n vel = (vel_xs*Vinf + Vinf)/Vinf\n elif veltype == 'y':\n vel = vel_ys\n elif veltype == 'ind':\n vel = np.array([vel_xs,vel_ys])\n ###################################\n elif integration == 'gskr':\n # 21-POINT GAUSS-KRONROD RULE QUADRATURE INTEGRATION\n xbound = (scl3+5.)*dia\n argval = (x0t,y0t,dia,loc1,loc2,loc3,spr1,spr2,skw1,skw2,scl1,scl2,scl3)\n if veltype == 'all' or veltype == 'x' or veltype == 'ind':\n vel_x = _dblquad(_vawtwake.integrandx,0.,xbound,lambda x: -1.*dia,lambda x: 1.*dia,args=argval)\n vel_xs = (vel_x[0]*fabs(rot))/(2.*pi)\n if veltype == 'all' or veltype == 'y' or veltype == 'ind':\n vel_y = _dblquad(_vawtwake.integrandy,0.,xbound,lambda x: -1.*dia,lambda x: 1.*dia,args=argval)\n vel_ys = (vel_y[0]*fabs(rot))/(2.*pi)\n\n if veltype == 'all':\n vel = sqrt((vel_xs + Vinf)**2 + (vel_ys)**2)/Vinf\n elif veltype == 'x':\n vel = (vel_xs + Vinf)/Vinf\n elif veltype == 'y':\n vel = vel_ys/Vinf\n elif veltype == 'ind':\n vel = np.array([vel_xs,vel_ys])/Vinf\n ###################################\n\n return vel",
"def solve_VFI(self):\r\n dimC = self.dimA ; dimA = self.dimA ; dimW = self.dimW \r\n C = self.c_grid ; A = self.a_grid ; W = self.W_grid\r\n tol = self.tol ; Niter = self.Niter ; R = self.R\r\n beta = self.beta ; Pi = self.Pi\r\n \r\n V0 = np.zeros((dimA,dimC,dimW))\r\n V1 = np.zeros((dimA,dimC,dimW))\r\n Pol = np.zeros((dimA,dimC,dimW))\r\n U = np.zeros((dimA,dimC,dimW))\r\n \r\n t0 = time()\r\n diff = 1 ; niter = 0\r\n \r\n while diff > tol:\r\n niter += 1\r\n # Value update step\r\n for ia in range(dimA):\r\n for ic in range(dimC):\r\n for iw in range(dimW):\r\n c = W[iw] + R*A[ia] - A\r\n x = C[ic]\r\n \r\n c[c < 0] = np.nan \r\n if x < 0:\r\n x = np.nan\r\n \r\n u = self.u(c,x) \r\n U[:,ic,iw] = u \r\n \r\n Objective = U + beta * V0 @ Pi.T\r\n V1[ia,:,:] = np.nanmax(Objective, axis = 0)\r\n Pol[ia,:,:] = np.nanargmax(Objective, axis = 0)\r\n \r\n # Evaluate distance between the value functions\r\n diff = np.max(np.max(np.abs(V1 - V0))) \r\n V0[:] = V1\r\n \r\n # Break the while loop if too many iterations\r\n #print(\"The current error is \"+str(diff))\r\n if niter > Niter:\r\n print('Ops, no convergence')\r\n break\r\n \r\n t1 = time()\r\n #print('VFI algorithm took {0:0d} iterations and {1:.2f} seconds.'.format(niter, t1 - t0))\r\n \r\n self.V1 = V1 ; self.Pol = Pol",
"def calc_lamb(self, x_surface, geom):\n\n return self.rfl",
"def define_ufl_incompressibility_equation(self):\n\n if hasattr(self, 'f3'):\n return None\n\n if not self.config['material']['incompressible']:\n self.f3 = 0\n return None\n\n if self.config['material']['type'] == 'elastic':\n b_vol = self._material.incompressibilityCondition(self.displacement)\n else:\n b_vol = self._material.incompressibilityCondition(self.velocity)\n\n if self.config['material']['type'] == 'elastic':\n kappa = self._material._parameters['kappa']\n self.f3 = self.test_scalar*(kappa*b_vol - self.pressure)*dlf.dx\n else:\n self.f3 = self.test_scalar*b_vol*dlf.dx\n\n return None",
"def retrieve_VILD(\n ds, variable=\"zFactorFinal\", radar_frequency=\"Ku\", threshold=18, use_echo_top=True\n):\n da_vil = retrieve_VIL(ds, variable=variable, radar_frequency=\"Ku\")\n if use_echo_top:\n da_e = retrieve_EchoTopHeight(\n ds,\n threshold=threshold,\n variable=variable,\n radar_frequency=radar_frequency,\n min_threshold=0,\n )\n else:\n da_e = retrieve_EchoDepth(\n ds,\n threshold=threshold,\n variable=variable,\n radar_frequency=radar_frequency,\n min_threshold=0,\n )\n da_vild = da_vil / da_e * 1000\n # Add attributes\n da_vild.name = \"VILD\"\n da_vild.attrs[\"description\"] = \"VIL Density\"\n da_vild.attrs[\"units\"] = \"g/m3\"\n return da_vild"
] | [
"0.63736004",
"0.5998151",
"0.5983169",
"0.5946282",
"0.58367145",
"0.5795854",
"0.5793172",
"0.57542485",
"0.571141",
"0.57062334",
"0.56781685",
"0.5638643",
"0.5626958",
"0.5622531",
"0.56085986",
"0.5567615",
"0.5557632",
"0.5544373",
"0.55408514",
"0.5531132",
"0.553042",
"0.552939",
"0.55273545",
"0.5506578",
"0.54752326",
"0.5445424",
"0.5427518",
"0.5426323",
"0.54248345",
"0.54197603",
"0.54168195",
"0.54122555",
"0.54107374",
"0.5401187",
"0.53942794",
"0.5385541",
"0.5376156",
"0.5369063",
"0.5359616",
"0.53579015",
"0.53437436",
"0.5342906",
"0.5329026",
"0.5325195",
"0.53225327",
"0.53178877",
"0.5315785",
"0.5311995",
"0.5305674",
"0.5304458",
"0.5292073",
"0.5281554",
"0.52808785",
"0.5280556",
"0.5277382",
"0.52683246",
"0.5265008",
"0.52539325",
"0.5238561",
"0.5238287",
"0.52271324",
"0.5221098",
"0.52129734",
"0.5205365",
"0.52037066",
"0.5201625",
"0.5198691",
"0.51980466",
"0.5196057",
"0.51898277",
"0.51860994",
"0.51821584",
"0.5180998",
"0.51809764",
"0.51796126",
"0.517755",
"0.5170427",
"0.5169333",
"0.51659703",
"0.51615155",
"0.5160093",
"0.515905",
"0.5157316",
"0.5155313",
"0.51544565",
"0.51497626",
"0.5147764",
"0.51475155",
"0.5147419",
"0.51419437",
"0.5138064",
"0.51341736",
"0.51299775",
"0.51299465",
"0.5127589",
"0.51263815",
"0.5123867",
"0.51168144",
"0.5113276",
"0.51089025"
] | 0.64296955 | 0 |
Set the parameters fit on SDSS DR4 Note The values of slope and intercept are taken from the r-band orthogonal fit on SDSS DR4. See Table 2 of [1]_. References .. [1] Hyde, Joseph B., and Mariangela Bernardi. "The luminosity and stellar mass Fundamental Plane of early-type galaxies." | def _define_SDSS_fit_params(self):
self.a = 1.4335
self.b = 0.3150
self.c = -8.8979
self.intrinsic_scatter = 0.0578
#self.delta_a = 0.02
#self.delta_b = 0.01 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _define_SLACS_fit_params(self):\n\t\t# Fit params from R_eff\n\t\tself.a = -0.41\n\t\tself.b = 0.39\n\t\t#self.delta_a = 0.12\n\t\t#self.delta_b = 0.10\n\t\tself.intrinsic_scatter = 0.14\n\t\t# Fit params from vel_disp\n\t\tself.a_v = 0.07\n\t\tself.b_v = -0.12\n\t\tself.int_v = 0.17",
"def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Rdy[1.,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rqcd[1,0.0,10.0]\");\n self.modelBuilder.doSet(\"POI\",\"Rdy,Rqcd\")",
"def set_parameters(pars):\n y0=[]\n fun=None \n state_evol=None\n if pars[\"state_law\"]==0:\n state_evol=state_evol_d\n elif pars[\"state_law\"]==1:\n state_evol=state_evol_r\n elif pars[\"state_law\"]==2:\n state_evol=state_evol_p\n elif pars[\"state_law\"]==3:\n state_evol=state_evol_n\n \n if pars[\"model\"]==0:\n y0 = [pars[\"Vpl\"]*0.9,0.1,pars[\"sigma1\"]]\n fun = fun_qds\n damping = pars[\"nu\"]\n \n if pars[\"model\"]==1:\n y0 = [pars[\"Vpl\"]*0.9, 0.1,pars[\"sigma1\"],pars[\"sigma1\"]*pars[\"f0\"]]\n fun = fun_fds\n damping = pars[\"m\"]\n\n if pars[\"model\"]==2:\n y0 = [pars[\"Vpl\"]*0.99,pars[\"Vpl\"], pars[\"Vpl\"],0.1,pars[\"sigma1\"],pars[\"sigma2\"]]\n fun= fun_qdc\n damping = pars[\"nu\"]\n\n if pars[\"model\"]==3:\n y0 = [pars[\"Vpl\"]*1.1,pars[\"Vpl\"], pars[\"Vpl\"],0.0,pars[\"sigma1\"],pars[\"sigma2\"],pars[\"sigma1\"]*pars[\"f0\"]]\n fun = fun_fdc\n damping = pars[\"m\"]\n\n return (np.array(y0), state_evol, fun, damping)",
"def setup_fitting_init_pars(inparam, night, band, masterbeam, order):\n\n # Determine whether IGRINS mounting was loose or\n # the night of interest is in question\n if (int(night) < 20180401) or (int(night) > 20190531):\n IPpars = inparam.ips_tightmount_pars[band][masterbeam][order]\n else:\n IPpars = inparam.ips_loosemount_pars[band][masterbeam][order]\n\n # start at bucket loc = 1250 +- 100, width = 250 +- 100,\n # depth = 100 +- 5000 but floor at 0\n centerloc = 1250 if band == 'H' else 1180\n\n # Initialize parameter array for optimization as well as half-range values\n # for each parameter during the various steps of the optimization.\n # Many of the parameters initialized here will be changed throughout the\n # code before optimization and in between optimization steps.\n\n parA0 = np.array([\n 0.0, # 0: The shift of the stellar template (km/s)\n 0.0, # 1: The scale factor for the stellar template\n 0.0, # 2: The shift of the telluric template (km/s)\n 1.0, # 3: The scale factor for the telluric template\n 0.0, # 4: vsini (km/s)\n IPpars[2], # 5: The instrumental resolution (FWHM) in pixels\n 0.0, # 6: Wavelength 0-pt\n 0.0, # 7: Wavelength linear component\n 0.0, # 8: Wavelength quadratic component\n 0.0, # 9: Wavelength cubic component\n 1.0, #10: Continuum zero point\n 0.0, #11: Continuum linear component\n 0.0, #12: Continuum quadratic component\n IPpars[1], #13: Instrumental resolution linear component\n IPpars[0], #14: Instrumental resolution quadratic component\n centerloc, #15: Blaze dip center location\n 330, #16: Blaze dip full width\n 0.05, #17: Blaze dip depth\n 90, #18: Secondary blaze dip full width\n 0.05, #19: Blaze dip depth\n 0.0, #20: Continuum cubic component\n 0.0, #21: Continuum quartic component\n 0.0, #22: Continuum quintic component\n 0.0, #23: Continuum hexic component\n 0.0, #24: secondary par\n 0.0, #25: secondary par\n 0.0, #26: secondary par\n 0.0 #27: secondary par\n ])\n\n return parA0",
"def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Rdy[1.,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rbk[1.,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rqcd_emu[1,0.0,10.0]\");\n self.modelBuilder.doSet(\"POI\",\"Rbk,Rdy,Rqcd_emu\")",
"def set_parameters(self, full=None, r=None, l=None, d=None, z=None):\n\n original = _deepcopy(self.parameters) # save in case of error\n\n if type(full) is bool:\n self.parameters[\"full\"] = full\n if type(r) in [int, float]:\n self.parameters[\"r\"] = float(r)\n if self._type == 2: # observation well\n if type(d) in [int, float]:\n self.parameters[\"d\"] = float(d)\n if type(l) in [int, float]:\n self.parameters[\"l\"] = float(l)\n else: # piezometer\n if type(z) in [int, float]:\n self.parameters[\"z\"] = float(z)\n\n flag, message = self.validate_parameters()\n if not flag:\n print(message)\n self.parameters.update(original)\n # End Function",
"def setParameters(self, sx_sim=None):\n # TODO rething that ..\n #if sx_sim is not None:\n #if ds_model is not None:\n #if di_model is not None:\n self.sx_sim = sx_sim\n p = defaultParams(chord=self._chord, rho=self._rho, sx=self.sx_sim, ds=self.ds_model, di=self.di_model,\n M=self._M33, C=self._C33, K=self._K33)\n p['beta'] = self._beta\n if len(p['Iq'])==0:\n raise Exception('No states are present')\n\n # --- Dynamic inflow / induction\n p['a0'] = self._a0\n p['ap0'] = self._ap0\n p['di_tau1'] = self.di_tau1\n p['di_tau2'] = self.di_tau2\n\n # --- Aerodynamic parameters\n if self._y_AQ>0: \n print('[WARN] y_AQ positive is unconventional')\n p['y_AQ'] = self._y_AQ\n if self._y_AT is None:\n p['y_AT'] = self._y_AQ+self._chord/2 # default is approximatively half a chord behind\n else:\n p['y_AT'] = self._y_AT\n p['x_AQ'] = self._x_AQ\n p['x_AT'] = self._x_AT\n if self._ppol is None:\n raise Exception('Polar parameters need to be set')\n p.update(self._ppol)\n # # p.update({'linModel':False, 'drag':drag})\n\n self.p_sim = p",
"def set_parameters(self, par):\n try:\n for l in self.cell.layers:\n r_curve = cmf.VanGenuchtenMualem(\n Ksat=10**par.pKsat, phi=par.porosity, alpha=par.alpha, n=par.n\n )\n r_curve.w0 = r_curve.fit_w0()\n l.soil = r_curve\n self.cell.saturated_depth = 0.5\n self.gw.potential = self.cell.z - 0.5\n except RuntimeError as e:\n sys.stderr.write(\"Set parameters failed with:\\n\" + str(par) + \"\\n\" + str(e))\n raise",
"def set_params(self, params_):\n x_start, x_end = params_[\"lim_fit\"]\n self.find_idx_of_fit_limit(x_start, x_end)\n self.is_error_bar_for_fit = params_[\"use_error_bar\"]\n self.fitting_method1 = params_[\"method1\"]\n self.fitting_method2 = params_[\"method2\"]\n self.qty_to_min = params_[\"qty_to_min\"]\n\n for i, key in enumerate(self.params):\n # self.params[key].set(value=params_[\"val\"][i], min=params_[\"min\"][i], max=params_[\"max\"][i], vary=bool(params_[\"hold\"][i]), brute_step=params_[\"brute_step\"][i])\n if self.params[key].user_data is not None:\n if \"dontGenerate\" in self.params[key].user_data:\n continue\n self.params[key].set(value=params_[key][\"value\"], min=params_[key][\"min\"], max=params_[key][\"max\"], vary=params_[key][\"vary\"], brute_step=params_[key][\"b_step\"])",
"def _set_leg_params(self):\n self.p = 0.01600\n self.q = 0.00000\n self.r = 0.02000\n self.c = 0.01811\n self.u = 0.00000\n self.v = 0.00000\n self.e = -0.06000\n self.h = -0.02820\n self.s = 0.02200\n self.d1 = 0.0\n self.d2 = 0.0\n self.d3 = 0.0\n self.stability = 0.0",
"def set_parameters(self,params):\n K3Supervisor.set_parameters(self,params)\n self.blending.set_parameters(self.parameters)",
"def create_design_params(self):\n self.design_params = np.array([self.r1, self.r2, self.d1, self.d2, self.Ixx, self.Iyy, self.Izz])",
"def psdf_4(**kwargs):\n\n # fqlag parameters #\n n = 2**8\n dt = 1.0\n fql = np.array([.5/(dt*n), 1./dt])\n\n lc, extra = simulate_light_curves(n=n, dt=dt, nsim=100,\n input_psd=['broken_powerlaw', [1e-4, -1, -2, 3e-2]])\n\n model = ['bpl', [-5, -2, -3]]\n inP = extra['input_psd'][1]\n inP = [np.log(inP[0]), inP[2], np.log(inP[3])]\n fit_psdf(fql, model, lc, extra, '4', input_pars=inP)",
"def dline_dSFR(**kwargs):\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n GR = glo.global_results(sim_run=p.sim_run,nGal=p.nGal)\n \n marker = 'o'\n if p.sim_run == p.sim_runs[0]: marker = '^'\n\n L_line = getattr(GR,'L_'+p.line+'_sun')#[380:400]#[0:100]\n SFR = getattr(GR,'SFR')#[380:400]#[0:100]\n M_star = getattr(GR,'M_star')#[380:400]#[0:100]\n Zsfr = getattr(GR,'Zsfr')#[380:400]#[0:100]\n R_gas = getattr(GR,'R2_gas')#[380:400]#[0:100]\n M_H2 = getattr(GR,'M_H2_R2_gas')#[380:400]#[0:100]\n\n SFR = SFR[L_line > 0]\n M_star = M_star[L_line > 0]\n Zsfr = Zsfr[L_line > 0]\n R_gas = R_gas[L_line > 0]\n M_H2 = M_H2[L_line > 0]\n L_line = L_line[L_line > 0]\n print('%i data points ' % (len(L_line)))\n\n # Distance from MS\n dlSFR = aux.distance_from_salim18(GR.M_star,GR.SFR)\n\n if p.add:\n ax = p.ax\n else:\n fig,ax = plt.subplots(figsize=(8,6))\n\n # Distance from observed relation\n L_obs,SFR_obs,fit,std = add_line_SFR_obs(p.line,[1e6,1e6],ax,plot=False,select=p.select)\n ldL_line = np.log10(L_line) - fit.predict(np.log10(SFR.reshape(-1, 1))).flatten()\n\n labs = {'_M10':'Mach=10 power-law',\\\n '_arepoPDF_ext':'AREPO parametric PDF with extinction',\\\n '_arepoPDF':'SIGAME v3',\\\n '_arepoPDF_CMZ':'SIGAME v3',\\\n '_arepoPDF_M51':'SIGAME v3'}\n lab = labs[p.table_ext]\n\n\n ax.text(0.05,0.9,p.line,transform=ax.transAxes,fontsize=13)\n ax.set_xlabel('log SFR - log SFR$_{MS,Salim+18}$')\n ax.set_ylabel('log L - log L$_{obs}$(SFR)')\n if not p.xlim: p.xlim = np.array([-3,3])\n if not p.ylim: \n p.ylim = [np.median(ldL_line) - 4,np.median(ldL_line) + 3]\n # if p.line == '[OI]63': p.ylim = [np.median(ldL_line) - 5,np.median(ldL_line) + 4]\n # if 'CO' in p.line: p.ylim = [np.median(ldL_line) - 4,np.median(ldL_line) + 4]\n\n ax.set_xlim(p.xlim)\n ax.set_ylim(p.ylim)\n ax.plot([0,0],ax.get_ylim(),'--k',lw=1)\n ax.plot(ax.get_xlim(),[0,0],'--k',lw=1)\n\n if p.select == 'Sigma_M_H2':\n Sigma_M_H2 = M_H2/(np.pi*R_gas**2)/1e6 # per pc^-2\n m = ax.scatter(dlSFR[np.argsort(Sigma_M_H2)],ldL_line[np.argsort(Sigma_M_H2)],marker=marker,s=14,\\\n c=np.log10(Sigma_M_H2[np.argsort(Sigma_M_H2)]),vmin=-2.5,vmax=2.2,label=lab,alpha=0.5,zorder=10)\n if p.cb:\n cbar = plt.colorbar(m,ax=ax)\n cbar.set_label(label=r'log $\\Sigma_{H2}$ [M$_{\\odot}$/pc$^2$]',size=15)",
"def _set_parameters(self, parameters):\n self.parameters = parameters\n self._set_points_and_weights()",
"def fit(self, skydip):\n parameter_order = ['tau', 'offset', 'kelvin', 'tsky']\n self.parameters = {}\n self.errors = {}\n self.p_opt = None\n self.p_cov = None\n self.fitted_values = None\n self.data = None\n self.sigma = None\n self.elevation = None\n\n log.debug(\"Initial skydip values:\")\n log.debug(f\" Tsky = {self.initial_guess['tsky']}\")\n log.debug(f\" offset = {self.initial_guess['offset']}\")\n log.debug(f\" kelvin = {self.initial_guess['kelvin']}\")\n log.debug(f\" tau = {self.initial_guess['tau']}\")\n\n if self.el_range is not None:\n from_bin = max(0, skydip.get_bin(self.el_range.min))\n to_bin = min(skydip.data.size, skydip.get_bin(self.el_range.max))\n else:\n from_bin = 0\n to_bin = skydip.data.size\n\n self.init_parameters(skydip)\n\n data = skydip.data[from_bin:to_bin]\n weight = skydip.weight[from_bin:to_bin]\n valid = weight > 0\n data = data[valid]\n weight = weight[valid]\n\n if self.uniform_weights:\n sigma = None\n else:\n sigma = 1 / weight\n\n elevation = skydip.get_elevation(\n np.nonzero(valid)[0]).to('radian').value\n\n self.use_points = data.size\n\n p0 = []\n lower_bounds = np.zeros(4, dtype=float)\n upper_bounds = np.zeros(4, dtype=float)\n\n for i, parameter in enumerate(parameter_order):\n value = self.initial_guess[parameter]\n p0.append(value)\n if parameter in self.fit_for:\n lower_bounds[i] = self.bounds[parameter][0]\n upper_bounds[i] = self.bounds[parameter][1]\n else: # An attempt to fix parameters with curve_fit\n eps = abs(value - np.nextafter(value, 1))\n lower_bounds[i] = value - eps\n upper_bounds[i] = value + eps\n\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', OptimizeWarning)\n p_opt, p_cov = curve_fit(self.value_at, elevation, data,\n p0=p0, sigma=sigma,\n bounds=(lower_bounds, upper_bounds))\n self.p_opt = p_opt\n self.p_cov = p_cov\n self.data = data\n self.elevation = elevation\n self.sigma = sigma\n\n self.has_converged = np.isfinite(p_opt).all()\n if not self.has_converged: # pragma: no cover\n log.warning(\"Skydip fit did not converge!\")\n errors = np.sqrt(np.diag(p_cov))\n\n for i, parameter in enumerate(parameter_order):\n self.parameters[parameter] = p_opt[i]\n self.errors[parameter] = errors[i]\n\n self.fitted_values = self.fit_elevation(elevation)\n fit_weights = None if sigma is None else weight ** 2\n\n t_obs_rms = np.sqrt(np.average((data - self.fitted_values) ** 2,\n weights=fit_weights))\n self.rms = t_obs_rms / self.parameters['kelvin']",
"def set_fit_params(self):\n\n self.p0 = np.array([self.A_arr, self.T_a])\n # initial guess at A_arr and T_a\n\n self.popt, self.pcov = curve_fit(\n self.get_eta_fit, self.T_exp, self.eta_exp, p0=self.p0\n )\n\n self.A_arr = self.popt[0]\n self.T_a = self.popt[1]\n\n self.T_array = self.T_model",
"def test_linear_fit_fixed_parameter(self):\n init_model = models.Polynomial1D(degree=2, c1=1)\n init_model.c1.fixed = True\n\n x = np.arange(10)\n y = 2 + x + 0.5 * x * x\n\n fitter = LinearLSQFitter()\n fitted_model = fitter(init_model, x, y)\n assert_allclose(fitted_model.parameters, [2.0, 1.0, 0.5], atol=1e-14)",
"def initDataParms(self):\n self.xpos = self.pltw.curvelist[self.blkno].xvinfo.vidx\n self.data = self.pltw.blklst[self.blkno] # original data block\n self.idata = None # interpolated data\n (self.nvec, self.npt) = self.data.shape\n self.xmin = (self.data[self.xpos]).min()\n self.xmax = (self.data[self.xpos]).max()\n self.xspan = self.xmax - self.xmin\n if self.parent.test:\n self.dx = self.xspan / (self.npt * 5)",
"def vary_fit(xvalues, yvalues, d_sample, r1_func, f_i, thetaS_i, phiS_i, phiS_max):\n params1 = Parameters()\n params1.add('ds', value=d_sample, vary=False)\n params1.add('thetaS', value=thetaS_i, min=0, max=d_sample)\n params1.add('f', value=f_i, min=3, max=300000)\n ## originally max was 1\n params1.add('phiS', value=phiS_i, min=0, max=phiS_max)\n params1.add('w', value=2.0/3.0, vary=False)\n params1.add('a', value=4.0/3.0, vary=False)\n ##originally thetaP, phiP had no minima\n params1.add('thetaP', expr='(ds*(1 + phiS*w*f + a*thetaS)-thetaS)/ \\\n ((1 - a*ds)*(phiS*w*f + a*thetaS)-(a*ds))')\n params1.add('phiP', expr='phiS*thetaP/thetaS')\n params1.add('c', expr='w*phiS*f/(1+w*phiS*f+thetaS*a)')\n params1.add('dp', expr='thetaP/(1+a*thetaP)')\n params1.add('dc', expr='thetaS/(1+a*thetaS)')\n minner1 = Minimizer(fcn2min, params1, fcn_args=(xvalues, yvalues, r1_func))\n try:\n fitres1 = minner1.minimize()\n except:\n fitres1 = None\n return fitres1",
"def assign_model_parameters(self,xmax,zmax,dh,duration):\n self.model_parameters['xmax']=xmax\n self.model_parameters['zmax']=zmax\n self.model_parameters['dh']=dh\n self.model_parameters['duration']=duration",
"def __SetSFParams(self):\n\n # If radial structure functions are in output\n if self.__containsRadial:\n # Defines radial attributes\n self.__nc_RSoft_O.radial_error_tolerance = self.etol_radial\n\n # Defines radial dimensions\n self.__nc_RSoft_O.createDimension('radial_structure_functions',\\\n len(self.mus))\n\n # Defines radial variables\n mus_var_id_O = self.__nc_RSoft_O.createVariable('mus', \\\n 'f4', ('radial_structure_functions'))\n Ls_var_id_O = self.__nc_RSoft_O.createVariable('Ls', \\\n 'f4', ('radial_structure_functions'))\n radial_Xs_var_id_O = self.__nc_RSoft_O.createVariable(\\\n 'radial_Xs', 'i4', ('radial_structure_functions'))\n radial_Ys_var_id_O = self.__nc_RSoft_O.createVariable(\\\n 'radial_Ys', 'i4', ('radial_structure_functions'))\n\n # Sets radial structure function variables\n mus_var_id_O[:] = self.mus\n Ls_var_id_O[:] = self.Ls\n radial_Xs_var_id_O[:] = self.radial_Xs\n radial_Ys_var_id_O[:] = self.radial_Ys\n\n # If angular structure functions are in output\n if self.__containsAngular:\n # Defines angular attributes\n self.__nc_RSoft_O.angular_error_tolerance = self.etol_angular\n\n # Defines angular dimensions\n self.__nc_RSoft_O.createDimension('angular_structure_functions',\\\n len(self.xis))\n\n # Defines angular variables\n xis_var_id_O = self.__nc_RSoft_O.createVariable('xis', \\\n 'f4', ('angular_structure_functions'))\n zetas_var_id_O = self.__nc_RSoft_O.createVariable('zetas', \\\n 'i4', ('angular_structure_functions'))\n lambdas_var_id_O = self.__nc_RSoft_O.createVariable('lambdas', \\\n 'i4', ('angular_structure_functions'))\n angular_Xs_var_id_O = self.__nc_RSoft_O.createVariable(\\\n 'angular_Xs', 'i4', ('angular_structure_functions'))\n angular_Ys_var_id_O = self.__nc_RSoft_O.createVariable(\\\n 'angular_Ys', 'i4', ('angular_structure_functions'))\n angular_Zs_var_id_O = self.__nc_RSoft_O.createVariable(\\\n 'angular_Zs', 'i4', ('angular_structure_functions')) \n\n # Sets angular structure function variables\n xis_var_id_O[:] = self.xis\n zetas_var_id_O[:] = self.zetas\n lambdas_var_id_O[:] = self.lambdas\n angular_Xs_var_id_O[:] = self.angular_Xs\n angular_Ys_var_id_O[:] = self.angular_Ys\n angular_Zs_var_id_O[:] = self.angular_Zs",
"def test_linear_fit_2d_model_set_fixed_parameters(self):\n init_model = models.Polynomial2D(\n degree=2,\n c1_0=[1, 2],\n c0_1=[-0.5, 1],\n n_models=2,\n fixed={\"c1_0\": True, \"c0_1\": True},\n )\n\n x, y = np.mgrid[0:5, 0:5]\n zz = np.array([1 + x - 0.5 * y + 0.1 * x * x, 2 * x + y - 0.2 * y * y])\n\n fitter = LinearLSQFitter()\n fitted_model = fitter(init_model, x, y, zz)\n\n assert_allclose(fitted_model(x, y, model_set_axis=False), zz, atol=1e-14)",
"def set_parameters(self, mode, data):\n if mode == 'design' or self.local_design:\n self.new_design = True\n\n for key, dc in self.variables.items():\n if isinstance(dc, dc_cp):\n if ((mode == 'offdesign' and not self.local_design) or\n (mode == 'design' and self.local_offdesign)):\n self.get_attr(key).design = data[key]\n\n else:\n self.get_attr(key).design = np.nan",
"def set_parameters(self, L, r):\n self.L = L\n self.r = r",
"def test_linear_fit_model_set_fixed_parameter(self):\n init_model = models.Polynomial1D(degree=2, c1=[1, -2], n_models=2)\n init_model.c1.fixed = True\n\n x = np.arange(10)\n yy = np.array([2 + x + 0.5 * x * x, -2 * x])\n\n fitter = LinearLSQFitter()\n fitted_model = fitter(init_model, x, yy)\n\n assert_allclose(fitted_model.c0, [2.0, 0.0], atol=1e-14)\n assert_allclose(fitted_model.c1, [1.0, -2.0], atol=1e-14)\n assert_allclose(fitted_model.c2, [0.5, 0.0], atol=1e-14)",
"def set_shape_params(self, params):\n self.alpha = params[0]\n self.beta = params[1]\n self.gamma = params[2]\n self.c500 = params[3]\n self.P0 = params[4]",
"def SetParams(ss, sheet, setMsg):\n if sheet == \"\":\n ss.Params.ValidateSheets(go.Slice_string([\"Network\", \"Sim\"]))\n ss.SetParamsSet(\"Base\", sheet, setMsg)\n if ss.ParamSet != \"\" and ss.ParamSet != \"Base\":\n sps = ss.ParamSet.split()\n for ps in sps:\n ss.SetParamsSet(ps, sheet, setMsg)\n if ss.Learn == LearnType.Hebbian:\n ss.SetParamsSet(\"Hebbian\", sheet, setMsg)\n elif ss.Learn == LearnType.ErrorDriven:\n ss.SetParamsSet(\"ErrorDriven\", sheet, setMsg)",
"def _set_params(self,x):\r\n assert x.size == self.num_params\r\n self.variance = x[0]\r\n self.lengthscale = x[1:]",
"def set_sgd_params(self, lr_1=0.01, lr_2=0.01, \\\n mom_1=0.9, mom_2=0.999):\n zero_ary = np.zeros((1,))\n # set learning rates\n new_lr_1 = zero_ary + lr_1\n self.lr_1.set_value(to_fX(new_lr_1))\n new_lr_2 = zero_ary + lr_2\n self.lr_2.set_value(to_fX(new_lr_2))\n # set momentums\n new_mom_1 = zero_ary + mom_1\n self.mom_1.set_value(to_fX(new_mom_1))\n new_mom_2 = zero_ary + mom_2\n self.mom_2.set_value(to_fX(new_mom_2))\n return",
"def _set_model_parameters(self, verbose=False):\n from scipy.special import gamma\n\n z0 = self.z0\n\n # set parameters that are constants\n p_v, d_v, cs0, sigma, vout0 = (1, 2, 6.7, 0.1, 25.0)\n p_vB, d_vB, Mach0, p_M, d_M = (4, 2, 0.5, 1, 3)\n\n # calculate amplitudes that make the pdf integrate to 1\n A_v = np.log(10)*p_v/gamma(d_v/p_v)\n A_cs = np.log(10)/np.sqrt(2*np.pi)/sigma\n A_vB = np.log(10)*p_vB/gamma(d_vB/p_vB)\n A_M = np.log(10)*p_M/gamma(d_M/p_M)\n\n # store them in dictionaries\n self.cool_params = dict(A_v=A_v, p_v=p_v, d_v=d_v,\n A_cs=A_cs, cs0=cs0, sigma=sigma, vout0=vout0)\n self.hot_params = dict(A_vB=A_vB, p_vB=p_vB, d_vB=d_vB,\n A_M=A_M, Mach0=Mach0,p_M=p_M,d_M=d_M)\n # SN related parameters that set the reference values for loading factors\n self.params = dict(Esn=1.e51*au.erg, mstar=95.5*au.M_sun, vcool=200*au.km/au.s,\n Mej=10.*au.M_sun, ZSN=0.2, ZISM0=0.02)\n self.params['vej'] = np.sqrt(2.0*self.params['Esn']/self.params['Mej']).to('km/s')\n self.ref_params = dict(Mref=self.params['mstar'],\n pref=self.params['Esn']/(2*self.params['vcool']),\n Eref=self.params['Esn'],\n Zref=self.params['Mej']*self.params['ZSN'])\n\n # coefficients used in conversion from mass to other PDFs\n self.vp = (self.ref_params['pref']/self.params['mstar']).to('km/s').value\n self.vE = np.sqrt(self.ref_params['Eref']/self.params['mstar']).to('km/s').value\n self.Ze = (self.ref_params['Zref']/self.params['mstar']).cgs.value\n\n # parameters for scaling relations from Paper~I\n a = np.array(fit_alpha[z0])\n b = np.array(fit_beta[z0])\n\n self.scaling_params = dict(a=a, b=b)\n if z0 == '2H':\n self.cool_params['vout0'] = 45\n self.cool_params['cs0'] = 7.5\n elif z0 == '500':\n self.cool_params['vout0'] = 45\n self.cool_params['cs0'] = 8.5\n elif z0 == '1000':\n self.cool_params['vout0'] = 60\n self.cool_params['cs0'] = 10.0\n self.scaling_params['A'] = np.round(10.**(np.array(self.scaling_params['a'])),2)\n self.scaling_params['p'] = 1.+np.array(self.scaling_params['b'])\n self.enum=dict(M_cool=0, M_int=1, M_hot=2, M_total=3,\n p_cool=4, p_int=5, p_hot=6, p_total=7,\n E_cool=8, E_int=9, E_hot=10, E_total=11,\n Z_cool=12, Z_int=13, Z_hot=14, Z_total=15)\n\n # print parameters\n if verbose:\n self.show_parameters()",
"def fit(self) -> LinearRegressionParams:\n result = self.__m.fit()\n self._intercept, *self._coef = result.params\n self._pvalues = result.f_pvalue\n self._f_statistics = result.fvalue\n self._r_squared = result.rsquared\n\n return LinearRegressionParams(\n Coef=self._coef,\n Intercept=self._intercept,\n pvalues=self._pvalues,\n F_statistics=self._f_statistics,\n R_square=self._r_squared\n )",
"def setParameters(self):\n\n # Set the parameters\n self.taux = 24.2\n self.mu = 0.23\n self.G = 33.75\n self.alpha_0 = 0.05\n self.delta = 0.0075\n self.p = 0.50\n self.I0 = 9500.0\n self.kparam = 0.55",
"def set_slope(self, slope: float) -> None:\r\n self.slope = slope",
"def set_parameters(self, *args, **kwargs):\n super(DAEM, self).set_parameters(*args, **kwargs)\n self._Em = self._calc_Em()",
"def line_sSFR(**kwargs):\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n GR = glo.global_results()\n \n L_line = getattr(GR,'L_'+p.line+'_sun')#[0:100]\n SFR = getattr(GR,'SFR')#[0:100]\n Zsfr = getattr(GR,'Zsfr')#[0:100]\n R_gas = getattr(GR,'R2_gas')#[0:100]\n M_H2 = getattr(GR,'M_H2_R2_gas')#[0:100]\n M_star = getattr(GR,'M_star')#[0:100]\n\n # Take only MS galaxies?\n if p.select == '_MS':\n indices = aux.select_salim18(GR.M_star,GR.SFR)\n L_line = L_line[indices]\n SFR = SFR[indices]\n Zsfr = Zsfr[indices]\n print('With MS selection criteria: only %i galaxies' % (len(L_line)))\n\n SFR = SFR[L_line > 0]\n Zsfr = Zsfr[L_line > 0]\n R_gas = R_gas[L_line > 0]\n M_H2 = M_H2[L_line > 0]\n M_star = M_star[L_line > 0]\n sSFR = SFR/M_star\n L_line = L_line[L_line > 0]\n\n print('%i data points ' % (len(L_line)))\n\n labs = {'_M10':'Mach=10 power-law',\\\n '_arepoPDF_dim':'AREPO parametric PDF with extinction',\\\n '_arepoPDF':'AREPO parametric PDF'}\n lab = labs[p.table_ext]\n\n if p.add:\n ax = p.ax\n else:\n fig,ax = plt.subplots(figsize=(8,6))\n\n if p.select == 'Sigma_M_H2':\n Sigma_M_H2 = M_H2/(np.pi*R_gas**2)\n m = ax.scatter(sSFR[np.argsort(Sigma_M_H2)],L_line[np.argsort(Sigma_M_H2)],marker='o',s=20,\\\n c=np.log10(Sigma_M_H2[np.argsort(Sigma_M_H2)]),vmin=3.5,label=lab,alpha=0.6,zorder=10)\n cbar = plt.colorbar(m,ax=ax)\n cbar.set_label(label=r'log $\\Sigma_{H2}$ [M$_{\\odot}$/kpc$^2$]',size=15)\n else:\n m = ax.scatter(sSFR,L_line,marker='o',s=20,\\\n c=Zsfr,label=lab,alpha=0.6,zorder=10)\n cbar = plt.colorbar(m,ax=ax)\n cbar.set_label(label=r'$\\langle Z\\rangle_{\\mathrm{SFR}}$ [Z$_{\\odot}$]',size=15)\n\n if p.add_obs:\n add_line_sSFR_obs(p.line,L_line,ax,select=p.select)\n\n ax.set_xscale('log')\n ax.set_yscale('log')\n ax.set_xlabel(getlabel('sSFR'))\n ax.set_ylabel(getlabel(p.line))\n handles,labels = ax.get_legend_handles_labels()\n handles = np.flip(handles)\n labels = np.flip(labels)\n # ax.legend(handles,labels,loc='upper left',fontsize=7)\n ax.legend(handles,labels,loc='lower right',fontsize=7,frameon=True,framealpha=0.5) \n print(np.min(sSFR),np.max(sSFR))\n if not p.xlim: p.xlim = 10.**np.array([-13,-7])\n if not p.ylim: \n p.ylim = [np.median(L_line)/1e6,np.median(L_line)*1e4]\n ax.set_xlim(p.xlim)\n ax.set_ylim(p.ylim)\n ax.grid(ls='--')\n\n if p.savefig & (not p.add):\n if not os.path.isdir(p.d_plot + 'luminosity/'): os.mkdir(p.d_plot + 'luminosity/') \n plt.savefig(p.d_plot + 'luminosity/%s_sSFR.png' % p.line, format='png', dpi=300)",
"def _update_parameter(self, dWxh, dbh, dWhy, dby):\n # Add code to update all the weights and biases here",
"def residual4(params, x, data):\n #get the value of the params from a dict\n parvals = params.valuesdict()\n B0 = parvals['B0']\n E = parvals['E']\n Eh = parvals['Eh']\n Th = parvals['Th']\n model = np.log((B0*np.exp((-E/k)*((1/x)-(1/283.15)))) / (1+(np.exp((Eh/k)*((1/Th)-(1/x))))))\n return data - model",
"def __init__(self):\n self.slope = -1.0\n self.last_obs = -1.0\n self.last_obs_ind = -1\n self._fitted = False",
"def set_parameters(self, x):\n params = x.reshape((-1, self.Y.shape[1]))\n if self.add_bias:\n self.bias = params[0:1]\n self.W = params[1:]\n else:\n self.W = params",
"def set_parameters(self, **kwargs):\n self.__multi_layer_perceptron.set_params(**kwargs)",
"def set_parameter_values(self, c5=None, lm=1.0):\n\n self._c5 = c5\n self._lm = lm\n\n self._update()",
"def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Afb[0.6,-0.7,0.7]\");\n self.modelBuilder.doVar(\"A0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doSet(\"POI\",\"Afb,A0\")\n\n # ss templates\n self.modelBuilder.doVar(\"R_ee_os_fakes[0.6,0.0,1.0]\");\n self.modelBuilder.doVar(\"ee16_fakes_norm[1.0, 0.01, 10.]\");\n self.modelBuilder.doVar(\"ee17_fakes_norm[1.0, 0.01, 10.]\");\n self.modelBuilder.doVar(\"ee18_fakes_norm[1.0, 0.01, 10.]\");\n #Remember, cant use spaces in these formulas!\n #self.modelBuilder.options.verbose = 10\n self.modelBuilder.factory_('expr::R_ee16_qcd_os(\"@0*@1\",ee16_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee17_qcd_os(\"@0*@1\",ee17_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee18_qcd_os(\"@0*@1\",ee18_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee16_qcd_ss(\"@0*(1.0-@1)\",ee16_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee17_qcd_ss(\"@0*(1.0-@1)\",ee17_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee18_qcd_ss(\"@0*(1.0-@1)\",ee18_fakes_norm,R_ee_os_fakes)')\n \n self.modelBuilder.factory_('expr::Alph(\"2.0*@0/(2.0-@0)\",A0)')\n self.modelBuilder.factory_('expr::Norm(\"3.0/4.0/(2.0+@0)\",Alph)')\n self.modelBuilder.factory_('expr::RAlph(\"@0*@1\",Alph,Norm)')\n self.modelBuilder.factory_('expr::Rpl(\"(@0+@1)\",Norm,Afb)')\n self.modelBuilder.factory_('expr::Rmn(\"(@0-@1)\",Norm,Afb)')",
"def set_physical_params(self, params):\n self.M500 = params[0]\n self.r500 = params[1]\n self.z = params[2]",
"def setHeuristics(self, fitwid, fitoff, bothpass, relht, relwid, reloff):\n\tself.fwidth = fitwid\n\tself.foffset = fitoff\n\tself.bothpass = bothpass\n\tself.rheight = relht\n\tself.rwidth = relwid\n\tself.roffset = reloff",
"def partial_fit(self, X, y=..., **fit_params):\n ...",
"def update_parameters(self):\n self.alignment_factor = rospy.get_param('/dyn_reconf/alignment_factor')\n self.cohesion_factor = rospy.get_param('/dyn_reconf/cohesion_factor')\n self.separation_factor = rospy.get_param('/dyn_reconf/separation_factor')\n self.avoid_factor = rospy.get_param('/dyn_reconf/avoid_factor')\n self.max_speed = rospy.get_param('/dyn_reconf/max_speed')\n self.max_force = rospy.get_param('/dyn_reconf/max_force')\n self.friction = rospy.get_param('/dyn_reconf/friction')\n self.crowd_radius = rospy.get_param('/dyn_reconf/crowd_radius')\n self.search_radius = rospy.get_param('/dyn_reconf/search_radius')\n\n rospy.loginfo(rospy.get_caller_id() + \" -> Parameters updated\")\n if DEBUG:\n print('alignment_factor: ', self.alignment_factor)\n print('cohesion_factor: ', self.cohesion_factor)\n print('separation_factor: ', self.separation_factor)\n print('avoid_factor: ', self.avoid_factor)\n print('max_speed: ', self.max_speed)\n print('max_force: ', self.max_force)\n print('friction: ', self.friction)\n print('crowd_radius: ', self.crowd_radius)\n print('search_radius: ', self.search_radius)",
"def fit_slopes_intercepts(slopes, intercepts, stds, waves, norm):\n # define a mask for the good data\n mask = ~np.isnan(slopes)\n short_wave_mask = waves < 4.1\n\n # fit the intercepts with a power law\n fit_lev = fitting.LevMarLSQFitter()\n powerlaw = models.PowerLaw1D(fixed={\"x_0\": True})\n fit_intercepts = fit_lev(powerlaw, waves[mask], intercepts[mask])\n\n # define the anchor points for the spline interpolation\n # divide the data into 25 bins with the same number of data points in every bin\n alloc, bin_edges = pd.qcut(waves[mask * short_wave_mask], q=25, retbins=True)\n # calculate the median wavelength, slope and standard deviation in every bin\n meds, edges, indices = stats.binned_statistic(\n waves[mask * short_wave_mask],\n (\n waves[mask * short_wave_mask],\n slopes[mask * short_wave_mask],\n stds[mask * short_wave_mask],\n ),\n statistic=\"median\",\n bins=bin_edges,\n )\n\n # use the median values as the anchor points for the spline interpolation\n spline_wave = meds[0][~np.isnan(meds[0])]\n spline_slope = meds[1][~np.isnan(meds[1])]\n spline_std = meds[2][~np.isnan(meds[2])]\n\n # interpolate the slopes with a spline function\n fit_slopes = interpolate.splrep(spline_wave, spline_slope)\n\n # interpolate the standard deviations with a spline function\n fit_stds = interpolate.splrep(spline_wave, spline_std)\n\n # create tables with the fitting results at certain wavelengths\n table_waves = np.arange(0.8, 4.05, 0.05)\n table_inv_rv_dep(\n table_path, table_waves, fit_slopes, fit_intercepts, fit_stds, norm\n )\n\n # create a table with the anchor points of the spline interpolation\n table_spline(table_path, spline_wave, spline_slope, spline_std, norm)\n\n return spline_wave, spline_slope, spline_std, fit_slopes, fit_intercepts, fit_stds",
"def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"dAfb[0.,-0.75,0.75]\");\n self.modelBuilder.doVar(\"dA0[0.0, -1.0, 1.0]\");\n #self.modelBuilder.doSet(\"POI\",\"dAfb,dA0\")\n self.modelBuilder.doSet(\"POI\",\"dAfb\")\n self.modelBuilder.factory_('expr::mAfb(\"@0+@1\",eAfb,dAfb)')\n self.modelBuilder.factory_('expr::mA0(\"(@0+@1)\",eA0,dA0)')\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')",
"def _set_params(self, x):\r\n assert x.size == self.num_params\r\n self.variance = x[0]\r\n self.lengthscale = x[1:]",
"def _set_params(self, x):\r\n assert x.size == self.num_params\r\n self.variance = x[0]\r\n self.lengthscale = x[1:]",
"def lam4fit(param, x):\n pedestal, amplitude, inc = param\n ophase = x*2*pi\n return pedestal + abs(amplitude)*lambertian(ophase, inc=inc)",
"def set_parameters(self,params):\n K3Supervisor.set_parameters(self,params)\n self.gtg.set_parameters(self.parameters)\n self.avoidobstacles.set_parameters(self.parameters)\n self.wall.set_parameters(self.parameters)",
"def SetVariationalParameters(self, data):\n self._SetParameters(data, 'SetVariationalParameters')",
"def fit(self, X, Y, **fit_params):\n ...",
"def fit(self, X, Y, **fit_params):\n ...",
"def __init__(self, encut, magmom, ldaul, Uparam, Jparam, name=\"DFTCL_settings\"):\n\n cl_settings = {\"ISPIN\": 2, \"MAGMOM\": magmom, \"SAXIS\": None, \"LSORBIT\": None, \"LNONCOLLINEAR\": None}\n dftu_settings = {\"LDAU\": \".TRUE.\", \"LDAUU\": Uparam, \"LDATYPE\": 2, \"LDAUL\": ldaul, \"LDAUJ\": Jparam , \"LMAXMIMX\": 4}\n InputParameters.__init__(self, name=name, magnetic_settings=cl_settings, hubbard_settings=dftu_settings)\n self.update_electronic_settings(\"encut\", encut)",
"def _prepare_fit(self):\n self.lower_conf_int, self.upper_conf_int, self.upper_whisker_res = None, None, None\n self.model_fit = None\n self.residuals, self.residuals_forecast, self.fittedvalues = None, None, None",
"def __fill_motor_parameters(self, motor_id, model_number):\n # Get the Motor Name\n model_name = self.dynotools.getModelNameByModelNumber(model_number)\n\n # Get Max and Min angles\n angles = self.sdk_io.get_angle_limits(motor_id, model_name)\n self.angles[str(model_name)] = angles \n\n # Get Current, Min, and Max voltages\n voltage = self.sdk_io.get_voltage(motor_id, model_name)\n voltages = self.sdk_io.get_voltage_limits(motor_id, model_name)\n \n # ROS Parameters Setup\n rospy.set_param('dynamixel/%s/%d/model_number' %(self.port_namespace, motor_id), model_number)\n rospy.set_param('dynamixel/%s/%d/model_name' %(self.port_namespace, motor_id), DXL_MODEL_TO_PARAMS[model_number]['name'])\n rospy.set_param('dynamixel/%s/%d/min_angle' %(self.port_namespace, motor_id), angles['min'])\n rospy.set_param('dynamixel/%s/%d/max_angle' %(self.port_namespace, motor_id), angles['max'])\n torque_per_volt = DXL_MODEL_TO_PARAMS[model_number]['torque_per_volt']\n rospy.set_param('dynamixel/%s/%d/torque_per_volt' %(self.port_namespace, motor_id), torque_per_volt)\n rospy.set_param('dynamixel/%s/%d/max_torque' %(self.port_namespace, motor_id), torque_per_volt * voltage)\n \n velocity_per_volt = DXL_MODEL_TO_PARAMS[model_number]['velocity_per_volt']\n rpm_per_tick = DXL_MODEL_TO_PARAMS[model_number]['rpm_per_tick']\n rospy.set_param('dynamixel/%s/%d/velocity_per_volt' %(self.port_namespace, motor_id), velocity_per_volt)\n rospy.set_param('dynamixel/%s/%d/max_velocity' %(self.port_namespace, motor_id), velocity_per_volt * voltage)\n rospy.set_param('dynamixel/%s/%d/radians_second_per_encoder_tick' %(self.port_namespace, motor_id), rpm_per_tick * RPM_TO_RADSEC)\n \n encoder_resolution = DXL_MODEL_TO_PARAMS[model_number]['encoder_resolution']\n range_degrees = DXL_MODEL_TO_PARAMS[model_number]['range_degrees']\n range_radians = math.radians(range_degrees)\n rospy.set_param('dynamixel/%s/%d/encoder_resolution' %(self.port_namespace, motor_id), encoder_resolution)\n rospy.set_param('dynamixel/%s/%d/range_degrees' %(self.port_namespace, motor_id), range_degrees)\n rospy.set_param('dynamixel/%s/%d/range_radians' %(self.port_namespace, motor_id), range_radians)\n rospy.set_param('dynamixel/%s/%d/encoder_ticks_per_degree' %(self.port_namespace, motor_id), encoder_resolution / range_degrees)\n rospy.set_param('dynamixel/%s/%d/encoder_ticks_per_radian' %(self.port_namespace, motor_id), encoder_resolution / range_radians)\n rospy.set_param('dynamixel/%s/%d/degrees_per_encoder_tick' %(self.port_namespace, motor_id), range_degrees / encoder_resolution)\n rospy.set_param('dynamixel/%s/%d/radians_per_encoder_tick' %(self.port_namespace, motor_id), range_radians / encoder_resolution)\n\n # Get Parameters for pos_to_raw\n self.flipped = angles['min'] > angles['max']\n self.encoder_resolution = encoder_resolution\n self.range_degrees = range_degrees\n self.range_radians = range_radians\n self.encoder_ticks_per_degree = encoder_resolution / range_degrees\n self.encoder_ticks_per_radian = encoder_resolution / range_radians\n self.degrees_per_encoder_tick = range_degrees / encoder_resolution\n self.radians_per_encoder_tick = range_radians / encoder_resolution\n self.initial_position_raw = self.sdk_io.get_position(motor_id, model_name)\n\n # Flipped case\n if self.flipped:\n self.min_angle = (self.initial_position_raw - angles['min']) * self.radians_per_encoder_tick\n self.max_angle = (self.initial_position_raw - angles['max']) * self.radians_per_encoder_tick\n else:\n self.min_angle = (angles['min'] - self.initial_position_raw) 
* self.radians_per_encoder_tick\n self.max_angle = (angles['max']- self.initial_position_raw) * self.radians_per_encoder_tick\n\n # keep some parameters around for diagnostics\n self.motor_static_info[motor_id] = {}\n self.motor_static_info[motor_id]['model'] = DXL_MODEL_TO_PARAMS[model_number]['name']\n self.motor_static_info[motor_id]['min_angle'] = angles['min']\n self.motor_static_info[motor_id]['max_angle'] = angles['max']",
"def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Afb[0.6,-0.75,0.75]\");\n self.modelBuilder.doSet(\"POI\",\"Afb\")\n\n # ss templates\n self.modelBuilder.doVar(\"Dilu_ratio[1.0,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rdy_mumu_ss[1.0,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rdy_ee_ss[1.0,0.0,10.0]\");\n \n self.modelBuilder.factory_('expr::Rpl(\"0.5*(1.+@0*@1)\",Afb, Dilu_ratio)')\n self.modelBuilder.factory_('expr::Rmn(\"0.5*(1.-@0*@1)\",Afb, Dilu_ratio)')",
"def test_linear_fit_model_set_masked_values(self):\n # NB. For single models, there is an equivalent doctest.\n\n init_model = models.Polynomial1D(degree=1, n_models=2)\n x = np.arange(10)\n y = np.ma.masked_array([2 * x + 1, x - 2], mask=np.zeros_like([x, x]))\n\n y[0, 7] = 100.0 # throw off fit coefficients if unmasked\n y.mask[0, 7] = True\n y[1, 1:3] = -100.0\n y.mask[1, 1:3] = True\n\n fitter = LinearLSQFitter()\n fitted_model = fitter(init_model, x, y)\n\n assert_allclose(fitted_model.c0, [1.0, -2.0], atol=1e-14)\n assert_allclose(fitted_model.c1, [2.0, 1.0], atol=1e-14)",
"def fit_lattice(self):\n self.lattice = minimize(self._residual_lattice, self.latt_par) \n self.qx = self._q_x()\n self.qz = self._q_z()",
"def fitModel(self, params:lmfit.Parameters=None):\r\n if params is None:\r\n params = self.params\r\n self.initializeRoadRunnerModel()\r\n if self.parametersToFit is not None:\r\n self.optimizer = Optimizer.optimize(self.calcResiduals, params,\r\n self._fitterMethods, logger=self.logger,\r\n numRestart=self._numRestart)\r\n self.minimizerResult = self.optimizer.minimizerResult\r\n # Ensure that residualsTS and fittedTS match the parameters\r\n self.updateFittedAndResiduals(params=self.params)",
"def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Afb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"A0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doSet(\"POI\",\"Afb,A0\")\n\n \n self.modelBuilder.factory_('expr::Alph(\"2.0*@0/(2.0-@0)\",A0)')\n self.modelBuilder.factory_('expr::Norm(\"3.0/4.0/(2.0+@0)\",Alph)')\n self.modelBuilder.factory_('expr::RAlph(\"@0*@1\",Alph,Norm)')\n self.modelBuilder.factory_('expr::Rpl(\"(@0+@1)\",Norm,Afb)')\n self.modelBuilder.factory_('expr::Rmn(\"(@0-@1)\",Norm,Afb)')",
"def doParametersOfInterest(self):\n ''' ref : physicsmodel -> rvf\n self.modelBuilder.out.var(\"MH\").setRange(float(self.mHRange[0]),float(self.mHRange[1]))\n self.modelBuilder.out.var(\"MH\").setConstant(False)\n '''\n\n self.modelBuilder.doVar(\"mu[0,0,1000]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doVar(\"Fvbf[0,0,1]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doSet(\"POI\",\"mu,Fvbf\")\n self.modelBuilder.doVar(\"\")\n self.modelBuilder.factory_('expr::ggH_s_func(\"(@0-sqrt(@0))*(1.-@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::ggH_b_func(\"(1-sqrt(@0))*(1.-@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::ggH_sbi_func(\"sqrt(@0)*(1.-@1)\", mu,Fvbf)')\n\n self.modelBuilder.factory_('expr::vbfH_s_func(\"(@0-sqrt(@0))*(@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::vbfH_b_func(\"(1-sqrt(@0))*(@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::vbfH_sbi_func(\"sqrt(@0)*(@1)\", mu,Fvbf)')",
"def __init__(self, x, y, pRanges, xerr=None, yerr=None, flag=None, lnlikeType=\"Nukers\",\n fix_slope=None, fix_intercept=None):\n self.x = x\n self.y = y\n self.pRanges = pRanges\n self.flag = flag\n self.xerr = xerr\n self.yerr = yerr\n self.fix_slope = fix_slope\n self.fix_intercept = fix_intercept\n nfix = 0\n if not fix_slope is None:\n nfix += 1\n if not fix_intercept is None:\n nfix += 1\n ndim = len(pRanges)\n if (ndim + nfix) == 2:\n print(\"[linfit]: The model uncertainty is NOT considered!\")\n elif (ndim + nfix) == 3:\n print(\"[linfit]: The model uncertainty is considered!\")\n else:\n raise ValueError(\"[linfit]: The parameter number ({0}) is incorrect!\".format(ndim))\n self.ndim = ndim\n if (xerr is None) & (yerr is None):\n xerr = np.zeros_like(x)\n yerr = np.ones_like(y)\n else:\n if xerr is None:\n xerr = np.zeros_like(x)\n if yerr is None:\n yerr = np.zeros_like(y)\n if lnlikeType == \"Nukers\":\n self.lnlike = lnlike_Nukers\n self.parNames = [\"beta\", \"alpha\", \"epsy0\"]\n elif lnlikeType == \"gcs\":\n self.lnlike = lnlike_gcs\n self.parNames = []\n if fix_slope is None:\n self.parNames.append(\"m\")\n if fix_intercept is None:\n self.parNames.append(\"b\")\n self.parNames.append(\"lnf\")\n elif lnlikeType == \"naive\":\n self.lnlike = lnlike_naive\n self.parNames = [\"m\", \"b\", \"lnf\"]\n elif lnlikeType == \"perp\":\n self.lnlike = lnlike_perp\n self.parNames = [\"theta\", \"bv\", \"V\"]\n elif lnlikeType == \"perp2\":\n self.lnlike = lnlike_perp2\n self.parNames = [\"theta\", \"b\", \"V\"]\n else:\n raise ValueError(\"[linfit]: The lnlike function ({0}) is not recognised!\".format(lnlike))\n self.lnlikeType = lnlikeType",
"def fitbackground(xdata,ydata,fitparams=None, showfit=False,showstartfit=False,label=\"\"):\n fitdatax=xdata\n fitdatay=ydata\n if fitparams is None: \n fitparams=[-6,0,0,0,0,0,0,0,0,0,6.9e+9]\n #print '--------------Initial Parameter Set--------------\\nf0: {0}\\nQi: {1}\\nQc: {2}\\ndf: {3}\\nScale: {4}\\nSlope: {5}\\nOffset:{6}\\n'.format(f0,Qi,Qc,0.,scale,slope, offset)\n return fitgeneral(fitdatax, fitdatay, polynomial, fitparams, domain=None, showfit=showfit,\n showstartfit=showstartfit, label=label)",
"def _set_params(self,x):\r\n assert x.size==3\r\n self.variance = x[0]\r\n self.lengthscale = x[1]\r\n self.period = x[2]\r\n\r\n self.a = [5*np.sqrt(5)/self.lengthscale**3, 15./self.lengthscale**2,3*np.sqrt(5)/self.lengthscale, 1.]\r\n self.b = [9./8, 9*self.lengthscale**4/200., 3*self.lengthscale**2/5., 3*self.lengthscale**2/(5*8.), 3*self.lengthscale**2/(5*8.)]\r\n\r\n self.basis_alpha = np.ones((2*self.n_freq,))\r\n self.basis_omega = np.array(sum([[i*2*np.pi/self.period]*2 for i in range(1,self.n_freq+1)],[]))\r\n self.basis_phi = np.array(sum([[-np.pi/2, 0.] for i in range(1,self.n_freq+1)],[]))\r\n\r\n self.G = self.Gram_matrix()\r\n self.Gi = np.linalg.inv(self.G)",
"def fit(self):\n # Initialize parameter estimates\n if self.estimator is not None:\n param_estimates = self.estimator(self.xf, self.yf)\n else: param_estimates = None\n self.popt, self.pcov = curve_fit(self.model, self.xf, self.yf, \n p0=param_estimates)\n self.fit_history.append({\"popt\" : self.popt, \"pcov\" : self.pcov})",
"def __init__(self, encut, spinaxis, ldaul, Uparam, Jparam, name='DFTCL_settings'):\n ncl_settings = {\"ISPIN\": 2, \"MAGMOM\": None, \"SAXIS\": spinaxis, \"LSORBIT\": \".TRUE.\", \"LNONCOLLINEAR\": \".TRUE.\"}\n dftu_settings = {\"LDAU\": \".TRUE.\", \"LDAUU\": Uparam, \"LDATYPE\": 2, \"LDAUL\": ldaul, \"LDAUJ\": Jparam , \"LMAXMIX\": 4}\n InputParameters.__init__(self, name=name, magnetic_settings=ncl_settings, hubbard_settings=dftu_settings)\n self.update_electronic_settings(\"ENCUT\", encut)",
"def _set_params(self, x):\r\n assert x.size == self.num_params\r\n self.varianceU = x[0]\r\n self.varianceY = x[1]\r\n self.lengthscaleU = x[2]\r\n self.lengthscaleY = x[3]",
"def SetSampleParameters(self, data):\n self._SetParameters(data, 'SetSampleParameters')",
"def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Afb[0.6,-0.70,0.70]\");\n self.modelBuilder.doSet(\"POI\",\"Afb\")\n\n # ss templates\n self.modelBuilder.doVar(\"Rdy_mumu_ss[1.0,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rdy_ee_ss[1.0,0.0,10.0]\");\n \n self.modelBuilder.factory_('expr::Rpl(\"(1.+@0)\",Afb)')\n self.modelBuilder.factory_('expr::Rmn(\"(1.-@0)\",Afb)')",
"def add_line_SFR_obs(line,L_line,ax,plot_fit=True,**kwargs):\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n # --- Observations compiled in Observations.ipynb ---\n\n L_obs = np.array([])\n SFR_obs = np.array([])\n\n if p.plot: print('\\nObserved galaxies with %s:' % line)\n\n c = 'dimgrey'\n a = 0.8\n mew = 1\n\n # Kamenetzky et al. 2016\n df = pd.read_pickle('data/observations/AHIMSA_sample_lit')\n df = df[(df.sizes < 47) & (df.SFR > 1e-4) & (df[line+ '_Lsun'].values > 0)] \n try:\n if p.plot: ax.plot(np.log10(df.SFR[(df.sizes < 47) & (df.SFR > 1e-4)]),\\\n np.log10(df[line + '_Lsun'][(df.sizes < 47) & (df.SFR > 1e-4)]),'>',ms=6,fillstyle='none',mew=mew,\\\n color=c,alpha=a,label='Mixed type galaxies [Kamenetzky+16]')\n lo_err = np.array(np.log10(df[line+ '_Lsun'].values)-np.log10(df[line+ '_Lsun'].values-df['e_'+line+ '_Lsun'].values))\n up_err = np.array(np.log10(df[line+ '_Lsun'].values+df['e_'+line+ '_Lsun'].values)-np.log10(df[line+ '_Lsun'].values))\n lo_err[df[line+ '_Lsun'].values == 0] = 0\n up_err[df[line+ '_Lsun'].values == 0] = 0\n # ax.errorbar(np.log10(df.SFR),\\\n # np.log10(df[line+ '_Lsun']),\\\n # yerr=np.column_stack([lo_err,up_err]).T,\\\n # elinewidth=1,marker='>',ms=6,mew=1,fillstyle='none',\\\n # color='grey',alpha=0.8,lw=0,label='Mixed z~0 sample [Kamenetzky+16]')\n L_obs = np.append(L_obs,df[line + '_Lsun'].values)\n SFR_obs = np.append(SFR_obs,df.SFR.values)\n if p.plot: print('%i galaxies from Kamenetzky+16 ' % (len(L_obs)))\n except:\n pass\n\n # print('min SFR: ',np.min(df.SFR.values[df.sizes < 47]))\n\n # Brauher et al. 2008\n try:\n df = pd.read_pickle('data/observations/Brauher_2008')\n if p.plot: \n # lo_err = np.array(np.log10(df['L_'+line].values)-np.log10(df['L_'+line].values-df['e_'+line].values))\n # up_err = np.array(np.log10(df['L_'+line].values+df['e_'+line].values)-np.log10(df['L_'+line].values))\n # print(lo_err)\n # print(df['e_'+line].values/df['L_'+line])\n # ax.errorbar(np.log10(df.SFR),np.log10(df['L_'+line]),\\\n # yerr=np.column_stack([lo_err,up_err]).T,\\\n # elinewidth=1,marker='o',ms=7,mew=1,fillstyle='none',\\\n # color='grey',alpha=0.8,lw=0,label='MS/SB galaxies [Brauher+08]')\n ax.plot(np.log10(df.SFR),np.log10(df['L_%s' % line]),'o',fillstyle='none',ms=4,mew=mew,color=c,\\\n alpha=a,label='MS/SB galaxies [Brauher+08]')\n L_ul = np.log10(df['L_%s' % line][df['f_'+line] == -1])\n if len(L_ul) > 0:\n # ax.plot(df.SFR[df['f_'+line] == -1],L_ul,'o',zorder=0,ms=7,mew=1,color='grey',alpha=0.8)\n ax.errorbar(np.log10(df.SFR[df['f_'+line] == -1]),L_ul,capsize=3,color=c,alpha=a,elinewidth=1,\\\n uplims=np.ones(len(L_ul)),\\\n yerr=0.3,lw=0)\n if p.plot: print('%i galaxies from Brauher+08 ' % (len(df)))\n L =df['L_%s' % line].values\n SFR =df['SFR'].values\n L = L[SFR > 0]\n SFR = SFR[SFR > 0]\n L_obs = np.append(L_obs,L)\n SFR_obs = np.append(SFR_obs,SFR)\n # print('min SFR: ',np.min(df.SFR))\n except:\n pass\n\n if p.select != '_MS':\n # Cormier et al. 
2015\n try:\n df = pd.read_pickle('data/observations/DGS_Cormier_2015')\n if p.plot: \n # try: \n # lo_err = np.array(np.log10(df['L_'+line].values)-np.log10(df['L_'+line].values-df['e_'+line].values))\n # up_err = np.array(np.log10(df['L_'+line].values+df['e_'+line].values)-np.log10(df['L_'+line].values))\n # ax.errorbar(df.SFR,np.log10(df['L_'+line]),\\\n # yerr=np.column_stack([lo_err,up_err]).T,\\\n # elinewidth=1,marker='x',ms=7,mew=mew,\\\n # color=c,alpha=a,lw=0,label='Dwarf galaxies [Cormier+15]')\n # except:\n ax.plot(df.SFR,np.log10(df['L_%s' % line]),'x',zorder=0,ms=7,mew=mew,color=c,alpha=a,\\\n label='Dwarf galaxies [Cormier+15]')\n L_ul = np.log10(-1.*df['L_'+line][df['L_'+line] < 0])\n if len(L_ul) > 0:\n ax.plot(df.SFR[df['L_'+line] < 0],L_ul,'x',zorder=0,ms=7,mew=mew,color=c,alpha=a)\n ax.errorbar(df.SFR[df['L_'+line] < 0],L_ul,capsize=3,color=c,alpha=a,elinewidth=1,\\\n uplims=np.ones(len(L_ul)),\\\n yerr=0.3,lw=0)\n # np.log10(-1.*L_ul - 10.**(np.log10(-1.*L_ul)-0.3))\n if p.plot: print('%i galaxies from Cormier+15 ' % (len(df)))\n L_obs = np.append(L_obs,df['L_%s' % line].values)\n SFR_obs = np.append(SFR_obs,10.**df.SFR.values)\n except:\n pass\n\n # Schruba et al. 2012\n #try:\n if (line == 'CO(1-0)') | (line == 'CO(2-1)'):\n df = pd.read_pickle('data/observations/Schruba_2012')\n if p.plot: \n if line == 'CO(1-0)': label = 'Mixed type galaxies [Schruba+12]'\n if line == 'CO(2-1)': label = 'Dwarf galaxies [Schruba+12]'\n f_ul = df['f_%s' % line].values\n L = df['L_%s' % line].values\n SFR = df['SFR'].values\n L_obs = np.append(L_obs,L[L > 0])\n SFR_obs = np.append(SFR_obs,SFR[L > 0])\n Z = df['Z'].values\n if line == 'CO(2-1)': \n SFR = SFR[L>0]\n f_ul = f_ul[L>0]\n Z = Z[L>0]\n L = L[L>0]\n print('Schruba min max Z: ',Z.min(),Z.max())\n M_H2 = 1.8e9 * SFR # from S12 paper\n area = np.array([1.33,1.79,1.75,7.74,11.47,12.37,26.69,83.85,12.23,39.40,19.21,7.78,14.75,59.54,31.19,39.19]) # kpc2\n Sigma_M_H2 = M_H2 / (area*1000*1000)\n if p.select == 'Zsfr': \n ax.scatter(np.log10(SFR[L > 0]),np.log10(L[L > 0]),marker='*',zorder=0,facecolors='none',s=30,\\\n linewidth=mew,c=np.log10(Z),alpha=a,label=label,vmin=p.vmin,vmax=p.vmax)\n else:\n ax.scatter(np.log10(SFR[L > 0]),np.log10(L[L > 0]),marker='*',zorder=0,facecolors='none',s=30,\\\n linewidth=mew,c=np.log10(Sigma_M_H2),alpha=a,label=label,vmin=p.vmin,vmax=p.vmax)\n if line == 'CO(1-0)': \n ax.plot(np.log10(SFR[L > 0]),np.log10(L[L > 0]),'*',zorder=0,fillstyle='none',ms=7,mew=mew,color=c,alpha=a,\\\n label=label)\n if len(f_ul) > 0:\n # ax.plot(np.log10(SFR[f_ul == 1]),np.log10(L[f_ul == 1]),'*',zorder=0,fillstyle='none',ms=7,mew=mew,color=c,alpha=a)\n ax.errorbar(np.log10(SFR[f_ul == 1]),np.log10(L[f_ul == 1]),capsize=3,fillstyle='none',color=c,alpha=a,elinewidth=1,\\\n uplims=np.ones(len(L[f_ul == 1])),\\\n yerr=0.3,lw=0)\n if p.plot: print('%i galaxies from Schruba+12 ' % (len(df)))\n #except:\n # pass\n\n # Accurso et al. 2017\n try:\n df = pd.read_pickle('data/observations/xCOLD_GASS_Accurso_2017')\n df = df.loc[np.argwhere(df['L_CO(1-0)'].values > 0).flatten()]\n if p.plot: ax.plot(np.log10(df['SFR']),df['L_%s' % line], 'd', zorder=0,ms=7,fillstyle='none',mew=mew,color=c,alpha=a,label='COLD GASS [Accurso+17]') #c=np.log10(A17['Z']), \n L_obs = np.append(L_obs,10.**df['L_%s' % line].values)\n if p.plot: print('%i galaxies from Accurso+17 ' % (len(df)))\n SFR_obs = np.append(SFR_obs,df.SFR.values)\n except:\n pass\n\n # Vanzi et al. 
2009\n if line == 'CO(3-2)':\n df = pd.read_pickle('data/observations/Vanzi_2009')\n df = df.loc[np.argwhere(df['L_CO(3-2)'].values > 0).flatten()]\n if p.plot: ax.plot(np.log10(df['SFR']),np.log10(df['L_%s' % line]), 'D', zorder=0,ms=7,fillstyle='none',mew=mew,\\\n color=c,alpha=a,label='Dwarf galaxies [Vanzi+09]') #c=np.log10(A17['Z']), \n L_obs = np.append(L_obs,df['L_%s' % line].values)\n if p.plot: print('%i galaxies from Vanzi+09 ' % (len(df)))\n SFR_obs = np.append(SFR_obs,df.SFR.values)\n # except:\n # pass\n\n\n # Diaz-Santos et al. 2013\n try:\n df = pd.read_pickle('data/observations/Diaz-Santos_2013')\n if p.plot: ax.plot(np.log10(df.SFR),np.log10(df['L_%s' % line]),'^',ms=6,zorder=0,fillstyle='none',mew=mew,color=c,alpha=a,label='LIRGs [Diaz-Santos+13]')\n if p.plot: print('%i galaxies from Diaz-Santos+17 ' % (len(df)))\n L_obs = np.append(L_obs,df['L_%s' % line].values)\n SFR_obs = np.append(SFR_obs,df.SFR.values)\n # print('min SFR: ',np.min(df.SFR))\n except:\n pass\n # Farrah et al. 2013\n # try:\n # df = pd.read_pickle('data/observations/Farrah_2013')\n # if p.plot: ax.plot(df.SFR,df['L_%s' % line],'<',fillstyle='none',mew=1,color='grey',alpha=0.8,label='Farrah+13 (ULIRGs)')\n # if p.plot: print('%i galaxies from Farrah+13 ' % (len(df)))\n # L_obs = np.append(L_obs,df['L_%s' % line].values)\n # SFR_obs = np.append(SFR_obs,df.SFR.values)\n # except:\n # pass\n # Zhao et al. 2016\n try:\n df = pd.read_pickle('data/observations/Zhao_2016')\n if p.plot: ax.plot(np.log10(df.SFR),np.log10(df['L_%s' % line]),'<',ms=6,fillstyle='none',mew=mew,color=c,alpha=a,label='GOALS (U)LIRGs [Zhao+16]')\n if p.plot: print('%i galaxies from Zhao+16 ' % (len(df)))\n L_obs = np.append(L_obs,df['L_%s' % line].values)\n SFR_obs = np.append(SFR_obs,df.SFR.values)\n # print('min SFR: ',np.min(df.SFR))\n except:\n pass\n\n if line in ['[CII]158','[OI]63','[OIII]88']:\n # De Looze 2014 relation\n if np.min(L_line) == 0 : L_line[L_line == 0] = 1e-30\n if p.plot: print(np.min(np.log10(L_line)),np.max(np.log10(L_line)))\n logL_delooze = np.arange(np.min(np.log10(L_line)) - 3,np.max(np.log10(L_line)) + 3)\n\n if line == '[CII]158':\n logSFR_delooze_DGS = -5.73 + 0.8 * logL_delooze\n logSFR_delooze_SBG = -7.06 + 1.0 * logL_delooze\n\n if line == '[OI]63':\n logSFR_delooze_DGS = -6.23 + 0.91 * logL_delooze\n logSFR_delooze_SBG = -6.05 + 0.89 * logL_delooze\n\n if line == '[OIII]88':\n logSFR_delooze_DGS = -6.71 + 0.92 * logL_delooze\n logSFR_delooze_SBG = -3.89 + 0.69 * logL_delooze\n\n if p.plot: ax.plot(logSFR_delooze_DGS,logL_delooze,'--',color='grey',alpha=0.7,\\\n label='Local dwarf galaxies [de Looze+ 2014]')\n if p.plot: ax.plot(logSFR_delooze_SBG,logL_delooze,':',color='grey',alpha=0.7,\\\n label='Local SB galaxies [de Looze+ 2014]')\n # print(SFR_obs)\n logSFR = np.arange(np.min(np.log10(SFR_obs[SFR_obs > 0])) - 3,np.max(np.log10(SFR_obs[SFR_obs > 0])) + 3)\n # fit = np.polyfit(np.log10(SFR_obs[(L_obs > 0) & (SFR_obs > 0)]),\\\n # np.log10(L_obs[(L_obs > 0) & (SFR_obs > 0)]),1)\n # pfit = np.poly1d(fit)\n # L_fit = 10.**pfit(logSFR)\n\n # Make log-linear fit to SFR-binned luminosities\n SFRs = SFR_obs[(L_obs > 0) & (SFR_obs > 0)]\n Ls = L_obs[(L_obs > 0) & (SFR_obs > 0)]\n SFR_axis = np.linspace(np.log10(SFRs.min()),np.log10(SFRs.max()),20)\n SFR_bins = SFR_axis[0:-1] + (SFR_axis[1]-SFR_axis[0])/2.\n Ls_binned = np.zeros(len(SFR_axis)-1)\n for i in range(len(Ls_binned)):\n Ls1 = Ls[(SFRs >= 10.**SFR_axis[i]) & (SFRs <= 10.**SFR_axis[i+1])]\n Ls_binned[i] = np.mean(np.log10(Ls1))\n SFR_bins = 
SFR_bins[Ls_binned > 0]\n Ls_binned = Ls_binned[Ls_binned > 0]\n # ax.plot(10.**SFR_bins,10.**Ls_binned,'x',color='orange',mew=3)\n fit = LinearRegression().fit(SFR_bins.reshape(-1, 1),\\\n Ls_binned.reshape(-1, 1))\n L_fit = 10.**fit.predict(logSFR.reshape(-1, 1))\n if p.plot & plot_fit: ax.plot(logSFR,np.log10(L_fit),'--k',lw=1.5,zorder=0)\n\n # print(line)\n # print(np.log10(L_obs[(L_obs > 0) & (SFR_obs > 0)]))\n # print(fit.predict(SFR_obs[(L_obs > 0) & (SFR_obs > 0)].reshape(-1, 1)).flatten())\n\n std = np.std(np.log10(L_obs[(L_obs > 0) & (SFR_obs > 0)]) - \\\n fit.predict(np.log10(SFR_obs[(L_obs > 0) & (SFR_obs > 0)]).reshape(-1, 1)).flatten())\n\n\n # Read literature data from AHIMSA project\n # obsdf = pd.read_pickle(p.d_data+'observations/sample_lit')\n # print(obsdf.keys())\n # print(L_obs)\n # print(SFR_obs)\n\n if not p.plot: \n return(L_obs.flatten(),SFR_obs.flatten(),fit,std)",
"def _set_params(self,x):\r\n self.k._set_params(x)",
"def linearize(self, params, unknowns, resids):\n\n m = self.slope\n J = {}\n\n J['y', 'x'] = m\n return J",
"def init_solid_params(eos_d):\n # All units must be per atom (to make sense for arbitrary composition)\n\n models.Control.set_consts( [], [], eos_d )\n\n const_d = eos_d['const_d']\n\n Nat_cell = 20\n Nat_formula = 5\n\n T0 = 300 # K\n\n # EOS Parameter values initially set by Mosenfelder2009\n # Set model parameter values\n mass_avg = (24.31+28.09+3*16.0)/5.0 # g/(mol atom)\n S0 = 0.0 # must adjust\n param_key_a = ['T0','S0','mass_avg']\n param_val_a = np.array([T0,S0,mass_avg])\n models.Control.set_params( param_key_a, param_val_a, eos_d )\n\n # V0 = (38.575*1e-5)*mass_avg/eos_d['const_d']['Nmol']/1e3*1e30 # ang^3/atom\n V0 = 162.35/Nat_cell # ang^3/atom\n K0 = 254.7 # GPa\n KP0= 4.26\n E0 = 0.0\n param_key_a = ['V0','K0','KP0','E0']\n param_val_a = np.array([V0,K0,KP0,E0])\n models.Control.set_params( param_key_a, param_val_a, eos_d )\n\n VR = V0\n thetaR = 736 # K\n gammaR = 2.23\n qR = 1.83\n param_key_a = ['VR','thetaR','gammaR','qR']\n param_val_a = np.array([VR,thetaR,gammaR,qR])\n models.Control.set_params( param_key_a, param_val_a, eos_d )\n\n # NOTE: Mosenfelder(2009) has mislabeled units as J/K/g\n # -> units are actually J/K/kg ???\n # The measured 1000K heat capacity of MgSiO3 is ~125 J/K/mol\n # (equal to Dulong Petit value for 5 atom basis)\n # -> This value is thus ~65% of that nominal value,\n # balancing the 30 to 40% values of gamma that are higher than other\n # studies (static compression only constrains Gamma*Cv\n #\n # Max const-vol heat capacity:\n Cvmax = (806.0/1e3)*mass_avg/const_d['kJ_molpereV']/1e3 # J/mol atoms/K -> eV/K/atom\n\n param_key_a = ['Cvmax']\n param_val_a = np.array([Cvmax])\n models.Control.set_params( param_key_a, param_val_a, eos_d )\n\n # # Must convert energy units from kJ/g to eV/atom\n energy_conv_fac = mass_avg/eos_d['const_d']['kJ_molpereV']\n models.Control.set_consts( ['energy_conv_fac'], [energy_conv_fac], eos_d )\n\n\n compress_path_mod = models.BirchMurn3(path_const='S',level_const=T0,\n supress_energy=False,\n supress_press=False,\n expand_adj=False)\n models.Control.set_modtypes( ['CompressPathMod'], [compress_path_mod],\n eos_d )\n\n gamma_mod = models.GammaPowLaw(V0ref=False)\n models.Control.set_modtypes( ['GammaMod'], [gamma_mod], eos_d )\n\n thermal_mod = models.MieGrunDebye()\n models.Control.set_modtypes( ['ThermalMod'], [thermal_mod], eos_d )\n\n full_mod = models.ThermalPressMod()\n models.Control.set_modtypes( ['FullMod'], [full_mod], eos_d )\n\n\n return eos_d",
"def fit2d(xdata,ydata,zdata,degree=1,reject=0,plot=None,xr=None,yr=None,zr=None,xt=None,yt=None,zt=None,gdrange=None,pfit=None,log=False,size=5) :\n\n if gdrange is not None :\n gd = np.where((zdata > gdrange[0]) & (zdata < gdrange[1]))[0]\n xfit = xdata[gd]\n yfit = ydata[gd]\n zfit = zdata[gd]\n else :\n xfit = xdata\n yfit = ydata\n zfit = zdata\n\n # set up fitter and do fit\n if pfit is None :\n fit_p = fitting.LinearLSQFitter()\n p_init = models.Polynomial2D(degree=degree)\n pfit = fit_p(p_init, xfit, yfit, zfit)\n # rejection of points?\n if reject > 0 :\n gd=np.where(abs(zfit-pfit(xfit,yfit)) < reject)[0]\n bd=np.where(abs(zfit-pfit(xfit,yfit)) >= reject)[0]\n print('rejected ',len(xdata)-len(gd),' of ',len(xdata),' points')\n pfit = fit_p(p_init, xfit[gd], yfit[gd], zfit[gd])\n\n print('2D rms: ',(zfit-pfit(xfit,yfit)).std())\n \n if plot is not None :\n if log :\n zfit = 10.**zfit\n if xr is None : xr = [xfit.min(),xfit.max()]\n if yr is None : yr = [yfit.min(),yfit.max()]\n if zr is None : zr = [zfit.min(),zfit.max()]\n # plot data\n plots.plotc(plot,xfit,yfit,zfit,xr=xr,yr=yr,zr=zr,\n xt=xt,yt=yt,zt=zt,colorbar=True,size=size,linewidth=1)\n # create independent variable grid for model and display\n y, x = np.mgrid[yr[1]:yr[0]:200j, xr[1]:xr[0]:200j]\n if log :\n plot.imshow(10.**pfit(x,y),extent=[xr[1],xr[0],yr[1],yr[0]],\n aspect='auto',vmin=zr[0],vmax=zr[1], origin='lower',cmap='rainbow')\n else :\n plot.imshow(pfit(x,y),extent=[xr[1],xr[0],yr[1],yr[0]],\n aspect='auto',vmin=zr[0],vmax=zr[1], origin='lower',cmap='rainbow')\n #plt.show()\n\n return pfit",
"def _set_params(self, x):\r\n assert x.size == self.num_params\r\n\r\n self.varianceU = x[0]\r\n self.varianceY = x[1]\r\n self.lengthscaleU = x[2]\r\n self.lengthscaleY = x[3]",
"def fit(self, X, y, **fit_params):\n ...",
"def partial_fit(self, X, y=...):\n ...",
"def partial_fit(self, X, y=...):\n ...",
"def fit_model(train_ts_dis, data, init_prior = [.5,.5], bias = True, mode = \"biasmodel\"):\r\n if mode == \"biasmodel\":\r\n #Fitting Functions\r\n def bias_fitfunc(rp, tsb, df):\r\n init_prior = [.5,.5]\r\n model = BiasPredModel(train_ts_dis, init_prior, ts_bias = tsb, recursive_prob = rp)\r\n model_likelihoods = []\r\n for i in df.index:\r\n c = df.context[i]\r\n trial_choice = df.subj_ts[i]\r\n conf = model.calc_posterior(c)\r\n model_likelihoods.append(conf[trial_choice])\r\n return np.array(model_likelihoods)\r\n \r\n def bias_errfunc(params,df):\r\n rp = params['rp']\r\n tsb = params['tsb']\r\n #minimize\r\n return abs(np.sum(np.log(bias_fitfunc(rp,tsb,df)))) #single value\r\n \r\n #Fit bias model\r\n #attempt to simplify:\r\n fit_params = lmfit.Parameters()\r\n fit_params.add('rp', value = .6, min = 0, max = 1)\r\n if bias == True:\r\n fit_params.add('tsb', value = 1, min = 0)\r\n else:\r\n fit_params.add('tsb', value = 1, vary = False, min = 0)\r\n out = lmfit.minimize(bias_errfunc,fit_params, method = 'lbfgsb', kws= {'df': data})\r\n lmfit.report_fit(out)\r\n return out.values\r\n \r\n elif mode == \"midline\":\r\n #Fitting Functions\r\n def midline_errfunc(params,df):\r\n eps = params['eps'].value\r\n context_sgn = np.array([max(i,0) for i in df.context_sign])\r\n choice = df.subj_ts\r\n #minimize\r\n return -np.sum(np.log(abs(abs(choice - (1-context_sgn))-eps)))\r\n \r\n #Fit bias model\r\n #attempt to simplify:\r\n fit_params = lmfit.Parameters()\r\n fit_params.add('eps', value = .1, min = 0, max = 1)\r\n midline_out = lmfit.minimize(midline_errfunc,fit_params, method = 'lbfgsb', kws= {'df': data})\r\n lmfit.report_fit(midline_out)\r\n return midline_out.values",
"def GetParameters(ParamsFile, QualityFile, Bands, NumberOfParameters, RelativeUncert, ScaleFactor, ProcessSnow = 0):\n\n FillValue = 32767\n NumberOfBands = Bands.shape[0]\n\n # Get dimensions\n rows, cols = GetDimSubDataset( ParamsFile )\n\n Parameters = np.zeros((rows, cols, NumberOfBands, NumberOfParameters), np.float32)\n Uncertainties = np.zeros((rows, cols, NumberOfBands), np.float32)\n\n # Get Snow\n # 1 Snow albedo retrieved\n # 0 Snow-free albedo retrieved\n # 255 Fill Value\n print \"Reading Snow QA:\", QualityFile\n SubDatasetName = 'HDF4_EOS:EOS_GRID:\"' + QualityFile + '\":MOD_Grid_BRDF:Snow_BRDF_Albedo'\n SubDataset = gdal.Open(SubDatasetName, GA_ReadOnly)\n SnowQA = SubDataset.GetRasterBand(1).ReadAsArray()\n if ProcessSnow == 0:\n SnowQA = np.where( SnowQA == 0, 1, 0)\n else:\n SnowQA = np.where( SnowQA == 1, 1, 0)\n\n # Load BRDF parameters\n print \"Reading BRDF parameters...\"\n for Band in range( Bands.shape[0] ):\n SubDatasetName = 'HDF4_EOS:EOS_GRID:\"' + ParamsFile + '\":MOD_Grid_BRDF:BRDF_Albedo_Parameters_Band' + str( Bands[Band] )\n print SubDatasetName \n SubDataset = gdal.Open(SubDatasetName, GA_ReadOnly)\n\n for Parameter in range(NumberOfParameters):\n print \"Getting BRDF parameter\", Parameter\n Parameters[:,:,Band,Parameter] = SubDataset.GetRasterBand( Parameter + 1 ).ReadAsArray()\n\n # Snow mask\n Parameters[:,:,Band,Parameter] = Parameters[:,:,Band,Parameter] * SnowQA\n\n # Filter out fill values\n Parameters[:,:,Band,Parameter] = np.where(Parameters[:,:,Band,Parameter] == FillValue, 0.,\n Parameters[:,:,Band,Parameter] * ScaleFactor )\n\n # Get QA\n print \"Reading QA:\", QualityFile\n for Band in range( Bands.shape[0] ):\n SubDatasetName = 'HDF4_EOS:EOS_GRID:\"' + QualityFile + '\":MOD_Grid_BRDF:BRDF_Albedo_Band_Quality_Band' + str( Bands[Band] )\n SubDataset = gdal.Open(SubDatasetName, GA_ReadOnly)\n QA = SubDataset.GetRasterBand(1).ReadAsArray()\n\n # https://ladsweb.nascom.nasa.gov/api/v1/filespec/collection=6&product=MCD43A2\n # BRDF_Albedo_Band_Quality_BandN ( N is 1 to 7 )> \n # 0 = best quality, full inversion (WoDs, RMSE majority good)\n # 1 = good quality, full inversion (also including the cases that no clear sky\n # observations over the day of interest or the Solar Zenith Angle is too \n # large even WoDs, RMSE majority good)\n # 2 = Magnitude inversion (numobs >=7)\n # 3 = Magnitude inversion (numobs >=2&<7)\n # 4 = Fill value\n\n QA_flags = np.array( [ 0,1,2,3 ] )\n\n for i, QA_flag in enumerate( QA_flags ) :\n indices = np.where( QA == QA_flag )\n Uncertainties[ indices[0], indices[1], Band ] = RelativeUncert[ i ]\n\n Uncertainties[:,:,Band] = Uncertainties[:,:,Band] * SnowQA \n\n SubDataset = None\n return Parameters, Uncertainties",
"def set_dh_params(self, dh_params, flag=0):\r\n return self._arm.set_dh_params(dh_params, flag)",
"def _set_params(self,x):\r\n assert x.size==3\r\n self.variance = x[0]\r\n self.lengthscale = x[1]\r\n self.period = x[2]\r\n\r\n self.a = [3./self.lengthscale**2, 2*np.sqrt(3)/self.lengthscale, 1.]\r\n self.b = [1,self.lengthscale**2/3]\r\n\r\n self.basis_alpha = np.ones((self.n_basis,))\r\n self.basis_omega = np.array(sum([[i*2*np.pi/self.period]*2 for i in range(1,self.n_freq+1)],[]))\r\n self.basis_phi = np.array(sum([[-np.pi/2, 0.] for i in range(1,self.n_freq+1)],[]))\r\n\r\n self.G = self.Gram_matrix()\r\n self.Gi = np.linalg.inv(self.G)",
"def set_param_values(self, flattened_params, **tags):\n self._regressor.set_param_values(flattened_params, **tags)",
"def fitpulse_err(xdata,ydata,fitparams=None,domain=None,showfit=False,showstartfit=False,label=\"\"):\n if domain is not None:\n fitdatax,fitdatay = selectdomain(xdata,ydata,domain)\n else:\n fitdatax=xdata\n fitdatay=ydata\n if fitparams is None: \n fitparams=[0.,0.]\n fitparams[0]=fitdatay[-1]\n fitparams[1]=fitdatay[0]-fitdatay[-1]\n fitparams[1]=fitdatay[0]-fitdatay[-1]\n #print fitparams\n p1 = fitgeneral(fitdatax, fitdatay, pulse_errfunc, fitparams, domain=None, showfit=showfit,\n showstartfit=showstartfit, label=label)\n return p1",
"def lam4fit2(param, x):\n pedestal, amplitude, inc, poff = param\n ophase = x*2*pi + poff\n return pedestal + abs(amplitude)*lambertian(ophase, inc=inc)",
"def curve_fit():\n x_west, z_west, x_east, z_east = roof_measurements()\n \n # find best curve fit for west roof section\n param_west, param_cov_west = optimize.curve_fit(test_func, x_west, z_west)\n print(param_west)\n \n # z = 7.29944696 + (1.27415518*x) + (-0.0680139854*x**2) + (0.00152035861*x**3)\n \n z_west_fitted = test_func(x_west,*param_west)\n \n # mirror curve for east roof section\n z_east_fitted = np.flip(z_west_fitted,axis=0)\n \n # create array for both west and east roof sections\n x_whole_roof = np.concatenate((x_west, x_east), axis=0)\n z_whole_roof = np.concatenate((z_west_fitted, z_east_fitted), axis=0)\n\n # plot roof\n plt.plot(x_whole_roof, z_whole_roof, '-', color ='blue', label=\"roof curve\") \n plt.axis('equal') # make axes square\n plt.show() \n\n return param_west",
"def set_params(self):\n \n lo, hi = self.R.get((self.h, self.w, self.m), (0.0, 0.0))\n params.update({\n 'gamma' : 1.0, # minesweeper is a finite horizon game\n 'epsilon': 0.0,\n 'K': 16,\n 'R_lo': lo,\n 'R_hi': hi,\n 'max_depth': self.h * self.w / 2,\n 'c':hi-lo\n })",
"def fit(self, x, y, dy, constraints, **options):\n\n raise NotImplementedError()",
"def SetMLEParameters(self, data):\n self._SetParameters(data, 'SetMLEParameters')",
"def fun_set(self):\n\n self.type.set(self.xtl._scattering_type)\n # self.energy_kev.set(8)\n self.theta_offset.set(self.xtl._scattering_theta_offset)\n self.theta_min.set(self.xtl._scattering_min_theta)\n self.theta_max.set(self.xtl._scattering_max_theta)\n self.twotheta_min.set(self.xtl._scattering_min_two_theta)\n self.twotheta_max.set(self.xtl._scattering_max_two_theta)\n\n if self.orientation.get() == 'Reflection':\n self.direction_h.set(self.xtl._scattering_specular_direction[0])\n self.direction_k.set(self.xtl._scattering_specular_direction[1])\n self.direction_l.set(self.xtl._scattering_specular_direction[2])\n else:\n self.direction_h.set(self.xtl._scattering_parallel_direction[0])\n self.direction_k.set(self.xtl._scattering_parallel_direction[1])\n self.direction_l.set(self.xtl._scattering_parallel_direction[2])",
"def setLVDS(self, cmd, sd, optimizeSD):\n\n @inlineCallbacks\n # See U:\\John\\ProtelDesigns\\GHzDAC_R3_1\\Documentation\\HardRegProgram.txt\n # for how this function works.\n def func():\n #TODO: repeat LVDS measurement five times and average results.\n pkt = [[0x0400 + (i << 4), 0x8500, 0x0400 + i, 0x8500][j]\n for i in range(16) for j in range(4)]\n\n if optimizeSD is True:\n # Find the leading/trailing edges of the DATACLK_IN clock.\n # First set SD to 0. Then, for bits from 0 to 15, set MSD to\n # this bit and MHD to 0, read the check bit, set MHD to this\n # bit and MSD to 0, read the check bit.\n answer = yield self._runSerial(cmd, [0x0500] + pkt)\n answer = [answer[i * 2 + 2] & 1 for i in range(32)]\n\n # Find where check bit changes from 1 to 0 for MSD and MHD.\n MSD = -2\n MHD = -2\n for i in range(16):\n if MSD == -2 and answer[i * 2] == 1: MSD = -1\n if MSD == -1 and answer[i * 2] == 0: MSD = i\n if MHD == -2 and answer[i * 2 + 1] == 1: MHD = -1\n if MHD == -1 and answer[i * 2 + 1] == 0: MHD = i\n MSD = max(MSD, 0)\n MHD = max(MHD, 0)\n # Find the optimal SD based on MSD and MHD.\n t = (MHD - MSD) / 2 & 0xF\n setMSDMHD = False\n elif sd is None:\n # Get the SD value from the registry.\n t = int(self.boardParams['lvdsSD']) & 0xF\n MSD, MHD = -1, -1\n setMSDMHD = True\n else:\n # This occurs if the SD is not specified (by optimization or\n # in the registry).\n t = sd & 0xF\n MSD, MHD = -1, -1\n setMSDMHD = True\n\n # Set the SD and check that the resulting difference between MSD\n # and MHD is no more than one bit. Any more indicates noise on the\n # line.\n answer = yield self._runSerial(cmd, [0x0500 + (t << 4)] + pkt)\n MSDbits = [bool(answer[i * 4 + 2] & 1) for i in range(16)]\n MHDbits = [bool(answer[i * 4 + 4] & 1) for i in range(16)]\n MSDswitch = [(MSDbits[i + 1] != MSDbits[i]) for i in range(15)]\n MHDswitch = [(MHDbits[i + 1] != MHDbits[i]) for i in range(15)]\n # Find first index at which MHD/MSD switch\n leadingEdge = MSDswitch.index(True)\n trailingEdge = MHDswitch.index(True)\n if setMSDMHD:\n if sum(MSDswitch) == 1: MSD = leadingEdge\n if sum(MHDswitch) == 1: MHD = trailingEdge\n if abs(trailingEdge - leadingEdge) <= 1 and sum(MSDswitch) == 1 and \\\n sum(MHDswitch) == 1:\n success = True\n else:\n success = False\n checkResp = yield self._runSerial(cmd, [0x8500])\n checkHex = checkResp[0] & 0x7\n returnValue((success, MSD, MHD, t, (range(16), MSDbits, MHDbits),\n checkHex))\n\n return self.testMode(func)",
"def fitting_parameter_plot(self, d, bin1, name, no):\n\t\tfor i in range(0,no):\n\t\t\ts = d[:,i]\n\t\t\tdist_names = ['rayleigh', 'norm', 'lognorm', 'gamma']\n\t\t\tcolors = ['b', 'g', 'r', 'y', 'm']\n\t\t\tfor dist_name,col in zip(dist_names,colors):\n\t\t\t\tdist = getattr(sp, dist_name)\n\t\t\t\tshape[i], location[i], scale[i] = dist.fit(s)\n\t\treturn shape, location, scale",
"def setParams(self, minPts=5):\n return self._set(minPts=minPts)",
"def set_params(self, **parameters):\n for parameter, value in parameters.items():\n if parameter == 'predictor':\n if isinstance(value, chainer.Link):\n del self.predictor\n with self.init_scope():\n self.predictor = value\n else:\n assert False, 'predictor is not Chain instance'\n elif parameter in ['lossfun', 'accfun', 'device']:\n setattr(self, parameter, value)\n else:\n self.sk_params.update({parameter: value})\n return self",
"def _set_params(self,x):\r\n assert x.size==3\r\n self.variance = x[0]\r\n self.lengthscale = x[1]\r\n self.period = x[2]\r\n\r\n self.a = [1./self.lengthscale, 1.]\r\n self.b = [1]\r\n\r\n self.basis_alpha = np.ones((self.n_basis,))\r\n self.basis_omega = np.array(sum([[i*2*np.pi/self.period]*2 for i in range(1,self.n_freq+1)],[]))\r\n self.basis_phi = np.array(sum([[-np.pi/2, 0.] for i in range(1,self.n_freq+1)],[]))\r\n\r\n self.G = self.Gram_matrix()\r\n self.Gi = np.linalg.inv(self.G)",
"def __setup_parameters__(self):\r\n self.M=self.N+1\r\n self.u=1+self.pu\r\n self.d=1-self.pd\r\n self.qu=(math.exp((self.r-self.div)*self.dt)-self.d)/(self.u-self.d)\r\n self.qd=1-self.qu"
] | [
"0.6524372",
"0.5828944",
"0.5815241",
"0.5792441",
"0.5774732",
"0.5649416",
"0.560434",
"0.5591655",
"0.55806255",
"0.5562633",
"0.5548635",
"0.5524629",
"0.5523357",
"0.5509879",
"0.5508611",
"0.5453511",
"0.544221",
"0.54013795",
"0.54003555",
"0.53875685",
"0.5369344",
"0.5365295",
"0.53618485",
"0.53513116",
"0.53484184",
"0.53309566",
"0.5307402",
"0.5292875",
"0.5277187",
"0.52509415",
"0.52435154",
"0.5210985",
"0.5206242",
"0.52041143",
"0.52034736",
"0.5196202",
"0.5192214",
"0.5190594",
"0.5190302",
"0.5185839",
"0.5183499",
"0.5178848",
"0.5164318",
"0.51525325",
"0.5142841",
"0.5141395",
"0.5140803",
"0.5135283",
"0.5132826",
"0.513188",
"0.513188",
"0.5128777",
"0.5128178",
"0.51275396",
"0.5110944",
"0.5110944",
"0.5109952",
"0.51086986",
"0.51062644",
"0.51061726",
"0.5104001",
"0.51038176",
"0.51013184",
"0.5096329",
"0.5091645",
"0.50876325",
"0.50867933",
"0.5085175",
"0.50818574",
"0.5079042",
"0.5073733",
"0.5071353",
"0.5067325",
"0.50564665",
"0.5054293",
"0.5038163",
"0.50375587",
"0.5034807",
"0.50346667",
"0.5033989",
"0.50325507",
"0.50325507",
"0.50241953",
"0.50237024",
"0.50218123",
"0.502113",
"0.5018518",
"0.50183874",
"0.5014884",
"0.50140756",
"0.50122154",
"0.5010622",
"0.5006793",
"0.5005242",
"0.5004706",
"0.50042063",
"0.49946058",
"0.4993448",
"0.49880564",
"0.49868977"
] | 0.6969803 | 0 |
Evaluate the size expected from the FP relation for a given velocity dispersion and Vband apparent magnitude | def get_effective_radius(self, vel_disp, m_V):
log_vel_disp = np.log10(vel_disp)
log_R_eff = self.a*log_vel_disp + self.b*m_V + self.c + np.random.randn()*self.intrinsic_scatter
R_eff = 10**log_R_eff
return R_eff | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fluxRatio_fromVmag(Vmag):\n fluxRatio = 10.**(-0.4*Vmag)\n return fluxRatio",
"def width_v_a(model: SingleRhNeutrinoModel) -> float:\n u = 0.5 * np.tan(2 * model.theta)\n return 9 * ALPHA_EM * GF**2 / (256 * np.pi**4) * model.mx**5 * u**2",
"def testCalspecMags(self):\n std = MKIDStd.MKIDStd()\n bFilter = std.filters['B']\n vFilter = std.filters['V']\n\n # BD17\n bd17Flux = std.load(\"bd17\")\n B = std.getVegaMag(bd17Flux, bFilter)\n V = std.getVegaMag(bd17Flux, vFilter)\n self.assertAlmostEqual(B-V, 0.44, places=1, msg=\"value=%f\"%B)\n self.assertAlmostEqual(B, 9.47, places=0, msg=\"value=%f\"%B)",
"def computeMagnitudeErr(instFluxErr, instFlux, calibrationErr, calibration, flux):\n return 2.5/np.log(10)*computeMaggiesErr(instFluxErr, instFlux, calibrationErr, calibration, flux) / flux",
"def magnitude(v: Vector) -> float:\n return math.sqrt(sum_of_squares(v)) #math.sqrt() is a square root function",
"def fl_over_avfl(self, searcher, docnum, fieldnum):\n return searcher.doc_field_length(docnum, fieldnum) / self.avg_field_length(searcher, fieldnum)",
"def freq_optimization(self):\n index = identify_scale(self.vz, True)\n # In case the patient is limping\n if index > 35:\n index = index / 2\n print(f\"Scale used is {index}\")",
"def magnitude(v):\n if len(v.arr) != 4 or v[3] != 0.0:\n raise ValueError(\"Only use this function with vectors.\")\n return np.sqrt(np.sum(np.square(v.arr)))",
"def get_vcond(lambdam, taum):\n return 2 * lambdam / taum",
"def aliveness(self, physics):\n return 0.",
"def test_spectral_density_vega_wf(wf, fluxd, to):\n v = fluxd.to(to.unit, spectral_density_vega(wf))\n assert v.unit == to.unit\n if to.unit in (VEGAmag, JMmag):\n assert np.isclose(v.value, to.value, atol=0.001)\n else:\n assert np.isclose(v.value, to.value, rtol=0.001)",
"def calculate_magnitude(self, band, system='AB'):\n\n if system not in ('AB', 'Vega'):\n raise ValueError('`system` must be one of `AB` or `Vega`')\n\n f1 = self.calculate_flux(band)\n\n if f1 > 0:\n magnitude = -2.5 * log10(f1 / band.flux[system])\n\n if system == 'Vega':\n # Add 0.026 because Vega has V = 0.026:\n magnitude += 0.026\n\n else:\n magnitude = np.inf\n\n return magnitude",
"def vol(x):\r\n return pi*(topdia(x)/2000.)**2 * length (x)",
"def calc_elv_spectra(self, red, comp, src):\n if ((src in red.data.keys())\n & (src in red.data.keys())):\n # check that the wavelenth grids are identical\n delt_wave = red.data[src].waves - comp.data[src].waves\n if np.sum(np.absolute(delt_wave)) > 0.01*u.micron:\n warnings.warn(\"wavelength grids not equal for %s\" % src,\n UserWarning)\n else:\n # reference band\n red_V = red.data['BAND'].get_band_mag('V')\n comp_V = comp.data['BAND'].get_band_mag('V')\n\n # setup the needed variables\n self.waves[src] = red.data[src].waves\n n_waves = len(self.waves[src])\n self.exts[src] = np.zeros(n_waves)\n self.uncs[src] = np.zeros(n_waves)\n self.npts[src] = np.zeros(n_waves)\n\n # only compute the extinction for good, positive fluxes\n print(comp.data[src].npts)\n print(comp.data[src].fluxes)\n indxs, = np.where((red.data[src].npts > 0)\n & (comp.data[src].npts > 0)\n & (red.data[src].fluxes.value > 0)\n & (comp.data[src].fluxes.value > 0))\n self.exts[src][indxs] = \\\n (-2.5*np.log10(red.data[src].fluxes[indxs]\n / comp.data[src].fluxes[indxs])\n + (comp_V[0] - red_V[0]))\n self.uncs[src][indxs] = np.sqrt(\n np.square(_flux_unc_as_mags(red.data[src].fluxes[indxs],\n red.data[src].uncs[indxs]))\n + np.square(_flux_unc_as_mags(comp.data[src].fluxes[indxs],\n comp.data[src].uncs[indxs]))\n + np.square(red_V[1])\n + np.square(comp_V[1]))\n self.npts[src][indxs] = np.full(len(indxs), 1)",
"def magnitude(v: Vector) -> float:\n return math.sqrt(sum_of_squares(v))",
"def velocity(n_core, q, beta_invariant, material_dispersion=None):\n c = scipy.constants.speed_of_light\n if material_dispersion is None:\n A = 2 / c / (2 + q)\n B = q * n_core**2 / c / (2 + q)\n else:\n N1 = n_core + material_dispersion\n y = 2 * n_core / N1\n A = 2 * N1 / n_core * (1 + 0.25 * y) / c / (q + 2)\n B = q * n_core**2 * A - 1 / 4 / c * N1 * n_core * y\n\n return A * beta_invariant + B / beta_invariant",
"def get_fermi_velocities():\n\n vr = Vasprun('vasprun.xml')\n # eigenvalues = vr.eigenvalues\n bs = vr.get_band_structure()\n bands = bs.bands\n kpoints = bs.kpoints\n efermi = bs.efermi\n h_bar = 6.582e-16 # eV*s\n\n fermi_bands = []\n for spin in bands:\n for i in range(len(bands[spin])):\n if max(bands[spin][i]) > efermi > min(bands[spin][i]):\n fermi_bands.append(bands[spin][i])\n\n fermi_velocities = []\n for band in fermi_bands:\n for i in range(len(band)-1):\n if (band[i] < efermi < band[i+1]) or (band[i] > efermi > band[i+1]):\n dk = np.sqrt((kpoints[i+1].cart_coords[0]\n - kpoints[i].cart_coords[0])**2\n + (kpoints[i+1].cart_coords[1]\n - kpoints[i].cart_coords[1])**2)\n v_f = abs((band[i+1] - band[i]) / (h_bar * dk))\n fermi_velocities.append(v_f)\n\n return fermi_velocities # Values are in Angst./s",
"def VarianceOfAbsAcceleration(self):\n H = []\n for i in range(len(self.omega_range)):\n \"\"\"Calculation of the Transmission matrix H\"\"\"\n H.append(np.linalg.inv((-self.omega_range[i] ** 2 * self.M\n - 1j * self.omega_range[i] * self.C\n + self.K)))\n \"\"\"squared absolute of the transmission matrix H multiplied with the diagonal of the mass matrix M (M*I)\"\"\"\n FRFacc = [H[wincr].dot(np.diagonal(self.M)) * self.omega_range[wincr] ** 2 for wincr in\n range(len(self.spectrum))]\n Habs2 = [(np.abs(np.ones(len(vector), dtype=float) - vector) ** 2) for vector in FRFacc]\n PSDexc = self.spectrum\n \"\"\"Response of all DOFs as PSD\"\"\"\n RespPSD = [Habs2[wincr] * PSDexc[wincr] for wincr in range(len(self.spectrum))]\n AccPSD = [abs(RespPSD[wincr] + 0*PSDexc[wincr]) for wincr in range(len(self.spectrum))]\n \"\"\"The variance of the response can be obtained with the integral of the response PSD. \n integral(PSD_response)\"\"\"\n variance = (np.trapz(AccPSD, self.omega_range, axis=0))\n return variance",
"def magnitude(self):\n\t\treturn sqrt(self.dot(self))",
"def magnitude(v):\n\treturn math.sqrt(sum_squares(v))",
"def __q2v_cf(self, w, rhom, q):\n return float(q / (rhom + q / w))",
"def calculate_magnitude(east, north, vertical):\n\n if not Ensemble.is_bad_velocity(east) and not Ensemble.is_bad_velocity(north) and not Ensemble.is_bad_velocity(vertical):\n return math.sqrt((east*east) + (north*north) + (vertical*vertical))\n else:\n return Ensemble.BadVelocity",
"def CalcForce_aeroframe_DEP(V, CoefMatrix, x, rho, g):\r\n\r\n #Compute aero forces\r\n # here x must be of the form (alpha, beta, p, q, r, da, dr, de) (last one punctualy used)\r\n # set non dim for p,q,r\r\n nonDim=np.ones(7)\r\n nonDim[2]=g.b/(2*V)\r\n nonDim[3]=g.c/(2*V)\r\n nonDim[4]=g.b/(2*V)\r\n # F=np.dot(CoefMatrix,x[0:7]) # commented form, modification to account for symmetric drag increase of side slip\r\n F=np.zeros((3))\r\n M=np.zeros((3))\r\n xsym=np.copy(x[0:-1])\r\n xsym[1]=abs(xsym[1]) # make beta always positive since derivatives have already correct sign for drag and lift only\r\n xsym[-3]=abs(xsym[-3]) # make ailerons deflection always positive for drag increase and lift decrease\r\n xsym[-1]=abs(xsym[-1]) # make rudder deflection always positive for drag increase and lift decrease\r\n F[0]=np.dot(CoefMatrix[0],xsym)\r\n F[1]=np.dot(CoefMatrix[1],x[0:-1]) #side force\r\n F[2]=np.dot(CoefMatrix[2],xsym)\r\n M=np.dot(CoefMatrix[3:6,:],x[0:-1])\r\n# print(\"Printing moment coeff\")\r\n# print(M)\r\n\r\n \r\n #No need to project\r\n# alpha=x[0]\r\n# beta=x[1]\r\n# H=np.array([[math.cos(alpha)*math.sin(beta), -math.cos(alpha)*math.sin(beta), -math.sin(alpha)],[math.sin(beta), math.cos(beta), 0],[math.sin(alpha)*math.cos(beta), -math.sin(alpha)*math.sin(beta), math.cos(alpha)]])\r\n if V<=71 :\r\n Fbody=np.array([-F[0]-g.Cd0_fl,F[1],-F[2]-g.CL0_fl]) # add alpha=0 coefficients\r\n Moment=M+np.array([0,x[-1]*g.Cm_de+g.Cm0_fl,0])\r\n else:\r\n Fbody=np.array([-F[0]-g.Cd0,F[1],-F[2]-g.CL0]) # add alpha=0 coefficients\r\n Moment=M+np.array([0,x[-1]*g.Cm_de+g.Cm0,0])\r\n \r\n\r\n Fbody=0.5*V**2.0*rho*g.S*Fbody\r\n Moment=0.5*V**2.0*rho*g.S*g.b*Moment\r\n \r\n return np.append(Fbody, Moment)",
"def test_mag_form_fac_case1():\n ion = MagneticFormFactor('Fe')\n formfac, _temp = ion.calc_mag_form_fac()[0], ion.calc_mag_form_fac()[1:]\n del _temp\n assert (abs(np.sum(formfac) - 74.155233575216599) < 1e-12)",
"def width_h_invis(self):\n if m_higgs > 2.0 * self.mx:\n coupling = self.gsxx * self.stheta / np.sqrt(1 - self.stheta**2)\n\n val = (\n (coupling**2 * (m_higgs**2 - 4 * self.mx**2) ** 1.5)\n / (8.0 * m_higgs**2 * np.pi)\n ).real\n\n assert val >= 0\n\n return val\n else:\n return 0.0",
"def approximate_volume(pdb_filenames, n_molecules_list, box_scaleup_factor=2.0):\n volume = 0.0 # in cubic angstroms\n for k, (pdb_file) in enumerate(pdb_filenames):\n molecule_volume = 0.0\n molecule_trj = md.load(pdb_filenames[k])\n for atom in molecule_trj.topology.atoms:\n if atom.element.symbol == 'H':\n molecule_volume += 5.0 # approximated from bondi radius = 1.06 angstroms\n else:\n molecule_volume += 15.0 # approximated from bondi radius of carbon = 1.53 angstroms\n volume += molecule_volume * n_molecules_list[k]\n box_size = volume**(1.0/3.0) * box_scaleup_factor\n return box_size",
"def test_mag_form_fac_case2():\n ion = MagneticFormFactor('Fe')\n formfac, _temp = ion.calc_mag_form_fac(qrange=[0, 2])[0], ion.calc_mag_form_fac(qrange=[0, 2])[1:]\n del _temp\n assert (abs(np.sum(formfac) - 74.155233575216599) < 1e-12)",
"def calc(self):\n\n # the following if query ensures that volume- and interaction-terms\n # are only calculated if tau > 0.\n # (to avoid nan-values from invalid function-evaluations)\n\n if self.V.tau.shape == (1,):\n Isurf = self.surface()\n # differentiation for non-existing canopy, as otherwise NAN values\n if self.V.tau > 0.:\n Ivol = self.volume()\n if self.int_Q is True:\n Iint = self.interaction()\n else:\n Iint = np.array([0.])\n else:\n Ivol = np.array([0.])\n Iint = np.array([0.])\n else:\n # calculate surface-term (valid for any tau-value)\n Isurf = self.surface()\n\n # store initial parameter-values\n old_t_0 = self.t_0\n old_p_0 = self.p_0\n old_t_ex = self.t_ex\n old_p_ex = self.p_ex\n\n old_tau = self.V._get_tau()\n old_omega = self.V._get_omega()\n old_NN = self.SRF._get_NormBRDF()\n\n # set mask for tau > 0.\n mask = old_tau > 0.\n valid_index = np.where(mask)\n inval_index = np.where(~mask)\n\n # set parameter-values to valid values for calculation\n self.t_0 = old_t_0[valid_index[0]]\n self.p_0 = old_p_0[valid_index[0]]\n self.t_ex = old_t_ex[valid_index[0]]\n self.p_ex = old_p_ex[valid_index[0]]\n\n # squeezing the arrays is necessary since the setter-function for\n # tau, omega and NormBRDF automatically adds an axis to the arrays!\n self.V.tau = np.squeeze(old_tau[valid_index[0]])\n if np.array(self.V.omega).size != 1:\n self.V.omega = np.squeeze(old_omega[valid_index[0]])\n if np.array(self.SRF.NormBRDF).size != 1:\n self.SRF.NormBRDF = np.squeeze(old_NN[valid_index[0]])\n\n # calculate volume and interaction term where tau-values are valid\n _Ivol = self.volume()\n if self.int_Q is True:\n _Iint = self.interaction()\n else:\n _Iint = np.full_like(self.t_0, 0.)\n\n # reset parameter values to old values\n self.t_0 = old_t_0\n self.p_0 = old_p_0\n self.t_ex = old_t_ex\n self.p_ex = old_p_ex\n\n # squeezing the arrays is necessary since the setter-function for\n # tau, omega and NormBRDF automatically add an axis to the arrays!\n self.V.tau = np.squeeze(old_tau)\n self.V.omega = np.squeeze(old_omega)\n self.SRF.NormBRDF = np.squeeze(old_NN)\n\n # combine calculated volume-contributions for valid tau-values\n # with zero-arrays for invalid tau-values\n Ivol = np.ones_like(self.t_0)\n Ivol[valid_index[0]] = _Ivol\n Ivol[inval_index[0]] = np.ones_like(Ivol[inval_index[0]]) * 0.\n\n # combine calculated interaction-contributions for valid tau-values\n # with zero-arrays for invalid tau-values\n if self.int_Q is True:\n Iint = np.ones_like(self.t_0)\n Iint[valid_index[0]] = _Iint\n Iint[inval_index[0]] = np.ones_like(Iint[inval_index[0]]) * 0.\n else:\n Iint = np.full_like(self.t_0, 0.)\n\n return Isurf + Ivol + Iint, Isurf, Ivol, Iint",
"def magnitude(v):\n return math.sqrt(sum_of_squares(v))",
"def testGetVegaMag(self):\n std = MKIDStd.MKIDStd()\n vegaFlux = std.load(\"vega\")\n bd17Flux = std.load(\"bd17\")\n for filter in ['U','B','V','R','I']:\n aFilter = std.filters[filter] \n mag = std.getVegaMag(vegaFlux, aFilter)\n self.assertAlmostEqual(0.03, mag, msg=\"filter=%s mag=%f\"%(filter,mag))",
"def get_axis_ratio(self, vel_disp):\n\t\tscale = self.a*vel_disp + self.b\n\t\tq = 0.0\n\t\twhile q < self.lower:\n\t\t\tq = 1.0 - np.random.rayleigh(scale, size=None)\n\t\treturn q",
"def test_spectral_density_vega_bp(filename, fluxd, to, tol):\n fn = get_pkg_data_filename(os.path.join(\n '..', '..', 'photometry', 'data', filename))\n bp = synphot.SpectralElement.from_file(fn)\n\n v = fluxd.to(to.unit, spectral_density_vega(bp))\n assert v.unit == to.unit\n if to.unit in (VEGAmag, JMmag):\n assert np.isclose(v.value, to.value, atol=tol)\n else:\n assert np.isclose(v.value, to.value, rtol=tol)",
"def fraction_of_infectious_virus(self) -> _VectorisedFloat:\n return self.virus.viable_to_RNA_ratio * (1 - self.host_immunity)",
"def magnitude(self):\n return self.real ** 2 + numpy.inner(self.pure, self.pure)",
"def test_mag_form_fac():\n ion = MagneticFormFactor('Fe')\n formfac, _temp = ion.calc_mag_form_fac(q=1.)[0], ion.calc_mag_form_fac(q=1.)[1:]\n del _temp\n assert (abs(formfac - 0.932565) < 1e-6)",
"def calculate_rf_size(rf_size, downsample):\n h = 61 # 24\" monitor\n d = 10 # 10cm from the right eye\n r = 1080 / downsample # Vertical resolution\n d_px = np.degrees(math.atan2(h / 2, d)) / (r / 2)\n return rf_size * d_px",
"def calculate_ft(self):\n \n # Create a function which is able to evaluate B**2\n ffunc = scipy.interpolate.interp1d(self.psigrid, self.e.getF()[self.tind])\n def b2_func(R, Z, psi):\n bt = ffunc(psi)/R\n br = -self.psifunc.ev(R, Z, dy=1)/R\n bz = self.psifunc.ev(R, Z, dx=1)/R\n \n return bt**2 + br**2 + bz**2\n \n\n def b_bmax2(R,Z,psi):\n b2 = b2_func(R,Z,psi)\n return b2 / np.max(b2)\n \n def b_bmax(R,Z,psi):\n return np.sqrt(b_bmax2(R,Z,psi))\n \n # Evaluate the flux-surface averaged h^2 and h, as required\n fsa_h2 = self.fs_average(b_bmax2)\n fsa_h = self.fs_average(b_bmax)\n \n # This is the function which gets flux-surface averaged in equation (7)\n def ftl_func(R,Z,psi):\n h = b_bmax(R,Z,psi)\n h2 = b_bmax2(R,Z,psi)\n \n return (1 - (np.sqrt(1 - h) * (1 + 0.5 * h)))/h2\n \n \n # Equation 6, 7 in Lin-Liu\n fs_ftu = 1 - fsa_h2 / fsa_h**2 * (1 - np.sqrt(1 - fsa_h) * (1 + 0.5 * fsa_h))\n fs_ftl = 1 - fsa_h2 * self.fs_average(ftl_func)\n # Equation 18, 19 \n om = 0.75\n self.fs_ft = om*fs_ftu + (1-om)*fs_ftl",
"def EDD_VDIF_Frame_Size(sampling_rate):\n bw_GHz = sampling_rate / 2E9\n\n rate_Msps = bw_GHz*2000\n rate_Mbps = 2*rate_Msps # % 2-bit\n log.debug('Bandwidth {:.3f} GHz --> {:.3f} Msps --> {:.3f} Mbit/sec'.format(bw_GHz, rate_Msps, rate_Mbps))\n\n vdifGranularity = 8 # % VDIF specs, granularity of payload size\n\n num = np.arange(1024, 9001, vdifGranularity) * 8*1000 # % bits per frame, various payload sizes\n den = rate_Mbps #; % datarate bits per sec\n fitting_payloads = num[num % den == 0]/(8*1000); # % corresponding frame payloads in byte\n\n rate_Bps = rate_Mbps*1e6/8 #;\n final_payloads = fitting_payloads[rate_Bps % fitting_payloads == 0] #;\n final_fpss = rate_Bps / final_payloads;\n final_framens = final_payloads*4*1e3 / rate_Msps;\n\n return final_payloads, final_fpss, final_framens",
"def getMagnitudes(self):\n return self._bmag, self._vmag, self._jmag, self._hmag, self._kmag",
"def width_v_v_v(model: SingleRhNeutrinoModel, genv: Generation):\n mx = model.mx\n u = 0.5 * np.tan(2 * model.theta)\n w = parameters.GF**2 * mx**5 / (768 * np.pi**3) * u**2\n pre = 2 if genv == model.gen else 1.0\n return pre * w",
"def Sizes(self, with_sign=False):\n\n self.__do_essential_memebers_exist__()\n\n try:\n from Florence import DisplacementFormulation\n except ImportError:\n raise ValueError(\"This functionality requires Florence's support\")\n\n if self.element_type != \"line\":\n # FOR LINE ELEMENTS THIS APPROACH DOES NOT WORK AS JACOBIAN IS NOT WELL DEFINED\n formulation = DisplacementFormulation(self)\n sizes = np.zeros(self.nelem)\n if not with_sign:\n for elem in range(self.nelem):\n LagrangeElemCoords = self.points[self.elements[elem,:],:]\n sizes[elem] = formulation.GetVolume(formulation.function_spaces[0],\n LagrangeElemCoords, LagrangeElemCoords, False, elem=elem)\n else:\n for elem in range(self.nelem):\n LagrangeElemCoords = self.points[self.elements[elem,:],:]\n sizes[elem] = formulation.GetSignedVolume(formulation.function_spaces[0],\n LagrangeElemCoords, LagrangeElemCoords, False, elem=elem)\n return sizes\n\n else:\n warn(\"Sizes of line elements could be incorrect if the mesh is curvilinear\")\n return self.Lengths()",
"def _get_rupture_dimensions(src, mag, nodal_plane):\n area = src.magnitude_scaling_relationship.get_median_area(\n mag, nodal_plane.rake)\n rup_length = math.sqrt(area * src.rupture_aspect_ratio)\n rup_width = area / rup_length\n seismogenic_layer_width = (src.lower_seismogenic_depth\n - src.upper_seismogenic_depth)\n max_width = (seismogenic_layer_width\n / math.sin(math.radians(nodal_plane.dip)))\n if rup_width > max_width:\n rup_width = max_width\n rup_length = area / rup_width\n return rup_length, rup_width",
"def vFrmE(E):\n Ej=E*1.6021*10**-22\n m=1.674929*10**-27\n v=np.sqrt((2.*Ej)/m)\n return(v)",
"def compute_volume(self) -> float:\n return (\n (1 if self.clockwise else -1)\n * np.sum(\n np.linalg.det(\n np.dstack(\n (\n self.vertices[self._faces[:, 0]],\n self.vertices[self._faces[:, 1]],\n self.vertices[self._faces[:, 2]],\n )\n )\n )\n )\n / 6\n )",
"def ret_vol_ratio(self) -> float:\n return self.geo_ret / self.vol",
"def read_voltage_stepsize(self):\n function_string = 'DELTAV' + self.output + '?'\n return self.scpi_comm(function_string)",
"def magnetisation(field):\n norm_field = df.Field(field.mesh, dim=1, value=(field.norm.array != 0))\n volume = df.integral(norm_field * df.dV, direction='xyz')\n return df.integral(field * df.dV / volume, direction='xyz')",
"def test_magnitude(self):\n\n # test small magnitudes with regular unit vectors\n u1 = (1,)\n u2 = (0, 1/2, 0, 1/2, 1/2, 0, 0, 0, 1/2)\n u3 = (12/13, 4/13, 3/13)\n for k in (0, -1, 1):\n s = space(fake_curvature=k)\n for d in (0, 1, 1/3, 3/2):\n for n in (u1, u2, u3):\n p = s.make_point(n, d)\n self.assertTrue(isclose(\n abs(p),\n d\n ))\n self.assertTrue(isclose(\n s.distance_between(p, s.make_origin(len(n))),\n d\n ))\n\n # test direction vector normalization\n v1 = (73733,)\n v2 = tuple(range(30))\n v3 = (-11, 1, 0, -1, 11, 1/11)\n for k in (0, -1, 1):\n s = space(fake_curvature=k)\n for d in (0, 1, 1/3, 3/2):\n for n in (v1, v2, v3):\n p = s.make_point(n, d, normalize=True)\n self.assertTrue(isclose(\n abs(p),\n d\n ))\n self.assertTrue(isclose(\n s.distance_between(p, s.make_origin(len(n))),\n d\n ))\n \n # test elliptic space looping property\n pi_ref = 3.14159265358979323846264338327933\n for r in (1, 2, 3, 1/3):\n k = 1/r\n s = space(fake_curvature=k)\n for j, d in ((2, pi_ref - 2), (pi_ref, 0)):\n j *= r\n d *= r\n for n in (u1, u2, u3):\n p = s.make_point(n, j)\n self.assertTrue(isclose(\n abs(p),\n d,\n abs_tol = 1e-15\n ))\n self.assertTrue(isclose(\n s.distance_between(p, s.make_origin(len(n))),\n d,\n abs_tol = 1e-15\n ))",
"def effectivedb_size_percentage(self):\n if self._vsanobj.id is None:\n raise VsanNotPresent(\"Vsan \" + str(self._vsanobj._id) + \" is not present on the switch.\")\n out = self.__show_zone_status()\n retout = out.get(get_key(zonekeys.EFFDB_PER, self._SW_VER), None)\n if retout is not None:\n return str(retout) + \"%\"\n return None",
"def effectivedb_size(self):\n if self._vsanobj.id is None:\n raise VsanNotPresent(\"Vsan \" + str(self._vsanobj._id) + \" is not present on the switch.\")\n out = self.__show_zone_status()\n retout = out.get(get_key(zonekeys.EFFDB_SIZE, self._SW_VER), None)\n if retout is not None:\n return int(retout)\n return None",
"def v_multiplier(self):\n return (4./3)*np.pi*(self.bins[:, 1]/2)**3",
"def normalized_effect_size(self):\n mus = self.mu + self.absolute_effects\n pop_mu = (mus * self.test_splits).sum()\n sigma2_m = (self.test_splits * np.square(mus - pop_mu)).sum()\n f = np.sqrt(sigma2_m) / self.sigma\n return f",
"def velocity_field(xt,yt,x0,y0,Vinf,dia,rot,chord,B,param=None,veltype='all',integration='simp',m=220,n=200):\n rad = dia/2.\n tsr = rad*fabs(rot)/Vinf\n solidity = (chord*B)/rad\n\n # Translating the turbine position\n x0t = x0 - xt\n y0t = y0 - yt\n\n coef0,coef1,coef2,coef3,coef4,coef5,coef6,coef7,coef8,coef9 = coef_val()\n\n # Calculating EMG distribution parameters (based on polynomial surface fitting)\n if param is None:\n loc1 = _parameterval(tsr,solidity,coef0)\n loc2 = _parameterval(tsr,solidity,coef1)\n loc3 = _parameterval(tsr,solidity,coef2)\n spr1 = _parameterval(tsr,solidity,coef3)\n spr2 = _parameterval(tsr,solidity,coef4)\n skw1 = _parameterval(tsr,solidity,coef5)\n skw2 = _parameterval(tsr,solidity,coef6)\n scl1 = _parameterval(tsr,solidity,coef7)\n scl2 = _parameterval(tsr,solidity,coef8)\n scl3 = _parameterval(tsr,solidity,coef9)\n\n else:\n # Reading in EMG distribution parameters\n loc1 = param[0]\n loc2 = param[1]\n loc3 = param[2]\n spr1 = param[3]\n spr2 = param[4]\n skw1 = param[5]\n skw2 = param[6]\n scl1 = param[7]\n scl2 = param[8]\n scl3 = param[9]\n\n ###################################\n if veltype == 'vort':\n # VORTICITY CALCULATION (NO INTEGRATION)\n if x0t < 0.:\n vel = 0.\n else:\n vel = _vawtwake.vorticitystrength(x0t,y0t,dia,loc1,loc2,loc3,spr1,spr2,skw1,skw2,scl1,scl2,scl3)/rot\n ###################################\n else:\n # Integration of the vorticity profile to calculate velocity\n if integration == 'simp':\n # SIMPSON'S RULE INTEGRATION (must use polynomial surface coefficients from VAWTPolySurfaceCoef.csv)\n inte = 1 # Simpson's Rule\n # inte = 2 # Trapezoidal Rule (optional ability of the code-- faster but less accurate)\n\n if param is not None:\n print \"**** Using polynomial surface coefficients from VAWTPolySurfaceCoef.csv for Simpson's rule integration ****\"\n\n vel_xs,vel_ys = _vawtwake.vel_field(xt,yt,x0,y0,dia,rot,chord,B,Vinf,coef0,coef1,coef2,coef3,coef4,coef5,coef6,coef7,coef8,coef9,m,n,inte)\n\n if veltype == 'all':\n vel = sqrt((vel_xs*Vinf + Vinf)**2 + (vel_ys*Vinf)**2)/Vinf\n elif veltype == 'x':\n vel = (vel_xs*Vinf + Vinf)/Vinf\n elif veltype == 'y':\n vel = vel_ys\n elif veltype == 'ind':\n vel = np.array([vel_xs,vel_ys])\n ###################################\n elif integration == 'gskr':\n # 21-POINT GAUSS-KRONROD RULE QUADRATURE INTEGRATION\n xbound = (scl3+5.)*dia\n argval = (x0t,y0t,dia,loc1,loc2,loc3,spr1,spr2,skw1,skw2,scl1,scl2,scl3)\n if veltype == 'all' or veltype == 'x' or veltype == 'ind':\n vel_x = _dblquad(_vawtwake.integrandx,0.,xbound,lambda x: -1.*dia,lambda x: 1.*dia,args=argval)\n vel_xs = (vel_x[0]*fabs(rot))/(2.*pi)\n if veltype == 'all' or veltype == 'y' or veltype == 'ind':\n vel_y = _dblquad(_vawtwake.integrandy,0.,xbound,lambda x: -1.*dia,lambda x: 1.*dia,args=argval)\n vel_ys = (vel_y[0]*fabs(rot))/(2.*pi)\n\n if veltype == 'all':\n vel = sqrt((vel_xs + Vinf)**2 + (vel_ys)**2)/Vinf\n elif veltype == 'x':\n vel = (vel_xs + Vinf)/Vinf\n elif veltype == 'y':\n vel = vel_ys/Vinf\n elif veltype == 'ind':\n vel = np.array([vel_xs,vel_ys])/Vinf\n ###################################\n\n return vel",
"def vector_length(self, x: float, y: float, z: float) -> float:\n A = 2.0 * (x * y * self.aga + x * z * self.bbe + y * z * self.cal)\n return sqrt(x ** 2 * self.asq + y ** 2 * self.bsq + z ** 2 * self.csq + A)",
"def calc_mag(self):\n mag = np.sum(self.box)\n return mag",
"def _calc_energy( self, V_a, eos_d ):\n pass",
"def magnitude(frame):\n sobelx = lambda im: cv2.Sobel(im, cv2.CV_64F, 1, 0, ksize=3)\n sobely = lambda im: cv2.Sobel(im, cv2.CV_64F, 0, 1, ksize=3)\n dxabs = cv2.convertScaleAbs(sobelx(frame))\n dyabs = cv2.convertScaleAbs(sobely(frame))\n\n return cv2.addWeighted(dxabs, 0.5, dyabs, 0.5, 0)",
"def magnitude(p):\n return sqrt((p**2).sum())",
"def mag_length(B, q=q_e):\n\n return np.sqrt(hbar/(q * B))",
"def estimate_focal_length(self):\n fl = (self.fiber_diameter / 2) / np.tan(np.deg2rad(self.fov / 2))\n\n return fl",
"def calc_eta_FC(Q_load_W, Q_design_W, phi_threshold, approach_call):\n phi = 0.0\n\n ## Approach A - NREL Approach\n if approach_call == \"A\":\n\n phi = float(Q_load_W) / float(Q_design_W)\n eta_max = 0.425 # from energy.gov\n\n if phi >= phi_threshold: # from NREL-Shape\n eta_el = eta_max - ((1 / 6.0 * eta_max) / (1.0 - phi_threshold)) * abs(phi - phi_threshold)\n\n if phi < phi_threshold:\n if phi <= 118 / 520.0 * phi_threshold:\n eta_el = eta_max * 2 / 3 * (phi / (phi_threshold * 118 / 520.0))\n\n if phi < 0.5 * phi_threshold and phi >= 118 / 520.0 * phi_threshold:\n eta_el = eta_max * 2 / 3.0 + \\\n eta_max * 0.25 * (phi - phi_threshold * 118 / 520.0) / (phi_threshold * (0.5 - 118 / 520.0))\n\n if phi > 0.5 * phi_threshold and phi < phi_threshold:\n eta_el = eta_max * (2 / 3.0 + 0.25) + \\\n 1 / 12.0 * eta_max * (phi - phi_threshold * 0.5) / (phi_threshold * (1 - 0.5))\n\n eta_therm_max = 0.45 # constant, after energy.gov\n\n if phi < phi_threshold:\n eta_therm = 0.5 * eta_therm_max * (phi / phi_threshold)\n\n else:\n eta_therm = 0.5 * eta_therm_max * (1 + eta_therm_max * ((phi - phi_threshold) / (1 - phi_threshold)))\n\n ## Approach B - Empiric Approach\n if approach_call == \"B\":\n\n if Q_design_W > 0:\n phi = float(Q_load_W) / float(Q_design_W)\n\n else:\n phi = 0\n\n eta_el_max = 0.39\n eta_therm_max = 0.58 # * 1.11 as this source gives eff. of HHV\n eta_el_score = -0.220 + 5.277 * phi - 9.127 * phi ** 2 + 7.172 * phi ** 3 - 2.103 * phi ** 4\n eta_therm_score = 0.9 - 0.07 * phi + 0.17 * phi ** 2\n\n eta_el = eta_el_max * eta_el_score\n eta_therm = eta_therm_max * eta_therm_score\n\n if phi < 0.2:\n eta_el = 0\n\n return eta_el, eta_therm",
"def element_size(self):\n vecs = (\n self.nodes[self.elements[:, :4], :][:, 1:, :]\n - self.nodes[self.elements[:, :4], :][:, 0, None, :]\n )\n return np.abs(np.linalg.det(vecs)) / 6",
"def betP(self, element):\n if element.is_empty():\n return 0\n\n if self.is_empty():\n return 0\n\n if not element.is_compatible(next(iter(self.focals))):\n return 0\n\n result = 0\n for focal, value in self.items():\n if not focal.is_empty():\n result += value * focal.conjunction_unsafe(element).cardinal / focal.cardinal\n return round(result, 6)",
"def test_velocity(self):\n sol = Mader(p_cj=3.0e11, d_cj=8.0e5, gamma=3.0, u_piston=0.0)\n # r must contain 2 elements, otherwise the density and pressure are nan\n r = np.array([0.7, 0.8])\n t = 6.25e-6\n solrt = sol(r, t)\n np.testing.assert_allclose(solrt.velocity[0], 144000.0)",
"def E2V(E):\n# for energy in mev returns velocity in m/s\n return sqrt(E/5.227e-6)",
"def magnitude(self):\n return sqrt(self & self)",
"def effective_width(self, intrinsic_width, dm, bandwidth, freq):\n a = sqrt(pow(intrinsic_width, 2) + pow((8.3e6 * fabs(dm) * (bandwidth / pow(freq, 3))), 2))\n return a",
"def compute_Flocal(config):\n \n vlow = config['vlow']\n vhigh = config['vhigh']\n vdef = config['vdef']\n lo_restfreq = config[\"DOPPLERTRACKFREQ\"]\n\n velocity = (vlow + vhigh) * 0.5\n vd = Vdef()\n vd.compute_local_frame_with_vdef(vdef, velocity,\n lo_restfreq, velocity)\n # this better be the same as vlow since i sent in the avg\n cur_vhigh = vd.get_vhigh()\n cur_vlow = vd.get_vlow()\n if cur_vhigh != cur_vlow:\n \"PANIC: How can the avg velocities differ!!!!!\"\n \n return cur_vhigh",
"def getRatio(probe_num, position_vector, shot_range, dir, day ='050119r'):\n ratio_x = 0\n ratio_y = 0\n ratio_z = 0\n # helm_B = [0,0,0]\n divideby = 0\n for shot in range(shot_range[0], shot_range[1]+1):\n print( 'On shot ', day+str(shot), ' for probe ',probe_num)\n x,y,z, currmax,helmB_new = probe_calib(day+str(shot), probe_num, position_vector,dir)\n ratio_x = ratio_x + x\n ratio_y = ratio_y + y\n ratio_z = ratio_z + z\n # helm_B = [helm_B[i] + helmB_new[i] for i in len(helmB)]\n divideby = divideby + 1 #averaging over the number of shots\n ratio_Bx = ratio_x/divideby\n ratio_By = ratio_y/divideby\n ratio_Bz = ratio_z/divideby\n # helmB = [helm_B]/divideby\n # print ratio_Bx, ratio_By, ratio_Bz, helmB\n # print(\"ratio_Bx %f, ratio_By %f, ratio_Bz %f, helmB%s\"%(ratio_Bx, ratio_By, ratio_Bz, helmB))\n Bx_sqr =ratio_x**2\n By_sqr =ratio_y**2\n Bz_sqr =ratio_z**2\n B = Bx_sqr + By_sqr+ Bz_sqr\n norm_factor = np.sqrt(B)\n ratio_Bx, ratio_By, ratio_Bz = [ratio_Bx, ratio_By, ratio_Bz]/norm_factor\n\n return (ratio_Bx, ratio_By, ratio_Bz, norm_factor)",
"def fD(self, vpd):\n\t if vpd < 0.1:\n\t return 1.\n\t else:\n\t return 3/13./sqrt(vpd/1000.)",
"def mag(field):\n return np.sqrt(np.sum(field**2, axis=0, keepdims=True))",
"def volume_unit_ball(d_dimensions: int, norm=2) -> float:\n\n # get ball\n if norm == 0:\n b = float(\"inf\")\n elif norm == 1:\n b = 1.0\n elif norm == 2:\n b = 2.0\n else:\n raise ValueError(f\"Unrecognized norm: {norm}\")\n\n return (np.pi ** (0.5 * d_dimensions)) ** d_dimensions / gamma(b / d_dimensions + 1)",
"def testscfenergydim(self):\r\n count_scfenergies = self.data.scfenergies.shape[0] - self.extrascfs\r\n count_atomcoords = self.data.atomcoords.shape[0] - self.extracoords\r\n assert count_scfenergies == count_atomcoords",
"def getFluxSize(self,flux=0.5,frac=True,mode='radial',cen=(0,0),v0=1,\n minfunc='fmin',intkwargs=None,**kwargs):\n import scipy.optimize\n\n fmin = getattr(scipy.optimize,minfunc)\n\n if intkwargs is None:\n intkwargs = {}\n\n if mode == 'radial':\n if cen != (0,0):\n raise NotImplementedError('radial profiles must be centered on (0,0)')\n if frac:\n total = self.integrateCircular(np.inf,**intkwargs)\n flux = flux * total\n def f(r):\n intres = self.integrateCircular(r,**intkwargs)-flux\n return intres*intres\n\n if np.isscalar(v0):\n v0 = (v0,)\n elif mode == 'square':\n x0,y0 = cen\n if frac:\n total = self.integrateCartesian(-np.inf,np.inf,-np.inf,np.inf,**intkwargs)\n flux = flux * total\n def f(l):\n intres = self.integrateCartesian(x0-l,x0+l,x0-l,x0+l,**intkwargs)-flux\n return intres*intres\n\n if np.isscalar(v0):\n v0 = (v0,)\n elif mode == 'rectangular':\n x0,y0 = cen\n if frac:\n total = self.integrateCartesian(-np.inf,np.inf,-np.inf,np.inf,**intkwargs)\n flux = flux * total\n def f(ls):\n lx,ly = ls\n intres = self.integrateCartesian(x0-lx,x0+lx,y0-ly,y0+ly,**intkwargs)-flux\n return intres*intres\n\n if np.isscalar(v0):\n v0 = (v0,v0)\n else:\n raise ValueError('unrecognized mode')\n\n if minfunc!='brent':\n res = fmin(f,v0,full_output=1,**kwargs)\n else:\n res = fmin(f,full_output=1,**kwargs)\n self.lastfluxsize = res\n val = res[0]\n\n return val.ravel()[0] if val.size == 1 else tuple(val)",
"def V_magEarth(alpha,a_p,d):\n V = 5.*np.log10(a_p*d) - 3.99 - 1.060e-3*alpha + 2.054e-4*alpha**2.\n return V",
"def calculations():\r\n\t\r\n\tpayload, avionics, booster = weight_input()\r\n\r\n\tdrogue_size, drogue_force = drogue_calc()\r\n\tmain_size, main_force = main_calc(avionics, booster, drogue_force) #total mass, payload detaches\r\n\r\n\tprint(\"Drogue is diameter is \" + str(drogue_size) + \" inches\")\r\n\tprint(\"Main is diameter is \" + str(main_size) + \" inches\")",
"def test_jam_axi_rms():\n np.random.seed(123)\n xbin, ybin = np.random.uniform(low=[-55, -40], high=[55, 40], size=[1000, 2]).T\n\n inc = 60. # Assumed galaxy inclination\n r = np.sqrt(xbin**2 + (ybin/np.cos(np.radians(inc)))**2) # Radius in the plane of the disk\n a = 40 # Scale length in arcsec\n vr = 2000*np.sqrt(r)/(r+a) # Assumed velocity profile\n vel = vr * np.sin(np.radians(inc))*xbin/r # Projected velocity field\n sig = 8700/(r+a) # Assumed velocity dispersion profile\n rms = np.sqrt(vel**2 + sig**2) # Vrms field in km/s\n\n surf = np.array([39483., 37158., 30646., 17759., 5955.1, 1203.5, 174.36, 21.105, 2.3599, 0.25493])\n sigma = np.array([0.153, 0.515, 1.58, 4.22, 10, 22.4, 48.8, 105, 227, 525])\n qObs = np.full_like(sigma, 0.57)\n\n distance = 16.5 # Assume Virgo distance in Mpc (Mei et al. 2007)\n mbh = 1e8 # Black hole mass in solar masses\n beta = np.full_like(surf, 0.3)\n\n surf_lum = surf # Assume self-consistency\n sigma_lum = sigma\n qobs_lum = qObs\n surf_pot = surf\n sigma_pot = sigma\n qobs_pot = qObs\n\n sigmapsf = 0.6\n pixsize = 0.8\n goodbins = r > 10 # Arbitrarily exclude the center to illustrate how to use goodbins\n\n # The model is similar but not identical to the adopted kinematics!\n rmsModel, ml, chi2, flux = jam_axi_rms(\n surf_lum, sigma_lum, qobs_lum, surf_pot, sigma_pot, qobs_pot,\n inc, mbh, distance, xbin, ybin, plot=True, rms=rms, sigmapsf=sigmapsf,\n beta=beta, pixsize=pixsize, tensor='zz', goodbins=goodbins)\n plt.pause(0.01)",
"def calc_wvf(pedestrian, key, face, mesh_area,radius): \n normal = envuo.py3dmodel.construct.make_vector((0,0,0),envuo.py3dmodel.calculate.face_normal(face))\n surf2ped = envuo.py3dmodel.construct.make_vector(key,pedestrian)\n sa_ped = 4.0*np.pi*radius**2\n theta = normal.Angle(surf2ped)\n h = surf2ped.Magnitude()/radius \n phi = np.arctan(1/h)\n threshold = np.pi/2.0 - phi\n\n if abs(h*np.cos(theta)) > 1:\n F = abs(np.cos(theta))/h**2; \n else:\n x = np.sqrt(h**2-1)/np.tan(theta) #np.sqrt(h**2-1)\n y = np.sqrt(1-x**2) #-x/np.tan(theta) #\n F = (np.pi - abs(np.cos(x)) - x*y*np.tan(theta)**2)*abs(np.cos(theta))/(np.pi*h**2) + np.arctan(y*abs(np.cos(theta))/x)/np.pi; \n print pedestrian,' passes threshold'\n return mesh_area*F/sa_ped",
"def __q2v_ff(self, vm, beta, q):\n return float((vm * beta - np.sqrt(np.power(vm * beta, 2) - 4 * vm * beta * q)) / (2 * vm))",
"def ve(self) -> float:\n a = np.sum(np.abs(self.predicted - self.true))\n b = np.sum(self.true)\n return float(1 - (a / b))",
"def fraction_of_infectious_virus(self) -> _VectorisedFloat:\n return 1.",
"def _pred_mag(self,params: ndarray, times: ndarray) -> ndarray:\n tE = np.exp(params[0])\n A0 = np.exp(params[1])\n deltaT = np.exp(params[2])\n fbl = params[3]\n mb = params[4]\n\n u0 = np.sqrt((2*A0/np.sqrt(A0**2-1))-2)\n u = np.sqrt(u0**2+((times-deltaT-self.alert_time)/tE)**2)\n Amp = (u**2+2) / (u*np.sqrt(u**2+4))\n pred_mag = mb - 2.5*np.log10(fbl*(Amp-1)+1)\n\n return pred_mag",
"def flux_ratio(self):\n return self._flux_ratio",
"def _feet_2_meter(item_in_feet):\n # vfunc_model = np.vectorize(spherical)\n try:\n return item_in_feet / 3.28084\n except TypeError:\n return float(item_in_feet) / 3.28084",
"def calc_vol_vfrac(self, r_cool, PD, c):\n # core and reflector volume required for depletion calculation\n self.core_vol = self.r**2 * math.pi * self.z\n self.refl_vol = ((self.r + self.refl_t)**2 - self.r**2)*math.pi * self.z\n \n pitch = 2*r_cool*PD\n # calculate 'volumes' for fixed length\n v_cool = (r_cool ** 2 * math.pi)\n # clad volume fraction\n v_clad = ((r_cool + c)**2 - r_cool**2)*math.pi\n # fuel volume fraction\n v_cermet = (math.sqrt(3)*pitch**2 / 2.0) - (r_cool + c) ** 2 * math.pi \n\n self.cell_vol = v_cool + v_clad + v_cermet\n # calculate normalized vfracs from total cell volume\n self.vfrac_cool = v_cool / self.cell_vol\n self.vfrac_clad = v_clad / self.cell_vol\n self.vfrac_cermet = v_cermet / self.cell_vol",
"def aperture_phot(self,data,x,y,v):\n r = np.sqrt((x-self.avg_map_fits['Values'][1])**2 + (y-self.avg_map_fits['Values'][3])**2)\n \n inner = (r < 8./60.) & np.isfinite(data) \n outer = (r > 8.5/60.) & (r < 12./60.) & np.isfinite(data)\n\n annu = np.nanmedian(data[outer])\n annu_rms = np.nanstd(data[outer])\n flux = np.sum(data[inner]) - annu*np.sum(inner)\n\n c = 3e8\n kb=1.38e-23\n beam = (1./60.*np.pi/180.)**2\n factor = 2*kb*(v*1e9/c)**2 * beam * 1e26\n return flux*factor, annu_rms*np.sqrt(np.sum(inner))*factor",
"def Q(self):\n return np.array(list(self.center_frequencies)) \\\n / np.array(list(self.bandwidths))",
"def magnitude(self): # @todo @caution check: something wrong?\n\n return (math.sqrt(reduce(lambda x, y: x+y,\n [x**2 for x in self.vector])))",
"def _calc_bvf(self, points, bead_radius, shell_radius):\n dists = distance.cdist(points, points, 'euclidean')\n\n '''\n Check for intersection. If three spheres intersect we cannot\n (easily) accurately determine the shared volume and therefore\n cannot calculate the bead volume fraction.\n '''\n for i, point_distances in enumerate(dists):\n overlaps = np.where(np.logical_and(np.less(point_distances,\n bead_radius * 2), np.not_equal(point_distances, 0)))[0]\n for combo in itertools.combinations(overlaps, 2):\n positions = [points[idx] for idx in combo + (i,)]\n if self._intersected(positions, bead_radius):\n return 1.0\n\n dists = dists[np.nonzero(dists)]\n\n r_min = shell_radius - bead_radius\n r_max = shell_radius + bead_radius\n vol_shell = (4/3) * np.pi * (r_max**3 - r_min**3)\n vol_beads = len(points) * (4/3) * np.pi * bead_radius**3\n '''\n The total volume taken up by beads is the volume of all of the beads\n minus the volume of the bead intersections. Since each bead-bead\n distance will be present twice within `dists` we divide by two here\n to compensate.\n '''\n vol_overlap = np.sum([self._overlap_volume(bead_radius, dist)\n for dist in dists]) / 2\n\n return (vol_beads - vol_overlap) / vol_shell",
"def magnitude_of_vector(v):\n return math.sqrt(sum_of_squares(v))",
"def velocity_field(xt,yt,x0,y0,velf,dia,tsr,solidity):\n rad = dia/2.\n rot = tsr*velf/rad\n\n # Calculating EMG distribution parameters\n loc,spr,skw,scl = vorticity(tsr,solidity)\n \n # Translating the turbine position\n x0t = x0 - xt\n y0t = y0 - yt\n \n # Integration of the vorticity profile using Fortran code (vorticity.f90; _vortrun.so)\n vel_vs = dblquad(_vortmodel.integrand,0.,35.*dia,lambda x: -4.*dia,lambda x: 4.*dia, args=(x0t,y0t,dia,loc[0],loc[1],loc[2],spr[0],spr[1],skw[0],skw[1],scl[0],scl[1],scl[2]))\n \n # Calculating velocity deficit\n vel = (vel_vs[0]*(rot))/(2.*pi)\n vel = (vel + velf)/velf # normalization of velocity\n \n return vel",
"def _Fqt_comp(vh,q):\n r_scale = 6.45/60\n edges,count,x_lim = vh\n # make sure that vh is normalized\n count = count/np.sum(count)\n\n return np.sum(count * np.exp(1j*q*edges*r_scale))",
"def computeMaggiesErr(instFluxErr, instFlux, calibrationErr, calibration, flux):\n return flux*np.hypot(instFluxErr/instFlux, calibrationErr/calibration)",
"def finite_size_scale(standard, ssize, primordial, fsize, psize=[1,1,1],writefile=True):\n \n # Check if the input sizes work out with the desired final size\n padding = [0,0,0]\n srcon = [0,0,0]\n for i in range(3):\n diff = fsize[i] - ssize[i]\n if diff < 0:\n raise RuntimeError('Desired final size of the structure must be larger than existing defect structure size. Defect Size = '+repr(ssize)+' Final Size = '+repr(fsize))\n elif diff >= 0:\n if math.fmod(diff,psize[i]):\n raise RuntimeError('Primordial structure and defect structure sizes cannot be used to form desired final size. Reduce size of primordial structure. Defect Size = '+repr(ssize)+' Final Size = '+repr(fsize)+' Primordial size = '+repr(psize))\n else:\n padding[i] = diff/psize[i]\n \n # Load the defect structure and primordial structure\n defst = read(standard)\n pst = read(primordial)\n \n # Pad the structure\n positions = pst.get_positions()\n syms = pst.get_chemical_symbols()\n final = defst.copy()\n lv = [one/ssize for one in defst.get_cell()]\n vect = []\n for m0 in range(padding[0]):\n for m1 in numpy.arange(0,fsize[1],psize[1]):\n for m2 in numpy.arange(0,fsize[2],psize[2]):\n vect.append([ssize[0]+m0*psize[0],m1,m2])\n\n for m1 in range(padding[1]):\n for m0 in numpy.arange(0,ssize[0],psize[0]):\n for m2 in numpy.arange(0,fsize[2],psize[2]):\n vect.append([m0,ssize[1]+m1*psize[1],m2])\n\n for m2 in range(padding[2]):\n for m0 in numpy.arange(0,ssize[0],psize[0]):\n for m1 in numpy.arange(0,ssize[1],psize[1]):\n vect.append([m0,m1,ssize[2]+m2*psize[2]])\n\n for m0,m1,m2 in vect:\n npos = positions + numpy.dot((m0, m1, m2), lv)\n for i in range(len(npos)):\n final.append(Atom(symbol=syms[i],position=npos[i]))\n \n final.set_cell(numpy.array([fsize[c] * lv[c] for c in range(3)]))\n \n # Write output as POSCAR\n if writefile:\n write('POSCAR_Final', final)\n \n return final",
"def get_vqvae_top_resolution_n() -> int:\n global vqvae\n assert vqvae is not None\n global transformer_top\n assert transformer_top is not None\n global spectrograms_helper\n assert spectrograms_helper is not None\n global DEVICE\n assert DEVICE is not None\n dummy_codes_top = torch.zeros(transformer_top.shape,\n dtype=torch.long).to(DEVICE).unsqueeze(0)\n dummy_codes_bottom = torch.zeros(transformer_bottom.shape,\n dtype=torch.long).to(DEVICE).unsqueeze(0)\n decoded_audio = spectrograms_helper.to_audio(\n vqvae.decode_code(dummy_codes_top, dummy_codes_bottom))\n _, duration_top = transformer_top.shape\n return decoded_audio.shape[-1] // duration_top",
"def test_9(self):\n for _ in range(1000):\n num_free = np.random.randint(1, 100)\n values = np.random.uniform(-1000.0, 1000.0, size=num_free)\n py = get_scales_magnitudes(values)\n f90 = fort_debug.wrapper_get_scales_magnitude(values, num_free)\n assert_almost_equal(py, f90)",
"def aperture_fields(horn_width, eplane_effective_length, hplane_effective_length, frequency, x, y):\n # Calculate the wavenumber\n k = 2.0 * pi * frequency / c\n\n # Calculate the wave impedance\n eta = sqrt(mu_0 / epsilon_0)\n\n # Define the x-component of the electric field\n e_x = 0.0\n\n # Define the y-component of the electric field\n e_y = cos(pi * x / horn_width) * exp(-1j * k * 0.5 * (x ** 2 / hplane_effective_length +\n y ** 2 / eplane_effective_length))\n\n # Define the z-component of the electric field\n e_z = 0.0\n\n # Define the x-component of the magnetic field\n h_x = -cos(pi * x / horn_width) / eta * exp(-1j * k * 0.5 * (x ** 2 / hplane_effective_length +\n y ** 2 / eplane_effective_length))\n\n # Define the y-component of the magnetic field\n h_y = 0.0\n\n # Define the z-component of the magnetic field\n h_z = 0.0\n\n # Return all six components of the aperture field\n return e_x, e_y, e_z, h_x, h_y, h_z",
"def _osLen(self):\n return int(np.ceil(self.minOverscan * self.sampleRate / self.downsample) * self.downsample)\n\n #osv = self.osVector\n #return np.ceil(np.linalg.norm(osv) / self.pixelWidth)",
"def getMagBoundary(self):\n\n # Get the boundary of magnitude based on the filter\n lowMagnitude = nan\n highMagnitude = nan\n if (self.filter == self.FilterU):\n lowMagnitude = 7.94\n highMagnitude = 14.80\n\n elif (self.filter == self.FilterG):\n lowMagnitude = 9.74\n highMagnitude = 16.17\n\n elif (self.filter == self.FilterR):\n lowMagnitude = 9.56\n highMagnitude = 15.73\n\n elif (self.filter == self.FilterI):\n lowMagnitude = 9.22\n highMagnitude = 15.26\n\n elif (self.filter == self.FilterZ):\n lowMagnitude = 8.83\n highMagnitude = 14.68\n \n elif (self.filter == self.FilterY):\n lowMagnitude = 8.02\n highMagnitude = 13.76\n\n return lowMagnitude, highMagnitude",
"def get_ptf10iuv(colorplt = False):\n z = 0.0251485\n ebv = 0.0371 # SFD\n D = cosmo.luminosity_distance([z])[0].value * 1e+6 # in pc\n dis_mod = 5*np.log10(D / 10)\n print (\"adopt g band t_max estimated by myself\")\n t_max = 55357.387 \n tb = pd.read_csv('../data/otherSN/Kasliwal2012/PTF10iuv', sep='\\t')\n tb = tb.drop(columns=[\"Unnamed: 4\"])\n tb = tb.rename(columns={'Filter' : 'filter',\n 'MJD': 'mjd'})\n tb = tb[~np.array([x[0]=='>' for x in tb['Mag'].values])]\n tb['mag'] = np.array([float(x.split(\" +or-\")[0]) for x in tb['Mag'].values])\n tb['emag'] = np.array([float(x.split(\" +or-\")[1]) for x in tb['Mag'].values])\n tb = tb.drop(columns=[\"Mag\"])\n \n ixg = tb['filter'].values == \"g\"\n ixr = tb['filter'].values == \"r\"\n ixi = tb['filter'].values == \"i\"\n ixz = tb['filter'].values == \"z\"\n ixB = tb['filter'].values == \"B\"\n tb['wave'] = np.zeros(len(tb))\n tb['wave'].values[ixB] = 4359\n tb['wave'].values[ixg] = 4814\n tb['wave'].values[ixr] = 6422\n tb['wave'].values[ixi] = 7883\n tb['wave'].values[ixz] = 9670\n \n tb['mag0'] = tb['mag'] - extinction.ccm89(tb['wave'].values, 3.1*ebv, 3.1)\n tb['mag0_abs'] = tb['mag0'] - dis_mod\n tb['tmax_rf'] = (tb['mjd'] - t_max) / (1+z)\n tb = tb.sort_values(by = \"mjd\")\n if colorplt==False:\n return tb\n \n else:\n tb = add_datecol(tb)\n ix = np.in1d(tb[\"filter\"].values, np.array(['g', 'r', 'i']))\n tb = tb[ix]\n tb = tb[tb.mjd > 55352.5]\n tb = tb[tb.mjd < 55593.5]\n \n dates = get_date_span(tb)\n datesave = []\n for i in range(len(dates)):\n x = dates[i]\n ix = tb[\"date\"].values == x\n tbsub = tb[ix]\n if len(tbsub)!=0:\n flts = tbsub['filter'].values\n if \"r\" in flts and np.sum(np.unique(flts))!=1:\n datesave.append(x)\n datesave = np.array(datesave)\n \n mcolor = []\n mcolor_unc = []\n mjds = []\n colorname = []\n for i in range(len(datesave)):\n x = datesave[i]\n ix = tb[\"date\"].values == x\n tbsub = tb[ix]\n gtb = tbsub[tbsub[\"filter\"].values==\"g\"]\n rtb = tbsub[tbsub[\"filter\"].values==\"r\"]\n itb = tbsub[tbsub[\"filter\"].values==\"i\"]\n if len(gtb)!=0:\n gmjds = gtb[\"mjd\"].values\n gmags = gtb[\"mag0\"].values\n gemags = gtb[\"emag\"].values\n gwtgs = 1/gemags**2\n gmag = np.sum(gmags * gwtgs) / np.sum(gwtgs)\n gmjd = np.sum(gmjds * gwtgs) / np.sum(gwtgs)\n gemag = 1/ np.sqrt(np.sum(gwtgs))\n if len(rtb)!=0:\n rmjds = rtb[\"mjd\"].values\n rmags = rtb[\"mag0\"].values\n remags = rtb[\"emag\"].values\n rwtgs = 1/remags**2\n rmag = np.sum(rmags * rwtgs) / np.sum(rwtgs)\n rmjd = np.sum(rmjds * rwtgs) / np.sum(rwtgs)\n remag = 1/ np.sqrt(np.sum(rwtgs))\n if len(itb)!=0:\n imjds = itb[\"mjd\"].values\n imags = itb[\"mag0\"].values\n iemags = itb[\"emag\"].values\n iwtgs = 1/iemags**2\n imag = np.sum(imags * iwtgs) / np.sum(iwtgs)\n imjd = np.sum(imjds * iwtgs) / np.sum(iwtgs)\n iemag = 1/ np.sqrt(np.sum(iwtgs))\n if len(gtb)!=0 and len(rtb)!=0:\n mcolor.append(gmag - rmag)\n mjds.append( 0.5 * (gmjd + rmjd) )\n mcolor_unc.append( np.sqrt(gemag**2 + remag**2) )\n colorname.append(\"gmr\")\n if len(rtb)!=0 and len(itb)!=0:\n mcolor.append(rmag - imag)\n mjds.append( 0.5 * (rmjd + imjd) )\n mcolor_unc.append( np.sqrt(remag**2 + iemag**2) )\n colorname.append(\"rmi\")\n \n ctb = Table(data = [mjds, mcolor, mcolor_unc, colorname],\n names = [\"mjd\", \"c\", \"ec\", \"cname\"])\n \n ctb['tmax_rf'] = (ctb['mjd'] - t_max) / (1+z)\n ctb = ctb.to_pandas()\n return ctb",
"def q_div(self, PFC, MHD, q):\n psi = PFC.psimin\n xyz = PFC.centers\n\n R_div,Z_div,phi_div = tools.xyz2cyl(xyz[:,0],xyz[:,1],xyz[:,2])\n\n R_omp = self.map_R_psi(psi,PFC)\n Z_omp = np.zeros(R_omp.shape)\n # Dot product between surface normal and B field\n #self.HFincidentAngle(PFC, MHD)\n # Calculate Magnitude of B at Divertor\n Bp_div = PFC.ep.BpFunc.ev(R_div,Z_div)\n Bt_div = PFC.ep.BtFunc.ev(R_div,Z_div)\n B_div = np.sqrt(Bp_div**2 + Bt_div**2)\n # Evaluate B at outboard midplane\n Bp_omp = PFC.ep.BpFunc.ev(R_omp,Z_omp)\n Bt_omp = PFC.ep.BtFunc.ev(R_omp,Z_omp)\n B_omp = np.sqrt(Bp_omp**2 + Bt_omp**2)\n\n# Bt_omp = MHD.ep.BtFunc.ev(R_omp,Z_omp)\n# BR_omp = MHD.ep.BRFunc.ev(R_omp,Z_omp)\n# BZ_omp = MHD.ep.BZFunc.ev(R_omp,Z_omp)\n# B_omp = np.sqrt(Bt_omp**2 + BR_omp**2 + BZ_omp**2)\n#\n# Bt_div = MHD.ep.BtFunc.ev(R_div,Z_div)\n# BR_div = MHD.ep.BRFunc.ev(R_div,Z_div)\n# BZ_div = MHD.ep.BZFunc.ev(R_div,Z_div)\n# B_div = np.sqrt(Bt_div**2 + BR_div**2 + BZ_div**2)\n\n\n #For Debugging, plot Bfield Ratio\n #import matplotlib.pyplot as plt\n #testB_div = B_div.reshape(self.grid['Nphi'],self.grid['Nswall']).T\n #testB_omp = B_omp.reshape(self.grid['Nphi'],self.grid['Nswall']).T\n #B_ratio = testB_div/testB_omp\n #CS = plt.contourf(self.grid['phi'], self.grid['Swall'],B_ratio,levels=30,cmap=plt.cm.cool)\n #plt.colorbar(CS, label=r'$B Ratio$')\n #plt.show()\n #Divertor heat flux\n q_div = np.zeros((len(xyz)))\n use = np.where(PFC.shadowed_mask == 0)[0]\n\n #Matt's method\n# q_div[use] = q[use] * B_div[use]/B_omp * PFC.bdotn[use]\n #Menard's Method\n q_div[use] = q[use] * B_div[use] * PFC.bdotn[use]\n\n #for i in range(len(q_div)):\n #\tif q_div[i] > 8.0: q_div[i] = 0.0\n #Plot q|| and qdiv\n #import matplotlib.pyplot as plt\n #plt.scatter(self.grid['Swall'][:,0], q_div[0:self.grid['Nswall']], label='qdiv')\n #plt.scatter(self.grid['Swall'][:,0], q[0:self.grid['Nswall']], label='q||')\n #plt.legend()\n #plt.show()\n return np.abs(q_div)"
] | [
"0.6291265",
"0.5848869",
"0.5809261",
"0.5802869",
"0.57336414",
"0.5729062",
"0.569543",
"0.56734866",
"0.5662906",
"0.56628376",
"0.56435436",
"0.56359833",
"0.5614053",
"0.5611046",
"0.55810857",
"0.55808634",
"0.5556087",
"0.55491066",
"0.55242413",
"0.5521659",
"0.55142677",
"0.5494072",
"0.5483656",
"0.5480256",
"0.5479738",
"0.5459442",
"0.5450414",
"0.54472464",
"0.5447242",
"0.54394233",
"0.5439184",
"0.5437157",
"0.5435416",
"0.5420653",
"0.5419178",
"0.54076374",
"0.5406825",
"0.5406578",
"0.5367682",
"0.5362832",
"0.5360413",
"0.5342483",
"0.53373945",
"0.5329687",
"0.53285646",
"0.5324419",
"0.532067",
"0.5311715",
"0.5307666",
"0.5306026",
"0.5296108",
"0.52959025",
"0.52913",
"0.5285341",
"0.5284511",
"0.52835536",
"0.527975",
"0.5277954",
"0.5276111",
"0.5266926",
"0.5266647",
"0.52586734",
"0.5255235",
"0.525495",
"0.5254056",
"0.5247706",
"0.5247305",
"0.5245982",
"0.5244649",
"0.5242518",
"0.5241943",
"0.5240536",
"0.52401316",
"0.5232331",
"0.52309704",
"0.5222893",
"0.52148956",
"0.52144456",
"0.5212836",
"0.52127105",
"0.52109665",
"0.52105427",
"0.5208686",
"0.5206257",
"0.52050334",
"0.52041185",
"0.5203874",
"0.52024287",
"0.52021873",
"0.5194008",
"0.5193397",
"0.51898986",
"0.51889235",
"0.5187252",
"0.518399",
"0.5182107",
"0.5175448",
"0.51717776",
"0.5167737",
"0.51635885",
"0.51635134"
] | 0.0 | -1 |
Set the parameters fit on the Sloan Lens ACS Survey (SLACS) sample of 73 ETGs. Note: see Table 4 of [1]_ for the fit values, taken from the empirical correlation derived from the SLACS lens galaxy sample. References | def _define_SLACS_fit_params(self):
# Fit params from R_eff
self.a = -0.41
self.b = 0.39
#self.delta_a = 0.12
#self.delta_b = 0.10
self.intrinsic_scatter = 0.14
# Fit params from vel_disp
self.a_v = 0.07
self.b_v = -0.12
self.int_v = 0.17 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _define_SDSS_fit_params(self):\n\t\tself.a = 1.4335\n\t\tself.b = 0.3150 \n\t\tself.c = -8.8979\n\t\tself.intrinsic_scatter = 0.0578\n\t\t#self.delta_a = 0.02\n\t\t#self.delta_b = 0.01",
"def set_fit_params(self):\n\n self.p0 = np.array([self.A_arr, self.T_a])\n # initial guess at A_arr and T_a\n\n self.popt, self.pcov = curve_fit(\n self.get_eta_fit, self.T_exp, self.eta_exp, p0=self.p0\n )\n\n self.A_arr = self.popt[0]\n self.T_a = self.popt[1]\n\n self.T_array = self.T_model",
"def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Afb[0.6,-0.7,0.7]\");\n self.modelBuilder.doVar(\"A0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doSet(\"POI\",\"Afb,A0\")\n\n # ss templates\n self.modelBuilder.doVar(\"R_ee_os_fakes[0.6,0.0,1.0]\");\n self.modelBuilder.doVar(\"ee16_fakes_norm[1.0, 0.01, 10.]\");\n self.modelBuilder.doVar(\"ee17_fakes_norm[1.0, 0.01, 10.]\");\n self.modelBuilder.doVar(\"ee18_fakes_norm[1.0, 0.01, 10.]\");\n #Remember, cant use spaces in these formulas!\n #self.modelBuilder.options.verbose = 10\n self.modelBuilder.factory_('expr::R_ee16_qcd_os(\"@0*@1\",ee16_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee17_qcd_os(\"@0*@1\",ee17_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee18_qcd_os(\"@0*@1\",ee18_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee16_qcd_ss(\"@0*(1.0-@1)\",ee16_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee17_qcd_ss(\"@0*(1.0-@1)\",ee17_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee18_qcd_ss(\"@0*(1.0-@1)\",ee18_fakes_norm,R_ee_os_fakes)')\n \n self.modelBuilder.factory_('expr::Alph(\"2.0*@0/(2.0-@0)\",A0)')\n self.modelBuilder.factory_('expr::Norm(\"3.0/4.0/(2.0+@0)\",Alph)')\n self.modelBuilder.factory_('expr::RAlph(\"@0*@1\",Alph,Norm)')\n self.modelBuilder.factory_('expr::Rpl(\"(@0+@1)\",Norm,Afb)')\n self.modelBuilder.factory_('expr::Rmn(\"(@0-@1)\",Norm,Afb)')",
"def fit(self, stimulus, ref, targets):\n self.pars, fval = fitSSL(stimulus, ref.samples, targets, ref.shape[1], ref.C)\n self.rsq = self._compute_coef_determination(stimulus, ref.samples, targets, ref.C)",
"def fit_LuEd(self, wl, Ls, Lu, Ed, params, weights, verbose=True):\n\n\t\t\tdef min_funct(params):\n\t\t\t\tp = params.valuesdict() \n\t\t\t\n\t\t\t\tRrs_modelled, Rrs_refl, Lu_Ed_modelled = self.model(beta = p['beta'], alpha = p['alpha'], am = p['am'], rh = p['rh'], pressure = p['pressure'], C_chl = p['C_chl'], C_sm = p['C_sm'], C_mie = p['C_mie'], n_mie = p['n_mie'], C_y = p['C_y'], S_y = p['S_y'], T_w = p['T_w'], theta_sun = p['theta_sun'], theta_view = p['theta_view'], n_w = p['n_w'], rho_s = p['rho_s'], rho_dd = p['rho_dd'], rho_ds = p['rho_ds'], delta = p['delta'], wl = wl, a_w = self.spectra['a_w'].values, daw_dT = self.spectra['daw_dT'].values, astar_ph = self.spectra['astar_ph'].values, astar_y = self.spectra['astar_y'].values, Ls_Ed = Ls/Ed)\n\n\t\t\t\tRrs_obs = Lu/Ed - Rrs_refl\n\n\t\t\t\t# Least squares\n\t\t\t\tresid = np.sum((Lu_Ed_modelled - Lu/Ed)**2 * weights)\n\n\t\t\t\treturn resid, Rrs_modelled, Rrs_refl, Lu_Ed_modelled, Rrs_obs\n\n\t\t\tstart_time = time.time()\n\n\t\t\treg = lm.minimize(lambda x: min_funct(x)[0], params=params, method='lbfgsb', options={'disp': verbose, 'gtol': 1e-16, 'eps': 1e-07, 'maxiter': 15000, 'ftol': 1e-16, 'maxls': 20, 'maxcor': 20}) \n\n\t\t\tprint(\"--- %s seconds ---\" % (time.time() - start_time))\n\n\t\t\tresid, Rrs_modelled, Rrs_refl, Lu_Ed_modelled, Rrs_obs = min_funct(reg.params)\n\t\t\treg.params.add('resid', resid, False, 0.0, 100, None)\n\n\t\t\treturn reg, Rrs_modelled, Rrs_refl, Lu_Ed_modelled, Rrs_obs",
"def setCSEParameters(csi:str, ri:str, rn:str) -> None:\n\t\t\tCSE.cseCsi = csi\n\t\t\tConfiguration.set('cse.csi', csi)\n\t\t\tCSE.cseRi = ri\n\t\t\tConfiguration.set('cse.ri', ri)\n\t\t\tCSE.cseRn = rn\n\t\t\tConfiguration.set('cse.rn', rn)",
"def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Rdy[1.,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rbk[1.,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rqcd_emu[1,0.0,10.0]\");\n self.modelBuilder.doSet(\"POI\",\"Rbk,Rdy,Rqcd_emu\")",
"def setup_lls_fit_analy(spec_fil, zlls, lls_windows, NHI_mnx, nNHI=100, spec_keys=None):\n # Init\n if spec_keys is None:\n spec_keys = dict(sig='ERROR', flux='FLUX', wave='WAVE')\n # Load up spectrum (Table and xspec)\n spec = Table.read(spec_fil)\n # Deal with NANs\n sig = spec[spec_keys['sig']].data.flatten()\n sig[np.isnan(sig)] = 0.\n xspec = XSpectrum1D.from_tuple((np.array(spec[spec_keys['wave']].data.flatten()),\n np.array(spec[spec_keys['flux']].data.flatten()),\n sig), masking='none')\n\n # Analysis pixels\n pixels = []\n for window in lls_windows:\n gdwv = np.where((xspec.wavelength >= window[0]*u.AA) &\n (xspec.wavelength <= window[1]*u.AA))[0]\n pixels.append(gdwv)\n gdwv = np.concatenate(pixels)\n\n # NHI\n NHI = np.linspace(NHI_mnx[0], NHI_mnx[1], num=nNHI)\n wv_rest = xspec.wavelength[gdwv] / (zlls+1)\n energy = wv_rest.to(u.eV, equivalencies=u.spectral())\n # Get photo_cross and calculate tau\n tau0 = (10.**NHI[0] / u.cm**2) * ltaa.photo_cross(1, 1, energy)\n\n # Return\n return spec, xspec, gdwv, NHI, tau0",
"def cvstem(self):\n if (self.iEC == \"est\") and (len(sig(self.Cfun).parameters) == 1):\n fun1 = self.Cfun\n self.Cfun = lambda x,p: fun1(x)\n if (self.iEC == \"est\") and (len(sig(self.Gw).parameters) == 1):\n fun2 = self.Gw\n self.Gw = lambda x,p: fun2(x)\n if self.iEC == \"est\":\n self.c_over = self.matrix_2bound(self.Cfun)\n self.g_over = self.matrix_2bound(self.Gw)\n if (len(sig(self.Bw).parameters) == 1):\n fun3 = self.Bw\n self.Bw = lambda x,p: fun3(x)\n self.b_over = self.matrix_2bound(self.Bw)\n self.linesearch()\n alp = self.alp_opt\n Nx = self.Nx\n Nsplit = 1\n Np = int(Nx/Nsplit)\n Nr = np.remainder(Nx,Nsplit)\n xpmin = np.hstack((self.xlims[0,:],self.plims[0,:]))\n xpmax = np.hstack((self.xlims[1,:],self.plims[1,:]))\n Nxp = self.n+self.n_p\n xps = np.random.uniform(xpmin,xpmax,size=(Nx,Nxp))\n xs_opt,ps_opt,_ = np.hsplit(xps,np.array([self.n,Nxp]))\n Ws_opt = []\n chi_opt = 0\n nu_opt = 0\n print(\"========================================================\")\n print(\"====== SAMPLING OF CONTRACTION METRICS BY CV-STEM ======\")\n print(\"========================================================\")\n for p in range(Np):\n if np.remainder(p,int(Np/10)) == 0:\n print(\"# sampled metrics: \",p*Nsplit,\"...\")\n xs_p = xs_opt[Nsplit*p:Nsplit*(p+1),:]\n ps_p = ps_opt[Nsplit*p:Nsplit*(p+1),:]\n self.cvstem0(xs_p,ps_p,alp)\n Ws_opt += self.Ws\n if self.nu >= nu_opt:\n nu_opt = self.nu\n if self.chi >= chi_opt:\n chi_opt = self.chi\n if Nr != 0:\n print(\"# samples metrics: \",Nx,\"...\")\n xs_p = xs_opt[Nsplit*(p+1):Nx,:]\n ps_p = ps_opt[Nsplit*(p+1):Nx,:]\n self.cvstem0(xs_p,ps_p,alp)\n Ws_opt += self.Ws\n if self.nu >= nu_opt:\n nu_opt = self.nu\n if self.chi >= chi_opt:\n chi_opt = self.chi\n self.xs_opt = xs_opt\n self.ps_opt = ps_opt\n self.Ws_opt = Ws_opt\n self.chi_opt = chi_opt\n self.nu_opt = nu_opt\n if self.iEC == \"est\":\n self.Jcv_opt = (self.d1_over*self.b_over*np.sqrt(chi_opt)\\\n +self.d2_over*self.c_over*self.g_over*nu_opt)/alp\n print(\"Optimal steady-state estimation error =\",\\\n \"{:.2f}\".format(self.Jcv_opt))\n elif self.iEC == \"con\":\n self.Jcv_opt = self.d1_over*self.b_over*np.sqrt(chi_opt)/alp\n print(\"Optimal steady-state tracking error =\",\\\n \"{:.2f}\".format(self.Jcv_opt))\n else:\n raise ValueError('Invalid iEC: iEC = \"est\" or \"con\"')\n self.M2cholM()\n path = \"models/optvals/\"+self.fname\n if os.path.exists(path) == False:\n try:\n os.makedirs(path)\n except: \n raise OSError(\"Creation of directory %s failed\" %path)\n else:\n print (\"Successfully created directory %s \" %path)\n else:\n print (\"Directory %s already exists\" %path)\n np.save(path+\"/alp_opt.npy\",alp)\n np.save(path+\"/chi_opt.npy\",self.chi_opt)\n np.save(path+\"/nu_opt.npy\",self.nu_opt)\n np.save(path+\"/Jcv_opt.npy\",self.Jcv_opt)\n print(\"========================================================\")\n print(\"==== SAMPLING OF CONTRACTION METRICS BY CV-STEM END ====\")\n print(\"========================================================\\n\\n\")\n pass",
"def _set_leg_params(self):\n self.p = 0.01600\n self.q = 0.00000\n self.r = 0.02000\n self.c = 0.01811\n self.u = 0.00000\n self.v = 0.00000\n self.e = -0.06000\n self.h = -0.02820\n self.s = 0.02200\n self.d1 = 0.0\n self.d2 = 0.0\n self.d3 = 0.0\n self.stability = 0.0",
"def _fit_point_lens(self):\n\n def chi2_fun(theta, event, parameters_to_fit):\n \"\"\"\n for a given event set attributes from parameters_to_fit\n (list of str) to values from theta list\n \"\"\"\n for (key, val) in enumerate(parameters_to_fit):\n setattr(event.model.parameters, val, theta[key])\n chi2 = event.get_chi2()\n if chi2 < chi2_fun.best_chi2:\n chi2_fun.best_chi2 = chi2\n return chi2\n chi2_fun.best_chi2 = 1.e10\n\n def jacobian(theta, event, parameters_to_fit):\n \"\"\"\n Calculate chi^2 gradient (also called Jacobian).\n \"\"\"\n for (key, val) in enumerate(parameters_to_fit):\n setattr(event.model.parameters, val, theta[key])\n return event.chi2_gradient(parameters_to_fit)\n\n if self._event_PSPL is None:\n self._set_event_PSPL()\n\n parameters_to_fit = [\"t_0\", \"u_0\", \"t_E\"]\n initial_guess = [self._parameters[p] for p in parameters_to_fit]\n\n failed = False\n try:\n result = op.minimize(\n chi2_fun, x0=initial_guess,\n args=(self._event_PSPL, parameters_to_fit),\n method='Newton-CG', jac=jacobian, tol=3.e-4)\n except:\n failed = True\n\n if failed:\n try:\n result = op.minimize(\n chi2_fun, x0=initial_guess,\n args=(self._event_PSPL, parameters_to_fit),\n method='Newton-CG', jac=jacobian, tol=3.e-4)\n except:\n pass\n# XXX what if fit failed (i.e., .success is False)?\n\n self._LSST_PSPL_chi2 = chi2_fun.best_chi2",
"def initialize(self, es):\n self.disregard_length_setting = True if es.opts['CSA_disregard_length'] else False\n if es.opts['CSA_clip_length_value'] is not None:\n try:\n if len(es.opts['CSA_clip_length_value']) == 0:\n es.opts['CSA_clip_length_value'] = [-np.Inf, np.Inf]\n elif len(es.opts['CSA_clip_length_value']) == 1:\n es.opts['CSA_clip_length_value'] = [-np.Inf, es.opts['CSA_clip_length_value'][0]]\n elif len(es.opts['CSA_clip_length_value']) == 2:\n es.opts['CSA_clip_length_value'] = np.sort(es.opts['CSA_clip_length_value'])\n else:\n raise ValueError('option CSA_clip_length_value should be a number of len(.) in [1,2]')\n except TypeError: # len(...) failed\n es.opts['CSA_clip_length_value'] = [-np.Inf, es.opts['CSA_clip_length_value']]\n es.opts['CSA_clip_length_value'] = list(np.sort(es.opts['CSA_clip_length_value']))\n if es.opts['CSA_clip_length_value'][0] > 0 or es.opts['CSA_clip_length_value'][1] < 0:\n raise ValueError('option CSA_clip_length_value must be a single positive or a negative and a positive number')\n ## meta_parameters.cs_exponent == 1.0\n b = 1.0\n ## meta_parameters.cs_multiplier == 1.0\n self.cs = 1.0 * (es.sp.weights.mueff + 2)**b / (es.N**b + (es.sp.weights.mueff + 3)**b)\n\n self.damps = es.opts['CSA_dampfac'] * (0.5 +\n 0.5 * min([1, (es.sp.lam_mirr / (0.159 * es.sp.popsize) - 1)**2])**1 +\n 2 * max([0, ((es.sp.weights.mueff - 1) / (es.N + 1))**es.opts['CSA_damp_mueff_exponent'] - 1]) +\n self.cs\n )\n self.max_delta_log_sigma = 1 # in symmetric use (strict lower bound is -cs/damps anyway)\n\n if self.disregard_length_setting:\n es.opts['CSA_clip_length_value'] = [0, 0]\n ## meta_parameters.cs_exponent == 1.0\n b = 1.0 * 0.5\n ## meta_parameters.cs_multiplier == 1.0\n self.cs = 1.0 * (es.sp.weights.mueff + 1)**b / (es.N**b + 2 * es.sp.weights.mueff**b)\n self.damps = es.opts['CSA_dampfac'] * 1 # * (1.1 - 1/(es.N+1)**0.5)\n if es.opts['verbose'] > 1:\n print('CMAAdaptSigmaCSA Parameters: ')\n for k, v in self.__dict__.items():\n print(' ', k, ':', v)\n self.ps = np.zeros(es.N)\n self._ps_updated_iteration = -1\n self.is_initialized = True",
"def set_parameters(pars):\n y0=[]\n fun=None \n state_evol=None\n if pars[\"state_law\"]==0:\n state_evol=state_evol_d\n elif pars[\"state_law\"]==1:\n state_evol=state_evol_r\n elif pars[\"state_law\"]==2:\n state_evol=state_evol_p\n elif pars[\"state_law\"]==3:\n state_evol=state_evol_n\n \n if pars[\"model\"]==0:\n y0 = [pars[\"Vpl\"]*0.9,0.1,pars[\"sigma1\"]]\n fun = fun_qds\n damping = pars[\"nu\"]\n \n if pars[\"model\"]==1:\n y0 = [pars[\"Vpl\"]*0.9, 0.1,pars[\"sigma1\"],pars[\"sigma1\"]*pars[\"f0\"]]\n fun = fun_fds\n damping = pars[\"m\"]\n\n if pars[\"model\"]==2:\n y0 = [pars[\"Vpl\"]*0.99,pars[\"Vpl\"], pars[\"Vpl\"],0.1,pars[\"sigma1\"],pars[\"sigma2\"]]\n fun= fun_qdc\n damping = pars[\"nu\"]\n\n if pars[\"model\"]==3:\n y0 = [pars[\"Vpl\"]*1.1,pars[\"Vpl\"], pars[\"Vpl\"],0.0,pars[\"sigma1\"],pars[\"sigma2\"],pars[\"sigma1\"]*pars[\"f0\"]]\n fun = fun_fdc\n damping = pars[\"m\"]\n\n return (np.array(y0), state_evol, fun, damping)",
"def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Rdy[1.,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rqcd[1,0.0,10.0]\");\n self.modelBuilder.doSet(\"POI\",\"Rdy,Rqcd\")",
"def BestFit(self,initialParameterValues = None, method = None, fixedParams=None):\n\n if fixedParams:\n if not isinstance(fixedParams, list):\n fixedParams=[fixedParams]\n #Check now if the name is correct\n l_index=[]\n for index, par in enumerate(fixedParams):\n pName, pValue = par\n if pName not in self.theory.parameterNameList0:\n print \"%s is not a valid name. Ignored\" %pName\n l_index.append(index)\n if l_index:\n for i in l_index:\n fixedParams.pop(i)\n\n self.theory.SetFixedParams(fixedParams)\n\n if initialParameterValues is None:\n initialParameterValues = self.theory.initialParameterValues\n #d = numpy.ones(len(initialParamaeterValues))\n start_time = time.time()\n if method is None or method == 'lm':\n out = scipy.optimize.minpack.leastsq(self.Residual,initialParameterValues,full_output=1, ftol=1.e-16)\n elif method == 'boldAccel':\n initialParameterValues=numpy.array(initialParameterValues)\n out = BoldAccel.leastsq(self.Residual,None,initialParameterValues,gtol=1e-8,xtol=1.49e-8,ftol=1e-16,full_output=1,ibold=0,verbose=True)\n elif method == 'bold':\n initialParameterValues = numpy.array(initialParameterValues)\n out = Bold.leastsq(self.Residual,None,initialParameterValues,gtol=1e-8,xtol=1.49e-8,ftol=1e-16,full_output=1,ibold=0,verbose=True)\n #out = minpack.leastsq(self.Residual,self.AnalyJac,initialParameterValues,gtol=1e-8,xtol=1.49e-8,ftol=1e-16,full_output=1,Cgoal=4.e04)\n elif method == 'lm_accel':\n initialParameterValues=numpy.array(initialParameterValues)\n out = numrec.leastsq(self.Residual,self.AnalyJac,initialParameterValues,full_output=1,verbose=True, flags=[],maxfev=500)\n else:\n print \"fitting method is not included\"\n out = None\n end_time = time.time()\n print \"fitting took (mins)\", (end_time-start_time)/60.\n print \"number of function evals:\", f_counter\n \n if fixedParams:\n outputParameterValues = self.MergeFixedAndVariableParams(fixedParams,out[0])\n self.theory.SetFixedParams()\n else:\n outputParameterValues = out[0]\n\n\n return outputParameterValues, out",
"def makeFit(self):\n if not self.fitModel.params:\n return\n cs = self.spectrum\n self.worker.make_model_curve(cs, allData=csi.allLoadedItems)\n\n dfparams = cs.fitParams\n lcfRes = dfparams['lcf_result']\n self.fitR.setText('R={0:.5g}'.format(lcfRes['R']))\n self.updateFitResults()\n self.fitReady.emit()",
"def BestFit(self,initialParameterValues=None, method=None , fixedParams=None):\n\n if fixedParams:\n if not isinstance(fixedParams, list):\n fixedParams=[fixedParams]\n #Check now if the name is correct\n l_index=[]\n for index, par in enumerate(fixedParams):\n pName, pValue = par\n if pName not in self.theory.parameterNameList0:\n print \"%s is not a valid name. Ignored\" %pName\n l_index.append(index)\n if l_index:\n for i in l_index:\n fixedParams.pop(i)\n self.SetFixedParams(fixedParams)\n\n if initialParameterValues is None:\n initialParameterValues = self.theory.initialParameterValues\n #d = numpy.ones(len(initialParameterValues))\n start_time = time.time()\n if method is None or method == 'lm':\n out = minpack.leastsq(self.Residual,initialParameterValues,full_output=1, ftol=1.e-16)\n elif method == 'boldAccel':\n initialParameterValues=numpy.array(initialParameterValues)\n out = BoldAccel.leastsq(self.Residual,None,initialParameterValues,gtol=1e-8,xtol=1.49e-8,ftol=1e-16,full_output=1,ibold=0,verbose=True)\n elif method == 'bold':\n initialParameterValues = numpy.array(initialParameterValues)\n out = Bold.leastsq(self.Residual,None,initialParameterValues,gtol=1e-8,xtol=1.49e-8,ftol=1e-16,full_output=1,ibold=0,verbose=True)\n #out = minpack.leastsq(self.Residual,self.AnalyJac,initialParameterValues,gtol=1e-8,xtol=1.49e-8,ftol=1e-16,full_output=1,Cgoal=4.e04)\n elif method == 'lm_accel':\n initialParameterValues=numpy.array(initialParameterValues)\n out = numrec.leastsq(self.Residual,self.AnalyJac,initialParameterValues,full_output=1,verbose=True, flags=[],maxfev=500)\n else:\n print \"fitting method is not included\"\n end_time = time.time()\n print \"fitting took time (mins): \", (end_time-start_time)/60.\n print \"number of function_calls:\", f_counter\n \n if fixedParams:\n outputParameterValues = self.MergeFixedAndVariableParams(fixedParams,out[0])\n else:\n outputParameterValues = out[0]\n\n return outputParameterValues, out",
"def _set_model_parameters(self, verbose=False):\n from scipy.special import gamma\n\n z0 = self.z0\n\n # set parameters that are constants\n p_v, d_v, cs0, sigma, vout0 = (1, 2, 6.7, 0.1, 25.0)\n p_vB, d_vB, Mach0, p_M, d_M = (4, 2, 0.5, 1, 3)\n\n # calculate amplitudes that make the pdf integrate to 1\n A_v = np.log(10)*p_v/gamma(d_v/p_v)\n A_cs = np.log(10)/np.sqrt(2*np.pi)/sigma\n A_vB = np.log(10)*p_vB/gamma(d_vB/p_vB)\n A_M = np.log(10)*p_M/gamma(d_M/p_M)\n\n # store them in dictionaries\n self.cool_params = dict(A_v=A_v, p_v=p_v, d_v=d_v,\n A_cs=A_cs, cs0=cs0, sigma=sigma, vout0=vout0)\n self.hot_params = dict(A_vB=A_vB, p_vB=p_vB, d_vB=d_vB,\n A_M=A_M, Mach0=Mach0,p_M=p_M,d_M=d_M)\n # SN related parameters that set the reference values for loading factors\n self.params = dict(Esn=1.e51*au.erg, mstar=95.5*au.M_sun, vcool=200*au.km/au.s,\n Mej=10.*au.M_sun, ZSN=0.2, ZISM0=0.02)\n self.params['vej'] = np.sqrt(2.0*self.params['Esn']/self.params['Mej']).to('km/s')\n self.ref_params = dict(Mref=self.params['mstar'],\n pref=self.params['Esn']/(2*self.params['vcool']),\n Eref=self.params['Esn'],\n Zref=self.params['Mej']*self.params['ZSN'])\n\n # coefficients used in conversion from mass to other PDFs\n self.vp = (self.ref_params['pref']/self.params['mstar']).to('km/s').value\n self.vE = np.sqrt(self.ref_params['Eref']/self.params['mstar']).to('km/s').value\n self.Ze = (self.ref_params['Zref']/self.params['mstar']).cgs.value\n\n # parameters for scaling relations from Paper~I\n a = np.array(fit_alpha[z0])\n b = np.array(fit_beta[z0])\n\n self.scaling_params = dict(a=a, b=b)\n if z0 == '2H':\n self.cool_params['vout0'] = 45\n self.cool_params['cs0'] = 7.5\n elif z0 == '500':\n self.cool_params['vout0'] = 45\n self.cool_params['cs0'] = 8.5\n elif z0 == '1000':\n self.cool_params['vout0'] = 60\n self.cool_params['cs0'] = 10.0\n self.scaling_params['A'] = np.round(10.**(np.array(self.scaling_params['a'])),2)\n self.scaling_params['p'] = 1.+np.array(self.scaling_params['b'])\n self.enum=dict(M_cool=0, M_int=1, M_hot=2, M_total=3,\n p_cool=4, p_int=5, p_hot=6, p_total=7,\n E_cool=8, E_int=9, E_hot=10, E_total=11,\n Z_cool=12, Z_int=13, Z_hot=14, Z_total=15)\n\n # print parameters\n if verbose:\n self.show_parameters()",
"def fit(self, samples, values, nopt=None, corr_model_params=None):\n from scipy.linalg import cholesky\n\n if self.verbose:\n print('UQpy: Running Kriging.fit')\n\n def log_likelihood(p0, cm, s, f, y):\n # Return the log-likelihood function and it's gradient. Gradient is calculate using Central Difference\n m = s.shape[0]\n n = s.shape[1]\n r__, dr_ = cm(x=s, s=s, params=p0, dt=True)\n try:\n cc = cholesky(r__ + 2 ** (-52) * np.eye(m), lower=True)\n except np.linalg.LinAlgError:\n return np.inf, np.zeros(n)\n\n # Product of diagonal terms is negligible sometimes, even when cc exists.\n if np.prod(np.diagonal(cc)) == 0:\n return np.inf, np.zeros(n)\n\n cc_inv = np.linalg.inv(cc)\n r_inv = np.matmul(cc_inv.T, cc_inv)\n f__ = cc_inv.dot(f)\n y__ = cc_inv.dot(y)\n\n q__, g__ = np.linalg.qr(f__) # Eq: 3.11, DACE\n\n # Check if F is a full rank matrix\n if np.linalg.matrix_rank(g__) != min(np.size(f__, 0), np.size(f__, 1)):\n raise NotImplementedError(\"Chosen regression functions are not sufficiently linearly independent\")\n\n # Design parameters\n beta_ = np.linalg.solve(g__, np.matmul(np.transpose(q__), y__))\n\n # Computing the process variance (Eq: 3.13, DACE)\n sigma_ = np.zeros(y.shape[1])\n\n ll = 0\n for out_dim in range(y.shape[1]):\n sigma_[out_dim] = (1 / m) * (np.linalg.norm(y__[:, out_dim] - np.matmul(f__, beta_[:, out_dim])) ** 2)\n # Objective function:= log(det(sigma**2 * R)) + constant\n ll = ll + (np.log(np.linalg.det(sigma_[out_dim] * r__)) + m * (np.log(2 * np.pi) + 1)) / 2\n\n # Gradient of loglikelihood\n # Reference: C. E. Rasmussen & C. K. I. Williams, Gaussian Processes for Machine Learning, the MIT Press,\n # 2006, ISBN 026218253X. (Page 114, Eq.(5.9))\n residual = y - np.matmul(f, beta_)\n gamma = np.matmul(r_inv, residual)\n grad_mle = np.zeros(n)\n for in_dim in range(n):\n r_inv_derivative = np.matmul(r_inv, np.matmul(dr_[:, :, in_dim], r_inv))\n tmp = np.matmul(residual.T, np.matmul(r_inv_derivative, residual))\n for out_dim in range(y.shape[1]):\n alpha = gamma / sigma_[out_dim]\n tmp1 = np.matmul(alpha, alpha.T) - r_inv / sigma_[out_dim]\n cov_der = sigma_[out_dim] * dr_[:, :, in_dim] + tmp * r__ / m\n grad_mle[in_dim] = grad_mle[in_dim] - 0.5 * np.trace(np.matmul(tmp1, cov_der))\n\n return ll, grad_mle\n\n if nopt is not None:\n self.nopt = nopt\n if corr_model_params is not None:\n self.corr_model_params = corr_model_params\n self.samples = np.array(samples)\n\n # Number of samples and dimensions of samples and values\n nsamples, input_dim = self.samples.shape\n output_dim = int(np.size(values) / nsamples)\n\n self.values = np.array(values).reshape(nsamples, output_dim)\n\n # Normalizing the data\n if self.normalize:\n self.sample_mean, self.sample_std = np.mean(self.samples, 0), np.std(self.samples, 0)\n self.value_mean, self.value_std = np.mean(self.values, 0), np.std(self.values, 0)\n s_ = (self.samples - self.sample_mean) / self.sample_std\n y_ = (self.values - self.value_mean) / self.value_std\n else:\n s_ = self.samples\n y_ = self.values\n\n self.F, jf_ = self.reg_model(s_)\n\n # Maximum Likelihood Estimation : Solving optimization problem to calculate hyperparameters\n if self.op:\n starting_point = self.corr_model_params\n minimizer, fun_value = np.zeros([self.nopt, input_dim]), np.zeros([self.nopt, 1])\n for i__ in range(self.nopt):\n p_ = self.optimizer(log_likelihood, starting_point, args=(self.corr_model, s_, self.F, y_),\n **self.kwargs_optimizer)\n minimizer[i__, :] = p_[0]\n fun_value[i__, 0] = p_[1]\n # Generating new starting points using 
log-uniform distribution\n if i__ != self.nopt - 1:\n starting_point = stats.reciprocal.rvs([j[0] for j in self.bounds], [j[1] for j in self.bounds], 1,\n random_state=self.random_state)\n if min(fun_value) == np.inf:\n raise NotImplementedError(\"Maximum likelihood estimator failed: Choose different starting point or \"\n \"increase nopt\")\n t = np.argmin(fun_value)\n self.corr_model_params = minimizer[t, :]\n\n # Updated Correlation matrix corresponding to MLE estimates of hyperparameters\n self.R = self.corr_model(x=s_, s=s_, params=self.corr_model_params)\n # Compute the regression coefficient (solving this linear equation: F * beta = Y)\n c = np.linalg.cholesky(self.R) # Eq: 3.8, DACE\n c_inv = np.linalg.inv(c)\n f_dash = np.linalg.solve(c, self.F)\n y_dash = np.linalg.solve(c, y_)\n q_, g_ = np.linalg.qr(f_dash) # Eq: 3.11, DACE\n # Check if F is a full rank matrix\n if np.linalg.matrix_rank(g_) != min(np.size(self.F, 0), np.size(self.F, 1)):\n raise NotImplementedError(\"Chosen regression functions are not sufficiently linearly independent\")\n # Design parameters (beta: regression coefficient)\n self.beta = np.linalg.solve(g_, np.matmul(np.transpose(q_), y_dash))\n\n # Design parameter (R * gamma = Y - F * beta = residual)\n self.gamma = np.linalg.solve(c.T, (y_dash - np.matmul(f_dash, self.beta)))\n\n # Computing the process variance (Eq: 3.13, DACE)\n self.err_var = np.zeros(output_dim)\n for i in range(output_dim):\n self.err_var[i] = (1 / nsamples) * (np.linalg.norm(y_dash[:, i] - np.matmul(f_dash, self.beta[:, i])) ** 2)\n\n self.F_dash, self.C_inv, self.G = f_dash, c_inv, g_\n\n if self.verbose:\n print('UQpy: Kriging fit complete.')",
"def test_linear_fit_model_set_fixed_parameter(self):\n init_model = models.Polynomial1D(degree=2, c1=[1, -2], n_models=2)\n init_model.c1.fixed = True\n\n x = np.arange(10)\n yy = np.array([2 + x + 0.5 * x * x, -2 * x])\n\n fitter = LinearLSQFitter()\n fitted_model = fitter(init_model, x, yy)\n\n assert_allclose(fitted_model.c0, [2.0, 0.0], atol=1e-14)\n assert_allclose(fitted_model.c1, [1.0, -2.0], atol=1e-14)\n assert_allclose(fitted_model.c2, [0.5, 0.0], atol=1e-14)",
"def __SetSFParams(self):\n\n # If radial structure functions are in output\n if self.__containsRadial:\n # Defines radial attributes\n self.__nc_RSoft_O.radial_error_tolerance = self.etol_radial\n\n # Defines radial dimensions\n self.__nc_RSoft_O.createDimension('radial_structure_functions',\\\n len(self.mus))\n\n # Defines radial variables\n mus_var_id_O = self.__nc_RSoft_O.createVariable('mus', \\\n 'f4', ('radial_structure_functions'))\n Ls_var_id_O = self.__nc_RSoft_O.createVariable('Ls', \\\n 'f4', ('radial_structure_functions'))\n radial_Xs_var_id_O = self.__nc_RSoft_O.createVariable(\\\n 'radial_Xs', 'i4', ('radial_structure_functions'))\n radial_Ys_var_id_O = self.__nc_RSoft_O.createVariable(\\\n 'radial_Ys', 'i4', ('radial_structure_functions'))\n\n # Sets radial structure function variables\n mus_var_id_O[:] = self.mus\n Ls_var_id_O[:] = self.Ls\n radial_Xs_var_id_O[:] = self.radial_Xs\n radial_Ys_var_id_O[:] = self.radial_Ys\n\n # If angular structure functions are in output\n if self.__containsAngular:\n # Defines angular attributes\n self.__nc_RSoft_O.angular_error_tolerance = self.etol_angular\n\n # Defines angular dimensions\n self.__nc_RSoft_O.createDimension('angular_structure_functions',\\\n len(self.xis))\n\n # Defines angular variables\n xis_var_id_O = self.__nc_RSoft_O.createVariable('xis', \\\n 'f4', ('angular_structure_functions'))\n zetas_var_id_O = self.__nc_RSoft_O.createVariable('zetas', \\\n 'i4', ('angular_structure_functions'))\n lambdas_var_id_O = self.__nc_RSoft_O.createVariable('lambdas', \\\n 'i4', ('angular_structure_functions'))\n angular_Xs_var_id_O = self.__nc_RSoft_O.createVariable(\\\n 'angular_Xs', 'i4', ('angular_structure_functions'))\n angular_Ys_var_id_O = self.__nc_RSoft_O.createVariable(\\\n 'angular_Ys', 'i4', ('angular_structure_functions'))\n angular_Zs_var_id_O = self.__nc_RSoft_O.createVariable(\\\n 'angular_Zs', 'i4', ('angular_structure_functions')) \n\n # Sets angular structure function variables\n xis_var_id_O[:] = self.xis\n zetas_var_id_O[:] = self.zetas\n lambdas_var_id_O[:] = self.lambdas\n angular_Xs_var_id_O[:] = self.angular_Xs\n angular_Ys_var_id_O[:] = self.angular_Ys\n angular_Zs_var_id_O[:] = self.angular_Zs",
"def calibrate(self, master):\n if master.polyorder == 'linear':\n self.fitfunction = \"A0 + A1 * D\"\n self.fit_fkt = self.calc_lin\n elif master.polyorder == 'quadratic':\n self.fit_fkt = self.calc_quad\n self.fitfunction = \"A0 + A1 * D + A2 * D**2\"\n elif master.polyorder == \"cubic\":\n self.fitfunction = \"A0 + A1 * D + A2 * D**2 + A3 * D**3\"\n self.fit_fkt = self.calc_cubic\n else:\n print(\"Polynomgrad nicht definiert\")\n \n self.mw = np.asarray(self.mw)\n if master.sensortype == \"Druck\":\n self.best, self.covar = curve_fit(self.fit_fkt, self.mw, master.Referencedata.caldat)\n else:\n print(\"Sensortyp noch nicht Hinterlegt\")",
"def fit(az, alt, data, lmax, label=None, degrees=True, realOnly=True):\n \n terms = mathutil.sphfit(az, alt, data, lmax=lmax, degrees=degrees, realOnly=realOnly)\n fit = mathutil.sphval(terms, az, alt, degrees=degrees, realOnly=realOnly)\n diff = data - fit\n \n if label is not None:\n print \" \"+str(label)\n print \" Peak Differences:\", data.max(), fit.max()\n print \" Model Differences:\", diff.min(), diff.mean(), diff.max()\n print \" Model RMS:\", (diff**2).sum()\n \n return terms",
"def fit(self, resonance_file, experiment, out_paths):\n # Set up temporary file names #\n inp = temp_file_gen('Sammy_fit','inp')\n par = temp_file_gen('Sammy_fit','par')\n cov = temp_file_gen('Sammy_fit','cov')\n ndf = temp_file_gen('Sammy_fit','ndf')\n parout = temp_file_gen('Sammy_fit','out.par')\n covout = temp_file_gen('Sammy_fit','out.cov')\n #\n # Construct SAMMY input using resonance_file and information about the #\n # 'experiment' #\n self.endf2inp_par_ndf(resonance_file, [inp, par, ndf], \n experiment[1], flag_all = True)\n #\n # Change from MLBW formalism if this was in original file. #\n # Reich-Moore will be used instead, which is recommended. #\n self.modify_inp(inp, keyremove = ['mlbw formalism is wanted'])\n #\n # Fit to total cross section data without prior #\n message = self.g_least_squares(inp, par, experiment['total'],\n parout, covout)\n shutil.move(parout, par)\n shutil.move(covout, cov)\n #\n # Check if convergence was reached. Otherwise, something is bad. #\n if message[:len('Did not converge')] == 'Did not converge':\n raise RuntimeError(message)\n #\n # Perform a Beyesian update using capture data\n self.bayesian([inp, par, cov], experiment['capture'], [parout, covout])\n #\n # Construct ENDF formatted files from output #\n self.inp_par_ndf_cov2endfs([inp, parout, ndf, covout], out_paths)\n #\n # Include ENDF file paths in ResonanceFile instance to return\n resonance_file_out = ResonanceFile(out_paths[0], resonance_file.nuclide)\n resonance_file_out.cov = ResonanceCovFile(out_paths[1])\n #\n # Clean up\n if self.cleanup:\n for p in [inp, par, cov, ndf, parout, covout]: os.remove(p)\n #\n return resonance_file_out",
"def fit_altscan_position(self,data,scan_maps):\n fname = data.filename.split('/')[-1]\n\n # We do Jupiter in the Az/El frame but celestial in sky frame\n if not 0 in self.feedlist:\n return \n self.model.set_fixed(**{})\n\n def limfunc(P):\n A,x0,sigx,y0,sigy,phi,B = P\n if (sigx < 0) | (sigy < 0):\n return True\n if (phi < -np.pi/2.) | (phi >= np.pi/2.):\n return True\n return False\n\n self.alt_scan_parameters = self.model.get_param_names()\n self.alt_scan_fits ={'CW':{'Values':np.zeros((self.model.nparams)),\n 'Errors':np.zeros((self.model.nparams)),\n 'Chi2': np.zeros((2))},\n 'CCW':{'Values':np.zeros((self.model.nparams)),\n 'Errors':np.zeros((self.model.nparams)),\n 'Chi2': np.zeros(2)}}\n for key in ['CW','CCW']:\n m,c,x,y,P0 = self.prepare_maps(scan_maps[key]['map'],scan_maps[key]['cov'],scan_maps[key]['xygrid'])\n\n freq = 30\n P0_priors = self.get_fwhm_prior(freq,1)\n # Perform the least-sqaures fit\n try:\n result, error,samples,min_chi2,ddof = self.model(P0, (x,y), m, c,\n P0_priors=P0_priors,return_array=True)\n self.alt_scan_fits[key]['Values'][:] = result\n self.alt_scan_fits[key]['Errors'][:] = error\n self.alt_scan_fits[key]['Chi2'][:] = min_chi2,ddof\n\n except ValueError as e:\n try:\n self.logger(f'{fname}:emcee:{e}',error=e)\n except TypeError:\n self.logger(f'{fname}:emcee:{e}')",
"def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"mAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"mA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doSet(\"POI\",\"eAfb,mAfb\")\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')",
"def config_specific_par(self, scifile, inp_par=None):\n # Start with instrument wide\n par = super().config_specific_par(scifile, inp_par=inp_par)\n\n if self.get_meta_value(scifile, 'idname') == 'OsirisMOS':\n par['reduce']['findobj']['find_trim_edge'] = [1,1]\n par['calibrations']['slitedges']['sync_predict'] = 'pca'\n par['calibrations']['slitedges']['det_buffer'] = 1\n elif self.get_meta_value(scifile, 'idname') == 'OsirisLongSlitSpectroscopy':\n # Do not tweak the slit edges for longslit\n par['calibrations']['flatfield']['tweak_slits'] = False\n\n # Wavelength calibration and setup-dependent parameters\n if self.get_meta_value(scifile, 'dispname') == 'R300B':\n par['calibrations']['wavelengths']['lamps'] = ['XeI,HgI,NeI']\n par['calibrations']['wavelengths']['reid_arxiv'] = 'gtc_osiris_R300B.fits'\n par['reduce']['findobj']['find_min_max'] = [750, 2051]\n par['calibrations']['slitedges']['det_min_spec_length'] = 0.25\n par['calibrations']['slitedges']['fit_min_spec_length'] = 0.25\n par['calibrations']['slitedges']['smash_range'] = [0.38, 0.62]\n par['calibrations']['flatfield']['slit_illum_finecorr'] = False\n par['reduce']['cube']['wave_min'] = 3600.0\n par['reduce']['cube']['wave_max'] = 7200.0\n elif self.get_meta_value(scifile, 'dispname') == 'R300R':\n par['calibrations']['wavelengths']['lamps'] = ['XeI,HgI,NeI']\n par['calibrations']['wavelengths']['reid_arxiv'] = 'gtc_osiris_R300R.fits'\n par['reduce']['findobj']['find_min_max'] = [750, 2051]\n par['calibrations']['slitedges']['det_min_spec_length'] = 0.25\n par['calibrations']['slitedges']['fit_min_spec_length'] = 0.25\n par['calibrations']['slitedges']['smash_range'] = [0.38, 0.62]\n par['calibrations']['flatfield']['slit_illum_finecorr'] = False\n par['reduce']['cube']['wave_min'] = 4800.0\n par['reduce']['cube']['wave_max'] = 10000.0\n elif self.get_meta_value(scifile, 'dispname') == 'R500B':\n par['calibrations']['wavelengths']['lamps'] = ['HgI,NeI']\n par['calibrations']['wavelengths']['reid_arxiv'] = 'gtc_osiris_R500B.fits'\n par['reduce']['findobj']['find_min_max'] = [500, 2051]\n par['reduce']['cube']['wave_min'] = 3600.0\n par['reduce']['cube']['wave_max'] = 7200.0\n elif self.get_meta_value(scifile, 'dispname') == 'R500R':\n par['calibrations']['wavelengths']['lamps'] = ['XeI,HgI,NeI']\n par['calibrations']['wavelengths']['reid_arxiv'] = 'gtc_osiris_R500R.fits'\n par['reduce']['findobj']['find_min_max'] = [450, 2051]\n par['reduce']['cube']['wave_min'] = 4800.0\n par['reduce']['cube']['wave_max'] = 10000.0\n elif self.get_meta_value(scifile, 'dispname') == 'R1000B':\n par['calibrations']['wavelengths']['lamps'] = ['ArI,HgI,NeI']\n par['calibrations']['wavelengths']['reid_arxiv'] = 'gtc_osiris_R1000B.fits'\n elif self.get_meta_value(scifile, 'dispname') == 'R1000R':\n par['calibrations']['wavelengths']['lamps'] = ['XeI,HgI,NeI']\n par['calibrations']['wavelengths']['reid_arxiv'] = 'gtc_osiris_R1000R.fits'\n elif self.get_meta_value(scifile, 'dispname') == 'R2000B':\n par['calibrations']['wavelengths']['fwhm'] = 15.0\n par['calibrations']['wavelengths']['lamps'] = ['XeI,HgI,NeI']\n par['calibrations']['wavelengths']['reid_arxiv'] = 'gtc_osiris_R2000B.fits'\n elif self.get_meta_value(scifile, 'dispname') == 'R2500U':\n par['calibrations']['wavelengths']['lamps'] = ['XeI,HgI']\n par['calibrations']['wavelengths']['reid_arxiv'] = 'gtc_osiris_R2500U.fits'\n elif self.get_meta_value(scifile, 'dispname') == 'R2500V':\n par['calibrations']['wavelengths']['lamps'] = ['HgI','NeI','XeI']\n 
par['calibrations']['wavelengths']['reid_arxiv'] = 'gtc_osiris_R2500V.fits'\n elif self.get_meta_value(scifile, 'dispname') == 'R2500R':\n par['calibrations']['wavelengths']['lamps'] = ['ArI,HgI,NeI']\n par['calibrations']['wavelengths']['reid_arxiv'] = 'gtc_osiris_R2500R.fits'\n elif self.get_meta_value(scifile, 'dispname') == 'R2500I':\n par['calibrations']['wavelengths']['lamps'] = ['ArI,XeI,NeI']\n par['calibrations']['wavelengths']['reid_arxiv'] = 'gtc_osiris_R2500I.fits'\n par['sensfunc']['algorithm'] = 'IR'\n par['sensfunc']['IR']['telgridfile'] = \"TelFit_MaunaKea_3100_26100_R20000.fits\"\n else:\n msgs.warn('gtc_osiris.py: template arc missing for this grism! Trying holy-grail...')\n par['calibrations']['wavelengths']['method'] = 'holy-grail'\n\n # Return\n return par",
"def SetRSoftSF(self, etol_rad = None, mus = None, Ls = None, \\\n radial_Xs = None, radial_Ys = None, etol_ang = None, \\\n xis = None, lambdas = None, zetas = None, angular_Xs = None, \\\n angular_Ys = None, angular_Zs = None):\n # Initializes global cutoff radius\n Rc_global = 0\n\n # Checks if any radial inputs used. If so, if any parameters are \n # not None then throws an error assuming the user is confused. \n # Checks all inputs are valid.\n if any(v is None for v in [etol_rad, mus, Ls, radial_Xs, radial_Ys]):\n if any(v is not None for v in (etol_rad, mus, Ls, radial_Xs, \\\n radial_Ys)):\n print('ERROR: If radial structure functions are used, must ')\n print(' supply etol_rad, mus, Ls, radial_Xs, radial_Ys ')\n print(' to SetRSoftSF')\n sys.exit(-1)\n else:\n \n # Marks that it contains radial structure functions\n self.__containsRadial = True \n\n # Initializes radial structure function variables\n if etol_rad > 0 and etol_rad < 1: \n self.etol_radial = etol_rad\n else:\n print('ERROR: 0 < etol_rad < 1 used in SetRSoftSF')\n sys.exit(-1)\n if any(len(mus) != len(arr) for arr in (Ls, radial_Xs, \\\n radial_Ys)):\n print('ERROR: Length of mus, radial_Xs, and radial_Ys in ')\n print(' SetRSoftSF must be equal')\n sys.exit(-1)\n self.mus = mus\n self.Ls = Ls \n if np.all(np.mod(radial_Xs,1)==0):\n self.radial_Xs = radial_Xs.astype(int)\n else:\n print('ERROR: radial_Xs used in SetRSoftSF must be integers')\n sys.exit(-1)\n if np.all(np.mod(radial_Ys,1)==0):\n self.radial_Ys = radial_Ys.astype(int)\n else:\n print('ERROR: radial_Ys used in SetRSoftSF must be integers')\n sys.exit(-1)\n\n # Outputs radial cut-off radii\n print('Calculating radial cutoff...')\n Rc_max = 0.0\n for SF in range(len(mus)):\n mu = mus[SF]\n L = Ls[SF]\n X = radial_Xs[SF]\n Y = radial_Ys[SF]\n Rc = mu+L*sqrt(log(1/etol_rad))\n print(' mu='+str(mu)+', L='+str(L)+', X='+str(X)+', Y='+\\\n str(Y)+' --> Rc='+str(Rc))\n if Rc > Rc_max:\n Rc_max = Rc \n print('Rc_radial='+str(Rc_max))\n print(' ')\n print('--------------------------------------------------------')\n if Rc_max > Rc_global:\n Rc_global = Rc_max\n\n # Checks if any angular inputs used. If so, if any parameters are \n # not None then throws an error assuming the user is confused. 
\n # Checks all inputs are valid.\n if any(v is None for v in [etol_ang, xis, lambdas, angular_Xs, \n angular_Ys, angular_Zs]):\n if any(v is not None for v in (etol_ang, xis, lambdas, zetas, \\\n angular_Xs, angular_Ys, angular_Zs)):\n print('ERROR: If angular structure functions are used, must ')\n print(' supply etol_ang, xis, lambdas, zetas, angular_Xs,')\n print(' angular_Ys, angular_Zs')\n print(' to SetRSoftSF')\n sys.exit(-1)\n else:\n\n # Marks that contains angular structure functions\n self.__containsAngular = True \n\n # Initializes angular structure function variables\n if etol_ang > 0 and etol_ang < 1: \n self.etol_angular = etol_ang\n else:\n print('ERROR: 0 < etol_ang < 1 used in SetRSoftSF')\n sys.exit(-1)\n if any(len(xis) != len(arr) for arr in (lambdas, zetas, \\\n angular_Xs, angular_Ys, angular_Zs)):\n print('ERROR: Length of xis, zetas, angular_Xs, angular_Ys, ')\n print(' and angular_Zs in SetRSoftSF must be equal')\n sys.exit(-1)\n self.xis = xis\n if np.all(np.abs(lambdas)==1):\n self.lambdas = lambdas\n else:\n print('ERROR: lambdas used in SetRSoftSF must be +/-1')\n sys.exit(-1)\n if np.all(np.mod(zetas,1)==0):\n self.zetas = zetas.astype(int)\n else:\n print('ERROR: angular_Xs used in SetRSoftSF must be integers')\n sys.exit(-1)\n if np.all(np.mod(angular_Xs,1)==0):\n self.angular_Xs = angular_Xs.astype(int)\n else:\n print('ERROR: angular_Xs used in SetRSoftSF must be integers')\n sys.exit(-1)\n if np.all(np.mod(angular_Ys,1)==0):\n self.angular_Ys = angular_Ys.astype(int)\n else:\n print('ERROR: angular_Ys used in SetRSoftSF must be integers')\n sys.exit(-1)\n if np.all(np.mod(angular_Zs,1)==0):\n self.angular_Zs = angular_Zs.astype(int)\n else:\n print('ERROR: angular_Zs used in SetRSoftSF must be integers')\n sys.exit(-1)\n\n # Outputs radial cut-off radii\n print('Calculating angular cutoff...')\n Rc_max = 0.0\n for SF in range(len(xis)):\n xi = xis[SF]\n l = lambdas[SF]\n zeta = zetas[SF]\n X = angular_Xs[SF]\n Y = angular_Ys[SF]\n Z = angular_Zs[SF]\n if l==1:\n Rc = xi*sqrt(2.0*log(1.0/etol_ang)/3.0)\n else:\n Rc = xi*sqrt(log(1.0/etol_ang)/2.0)\n print(' xi='+str(xi)+', lambda='+str(l)+', zeta='+str(zeta)+\\\n ', X='+str(X)+', Y='+str(Y)+', Z='+str(Z)+' --> Rc='+str(Rc))\n if Rc > Rc_max:\n Rc_max = Rc \n print('Rc_angular='+str(Rc_max))\n print(' ')\n print('--------------------------------------------------------')\n if Rc_max > Rc_global:\n Rc_global = Rc_max\n\n # Sets structure functions into netCDF file\n self.__SetSFParams()\n\n print('Rc='+str(Rc_global))",
"def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"dAfb[0.,-0.75,0.75]\");\n self.modelBuilder.doVar(\"dA0[0.0, -1.0, 1.0]\");\n #self.modelBuilder.doSet(\"POI\",\"dAfb,dA0\")\n self.modelBuilder.doSet(\"POI\",\"dAfb\")\n self.modelBuilder.factory_('expr::mAfb(\"@0+@1\",eAfb,dAfb)')\n self.modelBuilder.factory_('expr::mA0(\"(@0+@1)\",eA0,dA0)')\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')",
"def SetParams(ss, sheet, setMsg):\n if sheet == \"\":\n ss.Params.ValidateSheets(go.Slice_string([\"Network\", \"Sim\"]))\n ss.SetParamsSet(\"Base\", sheet, setMsg)\n if ss.ParamSet != \"\" and ss.ParamSet != \"Base\":\n sps = ss.ParamSet.split()\n for ps in sps:\n ss.SetParamsSet(ps, sheet, setMsg)\n if ss.Learn == LearnType.Hebbian:\n ss.SetParamsSet(\"Hebbian\", sheet, setMsg)\n elif ss.Learn == LearnType.ErrorDriven:\n ss.SetParamsSet(\"ErrorDriven\", sheet, setMsg)",
"def test_linear_fit_fixed_parameter(self):\n init_model = models.Polynomial1D(degree=2, c1=1)\n init_model.c1.fixed = True\n\n x = np.arange(10)\n y = 2 + x + 0.5 * x * x\n\n fitter = LinearLSQFitter()\n fitted_model = fitter(init_model, x, y)\n assert_allclose(fitted_model.parameters, [2.0, 1.0, 0.5], atol=1e-14)",
"def set_c_values(self,nu=3,xd=2,a12sq=1,a13sq=1,a1=0,b1=0,p1=0,\n vx=0,vy=0,vz=0,epsilon=0.,omega=1.5*np.pi,\n a2=np.pi,b2=0*np.pi,p2=0.5*np.pi,silent=False):\n \n c_target = np.zeros(18)\n c_target[0] = nu*self.sigma0_RG \n xd_abs = xd*self.sigma2_RG\n c_target[4:10] = set_fGij(xd_abs,a12sq,a13sq,a1,b1,p1)\n c_target[10:13] = np.array([vx,vy,vz])\n c_target[13:18] = set_VGij(epsilon,omega,a2,b2,p2)\n \n if silent == False:\n print (\"Constrain peak parameters: \")\n if 'f0' in self.CONS or 'full' in self.CONS: \n print (\"f0: \",\"nu = %.1f\"%nu, \"$\\sigma_0$\")\n if 'f1' in self.CONS or 'full' in self.CONS: \n print (\"f1: \",\"f1,x = f1,y = f1,z = 0\")\n if 'f2' in self.CONS or 'full' in self.CONS: \n print (\"f2: \",r\"xd = {:.1f} $\\sigma_2$, a12sq = {:.1f}, a13sq = {:.1f},a1={:.2f}, b1={:.2f}, p1={:.2f}\".format(xd,a12sq,a13sq,a1,b1,p1))\n if 'vx' in self.CONS or 'full' in self.CONS: \n print (\"vx = {:.1f} km/s\".format(vx)) \n if 'vy' in self.CONS or 'full' in self.CONS: \n print (\"vy = {:.1f} km/s\".format(vy))\n if 'vz' in self.CONS or 'full' in self.CONS: \n print (\"vz = {:.1f} km/s\".format(vz))\n if 'TG' in self.CONS or 'full' in self.CONS: \n print (\"TG: \",\"epsilon = {:.1f} km/s/Mpc, omega = {:.2f}, a2={:.2f}, b2={:.2f}, p2={:.2f}\".format(epsilon,omega,a2,b2,p2))\n \n return c_target[self.cmask]",
"def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Afb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"A0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doSet(\"POI\",\"Afb,A0\")\n\n \n self.modelBuilder.factory_('expr::Alph(\"2.0*@0/(2.0-@0)\",A0)')\n self.modelBuilder.factory_('expr::Norm(\"3.0/4.0/(2.0+@0)\",Alph)')\n self.modelBuilder.factory_('expr::RAlph(\"@0*@1\",Alph,Norm)')\n self.modelBuilder.factory_('expr::Rpl(\"(@0+@1)\",Norm,Afb)')\n self.modelBuilder.factory_('expr::Rmn(\"(@0-@1)\",Norm,Afb)')",
"def __init__(\r\n self,\r\n centre=30.0, # <- **PyAutoFit** recognises these constructor arguments\r\n normalization=1.0, # <- are the Exponential`s model parameters.\r\n rate=0.01,\r\n ):\r\n self.centre = centre\r\n self.normalization = normalization\r\n self.rate = rate",
"def default_pypeit_par(cls):\n par = super().default_pypeit_par()\n\n # Wavelengths\n # 1D wavelength solution with OH lines\n par['calibrations']['wavelengths']['rms_threshold'] = 1.0\n par['calibrations']['wavelengths']['sigdetect']=[5,10,10,10,10,20,30,30,30,30,30,10,30,30,60,30,30,10,20,30,10]\n par['calibrations']['wavelengths']['n_first']=2\n par['calibrations']['wavelengths']['n_final']=[3,3,3,2,4,4,4,3,4,4,4,3,4,4,4,4,4,4,6,6,4]\n par['calibrations']['wavelengths']['lamps'] = ['OH_FIRE_Echelle']\n #par['calibrations']['wavelengths']['nonlinear_counts'] = self.detector[0]['nonlinear'] * self.detector[0]['saturation']\n par['calibrations']['wavelengths']['method'] = 'reidentify'\n par['calibrations']['wavelengths']['cc_thresh'] = 0.35\n par['calibrations']['wavelengths']['reid_arxiv'] = 'magellan_fire_echelle.fits'\n par['calibrations']['wavelengths']['match_toler']=30.0\n\n # Echelle parameters\n par['calibrations']['wavelengths']['echelle'] = True\n# par['calibrations']['wavelengths']['ech_fix_format'] = True\n par['calibrations']['wavelengths']['ech_nspec_coeff'] = 4\n par['calibrations']['wavelengths']['ech_norder_coeff'] = 6\n par['calibrations']['wavelengths']['ech_sigrej'] = 3.0\n\n # Always correct for flexure, starting with default parameters\n par['scienceframe']['process']['sigclip'] = 20.0\n par['scienceframe']['process']['satpix'] ='nothing'\n\n # Set slits and tilts parameters\n par['calibrations']['tilts']['tracethresh'] = 5\n par['calibrations']['slitedges']['edge_thresh'] = 3.\n par['calibrations']['slitedges']['trace_thresh'] = 10.\n par['calibrations']['slitedges']['fit_order'] = 5\n par['calibrations']['slitedges']['max_shift_adj'] = 0.5\n par['calibrations']['slitedges']['fit_min_spec_length'] = 0.5\n par['calibrations']['slitedges']['left_right_pca'] = True\n par['calibrations']['slitedges']['pca_order'] = 3\n\n # Model entire slit\n par['reduce']['extraction']['model_full_slit'] = True # local sky subtraction operates on entire slit\n par['reduce']['findobj']['maxnumber_sci'] = 2 # Slit is narrow so allow one object per order\n par['reduce']['findobj']['maxnumber_std'] = 1 # Slit is narrow so allow one object per order\n\n # Processing steps\n turn_off = dict(use_illumflat=False, use_biasimage=False, use_overscan=False,\n use_darkimage=False)\n par.reset_all_processimages_par(**turn_off)\n # Do not correct for flexure\n par['flexure']['spec_method'] = 'skip'\n\n # Set the default exposure time ranges for the frame typing\n par['calibrations']['standardframe']['exprng'] = [None, 60]\n par['calibrations']['arcframe']['exprng'] = [20, None]\n par['calibrations']['darkframe']['exprng'] = [20, None]\n par['scienceframe']['exprng'] = [20, None]\n\n # Sensitivity function parameters\n # Sensitivity function parameters\n par['sensfunc']['algorithm'] = 'IR'\n par['sensfunc']['polyorder'] = 5\n par['sensfunc']['IR']['maxiter'] = 2\n # place holder for telgrid file\n par['sensfunc']['IR']['telgridfile'] = 'TelFit_LasCampanas_3100_26100_R20000.fits'\n\n return par",
"def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"rAfb[1.0,-5.0, 5.0]\");\n self.modelBuilder.doVar(\"rA0[1.0, -5.0, 5.0]\");\n self.modelBuilder.doSet(\"POI\",\"rAfb,rA0\")\n self.modelBuilder.factory_('expr::mAfb(\"@0*@1\",eAfb,rAfb)')\n self.modelBuilder.factory_('expr::mA0(\"(@0*@1)\",eA0,rA0)')\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')",
"def hrc_gain_fit_gaus(c_input):\n#\n#--- if an obsid is provided, analyize that, else get new obsids from databases\n#\n\n if mcf.chkNumeric(c_input):\n candidate_list = [c_input]\n else:\n candidate_list = arlist.hrc_gain_find_ar_lac()\n\n if len(candidate_list) > 0:\n for obsid in candidate_list:\n file = extract_hrc_evt2(obsid)\n if file == 'na':\n continue\n#\n#--- get a file name header for the later use\n#\n temp = re.split('N', file)\n hname = temp[0]\n#\n#--- extract information from the fits file header\n#\n [obsid, detnam, date_obs, date_end, tstart, tstop, ra_pnt, dec_pnt, ra_nom, dec_nom, roll_pnt, foc_len, defocus, sim_x, sim_y, sim_z] = find_header_info(file)\n#\n#--- find the diffrence between real AR Lac position and nominal postion so that we can determin how much area we should include \n#\n ra_diff = abs(ra - ra_nom) * 60.0\n dec_diff = abs(dec - dec_nom) * 60.0\n rad_diff = math.sqrt(ra_diff * ra_diff + dec_diff * dec_diff)\n\n if rad_diff < 10.0:\n fit_rad = 60.0\n else:\n fit_rad = 200.0\n#\n#--- find a location of the brightest object (assume it is AR Lac) in sky coordinates\n#\n [x, y] = find_center(file)\n#\n#--- extract pha values in the given area\n#\n pha = extract_pha(file, x, y, fit_rad)\n#\n#--- create pha count distribution\n#\n pmax = max(pha) + 1\n pha_bin = [x for x in range(0, pmax)]\n pha_hist = [0 for x in range(0, pmax)]\n\n for ent in pha:\n pha_hist[ent] += 1\n#\n#--- print out the distirbution results\n#\n outfile = data_dir + hname + '_pha.dat'\n fo = open(outfile, 'w')\n for i in range(0, pmax):\n line = str(pha_bin[i]) + '\\t' + str(pha_hist[i]) + '\\n'\n fo.write(line)\n fo.close()\n#\n#--- find median point\n#\n med = find_med(pha_hist)\n#\n#--- fit a normal distribution on the data\n#\n [amp, center, width] = fit_gauss(pha_bin, pha_hist)\n#\n#--- print out the fitting result\n#\n outfile = house_keeping + 'fitting_results'\n\n copied_file = outfile + '~'\n cmd = 'cp ' + outfile + ' ' + copied_file\n os.system(cmd)\n\n fo = open(outfile, 'a')\n line = str(obsid) + '\\t' + date_obs + '\\t' + str(tstart) + '\\t' + detnam + '\\t' + str(ra_pnt) + '\\t' + str(dec_pnt) + '\\t\\t'\n line = line + str(round(ra_diff,3)) + '\\t' + str(round(dec_diff, 3)) + '\\t' + str(round(rad_diff,3)) + '\\t' + str(med) + '\\t\\t'\n line = line + str(round(center, 3)) + '\\t' + str(round(amp, 3)) + '\\t' + str(round(width, 3)) + '\\t'\n line = line + str(roll_pnt) + '\\t' + str(foc_len) + '\\t' + str(defocus) + '\\t'\n line = line + str(sim_x) + '\\t' + str(sim_y) + '\\t' + str(sim_z) + '\\n'\n fo.write(line)\n fo.close()\n#\n#--- plot the data\n#\n outfile = plot_dir + hname + '_gfit.png'\n plot_gauss(pha_bin, pha_hist, amp, center, width, file, outfile)\n#\n#--- remove the evt2 file\n#\n mcf.rm_file(file)",
"def recenter_samples(ts, chains, logls, sigmafactor=0.1):\n\n sf=sigmafactor\n\n T=ts[-1]-ts[0]\n \n ibest=np.argmax(logls)\n p0=params.Parameters(np.reshape(chains, (-1, chains.shape[-1]))[ibest, :])\n\n ncycle=T/p0.P\n ncorr=T/p0.tau\n nobs=len(ts)\n\n samples=params.Parameters(np.copy(chains))\n\n assert samples.npl == 1, 'require exactly one planet'\n assert samples.nobs == 1, 'require exactly one observatory'\n\n samples.V = np.random.normal(loc=p0.V, scale=sf*p0.sigma/np.sqrt(nobs), size=samples.V.shape)\n samples.sigma0 = np.random.lognormal(mean=np.log(p0.sigma0), sigma=sf/np.sqrt(nobs), size=samples.simag0.shape)\n samples.sigma = np.random.lognormal(mean=np.log(p0.sigma), sigma=sf/np.sqrt(ncorr), size=samples.sigma.shape)\n samples.tau = np.random.lognormal(mean=np.log(p0.tau), sigma=sf/np.sqrt(ncorr), size=samples.tau.shape)\n samples.K = np.random.normal(loc=p0.K, scale=sf*p0.K/np.sqrt(nobs), size=samples.K.shape)\n samples.n = np.random.lognormal(mean=np.log(p0.n), sigma=sf/np.sqrt(ncycle), size=samples.n.shape)\n samples.chi = np.random.lognormal(mean=np.log(p0.chi), sigma=sf/np.sqrt(ncycle), size=samples.chi.shape)\n samples.e = np.random.lognormal(mean=np.log(p0.e), sigma=sf/np.sqrt(ncycle), size=samples.e.shape)\n samples.omega = np.random.lognormal(mean=np.log(p0.omega), sigma=sf/np.sqrt(ncycle), size=samples.omega.shape)\n\n return samples",
"def maxlik_fitlinearc(lls_dict, neval=100, nevalC=50, min_dark=0.1, slope_pivot=911.*u.AA, **kwargs):\n from scipy.special import gammaln\n # Setup\n spec, xspec, gdwv, NHI, tau0 = setup_lls_fit_analy(lls_dict['spec_fil'], lls_dict['z'], lls_dict['windows'],\n lls_dict['NHI_mnx'], nNHI=neval, **kwargs)\n cdict = lls_dict['cdict']\n npix = gdwv.size\n\n # Check continuum\n if cdict['type'] == 'Fit_line':\n gdC = []\n for rng in cdict['analy']:\n idx = ((xspec.wavelength > rng[0]*u.AA) &\n (xspec.wavelength < rng[1]*u.AA) &\n (xspec.sig > 0))\n gdC = gdC + list(np.where(idx)[0])\n gdC = np.array(gdC)\n # Grab continuum\n conti = xspec.flux[gdC]\n sig_conti = xspec.sig[gdC]\n # Set continuum range\n medC = np.median(conti.value)\n C0_val = np.linspace(cdict['C0_range'][0], cdict['C0_range'][1], num=nevalC)\n C1_val = np.linspace(cdict['C1_range'][0], cdict['C1_range'][1], num=nevalC)\n cdict['slope_pivot'] = slope_pivot.value\n else:\n raise ValueError('Not ready for this type of continuum')\n\n # Generate arrays (value x NHI x Cval)\n # Arrays -- The following is for HSLA outputs\n count_array = np.outer(np.round(spec['GROSSCOUNTS']*spec['EXP_PIX']).data.flatten()[gdwv], np.ones(neval))\n dark_dumb = np.median(((spec['GROSSCOUNTS']-spec['NETCOUNTS'])*spec['EXP_PIX']).data.flatten()[gdwv])\n dark_dumb = max(dark_dumb, min_dark)\n dark_array = np.outer(dark_dumb*np.ones(len(gdwv)), np.ones(neval))\n calib_array = np.outer(1./spec['FLUXFACTOR'].data.flatten()[gdwv], np.ones(neval))\n expt_array = np.outer(spec['EXP_PIX'].data.flatten()[gdwv], np.ones(neval))\n #pdb.set_trace()\n # Continuum\n obs_conti_array = np.outer(conti.value, np.ones(nevalC))\n sig_conti_array = np.outer(sig_conti, np.ones(nevalC))\n # Grids\n count_grid = np.zeros((npix, neval, nevalC, nevalC))\n dark_grid = np.zeros((npix, neval, nevalC, nevalC))\n calib_grid = np.zeros((npix, neval, nevalC, nevalC))\n expt_grid = np.zeros((npix, neval, nevalC, nevalC))\n # Fill\n for ii in range(nevalC):\n for jj in range(nevalC):\n count_grid[:, :, ii, jj] = count_array\n dark_grid[:, :, ii, jj] = dark_array\n calib_grid[:, :, ii, jj] = calib_array\n expt_grid[:, :, ii, jj] = expt_array\n # Model Continuum in LL\n LL_conti_sub_grid = np.zeros((npix, nevalC, nevalC))\n LL_conti_grid = np.zeros((npix, neval, nevalC, nevalC))\n LL_conti_array = np.outer((xspec.wavelength[gdwv]-\n slope_pivot*(1+lls_dict['z'])), C1_val)\n for jj,C0 in enumerate(C0_val):\n LL_conti_sub_grid[:,jj,:] = LL_conti_array + C0\n for jj in range(neval):\n LL_conti_grid[:,jj,:,:] = LL_conti_sub_grid\n\n # Model Continuum redward\n conti_array = np.outer((xspec.wavelength[gdC]-slope_pivot*(1+lls_dict['z'])), C1_val)\n model_conti_grid = np.zeros((gdC.size, nevalC, nevalC))\n for ii,C0 in enumerate(C0_val):\n model_conti_grid[:, ii, :] = conti_array + C0\n\n # Observed + error (Continuum points only)\n obs_conti_grid = np.zeros((gdC.size, nevalC, nevalC))\n sig_conti_grid = np.zeros((gdC.size, nevalC, nevalC))\n for ii in range(nevalC):\n obs_conti_grid[:, ii, :] = obs_conti_array\n sig_conti_grid[:, ii, :] = sig_conti_array\n\n # tau\n tau_array = np.zeros( (npix, neval) )\n for kk, iNHI in enumerate(NHI):\n tau_array[:, kk] = tau0 * 10.**(iNHI-NHI[0])\n tau_grid = np.zeros((npix, neval, nevalC, nevalC))\n for ii in range(nevalC):\n for jj in range(nevalC):\n tau_grid[:, :, ii, jj] = tau_array\n\n # Giddy up -- Likelihood\n model_flux = LL_conti_grid * np.exp(-1 * tau_grid)\n model_flux = np.maximum(model_flux, 0.)\n model_counts = model_flux*calib_grid*expt_grid + 
dark_grid\n lnP_LL = -1*model_counts + count_grid * np.log(model_counts) - gammaln(count_grid+1)\n lnP_C = -1*(obs_conti_grid-model_conti_grid)**2 / 2 / (sig_conti_grid**2)\n\n # Sum\n sum_LL = np.sum(lnP_LL, axis=0)\n sum_C = np.sum(lnP_C, axis=0)\n lnL = sum_LL\n for ii in range(neval):\n lnL[ii,:,:] += sum_C\n\n # Free up memory\n del model_counts, model_flux, LL_conti_grid, tau_grid, lnP_LL, expt_grid, calib_grid\n\n # Return\n return NHI, C0_val, C1_val, lnL",
"def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Afb[0.6,-0.70,0.70]\");\n self.modelBuilder.doSet(\"POI\",\"Afb\")\n\n # ss templates\n self.modelBuilder.doVar(\"Rdy_mumu_ss[1.0,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rdy_ee_ss[1.0,0.0,10.0]\");\n \n self.modelBuilder.factory_('expr::Rpl(\"(1.+@0)\",Afb)')\n self.modelBuilder.factory_('expr::Rmn(\"(1.-@0)\",Afb)')",
"def doParametersOfInterest(self):\n \n self.modelBuilder.doVar('expr::cosW(\"0.87681811112\",)')\n self.modelBuilder.doVar('expr::sinW(\"0.48082221247\",)')\n self.modelBuilder.doVar('expr::mZ(\"91.2\",)')\n self.modelBuilder.doVar('expr::Lambda1(\"100.0\",)')\n self.modelBuilder.doVar('expr::e2(\"0.0917\",)')\n self.modelBuilder.doVar('expr::gs2(\"1.533\",)')\n\n # EFT Higgs basis couplings\n\n self.modelBuilder.doVar('cZ[0,-1,1]') \n self.modelBuilder.doVar(\"cZZ[0,-2,2]\") \n self.modelBuilder.doVar(\"cZZt[0,-2,2]\") \n self.modelBuilder.doVar(\"cZB[0,-6,6]\") \n\n poi='cZ,cZZ,cZZt,cZB'\n\n # Amplitude couplings from EFT couplings \n\n self.modelBuilder.doVar('expr::a1(\"@0+1\",cZ)') # (\"2*(@0+1)\",cZ) in AN/Paper but a1 = 1 for signal model and width calculation\n self.modelBuilder.doVar('expr::a2(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZ,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::a3(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZt,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::k1(\"@0*(@1*pow(@2,2)/(pow(@3,2)*pow(@4,2)))\",cZB,e2,Lambda1,sinW,mZ)')\n self.modelBuilder.doVar('expr::k1L1(\"@0/pow(@1,2)\",k1,Lambda1)')\n\n ###### gamma_H ########\n\n # SMEFT relationships for VV couplings (Expressed using amplitude couplings)\n\n self.modelBuilder.doVar('expr::kappa(\"1.0\",)')\n self.modelBuilder.doVar('expr::kappa_tilde(\"0.0\",)') \n\n self.modelBuilder.doVar('expr::a1_WW(\"@0\",a1)')\n self.modelBuilder.doVar('expr::a2_WW(\"@0*@0*@1\",cosW,a2)')\n self.modelBuilder.doVar('expr::a3_WW(\"@0*@0*@1\",cosW,a3)')\n self.modelBuilder.doVar('expr::k1_WW(\"(@2 / (@0*@0 - @1*@1) - 2*@1*@1*@3*@4*@4 /(@5*@5*(@0*@0 - @1*@1)))\",cosW,sinW,k1,a2,Lambda1,mZ)')\n self.modelBuilder.doVar('expr::k2_k1(\"2*@0*@1*@2/(@0*@0 - @1*@1)\",cosW,sinW,k1)')\n self.modelBuilder.doVar('expr::k2_a2(\"-2*@0*@1*@3*@4*@4/((@2*@2)*(@0*@0 - @1*@1))\",cosW,sinW,mZ,a2,Lambda1)')\n self.modelBuilder.doVar('expr::k2(\"@0 + @1\",k2_k1,k2_a2)')\n\n # Determine gamma_H from VV couplings\n\n zz_expr = '\"4*(@0*@0/4. + 0.1695*@3*@3 + 0.09076*@1*@1 + 0.03809*@2*@2 + 0.8095*@0*@3/2. + 0.5046*@0*@1/2. + 0.2092*@1*@3 + 0.1023*@4*@4 + 0.1901*@0*@4/2. + 0.07429*@3*@4 + 0.04710*@1*@4) \",a1,a2,a3,k1,k2'\n ww_expr = '\"4*(@0*@0/4. + 0.1320*@3*@3 + 0.1944*@1*@1 + 0.08075*@2*@2 + 0.7204*@0*@3/2. + 0.7437*@0*@1/2. + 0.2774*@3*@1) \",a1_WW,a2_WW,a3_WW,k1_WW'\n zgamma_expr = '\"4*(1.118600*@0*@0/4. +0.0035*@1*@1 - 0.125010*@0*@1/2. + 0.000003*@1*@1 - 0.00018*@1*@1 + 0.003100*@0*@1/2. +0.00126*@2*@2 + 0.000005*@2*@2 -0.00047*@2*@2)\",a1_WW,kappa,kappa_tilde'\n gg_expr = '\"(1.1068*@0*@0 + 0.0082*@0*@0 - 0.1150*@0*@0 + 2.5717*@1*@1 + 0.0091*@1*@1 - 0.1982*@1*@1)\",kappa,kappa_tilde'\n bb_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n cc_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n tautau_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n mumu_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n gmgm_expr = '\"4*(1.6054*@0*@0/4. + 0.07312*@1*@1 - 0.6854*@0*@1/2. + 0.00002*@1*@1 - 0.0018*@1*@1 + 0.0085*@0*@1/2. 
+ 0.1699*@2*@2 + 0.00002*@2*@2 - 0.0031*@2*@2)\",a1_WW,kappa,kappa_tilde'\n \n self.modelBuilder.doVar('expr::R_WW('+str(ww_expr)+')')\n self.modelBuilder.doVar('expr::R_ZZ('+str(zz_expr)+')')\n self.modelBuilder.doVar('expr::R_Zgamma('+str(zgamma_expr)+')')\n self.modelBuilder.doVar('expr::R_gg('+str(gg_expr)+')')\n self.modelBuilder.doVar('expr::R_bb('+str(bb_expr)+')')\n self.modelBuilder.doVar('expr::R_cc('+str(cc_expr)+')')\n self.modelBuilder.doVar('expr::R_tautau('+str(tautau_expr)+')')\n self.modelBuilder.doVar('expr::R_mumu('+str(mumu_expr)+')')\n self.modelBuilder.doVar('expr:R_gammagamma('+str(gmgm_expr)+')')\n\n self.modelBuilder.doVar('expr::gammaH(\"(0.5824*@0 + 0.2137*@1 + 0.08187*@2 + 0.06272*@3 + 0.02891*@4 + 0.02619*@5 + 0.002270*@6 + 0.001533*@7 + 0.0002176*@8 )/0.9998\",R_bb,R_WW,R_gg,R_tautau,R_cc,R_ZZ,R_gammagamma,R_Zgamma,R_mumu)') \n\n ###########################\n\n self.g1V = GetCoupTerms(1,1,1,-0.0001,\"1V\") # Compensate for scaling of k1 templates \n self.g2V = GetCoupTerms(1,1,1,-0.0001,\"2V\") \n \n self.modelBuilder.doVar(\"expr::g2V_1(\\\"\"+str(self.g2V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1(\\\"((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1_Neg(\\\"-1*((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.doVar(\"expr::g2V_2(\\\"\"+str(self.g2V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2(\\\"((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2_Neg(\\\"-1*((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.doVar(\"expr::g2V_3(\\\"\"+str(self.g2V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3(\\\"((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3_Neg(\\\"-1*((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.doVar(\"expr::g2V_4(\\\"\"+str(self.g2V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4(\\\"((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4_Neg(\\\"-1*((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.doVar(\"expr::g2V_5(\\\"\"+str(self.g2V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5(\\\"((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5_Neg(\\\"-1*((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.doVar(\"expr::g2V_6(\\\"\"+str(self.g2V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6(\\\"((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6_Neg(\\\"-1*((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.doVar(\"expr::g2V_7(\\\"\"+str(self.g2V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7(\\\"((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7_Neg(\\\"-1*((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.doVar(\"expr::g2V_8(\\\"\"+str(self.g2V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T8(\\\"((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T8_Neg(\\\"-1*((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n self.modelBuilder.doVar(\"expr::g2V_9(\\\"\"+str(self.g2V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9(\\\"((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9_Neg(\\\"-1*((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.doVar(\"expr::g2V_10(\\\"\"+str(self.g2V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10(\\\"((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10_Neg(\\\"-1*((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.doVar(\"expr::g2V_11(\\\"\"+str(self.g2V[10])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11(\\\"((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11_Neg(\\\"-1*((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.doVar(\"expr::g2V_12(\\\"\"+str(self.g2V[11])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12(\\\"((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12_Neg(\\\"-1*((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.doVar(\"expr::g2V_13(\\\"\"+str(self.g2V[12])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13(\\\"((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13_Neg(\\\"-1*((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.doVar(\"expr::g2V_14(\\\"\"+str(self.g2V[13])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14(\\\"((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14_Neg(\\\"-1*((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.doVar(\"expr::g2V_15(\\\"\"+str(self.g2V[14])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15(\\\"((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15_Neg(\\\"-1*((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.doVar(\"expr::g2V_16(\\\"\"+str(self.g2V[15])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16(\\\"((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16_Neg(\\\"-1*((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.doVar(\"expr::g2V_17(\\\"\"+str(self.g2V[16])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17(\\\"((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17_Neg(\\\"-1*((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.doVar(\"expr::g2V_18(\\\"\"+str(self.g2V[17])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18(\\\"((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18_Neg(\\\"-1*((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.doVar(\"expr::g2V_19(\\\"\"+str(self.g2V[18])+\"\\\",)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T19(\\\"((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T19_Neg(\\\"-1*((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.doVar(\"expr::g2V_20(\\\"\"+str(self.g2V[19])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20(\\\"((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20_Neg(\\\"-1*((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.doVar(\"expr::g2V_21(\\\"\"+str(self.g2V[20])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21(\\\"((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21_Neg(\\\"-1*((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.doVar(\"expr::g2V_22(\\\"\"+str(self.g2V[21])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22(\\\"((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22_Neg(\\\"-1*((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.doVar(\"expr::g2V_23(\\\"\"+str(self.g2V[22])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23(\\\"((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23_Neg(\\\"-1*((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.doVar(\"expr::g2V_24(\\\"\"+str(self.g2V[23])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24(\\\"((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24_Neg(\\\"-1*((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.doVar(\"expr::g2V_25(\\\"\"+str(self.g2V[24])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25(\\\"((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25_Neg(\\\"-1*((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.doVar(\"expr::g2V_26(\\\"\"+str(self.g2V[25])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26(\\\"((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26_Neg(\\\"-1*((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.doVar(\"expr::g2V_27(\\\"\"+str(self.g2V[26])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27(\\\"((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27_Neg(\\\"-1*((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.doVar(\"expr::g2V_28(\\\"\"+str(self.g2V[27])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28(\\\"((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28_Neg(\\\"-1*((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.doVar(\"expr::g2V_29(\\\"\"+str(self.g2V[28])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29(\\\"((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29_Neg(\\\"-1*((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, 
a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.doVar(\"expr::g2V_30(\\\"\"+str(self.g2V[29])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30(\\\"((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30_Neg(\\\"-1*((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.doVar(\"expr::g2V_31(\\\"\"+str(self.g2V[30])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31(\\\"((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31_Neg(\\\"-1*((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.doVar(\"expr::g2V_32(\\\"\"+str(self.g2V[31])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32(\\\"((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32_Neg(\\\"-1*((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.doVar(\"expr::g2V_33(\\\"\"+str(self.g2V[32])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33(\\\"((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33_Neg(\\\"-1*((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.doVar(\"expr::g2V_34(\\\"\"+str(self.g2V[33])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34(\\\"((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34_Neg(\\\"-1*((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.doVar(\"expr::g2V_35(\\\"\"+str(self.g2V[34])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35(\\\"((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35_Neg(\\\"-1*((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n \n self.modelBuilder.doVar(\"expr::g1V_1(\\\"\"+str(self.g1V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1(\\\"((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1_Neg(\\\"-1*((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.doVar(\"expr::g1V_2(\\\"\"+str(self.g1V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2(\\\"((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2_Neg(\\\"-1*((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.doVar(\"expr::g1V_3(\\\"\"+str(self.g1V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3(\\\"((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3_Neg(\\\"-1*((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.doVar(\"expr::g1V_4(\\\"\"+str(self.g1V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4(\\\"((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4_Neg(\\\"-1*((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.doVar(\"expr::g1V_5(\\\"\"+str(self.g1V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T5(\\\"((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n 
self.modelBuilder.factory_(\"expr::scale_ggH_T5_Neg(\\\"-1*((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n self.modelBuilder.doVar(\"expr::g1V_6(\\\"\"+str(self.g1V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6(\\\"((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6_Neg(\\\"-1*((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.doVar(\"expr::g1V_7(\\\"\"+str(self.g1V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7(\\\"((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7_Neg(\\\"-1*((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.doVar(\"expr::g1V_8(\\\"\"+str(self.g1V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8(\\\"((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8_Neg(\\\"-1*((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.doVar(\"expr::g1V_9(\\\"\"+str(self.g1V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9(\\\"((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9_Neg(\\\"-1*((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.doVar(\"expr::g1V_10(\\\"\"+str(self.g1V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10(\\\"((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10_Neg(\\\"-1*((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n \n self.modelBuilder.doSet(\"POI\",poi)",
"def set_params(self, params_):\n x_start, x_end = params_[\"lim_fit\"]\n self.find_idx_of_fit_limit(x_start, x_end)\n self.is_error_bar_for_fit = params_[\"use_error_bar\"]\n self.fitting_method1 = params_[\"method1\"]\n self.fitting_method2 = params_[\"method2\"]\n self.qty_to_min = params_[\"qty_to_min\"]\n\n for i, key in enumerate(self.params):\n # self.params[key].set(value=params_[\"val\"][i], min=params_[\"min\"][i], max=params_[\"max\"][i], vary=bool(params_[\"hold\"][i]), brute_step=params_[\"brute_step\"][i])\n if self.params[key].user_data is not None:\n if \"dontGenerate\" in self.params[key].user_data:\n continue\n self.params[key].set(value=params_[key][\"value\"], min=params_[key][\"min\"], max=params_[key][\"max\"], vary=params_[key][\"vary\"], brute_step=params_[key][\"b_step\"])",
"def leoGaussFit(self,zeroX_to_LEO_limit,calib_zeroX_to_peak,calib_gauss_width,evap_threshold):\r\n\r\n\r\n\t\t#run the scatteringPeakInfo method to retrieve various peak attributes \r\n\t\tself.scatteringPeakInfo()\r\n\t\t\r\n\t\t#get the baseline\r\n\t\tbaseline = self.scatteringBaseline\r\n\t\t\r\n\t\t#get the zero-crossing for the particle\r\n\t\tzero_crossing_pt_LEO = self.zeroCrossing(evap_threshold)\r\n\t\t\r\n\t\tif zero_crossing_pt_LEO < 0: #ie we can't find the zero crossing\r\n\t\t\tself.LF_scattering_amp = -2\r\n\t\t\tself.LF_baseline = -2\r\n\t\t\tself.LF_results = []\r\n\t\t\t#self.LF_max_index = -2\r\n\t\t\tself.beam_center_pos = -2\r\n\t\t\t\r\n\t\telse:\r\n\t\t\t#LEO max index sets the x-limit for fitting based on the desired magnification factor\r\n\t\t\tLEO_max_index = int(round(zero_crossing_pt_LEO-zeroX_to_LEO_limit))\r\n\t\t\tself.LF_max_index = LEO_max_index\r\n\t\t\tLEO_min_index = 0\r\n\t\t\t\r\n\t\t\tx_vals_all = self.getAcqPoints()\r\n\t\t\tself.LF_x_vals_to_use = x_vals_all[LEO_min_index:LEO_max_index]\r\n\r\n\t\t\ty_vals_all = self.getScatteringSignal()\r\n\t\t\tself.LF_y_vals_to_use = y_vals_all[LEO_min_index:LEO_max_index]\r\n\t\t\t\r\n\t\t\tself.beam_center_pos = zero_crossing_pt_LEO-calib_zeroX_to_peak\r\n\t\t\t\t\t\t\t\r\n\t\t\tdef LEOGauss(x, a, b):\r\n\t\t\t\treturn b+a*np.exp((-(x-self.beam_center_pos)**2)/(2*calib_gauss_width**2)) #Gaussian\r\n\t\t\t\r\n\t\t\t#run the fitting\r\n\t\t\ttry:\r\n\t\t\t\tpopt, pcov = curve_fit(LEOGauss, self.LF_x_vals_to_use, self.LF_y_vals_to_use)\r\n\t\t\texcept:\r\n\t\t\t\tpopt, pcov = [-1,-1], [np.nan, np.nan] \r\n\r\n\t\t\tself.LF_scattering_amp = popt[0] \r\n\t\t\tself.LF_baseline = popt[1]\r\n\t\t\t\r\n\t\t\tfit_result = []\r\n\t\t\tfor x in x_vals_all:\r\n\t\t\t\tfit_result.append(LEOGauss(x,popt[0],popt[1]))\r\n\t\t\tself.LF_results = fit_result",
"def setup_parameters(self):\n structure = self.ctx.structure_initial_primitive\n ecutwfc = []\n ecutrho = []\n\n for kind in structure.get_kind_names():\n try:\n dual = self.ctx.protocol['pseudo_data'][kind]['dual']\n cutoff = self.ctx.protocol['pseudo_data'][kind]['cutoff']\n cutrho = dual * cutoff\n ecutwfc.append(cutoff)\n ecutrho.append(cutrho)\n except KeyError as exception:\n self.abort_nowait('failed to retrieve the cutoff or dual factor for {}'.format(kind))\n\n natoms = len(structure.sites)\n conv_thr = self.ctx.protocol['convergence_threshold'] * natoms\n\n self.ctx.inputs['parameters'] = {\n 'CONTROL': {\n 'restart_mode': 'from_scratch',\n 'tstress': self.ctx.protocol['tstress'],\n },\n 'SYSTEM': {\n 'ecutwfc': max(ecutwfc),\n 'ecutrho': max(ecutrho),\n 'smearing': self.ctx.protocol['smearing'],\n 'degauss': self.ctx.protocol['degauss'],\n 'occupations': self.ctx.protocol['occupations'],\n },\n 'ELECTRONS': {\n 'conv_thr': conv_thr,\n }\n }",
"def fit(self):\n # Check if log transform is necessary.\n if self.log_transform:\n values = self.base**np.dot(self.X_inv, self.binary.log.phenotypes)\n else:\n values = np.dot(self.X_inv, self.binary.phenotypes)\n self.epistasis.values = values",
"def __init__(\r\n self,\r\n centre=30.0, # <- **PyAutoFit** recognises these constructor arguments\r\n normalization=1.0, # <- are the Gaussian`s model parameters.\r\n sigma=5.0,\r\n ):\r\n self.centre = centre\r\n self.normalization = normalization\r\n self.sigma = sigma",
"def __init__(self, model, data):\n OdfFit.__init__(self, model, data)\n self._gfa = None\n self.npeaks = 5\n self._peak_values = None\n self._peak_indices = None\n self._qa = None",
"def load_params(self, event):\n \n self.robot_type = rospy.get_param(\"robot_type\" , 'pendulum' )\n self.robot_config = rospy.get_param(\"robot_config\", 'wrist-only' )\n self.robot_ctl = rospy.get_param(\"controller\", 'RfixCTC' )\n self.fixed_mode = rospy.get_param(\"fixed_mode\", 1 )\n \n \n ###############################################\n # Load robot model for the right configuration\n if self.robot_config == 'wrist-only':\n self.R = Proto.SingleRevoluteDSDM()\n \n elif self.robot_config == 'dual-plane' :\n self.R = Proto.TwoPlanarSerialDSDM()\n \n else:\n self.R = None\n \n ###############################################\n # Load controller\n if self.robot_ctl == 'RfixCTC' :\n self.Ctl = RminCTC.RfixComputedTorqueController( self.R , self.fixed_mode )\n \n elif self.robot_ctl == 'RminCTC' :\n self.Ctl = RminCTC.RminComputedTorqueController( self.R )\n \n elif self.robot_ctl == 'RfixSLD' :\n self.Ctl = RminCTC.RfixSlidingModeController( self.R , self.fixed_mode )\n \n elif self.robot_ctl == 'RminSLD' :\n self.Ctl = RminCTC.RminSlidingModeController( self.R )\n \n elif self.robot_ctl == 'RollCTC' :\n self.Ctl = RollCTC.RolloutComputedTorqueController( self.R )\n \n elif self.robot_ctl == 'RollSLD' :\n self.Ctl = RollCTC.RolloutSlidingModeController( self.R )\n \n else:\n self.Ctl = None\n \n \n if self.robot_config == 'wrist-only':\n self.Ctl.n_gears = rospy.get_param(\"n_gears\", 2 )\n self.x_d = np.array( rospy.get_param(\"goal\", [0,0] ) )\n \n elif self.robot_config == 'dual-plane' :\n self.Ctl.n_gears = rospy.get_param(\"n_gears\", 4 )\n self.x_d = np.array( rospy.get_param(\"goal\", [0.0,0.0,0.0,0.0] ) )\n #self.x_d = np.array( [-3.14 , 0 , 0 , 0] )\n \n # Gen ctl params\n self.Ctl.hysteresis = rospy.get_param(\"hysteresis\", True )\n self.Ctl.min_delay = rospy.get_param(\"min_delay\", 0.5 )\n \n self.Ctl.w0 = rospy.get_param(\"w0\", 1 )\n self.Ctl.zeta = rospy.get_param(\"zeta\", 0.7 )\n \n self.Ctl.lam = rospy.get_param(\"lam\", 1 )\n self.Ctl.nab = rospy.get_param(\"nab\", 1 )\n self.Ctl.D = rospy.get_param(\"D\", 0 )\n \n self.Ctl.horizon = rospy.get_param(\"horizon\", 0.5 )\n self.Ctl.sim_dt = rospy.get_param(\"sim_dt\", 0.1 )\n \n self.Ctl.domain_check = rospy.get_param(\"domain_check\", False )\n \n # Base policy param for roll \n if self.robot_ctl == 'RollCTC' :\n self.Ctl.FixCtl.lam = self.Ctl.lam\n \n elif self.robot_ctl == 'RollSLD' :\n self.Ctl.FixCtl.lam = self.Ctl.lam \n self.Ctl.FixCtl.nab = self.Ctl.nab \n self.Ctl.FixCtl.D = self.Ctl.D",
"def apply_ols_to_subject(total_s, total_r, r_outliers = False, smooth = False):\n\t#for sub in range(total_s+1)[1:]:\n\t\t#for run in range(total_r+1)[1:]:\n\tfor sub in range(1,17):\n\t\tfor run in range(1,4):\n\t\t\tdata = get_image(run, sub).get_data()\n\t\t\tif r_outliers == True:\n\t\t\t\tdata = remove_outliers(data)\n\t\t\tif smooth == True:\n\t\t\t\tdata = smooth_data(data, 2)\n\t\t\tbehavdata = get_behav(run, sub)\n\t\t\tprint(\"run:\", run, \"sub:\", sub)\n\t\t\tdesign = build_design(data, behavdata)\n\t\t\tif sub == 1 and run == 1:\n\t\t\t\tgain_loss_betas_2d = regression_fit(data, design)[2:,:]\n\t\t\telse: \n\t\t\t\tbetas = regression_fit(data, design)[2:,:]\n\t\t\t\tgain_loss_betas_2d = np.concatenate((gain_loss_betas_2d, betas), axis=0)\n\t\n\treturn gain_loss_betas_2d",
"def fun_set(self):\n\n self.type.set(self.xtl._scattering_type)\n # self.energy_kev.set(8)\n self.theta_offset.set(self.xtl._scattering_theta_offset)\n self.theta_min.set(self.xtl._scattering_min_theta)\n self.theta_max.set(self.xtl._scattering_max_theta)\n self.twotheta_min.set(self.xtl._scattering_min_two_theta)\n self.twotheta_max.set(self.xtl._scattering_max_two_theta)\n\n if self.orientation.get() == 'Reflection':\n self.direction_h.set(self.xtl._scattering_specular_direction[0])\n self.direction_k.set(self.xtl._scattering_specular_direction[1])\n self.direction_l.set(self.xtl._scattering_specular_direction[2])\n else:\n self.direction_h.set(self.xtl._scattering_parallel_direction[0])\n self.direction_k.set(self.xtl._scattering_parallel_direction[1])\n self.direction_l.set(self.xtl._scattering_parallel_direction[2])",
"def fit_clicked(self):\n variables = self.model.parameters['variables']\n bounds = {}\n guess = {}\n\n for var in variables:\n bounds[var] = (self.view.ui.table_var_map[var + 'min'].value(),\n self.view.ui.table_var_map[var + 'max'].value())\n guess[var] = self.view.ui.table_var_map[var + 'guess'].value()\n\n self.model.parameters['bounds'] = bounds\n self.model.parameters['guess'] = guess\n self.model.parameters['Norm'] = self.view.ui.radiobutton_Norm.isChecked()\n self.model.parameters['method'] = self.view.ui.combobox_Method.currentText()\n\n try:\n self.model.do_fit()\n except Exception as e:\n self.logger.error(e)",
"def update_parameters(self):\n self.alignment_factor = rospy.get_param('/dyn_reconf/alignment_factor')\n self.cohesion_factor = rospy.get_param('/dyn_reconf/cohesion_factor')\n self.separation_factor = rospy.get_param('/dyn_reconf/separation_factor')\n self.avoid_factor = rospy.get_param('/dyn_reconf/avoid_factor')\n self.max_speed = rospy.get_param('/dyn_reconf/max_speed')\n self.max_force = rospy.get_param('/dyn_reconf/max_force')\n self.friction = rospy.get_param('/dyn_reconf/friction')\n self.crowd_radius = rospy.get_param('/dyn_reconf/crowd_radius')\n self.search_radius = rospy.get_param('/dyn_reconf/search_radius')\n\n rospy.loginfo(rospy.get_caller_id() + \" -> Parameters updated\")\n if DEBUG:\n print('alignment_factor: ', self.alignment_factor)\n print('cohesion_factor: ', self.cohesion_factor)\n print('separation_factor: ', self.separation_factor)\n print('avoid_factor: ', self.avoid_factor)\n print('max_speed: ', self.max_speed)\n print('max_force: ', self.max_force)\n print('friction: ', self.friction)\n print('crowd_radius: ', self.crowd_radius)\n print('search_radius: ', self.search_radius)",
"def __init__(self, lam=1.0):\n self.lam = lam\n\n # these are set in fit\n self.b = None # float\n self.w = None # (nvars, ) array",
"def __init__(self, x, y, pRanges, xerr=None, yerr=None, flag=None, lnlikeType=\"Nukers\",\n fix_slope=None, fix_intercept=None):\n self.x = x\n self.y = y\n self.pRanges = pRanges\n self.flag = flag\n self.xerr = xerr\n self.yerr = yerr\n self.fix_slope = fix_slope\n self.fix_intercept = fix_intercept\n nfix = 0\n if not fix_slope is None:\n nfix += 1\n if not fix_intercept is None:\n nfix += 1\n ndim = len(pRanges)\n if (ndim + nfix) == 2:\n print(\"[linfit]: The model uncertainty is NOT considered!\")\n elif (ndim + nfix) == 3:\n print(\"[linfit]: The model uncertainty is considered!\")\n else:\n raise ValueError(\"[linfit]: The parameter number ({0}) is incorrect!\".format(ndim))\n self.ndim = ndim\n if (xerr is None) & (yerr is None):\n xerr = np.zeros_like(x)\n yerr = np.ones_like(y)\n else:\n if xerr is None:\n xerr = np.zeros_like(x)\n if yerr is None:\n yerr = np.zeros_like(y)\n if lnlikeType == \"Nukers\":\n self.lnlike = lnlike_Nukers\n self.parNames = [\"beta\", \"alpha\", \"epsy0\"]\n elif lnlikeType == \"gcs\":\n self.lnlike = lnlike_gcs\n self.parNames = []\n if fix_slope is None:\n self.parNames.append(\"m\")\n if fix_intercept is None:\n self.parNames.append(\"b\")\n self.parNames.append(\"lnf\")\n elif lnlikeType == \"naive\":\n self.lnlike = lnlike_naive\n self.parNames = [\"m\", \"b\", \"lnf\"]\n elif lnlikeType == \"perp\":\n self.lnlike = lnlike_perp\n self.parNames = [\"theta\", \"bv\", \"V\"]\n elif lnlikeType == \"perp2\":\n self.lnlike = lnlike_perp2\n self.parNames = [\"theta\", \"b\", \"V\"]\n else:\n raise ValueError(\"[linfit]: The lnlike function ({0}) is not recognised!\".format(lnlike))\n self.lnlikeType = lnlikeType",
"def set_parameter_values(self, c5=None, lm=1.0):\n\n self._c5 = c5\n self._lm = lm\n\n self._update()",
"def calibrate(self, poly_n=0, analytes=None, drift_correct=False,\n srm_errors=False, srms_used=['NIST610', 'NIST612', 'NIST614']):\n # MAKE CALIBRATION CLEVERER!?\n # USE ALL DATA OR AVERAGES?\n # IF POLY_N > 0, STILL FORCE THROUGH ZERO IF ALL\n # STDS ARE WITHIN ERROR OF EACH OTHER (E.G. AL/CA)\n # can store calibration function in self and use *coefs?\n # check for identified srms\n\n if analytes is None:\n analytes = self.analytes\n elif isinstance(analytes, str):\n analytes = [analytes]\n\n if not hasattr(self, 'srmtabs'):\n self.srm_id_auto(srms_used)\n\n # calibration functions\n def calib_0(P, x):\n return x * P[0]\n\n def calib_n(P, x):\n # where p is a list of polynomial coefficients n items long,\n # corresponding to [..., 2nd, 1st, 0th] order coefficients\n return np.polyval(P, x)\n\n # wrapper for ODR fitting\n def odrfit(x, y, fn, coef0, sx=None, sy=None):\n dat = odr.RealData(x=x, y=y,\n sx=sx, sy=sy)\n m = odr.Model(fn)\n mod = odr.ODR(dat, m, coef0)\n mod.run()\n return un.uarray(mod.output.beta, mod.output.sd_beta)\n\n # make container for calibration params\n if not hasattr(self, 'calib_params'):\n self.calib_params = pd.DataFrame(columns=self.analytes)\n\n # set up calibration functions\n if not hasattr(self, 'calib_fns'):\n self.calib_fns = {}\n\n for a in analytes:\n if poly_n == 0:\n self.calib_fns[a] = calib_0\n p0 = [1]\n else:\n self.calib_fns[a] = calib_n\n p0 = [1] * (poly_n - 1) + [0]\n\n # calculate calibrations\n if drift_correct:\n for n, g in self.srmtabs.loc[a, :].groupby(level=0):\n if srm_errors:\n p = odrfit(x=self.srmtabs.loc[a, 'meas_mean'].values,\n y=self.srmtabs.loc[a, 'srm_mean'].values,\n sx=self.srmtabs.loc[a, 'meas_err'].values,\n sy=self.srmtabs.loc[a, 'srm_err'].values,\n fn=self.calib_fns[a],\n coef0=p0)\n else:\n p = odrfit(x=self.srmtabs.loc[a, 'meas_mean'].values,\n y=self.srmtabs.loc[a, 'srm_mean'].values,\n sx=self.srmtabs.loc[a, 'meas_err'].values,\n fn=self.calib_fns[a],\n coef0=p0)\n uTime = g.index.get_level_values('uTime').values.mean()\n self.calib_params.loc[uTime, a] = p\n else:\n if srm_errors:\n p = odrfit(x=self.srmtabs.loc[a, 'meas_mean'].values,\n y=self.srmtabs.loc[a, 'srm_mean'].values,\n sx=self.srmtabs.loc[a, 'meas_err'].values,\n sy=self.srmtabs.loc[a, 'srm_err'].values,\n fn=self.calib_fns[a],\n coef0=p0)\n else:\n p = odrfit(x=self.srmtabs.loc[a, 'meas_mean'].values,\n y=self.srmtabs.loc[a, 'srm_mean'].values,\n sx=self.srmtabs.loc[a, 'meas_err'].values,\n fn=self.calib_fns[a],\n coef0=p0)\n self.calib_params.loc[0, a] = p\n\n # apply calibration\n for d in tqdm(self.data, desc='Calibration'):\n try:\n d.calibrate(self.calib_fns, self.calib_params, analytes, drift_correct=drift_correct)\n except:\n print(d.sample + ' failed - probably first or last SRM\\nwhich is outside interpolated time range.')\n\n self.focus_stage = 'calibrated'\n # # save calibration parameters\n # # self.save_calibration()\n return",
"def _prepare_fit(self):\n self.lower_conf_int, self.upper_conf_int, self.upper_whisker_res = None, None, None\n self.model_fit = None\n self.residuals, self.residuals_forecast, self.fittedvalues = None, None, None",
"def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Afb[0.6,-0.75,0.75]\");\n self.modelBuilder.doSet(\"POI\",\"Afb\")\n\n # ss templates\n self.modelBuilder.doVar(\"Dilu_ratio[1.0,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rdy_mumu_ss[1.0,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rdy_ee_ss[1.0,0.0,10.0]\");\n \n self.modelBuilder.factory_('expr::Rpl(\"0.5*(1.+@0*@1)\",Afb, Dilu_ratio)')\n self.modelBuilder.factory_('expr::Rmn(\"0.5*(1.-@0*@1)\",Afb, Dilu_ratio)')",
"def add_X_NNLO_fixa0_fit(axe, xran, values, errors, fill=False, save=False):\n B = values[\"B\"]\n F_0 = values[\"F_0\"]\n\n LAMBDA4 = values[\"Lambda4\"]\n LAMBDA3 = values[\"Lambda3\"]\n # LAMBDA12 = values[\"Lambda12\"]\n km = values[\"km\"]\n kf = values[\"kf\"]\n\n x = np.linspace(xran[0], xran[1], num=500)\n\n gamma_2 = values[\"gamma_2\"]\n\n Msqr = x * (8 * (np.pi**2) * (F_0**2))\n arg4 = LAMBDA4**2 / Msqr\n arg3 = LAMBDA3**2 / Msqr\n # arg12 = LAMBDA12**2 / Msqr\n\n l1 = -0.4\n l2 = 4.3\n\n Lambda1sqr = (phys_pion**2) * np.exp(l1)\n Lambda2sqr = (phys_pion**2) * np.exp(l2)\n\n lnLambda12sqr = (7.0 * np.log(Lambda1sqr) + 8.0 * np.log(Lambda2sqr)) / 15.0\n lambda12sqr = np.exp(lnLambda12sqr)\n\n arg12 = lambda12sqr / Msqr\n\n lm = 1.0 / 51.0 * (60.0 * np.log(arg12) - 9.0 * np.log(arg3) + 49.0)\n lf = 1.0 / 30.0 * (30.0 * np.log(arg12) + 6.0 * np.log(arg3) - 6.0 * np.log(arg4) + 23.0)\n\n y = F_0 * (1.0 + x * np.log(arg4) - 5.0 / 4.0 * (x**2) * (lf)**2 + kf * x**2) / (1 + gamma_2 * (0.05**2))\n\n plots = []\n paramstring = \" \".join(\"${}={}$\".format(format_parameters(k), print_paren_error(float(v), float(errors[k])))\n for k, v in sorted(values.iteritems()))\n paramstring = \"$ M_\\pi<{}$\".format(values[\" M_\\pi<\"])\n plabel = \"NNLO Mss=0 fit: {}\".format(paramstring)\n plabel = \"NNLO\"\n if \"cutoff\" in values:\n plabel += \" $M_\\pi < {}$\".format(values[\"cutoff\"])\n addplot(plots, axe, fill, save, x=x, y=y, params={\"label\":plabel, \"ls\":\"--\", \"lw\":4})\n\n return plots",
"def _fit_data(args):\n\twarnings.simplefilter(\"ignore\")\n\tmod=args[0]\n\n\targs=args[1]\n\tif isinstance(mod, tuple):\n\t\tversion = mod[1]\n\t\tmod = mod[0]\n\telse:\n\t\tversion = None\n\tdust_dict={'CCM89Dust':sncosmo.CCM89Dust,'OD94Dust':sncosmo.OD94Dust,'F99Dust':sncosmo.F99Dust}\n\tif args['dust']:\n\t\tdust=dust_dict[args['dust']]()\n\telse:\n\t\tdust=[]\n\teffect_names=args['effect_names']\n\teffect_frames=args['effect_frames']\n\teffects=[dust for i in range(len(effect_names))] if effect_names else []\n\teffect_names=effect_names if effect_names else []\n\teffect_frames=effect_frames if effect_frames else []\n\tif not isinstance(effect_names,(list,tuple)):\n\t\teffects=[effect_names]\n\tif not isinstance(effect_frames,(list,tuple)):\n\t\teffects=[effect_frames]\n\tif isinstance(mod,str):\n\t\tmodName=mod+'_'+version if version else deepcopy(mod)\n\telse:\n\t\tmodName=mod.name+'_'+version if version else deepcopy(mod)\n\n\tif isinstance(mod,str) or isinstance(mod,sncosmo.Source):\n\t\tsource=sncosmo.get_source(mod)\n\t\tsmod = sncosmo.Model(source=source,effects=effects,effect_names=effect_names,effect_frames=effect_frames)\n\telse:\n\t\tsmod=mod\n\tparams=args['params'] if args['params'] else [x for x in smod.vparam_names]\n\tfits=newDict()\n\tdcurve=args['curve']\n\t#if not np.any([smod.bandoverlap(band) for band in dcurve.bands]):\n\t# raise RuntimeError(\"No band overlap for model %s\"%modName)\n\tfits.method=args['fitting_method']\n\tfits.bounds=args['bounds'] if args['bounds'] else {}\n\tfits.ignore=args['ignore'] if args['ignore'] else []\n\tfits.constants = args['constants'] if args['constants'] else {x: y for x, y in zip(dcurve.meta.keys(),dcurve.meta.values()) if x != 'info'}\n\n\n\tno_bound = {x for x in params if x in _needs_bounds and x not in fits.bounds.keys() and x not in fits.constants.keys()}\n\tif no_bound:\n\t\tparams=list(set(params)-no_bound)\n\tparams= [x for x in params if x not in fits.ignore and x not in fits.constants.keys()]\n\tfits.params = params\n\tif fits.constants is not None:\n\t\ttry:\n\t\t\tsmod.set(**fits.constants)\n\t\texcept:\n\t\t\traise RuntimeError('You may have some parameters in \"constants\" that are not in your model.')\n\n\tif args['doFit']:\n\t\tif args['fitting_method']=='mcmc':\n\t\t\tfits.res, fits.model = args['sn_func'][args['fitting_method']](dcurve.table, smod, fits.params, fits.bounds, **args['props'])\n\t\telif args['fitting_method']=='nest':\n\t\t\tfits.res, fits.model = args['sn_func'][args['fitting_method']](dcurve.table, smod, fits.params, fits.bounds,guess_amplitude_bound=True, verbose=False, **args['props'])\n\t\telse:\n\t\t\tfits.res, fits.model = args['sn_func'][args['fitting_method']](dcurve.table, smod, fits.params, fits.bounds,verbose=False, **args['props'])\n\t\treturn(pyParz.parReturn(fits))\n\telse:\n\t\tfits.model=smod\n\t\tfits.res=newDict()\n\t\tfits.res['vparam_names']=fits.params\n\t\treturn (fits)",
"def set_parameters(self,params):\n K3Supervisor.set_parameters(self,params)\n self.blending.set_parameters(self.parameters)",
"def _set_parameters(self, parameters):\n self.parameters = parameters\n self._set_points_and_weights()",
"def setParameters(self):\n\n # Set the parameters\n self.taux = 24.2\n self.mu = 0.23\n self.G = 33.75\n self.alpha_0 = 0.05\n self.delta = 0.0075\n self.p = 0.50\n self.I0 = 9500.0\n self.kparam = 0.55",
"def add_X_NNLO_all_fit(axe, xran, values, errors, fill=False, save=False):\n B = values[\"B\"]\n F_0 = values[\"F_0\"]\n LAMBDA4 = values[\"Lambda4\"]\n LAMBDA3 = values[\"Lambda3\"]\n # LAMBDA12 = values[\"Lambda12\"]\n km = values[\"km\"]\n kf = values[\"kf\"]\n\n x = np.linspace(xran[0], xran[1], num=500)\n\n Msqr = x * (8 * (np.pi**2) * (F_0**2))\n arg4 = LAMBDA4**2 / Msqr\n arg3 = LAMBDA3**2 / Msqr\n # arg12 = LAMBDA12**2 / Msqr\n\n l1 = -0.4\n l2 = 4.3\n\n Lambda1sqr = (phys_pion**2) * np.exp(l1)\n Lambda2sqr = (phys_pion**2) * np.exp(l2)\n\n lnLambda12sqr = (7.0 * np.log(Lambda1sqr) + 8.0 * np.log(Lambda2sqr)) / 15.0\n lambda12sqr = np.exp(lnLambda12sqr)\n\n arg12 = lambda12sqr / Msqr\n\n lm = 1.0 / 51.0 * (60.0 * np.log(arg12) - 9.0 * np.log(arg3) + 49.0)\n lf = 1.0 / 30.0 * (30.0 * np.log(arg12) + 6.0 * np.log(arg3) - 6.0 * np.log(arg4) + 23.0)\n\n y = F_0 * (1.0 + x * np.log(arg4) - 5.0 / 4.0 * (x**2) * (lf)**2 + kf * x**2)\n\n plots = []\n paramstring = \" \".join(\"${}={}$\".format(format_parameters(k), print_paren_error(float(v), float(errors[k])))\n for k, v in sorted(values.iteritems()))\n paramstring = \"$ M_\\pi<{}$\".format(values[\" M_\\pi<\"])\n plabel = \"NNLO Mss=0 fit: {}\".format(paramstring)\n plabel = \"NNLO $a\\\\to 0$ $\\Delta Mss=0$ \"\n plabel = \"NNLO\"\n\n if \"cutoff\" in values:\n plabel += \" $M_\\pi < {}$\".format(values[\"cutoff\"])\n addplot(plots, axe, fill, save, x=x, y=y, params={\"label\":plabel, \"ls\":\"--\", \"lw\":4})\n\n return plots",
"def gaussian_fit(self):\r\n\r\n self.df5 = pd.DataFrame(columns=['Slit Number', 'Centre', 'Centre_err', 'Sigma', 'Sigma_err', 'FWHM', 'FWHM_err', 'Height', 'Height_err'])\r\n QDot_slits = self.QDot_detection()\r\n\r\n if len(QDot_slits) > 0: \r\n self.plot_data = pd.DataFrame(columns=[f\"{QDot_slits[0]}\"], index=self.energies)\r\n else:\r\n self.plot_data = pd.DataFrame(index=self.energies)\r\n\r\n for slit_number in QDot_slits:\r\n sel = self.df4[f'{slit_number}']\r\n self.plot_data[f'{slit_number}'] = sel\r\n \r\n # Makes a good first guess for the fit values of the gaussian\r\n max_intensity = max(sel)\r\n central_energy = sel[sel==max_intensity].index.values\r\n central_energy = central_energy[0]\r\n\r\n # Fits a gaussian model to the selected data and shows the output\r\n gauss = models.GaussianModel()\r\n fit = gauss.fit(sel, x=self.energies, weights=1 / np.sqrt(sel), center = central_energy, amplitude = max_intensity, sigma = 1, nan_policy= 'omit')\r\n \r\n self.plot_data[f'{slit_number} best fit'] = fit.best_fit\r\n\r\n # Appends the fit data for the variables to a new dataframe and shows the fit results with errors\r\n fit_variables = [slit_number]\r\n for key in fit.params:\r\n if key in ['center', 'sigma', 'fwhm', 'height']:\r\n fit_variables.append(fit.params[key].value)\r\n fit_variables.append(fit.params[key].stderr)\r\n \r\n self.df5 = self.df5.append({'Slit Number': fit_variables[0], 'Centre': fit_variables[1], 'Centre_err': fit_variables[2], 'Sigma': fit_variables[3], 'Sigma_err': fit_variables[4], 'FWHM': fit_variables[5], 'FWHM_err': fit_variables[6], 'Height': fit_variables[7], 'Height_err': fit_variables[8]}, ignore_index=True)\r\n \r\n return self.plot_data, self.df5",
"def ceQTL(counts, dos, cov_mat, rsid):\n acov_mat = cov_mat.copy(deep=True)\n acov_mat['soi'] = dos.ix[rsid, acov_mat.index]\n res = sm.OLS(counts, acov_mat).fit()\n return(res)",
"def fit(zs, ys, L, lam_1, lam_2, rho=10, maxiter=100, verbose=True, warm_start=None,\n eps_abs = 1e-5, eps_rel = 1e-5):\n K = int(zs.max() + 1)\n N, n = ys.shape\n Ys, cts = [], []\n for i in range(K):\n idx = zs == i\n cts.append(idx.sum()) #N_i, number of samples per z\n ys_i = ys[idx]\n Ys.append(ys_i.T @ ys_i)\n \n if verbose:\n print (\"Fitting covariance stratified model.\")\n print (\"%d stratification values, %d data points, %d dimensions\" % (K, N, n))\n print (\"%d\" % (K * n * n), \"optimization variables\")\n print (\"lam_1 = %3.3e, lam_2 = %3.3e, rho = %3.3e, maxiter=%d\" % (lam_1, lam_2, rho, maxiter))\n print (\"count per stratification value:\", cts)\n print (Ys[0].shape)\n\n shape = (K, n, n)\n if warm_start is None:\n warm_start = []\n for _ in range(5):\n warm_start.append(np.zeros(shape))\n inv_covs_loss, inv_covs_reg, inv_covs_lapl, U_1, U_2 = warm_start\n \n solve = factorized(L.tocsc() + rho * sparse.eye(K, format='csc'))\n \n for _ in range(maxiter):\n # inv_covs_loss\n for i in range(K):\n if cts[i] == 0:\n inv_covs_loss[i] = (inv_covs_lapl[i] - U_1[i])\n continue\n w, v = np.linalg.eigh((rho/cts[i]) * (inv_covs_lapl[i] - U_1[i]) - Ys[i]/cts[i])\n w_new = (w*cts[i]/rho + np.sqrt((w*cts[i]/rho)**2 + 4*cts[i]/rho))/2\n inv_covs_loss[i] = v @ np.diag(w_new) @ v.T \n \n # inv_covs_reg\n for i in range(K):\n inv_covs_reg[i][np.arange(n), np.arange(n)] = np.diag(inv_covs_lapl[i] - U_2[i] - lam_1/rho) #diagonal elements\n \n st2 = soft_threshold(inv_covs_lapl[i] - U_2[i], lam_2 / rho)\n od_idx = np.where(~np.eye(n,dtype=bool)) #gets off_diags\n inv_covs_reg[i][od_idx] = st2[od_idx] \n \n # inv_covs_lapl\n rhs = (inv_covs_loss + inv_covs_reg) / 2 + (U_1 + U_2) / 2\n rhs *= rho\n inv_covs_lapl_new = solve(rhs.reshape(K, n*n)).reshape(shape)\n S = rho * np.repeat(inv_covs_lapl_new - inv_covs_lapl, 2, axis=0)\n inv_covs_lapl = inv_covs_lapl_new.copy()\n\n # U_1\n R_1 = inv_covs_loss - inv_covs_lapl\n U_1 += R_1\n \n # U_2\n R_2 = inv_covs_reg - inv_covs_lapl\n U_2 += R_2\n \n R = np.concatenate([R_1, R_2], axis=0)\n \n # stopping criterion\n eps_pri = np.sqrt(2 * K * n * n) * eps_abs + eps_rel * max(np.linalg.norm(np.concatenate([inv_covs_loss, inv_covs_reg], axis=0)),\n np.linalg.norm(np.repeat(inv_covs_lapl, 2, axis=0)))\n eps_dual = np.sqrt(K * n * n) * eps_abs + eps_rel * np.linalg.norm(np.concatenate([U_1, U_2], axis=0))\n if verbose:\n print (np.linalg.norm(R), np.linalg.norm(S), eps_pri, eps_dual)\n \n return inv_covs_loss, inv_covs_reg, inv_covs_lapl",
"def __init__(self, fluxlutman, noise_parameters_CZ, fitted_stepresponse_ty):\n super().__init__()\n self.value_names = ['Cost func', 'Cond phase', 'L1', 'L2', 'avgatefid_pc', 'avgatefid_compsubspace_pc',\n 'phase_q0', 'phase_q1', 'avgatefid_compsubspace', 'avgatefid_compsubspace_pc_onlystaticqubit', 'population_02_state']\n self.value_units = ['a.u.', 'deg', '%', '%', '%', '%', 'deg', 'deg', '%', '%', '%']\n self.fluxlutman = fluxlutman\n self.noise_parameters_CZ = noise_parameters_CZ\n self.fitted_stepresponse_ty=fitted_stepresponse_ty # list of 2 elements: stepresponse (=y)\n # as a function of time (=t)",
"def test_SLM():\n samples = 10\n predictors = 3\n\n grid = list(create_parameter_grid(samples, predictors))\n Y = np.random.rand(samples, 10242, predictors)\n\n for i in range(len(grid)):\n # Skip exceptions that we know error.\n if grid[i][\"surf\"] is None:\n if grid[i][\"correction\"] is not None and \"rft\" in grid[i][\"correction\"]:\n continue\n if grid[i][\"Y_idx\"] > 1 and grid[i][\"two_tailed\"] is False:\n continue\n\n try:\n slm = SLM(\n model=grid[i][\"model\"],\n contrast=grid[i][\"contrast\"],\n surf=grid[i][\"surf\"],\n mask=grid[i][\"mask\"],\n correction=grid[i][\"correction\"],\n two_tailed=grid[i][\"two_tailed\"],\n )\n slm.fit(Y[:, :, 0 : grid[i][\"Y_idx\"]])\n except Exception as e:\n print(\"Error on run:\", i)\n print(\"SLM failed with the following parameters:\")\n print(\"Model: \", grid[i][\"model\"])\n print(\"Contrast: \", grid[i][\"contrast\"])\n print(\"Surface: \", grid[i][\"surf\"])\n print(\"Mask: \", grid[i][\"mask\"])\n print(\"Correction: \", grid[i][\"correction\"])\n print(\"Two_tailed: \", grid[i][\"two_tailed\"])\n print(\"Y_idx: \", grid[i][\"Y_idx\"])\n raise e",
"def __init__(\r\n self,\r\n centre=0.0, # <- PyAutoFit recognises these constructor arguments\r\n intensity=0.1, # <- are the Gaussian`s model parameters.\r\n sigma=0.01,\r\n ):\r\n\r\n self.centre = centre\r\n self.intensity = intensity\r\n self.sigma = sigma",
"def fit_line(_sn, l_crop, model='da2014'):\n from scipy import interpolate\n #load normalised models and linearly interp models onto spectrum wave\n m_wave,m_flux_n,m_param = norm_models(model=model)\n sn_w = _sn[:,0]\n m_flux_n_i = interpolate.interp1d(m_wave,m_flux_n,kind='linear')(sn_w)\n #Crops models and spectra in a line region, renorms models, calculates chi2\n tmp_lines_m, lines_s, l_chi2 = [],[],[]\n for i in range(len(l_crop)):\n l_c0,l_c1 = l_crop[i,0],l_crop[i,1] \n l_m = m_flux_n_i.transpose()[(sn_w>=l_c0)&(sn_w<=l_c1)].transpose()\n l_s = _sn[(sn_w>=l_c0)&(sn_w<=l_c1)]\n l_m = l_m*np.sum(l_s[:,1])/np.sum(l_m,axis=1).reshape([len(l_m),1])\n l_chi2.append( np.sum(((l_s[:,1]-l_m)/l_s[:,2])**2,axis=1) )\n tmp_lines_m.append(l_m)\n lines_s.append(l_s)\n #mean chi2 over lines and stores best model lines for output\n lines_chi2, lines_m = np.sum(np.array(l_chi2),axis=0), []\n is_best = lines_chi2==lines_chi2.min()\n for i in range(len(l_crop)): lines_m.append(tmp_lines_m[i][is_best][0])\n best_TL = m_param[is_best][0]\n return lines_s,lines_m,best_TL,m_param,lines_chi2",
"def LeastSquareFit_Fixed_Parameter(self,params0,FixedIndices):\n\t\tnpl = self.Observations.nplanets\n\t\tassert len(params0.reshape(-1)) == npl * 5, \"Shape of initial parameter does not match what is required for the number of planets!\"\n\t\t\t\n\t\ttarget_data = np.array([])\n\t\terrors = np.array([])\n\t\t\n\t\tfor time,err in zip(self.Observations.transit_times,self.Observations.transit_uncertainties):\n\t\t\ttarget_data = np.append(target_data,time)\n\t\t\terrors = np.append(errors,err)\n\t\t\n\t\ttFinal = self.Observations.tFinal() + np.max(self.Observations.PeriodEstimates)\n\t\t\n\t\tFixedPars = (params0.reshape(1,npl*5))[0,FixedIndices]\n\t\t\n\t\tdef objectivefn(x):\n\t\t\tinpars = np.insert( x , np.array(FixedIndices)-np.arange(len(FixedIndices)) , FixedPars )\n\t\t\t#print inpars\t\n\t\t\ttransits,success = self.MCMC_CoplanarParam_TransitTimes(inpars,tFinal)\n\t\t\t\n\t\t\tanswer = np.array([],dtype=float)\n\t\t\tfor i,t in enumerate(transits):\n\t\t\t\ttnums = self.Observations.transit_numbers[i]\n\t\t\t\ttry:\n\t\t\t\t\tanswer = np.append( answer,np.array(t[tnums]) )\n\t\t\t\texcept:\n\t\t\t\t\treturn -np.inf * np.ones(len(target_data))\n\t\t\t#\n\t\t\ttry:\n\t\t\t\tttvchi2 = (answer - target_data)/errors\n\t\t\texcept:\n\t\t\t\treturn -np.inf * np.ones(len(target_data))\n\t\t\t\n\t\t\treturn ttvchi2\n\t\t\n\t\tfitpars = leastsq(objectivefn, np.delete(params0,FixedIndices) ,full_output=1)[0]\n\t\treturn np.insert( fitpars, np.array(FixedIndices)-np.arange(len(FixedIndices)), FixedPars)",
"def __GetSFParams(self):\n\n dim_names = list(self.__nc_RSoft_I.dimensions.keys())\n if 'radial_structure_functions' in dim_names:\n self.__containsRadial = True\n self.__n_SF_rad = len(self.__nc_RSoft_I.variables['mus'][:])\n self.etol_radial = self.__nc_RSoft_I.radial_error_tolerance\n self.mus = self.__nc_RSoft_I.variables['mus'][:]\n self.Ls = self.__nc_RSoft_I.variables['Ls'][:]\n self.radial_Xs = self.__nc_RSoft_I.variables['radial_Xs'][:]\n self.radial_Ys = self.__nc_RSoft_I.variables['radial_Ys'][:]\n if 'angular_structure_functions' in dim_names:\n self.__containsAngular = True\n self.etol_angular = self.__nc_RSoft_I.angular_error_tolerance\n self.__n_SF_ang = len(self.__nc_RSoft_I.variables['xis'][:])\n self.xis = self.__nc_RSoft_I.variables['xis'][:]\n self.zetas = self.__nc_RSoft_I.variables['zetas'][:]\n self.lambdas = self.__nc_RSoft_I.variables['lambdas'][:]\n self.angular_Xs = self.__nc_RSoft_I.variables['angular_Xs'][:]\n self.angular_Ys = self.__nc_RSoft_I.variables['angular_Ys'][:]\n self.angular_Zs = self.__nc_RSoft_I.variables['angular_Zs'][:]",
"def fit():\n pass",
"def fitRateSpectrum(Times, Data, Rates, w, Lnorm='ridge', standardizeData=True, CalcNdof=False, rho=0.5):\n\n \n if Lnorm == 'lasso':\n # Use L1-norm Lasso regression\n try:\n from scikits.learn.linear_model import Lasso \n except:\n print 'Error: could NOT import Lasso from scikits.learn.linear_model. Using L2 norm (ridge).'\n Lnorm = 'ridge'\n\n if Lnorm == 'enet':\n # Use L1-L2-mixture norm Lasso regression\n try:\n from scikits.learn.linear_model import ElasticNet\n except:\n print 'Error: could NOT import ElasticNet from scikits.learn.linear_model. Using L2 norm (ridge).'\n Lnorm = 'ridge'\n\n\n if Lnorm == 'lasso':\n\n lasso = Lasso(alpha = w, fit_intercept=False) # assume the data is already \"centered\" -- i.e. no zero rate\n X, Xmean = Xsubmatrix(Rates, Times, standardizeData=standardizeData)\n #print 'X.shape', X.shape, 'Data.shape', Data.shape\n lasso.fit(X, Data, max_iter=1e6, tol=1e-7)\n A = lasso.coef_\n\n # Compute \"residual sum of squares\" (note loss function is different for L1-norm)\n y_pred_lasso = lasso.predict(X)\n diff = y_pred_lasso - Data\n\n\n elif Lnorm == 'enet':\n\n # NOTE: The convention for rho is backwards in scikits.learn, instead of rho we must send (1-rho)\n enet = ElasticNet(alpha = w, rho=(1.-rho), fit_intercept=False) # assume the data is already \"centered\" -- i.e. no zero rate\n X, Xmean = Xsubmatrix(Rates, Times, standardizeData=standardizeData)\n #print 'X.shape', X.shape, 'Data.shape', Data.shape\n #enet.fit(X, Data, max_iter=1e6, tol=1e-7)\n enet.fit(X, Data, max_iter=1e6, tol=1e-3) # for testing\n A = enet.coef_\n\n # Compute \"residual sum of squares\" (note loss function is different for L1-norm)\n y_pred_enet = enet.predict(X)\n diff = y_pred_enet - Data\n\n\n elif Lnorm == 'ridge':\n X, Xmean = Xmatrix(Rates, Times, w, standardizeData=standardizeData )\n Xinv = linalg.pinv(X)\n\n y = np.array( Data.tolist() + [0. for k in Rates] )\n if standardizeData:\n y - y.mean()\n A = np.dot(Xinv, y)\n\n # Compute \"residual sum of squares\" (note loss function is different for L1-norm)\n diff = SumSpectra(A, Rates, Times) - Data\n\n rss = np.dot(diff,diff) # Residual sum of squares\n\n if CalcNdof:\n Xsub, Xmean = Xsubmatrix(Rates, Times, standardizeData=standardizeData)\n XT = np.transpose(Xsub)\n I_XT = np.eye(XT.shape[0])\n I_X = np.eye(Xsub.shape[0])\n Xtemp = np.dot(Xsub, np.linalg.inv(np.dot(XT,Xsub) + w*I_XT))\n ndof = np.trace(I_X - np.dot(Xtemp,XT))\n else:\n ndof = None\n\n return A, rss, ndof",
"def __init__(self,x=0.1,E=10.0, mpar={}, topchem='He', topden=1.78e-4, botchem='Sr50Cl100H110493.721O55246.86', botden=1.0032, element='Sr', line='Ka1', vslit= 0.04, detlen=10.5, qoff=0.0, yscale=1,int_bg=0, Rc=0, sur_den=0,ion_depth=0):\n if type(x)==list:\n self.x=np.array(x)\n else:\n self.x=x\n self.E=E\n self.__mpar__ = mpar\n self.topchem = topchem\n self.topden = topden\n self.botchem = botchem\n self.botden = botden\n self.element = element\n self.line = line\n self.vslit = vslit\n self.detlen = detlen\n self.qoff = qoff\n self.yscale = yscale\n self.int_bg = int_bg\n self.Rc = Rc\n self.sur_den = sur_den\n self.ion_depth = ion_depth\n elelist = xdb.atomic_symbols\n linelist = list(xdb.xray_lines(98).keys())\n self.choices={'element':elelist,'line': linelist} #If there are choices available for any fixed parameters\n self.output_params = {}\n self.init_params()\n self.__fit__=False\n self.__avoganum__ = scipy.constants.Avogadro\n self.__eleradius__ = scipy.constants.physical_constants['classical electron radius'][0]*1e10 #classic electron radius in \\AA",
"def set_params(**kwargs):\n\t\tif \"study\" in kwargs.keys():\n\t\t\traise TypeError(\"Got an unexpected keyword argument: 'study'\")\n\t\telse:\n\t\t\tfor i in range(len(_RECOGNIZED_ELEMENTS_)):\n\t\t\t\t__settings[_RECOGNIZED_ELEMENTS_[i]] = __fractional(\n\t\t\t\t\t_RECOGNIZED_ELEMENTS_[i], study = \"LC18\", **kwargs)[0]",
"def setShapeParameters(self, pars):\n\t\tif not self.chi2init:\n\t\t\traise RuntimeError(\"chi2 not inited, no knowledge about shape parameters\")\n\t\tif not len(pars) == self.nPar:\n\t\t\tprint \"GRAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\"\n\t\t\tprint pars\n\t\t\tprint \"GRAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\"\n\t\t\traise ValueError(\"Number of shape parameters does not match len(pars) = \" + str(len(pars)) + \" != self.nPar = \" + str(self.nPar))\n\t\tcountPar = 0\n\t\tfor s in range(self.nSect):\n\t\t\tif not s in self.funcs:\n\t\t\t\tcontinue\n\t\t\tfor f in self.funcs[s]:\n\t\t\t\tf.setParameters(pars[countPar:countPar + f.nPar])\n\t\t\t\tcountPar += f.nPar",
"def plot_NHI_model(lls_dict, ax, lsz=12., touch=False, scl=1., csz=10.):\n from linetools.spectra.plotting import get_flux_plotrange\n\n spec, xspec, gdp, NHI, tau0 = setup_lls_fit_analy(lls_dict['spec_fil'], lls_dict['z'], lls_dict['windows'], lls_dict['NHI_mnx'])\n # Scale\n xspec.data['flux'] *= scl\n # Limits\n xmnx = [lls_dict['windows'][0][0], 940.*(1+lls_dict['z'])]\n if lls_dict['cdict']['type'] == 'Gaussian':\n ymnx = [-1*lls_dict['cdict']['sig'], lls_dict['cdict']['best']+4*lls_dict['cdict']['sig']]\n elif lls_dict['cdict']['type'] == 'Fixed':\n ymnx = [-0.1*lls_dict['cdict']['value'], 1.5*lls_dict['cdict']['value']]\n elif lls_dict['cdict']['type'] == 'Fit_const':\n ymnx = [-1*(lls_dict['cdict']['fit_val'][0]-lls_dict['cdict']['fit_val'][1]),\n 3*(lls_dict['cdict']['fit_val'][2]-lls_dict['cdict']['fit_val'][0])+\n lls_dict['cdict']['fit_val'][0]]\n elif lls_dict['cdict']['type'] == 'Fit_line':\n if gdp is None:\n gdp = (xspec.wavelength>xmnx[0]*u.AA) & (xspec.wavelength<xmnx[1]*u.AA)\n conti = lls_dict['cdict']['best'][0] + lls_dict['cdict']['best'][1]*(\n xspec.wavelength[gdp].value-lls_dict['cdict']['slope_pivot']*(1+lls_dict['z']))\n mx = np.max(conti)\n ymnx = [-0.1*mx, mx*1.3]\n else:\n raise ValueError(\"Need to setup this continuum model\")\n # Extend xmnx\n if lls_dict['cdict']['type'] in ['Fit_line', 'Fit_const']:\n xmx = 0.\n for rng in lls_dict['cdict']['analy']:\n xmx = max(xmx, rng[1])\n xmnx[1] = xmx+3.\n # Scale\n ymnx = np.array(ymnx)*scl\n # Finally\n #idx = (xspec.wavelength > xmnx[0]*u.AA) & (xspec.wavelength < xmnx[1]*u.AA)\n idx = gdp\n f_ymnx = get_flux_plotrange(xspec.flux[idx].value)\n ymnx[1] = max(ymnx[1],f_ymnx[1])\n\n\n # Axes\n #ax.xaxis.set_minor_locator(plt.MultipleLocator(0.5))\n ax.xaxis.set_major_locator(plt.MultipleLocator(20.))\n #ax.yaxis.set_minor_locator(plt.MultipleLocator(0.1))\n #ax.yaxis.set_major_locator(plt.MultipleLocator(0.2))\n ax.set_xlim(xmnx)\n ax.set_ylim(ymnx)\n if scl == 1.:\n ax.set_ylabel(r'$f_\\lambda$ (cgs)', size=lsz)\n else:\n ax.set_ylabel(r'$f_\\lambda$ ($10^{-15}$ cgs)', size=lsz)\n\n\n # Plot data\n ax.plot(xspec.wavelength, xspec.flux, color='black', drawstyle='steps-mid',\n zorder=2)\n try:\n ax.plot(xspec.wavelength, scl*xspec.sig, ':', color='red', zorder=1)\n except ValueError:\n pdb.set_trace()\n\n # Binned\n if False:\n binsz = 5.\n binwv = np.arange(1040., 1200., binsz)*u.AA\n binspec = xspec.rebin(binwv)\n gdp = binspec.wavelength.value < 910.*(1+lls_dict['z'])\n ax.scatter(binspec.wavelength.value[gdp]+binsz/2., binspec.flux[gdp],\n color='yellow', zorder=300)\n #edgecolor='none')#, alpha=0.5)\n\n # Best continuum\n if lls_dict['cdict']['type'] == 'Gaussian':\n conti = lls_dict['cdict']['best']*np.ones_like(xspec.flux.value)\n elif lls_dict['cdict']['type'] == 'Fixed':\n conti = lls_dict['cdict']['value']*np.ones_like(xspec.flux.value)\n elif lls_dict['cdict']['type'] == 'Fit_const':\n conti = lls_dict['cdict']['fit_val'][0]*np.ones_like(xspec.flux.value)\n elif lls_dict['cdict']['type'] == 'Fit_line':\n conti = lls_dict['cdict']['best'][0] + lls_dict['cdict']['best'][1]*(\n xspec.wavelength.value-lls_dict['cdict']['slope_pivot']*(1+lls_dict['z']))\n ax.plot(xspec.wavelength, conti*scl, '--', color='green', zorder=3)\n ax.minorticks_on()\n if touch is True:\n pass\n #ax.get_xaxis().set_ticks([])\n else:\n ax.set_xlabel('Wavelength (Ang)', size=lsz)\n\n # Best Model\n mclr = 'lightblue'\n wv_rest = xspec.wavelength / (lls_dict['z']+1)\n energy = wv_rest.to(u.eV, equivalencies=u.spectral())\n tau0 = 
(10.**lls_dict['fit_NHI'][0] / u.cm**2) * ltaa.photo_cross(1, 1, energy)\n if lls_dict['analy_type'] in ['Fit_Conti', 'Vary_Conti']:\n if lls_dict['fit_NHI'][0] != lls_dict['fit_NHI'][2]:\n best_model = scl*conti * np.exp(-1*tau0)\n abs = tau0 > 0.\n ax.plot(xspec.wavelength[abs], best_model[abs], color=mclr, zorder=100)\n\n # Continuum Error\n clr_ce = 'lightgreen'\n alpha_ce = 0.4\n if lls_dict['cdict']['type'] == 'Gaussian':\n cwv = tau0 == 0.\n npix = np.sum(cwv)\n ax.fill_between(xspec.wavelength.value[cwv],\n [scl*lls_dict['cdict']['best']+lls_dict['cdict']['sig']]*npix,\n [scl*lls_dict['cdict']['best']-lls_dict['cdict']['sig']]*npix,\n color=clr_ce, alpha=alpha_ce, zorder=50)\n elif lls_dict['cdict']['type'] == 'Fit_const':\n for rng in lls_dict['cdict']['analy']:\n idx = ((xspec.wavelength > rng[0]*u.AA) &\n (xspec.wavelength < rng[1]*u.AA) &\n (xspec.sig > 0))\n gdC = np.where(idx)[0]\n ax.fill_between(xspec.wavelength.value[gdC],\n [scl*lls_dict['cdict']['fit_val'][1]]*gdC.size,\n [scl*lls_dict['cdict']['fit_val'][2]]*gdC.size,\n color=clr_ce, alpha=alpha_ce, zorder=50)\n elif lls_dict['cdict']['type'] == 'Fit_line':\n #xdb.set_trace()\n if 'fit_val' in lls_dict['cdict']:\n for rng in lls_dict['cdict']['analy']:\n idx = ((xspec.wavelength > rng[0]*u.AA) &\n (xspec.wavelength < rng[1]*u.AA) &\n (xspec.sig > 0))\n gdC = np.where(idx)[0]\n #\n sig0 = (lls_dict['cdict']['fit_val'][0][2]-lls_dict['cdict']['fit_val'][0][1])/2.\n sig1 = (lls_dict['cdict']['fit_val'][1][2]-lls_dict['cdict']['fit_val'][1][1])/2.\n sigl = np.sqrt(sig0**2 +\n sig1**2*(lls_dict['cdict']['slope_pivot']*(1+lls_dict['z'])-\n xspec.wavelength.value[gdC])**2)\n ax.fill_between(xspec.wavelength.value[gdC],\n scl*(conti[gdC] + sigl),\n scl*(conti[gdC] - sigl),\n color=clr_ce, alpha=alpha_ce, zorder=50)\n\n # Model with error (limits too)\n taulow = (10.**lls_dict['fit_NHI'][1] / u.cm**2) * ltaa.photo_cross(1, 1, energy)\n try:\n low_model = scl*conti * np.exp(-1*taulow)\n except ValueError:\n pdb.set_trace()\n tauhi = (10.**lls_dict['fit_NHI'][2] / u.cm**2) * ltaa.photo_cross(1, 1, energy)\n hi_model = scl*conti * np.exp(-1*tauhi)\n mwv = tau0 > 0.\n ax.fill_between(xspec.wavelength.value[mwv], low_model[mwv],\n hi_model[mwv], color=mclr, alpha=0.3, zorder=100)\n\n # Finish\n ax.plot(xmnx, [0.,0.], '--', color='gray')",
"def setParameters(self, sx_sim=None):\n # TODO rething that ..\n #if sx_sim is not None:\n #if ds_model is not None:\n #if di_model is not None:\n self.sx_sim = sx_sim\n p = defaultParams(chord=self._chord, rho=self._rho, sx=self.sx_sim, ds=self.ds_model, di=self.di_model,\n M=self._M33, C=self._C33, K=self._K33)\n p['beta'] = self._beta\n if len(p['Iq'])==0:\n raise Exception('No states are present')\n\n # --- Dynamic inflow / induction\n p['a0'] = self._a0\n p['ap0'] = self._ap0\n p['di_tau1'] = self.di_tau1\n p['di_tau2'] = self.di_tau2\n\n # --- Aerodynamic parameters\n if self._y_AQ>0: \n print('[WARN] y_AQ positive is unconventional')\n p['y_AQ'] = self._y_AQ\n if self._y_AT is None:\n p['y_AT'] = self._y_AQ+self._chord/2 # default is approximatively half a chord behind\n else:\n p['y_AT'] = self._y_AT\n p['x_AQ'] = self._x_AQ\n p['x_AT'] = self._x_AT\n if self._ppol is None:\n raise Exception('Polar parameters need to be set')\n p.update(self._ppol)\n # # p.update({'linModel':False, 'drag':drag})\n\n self.p_sim = p",
"def LeastSquareParametersFit(self,params0,inclination_data=None):\n\t\tnpl = self.Observations.nplanets\n\t\tif len(params0.reshape(-1)) == npl * 5:\n\t\t\tcoplanar = True\n\t\telif len(params0.reshape(-1)) == npl * 7:\n\t\t\tcoplanar = False\n\t\telse:\n\t\t\tprint(\"Shape of initial parameter does not match what is required for the number of planets!\")\n\t\t\traise\n\t\t\t\n\t\ttarget_data = np.array([])\n\t\terrors = np.array([])\n\t\t\n\t\tfor time,err in zip(self.Observations.transit_times,self.Observations.transit_uncertainties):\n\t\t\ttarget_data = np.append(target_data,time)\n\t\t\terrors = np.append(errors,err)\n\t\t\n\t\ttFinal = self.Observations.tFinal() + np.max(self.Observations.PeriodEstimates)\n\t\t\n\t\tdef objectivefn(x):\n\t\t\t\n\t\t\tif coplanar:\n\t\t\t\ttransits,success = self.MCMC_CoplanarParam_TransitTimes(x,tFinal)\n\t\t\telse:\n\t\t\t\ttransits,success = self.MCMC_Param_TransitTimes(x,tFinal)\n\t\t\tif\tinclination_data:\n\t\t\t\t\tassert not coplanar, \"Inclination data should not be include for coplanar fits\"\n\t\t\t\t\tcosi = np.abs( np.cos( x.reshape(-1,7)[:,4] ) )\n\t\t\t\t\tcosi0 = inclination_data[0]\n\t\t\t\t\tcosi_err = inclination_data[0]\n\t\t\t\t\tinc_chi2 = (cosi - cosi0) / cosi_err\n\t\t\t\n\t\t\tanswer = np.array([],dtype=float)\n\t\t\tfor i,t in enumerate(transits):\n\t\t\t\ttnums = self.Observations.transit_numbers[i]\n\t\t\t\ttry:\n\t\t\t\t\tanswer = np.append( answer,np.array(t[tnums]) )\n\t\t\t\texcept:\n\t\t\t\t\treturn -np.inf * np.ones(len(target_data))\n\t\t\t#\n\t\t\ttry:\n\t\t\t\tttvchi2 = (answer - target_data)/errors\n\t\t\texcept:\n\t\t\t\treturn -np.inf * np.ones(len(target_data))\n\t\t\t\n\t\t\tif inclination_data:\n\t\t\t\treturn np.append(ttvchi2,inc_chi2)\n\t\t\telse:\n\t\t\t\treturn ttvchi2\n\t\t\n\t\treturn leastsq(objectivefn, params0,full_output=1)",
"def add_X_NNLO_fit(axe, xran, values, errors, fill=False, save=False):\n B = values[\"B\"]\n F_0 = values[\"F_0\"]\n\n LAMBDA4 = values[\"Lambda4\"]\n LAMBDA3 = values[\"Lambda3\"]\n # LAMBDA12 = values[\"Lambda12\"]\n km = values[\"km\"]\n kf = values[\"kf\"]\n\n x = np.linspace(xran[0], xran[1], num=500)\n\n Msqr = x * (8 * (np.pi**2) * (F_0**2))\n arg4 = LAMBDA4**2 / Msqr\n arg3 = LAMBDA3**2 / Msqr\n # arg12 = LAMBDA12**2 / Msqr\n\n l1 = -0.4\n l2 = 4.3\n\n Lambda1sqr = (phys_pion**2) * np.exp(l1)\n Lambda2sqr = (phys_pion**2) * np.exp(l2)\n\n lnLambda12sqr = (7.0 * np.log(Lambda1sqr) + 8.0 * np.log(Lambda2sqr)) / 15.0\n lambda12sqr = np.exp(lnLambda12sqr)\n\n arg12 = lambda12sqr / Msqr\n\n lm = 1.0 / 51.0 * (60.0 * np.log(arg12) - 9.0 * np.log(arg3) + 49.0)\n lf = 1.0 / 30.0 * (30.0 * np.log(arg12) + 6.0 * np.log(arg3) - 6.0 * np.log(arg4) + 23.0)\n\n y = F_0 * (1.0 + x * np.log(arg4) - 5.0 / 4.0 * (x**2) * (lf)**2 + kf * x**2)\n\n plots = []\n paramstring = \" \".join(\"${}={}$\".format(format_parameters(k), print_paren_error(float(v), float(errors[k])))\n for k, v in sorted(values.iteritems()))\n paramstring = \"$ M_\\pi<{}$\".format(values[\" M_\\pi<\"])\n plabel = \"NNLO {}\".format(paramstring)\n plabel = \"NNLO\"\n if \"cutoff\" in values:\n plabel += \" $M_\\pi < {}$\".format(values[\"cutoff\"])\n addplot(plots, axe, fill, save, x=x, y=y, params={\"label\":plabel, \"ls\":\"--\", \"lw\":4})\n\n return plots",
"def _ssc(pars, nu):\n\t(log10_gamma_max, redshift, delta, log10_R, log10_B, log10_Norm, index, log10_gamma_c) = pars\n\n\t# define from the input parameters the dictionary to be feeded to the model\n\t# we neglect the time-dependent part for now\n\ttime_grid = dict(time_min=0, time_max=3, time_bins=50, time_inj=2)\n\t# gamma grid\n\tgamma_grid = dict(log10_gamma_min=2, log10_gamma_max=log10_gamma_max, gamma_bins=50)\n\t# emission region, again time dependent part is ignored\n\temission_region = dict(log10_R=log10_R, R_unit='cm', delta=delta,\n\t\t\t\t\t\t log10_B=log10_B, B_unit='G', t_esc=1.5, z=redshift)\n\t# injected spectrum\n\tinjected_spectrum = dict(type='ExponentialCutoffPowerLaw',\n\t\t\t\t\t\t\t log10_Norm=log10_Norm,\n\t\t\t\t\t\t\t Norm_unit='cm-3',\n\t\t\t\t\t\t\t index=index,\n\t\t\t\t\t\t\t log10_gamma_c=log10_gamma_c)\n\n\t# dump into a tmp yaml file\n\twith open('tmp_config.yaml', 'w') as yaml_file:\n\t\tyaml.dump({'time_grid': time_grid,\n\t\t\t\t 'gamma_grid': gamma_grid,\n\t\t\t\t 'emission_region': emission_region,\n\t\t\t\t 'injected_spectrum': injected_spectrum},\n\t\t\t\t yaml_file, default_flow_style=False)\n\n\n\t# initialize the ssc model\n\tmodel = BaseModel('tmp_config.yaml')\n\n\t# define the base electron population for now just as the injected one\n\tgamma = model.gamma\n\tN_e = model.N_e_inj(gamma)\n\n\t# test synchrotron\n\tsyn = Synchrotron(model)\n\tic = InverseCompton(model)\n\n\tobs_nu = nu * u.Hz\n\t# de - boosting, for intrinsic values\n\tnu = obs_nu / model.blob.delta\n\n\t# transform to energy\n\tE = const.h * obs_nu\n\n\tsyn_flux = syn.flux(nu, N_e, self_absorption=True)\n\tic_flux = ic.flux(nu, N_e, ebl_absorption=True)\n\n\tsed = (E**2*(syn_flux + ic_flux)).to('erg cm-2 s-1')\n\n\treturn sed.value",
"def fitModel(self, params:lmfit.Parameters=None):\r\n if params is None:\r\n params = self.params\r\n self.initializeRoadRunnerModel()\r\n if self.parametersToFit is not None:\r\n self.optimizer = Optimizer.optimize(self.calcResiduals, params,\r\n self._fitterMethods, logger=self.logger,\r\n numRestart=self._numRestart)\r\n self.minimizerResult = self.optimizer.minimizerResult\r\n # Ensure that residualsTS and fittedTS match the parameters\r\n self.updateFittedAndResiduals(params=self.params)",
"def set_shape_params(self, params):\n self.alpha = params[0]\n self.beta = params[1]\n self.gamma = params[2]\n self.c500 = params[3]\n self.P0 = params[4]",
"def fit_energylaw(showplots = False):\r\n #Data is from Cosmlc Ray Muon Spectrum In the Atmoephere M. Circella et al 1993 Fig 4\r\n #(at 15KM. conversion from depth to altitude using https://www.engineeringtoolbox.com/air-altitude-pressure-d_462.html)\r\n #Units are GeV/c vs (cm^2 s sr Gev / c) ^ -1\r\n data = np.array([[.4, .025], [.5, .017], [.7, .01], [1, .008], [1.25, .004], [1.8, .003], [2.5, .0015], [5,.00035], [18, .00001]])\r\n xbounds = [.1, 100]\r\n #Fit data to ax^b\r\n data_log = np.log(data)\r\n fits = np.polyfit(data_log[:,0], data_log[:,1], 1)\r\n a = np.exp(fits[1])\r\n b = fits[0]\r\n if(showplots):\r\n fitdata = np.polyfit(data_log[:,0], data_log[:,1], 1,cov=True)\r\n print(fitdata[1])\r\n x = np.linspace(.4, 50, 1000)\r\n plt.scatter(data[:,0], data[:,1], label=\"Data from Circella\")\r\n plt.loglog(x, a * x **b, color=\"green\", label=\"ax^b fit\")\r\n plt.xlabel(\"Muon Energy (GeV/c)\")\r\n plt.ylabel(\"Differential Intensity (cm^2 s sr Gev / c)^-1\")\r\n plt.title(\"Fitting Flux vs Energy at 15km from Circella et al.\")\r\n plt.legend()\r\n plt.show()\r\n f = lambda x: a * x**b\r\n return f, xbounds",
"def fit_psp(data, search_window, clamp_mode, sign=0, exp_baseline=True, baseline_like_psp=False, refine=True, init_params=None, fit_kws=None, ui=None): \n import pyqtgraph as pg\n prof = pg.debug.Profiler(disabled=True, delayed=False)\n prof(\"args: %s %s %s %s %s %s %s %s\" % (search_window, clamp_mode, sign, exp_baseline, baseline_like_psp, refine, init_params, fit_kws))\n \n if ui is not None:\n ui.clear()\n ui.console.setStack()\n ui.plt1.plot(data.time_values, data.data)\n ui.plt1.addLine(x=search_window[0], pen=0.3)\n ui.plt1.addLine(x=search_window[1], pen=0.3)\n prof('plot')\n\n if fit_kws is None:\n fit_kws = {}\n if init_params is None:\n init_params = {}\n\n method = 'leastsq'\n fit_kws.setdefault('maxfev', 500)\n\n # good fit, slow\n # method = 'Nelder-Mead'\n \n # fit_kws.setdefault('options', {\n # 'maxiter': 300, \n \n # # 'disp': True,\n # })\n \n # good fit\n # method = 'Powell'\n # fit_kws.setdefault('options', {'maxfev': 200, 'disp': True})\n\n # bad fit\n # method = 'CG'\n # fit_kws.setdefault('options', {'maxiter': 100, 'disp': True})\n\n # method = 'L-BFGS-B'\n # fit_kws.setdefault('options', {'maxiter': 100, 'disp': True})\n\n # take some measurements to help constrain fit\n data_min = data.data.min()\n data_max = data.data.max()\n data_mean = data.mean()\n \n baseline_mode = float_mode(data.time_slice(None, search_window[0]).data)\n \n # set initial conditions depending on whether in voltage or current clamp\n # note that sign of these will automatically be set later on based on the \n # the *sign* input\n if clamp_mode == 'ic':\n amp_init = init_params.get('amp', .2e-3)\n amp_max = min(100e-3, 3 * (data_max-data_min))\n rise_time_init = init_params.get('rise_time', 5e-3)\n decay_tau_init = init_params.get('decay_tau', 50e-3)\n exp_tau_init = init_params.get('exp_tau', 50e-3)\n exp_amp_max = 100e-3\n elif clamp_mode == 'vc':\n amp_init = init_params.get('amp', 20e-12)\n amp_max = min(500e-12, 3 * (data_max-data_min))\n rise_time_init = init_params.get('rise_time', 1e-3)\n decay_tau_init = init_params.get('decay_tau', 4e-3)\n exp_tau_init = init_params.get('exp_tau', 4e-3)\n exp_amp_max = 10e-9\n else:\n raise ValueError('clamp_mode must be \"ic\" or \"vc\"')\n\n # Set up amplitude initial values and boundaries depending on whether *sign* are positive or negative\n if sign == -1:\n amp = (-amp_init, -amp_max, 0)\n elif sign == 1:\n amp = (amp_init, 0, amp_max)\n elif sign == 0:\n amp = (0, -amp_max, amp_max)\n else:\n raise ValueError('sign must be 1, -1, or 0')\n \n # initial condition, lower boundary, upper boundary\n base_params = {\n 'yoffset': (init_params.get('yoffset', baseline_mode), data_min, data_max),\n 'rise_time': (rise_time_init, rise_time_init/10., rise_time_init*10.),\n 'decay_tau': (decay_tau_init, decay_tau_init/10., decay_tau_init*10.),\n 'rise_power': (2, 'fixed'),\n 'amp': amp,\n }\n \n # specify fitting function and set up conditions\n psp = StackedPsp()\n if exp_baseline:\n if baseline_like_psp:\n exp_min = 0 if sign == 1 else -exp_amp_max \n exp_max = 0 if sign == -1 else exp_amp_max \n base_params['exp_tau'] = 'decay_tau'\n else:\n exp_min = -exp_amp_max \n exp_max = exp_amp_max \n base_params['exp_tau'] = (exp_tau_init, exp_tau_init / 10., exp_tau_init * 20.)\n base_params['exp_amp'] = (0.01 * sign * amp_init, exp_min, exp_max)\n else:\n base_params.update({'exp_amp': (0, 'fixed'), 'exp_tau': (1, 'fixed')})\n \n # print(clamp_mode, base_params, sign, amp_init)\n \n # if weight is None: #use default weighting\n # weight = np.ones(len(y))\n # 
else: #works if there is a value specified in weight\n # if len(weight) != len(y):\n # raise Exception('the weight and array vectors are not the same length') \n # fit_kws['weights'] = weight\n\n # Round 1: coarse fit\n\n # Coarse search xoffset\n n_xoffset_chunks = max(1, int((search_window[1] - search_window[0]) / 1e-3))\n xoffset_chunks = np.linspace(search_window[0], search_window[1], n_xoffset_chunks+1)\n xoffset = [{'xoffset': ((a+b)/2., a, b)} for a,b in zip(xoffset_chunks[:-1], xoffset_chunks[1:])]\n \n prof('prep for coarse fit')\n\n # Find best coarse fit \n search = SearchFit(psp, [xoffset], params=base_params, x=data.time_values, data=data.data, fit_kws=fit_kws, method=method)\n for i,result in enumerate(search.iter_fit()):\n pass\n # prof(' coarse fit iteration %d/%d: %s %s' % (i, len(search), result['param_index'], result['params']))\n fit = search.best_result.best_values\n prof(\"coarse fit done (%d iter)\" % len(search))\n\n if ui is not None:\n br = search.best_result\n ui.plt1.plot(data.time_values, br.best_fit, pen=(0, 255, 0, 100))\n\n if not refine:\n return search.best_result\n\n # Round 2: fine fit\n \n # Fine search xoffset\n fine_search_window = (max(search_window[0], fit['xoffset']-1e-3), min(search_window[1], fit['xoffset']+1e-3))\n n_xoffset_chunks = max(1, int((fine_search_window[1] - fine_search_window[0]) / .2e-3))\n xoffset_chunks = np.linspace(fine_search_window[0], fine_search_window[1], n_xoffset_chunks + 1)\n xoffset = [{'xoffset': ((a+b)/2., a, b)} for a,b in zip(xoffset_chunks[:-1], xoffset_chunks[1:])]\n\n # Search amp / rise time / decay tau to avoid traps\n rise_time_inits = base_params['rise_time'][0] * 1.2**np.arange(-1,6)\n rise_time = [{'rise_time': (x,) + base_params['rise_time'][1:]} for x in rise_time_inits]\n\n decay_tau_inits = base_params['decay_tau'][0] * 2.0**np.arange(-1,2)\n decay_tau = [{'decay_tau': (x,) + base_params['decay_tau'][1:]} for x in decay_tau_inits]\n\n search_params = [\n rise_time, \n decay_tau, \n xoffset,\n ]\n \n # if 'fixed' not in base_params['exp_amp']:\n # exp_amp_inits = [0, amp_init*0.01, amp_init]\n # exp_amp = [{'exp_amp': (x,) + base_params['exp_amp'][1:]} for x in exp_amp_inits]\n # search_params.append(exp_amp)\n\n # if no sign was specified, search from both sides \n if sign == 0:\n amp = [{'amp': (amp_init, -amp_max, amp_max)}, {'amp': (-amp_init, -amp_max, amp_max)}]\n search_params.append(amp)\n\n prof(\"prepare for fine fit %r\" % base_params)\n\n # Find best fit \n search = SearchFit(psp, search_params, params=base_params, x=data.time_values, data=data.data, fit_kws=fit_kws, method=method)\n for i,result in enumerate(search.iter_fit()):\n pass\n prof(' fine fit iteration %d/%d: %s %s' % (i, len(search), result['param_index'], result['params']))\n fit = search.best_result\n prof('fine fit done (%d iter)' % len(search))\n\n return fit",
"def doParametersOfInterest(self):\n ''' ref : physicsmodel -> rvf\n self.modelBuilder.out.var(\"MH\").setRange(float(self.mHRange[0]),float(self.mHRange[1]))\n self.modelBuilder.out.var(\"MH\").setConstant(False)\n '''\n\n self.modelBuilder.doVar(\"mu[0,0,1000]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doVar(\"Fvbf[0,0,1]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doSet(\"POI\",\"mu,Fvbf\")\n self.modelBuilder.doVar(\"\")\n self.modelBuilder.factory_('expr::ggH_s_func(\"(@0-sqrt(@0))*(1.-@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::ggH_b_func(\"(1-sqrt(@0))*(1.-@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::ggH_sbi_func(\"sqrt(@0)*(1.-@1)\", mu,Fvbf)')\n\n self.modelBuilder.factory_('expr::vbfH_s_func(\"(@0-sqrt(@0))*(@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::vbfH_b_func(\"(1-sqrt(@0))*(@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::vbfH_sbi_func(\"sqrt(@0)*(@1)\", mu,Fvbf)')",
"def test_linear_fit_2d_model_set_fixed_parameters(self):\n init_model = models.Polynomial2D(\n degree=2,\n c1_0=[1, 2],\n c0_1=[-0.5, 1],\n n_models=2,\n fixed={\"c1_0\": True, \"c0_1\": True},\n )\n\n x, y = np.mgrid[0:5, 0:5]\n zz = np.array([1 + x - 0.5 * y + 0.1 * x * x, 2 * x + y - 0.2 * y * y])\n\n fitter = LinearLSQFitter()\n fitted_model = fitter(init_model, x, y, zz)\n\n assert_allclose(fitted_model(x, y, model_set_axis=False), zz, atol=1e-14)",
"def akaike_info_criterion_lsq(ssr, n_params, n_samples):\n return akaike_info_criterion(\n -0.5 * n_samples * np.log(ssr / n_samples), n_params, n_samples\n )",
"def test_fit(self):\n\n # Generate data with known parameters\n a, loc, scale = 1.0, 3.0, 5.0\n data = gamma.rvs(a, loc, scale, size=100000)\n\n # Fit the model and check parameters\n copula = GammaUnivariate()\n copula.fit(data)\n self.assertAlmostEqual(copula.a, a, places=1)\n self.assertAlmostEqual(copula.loc, loc, places=1)\n self.assertAlmostEqual(copula.scale, scale, places=1)",
"def set_default_params(self):\n print('------------------')\n print('Setting default parameters with file ', self.input_file_name)\n if 'ssephem' not in self.__dict__:\n self.__dict__['ssephem'] = 'DE436'\n print('Setting default Solar System Ephemeris: DE436')\n if 'clock' not in self.__dict__:\n self.__dict__['clock'] = None\n print('Setting a default Enterprise clock convention (check the code)')\n if 'setupsamp' not in self.__dict__:\n self.__dict__['setupsamp'] = False\n if 'psrlist' in self.__dict__:\n self.psrlist = np.loadtxt(self.psrlist, dtype=np.unicode_)\n print('Only using pulsars from psrlist')\n else:\n self.__dict__['psrlist'] = []\n print('Using all available pulsars from .par/.tim directory')\n if 'psrcachefile' not in self.__dict__:\n self.psrcachefile = None\n if 'tm' not in self.__dict__:\n self.tm = 'default'\n print('Setting a default linear timing model')\n if 'inc_events' not in self.__dict__:\n self.inc_events = True\n print('Including transient events to specific pulsar models')\n if 'fref' not in self.__dict__:\n self.fref = 1400 # MHz\n print('Setting reference radio frequency to 1400 MHz')\n if 'mcmc_covm_csv' in self.__dict__ and os.path.isfile(self.mcmc_covm_csv):\n print('MCMC jump covariance matrix is available')\n self.__dict__['mcmc_covm'] = pd.read_csv(self.mcmc_covm_csv, index_col=0)\n else:\n self.__dict__['mcmc_covm'] = None\n # Copying default priors from StandardModels/CustomModels object\n # Priors are chosen not to be model-specific because HyperModel\n # (which is the only reason to have multiple models) does not support\n # different priors for different models\n for prior_key, prior_default in self.noise_model_obj().priors.items():\n if prior_key not in self.__dict__.keys():\n self.__dict__[prior_key] = prior_default\n\n # Model-dependent parameters\n for mkey in self.models:\n\n self.models[mkey].modeldict = dict()\n\n print('------------------')",
"def fit(self, int_spec, cr_spec=None, minos=0., refit=True, **kwargs):\n\n self._int_spec = lambda EGeV, **kwargs: int_spec(EGeV, **kwargs)\n\n if cr_spec is not None:\n self._cr_spec = lambda EGeV, **kwargs: cr_spec(EGeV, **kwargs)\n else:\n self._cr_spec = None\n\n fitarg = self.fill_fitarg(**kwargs)\n\n t1 = time.time()\n self.run_migrad(fitarg, **kwargs)\n\n try:\n self._m.hesse()\n logging.debug(\"Hesse matrix calculation finished\")\n except RuntimeError as e:\n logging.warning(\n \"*** Hesse matrix calculation failed: {0}\".format(e)\n )\n\n logging.debug(self._m.fval)\n self.__repeat_migrad(**kwargs)\n logging.debug(self._m.fval)\n\n fmin = self._m.fmin\n\n if not fmin.hesse_failed:\n try:\n self._corr = self._m.np_matrix(correlation=True)\n except:\n self._corr = -1\n\n logging.debug(self._m.values)\n\n if self._m.valid and minos:\n for k in self._par_names:\n if kwargs['fix'][k]:\n continue\n self._m.minos(k, minos)\n logging.debug(\"Minos finished\")\n\n else:\n self.__print_failed_fit()\n\n logging.info('fit took: {0}s'.format(time.time() - t1))\n for k in self._par_names:\n if kwargs['fix'][k]:\n err = np.nan\n else:\n err = self._m.errors[k]\n logging.info('best fit {0:s}: {1:.5e} +/- {2:.5e}'.format(k, self._m.values[k], err))",
"def set_parameters(self,params):\n K3Supervisor.set_parameters(self,params)\n self.gtg.set_parameters(self.parameters)\n self.avoidobstacles.set_parameters(self.parameters)\n self.wall.set_parameters(self.parameters)",
"def fitDispCurve(fileName, caliConst):\n AAdata = np.loadtxt(fileName + '.txt')\n k=caliConst[3]\n n=caliConst[2]\n #Be sure to change this appropriately to the fixed dye conc\n x=k*((fixed_dye_conc-n)/55.5)\n n = n\n #here is the code with N fixed to MG's N value\n AAshg=lambda c, B,a,K: B+np.square(a*(x/(1+x+(K*((c-n)/55.5)))))\n '''\n #Not used currently, could be useful in the future\n #More advanced fitting\n C = (fixed_dye_conc * k) / 55.5;\n D = (k * n) / 55.5;\n AAshgadv = lambda x, B, a, K : B + np.square(a * \\\n (((((K * x) / 55.5) + C + D) - \\\n np.sqrt(np.square((((K * x) / 55.5) + C + D)) -\\\n (4 * (((K * n) / 55.5) + D) * C))) \\\n /(2 * ((((K * n) / 55.5) + D)))))\n '''\n aaconst, aacorr = sp.curve_fit(AAshg, AAdata[:,0],AAdata[:,1],p0=(0,10,0.0001)\\\n ,maxfev=1000000,bounds=([0,0,0],[min(AAdata[:,1]), np.inf, np.inf]))\n \n return aaconst, aacorr, AAdata",
"def Train(self, n_eg=None, K=5, \\\n C_parameters=np.array([0.01, 0.1, 1.0, 10., 100.]), \\\n norm_SF=True, norm_plane=True, tol_=0.0001, \\\n max_iter_=10000):\n # ERROR messages\n if not self.__openSFI:\n print('ERROR: Must use OpenSFI before Train')\n sys.exit(-1)\n if not self.__containsParticles:\n print('ERROR: RSoftSF file must contain particles')\n sys.exit(-1)\n if not self.__openDynamicsI:\n print('ERROR: Must use OpenDynamicsI before Train')\n sys.exit(-1)\n if not self.__selectTrainingSet:\n print('ERROR: Must use SelectTrainingSet before Train')\n sys.exit(-1)\n\n # Initialize outputs\n self.radial_plane = np.zeros((self.__n_type, self.__n_SF_rad))\n self.angular_plane = np.zeros((self.__n_type, self.__n_SF_ang))\n self.intercept = np.zeros(self.__n_type)\n n_SF = self.__n_SF_rad + self.__n_SF_ang\n\n # Initialization of structure function and plane norms\n if not self.__train:\n self.__InitializeNorms(norm_SF, norm_plane)\n if norm_SF:\n mean_SF = self._mean_SF\n std_SF = self._std_SF\n else:\n mean_SF = np.zeros(n_SF)\n std_SF = np.ones(n_SF)\n\n # For each particle type\n for idx_type, type_ in enumerate(self.__types_unique):\n # Initializes model and grid search\n linear_svm = svm.LinearSVC(\\\n dual=False, tol=tol_, max_iter=max_iter_)\n grid_search = skl.model_selection.GridSearchCV(linear_svm,\n param_grid={'C':C_parameters}, cv=K,\n refit=True)\n\n # Obtains particles used for training\n idx_type_SF = np.where(self.__types==type_)[0]\n particles_R = skl.utils.shuffle(self.training_R[idx_type])\n particles_NR = skl.utils.shuffle(self.training_NR[idx_type])\n if n_eg is None:\n n_eg_type = min(len(particles_R), len(particles_NR))\n else:\n n_eg_type = n_eg[idx_type]\n particles_R = particles_R[:n_eg_type]\n particles_NR = particles_NR[:n_eg_type]\n training_particles = np.concatenate([particles_R, particles_NR])\n\n # Obtains structure functions (X) and targets (Y) for training\n mean_SF_type = mean_SF[idx_type_SF]\n std_SF_type = std_SF[idx_type_SF]\n X_full = self._ParticlesToSFs(training_particles)\n X_type = (X_full[:,idx_type_SF]-mean_SF_type)/std_SF_type\n Y = np.concatenate((np.ones(n_eg_type), np.zeros(n_eg_type)))\n\n # Trains plane and stores results\n grid_search.fit(X_type, Y)\n plane = grid_search.best_estimator_.coef_[0]\n intercept = grid_search.best_estimator_.intercept_[0]\n\n # Converts from normalized SFs to un-normalized\n plane = plane / std_SF_type\n intercept = intercept - np.dot(plane,mean_SF_type)\n\n # Normalizes plane so that sofness has an std of 1\n if norm_plane:\n cov = self._cov_SF[idx_type_SF][:,idx_type_SF]\n np.savetxt('cov'+str(type_)+'.dat', cov)\n a = np.dot(plane,np.dot(cov,plane))\n plane /= np.sqrt(a)\n intercept /= np.sqrt(a)\n\n # Stores results\n if self.__containsRadial:\n idx_type_SF_rad = np.where(self.radial_Xs==type_)[0]\n self.radial_plane[idx_type, idx_type_SF_rad] = \\\n plane[:self.__n_SF_rad]\n if self.__containsAngular:\n idx_type_SF_ang = np.where(self.angular_Xs==type_)[0]\n self.angular_plane[idx_type, idx_type_SF_ang] = \\\n plane[-self.__n_SF_ang:]\n self.intercept[idx_type] = intercept\n\n # Marks program as run\n self.__train = True\n\n # Allows for chained statements\n return self",
"def __init__(self, settings,study):\n \n # Store the study #\n ###################\n \n self._study = study\n self._parameters_size = self._study.geometry.parameters_size\n \n # Read settings #\n ################# \n if hasattr(settings, 'global_sample_function'):\n # Use given function and ignore bounds\n self._global_sample_function = settings.global_sample_function\n self._global_parameters_bounds = None\n else:\n # If no function, use uniform rand with given boundaries if provided. If not, assume [0,1]\n if hasattr(settings, 'global_parameters_bounds'):\n self._global_parameters_bounds = np.array(settings.global_parameters_bounds)\n else:\n self._global_parameters_bounds = [(0, 1)]*self._parameters_size\n \n self._global_sample_function = lambda: self._global_parameters_bounds[:,0] + (self._global_parameters_bounds[:,1]-self._global_parameters_bounds[:,0])*np.random.rand(1,self._parameters_size).flatten()\n \n\n if hasattr(settings, 'global_result_constraint'):\n self._global_result_constraint = settings.global_result_constraint\n else:\n self._global_result_constraint = None \n \n if hasattr(settings, 'local_result_constraint'):\n self._local_result_constraint = settings.local_result_constraint\n else:\n self._local_result_constraint = None\n \n if hasattr(settings, 'local_max_iterations'):\n self._local_max_iterations = settings.local_max_iterations\n else:\n self._local_max_iterations = 50\n \n if hasattr(settings, 'local_method'):\n self._local_method = settings.local_method\n else:\n self._local_method = 'L-BFGS-B'\n \n if hasattr(settings, 'local_scaling_factor'):\n self._local_scaling_factor = settings.local_scaling_factor\n else:\n self._local_scaling_factor = 1\n \n if hasattr(settings, 'local_ftol'):\n self._local_ftol = settings.local_ftol\n else:\n self._local_ftol = 1e-5\n \n if hasattr(settings, 'local_pgtol'):\n self._local_pgtol = settings.local_pgtol\n else:\n self._local_pgtol = 1e-5\n \n # Wavelength settings for lumopt \n if hasattr(settings, 'local_wavelength_start'):\n self._local_wavelength_start = settings.local_wavelength_start\n else:\n self._local_wavelength_start = 1550e-9\n \n if hasattr(settings, 'local_wavelength_stop'):\n self._local_wavelength_stop = settings.local_wavelength_stop\n else:\n self._local_wavelength_stop = 1550e-9\n \n if hasattr(settings, 'local_wavelength_points'):\n self._local_wavelength_points = settings.local_wavelength_points\n else:\n self._local_wavelength_points = 1\n \n # Keep track of the latest random restart. Run a first simulation with\n # the initial parameters already stored in the geometry\n self._new_param = None",
"def asaxsseqeval(data,param,asaxsenergies,chemshift,fprimefile,samples=None,seqname=None,element=0):\n if samples is None:\n samples=utils.unique([param[i]['Title'] for i in range(0,len(data))]);\n print \"Found samples: \", samples\n if type(samples)!=types.ListType:\n samples=[samples];\n if seqname is not None:\n logfile=open('%s.log' % seqname,'wt')\n logfile.write('ASAXS sequence name: %s\\n' % seqname)\n logfile.write('Time: %s' % time.asctime())\n asaxsecalib=[];\n #asaxsenergies=np.array(utils.unique(asaxsenergies,lambda a,b:(abs(a-b)<2)))\n asaxsenergies=np.array(asaxsenergies);\n for j in range(0,len(asaxsenergies)):\n asaxsecalib.append([param[i]['EnergyCalibrated']\n for i in range(0,len(data)) \n if abs(param[i]['Energy']-asaxsenergies[j])<2][0]);\n asaxsecalib=np.array(asaxsecalib);\n \n print \"Calibrated ASAXS energies:\", asaxsecalib\n fprimes=B1io.readf1f2(fprimefile);\n pylab.plot(fprimes[:,0],fprimes[:,1],'b-');\n pylab.plot(fprimes[:,0],fprimes[:,2],'r-');\n asaxsf1=np.interp(asaxsecalib-chemshift,fprimes[:,0],fprimes[:,1]);\n asaxsf2=np.interp(asaxsecalib-chemshift,fprimes[:,0],fprimes[:,2]);\n print \"f' values\", asaxsf1\n print \"f'' values\", asaxsf2\n if seqname is not None:\n logfile.write('Calibrated ASAXS energies:\\n')\n for i in range(len(asaxsenergies)):\n logfile.write(\"%f -> %f\\tf1=%f\\tf2=%f\\n\" % (asaxsenergies[i],asaxsecalib[i],asaxsf1[i],asaxsf2[i]))\n logfile.write('Chemical shift (eV): %f\\n' % chemshift)\n logfile.write('Atomic number supplied by the user: %d\\n' % element)\n logfile.write('fprime file: %s\\n' % fprimefile)\n pylab.plot(asaxsecalib-chemshift,asaxsf1,'b.',markersize=10);\n pylab.plot(asaxsecalib-chemshift,asaxsf2,'r.',markersize=10);\n pylab.legend(['f1','f2'],loc='upper left');\n pylab.xlabel('Photon energy (eV)');\n pylab.ylabel('Anomalous corrections (e.u.)');\n pylab.title('Anomalous correction factors')\n if seqname is not None:\n pylab.savefig('%s_f1f2.eps' % seqname,dpi=300,transparent='True',format='eps')\n if len(asaxsenergies)<3:\n print \"At least 3 energies should be given!\"\n return\n for s in samples:\n print \"Evaluating sample %s\" % s\n if seqname is not None:\n logfile.write('Sample: %s\\n' % s)\n q=None;\n counter=None;\n fsns=None\n for k in range(0,len(data)): #collect the intensities energy-wise.\n if param[k]['Title']!=s:\n continue\n if q is None:\n q=np.array(data[k]['q']);\n NQ=len(q);\n Intensity=np.zeros((len(q),len(asaxsenergies)))\n Errors=np.zeros((len(q),len(asaxsenergies)))\n counter=np.zeros((1,len(asaxsenergies)))\n fsns=[[] for l in range(len(asaxsenergies))]\n if np.sum(q-np.array(data[k]['q']))>0:\n print \"Check the datasets once again: different q-scales!\"\n continue;\n energyindex=np.absolute(asaxsenergies-param[k]['Energy'])<2\n Intensity[:,energyindex]=Intensity[:,energyindex]+np.array(data[k]['Intensity']).reshape(NQ,1);\n Errors[:,energyindex]=Intensity[:,energyindex]+(np.array(data[k]['Error']).reshape(NQ,1))**2;\n counter[0,energyindex]=counter[0,energyindex]+1;\n if pylab.find(len(energyindex))>0:\n print pylab.find(energyindex)[0]\n fsns[pylab.find(energyindex)[0]].append(param[k]['FSN']);\n Errors=np.sqrt(Errors)\n Intensity=Intensity/np.kron(np.ones((NQ,1)),counter)\n if seqname is not None:\n for i in range(0,len(asaxsenergies)):\n logfile.write('FSNs for energy #%d:' % i)\n for j in fsns[i]:\n logfile.write('%d' % j)\n logfile.write('\\n')\n datatosave=np.zeros((len(q),2*len(asaxsenergies)+1))\n datatosave[:,0]=q;\n for i in range(len(asaxsenergies)):\n 
datatosave[:,2*i+1]=Intensity[:,i]\n datatosave[:,2*i+2]=Errors[:,i]\n np.savetxt('%s_%s_ie.txt' % (seqname, s),datatosave,delimiter='\\t')\n # now we have the Intensity and Error matrices fit to feed to asaxsbasicfunctions()\n N,M,R,DN,DM,DR=asaxsbasicfunctions(Intensity,Errors,asaxsf1,asaxsf2,element=element);\n sep12,dsep12,sep23,dsep23,R1,dR1=asaxspureresonant(Intensity[:,0],Intensity[:,1],Intensity[:,2],\n Errors[:,0],Errors[:,1],Errors[:,2],\n asaxsf1[0],asaxsf1[1],asaxsf1[2],\n asaxsf2[0],asaxsf2[1],asaxsf2[2])\n Ireconst=N+M*2*asaxsf1[0]+R*(asaxsf1[0]**2+asaxsf2[0]**2)\n if seqname is not None:\n datatosave=np.zeros((len(q),7))\n datatosave[:,0]=q;\n datatosave[:,1]=N.flatten(); datatosave[:,2]=DN.flatten();\n datatosave[:,3]=M.flatten(); datatosave[:,4]=DM.flatten();\n datatosave[:,5]=R.flatten(); datatosave[:,6]=DR.flatten();\n np.savetxt('%s_%s_basicfun.txt' % (seqname, s),datatosave,delimiter='\\t')\n datatosave[:,1]=sep12.flatten(); datatosave[:,2]=dsep12.flatten();\n datatosave[:,3]=sep23.flatten(); datatosave[:,4]=dsep23.flatten();\n datatosave[:,5]=R1.flatten(); datatosave[:,6]=dR1.flatten();\n np.savetxt('%s_%s_separation.txt' % (seqname, s),datatosave,delimiter='\\t')\n pylab.figure()\n #pylab.errorbar(q,Intensity[:,0],Errors[:,0],label='I_0',marker='.')\n #pylab.errorbar(q,N.flatten(),DN.flatten(),label='Nonresonant',marker='.')\n #pylab.errorbar(q,M.flatten(),DM.flatten(),label='Mixed',marker='.')\n #pylab.errorbar(q,R.flatten(),DR.flatten(),label='Resonant',marker='o')\n pylab.plot(q,Intensity[:,0],label='I_0',marker='.')\n pylab.plot(q,N.flatten(),label='Nonresonant',marker='.')\n pylab.plot(q,M.flatten(),label='Mixed',marker='.')\n pylab.plot(q,R.flatten(),label='Resonant',marker='o')\n pylab.plot(q,Ireconst.flatten(),label='I_0_reconstructed',marker='.')\n pylab.title(\"ASAXS basic functions for sample %s\" % s)\n pylab.xlabel(u\"q (1/%c)\" % 197)\n pylab.ylabel(\"Scattering cross-section (1/cm)\")\n pylab.gca().set_xscale('log');\n pylab.gca().set_yscale('log');\n pylab.legend();\n pylab.savefig('%s_%s_basicfun.eps'%(seqname,s),dpi=300,format='eps',transparent=True)\n pylab.figure()\n #pylab.errorbar(q,Intensity[:,0],Errors[:,0],label='I_0',marker='.')\n #pylab.errorbar(q,sep12,dsep12,label='(I_0-I_1)/(f1_0-f1_1)',marker='.')\n #pylab.errorbar(q,sep23,dsep23,label='(I_1-I_2)/(f1_1-f1_2)',marker='.')\n #pylab.errorbar(q,R1.flatten(),dR1.flatten(),label='Pure resonant',marker='.')\n pylab.plot(q,Intensity[:,0],label='I_0',marker='.')\n pylab.plot(q,sep12,label='(I_0-I_1)/(f1_0-f1_1)',marker='.')\n pylab.plot(q,sep23,label='(I_1-I_2)/(f1_1-f1_2)',marker='.')\n pylab.plot(q,R1.flatten(),label='Pure resonant',marker='.')\n \n pylab.title(\"ASAXS separated and pure resonant terms for sample %s\" % s)\n pylab.xlabel(u\"q (1/%c)\" % 197)\n pylab.ylabel(\"Scattering cross-section (1/cm)\")\n pylab.gca().set_xscale('log');\n pylab.gca().set_yscale('log');\n pylab.legend();\n pylab.savefig('%s_%s_separation.eps'%(seqname,s),dpi=300,format='eps',transparent=True)\n logfile.close()\n pylab.show()",
"def doParametersOfInterest(self):\r\n if self.fg4fixed:\r\n self.modelBuilder.doVar(\"CMS_zz4l_fg4[0]\")\r\n self.modelBuilder.doVar(\"r[1,0,4]\")\r\n print \"Fixing CMS_zz4l_fg4\"\r\n poi = \"r\"\r\n else:\r\n if self.modelBuilder.out.var(\"CMS_zz4l_fg4\"):\r\n print \"have fg4 inside\"\r\n else:\r\n self.modelBuilder.doVar(\"CMS_zz4l_fg4[0.,-1,1]\")\r\n poi = \"CMS_zz4l_fg4\"\r\n if self.cPOI:\r\n if self.modelBuilder.out.var(\"cww_zz\"):\r\n print \"have czz_ww inside\"\r\n else:\r\n self.modelBuilder.doVar(\"cww_zz[0.5,-10,10]\")\r\n poi += \",cww_zz\"\r\n\r\n if self.fg2POI:\r\n if self.modelBuilder.out.var(\"CMS_zz4l_fg2\"):\r\n print \"have fg2 inside\"\r\n else:\r\n self.modelBuilder.doVar(\"CMS_zz4l_fg2[0.,0,1]\")\r\n poi += \",CMS_zz4l_fg2\"\r\n if self.muFloating:\r\n self.modelBuilder.doVar(\"r[1,0,2000]\")\r\n if self.muAsPOI:\r\n print \"Treating r as a POI\"\r\n poi += \",r\"\r\n else:\r\n self.modelBuilder.out.var(\"r\").setAttribute(\"flatParam\")\r\n if self.phiFloating:\r\n if self.modelBuilder.out.var(\"CMS_zz4l_fg4phi\"):\r\n print \"have fg4phi inside\"\r\n else: \r\n self.modelBuilder.doVar(\"CMS_zz4l_fg4phi[0.,-3.1415926,3.1415926]\")\r\n if self.phiPOI:\r\n poi += \",CMS_zz4l_fg4phi\"\r\n else:\r\n self.modelBuilder.out.var(\"CMS_zz4l_fg4phi\").setAttribute(\"flatParam\")\r\n if self.phi2Floating:\r\n #self.modelBuilder.doVar(\"CMS_zz4l_fg4phi[0.,-math.pi,math.pi]\")\r\n if self.modelBuilder.out.var(\"CMS_zz4l_fg2phi\"):\r\n print \"have fg2phi inside\"\r\n else: \r\n self.modelBuilder.doVar(\"CMS_zz4l_fg2phi[0.,-3.1415926,3.1415926]\")\r\n self.modelBuilder.out.var(\"CMS_zz4l_fg2phi\").setAttribute(\"flatParam\")\r\n \r\n self.modelBuilder.doSet(\"POI\",poi)",
"def __init__(\r\n self,\r\n centre: float = 0.0, # <- PyAutoFit recognises these constructor arguments are the model\r\n normalization: float = 0.1, # <- parameters of the Gaussian.\r\n rate: float = 0.01,\r\n ):\r\n self.centre = centre\r\n self.normalization = normalization\r\n self.rate = rate"
] | [
"0.66335446",
"0.6191415",
"0.59800696",
"0.58964324",
"0.5816966",
"0.5668345",
"0.56514496",
"0.5646242",
"0.5645791",
"0.561993",
"0.56091666",
"0.55902165",
"0.55819917",
"0.55689085",
"0.5561756",
"0.5520965",
"0.5512555",
"0.5506377",
"0.550578",
"0.549302",
"0.54860383",
"0.5479055",
"0.5478156",
"0.5470998",
"0.5431236",
"0.53811806",
"0.537957",
"0.5372441",
"0.535648",
"0.5354056",
"0.5335094",
"0.5327093",
"0.53268445",
"0.53239214",
"0.5289816",
"0.5276378",
"0.5261532",
"0.52572185",
"0.5254237",
"0.5249218",
"0.5245818",
"0.5238978",
"0.52348965",
"0.5232834",
"0.5223955",
"0.5219814",
"0.52190363",
"0.521874",
"0.52183664",
"0.5217067",
"0.52131504",
"0.5211507",
"0.5209789",
"0.52042735",
"0.52009434",
"0.5197378",
"0.5195538",
"0.51930165",
"0.5191871",
"0.5185212",
"0.5183725",
"0.517361",
"0.5169013",
"0.51612383",
"0.5158356",
"0.51570696",
"0.5156015",
"0.51544267",
"0.515122",
"0.51323277",
"0.5132161",
"0.5126996",
"0.5119752",
"0.51171947",
"0.5115487",
"0.5113961",
"0.5111713",
"0.51112014",
"0.51038045",
"0.5098667",
"0.5095476",
"0.50939155",
"0.5091994",
"0.50891894",
"0.5087579",
"0.50867546",
"0.5079042",
"0.5076462",
"0.5069498",
"0.5069436",
"0.5067059",
"0.50634015",
"0.5053028",
"0.50420594",
"0.5040066",
"0.50368655",
"0.50343585",
"0.50305843",
"0.50286967",
"0.5014758"
] | 0.750283 | 0 |
Evaluate the power-law slope of the mass profile from its power-law relation with effective radius | def get_gamma_from_R_eff(self, R_eff):
log_R_eff = np.log10(R_eff)
gam_minus_2 = log_R_eff*self.a + self.b + np.random.randn()*self.intrinsic_scatter
return gam_minus_2 + 2.0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def power(self):\n return irradiance_on_plane(self.vnorm, self.h,\n self.date, self.lat) * self.s * self.eff",
"def _powerlaw(self, x: np.ndarray, y: np.ndarray) -> float:\n\n # regress\n def _regress(x, y):\n slope, intercept, rval, pval, err = linregress(x, y)\n return slope, rval\n\n # log of inputs\n logx = np.log(x)\n logy = np.log(y)\n\n # naive fit\n rmin = self.rmin\n if rmin is None:\n exponent, rval = _regress(logx, logy)\n return exponent\n\n # iteratively trim the fat tail\n for ymin in np.unique(y):\n\n # trim off the fat tail\n greater_than = y >= ymin\n logx_ = logx[greater_than]\n logy_ = logy[greater_than]\n exponent, rval = _regress(logx_, logy_)\n\n # check convergence\n if abs(rval) > rmin:\n return exponent\n\n # give up\n return np.nan",
"def compute_slope(self):\n\n # assign variables\n slope = 'slope'\n aspect = 'aspect'\n dx = 'dx'\n dy = 'dy'\n grow_slope = 'grow_slope'\n grow_aspect = 'grow_aspect'\n grow_dx = 'grow_dx'\n grow_dy = 'grow_dy'\n\n # compute slope and partial derivatives\n gscript.run_command(\n 'r.slope.aspect',\n elevation=self.elevation,\n slope=slope,\n dx=dx,\n dy=dy,\n overwrite=True)\n\n # grow border to fix edge effects of moving window computations\n gscript.run_command(\n 'r.grow.distance',\n input=slope,\n value=grow_slope,\n overwrite=True)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{slope}={grow_slope}\".format(\n slope=slope,\n grow_slope=grow_slope),\n overwrite=True)\n gscript.run_command(\n 'r.grow.distance',\n input=dx,\n value=grow_dx,\n overwrite=True)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{dx}={grow_dx}\".format(\n dx=dx,\n grow_dx=grow_dx),\n overwrite=True)\n gscript.run_command(\n 'r.grow.distance',\n input=dy,\n value=grow_dy,\n overwrite=True)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{dy}={grow_dy}\".format(\n dy=dy,\n grow_dy=grow_dy),\n overwrite=True)\n\n # remove temporary maps\n gscript.run_command(\n 'g.remove',\n type='raster',\n name=['grow_slope',\n 'grow_dx',\n 'grow_dy'],\n flags='f')\n\n return slope, dx, dy",
"def sigma_slope(x, sigma_y):\n w = 1./sigma_y**2\n denom = np.sum(w)*np.sum(w*x**2)-np.sum(w*x)**2\n if denom <= 0:\n return np.nan\n else:\n result = np.sqrt(np.sum(w)/denom )\n return result",
"def obj_sqrt_slope(X, Y, lbd, beta, sigma):\n n = X.shape[0]\n return sigma + np.sum((Y - X@beta)**2) / (2 * n * sigma) + np.sum(sigma * lbd * np.sort(abs(beta))[::-1])",
"def w(self):\n # w must be a CArray\n raise NotImplementedError(\"Linear normalizer should define the slope.\")",
"def best_fit_slope(xs, ys):\n m = (((mean(xs)*mean(ys)) - mean(xs*ys)) /\n ((mean(xs)**2) - mean(xs**2)))\n return m",
"def obj_slope(X, Y, lbd, beta):\n n = X.shape[0]\n return np.sum((Y - X@beta)**2)/n + np.sum(lbd * np.sort(abs(beta))[::-1])",
"def rmspe(self) -> float:\n return float(np.sqrt(np.mean(np.square(((self.true - self.predicted) / self.true)), axis=0)))",
"def _calculate_slope(klass, p1, p2):\n xdiff = p1.x - p2.x\n if xdiff:\n return (p1.y - p2.y) / xdiff\n else:\n return float(\"+inf\")",
"def wadic_slopes(self,terms=None):\n\t\tNP = [(a,self.multiplicity(a)) for a in range(self.num_coefs)]\n\t\tif terms==None:\n\t\t\treturn NewtonPolygon(NP).slopes()\n\t\telse:\n\t\t\treturn NewtonPolygon(NP).slopes()[0:terms]",
"def powder_XRD(crystal,wavelength, get_mults=False):\n \n # The wavenumber of the input wavelength\n nu = 2*n.pi/wavelength\n\n # Make a list of the accessible rlvs\n rlvs = find_accessible_rlvs(crystal,wavelength)\n \n # Now we calculate the scattering intensity from each rlv\n intensities = {\n tuple(rlv): n.abs(crystal.structure_factor(rlv))**2\n for rlv in rlvs}\n \n # Now sum up all rlvs with the same magnitude. We also\n # get rid of all the scattering vectors with 0 intensity\n magnitudes = {}\n multiplicities = {}\n for rlv, intensity in intensities.items():\n repeat = False\n mag = n.linalg.norm(rlv)\n for oldmag in magnitudes:\n if n.isclose(mag,oldmag):\n magnitudes[oldmag] += intensity\n multiplicities[oldmag] += 1\n repeat = True\n break\n if not repeat and not n.isclose(mag,0):\n multiplicities[mag] = 1\n magnitudes[mag] = intensity\n \n # Now we reformat the multiplicity data in a nice way\n multiplicities = {2 * n.arcsin(mag / (2 * nu)) * 180 / n.pi:\n multiplicity\n for mag, multiplicity in multiplicities.items()\n if not n.allclose(magnitudes[mag],0)}\n\n # And now we calculate the scattering intensities\n # (a.u. per steradian) as a function of scattering angle\n intensities = {2 * n.arcsin(mag / (2 * nu)) * 180 / n.pi:\n intensity * \n # This factor corrects for the fact that the same total\n # power in the debye scherrer rings is more\n # concentrated when 2\\theta is near 0 or 2pi\n 1 / n.sin(2*n.arcsin(mag/(2*nu))) *\n # This factor corrects for the probability that any\n # given crystal domain will scatter into the rlv\n 1 / mag *\n # This factor corrects for polarization effects,\n # Assuming an unpolarized input beam and no polarization\n # analysis\n (1 + n.cos(2*n.arcsin(mag/(2*nu)))**2)/2\n for mag, intensity in magnitudes.items()\n if not n.allclose(intensity,0)}\n if get_mults:\n return intensities, multiplicities\n else:\n return intensities",
"def find_slope(lat1,lon1,lat2,lon2):\n return (lon2-lon1)/(lat2-lat1)",
"def slope(self):\n if self.b == 0:\n return None\n else:\n return (-1) * self.a/self.b",
"def evaluate( self, mu ) :\n\n P = 0.\n for l, c_l in enumerate( self.coefficients ) : P += ( l + 0.5 ) * c_l * Legendre( l, mu, checkXRange = False ) \n return( P )",
"def calc_slope(self, left, right):\n return (left[1] - right[1]) / (left[0] - right[0])",
"def power_output_candidate_wind_rule(_m, g, y, s, t):\r\n\r\n if y != m.Y.last():\r\n return (- m.sigma_1[g, y, s, t] + m.sigma_5[g, y, s, t]\r\n - m.lamb[self.k(m, g), y, s, t]\r\n + ((m.DELTA[y] * m.RHO[y, s]) * (m.C_MC[g, y] - (m.baseline[y] * m.permit_price[y])))\r\n == 0)\r\n else:\r\n return (- m.sigma_1[g, y, s, t] + m.sigma_5[g, y, s, t]\r\n - m.lamb[self.k(m, g), y, s, t]\r\n + ((m.DELTA[y] * m.RHO[y, s]) * (1 + (1 / m.INTEREST_RATE)) * (\r\n m.C_MC[g, y] - (m.baseline[y] * m.permit_price[y])))\r\n == 0)",
"def get_slope(self, device_type_name):\n\n if device_type_name in [\"SOLN\", \"BEND\",\"BLEN\",\"KICK\"]:\n # Solenoid devices use 'uA'.\n return 0.00055586\n elif device_type_name in [\"BLM\",\"LBLM\",\"CBLM\",\"PBLM\"]:\n # Beam loss monitors set threshold in Volts initially\n return 1.6/65536\n else:\n raise ValueError(\"Function \\\"__get_slope(device_type_name={}, fault_name={})\\\". Invalid device type name\"\n .format(device_type_name, fault_name))",
"def _residual_edp(self, params):\n data = self.F**2\n model = np.absolute(self._model())**2\n sigma = self.sigma\n return (data[self.mask]-model[self.mask]) / sigma[self.mask] \n \n # The following three lines do not reproduce Sun's results, which proves\n # that the fits were done through intensity, not form factor.\n #data = self.F\n #model = np.absolute(self._model())\n #return (data - model) ",
"def test_regression_of_slope_sum_distribution():\n\n meaningful_stats = pd.read_pickle(\n 'files/meaningfull_stats.pkl')\n\n print meaningful_stats['std'].tolist()\n print meaningful_stats['returns_diff'].tolist()\n\n def make_float(array):\n \"\"\"\n takes an array and makes all the number in it floats\n \"\"\"\n finial_array = []\n\n for number in array:\n finial_array.append(float(number))\n return finial_array\n\n seaborn.regplot(meaningful_stats['std'], meaningful_stats['returns_diff'])\n\n plt.title(\"STD and Returns\")\n\n plt.axhline(y=00, color='r', linestyle='-')\n\n plt.show()",
"def linear_slope_fit(wf, mean_y, sigma_y, slope, intercept):\n\n sum_x = sum_x2 = sum_xy = sum_y = mean_y[0] = sigma_y[0] = 0\n isum = len(wf)\n\n for i,value in enumerate(wf):\n sum_x += i \n sum_x2 += i**2\n sum_xy += (value * i)\n sum_y += value\n mean_y += (value-mean_y) / (i+1)\n sigma_y += (value-mean_y)**2\n\n\n sigma_y /= (isum + 1)\n np.sqrt(sigma_y, sigma_y)\n\n\n slope[0] = (isum * sum_xy - sum_x * sum_y) / (isum * sum_x2 - sum_x * sum_x)\n intercept[0] = (sum_y - sum_x * slope[0])/isum",
"def residuals_PL(self, p, data, x):\n err = data - self.PowerLaw(x,p)\n return err",
"def offset_slope(self):\n foc_um_slope = self.focus_slope * self.pix_size\n offset_slope = 0.5 * foc_um_slope / np.tan(self.convergence_angle)\n return offset_slope",
"def piecewise_avg_slope_var(MaskedDataVector,time,redn,gain): \n localbeta = []\n localn = []\n localvar = []\n #loop over each sections of the ramp.\n slices = np.ma.notmasked_contiguous(MaskedDataVector)\n if slices is None : #When no unmasked pixels exist\n return np.nan, np.nan\n\n tf = np.median(np.diff(time)) # The frame time estimate\n for k in range(len(slices)) :\n n = len(MaskedDataVector[slices[k]])\n if n > 2 : #At least 3 points are there to calculate slope\n t = time[slices[k]]\n Sx = t.sum(dtype=np.float64)\n Sxx = (np.square(t)).sum(dtype=np.float64)\n Sy = MaskedDataVector[slices[k]].sum(dtype=np.float64)\n Sxy = (MaskedDataVector[slices[k]]*t).sum(dtype=np.float64)\n #append localbeta, localalpha, localn and localsigma\n beta = (n*Sxy - Sx*Sy)/ (n*Sxx - Sx**2)\n localbeta.append(beta)\n localn.append(n)\n localvar.append(varience_of_slope(beta,n,tf,redn,gain))\n #calculate the average beta with weights 1/localvarience \n if len(localvar) > 0 : \n AvgSlope, weightsum =np.average(localbeta,weights=1.0/np.asarray(localvar),\n returned=True)\n Varience = 1/weightsum\n return AvgSlope, Varience\n else :\n return np.nan, np.nan",
"def slope(l):\n if l[1] == l[0]:\n return float(\"inf\")\n else:\n return float(l[3]-l[2])/(l[1]-l[0])",
"def fun(params, slope, data):\n x, y_true = data\n return y_true - model_fun(params, slope, x)",
"def eDouble(P): #adding P + P by using a tangent line\r\n R = point(0, 0, P.c)\r\n i = ( (3 * P.x ** 2) + P.c.a) #the slope equation (i/j)\r\n j = (2 * P.y)\r\n s = (i * modInv(j, P.c.p) ) % P.c.p\r\n R.x = ( (s ** 2) - 2 * P.x) % P.c.p\r\n R.y = (-P.y + s * (P.x - R.x) ) % P.c.p\r\n return R",
"def sqrt(self):\n\n\t\t# Maintain state of self and create new trace variable new_var\n\t\tnew_var = Var(self.val, self.der)\n\t\treturn new_var.__pow__(0.5)",
"def compute_rmse(y, tx, w):\n return np.sqrt(2*compute_mse(y,tx,w))",
"def rmsle(self) -> float:\n return float(np.sqrt(np.mean(np.power(np.log1p(self.predicted) - np.log1p(self.true), 2))))",
"def powerLaw(minskew,minkurt,transform,x):\n exponent = 0.05\n while exponent < 20:\n y = x**exponent\n (newskew,newkurt) = computeMoments(y)\n (minskew,minkurt,transform) = checkMin(minskew,minkurt,newskew,newkurt,transform,exponent)\n exponent *= 1.5\n #endwhile\n return (minskew,minkurt,transform)",
"def prox_slope(x, h, lbd):\n # reorder the lambda to make it coincide with the order of x\n sort_idx = np.argsort(abs(x))\n rank_x = np.arange(len(x))[np.argsort(sort_idx)]\n return np.sign(x) * np.clip(abs(x) - lbd[rank_x] * h, 0, None)",
"def spectral_abs_slope_mean(data, fft_data):\n spec = np.abs(fft_data)\n slope = np.abs(np.diff(spec))\n return np.mean(slope)",
"def LA_contribution(self):\n pr=paraxial(self.entrance_pupil,0)\n #hnu=-u*self.entrance_pupil #n=1\n pr.propagate(self.surfaces)\n #print('hnu',hnu,1/hnu)\n #print('paraxial y ',pr.y[1:])\n #print('paraxial nu',pr.nu[:-1])\n #print('paraxial u ',pr.nu[:-1]/self.get_n()[:-1])\n #print('paraxial u ',pr.nu[:-1]/self.get_n()[:-1]/hnu/5.715023)\n #print('paraxial i ',pr.i[1:])\n ni=self.get_n()[:-1]*pr.i[1:]\n #print('ni',ni)\n marginal=beam_field()\n marginal.single_beam_from_Kingslake_Q(self.entrance_pupil,0) #marginal beam\n marginal.propagate(self.surfaces)\n Q=marginal.Kingslake_Qabs(self.surfaces)[:,0]\n Q_=marginal.Kingslake_Q_abs(self.surfaces)[:,0]\n #print('marginal Q ',marginal.Kingslake_Qabs(ls.surfaces)[:,0])\n #print('marginal Q\\'',marginal.Kingslake_Q_abs(ls.surfaces)[:,0])\n #print(Q-Q_)\n #print('paraxial nu\\'',pr.nu[1:])\n #print('sin Uk\\'',marginal.U)\n target_surface=len(self.surfaces)-1\n #print(marginal.U[3,0,1]*pr.nu[target_surface])\n nusinU=marginal.U[3,0,1]*pr.nu[target_surface] #n'u'sinU'_k all values at end focus\n LA=-(Q-Q_)*ni/nusinU\n #print('spherical LA contribution',LA)\n #print('sum',sum(LA))\n return LA",
"def calc_rmsle(y: np.ndarray, y_hat: np.ndarray) -> float:\n pass",
"def _pwr_std_temp(rpm, MP, altitude):\n # get the power at sea level (i.e. point B on the left side of the Lycoming power chart)\n \n # get pwr at two even hundreds of rpm, and then interpolate\n if rpm >= 2600:\n rpm1 = 2600\n elif rpm <= 1800:\n rpm1 = 1800\n else:\n rpm1 = rpm - rpm % 100\n\n rpm2 = rpm1 + 100\n \n pwr_SL1 = _pwr_sl(rpm1, MP)\n pwr_SL2 = _pwr_sl(rpm2, MP)\n # print \"SL Pwr 1=\", pwr_SL1\n # print \"SL Pwr 2=\", pwr_SL2\n \n # get power at full throttle at this rpm and MP at altitude (i.e. point A on the right side of the Lycoming power chart)\n # density ratio at point A on the right side of the Lycoming power chart)\n pwr_FT1, DR_FT1 = _hp_at_MP_and_altitude(rpm1, MP)\n pwr_FT2, DR_FT2 = _hp_at_MP_and_altitude(rpm2, MP)\n # print \"FT pwr 1=\", pwr_FT1\n # print \"FT pwr 2=\", pwr_FT2\n # print \"DR FT 1=\", DR_FT1\n # print \"DR FT 2=\", DR_FT2\n \n # density ratio at sea level\n DR_sl = 1\n \n # density ratio for the actual conditions (i.e. point D on the right side of the Lycoming power chart)\n DR_test = SA.alt2density_ratio(altitude)\n # print \"DR_test=\", DR_test\n \n # function is unstable if the DR at FT is close to 1. This sends the slope off to unpredictable values.\n slope1=(pwr_FT1 - pwr_SL1) / (DR_FT1 - DR_sl)\n slope2=(pwr_FT2 - pwr_SL2) / (DR_FT2 - DR_sl)\n \n if MP > 28:\n if slope1 < -80:\n slope1=-62\n elif slope1> -60:\n slope1=-62\n if slope2< -80:\n slope2 = -62\n elif slope2> -60:\n slope2=-62\n \n # print \"slope1=\", slope1\n # print \"slope2=\", slope2\n \n pwr_std_temp1 = pwr_SL1 + (DR_test - DR_sl) * slope1\n pwr_std_temp2 = pwr_SL2 + (DR_test - DR_sl) * slope2\n # print \"Pwr Std Temp 1=\", pwr_std_temp1\n # print \"Pwr Std Temp 2=\", pwr_std_temp2\n pwr_std_temp = pwr_std_temp1 + (rpm - rpm1) * (pwr_std_temp2 - pwr_std_temp1) / (rpm2 - rpm1)\n\n return pwr_std_temp",
"def spectral_slope(sign, fs):\n f, ff = plotfft(sign, fs)\n if not(list(f)):\n return 0\n else:\n if not (len(f) * np.dot(f, f) - np.sum(f) ** 2):\n return 0\n else:\n return (len(f) * np.dot(f, ff) - np.sum(f) * np.sum(ff)) / (len(f) * np.dot(f, f) - np.sum(f) ** 2)",
"def power_output_existing_wind_rule(_m, g, y, s, t):\r\n\r\n if y != m.Y.last():\r\n return (- m.sigma_1[g, y, s, t] + m.sigma_4[g, y, s, t]\r\n - m.lamb[self.k(m, g), y, s, t]\r\n + (m.DELTA[y] * m.RHO[y, s] * m.C_MC[g, y])\r\n == 0)\r\n else:\r\n return (- m.sigma_1[g, y, s, t] + m.sigma_4[g, y, s, t]\r\n - m.lamb[self.k(m, g), y, s, t]\r\n + (m.DELTA[y] * m.RHO[y, s] * (1 + (1 / m.INTEREST_RATE)) * m.C_MC[g, y])\r\n == 0)",
"def compute_rmse(y, tx, w):\n return np.sqrt(2 * compute_mse(y, tx, w))",
"def calculate_slope(cnt):\n y = OrderedDict(cnt.most_common())\n v=np.log(list(y.values()))\n k=np.log(np.arange(1,len(v)+1,1))\n return linregress(k,v)",
"def massrich_parameters(self):\n print(\"\\nMass-Richness Power Law: M200 = norm * (N200 / 20) ^ slope\")\n print(\" norm:\", self._massrich_norm)\n print(\" slope:\", self._massrich_slope)",
"def calc_power(field):\r\n\r\n poynt_in_points = 0.5*numpy.real(field.p * numpy.conj(field.vn))\r\n power = numpy.sum(poynt_in_points)\r\n power *= field.one_pixel_area\r\n return power",
"def calc_lamb(self, x_surface, geom):\n\n return self.rfl",
"def getSlope(self):\n return math.tan(self.angle)",
"def power(self):\r\n return self.model * self.percent / 100",
"def w(lam, gam, p):\n return np.sqrt((1 - lam*np.cos(2*np.pi*p ) )**2 + (gam*lam*np.sin(2*np.pi*p ) )**2 )",
"def plot_powerlaw(self, **kwargs):\n\n if self.gamma is None:\n self.exponent()\n p = powerlaw.plot(exponent=-self.gamma,\n xmax=self.max_deg, xmin=self.k_min,\n **kwargs\n )\n pylab.show()\n return p",
"def solve_beta_sqrt_slope(X, Y, lbd_vec, h=2.0, lr=5.0):\n p = X.shape[1]\n\n sigma_prev, sigma_new = np.var(Y)**0.5, np.var(Y)**0.5\n beta_prev, beta_new = np.zeros(p), solve_beta_slope(X, Y, sigma_new*lbd_vec)\n\n i = 1.0\n while abs(obj_sqrt_slope(X, Y, lbd_vec, beta_prev, sigma_prev) - obj_sqrt_slope(X, Y, lbd_vec, beta_new, sigma_new)) > lr:\n sigma_prev, beta_prev = sigma_new, beta_new\n sigma_new = sigma_new - (h/i) * ( 1 - np.var(Y - X@beta_new)/sigma_new**2 )\n beta_new = solve_beta_slope(X, Y, sigma_new*lbd_vec)\n i += 1\n \n# if i % 100 == 0: print('step1: i=', i)\n# print(\"i=\", i)\n# print('sigma_prev, sigma_new: ', sigma_prev, sigma_new)\n# print('obj value:', obj_sqrt_slope(X, Y, lbd_vec, beta_new, sigma_new))\n# print('difference: ', abs(obj_sqrt_slope(X, Y, lbd_vec, beta_prev, sigma_prev) - obj_sqrt_slope(X, Y, lbd_vec, beta_new, sigma_new)))\n return beta_new",
"def square_trick(bias, slope, predictor, current_value, learning_rate):\n predicted_value = bias + slope*predictor\n slope += learning_rate*predictor*(current_value-predicted_value)\n bias += learning_rate*(current_value-predicted_value)\n return slope, bias",
"def __call__(self, x):\n\n np.multiply(x, self.slope, out=x)\n np.add(x, self.bias, out=x)\n return x",
"def slope_from_origin(self):\n\n return self.y / self.x",
"def __call__(self, w):\n l1_term = self.alpha * np.linalg.norm(w, 1)\n l2_term = self.alpha * 0.5 * np.linalg.norm(w, 2)\n\n return self.r * l1_term + (1 - self.r) * l2_term",
"def slope(x1, y1, x2, y2):\n return (y2 - y1) / (x2 - x1)",
"def get_slope(x, y, deg=1, err=[]):\n inverse_error = []\n for i in err:\n inv = 1/i\n inverse_error.append(i)\n\n if len(err)>0:\n z = np.polyfit(x, y, deg, w=inverse_error)\n else:\n z = np.polyfit(x, y, deg)\n\n m, b = z\n p = np.poly1d(z)\n\n return m, b, p",
"def simulate_power(self):\n if self.p_treatment - self.p_control < 0:\n thresh = 1 - self.alpha\n else:\n thresh = self.alpha\n\n try:\n p_crit = self.norm_null.ppf(1 - thresh)\n beta = self.norm_alt.cdf(p_crit)\n except:\n self.norm_distribution()\n p_crit = self.norm_null.ppf(1 - thresh)\n beta = self.norm_alt.cdf(p_crit)\n\n power = (1 - beta) if self.p_treatment > self.p_control else beta\n self.power = power\n\n return power",
"def surface_slope(self, dB=False, sig0=False):\n # evaluate the slope of the used brdf\n brdf_slope = self.SRF.brdf_theta_diff(\n t_0=self.t_0, t_ex=self.t_ex, p_0=self.p_0,\n p_ex=self.p_ex, geometry = 'mono',\n param_dict=self.param_dict, return_symbolic=False,\n n=1)\n # evaluate the used brdf\n brdf_val = self.SRF.brdf(self.t_0, self.t_ex,\n self.p_0, self.p_ex,\n param_dict=self.param_dict)\n\n # vegetated soil contribution\n I_vegs_slope = (self.I0\n * np.exp(-(2*self.V.tau / self._mu_0))\n * (self._mu_0 * brdf_slope\n - (2 * self.V.tau / self._mu_0 + 1)\n * np.sin(self.t_0) * brdf_val))\n\n # bare soil contribution\n I_bs_slope = self.I0 * (self._mu_0 * brdf_slope\n - np.sin(self.t_0) * brdf_val)\n\n I_slope = self.SRF.NormBRDF * (\n (1. - self.bsf) * I_vegs_slope\n + self.bsf * I_bs_slope)\n\n if sig0 is False and dB is False:\n return I_slope\n else:\n I_val = self.surface()\n if sig0 is True and dB is False:\n return 4. * np.pi * (self._mu_0 * I_slope\n - np.sin(self.t_0) * I_val)\n elif sig0 is False and dB is True:\n return 10./np.log(10) * I_slope / I_val\n elif sig0 is True and dB is True:\n return 10./np.log(10) * (I_slope / I_val\n - np.tan(self.t_0))",
"def varience_of_slope(slope,NoOfPoints,tframe,redn,gain):\n Var = 6*(NoOfPoints**2 + 1)*np.abs(slope) / (5*NoOfPoints*(NoOfPoints**2 -1)*tframe) +\\\n 12*(redn**2 + gain**2 / 12.)/(NoOfPoints*(NoOfPoints**2 -1)*tframe**2)\n return Var",
"def get_slope(self) -> str:\n return self.query('slope,?')",
"def risefit(self, p, x, y, risepower, mode=0):\n assert mode in [-1, 0, 1]\n ix = np.argmin(np.fabs(x-p[2]))\n tm = np.zeros_like(x)\n expf = (x[ix:]-p[2])/p[1]\n pclip = 1.e3\n nclip = 0.\n expf[expf>pclip]= pclip\n expf[expf<-nclip] = -nclip\n tm[ix:] = p[0] * (1.0 - np.exp(-expf))**risepower\n if mode == 0:\n return tm - y\n elif mode == 1:\n return np.linalg.norm(tm-y)\n elif mode == -1:\n return tm\n else:\n raise ValueError('doubleexp: Mode must be 0 (diff), 1 (linalg.norm) or -1 (just value)')",
"def periodicity_metric(light_curve_rms, sm_phase_rms):\n return (sm_phase_rms ** 2) / (light_curve_rms ** 2)",
"def lapse(self):\n pass",
"def __call__(self, x):\n return self.slope * x + self.ordinate",
"def powerflow_rule(_m, l, y, s, t):\r\n\r\n return (- m.sigma_27[l, y, s, t] + m.sigma_28[l, y, s, t]\r\n + (m.INCIDENCE_MATRIX[l, self.g(l)] * m.lamb[self.g(l), y, s, t])\r\n + (m.INCIDENCE_MATRIX[l, self.h(l)] * m.lamb[self.h(l), y, s, t])\r\n == 0)",
"def findslopes(img):\n img = img.astype(np.float32)\n DY = np.array([[-1,-1,-1],[0, 0, 0],[1, 1, 1]]) * 1/6\n DX = DY.transpose()\n gradx = cv2.filter2D(src=img, ddepth=-1, kernel=DX)\n grady = cv2.filter2D(src=img, ddepth=-1, kernel=DY)\n\n D2Y = np.array([[0.5, 1, 0.5], [-1, -2, -1], [0.5, 1, 0.5]]) * 0.5\n D2X = D2Y.transpose()\n DXY = np.array([[-1, 0, 1], [0, 0, 0], [1, 0, -1]]) * 1/4\n grad2x = cv2.filter2D(src=img, ddepth=-1, kernel=D2X)\n grad2y = cv2.filter2D(src=img, ddepth=-1, kernel=D2Y)\n gradxy = cv2.filter2D(src=img, ddepth=-1, kernel=DXY)\n\n slopes = gradx**2 + grady**2\n slopes2 = grad2x**2 + grad2y**2 + 2 * gradxy**2\n\n return (slopes, gradx, grady, slopes2, grad2x, grad2y, gradxy)",
"def power_radiated(horn_width, horn_height):\n # Calculate the normalized power radiated\n return horn_width * horn_height / (4.0 * 120.0 * pi)",
"def slope_from_origin(self):\n\n return (self.y / self.x)",
"def slope_and_mse(x, y, Rbool=False):\n s, o, r_value, p_value, std_err = linregress(x, y)\n ypred = s * x + o\n\n mse = np.average((y - ypred) ** 2)\n\n if Rbool:\n return s, mse, r_value\n else:\n return s, mse",
"def linear_slope(self, dim=\"time\", nan_policy=\"none\"):\n return linear_slope(self._obj, dim=dim, nan_policy=nan_policy)",
"def _calc_indirect_effect(x, y, m):\n x = stats.zscore(x)\n y = stats.zscore(y)\n m = stats.zscore(m)\n direct_effect = sm.OLS(y, sm.add_constant(x)).fit().params[1]\n xs = np.stack((x, m), axis=1)\n remaining_effect = sm.OLS(y, sm.add_constant(xs)).fit().params[1]\n indirect_effect = direct_effect - remaining_effect\n proportion_mediated = 1 - remaining_effect/direct_effect\n return indirect_effect, proportion_mediated",
"def powerlawFitWithOutliers(x, y, e, outtriangle='power.png'):\n x = np.asarray(x)\n y = np.asarray(y)\n e = np.asarray(e)\n # theta will be an array of length 2 + N, where N is the number of points\n # theta[0] is the amplitude, theta[1] is the power,\n # and theta[2 + i] is the weight g_i\n def log_prior(theta):\n #g_i needs to be between 0 and 1 and limits for the amplitude and power\n if (all(tmp > 0. for tmp in theta[2:]) and all(tmp < 1. for tmp in theta[2:])) and \\\n -2. < theta[1] < -0.05 and 0. < theta[0] < 3.e2:\n return 0\n else:\n return -np.inf # recall log(0) = -inf\n\n def log_likelihood(theta, x, y, e, sigma_B):\n dy = y - theta[0] * x**theta[1]\n g = np.clip(theta[2:], 0, 1) # g<0 or g>1 leads to NaNs in logarithm\n logL1 = np.log(g) - 0.5 * np.log(2 * np.pi * e ** 2) - 0.5 * (dy / e) ** 2\n logL2 = np.log(1 - g) - 0.5 * np.log(2 * np.pi * sigma_B ** 2) - 0.5 * (dy / sigma_B) ** 2\n return np.sum(np.logaddexp(logL1, logL2))\n\n def log_posterior(theta, x, y, e, sigma_B):\n return log_prior(theta) + log_likelihood(theta, x, y, e, sigma_B)\n\n #find starting point\n def squared_loss(theta, x=x, y=y, e=e):\n dy = y - theta[0] * x**theta[1]\n return np.sum(0.5 * (dy / e) ** 2)\n\n theta1 = optimize.fmin(squared_loss, [10, -0.3], disp=False)\n\n ndim = 2 + len(x) # number of parameters in the model\n nwalkers = 400 # number of MCMC walkers\n nburn = 1000 # \"burn-in\" period to let chains stabilize\n nsteps = 10000 # number of MCMC steps to take\n\n # set theta near the maximum likelihood, with\n starting_guesses = np.zeros((nwalkers, ndim))\n starting_guesses[:, :2] = np.random.normal(theta1, 1, (nwalkers, 2))\n starting_guesses[:, 2:] = np.random.normal(0.5, 0.1, (nwalkers, ndim - 2))\n\n #initiate sampler\n sampler = emcee.EnsembleSampler(nwalkers, ndim, log_posterior, args=[x, y, e, 10])\n\n # Run a burn-in and set new starting position\n print \"Burning-in...\"\n pos, prob, state = sampler.run_mcmc(starting_guesses, nburn)\n best_pos = sampler.flatchain[sampler.flatlnprobability.argmax()]\n print \"Mean acceptance fraction:\", np.mean(sampler.acceptance_fraction)\n pos = emcee.utils.sample_ball(best_pos, best_pos/100., size=nwalkers)\n sampler.reset()\n\n print \"Running an improved estimate...\"\n pos, prob, state = sampler.run_mcmc(pos, nburn)\n print \"Mean acceptance fraction:\", np.mean(sampler.acceptance_fraction)\n sampler.reset()\n print \"Running MCMC...\"\n pos, prob, state = sampler.run_mcmc(pos, nsteps, rstate0=state)\n print \"Mean acceptance fraction:\", np.mean(sampler.acceptance_fraction)\n\n #sample shape = (nwalkers, nsteps, ndim)\n sample = sampler.chain.reshape(-1, ndim)\n\n params = np.mean(sample[:, :2], 0)\n g = np.mean(sample[:, 2:], 0)\n outliers = (g < 0.5)\n\n #Get the index with the highest probability\n maxprob_index = np.argmax(prob)\n\n #Get the best parameters and their respective errors and print best fits\n params_fit = pos[maxprob_index][:2]\n errors = [sampler.flatchain[:, i].std() for i in xrange(ndim)][:2]\n\n fig = triangle.corner(sample, labels=['amplitude', 'power'] + len(x)*['Gi', ])\n fig.savefig(outtriangle)\n plt.close()\n\n return params, params_fit, errors, outliers",
"def lam(E):\n return (12398.4/E)*1e-10",
"def fepsp_slope(trace):\n bsl=np.mean(trace[0:3])\n peak=np.mean(trace[np.argmin(trace)-1:np.argmin(trace)+1]) \n amp=trf.val_dist(bsl,peak)\n \n twenty=int(trf.find_nearest_ind(trace[0:np.argmin(trace)+1],bsl-amp*0.2))\n eighty=int(trf.find_nearest_ind(trace[0:np.argmin(trace)+1],bsl-amp*0.8))\n slope=np.mean(np.diff(trace[twenty:eighty]))\n return slope,trace[twenty],trace[eighty],trace[np.argmin(trace)]",
"def lorentz(self, X, xm, amp, w):\n return amp / (1 + ((X - xm) / (w / 2)) ** 2)",
"def pearson_r(self):\n return ((self.x - self.x.mean()) * (self.y - self.error_weighted_average(self.y, self.dy))).sum() / self.x.std() / self.y.std()",
"def linear_error(X, y, w):\n\n return np.where(y != np.sign(np.dot(X, w)), 1.0, 0.0).mean()",
"def test_powers(self):\n l = np.array([0, 1, 2])\n r = np.linspace(0, 1, 11) # rho coordinates\n\n correct_vals = np.array([np.ones_like(r), r, r**2]).T\n correct_ders = np.array([np.zeros_like(r), np.ones_like(r), 2 * r]).T\n\n values = powers(r, l, dr=0)\n derivs = powers(r, l, dr=1)\n\n np.testing.assert_allclose(values, correct_vals, atol=1e-8)\n np.testing.assert_allclose(derivs, correct_ders, atol=1e-8)",
"def wr(x,y,xcen,ycen,sigma):\n res=np.exp(-((x-xcen)**2+(y-ycen)**2)/(2.*sigma**2))/(2.*np.pi*sigma**2) \n return res",
"def evaluate(self, x):\n try:\n\n z = ((x - self[\"mu\"]) + (1.j) * abs(self[\"al\"])) / \\\n (numpy.sqrt(2.0) * abs(self[\"ad\"]))\n y = self[\"A\"] * numpy.real(sps.wofz(z))\n y /= (abs(self[\"ad\"]) * numpy.sqrt(2.0 * numpy.pi))\n y += x * self[\"lin\"] + self[\"off\"]\n\n y[numpy.where(numpy.isnan(y))] = 0.0\n except FloatingPointError as fpe:\n raise(PE.PyAFloatingPointError(\"The following floating point error occurred:\\n \" + str(fpe) + \"\\n\" +\n \"Current Parameter values:\\n\" +\n str(self.parameters()),\n solution=[\"Try to rescale/shift your abscissa. For instance, put\" +\n \"the spectral line you try to fit at position `0`.\"]))\n return y",
"def getwientemp(_inputdata, _distance, _derr, _id):\n # Maxwell-Boltzmann distribution formula probability density function\n def curve(_x, _a, _scale):\n _a1 = np.sqrt(2 / np.pi)\n _a2 = _x**2 / (2 * _a**2)\n return _scale * _a1 * (_x**2 * np.exp(-_a2)) / _a**3\n\n # Set pyplot style to be consistent through the program\n plt.style.use('seaborn-whitegrid')\n\n # Convert the distance in parsecs to metres\n _distance = 3.0857 * 10**16 * _distance\n _derr = 3.0857 * 10**16 * _derr\n # Create array for x and y axis data\n _xdata = _inputdata[:, 0]\n _ydata = _inputdata[:, 1]\n _ydatalum = _ydata\n\n # Iterate through each band and convert from Janskys to W/m^2/um\n i = 0\n while i < 5:\n _ydata[i] = 3*10**14 * (_ydata[i] * 10**-26) / (Wavelength[i]**2)\n i += 1\n # Calculate optimal values and covariance using scipy curve_fit function\n _popt, _pcov = curve_fit(curve, _xdata, _ydata)\n # Create x axis to plot curve against\n _x = np.linspace(0, 5, 100)\n # Determine y value for each point on the x axis\n _yplot = curve(_x, *_popt)\n # Plot the curve to the screen\n plt.plot(_x, _yplot)\n # Determine the area under the graph, integral gives total energy recieved per m^2\n _area = np.trapz(_yplot, dx=5/100)\n # Total luminosity found by multiplying by the surface area of a sphere with diameter of the distance\n _lum = 4 * np.pi * _distance**2 * _area\n _lumerr = 4 * np.pi * _distance * _derr * _area\n # Peak value of Maxwell-Boltzmann distribution\n _mu = 2 * _popt[0] * np.sqrt(2 / np.pi)\n\n # Plot data on the graph\n plt.plot(_xdata, _ydata, '.')\n # Set axis labels\n plt.xlabel('Wavelength (um)')\n plt.ylabel('Spectral Irradiance (W m^-2 um^-1)')\n if _id == 1:\n _str = 'Large Star'\n else:\n _str = 'Small Star'\n\n # Calculate effective surface temperature using Wien's law\n _wien = round_sig(2898 / _mu)\n # Round luminosity to 2 significant figures\n _lum = round_sig(_lum)\n # Set graph title\n plt.suptitle('Black Body Plot for the ' + _str)\n # Save to current folder\n _filename = _str + '.png'\n plt.savefig(_filename)\n # Display to the screen\n plt.show()\n\n # Returns calculated values\n return _lum, _lumerr, _wien",
"def objective(self,w):\n l = 0\n for i in range(len(self.x)):\n # Each example contributes log(sigma(y_i * x_i . w))\n l -= log(sigmoid(self.y[i] * np.dot(w, self.x[i,:])))\n # regularisation 1/2 * alpha * ||w||^2\n l += 0.5 * self.alpha * np.dot(w,w)\n return l",
"def slope(tr, sigma=None):\n tr = tr.astype(float)\n if sigma is not None:\n tr = nd.gaussian_filter1d(tr, sigma=sigma)\n m, M = np.argmin(tr), np.argmax(tr)\n a = np.abs((tr[m] - tr[M]) / (m - M))\n return a",
"def solve_beta_slope(X, Y, lbd_vec, h=0.1, lr=5.0):\n n, p = X.shape[0], X.shape[1]\n \n# i = 0\n beta_prev = np.zeros(p)\n beta_new = np.ones(p)\n while abs(obj_slope(X, Y, lbd_vec, beta_prev)-obj_slope(X, Y, lbd_vec, beta_new)) > lr:\n beta_prev = beta_new\n beta_new = prox_slope(beta_new - (h/n) * (X.T @ (X @ beta_new - Y)), h/n, lbd_vec)\n \n# i += 1\n# if i % 2 == 0:\n# print(i)\n# print(\"prev value: \", obj_slope(X, Y, lbd_vec, beta_prev))\n# print(\"new value: \", obj_slope(X, Y, lbd_vec, beta_new))\n# print(sum(abs(beta_new)))\n# print(beta_new)\n return beta_new",
"def compute_gradient_mse(y, tx, w):\n e = y - tx.dot(w)\n\n return -tx.T.dot(e) / len(e)",
"def rmse(self):\n lam = self.lam()\n weights = lam / lam.sum()\n weighted_var = self.var() * weights\n rmse = np.sqrt(weighted_var.sum())\n return rmse",
"def _regression_slope_metric(x_data, y_data):\n reg = linregress(x_data, y_data)\n return reg.slope",
"def get_blr_strength(self,\n blr: BoundedLinearRegressions) -> float:\n # Return 0 if slopes are different signs.\n if blr.minima_regression.slope * blr.maxima_regression.slope < 0:\n return 0\n\n # Return 0 if not enough data.\n if len(blr.candles) <= 3:\n return 0\n\n # Find high and low prices of the trendline period.\n high_price = max([candle.high for candle in blr.candles])\n low_price = max([candle.low for candle in blr.candles])\n\n # Find start and end of the period.\n start_moment = max([candle.moment for candle in blr.candles])\n end_moment = min([candle.moment for candle in blr.candles])\n\n # Take signal strength to be the average of the two slopes.\n minima_slope_pct = abs(blr.minima_regression.y_of_x(end_moment) - blr.minima_regression.y_of_x(start_moment)) \\\n / max(0.01, high_price - low_price)\n maxima_slope_pct = abs(blr.maxima_regression.y_of_x(end_moment) - blr.maxima_regression.y_of_x(start_moment)) \\\n / max(0.01, high_price - low_price)\n signal_strength = (minima_slope_pct + maxima_slope_pct) / 2.0\n\n # Scale down signal strength.\n signal_strength = min(1, signal_strength / 5.0)\n\n # Ensure the signal strength has the correct sign.\n if blr.minima_regression.slope < 0:\n signal_strength += -1\n\n return signal_strength",
"def RMSE(X, y, w):\r\n n = X.shape[0]\r\n f = X @ w\r\n J = np.sqrt(np.sum(np.power((y - f), 2)) / n)\r\n return J",
"def slope(x1, y1, x2, y2):\r\n delta_y = y2-y1\r\n delta_x = x2-x1\r\n return delta_y / delta_x",
"def evaluate(self,x,y):\n\n #below function comes from MATLAB Peaks function\n # return np.multiply(3*np.power((1-x), 2), np.exp(-np.power(x,2) - np.power((y+1), 2))) - np.multiply(10 * (x/5.0 - np.power(x,3) - np.power(y,5)), np.exp(-np.power(x,2)-np.power(y,2)))#- np.exp(-np.power(x+1,2)-np.power(y,2))/3.0\n # return -np.power((x-50),2) - np.power(y, 2)-3\n return 5- (np.multiply(np.multiply(np.sin(x), np.sin(y)), np.power(x,2)) + np.power(y,2))",
"def slopes(self,k,terms=None):\n\t\tNP = []\n\t\tp=self.p\n\t\tif terms==None:\n\t\t\td = len(self.series)\n\t\telse:\n\t\t\td = min(len(self.series),2*terms+10) ### HACKING HERE\n\t\tif p == 2:\n\t\t\te = 2\n\t\telse:\n\t\t\te = 1\n\t\tfor i in range(d):\n\t\t\ty = 0\n\t\t\tfor ss_wt in self[i]:\n\t\t\t\tk_ss = ss_wt[0]\n\t\t\t\tmult = ss_wt[1]\n\t\t\t\tif ss_wt[0] == \"p\":\n\t\t\t\t\ty += ss_wt[1]\n\t\t\t\telse:\t\t\t\t\t\t\n\t\t\t\t\t#### added by john 10/17, see form_ghost_shell for instructions\n\t\t\t\t\tif k_ss >= 0:\n\t\t\t\t\t\ty += (valuation(k-k_ss,p)+e)*mult\n\t\t\t\t\tif k_ss < 0:\n\t\t\t\t\t\ty += mult\n\t\t\tNP += [(i,y)]\n\n\t\tif terms==None:\n\t\t\treturn NewtonPolygon(NP).slopes()\n\t\telse:\n\t\t\treturn NewtonPolygon(NP).slopes()[0:terms]",
"def MSE(X, y, w):\r\n n = X.shape[0]\r\n f = X @ w\r\n J = np.sum(np.power((y - f), 2)) / n\r\n return J",
"def solve_power(self):\n e = self.normalized_effect_size()\n power = FTestPower().solve_power(\n effect_size=e\n ,df_num=self.df_denom\n ,df_denom=self.df_num\n ,alpha=self.alpha\n ,power=None\n ,ncc=1\n )\n return power",
"def beta_gen_slope(p):\n cardi = 0.005\n return np.array( [0]*int(p-int(cardi*p)) + list(np.arange(1, int(cardi*p)+1, 1)) )",
"def _compute_pointwise_slopes(self, alpha):\n for i, region_slope in enumerate(self.region_slopes):\n if i == 0:\n self.slopes[i] = alpha * region_slope\n else:\n # slope[k]=skMin+alpha*(-skMax+skMin)\n # skMin-skMax in the paper. But if you calculate it you see\n # the paper is wrong, but the expression is right.\n slope_min_i = (\n 4.0 * region_slope - self.slopes[i - 1]) / 3.0\n slope_max_i = 4.0 * region_slope - 3 * self.slopes[i - 1]\n self.slopes[\n i] = slope_min_i + alpha * (slope_max_i - slope_min_i)\n\n # check positivity of a and b constants.\n a, b, _c, _d = self.evaluate_spline_coeffs(i)\n if a < 0 or b < 0:\n # no good, go to next beta.\n break\n else:\n # if we didn't break in any region, this beta is appropriate.\n return True\n\n return False # This beta is invalid",
"def compute_slope(x_jnt_0, y_jnt_0, x_jnt_1, y_jnt_1):\n if x_jnt_0 == x_jnt_1:\n return None\n return (y_jnt_1 - y_jnt_0) / (x_jnt_1 - x_jnt_0)",
"def test_variance_of_slope_sums():\n\n ticker = 'GOOG'\n main_df = pd.read_pickle(settings.settings_dict['stock_data_path'])\n\n main_df = sample_slopes.create_slope_sum(main_df)\n\n slope_sums = main_df[ticker + \"slope_sum\"]\n\n print np.mean(main_df[ticker + \"slope_sum\"])\n print np.std(main_df[ticker + \"slope_sum\"])\n\n std = pd.rolling_std(slope_sums, window=20)\n\n _, ax2 = plt.subplots()\n\n ax2.plot(slope_sums)\n ax2.plot(slope_sums + std)\n ax2.plot(slope_sums - std)\n plt.legend(['Slope_Sum ', 'Slope_Sum +1 Std', 'Slope_Sum -1 Std'])\n plt.title(ticker + ' varrience of slope sum')\n plt.show()",
"def power_output_candidate_solar_rule(_m, g, y, s, t):\r\n\r\n if y != m.Y.last():\r\n return (- m.sigma_1[g, y, s, t] + m.sigma_7[g, y, s, t]\r\n - m.lamb[self.k(m, g), y, s, t]\r\n + ((m.DELTA[y] * m.RHO[y, s]) * (m.C_MC[g, y] - (m.baseline[y] * m.permit_price[y])))\r\n == 0)\r\n\r\n else:\r\n return (- m.sigma_1[g, y, s, t] + m.sigma_7[g, y, s, t]\r\n - m.lamb[self.k(m, g), y, s, t]\r\n + ((m.DELTA[y] * m.RHO[y, s]) * (1 + (1 / m.INTEREST_RATE)) * (\r\n m.C_MC[g, y] - (m.baseline[y] * m.permit_price[y])))\r\n == 0)",
"def calc_slopes(data):\n spikes = data['spikes'].item()\n mem = data['mem']\n mslopes = []\n for m, s in zip(mem, spikes.itervalues()):\n if len(s) == 0:\n print(\"No spikes: Skipping\")\n _mslope = 0\n else:\n _mslope, ign = nt.norm_firing_slope(m, s, 15*mV, 10*ms, w=2*ms)\n mslopes.append(_mslope)\n data['mslopes'] = mslopes\n return data",
"def slope(start, end):\n\tx1 = start[0]\n\ty1 = start[1]\n\tx2 = end[0]\n\ty2 = end[1]\n\ttop = float(y2 - y1) \n\tbot = float(x2 - x1)\n\tif bot == 0:\n\t\treturn None\n\telse:\n\t\treturn top / bot",
"def ellipse_dist_ratio_poly(self, theta, lwr):\n\n \"\"\"\n\n Params for FWD fit\n array([ 9.99999989e-01, 8.10852195e+07, 1.95444928e+00, 7.96543026e-02])\n this one is un-needed, since it's approximation y = 1\n\n Params for FWD_DIAG fit\n array([-0.00650758, 0.57761793, 0.35369061, 1.87834152])\n\n Params for ORTHOG fit\n array([-0.02014989, 5.7007518 , -0.83345416, 0.97711175])\n\n Params for BCK_DIAG fit\n array([-0.01608705, 9.44079769, -0.92071169, 0.89094967])\n\n Params for BCK fit\n array([ -0.01451187, 10.92674105, -0.93514904, 0.87868538])\n \"\"\"\n\n #fitting function\n def f(x,params):\n return params[0] + (1.0 / (params[1]*(x+params[2])**params[3]))\n\n #force float math, in case theta is an integer\n theta = float(theta)\n\n #into an angle index form:\n t = abs(int(4.0*theta/np.pi))\n\n if (t == 0) or (t == 8):\n return 1.0\n elif (t == 1) or (t == 7):\n #forward diagonal\n return f(lwr, [-0.00650758, 0.57761793, 0.35369061, 1.87834152])\n elif (t == 2) or (t == 6):\n #orthogonal\n return f(lwr, [-0.02014989, 5.7007518 , -0.83345416, 0.97711175])\n elif (t == 3) or (t == 5):\n #backward diagonal\n return f(lwr, [-0.01608705, 9.44079769, -0.92071169, 0.89094967])\n elif t == 4:\n #backward\n return f(lwr, [ -0.01451187, 10.92674105, -0.93514904, 0.87868538])\n else:\n #hmmm... TODO\n return 0.0",
"def calAlpha(self, offset_arr):\n\n # time_start =time.time()\n x_arr, y_arr = zip(*offset_arr)\n r_value = stats.linregress(x_arr, y_arr)[2]\n try:\n medslope, medintercept = stats.mstats.theilslopes(y_arr, x_arr)[0:2]\n except FloatingPointError as e:\n logging.error(\"CRITICAL: theilslopes FloatingPointError {} for arrays y_arr {} and x_arr {} of domain {}\".format(e, y_arr, x_arr, self.domain))\n except Exception as e:\n logging.error(\"CRITICAL: theilslopes Other error {} for arrays y_arr {} and x_arr {} of domain {}\".format(e, y_arr, x_arr, self.domain))\n raise\n\n return medslope, medintercept, r_value, r_value**2"
] | [
"0.6757806",
"0.6462289",
"0.6391217",
"0.6186872",
"0.61128086",
"0.5965611",
"0.59109455",
"0.58782387",
"0.5849429",
"0.5842808",
"0.5838932",
"0.58238125",
"0.5823275",
"0.5812738",
"0.5809786",
"0.5801164",
"0.57895917",
"0.5785309",
"0.57821614",
"0.5743392",
"0.57384443",
"0.57312053",
"0.5684633",
"0.5637274",
"0.55828613",
"0.5580899",
"0.5576676",
"0.5575982",
"0.5560625",
"0.55602634",
"0.5543295",
"0.5537741",
"0.55297256",
"0.5529342",
"0.550579",
"0.54986924",
"0.5494651",
"0.54936165",
"0.5483367",
"0.54825526",
"0.5474191",
"0.5463933",
"0.546152",
"0.54590607",
"0.5454753",
"0.54503894",
"0.5434936",
"0.54292107",
"0.54269856",
"0.5424928",
"0.54245996",
"0.54122096",
"0.54102397",
"0.54094684",
"0.54079527",
"0.5391197",
"0.5388268",
"0.53878546",
"0.5386704",
"0.53848016",
"0.5379863",
"0.53740233",
"0.5368245",
"0.5359652",
"0.53576857",
"0.53528476",
"0.53512836",
"0.53465164",
"0.5346458",
"0.5343458",
"0.5342476",
"0.53401357",
"0.5328318",
"0.5323514",
"0.5323008",
"0.5321827",
"0.53190136",
"0.53135985",
"0.53133476",
"0.53126204",
"0.5305752",
"0.5298616",
"0.52984226",
"0.52961004",
"0.5276984",
"0.5274837",
"0.5274618",
"0.5272924",
"0.5266518",
"0.5265094",
"0.52597195",
"0.5248977",
"0.5246609",
"0.52458763",
"0.52324134",
"0.52293485",
"0.5228341",
"0.5228262",
"0.52239245",
"0.5213537",
"0.5203851"
] | 0.0 | -1 |
Evaluate the powerlaw slope of the mass profile from its powerlaw relation with effective radius | def get_gamma_from_vel_disp(self, vel_disp):
gam_minus_2 = vel_disp*self.a_v + self.b_v + np.random.randn()*self.int_v
return gam_minus_2 + 2.0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def power(self):\n return irradiance_on_plane(self.vnorm, self.h,\n self.date, self.lat) * self.s * self.eff",
"def _powerlaw(self, x: np.ndarray, y: np.ndarray) -> float:\n\n # regress\n def _regress(x, y):\n slope, intercept, rval, pval, err = linregress(x, y)\n return slope, rval\n\n # log of inputs\n logx = np.log(x)\n logy = np.log(y)\n\n # naive fit\n rmin = self.rmin\n if rmin is None:\n exponent, rval = _regress(logx, logy)\n return exponent\n\n # iteratively trim the fat tail\n for ymin in np.unique(y):\n\n # trim off the fat tail\n greater_than = y >= ymin\n logx_ = logx[greater_than]\n logy_ = logy[greater_than]\n exponent, rval = _regress(logx_, logy_)\n\n # check convergence\n if abs(rval) > rmin:\n return exponent\n\n # give up\n return np.nan",
"def compute_slope(self):\n\n # assign variables\n slope = 'slope'\n aspect = 'aspect'\n dx = 'dx'\n dy = 'dy'\n grow_slope = 'grow_slope'\n grow_aspect = 'grow_aspect'\n grow_dx = 'grow_dx'\n grow_dy = 'grow_dy'\n\n # compute slope and partial derivatives\n gscript.run_command(\n 'r.slope.aspect',\n elevation=self.elevation,\n slope=slope,\n dx=dx,\n dy=dy,\n overwrite=True)\n\n # grow border to fix edge effects of moving window computations\n gscript.run_command(\n 'r.grow.distance',\n input=slope,\n value=grow_slope,\n overwrite=True)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{slope}={grow_slope}\".format(\n slope=slope,\n grow_slope=grow_slope),\n overwrite=True)\n gscript.run_command(\n 'r.grow.distance',\n input=dx,\n value=grow_dx,\n overwrite=True)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{dx}={grow_dx}\".format(\n dx=dx,\n grow_dx=grow_dx),\n overwrite=True)\n gscript.run_command(\n 'r.grow.distance',\n input=dy,\n value=grow_dy,\n overwrite=True)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{dy}={grow_dy}\".format(\n dy=dy,\n grow_dy=grow_dy),\n overwrite=True)\n\n # remove temporary maps\n gscript.run_command(\n 'g.remove',\n type='raster',\n name=['grow_slope',\n 'grow_dx',\n 'grow_dy'],\n flags='f')\n\n return slope, dx, dy",
"def sigma_slope(x, sigma_y):\n w = 1./sigma_y**2\n denom = np.sum(w)*np.sum(w*x**2)-np.sum(w*x)**2\n if denom <= 0:\n return np.nan\n else:\n result = np.sqrt(np.sum(w)/denom )\n return result",
"def obj_sqrt_slope(X, Y, lbd, beta, sigma):\n n = X.shape[0]\n return sigma + np.sum((Y - X@beta)**2) / (2 * n * sigma) + np.sum(sigma * lbd * np.sort(abs(beta))[::-1])",
"def w(self):\n # w must be a CArray\n raise NotImplementedError(\"Linear normalizer should define the slope.\")",
"def best_fit_slope(xs, ys):\n m = (((mean(xs)*mean(ys)) - mean(xs*ys)) /\n ((mean(xs)**2) - mean(xs**2)))\n return m",
"def obj_slope(X, Y, lbd, beta):\n n = X.shape[0]\n return np.sum((Y - X@beta)**2)/n + np.sum(lbd * np.sort(abs(beta))[::-1])",
"def rmspe(self) -> float:\n return float(np.sqrt(np.mean(np.square(((self.true - self.predicted) / self.true)), axis=0)))",
"def _calculate_slope(klass, p1, p2):\n xdiff = p1.x - p2.x\n if xdiff:\n return (p1.y - p2.y) / xdiff\n else:\n return float(\"+inf\")",
"def wadic_slopes(self,terms=None):\n\t\tNP = [(a,self.multiplicity(a)) for a in range(self.num_coefs)]\n\t\tif terms==None:\n\t\t\treturn NewtonPolygon(NP).slopes()\n\t\telse:\n\t\t\treturn NewtonPolygon(NP).slopes()[0:terms]",
"def powder_XRD(crystal,wavelength, get_mults=False):\n \n # The wavenumber of the input wavelength\n nu = 2*n.pi/wavelength\n\n # Make a list of the accessible rlvs\n rlvs = find_accessible_rlvs(crystal,wavelength)\n \n # Now we calculate the scattering intensity from each rlv\n intensities = {\n tuple(rlv): n.abs(crystal.structure_factor(rlv))**2\n for rlv in rlvs}\n \n # Now sum up all rlvs with the same magnitude. We also\n # get rid of all the scattering vectors with 0 intensity\n magnitudes = {}\n multiplicities = {}\n for rlv, intensity in intensities.items():\n repeat = False\n mag = n.linalg.norm(rlv)\n for oldmag in magnitudes:\n if n.isclose(mag,oldmag):\n magnitudes[oldmag] += intensity\n multiplicities[oldmag] += 1\n repeat = True\n break\n if not repeat and not n.isclose(mag,0):\n multiplicities[mag] = 1\n magnitudes[mag] = intensity\n \n # Now we reformat the multiplicity data in a nice way\n multiplicities = {2 * n.arcsin(mag / (2 * nu)) * 180 / n.pi:\n multiplicity\n for mag, multiplicity in multiplicities.items()\n if not n.allclose(magnitudes[mag],0)}\n\n # And now we calculate the scattering intensities\n # (a.u. per steradian) as a function of scattering angle\n intensities = {2 * n.arcsin(mag / (2 * nu)) * 180 / n.pi:\n intensity * \n # This factor corrects for the fact that the same total\n # power in the debye scherrer rings is more\n # concentrated when 2\\theta is near 0 or 2pi\n 1 / n.sin(2*n.arcsin(mag/(2*nu))) *\n # This factor corrects for the probability that any\n # given crystal domain will scatter into the rlv\n 1 / mag *\n # This factor corrects for polarization effects,\n # Assuming an unpolarized input beam and no polarization\n # analysis\n (1 + n.cos(2*n.arcsin(mag/(2*nu)))**2)/2\n for mag, intensity in magnitudes.items()\n if not n.allclose(intensity,0)}\n if get_mults:\n return intensities, multiplicities\n else:\n return intensities",
"def find_slope(lat1,lon1,lat2,lon2):\n return (lon2-lon1)/(lat2-lat1)",
"def slope(self):\n if self.b == 0:\n return None\n else:\n return (-1) * self.a/self.b",
"def evaluate( self, mu ) :\n\n P = 0.\n for l, c_l in enumerate( self.coefficients ) : P += ( l + 0.5 ) * c_l * Legendre( l, mu, checkXRange = False ) \n return( P )",
"def calc_slope(self, left, right):\n return (left[1] - right[1]) / (left[0] - right[0])",
"def power_output_candidate_wind_rule(_m, g, y, s, t):\r\n\r\n if y != m.Y.last():\r\n return (- m.sigma_1[g, y, s, t] + m.sigma_5[g, y, s, t]\r\n - m.lamb[self.k(m, g), y, s, t]\r\n + ((m.DELTA[y] * m.RHO[y, s]) * (m.C_MC[g, y] - (m.baseline[y] * m.permit_price[y])))\r\n == 0)\r\n else:\r\n return (- m.sigma_1[g, y, s, t] + m.sigma_5[g, y, s, t]\r\n - m.lamb[self.k(m, g), y, s, t]\r\n + ((m.DELTA[y] * m.RHO[y, s]) * (1 + (1 / m.INTEREST_RATE)) * (\r\n m.C_MC[g, y] - (m.baseline[y] * m.permit_price[y])))\r\n == 0)",
"def get_slope(self, device_type_name):\n\n if device_type_name in [\"SOLN\", \"BEND\",\"BLEN\",\"KICK\"]:\n # Solenoid devices use 'uA'.\n return 0.00055586\n elif device_type_name in [\"BLM\",\"LBLM\",\"CBLM\",\"PBLM\"]:\n # Beam loss monitors set threshold in Volts initially\n return 1.6/65536\n else:\n raise ValueError(\"Function \\\"__get_slope(device_type_name={}, fault_name={})\\\". Invalid device type name\"\n .format(device_type_name, fault_name))",
"def _residual_edp(self, params):\n data = self.F**2\n model = np.absolute(self._model())**2\n sigma = self.sigma\n return (data[self.mask]-model[self.mask]) / sigma[self.mask] \n \n # The following three lines do not reproduce Sun's results, which proves\n # that the fits were done through intensity, not form factor.\n #data = self.F\n #model = np.absolute(self._model())\n #return (data - model) ",
"def test_regression_of_slope_sum_distribution():\n\n meaningful_stats = pd.read_pickle(\n 'files/meaningfull_stats.pkl')\n\n print meaningful_stats['std'].tolist()\n print meaningful_stats['returns_diff'].tolist()\n\n def make_float(array):\n \"\"\"\n takes an array and makes all the number in it floats\n \"\"\"\n finial_array = []\n\n for number in array:\n finial_array.append(float(number))\n return finial_array\n\n seaborn.regplot(meaningful_stats['std'], meaningful_stats['returns_diff'])\n\n plt.title(\"STD and Returns\")\n\n plt.axhline(y=00, color='r', linestyle='-')\n\n plt.show()",
"def linear_slope_fit(wf, mean_y, sigma_y, slope, intercept):\n\n sum_x = sum_x2 = sum_xy = sum_y = mean_y[0] = sigma_y[0] = 0\n isum = len(wf)\n\n for i,value in enumerate(wf):\n sum_x += i \n sum_x2 += i**2\n sum_xy += (value * i)\n sum_y += value\n mean_y += (value-mean_y) / (i+1)\n sigma_y += (value-mean_y)**2\n\n\n sigma_y /= (isum + 1)\n np.sqrt(sigma_y, sigma_y)\n\n\n slope[0] = (isum * sum_xy - sum_x * sum_y) / (isum * sum_x2 - sum_x * sum_x)\n intercept[0] = (sum_y - sum_x * slope[0])/isum",
"def residuals_PL(self, p, data, x):\n err = data - self.PowerLaw(x,p)\n return err",
"def offset_slope(self):\n foc_um_slope = self.focus_slope * self.pix_size\n offset_slope = 0.5 * foc_um_slope / np.tan(self.convergence_angle)\n return offset_slope",
"def piecewise_avg_slope_var(MaskedDataVector,time,redn,gain): \n localbeta = []\n localn = []\n localvar = []\n #loop over each sections of the ramp.\n slices = np.ma.notmasked_contiguous(MaskedDataVector)\n if slices is None : #When no unmasked pixels exist\n return np.nan, np.nan\n\n tf = np.median(np.diff(time)) # The frame time estimate\n for k in range(len(slices)) :\n n = len(MaskedDataVector[slices[k]])\n if n > 2 : #At least 3 points are there to calculate slope\n t = time[slices[k]]\n Sx = t.sum(dtype=np.float64)\n Sxx = (np.square(t)).sum(dtype=np.float64)\n Sy = MaskedDataVector[slices[k]].sum(dtype=np.float64)\n Sxy = (MaskedDataVector[slices[k]]*t).sum(dtype=np.float64)\n #append localbeta, localalpha, localn and localsigma\n beta = (n*Sxy - Sx*Sy)/ (n*Sxx - Sx**2)\n localbeta.append(beta)\n localn.append(n)\n localvar.append(varience_of_slope(beta,n,tf,redn,gain))\n #calculate the average beta with weights 1/localvarience \n if len(localvar) > 0 : \n AvgSlope, weightsum =np.average(localbeta,weights=1.0/np.asarray(localvar),\n returned=True)\n Varience = 1/weightsum\n return AvgSlope, Varience\n else :\n return np.nan, np.nan",
"def slope(l):\n if l[1] == l[0]:\n return float(\"inf\")\n else:\n return float(l[3]-l[2])/(l[1]-l[0])",
"def fun(params, slope, data):\n x, y_true = data\n return y_true - model_fun(params, slope, x)",
"def eDouble(P): #adding P + P by using a tangent line\r\n R = point(0, 0, P.c)\r\n i = ( (3 * P.x ** 2) + P.c.a) #the slope equation (i/j)\r\n j = (2 * P.y)\r\n s = (i * modInv(j, P.c.p) ) % P.c.p\r\n R.x = ( (s ** 2) - 2 * P.x) % P.c.p\r\n R.y = (-P.y + s * (P.x - R.x) ) % P.c.p\r\n return R",
"def sqrt(self):\n\n\t\t# Maintain state of self and create new trace variable new_var\n\t\tnew_var = Var(self.val, self.der)\n\t\treturn new_var.__pow__(0.5)",
"def compute_rmse(y, tx, w):\n return np.sqrt(2*compute_mse(y,tx,w))",
"def rmsle(self) -> float:\n return float(np.sqrt(np.mean(np.power(np.log1p(self.predicted) - np.log1p(self.true), 2))))",
"def powerLaw(minskew,minkurt,transform,x):\n exponent = 0.05\n while exponent < 20:\n y = x**exponent\n (newskew,newkurt) = computeMoments(y)\n (minskew,minkurt,transform) = checkMin(minskew,minkurt,newskew,newkurt,transform,exponent)\n exponent *= 1.5\n #endwhile\n return (minskew,minkurt,transform)",
"def prox_slope(x, h, lbd):\n # reorder the lambda to make it coincide with the order of x\n sort_idx = np.argsort(abs(x))\n rank_x = np.arange(len(x))[np.argsort(sort_idx)]\n return np.sign(x) * np.clip(abs(x) - lbd[rank_x] * h, 0, None)",
"def spectral_abs_slope_mean(data, fft_data):\n spec = np.abs(fft_data)\n slope = np.abs(np.diff(spec))\n return np.mean(slope)",
"def LA_contribution(self):\n pr=paraxial(self.entrance_pupil,0)\n #hnu=-u*self.entrance_pupil #n=1\n pr.propagate(self.surfaces)\n #print('hnu',hnu,1/hnu)\n #print('paraxial y ',pr.y[1:])\n #print('paraxial nu',pr.nu[:-1])\n #print('paraxial u ',pr.nu[:-1]/self.get_n()[:-1])\n #print('paraxial u ',pr.nu[:-1]/self.get_n()[:-1]/hnu/5.715023)\n #print('paraxial i ',pr.i[1:])\n ni=self.get_n()[:-1]*pr.i[1:]\n #print('ni',ni)\n marginal=beam_field()\n marginal.single_beam_from_Kingslake_Q(self.entrance_pupil,0) #marginal beam\n marginal.propagate(self.surfaces)\n Q=marginal.Kingslake_Qabs(self.surfaces)[:,0]\n Q_=marginal.Kingslake_Q_abs(self.surfaces)[:,0]\n #print('marginal Q ',marginal.Kingslake_Qabs(ls.surfaces)[:,0])\n #print('marginal Q\\'',marginal.Kingslake_Q_abs(ls.surfaces)[:,0])\n #print(Q-Q_)\n #print('paraxial nu\\'',pr.nu[1:])\n #print('sin Uk\\'',marginal.U)\n target_surface=len(self.surfaces)-1\n #print(marginal.U[3,0,1]*pr.nu[target_surface])\n nusinU=marginal.U[3,0,1]*pr.nu[target_surface] #n'u'sinU'_k all values at end focus\n LA=-(Q-Q_)*ni/nusinU\n #print('spherical LA contribution',LA)\n #print('sum',sum(LA))\n return LA",
"def calc_rmsle(y: np.ndarray, y_hat: np.ndarray) -> float:\n pass",
"def _pwr_std_temp(rpm, MP, altitude):\n # get the power at sea level (i.e. point B on the left side of the Lycoming power chart)\n \n # get pwr at two even hundreds of rpm, and then interpolate\n if rpm >= 2600:\n rpm1 = 2600\n elif rpm <= 1800:\n rpm1 = 1800\n else:\n rpm1 = rpm - rpm % 100\n\n rpm2 = rpm1 + 100\n \n pwr_SL1 = _pwr_sl(rpm1, MP)\n pwr_SL2 = _pwr_sl(rpm2, MP)\n # print \"SL Pwr 1=\", pwr_SL1\n # print \"SL Pwr 2=\", pwr_SL2\n \n # get power at full throttle at this rpm and MP at altitude (i.e. point A on the right side of the Lycoming power chart)\n # density ratio at point A on the right side of the Lycoming power chart)\n pwr_FT1, DR_FT1 = _hp_at_MP_and_altitude(rpm1, MP)\n pwr_FT2, DR_FT2 = _hp_at_MP_and_altitude(rpm2, MP)\n # print \"FT pwr 1=\", pwr_FT1\n # print \"FT pwr 2=\", pwr_FT2\n # print \"DR FT 1=\", DR_FT1\n # print \"DR FT 2=\", DR_FT2\n \n # density ratio at sea level\n DR_sl = 1\n \n # density ratio for the actual conditions (i.e. point D on the right side of the Lycoming power chart)\n DR_test = SA.alt2density_ratio(altitude)\n # print \"DR_test=\", DR_test\n \n # function is unstable if the DR at FT is close to 1. This sends the slope off to unpredictable values.\n slope1=(pwr_FT1 - pwr_SL1) / (DR_FT1 - DR_sl)\n slope2=(pwr_FT2 - pwr_SL2) / (DR_FT2 - DR_sl)\n \n if MP > 28:\n if slope1 < -80:\n slope1=-62\n elif slope1> -60:\n slope1=-62\n if slope2< -80:\n slope2 = -62\n elif slope2> -60:\n slope2=-62\n \n # print \"slope1=\", slope1\n # print \"slope2=\", slope2\n \n pwr_std_temp1 = pwr_SL1 + (DR_test - DR_sl) * slope1\n pwr_std_temp2 = pwr_SL2 + (DR_test - DR_sl) * slope2\n # print \"Pwr Std Temp 1=\", pwr_std_temp1\n # print \"Pwr Std Temp 2=\", pwr_std_temp2\n pwr_std_temp = pwr_std_temp1 + (rpm - rpm1) * (pwr_std_temp2 - pwr_std_temp1) / (rpm2 - rpm1)\n\n return pwr_std_temp",
"def spectral_slope(sign, fs):\n f, ff = plotfft(sign, fs)\n if not(list(f)):\n return 0\n else:\n if not (len(f) * np.dot(f, f) - np.sum(f) ** 2):\n return 0\n else:\n return (len(f) * np.dot(f, ff) - np.sum(f) * np.sum(ff)) / (len(f) * np.dot(f, f) - np.sum(f) ** 2)",
"def power_output_existing_wind_rule(_m, g, y, s, t):\r\n\r\n if y != m.Y.last():\r\n return (- m.sigma_1[g, y, s, t] + m.sigma_4[g, y, s, t]\r\n - m.lamb[self.k(m, g), y, s, t]\r\n + (m.DELTA[y] * m.RHO[y, s] * m.C_MC[g, y])\r\n == 0)\r\n else:\r\n return (- m.sigma_1[g, y, s, t] + m.sigma_4[g, y, s, t]\r\n - m.lamb[self.k(m, g), y, s, t]\r\n + (m.DELTA[y] * m.RHO[y, s] * (1 + (1 / m.INTEREST_RATE)) * m.C_MC[g, y])\r\n == 0)",
"def compute_rmse(y, tx, w):\n return np.sqrt(2 * compute_mse(y, tx, w))",
"def calculate_slope(cnt):\n y = OrderedDict(cnt.most_common())\n v=np.log(list(y.values()))\n k=np.log(np.arange(1,len(v)+1,1))\n return linregress(k,v)",
"def massrich_parameters(self):\n print(\"\\nMass-Richness Power Law: M200 = norm * (N200 / 20) ^ slope\")\n print(\" norm:\", self._massrich_norm)\n print(\" slope:\", self._massrich_slope)",
"def calc_power(field):\r\n\r\n poynt_in_points = 0.5*numpy.real(field.p * numpy.conj(field.vn))\r\n power = numpy.sum(poynt_in_points)\r\n power *= field.one_pixel_area\r\n return power",
"def calc_lamb(self, x_surface, geom):\n\n return self.rfl",
"def getSlope(self):\n return math.tan(self.angle)",
"def power(self):\r\n return self.model * self.percent / 100",
"def w(lam, gam, p):\n return np.sqrt((1 - lam*np.cos(2*np.pi*p ) )**2 + (gam*lam*np.sin(2*np.pi*p ) )**2 )",
"def plot_powerlaw(self, **kwargs):\n\n if self.gamma is None:\n self.exponent()\n p = powerlaw.plot(exponent=-self.gamma,\n xmax=self.max_deg, xmin=self.k_min,\n **kwargs\n )\n pylab.show()\n return p",
"def solve_beta_sqrt_slope(X, Y, lbd_vec, h=2.0, lr=5.0):\n p = X.shape[1]\n\n sigma_prev, sigma_new = np.var(Y)**0.5, np.var(Y)**0.5\n beta_prev, beta_new = np.zeros(p), solve_beta_slope(X, Y, sigma_new*lbd_vec)\n\n i = 1.0\n while abs(obj_sqrt_slope(X, Y, lbd_vec, beta_prev, sigma_prev) - obj_sqrt_slope(X, Y, lbd_vec, beta_new, sigma_new)) > lr:\n sigma_prev, beta_prev = sigma_new, beta_new\n sigma_new = sigma_new - (h/i) * ( 1 - np.var(Y - X@beta_new)/sigma_new**2 )\n beta_new = solve_beta_slope(X, Y, sigma_new*lbd_vec)\n i += 1\n \n# if i % 100 == 0: print('step1: i=', i)\n# print(\"i=\", i)\n# print('sigma_prev, sigma_new: ', sigma_prev, sigma_new)\n# print('obj value:', obj_sqrt_slope(X, Y, lbd_vec, beta_new, sigma_new))\n# print('difference: ', abs(obj_sqrt_slope(X, Y, lbd_vec, beta_prev, sigma_prev) - obj_sqrt_slope(X, Y, lbd_vec, beta_new, sigma_new)))\n return beta_new",
"def square_trick(bias, slope, predictor, current_value, learning_rate):\n predicted_value = bias + slope*predictor\n slope += learning_rate*predictor*(current_value-predicted_value)\n bias += learning_rate*(current_value-predicted_value)\n return slope, bias",
"def __call__(self, x):\n\n np.multiply(x, self.slope, out=x)\n np.add(x, self.bias, out=x)\n return x",
"def slope_from_origin(self):\n\n return self.y / self.x",
"def __call__(self, w):\n l1_term = self.alpha * np.linalg.norm(w, 1)\n l2_term = self.alpha * 0.5 * np.linalg.norm(w, 2)\n\n return self.r * l1_term + (1 - self.r) * l2_term",
"def slope(x1, y1, x2, y2):\n return (y2 - y1) / (x2 - x1)",
"def get_slope(x, y, deg=1, err=[]):\n inverse_error = []\n for i in err:\n inv = 1/i\n inverse_error.append(i)\n\n if len(err)>0:\n z = np.polyfit(x, y, deg, w=inverse_error)\n else:\n z = np.polyfit(x, y, deg)\n\n m, b = z\n p = np.poly1d(z)\n\n return m, b, p",
"def simulate_power(self):\n if self.p_treatment - self.p_control < 0:\n thresh = 1 - self.alpha\n else:\n thresh = self.alpha\n\n try:\n p_crit = self.norm_null.ppf(1 - thresh)\n beta = self.norm_alt.cdf(p_crit)\n except:\n self.norm_distribution()\n p_crit = self.norm_null.ppf(1 - thresh)\n beta = self.norm_alt.cdf(p_crit)\n\n power = (1 - beta) if self.p_treatment > self.p_control else beta\n self.power = power\n\n return power",
"def surface_slope(self, dB=False, sig0=False):\n # evaluate the slope of the used brdf\n brdf_slope = self.SRF.brdf_theta_diff(\n t_0=self.t_0, t_ex=self.t_ex, p_0=self.p_0,\n p_ex=self.p_ex, geometry = 'mono',\n param_dict=self.param_dict, return_symbolic=False,\n n=1)\n # evaluate the used brdf\n brdf_val = self.SRF.brdf(self.t_0, self.t_ex,\n self.p_0, self.p_ex,\n param_dict=self.param_dict)\n\n # vegetated soil contribution\n I_vegs_slope = (self.I0\n * np.exp(-(2*self.V.tau / self._mu_0))\n * (self._mu_0 * brdf_slope\n - (2 * self.V.tau / self._mu_0 + 1)\n * np.sin(self.t_0) * brdf_val))\n\n # bare soil contribution\n I_bs_slope = self.I0 * (self._mu_0 * brdf_slope\n - np.sin(self.t_0) * brdf_val)\n\n I_slope = self.SRF.NormBRDF * (\n (1. - self.bsf) * I_vegs_slope\n + self.bsf * I_bs_slope)\n\n if sig0 is False and dB is False:\n return I_slope\n else:\n I_val = self.surface()\n if sig0 is True and dB is False:\n return 4. * np.pi * (self._mu_0 * I_slope\n - np.sin(self.t_0) * I_val)\n elif sig0 is False and dB is True:\n return 10./np.log(10) * I_slope / I_val\n elif sig0 is True and dB is True:\n return 10./np.log(10) * (I_slope / I_val\n - np.tan(self.t_0))",
"def varience_of_slope(slope,NoOfPoints,tframe,redn,gain):\n Var = 6*(NoOfPoints**2 + 1)*np.abs(slope) / (5*NoOfPoints*(NoOfPoints**2 -1)*tframe) +\\\n 12*(redn**2 + gain**2 / 12.)/(NoOfPoints*(NoOfPoints**2 -1)*tframe**2)\n return Var",
"def get_slope(self) -> str:\n return self.query('slope,?')",
"def risefit(self, p, x, y, risepower, mode=0):\n assert mode in [-1, 0, 1]\n ix = np.argmin(np.fabs(x-p[2]))\n tm = np.zeros_like(x)\n expf = (x[ix:]-p[2])/p[1]\n pclip = 1.e3\n nclip = 0.\n expf[expf>pclip]= pclip\n expf[expf<-nclip] = -nclip\n tm[ix:] = p[0] * (1.0 - np.exp(-expf))**risepower\n if mode == 0:\n return tm - y\n elif mode == 1:\n return np.linalg.norm(tm-y)\n elif mode == -1:\n return tm\n else:\n raise ValueError('doubleexp: Mode must be 0 (diff), 1 (linalg.norm) or -1 (just value)')",
"def periodicity_metric(light_curve_rms, sm_phase_rms):\n return (sm_phase_rms ** 2) / (light_curve_rms ** 2)",
"def lapse(self):\n pass",
"def __call__(self, x):\n return self.slope * x + self.ordinate",
"def powerflow_rule(_m, l, y, s, t):\r\n\r\n return (- m.sigma_27[l, y, s, t] + m.sigma_28[l, y, s, t]\r\n + (m.INCIDENCE_MATRIX[l, self.g(l)] * m.lamb[self.g(l), y, s, t])\r\n + (m.INCIDENCE_MATRIX[l, self.h(l)] * m.lamb[self.h(l), y, s, t])\r\n == 0)",
"def findslopes(img):\n img = img.astype(np.float32)\n DY = np.array([[-1,-1,-1],[0, 0, 0],[1, 1, 1]]) * 1/6\n DX = DY.transpose()\n gradx = cv2.filter2D(src=img, ddepth=-1, kernel=DX)\n grady = cv2.filter2D(src=img, ddepth=-1, kernel=DY)\n\n D2Y = np.array([[0.5, 1, 0.5], [-1, -2, -1], [0.5, 1, 0.5]]) * 0.5\n D2X = D2Y.transpose()\n DXY = np.array([[-1, 0, 1], [0, 0, 0], [1, 0, -1]]) * 1/4\n grad2x = cv2.filter2D(src=img, ddepth=-1, kernel=D2X)\n grad2y = cv2.filter2D(src=img, ddepth=-1, kernel=D2Y)\n gradxy = cv2.filter2D(src=img, ddepth=-1, kernel=DXY)\n\n slopes = gradx**2 + grady**2\n slopes2 = grad2x**2 + grad2y**2 + 2 * gradxy**2\n\n return (slopes, gradx, grady, slopes2, grad2x, grad2y, gradxy)",
"def power_radiated(horn_width, horn_height):\n # Calculate the normalized power radiated\n return horn_width * horn_height / (4.0 * 120.0 * pi)",
"def slope_from_origin(self):\n\n return (self.y / self.x)",
"def slope_and_mse(x, y, Rbool=False):\n s, o, r_value, p_value, std_err = linregress(x, y)\n ypred = s * x + o\n\n mse = np.average((y - ypred) ** 2)\n\n if Rbool:\n return s, mse, r_value\n else:\n return s, mse",
"def linear_slope(self, dim=\"time\", nan_policy=\"none\"):\n return linear_slope(self._obj, dim=dim, nan_policy=nan_policy)",
"def _calc_indirect_effect(x, y, m):\n x = stats.zscore(x)\n y = stats.zscore(y)\n m = stats.zscore(m)\n direct_effect = sm.OLS(y, sm.add_constant(x)).fit().params[1]\n xs = np.stack((x, m), axis=1)\n remaining_effect = sm.OLS(y, sm.add_constant(xs)).fit().params[1]\n indirect_effect = direct_effect - remaining_effect\n proportion_mediated = 1 - remaining_effect/direct_effect\n return indirect_effect, proportion_mediated",
"def powerlawFitWithOutliers(x, y, e, outtriangle='power.png'):\n x = np.asarray(x)\n y = np.asarray(y)\n e = np.asarray(e)\n # theta will be an array of length 2 + N, where N is the number of points\n # theta[0] is the amplitude, theta[1] is the power,\n # and theta[2 + i] is the weight g_i\n def log_prior(theta):\n #g_i needs to be between 0 and 1 and limits for the amplitude and power\n if (all(tmp > 0. for tmp in theta[2:]) and all(tmp < 1. for tmp in theta[2:])) and \\\n -2. < theta[1] < -0.05 and 0. < theta[0] < 3.e2:\n return 0\n else:\n return -np.inf # recall log(0) = -inf\n\n def log_likelihood(theta, x, y, e, sigma_B):\n dy = y - theta[0] * x**theta[1]\n g = np.clip(theta[2:], 0, 1) # g<0 or g>1 leads to NaNs in logarithm\n logL1 = np.log(g) - 0.5 * np.log(2 * np.pi * e ** 2) - 0.5 * (dy / e) ** 2\n logL2 = np.log(1 - g) - 0.5 * np.log(2 * np.pi * sigma_B ** 2) - 0.5 * (dy / sigma_B) ** 2\n return np.sum(np.logaddexp(logL1, logL2))\n\n def log_posterior(theta, x, y, e, sigma_B):\n return log_prior(theta) + log_likelihood(theta, x, y, e, sigma_B)\n\n #find starting point\n def squared_loss(theta, x=x, y=y, e=e):\n dy = y - theta[0] * x**theta[1]\n return np.sum(0.5 * (dy / e) ** 2)\n\n theta1 = optimize.fmin(squared_loss, [10, -0.3], disp=False)\n\n ndim = 2 + len(x) # number of parameters in the model\n nwalkers = 400 # number of MCMC walkers\n nburn = 1000 # \"burn-in\" period to let chains stabilize\n nsteps = 10000 # number of MCMC steps to take\n\n # set theta near the maximum likelihood, with\n starting_guesses = np.zeros((nwalkers, ndim))\n starting_guesses[:, :2] = np.random.normal(theta1, 1, (nwalkers, 2))\n starting_guesses[:, 2:] = np.random.normal(0.5, 0.1, (nwalkers, ndim - 2))\n\n #initiate sampler\n sampler = emcee.EnsembleSampler(nwalkers, ndim, log_posterior, args=[x, y, e, 10])\n\n # Run a burn-in and set new starting position\n print \"Burning-in...\"\n pos, prob, state = sampler.run_mcmc(starting_guesses, nburn)\n best_pos = sampler.flatchain[sampler.flatlnprobability.argmax()]\n print \"Mean acceptance fraction:\", np.mean(sampler.acceptance_fraction)\n pos = emcee.utils.sample_ball(best_pos, best_pos/100., size=nwalkers)\n sampler.reset()\n\n print \"Running an improved estimate...\"\n pos, prob, state = sampler.run_mcmc(pos, nburn)\n print \"Mean acceptance fraction:\", np.mean(sampler.acceptance_fraction)\n sampler.reset()\n print \"Running MCMC...\"\n pos, prob, state = sampler.run_mcmc(pos, nsteps, rstate0=state)\n print \"Mean acceptance fraction:\", np.mean(sampler.acceptance_fraction)\n\n #sample shape = (nwalkers, nsteps, ndim)\n sample = sampler.chain.reshape(-1, ndim)\n\n params = np.mean(sample[:, :2], 0)\n g = np.mean(sample[:, 2:], 0)\n outliers = (g < 0.5)\n\n #Get the index with the highest probability\n maxprob_index = np.argmax(prob)\n\n #Get the best parameters and their respective errors and print best fits\n params_fit = pos[maxprob_index][:2]\n errors = [sampler.flatchain[:, i].std() for i in xrange(ndim)][:2]\n\n fig = triangle.corner(sample, labels=['amplitude', 'power'] + len(x)*['Gi', ])\n fig.savefig(outtriangle)\n plt.close()\n\n return params, params_fit, errors, outliers",
"def lam(E):\n return (12398.4/E)*1e-10",
"def fepsp_slope(trace):\n bsl=np.mean(trace[0:3])\n peak=np.mean(trace[np.argmin(trace)-1:np.argmin(trace)+1]) \n amp=trf.val_dist(bsl,peak)\n \n twenty=int(trf.find_nearest_ind(trace[0:np.argmin(trace)+1],bsl-amp*0.2))\n eighty=int(trf.find_nearest_ind(trace[0:np.argmin(trace)+1],bsl-amp*0.8))\n slope=np.mean(np.diff(trace[twenty:eighty]))\n return slope,trace[twenty],trace[eighty],trace[np.argmin(trace)]",
"def lorentz(self, X, xm, amp, w):\n return amp / (1 + ((X - xm) / (w / 2)) ** 2)",
"def pearson_r(self):\n return ((self.x - self.x.mean()) * (self.y - self.error_weighted_average(self.y, self.dy))).sum() / self.x.std() / self.y.std()",
"def linear_error(X, y, w):\n\n return np.where(y != np.sign(np.dot(X, w)), 1.0, 0.0).mean()",
"def test_powers(self):\n l = np.array([0, 1, 2])\n r = np.linspace(0, 1, 11) # rho coordinates\n\n correct_vals = np.array([np.ones_like(r), r, r**2]).T\n correct_ders = np.array([np.zeros_like(r), np.ones_like(r), 2 * r]).T\n\n values = powers(r, l, dr=0)\n derivs = powers(r, l, dr=1)\n\n np.testing.assert_allclose(values, correct_vals, atol=1e-8)\n np.testing.assert_allclose(derivs, correct_ders, atol=1e-8)",
"def wr(x,y,xcen,ycen,sigma):\n res=np.exp(-((x-xcen)**2+(y-ycen)**2)/(2.*sigma**2))/(2.*np.pi*sigma**2) \n return res",
"def evaluate(self, x):\n try:\n\n z = ((x - self[\"mu\"]) + (1.j) * abs(self[\"al\"])) / \\\n (numpy.sqrt(2.0) * abs(self[\"ad\"]))\n y = self[\"A\"] * numpy.real(sps.wofz(z))\n y /= (abs(self[\"ad\"]) * numpy.sqrt(2.0 * numpy.pi))\n y += x * self[\"lin\"] + self[\"off\"]\n\n y[numpy.where(numpy.isnan(y))] = 0.0\n except FloatingPointError as fpe:\n raise(PE.PyAFloatingPointError(\"The following floating point error occurred:\\n \" + str(fpe) + \"\\n\" +\n \"Current Parameter values:\\n\" +\n str(self.parameters()),\n solution=[\"Try to rescale/shift your abscissa. For instance, put\" +\n \"the spectral line you try to fit at position `0`.\"]))\n return y",
"def getwientemp(_inputdata, _distance, _derr, _id):\n # Maxwell-Boltzmann distribution formula probability density function\n def curve(_x, _a, _scale):\n _a1 = np.sqrt(2 / np.pi)\n _a2 = _x**2 / (2 * _a**2)\n return _scale * _a1 * (_x**2 * np.exp(-_a2)) / _a**3\n\n # Set pyplot style to be consistent through the program\n plt.style.use('seaborn-whitegrid')\n\n # Convert the distance in parsecs to metres\n _distance = 3.0857 * 10**16 * _distance\n _derr = 3.0857 * 10**16 * _derr\n # Create array for x and y axis data\n _xdata = _inputdata[:, 0]\n _ydata = _inputdata[:, 1]\n _ydatalum = _ydata\n\n # Iterate through each band and convert from Janskys to W/m^2/um\n i = 0\n while i < 5:\n _ydata[i] = 3*10**14 * (_ydata[i] * 10**-26) / (Wavelength[i]**2)\n i += 1\n # Calculate optimal values and covariance using scipy curve_fit function\n _popt, _pcov = curve_fit(curve, _xdata, _ydata)\n # Create x axis to plot curve against\n _x = np.linspace(0, 5, 100)\n # Determine y value for each point on the x axis\n _yplot = curve(_x, *_popt)\n # Plot the curve to the screen\n plt.plot(_x, _yplot)\n # Determine the area under the graph, integral gives total energy recieved per m^2\n _area = np.trapz(_yplot, dx=5/100)\n # Total luminosity found by multiplying by the surface area of a sphere with diameter of the distance\n _lum = 4 * np.pi * _distance**2 * _area\n _lumerr = 4 * np.pi * _distance * _derr * _area\n # Peak value of Maxwell-Boltzmann distribution\n _mu = 2 * _popt[0] * np.sqrt(2 / np.pi)\n\n # Plot data on the graph\n plt.plot(_xdata, _ydata, '.')\n # Set axis labels\n plt.xlabel('Wavelength (um)')\n plt.ylabel('Spectral Irradiance (W m^-2 um^-1)')\n if _id == 1:\n _str = 'Large Star'\n else:\n _str = 'Small Star'\n\n # Calculate effective surface temperature using Wien's law\n _wien = round_sig(2898 / _mu)\n # Round luminosity to 2 significant figures\n _lum = round_sig(_lum)\n # Set graph title\n plt.suptitle('Black Body Plot for the ' + _str)\n # Save to current folder\n _filename = _str + '.png'\n plt.savefig(_filename)\n # Display to the screen\n plt.show()\n\n # Returns calculated values\n return _lum, _lumerr, _wien",
"def objective(self,w):\n l = 0\n for i in range(len(self.x)):\n # Each example contributes log(sigma(y_i * x_i . w))\n l -= log(sigmoid(self.y[i] * np.dot(w, self.x[i,:])))\n # regularisation 1/2 * alpha * ||w||^2\n l += 0.5 * self.alpha * np.dot(w,w)\n return l",
"def slope(tr, sigma=None):\n tr = tr.astype(float)\n if sigma is not None:\n tr = nd.gaussian_filter1d(tr, sigma=sigma)\n m, M = np.argmin(tr), np.argmax(tr)\n a = np.abs((tr[m] - tr[M]) / (m - M))\n return a",
"def solve_beta_slope(X, Y, lbd_vec, h=0.1, lr=5.0):\n n, p = X.shape[0], X.shape[1]\n \n# i = 0\n beta_prev = np.zeros(p)\n beta_new = np.ones(p)\n while abs(obj_slope(X, Y, lbd_vec, beta_prev)-obj_slope(X, Y, lbd_vec, beta_new)) > lr:\n beta_prev = beta_new\n beta_new = prox_slope(beta_new - (h/n) * (X.T @ (X @ beta_new - Y)), h/n, lbd_vec)\n \n# i += 1\n# if i % 2 == 0:\n# print(i)\n# print(\"prev value: \", obj_slope(X, Y, lbd_vec, beta_prev))\n# print(\"new value: \", obj_slope(X, Y, lbd_vec, beta_new))\n# print(sum(abs(beta_new)))\n# print(beta_new)\n return beta_new",
"def compute_gradient_mse(y, tx, w):\n e = y - tx.dot(w)\n\n return -tx.T.dot(e) / len(e)",
"def rmse(self):\n lam = self.lam()\n weights = lam / lam.sum()\n weighted_var = self.var() * weights\n rmse = np.sqrt(weighted_var.sum())\n return rmse",
"def _regression_slope_metric(x_data, y_data):\n reg = linregress(x_data, y_data)\n return reg.slope",
"def get_blr_strength(self,\n blr: BoundedLinearRegressions) -> float:\n # Return 0 if slopes are different signs.\n if blr.minima_regression.slope * blr.maxima_regression.slope < 0:\n return 0\n\n # Return 0 if not enough data.\n if len(blr.candles) <= 3:\n return 0\n\n # Find high and low prices of the trendline period.\n high_price = max([candle.high for candle in blr.candles])\n low_price = max([candle.low for candle in blr.candles])\n\n # Find start and end of the period.\n start_moment = max([candle.moment for candle in blr.candles])\n end_moment = min([candle.moment for candle in blr.candles])\n\n # Take signal strength to be the average of the two slopes.\n minima_slope_pct = abs(blr.minima_regression.y_of_x(end_moment) - blr.minima_regression.y_of_x(start_moment)) \\\n / max(0.01, high_price - low_price)\n maxima_slope_pct = abs(blr.maxima_regression.y_of_x(end_moment) - blr.maxima_regression.y_of_x(start_moment)) \\\n / max(0.01, high_price - low_price)\n signal_strength = (minima_slope_pct + maxima_slope_pct) / 2.0\n\n # Scale down signal strength.\n signal_strength = min(1, signal_strength / 5.0)\n\n # Ensure the signal strength has the correct sign.\n if blr.minima_regression.slope < 0:\n signal_strength += -1\n\n return signal_strength",
"def RMSE(X, y, w):\r\n n = X.shape[0]\r\n f = X @ w\r\n J = np.sqrt(np.sum(np.power((y - f), 2)) / n)\r\n return J",
"def slope(x1, y1, x2, y2):\r\n delta_y = y2-y1\r\n delta_x = x2-x1\r\n return delta_y / delta_x",
"def evaluate(self,x,y):\n\n #below function comes from MATLAB Peaks function\n # return np.multiply(3*np.power((1-x), 2), np.exp(-np.power(x,2) - np.power((y+1), 2))) - np.multiply(10 * (x/5.0 - np.power(x,3) - np.power(y,5)), np.exp(-np.power(x,2)-np.power(y,2)))#- np.exp(-np.power(x+1,2)-np.power(y,2))/3.0\n # return -np.power((x-50),2) - np.power(y, 2)-3\n return 5- (np.multiply(np.multiply(np.sin(x), np.sin(y)), np.power(x,2)) + np.power(y,2))",
"def slopes(self,k,terms=None):\n\t\tNP = []\n\t\tp=self.p\n\t\tif terms==None:\n\t\t\td = len(self.series)\n\t\telse:\n\t\t\td = min(len(self.series),2*terms+10) ### HACKING HERE\n\t\tif p == 2:\n\t\t\te = 2\n\t\telse:\n\t\t\te = 1\n\t\tfor i in range(d):\n\t\t\ty = 0\n\t\t\tfor ss_wt in self[i]:\n\t\t\t\tk_ss = ss_wt[0]\n\t\t\t\tmult = ss_wt[1]\n\t\t\t\tif ss_wt[0] == \"p\":\n\t\t\t\t\ty += ss_wt[1]\n\t\t\t\telse:\t\t\t\t\t\t\n\t\t\t\t\t#### added by john 10/17, see form_ghost_shell for instructions\n\t\t\t\t\tif k_ss >= 0:\n\t\t\t\t\t\ty += (valuation(k-k_ss,p)+e)*mult\n\t\t\t\t\tif k_ss < 0:\n\t\t\t\t\t\ty += mult\n\t\t\tNP += [(i,y)]\n\n\t\tif terms==None:\n\t\t\treturn NewtonPolygon(NP).slopes()\n\t\telse:\n\t\t\treturn NewtonPolygon(NP).slopes()[0:terms]",
"def MSE(X, y, w):\r\n n = X.shape[0]\r\n f = X @ w\r\n J = np.sum(np.power((y - f), 2)) / n\r\n return J",
"def solve_power(self):\n e = self.normalized_effect_size()\n power = FTestPower().solve_power(\n effect_size=e\n ,df_num=self.df_denom\n ,df_denom=self.df_num\n ,alpha=self.alpha\n ,power=None\n ,ncc=1\n )\n return power",
"def beta_gen_slope(p):\n cardi = 0.005\n return np.array( [0]*int(p-int(cardi*p)) + list(np.arange(1, int(cardi*p)+1, 1)) )",
"def _compute_pointwise_slopes(self, alpha):\n for i, region_slope in enumerate(self.region_slopes):\n if i == 0:\n self.slopes[i] = alpha * region_slope\n else:\n # slope[k]=skMin+alpha*(-skMax+skMin)\n # skMin-skMax in the paper. But if you calculate it you see\n # the paper is wrong, but the expression is right.\n slope_min_i = (\n 4.0 * region_slope - self.slopes[i - 1]) / 3.0\n slope_max_i = 4.0 * region_slope - 3 * self.slopes[i - 1]\n self.slopes[\n i] = slope_min_i + alpha * (slope_max_i - slope_min_i)\n\n # check positivity of a and b constants.\n a, b, _c, _d = self.evaluate_spline_coeffs(i)\n if a < 0 or b < 0:\n # no good, go to next beta.\n break\n else:\n # if we didn't break in any region, this beta is appropriate.\n return True\n\n return False # This beta is invalid",
"def compute_slope(x_jnt_0, y_jnt_0, x_jnt_1, y_jnt_1):\n if x_jnt_0 == x_jnt_1:\n return None\n return (y_jnt_1 - y_jnt_0) / (x_jnt_1 - x_jnt_0)",
"def test_variance_of_slope_sums():\n\n ticker = 'GOOG'\n main_df = pd.read_pickle(settings.settings_dict['stock_data_path'])\n\n main_df = sample_slopes.create_slope_sum(main_df)\n\n slope_sums = main_df[ticker + \"slope_sum\"]\n\n print np.mean(main_df[ticker + \"slope_sum\"])\n print np.std(main_df[ticker + \"slope_sum\"])\n\n std = pd.rolling_std(slope_sums, window=20)\n\n _, ax2 = plt.subplots()\n\n ax2.plot(slope_sums)\n ax2.plot(slope_sums + std)\n ax2.plot(slope_sums - std)\n plt.legend(['Slope_Sum ', 'Slope_Sum +1 Std', 'Slope_Sum -1 Std'])\n plt.title(ticker + ' varrience of slope sum')\n plt.show()",
"def power_output_candidate_solar_rule(_m, g, y, s, t):\r\n\r\n if y != m.Y.last():\r\n return (- m.sigma_1[g, y, s, t] + m.sigma_7[g, y, s, t]\r\n - m.lamb[self.k(m, g), y, s, t]\r\n + ((m.DELTA[y] * m.RHO[y, s]) * (m.C_MC[g, y] - (m.baseline[y] * m.permit_price[y])))\r\n == 0)\r\n\r\n else:\r\n return (- m.sigma_1[g, y, s, t] + m.sigma_7[g, y, s, t]\r\n - m.lamb[self.k(m, g), y, s, t]\r\n + ((m.DELTA[y] * m.RHO[y, s]) * (1 + (1 / m.INTEREST_RATE)) * (\r\n m.C_MC[g, y] - (m.baseline[y] * m.permit_price[y])))\r\n == 0)",
"def calc_slopes(data):\n spikes = data['spikes'].item()\n mem = data['mem']\n mslopes = []\n for m, s in zip(mem, spikes.itervalues()):\n if len(s) == 0:\n print(\"No spikes: Skipping\")\n _mslope = 0\n else:\n _mslope, ign = nt.norm_firing_slope(m, s, 15*mV, 10*ms, w=2*ms)\n mslopes.append(_mslope)\n data['mslopes'] = mslopes\n return data",
"def slope(start, end):\n\tx1 = start[0]\n\ty1 = start[1]\n\tx2 = end[0]\n\ty2 = end[1]\n\ttop = float(y2 - y1) \n\tbot = float(x2 - x1)\n\tif bot == 0:\n\t\treturn None\n\telse:\n\t\treturn top / bot",
"def ellipse_dist_ratio_poly(self, theta, lwr):\n\n \"\"\"\n\n Params for FWD fit\n array([ 9.99999989e-01, 8.10852195e+07, 1.95444928e+00, 7.96543026e-02])\n this one is un-needed, since it's approximation y = 1\n\n Params for FWD_DIAG fit\n array([-0.00650758, 0.57761793, 0.35369061, 1.87834152])\n\n Params for ORTHOG fit\n array([-0.02014989, 5.7007518 , -0.83345416, 0.97711175])\n\n Params for BCK_DIAG fit\n array([-0.01608705, 9.44079769, -0.92071169, 0.89094967])\n\n Params for BCK fit\n array([ -0.01451187, 10.92674105, -0.93514904, 0.87868538])\n \"\"\"\n\n #fitting function\n def f(x,params):\n return params[0] + (1.0 / (params[1]*(x+params[2])**params[3]))\n\n #force float math, in case theta is an integer\n theta = float(theta)\n\n #into an angle index form:\n t = abs(int(4.0*theta/np.pi))\n\n if (t == 0) or (t == 8):\n return 1.0\n elif (t == 1) or (t == 7):\n #forward diagonal\n return f(lwr, [-0.00650758, 0.57761793, 0.35369061, 1.87834152])\n elif (t == 2) or (t == 6):\n #orthogonal\n return f(lwr, [-0.02014989, 5.7007518 , -0.83345416, 0.97711175])\n elif (t == 3) or (t == 5):\n #backward diagonal\n return f(lwr, [-0.01608705, 9.44079769, -0.92071169, 0.89094967])\n elif t == 4:\n #backward\n return f(lwr, [ -0.01451187, 10.92674105, -0.93514904, 0.87868538])\n else:\n #hmmm... TODO\n return 0.0",
"def calAlpha(self, offset_arr):\n\n # time_start =time.time()\n x_arr, y_arr = zip(*offset_arr)\n r_value = stats.linregress(x_arr, y_arr)[2]\n try:\n medslope, medintercept = stats.mstats.theilslopes(y_arr, x_arr)[0:2]\n except FloatingPointError as e:\n logging.error(\"CRITICAL: theilslopes FloatingPointError {} for arrays y_arr {} and x_arr {} of domain {}\".format(e, y_arr, x_arr, self.domain))\n except Exception as e:\n logging.error(\"CRITICAL: theilslopes Other error {} for arrays y_arr {} and x_arr {} of domain {}\".format(e, y_arr, x_arr, self.domain))\n raise\n\n return medslope, medintercept, r_value, r_value**2"
] | [
"0.6757806",
"0.6462289",
"0.6391217",
"0.6186872",
"0.61128086",
"0.5965611",
"0.59109455",
"0.58782387",
"0.5849429",
"0.5842808",
"0.5838932",
"0.58238125",
"0.5823275",
"0.5812738",
"0.5809786",
"0.5801164",
"0.57895917",
"0.5785309",
"0.57821614",
"0.5743392",
"0.57384443",
"0.57312053",
"0.5684633",
"0.5637274",
"0.55828613",
"0.5580899",
"0.5576676",
"0.5575982",
"0.5560625",
"0.55602634",
"0.5543295",
"0.5537741",
"0.55297256",
"0.5529342",
"0.550579",
"0.54986924",
"0.5494651",
"0.54936165",
"0.5483367",
"0.54825526",
"0.5474191",
"0.5463933",
"0.546152",
"0.54590607",
"0.5454753",
"0.54503894",
"0.5434936",
"0.54292107",
"0.54269856",
"0.5424928",
"0.54245996",
"0.54122096",
"0.54102397",
"0.54094684",
"0.54079527",
"0.5391197",
"0.5388268",
"0.53878546",
"0.5386704",
"0.53848016",
"0.5379863",
"0.53740233",
"0.5368245",
"0.5359652",
"0.53576857",
"0.53528476",
"0.53512836",
"0.53465164",
"0.5346458",
"0.5343458",
"0.5342476",
"0.53401357",
"0.5328318",
"0.5323514",
"0.5323008",
"0.5321827",
"0.53190136",
"0.53135985",
"0.53133476",
"0.53126204",
"0.5305752",
"0.5298616",
"0.52984226",
"0.52961004",
"0.5276984",
"0.5274837",
"0.5274618",
"0.5272924",
"0.5266518",
"0.5265094",
"0.52597195",
"0.5248977",
"0.5246609",
"0.52458763",
"0.52324134",
"0.52293485",
"0.5228341",
"0.5228262",
"0.52239245",
"0.5213537",
"0.5203851"
] | 0.0 | -1 |
Sample (one minus) the axis ratio of the lens galaxy from the Rayleigh distribution with scale that depends on velocity dispersion | def get_axis_ratio(self, vel_disp):
scale = self.a*vel_disp + self.b
q = 0.0
while q < self.lower:
q = 1.0 - np.random.rayleigh(scale, size=None)
return q | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getRatio(probe_num, position_vector, shot_range, dir, day ='050119r'):\n ratio_x = 0\n ratio_y = 0\n ratio_z = 0\n # helm_B = [0,0,0]\n divideby = 0\n for shot in range(shot_range[0], shot_range[1]+1):\n print( 'On shot ', day+str(shot), ' for probe ',probe_num)\n x,y,z, currmax,helmB_new = probe_calib(day+str(shot), probe_num, position_vector,dir)\n ratio_x = ratio_x + x\n ratio_y = ratio_y + y\n ratio_z = ratio_z + z\n # helm_B = [helm_B[i] + helmB_new[i] for i in len(helmB)]\n divideby = divideby + 1 #averaging over the number of shots\n ratio_Bx = ratio_x/divideby\n ratio_By = ratio_y/divideby\n ratio_Bz = ratio_z/divideby\n # helmB = [helm_B]/divideby\n # print ratio_Bx, ratio_By, ratio_Bz, helmB\n # print(\"ratio_Bx %f, ratio_By %f, ratio_Bz %f, helmB%s\"%(ratio_Bx, ratio_By, ratio_Bz, helmB))\n Bx_sqr =ratio_x**2\n By_sqr =ratio_y**2\n Bz_sqr =ratio_z**2\n B = Bx_sqr + By_sqr+ Bz_sqr\n norm_factor = np.sqrt(B)\n ratio_Bx, ratio_By, ratio_Bz = [ratio_Bx, ratio_By, ratio_Bz]/norm_factor\n\n return (ratio_Bx, ratio_By, ratio_Bz, norm_factor)",
"def get_scale():\r\n\r\n \r\n return 0.5",
"def level_mags(slide):\n return [highest_mag(slide)/downsample for downsample in slide.level_downsamples]",
"def scale(self):\n return self.distribution.scale",
"def rvs(self):\n return float(self.interp(random.rand()))",
"def get_mag_for_size(slide, size):\n max_size = slide.dimensions\n max_mag = highest_mag(slide)\n downsample = np.average([max_dim/size_dim for max_dim, size_dim in zip(max_size, size)])\n return max_mag/downsample",
"def density(self):\r\n return self.count_ones() / float(self.xspan * self.yspan)",
"def getScale(self):\n return self.factor**self.turnOn",
"def GetScale(self):\n ...",
"def v_multiplier(self):\n return (4./3)*np.pi*(self.bins[:, 1]/2)**3",
"def rho_spaxel_scale(spaxel_scale=4.0, wavelength=1.0):\n\n scale_rad = spaxel_scale / MILIARCSECS_IN_A_RAD\n rho = scale_rad * ELT_DIAM / (wavelength * 1e-6)\n return rho",
"def naturalAspectRatio(self):\n return math.sin(self.view_angle_h) / math.sin(self.view_angle_v)",
"def sample_radii(size=1):\n interp_func = InterpolatedUnivariateSpline(m_grid, np.log(r_grid), k=1)\n return np.exp(interp_func(np.random.uniform(0, 1, size=size))) * u.kpc",
"def rvs(self, size: int) -> np.ndarray:\n return np.random.randn(size, self.ndim) * self.scales + self.means",
"def rscale(mag=10.0):\n if mag > 11.5:\n return 0.5\n elif mag > 11.0:\n return 1.0\n elif mag > 10.5:\n return 1.5\n elif mag > 10.0:\n return 1.5\n elif mag > 9.5:\n return 2.0\n elif mag > 9.0:\n return 2.5\n elif mag > 8.5:\n return 3.0\n else:\n return 3.5",
"def scale(self):\n return self._gev_bijector.scale",
"def calculateRatio(levelDims):\n highestReso = np.asarray(levelDims[0])\n lowestReso = np.asarray(levelDims[-1])\n Xratio, Yratio = highestReso/lowestReso\n return (Xratio, Yratio)",
"def sphvol(r):\n return (4./3.)*np.pi*(r**3.)",
"def adv_ratio(self): # XXX\r\n bw = StatsRouter.global_bw_mean\r\n if bw == 0.0: return 0\r\n else: return self.bw/bw",
"def scale(self):",
"def sphere_volume(r):\n\treturn 4/3. * math.pi * r ** 3",
"def scale(self):\n return self.scale_factor / CONSTANTS.AU",
"def freq_optimization(self):\n index = identify_scale(self.vz, True)\n # In case the patient is limping\n if index > 35:\n index = index / 2\n print(f\"Scale used is {index}\")",
"def adapt_length_scale(self):\n Ne = max(1,self.Ne)\n Nc = max(1,self.Nc)\n ratio = Ne/(Ne+Nc)\n self.mu *= 2*ratio",
"def volume_unit_ball(d_dimensions: int, norm=2) -> float:\n\n # get ball\n if norm == 0:\n b = float(\"inf\")\n elif norm == 1:\n b = 1.0\n elif norm == 2:\n b = 2.0\n else:\n raise ValueError(f\"Unrecognized norm: {norm}\")\n\n return (np.pi ** (0.5 * d_dimensions)) ** d_dimensions / gamma(b / d_dimensions + 1)",
"def random_vector_in_unit_ball():\n x = np.random.normal(loc=0.0, scale=1.0, size=(numSamples, self.dim))\n z = np.random.exponential(scale=1.0, size=(numSamples,))\n d = (np.sum(np.square(x), axis=1) + z) ** 0.5\n d = d[:, np.newaxis]\n return x / d",
"def s_multiplier(self):\n return 4 * np.pi * (self.bins[:, 1]/2)**2",
"def ret_vol_ratio(self) -> float:\n return self.geo_ret / self.vol",
"def get_mpg():\n return uniform(20.0, 50.0)",
"def ratio_4_doc(shot, dir, num_probes = 16):\n # data = [[0] *3 for i in range(num_probes)]\n # magdata = hdr.getMagData(shot)\n probe_locs = get_probeLocs_calib_setup(shot)\n data=hdr.getquikData(shot)\n time,eastcurrent,westcurrent = loadcurrent(shot)#using eastcurrent\n ratios = [[0]*3 for i in range(num_probes)]\n for probe in range(num_probes):\n ratio =1\n inverted = False\n # fig, (ax1, ax2, ax3) = plt.subplots(3, 1, sharex=True)\n B=sp.signal.detrend(cumtrapz(data.unCalibData[dir,probe,:], data.time))\n plot_time = data.time[:-1]\n if(np.max(B[2000:6000]) < abs(np.average(B[2000:6000]))):\n # print(\"\\ninverted!\")\n inverted = True\n # B = B* -1\n # ratio = -1\n\n r = probe_locs[probe]\n max_current = polyPeak_noPlot(time,eastcurrent)\n # if(np.max(eastcurrent) < -1*(np.min(eastcurrent))):\n # max_current = -1*np.min(eastcurrent)\n helmB = helmholtz2(r,max_current)\n\n # THis is intentional! I am only using shots where the cmponent is lined\n # up with the z-direction of the helmholz field\n # helmB[2] = helmB[2]*-1\n max_theoretical = np.max(helmB[2])\n max_measured = polyPeak_noPlot(plot_time, B)\n\n\n ratio = ratio * max_theoretical/max_measured\n if ratio > 30000 or ratio < -30000:\n ratio = 0\n\n\n ratios[probe][dir] = ratio\n # print(\"\\tRatio is: %f\" %(ratio))\n # if(inverted and ratio <0):\n # print(\"Inverted and ratio reflects that\")\n # elif(not inverted and ratio <0):\n if probe ==1:\n print(\"\\n Ratio: %5f \\n\\t max_measured: %3f, \\n\\t max_theoretical: %5f\"%(ratio,max_measured,max_theoretical ) )\n\n # Compute the median of the non-zero elements\n # m = np.median(foo[foo > 0])\n # Assign the median to the zero elements\n # foo[foo == 0] = m\n return ratios",
"def sphere_volume(r):\n return (4/3) * 3.14159 * r**3",
"def sivina(self):\n return (self.r + self.g + self.b) / 3",
"def spaxel_scale(scale=4, wave=1.0):\n\n scale_rad = scale / MILIARCSECS_IN_A_RAD\n rho = scale_rad * ELT_DIAM / (wave * 1e-6)\n print(rho)",
"def pointChargeElecFluxDensity(q: T, r: T) -> float:\n return q / surfaceIntSphere(r)",
"def scale_mag_1(x):\n return np.array([np.true_divide(ui, mag(x)) for ui in x])",
"def stdsize(image,r=30):\n image = square(image)\n s,_ = image.shape\n return interpolation.zoom(image,(r+0.5)/float(s))",
"def doppler_scale(self):\n return self._dopplerscale",
"def scaling(self):\n return self.__scaling",
"def pull_arm(self):\n return np.random.normal(loc = 0, scale = 1)+self.mean",
"def _ratio(sim: xr.DataArray, ref: xr.DataArray) -> xr.DataArray:\n out = sim / ref\n out.attrs[\"units\"] = \"\"\n return out",
"def R(self):\n\t\treturn (arange(self.rbins) + 0.5) * (self.cbins - 0.5) / self.rbins",
"def get_scale_parameter(self):\n\n shape_in_gamma_func = float(1 + (1 / self._shape_parameter))\n gamma_func = special.gamma(shape_in_gamma_func)\n self._scale_parameter = self._mean_fire_recurrence / gamma_func",
"def radialApproxEffect(hubdist1,hubdist2,width,length):\n #Grating coordinates\n x,y = np.meshgrid(np.linspace(-width,width,1000),\\\n np.linspace(-length,length,1000))\n y1 = y + hubdist1\n y2 = y + hubdist2\n\n #Convert to period and yaw angle\n period1 = np.sqrt(x**2+y1**2)/hubdist1*160. #nm\n period2 = np.sqrt(x**2+y2**2)/hubdist2*160. #nm\n yaw = blazeYaw(1.5*np.pi/180,2.4,3,160.)\n yaw1 = np.pi/2 - np.arctan(x/y1) + yaw\n yaw2 = np.pi/2 - np.arctan(x/y2) + yaw\n\n #Determine alpha and beta\n beta0,alpha0 = litBetaAlpha(1.5*np.pi/180,2.4,3,160.)\n alpha1 = alpha0 + 3*2.4/period1*np.sin(yaw1)\n alpha2 = alpha0 + 3*2.4/period2*np.sin(yaw2)\n beta1 = beta0 + (3*2.4/period1)*np.cos(yaw1)\n beta2 = beta0 + (3*2.4/period2)*np.cos(yaw2)\n\n #Determine spot shifts\n x1 = hubdist2*(alpha1/beta1)\n x2 = hubdist2*(alpha2/beta2)\n \n\n pdb.set_trace()\n \n return x1,x2",
"def radial_velocity(wv_obj, fx_obj, sig_obj, wv_std, fx_std, sig_std, obj_name, std_name, rv_std, rv_std_err, order,\n xcorr_width, cut, cutstart, cutend):\n\n # The more random iterations, the better... but it takes longer\n n_iter = 1000\n\n # Step 1: Fix the spectra:\n # * Select only the region in which they overlap\n # * Make a new stretched wavelength array (for sub-pixel precision work)\n # * Interpolate the data onto the new wavelength array\n # * Remove large scale slopes so we only compare line and band features\n\n # Find where standard and object overlap ---------------\n wv_min = max([min(wv_std), min(wv_obj)])\n wv_max = min([max(wv_std), max(wv_obj)])\n\n n_pix_std = len(wv_std)\n\n # Creates ln standard wavelength array ---------------------------------\n # AR 2013.0423 The wavelength array only covers the overlap region. Also, I'm folding the rebinning by 10 into this statement.\n acoef_std = (n_pix_std * 10 - 1) / (math.log(wv_max) - math.log(wv_min))\n bcoef_std = (n_pix_std * 10) - (acoef_std * math.log(wv_max))\n\n arr = np.arange(n_pix_std * 10) + 1\n wv_ln_std = np.exp((arr - bcoef_std) / acoef_std)\n\n # AR 2012.1018: Find the conversion between pixels and velocity. This will vary from instrument\n # to instrument and spectral order to spectral order, so we should preferentially calculate this\n # based on the actual input spectrum.\n # AR 2013.0422: Change the calculation to happen AFTER the corrected wavelength scale has been made\n # Find the average pixel/spectrum offset\n # Note: even though it's called micron_per_pix, it will still work if the wavelengths are\n # angstroms instead (it really converts <wavelength unit> to km/s)\n\n # Interpolate data onto same ln wavelength scale -------------------------------\n\n fx_interp_std = np.interp(wv_ln_std, wv_std, fx_std)\n fx_interp_obj = np.interp(wv_ln_std, wv_obj, fx_obj)\n sig_interp_std = np.interp(wv_ln_std, wv_std, sig_std) # AR 2012.1018 Also need to rebin sig\n sig_interp_obj = np.interp(wv_ln_std, wv_obj, sig_obj) # AR 2012.1018 Also need to rebin sig\n\n # Rebin Data ----------------------------\n\n wv_arr_std = np.asarray(wv_ln_std, dtype=float)\n fx_arr_obj = np.asarray(fx_interp_obj, dtype=float)\n fx_arr_std = np.asarray(fx_interp_std, dtype=float)\n sig_arr_obj = np.asarray(sig_interp_obj, dtype=float)\n sig_arr_std = np.asarray(sig_interp_std, dtype=float)\n\n datalen = len(fx_arr_obj)\n\n # Step 2: Measure vsini:\n # Note that as of 2015.0605, this doesn't actually work.\n\n # AR 2014.0922: For vsini:\n # In a loop:\n # Take the standard spectrum\n # broaden it to width X\n # autocorrelate,\n # measure width of gaussian Y (this is supposed to give you a means of translating between width-of-cross-correlation and vsini)\n # Fit function solving Y for X.\n # For each cross correlation of object and standard:\n # Determine vsini\n\n pix_scale = (2.99792458 * 10 ** 5) / acoef_std\n\n # vsinirange = [1,2,5,10,20,30,40,50,60,80,100,100]\n # widthrange = []\n # for v in vsinirange:\n # # Make convolution kernel for v km/s\n # kernel = lsf_rotate(pix_scale,v)\n # # Broaden the standard spectrum\n # fx_obj_wide = np.correlate(fx_arr_obj, kernel, mode='same')\n # # Rectify the spectrum\n # fx_obj_orig = (fx_arr_obj - np.mean(fx_arr_obj))/np.std(fx_arr_obj,ddof=1)\n # fx_obj_wide = (fx_obj_wide - np.mean(fx_obj_wide))/np.std(fx_obj_wide,ddof=1)\n #\n # # Remove a cubic (flatten the spectrum)\n # coeff,pcov = op.curve_fit(cubic,wv_arr_std,fx_obj_wide)\n # fx_obj_wide = fx_obj_wide - (coeff[0] + 
coeff[1]*wv_arr_std + coeff[2]*wv_arr_std**2 + coeff[3]*wv_arr_std**3)\n # coeff,pcov = op.curve_fit(cubic,wv_arr_std,fx_obj_orig)\n # fx_obj_orig = fx_obj_orig - (coeff[0] + coeff[1]*wv_arr_std + coeff[2]*wv_arr_std**2 + coeff[3]*wv_arr_std**3)\n #\n # # Cross-correlate the spectrum with its broadened self\n # ycorr = np.correlate(fx_obj_orig, fx_obj_wide, mode='full')\n # # Now determine where the peak is (should be near 0)\n # length = len(ycorr)\n # xcorr = np.arange(length) - length//2\n # xmid = np.argmax(ycorr)\n # ymax = np.max(ycorr)\n # # Chop out just the portion of the array near the peak\n # xcorr_min=xmid-xcorr_width\n # xcorr_max=xmid+xcorr_width\n # ycorr1=ycorr[xcorr_min:xcorr_max]\t#isolate section of array with gaussian\n # xcorr1=xcorr[xcorr_min:xcorr_max] #isolate the same section of the pixel range\n #\n # # set up initial values for gaussian fitting via chi2\n # sig = 10\n # sky = np.min(ycorr1)/1.2\n # # print ycorr1[-1],ycorr1[0],xcorr1[-1],xcorr1[0]\n # sky2 = (ycorr1[-1]-ycorr1[0])/(xcorr1[-1]-xcorr1[0])\n # lnamp = np.log(ymax/1.2-sky)\t# guess some values\n # mean = xcorr[xmid]\n #\n # amp = np.exp(lnamp)\n # sig2 = sig**2\n # # suggestion from D. Hogg 12/15/12: Add extra linear feature to fit.\n # # suggestion from D. Hogg 12/15/12: operate on ln(amp) so that the amplitude CANNOT be negative.\n # def chi2(p):\t#define gaussian function for fitting\n # sig2=p[2] ** 2\n # m = (np.exp(p[0]) * np.exp(-0.5 * (xcorr1 - p[1]) ** 2 / sig2)) + p[3] + p[4]*xcorr1\n # return (ycorr1 - m)\n #\n # # Fit the gaussian.\n # popt, ier = op.leastsq(chi2, [lnamp, mean, sig, sky, sky2])\n # lnamp, mean, sig, sky, sky2 = popt\n #\n # amp = np.exp(lnamp)\n # # record the width\n # widthrange.append(sig)\n #\n # # Plot all the widths to get a width-vsini curve\n # vsinicoeff,popt = op.curve_fit(quartic,np.asarray(widthrange),np.asarray(vsinirange))\n #\n # relationx = np.arange(50,200,1)\n # relationy = vsinicoeff[0]+vsinicoeff[1]*relationx+vsinicoeff[2]*relationx**2+vsinicoeff[3]*relationx**3+vsinicoeff[4]*relationx**4\n # figv = plt.figure(1)\n # axv = figv.add_subplot(211)\n # axv.scatter(widthrange,vsinirange)\n # axv.plot(relationx,relationy)\n # #ax.text(70,100,\"{0:} {1:} {2:} {3:} {4:}\".format(vsinicoeff))\n\n # 3. Cross-correlate the data, using n_iter trials:\n # * Generate two random gaussian noises scaled to the uncertainty on the fluxes\n # * Apply those gaussian noises to the standard and target stars\n # * Cross-correlate the standard and target stars\n # * Find and then cut out just the part of the cross-correlation curve near the maximum\n # * Set up gaussian\n # * Fit gaussian to that center part\n # * Save fitted parameters (pixel shift aka mean of gaussian, width aka stddev of gaussian)\n # * Repeat n_iter times\n\n # Cross correlation loop --------------------------------\n pix_shift = np.array([]) # initialize array for pixel shift values\n pix_width = np.zeros(n_iter) # initialize array for pixel width values\n l = 0\n\n # using the xrange generator rather than making a full list saves memory\n while len(pix_shift) < n_iter:\n # prepare the randomized data\n # GETTING ARRAYS READY FOR CROSS CORRELATION\n\n\n # Randomize noise:\n # create gaussian distribution of random numbers b/t 1 and -1, multiply err by numbers, add numbers to flux\n # I have drastically simplified the arrays here AR 2013.0319\n # AR 2013.0318: There was a problem, previously: noise was a fixed value, not linked to the known error values\n\n # AR 2013.0321: Speed fix. 
Rather than step through the array and generate one\n # normally-distributed error value scaled to the SNR at that point, I will generate an\n # array of normally-distributed error values scaled to 1, and then multiply by the SNR:\n # One array generation, one array multiplication.\n\n rand_dist = np.random.normal(loc=0.0, scale=1.0, size=datalen)\n rand_dist2 = np.random.normal(loc=0.0, scale=1.0, size=datalen)\n\n fx_temp_obj = np.asarray(fx_arr_obj + rand_dist * sig_arr_obj)\n fx_temp_std = np.asarray(fx_arr_std + rand_dist2 * sig_arr_std)\n mean_obj = np.mean(fx_temp_obj)\n mean_std = np.mean(fx_temp_std)\n stddev_obj = np.std(fx_temp_obj, ddof=1)\n stddev_std = np.std(fx_temp_std, ddof=1)\n\n # Regularize data (subtract mean, divide by std dev) (Should definitely be done AFTER noise was added)\n fx_reg_temp_obj = fx_temp_obj - mean_obj\n fx_reg_temp_obj = fx_reg_temp_obj / stddev_obj\n fx_reg_temp_std = fx_temp_std - mean_std\n fx_reg_temp_std = fx_reg_temp_std / stddev_std\n\n # curve fit - remove a cubic AR 2012.1113\n coeff, pcov = op.curve_fit(cubic, wv_arr_std, fx_reg_temp_obj)\n fx_reg_temp_obj = fx_reg_temp_obj - (\n coeff[0] + coeff[1] * wv_arr_std + coeff[2] * wv_arr_std ** 2 + coeff[3] * wv_arr_std ** 3)\n coeff, pcov = op.curve_fit(cubic, wv_arr_std, fx_reg_temp_std)\n fx_reg_temp_std = fx_reg_temp_std - (\n coeff[0] + coeff[1] * wv_arr_std + coeff[2] * wv_arr_std ** 2 + coeff[3] * wv_arr_std ** 3)\n\n # CROSS CORRELATION\n\n # compute the cross-correlation between the two spectra\n\n ycorr = np.correlate(fx_reg_temp_obj, fx_reg_temp_std, mode='full')\n # time required: 0.045 seconds average\n\n # http://stackoverflow.com/questions/12323959/fast-cross-correlation-method-in-python\n # conv1 = np.zeros(datalen * 2)\n # conv1[datalen/2:datalen/2+datalen] = fx_reg_temp_obj\n # conv2 = fx_reg_temp_std[::-1]\n # ycorr = signal.fftconvolve(conv1,conv2, mode='valid')\n # time required: 0.006 seconds average, but it segfaults by the third try.\n\n ## slight smoothing AR 2013.0315\n # ycorr = scipy.ndimage.filters.gaussian_filter1d(ycorr,11)\n\n # create the x offset axis (same length as ycorr, with 0 in the MIDDLE)\n length = len(ycorr)\n xcorr = np.arange(length) - length // 2\n # AR 2012.1126 Select a tiny piece around the maximum to fit with a gaussian.\n xmid = np.argmax(ycorr)\n ymax = np.max(ycorr)\n # now take just the portion of the array that matters\n xcorr_min = int(xmid - xcorr_width)\n xcorr_max = int(xmid + xcorr_width)\n ycorr1 = ycorr[xcorr_min:xcorr_max] # isolate section of array with gaussian\n xcorr1 = xcorr[xcorr_min:xcorr_max] # isolate the same section of the pixel range\n ycorr2 = ycorr[xcorr_min - 50:xcorr_max + 50]\n xcorr2 = xcorr[xcorr_min - 50:xcorr_max + 50]\n\n # suggestion from D. Hogg 12/15/12: Add extra linear feature to fit.\n # suggestion from D. 
Hogg 12/15/12: operate on ln(amp) so that the amplitude CANNOT be negative.\n def chi2(p): # define gaussian function for fitting\n sig2 = p[2] ** 2\n m = (np.exp(p[0]) * np.exp(-0.5 * (xcorr1 - p[1]) ** 2 / sig2)) + p[3] + p[4] * xcorr1\n return (ycorr1 - m)\n\n # set up initial values for chi2\n sig = 10\n sky = np.min(ycorr1) / 1.2\n # print ycorr1[-1],ycorr1[0],xcorr1[-1],xcorr1[0]\n sky2 = (ycorr1[-1] - ycorr1[0]) / (xcorr1[-1] - xcorr1[0])\n lnamp = np.log(ymax / 1.2 - sky) # guess some values\n mean = xcorr[xmid]\n\n amp = np.exp(lnamp)\n sig2 = sig ** 2\n\n popt, ier = op.leastsq(chi2, [lnamp, mean, sig, sky, sky2])\n lnamp, mean, sig, sky, sky2 = popt\n\n amp = np.exp(lnamp)\n\n # print_num=len(pix_shift)%100\n print_num = l % 100\n if print_num == 0:\n ## Uncomment the following to make a plot every 500 fits.\n # fig = plt.figure(l)\n # ax = fig.add_subplot(111)\n # my_gauss = (amp * (np.exp(-0.5 * ((xcorr1 - mean) ** 2) / sig**2))) + sky + sky2 * xcorr1\n # ax.plot(xcorr1,my_gauss,'r--')\n # ax.plot(xcorr2,ycorr2,'#000000')\n # ax.plot(xcorr1,ycorr1-my_gauss,'#00CC00')\n ##if abs(mean - xcorr[xmid]) > 5:\n ## print \"Mean is off\",mean,xcorr[xmid]\n # figname='rv_{0:}_{1:}_{2:}_{3:}.png'.format(std_name,obj_name,order,l)\n # ax.set_xlim(xcorr[xcorr_min-50],xcorr[xcorr_max+50])\n # fig.savefig(figname)\n # fig.clf()\n # plt.close()\n print\n \"amp={0: 12.4f} mu={1: 10.4f} sig={2: 9.4f} sky={3: 11.4f} sky2={4: 8.4f} n_entries={5:}\".format(amp,\n mean,\n sig,\n sky,\n sky2,\n len(\n pix_shift))\n\n l += 1\n if (cut == 0) | (mean > np.float(cutstart)) & (mean < np.float(cutend)):\n pix_shift = np.append(pix_shift, mean)\n # if ier < 5:\n # I'm calculating the vsini now because I need errors, and the vsini calculation is not linear.\n # pix_width[l] = vsinicoeff[0] + vsinicoeff[1] * sig + vsinicoeff[2] * sig**2 + vsinicoeff[3] * sig**3 + vsinicoeff[4] * sig**4\n\n # End cross correlation loop ---------------------------------\n\n # 4. Find the RV\n # All 5000 rv fits have been calculated and stored in arrays\n # 4a. Cut out outlier RVs. Useful if the cross-correlation produces occasional bad results. Use cutstart and cutend to force the code to only fit a gaussian to a certain region. Don't over-use this to force the result you want, though.\n # 4b. Compute the mean pixel shift and pixel shift uncertainty.\n # 4c. Convert pixel shift into RV\n # 4d. Shift the wavelength array appropriately - all lines should now line up.\n\n ## Uncomment this to print out an example cross-correlation diagram\n # fig = plt.figure(2)\n # ax = fig.add_subplot(111)\n # ax.plot(xcorr,ycorr,'k')\n # figname='rv_{0:}_{1:}_{2:}_xcorr.png'.format(std_name,obj_name,order)\n # fig.savefig(figname)\n # fig.clf()\n # plt.close()\n\n # Turn the list of pixel shifts into a numpy array\n pix_shift = np.asarray(pix_shift)\n\n # 4a. Cut out outliers from the pixel shift\n if cut == 1:\n pix_shift = pix_shift[np.where((pix_shift > np.float(cutstart)) & (pix_shift < np.float(cutend)))]\n\n # 4b. Compute the mean pixel shift (rv value) and pixel shift uncertainty (RV uncertainty).\n\n print\n l, len(pix_shift), np.float(len(pix_shift)) / np.float(n_iter) * 100.0\n\n mu = np.mean(pix_shift)\n sigma = np.std(pix_shift, ddof=1)\n\n # vsini = np.mean(pix_width)\n # vsini_err = np.std(pix_width,ddof=1)\n\n # axh = figv.add_subplot(212)\n # n, bins, patches=axh.hist(pix_width,bins=30,normed=1.0,facecolor='green',align='mid')\n # figv.savefig('vsiniplot.png')\n # plt.clf()\n # plt.close()\n\n # 4c. 
Transform pixel shift to shift in radial velocity\n\n # AR 2013.0423: The actually appropriate method requires a speed-of-light correction. This works for both angstroms and microns.\n rv_meas = (2.99792458 * 10 ** 5 * mu) / acoef_std\n rv_meas_err = (2.99792458 * 10 ** 5 * sigma) / acoef_std\n\n # 4d. Apply shift to arrays\n wv_rvcorr_obj = wv_arr_std * (1 - rv_meas / (2.99792458 * 10 ** 5))\n\n ## 5. Create plots ---------------------------------\n # The plots are the only reason find_rv.py needs to know the names of either star, or the RV of the standard.\n\n # Plot object and standard so you can clearly see that shift exists --------------------------------\n fig = plt.figure(1)\n\n # AR 2013.0703 Regularize the spectra for display purposes in the final graph\n # I'm using the mean and stddev of the last random-added attempt so it won't be perfect...\n fx_reg_obj = fx_arr_obj - mean_obj\n fx_reg_obj = fx_reg_obj / stddev_obj\n fx_reg_std = fx_arr_std - mean_std\n fx_reg_std = fx_arr_std / stddev_std\n\n # Plots target and standard with shift applied\n ax1 = fig.add_subplot(311)\n ax1.plot(wv_rvcorr_obj, fx_reg_obj, 'red')\n ax1.plot(wv_arr_std, fx_reg_std, 'blue')\n ax1.set_xlabel('wavelength (microns)')\n ax1.set_ylabel('normalized flux')\n target = 'Target: %s' % (obj_name)\n standard = 'Standard: %s' % (std_name)\n ax1.annotate(target, xy=(.7, .9), xycoords='axes fraction', xytext=(.6, .9), textcoords='axes fraction',\n color='red')\n ax1.annotate(standard, xy=(.7, .8), xycoords='axes fraction', xytext=(.6, .8), textcoords='axes fraction',\n color='blue')\n\n sig2 = sig ** 2\n my_gauss = (amp * (np.exp(-0.5 * ((xcorr1 - mu) ** 2) / sig2))) + sky + sky2 * xcorr1\n\n # Plots example of gaussian fit to cross correlation function\n ax2 = fig.add_subplot(312)\n ax2.plot(xcorr1, ycorr1, 'k.')\n ax2.plot(xcorr1, my_gauss, 'r--', linewidth=2)\n ax2.plot(xcorr1, ycorr1 - my_gauss, '#00CC00')\n ax2.set_xlabel('example of fit to cross correlation function')\n ax2.set_xlim(xcorr[xcorr_min - 50], xcorr[xcorr_max + 50])\n # print pix_shift\n\n\n ## Plot histogram of pixel shift values --------------------------------\n ax3 = fig.add_subplot(313)\n n, bins, patches = plt.hist(pix_shift, bins=30, normed=1.0, facecolor='green', align='mid')\n # Plot best fit gaussian over histogram\n y = mlab.normpdf(bins, mu, sigma)\n ax3.plot(bins, y, 'r--', linewidth=2)\n ax3.set_xlabel('radial velocity of target (pixels)')\n ax3.set_ylabel('frequency (normalized)')\n rad = 'RV = %.3f +/- %.3f' % (rv_meas, rv_meas_err)\n corr = 'RV (corr) = %.3f +/- %.3f' % (rv_std + rv_meas, (rv_std_err ** 2 + rv_meas_err ** 2) ** (0.5))\n # vsinistr = 'VsinI = %.3f +/- %.3f' % (vsini,vsini_err)\n ax3.annotate(rad, xy=(.66, .9), xycoords='axes fraction', xytext=(.66, .9), textcoords='axes fraction',\n color='black')\n ax3.annotate(corr, xy=(.6, .8), xycoords='axes fraction', xytext=(.60, .8), textcoords='axes fraction',\n color='black')\n # ax3.annotate(vsinistr,xy=(.6,.6),xycoords='axes fraction',xytext=(.60,.6),textcoords='axes fraction',color='black')\n ax3.annotate('{0:+5.2f} {1: 5.2f}'.format(mu, sigma), xy=(.05, .9), xycoords='axes fraction', xytext=(.05, .9),\n textcoords='axes fraction', color='black')\n ax3.annotate('{0:5.3f} km/s/pix'.format((2.99792458 * 10 ** 5) / acoef_std), xy=(.05, .8), xycoords='axes fraction',\n xytext=(.05, .8), textcoords='axes fraction', color='black')\n fig.subplots_adjust(hspace=.3)\n\n figname = 'rv_%s_%s_%d.png' % (std_name, obj_name, order)\n fig.savefig(figname)\n fig.clf()\n 
plt.close()\n\n # plt.figure(l+1)\n # plt.hist(pix_shift)\n\n # END RADIAL VELOCITY FUNCTION -----------------------------------------\n return rv_meas, rv_meas_err",
"def span_rbw_ratio(self):\r\n res = self._visa.query(f\"SENSE{self._screen()}:BANDWIDTH:RESOLUTION:RATIO?\")\r\n return 1 / float(res)",
"def vol(x):\r\n return pi*(topdia(x)/2000.)**2 * length (x)",
"def calcul_v_sphere(r):\n volume = 4/3 * math.pi * (r ** 3)\n return volume",
"def vratio(self):\n return self.run_command('vratio')[0]",
"def flux_ratio(self):\n return self._flux_ratio",
"def scaling(self):\n return self._scaling",
"def scaling(self):\n return self._scaling",
"def r0(self):\n return self.p[0] / self.p[1]",
"def update_r(self):\n self.gamma_r = self.gamma_s - self.gamma_q\n self.Sigma_r = self.Sigma_s - self.Sigma_q",
"def normalizing_constant(self):\n\t\tdim = self.train_data.shape[1]\n\t\treturn 1 / (2 * np.pi * ((self.bandwidth) ** 2)) ** (dim / 2)",
"def sos_correction(self, ratio):\n\n # Correct velocities\n self.u_mps = self.u_mps * ratio\n self.v_mps = self.v_mps * ratio",
"def scale(self):\n return self._scale",
"def calc_hypercube_volume(r: float, n: int) -> float:\n return (r * 2) ** n",
"def mag(self):\n return self.photosamplers.get_estimate(mag=True)[0]",
"def Mass_in_R(self, r):\n return self.int_over_density(r)",
"def scale(self):\n return self._a",
"def gen_img_settings_quality(l):\n \n lhalf = 0.5*l\n \n ### sphere radius\n \n sphere_radius = 0.7\n #sphere_rgbcolor = [0.25,0.65,0.65]\n \n ### RESOLUTION\n \n img_widthpx = 1024\n img_heightpx = 1024\n\n ### includes and defaults\n\n povray_includes = [\"colors.inc\", \"textures.inc\", \"shapes.inc\"]\n povray_defaults = [vapory.Finish( 'ambient', 0.1,\n\t \t\t\t 'diffuse', 0.65,\n\t\t \t\t 'specular', 0.5,\n\t\t\t \t 'shininess', 0.53,\n\t\t\t\t 'opacity', 1.0)]\n\n\n ### light sources\n\n sun1 = vapory.LightSource([lhalf, lhalf, -1.01*lhalf], 'color', 'White')\n sun2 = vapory.LightSource([lhalf, lhalf, -1.01*lhalf], 'color', [0.7, 0.7, 0.7])\n\n ### background\n\n background = vapory.Background('color', [1,1,1])\n\n ### camera\n\n #povray_cam = vapory.Camera('angle', 75, 'location', [-15 , 15.0+0.5,15.0-0.25],'look_at', [0.25 , 15.0+0.5, 15.0-0.25])\n povray_cam = vapory.Camera('location', [lhalf, lhalf, -1.01*lhalf], 'look_at', [lhalf,lhalf,0], 'angle', 90)\n\n ### text\n # If desired include this in the povray_objects - array declared in the loop\n #text1 = vapory.Text( 'ttf', '\"timrom.ttf\"' ,'\"Division:\"', 0.01, 0.0, 'scale', [0.5,0.5,0.5],'rotate', [0,90,0], 'translate' , [0.0 , 15.0+2.75-1 , 15.0+1.5], vapory.Pigment('Black') ) \n\n ### render quality\n\n quality = 10\n \n return sphere_radius, img_widthpx, img_heightpx, povray_includes, povray_defaults, sun1, sun2, background, povray_cam, quality",
"def variance_normalize(self):\n self.img = self.img / np.sqrt(np.sum(self.img ** 2))",
"def get_scale_parameter(self):\r\n \r\n if self.scale_parameter == 0.0: \r\n shape_in_gamma_func = float(1+(1/self.shape_parameter))\r\n gamma_func = special.gamma(shape_in_gamma_func)\r\n self.scale_parameter = (self.mean_fire_recurrence/gamma_func)\r\n return self.scale_parameter\r\n else:\r\n return self.scale_parameter",
"def calc_hypersphere_volume(r: float, n: int) -> float:\n return (math.pi ** (n / 2) * r ** n) / gamma((n / 2) + 1)",
"def _getScalesRand(self):\n if self.P > 1:\n scales = []\n for term_i in range(self.n_randEffs):\n _scales = sp.randn(self.diag[term_i].shape[0])\n if self.jitter[term_i] > 0:\n _scales = sp.concatenate(\n (_scales, sp.array([sp.sqrt(self.jitter[term_i])])))\n scales.append(_scales)\n scales = sp.concatenate(scales)\n else:\n scales = sp.randn(self.vd.getNumberScales())\n return scales",
"def normScale( x, y ):\n if x == 0 and y == 0:\n return 0\n else:\n return 1.0 / pow( x*x + y*y, 0.5 )",
"def rmspe(self) -> float:\n return float(np.sqrt(np.mean(np.square(((self.true - self.predicted) / self.true)), axis=0)))",
"def sphere(self, x):\r\n # return np.random.rand(1)[0]**0 * sum(x**2) + 1 * np.random.rand(1)[0]\r\n return sum((x+0)**2)",
"def _vrms2(x, y, inc_deg,\n surf_lum, sigma_lum, qobs_lum,\n surf_pot, sigma_pot, qobs_pot,\n beta, tensor, sigmaPsf, normPsf,\n pixSize, pixAng, step, nrad, nang):\n # Axisymmetric deprojection of both luminous and total mass.\n # See equation (12)-(14) of Cappellari (2008)\n #\n inc = np.radians(inc_deg)\n\n qintr_lum = qobs_lum**2 - np.cos(inc)**2\n if np.any(qintr_lum <= 0):\n raise RuntimeError('Inclination too low q < 0')\n qintr_lum = np.sqrt(qintr_lum)/np.sin(inc)\n if np.any(qintr_lum < 0.05):\n raise RuntimeError('q < 0.05 components')\n dens_lum = surf_lum*qobs_lum / (sigma_lum*qintr_lum*np.sqrt(2*np.pi))\n\n qintr_pot = qobs_pot**2 - np.cos(inc)**2\n if np.any(qintr_pot <= 0):\n raise RuntimeError('Inclination too low q < 0')\n qintr_pot = np.sqrt(qintr_pot)/np.sin(inc)\n if np.any(qintr_pot < 0.05):\n raise RuntimeError('q < 0.05 components')\n dens_pot = surf_pot*qobs_pot / (sigma_pot*qintr_pot*np.sqrt(2*np.pi))\n\n # Define parameters of polar grid for interpolation\n #\n w = sigma_lum < np.max(np.abs(x)) # Characteristic MGE axial ratio in observed range\n\n if w.sum() < 3:\n qmed = np.median(qobs_lum)\n else:\n qmed = np.median(qobs_lum[w])\n\n rell = np.sqrt(x**2 + (y/qmed)**2) # Elliptical radius of input (x, y)\n\n psfConvolution = (np.max(sigmaPsf) > 0) and (pixSize > 0)\n\n # Kernel step is 1/4 of largest value between sigma(min) and 1/2 pixel side.\n # Kernel half size is the sum of 3*sigma(max) and 1/2 pixel diagonal.\n #\n if (nrad*nang > x.size) and (not psfConvolution): # Just calculate values\n\n xPol = x\n yPol = y\n\n else: # Interpolate values on polar grid\n\n if psfConvolution: # PSF convolution\n if step == 0:\n step = max(pixSize/2., np.min(sigmaPsf))/4.\n mx = 3*np.max(sigmaPsf) + pixSize/np.sqrt(2)\n else: # No convolution\n step = np.min(rell.clip(1)) # Minimum radius of 1pc\n mx = 0\n\n # Make linear grid in log of elliptical radius RAD and eccentric anomaly ANG\n # See Appendix A\n #\n rmax = np.max(rell) + mx # Major axis of ellipse containing all data + convolution\n logRad = np.linspace(np.log(step), np.log(rmax), nrad) # Linear grid in np.log(rell)\n ang = np.linspace(0, np.pi/2, nang) # Linear grid in eccentric anomaly\n radGrid, angGrid = map(np.ravel, np.meshgrid(np.exp(logRad), ang))\n xPol = radGrid*np.cos(angGrid)\n yPol = radGrid*np.sin(angGrid) * qmed\n\n # The model Vrms computation is only performed on the polar grid\n # which is then used to interpolate the values at any other location\n #\n wm2Pol = np.empty_like(xPol)\n mgePol = np.empty_like(xPol)\n for j in range(xPol.size):\n wm2Pol[j] = quadva(_integrand, [0., 1.],\n args=(dens_lum, sigma_lum, qintr_lum,\n dens_pot, sigma_pot, qintr_pot,\n xPol[j], yPol[j], inc, beta, tensor))[0]\n mgePol[j] = np.sum(surf_lum * np.exp(-0.5/sigma_lum**2 *\n (xPol[j]**2 + (yPol[j]/qobs_lum)**2)))\n\n\n if psfConvolution: # PSF convolution\n\n nx = np.ceil(rmax/step)\n ny = np.ceil(rmax*qmed/step)\n x1 = np.linspace(-nx, nx, 2*nx)*step\n y1 = np.linspace(-ny, ny, 2*ny)*step\n xCar, yCar = np.meshgrid(x1, y1) # Cartesian grid for convolution\n\n # Interpolate MGE model and Vrms over cartesian grid\n #\n r1 = 0.5*np.log(xCar**2 + (yCar/qmed)**2) # Log elliptical radius of cartesian grid\n e1 = np.arctan2(np.abs(yCar/qmed), np.abs(xCar)) # Eccentric anomaly of cartesian grid\n\n wm2Car = bilinear_interpolate(logRad, ang, wm2Pol.reshape(nang, nrad), r1, e1)\n mgeCar = bilinear_interpolate(logRad, ang, mgePol.reshape(nang, nrad), r1, e1)\n\n nk = np.ceil(mx/step)\n kgrid = np.linspace(-nk, nk, 
2*nk)*step\n xgrid, ygrid = np.meshgrid(kgrid, kgrid) # Kernel is square\n if pixAng != 0:\n xgrid, ygrid = rotate_points(xgrid, ygrid, pixAng)\n\n # Compute kernel with equation (A6) of Cappellari (2008).\n # Normaliztion is irrelevant here as it cancels out.\n #\n kernel = np.zeros_like(xgrid)\n dx = pixSize/2\n sp = np.sqrt(2)*sigmaPsf\n for j in range(len(sigmaPsf)):\n kernel += normPsf[j] \\\n * (special.erf((dx-xgrid)/sp[j]) + special.erf((dx+xgrid)/sp[j])) \\\n * (special.erf((dx-ygrid)/sp[j]) + special.erf((dx+ygrid)/sp[j]))\n kernel /= np.sum(kernel)\n\n # Seeing and aperture convolution with equation (A3)\n #\n muCar = signal.fftconvolve(wm2Car, kernel, mode='same') \\\n / signal.fftconvolve(mgeCar, kernel, mode='same')\n\n # Interpolate convolved image at observed apertures.\n # Aperture integration was already included in the kernel.\n #\n mu = bilinear_interpolate(x1, y1, muCar, x, y)\n\n else: # No PSF convolution\n\n muPol = wm2Pol/mgePol\n\n if nrad*nang > x.size: # Just returns values\n mu = muPol\n else: # Interpolate values\n r1 = 0.5*np.log(x**2 + (y/qmed)**2) # Log elliptical radius of input (x,y)\n e1 = np.arctan2(np.abs(y/qmed), np.abs(x)) # Eccentric anomaly of input (x,y)\n mu = bilinear_interpolate(logRad, ang, muPol.reshape(nang, nrad), r1, e1)\n\n return mu",
"def getSphereRadius(self):\n return 1.5",
"def normalise( self, rWantedMax = 100. ):\n nWantedMax = int( self.getSampleMaxValue() * rWantedMax / 100)\n nCurrentMax = max( self.data.max(), -self.data.min() )\n rRatio = nWantedMax / float(nCurrentMax)\n if( nCurrentMax == nWantedMax ):\n return False\n logging.info( \"nCurrentMax: %s\" % nCurrentMax )\n logging.info( \"nWantedMax: %s\" % nWantedMax ) \n logging.info( \"applying a %f ratio to the whole sound\" % rRatio )\n self.data *= rRatio # another option is to make a np.round(self.data*rRatio), but it's perhaps less linear (on a linear elevation for example)\n return True",
"def itensity_normalize_one_volume(volume):\n pixels = volume[volume > 0]\n mean = pixels.mean()\n std = pixels.std()\n out = (volume - mean)/std\n # random normal too slow\n #out_random = np.random.normal(0, 1, size = volume.shape)\n out_random = np.zeros(volume.shape)\n out[volume == 0] = out_random[volume == 0]\n return out",
"def get_scale_parameter(self):\n\n if self.scale_parameter == 0.0:\n shape_in_gamma_func = float(1+(1/self.shape_parameter))\n gamma_func = special.gamma(shape_in_gamma_func)\n self.scale_parameter = (self.mean_fire_recurrence/gamma_func)\n return self.scale_parameter\n else:\n return self.scale_parameter",
"def surfaceIntSphere(r: float) -> float:\n return 4.0 * np.pi * r * r",
"def itensity_normalize_one_volume(volume):\n pixels = volume[volume > 0]\n mean = pixels.mean()\n std = pixels.std()\n out = (volume - mean)/std\n out_random = np.random.normal(0, 1, size = volume.shape)\n out[volume == 0] = out_random[volume == 0]\n return out",
"def golden_ratio():\n print((1+math.sqrt(5))/2)",
"def current_ratio(self):\n return self.current_assets / self.current_liabilities",
"def generate(self) -> np.ndarray:\n self.sample_r()\n self.sample_z()\n ind: np.ndarray = np.less_equal(self.z_sample, self.r_sample)\n self.r_sample = self.r_sample[ind]\n self.z_sample = self.z_sample[ind]\n return np.sort(np.sqrt(np.subtract(np.square(self.r_sample), np.square(self.z_sample))))",
"def pixel_size_ratio(self):\n return 2**(self.levels[-1] - self.levels[0])",
"def normalize(self):\n if self.norm():\n self._ar = self._ar / self.norm()",
"def pears():\r\n quad_1 = x_array - x_mean()\r\n quad_2 = y_array - y_mean()\r\n top = sum(quad_1 * quad_2)\r\n quad_3 = sum(quad_1 ** 2)\r\n quad_4 = sum(quad_2 ** 2)\r\n bottom = np.sqrt(quad_3 * quad_4)\r\n pears = top / bottom\r\n return pears",
"def sample_from_unit_ball(rng, dim):\n vec = rng.randn(dim)\n return vec / np.sqrt(np.sum(vec**2))",
"def specular(self) -> float:\n return self.GetSpecular()",
"def test_vel_width():\n import astropy.units as u\n spiral_arm = survey.get_spiral_slice(track = \"Perseus\", \n vel_width = 20.)\n spiral_arm2 = survey.get_spiral_slice(track = \"Perseus\", \n vel_width = 20.*u.km/u.s)\n\n assert np.allclose(spiral_arm[\"INTEN\"], spiral_arm2[\"INTEN\"], equal_nan = True)",
"def FindScale(self):\n\n ## 6 and from the cv code the distance is 6 then we are good\n print(\"TODO: Very hard\")",
"def rolloff_scale(self):\n return self._rolloffscale",
"def width_v_a(model: SingleRhNeutrinoModel) -> float:\n u = 0.5 * np.tan(2 * model.theta)\n return 9 * ALPHA_EM * GF**2 / (256 * np.pi**4) * model.mx**5 * u**2",
"def rvs(self, *args, **kwds):\n rndm = kwds.pop('random_state', None)\n args, loc, scale, size = self._parse_args_rvs(*args, **kwds)\n cond = np.logical_and(self._argcheck(*args), (scale >= 0))\n if not np.all(cond):\n raise ValueError(\"Domain error in arguments.\")\n\n if np.all(scale == 0):\n return loc * np.ones(size, 'd')\n\n # extra gymnastics needed for a custom random_state\n if rndm is not None:\n random_state_saved = self._random_state\n self._random_state = check_random_state(rndm)\n\n if isinstance(size, tuple):\n if len(size) > 0:\n raise ValueError(size)\n else:\n pass\n elif not isinstance(size, int):\n raise ValueError(size)\n\n low = np.log(args[0] - 0.4999)\n high = np.log(args[1] + 0.4999)\n size = self._random_state.randint(args[2], args[3] + 1)\n self._size = size\n vals = np.rint(\n np.exp(self._random_state.uniform(low=low, high=high, size=size))\n ).astype(int)\n\n vals = vals * scale + loc\n\n # do not forget to restore the _random_state\n if rndm is not None:\n self._random_state = random_state_saved\n\n vals = tuple([int(val) for val in vals])\n\n return vals",
"def sample_agn_luminosity(self, z):\n\t\t# Assign redshift bin\n\t\tis_less_than_right_edge = (z < self.z_bins)\n\t\talpha = self.alphas[is_less_than_right_edge][0]\n\t\tbeta = self.betas[is_less_than_right_edge][0]\n\t\tM_star = self.M_stars[is_less_than_right_edge][0]\n\n\t\t# Evaluate function\n\t\tpmf = self.get_double_power_law(alpha, beta, M_star)\n\n\t\t# Sample luminosity\n\t\tsampled_M = np.random.choice(self.M_grid, None, replace=True, p=pmf)\n\t\treturn sampled_M",
"def testStatisticsRamp(self):\n\n \n nx = 101\n ny = 64\n img = afwImage.ImageF(afwGeom.Extent2I(nx, ny))\n \n z0 = 10.0\n dzdx = 1.0\n mean = z0 + (nx/2)*dzdx\n stdev = 0.0\n for y in range(ny):\n for x in range(nx):\n z = z0 + dzdx*x\n img.set(x, y, z)\n stdev += (z - mean)*(z - mean)\n\n stdev = math.sqrt(stdev/(nx*ny - 1))\n \n stats = afwMath.makeStatistics(img, afwMath.NPOINT | afwMath.STDEV | afwMath.MEAN)\n testmean = stats.getValue(afwMath.MEAN)\n teststdev = stats.getValue(afwMath.STDEV)\n \n self.assertEqual(stats.getValue(afwMath.NPOINT), nx*ny)\n self.assertEqual(testmean, mean)\n self.assertEqual(teststdev, stdev )\n \n stats = afwMath.makeStatistics(img, afwMath.STDEV | afwMath.MEAN | afwMath.ERRORS)\n mean, meanErr = stats.getResult(afwMath.MEAN)\n sd = stats.getValue(afwMath.STDEV)\n \n self.assertEqual(mean, img.get(nx/2, ny/2))\n self.assertEqual(meanErr, sd/math.sqrt(img.getWidth()*img.getHeight()))\n \n # ===============================================================================\n # sjb code for percentiles and clipped stats\n \n stats = afwMath.makeStatistics(img, afwMath.MEDIAN)\n self.assertEqual(z0 + dzdx*(nx - 1)/2.0, stats.getValue(afwMath.MEDIAN))\n \n stats = afwMath.makeStatistics(img, afwMath.IQRANGE)\n self.assertEqual(dzdx*(nx - 1)/2.0, stats.getValue(afwMath.IQRANGE))\n \n stats = afwMath.makeStatistics(img, afwMath.MEANCLIP)\n self.assertEqual(z0 + dzdx*(nx - 1)/2.0, stats.getValue(afwMath.MEANCLIP))",
"def get_size_for_mag(slide, mag):\n max_size = slide.dimensions\n max_mag = highest_mag(slide)\n downsample = max_mag/mag\n return [np.int(np.round(dim/downsample)) for dim in max_size]",
"def realize(self):\n if numpy.random.rand() > self._P:\n self._w *= 0.0",
"def set_ratio(self, value):\n scene = self.scenes[self.current_scene]\n scene.set_perspective(ratio=value)\n self.redraw()",
"def frechet_var_approx(dist_proj):\n return torch.mean(dist_proj ** 2).item()",
"def test_jam_axi_rms():\n np.random.seed(123)\n xbin, ybin = np.random.uniform(low=[-55, -40], high=[55, 40], size=[1000, 2]).T\n\n inc = 60. # Assumed galaxy inclination\n r = np.sqrt(xbin**2 + (ybin/np.cos(np.radians(inc)))**2) # Radius in the plane of the disk\n a = 40 # Scale length in arcsec\n vr = 2000*np.sqrt(r)/(r+a) # Assumed velocity profile\n vel = vr * np.sin(np.radians(inc))*xbin/r # Projected velocity field\n sig = 8700/(r+a) # Assumed velocity dispersion profile\n rms = np.sqrt(vel**2 + sig**2) # Vrms field in km/s\n\n surf = np.array([39483., 37158., 30646., 17759., 5955.1, 1203.5, 174.36, 21.105, 2.3599, 0.25493])\n sigma = np.array([0.153, 0.515, 1.58, 4.22, 10, 22.4, 48.8, 105, 227, 525])\n qObs = np.full_like(sigma, 0.57)\n\n distance = 16.5 # Assume Virgo distance in Mpc (Mei et al. 2007)\n mbh = 1e8 # Black hole mass in solar masses\n beta = np.full_like(surf, 0.3)\n\n surf_lum = surf # Assume self-consistency\n sigma_lum = sigma\n qobs_lum = qObs\n surf_pot = surf\n sigma_pot = sigma\n qobs_pot = qObs\n\n sigmapsf = 0.6\n pixsize = 0.8\n goodbins = r > 10 # Arbitrarily exclude the center to illustrate how to use goodbins\n\n # The model is similar but not identical to the adopted kinematics!\n rmsModel, ml, chi2, flux = jam_axi_rms(\n surf_lum, sigma_lum, qobs_lum, surf_pot, sigma_pot, qobs_pot,\n inc, mbh, distance, xbin, ybin, plot=True, rms=rms, sigmapsf=sigmapsf,\n beta=beta, pixsize=pixsize, tensor='zz', goodbins=goodbins)\n plt.pause(0.01)",
"def _scale(waveform):\n # Get random scale factor\n scale_factor = tf.random_uniform(shape=[], minval=0.5, maxval=2.5, dtype=tf.float32)\n\n return waveform * scale_factor",
"def calculate_gear_ratio(front_gear, back_gear):\n return front_gear/back_gear",
"def test_mixing_ratio_dimensions():\n p = 998. * units.mbar\n e = 73.75 * units.hPa\n assert str(mixing_ratio(e, p).units) == 'dimensionless'",
"def sample_spherical(self):\n vec = np.random.randn(self.dims, self.arms)\n vec /= np.linalg.norm(vec, axis=0)\n self.contexts = vec.T",
"def compute_scale(self, box, plane):\n center, normal = plane\n vertex_dots = [np.dot(vertex, normal) for vertex in box[1:]]\n vertex_dots = np.sort(vertex_dots)\n center_dot = np.dot(center, normal)\n scales = center_dot / vertex_dots[:4]\n return np.mean(scales)"
] | [
"0.5984484",
"0.5951197",
"0.5821933",
"0.5732663",
"0.57284033",
"0.567641",
"0.56427747",
"0.56382126",
"0.5614669",
"0.56115395",
"0.5601904",
"0.5573603",
"0.5534662",
"0.55199206",
"0.54970366",
"0.54887855",
"0.5470825",
"0.5469452",
"0.5453029",
"0.54443336",
"0.54377395",
"0.54361856",
"0.5432325",
"0.54116726",
"0.5410369",
"0.54055005",
"0.54039025",
"0.54000986",
"0.53984237",
"0.5394167",
"0.539172",
"0.5382458",
"0.5372501",
"0.5362778",
"0.534237",
"0.53421605",
"0.5338337",
"0.53357774",
"0.53349197",
"0.53081346",
"0.53015816",
"0.52989775",
"0.5292666",
"0.52892095",
"0.52816767",
"0.52812535",
"0.52799845",
"0.52749836",
"0.52733266",
"0.52685577",
"0.52685577",
"0.52658826",
"0.52622604",
"0.5255107",
"0.524976",
"0.5249241",
"0.52414453",
"0.52371293",
"0.5228722",
"0.5213565",
"0.52132654",
"0.5201776",
"0.5199404",
"0.5195616",
"0.5186215",
"0.51842445",
"0.5180033",
"0.51772577",
"0.51708025",
"0.51701546",
"0.5157222",
"0.515592",
"0.51493216",
"0.51478654",
"0.5140613",
"0.5138449",
"0.51369274",
"0.51299185",
"0.5127124",
"0.5125921",
"0.5125846",
"0.51255316",
"0.5123558",
"0.5121582",
"0.51198167",
"0.5111662",
"0.510736",
"0.51054466",
"0.50925636",
"0.5090776",
"0.50904715",
"0.5088079",
"0.50876373",
"0.5082914",
"0.50819147",
"0.5079094",
"0.50724703",
"0.5071392",
"0.50705963",
"0.506378"
] | 0.7055274 | 0 |
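For orientation, a minimal standalone sketch of the rejection loop used by get_axis_ratio in the row above; the slope a, intercept b, and lower cutoff are placeholder assumptions (the snippet's self.a, self.b, self.lower are not defined in the row), not values taken from the dataset:

import numpy as np

def sample_axis_ratio(vel_disp, a=5.7e-4, b=0.38, lower=0.2):
    # Rayleigh scale assumed to grow linearly with velocity dispersion (a, b are placeholders).
    scale = a * vel_disp + b
    q = 0.0
    # Redraw until one minus the Rayleigh draw clears the lower bound on the axis ratio.
    while q < lower:
        q = 1.0 - np.random.rayleigh(scale, size=None)
    return q

if __name__ == "__main__":
    np.random.seed(0)
    print(sample_axis_ratio(250.0))  # e.g. axis ratio for a ~250 km/s dispersion galaxy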
Evaluate the double power law at the given grid of absolute magnitudes | def get_double_power_law(self, alpha, beta, M_star):
denom = 10.0**(0.4*(alpha + 1.0)*(self.M_grid - M_star))
denom += 10.0**(0.4*(beta + 1.0)*(self.M_grid - M_star))
dn = 1.0/denom
dn /= np.sum(dn)
return dn | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_powers(self):\n l = np.array([0, 1, 2])\n r = np.linspace(0, 1, 11) # rho coordinates\n\n correct_vals = np.array([np.ones_like(r), r, r**2]).T\n correct_ders = np.array([np.zeros_like(r), np.ones_like(r), 2 * r]).T\n\n values = powers(r, l, dr=0)\n derivs = powers(r, l, dr=1)\n\n np.testing.assert_allclose(values, correct_vals, atol=1e-8)\n np.testing.assert_allclose(derivs, correct_ders, atol=1e-8)",
"def main():\n print 'Running the power method...'\n dim = input('Give the dimension : ')\n nbit = input('How many iterations ? ')\n j = complex(0, 1)\n rnd = np.random.normal(0, 1, (dim, dim)) \\\n + np.random.normal(0, 1, (dim, dim))*j\n nbs = np.random.normal(0, 1, (dim, 1)) \\\n + np.random.normal(0, 1, (dim, 1))*j\n rndmat = np.matrix(rnd)\n rndvec = np.matrix(nbs)\n eigmax = power_method(rndmat, rndvec, nbit)\n check(rndmat, eigmax)",
"def test_powell(self):\n fun = get_problem('powell', dimension=2, lower=-4, upper=5)\n self.assertAlmostEqual(fun(np.zeros(2)), 0.0)",
"def beinflumat(x_axis, y_axis, e_eff):\n len_x = len(x_axis)\n len_y = len(y_axis)\n influence_matrix_complete = np.zeros((len_x, len_y, len_x, len_y))\n\n # generate coordinate grids\n a_factor = (x_axis[-1] - x_axis[0]) / (len_x - 1) / 2\n b_factor = (y_axis[-1] - y_axis[0]) / (len_y - 1) / 2\n x_grid = __beinflumatgrid(x_axis)\n y_grid = __beinflumatgrid(y_axis)\n\n # use numexpr to evaluate expressions\n xpa = ne.evaluate('x_grid + a_factor')\n xma = ne.evaluate('x_grid - a_factor')\n ypb = ne.evaluate('y_grid + b_factor')\n ymb = ne.evaluate('y_grid - b_factor')\n\n # calculate complete influence matrix\n for j in range(0, len_y):\n for j_prime in range(0, len_y):\n influence_matrix_complete[:, j, :, j_prime] = \\\n (np.multiply(xpa, np.log(\n np.divide(\n ((ypb[j, j_prime]) +\n np.sqrt(np.multiply((ypb[j, j_prime]),\n (ypb[j, j_prime])) +\n np.multiply(xpa, xpa))),\n ((ymb[j, j_prime]) +\n np.sqrt(np.multiply((ymb[j, j_prime]),\n (ymb[j, j_prime])) +\n np.multiply(xpa, xpa)))))) +\n (ypb[j, j_prime]) * np.log(\n np.divide(\n (xpa +\n np.sqrt(np.multiply((ypb[j, j_prime]),\n (ypb[j, j_prime])) +\n np.multiply(xpa, xpa))),\n (xma +\n np.sqrt(np.multiply((ypb[j, j_prime]),\n (ypb[j, j_prime])) +\n np.multiply(xma, xma))))) +\n np.multiply(xma, np.log(\n np.divide(\n ((ymb[j, j_prime]) +\n np.sqrt(np.multiply((ymb[j, j_prime]),\n (ymb[j, j_prime])) +\n np.multiply(xma, xma))),\n ((ypb[j, j_prime]) +\n np.sqrt(np.multiply((ypb[j, j_prime]),\n (ypb[j, j_prime])) +\n np.multiply(xma, xma)))))) +\n (ymb[j, j_prime]) * np.log(\n np.divide(\n (xma +\n np.sqrt(np.multiply((ymb[j, j_prime]),\n (ymb[j, j_prime])) +\n np.multiply(xma, xma))),\n (xpa +\n np.sqrt(np.multiply((ymb[j, j_prime]),\n (ymb[j, j_prime])) +\n np.multiply(xpa, xpa))))))\n\n return influence_matrix_complete * 1 / e_eff * 2 / pi",
"def runpower_one(matrix, n):\n\t#get initial vector\n\tv = np.zeros(n)\n\tw = np.zeros(n)\n\tfor j in range(n):\n\t\tv[j] = np.random.uniform(0,1)\n\t#print 'matrix', matrix\n\t#print 'v', v\n\tT = 10000 #number of iterations\n\ttol = 1e-06\n\toldnormw = 0\n\tfor t in range(T):\n\t\tw = matrix.dot(v)\n\t\t#print 't', t, 'w',w\n\t\tnormw = (np.inner(w,w))**.5\n\t\tv = w/normw\n\t\t#print 't',t,'v',v\n\t\t#print 't',t,'normw',normw, 'old', oldnormw\n\t\tif np.abs(normw - oldnormw)/normw < tol:\n\t\t\t#print ' breaking'\n\t\t\tbreak\n\t\toldnormw = normw\n\treturn normw, v",
"def compute_power(self, visibilities):\n # Grid visibilities only if we're not using \"grid_centres\"\n\n if self.baselines_type != \"grid_centres\":\n if(self.n_obs==1):\n visgrid,kernel_weights = self.grid_visibilities(visibilities)\n else:\n visgrid,kernel_weights = self.grid_visibilities_parallel(visibilities)\n else:\n visgrid = visibilities\n \n # Transform frequency axis\n visgrid = self.frequency_fft(visgrid, self.frequencies, self.ps_dim, taper=signal.blackmanharris, n_obs = self.n_obs)#self.frequency_taper)\n\n # Get 2D power from gridded vis.\n power2d = self.get_power(visgrid,kernel_weights, ps_dim=self.ps_dim)\n\n if(os.path.exists(self.datafile[0][:-4]+\".kernel_weights.npy\")==False):\n np.save(self.datafile[0][:-4]+\".kernel_weights.npy\",kernel_weights)\n \n return power2d",
"def testAlphaTwoNllsMatchANormalDistribution(self):\n x = jnp.linspace(-10, 10, 1000)\n scale = 1.7\n nll = self.variant(self._distribution.nllfun)(x, 2, scale)\n nll_true = -scipy.stats.norm(0., scale).logpdf(x)\n chex.assert_tree_all_close(nll, nll_true, atol=1e-5, rtol=1e-5)",
"def lawsonite():\n\n rho = 3090.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 214.; C[0,1] = 69.; C[0,2] = 82.; C[0,3] = 0.; C[0,4] = 0.; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 226.; C[1,2] = 65.; C[1,3] = 0.; C[1,4] = 0.; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 259.; C[2,3] = 0.; C[2,4] = 0.; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 60.; C[3,4] = 0.; C[3,5] = 0.\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 65.; C[4,5] = 0.\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 17.\n\n return C, rho",
"def powerlaw(E,alpha,A):\n\n\treturn A*E**alpha",
"def func_ludwigson(eps,k1,n1,k2,n2,):\n return k1*eps**n1+np.exp(k2+n2*eps)",
"def test_closure():\n x = torch.randn(300_000, 3)\n Ys = [o3.spherical_harmonics(l, x, normalize=True) for l in range(0, 3 + 1)]\n for l1, Y1 in enumerate(Ys):\n for l2, Y2 in enumerate(Ys):\n m = Y1[:, :, None] * Y2[:, None, :]\n m = m.mean(0) * 4 * math.pi\n if l1 == l2:\n i = torch.eye(2 * l1 + 1)\n assert (m - i).abs().max() < 0.01\n else:\n assert m.abs().max() < 0.01",
"def get_power(frames, num_fft):\n #a = get_magnitude(frames, num_fft)\n #b = np.square(a)\n #print('max : ', np.max(a))\n #print('min : ', np.min(a))\n #print('sq max : ', np.max(b))\n #print('sq min : ', np.min(b))\n #print(a.shape)\n #print(b.shape)\n #return b/num_fft\n return np.square(get_magnitude(frames, num_fft) / np.sqrt(num_fft))",
"def power(self):\n return irradiance_on_plane(self.vnorm, self.h,\n self.date, self.lat) * self.s * self.eff",
"def compute_zp_self_energy_double_grid(self):\n self.check_temperatures()\n self.self_energy = (\n self.sum_qpt_functions_double_grid('get_zp_self_energy_sternheimer',\n 'get_zp_self_energy_active'))",
"def Power(A:np.array ,x=None,N=25,tol=1e-10) -> (float,np.array):\n\n if x is None:\n x = np.random.rand(A.shape[0])\n\n for i in range(N):\n x_new = np.dot(A,x)\n x_new_norm = np.linalg.norm(x_new ,np.inf)\n\n if np.allclose(x, (x_new/x_new_norm), rtol=tol):\n print('Itr:', i)\n break\n\n x = x_new / x_new_norm\n micra = np.dot(x, np.dot(A,x)) / np.dot(x,x)\n\n return micra, x",
"def power2d(f,*kw):\n ft2=np.abs(np.fft.fft2(f))**2\n m=f.shape\n k=np.meshgrid(range(m[0]),range(m[1]))\n k=np.sqrt(k[0]**2+k[1]**2)\n a=2\n k0=1.0/a**0.5\n k1=1.0*a**0.5\n power=[]\n kk=[]\n while(k1 <= m[0]//2):\n kk.append((k0*k1)**0.5)\n w=np.where((k>k0) & (k <= k1))\n power.append(ft2[w].sum())\n k0=k1\n k1=k1*a\n pl.loglog(kk,power,*kw)",
"def energy_function(self, x):\n \n return -T.dot(T.transpose(x), T.dot(self.W, x)) -\\\n T.dot(T.transpose(self.b), x)",
"def compute_dynamical_zp_renormalization_double_grid(self):\n self.check_temperatures()\n self.zero_point_renormalization = (\n self.sum_qpt_functions_double_grid('get_zpr_static_sternheimer',\n 'get_zpr_dynamical_active'))\n self.renormalization_is_dynamical = True",
"def exo1():\n randu = randn(N/ 2, N/ 2, 2); % a random vector field\n b = 2\n for i in 1: 4:\n LLs_u = Li{i}(LiS{i}(randu))\n % relative error should be very small\n norm(abs(LLs_u(: ) - b*randu(: )))/ norm(randu(: ))",
"def dim_pow(dims, exp):\n return (\n dims[0] * exp,\n dims[1] * exp,\n dims[2] * exp,\n dims[3] * exp,\n dims[4] * exp,\n dims[5] * exp,\n dims[6] * exp,\n )",
"def FO2(lam):\n return 1.096 + 1.385 *1e-3 *lam**(-2) + 1.448 *1e-4 *lam**(-4)",
"def dd_xpowalpha(cls,grid,alpha,cutoff=False):\n grid.l.info('bc.hom: Setting initial data to (-x)^alpha.')\n grid.l.debug('bc.hom: Parameters to dd_xpowalpha: alpha={},cutoff={}'.format(alpha,cutoff))\n if alpha is 0:\n def tmp(x): return float(x[1]<=0)\n return cls._tpl(grid, tmp) \n\n if cutoff:\n def tmp(x):\n return sum(pow(-1*float(x[i]<0)*x[i],alpha) for i in range(0,grid.u.ndim))\n else:\n def tmp(x):\n return sum(pow(float(x[i]>=0)*x[i],alpha)-pow(-1*float(x[i]<0)*x[i],alpha) for i in range(0,grid.u.ndim))\n return cls._tpl(grid, tmp)",
"def alorentz(self, X, xm, amp, w, a):\n # w(x) = 2 * w / (1 + np.exp(a * (X - xm)))\n return amp / (1 + ((X - xm) / ((2 * w / (1 + np.exp(a * (X - xm)))) / 2)) ** 2)",
"def potential_energy_per_mode(proj_displ,eigvals): #,hess=None,check=False): \n return 0.5 * ( np.square(proj_displ).T * eigvals ).T #, 0.5 * proj_displ * omega_sqr @ proj_displ",
"def spatial_expval(map_):\n map_ = map_ / np.sum(map_)\n x, y = np.meshgrid(np.arange(map_.shape[1]), np.arange(map_.shape[0]))\n\n return np.sum(map_ * x), np.sum(map_ * y)",
"def eval_func_on_grid(f, re, im, N):\n l = re[1] - re[0]\n h = im[1] - im[0]\n resL = N*l #horizontal resolution\n resH = N*h #vertical resolution\n x = np.linspace(re[0], re[1],resL)\n y = np.linspace(im[0], im[1], resH)\n x, y = np.meshgrid(x,y)\n z = x + 1j*y\n w = f(z)\n return w",
"def __pow__(self, a: float) -> np.ndarray:\n return np.e**(a*self.logarithm)",
"def sqrty():\n return Operator([[(1.+1.j)/2,(-1-1.j)/2],[(1.+1.j)/2,(1.+1.j)/2]])",
"def __beinflumatgrid(axis):\n len_axis = len(axis)\n vec = np.zeros((1, len_axis))\n vec[0, :] = axis\n vertical_ax = np.zeros((len_axis, 1))\n vertical_ax[:, 0] = axis\n grid = np.repeat(vec, len_axis, axis=0)\n return np.absolute(np.subtract(grid, vertical_ax))",
"def powellsumfcn(x: np.ndarray) -> np.ndarray:\n n = x.shape[1]\n absx = np.abs(x)\n\n scores = np.zeros((x.shape[0], 1))\n for i in range(n):\n scores += absx[:, i] ** (i + 1)\n\n return scores",
"def lambertw(x):\n eps = 1e-8\n w = x\n while True:\n ew = math.exp(w)\n w_new = w - (w * ew - x) / (w * ew + ew)\n if abs(w - w_new) <= eps:\n break\n w = w_new\n return w",
"def _evaluate_xyz(self,x,y,z=0.):\n return -np.pi * self._rhoc_M /(self.n+1.)*self.a**3*self._b*self._c * \\\n _potInt(x, y, z, self._a2, self._b2*self._a2, self._c2*self._a2, self.n)",
"def scipy_fun2(x):\n answer = 0.0\n for i in range(0, len(x)):\n answer += (math.pow(-1,i))*(math.pow((i+1),math.pow(x[i], (i+1))))\n return answer",
"def solve_power(self):\n e = self.normalized_effect_size()\n power = FTestPower().solve_power(\n effect_size=e\n ,df_num=self.df_denom\n ,df_denom=self.df_num\n ,alpha=self.alpha\n ,power=None\n ,ncc=1\n )\n return power",
"def compute_dynamical_zp_renormalization_modes_double_grid(self):\n self.check_temperatures()\n self.zero_point_renormalization_modes = (\n self.sum_qpt_functions_double_grid('get_zpr_static_sternheimer_modes',\n 'get_zpr_dynamical_active_modes'))\n self.renormalization_is_dynamical = True",
"def lam(E):\n return (12398.4/E)*1e-10",
"def test_pow_2ndord_2vars():\n x, y = fwd.Variable(), fwd.Variable()\n f = x**3 + y**3\n assert equals(f.derivative_at((x, y), {x: 1.5, y:2.5}, order=2), 0.0)\n assert equals(f.derivative_at((x, x), {x: 1.5, y:2.5}, order=2), 9.0)\n assert equals(f.derivative_at((y, y), {x: 1.5, y:2.5}, order=2), 15.0)\n f = (x-y)**3\n assert equals(f.derivative_at((x, y), {x: 1.5, y:2.5}, order=2), \n f.derivative_at((y, x), {x: 1.5, y:2.5}, order=2))\n assert equals(f.derivative_at((x, y), {x: 1.5, y:2.5}, order=2), \n -6.0*(1.5-2.5))",
"def eval_func(individual):\n \n tiled = np.tile(individual, (tile_factor, tile_factor))\n return calculate_force_on_sample(tiled, lam_frac_=lambda_factor)",
"def simulate_power(self):\n if self.p_treatment - self.p_control < 0:\n thresh = 1 - self.alpha\n else:\n thresh = self.alpha\n\n try:\n p_crit = self.norm_null.ppf(1 - thresh)\n beta = self.norm_alt.cdf(p_crit)\n except:\n self.norm_distribution()\n p_crit = self.norm_null.ppf(1 - thresh)\n beta = self.norm_alt.cdf(p_crit)\n\n power = (1 - beta) if self.p_treatment > self.p_control else beta\n self.power = power\n\n return power",
"def grid_eval(self, gridaxes):\n assert len(gridaxes) == self.sdim, \"Input has wrong dimension\"\n # make sure axes are one-dimensional\n if not all(np.ndim(ax) == 1 for ax in gridaxes):\n gridaxes = tuple(np.squeeze(ax) for ax in gridaxes)\n assert all(ax.ndim == 1 for ax in gridaxes), \\\n \"Grid axes should be one-dimensional\"\n colloc = [collocation(self.kvs[i], gridaxes[i]) for i in range(self.sdim)]\n return apply_tprod(colloc, self.coeffs)",
"def f_mw(f):\n return np.power(f, 2)",
"def exponentiate_and_normalize(values, dim=0):\n\n return torch.exp(lognormexp(values, dim=dim))",
"def exponentiate_and_normalize(values, dim=0):\n\n return torch.exp(lognormexp(values, dim=dim))",
"def w(lam, gam, p):\n return np.sqrt((1 - lam*np.cos(2*np.pi*p ) )**2 + (gam*lam*np.sin(2*np.pi*p ) )**2 )",
"def dcintegrand(z,omegalambda,omegam,omegak):\n return 1./adotovera(z,omegalambda,omegam,omegak)",
"def _magsqr(z):\n return np.abs(z) ** 2",
"def u(x, y, l, p):\n\n # Helical beam has a radially symmetrical amplitude,\n # so the amplitude function is only dependent on the\n # distance from the origin to the x, y coordinates.\n r = rho(x,y)\n\n # Evaluate the equation from Sundbeck.\n return (-1)**p * (np.sqrt(2) * r/w)**l * \\\n sp.genlaguerre(p, l)(2 * r**2 / w**2) * \\\n np.exp(- r**2 / w**2)",
"def analyticSol (x):\n\treturn x*(1-x);",
"def FN2(lam):\n return 1.034 + 3.17 *1e-4 *lam**(-2)",
"def make_evaluation_grids(W, M, N):\n nu = (np.arange(W * M, dtype=float) + 0.5) / (2 * M)\n x = np.arange(N + 1, dtype=float) / (2 * N)\n return nu, x",
"def calcEVals(self):\n self.eVals,self.eVecs = np.linalg.eigh(self.rhoOp)",
"def do_test_values(self, vel=numpy.array((3e6, 4e4, 1e2)), bf=numpy.array((0, 0, -2)),\n ef=numpy.array((0, 0, 1e6)), charge=4*e_chg):\n res = sim.lorentz(vel, ef, bf, charge)\n exp = charge*(ef + numpy.cross(vel, bf))\n nptest.assert_allclose(res, exp)",
"def evaluate(params, grid):\n return np.zeros(grid.shape)",
"def test_orthorhombic_sims(cell_dimensions, crystal_params):\n # Multiple of 6 works nicely with the p2 crystal\n cell_dimensions = cell_dimensions * 6\n with crystal_params.temp_context(cell_dimensions=cell_dimensions):\n snapshot = init_from_crystal(crystal_params)\n snapshot = equilibrate(snapshot, crystal_params, equil_type=\"crystal\")\n snapshot = make_orthorhombic(snapshot)\n temp_context = hoomd.context.initialize(crystal_params.hoomd_args)\n production(snapshot, temp_context, crystal_params, dynamics=False)",
"def housegen(x):\n\n a = np.linalg.norm(x)\n if a == 0:\n u = x\n u[0] = np.sqrt(2)\n return u, a\n \n if x[0] == 0:\n r = 1\n else:\n r = x[0] / abs(x[0])\n\n u = np.conj(r) * x / a\n u[0] = u[0] + 1\n u = u / np.sqrt(u[0])\n \n a = -r*a\n\n return u, a",
"def optimize_feature_power(df, output_column_name=None, exponents=[2., 1., .8, .5, .25, .1, .01]):\n output_column_name = list(df.columns)[-1] if output_column_name is None else output_column_name\n input_column_names = [colname for colname in df.columns if output_column_name != colname]\n results = np.zeros((len(exponents), len(input_column_names)))\n for rownum, exponent in enumerate(exponents):\n for colnum, column_name in enumerate(input_column_names):\n results[rownum, colnum] = (df[output_column_name] ** exponent).corr(df[column_name])\n results = pd.DataFrame(results, columns=input_column_names, index=pd.Series(exponents, name='power'))\n # results.plot(logx=True)\n return results",
"def spectral_laplace(x_values, dd_math_function, sigma, ua, ub):\n B = []\n for x in x_values:\n B += [-dd_math_function(x, sigma)]\n B[0] = ua\n B[len(x_values) - 1] = ub\n #B ferdig\n A=[]\n for i in range (len(x_values)):\n a = []\n for j in range (len(x_values)):\n if i == 0 or i == len(x_values) - 1:\n a.append(lagrange(x_values, j, x_values[i]))\n else:\n a.append(dd_lagrange(x_values, j, x_values[i]))\n A.append(a)\n #A ferdig\n return np.linalg.solve(A, B)",
"def test_super_exponential(self):\n nsteps = 100\n self.dt = 0.1\n self.conductor.out_fct = lambda i: 10*np.ones(self.Nc) if i == 0 \\\n else np.zeros(self.Nc)\n\n sim = simulation.Simulation(self.conductor, self.student, self.tutor,\n self.syns, self.rule, dt=self.dt)\n W0 = np.copy(self.syns.W)\n\n self.rule.alpha = 1\n self.rule.beta = 0\n tau = self.rule.tau1\n\n j1 = nsteps/3\n j2 = nsteps\n\n self.tutor.out_fct = lambda i: (self.rule.theta +\n (10 if i == j1-1 else 0))*np.ones(self.Ns)\n delta1 = j1*self.dt\n\n self.syns.W = np.copy(W0)\n sim.run(delta1)\n\n change1 = self.syns.W - W0\n self.assertGreater(np.linalg.norm(change1), 1e-10)\n \n self.tutor.out_fct = lambda i: (self.rule.theta +\n (10 if i == j2-1 else 0))*np.ones(self.Ns)\n delta2 = j2*self.dt\n\n self.syns.W = np.copy(W0)\n sim.run(delta2)\n change2 = self.syns.W - W0\n self.assertGreater(np.linalg.norm(change2), 1e-10)\n\n ratio = change1/change2\n ratio_exp = ((delta1/delta2)*(np.exp(-(delta1 - delta2)/tau))\n *np.ones(np.shape(ratio)))\n\n self.assertLess(np.max(np.abs(ratio - ratio_exp)/ratio), 0.05)",
"def prox_l1_norm(w, lamb):\n\treturn np.sign(w) * np.maximum( np.abs(w) - lamb, 0)",
"def test_matrix_power(self, use_cache):\n\n key = jrandom.PRNGKey(0)\n dim = 50\n max_power = 25\n\n matrix = jrandom.normal(key, (dim, dim)) / 10\n\n if use_cache:\n mpstate = model_utils.CachedMatrixPowerState.precompute(matrix, max_power)\n else:\n mpstate = model_utils.LazyMatrixPowerState(matrix)\n\n for t in range(max_power):\n result = mpstate.matrix_power(t, precision=jax.lax.Precision.HIGHEST)\n expected = np.linalg.matrix_power(matrix, t)\n\n np.testing.assert_array_almost_equal(result, expected, decimal=1)",
"def test_l1norm () :\n n = 10\n rfs = RewardFnSpace(list(range(n)))\n for i in range(10): \n b = rfs.bs[i]\n rfs.lp += b == 0\n rfs.lp.solve()\n rfs._setCoeffs()\n coeffs = np.array(rfs.coeffs)\n assert(np.linalg.norm(coeffs - np.ones(n)) < 1e-4)",
"def L2_func(x):\n return K.expand_dims(K.sqrt(K.sum(K.pow(x,2), axis=1)))",
"def sqrtx():\n return Operator([[(1.+1.j)/2,(1.-1.j)/2],[(1.-1.j)/2,(1.+1.j)/2]])",
"def normalequation(X_values, y_values, l_val = 0):\n l_matrix = np.eye(N = X_values.shape[1])\n l_matrix[0][0] = 0\n l_matrix = l_matrix * l_val\n return np.linalg.pinv(X_values.T @ X_values + l_matrix) @ X_values.T @ y_values",
"def __abs__(self):\r\n return (self._real.fma(self._real, self._imag*self._imag)).sqrt()",
"def housegen(x):\n a = linalg.norm(x)\n if a == 0:\n u=x; u[0]=sqrt(2); return u, a\n if x[0] == 0:\n r = 1\n else:\n r =x[0]/abs(x[0])\n u = conj(r)*x/a\n u[0]=u[0]+1\n u=u/sqrt(u[0])\n a=-r*a\n return u, a",
"def _numba_rwg0_evaluate(\n element_index,\n shapeset_evaluate,\n local_coordinates,\n grid_data,\n local_multipliers,\n normal_multipliers,\n):\n reference_values = shapeset_evaluate(local_coordinates)\n npoints = local_coordinates.shape[1]\n result = _np.empty((3, 3, npoints), dtype=_np.float64)\n\n edge_lengths = _np.empty(3, dtype=_np.float64)\n edge_lengths[0] = _np.linalg.norm(\n grid_data.vertices[:, grid_data.elements[0, element_index]]\n - grid_data.vertices[:, grid_data.elements[1, element_index]]\n )\n edge_lengths[1] = _np.linalg.norm(\n grid_data.vertices[:, grid_data.elements[2, element_index]]\n - grid_data.vertices[:, grid_data.elements[0, element_index]]\n )\n edge_lengths[2] = _np.linalg.norm(\n grid_data.vertices[:, grid_data.elements[1, element_index]]\n - grid_data.vertices[:, grid_data.elements[2, element_index]]\n )\n\n for index in range(3):\n result[:, index, :] = (\n local_multipliers[element_index, index]\n * edge_lengths[index]\n / grid_data.integration_elements[element_index]\n * grid_data.jacobians[element_index].dot(reference_values[:, index, :])\n )\n return result",
"def my_square(d):asaasasassssssssssssssssssssssssss\n\t return (d ** 3)",
"def norm(alpha, F):\n return inner_product(alpha, F, alpha)",
"def psi_xm(E_val,lec,lam):\n x = np.linspace(0, xm, n+1) # grid in the x-direction\n y = np.zeros(n+1) # wave-function in individual points\n # initial conditions\n y[0] = 0\n y[1] = 1.0\n #\n for i in range(1,n):\n y[i + 1] = (2 - 5 * dx2 * f(i, E_val,lec,lam) / 6) * y[i] - (1 + dx2 * f(i-1, E_val,lec,lam) / 12) * y[i - 1]\n y[i + 1] /= (1 + dx2 * f(i+1, E_val,lec,lam) / 12)\n return y[n]-asymptotic_boundary(-E_val)",
"def pn(x, w_list):\n\treturn sum(map(lambda w: w[1] * np.power(x, w[0]), enumerate(w_list)))",
"def six_cubed():\n print(math.pow(6,3))",
"def divergence(w0, lambda0, M2=1):\n return 2*w0/z_rayleigh(w0, lambda0, M2)",
"def draw_powerlaw(alpha, rng, N=1):\n if alpha == -1:\n alpha = -1.0000001\n # Normalization factor\n x0, x1 = rng\n C = (alpha + 1) / (x1**(alpha + 1) - x0**(alpha + 1))\n \n if N==1:\n u = np.random.random()\n else:\n u = np.random.random(N)\n x = ((u * (alpha + 1)) / C + x0**(alpha + 1))**(1./(alpha + 1))\n\n return x",
"def E(z, omega_m, omega_l):\n return 1 / np.sqrt(omega_m * (1 + z) ** 3 + omega_l)",
"def eval_grads(self, data):\n drho = np.zeros_like(self.rhos)\n dW = np.zeros_like(self.W)\n da = np.zeros_like(self.a)\n dc = np.zeros_like(self.c)\n dba = np.zeros_like(self.bs['alpha'])\n dVa = np.zeros_like(self.Vs['alpha'])\n dbm = np.zeros_like(self.bs['mu'])\n dVm = np.zeros_like(self.Vs['mu'])\n dbs = np.zeros_like(self.bs['sigma'])\n dVs = np.zeros_like(self.Vs['sigma'])\n\n const = 0.5 * np.log(2. * np.pi)\n for i in range(self.Ndim - 1, -1, -1):\n a = self.rhos[i] * self.a[i]\n h = 0.5 * (a + np.abs(a)) # ReLU\n dlt = (data[:, i][:, None] - self.mus[i][None, :])\n dlt /= self.sigmas[i][None, :]\n dlt = np.mean(dlt, axis=0)\n phi = 0.5 * dlt ** 2. - np.log(self.sigmas[i]) - const\n pi = self.alphas[i] * phi\n pi /= np.mean(pi)\n dza = pi - self.alphas[i]\n dba[i] = dza\n dVa[i] = dza[None, :] * h[:, None]\n dzm = pi * dlt\n dzm *= self.slow_factor # apparently this is a `tight' component\n dbm[i] = dzm\n dVm[i] = dzm[None, :] * h[:, None]\n dzs = pi * (dlt ** 2. - 1)\n dbs[i] = dzs\n dVs[i] = dzs[None, :] * h[:, None]\n\n # dh has shape Nhidden x Ncomponents (?)\n dh = dza * dVa[i] + dzm * dVm[i] + dzs * dVs[i]\n dpsi = 1. * (dh > 0)\n\n # collapse to a scalar or vector of Nhidden?\n drho[i] = np.mean(dpsi)\n\n if i == 0:\n dc = da[i]\n else:\n da[i - 1] = da[i] + np.mean(dpsi * self.rhos[i], axis=1)\n dW[:, i] = np.mean(da[i - 1][:, None] * data[:, i][None, :],\n axis=1)\n self.a[i - 1] = self.a[i] - np.mean(data[:, i][None, :] *\n self.W[:, i][:, None],\n axis=1)\n\n return -drho, -dc, -dW, -dba, -dVa, -dbm, -dVm, -dbs, -dVs",
"def test_symmetry_surface_average_1(self):\n\n def test(grid):\n r = grid.nodes[:, 0]\n t = grid.nodes[:, 1]\n z = grid.nodes[:, 2] * grid.NFP\n true_surface_avg = 5\n function_of_rho = 1 / (r + 0.35)\n f = (\n true_surface_avg\n + np.cos(t)\n - 0.5 * np.cos(z)\n + 3 * np.cos(t) * np.cos(z) ** 2\n - 2 * np.sin(z) * np.sin(t)\n ) * function_of_rho\n np.testing.assert_allclose(\n surface_averages(grid, f),\n true_surface_avg * function_of_rho,\n rtol=1e-15,\n err_msg=type(grid),\n )\n\n # these tests should be run on relatively low resolution grids,\n # or at least low enough so that the asymmetric spacing test fails\n L = [3, 3, 5, 3]\n M = [3, 6, 5, 7]\n N = [2, 2, 2, 2]\n NFP = [5, 3, 5, 3]\n sym = np.asarray([True, True, False, False])\n # to test code not tested on grids made with M=.\n even_number = 4\n n_theta = even_number - sym\n\n # asymmetric spacing\n with pytest.raises(AssertionError):\n theta = 2 * np.pi * np.asarray([t**2 for t in np.linspace(0, 1, max(M))])\n test(LinearGrid(L=max(L), theta=theta, N=max(N), sym=False))\n\n for i in range(len(L)):\n test(LinearGrid(L=L[i], M=M[i], N=N[i], NFP=NFP[i], sym=sym[i]))\n test(LinearGrid(L=L[i], theta=n_theta[i], N=N[i], NFP=NFP[i], sym=sym[i]))\n test(\n LinearGrid(\n L=L[i],\n theta=np.linspace(0, 2 * np.pi, n_theta[i]),\n N=N[i],\n NFP=NFP[i],\n sym=sym[i],\n )\n )\n test(\n LinearGrid(\n L=L[i],\n theta=np.linspace(0, 2 * np.pi, n_theta[i] + 1),\n N=N[i],\n NFP=NFP[i],\n sym=sym[i],\n )\n )\n test(QuadratureGrid(L=L[i], M=M[i], N=N[i], NFP=NFP[i]))\n test(ConcentricGrid(L=L[i], M=M[i], N=N[i], NFP=NFP[i], sym=sym[i]))\n # nonuniform spacing when sym is False, but spacing is still symmetric\n test(\n LinearGrid(\n L=L[i],\n theta=np.linspace(0, np.pi, n_theta[i]),\n N=N[i],\n NFP=NFP[i],\n sym=sym[i],\n )\n )\n test(\n LinearGrid(\n L=L[i],\n theta=np.linspace(0, np.pi, n_theta[i] + 1),\n N=N[i],\n NFP=NFP[i],\n sym=sym[i],\n )\n )",
"def f_doublet(x, c, i1, i2, sigma_gal, z, sigma_inst): \n dblt_mu = [3727.092, 3729.875] # the actual non-redshifted wavelengths\n l1 = dblt_mu[0] * (1+z)\n l2 = dblt_mu[1] * (1+z)\n\n sigma = np.sqrt(sigma_gal**2 + sigma_inst**2)\n\n norm = (sigma*np.sqrt(2*np.pi))\n term1 = ( i1 / norm ) * np.exp(-(x-l1)**2/(2*sigma**2))\n term2 = ( i2 / norm ) * np.exp(-(x-l2)**2/(2*sigma**2)) \n return (c*x + term1 + term2)",
"def mw_f(mw):\n return np.power(mw, 0.5)",
"def calc_power(field):\r\n\r\n poynt_in_points = 0.5*numpy.real(field.p * numpy.conj(field.vn))\r\n power = numpy.sum(poynt_in_points)\r\n power *= field.one_pixel_area\r\n return power",
"def sqrtw():\n return Operator([[(1.+1.j)/2,-1.j/np.sqrt(2)],[1./np.sqrt(2),(1.+1.j)/2]])",
"def fedorenko(tau, h, grid):\n relation = tau / h\n new_grid = np.zeros(len(grid) - 1)\n for m in range(1, len(grid) - 1):\n new_grid[m-1] = grid[m] - np.dot(relation, grid[m] - grid[m-1]) - np.dot(relation / 2 * (relation - relation ** 2), grid[m-1] - 2 * grid[m] + grid[m+1])\n new_grid = np.insert(new_grid, 0, grid[0])\n return new_grid",
"def test_9(self):\n for _ in range(1000):\n num_free = np.random.randint(1, 100)\n values = np.random.uniform(-1000.0, 1000.0, size=num_free)\n py = get_scales_magnitudes(values)\n f90 = fort_debug.wrapper_get_scales_magnitude(values, num_free)\n assert_almost_equal(py, f90)",
"def power_output_candidate_wind_rule(_m, g, y, s, t):\r\n\r\n if y != m.Y.last():\r\n return (- m.sigma_1[g, y, s, t] + m.sigma_5[g, y, s, t]\r\n - m.lamb[self.k(m, g), y, s, t]\r\n + ((m.DELTA[y] * m.RHO[y, s]) * (m.C_MC[g, y] - (m.baseline[y] * m.permit_price[y])))\r\n == 0)\r\n else:\r\n return (- m.sigma_1[g, y, s, t] + m.sigma_5[g, y, s, t]\r\n - m.lamb[self.k(m, g), y, s, t]\r\n + ((m.DELTA[y] * m.RHO[y, s]) * (1 + (1 / m.INTEREST_RATE)) * (\r\n m.C_MC[g, y] - (m.baseline[y] * m.permit_price[y])))\r\n == 0)",
"def element_power_consistent_with_bus_power_3ph(net, rtol=1e-2):\r\n bus_p_a = pd.Series(data=0., index=net.bus.index)\r\n bus_q_a = pd.Series(data=0., index=net.bus.index)\r\n bus_p_b = pd.Series(data=0., index=net.bus.index)\r\n bus_q_b = pd.Series(data=0., index=net.bus.index)\r\n bus_p_c = pd.Series(data=0., index=net.bus.index)\r\n bus_q_c = pd.Series(data=0., index=net.bus.index)\r\n\r\n for idx, tab in net.ext_grid.iterrows():\r\n bus_p_a.at[tab.bus] -= net.res_ext_grid_3ph.p_a_mw.at[idx]\r\n bus_q_a.at[tab.bus] -= net.res_ext_grid_3ph.q_a_mvar.at[idx]\r\n bus_p_b.at[tab.bus] -= net.res_ext_grid_3ph.p_b_mw.at[idx]\r\n bus_q_b.at[tab.bus] -= net.res_ext_grid_3ph.q_b_mvar.at[idx]\r\n bus_p_c.at[tab.bus] -= net.res_ext_grid_3ph.p_c_mw.at[idx]\r\n bus_q_c.at[tab.bus] -= net.res_ext_grid_3ph.q_c_mvar.at[idx]\r\n\r\n for idx, tab in net.load.iterrows():\r\n bus_p_a.at[tab.bus] += net.res_load_3ph.p_mw.at[idx]/3\r\n bus_q_a.at[tab.bus] += net.res_load_3ph.q_mvar.at[idx] /3\r\n bus_p_b.at[tab.bus] += net.res_load_3ph.p_mw.at[idx]/3\r\n bus_q_b.at[tab.bus] += net.res_load_3ph.q_mvar.at[idx] /3\r\n bus_p_c.at[tab.bus] += net.res_load_3ph.p_mw.at[idx]/3\r\n bus_q_c.at[tab.bus] += net.res_load_3ph.q_mvar.at[idx] /3\r\n\r\n for idx, tab in net.asymmetric_load.iterrows():\r\n bus_p_a.at[tab.bus] += net.res_asymmetric_load_3ph.p_a_mw.at[idx]\r\n bus_q_a.at[tab.bus] += net.res_asymmetric_load_3ph.q_a_mvar.at[idx]\r\n bus_p_b.at[tab.bus] += net.res_asymmetric_load_3ph.p_b_mw.at[idx]\r\n bus_q_b.at[tab.bus] += net.res_asymmetric_load_3ph.q_b_mvar.at[idx]\r\n bus_p_c.at[tab.bus] += net.res_asymmetric_load_3ph.p_c_mw.at[idx]\r\n bus_q_c.at[tab.bus] += net.res_asymmetric_load_3ph.q_c_mvar.at[idx]\r\n\r\n for idx, tab in net.asymmetric_sgen.iterrows():\r\n bus_p_a.at[tab.bus] -= net.res_asymmetric_sgen_3ph.p_a_mw.at[idx]\r\n bus_q_a.at[tab.bus] -= net.res_asymmetric_sgen_3ph.q_a_mvar.at[idx]\r\n bus_p_b.at[tab.bus] -= net.res_asymmetric_sgen_3ph.p_b_mw.at[idx]\r\n bus_q_b.at[tab.bus] -= net.res_asymmetric_sgen_3ph.q_b_mvar.at[idx]\r\n bus_p_c.at[tab.bus] -= net.res_asymmetric_sgen_3ph.p_c_mw.at[idx]\r\n bus_q_c.at[tab.bus] -= net.res_asymmetric_sgen_3ph.q_c_mvar.at[idx]\r\n\r\n for idx, tab in net.sgen.iterrows():\r\n bus_p_a.at[tab.bus] -= net.res_sgen_3ph.p_mw.at[idx] / 3\r\n bus_q_a.at[tab.bus] -= net.res_sgen_3ph.q_mvar.at[idx] / 3\r\n bus_p_b.at[tab.bus] -= net.res_sgen_3ph.p_mw.at[idx] / 3\r\n bus_q_b.at[tab.bus] -= net.res_sgen_3ph.q_mvar.at[idx] / 3\r\n bus_p_c.at[tab.bus] -= net.res_sgen_3ph.p_mw.at[idx] / 3\r\n bus_q_c.at[tab.bus] -= net.res_sgen_3ph.q_mvar.at[idx] / 3\r\n\r\n assert allclose(net.res_bus_3ph.p_a_mw.values, bus_p_a.values, equal_nan=True, rtol=rtol)\r\n assert allclose(net.res_bus_3ph.q_a_mvar.values, bus_q_a.values, equal_nan=True, rtol=rtol)\r\n assert allclose(net.res_bus_3ph.p_b_mw.values, bus_p_b.values, equal_nan=True, rtol=rtol)\r\n assert allclose(net.res_bus_3ph.q_b_mvar.values, bus_q_b.values, equal_nan=True, rtol=rtol)\r\n assert allclose(net.res_bus_3ph.p_c_mw.values, bus_p_c.values, equal_nan=True, rtol=rtol)\r\n assert allclose(net.res_bus_3ph.q_c_mvar.values, bus_q_c.values, equal_nan=True, rtol=rtol)",
"def six_cubed():\n print(math.pow(6, 3))",
"def compute_absolute_power(freqs, powers, band, method='sum'):\n\n _, band_powers = trim_spectrum(freqs, powers, band)\n abs_power = get_avg_func(method)(band_powers)\n\n return abs_power",
"def householder_stability(N=20, display=False):\n B = np.random.randn(N, N) # i.i.d normal matrix\n C = np.random.randn(N, N) # i.i.d normal matrix\n Q, _ = np.linalg.qr(B) # Forming orthogonal Matrix\n R = np.triu(C) # Casting C into upper-triangular form\n\n # Forming A with Q & R as 'exact QR factorisation'\n # Note: minimal rounding applies but is negligible here\n A = Q @ R\n Q2, R2 = np.linalg.qr(A) # Numerical householder QR\n norm1 = np.linalg.norm(Q2 - Q)\n norm2 = np.linalg.norm(R2 - R)\n norm3 = np.linalg.norm(Q2 @ R2 - A)\n\n if display:\n print(\"||Q2 - Q|| = %s\" %norm1)\n print(\"||R2 - R|| = %s\" %norm2)\n print(\"||Q2R2 - A|| = %s\" %norm3)\n return (norm1, norm2, norm3)",
"def M(latitude):\n return a*(1.0-e2)/pow((1.0-e2)*pow(math.sin(latitude),2.0),3.0/2.0);",
"def test_rhs(self):\n\n _, _, generator = setup_lmde_frames_and_generator(self.basic_model, solver_frame=self.X)\n\n t = 13.1231\n y = np.eye(2, dtype=complex)\n\n output = generator(t, y, in_frame_basis=True).data\n\n X = np.array(self.X.data)\n X_diag, U = np.linalg.eigh(X)\n Uadj = U.conj().transpose()\n gen = (\n -1j\n * 2\n * np.pi\n * (self.w * np.array(self.Z.data) / 2 + self.r * np.cos(2 * np.pi * self.w * t) * X / 2)\n )\n expected = (\n Uadj @ expm(1j * t * X) @ gen @ expm(-1j * t * X) @ U + 1j * np.diag(X_diag)\n ) @ y\n\n self.assertTrue(np.allclose(expected, output))",
"def test_equal_site_hamiltonian(L):\n site_dims = [2] * L\n site_ops = [pauli.X] * L\n bond_ops = [np.kron(pauli.Z, pauli.Z)] * (L - 1)\n full_hamiltonian = ExDiagPropagator(None, site_dims, site_ops, bond_ops, 0.01).H\n mp_norm = get_norm_of_hamiltonian(site_ops, bond_ops)\n assert abs(np.linalg.norm(full_hamiltonian) - mp_norm) == pytest.approx(0.0)",
"def policy_evaluation_on_grid_world() -> ValueFunction:\n return get_policy_evaluation(grid_world, 0.9999, 0.0001)",
"def get_power(self, gridded_vis, kernel_weights, ps_dim=2):\n logger.info(\"Calculating the power spectrum\")\n PS = []\n for vis in gridded_vis:\n # The 3D power spectrum\n power_3d = np.absolute(vis) ** 2\n \n if ps_dim == 2:\n P = angular_average_nd(\n field=power_3d,\n coords=[self.uvgrid, self.uvgrid, self.eta],\n bins=self.u_edges, n=ps_dim,\n weights=np.sum(kernel_weights, axis=2), # weights,\n bin_ave=False,\n )[0]\n \n elif ps_dim == 1:\n \n P = angular_average_nd(\n field=power_3d,\n coords=[self.uvgrid, self.uvgrid, self.eta],\n bins=self.u_edges,\n weights=kernel_weights,\n bin_ave=False,\n )[0]\n \n P[np.isnan(P)] = 0\n PS.append(P)\n\n return PS",
"def ridge(X, y, lam):\n W = np.dot(linalg.pinv((np.dot(X.T, X) + np.sqrt(lam) * np.eye(X.shape[0]))), np.dot(X.T, y))\n return W",
"def walls(r,param):\r\n V = param[0]\r\n sig = param[1]\r\n L = param[2]\r\n\r\n a = 1/sig\r\n\r\n\r\n x0 = L/2.\r\n y0 = 0.\r\n V0 = 10000*V\r\n Rx = 0.01*L\r\n Ry = 0.6*L\r\n\r\n x = r[0] - x0*np.sign(r[0])\r\n y = r[1] - y0*np.sign(r[1])\r\n px = np.sqrt(x**2)\r\n py = np.sqrt(y**2)\r\n\r\n f1 = V0*(1/(1 + np.exp((px-Rx)/a)))*(1/(1 + np.exp((py-Ry)/a)))\r\n\r\n x0 = 0.\r\n y0 = L/2.\r\n V0 = 10000*V\r\n Rx = 0.6*L\r\n Ry = 0.01*L\r\n\r\n x = r[0] - x0*np.sign(r[0])\r\n y = r[1] - y0*np.sign(r[1])\r\n px = np.sqrt(x**2)\r\n py = np.sqrt(y**2)\r\n\r\n f2 = V0*(1/(1 + np.exp((px-Rx)/a)))*(1/(1 + np.exp((py-Ry)/a)))\r\n\r\n value = f1+f2\r\n return value",
"def pump_power(dp, m_dot, rho, eta):\r\n return 1 / eta * dp * m_dot / rho",
"def x_for_initial_time_grid(self):\n tmp = self._Y * self._sqrt_eig_val.reshape(self._num_ev,1) \n if self.verbose > 1:\n print(\"calc process via matrix prod ...\")\n res = np.tensordot(tmp, self._eig_vec, axes=([0],[1])).flatten()\n if self.verbose > 1:\n print(\"done!\")\n \n return res",
"def compute_quotients(X = np.zeros((1,1,2))):\r\n \r\n start=time.time()\r\n rad = X[:,:,1]\r\n\r\n X = np.reshape(X,(X.shape[0],1,X.shape[1]*X.shape[2]))\r\n\r\n drad = np.asarray([[item[0]/item[1] if item[1] != 0 else 0 for item in list(itertools.combinations(rad[sample],2))] \\\r\n for sample in range(X.shape[0])])\r\n\r\n dradsum = np.asarray([[item[0]/item[1] if item[1] != 0 else 0 for item in itertools.combinations([ \\\r\n item[0]+item[1] for item in list(itertools.combinations(rad[sample],2))], 2)] \\\r\n for sample in range(drad.shape[0])])\r\n \r\n drad = np.reshape(drad,(drad.shape[0],1,drad.shape[-1]))\r\n drads = np.reshape(dradsum,(dradsum.shape[0],1,dradsum.shape[-1]))\r\n\r\n X = np.concatenate((drad,drads), axis=2)\r\n print('Geometric and packing factors computed in', round(time.time()-start,2),' s')\r\n np.save('X', X)\r\n \r\n return X",
"def __abs__(self):\n return self.square() ** 0.5",
"def wallis_product(n_terms):\n # XXX : The n_terms is an int that corresponds to the number of\n # terms in the product. For example 10000.\n prod=1\n for i in range(1,1+n_terms):\n left = (2 * i)/(2 * i - 1)\n right = (2 * i)/(2 * i + 1)\n total = left * right\n prod=prod*total\n return 2*prod"
] | [
"0.59356666",
"0.5698591",
"0.5668886",
"0.5603993",
"0.5581548",
"0.5566813",
"0.5537873",
"0.5494465",
"0.5466236",
"0.5465886",
"0.5446438",
"0.53736854",
"0.5353577",
"0.53370494",
"0.5319311",
"0.5316912",
"0.53169",
"0.5298397",
"0.5294312",
"0.52837807",
"0.5272999",
"0.5252929",
"0.5251431",
"0.5251265",
"0.52405477",
"0.5216255",
"0.51934534",
"0.5192531",
"0.51902205",
"0.518204",
"0.5179678",
"0.51694685",
"0.5168928",
"0.51662236",
"0.5165052",
"0.5156214",
"0.51418096",
"0.51414526",
"0.5138624",
"0.5132292",
"0.51226",
"0.5119569",
"0.5119569",
"0.51155156",
"0.5111647",
"0.51103663",
"0.5091537",
"0.50915295",
"0.5088456",
"0.50847644",
"0.50841343",
"0.5082804",
"0.50778157",
"0.50777245",
"0.5076083",
"0.50752294",
"0.5073753",
"0.5072535",
"0.50710213",
"0.50707936",
"0.5070138",
"0.5064408",
"0.5058331",
"0.50559026",
"0.5054618",
"0.5053257",
"0.50519973",
"0.5049335",
"0.5048718",
"0.5042833",
"0.504056",
"0.50390416",
"0.50335276",
"0.50288355",
"0.502665",
"0.5024326",
"0.5022296",
"0.5019941",
"0.50182146",
"0.500809",
"0.50055504",
"0.50002193",
"0.49985114",
"0.4992753",
"0.49911952",
"0.49851888",
"0.49787387",
"0.4975408",
"0.4973049",
"0.49727994",
"0.49676692",
"0.49645293",
"0.49640083",
"0.49628296",
"0.49627662",
"0.49626902",
"0.49600565",
"0.4955235",
"0.49518743",
"0.49363905"
] | 0.536298 | 12 |
Sample the AGN luminosity from the redshift-binned luminosity function | def sample_agn_luminosity(self, z):
# Assign redshift bin
is_less_than_right_edge = (z < self.z_bins)
alpha = self.alphas[is_less_than_right_edge][0]
beta = self.betas[is_less_than_right_edge][0]
M_star = self.M_stars[is_less_than_right_edge][0]
# Evaluate function
pmf = self.get_double_power_law(alpha, beta, M_star)
# Sample luminosity
sampled_M = np.random.choice(self.M_grid, None, replace=True, p=pmf)
return sampled_M | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compute_luminosity(red, green, blue):\r\n return (0.299 * red) + (0.587 * green) + (0.114 * blue)",
"def luminance(self):\n \n return (self.r + self.g + self.b) // 3",
"def compute_radiocore_luminosity(MBH, L_AGN):\n\tL_X = bolcorr_hardX(L_AGN)\n\tm = log10(MBH / u.Msun)\n\t# Merloni, Heinz & Di Matteo (2003)\n\tlogLR = 0.6 * log10(L_X/(u.erg/u.s)) + 0.78 * m + 7.33\n\treturn 10**logLR * u.erg/u.s",
"def sRGBLuminance(x):\n lin=linearFromsRGB3(x)\n return lin[0]*0.2126+lin[1]*0.7152+lin[2]*0.0722",
"def average_luminosity(self, delta=1e-10):\n cumsum = 0.0\n for pix in self.pixels:\n cumsum += math.log10(delta + pix.luminosity())\n\n return math.pow(10, cumsum / len(self.pixels))",
"def luminance(rgb):\n \n (r, g, b) = rgb\n return (r + g + b) // 3",
"def get_luminosity(self):\n\n if self.no_dist is False and self.no_flux is False:\n\n dist = self.distance\n snu = self.snu_at_1GHz\n lum = lumin(dist, snu)\n\n self.lum = lum\n else:\n self.lum = -1 # use -1 to indicate unknown luminosity\n return self.lum",
"def get_luminosity(self):\n\n h, l, s = colorsys.rgb_to_hls(self.r, self.g, self.b)\n return l",
"def pixelLuminance (r, g, b):\n assert (type(r) == int and type(g) == int and type(b) == int)\n assert (0<=r<=255 and 0<=g<=255 and 0<=b<=255)\n return roundHalfUp((.2126*r)+(.7152*g)+(.0722*b))",
"def Luminosity(self):\n try:\n L = (self.E*self.Weight).sum()\n N = self.E.count()\n except:\n L = self.E.sum()\n N = self.E.count()\n return L, L/np.sqrt(N)",
"def loadLuminosityFunction(self):\n\n tab = np.genfromtxt(self.fname[0], skip_header=self.skip_header)\n if not self.evolve:\n self.luminosity_function = np.zeros((tab.shape[0], self.nbands, self.nzbins))\n\n else:\n self.luminosity_function = np.zeros((tab.shape[0], self.nbands, 1))\n\n if self.ecol is not None:\n self.ye = np.zeros(self.luminosity_function.shape)\n imult = 1\n else:\n self.ye = None\n imult = 2\n\n self.magmean = tab[:,self.xcol]\n\n if self.nzbins==1:\n for i in range(self.nzbins):\n for j in range(self.nbands):\n self.luminosity_function[:,j,i] = tab[:,self.ycol]\n if self.ecol is not None:\n self.ye[:,j,i] = tab[:,self.ecol]\n else:\n if not self.evolve:\n assert((tab.shape[1]-1)==self.nzbins)\n for i in range(self.nzbins):\n for j in range(self.nbands):\n self.luminosity_function[:,j,i] = tab[:,i*imult+self.ycol]\n if self.ecol is not None:\n self.ye[:,j,i] = tab[:,i*imult+self.ecol]\n else:\n for j in range(self.nbands):\n self.luminosity_function[:,j,0] = tab[:,self.ycol]\n if self.ecol is not None:\n self.ye[:,j,0] = tab[:,self.ecol]\n\n self.xmean = self.magmean\n self.y = self.luminosity_function",
"def GetLuminance(self):\n return _itkRGBAPixelPython.itkRGBAPixelUS_GetLuminance(self)",
"def luminance(self, color):\n return 0.2426 * color[2] + 0.7152 * color[1] + 0.0722 * color[0]",
"def luminosity(r,T,autoDebug=True):\n\t#-----------BEGIN ERROR CHECKING----------\n\tif autoDebug:\n\t\tsam.type_check(r, sam.TYPES_math, \"r\")\n\t\tsam.type_check(T, sam.TYPES_math, \"T\")\n\t\tsam.value_check(r,.0,\">\",\"r\")\n\t\tsam.value_check(T,.0,\">\",\"T\")\n\t#-----------END ERROR CHECKING----------\n\n\tL = 4 * sam.CONSTANT_pi * r**2 * sam.CONSTANT_SB* T**4\n\treturn L",
"def sRGBGrayscale(x):\n rellum=sRGBLuminance(x)\n return [rellum,rellum,rellum]",
"def GetLuminance(self):\n return _itkRGBAPixelPython.itkRGBAPixelUC_GetLuminance(self)",
"def calculate_lux(r: int, g: int, b: int) -> float:\n # This only uses RGB ... how can we integrate clear or calculate lux\n # based exclusively on clear since this might be more reliable?\n illuminance = (-0.32466 * r) + (1.57837 * g) + (-0.73191 * b)\n\n return illuminance",
"def get_uthreshold(img):\n import noiselevel\n # sigma=Table.read('noiselevel.csv',format='csv')['sigma'][0]\n sigma = noiselevel.getnoiselevel(img,ranges=(-30,30),toplot=False)\n \n thres = sigma*np.sqrt(2*np.log(img.size))\n return thres, sigma",
"def _value_as_luminance(self):\n return round(float(self._value), 1)",
"def herbel_luminosities(redshift, alpha, a_m, b_m, size=None,\n x_min=0.00305,\n x_max=1100.0, resolution=100):\n\n if size is None and np.shape(redshift):\n size = np.shape(redshift)\n\n luminosity_star = _calculate_luminosity_star(redshift, a_m, b_m)\n\n x_sample = schechter(alpha, x_min, x_max, resolution=resolution, size=size)\n\n return luminosity_star * x_sample",
"def luminance(self) -> float:\n use_option = 1\n\n if use_option == 1:\n # 1st option\n msb = 0\n msb_2nd = 1\n while msb != msb_2nd:\n msb = self.read_byte_data(Reg.luminance_msb)\n lsb = self.read_byte_data(Reg.luminance_lsb)\n msb_2nd = self.read_byte_data(Reg.luminance_msb)\n\n elif use_option == 2:\n # 2nd option, which does not work on rpi OSError: [Errno 95] Operation not supported\n wr_msb = i2c_msg.write(self.device_addr, [Reg.luminance_msb])\n rd_msb = i2c_msg.read(self.device_addr, 1)\n wr_lsb = i2c_msg.write(self.device_addr, [Reg.luminance_lsb])\n rd_lsb = i2c_msg.read(self.device_addr, 1)\n self.i2c_rdwr(wr_msb, rd_msb, wr_lsb, rd_lsb)\n msb = ord(rd_msb.data)\n lsb = ord(rd_lsb.data)\n\n # Convert the data to lux\n exponent = (msb & 0xF0) >> 4\n mantissa = ((msb & 0x0F) << 4) | (lsb & 0x0F)\n return 2.0 ** exponent * mantissa * 0.045",
"def test_str_luminous_intensity(self):\n xknx = XKNX()\n sensor = Sensor(\n xknx,\n \"TestSensor\",\n group_address_state=\"1/2/3\",\n value_type=\"luminous_intensity\",\n )\n sensor.sensor_value.payload = DPTArray(\n (\n 0x46,\n 0xB,\n 0xBE,\n 0x7E,\n )\n )\n\n self.assertEqual(sensor.resolve_state(), 8943.623046875)\n self.assertEqual(sensor.unit_of_measurement(), \"cd\")\n self.assertEqual(sensor.ha_device_class(), \"illuminance\")",
"def luminosity_function(abs_mag, redshift):\n\n # L/L_*(z) = 10**(0.4 * (M_*(z) - M))\n L_L_star = 10 ** (0.4 * (m_star(redshift) - abs_mag))\n\n # Phi*(z) = 10**(log(Phi*(z))\n phi_star = 10 ** log_phi_star(redshift) * (cosmo.h / u.Mpc) ** 3\n\n # QLF slopes\n alpha1 = -3.35 # alpha in Table 2\n alpha2 = -0.37 # beta in Table 2\n\n Phi = 0.4 * np.log(10) * L_L_star * phi_star * (L_L_star ** -alpha1 + L_L_star ** -alpha2) ** -1\n\n return Phi",
"def Luminosity(self, z, f=1., dnu=1000.):\n ld = self.Luminosity_Distance(z)\n ld2 = ld * ld\n lum = f * self.Jy2CGS * dnu * self.MHz2Hz * 4 * np.pi * ld2\n return lum",
"def random_noise_levels():\n log_min_shot_noise = math.log(0.0001)\n log_max_shot_noise = math.log(0.012)\n log_shot_noise = random.uniform(log_min_shot_noise, log_max_shot_noise)\n shot_noise = math.exp(log_shot_noise)\n\n line = lambda x: 2.18 * x + 1.20\n log_read_noise = line(log_shot_noise) + random.gauss(mu=0.0, sigma=0.26)\n read_noise = math.exp(log_read_noise)\n return shot_noise, read_noise",
"def compute_noise_levels(tr, cfg):\n from obspy.signal.trigger import classic_sta_lta\n tr_snr = tr.copy()\n tr_snr.filter(\"bandpass\", freqmin=cfg.sig_noise.SNR_FREQ[0],\n freqmax=cfg.sig_noise.SNR_FREQ[1])\n wa = int(cfg.sig_noise.SNR_WIN[1]*tr.stats.sampling_rate)\n wb = int(cfg.sig_noise.SNR_WIN[0]*tr.stats.sampling_rate)\n # Prevent failing due to en(data) < nlta error\n if len(tr_snr.data) < wa or len(tr_snr.data) < wb:\n noise_level = 100.0\n return noise_level\n snr = classic_sta_lta(tr_snr.data, wa, wb)\n snr_smooth = do_smooth(snr, cfg.sig_noise.SNR_SMOOTH_WIN,\n tr.stats.sampling_rate)\n thresh_snr = np.nanmax(snr_smooth) * 0.4\n A = (snr_smooth - thresh_snr)\n A = A[np.where(A > 0)]\n if len(snr_smooth[wb:-wa]) == 0: # In case zerodivision error\n noise_level = 9999.9\n return noise_level\n noise_level = (len(A) / len(snr_smooth[wb:-wa])) * 100\n return noise_level",
"def intensityPSF_BlRd(N=1000):\n col_seq = [( 59/255., 76/255., 192/255.), ( 68/255., 90/255., 204/255.),\n ( 77/255., 104/255., 215/255.), ( 87/255., 117/255., 225/255.),\n ( 98/255., 130/255., 234/255.), (108/255., 142/255., 241/255.),\n (119/255., 154/255., 247/255.), (130/255., 165/255., 251/255.),\n (141/255., 176/255., 254/255.), (152/255., 185/255., 255/255.),\n (163/255., 194/255., 255/255.), (174/255., 201/255., 253/255.),\n (184/255., 208/255., 249/255.), (194/255., 213/255., 244/255.),\n (204/255., 217/255., 238/255.), (213/255., 219/255., 230/255.),\n (221/255., 221/255., 221/255.), (229/255., 216/255., 209/255.),\n (236/255., 211/255., 197/255.), (241/255., 204/255., 185/255.),\n (245/255., 196/255., 173/255.), (247/255., 187/255., 160/255.),\n (247/255., 177/255., 148/255.), (247/255., 166/255., 135/255.),\n (244/255., 154/255., 123/255.), (241/255., 141/255., 111/255.),\n (236/255., 127/255., 99/255.)]\n\n cdict = {'red': ((0.00000000, col_seq[0][0], col_seq[0][0]),\n (0.00769231, col_seq[1][0], col_seq[1][0]),\n (0.01538462, col_seq[2][0], col_seq[2][0]),\n (0.02307692, col_seq[3][0], col_seq[3][0]),\n (0.03076923, col_seq[4][0], col_seq[4][0]),\n (0.03846154, col_seq[5][0], col_seq[5][0]),\n (0.04615385, col_seq[6][0], col_seq[6][0]),\n (0.05384615, col_seq[7][0], col_seq[7][0]),\n (0.06153846, col_seq[8][0], col_seq[8][0]),\n (0.06923077, col_seq[9][0], col_seq[9][0]),\n (0.07692308, col_seq[10][0], col_seq[10][0]),\n (0.08461538, col_seq[11][0], col_seq[11][0]),\n (0.09230769, col_seq[12][0], col_seq[12][0]),\n (0.10000000, col_seq[13][0], col_seq[13][0]),\n (0.10769231, col_seq[14][0], col_seq[14][0]),\n (0.18205128, col_seq[15][0], col_seq[15][0]),\n (0.25641026, col_seq[16][0], col_seq[16][0]),\n (0.33076923, col_seq[17][0], col_seq[17][0]),\n (0.40512821, col_seq[18][0], col_seq[18][0]),\n (0.47948718, col_seq[19][0], col_seq[19][0]),\n (0.55384615, col_seq[20][0], col_seq[20][0]),\n (0.62820513, col_seq[21][0], col_seq[21][0]),\n (0.70256410, col_seq[22][0], col_seq[22][0]),\n (0.77692308, col_seq[23][0], col_seq[23][0]),\n (0.85128205, col_seq[24][0], col_seq[24][0]),\n (0.92564103, col_seq[25][0], col_seq[25][0]),\n (1.00000000, col_seq[26][0], col_seq[26][0])),\n 'green': ((0.00000000, col_seq[0][1], col_seq[0][1]),\n (0.00769231, col_seq[1][1], col_seq[1][1]),\n (0.01538462, col_seq[2][1], col_seq[2][1]),\n (0.02307692, col_seq[3][1], col_seq[3][1]),\n (0.03076923, col_seq[4][1], col_seq[4][1]),\n (0.03846154, col_seq[5][1], col_seq[5][1]),\n (0.04615385, col_seq[6][1], col_seq[6][1]),\n (0.05384615, col_seq[7][1], col_seq[7][1]),\n (0.06153846, col_seq[8][1], col_seq[8][1]),\n (0.06923077, col_seq[9][1], col_seq[9][1]),\n (0.07692308, col_seq[10][1], col_seq[10][1]),\n (0.08461538, col_seq[11][1], col_seq[11][1]),\n (0.09230769, col_seq[12][1], col_seq[12][1]),\n (0.10000000, col_seq[13][1], col_seq[13][1]),\n (0.10769231, col_seq[14][1], col_seq[14][1]),\n (0.18205128, col_seq[15][1], col_seq[15][1]),\n (0.25641026, col_seq[16][1], col_seq[16][1]),\n (0.33076923, col_seq[17][1], col_seq[17][1]),\n (0.40512821, col_seq[18][1], col_seq[18][1]),\n (0.47948718, col_seq[19][1], col_seq[19][1]),\n (0.55384615, col_seq[20][1], col_seq[20][1]),\n (0.62820513, col_seq[21][1], col_seq[21][1]),\n (0.70256410, col_seq[22][1], col_seq[22][1]),\n (0.77692308, col_seq[23][1], col_seq[23][1]),\n (0.85128205, col_seq[24][1], col_seq[24][1]),\n (0.92564103, col_seq[25][1], col_seq[25][1]),\n (1.00000000, col_seq[26][1], col_seq[26][1])),\n 'blue': ((0.00000000, col_seq[0][2], 
col_seq[0][2]),\n (0.00769231, col_seq[1][2], col_seq[1][2]),\n (0.01538462, col_seq[2][2], col_seq[2][2]),\n (0.02307692, col_seq[3][2], col_seq[3][2]),\n (0.03076923, col_seq[4][2], col_seq[4][2]),\n (0.03846154, col_seq[5][2], col_seq[5][2]),\n (0.04615385, col_seq[6][2], col_seq[6][2]),\n (0.05384615, col_seq[7][2], col_seq[7][2]),\n (0.06153846, col_seq[8][2], col_seq[8][2]),\n (0.06923077, col_seq[9][2], col_seq[9][2]),\n (0.07692308, col_seq[10][2], col_seq[10][2]),\n (0.08461538, col_seq[11][2], col_seq[11][2]),\n (0.09230769, col_seq[12][2], col_seq[12][2]),\n (0.10000000, col_seq[13][2], col_seq[13][2]),\n (0.10769231, col_seq[14][2], col_seq[14][2]),\n (0.18205128, col_seq[15][2], col_seq[15][2]),\n (0.25641026, col_seq[16][2], col_seq[16][2]),\n (0.33076923, col_seq[17][2], col_seq[17][2]),\n (0.40512821, col_seq[18][2], col_seq[18][2]),\n (0.47948718, col_seq[19][2], col_seq[19][2]),\n (0.55384615, col_seq[20][2], col_seq[20][2]),\n (0.62820513, col_seq[21][2], col_seq[21][2]),\n (0.70256410, col_seq[22][2], col_seq[22][2]),\n (0.77692308, col_seq[23][2], col_seq[23][2]),\n (0.85128205, col_seq[24][2], col_seq[24][2]),\n (0.92564103, col_seq[25][2], col_seq[25][2]),\n (1.00000000, col_seq[26][2], col_seq[26][2]))}\n\n psfblrd = _mplb.colors.LinearSegmentedColormap('psfblrd', cdict, N)\n return psfblrd",
"def enhance(img, window=30):\n hp = highPassFilter(img, window=window)\n tmp = grayscale(img) + laplacian(img)\n return tmp",
"def nyt(image):\n\n ### Get all pixels into a list\n pixels = []\n \n for x in range(image.width):\n print(\"getting the pixel at x = \", x)\n \n for y in range(image.height):\n pixel = image.getpixel((x, y))\n pixels.append(pixel)\n \n print(pixels[:10])\n \n ### We have the list of pixels, and we need to sort it by luminance.\n \n ### Create a new list that has a pixel's luminance stored with the pixel\n ### Each element of this list will look like (luminance, (r, g, b))\n ### Then, when we sort this list, it will sort on luminance, and when tied,\n ### will break the ties based on r (and then on g)\n \n pixels_with_luminance = []\n for pixel in pixels:\n lum = luminance(pixel)\n pwl = (lum, pixel)\n pixels_with_luminance.append(pwl)\n \n print(\"pixels with luminance:\", pixels_with_luminance[:10])\n \n ### Now we can sort the pixels based on luminance:\n pixels_with_luminance.sort()\n print(\"pixels with luminance after sorting:\", pixels_with_luminance[:10])",
"def intensity(self) -> int:",
"def illuminance_sensor():\n\n\tsensor_name = \"illuminance\"\n\treg_addr = 26\n\tdata_len = 1\n\tregist_sensor(sensor_name, reg_addr, data_len)\n\n\tdata = rospy.wait_for_message(\"MediumSize/SensorHub/Illuminance\", Illuminance, 2)\n\tresult = data.illuminance\n\n\tdelete_sensor(sensor_name)\n\treturn result",
"def current_average_luma(camera):\n camera.capture('/home/pi/Desktop/image1.jpg')#camera take picture\n img = Image.open(\"/home/pi/Desktop/image1.jpg\") #opens image\n \n luma=0 #sum of the lumenance of each pixels\n pixels = img.width*img.height #number of pixels\n \n for x in range(img.width):\n for y in range(img.height):\n (r, g, b) = img.getpixel((x,y))#get colour touple \n luma += (0.2126*r + 0.7152*g + 0.0722*b) #calculate luma of RGB data, then add to total\n #END for\n #END for\n \n img.close()#ensure to properly close the image\n return luma/pixels #return average of all pixels",
"def test_changeIlluminationLevel(self):\n fade_to_black = \"Your environs fade to black due to Ineffable Spooky Magic.\"\n no_change = \"You do it. Swell.\"\n dark_to_light = \"Your environs are suddenly alight.\"\n brighten = \"Your environs seem slightly brighter.\"\n endarken = \"Your environs seem slightly dimmer.\"\n Manipulator.createFor(self.playerWrapper.actor)\n\n self._test(\n \"illuminate 0\",\n [fade_to_black],\n [fade_to_black])\n\n ll = self.store.findUnique(\n objects.LocationLighting,\n objects.LocationLighting.thing == self.location)\n self.assertEquals(ll.candelas, 0)\n\n self._test(\n \"illuminate 0\",\n [no_change])\n self.assertEquals(ll.candelas, 0)\n\n self._test(\n \"illuminate 100\",\n [dark_to_light],\n [dark_to_light])\n self.assertEquals(ll.candelas, 100)\n\n self._test(\n \"illuminate 110\",\n [brighten],\n [brighten])\n self.assertEquals(ll.candelas, 110)\n\n self._test(\n \"illuminate 100\",\n [endarken],\n [endarken])\n self.assertEquals(ll.candelas, 100)\n\n self._test(\n \"illuminate 0\",\n [fade_to_black],\n [fade_to_black])\n self.assertEquals(ll.candelas, 0)",
"def cs4243_histnorm(image, grey_level=256):\n res_image = image.copy()\n ##your code here ###\n min_pixel = np.amin(res_image)\n max_pixel = np.amax(res_image)\n res_image = (res_image - min_pixel) / (max_pixel - min_pixel) * (grey_level-1)\n ####\n return res_image",
"def lightness(self):\n min_component = min(self.red, self.green, self.blue)\n max_component = max(self.red, self.green, self.blue)\n avg = (max_component + min_component) / 2\n light = avg / 255\n return light",
"def rsample(self, return_pretanh_value=False):\n ind = torch.nn.functional.gumbel_softmax(self.weight,tau=1, hard=True)\n # onehot batch x num_d\n normal = Normal(self.normal_mean[ind.bool()],self.normal_std[ind.bool()])\n z = normal.rsample()\n\n if return_pretanh_value:\n return torch.tanh(z), z\n else:\n return torch.tanh(z)",
"def set_random_glcolor(n):\n\n if ('gl_color' in n.knobs()\n and not n['gl_color'].value()\n and not n.name().startswith('_')):\n\n color = colorsys.hsv_to_rgb(random.random(), 0.8, 1)\n color = tuple(int(i * 255) for i in color)\n n['gl_color'].setValue(\n color[0] << 24 | color[1] << 16 | color[2] << 8)",
"def resample(self):\n # propagate networks\n self.z = self.prior_latent_distribution.sample()\n # reconstruct image\n self.y_hat_raw = self.fcomb(self.unet_features, self.z)\n\n return self.y_hat_raw",
"def _generate_red_noise(num_points, lag1_autocorrelation):\n\n white_noise_values = numpy.random.normal(\n loc=0., scale=WHITE_NOISE_STDEV, size=num_points)\n\n red_noise_values = numpy.full(num_points, numpy.nan)\n for i in range(num_points):\n if i == 0:\n red_noise_values[i] = white_noise_values[i]\n continue\n\n red_noise_values[i] = (\n lag1_autocorrelation * red_noise_values[i - 1] +\n numpy.sqrt(1 - lag1_autocorrelation ** 2) * white_noise_values[i]\n )\n\n return red_noise_values",
"def _sample(self, rnn_output, temperature):\n pass",
"def get_luminosity(name):\n all_data = mc.get('sensor_values')\n name = _lookup(name)\n try:\n return all_data[name][3]\n except KeyError:\n raise KeyError(\"No sensor with that name\")",
"def naive_gaussian_noise(true_depth: np.ndarray) -> np.ndarray:\n return true_depth + np.random.normal(0, 0.0012 + 0.0019 * np.square(true_depth - 0.4))",
"def augment_brightness(image):\n rand_brightness = .25 + np.random.uniform()\n image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)\n image[:, :, 2] = image[:, :, 2] * rand_brightness\n image = cv2.cvtColor(image, cv2.COLOR_HSV2RGB)\n return image",
"def testStatisticsRamp(self):\n\n \n nx = 101\n ny = 64\n img = afwImage.ImageF(afwGeom.Extent2I(nx, ny))\n \n z0 = 10.0\n dzdx = 1.0\n mean = z0 + (nx/2)*dzdx\n stdev = 0.0\n for y in range(ny):\n for x in range(nx):\n z = z0 + dzdx*x\n img.set(x, y, z)\n stdev += (z - mean)*(z - mean)\n\n stdev = math.sqrt(stdev/(nx*ny - 1))\n \n stats = afwMath.makeStatistics(img, afwMath.NPOINT | afwMath.STDEV | afwMath.MEAN)\n testmean = stats.getValue(afwMath.MEAN)\n teststdev = stats.getValue(afwMath.STDEV)\n \n self.assertEqual(stats.getValue(afwMath.NPOINT), nx*ny)\n self.assertEqual(testmean, mean)\n self.assertEqual(teststdev, stdev )\n \n stats = afwMath.makeStatistics(img, afwMath.STDEV | afwMath.MEAN | afwMath.ERRORS)\n mean, meanErr = stats.getResult(afwMath.MEAN)\n sd = stats.getValue(afwMath.STDEV)\n \n self.assertEqual(mean, img.get(nx/2, ny/2))\n self.assertEqual(meanErr, sd/math.sqrt(img.getWidth()*img.getHeight()))\n \n # ===============================================================================\n # sjb code for percentiles and clipped stats\n \n stats = afwMath.makeStatistics(img, afwMath.MEDIAN)\n self.assertEqual(z0 + dzdx*(nx - 1)/2.0, stats.getValue(afwMath.MEDIAN))\n \n stats = afwMath.makeStatistics(img, afwMath.IQRANGE)\n self.assertEqual(dzdx*(nx - 1)/2.0, stats.getValue(afwMath.IQRANGE))\n \n stats = afwMath.makeStatistics(img, afwMath.MEANCLIP)\n self.assertEqual(z0 + dzdx*(nx - 1)/2.0, stats.getValue(afwMath.MEANCLIP))",
"def sphere_l_intensity(img):\n pixels = []\n for j in range(0, img.shape[0]):\n for i in range(1, 40):\n pixels.append(img[j, i])\n\n return np.mean(pixels)",
"def lighting_consumption_variables(N_occ, grnd_flr_a, openings, light_access_factor, low_energy_bulb_ratio):\n\n mean_light_energy = 59.73 * (grnd_flr_a * N_occ) ** 0.4714\n\n C1 = 1 - 0.5 * low_energy_bulb_ratio\n\n window_openings = (o for o in openings if not o.opening_type.roof_window and not o.opening_type.bfrc_data)\n\n GLwin = _lighting_sum(window_openings) * light_access_factor / grnd_flr_a\n\n roof_openings = (o for o in openings if o.opening_type.roof_window and not o.opening_type.bfrc_data)\n GLroof = _lighting_sum(roof_openings) / grnd_flr_a\n\n # Use frame factor of 0.7 for bfrc rated windows\n window_bfrc_openings = (o for o in openings if not o.opening_type.roof_window and o.opening_type.bfrc_data)\n GLwin_bfrc = _lighting_sum(window_bfrc_openings) * 0.7 * 0.9 * light_access_factor / grnd_flr_a\n\n roof_bfrc_openings = (o for o in openings if o.opening_type.roof_window and o.opening_type.bfrc_data)\n GLroof_bfrc = _lighting_sum(roof_bfrc_openings) * 0.7 * 0.9 / grnd_flr_a\n\n GL = GLwin + GLroof + GLwin_bfrc + GLroof_bfrc\n C2 = 52.2 * GL ** 2 - 9.94 * GL + 1.433 if GL <= 0.095 else 0.96\n EL = mean_light_energy * C1 * C2\n light_consumption = EL * \\\n (1 + 0.5 * numpy.cos((2. * math.pi / 12.) * ((numpy.arange(12) + 1) - 0.2))) * \\\n DAYS_PER_MONTH / 365\n\n return dict(low_energy_bulb_ratio=low_energy_bulb_ratio,\n annual_light_consumption=sum(light_consumption),\n full_light_gain=light_consumption * (0.85 * 1000 / 24.) / DAYS_PER_MONTH,\n lighting_C1=C1,\n lighting_GL=GL,\n lighting_C2=C2)",
"def getcolorcodeALA15(ramapath, N, ssize=5):\n\n from analyse_ala_15 import AngleCategorizer\n\n nResidues = 15\n #angles = np.loadtxt('rama_dataset_ala_15.xvg', skiprows=32, usecols=range(0, 2), delimiter=' ')\n angles = np.loadtxt(os.path.join(ramapath, 'rama_dataset_ala_15_1500.xvg'), skiprows=32, usecols=range(0, 2), delimiter=' ')\n nSamples = angles.shape[0]/15\n angles.resize(nSamples, nResidues, 2)\n angCat = AngleCategorizer(angles)\n angCat.categorize()\n angCat.countConfigurations()\n colInd = angCat.getColorMatrix()\n alphaInd = angCat.getAlphaVals()\n\n marker = list()\n patchlist = list()\n\n marker.append('o')\n marker.append('o')\n marker.append('o')\n\n import matplotlib.patches as mpatches\n patchlist.append(mpatches.Patch(color='black', label=r'$\\alpha$'))\n patchlist.append(mpatches.Patch(color='blue', label=r'$\\beta$-1'))\n patchlist.append(mpatches.Patch(color='red', label=r'$\\beta$-2'))\n\n alpha = plt.scatter(0, 1, c='k', marker=marker[0], s=ssize, label=r'$\\alpha$')\n beta1 = plt.scatter(0, 1, c='b', marker=marker[1], s=ssize, label=r'$\\beta\\textnormal{-}1$')\n beta2 = plt.scatter(0, 1, c='r', marker=marker[2], s=ssize, label=r'$\\beta\\textnormal{-}2$')\n plt.close()\n\n patchlist = [alpha, beta1, beta2]\n\n return colInd, marker, patchlist, alphaInd",
"def example():\n with MAX44009() as ambient_sensor:\n print('Ambient light luminance : {:.2f} lux'.format(ambient_sensor.luminance))",
"def stain_image(image, num_stains, color):",
"def gen_sensor_reward(self,MAX_UNCERTAINTY,window_size,window_lag):\n\n for i in range(0, len(self.tracker_object.tracks)):\n unormalized_uncertainty = np.sum(self.tracker_object.tracks[i].p_k_k.diagonal())\n self.uncertainty[i].append((1.0 / MAX_UNCERTAINTY) * unormalized_uncertainty)\n\n\n this_uncertainty = []\n [this_uncertainty.append(self.uncertainty[x][-1]) for x in range(0, len(self.tracker_object.tracks))]\n\n self.avg_uncertainty.append(np.mean(this_uncertainty))\n\n if len(self.avg_uncertainty) < window_size + window_lag:\n self.reward.append(0)\n else:\n current_avg = np.mean(self.avg_uncertainty[-window_size:])\n prev_avg = np.mean(self.avg_uncertainty[-(window_size + window_lag):-window_lag])\n if current_avg < prev_avg or self.avg_uncertainty[-1] < .1:\n # if current_avg < prev_avg:\n self.reward.append(1)\n else:\n self.reward.append(0)",
"def test_str_luminance(self):\n xknx = XKNX()\n sensor = Sensor(\n xknx, \"TestSensor\", group_address_state=\"1/2/3\", value_type=\"luminance\"\n )\n sensor.sensor_value.payload = DPTArray(\n (\n 0x45,\n 0x18,\n 0xD9,\n 0x76,\n )\n )\n\n self.assertEqual(sensor.resolve_state(), 2445.59130859375)\n self.assertEqual(sensor.unit_of_measurement(), \"cd/m²\")\n self.assertEqual(sensor.ha_device_class(), \"illuminance\")",
"def vari(self,\n img):\n return (img.select(['RED']).subtract(img.select(['GREEN'])))\\\n .divide(img.select(['RED']).add(img.select(['GREEN'])).subtract(img.select(['BLUE'])))\\\n .select([0], ['VARI']).multiply(self.scale_factor)",
"def mean_loss_py(florida_landfall_rate, florida_mean, florida_stddev,\n gulf_landfall_rate, gulf_mean, gulf_stddev, num_monte_carlo_samples):\n tot_loss = 0\n\n for i in range(num_monte_carlo_samples):\n fl_events = np.random.poisson(lam=florida_landfall_rate, size=1)[0]\n fl_loss = 0\n for j in range(fl_events):\n fl_loss += np.random.lognormal(florida_mean, florida_stddev)\n\n gulf_events = np.random.poisson(lam=gulf_landfall_rate, size=1)[0]\n\n gulf_loss = 0\n for k in range(gulf_events):\n gulf_loss += np.random.lognormal(gulf_mean, gulf_stddev)\n\n year_loss = fl_loss + gulf_loss\n\n tot_loss += year_loss\n\n return tot_loss / num_monte_carlo_samples",
"def lphot(self):\n return self._get_mean_and_samples_attribute('lphot')",
"def infrared_luminosity(template, norm=1.):\n\n waves, L_nu = read_K15_template(template)\n\n wavelength_range = np.logical_and((waves >= 8), (waves <= 1000))\n\n # integrate L_nu over dnu\n freqs = SPEED_OF_LIGHT.to(u.micron / u.s).value / waves[wavelength_range] # Hz\n delta_freqs= freqs[:-1] - freqs[1:]\n L_IR = np.sum(dnu* l_nu for dnu, l_nu in zip(delta_freqs, L_nu[wavelength_range])) * norm\n\n return L_IR * (u.W).to(u.Lsun)",
"def random_brightness(image):\n\n hsv_channel = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)\n brightness_scalar = np.random.rand()\n ratio = 1.0 + 0.4 * (brightness_scalar - 0.5)\n hsv_channel[:,:,2] = hsv_channel[:,:,2] * ratio\n return cv2.cvtColor(hsv_channel, cv2.COLOR_HSV2RGB)",
"def get_snr(image_data, b_var, hlr):\n img = galsim.Image(image_data)\n try:\n new_params = galsim.hsm.HSMParams(max_amoment=5.0e15,\n max_mom2_iter=20000,\n convergence_threshold=1.e-5)\n res = galsim.hsm.FindAdaptiveMom(img, hsmparams=new_params,\n guess_sig=hlr * 2.5)\n aperture_noise = float(np.sqrt(b_var * 2. * np.pi * (res.moments_sigma**2)))\n sn_ellip_gauss = res.moments_amp / aperture_noise\n print 'RES', res.moments_amp, res.moments_sigma\n print 'SNR', sn_ellip_gauss\n except:\n print 'SNR manually set'\n sn_ellip_gauss = -10.\n print 'SNR', sn_ellip_gauss\n return sn_ellip_gauss",
"def random_brightness(image):\r\n image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)\r\n brightness = .25 + np.random.uniform()\r\n image[:, :, 2] = image[:, :, 2] * brightness\r\n image = cv2.cvtColor(image, cv2.COLOR_HSV2RGB)\r\n return image",
"def ls5_sr_corr(img):\n return img.select(['B1'], ['BLUE']).float().multiply(0.91996).add(37).int16()\\\n .addBands(img.select(['B2'], ['GREEN']).float().multiply(0.92764).add(84).int16())\\\n .addBands(img.select(['B3'], ['RED']).float().multiply(0.8881).add(98).int16())\\\n .addBands(img.select(['B4'], ['NIR']).float().multiply(0.95057).add(38).int16())\\\n .addBands(img.select(['B5'], ['SWIR1']).float().multiply(0.96525).add(29).int16())\\\n .addBands(img.select(['B7'], ['SWIR2']).float().multiply(0.99601).add(20).int16())\\\n .addBands(img.select(['pixel_qa'], ['PIXEL_QA']).int16())\\\n .addBands(img.select(['radsat_qa'], ['RADSAT_QA']).int16())\\\n .copyProperties(img)\\\n .copyProperties(img, ['system:time_start', 'system:time_end', 'system:index', 'system:footprint'])",
"def _sample_r(self):\r\n phi_X = self.phi(self.X, self.W, add_bias=True)\r\n F = phi_X @ self.beta.T\r\n P = logistic(F)\r\n for j in range(self.J):\r\n A = self._crt_sum(j)\r\n # `maximum` is element-wise, while `max` is not.\r\n maxes = np.maximum(1 - P[:, j], -np.inf)\r\n B = 1. / -np.sum(np.log(maxes))\r\n self.R[j] = np.random.gamma(A, B)\r\n # `R` cannot be zero.\r\n self.R[np.isclose(self.R, 0)] = 0.0000001",
"def grey(self):\n return sum((self.value(0), self.value(1), self.value(2)))/3",
"def localize_red_clump(star_catalog,close_cat_idx,log):\n\n def select_within_range(mags, colours, mag_min, mag_max, col_min, col_max):\n \"\"\"Function to identify the set of array indices with values\n between the range indicated\"\"\"\n\n idx1 = np.where(colours >= col_min)[0]\n idx2 = np.where(colours <= col_max)[0]\n idx3 = np.where(mags >= mag_min)[0]\n idx4 = np.where(mags <= mag_max)[0]\n idx = set(idx1).intersection(set(idx2))\n idx = idx.intersection(set(idx3))\n idx = list(idx.intersection(set(idx4)))\n\n return idx\n\n RC = photometry_classes.Star()\n\n inst_i = star_catalog['cal_ref_mag_ip'][close_cat_idx]\n inst_r = star_catalog['cal_ref_mag_rp'][close_cat_idx]\n inst_g = star_catalog['cal_ref_mag_gp'][close_cat_idx]\n cal_i = star_catalog['imag'][close_cat_idx]\n cal_r = star_catalog['rmag'][close_cat_idx]\n cal_g = star_catalog['gmag'][close_cat_idx]\n inst_ri = inst_r - inst_i # Catalogue column order is red -> blue\n inst_gi = inst_g - inst_i\n inst_gr = inst_g - inst_r\n cal_ri = cal_r - cal_i\n cal_gi = cal_g - cal_i\n cal_gr = cal_g - cal_r\n\n log.info('\\n')\n log.info('Localizing the Red Clump')\n log.info('Median (r-i), i: '+str(np.median(inst_ri))+', '+str(np.median(inst_i)))\n log.info('Median (g-i), i: '+str(np.median(inst_gi))+', '+str(np.median(inst_i)))\n log.info('Median (g-r), g: '+str(np.median(inst_gr))+', '+str(np.median(inst_g)))\n\n ri_min = 0.8\n ri_max = 1.2\n i_min = 15.5\n i_max = 16.5\n\n r_min = 16.2\n r_max = 17.5\n\n gi_min = 2.5\n gi_max = 3.5\n\n gr_min = 1.5\n gr_max = 2.2\n g_min = 17.8\n g_max = 19.5\n\n log.info('Selected Red Clump giants between:')\n log.info('i = '+str(i_min)+' to '+str(i_max))\n log.info('r = '+str(r_min)+' to '+str(r_max))\n log.info('(r-i) = '+str(ri_min)+' to '+str(ri_max))\n log.info('g = '+str(g_min)+' to '+str(g_max))\n log.info('(g-r) = '+str(gr_min)+' to '+str(gr_max))\n log.info('(g-i) = '+str(gi_min)+' to '+str(gi_max))\n\n idx = select_within_range(inst_i, inst_ri, i_min, i_max, ri_min, ri_max)\n\n (RC.ri, RC.sig_ri, RC.i, RC.sig_i) = calc_distribution_centroid_and_spread_2d(inst_ri[idx], inst_i[idx], use_iqr=True)\n\n idx = select_within_range(inst_r, inst_ri, r_min, r_max, ri_min, ri_max)\n\n (RC.r, RC.sig_r) = calc_distribution_centre_and_spread(inst_r[idx], use_iqr=True)\n\n idx = select_within_range(inst_g, inst_gr, g_min, g_max, gr_min, gr_max)\n\n (RC.gr, RC.sig_gr, RC.g, RC.sig_g) = calc_distribution_centroid_and_spread_2d(inst_gr[idx], inst_g[idx], use_iqr=True)\n\n idx = select_within_range(inst_g, inst_gi, g_min, g_max, gi_min, gi_max)\n\n (RC.gi, RC.sig_gi, RC.g, RC.sig_g) = calc_distribution_centroid_and_spread_2d(inst_gi[idx], inst_g[idx], use_iqr=True)\n\n log.info('\\n')\n log.info('Centroid of Red Clump Stars at:')\n log.info(RC.summary(show_mags=True))\n log.info(RC.summary(show_mags=False,show_colours=True))\n\n RC.transform_to_JohnsonCousins()\n\n log.info(RC.summary(show_mags=False,johnsons=True))\n\n return RC",
"def analagous(R, G, B):\r\n RGB = [(R/255), (G/255), (B/255)]\r\n HLS = colorsys.rgb_to_hls(RGB[0], RGB[1], RGB[2])\r\n HLS_1 = [((((HLS[0]*360) + 30) % 360)/360), HLS[1], HLS[2]]\r\n HLS_2 = [((((HLS[0]*360) - 30) % 360)/360), HLS[1], HLS[2]]\r\n RGB_1 = colorsys.hls_to_rgb(HLS_1[0], HLS_1[1], HLS_1[2])\r\n RGB_2 = colorsys.hls_to_rgb(HLS_2[0], HLS_2[1], HLS_2[2])\r\n return [RGB255(RGB_1), RGB255(RGB_2)]",
"def intensityPSF_Blues(N=1000):\n col_seq = [( 0/255., 20/255., 80/255.), ( 8/255., 48/255., 107/255.),\n ( 8/255., 81/255., 156/255.), ( 33/255., 113/255., 181/255.),\n ( 66/255., 146/255., 198/255.), (107/255., 174/255., 214/255.),\n (158/255., 202/255., 225/255.), (198/255., 219/255., 239/255.),\n (222/255., 235/255., 247/255.), (247/255., 251/255., 255/255.)]\n\n cdict = {'red': ((0.00, col_seq[0][0], col_seq[0][0]),\n (0.02, col_seq[1][0], col_seq[1][0]),\n (0.06, col_seq[2][0], col_seq[2][0]),\n (0.10, col_seq[3][0], col_seq[3][0]),\n (0.20, col_seq[4][0], col_seq[4][0]),\n (0.30, col_seq[5][0], col_seq[5][0]),\n (0.50, col_seq[6][0], col_seq[6][0]),\n (0.75, col_seq[7][0], col_seq[7][0]),\n (0.90, col_seq[8][0], col_seq[8][0]),\n (1.00, col_seq[9][0], col_seq[9][0])),\n 'green': ((0.00, col_seq[0][1], col_seq[0][1]),\n (0.02, col_seq[1][1], col_seq[1][1]),\n (0.06, col_seq[2][1], col_seq[2][1]),\n (0.10, col_seq[3][1], col_seq[3][1]),\n (0.20, col_seq[4][1], col_seq[4][1]),\n (0.30, col_seq[5][1], col_seq[5][1]),\n (0.50, col_seq[6][1], col_seq[6][1]),\n (0.75, col_seq[7][1], col_seq[7][1]),\n (0.90, col_seq[8][1], col_seq[8][1]),\n (1.00, col_seq[9][1], col_seq[9][1])),\n 'blue': ((0.00, col_seq[0][2], col_seq[0][2]),\n (0.02, col_seq[1][2], col_seq[1][2]),\n (0.06, col_seq[2][2], col_seq[2][2]),\n (0.10, col_seq[3][2], col_seq[3][2]),\n (0.20, col_seq[4][2], col_seq[4][2]),\n (0.30, col_seq[5][2], col_seq[5][2]),\n (0.50, col_seq[6][2], col_seq[6][2]),\n (0.75, col_seq[7][2], col_seq[7][2]),\n (0.90, col_seq[8][2], col_seq[8][2]),\n (1.00, col_seq[9][2], col_seq[9][2]))}\n\n psfblues = _mplb.colors.LinearSegmentedColormap('psfblues', cdict, N)\n return psfblues",
"def get_luminosity(self, vel_disp):\n\t\tlog_L_V = self.slope*np.log10(vel_disp) + self.intercept\n\t\treturn log_L_V",
"def lightness(color):\n\n strongest = max(color.red, color.green, color.blue)\n weakest = min(color.red, color.green, color.blue)\n return 0.5 * (strongest + weakest) / 255",
"def mean_pixel(model_variant=None):\n if model_variant is None:\n return _MEAN_RGB\n else:\n return [127.5, 127.5, 127.5]",
"def _color_sample(img: np.ndarray, p: float = 0.05) -> np.ndarray:\n # combine the X and Y dimension into one, only keep the channels dimension\n ravelled = img.reshape(-1, 3)\n # for 5%, take every 20th value, for 10% every 10th, etc...\n every_nth = int(1 / p)\n return ravelled[::every_nth, :]",
"def noisy_color(col, noise, amount) :\n if random.random() < noise :\n red = (col[0] + random.randrange(-amount,amount))\n green = (col[1] + random.randrange(-amount,amount))\n blue = (col[2] + random.randrange(-amount,amount))\n red = clamp(red,0,255)\n green = clamp(green,0,255)\n blue = clamp(blue,0,255)\n return (red,green,blue)\n else :\n return col",
"def brown_noise():\n # TODO: try different values of BROWN_FACTOR\n # ... just seems to make it noisier or quieter - no change in freq\n global brown_val\n if brown_val > 32767:\n brown_val = brown_val - abs(white_noise()) / BROWN_FACTOR\n elif brown_val < -32767:\n brown_val = brown_val + abs(white_noise()) / BROWN_FACTOR\n else:\n brown_val = brown_val + white_noise() / BROWN_FACTOR\n return int(brown_val)",
"def __LAI(NDVI, vegt_cover):\n\n LAI_1 = np.log(-(vegt_cover - 1)) / -0.45\n LAI_1[LAI_1 > 8] = 8.0\n LAI_2 = (9.519 * np.power(NDVI, 3) + 0.104 * np.power(NDVI, 2) +\n 1.236 * NDVI - 0.257)\n\n LAI = (LAI_1 + LAI_2) / 2.0 # Average LAI\n LAI[LAI < 0.001] = 0.001\n return LAI",
"def estimate_brightness(self):\n\n intensity = self._get_intensity()\n self.avg_standard_lum = np.sum(intensity) / (self.img_height * self.img_width)\n return self.avg_standard_lum",
"def test03(ntest, prefix='fig-v01'):\n from time import time\n import psana.pyalgos.generic.Graphics as gg\n from psana.pscalib.geometry.GeometryAccess import img_from_pixel_arrays\n\n arr, geo = data_geo(ntest)\n\n iX, iY = geo.get_pixel_coord_indexes()\n X, Y, Z = geo.get_pixel_coords()\n mask = geo.get_pixel_mask(mbits=0o377).flatten()\n\n t0_sec = time()\n\n #hp = HPolar(X, Y, mask, nradbins=5, nphibins=8, phiedges=(-20, 240), radedges=(10000,80000))\n hp = HPolar(X, Y, mask, nradbins=3, nphibins=8, phiedges=(240, -20), radedges=(80000,10000)) # v3\n\n print('HPolar initialization time %.3f sec' % (time()-t0_sec))\n\n #print('bin_number_of_pixels:', hp.bin_number_of_pixels())\n #print('bin_intensity:', hp.bin_intensity(arr))\n #print('bin_avrg:', hp.bin_avrg(arr))\n\n t0_sec = time()\n nda, title = arr, None\n if ntest == 41: nda, title = arr, 'averaged data'\n elif ntest == 44: nda, title = hp.pixel_irad() + 2, 'pixel radial bin index'\n elif ntest == 45: nda, title = hp.pixel_iphi() + 2, 'pixel phi bin index'\n elif ntest == 46: nda, title = hp.pixel_iseq() + 2, 'pixel sequential (rad and phi) bin index'\n #elif ntest == 47: nda, title = mask, 'mask'\n elif ntest == 48: nda, title = hp.pixel_avrg(nda, subs_value=180), 'averaged radial intensity'\n elif ntest == 49: nda, title = hp.pixel_avrg_interpol(nda, verb=True) * mask, 'averaged radial interpolated intensity'\n elif ntest == 50: nda, title = hp.bin_avrg_rad_phi(nda),'r-phi'\n else:\n print('Test %d is not implemented' % ntest)\n return\n\n print('Get %s n-d array time %.3f sec' % (title, time()-t0_sec))\n\n img = img_from_pixel_arrays(iX, iY, nda) if not ntest in (50,) else nda # [100:300,:]\n\n colmap = 'jet' # 'cubehelix' 'cool' 'summer' 'jet' 'winter' 'gray'\n\n da = (nda.min()-1, nda.max()+1)\n ds = da\n\n if ntest in (41,48,49,50):\n ave, rms = nda.mean(), nda.std()\n da = ds = (ave-2*rms, ave+3*rms)\n\n gg.plotImageLarge(img, amp_range=da, figsize=(14,12), title=title, cmap=colmap)\n gg.save('%s-%02d-img.png' % (prefix, ntest))\n\n gg.hist1d(nda, bins=None, amp_range=ds, weights=None, color=None, show_stat=True, log=False, \\\n figsize=(6,5), axwin=(0.18, 0.12, 0.78, 0.80), \\\n title=None, xlabel='Pixel value', ylabel='Number of pixels', titwin=title)\n gg.save('%s-%02d-his.png' % (prefix, ntest))\n\n gg.show()\n\n print('End of test for %s' % title)",
"def random_brightness(image):\n image1 = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)\n random_bright = 0.9 + 0.5 * ((2 * np.random.uniform()) - 1.0)\n image1[:, :, 2] = image1[:, :, 2] * random_bright\n image1 = cv2.cvtColor(image1, cv2.COLOR_HSV2RGB)\n return image1",
"def Brightness(img: Image, magnitude: float) -> Image:\n return PIL.ImageEnhance.Brightness(img).enhance(\n 1 + magnitude * random.choice([-1, 1])\n )",
"def PSNR(orimg, estimg, pattern):\n PSNR = [0]*3\n _, mask = keep_measures(orimg[:, :, 0], pattern)\n for i in range(3):\n diff = orimg[:,:,i] - estimg[:,:,i]\n PSNR[i] = 10*np.log10(255**2/(np.linalg.norm((1-mask[:,:,i])*diff)**2/(1-mask[:,:,i]).sum()))\n \n return tuple(PSNR)",
"def _compute_noise_level(self, data):\n noise = max(data)\n noise_min = 2600\n noise_max = 4095\n ratio = (noise - noise_min)/(noise_max - noise_min)\n return int(ratio*100)",
"def _sample_z(self, a):\n r = np.random.rand()\n return ((1 + r * (a - 1))**2) / a",
"def analysis(avg, yellow, var, edges, texture): \n r = [a[0] for a in avg]\n ravg = numpy.mean(r)\n rmin = min(r) \n rmax = max(r)\n g = [a[1] for a in avg]\n gavg = numpy.mean(g)\n gmin = min(g) \n gmax = max(g)\n b = [a[2] for a in avg]\n bavg = numpy.mean(b)\n bmin = min(b) \n bmax = max(b)\n \n \n print('Red Statistics') \n print('Average is ', ravg) \n print('Min is ', rmin)\n print('Max is ', rmax)\n \n print('Green Statistics') \n print('Average is ', gavg) \n print('Min is ', gmin)\n print('Max is ', gmax)\n \n print('Blue Statistics') \n print('Average is ', bavg) \n print('Min is ', bmin)\n print('Max is ', bmax)\n \n \n yavg = numpy.mean(yellow)\n ymin = min(yellow)\n ymax = max(yellow)\n \n print 'Yellow Statistics' \n print 'Average is ', yavg \n print 'Min is ', ymin \n print 'Max is ', ymax\n \n vavg = numpy.mean(var)\n vmin= min(var)\n vmax = max(var) \n \n print 'Color Variance Statistics' \n print 'Average is ', vavg \n print 'Min is ', vmin \n print 'Max is ', vmax\n \n eavg = numpy.mean(edges)\n emin= min(edges)\n emax = max(edges) \n \n print 'Edge Count Statistics' \n print 'Average is ', eavg \n print 'Min is ', emin \n print 'Max is ', emax\n \n tavg = numpy.mean(texture) \n tmin= min(texture)\n tmax = max(texture) \n print 'Texture Statistics' \n print 'Average is ', tavg \n print 'Min is ', tmin \n print 'Max is ', tmax",
"def modelOnBetaGrid(sample,bins,N,l,u):\r\n\r\n betaGrid=np.linspace(l,u,N)\r\n traces=[]\r\n WAIC=dict()\r\n index=0\r\n\r\n for beta in betaGrid:\r\n trace=intensityLogGauss(sample,bins,beta)\r\n traces.append(trace['intensity'])\r\n WAIC[index]=trace\r\n index+=1\r\n\r\n df=pm.compare(WAIC,ic='WAIC')\r\n\r\n return betaGrid,df,traces",
"def noise_level(data):\n length=len(data) - 2\n dev=[]\n for i in range(1,length - 1):\n dev.append((abs(data[i] - data[i-1]) + abs(data[i] - data[i + 1]))/2)\n dev.sort()\n return dev[round(0.9*length)]",
"def test02(ntest, prefix='fig-v01'):\n #from Detector.GlobalUtils import print_ndarr\n from time import time\n import psana.pyalgos.generic.Graphics as gg\n from psana.pscalib.geometry.GeometryAccess import img_from_pixel_arrays\n\n arr, geo = data_geo(ntest)\n\n iX, iY = geo.get_pixel_coord_indexes()\n X, Y, Z = geo.get_pixel_coords()\n mask = geo.get_pixel_mask(mbits=0o377).flatten()\n\n t0_sec = time()\n #hp = HPolar(X, Y, mask) # v0\n hp = HPolar(X, Y, mask, nradbins=500) # , nphibins=8, phiedges=(-20, 240), radedges=(10000,80000))\n print('HPolar initialization time %.3f sec' % (time()-t0_sec))\n\n t0_sec = time()\n nda, title = arr, None\n if ntest == 21: nda, title = arr, 'averaged data'\n elif ntest == 24: nda, title = hp.pixel_irad() + 2, 'pixel radial bin index'\n elif ntest == 25: nda, title = hp.pixel_iphi() + 2, 'pixel phi bin index'\n elif ntest == 26: nda, title = hp.pixel_iseq() + 2, 'pixel sequential (rad and phi) bin index'\n #elif ntest == 27: nda, title = mask, 'mask'\n elif ntest == 28: nda, title = hp.pixel_avrg(nda), 'averaged radial intensity'\n elif ntest == 29: nda, title = hp.pixel_avrg_interpol(nda) * mask, 'averaged radial interpolated intensity'\n elif ntest == 30: nda, title = hp.bin_avrg_rad_phi(nda),'r-phi'\n else:\n print('Test %d is not implemented' % ntest)\n return\n\n print('Get %s n-d array time %.3f sec' % (title, time()-t0_sec))\n\n img = img_from_pixel_arrays(iX, iY, nda) if not ntest in (30,) else nda # [100:300,:]\n\n colmap = 'jet' # 'cubehelix' 'cool' 'summer' 'jet' 'winter' 'gray'\n\n da = (nda.min()-1, nda.max()+1)\n ds = da\n\n if ntest in (21,28,29,30):\n ave, rms = nda.mean(), nda.std()\n da = ds = (ave-2*rms, ave+3*rms)\n\n gg.plotImageLarge(img, amp_range=da, figsize=(14,12), title=title, cmap=colmap)\n gg.save('%s-%02d-img.png' % (prefix, ntest))\n\n gg.hist1d(nda, bins=None, amp_range=ds, weights=None, color=None, show_stat=True, log=False, \\\n figsize=(6,5), axwin=(0.18, 0.12, 0.78, 0.80), \\\n title=None, xlabel='Pixel value', ylabel='Number of pixels', titwin=title)\n gg.save('%s-%02d-his.png' % (prefix, ntest))\n\n gg.show()\n\n print('End of test for %s' % title)",
"def add_bollinger_bands(self, rstd):\n self.data['upper_band'] = self.data['rolling_mean'] + 2 * rstd\n self.data['lower_band'] = self.data['rolling_mean'] - 2 * rstd",
"def same_color_distribution():\n \n \n return 0.03, \"Fail to Reject\"",
"def grayscale(self):\n\n luminance = self.get_luminance() & 0xFF\n self.r = luminance\n self.g = luminance\n self.b = luminance",
"def test_mnir_image():\n # Initiate the sunglint correction class\n g = deglint.GlintCorr(odc_meta_file, sub_product)\n\n # ---------------------- #\n # NIR subtraction #\n # ---------------------- #\n mnir_xarrlist = g.glint_subtraction(\n vis_bands=[\"3\"],\n corr_band=\"6\",\n water_val=5,\n )\n\n sungc_band = mnir_xarrlist[0].lmbadj_green.values # 3D array\n\n # path to expected sunglint corrected output from NIR subtraction\n exp_sungc_band = (\n data_path\n / \"MINUS_NIR\"\n / \"ga_ls8c_lmbadj_3-2-0_091086_2014-11-06_final_band03-deglint-600m.tif\"\n )\n\n # ensure that all valid sungint corrected pixels match expected\n with rasterio.open(exp_sungc_band, \"r\") as exp_sungc_ds:\n urd_band = urd(sungc_band[0, :, :], exp_sungc_ds.read(1), exp_sungc_ds.nodata)\n assert urd_band.max() < 0.001",
"def _log_likelihood_colour(self, df, dfo):\n pdf = fit_colour_gaussian(df[\"colour_obs\"].values)\n return np.log(pdf(dfo[\"g_r\"].values)).sum()",
"def analysis_function_noise_level(self, clustering, total_elements):\n return 100.-(clustering.total_number_of_elements/float(total_elements))*100.",
"def luma(cspace: np.array) -> np.array:\n \n assert isinstance(cspace, DataFrame), \"Colorspace must be a dataframe\"\n assert all(np.isin(['R', 'G', 'B'], cspace.columns)), \"Colorspace must contain RGB columns\"\n return 0.2989 * cspace['R'] + 0.5870 * cspace['G'] + 0.1140 * cspace['B']",
"def bollinger_lband_indicator(close, n=20, ndev=2, fillna=False):\n df = pd.DataFrame([close]).transpose()\n mavg = close.rolling(n).mean()\n mstd = close.rolling(n).std()\n lband = mavg - ndev * mstd\n df['lband'] = 0.0\n df.loc[close < lband, 'lband'] = 1.0\n lband = df['lband']\n if fillna:\n lband = lband.replace([np.inf, -np.inf], np.nan).fillna(0)\n return pd.Series(lband, name='bbilband')",
"def _sample_gumbel(self, shape, eps=1e-20):\r\n U = tf.random_uniform(shape, minval=0, maxval=1)\r\n return -tf.log(-tf.log(U + eps) + eps)",
"def grayscale(filename):\r\n image = SimpleImage(filename)\r\n for pixel in image:\r\n luminosity = compute_luminosity(pixel.red, pixel.green, pixel.blue)\r\n pixel.red = luminosity\r\n pixel.green = luminosity\r\n pixel.blue = luminosity\r\n return image",
"def meancol(source):\n\tonepix = source.copy()\n\tonepix.thumbnail((1,1),Image.ANTIALIAS)\n\treturn onepix.getpixel((0,0))",
"def intensity(self, value: int, /) -> None:",
"def test_RGB_mode():\n\n model = Instafilter(\"Lo-Fi\")\n\n f_image = __local__ / \"Normal.jpg\"\n\n img1 = model(f_image)\n img2 = model(f_image, is_RGB=True)\n\n diff = (img1 - img2).sum()\n\n assert abs(diff) > 0",
"def lsnr_mapping(\n self, lsnr: Tensor, lsnr_thresh: float, lsnr_min: Optional[float] = None\n ) -> Tensor:\n # s = a * lsnr + b\n lsnr_min = float(self.lsnr_min) if lsnr_min is None else lsnr_min\n a_ = 1 / (lsnr_thresh - lsnr_min)\n b_ = -a_ * lsnr_min\n return 1 - torch.clamp(a_ * lsnr + b_, 0.0, 1.0)",
"def ls_sr_band_correction(self,\n img):\n return \\\n ee.Algorithms.If(\n ee.String(img.get('SATELLITE')).compareTo('LANDSAT_8'),\n ee.Algorithms.If(ee.String(img.get('SATELLITE')).compareTo('LANDSAT_5'),\n ee.Image(img.select(['B1', 'B2', 'B3', 'B4', 'B5', 'B7', 'pixel_qa', 'radsat_qa'],\n ['BLUE', 'GREEN', 'RED', 'NIR', 'SWIR1', 'SWIR2', 'PIXEL_QA',\n 'RADSAT_QA'])\n .int16()\n .copyProperties(img)\n .copyProperties(img,\n ['system:time_start',\n 'system:time_end',\n 'system:index',\n 'system:footprint'])),\n ee.Algorithms.If(ee.Number(int(self.auto_ls5_correction)),\n ee.Image(EEHelper.ls5_sr_corr(img)),\n ee.Image(img.select(\n ['B1', 'B2', 'B3', 'B4', 'B5', 'B7', 'pixel_qa', 'radsat_qa'],\n ['BLUE', 'GREEN', 'RED', 'NIR', 'SWIR1', 'SWIR2', 'PIXEL_QA',\n 'RADSAT_QA'])\n .int16()\n .copyProperties(img)\n .copyProperties(img,\n ['system:time_start',\n 'system:time_end',\n 'system:index',\n 'system:footprint']))\n )\n ),\n ee.Algorithms.If(ee.Number(int(self.auto_ls8_correction)),\n ee.Image(EEHelper.ls8_sr_corr(img)),\n ee.Image(img.select(['B2', 'B3', 'B4', 'B5', 'B6', 'B7', 'pixel_qa', 'radsat_qa'],\n ['BLUE', 'GREEN', 'RED', 'NIR', 'SWIR1', 'SWIR2', 'PIXEL_QA',\n 'RADSAT_QA'])\n .int16()\n .copyProperties(img)\n .copyProperties(img,\n ['system:time_start',\n 'system:time_end',\n 'system:index',\n 'system:footprint']))\n )\n )",
"def whatsgreen2(image):\n green = image.hueDistance(color= Color('green'), minvalue=40).binarize()\n return green",
"def augment_brightness_camera_images(image):\n\n # The HSV - Hue Saturation Value representation converts the image from RGB space to HSV space\n # where the Value(brightness) represents the brightness that is randomly increased\n\n image1 = cv2.cvtColor(image,cv2.COLOR_RGB2HSV)\n random_bright = .25+np.random.uniform()\n #print(random_bright)\n image1[:,:,2] = image1[:,:,2]*random_bright\n image1 = cv2.cvtColor(image1,cv2.COLOR_HSV2RGB)\n return image1",
"def __hsl_threshold(input, hue, sat, lum):\r\n out = cv2.cvtColor(input, cv2.COLOR_BGR2HLS)\r\n return cv2.inRange(out, (hue[0], lum[0], sat[0]), (hue[1], lum[1], sat[1]))"
] | [
"0.6417541",
"0.63884014",
"0.6198883",
"0.5988071",
"0.58689255",
"0.5791698",
"0.5777783",
"0.57607454",
"0.5741125",
"0.5704976",
"0.5666426",
"0.56387365",
"0.56344163",
"0.5579416",
"0.5577653",
"0.55769515",
"0.5561606",
"0.5552182",
"0.55497104",
"0.554356",
"0.5517453",
"0.54907095",
"0.54766655",
"0.54759985",
"0.5470947",
"0.54024357",
"0.5391043",
"0.5383013",
"0.5366394",
"0.5356897",
"0.532358",
"0.5273719",
"0.52334076",
"0.5226572",
"0.5222911",
"0.52095807",
"0.5206931",
"0.51999474",
"0.5195698",
"0.51796883",
"0.5170273",
"0.51674336",
"0.5152228",
"0.5142024",
"0.51321775",
"0.51119983",
"0.5107032",
"0.51052535",
"0.51043373",
"0.5102906",
"0.5098254",
"0.50970334",
"0.5077797",
"0.507473",
"0.5073024",
"0.5061861",
"0.5053087",
"0.5052031",
"0.5047112",
"0.50419444",
"0.5038847",
"0.50300795",
"0.50293773",
"0.5028339",
"0.5026282",
"0.50251627",
"0.50227666",
"0.50149333",
"0.50072056",
"0.5005682",
"0.5003047",
"0.5002816",
"0.4999331",
"0.49900442",
"0.4985172",
"0.49837607",
"0.4971347",
"0.49690527",
"0.49623027",
"0.496127",
"0.49482763",
"0.49423414",
"0.494073",
"0.49392074",
"0.49377123",
"0.49352548",
"0.49346218",
"0.49340644",
"0.4932024",
"0.49297205",
"0.49271336",
"0.49226815",
"0.49197233",
"0.49156025",
"0.49146444",
"0.49144864",
"0.49139497",
"0.49118087",
"0.48977426",
"0.48938718"
] | 0.7363408 | 0 |
expects 2 arrays of shape (3, N) rigid transform algorithm from | def rigid_transform_3d(xs,ys):
assert xs.shape == ys.shape
    assert xs.shape[0] == 3, 'The points must be of dimensionality 3'
# find centroids and H
x_centroid = np.mean(xs, axis=1)[:, np.newaxis]
y_centroid = np.mean(ys, axis=1)[:, np.newaxis]
H = (xs - x_centroid)@(ys - y_centroid).T
# find rotation
U, S, Vt = np.linalg.svd(H)
    rotation = Vt.T@U.T
# handling reflection
if np.linalg.det(rotation) < 0:
Vt[2, :] *= -1
rotation = np.dot(Vt.T, U.T)
# find translation
translation = y_centroid - rotation@x_centroid
return translation, rotation | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compose_transform(T1, T2):\n aux_vec = np.array([0, 0, 1]).reshape(1, 3)\n\n T1 = np.concatenate((T1, aux_vec), axis=0)\n T2 = np.concatenate((T2, aux_vec), axis=0)\n\n T1_inv = np.linalg.inv(T1)\n T = T1_inv@T2\n\n return T[0:2]",
"def transform(tvec1, rvec1, tvec2, rvec2):\n op = localToGlobal(np.squeeze(tvec2), np.squeeze(rvec2))\n tvec3 = []\n for tvec in tvec1:\n #tvec = tvec.squeeze()\n tvec3.append(np.matmul(op, tvec))\n tvec3 = np.array(tvec3)\n return tvec3",
"def solve_rigid_transform(X, Y, debug=True):\n assert X.shape[0] == Y.shape[0] >= 3\n assert X.shape[1] == Y.shape[1] == 3\n A = X.T # (3,N)\n B = Y.T # (3,N)\n\n # Look for Inge Soderkvist's solution online if confused.\n meanA = np.mean(A, axis=1, keepdims=True)\n meanB = np.mean(B, axis=1, keepdims=True)\n A = A - meanA\n B = B - meanB\n covariance = B.dot(A.T)\n U, sigma, VH = np.linalg.svd(covariance) # VH = V.T, i.e. numpy transposes it for us.\n\n V = VH.T\n D = np.eye(3)\n D[2,2] = np.linalg.det( U.dot(V.T) )\n R = U.dot(D).dot(V.T)\n t = meanB - R.dot(meanA)\n RB_matrix = np.concatenate((R, t), axis=1)\n\n #################\n # SANITY CHECKS #\n #################\n\n print(\"\\nBegin debug prints for rigid transformation from A to B:\")\n print(\"meanA:\\n{}\\nmeanB:\\n{}\".format(meanA, meanB))\n print(\"Rotation R:\\n{}\\nand R^TR (should be identity):\\n{}\".format(R, (R.T).dot(R)))\n print(\"translation t:\\n{}\".format(t))\n print(\"RB_matrix:\\n{}\".format(RB_matrix))\n\n # Get residual to inspect quality of solution. Use homogeneous coordinates for A.\n # Also, recall that we're dealing with (3,N) matrices, not (N,3).\n # In addition, we don't want to zero-mean for real applications.\n A = X.T # (3,N)\n B = Y.T # (3,N)\n\n ones_vec = np.ones((1, A.shape[1]))\n A_h = np.concatenate((A, ones_vec), axis=0)\n B_pred = RB_matrix.dot(A_h)\n assert B_pred.shape == B.shape\n\n # Careful! Use raw_errors for the RF, but it will depend on pred-targ or targ-pred.\n raw_errors = B_pred - B # Use pred-targ, of shape (3,N)\n l2_per_example = np.sum((B-B_pred)*(B-B_pred), axis=0)\n frobenius_loss = np.mean(l2_per_example)\n\n if debug:\n print(\"\\nInput, A.T:\\n{}\".format(A.T))\n print(\"Target, B.T:\\n{}\".format(B.T))\n print(\"Predicted points:\\n{}\".format(B_pred.T))\n print(\"Raw errors, B-B_pred:\\n{}\".format((B-B_pred).T))\n print(\"Mean abs error per dim: {}\".format( (np.mean(np.abs(B-B_pred), axis=1))) )\n print(\"Residual (L2) for each:\\n{}\".format(l2_per_example.T))\n print(\"loss on data: {}\".format(frobenius_loss))\n print(\"End of debug prints for rigid transformation.\\n\")\n\n assert RB_matrix.shape == (3,4)\n return RB_matrix",
"def rigid_transform(xyz, transform):\n xyz_h = np.hstack([xyz, np.ones((len(xyz), 1), dtype=np.float32)])\n xyz_t_h = np.dot(transform, xyz_h.T).T\n return xyz_t_h[:, :3]",
"def estimate_rigid_transform(points1, points2, translation_only=False):\n centroid1 = points1.mean(axis=0)\n centroid2 = points2.mean(axis=0)\n\n if translation_only:\n rotation = np.eye(2)\n translation = centroid2 - centroid1\n\n else:\n centered_points1 = points1 - centroid1\n centered_points2 = points2 - centroid2\n\n sigma = centered_points2.T @ centered_points1\n U, _, Vt = np.linalg.svd(sigma)\n\n rotation = U @ Vt\n translation = -rotation @ centroid1 + centroid2\n\n H = np.eye(3)\n H[:2,:2] = rotation\n H[:2, 2] = translation\n return H",
"def AffineTransform( from_pts, to_pts ):\n \n # check that there are match points\n if len(from_pts) != len(to_pts) or len(to_pts)<1:\n print \"from_pts and to_pts must be of same size.\"\n return False\n\n # check the dimensions\n dim = len(from_pts[0]) # num of dimensions\n if len(from_pts) < dim:\n print \"Too few points => under-determined system.\"\n return False\n elif len(from_pts) > dim + 1:\n print \"Too many points => over-determined system.\"\n return False\n\n \n #segregate the x and y coordinages\n from_pts_x, from_pts_y = zip(*from_pts)\n to_pts_x, to_pts_y = zip(*to_pts)\n \n #create the Matricies for processing\n I = np.matrix([from_pts_x, from_pts_y, [1,1,1]])\n P = np.matrix([to_pts_x, to_pts_y])\n \n #Calculate the 2D affine transform matrix (A)\n A = P * linalg.pinv(I) \n\n # Make a result object\n class Transformation:\n \"\"\"Result object that represents the transformation\n from affine fitter.\"\"\"\n\n def To_Str(self):\n res = \"\"\n for j in range(dim):\n str1 = \"x%d' = \" % j\n for i in range(dim):\n str1 +=\"x%d * %f + \" % (i, A[i][j+dim+1])\n str1 += \"%f\" % A[dim][j+dim+1]\n res += str1 + \"\\n\"\n return res\n\n def Transform(self, pt_x, pt_y):\n pt_vector = np.matrix([[pt_x], [pt_y], [1]])\n transformed_pt = A * pt_vector\n return map(itemgetter(0), transformed_pt.tolist())\n return Transformation()",
"def two_bs2x4_transform_opt(t1, r1, t2, r2, input_state):\n size = len(input_state)\n out = np.zeros((size,) * 4, dtype=complex)\n\n def coef(k1, k2, k3, k4):\n return t1 ** k2 * (1j * r1) ** k1 * t2 ** k4 * (1j * r2) ** k3 / (factorial(k1) * factorial(k2) * factorial(k3) * factorial(k4))\n\n # index 'i' = (m,n,k,l)\n for i in np.ndindex(size, size, size, size):\n if i[2] <= i[0] and i[3] <= i[1] and i[0] + i[1] < size:\n out[i[2], i[0] - i[2], i[3], i[1] - i[3]] = coef(i[2], i[0] - i[2], i[3], i[1] - i[3]) * input_state[i[0], i[1]] * factorial(i[0]) * factorial(i[1])\n\n return out",
"def fit_transform(self, x: Array2D) -> Array2D:",
"def transform(self, x: Array2D) -> Array2D:",
"def apply_transformation_np(source, transformation):\n source_homog = np.ones((source.shape[0], 4))\n source_homog[:, :-1] = source\n # source_homog = np.hstack(\n # (source, np.ones(source.shape[0], 1))\n # )\n\n source_transformed = np.matmul(transformation, source_homog.T).T[:, :-1]\n return source_transformed",
"def estimate_rigid_transform(points1, points2, translation_only=False):\n centroid1 = points1.mean(axis=0)\n centroid2 = points2.mean(axis=0)\n\n if translation_only:\n rotation = np.eye(2)\n translation = centroid2 - centroid1\n\n else:\n centered_points1 = points1 - centroid1\n centered_points2 = points2 - centroid2\n\n sigma = centered_points2.T @ centered_points1\n U, _, Vt = np.linalg.svd(sigma)\n\n rotation = U @ Vt\n translation = -rotation @ centroid1 + centroid2\n\n H = np.eye(3)\n H[:2, :2] = rotation\n H[:2, 2] = translation\n return H",
"def estimate_rigid_transform(points1, points2, translation_only=False):\n centroid1 = points1.mean(axis=0)\n centroid2 = points2.mean(axis=0)\n\n if translation_only:\n rotation = np.eye(2)\n translation = centroid2 - centroid1\n\n else:\n centered_points1 = points1 - centroid1\n centered_points2 = points2 - centroid2\n\n sigma = centered_points2.T @ centered_points1\n U, _, Vt = np.linalg.svd(sigma)\n\n rotation = U @ Vt\n translation = -rotation @ centroid1 + centroid2\n\n H = np.eye(3)\n H[:2, :2] = rotation\n H[:2, 2] = translation\n return H",
"def transform(self,X):\n X=np.array(X)\n if(X.ndim==1):\n return self.transform_1d(X) \n elif(X.ndim==2):\n X_tran=self.transform_1d(X[0])\n for i in range(1,X.shape[0]):\n X_tran=np.vstack((X_tran,self.transform_1d(X[i])))\n return X_tran \n else:\n print(\"Warning: The input array is not Transformed since its greater than 2 dimension\")\n print(\"Its dimension is:{} required is 2\".format(X.ndim))\n return X",
"def compose_transform3(phi1,theta1,psi1,sx1,sy1,sz1,scale1,phi2,theta2,psi2,sx2,sy2,sz2,scale2):\n\n\tR1 = Transform({\"type\":\"spider\",\"phi\":float(phi1),\"theta\":float(theta1),\"psi\":float(psi1),\"tx\":float(sx1),\"ty\":float(sy1),\"tz\":float(sz1),\"mirror\":0,\"scale\":float(scale1)})\n\tR2 = Transform({\"type\":\"spider\",\"phi\":float(phi2),\"theta\":float(theta2),\"psi\":float(psi2),\"tx\":float(sx2),\"ty\":float(sy2),\"tz\":float(sz2),\"mirror\":0,\"scale\":float(scale2)})\n\tRcomp=R2*R1\n\td = Rcomp.get_params(\"spider\")\n\treturn d[\"phi\"],d[\"theta\"],d[\"psi\"],d[\"tx\"],d[\"ty\"],d[\"tz\"],d[\"scale\"]",
"def compose_transforms(*transforms):\n from functools import reduce\n\n for transform in transforms:\n vg.shape.check(locals(), \"transform\", (4, 4))\n\n if len(transforms) == 0:\n return np.eye(4)\n\n return reduce(np.dot, reversed(transforms))",
"def two_bs2x4_transform(t1, r1, t2, r2, input_state):\n size = len(input_state)\n output_state = np.zeros((size,) * 4, dtype=complex)\n for m in range(size):\n for n in range(size):\n\n for k in range(m + 1):\n for l in range(n + 1):\n # channels indexes\n ind1 = k\n ind2 = m - k\n ind3 = l\n ind4 = n - l\n coeff = input_state[m, n] * t1**(m - k) * (1j*r1)**k * t2**(n - l) * (1j*r2)**l * factorial(m) * factorial(n) / (factorial(k) * factorial(m - k) * factorial(l) * factorial(n - l))\n output_state[ind1, ind2, ind3, ind4] = output_state[ind1, ind2, ind3, ind4] + coeff\n\n return output_state",
"def transformation_matrix(self, s1, s2, s3, t1, t2, t3):\n\n s1 = np.array(s1)\n s2 = np.array(s2)\n s3 = np.array(s3)\n t1 = np.array(t1)\n t2 = np.array(t2)\n t3 = np.array(t3)\n\n Q = np.array(\n [\n [t2[0] - t1[0], t2[1] - t1[1], t2[2] - t1[2]],\n [t3[0] - t1[0], t3[1] - t1[1], t3[2] - t1[2]],\n ]\n )\n\n P = np.array([[s2[0] - s1[0], s2[1] - s1[1]], [s3[0] - s1[0], s3[1] - s1[1]]])\n\n try:\n # Invert the P matrix\n Pinv = inv(P)\n\n # Build the dot product\n T = np.dot(Pinv, Q)\n\n # Offset\n V0 = np.subtract(t2, np.transpose(s2[0:2]).dot(T))\n except Exception as e:\n self.log.error(\"An error occured during the transformation.\", exc_info=True)\n return -1, -1\n\n return T, V0",
"def pose_pair_construct(p1,n1,p2,n2):\n v1 = p2-p1; v1 /= np.linalg.norm(v1)\n R1 = tf_construct(n1,v1)\n return RigidTransform.from_Rt(R1, p1)",
"def get_perspective_transform(points_src: Tensor, points_dst: Tensor) -> Tensor:\n KORNIA_CHECK_SHAPE(points_src, [\"B\", \"4\", \"2\"])\n KORNIA_CHECK_SHAPE(points_dst, [\"B\", \"4\", \"2\"])\n KORNIA_CHECK(points_src.shape == points_dst.shape, \"Source data shape must match Destination data shape.\")\n KORNIA_CHECK(points_src.dtype == points_dst.dtype, \"Source data type must match Destination data type.\")\n\n # we build matrix A by using only 4 point correspondence. The linear\n # system is solved with the least square method, so here\n # we could even pass more correspondence\n\n # create the lhs tensor with shape # Bx8x8\n B: int = points_src.shape[0] # batch_size\n\n A = torch.empty(B, 8, 8, device=points_src.device, dtype=points_src.dtype)\n\n # we need to perform in batch\n _zeros = zeros(B, device=points_src.device, dtype=points_src.dtype)\n _ones = torch.ones(B, device=points_src.device, dtype=points_src.dtype)\n\n for i in range(4):\n x1, y1 = points_src[..., i, 0], points_src[..., i, 1] # Bx4\n x2, y2 = points_dst[..., i, 0], points_dst[..., i, 1] # Bx4\n\n A[:, 2 * i] = stack([x1, y1, _ones, _zeros, _zeros, _zeros, -x1 * x2, -y1 * x2], -1)\n A[:, 2 * i + 1] = stack([_zeros, _zeros, _zeros, x1, y1, _ones, -x1 * y2, -y1 * y2], -1)\n\n # the rhs tensor\n b = points_dst.view(-1, 8, 1)\n\n # solve the system Ax = b\n X: Tensor = _torch_solve_cast(A, b)\n\n # create variable to return the Bx3x3 transform\n M = torch.empty(B, 9, device=points_src.device, dtype=points_src.dtype)\n M[..., :8] = X[..., 0] # Bx8\n M[..., -1].fill_(1)\n\n return M.view(-1, 3, 3) # Bx3x3",
"def estimate_affine_matrix_3d_to_2d(X, x):\n assert x.shape[0] == X.shape[0]\n assert x.shape[0] >= 4\n X = X.T # (3, n)\n x = x.T # (2, n)\n n = x.shape[1]\n\n ###---- 1. normalization\n ## 2d points\n mean = np.mean(x, 1) # (2, )\n x = x - np.tile(mean[:, np.newaxis], [1, n]) # (2, n)\n average_norm = np.mean(np.sqrt(np.sum(x ** 2, 0)))\n scale = np.sqrt(2) / average_norm\n x = scale * x\n\n # T = [[scale, 0, -mean * scale], \n # [ 0, scale, -mean * scale], \n # [ 0, 0, 1 ]]\n T = np.zeros((3, 3), dtype=np.float32)\n T[0, 0] = T[1, 1] = scale\n T[:2, 2] = -mean * scale\n T[2, 2] = 1\n\n ## 3d points\n X_homo = np.vstack((X, np.ones((1, n)))) # (4, n)\n mean = np.mean(X, 1) # (3, )\n X = X - np.tile(mean[:, np.newaxis], [1, n]) # (3, n)\n m = X_homo[: 3, :] - X\n average_norm = np.mean(np.sqrt(np.sum(X ** 2, 0)))\n scale = np.sqrt(3) / average_norm\n X = scale * X\n\n U = np.zeros((4, 4), dtype=np.float32)\n U[0, 0] = U[1, 1] = U[2, 2] = scale\n U[: 3, 3] = -mean * scale\n U[3, 3] = 1\n\n ###---- 2. equations\n A = np.zeros((n * 2, 8), dtype=np.float32)\n X_homo = np.vstack((X, np.ones((1, n)))).T\n A[: n, : 4] = X_homo\n A[n: , 4: ] = X_homo\n b = np.reshape(x, [-1, 1]) # (2n, 1)\n\n ###---- 3.solution\n p_8 = np.linalg.pinv(A).dot(b) # (8, 2n) x (2n, 1) -> (8, 1)\n p = np.zeros((3, 4), dtype=np.float32)\n p[0, :] = p_8[:4, 0]\n p[1, :] = p_8[4:, 0]\n p[-1, -1] = 1\n\n ###---- 4. denormalization\n P_Affine = np.linalg.inv(T).dot(p.dot(U))\n return P_Affine",
"def test_direct_shape():\n\n n = 21\n x = np.ones((n, n))\n\n recon = abel.direct.direct_transform(x, direction='forward')\n assert recon.shape == (n, n) \n\n recon = abel.direct.direct_transform(x, direction='inverse')\n assert recon.shape == (n, n)",
"def transform_pc3d(pcl_c3d, Ts, seq_n, K_cur, batch_n):\n\n ## need to transform: flat.uvb, flat.feature['xyz'], flat.feature['normal']\n ## no need to transform grid features\n \n assert batch_n % seq_n == 0 # mode==0\n n_group = batch_n // seq_n\n\n ## get relative pose\n T, R, t, target_id = relative_T(Ts, seq_n, batch_n)\n\n ## get accumulative length\n nb = pcl_c3d.flat.nb\n acc_b = []\n acc = 0\n acc_b.append( acc )\n for ib in range(batch_n):\n acc = acc + nb[ib]\n acc_b.append( acc )\n\n ## process flat features\n flat_xyz = pcl_c3d.flat.feature['xyz'] # 1*C*NB\n flat_normal = pcl_c3d.flat.feature['normal']\n trans_normal_list = []\n trans_xyz_list = []\n uvb_list = []\n new_nb = []\n for ib in range(batch_n):\n ## xyz\n trans_xyz = torch.matmul(R[ib], flat_xyz[:, :, acc_b[ib]:acc_b[ib+1]]) + t[ib]\n mask_positive = trans_xyz[0, 2, :] > 0\n trans_xyz = trans_xyz[:, :, mask_positive]\n trans_xyz_list.append(trans_xyz)\n new_nb.append(trans_xyz.shape[2])\n\n ## normal\n trans_normal = torch.matmul(R[ib], flat_normal[:, :, acc_b[ib]:acc_b[ib+1]])\n trans_normal = trans_normal[:, :, mask_positive]\n trans_normal_list.append(trans_normal)\n\n ## project to uv, add b\n uvb = torch.matmul(K_cur[ib], trans_xyz)\n uvb[:, :2] = uvb[:, :2] / uvb[:, [2]] #- 1 , commented because in dataset_read.py there is a K_mat2py() function converting K from matlab to python coordinate\n uvb[:, 2, :] = target_id[ib]\n uvb_list.append(uvb)\n\n ## construct the new object\n tr_pcl_c3d = PCL_C3D_Flat()\n tr_pcl_c3d.feature['xyz'] = torch.cat(trans_xyz_list, dim=2)\n tr_pcl_c3d.feature['normal'] = torch.cat(trans_normal_list, dim=2)\n tr_pcl_c3d.uvb = torch.cat(uvb_list, dim=2)\n tr_pcl_c3d.nb = new_nb\n\n for feat_key in pcl_c3d.flat.feature:\n if feat_key not in ['xyz', 'normal']:\n tr_pcl_c3d.feature[feat_key] = pcl_c3d.flat.feature[feat_key]\n\n return tr_pcl_c3d",
"def create_transforms(ntiles, solution):\n rtransforms = []\n for i in range(ntiles):\n rtransforms.append(renderapi.transform.AffineModel(\n B0=solution[0][i],\n B1=solution[1][i]))\n return rtransforms",
"def fun(params,n_cameras,n_points,camera_indices,point_indices,points_3d , points_2d):\n camera_params = params[:n_cameras * 6].reshape((n_cameras, 6))\n # points_3d = points_3d.T\n # points_3d = params[n_cameras * 7:].reshape((n_points, 3))\n # print(point_indices)\n points_proj = project(points_3d[point_indices], camera_params[camera_indices])\n return (points_proj - points_2d).ravel()",
"def compute_T_matrix(coordinates, p, reference=[[1,0,0],[0,1,0],[0,0,1]], origin=[0,0,0]):\n e_b_x = coordinates[0]\n e_b_y = coordinates[1]\n e_b_z = coordinates[2]\n \n e_a_x = reference[0] \n e_a_y = reference[1]\n e_a_z = reference[2]\n \n # Compute the rotation matrix\n x_b_a = [np.dot(e_b_x, e_a_x), np.dot(e_b_x, e_a_y), np.dot(e_b_x, e_a_z)]\n y_b_a = [np.dot(e_b_y, e_a_x), np.dot(e_b_y, e_a_y), np.dot(e_b_y, e_a_z)]\n z_b_a = [np.dot(e_b_z, e_a_x), np.dot(e_b_z, e_a_y), np.dot(e_b_z, e_a_z)]\n \n R_b_a = [[x_b_a[0], y_b_a[0], z_b_a[0]],[x_b_a[1], y_b_a[1], z_b_a[1]],x_b_a[2], y_b_a[2], z_b_a[2]]\n \n # Compute the displacement \n displacement = [p[0]-origin[0], p[1]-origin[1], p[2]-origin[2]]\n \n # Make it into a transform matrix\n T_b_a = [[x_b_a[0], y_b_a[0], z_b_a[0], displacement[0]],\n [x_b_a[1], y_b_a[1], z_b_a[1], displacement[1]],\n [x_b_a[2], y_b_a[2], z_b_a[2], displacement[2]],\n [0, 0, 0, 1]]\n \n T_a_b = np.linalg.inv(T_b_a).tolist()\n \n return T_b_a, T_a_b",
"def compute_T_matrix(coordinates, p, reference=[[1,0,0],[0,1,0],[0,0,1]], origin=[0,0,0]):\n e_b_x = coordinates[0]\n e_b_y = coordinates[1]\n e_b_z = coordinates[2]\n \n e_a_x = reference[0] \n e_a_y = reference[1]\n e_a_z = reference[2]\n \n # Compute the rotation matrix\n x_b_a = [np.dot(e_b_x, e_a_x), np.dot(e_b_x, e_a_y), np.dot(e_b_x, e_a_z)]\n y_b_a = [np.dot(e_b_y, e_a_x), np.dot(e_b_y, e_a_y), np.dot(e_b_y, e_a_z)]\n z_b_a = [np.dot(e_b_z, e_a_x), np.dot(e_b_z, e_a_y), np.dot(e_b_z, e_a_z)]\n \n R_b_a = [[x_b_a[0], y_b_a[0], z_b_a[0]],[x_b_a[1], y_b_a[1], z_b_a[1]],x_b_a[2], y_b_a[2], z_b_a[2]]\n \n # Compute the displacement \n displacement = [p[0]-origin[0], p[1]-origin[1], p[2]-origin[2]]\n \n # Make it into a transform matrix\n T_b_a = [[x_b_a[0], y_b_a[0], z_b_a[0], displacement[0]],\n [x_b_a[1], y_b_a[1], z_b_a[1], displacement[1]],\n [x_b_a[2], y_b_a[2], z_b_a[2], displacement[2]],\n [0, 0, 0, 1]]\n \n T_a_b = np.linalg.inv(T_b_a).tolist()\n \n return T_b_a, T_a_b",
"def task_three():\n # Formula to calculate:\n # q2 = (z2 / z1) * (R + T * nt / d) * q1\n # where R - rotation\n # T - translation\n # nt - normal vertex of common plane of the 3d points\n # d - shift of the common plane\n # and (R + T * nt / d) required homography transform\n # defined up to constant\n # But in our case T == 0\n tetta = 30 * np.pi / 180\n H = np.array([[1, 0, 0],\n [0, np.cos(tetta), -np.sin(tetta)],\n [0, np.sin(tetta), np.cos(tetta)],\n ])\n print(\"Homography transformation:\\n\", H)",
"def apply_transformation(self, points):\n assert (points.shape[0] == 3)\n n = points.shape[1]\n points_ = np.vstack((points, np.ones((1, n))))\n points_trans_ = np.matmul(self.pose_mat, points_)\n points_transformed = np.true_divide(points_trans_[:3, :], points_trans_[[-1], :])\n return points_transformed",
"def transform(self, src):\n T, feature_dim = src.shape[0], self.Y_static_dim*3\n\n if feature_dim == self.Y_static_dim:\n return super(GMM_M, self).transform(src)\n\n # A suboptimum mixture sequence (eq.37)\n optimum_mix = self.px.predict(src)\n\n # Compute E eq.(40)\n E = np.empty((T, feature_dim))\n for t in range(T):\n m = optimum_mix[t] # estimated mixture index at time t\n xx = np.linalg.solve(self.covarXX[m], src[t] - self.src_means[m])\n #print(xx.shape,self.tgt_means[m].shape,self.covarYX[m].shape)\n # Eq. (22)\n E[t] = self.tgt_means[m] +np.dot(self.covarYX[m], xx)\n\n # Compute D eq.(23)\n # Approximated variances with diagonals so that we can do MLPG\n # efficiently in dimention-wise manner\n #print(E.shape)\n D = np.empty((T, feature_dim))\n #print(D.shape)\n for t in range(T):\n m = optimum_mix[t]\n # Eq. (23), with approximating covariances as diagonals\n #D[t] = np.diag(self.covarYY[m]) - np.diag(self.covarYX[m]) / \\\n # np.diag(self.covarXX[m]) * np.diag(self.covarXY[m])\n\n # Exact Inference\n dd = self.covarYY[m] - np.linalg.multi_dot([self.covarYX[m], np.linalg.pinv(self.covarXX[m]), self.covarXY[m]])\n #print(dd.shape)\n D[t] = np.diag(dd)\n\n # Once we have mean and variance over frames, then we can do MLPG\n return E, D, self.windows#mlpg(E, D, self.windows)",
"def task_two_test():\n # First test\n # Create points list for task two\n points = np.random.rand(2, 4)\n # Translate and rotate it somehow\n tetta = np.random.uniform(low=0, high=2 * np.pi, size=(1,))[0]\n R = np.array([[np.cos(tetta), -np.sin(tetta)],\n [np.sin(tetta), np.cos(tetta)]])\n T = np.random.uniform(low=0, high=3, size=(2, 1))\n H = np.append(R, T, axis=1)\n points_translated = np.dot(H, np.append(points, np.ones((1, 4)), axis=0))\n print(\"Points 2d translation + rotation:\\n\", H)\n points_list = np.array(list(zip(points.T, points_translated.T)))\n task_two(points_list)\n # Second test\n H = np.random.rand(3, 3)\n points_translated = np.dot(H, np.append(points, np.ones((1, 4)), axis=0))\n # Normalize it\n points = np.random.rand(3, 4)\n tetta = np.random.uniform(low=0, high=2 * np.pi, size=(1,))[0]\n R = np.array([[np.cos(tetta), -np.sin(tetta), 0],\n [np.sin(tetta), np.cos(tetta), 0],\n [0, 0, 1]])\n T = np.random.uniform(low=0, high=3, size=(3, 1))\n H = np.append(R, T, axis=1)\n print(\"Points 3d translation + rotation:\\n\", H)\n points_translated = np.dot(H, np.append(points, np.ones((1, 4)), axis=0))\n # Convert to p2\n norm = lambda x: [x[0] / x[2], x[1] / x[2]]\n points = np.array([norm(x) for x in points.T]).T\n points_translated = np.array([norm(x) for x in points_translated.T]).T\n points_list = np.array(list(zip(points.T, points_translated.T)))\n task_two(points_list)",
"def test_transform_3d(transform, alpha = 1):\r\n points = 20*[None]\r\n for i in range(20):\r\n x = random.randrange(-40, 41)\r\n y = random.randrange(-40, 41)\r\n z = random.randrange(-40, 41)\r\n points[i] = vec3(x, y, z)\r\n tr_x = random.randrange(-40, 41)\r\n tr_y = random.randrange(-40, 41)\r\n tr_z = random.randrange(-40, 41)\r\n mapping = [(p, vec3(p.x + tr_x, p.y + tr_y, p.z + tr_z)) for p in points]\r\n print(\"Translation\")\r\n print(\"Input\".ljust(30), \"Translation\".ljust(30), \"Transformation\".ljust(30))\r\n for i in range(20):\r\n x = random.randrange(-40, 41)\r\n y = random.randrange(-40, 41)\r\n z = random.randrange(-40, 41)\r\n v_in = vec3(x, y, z)\r\n v_translate = vec3(x + tr_x, y + tr_y, z + tr_z)\r\n v_transform = transform(v_in, mapping, alpha)\r\n print(str(v_in).ljust(30), str(v_translate.str_repr(4)).ljust(30), str(v_transform.str_repr(4)).ljust(30))\r\n print()\r\n th_x = 2*math.pi*random.random()\r\n th_y = 2*math.pi*random.random()\r\n th_z = 2*math.pi*random.random()\r\n points_rot = [vec3(p.x, p.y*math.cos(th_x) - p.z*math.sin(th_x), p.y*math.sin(th_x) + p.z*math.cos(th_x)) for p in points]\r\n points_rot = [vec3(p.z*math.sin(th_y) + p.x*math.cos(th_y), p.y, p.z*math.cos(th_y) - p.x*math.sin(th_y)) for p in points_rot]\r\n points_rot = [vec3(p.x*math.cos(th_z) - p.y*math.sin(th_z), p.x*math.sin(th_z) + p.y*math.cos(th_z), p.z) for p in points_rot]\r\n mapping = [(points[i], points_rot[i]) for i in range(len(points))]\r\n print(\"Rotation\")\r\n print(\"Input\".ljust(30), \"Rotation\".ljust(30), \"Transformation\".ljust(30))\r\n for i in range(20):\r\n x = random.randrange(-40, 41)\r\n y = random.randrange(-40, 41)\r\n z = random.randrange(-40, 41)\r\n v_in = vec3(x, y, z)\r\n v_rotate = vec3(v_in.x, v_in.y*math.cos(th_x) - v_in.z*math.sin(th_x), v_in.y*math.sin(th_x) + v_in.z*math.cos(th_x))\r\n v_rotate = vec3(v_rotate.z*math.sin(th_y) + v_rotate.x*math.cos(th_y), v_rotate.y, v_rotate.z*math.cos(th_y) - v_rotate.x*math.sin(th_y))\r\n v_rotate = vec3(v_rotate.x*math.cos(th_z) - v_rotate.y*math.sin(th_z), v_rotate.x*math.sin(th_z) + v_rotate.y*math.cos(th_z), v_rotate.z)\r\n v_transform = transform(v_in, mapping, alpha)\r\n print(str(v_in).ljust(30), str(v_rotate.str_repr(4)).ljust(30), str(v_transform.str_repr(4)).ljust(30))\r\n print()\r\n k = math.exp(2*random.random() - 1)\r\n mapping = [(p, vec3(k*p.x, k*p.y, k*p.z)) for p in points]\r\n print(\"Uniform scaling\")\r\n print(\"Input\".ljust(30), \"Scaling\".ljust(30), \"Transformation\".ljust(30))\r\n for i in range(20):\r\n x = random.randrange(-40, 41)\r\n y = random.randrange(-40, 41)\r\n z = random.randrange(-40, 41)\r\n v_in = vec3(x, y, z)\r\n v_scale = vec3(k*x, k*y, k*z)\r\n v_transform = transform(v_in, mapping, alpha)\r\n print(str(v_in).ljust(30), str(v_scale.str_repr(4)).ljust(30), str(v_transform.str_repr(4)).ljust(30))\r\n print()\r\n k_x = math.exp(2*random.random() - 1)\r\n k_y = 3*random.random() + 1\r\n k_z = 3*random.random() + 1\r\n if (k_x >= k_y + math.exp(-1)): k_y = k_x - k_y\r\n else: k_y = k_x + k_y\r\n if ((k_x + k_y)/2 >= k_z + math.exp(-1)): k_z = (k_x + k_y)/2 - k_z\r\n else: k_z = (k_x + k_y)/2 + k_z\r\n mapping = [(p, vec3(k_x*p.x, k_y*p.y, k_z*p.z)) for p in points]\r\n print(\"Non-uniform scaling\")\r\n print(\"Input\".ljust(30), \"Scaling\".ljust(30), \"Transformation\".ljust(30))\r\n for i in range(20):\r\n x = random.randrange(-40, 41)\r\n y = random.randrange(-40, 41)\r\n z = random.randrange(-40, 41)\r\n v_in = vec3(x, y, z)\r\n v_scale = vec3(k_x*x, k_y*y, 
k_z*z)\r\n v_transform = transform(v_in, mapping, alpha)\r\n print(str(v_in).ljust(30), str(v_scale.str_repr(4)).ljust(30), str(v_transform.str_repr(4)).ljust(30))\r\n print()",
"def transform(self, images):\n return np.array([self.transform_single(i) for i in images])",
"def _compute_targets(ex_rois, gt_rois, labels, front_2_1_points, front_2_2_points, front_center_points, back_2_1_points, back_2_2_points, back_center_points, center_points):\n # Inputs are tensor\n\n assert ex_rois.shape[0] == gt_rois.shape[0]\n assert ex_rois.shape[1] == 4\n assert gt_rois.shape[1] == 4\n # print(gt_rois)\n # fang[-1]\n\n targets = bbox_transform(ex_rois, gt_rois)\n\n front_2_1_points_targets = points_transform(ex_rois, front_2_1_points)\n front_2_2_points_targets = points_transform(ex_rois, front_2_2_points)\n front_center_targets = center_transform(ex_rois, front_center_points)\n\n back_2_1_points_targets = points_transform(ex_rois, back_2_1_points)\n back_2_2_points_targets = points_transform(ex_rois, back_2_2_points)\n back_center_targets = center_transform(ex_rois, back_center_points)\n\n center_targets = center_transform(ex_rois, center_points)\n\n # print(targets)\n if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:\n # Optionally normalize targets by a precomputed mean and stdev\n targets = ((targets - targets.new(cfg.TRAIN.BBOX_NORMALIZE_MEANS))\n / targets.new(cfg.TRAIN.BBOX_NORMALIZE_STDS))\n\n front_2_1_points_targets = ((front_2_1_points_targets - front_2_1_points_targets.new(cfg.TRAIN.BBOX_NORMALIZE_MEANS))\n / front_2_1_points_targets.new(cfg.TRAIN.BBOX_NORMALIZE_STDS))\n front_2_2_points_targets = ((front_2_2_points_targets - front_2_2_points_targets.new(cfg.TRAIN.BBOX_NORMALIZE_MEANS))\n / front_2_2_points_targets.new(cfg.TRAIN.BBOX_NORMALIZE_STDS))\n front_center_targets = ((front_center_targets - front_center_targets.new(cfg.TRAIN.CENTER_NORMALIZE_MEANS))\n / front_center_targets.new(cfg.TRAIN.CENTER_NORMALIZE_STDS))\n \n back_2_1_points_targets = ((back_2_1_points_targets - back_2_1_points_targets.new(cfg.TRAIN.BBOX_NORMALIZE_MEANS))\n / back_2_1_points_targets.new(cfg.TRAIN.BBOX_NORMALIZE_STDS))\n back_2_2_points_targets = ((back_2_2_points_targets - back_2_2_points_targets.new(cfg.TRAIN.BBOX_NORMALIZE_MEANS))\n / back_2_2_points_targets.new(cfg.TRAIN.BBOX_NORMALIZE_STDS))\n back_center_targets = ((back_center_targets - back_center_targets.new(cfg.TRAIN.CENTER_NORMALIZE_MEANS))\n / back_center_targets.new(cfg.TRAIN.CENTER_NORMALIZE_STDS))\n\n center_targets = ((center_targets - center_targets.new(cfg.TRAIN.CENTER_NORMALIZE_MEANS))\n / center_targets.new(cfg.TRAIN.CENTER_NORMALIZE_STDS))\n \n return torch.cat(\n [labels.unsqueeze(1), targets], 1), front_2_1_points_targets, front_2_2_points_targets, front_center_targets, \\\n back_2_1_points_targets, back_2_2_points_targets, back_center_targets, center_targets",
"def batch_rigid_transform(rot_mats, joints, parents, dtype=torch.float32):\n joints = torch.unsqueeze(joints, dim=-1)\n rel_joints = joints.clone()\n rel_joints[:, 1:] -= joints[:, parents[1:]]\n transforms_mat = transform_mat(rot_mats.view(-1, 3, 3), rel_joints.reshape(-1, 3, 1)).reshape(-1, joints.shape[1], 4, 4)\n transform_chain = [transforms_mat[:, 0]]\n for i in range(1, parents.shape[0]):\n curr_res = torch.matmul(transform_chain[parents[i]], transforms_mat[:, i])\n transform_chain.append(curr_res)\n transforms = torch.stack(transform_chain, dim=1)\n posed_joints = transforms[:, :, :3, 3]\n posed_joints = transforms[:, :, :3, 3]\n joints_homogen = F.pad(joints, [0, 0, 0, 1])\n rel_transforms = transforms - F.pad(torch.matmul(transforms, joints_homogen), [3, 0, 0, 0, 0, 0, 0, 0])\n return posed_joints, rel_transforms",
"def batch_rigid_transform(rot_mats, joints, parents, dtype=torch.float32):\n joints = torch.unsqueeze(joints, dim=-1)\n rel_joints = joints.clone()\n rel_joints[:, 1:] -= joints[:, parents[1:]]\n transforms_mat = transform_mat(rot_mats.reshape(-1, 3, 3), rel_joints.reshape(-1, 3, 1)).reshape(-1, joints.shape[1], 4, 4)\n transform_chain = [transforms_mat[:, 0]]\n for i in range(1, parents.shape[0]):\n curr_res = torch.matmul(transform_chain[parents[i]], transforms_mat[:, i])\n transform_chain.append(curr_res)\n transforms = torch.stack(transform_chain, dim=1)\n posed_joints = transforms[:, :, :3, 3]\n joints_homogen = F.pad(joints, [0, 0, 0, 1])\n rel_transforms = transforms - F.pad(torch.matmul(transforms, joints_homogen), [3, 0, 0, 0, 0, 0, 0, 0])\n return posed_joints, rel_transforms",
"def test_transform_multiples(self):\n \n t1_ref = 6.28318530717958647692528676655867\n t4_ref = t1_ref / 4\n\n # test for all kinds of curvatures K\n for k in (0, 1, -1, 1/11, -1/11, 11, -2):\n\n s = space(curvature=k)\n\n # use a small enough magnitude to not break math for very negative K\n magic = 0.33377777373737737777\n\n o = s.make_origin(3)\n p = s.make_point((2/11, 6/11, 9/11), magic)\n q = s.make_point((3/7, 6/7, 2/7), magic)\n\n f, g, i = map(space_point_transform, (p, q, o))\n\n def check_transform_eq(t1, t2, invert=False, skip=None):\n if skip:return\n for ref in (\n s.make_point((9/17, 8/17, 12/17), magic),\n s.make_point((0, 3/5, 4/5), magic)\n ):\n self.assertTrue(invert ^ point_isclose(\n t1(ref),\n t2(ref)\n ))\n\n def skip_test(n):\n \"\"\"\n Should we skip this case?\n \"\"\"\n return k > 0 and magic * n * k**0.5 >= t4_ref\n\n # check f^0 = I\n check_transform_eq(f * 0, i)\n check_transform_eq(g * 0, i)\n check_transform_eq(i * 0, i)\n\n # check f^1 = f\n check_transform_eq(f * 1, f)\n check_transform_eq(g * 1, g)\n check_transform_eq(i * 1, i)\n\n # check f^-1 is correct inverse of f\n check_transform_eq(f * -1,\n space_point_transform(p * -1))\n check_transform_eq(g * -1,\n space_point_transform(q * -1))\n\n # check f^n is correct iterated f\n check_transform_eq(f * 3,\n space_point_transform(p * 3),\n skip = skip_test(3))\n check_transform_eq(g * 5,\n space_point_transform(q * 5),\n skip = skip_test(5))\n check_transform_eq(f * 19,\n space_point_transform(p * 19),\n skip = skip_test(19))\n check_transform_eq(g * 21,\n space_point_transform(q * 21),\n skip = skip_test(21))\n\n # check f^(1/n) f is correct fractional f\n hf = f * 0.5\n check_transform_eq(hf + hf, f)\n hg = g * 0.25\n check_transform_eq(hg * 4, g)",
"def affine_matrix_from_points(v0, v1, shear=True, scale=True, usesvd=True):\n\n\n\n v0 = np.array(v0, dtype=np.float64, copy=True)\n v1 = np.array(v1, dtype=np.float64, copy=True)\n\n ndims = v0.shape[0]\n if ndims < 2 or v0.shape[1] < ndims or v0.shape != v1.shape:\n print(ndims < 2)\n print(v0.shape[1] < ndims)\n print(v0.shape != v1.shape)\n\n print(ndims)\n\n raise ValueError(\"input arrays are of wrong shape or type\")\n\n # move centroids to origin\n t0 = -np.mean(v0, axis=1)\n M0 = np.identity(ndims+1)\n M0[:ndims, ndims] = t0\n v0 += t0.reshape(ndims, 1)\n t1 = -np.mean(v1, axis=1)\n M1 = np.identity(ndims+1)\n M1[:ndims, ndims] = t1\n v1 += t1.reshape(ndims, 1)\n\n if shear:\n # Affine transformation\n A = np.concatenate((v0, v1), axis=0)\n u, s, vh = np.linalg.svd(A.T)\n vh = vh[:ndims].T\n B = vh[:ndims]\n C = vh[ndims:2*ndims]\n t = np.dot(C, np.linalg.pinv(B))\n t = np.concatenate((t, np.zeros((ndims, 1))), axis=1)\n M = np.vstack((t, ((0.0,)*ndims) + (1.0,)))\n elif usesvd or ndims != 3:\n # Rigid transformation via SVD of covariance matrix\n u, s, vh = np.linalg.svd(np.dot(v1, v0.T))\n # rotation matrix from SVD orthonormal bases\n R = np.dot(u, vh)\n if np.linalg.det(R) < 0.0:\n # R does not constitute right handed system\n R -= np.outer(u[:, ndims-1], vh[ndims-1, :]*2.0)\n s[-1] *= -1.0\n # homogeneous transformation matrix\n M = np.identity(ndims+1)\n M[:ndims, :ndims] = R\n else:\n # Rigid transformation matrix via quaternion\n # compute symmetric matrix N\n xx, yy, zz = np.sum(v0 * v1, axis=1)\n xy, yz, zx = np.sum(v0 * np.roll(v1, -1, axis=0), axis=1)\n xz, yx, zy = np.sum(v0 * np.roll(v1, -2, axis=0), axis=1)\n N = [[xx+yy+zz, 0.0, 0.0, 0.0],\n [yz-zy, xx-yy-zz, 0.0, 0.0],\n [zx-xz, xy+yx, yy-xx-zz, 0.0],\n [xy-yx, zx+xz, yz+zy, zz-xx-yy]]\n # quaternion: eigenvector corresponding to most positive eigenvalue\n w, V = np.linalg.eigh(N)\n q = V[:, np.argmax(w)]\n q /= vector_norm(q) # unit quaternion\n # homogeneous transformation matrix\n M = quaternion_matrix(q)\n\n if scale and not shear:\n # Affine transformation; scale is ratio of RMS deviations from centroid\n v0 *= v0\n v1 *= v1\n M[:ndims, :ndims] *= math.sqrt(np.sum(v1) / np.sum(v0))\n\n # move centroids back\n M = np.dot(np.linalg.inv(M1), np.dot(M, M0))\n M /= M[ndims, ndims]\n return M",
"def compute_geometric_transform(p1,p2,best_matches):\n # How many good matches are there?\n num_bad_matches = sum([x == None for x in best_matches])\n num_good_matches = p1.shape[0]-num_bad_matches\n\n # Prepare data for fitting\n A = np.ones((3, num_good_matches))\n B = np.ones((3, num_good_matches))\n count = 0\n for i in range(p1.shape[0]):\n if best_matches[i] != None:\n A[0,count] = p1[i,0]\n A[1,count] = p1[i,1]\n A[2,count] = p1[i,2]\n B[0,count] = p2[best_matches[i],0]\n B[1,count] = p2[best_matches[i],1]\n B[2,count] = p2[best_matches[i],2]\n count += 1\n A = A.T\n B = B.T\n\n model = GeometricTransform(bScale=False)\n data = np.hstack((A,B))\n\n # Need at least seven points for a good transform fit...\n if (num_good_matches < 7):\n print 'WARNING: not enough matches to compute a geometric transform.'\n return 1, np.identity(3), np.array([0,0,0])\n elif (num_good_matches < 20):\n print 'WARNING: not enough matches to compute a robust fit.'\n return model.fit(data)\n else:\n import lflib.calibration.ransac as ransac\n try:\n bestdata = ransac.ransac(data,model,\n 10, #rand samp size (num required to fit)\n 30, #num iterations\n 4.0, #transformed dist required to be considered inlier,\n 15, #min inliers to be considered \n debug=False,return_all=False)\n return model.fit(bestdata)\n except ValueError:\n return model.fit(data)",
"def test_3d_tranpose(): \n dic,data = ng.pipe.read_lowmem(\"common_data/3d_pipe/ft/test%03d.ft3\")\n fdic,fdata = ng.pipe.read(\"common_data/3d_pipe/ft/test%03d.ft3\")\n\n assert_array_equal(data.transpose()[0,1,2],fdata.transpose()[0,1,2])\n assert_array_equal(data.transpose((2,0,1))[0,1,2],\n fdata.transpose((2,0,1))[0,1,2])\n assert_array_equal(data.swapaxes(0,1)[0,1,2],fdata.swapaxes(0,1)[0,1,2])\n assert_array_equal(data.swapaxes(2,0)[0,1,2],fdata.swapaxes(2,0)[0,1,2])",
"def get_projective_transform(center: Tensor, angles: Tensor, scales: Tensor) -> Tensor:\n if not (len(center.shape) == 2 and center.shape[-1] == 3):\n raise AssertionError(center.shape)\n if not (len(angles.shape) == 2 and angles.shape[-1] == 3):\n raise AssertionError(angles.shape)\n if center.device != angles.device:\n raise AssertionError(center.device, angles.device)\n if center.dtype != angles.dtype:\n raise AssertionError(center.dtype, angles.dtype)\n\n # create rotation matrix\n axis_angle_rad: Tensor = deg2rad(angles)\n rmat: Tensor = axis_angle_to_rotation_matrix(axis_angle_rad) # Bx3x3\n scaling_matrix: Tensor = eye_like(3, rmat)\n scaling_matrix = scaling_matrix * scales.unsqueeze(dim=1)\n rmat = rmat @ scaling_matrix.to(rmat)\n\n # define matrix to move forth and back to origin\n from_origin_mat = eye_like(4, rmat, shared_memory=False) # Bx4x4\n from_origin_mat[..., :3, -1] += center\n\n to_origin_mat = from_origin_mat.clone()\n to_origin_mat = _torch_inverse_cast(from_origin_mat)\n\n # append translation with zeros\n proj_mat = projection_from_Rt(rmat, torch.zeros_like(center)[..., None]) # Bx3x4\n\n # chain 4x4 transforms\n proj_mat = convert_affinematrix_to_homography3d(proj_mat) # Bx4x4\n proj_mat = from_origin_mat @ proj_mat @ to_origin_mat\n\n return proj_mat[..., :3, :] # Bx3x4",
"def __compose_transformation(self):\n s = self.scale\n rotR = self.rotation\n t = self.translation\n T = np.eye(4)\n T[0:3, 3] = t\n R = np.eye(4)\n R[0:3, 0:3] = rotR\n M = T.dot(R)\n if s == 1:\n M = T.dot(R)\n else:\n S = np.eye(4)\n S[0:3, 0:3] = np.diag([s, s, s])\n M = T.dot(R).dot(S)\n return M",
"def test_transform(self):\n pt = np.array([1.0, 2.0, 3.0])\n tr = pose.Pose()\n tr.position = onp.array([4.0, 5.0, 6.0])\n pt2 = tr.transform(pt)\n self.assertLess(np.linalg.norm(pt2 - np.array([5.0, 7.0, 9.0])), 1e-6)",
"def build_transform(self):\n if self.training:\n all_trans = [trans.BEVRandomHorizontalFlip(), trans.BEVToTensor()]\n else:\n all_trans = [trans.BEVToTensor()]\n\n self.transform = trans.Compose(all_trans)\n return self.transform",
"def affine_transform(trans_mat, p0):\r\n n_data, n_dim = np.shape(p0)\r\n p0 = np.hstack((p0, np.ones((n_data, 1))))\r\n #return np.transpose(np.dot(np.transpose(trans_mat), np.transpose(p0)))\r\n return np.dot(p0, trans_mat)",
"def transformation(T,M):\n\n n = len(M) # Nb de lignes\n p = len(M[0]) # Nb de colonnes\n\n x1, y1 = vecteur_image(T,0,n)\n x2, y2 = vecteur_image(T,p,n)\n x3, y3 = vecteur_image(T,p,0)\n\n xmin = round(min(0,x1,x2,x3))\n xmax = round(max(0,x1,x2,x3))\n ymin = round(min(0,y1,y2,y3))\n ymax = round(max(0,y1,y2,y3))\n\n pp = xmax-xmin\n nn = ymax-ymin\n\n Tinv = inverse_matrice(T)\n\n N = [[0 for jj in range(pp)] for ii in range(nn)]\n for ii in range(nn):\n for jj in range(pp):\n j, i = vecteur_image(Tinv,jj+xmin,ii+ymin)\n j, i = floor(j), floor(i)\n if (0 <= i < n) and (0 <= j < p):\n N[ii][jj] = M[i][j] \n else:\n N[ii][jj] = 0\n\n return N",
"def apply_T(T, points):\n flipped = False\n if points.shape[0] != 3:\n assert points.shape[1] == 3, \"Points must be 3xN or Nx3\"\n points = points.T\n flipped = True\n points_h = np.vstack((points, np.ones_like(points[0, :])))\n points_transformed_h = np.dot(T, points_h)\n points_transformed = points_transformed_h[:-1]\n if flipped:\n return points_transformed.T\n return points_transformed",
"def transform(i, j, k):\n return i * N * N + j * N + k + 1",
"def forward(self, inputs):\n #NOTE: Already merge axis 0(batches) and axis 1(channels) before extracting feature phase,\n # please refer to paddlevideo/modeling/framework/recognizers/recognizer2d.py#L27\n #y = paddle.reshape(\n # inputs, [-1, inputs.shape[2], inputs.shape[3], inputs.shape[4]])\n\n ####ResNet-C: use three 3x3 conv, replace, one 7x7 conv\n y = self.conv1_1(inputs)\n y = self.conv1_2(y)\n y = self.conv1_3(y)\n\n y = self.pool2D_max(y)\n for block in self.block_list:\n y = block(y)\n return y",
"def TransformPoint(transform, x, y, z):\n result = np.matmul(transform, np.array([x, y, z, 1.]))\n return result[0], result[1], result[2]",
"def MultiTransformix(in_im, out_dir, tps):\n\n\t#Ensure the input image is pathlib object\n\tin_im = Path(in_im)\n\t#Ensure the input tps are pathlib objects\n\ttps = [Path(t) for t in tps]\n\t#Ensure the out directory is a pathlib object\n\tout_dir = Path(out_dir)\n\n\t#Create transformix command\n\tcommand = \"transformix\"\n\n\t#Run the CreateCompositeTransforms(tps, out_dir) function\n\ttrans_calls,init_trans_list = CreateCompositeTransforms(tps, out_dir)\n\tprint(\"Created transform parameters length:\" + str(len(trans_calls)))\n\n\t#Create temporary directory in the out_dir\n\twith tempfile.TemporaryDirectory(dir=out_dir) as nestdirname:\n\t\t#Run the first transformation\n\t\ttmp_command = command +' -out ' + str(nestdirname)\n\t\ttmp_command = tmp_command + ' -tp ' + str(trans_calls[0])\n\t\ttmp_command = tmp_command + ' -in ' + str(in_im)\n\n\t\t#Iterate through each transform parameter file and run transformix\n\t\tRunTransformix(tmp_command)\n\n\t\t#rint(\"Getting first results\")\n\n\t\t#Get a result name for the output of transformix (assumes nifti for now)\n\t\tres_name = Path(os.path.join(str(nestdirname),\"result\"+in_im.suffix))\n\n\t\t#print(\"Got first results\")\n\n\t\t#Check to see if the list is larger than 2 (if no then only take the last parameter file)\n\t\tif len(trans_calls) > 1:\n\t\t\t#Print update\n\t\t\t#print(\"Number of transform calls:\" + str(len(trans_calls)))\n\n\t\t\t#Now iterate through all the other transform parameter files and run transformix\n\t\t\tfor t in range(1,len(trans_calls)):\n\t\t\t\t#print(\"Running transform call:\" + str(t))\n\n\t\t\t\t#Create the temporary transformix command\n\t\t\t\ttmp_command = command\n\n\t\t\t\t#Check to see if this is the last iteration\n\t\t\t\tif t == (len(trans_calls)-1):\n\t\t\t\t\t#Add the result name\n\t\t\t\t\ttmp_command = tmp_command + ' -in ' + str(res_name)\n\t\t\t\t\t#Make the output directory the final out_dir\n\t\t\t\t\ttmp_command = tmp_command +' -out ' + str(out_dir)\n\t\t\t\t\t#Update result name\n\t\t\t\t\tres_name = Path(os.path.join(str(out_dir),\"result\"+in_im.suffix))\n\n\t\t\t\t#Otherwise, leave it as the tmp directory\n\t\t\t\telse:\n\t\t\t\t\t#Add the result name to the command\n\t\t\t\t\ttmp_command = tmp_command + ' -in ' + str(res_name)\n\t\t\t\t\t#Run transformixs\n\t\t\t\t\ttmp_command = tmp_command +' -out ' + str(nestdirname)\n\t\t\t\t\t#Get a result name for the output of transformix (assumes nifti for now)\n\t\t\t\t\tres_name = Path(os.path.join(str(nestdirname),\"result\"+in_im.suffix))\n\n\t\t\t\t#Add the transform parameters\n\t\t\t\ttmp_command = tmp_command + ' -tp ' + str(trans_calls[t])\n\t\t\t\t#Iterate through each transform parameter file and run transformix\n\t\t\t\tRunTransformix(tmp_command)\n\n\n\t\telse:\n\t\t\t#Just change the results to the output directory\n\t\t\tnew_name = Path(os.path.join(str(out_dir),\"result\"+in_im.suffix))\n\t\t\t#Get the resulting image to rename (so we don't overwrite results)\n\t\t\tres_name.rename(new_name)\n\t\t\t#Set back the res name\n\t\t\tres_name = new_name\n\n\t#Return the result name\n\treturn res_name",
"def imageTransform(self):\n ims = self.imageShape\n acs = self.activeShape\n dx = self.colVector\n dy = self.rowVector\n\n p0 = self.activeOrigin\n p1 = p0 + acs[2] * dx\n p2 = p0 + acs[1] * dy\n\n # print p0, p1, p2\n # print acs, dx, dy\n\n localPts = list(map(pg.Vector, [[0,0], [ims[2],0], [0,ims[1]], [0,0,1]])) # w and h of data of image in pixels.\n globalPts = list(map(pg.Vector, [p0, p1, p2, [0,0,1]]))\n m = pg.solve3DTransform(localPts, globalPts)\n m[:,2] = m[:,3]\n m[2] = m[3]\n m[2,2] = 1\n tr = Qt.QTransform(*m[:3,:3].transpose().reshape(9))\n return tr",
"def _apply_array_spin1234(self, h1e: 'Nparray', h2e: 'Nparray',\n h3e: 'Nparray', h4e: 'Nparray') -> 'Nparray':\n norb = self.norb()\n tno = 2 * norb\n assert h4e.shape == (tno, tno, tno, tno, tno, tno, tno, tno)\n lena = self.lena()\n lenb = self.lenb()\n\n nh1e = numpy.copy(h1e)\n nh2e = numpy.copy(h2e)\n nh3e = numpy.copy(h3e)\n\n if fqe.settings.use_accelerated_code:\n _make_nh123(norb, h4e, nh1e, nh2e, nh3e)\n else:\n for i in range(norb * 2):\n for j in range(norb * 2):\n for k in range(norb * 2):\n nh1e[:, :] -= h4e[:, j, i, k, j, i, k, :]\n for l in range(norb * 2):\n nh2e[i, j, :, :] += (h4e[j, l, i, k, l, k, :, :] +\n h4e[i, j, l, k, l, k, :, :] +\n h4e[i, l, k, j, l, k, :, :] +\n h4e[j, i, k, l, l, k, :, :] +\n h4e[i, k, j, l, k, :, l, :] +\n h4e[j, i, k, l, k, :, l, :] +\n h4e[i, j, k, l, :, k, l, :])\n nh3e[i, j, k, :, :, :] += (\n h4e[k, i, j, l, l, :, :, :] +\n h4e[j, i, l, k, l, :, :, :] +\n h4e[i, l, j, k, l, :, :, :] +\n h4e[i, k, j, l, :, l, :, :] +\n h4e[i, j, l, k, :, l, :, :] +\n h4e[i, j, k, l, :, :, l, :])\n\n (dveca, dvecb) = self.calculate_dvec_spin()\n evecaa = numpy.zeros((norb, norb, norb, norb, lena, lenb),\n dtype=self._dtype)\n evecab = numpy.zeros((norb, norb, norb, norb, lena, lenb),\n dtype=self._dtype)\n evecba = numpy.zeros((norb, norb, norb, norb, lena, lenb),\n dtype=self._dtype)\n evecbb = numpy.zeros((norb, norb, norb, norb, lena, lenb),\n dtype=self._dtype)\n for i in range(norb):\n for j in range(norb):\n tmp = self._calculate_dvec_spin_with_coeff(dveca[i, j, :, :])\n evecaa[:, :, i, j, :, :] = tmp[0][:, :, :, :]\n evecba[:, :, i, j, :, :] = tmp[1][:, :, :, :]\n\n tmp = self._calculate_dvec_spin_with_coeff(dvecb[i, j, :, :])\n evecab[:, :, i, j, :, :] = tmp[0][:, :, :, :]\n evecbb[:, :, i, j, :, :] = tmp[1][:, :, :, :]\n\n out = self._apply_array_spin123(nh1e, nh2e, nh3e, (dveca, dvecb),\n (evecaa, evecab, evecba, evecbb))\n\n def ncon(A, B):\n \"\"\"Tensor contraction and transposition corresponding with\n einsum 'ikmojlnp,mnopxy->ijklxy'\n \"\"\"\n return numpy.transpose(numpy.tensordot(A,\n B,\n axes=((2, 6, 3, 7), (0, 1, 2,\n 3))),\n axes=(0, 2, 1, 3, 4, 5))\n\n n = norb # shorter\n nevecaa = ncon(h4e[:n, :n, :n, :n, :n, :n, :n, :n], evecaa) \\\n + 2.0 * ncon(h4e[:n, :n, :n, n:, :n, :n, :n, n:], evecab) \\\n + ncon(h4e[:n, :n, n:, n:, :n, :n, n:, n:], evecbb)\n\n nevecab = ncon(h4e[:n, n:, :n, :n, :n, n:, :n, :n], evecaa) \\\n + 2.0 * ncon(h4e[:n, n:, :n, n:, :n, n:, :n, n:], evecab) \\\n + ncon(h4e[:n, n:, n:, n:, :n, n:, n:, n:], evecbb)\n\n nevecbb = ncon(h4e[n:, n:, :n, :n, n:, n:, :n, :n], evecaa) \\\n + 2.0 * ncon(h4e[n:, n:, :n, n:, n:, n:, :n, n:], evecab) \\\n + ncon(h4e[n:, n:, n:, n:, n:, n:, n:, n:], evecbb)\n\n dveca2 = numpy.zeros(dveca.shape, dtype=self._dtype)\n dvecb2 = numpy.zeros(dvecb.shape, dtype=self._dtype)\n for i in range(norb):\n for j in range(norb):\n dveca[:, :, :, :] = nevecaa[i, j, :, :, :, :]\n dvecb[:, :, :, :] = nevecab[i, j, :, :, :, :]\n cvec = self._calculate_coeff_spin_with_dvec((dveca, dvecb))\n dveca2[i, j, :, :] += cvec[:, :]\n\n dveca[:, :, :, :] = nevecab[:, :, i, j, :, :]\n dvecb[:, :, :, :] = nevecbb[i, j, :, :, :, :]\n cvec = self._calculate_coeff_spin_with_dvec((dveca, dvecb))\n dvecb2[i, j, :, :] += cvec[:, :]\n\n out += self._calculate_coeff_spin_with_dvec((dveca2, dvecb2))\n return out",
"def transform_images(img1,img2):",
"def transform():",
"def __call__(self, inputs_1d, inputs_2d, mask, affine):\n num_residues, _ = inputs_1d.shape\n\n # Improve readability by removing a large number of 'self's.\n num_head = self.config.num_head\n num_scalar_qk = self.config.num_scalar_qk\n num_point_qk = self.config.num_point_qk\n num_scalar_v = self.config.num_scalar_v\n num_point_v = self.config.num_point_v\n num_output = self.config.num_channel\n\n assert num_scalar_qk > 0\n assert num_point_qk > 0\n assert num_point_v > 0\n\n # Construct scalar queries of shape:\n # [num_query_residues, num_head, num_points]\n q_scalar = common_modules.Linear(\n num_head * num_scalar_qk, name='q_scalar')(\n inputs_1d)\n q_scalar = jnp.reshape(\n q_scalar, [num_residues, num_head, num_scalar_qk])\n\n # Construct scalar keys/values of shape:\n # [num_target_residues, num_head, num_points]\n kv_scalar = common_modules.Linear(\n num_head * (num_scalar_v + num_scalar_qk), name='kv_scalar')(\n inputs_1d)\n kv_scalar = jnp.reshape(kv_scalar,\n [num_residues, num_head,\n num_scalar_v + num_scalar_qk])\n k_scalar, v_scalar = jnp.split(kv_scalar, [num_scalar_qk], axis=-1)\n\n # Construct query points of shape:\n # [num_residues, num_head, num_point_qk]\n\n # First construct query points in local frame.\n q_point_local = common_modules.Linear(\n num_head * 3 * num_point_qk, name='q_point_local')(\n inputs_1d)\n q_point_local = jnp.split(q_point_local, 3, axis=-1)\n # Project query points into global frame.\n q_point_global = affine.apply_to_point(q_point_local, extra_dims=1)\n # Reshape query point for later use.\n q_point = [\n jnp.reshape(x, [num_residues, num_head, num_point_qk])\n for x in q_point_global]\n\n # Construct key and value points.\n # Key points have shape [num_residues, num_head, num_point_qk]\n # Value points have shape [num_residues, num_head, num_point_v]\n\n # Construct key and value points in local frame.\n kv_point_local = common_modules.Linear(\n num_head * 3 * (num_point_qk + num_point_v), name='kv_point_local')(\n inputs_1d)\n kv_point_local = jnp.split(kv_point_local, 3, axis=-1)\n # Project key and value points into global frame.\n kv_point_global = affine.apply_to_point(kv_point_local, extra_dims=1)\n kv_point_global = [\n jnp.reshape(x, [num_residues,\n num_head, (num_point_qk + num_point_v)])\n for x in kv_point_global]\n # Split key and value points.\n k_point, v_point = list(\n zip(*[\n jnp.split(x, [num_point_qk,], axis=-1)\n for x in kv_point_global\n ]))\n\n # We assume that all queries and keys come iid from N(0, 1) distribution\n # and compute the variances of the attention logits.\n # Each scalar pair (q, k) contributes Var q*k = 1\n scalar_variance = max(num_scalar_qk, 1) * 1.\n # Each point pair (q, k) contributes Var [0.5 ||q||^2 - <q, k>] = 9 / 2\n point_variance = max(num_point_qk, 1) * 9. / 2\n\n # Allocate equal variance to scalar, point and attention 2d parts so that\n # the sum is 1.\n\n num_logit_terms = 3\n\n scalar_weights = np.sqrt(1.0 / (num_logit_terms * scalar_variance))\n point_weights = np.sqrt(1.0 / (num_logit_terms * point_variance))\n attention_2d_weights = np.sqrt(1.0 / (num_logit_terms))\n\n # Trainable per-head weights for points.\n trainable_point_weights = jax.nn.softplus(hk.get_parameter(\n 'trainable_point_weights', shape=[num_head],\n # softplus^{-1} (1)\n init=hk.initializers.Constant(np.log(np.exp(1.) 
- 1.))))\n point_weights *= jnp.expand_dims(trainable_point_weights, axis=1)\n\n v_point = [jnp.swapaxes(x, -2, -3) for x in v_point]\n\n q_point = [jnp.swapaxes(x, -2, -3) for x in q_point]\n k_point = [jnp.swapaxes(x, -2, -3) for x in k_point]\n dist2 = [\n squared_difference(qx[:, :, None, :], kx[:, None, :, :])\n for qx, kx in zip(q_point, k_point)\n ]\n dist2 = sum(dist2)\n attn_qk_point = -0.5 * jnp.sum(\n point_weights[:, None, None, :] * dist2, axis=-1)\n\n v = jnp.swapaxes(v_scalar, -2, -3)\n q = jnp.swapaxes(scalar_weights * q_scalar, -2, -3)\n k = jnp.swapaxes(k_scalar, -2, -3)\n attn_qk_scalar = jnp.matmul(q, jnp.swapaxes(k, -2, -1))\n attn_logits = attn_qk_scalar + attn_qk_point\n\n attention_2d = common_modules.Linear(\n num_head, name='attention_2d')(\n inputs_2d)\n\n attention_2d = jnp.transpose(attention_2d, [2, 0, 1])\n attention_2d = attention_2d_weights * attention_2d\n attn_logits += attention_2d\n\n mask_2d = mask * jnp.swapaxes(mask, -1, -2)\n attn_logits -= 1e5 * (1. - mask_2d)\n\n # [num_head, num_query_residues, num_target_residues]\n attn = jax.nn.softmax(attn_logits)\n\n # [num_head, num_query_residues, num_head * num_scalar_v]\n result_scalar = jnp.matmul(attn, v)\n\n # For point result, implement matmul manually so that it will be a float32\n # on TPU. This is equivalent to\n # result_point_global = [jnp.einsum('bhqk,bhkc->bhqc', attn, vx)\n # for vx in v_point]\n # but on the TPU, doing the multiply and reduce_sum ensures the\n # computation happens in float32 instead of bfloat16.\n result_point_global = [jnp.sum(\n attn[:, :, :, None] * vx[:, None, :, :],\n axis=-2) for vx in v_point]\n\n # [num_query_residues, num_head, num_head * num_(scalar|point)_v]\n result_scalar = jnp.swapaxes(result_scalar, -2, -3)\n result_point_global = [\n jnp.swapaxes(x, -2, -3)\n for x in result_point_global]\n\n # Features used in the linear output projection. Should have the size\n # [num_query_residues, ?]\n output_features = []\n\n result_scalar = jnp.reshape(\n result_scalar, [num_residues, num_head * num_scalar_v])\n output_features.append(result_scalar)\n\n result_point_global = [\n jnp.reshape(r, [num_residues, num_head * num_point_v])\n for r in result_point_global]\n result_point_local = affine.invert_point(result_point_global, extra_dims=1)\n output_features.extend(result_point_local)\n\n output_features.append(jnp.sqrt(self._dist_epsilon +\n jnp.square(result_point_local[0]) +\n jnp.square(result_point_local[1]) +\n jnp.square(result_point_local[2])))\n\n # Dimensions: h = heads, i and j = residues,\n # c = inputs_2d channels\n # Contraction happens over the second residue dimension, similarly to how\n # the usual attention is performed.\n result_attention_over_2d = jnp.einsum('hij, ijc->ihc', attn, inputs_2d)\n num_out = num_head * result_attention_over_2d.shape[-1]\n output_features.append(\n jnp.reshape(result_attention_over_2d,\n [num_residues, num_out]))\n\n final_init = 'zeros' if self._zero_initialize_last else 'linear'\n\n final_act = jnp.concatenate(output_features, axis=-1)\n\n return common_modules.Linear(\n num_output,\n initializer=final_init,\n name='output_projection')(final_act)",
"def apply_transform_on_3d_image(image: torch.Tensor, transforms: List[Callable]) -> torch.Tensor:\n for z in range(image.shape[0]):\n pil = TF.to_pil_image(image[z])\n for transform_fn in transforms:\n pil = transform_fn(pil)\n image[z] = TF.to_tensor(pil).squeeze()\n return image",
"def transform(self, R, t, scale = 1):\n\n # Build 4-by-4 projection matrix from args ----------------------------\n # This is what we are doing internally:\n # Proj = np.r_[ scale * np.c_[R, t], [[0, 0, 0, 1]] ]\n # InvProj = np.r_[ scale * np.c_[R.T, -np.dot(R.T, t)], [[0,0,0,scale]] ]\n Proj = tf_format.tf_format('4x4', R, t)\n Proj[:-1,:] *= scale\n InvProj = tf_format.tf_format('i4x4', R, t) * scale\n \n \n # Apply transformation to pts3D ---------------------------------------\n if self.pts3D is not None and self.pts3D.shape[1] > 0:\n # Use homogeneous coords\n pts3D = np.r_[self.pts3D, np.ones((1, self.pts3D.shape[1]))]\n pts3D = np.dot(Proj, pts3D)\n self.pts3D = pts3D[:3, :]\n\n # Apply transformation to cameras -------------------------------------\n # Camera poses are stored using camera-to-world transformations, we \n # need to invert the projection matrix for this to work --> \n # we use InvProj\n\n cposes = self.cam_poses\n for i in range(cposes.shape[1]):\n\n # Extract camera projection matrix\n p_cam = tf_format.tf_format('4x4', cposes[:, i])\n\n # Transform camera projection matrix\n new_p_cam = np.dot(p_cam, InvProj)\n \n # Make sure it's a true rotation!\n [u, s, vT] = np.linalg.svd(new_p_cam[:3,:3])\n cposes[:3, i] = tf_format.rodrigues( np.dot(u,vT) ).ravel()\n cposes[3:, i] = new_p_cam[:3, 3]\n\n self.cam_poses = cposes",
"def _para_transform(self, X):\n self.check_fit()\n\n data = self.convert_input(X)\n\n coords = numpy.array(data.coords)\n R = cdist(coords, coords)\n Theta = self.calculate_Theta(coords)\n\n g1 = self.g_1(R, data.elements)\n g2 = self.g_2(Theta, R, data.elements)\n return numpy.hstack([g1, g2])",
"def _transform_input(self, compound, spacegroup, T):\n cmpd_features = self._transform_compound(compound)\n sg_features = self._transform_spacegroup(spacegroup)\n joined_features = self._join_features(cmpd_features, sg_features, T)\n scaled_features = self._scale_features(joined_features)\n return scaled_features",
"def gemv(self,transa_,m_,n_,alpha_,a,x,beta_,y): # 3\n if not isinstance(transa_,transpose): raise TypeError(\"Argument transa has wrong type\")\n if a is None: raise TypeError(\"Invalid type for argument a\")\n if a is None:\n a_ = None\n else:\n try:\n a_ = memoryview(a)\n except TypeError:\n try:\n _tmparr_a = array.array(\"d\",a)\n except TypeError:\n raise TypeError(\"Argument a has wrong type\")\n else:\n a_ = memoryview(_tmparr_a)\n \n else:\n if a_.format != \"d\":\n a_ = memoryview(array.array(\"d\",a))\n \n if a_ is not None and len(a_) != ((n_) * (m_)):\n raise ValueError(\"Array argument a has wrong length\")\n if x is None: raise TypeError(\"Invalid type for argument x\")\n if x is None:\n x_ = None\n else:\n try:\n x_ = memoryview(x)\n except TypeError:\n try:\n _tmparr_x = array.array(\"d\",x)\n except TypeError:\n raise TypeError(\"Argument x has wrong type\")\n else:\n x_ = memoryview(_tmparr_x)\n \n else:\n if x_.format != \"d\":\n x_ = memoryview(array.array(\"d\",x))\n \n if ((transa_) == transpose.no):\n __tmp_var_0 = (n_);\n else:\n __tmp_var_0 = (m_);\n if x_ is not None and len(x_) != __tmp_var_0:\n raise ValueError(\"Array argument x has wrong length\")\n if y is None: raise TypeError(\"Invalid type for argument y\")\n _copyback_y = False\n if y is None:\n y_ = None\n else:\n try:\n y_ = memoryview(y)\n except TypeError:\n try:\n _tmparr_y = array.array(\"d\",y)\n except TypeError:\n raise TypeError(\"Argument y has wrong type\")\n else:\n y_ = memoryview(_tmparr_y)\n _copyback_y = True\n else:\n if y_.format != \"d\":\n y_ = memoryview(array.array(\"d\",y))\n _copyback_y = True\n if ((transa_) == transpose.no):\n __tmp_var_1 = (m_);\n else:\n __tmp_var_1 = (n_);\n if y_ is not None and len(y_) != __tmp_var_1:\n raise ValueError(\"Array argument y has wrong length\")\n res = self.__obj.gemv(transa_,m_,n_,alpha_,a_,x_,beta_,y_)\n if res != 0:\n raise Error(rescode(res),\"\")\n if _copyback_y:\n y[:] = _tmparr_y",
"def get_homogeneous_transform_from_vectors(r_vector, t_vector):\n r_vector = np.array(r_vector)\n if r_vector.shape == (3, 3):\n if isRotationMatrix(r_vector):\n r_gt = r_vector\n else:\n assert EOFError\n t = np.eye(4)\n t[0:3, 0:3] = r_gt\n t[0:3, 3] = t_vector\n return t\n else:\n t_gt = extend_vector_to_homogeneous_transf(t_vector)\n\n r_gt = eulerAnglesToRotationMatrix({\n 'x': np.radians(r_vector[0]),\n 'y': np.radians(r_vector[1]),\n 'z': np.radians(r_vector[2])\n })\n\n return np.matmul(t_gt, r_gt)",
"def transform_from_rot_trans(R, t):\n R = R.reshape(3, 3)\n t = t.reshape(3, 1)\n return np.hstack([R, t])",
"def forward(self, x):\n # l1\n #print(\"INIT SIZE\", torch.cuda.max_memory_allocated())\n #print(\"L1\")\n #print(\"input\", x.shape)\n e1 = self.ec_init(x)\n #print(\"init\", e1.shape)\n syn1 = self.ec11(e1) # init right - l1\n #print(\"syn1\", syn1.shape)\n #print(\"L2\")\n e2 = self.bilinear(syn1, 32, 32, size=self.sizes[2]) # l1-2\n #print(\"e2\", e2.shape)\n # l2\n syn2 = self.ec22(e2) # right l2 (concat later)\n #print(\"syn2\", syn2.shape)\n del e1, e2\n e3 = self.bilinear(syn2, 32, 32, size=self.sizes[3]) # l2-3\n #print(\"L3\")\n #print(\"e3\", e3.shape)\n # l3\n syn3 = self.ec33(e3) # right l3 (concat later)\n #print(\"syn3\", syn3.shape)\n del e3 # delete\n #print(\"L4\")\n e41 = self.bilinear(syn3, 32, 64, size=self.sizes[4]) # l3-l4\n #print(\"e41\", e41.shape)\n\n # l4\n e42 = self.ec441(e41) # right 1 l4\n #print(\"e42\", e42.shape) \n syn4 = self.ec442(e42) # right 2 l4 (concat later)\n #print(\"syn4\", syn4.shape)\n del e41, e42\n #print(\"L5\")\n e51 = self.bilinear(syn4, 64, 128, size=self.sizes[5]) # l4-l5\n #print(\"e51\", e51.shape)\n # l5\n e52 = self.ec551(e51) # right 1\n #print(\"e52\", e52.shape)\n syn5 = self.ec552(e52) # right 2\n #print(\"syn5\", syn5.shape)\n del e51, e52\n #print(\"L6\")\n e61 = self.bilinear(syn5, 128, 128, size=self.sizes[6]) # l5-l6\n #print(\"e61\", e61.shape)\n \n # l6\n e62 = self.ec661(e61) # right 1\n #print(\"e62\", e62.shape)\n syn6 = self.ec662(e62) # right 2\n #print(\"syn6\", syn6.shape)\n del e61, e62\n #print(\"L7\")\n e71 = self.bilinear(syn6, 128, 256, size=self.sizes[7]) #l6-7\n #print(\"e71\", e71.shape)\n \n # l7\n e72 = self.ec771(e71) # right 1 (green)\n #print(\"e72\", e72.shape)\n syn7 = self.ec772(e72) # right 2 (turq)\n #print(\"syn7\", syn7.shape)\n del e71, e72\n\n #print(\"L8\")\n\n #e_bottom_left = self.bilinear(syn7, 256, 4092, size=self.sizes[8]) # l7-l8\n e_bottom_left = self.bilinear(syn7, 256, 256, size=self.sizes[8]) # l7-l8\n #print(\"e_b_l\", e_bottom_left.shape)\n\n # l8 - the very bottom most encoded\n e_bottom_left = e_bottom_left.view(e_bottom_left.size(0), -1)\n batch_size = e_bottom_left.size()[0]\n e_bottom_right = self.ec88(e_bottom_left)\n # TODO - change the view so that 1st arg is batch size again\n e_bottom_right = e_bottom_right.view(batch_size, e_bottom_right.size(1), 1,1,1)\n #print(\"e_b_r\", e_bottom_right.shape)\n\n #print(\"SIZE BEFORE DEL\", torch.cuda.max_memory_allocated())\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n #print(\"SIZE AFTER DEL\", torch.cuda.max_memory_allocated())\n\n ## DECODE ##\n #print(\"TO CONCAT:\")\n #print(\"Shape1\", self.bilinear(e_bottom_right, 4096, 256, size=self.sizes2[7]).shape)\n #print(\"Shape1\", self.bilinear(e_bottom_right, 256, 256, size=self.sizes2[7]).shape)\n #print(\"syn7 \", syn7.shape)\n # QUESTION - check this is a simple cat - says \"copy and stack\"\n #d71 = torch.cat((self.bilinear(e_bottom_right, 4096, 256, size=self.sizes2[7]), syn7), dim=1) # concat on level 7\n d71 = torch.cat((self.bilinear(e_bottom_right, 256, 256, size=self.sizes2[7]), syn7), dim=1) # concat on level 7\n #print(\"d71 (post cat)\", d71.shape)\n del e_bottom_left, e_bottom_right\n d72 = self.dc77(d71) # move right on level 7 (decode)\n #print(\"d72 (decoded)\", d72.shape)\n del d71, syn7\n\n # TODO - finish\n d61 = torch.cat((self.bilinear(d72, 256, 128, size=self.sizes2[6]), syn6), dim=1)\n del d72, syn6\n d62 = self.dc66(d61)\n\n d51 = torch.cat((self.bilinear(d62, 128, 128, size=self.sizes2[5]), syn5), dim=1)\n del d61, d62, syn5\n d52 = 
self.dc55(d51)\n\n d41 = torch.cat((self.bilinear(d52, 128, 64, size=self.sizes2[4]), syn4), dim=1)\n del d51, d52, syn4\n d42 = self.dc44(d41)\n\n d31 = torch.cat((self.bilinear(d42, 64, 32, size=self.sizes2[3]), syn3), dim=1)\n del d41, d42, syn3\n d32 = self.dc33(d31)\n\n d21 = torch.cat((self.bilinear(d32, 32, 32, size=self.sizes2[2]), syn2), dim=1)\n del d31, d32, syn2\n d22 = self.dc22(d21)\n\n d11 = torch.cat((self.bilinear(d22, 32, 32, size=self.sizes2[1]), syn1), dim=1)\n del d21, d22, syn1\n d12 = self.dc11(d11)\n return d12\n \"\"\"\n del d11\n # QUESTION\n # is this right or is there only 1 rightward step at top layer\n d0 = self.dc10(d12)\n return d0\n \"\"\"",
"def transform(self, X):\n ...",
"def transform(self, X):\n ...",
"def transform(self, X):\n ...",
"def transform(self, X):\n ...",
"def transform(self, X):\n ...",
"def transform(self, X):\n ...",
"def transform(self, X):\n ...",
"def projective_inverse_warp_torch3(\n img, depth, pose, src_intrinsics, tgt_intrinsics, tgt_height, tgt_width, ret_flows=False):\n batch, height, width, channels = img.shape\n # Construct pixel grid coordinates (x, y, 1) for each pixel.\n # Duplicated for N (e.g. 4) of INPUT images (batch)\n #delta_xy = src_center_xy - torch.tensor([float(tgt_width - 1) / 2, float(tgt_height - 1) / 2], device=src_center_xy.device)\n #delta_xyz = torch.cat([delta_xy, torch.zeros([batch, 1], device=delta_xy.device)], dim=1).unsqueeze(-1).unsqueeze(-1)\n # delta xyz [batch, 3, 1, 1]\n pixel_coords = meshgrid_abs_torch(batch, tgt_height, tgt_width, img.device, False)\n #pixel_coords = pixel_coords + delta_xyz\n\n # Note: \"target\" here means actually \"ref image\", forget about the ground truth targets!\n # You project pixels from \"target\" to the multiple inputs, not the other way round\n # Convert pixel coordinates to the target camera frame, 3D camera coords (X, Y, Z), seems OK so far...\n # Note: these are points in 3D camera coords (C) of the target camera, not world coords (W) !!!\n cam_coords = pixel2cam_torch(depth, pixel_coords, tgt_intrinsics)\n\n # Construct a 4x4 intrinsic matrix, why? wouldn't 3x4 suffice?\n filler = torch.tensor([[[0., 0., 0., 1.]]], device=img.device)\n filler = filler.repeat(batch, 1, 1)\n src_intrinsics4 = torch.cat([src_intrinsics, torch.zeros([batch, 3, 1], device=img.device)], axis=2)\n src_intrinsics4 = torch.cat([src_intrinsics4, filler], axis=1)\n\n # Get a 4x4 transformation matrix from 'target' camera frame to 'source'\n # pixel frame, looks OK\n proj_tgt_cam_to_src_pixel = torch.matmul(src_intrinsics4, pose)\n src_pixel_coords = cam2pixel_torch(cam_coords, proj_tgt_cam_to_src_pixel)\n\n # print(f'src_pixel_coords shape {src_pixel_coords.shape}')\n # print(f'src_pixel_coords {L(src_pixel_coords[:, :, :3,:])}')\n\n # Now we get trouble !\n if False:\n print(('src_pixel_coords', src_pixel_coords.shape, src_pixel_coords.dtype))\n for i in range(2):\n t = src_pixel_coords[0, :, :, i]\n print((i, t.min().item(), t.max().item()))\n sys.exit(0)\n\n # src_pixel_coords = (src_pixel_coords + torch.tensor([0.5, 0.5], device=img.device)) / torch.tensor([width, height],\n # device=img.device)\n\n src_pixel_coords = src_pixel_coords / torch.tensor([width-1, height-1], device=img.device)\n\n output_img = resampler_wrapper_torch(img, src_pixel_coords)\n if ret_flows:\n return output_img, src_pixel_coords - cam_coords\n else:\n return output_img",
"def __call__(self, args):\n if isinstance(self.transform, list) and len(self.transform) > 1:\n result = self.regions_mask[x, y]\n unique_regions = np.unique(result)\n for i in unique_regions:\n indices = result==i\n transform=self.get_forward_transform(i)\n result[indices]=transform(x[indices], y[indices])\n print('resut', result)\n return result\n else:\n return self.transform(x, y)",
"def _apply_array_spin123(self,\n h1e: 'Nparray',\n h2e: 'Nparray',\n h3e: 'Nparray',\n dvec: Optional[Tuple['Nparray', 'Nparray']] = None,\n evec: Optional[Tuple['Nparray', 'Nparray', 'Nparray', 'Nparray']] \\\n = None) -> 'Nparray':\n norb = self.norb()\n assert h3e.shape == (norb * 2,) * 6\n assert not (dvec is None) ^ (evec is None)\n\n from1234 = (dvec is not None) and (evec is not None)\n\n nh1e = numpy.copy(h1e)\n nh2e = numpy.copy(h2e)\n\n for i in range(norb * 2):\n for j in range(norb * 2):\n for k in range(norb * 2):\n nh2e[j, k, :, :] += (-h3e[k, j, i, i, :, :] -\n h3e[j, i, k, i, :, :] -\n h3e[j, k, i, :, i, :])\n\n nh1e[:, :] += h3e[:, i, j, i, j, :]\n\n out = self._apply_array_spin12_halffilling(nh1e, nh2e)\n\n n = norb # This is just shorter\n if not from1234:\n symfac = 2.0\n axes = ((1, 3), (0, 1))\n (odveca, odvecb) = self.calculate_dvec_spin()\n dveca = numpy.zeros_like(odveca)\n dvecb = numpy.zeros_like(odvecb)\n\n for i in range(norb):\n for j in range(norb):\n evecaa, _ = self._calculate_dvec_spin_with_coeff(\n odveca[i, j, :, :])\n evecab, evecbb = self._calculate_dvec_spin_with_coeff(\n odvecb[i, j, :, :])\n\n dveca += numpy.tensordot(h3e[:n, :n, i, :n, :n, j],\n evecaa,\n axes=axes)\n dveca += numpy.tensordot(h3e[:n, :n, n + i, :n, :n, n + j],\n evecab,\n axes=axes) * symfac\n dveca += numpy.tensordot(h3e[:n, n:, n + i, :n, n:, n + j],\n evecbb,\n axes=axes)\n\n dvecb += numpy.tensordot(h3e[n:, :n, i, n:, :n, j],\n evecaa,\n axes=axes)\n dvecb += numpy.tensordot(h3e[n:, :n, n + i, n:, :n, n + j],\n evecab,\n axes=axes) * symfac\n dvecb += numpy.tensordot(h3e[:n, n:, n + i, :n, n:, n + j],\n evecbb,\n axes=axes)\n else:\n symfac = 1.0\n axes = ((1, 4, 2, 5), (0, 1, 2, 3)) # type: ignore\n dveca, dvecb = dvec # type: ignore\n evecaa, evecab, evecba, evecbb = evec # type: ignore\n\n dveca = numpy.tensordot(h3e[:n, :n, :n, :n, :n, :n],\n evecaa, axes=axes) \\\n + numpy.tensordot(h3e[:n, :n, n:, :n, :n, n:],\n evecab, axes=axes) * symfac \\\n + numpy.tensordot(h3e[:n, n:, n:, :n, n:, n:],\n evecbb, axes=axes) + \\\n + numpy.tensordot(h3e[:n, n:, :n, :n, n:, :n],\n evecba, axes=axes)\n\n dvecb = numpy.tensordot(h3e[n:, :n, :n, n:, :n, :n],\n evecaa, axes=axes) \\\n + numpy.tensordot(h3e[n:, :n, n:, n:, :n, n:],\n evecab, axes=axes) * symfac \\\n + numpy.tensordot(h3e[n:, n:, n:, n:, n:, n:],\n evecbb, axes=axes) + \\\n + numpy.tensordot(h3e[n:, n:, :n, n:, n:, :n],\n evecba, axes=axes)\n\n out -= self._calculate_coeff_spin_with_dvec((dveca, dvecb))\n return out",
"def test_inverse_transform(self):",
"def transform_points(points, T):\n\n homo_points = np.array([(x, y, 1) for (y, x) in points])\n t_points = np.array([T.dot(v) for v in homo_points ])\n swap = np.array([(x,y) for (y,x,z) in t_points])\n return swap",
"def transform(self, x, y):\n # return self.transform_2D(x, y)\n return self.transform_perspective(x, y)",
"def apply_transform(transform):\n vg.shape.check(locals(), \"transform\", (4, 4))\n\n def apply(points, discard_z_coord=False, treat_input_as_vector=False):\n points, is_columnized, maybe_decolumnize = columnize(\n points, (-1, 3), name=\"points\"\n )\n\n homogenous_coordinate_value = 0 if treat_input_as_vector else 1\n padded_points = np.pad(\n points,\n ((0, 0), (0, 1)),\n mode=\"constant\",\n constant_values=homogenous_coordinate_value,\n )\n transformed_padded_points = np.dot(transform, padded_points.T).T\n transformed_points = np.delete(transformed_padded_points, 3, axis=1)\n\n result = maybe_decolumnize(transformed_points)\n if discard_z_coord:\n return result[:, 0:2] if is_columnized else result[0:2]\n else:\n return result\n\n return apply",
"def get_perspective_transform3d(src: Tensor, dst: Tensor) -> Tensor:\n if not isinstance(src, (Tensor)):\n raise TypeError(f\"Input type is not a Tensor. Got {type(src)}\")\n\n if not isinstance(dst, (Tensor)):\n raise TypeError(f\"Input type is not a Tensor. Got {type(dst)}\")\n\n if not src.shape[-2:] == (8, 3):\n raise ValueError(f\"Inputs must be a Bx8x3 tensor. Got {src.shape}\")\n\n if not src.shape == dst.shape:\n raise ValueError(f\"Inputs must have the same shape. Got {dst.shape}\")\n\n if not (src.shape[0] == dst.shape[0]):\n raise ValueError(f\"Inputs must have same batch size dimension. Expect {src.shape} but got {dst.shape}\")\n\n if not (src.device == dst.device and src.dtype == dst.dtype):\n raise AssertionError(\n f\"Expect `src` and `dst` to be in the same device (Got {src.dtype}, {dst.dtype}) \"\n f\"with the same dtype (Got {src.dtype}, {dst.dtype}).\"\n )\n\n # we build matrix A by using only 4 point correspondence. The linear\n # system is solved with the least square method, so here\n # we could even pass more correspondence\n p = []\n\n # 000, 100, 110, 101, 011\n for i in [0, 1, 2, 5, 7]:\n p.append(_build_perspective_param3d(src[:, i], dst[:, i], 'x'))\n p.append(_build_perspective_param3d(src[:, i], dst[:, i], 'y'))\n p.append(_build_perspective_param3d(src[:, i], dst[:, i], 'z'))\n\n # A is Bx15x15\n A = stack(p, 1)\n\n # b is a Bx15x1\n b = stack(\n [\n dst[:, 0:1, 0],\n dst[:, 0:1, 1],\n dst[:, 0:1, 2],\n dst[:, 1:2, 0],\n dst[:, 1:2, 1],\n dst[:, 1:2, 2],\n dst[:, 2:3, 0],\n dst[:, 2:3, 1],\n dst[:, 2:3, 2],\n # dst[:, 3:4, 0], dst[:, 3:4, 1], dst[:, 3:4, 2],\n # dst[:, 4:5, 0], dst[:, 4:5, 1], dst[:, 4:5, 2],\n dst[:, 5:6, 0],\n dst[:, 5:6, 1],\n dst[:, 5:6, 2],\n # dst[:, 6:7, 0], dst[:, 6:7, 1], dst[:, 6:7, 2],\n dst[:, 7:8, 0],\n dst[:, 7:8, 1],\n dst[:, 7:8, 2],\n ],\n 1,\n )\n\n # solve the system Ax = b\n X: Tensor = _torch_solve_cast(A, b)\n\n # create variable to return\n batch_size: int = src.shape[0]\n M = torch.empty(batch_size, 16, device=src.device, dtype=src.dtype)\n M[..., :15] = X[..., 0]\n M[..., -1].fill_(1)\n\n return M.view(-1, 4, 4) # Bx4x4",
"def phasesin14xymult(param, xyord,crossord,t, x, y):\n # 2010-04-27 11:49 IJC: Created\n # 2010-05-28 15:42 IJC: Added x*y cross-terms\n\n param = array(param,copy=True)\n x = array(x,copy=True)\n y = array(y,copy=True)\n t = array(t,copy=True)\n\n xparam = zeros((0,14),float)\n yparam = zeros((0,14),float)\n crossparam = zeros((0,14),float)\n\n cparam = param[3:17]\n if xyord>=1:\n for ii in range(xyord):\n xparam = vstack((xparam,param[17+ii*28:31+ii*28]))\n yparam = vstack((yparam,param[31+ii*28:45+ii*28]))\n\n lastxyparamind = 45+(xyord-1)*28\n if crossord>=1:\n for ii in [0]: #range(crossparam):\n crossparam = vstack((crossparam,param[lastxyparamind:lastxyparamind+(ii+1)*14]))\n\n #cparam -= mean(cparam)\n param[2] = param[2] % (2*pi)\n \n if len(t.shape)==1:\n was1d = True\n t = t.reshape(14, len(t)/14.)\n x = x.reshape(14, len(x)/14.)\n y = y.reshape(14, len(y)/14.)\n else:\n was1d = False\n\n # Subtract the mean from the X and Y data\n x -= x.mean(1).reshape(14,1)\n y -= y.mean(1).reshape(14,1)\n\n # Zeroth-order model:\n ret = param[0] - abs(param[1]) *cos(2*pi*t +param[2])\n\n # Apply constant offsets:\n ret *= (1. + tile(cparam, (t.shape[1],1)).transpose())\n if xyord>=1:\n for ii in range(xyord):\n ret *= (1. + tile(xparam[ii], (t.shape[1],1)).transpose()*x**(ii+1))\n ret *= (1. + tile(yparam[ii], (t.shape[1],1)).transpose()*y**(ii+1))\n\n if crossord>=1:\n for ii in [0]: \n ret *= (1. + tile(crossparam[ii], (t.shape[1],1)).transpose()*x*y)\n\n if was1d:\n ret = ret.ravel()\n\n return ret",
"def transform(self, x):",
"def transform(self, X):\n\n t0 = time.perf_counter()\n check_is_fitted(self)\n self.check_external_components_modified()#[WARN] in d3m, primitives can \"restore\" private class variables...\n X = self._validate_data(X, accept_sparse=[\"csr\", \"csc\"], reset=False)\n t1 = time.perf_counter()\n\n if X.shape[1] != self.components_af_.shape[1]:\n raise ValueError(\n \"Impossible to perform projection:\"\n \"X at fit stage had a different number of features. \"\n \"(%s != %s)\" % (X.shape[1], self.components_af_.shape[1])\n )\n\n #X_new = safe_sparse_dot(X, self.components_.T, dense_output=self.dense_output)\n #import pdb; pdb.set_trace()\n X_af = af.interop.from_ndarray(X).as_type(self.components_af_.dtype())\n X_new = af.matmulNT(X_af, self.components_af_)\n X_new = X_new.to_ndarray()\n t2 = time.perf_counter()\n return X_new",
"def transform_from_rot_trans(R, t):\n R = R.reshape(3, 3)\n t = t.reshape(3, 1)\n return np.vstack((np.hstack([R, t]), [0, 0, 0, 1]))",
"def transform_from_rot_trans(R, t):\n R = R.reshape(3, 3)\n t = t.reshape(3, 1)\n return np.vstack((np.hstack([R, t]), [0, 0, 0, 1]))",
"def test__inverse_transform_continuous(self):",
"def gemm(self,transa_,transb_,m_,n_,k_,alpha_,a,b,beta_,c): # 3\n if not isinstance(transa_,transpose): raise TypeError(\"Argument transa has wrong type\")\n if not isinstance(transb_,transpose): raise TypeError(\"Argument transb has wrong type\")\n if a is None: raise TypeError(\"Invalid type for argument a\")\n if a is None:\n a_ = None\n else:\n try:\n a_ = memoryview(a)\n except TypeError:\n try:\n _tmparr_a = array.array(\"d\",a)\n except TypeError:\n raise TypeError(\"Argument a has wrong type\")\n else:\n a_ = memoryview(_tmparr_a)\n \n else:\n if a_.format != \"d\":\n a_ = memoryview(array.array(\"d\",a))\n \n if a_ is not None and len(a_) != ((m_) * (k_)):\n raise ValueError(\"Array argument a has wrong length\")\n if b is None: raise TypeError(\"Invalid type for argument b\")\n if b is None:\n b_ = None\n else:\n try:\n b_ = memoryview(b)\n except TypeError:\n try:\n _tmparr_b = array.array(\"d\",b)\n except TypeError:\n raise TypeError(\"Argument b has wrong type\")\n else:\n b_ = memoryview(_tmparr_b)\n \n else:\n if b_.format != \"d\":\n b_ = memoryview(array.array(\"d\",b))\n \n if b_ is not None and len(b_) != ((k_) * (n_)):\n raise ValueError(\"Array argument b has wrong length\")\n if c is None: raise TypeError(\"Invalid type for argument c\")\n _copyback_c = False\n if c is None:\n c_ = None\n else:\n try:\n c_ = memoryview(c)\n except TypeError:\n try:\n _tmparr_c = array.array(\"d\",c)\n except TypeError:\n raise TypeError(\"Argument c has wrong type\")\n else:\n c_ = memoryview(_tmparr_c)\n _copyback_c = True\n else:\n if c_.format != \"d\":\n c_ = memoryview(array.array(\"d\",c))\n _copyback_c = True\n if c_ is not None and len(c_) != ((m_) * (n_)):\n raise ValueError(\"Array argument c has wrong length\")\n res = self.__obj.gemm(transa_,transb_,m_,n_,k_,alpha_,a_,b_,beta_,c_)\n if res != 0:\n raise Error(rescode(res),\"\")\n if _copyback_c:\n c[:] = _tmparr_c",
"def single_channel_stacking_unlimited(tifs):\n results=[]\n \n for i in range(len(tifs)-1):\n r1=gdal_array.LoadFile(tifs[i])\n r2=gdal_array.LoadFile(tifs[i+1])\n print(tifs[i])\n print(tifs[i+1])\n result=ird.similarity(r1,r2 , numiter=1, order=1)\n print(result['tvec'])\n print(result['scale'])\n print(result['angle'])\n results.append(result)\n \n \n print(i)\n \n x0y0=(0,0)\n x_max_y_max=(r1.shape[1], r1.shape[0])\n cords=np.array([[x0y0[0], x_max_y_max[0],x0y0[1], x_max_y_max[1] ]])\n plt.scatter((cords[0,0], cords[0, 1]), (cords[0,2], cords[0, 3]))\n\n \n\n for i in range(len(tifs)-1):\n \n print(i)\n scale=0\n tvec_x=0\n tvec_y=0\n angle=0\n x0y0=(0,0)\n x_max_y_max=(r1.shape[1], r1.shape[0])\n \n for j in range(i+1):\n print(j)\n result=results[j]\n scale=result['scale']\n tvec_x=tvec_x+result['tvec'][1]\n tvec_y=tvec_y+result['tvec'][0]\n angle=angle+result['angle']\n M=Affine.translation(tvec_x,tvec_y )*Affine.scale(scale)*Affine.rotation(angle)\n print(M)\n x0y0=M*x0y0\n x_max_y_max=M*x_max_y_max\n \n cords=np.append(cords, [[x0y0[0], x_max_y_max[0],x0y0[1], x_max_y_max[1]]], axis=0)\n print(x0y0)\n print(x_max_y_max)\n \n plt.scatter((cords[i+1,0], cords[i+1, 1]), (cords[i+1,2], cords[i+1, 3]))\n \n \n xmin=np.min(cords[:,0:2])\n xmax=np.max(cords[:,0:2])\n ymin=np.min(cords[:,2:])\n ymax=np.max(cords[:,2:])\n \n print(cords)\n cords[:,0:2]=cords[:,0:2]-xmin\n cords[:,2:]=cords[:,2:]-ymin\n \n print(xmin,xmax, ymin,ymax)\n print(cords)\n \n \n \n final_array_shape=(int(np.abs(ymin-ymax)), int(np.abs(xmin-xmax)))\n print(final_array_shape)\n \n raster=np.zeros(final_array_shape)\n avg_raster=np.zeros(final_array_shape)\n \n # Mzero=Affine.translation(int(cords[0,0])+1, int(cords[0,2])+1)\n tif_raster=gdal_array.LoadFile(tifs[0])\n ones_raster=np.full_like(tif_raster, 1) # ones_raster=np.full(tif_raster.shape, 1)\n \n pad_raster=np.zeros_like(raster)\n pad_raster[0:tif_raster.shape[0],0:tif_raster.shape[1]]=tif_raster\n pad_ones_raster=np.zeros_like(raster)\n pad_ones_raster[0:tif_raster.shape[0],0:tif_raster.shape[1]]=ones_raster\n \n \n pad_raster=ird.transform_img(pad_raster,tvec=(int(cords[0,2]),int(cords[0,0])), bgval=0)\n pad_raster=cut_transformed_array_borders(pad_raster)\n pad_ones_raster=ird.transform_img(pad_ones_raster,tvec=(int(cords[0,2]),int(cords[0,0])), bgval=0)\n pad_ones_raster=cut_transformed_array_borders(pad_ones_raster)\n # ones_raster=ird.transform_img(pad_raster,tvec=(int(cords[0,2])+1,int(cords[0,0])+1))\n # where_ones=np.where(pad_raster>0)\n # ones_raster[where_ones]=1\n raster=raster+pad_raster\n avg_raster=avg_raster+pad_ones_raster\n \n # for i in range(zero_raster.shape[0]):\n # for j in range(zero_raster.shape[1]):\n # xy=(j,i)\n # new_xy=Mzero*xy\n # new_xy=[new_xy[0], new_xy[1]]\n # new_xy[0]=int(new_xy[0])\n # new_xy[1]=int(new_xy[1])\n \n # raster[new_xy[1], new_xy[0]]=zero_raster[i,j]\n # avg_raster[new_xy[1], new_xy[0]]=avg_raster[new_xy[1], new_xy[0]]+1\n \n for k,tif in enumerate(tifs, start=1):\n print(tif)\n if k==1:\n continue\n scale=1\n tvec_x=0\n tvec_y=0\n angle=0\n \n for r in range(k-1):\n result=results[r]\n scale=scale*result['scale']\n tvec_x=tvec_x+result['tvec'][1]\n tvec_y=tvec_y+result['tvec'][0]\n angle=angle+result['angle']\n tvec_x=tvec_x-xmin\n tvec_y=tvec_y-ymin\n M=Affine.translation(tvec_x,tvec_y )*Affine.scale(scale)*Affine.rotation(angle)\n print(M)\n x0y0=M*x0y0\n x_max_y_max=M*x_max_y_max\n \n tif_raster=gdal_array.LoadFile(tif)\n \n \n # for i in tqdm(range(tif_raster.shape[0]), desc=\"transforming: \"+tif):\n 
# for j in range(tif_raster.shape[1]):\n # xy=(j,i)\n # new_xy=M*xy\n # new_xy=[new_xy[0], new_xy[1]]\n # new_xy[0]=int(new_xy[0])\n # new_xy[1]=int(new_xy[1])\n \n # raster[new_xy[1], new_xy[0]]=raster[new_xy[1], new_xy[0]]+tif_raster[i,j]\n # avg_raster[new_xy[1], new_xy[0]]=avg_raster[new_xy[1], new_xy[0]]+1\n \n \n pad_raster=np.zeros_like(raster)\n pad_raster[0:tif_raster.shape[0],0:tif_raster.shape[1]]=tif_raster\n ones_raster=np.full_like(tif_raster, 1)\n pad_ones_raster=np.zeros_like(raster)\n pad_ones_raster[0:tif_raster.shape[0],0:tif_raster.shape[1]]=ones_raster\n \n \n \n pad_raster=ird.transform_img(pad_raster,scale=scale, angle=angle, tvec=(tvec_y, tvec_x), mode='constant', bgval=0)\n pad_ones_raster=ird.transform_img(pad_ones_raster,scale=scale, angle=angle, tvec=(tvec_y, tvec_x), mode='constant', bgval=0)\n # ones_raster=ird.transform_img(pad_raster,tvec=(int(cords[0,2])+1,int(cords[0,0])+1))\n # where_ones=np.where(pad_raster>0)\n # ones_raster[where_ones]=1\n raster=raster+pad_raster\n # avg_raster=avg_raster+ones_raster\n avg_raster=avg_raster+pad_ones_raster\n \n # left_border=xmin\n # upper_border=ymax \n # print(raster.shape)\n\n\n \n\n\n\n\n\n plt.show()\n plt.close() \n \n gtz=np.where(avg_raster>0)\n \n raster[gtz]=raster[gtz]/avg_raster[gtz]\n basename=os.path.basename(tif)\n gdal_array.SaveArray(raster, os.path.dirname(os.path.abspath(tif))+\"/stacked/\"+basename[:-16]+\"_py_corr_stackeg_big_.tif\")\n \n def discrete_cmap(N, base_cmap=None):\n \"\"\"Create an N-bin discrete colormap from the specified input map\"\"\"\n \n # Note that if base_cmap is a string or None, you can simply do\n # return plt.cm.get_cmap(base_cmap, N)\n # The following works for string, None, or a colormap instance:\n \n base = plt.cm.get_cmap(base_cmap)\n color_list = base(np.linspace(0, 1, N))\n cmap_name = base.name + str(N)\n return base.from_list(cmap_name, color_list, N)\n\n cmap=discrete_cmap(int(avg_raster.max())+1, base_cmap=\"ocean\") \n \n norm=mpl.colors.BoundaryNorm(np.arange(-0.5,int(avg_raster.max()+1)), cmap.N)\n fig=plt.figure()\n fig.set_size_inches(15,10)\n ax=fig.add_subplot(111)\n data=ax.matshow(avg_raster, cmap=cmap, norm=norm)\n fig.colorbar(data, ticks=np.linspace(0,int(avg_raster.max()),int(avg_raster.max()+1)), drawedges=True)\n\n plt.show()\n plt.close()\n \n \n plt.imshow(avg_raster)\n plt.show()\n plt.close()\n \n plt.imshow(raster)\n plt.show()\n plt.close()",
"def fun(params, n_cameras, n_points, camera_indices, point_indices, points_2d, theta):\n \n camera_params = params[:n_cameras * 9].reshape((n_cameras, 9))\n points_3d = params[n_cameras * 9:].reshape((n_points, 3))\n points_proj = project(points_3d[point_indices], camera_params[camera_indices], theta)\n print(\"Residual is: \", (points_proj - points_2d).ravel())\n return (points_proj - points_2d).ravel()",
"def testCalculateTransform(self):\n # Create some points in the first frame.\n z = 4.0\n self.evaluator.camera_height = z\n first_points = numpy.array(\n [[0, 0, z], [2, 0, z], [2, 5, z], [0, 5, z]], dtype=numpy.float32)\n # Create a transformation that will move the camera\n R = numpy.array([[0.0, -1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0]])\n t = numpy.array([[3.0], [-5.0], [0.0]])\n expected_result = numpy.eye(4)\n expected_result[0:3, 0:3] = R\n expected_result[0:3, 3:] = t\n # Determine where the second points would be given that.\n second_points = (numpy.matmul(\n R, first_points.transpose()) + t).transpose()\n # Create a simple intrinsic matrix to project onto a fictional camera\n intrinsic = numpy.array(\n [[1.0, 0.0, 20.0], [0.0, 1.0, 20.0], [0.0, 0.0, 1.0]])\n # Use no distortion or transformations\n rvec = numpy.zeros((3, 1))\n tvec = rvec\n distortion = numpy.zeros((5, 1))\n # Project the points into the camera\n (camera_first_points, _) = cv2.projectPoints(\n first_points, rvec, tvec, intrinsic, distortion)\n camera_first_points = camera_first_points.squeeze()\n (camera_second_points, _) = cv2.projectPoints(\n second_points, rvec, tvec, intrinsic, distortion)\n camera_second_points = camera_second_points.squeeze()\n # Using these projected points, can the object recover the correct initial transform\n result = self.evaluator._calculateTransform(\n camera_first_points, camera_second_points, intrinsic)\n # The matrix comparisions aren't reliable near zero, so check elements manually.\n for i in range(expected_result.shape[0]):\n for j in range(expected_result.shape[1]):\n result_element = result[i, j]\n expected_element = expected_result[i, j]\n self.assertAlmostEqual(result_element, expected_element, 6,\n 'Matrix element ({0:d}, {1:d}) is incorrect.'.format(i, j))",
"def invredc(A, B, C, D, y, v):\n # Description to help the user\n\n # calculate the number of samples of the output\n N = np.shape(y)[1] # the number of samples is the number of columns of y\n\n # calculate system's dimensions: number of states, number of inputs and number of outputs\n n = A.shape[0] # number of states\n # m=B.shape[1] # number of inputs, maybe it's not necessary\n p = C.shape[0] # number of outputs\n\n # A. Output Basis Change\n # here the output basis change and its important quantities and matrices are calculated\n\n # rank of the feedforward matrix:\n r = np.linalg.matrix_rank(D)\n\n # to calculate the S1 matrix, we have partitioned the matrix into [S1a;S2a]\n # firstly, we obtain S1a\n # since D0 must possess full row rank (rank(D0)=r), a simple way to do that is to use the scipy.linalg.orth function\n D0 = (scilin.orth(D.transpose())).transpose()\n # calculating S1a as a solution of the problem S1a*D=D0 using the pseudoinverse (Moore-Penrose inverse):\n S1at = scilin.pinv(D.transpose()) @ D0.transpose()\n S1a = S1at.transpose()\n # S1b is the null space (kernel) of D from the left\n S1b = (scilin.null_space(D.transpose())).transpose()\n # assembling the S1 matrix\n S1 = np.concatenate((S1a, S1b), axis=0) # axis=0 concatenate vertically (row wise)\n\n # the C2 matrix is obtained by a partition of S1*C, which can by also obtained with the use of S1b\n # calculating C2\n C2 = S1b @ C\n # rank of C2\n q = np.linalg.matrix_rank(C2)\n\n # calculating the matrix S2, which is very similar to S1, and it is also partitioned as S2=[S2a;S2b]\n # since C2bar has to possess full row rank (rank(C2)=q)\n C2tilde = (scilin.orth(C2.transpose())).transpose()\n # calculating S2a as a solution of the problem S2a*C2=C2bar using the pseudoinverse (Moore-Penrose inverse):\n S2at = scilin.pinv(C2.transpose()) @ C2tilde.transpose()\n S2a = S2at.transpose()\n # S2b is the null space (kernel) of C2 from the left\n S2b = (scilin.null_space(C2.transpose())).transpose()\n # assembling the S2 matrix\n S2 = np.concatenate((S2a, S2b), axis=0) # axis=0 concatenate vertically (row wise)\n\n # now that we have S1 and S2, we can assemble the S matrix\n # we defined the notation: S=Sa*S1, where Sa is partitioned as Sa=[I 0;0 S2]=[Sa1 Sa2]\n # partitions of Sa\n Sa11 = np.identity(r)\n Sa12 = np.zeros((r, p - r))\n Sa21 = np.zeros((p - r, r))\n Sa22 = S2\n # assembling the columns of Sa, Sa=[Sa1 Sa2]\n Sa1 = np.concatenate((Sa11, Sa21), axis=0) # concatenate vertically (row wise)\n Sa2 = np.concatenate((Sa12, Sa22), axis=0) # concatenate vertically (row wise)\n # finally, assembling the matrix Sa:\n Sa = np.concatenate((Sa1, Sa2), axis=1) # concatenate horizontally (column wise)\n # obtaining the S matrix by the multiplication\n S = Sa @ S1\n\n # doing the transformation of the output ytilde=Sy\n ytilde = S @ y\n # we'll not partition the output yet, first, we'll do the State-Space Basis Change\n\n # B. State-Space Basis Change\n # in this section we'll do the state-space basis change of the system\n\n # the first step is the calculation of the transformation matrix, as defined in the paper\n # we'll call T^{-1} as M, so C2tilde*M=[0 I]. And we'll partition M as M=[M1 M2]. C2tilde*M=[C2tilde*M1 C2tilde*M2]\n # since rank(C2tilde)=q, nullity(C2tilde)=n-q\n # M1 can be defined as a basis of the null space of C2tilde\n M1 = scilin.null_space(C2tilde)\n # and M2 is the solution of the equation C2tilde*M2=I. 
To calculate this solution, we'll use the pseudoinverse again\n M2 = scilin.pinv(C2tilde)\n # now, we assemble the M matrix with the concatenate function\n M = np.concatenate((M1, M2), axis=1) # concatenate horizontally (column wise)\n # finally, we calculate the T matrix by inverting M\n T = np.linalg.inv(M)\n\n # now, we proceed to the transformation of the state-space matrices\n # transformation of the system's dynamic matrix\n Atilde = T @ A @ M\n # transformation of the system's input matrix\n Btilde = T @ B\n # transformation of the system's output matrix\n Ctilde = C @ M\n # transformation of the system's feedforward matrix (it's the same)\n # Dtilde=D # actually, this step is not necessary\n # transformation of the additional system input v\n vtilde = T @ v\n\n # in the next step, we need to partition the new system's matrices and outputs\n\n # partition of the outputs\n # y1 has r lines and N columns\n y1 = ytilde[0:r, :]\n # y2 has q lines and N columns, and it starts at the r+1 line (which in python is the r line since the vector index starts at 0)\n y2 = ytilde[r : r + q, :]\n # y3 is irrelevant, then, it will be neglected\n\n # partitioning the system matrices\n # firstly, the system's dynamic matrix Atilde\n A11 = Atilde[0 : n - q, 0 : n - q]\n A12 = Atilde[0 : n - q, n - q : n]\n A21 = Atilde[n - q : n, 0 : n - q]\n A22 = Atilde[n - q : n, n - q : n]\n # the system's input matrix Btilde\n B1 = Btilde[0 : n - q, :]\n B2 = Btilde[n - q : n, :]\n # the system's output matrix Ctilde\n C11 = Ctilde[0:r, 0 : n - q]\n C12 = Ctilde[0:r, n - q : n]\n\n # partition the additional input vtilde\n v1 = vtilde[0 : n - q, :]\n v2 = vtilde[n - q : n, :]\n\n # C. Reduction of State-Space Dimension\n # now, we'll do the reduction of the state-space system\n\n # following the equations in the paper\n # calculating y1hat\n y1hat = y1 - C12 @ y2\n # we have to discard the last sample to make the dimensions of y1hat and y2hat match\n y1hat = y1hat[:, 0 : N - 1]\n\n # calculating y2hat\n # preallocating variables before the loop\n y2hat = np.zeros((q, N - 1))\n # running the loop\n for k in range(\n 0, N - 1\n ): # the loop has to run N-1 times, from 0 to N-2, because of y2[k+1] on the equation\n y2hat[:, k] = y2[:, k + 1] - A22 @ y2[:, k] - v2[:, k]\n\n # assembling the reduced system's output vector\n yhat = np.concatenate((y1hat, y2hat), axis=0)\n\n # calculating the additional input vhat\n vhat = v1 + A12 @ y2\n # discarding the last sample\n vhat = vhat[:, 0 : N - 1]\n\n # now, we'll assemble the reduced state-space system\n # reduced system's dynamic matrix\n Ahat = A11\n # reduced system's input matrix\n Bhat = B1\n # reduced system's output matrix\n Chat = np.concatenate((C11, A21), axis=0) # concatenate vertically (row wise)\n # reduced system's feedforward matrix\n Dhat = np.concatenate((D0, B2), axis=0) # concatenate vertically (row wise)\n # calculating rhat, the new rank of the feedforward matrix Dhat (an important quantity of the algorithm)\n rhat = np.linalg.matrix_rank(Dhat)\n\n # calculating the new dimension of the reduced system\n # reduced system's state vector dimension\n nhat = n - q\n # reduced system's output vector dimension\n phat = r + q\n\n return Ahat, Bhat, Chat, Dhat, yhat, vhat, nhat, phat, rhat",
"def _calculate_transforms(self):\n\n self._logger.info(\"Generating transformations.\")\n\n # Calculate partial transforms - get partial transformation chain;\n partial_transformation_pairs = \\\n map(lambda idx: self._get_slice_pair(idx),\n self.options.slice_range)\n\n # Flatten the slices pairs\n partial_transformation_pairs =\\\n list(flatten(partial_transformation_pairs))\n\n # If user decided to prealign the images by their centre of gravity\n # an additional series of transformations has to be carried out.\n if self.options.enableMomentsAlignment:\n commands = map(lambda x: self._get_cog_alignment(*x),\n partial_transformation_pairs)\n commands = filter(None, commands)\n\n self._logger.info(\"Executing the centre of gravity transforms.\")\n self.execute(commands)\n\n # Calculate affine transformation for each slices pair\n commands = map(lambda x: self._get_partial_transform(*x),\n partial_transformation_pairs)\n commands = filter(None, commands)\n self._logger.info(\"Executing the transformation commands.\")\n self.execute(commands)",
"def rigid_transform_3d(v, mapping, alpha = 1):\r\n p_wgt = vec3(0, 0, 0)\r\n q_wgt = vec3(0, 0, 0)\r\n w = len(mapping)*[None]\r\n w_sum = 0\r\n for i in range(len(mapping)):\r\n mp = mapping[i]\r\n x = mp[0].x - v.x\r\n y = mp[0].y - v.y\r\n z = mp[0].z - v.z\r\n if (x == 0 and y == 0 and z == 0): return mp[1]\r\n w[i] = 1/((x*x + y*y + z*z) ** alpha)\r\n p_wgt += mp[0]*w[i]\r\n q_wgt += mp[1]*w[i]\r\n w_sum += w[i]\r\n p_wgt /= w_sum\r\n q_wgt /= w_sum\r\n A = mat3(0)\r\n for i in range(len(mapping)):\r\n mp = mapping[i]\r\n p_adj = mp[0] - p_wgt\r\n q_adj = mp[1] - q_wgt\r\n A += w[i]*p_adj.transpose_multiply(q_adj)\r\n A_arr = np.array(A.matrix).reshape(3, 3)\r\n U, S, V = np.linalg.svd(A_arr)\r\n M_arr = np.matmul(np.transpose(V), np.transpose(U))\r\n M = mat3(M_arr.ravel().tolist())\r\n v_out = M*(v - p_wgt) + q_wgt\r\n return v_out",
"def _transform(self, X: Tensor) -> Tensor:\n pass # pragma: no cover",
"def _compose_transforms(basis_transforms, source_basis, source_dag):\n example_gates = _get_example_gates(source_dag)\n mapped_instrs = {}\n\n for gate_name, gate_num_qubits in source_basis:\n # Need to grab a gate instance to find num_qubits and num_params.\n # Can be removed following https://github.com/Qiskit/qiskit-terra/pull/3947 .\n example_gate = example_gates[gate_name, gate_num_qubits]\n num_params = len(example_gate.params)\n\n placeholder_params = ParameterVector(gate_name, num_params)\n placeholder_gate = Gate(gate_name, gate_num_qubits, list(placeholder_params))\n placeholder_gate.params = list(placeholder_params)\n\n dag = DAGCircuit()\n qr = QuantumRegister(gate_num_qubits)\n dag.add_qreg(qr)\n dag.apply_operation_back(placeholder_gate, qr[:], [])\n mapped_instrs[gate_name, gate_num_qubits] = placeholder_params, dag\n\n for gate_name, gate_num_qubits, equiv_params, equiv in basis_transforms:\n logger.debug(\n \"Composing transform step: %s/%s %s =>\\n%s\",\n gate_name,\n gate_num_qubits,\n equiv_params,\n equiv,\n )\n\n for mapped_instr_name, (dag_params, dag) in mapped_instrs.items():\n doomed_nodes = [\n node\n for node in dag.op_nodes()\n if (node.op.name, node.op.num_qubits) == (gate_name, gate_num_qubits)\n ]\n\n if doomed_nodes and logger.isEnabledFor(logging.DEBUG):\n\n logger.debug(\n \"Updating transform for mapped instr %s %s from \\n%s\",\n mapped_instr_name,\n dag_params,\n dag_to_circuit(dag, copy_operations=False),\n )\n\n for node in doomed_nodes:\n\n replacement = equiv.assign_parameters(\n dict(zip_longest(equiv_params, node.op.params))\n )\n\n replacement_dag = circuit_to_dag(replacement)\n\n dag.substitute_node_with_dag(node, replacement_dag)\n\n if doomed_nodes and logger.isEnabledFor(logging.DEBUG):\n\n logger.debug(\n \"Updated transform for mapped instr %s %s to\\n%s\",\n mapped_instr_name,\n dag_params,\n dag_to_circuit(dag, copy_operations=False),\n )\n\n return mapped_instrs",
"def transforms_multiply(t0s, t1s):\r\n \r\n return ut.matrix_multiply(t0s, t1s)",
"def transform(x: np.array, params: TransformParams) -> np.array:\n if params.do_hor_flip:\n x = flip_axis(x, 1)\n\n if params.do_vert_flip:\n x = flip_axis(x, 0)\n\n return x",
"def affineTransform(x,output_dim):\n w=tf.get_variable(\"w\", [x.get_shape()[1], output_dim])\n b=tf.get_variable(\"b\", [output_dim], initializer=tf.constant_initializer(0.0))\n return tf.matmul(x,w)+b",
"def forward(self, inputs, outputs):\n\n inimg = inputs[0]\n inimg_reshaped = inimg.reshape((inimg.shape[0] * inimg.shape[1], inimg.shape[2]))\n result = np.dot(self.TC, inimg_reshaped.T).T.reshape(inimg.shape)\n np.copyto(outputs[0], result)",
"def transform_from_rot_trans(R, t):\n R = R.reshape(3, 3)\n t = t.reshape(3, 1)\n return np.vstack((np.hstack([R, t]), [0, 0, 0, 1]))",
"def test_elemwise_multiple_inputs_optimisation2(self):\r\n raise SkipTest(\"Current implementation of Canonizer does not \"\r\n \"implement all cases. Skip the corresponding test.\")\r\n\r\n shp = (5, 5)\r\n fx, fy, fz = fmatrices('xyz')\r\n dx, dy, dz = dmatrices('xyz')\r\n fv = fvector('r').dimshuffle('x', 0)\r\n dv = dvector('s').dimshuffle('x', 0)\r\n fxv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fyv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fzv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float32').reshape(1, shp[0])\r\n dxv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dyv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dzv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float64').reshape(1, shp[0])\r\n cases = [\r\n (fx + fy, (fx, fy), (fxv, fyv), 1, 'float32'),\r\n (fx * fy, (fx, fy), (fxv, fyv), 1, 'float32'),\r\n (fx + fy + fz, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\r\n (dx + dy + dz, (dx, dy, dz), (dxv, dyv, dzv), 1, 'float64'),\r\n (fx * fy * fz, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\r\n (dx * dy * dz, (dx, dy, dz), (dxv, dyv, dzv), 1, 'float64'),\r\n (fx * fy * (fx + fy + fz), (fx, fy, fz), (fxv, fyv,\r\n fzv), 2, 'float32'),\r\n (dx * dy * (dx + dy + dz), (dx, dy, dz), (dxv, dyv,\r\n dzv), 2, 'float64'),\r\n (fx * fy * (fx + fy + dz), (fx, fy, dz), (dxv, dyv, dzv), 2,\r\n 'float64'), # check mixed type add\r\n (dz * fy * (fx + fy), (fx, fy, dz), (dxv, dyv, dzv), 2,\r\n 'float64'), # check mixed type mul\r\n #check with dimshuffle of constant\r\n (fx + fy + fz + 2, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\r\n (fx * fy * fz * 2, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\r\n (2 + fx + fy + fz, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\r\n (2 * fx * fy * fz, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\r\n (2 + fx + fy + fz + 2, (fx, fy, fz), (fxv, fyv,\r\n fzv), 1, 'float32'),\r\n (2 * fx * fy * fz * 2, (fx, fy, fz), (fxv, fyv,\r\n fzv), 1, 'float32'),\r\n (fx * fy * 2 * (fx+fy+fz), (fx, fy, fz), (fxv, fyv,\r\n fzv), 2, 'float32'),\r\n (fx*fy*(2+fx+fy+fz), (fx, fy, fz), (fxv, fyv, fzv), 2, 'float32'),\r\n (fx*fy*2*(fx+fy+fz+2), (fx, fy, fz), (fxv, fyv,\r\n fzv), 2, 'float32'),\r\n\r\n #check with broadcast of row\r\n (fx+fy+fz+fv, (fx, fy, fz, fv), (fxv, fyv, fzv,\r\n fvv), 1, 'float32'),\r\n (fx*fy*fz*fv, (fx, fy, fz, fv), (fxv, fyv, fzv,\r\n fvv), 1, 'float32'),\r\n (fv+fx+fy+fz, (fx, fy, fz, fv), (fxv, fyv, fzv,\r\n fvv), 1, 'float32'),\r\n (fv*fx*fy*fz, (fx, fy, fz, fv), (fxv, fyv, fzv,\r\n fvv), 1, 'float32'),\r\n (fx*fy*fv*(fx+fy+fz), (fx, fy, fz, fv), (fxv, fyv,\r\n fzv, fvv), 2, 'float32'),\r\n (fx*fy*(fv+fx+fy+fz), (fx, fy, fz, fv), (fxv, fyv,\r\n fzv, fvv), 2, 'float32'),\r\n (fx*fy*fv*(fv+fx+fy+fz), (fx, fy, fz, fv), (fxv, fyv, fzv,\r\n fvv), 2, 'float32'),\r\n (dx+dy+dz+dv, (dx, dy, dz, dv), (dxv, dyv, dzv,\r\n dvv), 1, 'float64'),\r\n (dx*dy*dz*dv, (dx, dy, dz, dv), (dxv, dyv, dzv,\r\n dvv), 1, 'float64'),\r\n (dv+dx+dy+dz, (dx, dy, dz, dv), (dxv, dyv, dzv,\r\n dvv), 1, 'float64'),\r\n (dv*dx*dy*dz, (dx, dy, dz, dv), (dxv, dyv, dzv,\r\n dvv), 1, 'float64'),\r\n (dx*dy*dv*(dx+dy+dz), (dx, dy, dz, dv), (dxv, dyv,\r\n dzv, dvv), 2, 'float64'),\r\n (dx*dy*(dv+dx+dy+dz), (dx, dy, dz, dv), (dxv, dyv,\r\n dzv, dvv), 2, 'float64'),\r\n (dx*dy*dv*(dv+dx+dy+dz), (dx, dy, dz, dv), (dxv, dyv, dzv,\r\n dvv), 2, 
'float64'),\r\n\r\n ] # [10:11]\r\n# print cases\r\n\r\n #We must be sure that the Canonizer is working, but that we don't have other\r\n # optimisation that could hide bug in the Canonizer as local_elemwise_fusion\r\n mode = compile.mode.get_default_mode()\r\n mode._optimizer = gof.Query([\"canonicalize\"])\r\n mode._optimizer = mode._optimizer.excluding('local_elemwise_fusion')\r\n for id, [g, sym_inputs, val_inputs, nb_elemwise, out_dtype] in enumerate(cases):\r\n f = compile.function(list(sym_inputs), g,\r\n #we need the optimisation enabled, debug do this.\r\n mode=mode)\r\n\r\n out = f(*val_inputs)\r\n assert(len(f.maker.fgraph.toposort()) == nb_elemwise)\r\n assert(out_dtype == out.dtype)",
"def deCasteljua2(P,n,m,u0,v0):\r\n #print \"P.shape: \", P.shape\r\n #print \"n, m = \", n, m\r\n if n <= m:\r\n # 3 because 3d points\r\n Q = np.zeros((3,n+1))\r\n for j in xrange(n+1):\r\n # Go through the rows\r\n Q[:,j] = deCasteljua(P[j,:,:].T,m,u0)\r\n return deCasteljua(Q,n,v0)\r\n else:\r\n #print \"Case where m > n:\"\r\n Q = np.zeros((3,m+1))\r\n for i in xrange(m+1):\r\n # Go through the columns\r\n Q[:,i] = deCasteljua(P[:,i,:].T,n,v0)\r\n return deCasteljua(Q,m,u0)"
] | [
"0.64119685",
"0.6391706",
"0.637345",
"0.63456833",
"0.6247986",
"0.61719114",
"0.6155749",
"0.6147075",
"0.60972595",
"0.60801315",
"0.607085",
"0.607085",
"0.6059854",
"0.6050465",
"0.6016514",
"0.59902495",
"0.59778076",
"0.59692",
"0.59610087",
"0.5923086",
"0.5920455",
"0.5881435",
"0.5854746",
"0.5853403",
"0.5842791",
"0.5842791",
"0.58371675",
"0.58249855",
"0.57725376",
"0.5754339",
"0.5717374",
"0.5709357",
"0.5695654",
"0.56857723",
"0.5670969",
"0.56642735",
"0.5629906",
"0.5624461",
"0.55857176",
"0.5584363",
"0.5571896",
"0.55714536",
"0.55551463",
"0.5551379",
"0.55351406",
"0.552891",
"0.55258924",
"0.5506247",
"0.550232",
"0.5497266",
"0.54889834",
"0.5486525",
"0.5475557",
"0.5461928",
"0.5458424",
"0.54566085",
"0.544854",
"0.5445793",
"0.5429535",
"0.5425555",
"0.5418975",
"0.541639",
"0.5410006",
"0.54077184",
"0.54077184",
"0.54077184",
"0.54077184",
"0.54077184",
"0.54077184",
"0.54077184",
"0.54055536",
"0.5398878",
"0.53985494",
"0.5397079",
"0.53918654",
"0.53855604",
"0.5383872",
"0.5383208",
"0.53821236",
"0.5380299",
"0.537746",
"0.5374959",
"0.5374959",
"0.53737205",
"0.53692025",
"0.53691703",
"0.5367915",
"0.53534603",
"0.53531754",
"0.5350638",
"0.53494656",
"0.5346387",
"0.5340869",
"0.5339744",
"0.5338939",
"0.53354573",
"0.5333835",
"0.533298",
"0.53262055",
"0.53250355"
] | 0.6508242 | 0 |
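Editor's note (not part of the dataset): the document of the row above ends by returning the reduced state-space system (Ahat, Bhat, Chat, Dhat, yhat, vhat, nhat, phat, rhat). As a hedged sketch only, the shapes implied by that code can be summarised as below; the helper name check_reduced_shapes and the dimensions m (number of inputs) and N (number of samples) are assumptions introduced for illustration, not part of the original code.

    def check_reduced_shapes(Ahat, Bhat, Chat, Dhat, yhat, vhat, nhat, phat, rhat, m, N):
        # nhat = n - q: the reduced state dimension kept in A11
        assert Ahat.shape == (nhat, nhat)
        # B1 keeps the first n - q rows of Btilde, one column per input
        assert Bhat.shape == (nhat, m)
        # Chat stacks C11 (r rows) on A21 (q rows), so phat = r + q outputs
        assert Chat.shape == (phat, nhat)
        # Dhat stacks D0 (r rows) on B2 (q rows)
        assert Dhat.shape == (phat, m)
        # yhat and vhat drop the last sample because y2[k + 1] is used in the loop
        assert yhat.shape == (phat, N - 1)
        assert vhat.shape == (nhat, N - 1)
        # rhat is the rank of the new feedforward matrix Dhat
        assert 0 <= rhat <= min(phat, m)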
Update used dataset. No copy is made. | def _updateData(self, data, range_):
self._data = None if data is None else numpy.array(data, copy=False)
self._getPlane().setData(self._data, copy=False)
# Store data range info as 3-tuple of values
self._dataRange = range_
if range_ is None:
range_ = None, None, None
self._setColormappedData(self._data, copy=False,
min_=range_[0],
minPositive=range_[1],
max_=range_[2])
self._updated(ItemChangedType.DATA) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _update_data(self, selected):\n if selected.row() != self.datasets.index:\n self.datasets.index = selected.row()\n self.datasets.update_current()\n self._update_main()",
"def _UpdateDataSetValues( self ):\n pass",
"def update_data(self, newData):\r\n self.AllData = newData",
"def update_data():\n pass",
"def update_original_data(self):\n pass",
"def refresh_train_dataset(self):\n inputs_id, inputs_starts, inputs_paths, inputs_ends, inputs_label = self.build_data(self.reader, self.train_items, self.option.max_path_length)\n self.train_dataset = CodeDataset(inputs_id, inputs_starts, inputs_paths, inputs_ends, inputs_label)",
"def _update_dataset_param(self, dataset):\n if dataset is None and self.dataset is None:\n return []\n if dataset is 'all':\n dataset = ''\n if dataset is None and self.dataset is not None:\n dataset = self.dataset\n return dataset",
"def update_dataset(\n self,\n dataset: DatasetDB,\n ) -> DatasetDB:\n dataset_id = dataset.id\n\n self._es.update_document(\n index=DATASETS_INDEX_NAME,\n doc_id=dataset_id,\n document=self._dataset_to_es_doc(dataset),\n )\n return dataset",
"def update_data(self):\n # Just set data_changed, the component should do the rest.\n self.data_changed = True",
"def setData(self,newData):\r\n pass",
"def force_update_graph(self):\n self.updated_data = 1\n self.update_graph()",
"def add_to_dataset(self, dataset: Dataset):\n pass",
"def update_data(self):\n self._model.update()\n self.__refresh()",
"def setDataset(self,dataset):\n self.__dataSet = dataset",
"def _update_data(self):\n for attribute in [\"flow_rate\"]:\n self._data[attribute] = self._connection.measure",
"def update(self, data: Union[QueryWithResponse, List[QueryWithResponse]], initial_point: Dict = None):\n if isinstance(data, list):\n self.dataset.extend(data)\n else:\n self.dataset.append(data)\n if initial_point is None:\n initial_point = self.mean\n \n self.create_samples(initial_point)",
"def resetDataRef(self, is_train):\n self.data_ref = 0",
"def update(self):\n self.data_service.update()\n self._state = self.data_service.data\n self._attributes = self.data_service.attributes",
"def update_dataset(self, dataset, name=None, description=None):\n uri = URITemplate(self.baseuri + '/{owner}/{id}').expand(\n owner=self.username, id=dataset)\n return self.session.patch(uri, json=self._attribs(name, description))",
"def update_data(self, name, cache_dir=None, data_dir=None, tasks=None):\n assert name, \"Must input a valid dataset name.\"\n assert name in self.data[\"dataset\"], \"The dataset \\'{}\\' does not exist in the cache.\" \\\n .format(name)\n if cache_dir:\n self.data[\"dataset\"][name][\"cache_dir\"] = cache_dir\n if data_dir:\n self.data[\"dataset\"][name][\"data_dir\"] = data_dir\n if tasks:\n self.data[\"dataset\"][name][\"tasks\"] = tasks\n self.data[\"dataset\"][name][\"keywords\"] = self._get_keywords_from_tasks(tasks)\n if cache_dir or data_dir or tasks:\n self.update_categories()\n self.write_data_cache(self.data)",
"def update(self, data):\n self.data.update(data)",
"def UpdateSet(self, dataset):\r\n for data in dataset:\r\n self.UpdateOddsRatioVsNoNorm(data)",
"def update(self, data_test):\n self.data_array[-1].update(data_test)",
"def refresh_test_dataset(self):\n inputs_id, inputs_starts, inputs_paths, inputs_ends, inputs_label = self.build_data(self.reader, self.test_items, self.option.max_path_length)\n self.test_dataset = CodeDataset(inputs_id, inputs_starts, inputs_paths, inputs_ends, inputs_label)",
"def update(self, name, cache_dir=None, data_dir=None, tasks=None):\n assert name, \"Must input a valid dataset name.\"\n self.manager.update_data(name, cache_dir, data_dir, tasks)",
"def update_dataset(self, data_name: str, append: pd.DataFrame):\n df = getattr(self, data_name)\n setattr(self, data_name, df.join(append, how='left'))",
"def add_or_remove(self, dataset: \"Dataset\") -> None:\n raise NotImplementedError",
"def refresh(dataset, client):\n pass",
"def set_data(self, dataset):\n if dataset is not None:\n self.infoa.setText('%d instances in input data set' % len(dataset))\n self.infob.setText('%d attributes in input data set' % len(dataset.domain.attributes))\n # Limited the batch size between 0.005 to 0.025, in\n # order tk=o make training fats and also accurate\n if(len(dataset) >= 200):\n self.batchsize = int(0.005 * len(dataset))\n self.batch_spin.setMinimum(int(0.005 * len(dataset)))\n self.batch_spin.setMaximum(int(0.025 * len(dataset)))\n else:\n # here the dataset is to small, hence fixed the\n # batch size programmatically\n self.batchsize = 1\n self.batch_spin.setMinimum(1)\n self.batch_spin.setMaximum(10)\n self.optionsBox.setDisabled(False)\n self.layerBox.setDisabled(False)\n self.updateLayer()\n self.dataset = dataset\n self.save_button.setDisabled(True)\n\n else:\n self.infoa.setText('No data on input yet, waiting to get something.')\n self.infob.setText('')\n self.optionsBox.setDisabled(True)\n self.layerBox.setDisabled(True)\n self.dataset = None",
"def __update_dataset(self, crs, transform, nodata=None):\r\n\r\n meta = {\r\n \"driver\": \"GTiff\",\r\n \"dtype\": self.__arr.dtype,\r\n \"nodata\": nodata,\r\n \"height\": self.__arr.shape[-2],\r\n \"width\": self.__arr.shape[-1],\r\n \"count\": self.__arr.shape[0],\r\n \"crs\": crs,\r\n \"transform\": transform,\r\n }\r\n\r\n memfile = MemoryFile()\r\n with memfile.open(**meta) as ds:\r\n ds.write(self.__arr)\r\n self.dataset = memfile.open()\r\n memfile.close()",
"def update_has_data(self):\n self.main()",
"def update(self, data):\n pass",
"def update(self, data):\n pass",
"def update(self, data):\n pass",
"def update(self, data):\n pass",
"def updateData(self):\n self.needsData.emit(self.property(\"number\"))",
"def update_all_data(self):\n self.dataChanged.emit(qtc.QModelIndex(), qtc.QModelIndex())",
"def _update_dataset(lc, geno, dataset, delete_resources=False):\n package_update_required = False\n if not _dataset_match(geno, dataset):\n dataset.update(_dataset_fields(geno))\n package_update_required = True\n\n chromos = dict(\n (chromo['resource_name'], chromo) for chromo in geno['resources'])\n\n # migrate recombinant1 datasets which had no resource\n # name to identify resource\n if (len(chromos) == 1 and len(dataset['resources']) == 1\n and dataset['resources'][0]['name'] == 'data'):\n dataset['resources'][0]['name'] = geno['resources'][0]['resource_name']\n package_update_required = True\n\n # collect updated resources\n out_resources = []\n for resource in dataset['resources']:\n if resource['name'] not in chromos:\n if not delete_resources:\n out_resources.append(resource)\n continue\n\n r = chromos.pop(resource['name'])\n\n if not _resource_match(r, resource):\n resource.update(_resource_fields(r))\n package_update_required = True\n\n out_resources.append(resource)\n\n # missing resources\n if chromos:\n out_resources.extend(\n # dummy url for old ckan compatibility reasons\n dict(_resource_fields(chromo), url='http://')\n for chromo in chromos.values())\n package_update_required = True\n\n if (package_update_required or\n len(out_resources) != len(dataset['resources'])):\n dataset['resources'] = out_resources\n dataset = lc.call_action('package_update', dataset)\n\n return dataset",
"def update(self):\n self.data_service.update()\n self._state = self.data_service.data.get(self._json_key)\n self._attributes = self.data_service.attributes.get(self._json_key)\n self._unit_of_measurement = self.data_service.unit",
"def update(self):\n self.data_service.update()\n self._state = self.data_service.data.get(self._json_key)\n self._attributes = self.data_service.attributes.get(self._json_key)\n self._unit_of_measurement = self.data_service.unit",
"def __setitem__(self):\n raise ValueError(\"Dataset objects are immutable\")",
"def update(self):\r\n self.data = [self.make_item_tuple(i) for i in self.query]\r\n self._fetched = True\r\n query_cache.set(self.iden, self.data)",
"def mark_obsolete_in_dataset( dataset_name, engine, table ):\n s = table.select( table.c.dataset_name==dataset_name ) \n result = conn.execute(s) # all rows of replica.files with the specified dataset_name\n\n sr = []\n srf = {}\n for row in result:\n # Note that you can loop through result this way only once.\n sr.append(row)\n fn = filename(row)\n if fn in srf:\n srf[fn].append(row)\n else:\n srf[fn] = [row]\n\n #sr.sort( key=filename )\n\n for fn,rows in srf.items():\n if len(rows)<=1: continue\n rows.sort( key=rowversion )\n print \"jfp will keep abs_path=\",rows[-1]['abs_path'],\"status=\",rows[-1]['status'],\\\n \"dataset_name=\",rows[-1]['dataset_name']\n for row in rows[0:-1]:\n abs_path = row['abs_path']\n dataset_name = \"old_\"+row['dataset_name']\n print \"jfp will do update for abs_path=\",abs_path,\"status from\",row['status'],\"to 50\"\n s = table.update().where( table.c.abs_path==abs_path ).\\\n values( status=50 )\n #if dataset_name.find('old_old_')!=0:\n # s = table.update().where( table.c.abs_path==abs_path ).\\\n # values( dataset_name=dataset_name )\n # ... doesn't work, you first have to create a row in replica.datasets with this name.\n result = conn.execute(s)",
"def _updateLinkDataDataset(self, dataset, columns=None):\n if columns is None:\n columns = self.getModelObj().getChildren()\n return model_navigator.iqModelNavigatorManager._updateLinkDataDataset(self, dataset=dataset, columns=columns)",
"def _update_data(self, data, update_original=False):\n self._data.update(dict((key, self._deserialize(key, value))\n for key, value in data.items()))\n\n if update_original:\n self._original_data = copy.deepcopy(self._data)",
"def process_dataset(dataset, func):\n new_dataset = copy.copy(dataset)\n del new_dataset[\"val\"]\n new_dataset.update(func(dataset))\n return new_dataset",
"def update(self, dataset: Dataset, updates_allowed=None, archive_less_mature=None):\n existing = self.get(dataset.id)\n can_update, safe_changes, unsafe_changes = self.can_update(dataset, updates_allowed)\n\n if not safe_changes and not unsafe_changes:\n self._ensure_new_locations(dataset, existing)\n _LOG.info(\"No changes detected for dataset %s\", dataset.id)\n return dataset\n\n for offset, old_val, new_val in safe_changes:\n _LOG.info(\"Safe change in %s from %r to %r\", _readable_offset(offset), old_val, new_val)\n\n for offset, old_val, new_val in unsafe_changes:\n _LOG.warning(\"Unsafe change in %s from %r to %r\", _readable_offset(offset), old_val, new_val)\n\n if not can_update:\n raise ValueError(f\"Unsafe changes in {dataset.id}: \" + (\n \", \".join(\n _readable_offset(offset)\n for offset, _, _ in unsafe_changes\n )\n ))\n\n _LOG.info(\"Updating dataset %s\", dataset.id)\n\n product = self.types.get_by_name(dataset.product.name)\n with self._db_connection(transaction=True) as transaction:\n if not transaction.update_dataset(dataset.metadata_doc_without_lineage(), dataset.id, product.id):\n raise ValueError(\"Failed to update dataset %s...\" % dataset.id)\n if archive_less_mature is not None:\n self.archive_less_mature(dataset, archive_less_mature)\n\n self._ensure_new_locations(dataset, existing)\n\n return dataset",
"def update(self):\n self.data_service.update()\n self._state = self.data_service.data[self._json_key]\n self._attributes = self.data_service.attributes[self._json_key]",
"def update_stat_obj(self, nn_id, input_data):\n try:\n obj = models.NN_DEF_LIST_INFO.objects.get(nn_id=nn_id)\n data_set = getattr(obj, \"automl_stat\")\n data_set['bygen'] = input_data['bygen']\n data_set['best'] = input_data['best']\n setattr(obj, \"automl_stat\", data_set)\n obj.save()\n return data_set\n except Exception as e:\n raise Exception(e)",
"def reset_data(self):\n self.data = None",
"def update(self):\n self.data_service.update()\n self._state = self.data_service.data[self._json_key]",
"def reset_data(self):\n self.data = []",
"def ProcessDatasetOverwrite(ref, args, request):\n del ref\n dataset_id = request.dataset.datasetReference.datasetId\n project_id = request.projectId\n\n if args.overwrite:\n if _DatasetExists(dataset_id, project_id):\n _TryDeleteDataset(dataset_id, project_id)\n\n return request",
"def DataSet(self, dataset):\n # Do we have dataset to store\n if dataset and dataset.VTKObject:\n # Do we need to create a vtkWeakReference\n if not hasattr(self, '_dataset') or self._dataset is None:\n self._dataset = vtkWeakReference()\n\n self._dataset.Set(dataset.VTKObject)\n else:\n self._dataset = None",
"def update_scatter_data_based_on_bin(attr, old, new):\n # make new dataset\n new_src = make_dataset_for_scatter()\n\n #update the data\n scatter_src.data.update(new_src.data)\n\n return",
"def setData(self,newdata):\n self.record(inspect.currentframe())\n if np.shape(newdata) == np.shape(self.data):\n self.data = np.copy(newdata)",
"def __delitem__(self):\n raise ValueError(\"Dataset objects are immutable\")",
"def update_data(self, start=None, end=None):\n if self.verbose:\n print(\"Updating data\")\n start, end = self.get_range(start, end)\n self.source_data = self.get_dict_from_range(start, end)\n for c in self.callbacks[\"update_data\"]:\n c()",
"def update(self, ds, **kwargs):\n ds.set_status(self._db, self._es, self._queue, DatasetStatus.INDEXING)\n\n self._es.delete_ds(ds.id)\n for mol_db_dict in ds.config['databases']:\n mol_db = MolecularDB(name=mol_db_dict['name'],\n version=mol_db_dict.get('version', None),\n iso_gen_config=ds.config['isotope_generation'])\n self._es.index_ds(ds.id, mol_db)\n\n ds.set_status(self._db, self._es, self._queue, DatasetStatus.FINISHED)",
"def update_data(self):\n data, meta_data = ts.get_daily(symbol=self.stock_ticker, outputsize='full')\n self.data = data\n self.meta_data = meta_data",
"def _reload(self):\n if os.path.exists(self.filename):\n self.data = pd.read_csv(self.filename)\n else:\n self.data = pd.DataFrame(columns=self.unique_keys)\n\n # Set these default values\n # if 'weight_rescale' not in self.data.columns:\n # self.data['weight_rescale'] = 'none'\n # if 'norm' not in self.data.columns:\n # self.data['norm'] = 'softmax'\n # if 'update' not in self.data.columns:\n # self.data['update'] = 'all'\n # if 'replay' not in self.data.columns:\n # self.data['replay'] = False\n if 'debug' not in self.data.columns:\n self.data['debug'] = False\n\n # if 'tie' not in self.data.columns:\n # self.data['tie'] = False\n\n if 'update_length' not in self.data.columns:\n self.data['update_length'] = 0\n # for key in self.unique_keys:\n # self.data[key] = np.nan\n # Remaining set to None\n # for k in self.check_keys:\n # if k not in self.data.columns:\n # self.data[k] = None",
"def update(self, data: Mapping[str, np.ndarray]) -> Self:\n\n raise NotImplementedError",
"def _update_data(storage: Union[h5py.File, h5py.Group], name: str, data: Any, compress: bool = False) -> None:\n if name in storage:\n del storage[name]\n if compress:\n storage.create_dataset(name, data=data, compression=\"gzip\", compression_opts=5)\n else:\n storage.create_dataset(name, data=data)",
"def _update_modified_data_sources(self):\n new_last_imported = datetime.utcnow()\n self._update_modified_since(self.last_imported)\n self.last_imported = new_last_imported",
"def update_record(self, d: dict) -> None:\n super().update_record(d)\n d.update(\n dataset_doses=str_list(self.doses),\n dataset_ns=str_list(self.ns),\n dataset_stdevs=str_list(self.stdevs),\n dataset_means=str_list(self.means),\n )",
"def update(self, data):\n return self._data.update(data)",
"def update(self):\r\n self._data.update()\r\n if self._data == None:\r\n _Log.error('No data!')\r\n\r\n\r\n expedite = self._data.data['trafficinfo']['evaluation']['expedite']\r\n congested = self._data.data['trafficinfo']['evaluation']['congested']\r\n blocked = self._data.data['trafficinfo']['evaluation']['blocked']\r\n unknown = self._data.data['trafficinfo']['evaluation']['unknown']\r\n description = self._data.data['trafficinfo']['evaluation']['description']\r\n status = self._data.data['trafficinfo']['evaluation']['status']\r\n if status == '0':\r\n traffic_status = '未知'\r\n elif status == '1':\r\n traffic_status = '畅通'\r\n elif status == '2':\r\n traffic_status = '缓行'\r\n elif status == '3':\r\n traffic_status = '拥堵'\r\n elif status == '4':\r\n traffic_status = '超堵'\r\n else:\r\n traffic_status = '无数据'\r\n self._state = traffic_status\r\n\r\n adddict = {\r\n '畅通所占百分比':expedite,\r\n '缓行所占百分比':congested,\r\n '拥堵所占百分比':blocked,\r\n '未知路段所占百分比':unknown,\r\n '道路描述':description,\r\n '路况': self._data.data['trafficinfo']['description'],\r\n '更新时间':datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\r\n self.attributes=adddict",
"def update_data(self, new_data = None):\n\t\tif new_data == None:\n\t\t\tnew_data = self.data\n\n\t\telse:\n\t\t\tassert isinstance(new_data, list), f\"{new_data} must be of type list!\"\n\n\t\twith open(\"data.json\", \"w\") as dw:\n\t\t\tjson.dump(new_data, dw)",
"def set_data(self, new_data):\n self.data = new_data",
"def update_data(self, extra_data):\n self._data.update(extra_data)\n return self",
"def update(self):\n if self._data_provider_state is not None:\n self._state = self._data_provider_state()\n \n if self._data_provider_attributes is not None:\n self._attributes = self._data_provider_attributes()",
"def update(self, dt):\n\n self.collecting(dt)",
"def reset(self, dataset):\n assert dataset, 'Groundtruth should not be empty.'\n assert isinstance(dataset,\n dict), 'annotation file format {} not supported'.format(\n type(dataset))\n self.anns, self.cats, self.imgs = dict(), dict(), dict()\n self.dataset = copy.deepcopy(dataset)\n self.createIndex()",
"def update(self):\n # default implementation is to do nothing.",
"def update_dataset(request):\n body = json.loads(request.body)\n orgs = request.user.orgs.all()\n # check if user has access to the dataset\n d = ImportRecord.objects.filter(\n super_organization__in=orgs, pk=body['dataset']['id']\n )\n if not d.exists():\n return {\n 'status': 'error',\n 'message': 'user does not have permission to update dataset',\n }\n d = d[0]\n d.name = body['dataset']['name']\n d.save()\n return {\n 'status': 'success',\n }",
"def delete_data(self, name):\n assert name, \"Must input a valid dataset name.\"\n try:\n self.data[\"dataset\"].pop(name)\n self.update_categories()\n self.write_data_cache(self.data)\n except KeyError:\n raise KeyError(\"The dataset \\'{}\\' does not exist in the cache.\".format(name))",
"def save(self, *args, **kwargs):\n self.full_accession = self.set_full_accession()\n self.dbgap_link = self.set_dbgap_link()\n super(SourceDataset, self).save(*args, **kwargs)",
"def _posttrain(self, dataset):\n Classifier._posttrain(self, dataset)\n if self.params.retrainable:\n self.__changedData_isset = False",
"def update_data(client, dataset_id, dataset_name, updated_dataset):\n view = client.views.lookup(dataset_id)\n revision = view.revisions.create_replace_revision(permission='private')\n upload = revision.create_upload(dataset_name)\n\n # The path of the updated dataset should be a string to the csv, geojson, shapefile zip, etc.\n if type(updated_dataset) == str:\n with open(updated_dataset, 'rb') as f:\n extension = os.path.splitext(updated_dataset)[1]\n if extension == '.csv':\n source = upload.csv(f)\n elif extension == '.xls':\n source = upload.xls(f)\n elif extension == 'xlsx':\n source = upload.xlsx(f)\n elif extension == '.tsv':\n source = upload.tsv(f)\n elif extension == '.zip':\n source = upload.shapefile(f)\n elif extension == '.kml':\n source = upload.kml(f)\n elif extension == '.geojson':\n source = upload.geojson(f)\n else:\n raise Exception('File format not supported')\n elif type(updated_dataset) == pd.DataFrame or type(updated_dataset) == gpd.GeoDataFrame:\n source = upload.df(updated_dataset)\n\n output_schema = source.get_latest_input_schema().get_latest_output_schema()\n\n output_schema = output_schema.wait_for_finish()\n\n # check for errors\n assert output_schema.attributes['error_count'] == 0\n print(output_schema.attributes['error_count'])\n\n # If you want, you can get a csv stream of all the errors\n errors = output_schema.schema_errors_csv()\n for line in errors.iter_lines():\n print(line)\n\n #############################################################################\n # The next few lines of code will update the draft/revision into the asset. #\n # Do not run if you plan on keeping your draft! #\n #############################################################################\n job = revision.apply(output_schema=output_schema)\n\n # This code outputs the status from the Job object\n # Track the async process\n def job_progress(job):\n clear_output(wait=True)\n print(job.attributes['log'][0]['stage'])\n print('Job progress:', job.attributes['status'])\n\n job = job.wait_for_finish(progress = job_progress)\n sys.exit(0 if job.attributes['status'] == 'successful' else 1)",
"def _update_usage(self, usage, write_weights):\n with torch.no_grad():\n write_weights = 1 - torch.prod(1 - write_weights, 1)\n updated_usage = usage + (1 - usage) * write_weights\n return updated_usage",
"def __init__(self):\n self.__dataset = None",
"def update_data(self, **kwargs):\n self.source_data = self.get_dict()\n for c in self.callbacks[\"update_data\"]:\n c()",
"def _update(self, count=True, forced=False):",
"def free_finalizer(self, dataset: dict):\n # for gc being late\n if dataset:\n if dataset['vrtx']:\n dataset['vrtx'].release()\n if dataset['indx']:\n dataset['indx'].release()\n dataset.clear()",
"def _update(self):\n # clear group before rebuild\n self.clear()\n\n # build configuration groups\n self._config_names = []\n for i in range(self._n_configs):\n config_name = f\"config{i+1:02}\"\n self._config_names.append(config_name)\n self._build_config_group(config_name)\n\n # reset active configuration if necessary\n if not all(cname in self._config_names for cname in self._active_config):\n self._active_config = (self._config_names[0],)\n\n # build datasets\n self._build_datasets()",
"def datasetAvailable(self):\n dset = None\n try:\n dset = self._getcopy()\n except Exception:\n pass\n\n if dset is not None:\n self._parent.destroyDset(dset)\n return True\n return False",
"def set_data(self, d):\n self._data = d\n self.is_data_set = True",
"def query(self):\n self._measurements[self.KEY_USAGE].df = self.fetch_data_usage()",
"def update(self):\n self._xfinity_data.update()",
"def set_data(self, df):\n self.df = df",
"def update(self):\n raise NotImplementedError",
"def _original_data(self, data: np.ndarray):\n if self._raw_data is None:\n self._raw_data = data",
"def hard_update(self,target, source):\n\t\tfor target_param, param in zip(target.parameters(), source.parameters()):\n\t\t\t\ttarget_param.data.copy_(param.data)",
"def updateLayerData(self, **kwargs):\n self.currentLayerData = self.layers[self.getCurrentRow()]\n self.currentLayerData.update(**kwargs)\n self.layers[self.getCurrentRow()] = self.currentLayerData\n self.updateSelectedLayer()",
"def datasets(self, datasets):\n self.__datasetsAll = datasets\n self.__datasets = list(datasets)\n self.__axisDomains = None\n for ds in self.__datasetsAll:\n self.__datasetsPerClass[ds[-1]] = self.__datasetsPerClass.get(ds[-1], 0) + 1\n self.dataChanged.emit()",
"def set_all_data_internal(self, check_data=True):\n for key, dataset in self.datasets.items():\n if (\n isinstance(dataset, mfdataarray.MFArray)\n or (\n isinstance(dataset, mfdatalist.MFList)\n and dataset.structure.type == DatumType.recarray\n )\n and dataset.enabled\n ):\n dataset.store_internal(check_data=check_data)",
"def update_unified_dataset(session: Session, project: MasteringProject) -> Operation:\n unified_dataset = unified.from_project(session, project)\n op = unified._apply_changes_async(session, unified_dataset)\n return operation.wait(session, op)",
"def dataset(self):\n with self._lock:\n if self._dataset is None:\n if isinstance(self._orig_dataset, DaskLazyIndexer):\n self._orig_dataset = self._orig_dataset.dataset\n dataset = dask_getitem(self._orig_dataset, self.keep)\n for transform in self.transforms:\n dataset = transform(dataset)\n self._dataset = dataset\n self._orig_dataset = None\n return self._dataset",
"def update(self):\n self.getDbRecord().update()",
"def updateStockDF(self): # Client stock dataframe\n self.all_items = self.stock_df['Item'].unique()\n self.stock_df = pd.read_csv('data/menu.csv')\n self.categories = self.stock_df['Category'].unique()\n for category in self.categories_data:\n self.categories_data[category] = self.stock_df[self.stock_df['Category'] == category]"
] | [
"0.72957367",
"0.6916875",
"0.68677783",
"0.6687427",
"0.6636599",
"0.64887625",
"0.64763397",
"0.6363844",
"0.63379943",
"0.6290157",
"0.6272427",
"0.6256088",
"0.6151666",
"0.61373025",
"0.61060303",
"0.6089082",
"0.6079945",
"0.6057681",
"0.6042186",
"0.60393506",
"0.6034745",
"0.6025197",
"0.6013513",
"0.5996562",
"0.599536",
"0.59781003",
"0.5970853",
"0.5968805",
"0.59670424",
"0.5929985",
"0.5883843",
"0.58751273",
"0.58751273",
"0.58751273",
"0.58751273",
"0.5866153",
"0.58607805",
"0.5849826",
"0.582912",
"0.582912",
"0.5824031",
"0.58227175",
"0.5817852",
"0.5803995",
"0.57946134",
"0.5788502",
"0.5782556",
"0.5774407",
"0.5771815",
"0.57715213",
"0.57672226",
"0.5764667",
"0.5762618",
"0.57574284",
"0.57400763",
"0.57244813",
"0.57088774",
"0.56915957",
"0.5691172",
"0.568938",
"0.5665205",
"0.56647396",
"0.5657595",
"0.5652912",
"0.56523156",
"0.5650003",
"0.5636882",
"0.56365764",
"0.5636159",
"0.56122965",
"0.56025004",
"0.5591737",
"0.5588253",
"0.55787355",
"0.5574569",
"0.55719966",
"0.5571112",
"0.5569564",
"0.5569222",
"0.5552507",
"0.55507153",
"0.5541369",
"0.55354035",
"0.5529467",
"0.5529226",
"0.55261046",
"0.5524166",
"0.5523122",
"0.5520751",
"0.5512939",
"0.55121154",
"0.5494704",
"0.5490547",
"0.5489309",
"0.5488464",
"0.5483725",
"0.54764885",
"0.5475442",
"0.54732776",
"0.54708815"
] | 0.5581601 | 73 |
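Editor's note (not part of the dataset): the query/document pair above describes an _updateData method that stores its input with numpy.array(data, copy=False), i.e. without copying. A minimal, hedged sketch of that no-copy behaviour, using only NumPy; the variable names are illustrative:

    import numpy as np

    data = np.arange(12, dtype=np.float32).reshape(3, 4)
    stored = np.array(data, copy=False)      # no copy for a compatible ndarray input
    assert np.shares_memory(stored, data)    # both names refer to the same buffer
    data[0, 0] = 42.0
    assert stored[0, 0] == 42.0              # the change is visible through `stored`

This is the point the docstring stresses: because no copy is made, later modifications of the caller's array are reflected in the data the item holds.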
Synchronize this instance data with that of its parent | def _syncDataWithParent(self):
parent = self.parent()
if parent is None:
data, range_ = None, None
else:
data = parent.getData(copy=False)
range_ = parent.getDataRange()
self._updateData(data, range_) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _syncDataWithParent(self):\n parent = self.parent()\n if parent is None:\n self._data = None\n else:\n self._data = parent.getData(copy=False)\n self._updateScenePrimitive()",
"def _syncDataWithParent(self):\n parent = self.parent()\n if parent is None:\n data, range_ = None, None\n else:\n mode = self.getComplexMode()\n data = parent.getData(mode=mode, copy=False)\n range_ = parent.getDataRange(mode=mode)\n self._updateData(data, range_)",
"def _syncDataWithParent(self):\n parent = self.parent()\n if parent is None:\n self._data = None\n else:\n self._data = parent.getData(\n mode=parent.getComplexMode(), copy=False)\n\n if parent is None or self.getComplexMode() == self.ComplexMode.NONE:\n self._setColormappedData(None, copy=False)\n else:\n self._setColormappedData(\n parent.getData(mode=self.getComplexMode(), copy=False),\n copy=False)\n\n self._updateScenePrimitive()",
"def sync(self):\n pass",
"def sync(self):\n pass",
"def sync(self):\n return",
"def sync(self, other):\n pass # TODO",
"def do_sync(self):\n raise NotImplementedError() # pragma: no cover",
"def sync_local(self, other):\n pass # TODO",
"def doSync (self) :\r\n \r\n self.factory.getSyncFor(self)",
"def sync(self, **kwargs):\n pass",
"def update_original_data(self):\n pass",
"def _parentChanged(self, event):\n if event == ItemChangedType.DATA:\n self._syncDataWithParent()",
"def _parentChanged(self, event):\n if event == ItemChangedType.DATA:\n self._syncDataWithParent()",
"def sync(self):\n return self._sync",
"def update(self, parent):\r\n pass",
"def _post_sync(self):",
"def _reload(self):\n if self._ancestorModelSourceCreated:\n self._parent._reload()\n else:\n # beware this breaks parent/child links such as current selection / hierarchical groups\n dictSave = self.serialize()\n tmpRegion = self._createBlankCopy()\n tmpRegion.deserialize(dictSave)\n self._assign(tmpRegion)\n self._informRegionChange(True)",
"def lock(self):\n raise NotImplementedError",
"def sync() -> None:",
"def sync(self):\n if not self.is_readonly():\n deser = self._deserialize()\n orig = getattr(self.model, self.name)\n if (orig != deser):\n if isinstance(orig, list):\n # first remove the original triples, instead of doing sophisticated\n # set manipulations\n setattr(self.model, self.name, [])\n setattr(self.model, self.name, deser)",
"def SyncRoot(self) -> object:",
"def sync(self):\r\n\r\n # Ensure to rerun only once to avoid infinite loops\r\n # caused by a constantly changing state value at each run.\r\n #\r\n # Example: state.value += 1\r\n if self._state[\"is_rerun\"]:\r\n self._state[\"is_rerun\"] = False\r\n\r\n elif self._state[\"hash\"] is not None:\r\n if self._state[\"hash\"] != self._state[\"hasher\"].to_bytes(self._state[\"data\"], None):\r\n self._state[\"is_rerun\"] = True\r\n self._state[\"session\"].request_rerun()\r\n\r\n self._state[\"hash\"] = self._state[\"hasher\"].to_bytes(self._state[\"data\"], None)",
"def sync(self, sync):\n self._sync = sync",
"def sync(self):\n\n # Ensure to rerun only once to avoid infinite loops caused by a constantly changing state value at each run.\n # Example: state.value += 1\n\n if self._state[\"is_rerun\"]:\n self._state[\"is_rerun\"] = False\n\n elif self._state[\"hash\"] is not None:\n if self._state[\"hash\"] != self._state[\"hasher\"].to_bytes(\n self._state[\"data\"], None\n ):\n self._state[\"is_rerun\"] = True\n self._state[\"session\"].request_rerun()\n\n self._state[\"hash\"] = self._state[\"hasher\"].to_bytes(self._state[\"data\"], None)",
"def _pre_sync(self):",
"def sync(self):\r\n\r\n # Ensure to rerun only once to avoid infinite loops\r\n # caused by a constantly changing state value at each run.\r\n #\r\n # Example: state.value += 1\r\n if self._state[\"is_rerun\"]:\r\n self._state[\"is_rerun\"] = False\r\n \r\n elif self._state[\"hash\"] is not None:\r\n if self._state[\"hash\"] != self._state[\"hasher\"].to_bytes(self._state[\"data\"], None):\r\n self._state[\"is_rerun\"] = True\r\n self._state[\"session\"].request_rerun()\r\n\r\n self._state[\"hash\"] = self._state[\"hasher\"].to_bytes(self._state[\"data\"], None)",
"def sync(self):\n\n # Ensure to rerun only once to avoid infinite loops\n # caused by a constantly changing state value at each run.\n #\n # Example: state.value += 1\n if self._state[\"is_rerun\"]:\n self._state[\"is_rerun\"] = False\n\n elif self._state[\"hash\"] is not None:\n if self._state[\"hash\"] != self._state[\"hasher\"].to_bytes(self._state[\"data\"], None):\n self._state[\"is_rerun\"] = True\n self._state[\"session\"].request_rerun()\n\n self._state[\"hash\"] = self._state[\"hasher\"].to_bytes(\n self._state[\"data\"], None)",
"def sync(self):\n\n if self._inchild:\n os.read(self._pr_child, len(self.RELEASE_MSG))\n else:\n os.read(self._pr_parent, len(self.RELEASE_MSG))",
"def sync(self):\n\n # Ensure to rerun only once to avoid infinite loops\n # caused by a constantly changing state value at each run.\n #\n # Example: state.value += 1\n if self._state[\"is_rerun\"]:\n self._state[\"is_rerun\"] = False\n\n elif self._state[\"hash\"] is not None:\n if self._state[\"hash\"] != self._state[\"hasher\"].to_bytes(\n self._state[\"data\"], None\n ):\n self._state[\"is_rerun\"] = True\n self._state[\"session\"].request_rerun()\n\n self._state[\"hash\"] = self._state[\"hasher\"].to_bytes(self._state[\"data\"], None)",
"def update(self):\n\n raise NotImplementedError('Must be implemented by subclasses')",
"def freeze(self,):\n pass",
"def sync(self):\n # TODO: write better documentation: when would user need this?\n wait(self.proto.sync())",
"def sync(self, sync):\n\n self._sync = sync",
"def after_sync(self):\n self.title = self.c[\"title\"]\n self.body = self.c[\"body\"]\n self.state = self.c[\"state\"]\n self.base = self.c[\"base\"][\"ref\"]\n self.head = self.c[\"head\"][\"ref\"]\n self.maintainer_can_modify = self.c[\"maintainer_can_modify\"]",
"def sync(self) -> None:\n for parameter in self.data_to_sync:\n assert hasattr(self, parameter), \\\n \"Parameter: %s does not exist in: %s\" % (parameter, self)\n self.publish(self.key_gen(parameter), getattr(self, parameter))",
"def sync(self) -> None: #\n self.__target.load_state_dict(self.__policy.state_dict())",
"def sync(self):\n resp = yield self.do_sync()\n self.c = resp.data\n self.after_sync()\n raise gen.Return(self)",
"def sync_tree_cache(self) -> None:\n self.sync_tree_with_data(self.tree_cache, self.data_cache)",
"def __post_init__(self):\n # ------------------------------------------------------------ 01\n # if path exists load data dict from it\n # that is sync with contents on disk\n if self.path.exists():\n _hashable_dict_from_disk = \\\n m.FrozenDict.from_yaml(self.path.read_text())\n # update internal dict from HashableDict loaded from disk\n self.__dict__.update(\n _hashable_dict_from_disk.get()\n )\n\n # ------------------------------------------------------------ 02\n # start syncing i.e. any updates via __setattr__ will be synced\n # to disc\n self.internal.start_syncing = True",
"def _notify_parent_change(self):\n pass",
"def syncContents(self):\n self._contents.setState_TRY(self.temperature(),\n self.density(),\n self.massFractions())",
"def freeze(self):\n raise NotImplementedError()",
"def i_am_locking(self):\r\n pass",
"def update(self):\n\n pass",
"def update(self):\n return self",
"def update(self):\n raise NotImplementedError",
"def _update(self):\n pass",
"def update(self):\r\n pass",
"def on_parent_changed(self):\n pass",
"def update(self):\n if self._data_provider_state is not None:\n self._state = self._data_provider_state()\n \n if self._data_provider_attributes is not None:\n self._attributes = self._data_provider_attributes()",
"def sync_remote(self, other):\n pass # TODO",
"def update(self):\n pass",
"def update(self):\n pass",
"def update(self):\n pass",
"def update(self):\n pass",
"def update(self):\n pass",
"def update(self):\n pass",
"def update(self):\n pass",
"def update(self):\n pass",
"def update(self):\n pass",
"def update(self):\n pass",
"def update(self):\n pass",
"def update(self):\n pass",
"def update(self):\n pass",
"def update(self):\n pass",
"def update(self):\n pass",
"def update(self):\n raise NotImplementedError()",
"def lock (self):\n self.locked = True\n self._changed = False",
"def _parent_changed(self):\n raise NotImplementedError(\"shouldnt happen, Parentable objects need to be able to change their parent\")",
"def sync_after_remote_computation(self):\n\n # If this state was never initialized, it doesn't have any out-of-date\n # information, so there's no need to update anything.\n if not self.is_initialized:\n return\n\n assert self.should_persist\n\n # First, let's flush the stored entries in our cache accessor. Since we just\n # computed this entry in a subprocess, there should be a new cache entry that\n # isn't reflected yet in our local accessor.\n # (We don't just call self.refresh_cache_accessors() because we don't\n # particularly want to do the cache versioning check -- it's a little late to\n # do anything if it fails now.)\n self._cache_accessor.flush_stored_entries()\n\n # Then, populate the value hashes.\n if self._result_value_hash is None:\n self._load_value_hash()",
"def __getstate__(self) -> Dict[str, Any]:\n s = self.__dict__.copy()\n # Kill the parent ref. It won't pickle well.\n s[\"_parent\"] = None\n return s",
"def update(self):\n with managed_session() as session:\n session.merge(self)",
"def merge_from(self, other):\n assert not self.is_final\n if self.parent is not None:\n assert other.parent is not None\n self.parent.merge_from(other.parent)\n self.isolated_names.update(other.isolated_names)\n self.read.update(other.read)\n self.modified.update(other.modified)\n self.bound.update(other.bound)\n self.deleted.update(other.deleted)\n self.annotations.update(other.annotations)\n self.params.update(other.params)",
"def sync_tree_db(self) -> None:\n self.sync_tree_with_data(self.tree_db, self.data_db)",
"def update(self):\n # default implementation is to do nothing.",
"def sync_widgets(self):\n self.data_changed.emit(self.value)",
"def sync_to_ontology(self):\n self.ontology.sync_entity_to_graph(self)",
"def update_inplace_from(self, other):\n self.__dict__ = other.__dict__.copy()",
"def copy(self):\n return super().copy()",
"def lock(self):\n self.mtx.acquire()",
"def __init__(self):\n self._data_queue = []\n self._access_queue_lock = Lock()",
"def build(self):\n self.lock_built = True",
"def after_sync(self):\n pass",
"def update_data():\n pass",
"def __init__(self):\n self.data = {}\n self.refresh()",
"def __enter__(self):\n\n self.create()\n return super().__enter__()",
"def reparent(self, obj, parent):\n return self.update(obj, parent=parent)",
"async def async_update(self) -> None:\n await super().async_update()\n await self.async_get_state()",
"def restore_object(self):\n self.co_worker_list = self.original_co_worker_list",
"def _update_object(self, data_dict):\r\n pass",
"def update(self, other):\n b = self.hallucinate_merge(other)\n self.l_child = b.l_child\n self.r_child = b.r_child",
"def sync_info(self, sync_info):\n\n self._sync_info = sync_info",
"def update(self):\n self._xfinity_data.update()",
"def cambiar_parent(self):\r\n self.client.parent = self",
"def cambiar_parent(self):\r\n self.client.parent = self",
"def reload_data(self):\n super(UpdateMessage, self).reload_data()\n self._previous_avro_payload.reload_data()",
"def sync_end(self):",
"def __enter__(self):\n return self._get_storage().__enter__()",
"def __enter__(self):\n\n return self"
] | [
"0.8105605",
"0.7977319",
"0.7733652",
"0.71153784",
"0.71153784",
"0.70563495",
"0.7043718",
"0.674526",
"0.65729344",
"0.6530828",
"0.64562",
"0.6451494",
"0.6362502",
"0.6362502",
"0.6325555",
"0.63112843",
"0.6255493",
"0.6245364",
"0.6242624",
"0.619789",
"0.61851496",
"0.61773336",
"0.61612016",
"0.61592674",
"0.615458",
"0.61517084",
"0.6139571",
"0.61280423",
"0.6117423",
"0.6102235",
"0.60881495",
"0.6027518",
"0.6023041",
"0.6007091",
"0.5970572",
"0.5955212",
"0.59466237",
"0.5941309",
"0.59173286",
"0.5876683",
"0.58658206",
"0.5858481",
"0.58379585",
"0.583246",
"0.5813005",
"0.57983154",
"0.5788369",
"0.5780768",
"0.5767515",
"0.57577527",
"0.57572305",
"0.5747498",
"0.5747469",
"0.5747469",
"0.5747469",
"0.5747469",
"0.5747469",
"0.5747469",
"0.5747469",
"0.5747469",
"0.5747469",
"0.5747469",
"0.5747469",
"0.5747469",
"0.5747469",
"0.5747469",
"0.5747469",
"0.5747163",
"0.5744666",
"0.57408583",
"0.5727137",
"0.5714643",
"0.5712627",
"0.5683065",
"0.5666505",
"0.56472826",
"0.5630778",
"0.56155974",
"0.56155485",
"0.55971104",
"0.5590951",
"0.5575481",
"0.5554041",
"0.5548815",
"0.55483866",
"0.5545577",
"0.55447865",
"0.5542659",
"0.554085",
"0.5539797",
"0.55267113",
"0.5525976",
"0.5525867",
"0.5520475",
"0.5518965",
"0.5518965",
"0.55176026",
"0.5515576",
"0.5513838",
"0.5512805"
] | 0.80830806 | 1 |
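Editor's note (not part of the dataset): the documents of this row and of the next one (_syncDataWithParent and _parentChanged), together with the _updateData document above, form a small parent/child synchronisation pattern. The following hedged sketch ties them together; the class names Parent and CutPlaneLike, the child registration, and the ItemChangedType stand-in are assumptions made for illustration only — the real items belong to a plotting library and carry far more state.

    import enum

    class ItemChangedType(enum.Enum):
        DATA = "data"

    class Parent:
        def __init__(self):
            self._data = None
            self._children = []

        def getData(self, copy=True):
            return self._data

        def getDataRange(self):
            # 3-tuple (min, minPositive, max), as in the document above
            return None if self._data is None else (min(self._data), None, max(self._data))

        def setData(self, data):
            self._data = data
            for child in self._children:
                child._parentChanged(ItemChangedType.DATA)

    class CutPlaneLike:
        def __init__(self, parent):
            self._parent = parent
            parent._children.append(self)
            self._data = None
            self._dataRange = None

        def parent(self):
            return self._parent

        def _updateData(self, data, range_):
            # store data and its range without copying
            self._data = data
            self._dataRange = range_

        def _syncDataWithParent(self):
            # pull data and range from the parent, if any
            parent = self.parent()
            if parent is None:
                data, range_ = None, None
            else:
                data = parent.getData(copy=False)
                range_ = parent.getDataRange()
            self._updateData(data, range_)

        def _parentChanged(self, event):
            # react only to DATA changes signalled by the parent
            if event == ItemChangedType.DATA:
                self._syncDataWithParent()

    parent = Parent()
    plane = CutPlaneLike(parent)
    parent.setData([1.0, 2.0, 3.0])
    assert plane._data == [1.0, 2.0, 3.0]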
Handle data change in the parent this plane belongs to | def _parentChanged(self, event):
if event == ItemChangedType.DATA:
self._syncDataWithParent() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _updated(self, event=None):\n if event == ItemChangedType.COMPLEX_MODE:\n self._syncDataWithParent()\n super(ComplexCutPlane, self)._updated(event)",
"def _parentChanged(self, event):\n if event == ItemChangedType.COMPLEX_MODE:\n self._syncDataWithParent()\n super(ComplexIsosurface, self)._parentChanged(event)",
"def _parent_changed(self):\n raise NotImplementedError(\"shouldnt happen, Parentable objects need to be able to change their parent\")",
"def data_changed(self):\n return",
"def on_parent_changed(self):\n pass",
"def _syncDataWithParent(self):\n parent = self.parent()\n if parent is None:\n self._data = None\n else:\n self._data = parent.getData(copy=False)\n self._updateScenePrimitive()",
"def _notify_parent_change(self):\n pass",
"def _syncDataWithParent(self):\n parent = self.parent()\n if parent is None:\n self._data = None\n else:\n self._data = parent.getData(\n mode=parent.getComplexMode(), copy=False)\n\n if parent is None or self.getComplexMode() == self.ComplexMode.NONE:\n self._setColormappedData(None, copy=False)\n else:\n self._setColormappedData(\n parent.getData(mode=self.getComplexMode(), copy=False),\n copy=False)\n\n self._updateScenePrimitive()",
"def data_changed(self):\n self.data_changed_signal.emit(self)",
"def _notify_parent_change(self):\n for p in self.parameters:\n p._parent_changed(self)",
"def update_data(self):\n # Just set data_changed, the component should do the rest.\n self.data_changed = True",
"def _syncDataWithParent(self):\n parent = self.parent()\n if parent is None:\n data, range_ = None, None\n else:\n mode = self.getComplexMode()\n data = parent.getData(mode=mode, copy=False)\n range_ = parent.getDataRange(mode=mode)\n self._updateData(data, range_)",
"def update(self, parent):\r\n pass",
"def on_data_vars_change(self, change):\n if change['type'] == 'change' and change['name'] == 'value':\n self.left_ds = getattr(self.ts.data, change['new'])\n if self.mask is None:\n self.right_ds = self.left_ds.copy(deep=True)\n else:\n self.right_ds = self.left_ds * self.mask\n\n self.left_imshow.set_data(self.left_ds.data[0])\n self.right_imshow.set_data(self.right_ds.data[0])",
"def _syncDataWithParent(self):\n parent = self.parent()\n if parent is None:\n data, range_ = None, None\n else:\n data = parent.getData(copy=False)\n range_ = parent.getDataRange()\n self._updateData(data, range_)",
"def _updated(self, event=None):\n if event == ItemChangedType.COMPLEX_MODE:\n self._syncDataWithParent()\n\n elif event in (ItemChangedType.COLORMAP,\n Item3DChangedType.INTERPOLATION):\n self._updateScenePrimitive()\n super(ComplexIsosurface, self)._updated(event)",
"def _update_proxy(self, change):\n if change['type'] == 'container':\n #: Only update what's needed\n self.proxy.update_points(change)\n else:\n super(MapPolyline, self)._update_proxy(change)",
"def XPLMDataChanged_f(inRefcon):",
"def onChange(self, parent):\r\n pass",
"def update_original_data(self):\n pass",
"def update_data():\n pass",
"def change_data(self):\n\n if self.changed is not True:\n self.changed = True\n print('True')",
"def MyDataChangedCallback(self, inRefcon):\r\n pass",
"def get_data(self, data):\n data = super().get_data(data)\n self.pid.update_layer1(data[self.pid_cols])\n return data",
"def data_dict_update(self, change):\n self.data_dict = change['value']",
"def parameter_tree_changed(self, param, changes):\n for param, change, data in changes:\n path = self.settings.childPath(param)\n if path is not None:\n childName = '.'.join(path)\n else:\n childName = param.name()\n if change == 'childAdded':pass\n\n elif change == 'value':\n\n if param.name() == 'Detectors':\n self.update_plot_det_items(param)\n\n elif param.name() == 'scan_average':\n self.show_average_dock(param.value() > 1)\n elif change == 'parent':pass",
"def _levelChanged(self, event):\n if event == items.Item3DChangedType.ISO_LEVEL:\n model = self.model()\n if model is not None:\n index = self.index(column=1)\n model.dataChanged.emit(index, index)",
"def _numberOfPoints_changed(self):\n self.reinitialiseData()",
"def _load_data(self, event):\n if self.parent is not None:\n wx.PostEvent(self.parent, NewLoadDataEvent())",
"def _update_proxy(self, change):\n if change['type'] == 'container':\n #: Only update what's needed\n self.proxy.update_points(change)\n else:\n super(MapPolygon, self)._update_proxy(change)",
"def _data_updated_callback(self, attr, old, new):\n pass",
"def _modelUpdated(self, *args, **kwargs):\n topLeft = self.index(column=0)\n bottomRight = self.index(column=1)\n model = self.model()\n if model is not None:\n model.dataChanged.emit(topLeft, bottomRight)",
"def exogenous_change(self):\n pass",
"def exogenous_change(self):\n pass",
"def exogenous_change(self):\n pass",
"def process_IN_MODIFY(self, event):",
"def plane_update(self):\n self.plane.update()",
"def setData(self,newData):\r\n pass",
"def dataGridView_CellValueChanged(self, sender, eventArgs):\r\n name = self.wf.dataGridView.Rows[eventArgs.RowIndex].Cells[0].Value\r\n newVal = self.wf.dataGridView.Rows[eventArgs.RowIndex].Cells[eventArgs.ColumnIndex].Value\r\n child = Application.ActiveSceneRoot.FindChild2( name, constants.siPolyMeshType, constants.siMeshFamily, True )\r\n if child:\r\n transform = child.Kinematics.Local.GetTransform2(None)\r\n translation = transform.Translation\r\n if eventArgs.ColumnIndex == 1:\r\n transform.Translation = XSIMath.CreateVector3( newVal, translation.Y, translation.Z )\r\n child.Kinematics.Local.PutTransform2(None,transform)\r\n elif eventArgs.ColumnIndex == 2:\r\n transform.Translation = XSIMath.CreateVector3( translation.X, newVal, translation.Z )\r\n child.Kinematics.Local.PutTransform2(None,transform)\r\n elif eventArgs.ColumnIndex == 3:\r\n transform.Translation = XSIMath.CreateVector3( translation.X, translation.Y, newVal )\r\n child.Kinematics.Local.PutTransform2(None,transform)\r\n else:\r\n print \"DataGridView_CellValueChanged: \" + child + \" not found!\"",
"def on_new_data(self, data):\n raise NotImplementedError()",
"def on_edit(self, dataobj):",
"def rDataChanged(self):\n\n self._queues.uResolutionTab.refreshData()\n self._layerManager.updateReviewLayer()",
"def onUpdated(self):",
"def changed(self):\n\t\tpass",
"def reload_data(self):\n super(UpdateMessage, self).reload_data()\n self._previous_avro_payload.reload_data()",
"def updateData(self):\n self.needsData.emit(self.property(\"number\"))",
"def onFlowUpdate(self, event):",
"def modelChanged(self) -> None:\n ...",
"def onPropertiesChange(self, data):\n pass",
"def handle_actual_updated(self):\n self._actual_updated()",
"def set_field( self, data ):\n super( UnsteadyField1D, self ).set_field( data )\n self.history[:] = self.val[:]\n return",
"def messageHandler_TreeBasedCoord(self, msg):\n data = msg.getData()\n sender = msg.getIDSender()\n self.log_message('ID {0} has received msg {1} from ID {2}'.format(self.CommID, data, sender))\n if data[0] == 'newload': # new load curve received by child\n\n for i in range(len(self.Children)): # save received child load curve\n if self.Children[i] == sender:\n for t in range(len(data[1])):\n self.EConsumptionChildCurvesRec[i][t] = copy.deepcopy(data[1][t])\n self.noOfConsumptionCurvesReceived = self.noOfConsumptionCurvesReceived +1\n break\n\n # if load curves received by all children\n if self.noOfConsumptionCurvesReceived == len(self.Children):\n self.noOfConsumptionCurvesReceived = 0 # reset counter for received load curves\n\n #first time all children have sent load curves\n if self.state_coordination == 0:\n self.state_coordination += 1\n consumption_curve = [0 for x in range(len(self.EConsumptionChildCurves[0]))]\n local_remainder = [0 for x in range(len(self.EConsumptionChildCurves[0]))]\n\n #accumulate children's loads\n for c in range(len(self.Children)):\n for t in range(len(self.EConsumptionChildCurves[0])):\n self.EConsumptionChildCurves[c][t] = self.EConsumptionChildCurvesRec[c][t]\n consumption_curve[t] += self.EConsumptionChildCurves[c][t]\n\n # calculate the local remainder (without own load)\n for t in range(len(consumption_curve)):\n local_remainder[t] = self.ERemainderLocal[t] + consumption_curve[t]\n\n if self.getTER1() != 0: # if NOT a gas boiler\n #select own best schedule\n self.selectBestSchedule(local_remainder)\n self.setSOC(self.SOCEnd[self.chosenScheduleIndex])\n self.setStateModlvl(self.chosenSchedule[-1])\n\n #update local remainder with own load curve (global remainder)\n for t in range(len(consumption_curve)):\n local_remainder[t] += self.EConsumptionChosenSchedule[t]\n consumption_curve[t] += self.EConsumptionChosenSchedule[t]\n\n # save new global remainder\n self.ERemainderLocal = local_remainder\n\n if self.Parent: # not root\n for c in range(len(self.Children)):\n self.sendMessage(self.Children[c], 40, ['localrecap', copy.deepcopy(self.ERemainderLocal)])\n else: #root\n # ask all children for a better compensation of the remainder in a new round\n for c in range(len(self.Children)):\n self.sendMessage(self.Children[c], 40 , ['newround', copy.deepcopy(self.ERemainderLocal)])\n\n # any other round than first round\n else:\n\n idx_best_compensation = -1\n min_remainder = -1\n overall_min = -1\n overall_min_idx = -1\n local_remainder = [0 for x in range(len(self.EConsumptionChildCurves[0]))]\n abs_local_remainder = [0 for x in range(len(self.Children))]\n abs_global_remainder = 0\n max_min_diff_local_remainder = [0 for x in range(len(self.Children))]\n\n # calc current absolute global remainder\n for t in range(len(self.ERemainderLocal)):\n abs_global_remainder += abs(self.ERemainderLocal[t])\n max_min_diff_global_remainder = max(self.ERemainderLocal) - min(self.ERemainderLocal)\n\n for c in range(len(self.Children)):\n for t in range(len(local_remainder)):\n local_remainder[t] = self.ERemainderLocal[t] - self.EConsumptionChildCurves[c][t] + self.EConsumptionChildCurvesRec[c][t]\n abs_local_remainder[c] += abs(local_remainder[t])\n max_min_diff_local_remainder[c] = max(local_remainder) - min(local_remainder)\n\n if self.OPTcriterion == 'absremainder':\n #remember overall minimum\n if overall_min_idx == -1 or overall_min - abs_local_remainder[c] > 0.01:\n overall_min = abs_local_remainder[c]\n overall_min_idx = c\n\n if abs_global_remainder - 
abs_local_remainder[c] >= 1: # improvement greater or equal 1 Watt\n if idx_best_compensation == -1 or abs_local_remainder[c] < min_remainder:\n idx_best_compensation = c\n min_remainder = abs_local_remainder[c]\n\n elif self.OPTcriterion == 'maxmindiff':\n #remember overall minimum\n if overall_min_idx == -1 or overall_min - max_min_diff_local_remainder[c] > 0.01:\n overall_min = max_min_diff_local_remainder[c]\n overall_min_idx = c\n\n\n if max_min_diff_global_remainder - max_min_diff_local_remainder[c] > 0.01: # difference greater than 0.001 Watt\n if idx_best_compensation == -1 or max_min_diff_local_remainder[c] < min_remainder:\n idx_best_compensation = c\n min_remainder = max_min_diff_local_remainder[c]\n\n # no better compensation at all?\n if idx_best_compensation == -1:\n\n consumption_curve = [0 for x in range(len(self.EConsumptionChildCurves[0]))]\n self.log_message('ID {0}: did not receive an improvement by any of its children.'.format(self.CommID))\n\n for c in range(len(self.Children)):\n #send fallback to all children\n if not self.Parent: #root\n self.sendMessage(self.Children[c], 40, 'fallbackforward')\n else: #not root\n self.sendMessage(self.Children[c], 40, 'fallback')\n # calculate current load curve\n for t in range(len(self.ERemainderLocal)):\n consumption_curve[t] += self.EConsumptionChildCurves[c][t]\n\n if self.getTER1() != 0: #NOT a gas boiler\n for t in range(len(consumption_curve)):\n consumption_curve[t] += self.EConsumptionChosenSchedule[t] #add own load to load curve\n\n if self.Parent: #not root --> propagate load curve to parent\n self.sendMessage(self.Parent, 40, ['newload', copy.deepcopy(consumption_curve)])\n\n else: #root\n # if self.noNewRounds < len(self.Children):\n # # tentatively integrate minimal max-min-diff load curve to remainder\n # tentative_remainder = [0 for x in range(len(self.ERemainderLocal))]\n # random_child = random.randint(0, len(self.Children)-1)\n # for t in range(len(tentative_remainder)):\n # tentative_remainder[t] = self.ERemainderLocal[t] - self.EConsumptionChildCurves[random_child][t] + self.EConsumptionChildCurvesRec[random_child][t]\n #\n # for c in range(len(self.Children)):\n # self.sendMessage(self.Children[c], 40, ['newround', copy.deepcopy(tentative_remainder)])\n # self.noNewRounds += 1\n # else:\n # finish algorithm\n self.state_coordination = 9999\n #self.plotDebugInfo(load_curve)\n\n else:\n self.noNewRounds = 0\n # send fallback message to all children except the one that has the best improving load curve\n self.log_message('ID {0}: best compensation is from child {1}'.format(self.CommID, self.Children[idx_best_compensation]))\n #raw_input('press a key')\n for c in range(len(self.Children)):\n if c != idx_best_compensation:\n if not self.Parent: #root\n self.sendMessage(self.Children[c], 40, 'fallbackforward')\n else: #not root\n self.sendMessage(self.Children[c], 40, 'fallback')\n\n # update remainder and save new child load curve\n for t in range(len(self.EConsumptionChildCurves[0])):\n self.ERemainderLocal[t] = self.ERemainderLocal[t] - self.EConsumptionChildCurves[idx_best_compensation][t] + self.EConsumptionChildCurvesRec[idx_best_compensation][t]\n self.EConsumptionChildCurves[idx_best_compensation][t] = self.EConsumptionChildCurvesRec[idx_best_compensation][t]\n\n # update remainder with own new load if not a gas boiler\n if self.getTER1() != 0:\n # select own new load\n\n remainder_without_own_load = [0 for x in range(len(self.ERemainderLocal))]\n for t in range(len(remainder_without_own_load)):\n 
remainder_without_own_load[t] = self.ERemainderLocal[t] - self.EConsumptionChosenSchedule[t]\n\n self.selectBestSchedule(self.ERemainderLocal)\n self.setSOC(self.SOCEnd[self.chosenScheduleIndex])\n self.setStateModlvl(self.chosenSchedule[-1])\n\n for t in range(len(remainder_without_own_load)):\n self.ERemainderLocal[t] = remainder_without_own_load[t] + self.EConsumptionChosenSchedule[t]\n\n # start new round\n self.state_coordination += 1\n for c in range(len(self.Children)):\n if not self.Parent: #root\n self.sendMessage(self.Children[c], 40, ['newround', copy.deepcopy(self.ERemainderLocal)])\n else: #not root\n self.sendMessage(self.Children[c], 40, ['localrecap', copy.deepcopy(self.ERemainderLocal)])\n\n\n elif data == 'fallback':\n if self.getTER1() != 0: # NOT a gas boiler\n self.chosenScheduleIndex = self.prevChosenScheduleIndex\n self.chosenSchedule = self.schedules[self.chosenScheduleIndex]\n self.EConsumptionChosenSchedule = copy.deepcopy(self.EConsumptionScheduleCurves[self.chosenScheduleIndex])\n\n self.setSOC(self.SOCEnd[self.chosenScheduleIndex])\n self.setStateModlvl(self.chosenSchedule[-1])\n\n self.log_message('ID {0} has performed fallback to schedule {1}'.format(self.CommID, self.chosenScheduleIndex))\n else:\n self.log_message('ID {0} is GB (no schedule to fallback to)'.format(self.CommID))\n\n\n elif data == 'fallbackforward':\n if self.getTER1() != 0: # NOT a gas boiler\n self.chosenScheduleIndex = self.scheduleIdxOfPreviousRound\n self.chosenSchedule = self.schedules[self.chosenScheduleIndex]\n # save previous load curve\n self.EConsumptionChosenSchedule = copy.deepcopy(self.EConsumptionScheduleCurves[self.chosenScheduleIndex])\n\n self.setSOC(self.SOCEnd[self.chosenScheduleIndex])\n self.setStateModlvl(self.chosenSchedule[-1])\n\n self.log_message('ID {0} has performed fallback to schedule {1}'.format(self.CommID, self.chosenScheduleIndex))\n else:\n self.log_message('ID {0} is GB (no schedule to fallback to)'.format(self.CommID))\n\n #inform all children about fallback\n if self.Children:\n for c in range(len(self.Children)):\n self.sendMessage(self.Children[c], 40, 'fallbackforward')\n\n\n elif data[0] == 'newround':\n self.ERemainderLocal = copy.deepcopy(data[1])\n self.state_coordination = 0\n if self.getTER1() != 0: #if not a gas boiler\n #remember schedule before starting a new round\n self.scheduleIdxOfPreviousRound = self.chosenScheduleIndex\n\n if self.Children: # NOT a leave node\n # forward compensation curve to all children\n for c in range(len(self.Children)):\n self.sendMessage(self.Children[c], 40, ['newround', copy.deepcopy(self.ERemainderLocal)])\n else: #leave node\n if self.getTER1() != 0: # not a gas boiler\n #remainder_without_own_load = [0 for x in range(len(self.ERemainderLocal))]\n #for t in range(len(remainder_without_own_load)):\n # remainder_without_own_load[t] = self.ERemainderLocal[t] - self.EConsumptionChosenSchedule[t]\n self.selectBestSchedule(self.ERemainderLocal)\n self.setSOC(self.SOCEnd[self.chosenScheduleIndex])\n self.setStateModlvl(self.chosenSchedule[-1])\n self.sendMessage(self.Parent, 40, ['newload', copy.deepcopy(self.EConsumptionChosenSchedule)])\n else:\n zeros = [0 for x in range(len(self.ERemainderLocal))]\n self.sendMessage(self.Parent, 40, ['newload', copy.deepcopy(zeros)])\n\n elif data[0] == 'localrecap':\n self.ERemainderLocal = copy.deepcopy(data[1])\n consumption_curve = [0 for x in range(len(self.ERemainderLocal))]\n\n if self.getTER1() != 0: #NOT a gas boiler\n #remainder_without_own_load = [0 for x in 
range(len(self.ERemainderLocal))]\n #for t in range(len(remainder_without_own_load)):\n # remainder_without_own_load[t] = self.ERemainderLocal[t] - self.EConsumptionChosenSchedule[t]\n\n\n self.selectBestSchedule(copy.deepcopy(self.ERemainderLocal))\n self.setSOC(self.SOCEnd[self.chosenScheduleIndex])\n self.setStateModlvl(self.chosenSchedule[-1])\n\n if self.Children: # NOT a leave node\n for c in range(len(self.Children)):\n for t in range(len(consumption_curve)):\n consumption_curve[t] += self.EConsumptionChildCurves[c][t]\n\n for t in range(len(consumption_curve)): # add own load\n consumption_curve[t] += self.EConsumptionChosenSchedule[t]\n\n else: #gas boiler\n\n if self.Children: # NOT a leave node\n for c in range(len(self.Children)):\n for t in range(len(consumption_curve)):\n consumption_curve[t] += self.EConsumptionChildCurves[c][t]\n\n self.sendMessage(self.Parent, 40, ['newload', copy.deepcopy(consumption_curve)])",
"def _metadata_changed(self, old, new):\n\n #self.cross_plot.value_range.low = self.minz\n #self.cross_plot.value_range.high = self.maxz\n #self.cross_plot2.value_range.low = self.minz\n #self.cross_plot2.value_range.high = self.maxz\n if self._imag_index.metadata.has_key(\"selections\"):\n x_ndx, y_ndx = self._imag_index.metadata[\"selections\"]\n if y_ndx and x_ndx:\n# xdata, ydata = self._image_index.get_data()\n# xdata, ydata = xdata.get_data(), ydata.get_data()\n self.pd_horiz.set_data(\"horiz\", self._image_value.data[y_ndx,:])\n self.pd_vert.set_data(\"vert\", self._image_value.data[:,x_ndx])",
"def datachange_notification(self, node: Node, val, data):\n _logger.info('datachange_notification %r %s', node, val)",
"def on_dataobj_create(self, dataobj):",
"def change():",
"def update(self, data):\n if self.mode == 'image':\n data = self.preprocess(data)\n self.main_object.set_data(data)\n\n vmin, vmax = self._parse_vrange(data)\n self.main_object.set_clim([vmin, vmax])\n\n if self.mode == 'histogram':\n raise NotImplementedError(\"Updating layer data is not in supported in 'histogram' mode. \")\n\n if self.mode == 'curve':\n x_data, y_data = self.preprocess(data)\n self.main_object.set_data(x_data, y_data)\n self.update_lims()\n\n if self.mode == 'loss':\n raise NotImplementedError(\"Updating layer data is not in supported in 'loss' mode. \")",
"def _update_proxy(self, change):\n # The superclass handler implementation is sufficient.\n super(AbstractItemView, self)._update_proxy(change)",
"def _transformChanged(self, source):\n if source is not self:\n self.notify()",
"def _observe_root(self, change):\n if change['value']:\n for item in self.items:\n self._item_added(item)\n # Connect only now to avoid cleaning up in an unwanted way the\n # root linkable vars attr.\n self.observe('items', self._items_updated)\n\n else:\n self.unobserve('items', self._items_updated)\n for item in self.items:\n self._item_removed(item)\n self.observe('items', self._items_updated)",
"def fDataChanged(self):\n\n self._layerManager.getAimsFeatures()",
"def _comm_changed(self, change):\n if change['new'] is None:\n return\n self._model_id = self.model_id\n\n self.comm.on_msg(self._handle_msg)\n _instances[self.model_id] = self",
"def _itemChanged(self, event):\n if event in self._EVENTS:\n model = self.model()\n if model is not None:\n index = self.index(column=0)\n model.dataChanged.emit(index, index)",
"def on_change_input(self, function_graph, node, i, r, new_r, reason=None):",
"def propagate_column(self, parent_dataset):\n # delete the rows in this dataset from the parent\n self.dataset.remove_parent_observations(parent_dataset.dataset_id)\n\n # get this dataset without the out-of-date parent rows\n dframe = self.dataset.dframe(keep_parent_ids=True)\n\n # create new dframe from the upated parent and add parent id\n parent_dframe = parent_dataset.dframe().add_parent_column(\n parent_dataset.dataset_id)\n\n # merge this new dframe with the existing dframe\n updated_dframe = concat([dframe, parent_dframe])\n\n # save new dframe (updates schema)\n self.dataset.replace_observations(updated_dframe)\n self.dataset.clear_summary_stats()\n\n # recur\n for merged_dataset in self.dataset.merged_datasets:\n merged_calculator = Calculator(merged_dataset)\n merged_calculator.propagate_column(self.dataset)",
"def parent(self, v):\n # method here",
"def _component_origin_changed(self):\n self._origin_changed()",
"def child_modified(self):\n raise NotImplementedError(\n \"{} does not have implemented `child_modified`\".format(self)\n )",
"def _handle_coordinator_update(self) -> None:\n self._update_data()\n self.async_write_ha_state()",
"def update(self):",
"def update(self):",
"def update(self):",
"def before_dataobj_create(self, dataobj):",
"def update_all_data(self):\n self.dataChanged.emit(qtc.QModelIndex(), qtc.QModelIndex())",
"def update(self, new_gameStateData):\r\n pass",
"def _handle_coordinator_update(self) -> None:\n self._update_attrs()\n return super()._handle_coordinator_update()",
"def on_entity_update(self, event):\n self.entity.on_entity_update(event)",
"def update(self, data):\n pass",
"def update(self, data):\n pass",
"def update(self, data):\n pass",
"def update(self, data):\n pass",
"def beforeUpdate(self):",
"def onChanged(self, vp, prop):\n pass",
"def onChanged(self, vp, prop):\n pass",
"def _handle_coordinator_update(self) -> None:\n self._thermostat = self.coordinator.data[self._thermostat.serial_number]\n self.async_write_ha_state()",
"def child(self, v, c):\n # method here",
"def __itemChanged(self, event):\n if event in (items.ItemChangedType.DATA, items.ItemChangedType.MASK):\n self._updateFromItem()",
"def update(self, datain):\r\n self.arraydata = datain\r\n self.layoutChanged.emit()",
"def level_data(self):\n self.level(self.data)",
"def _tree_update(self, new_tree: Tree, event: Event):\n raise NotImplementedError()",
"def on_change_input(self, fgraph, app, i, old_r, new_r, reason):\r\n if app == 'output':\r\n # app == 'output' is special key that means FunctionGraph is redefining which nodes are being\r\n # considered 'outputs' of the graph.\r\n pass\r\n else:\r\n #if app not in self.debug_all_apps: raise ProtocolError(\"change without import\")\r\n\r\n #UPDATE self.clients\r\n self.clients[old_r][app] -= 1\r\n if self.clients[old_r][app] == 0:\r\n del self.clients[old_r][app]\r\n\r\n self.clients.setdefault(new_r,{}).setdefault(app,0)\r\n self.clients[new_r][app] += 1\r\n\r\n #UPDATE self.view_i, self.view_o\r\n for o_idx, i_idx_list in getattr(app.op, 'view_map', {}).items():\r\n if len(i_idx_list) > 1:\r\n #destroying this output invalidates multiple inputs\r\n raise NotImplementedError()\r\n i_idx = i_idx_list[0]\r\n output = app.outputs[o_idx]\r\n if i_idx == i:\r\n if app.inputs[i_idx] is not new_r:\r\n raise ProtocolError(\"wrong new_r on change\")\r\n\r\n self.view_i[output] = new_r\r\n\r\n self.view_o[old_r].remove(output)\r\n if not self.view_o[old_r]:\r\n del self.view_o[old_r]\r\n\r\n self.view_o.setdefault(new_r, OrderedSet()).add(output)\r\n\r\n self.stale_droot = True",
"def _update_value(self, value):\n super(SubOutputPlug, self)._update_value(value)\n for plug in self.connections:\n plug.value = value\n parent_value = self.parent_plug.value or {}\n parent_value[self.key] = value\n self.parent_plug.value = parent_value",
"def update_E(self):",
"def input_changed(self, input_data):\n boundary_props = {\n 'foam:0/T boundaryField':{\n \"type\": \"fixedValue\",\n \"value\": Field(0)\n }\n }\n self.load_boundary(boundary_props, input_data)",
"def on_change_input(self, fgraph, app, i, old_r, new_r, reason):\r\n if app == 'output':\r\n # app == 'output' is special key that means FunctionGraph is redefining which nodes are being\r\n # considered 'outputs' of the graph.\r\n pass\r\n else:\r\n if app not in self.debug_all_apps: raise ProtocolError(\"change without import\")\r\n\r\n #UPDATE self.clients\r\n self.clients[old_r][app] -= 1\r\n if self.clients[old_r][app] == 0:\r\n del self.clients[old_r][app]\r\n\r\n self.clients.setdefault(new_r, OrderedDict()).setdefault(app,0)\r\n self.clients[new_r][app] += 1\r\n\r\n #UPDATE self.view_i, self.view_o\r\n for o_idx, i_idx_list in getattr(app.op, 'view_map', OrderedDict()).items():\r\n if len(i_idx_list) > 1:\r\n #destroying this output invalidates multiple inputs\r\n raise NotImplementedError()\r\n i_idx = i_idx_list[0]\r\n output = app.outputs[o_idx]\r\n if i_idx == i:\r\n if app.inputs[i_idx] is not new_r:\r\n raise ProtocolError(\"wrong new_r on change\")\r\n\r\n self.view_i[output] = new_r\r\n\r\n self.view_o[old_r].remove(output)\r\n if not self.view_o[old_r]:\r\n del self.view_o[old_r]\r\n\r\n self.view_o.setdefault(new_r, OrderedSet()).add(output)\r\n\r\n self.stale_droot = True",
"def _update_proxy(self, change):\n # The superclass implementation is sufficient.\n super(MapCircle, self)._update_proxy(change)",
"def _sceneChanged(self, oldScene, newScene):\n pass",
"def _component_changed(self, old, new):\n canvas = self.canvas\n if old is not None:\n canvas.remove(old)\n if new is not None:\n canvas.add(new)",
"def _update_proxy(self, change):\n # The superclass implementation is sufficient.\n super(DockArea, self)._update_proxy(change)"
] | [
"0.6953876",
"0.68844354",
"0.67612237",
"0.6697958",
"0.65322846",
"0.6528624",
"0.6475116",
"0.6469735",
"0.6335095",
"0.63126916",
"0.6303943",
"0.6144572",
"0.6131545",
"0.6011931",
"0.5992628",
"0.59669846",
"0.5949945",
"0.59455943",
"0.5932121",
"0.59244823",
"0.5865968",
"0.5853364",
"0.5829941",
"0.5806182",
"0.5798211",
"0.57689553",
"0.5758383",
"0.5754564",
"0.5744946",
"0.57430965",
"0.5712347",
"0.56617707",
"0.5659287",
"0.5659287",
"0.5659287",
"0.5627916",
"0.5594537",
"0.5579292",
"0.55773443",
"0.55543864",
"0.55154556",
"0.55096817",
"0.55064684",
"0.54969966",
"0.54669917",
"0.54620624",
"0.5449711",
"0.54489625",
"0.544894",
"0.5440657",
"0.5422534",
"0.5417053",
"0.54034555",
"0.54033345",
"0.5382858",
"0.5379552",
"0.53788304",
"0.533435",
"0.53335845",
"0.53197676",
"0.5312967",
"0.53097665",
"0.5306937",
"0.5305871",
"0.530378",
"0.52967143",
"0.52836597",
"0.52814925",
"0.52807665",
"0.52651423",
"0.52651423",
"0.52651423",
"0.5257514",
"0.5257243",
"0.5255628",
"0.525247",
"0.52461666",
"0.52455246",
"0.52455246",
"0.52455246",
"0.52455246",
"0.5241062",
"0.5233305",
"0.5233305",
"0.5232355",
"0.5218267",
"0.5213422",
"0.52115226",
"0.51942945",
"0.5178273",
"0.5176758",
"0.5175421",
"0.517165",
"0.5166038",
"0.5160969",
"0.5158896",
"0.51584965",
"0.51580274",
"0.5145991"
] | 0.6928027 | 2 |
Return whether values <= colormap min are displayed or not. | def getDisplayValuesBelowMin(self):
return self._getPlane().colormap.displayValuesBelowMin | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setDisplayValuesBelowMin(self, display):\n display = bool(display)\n if display != self.getDisplayValuesBelowMin():\n self._getPlane().colormap.displayValuesBelowMin = display\n self._updated(ItemChangedType.ALPHA)",
"def visible(self):\n return -PipePair.WIDTH < self.x < WIN_WIDTH",
"def is_lower_limit(self):\n is_lower = self.get_raw_status() & self.STATUS_LLIM\n return bool(is_lower)",
"def isHittingLow(self):\n return not self.limLow.get()",
"def in_pixel_range(self, pixmin: int, pixmax: int) -> bool:\n \n if any(i < pixmin or i > pixmax or np.isnan(i) for i in self.datapos):\n return False\n\n return True",
"def _single_value_min(data, threshold):\r\n amin = np.min(data)\r\n amax = np.max(data)\r\n limit = amin + (amax - amin) * threshold\r\n return data < limit",
"def is_lower(self):\n M = self.rep\n for i in range(self.rows):\n for j in range(i + 1, self.cols):\n if M[i, j]:\n return False\n return True",
"def _get_display_range(image): # pragma: no cover\n ip = _get_image_properties(image)\n immin, immax = np.min(image), np.max(image)\n if ip.signed:\n magnitude = max(abs(immin), abs(immax))\n lo, hi = -magnitude, magnitude\n cmap = _diverging_colormap\n elif any(ip):\n _raise_warnings(ip)\n lo, hi = immin, immax\n cmap = _nonstandard_colormap\n else:\n lo = 0\n imtype = image.dtype.type\n hi = dtype_range[imtype][1]\n cmap = _default_colormap\n return lo, hi, cmap",
"def is_visible(self, x, y) :\n\t\tres_x = (x > self.x_min) and (x < self.x_max)\n\t\t# print 'res_x : {0}, x : {1}, x_min : {2}, x_max:{3}'.format(res_x, x, self.x_min, self.x_max)\n\t\tres_y = (y > self.y_min) #and (y < self.y_max)\n\t\treturn res_x and res_y",
"def is_visible(self):\n return self.rect.x < self.screen_rect.width",
"def highlight_min_max(s, min_color=\"#5fba7d\", max_color=\"#e67575\"):\n is_max = s == s.max()\n is_min = s == s.min()\n max_mapping = [f'background-color: {max_color}' if v else '' for v in is_max]\n min_mapping = [f'background-color: {min_color}' if v else '' for v in is_min]\n return [min_mapping[i] if min_mapping[i] != '' else max_mapping[i] for i in range(len(min_mapping))]",
"def YellowFilter(c):\n if (c[0] > c[2]) and (c[1] > c[2]) and (c[0] == c[1]): return True\n else: return False",
"def is_modern(self):\n G = self.poset().hasse_diagram()\n for x in G:\n nx = list(G.neighbors_in(x))\n nx.append(x)\n if min(nx) < x and max(nx) > x:\n return False\n return True",
"def f_has_range(self):\n return len(self._explored_range) > 0",
"def set_colormap_full_range(self):\n if(self.plot.image is None):\n return\n \n cmin = self.settingsWidget.ui.colormap_min\n cmax = self.settingsWidget.ui.colormap_max\n data_min = numpy.min(self.plot.image)\n data_max = numpy.max(self.plot.image)\n cmin.setText(str(data_min))\n cmax.setText(str(data_max))\n self.set_colormap_range()",
"def test_change_min_max(self):\n\n datarange = self.colormap.range\n\n # Perform a dummy mapping.\n a = ArrayDataSource(array([0.0, 0.5, 1.0]))\n datarange.add(a)\n b = self.colormap.map_screen(a.get_data())\n datarange.remove(a)\n\n # Update the min_value.\n datarange.low = -1.0\n\n # Test that the map still works.\n a = ArrayDataSource(array([-1.0, 0.0, 1.0]))\n datarange.add(a)\n b = self.colormap.map_screen(a.get_data())\n datarange.remove(a)\n expected = array([0.0, 0.5, 1.0])\n\n close = allclose(ravel(b[:,:1]), expected, atol=0.02)\n self.assert_(close,\n \"Changing min value broke map. Expected %s. Got %s\" % (expected, b[:,:1]))\n\n # Update the max_value.\n datarange.high = 0.0\n # Test that the map still works.\n a = ArrayDataSource(array([-1.0, -0.5, 0.0]))\n datarange.add(a)\n b = self.colormap.map_screen(a.get_data())\n datarange.remove(a)\n expected = array([0.0, 0.5, 1.0])\n\n close = allclose(ravel(b[:,:1]), expected, atol=0.02)\n self.assert_(close,\n \"Changing min value broke map. Expected %s. Got %s\" % (expected, b[:,:1]))\n\n\n return",
"def is_scale_enabled(self) -> bool:\r\n ...",
"def isScalar(self) -> bool:\n\n indices = list(range(self.layout.gaDims))\n indices.remove(self.layout.gradeList.index(0))\n\n for i in indices:\n if abs(self.value[i]) < _eps:\n continue\n else:\n return False\n\n return True",
"def is_visible(self):\n return self.real > 0",
"def isLow(self):\n\t\treturn self.resolution == 'LOW'",
"def _is_obstacle_in_front(self):\n range_front = []\n range_front[:20] = self.lidar_data[-20:]\n range_front[20:] = self.lidar_data[:20]\n range_front = list(filter(lambda num: num != 0, range_front))\n min_front = min(range_front)\n if min_front < 0.4 and min_front != 0.0:\n\t\t\treturn True\n else:\n\t\t\treturn False",
"def filter(self, intensities):\n return np.array(intensities) > self.min_ms1_intensity",
"def GreenFilter(c):\n if (c[1] > c[0]) and (c[1] > c[2]) and (c[0] == c[2]): return True\n else: return False",
"def _is_visible(self, point):\n return point[0] > 0 and point[0] < 1 and point[1] > 0 and point[1] < 1",
"def vmin(self):\n return self._vmin",
"def in_display(self, point):\n x, y = point\n if x < 0 or x > self.width or \\\n y < 0 or y > self.height:\n return False\n return True",
"def hasLow(self):\n\t\treturn self.toLow().exists",
"def color_vals(val, threshl=[0.15, 0.30, 0.50]):\n colormap = ['red', 'black', 'blue', 'green']\n color = colormap[-1]\n for i, thresh in enumerate(threshl):\n if val < thresh:\n color = colormap[i]\n break\n return 'color: %s' % color",
"def is_present(self, c, tiny=1.0E-30):\n v = [tiny if x <= tiny else x for x in c]\n present = bool(len(np.where(np.array(v) > tiny)[0]))\n return present, v",
"def _get_colorbar_limits(self):\n if self.boundaries is not None:\n C = self.boundaries\n if self.extend in [\"min\", \"both\"]:\n C = C[1:]\n\n if self.extend in [\"max\", \"both\"]:\n C = C[:-1]\n return min(C), max(C)\n else:\n return self.get_clim()",
"def is_off_grid(self, xmin, ymin, xmax, ymax):\n if xmin < 0:\n print('x-coordinate: {0} below minimum of 0.'.format(xmin))\n return True\n if ymin < 0:\n print('y-coordinate: {0} below minimum of 0.'.format(ymin))\n return True\n if xmax >= self.width:\n print('x-coordinate: {0} above maximum of {1}.'.format(\n xmax, self.width - 1))\n return True\n if ymax >= self.height:\n print('y-coordinate: {0} above maximum of {1}.'.format(\n ymax, self.height - 1))\n return True\n return False",
"def cmin(self):\n return self['cmin']",
"def cmin(self):\n return self[\"cmin\"]",
"def inside_gamut(rgb: ndarray) -> bool:\n return all(rgb >= 0)",
"def is_0to255(value):\n return 0 <= value <= 255",
"def is_at_wall(self):\n return self.distmin < self.distmax*0.8",
"def RedFilter(c):\n if (c[0] > c[1]) and (c[0] > c[2]) and (c[1] == c[2]): return True\n else: return False",
"def getColorRange(self):\n vmax=self.data_matrix.max()\n vmin=self.data_matrix.min()\n\n if vmax * vmin < 0: # ie number range spans +ve and -ve\n vmax = max([vmax, abs(vmin)])\n vmin = -1*vmax\n\n return vmax,vmin",
"def check_limits(self,frontier):\n\n if(frontier[0]+1>self.map_info.height or frontier[0]<0): return False\n if(frontier[1]+1>self.map_info.width or frontier[1]<0): return False\n return True",
"def localMin(eccMap, binSize):\r\n\r\n eccMap2 = np.array(eccMap)\r\n cutStep = np.arange(np.nanmin(eccMap2[:]) - binSize,\r\n np.nanmax(eccMap2[:]) + binSize * 2,\r\n binSize)\r\n NumOfMin = 0\r\n i = 0\r\n while (NumOfMin <= 1) and (i < len(cutStep)):\r\n currThr = cutStep[i]\r\n marker = np.zeros(eccMap.shape, dtype=np.int)\r\n marker[eccMap2 <= (currThr)] = 1\r\n marker, NumOfMin = ni.measurements.label(marker)\r\n i = i + 1\r\n\r\n # if NumOfMin == 1:\r\n # print 'Only one local minumum was found!!!'\r\n # elif NumOfMin == 0:\r\n # print 'No local minumum was found!!!'\r\n # else:\r\n # print str(NumOfMin) + ' local minuma were found!!!'\r\n #\r\n # if NumOfMin > 1:\r\n # plt.figure()\r\n # plt.imshow(marker,vmin=np.amin(marker), vmax=np.amax(marker),cmap='jet',interpolation='nearest')\r\n # plt.colorbar()\r\n # plt.title('marker from local min')\r\n\r\n return marker",
"def check_edges(self):\r\n screen_rect = self.screen.get_rect()\r\n if self.rect.right >= screen_rect.right:\r\n return True\r\n elif self.rect.left <= 0:\r\n return True",
"def __call__(self, value: np.ndarray) -> bool:\n for k, bound in enumerate(self.bounds):\n if bound is not None:\n if np.any((value > bound) if k else (value < bound)):\n return False\n return True",
"def off_screen(self):\n return self._x < 0",
"def unsafe(self): \n return self.distmin < self.distmax*0.5",
"def is_sampling_for_minmax(self):\n return (self._level_change_time is not None) and \\\n (get_time() - self._level_change_time) < self._duration_in_sec",
"def check_coord_in_range(self, x, y):\n return 0 <= x < self.cols and 0 <= y < self.lines",
"def in_grid(self, tile):\n return 0 <= tile[0] < self.gs[0] and 0 <= tile[1] < self.gs[1]",
"def visible(self, hipid):\n s = self.hip_stars[hipid]\n if s[3]<min(self.inner_dec, self.outer_dec): return False\n return s[3]<=max(self.inner_dec, self.outer_dec)",
"def check_edges(self):\n\t\tscreen_rect = self.screen.get_rect()\n\t\tif self.rect.right >= screen_rect.right:\n\t\t\treturn True\n\t\telif self.rect.left <= 0:\n\t\t\treturn True",
"def lux_above_threshold(self) -> bool:\n if self.lux_sensor:\n value = self.hass.get_state(self.lux_sensor)\n if value not in [\"unavailable\", \"unknown\"]:\n return float(value) > self.lux_threshold\n\n return False",
"def IsOpenXmin(self, *args):\n return _Bnd.Bnd_Box2d_IsOpenXmin(self, *args)",
"def eval_contour_fill_levels(plot_items):\n for i, item in enumerate(plot_items):\n max_value = np.amax(item['data'])\n min_value = np.amin(item['data'])\n print(\"Max and min value of plot {}: {:.3f} and {:.3f}\".format(i, max_value, min_value))\n if item['contour_fill_levels'][-1] < max_value:\n print(\"Contour fills (max={:.3f}) do not cover max value of plot {}!\"\n .format(item['contour_fill_levels'][-1], i))\n if item['contour_fill_levels'][0] > min_value:\n print(\"Contour fills (min={:.3f}) do not cover min value of plot {}!\"\n .format(item['contour_fill_levels'][0], i))",
"def _min_in_bounds(self, min):\n if min <= self.valmin:\n if not self.closedmin:\n return self.val[0]\n min = self.valmin\n\n if min > self.val[1]:\n min = self.val[1]\n return self._stepped_value(min)",
"def IsOpenXmin(self, *args):\n return _Bnd.Bnd_Box_IsOpenXmin(self, *args)",
"def onlyrow(self):\n return self.y <= 1",
"def is_disp(self) -> bool:\n return self.disp_power > 0",
"def have_blower(self):\n return bool(self.blower)",
"def constrain_rgb(rgb: ndarray) -> bool:\n w = - min(0, *rgb) # Amount of white needed\n if w > 0:\n rgb += w # Add just enough white to make r, g, b all positive\n return True # Colour modified to fit RGB gamut\n return False # Colour within RGB gamut",
"def min():\n return KeeperOfMinOrMax(int.__gt__)",
"def PinkFilter(c):\n if (c[0] > c[1]) and (c[2] > c[1]) and (c[2] == c[0]): return True\n else: return False",
"def has_xy_values(plot):\n return Plot.has_xy_values(plot)",
"def above_threshold(self, value):\n # We use floating point number here so we have to take care\n return finf(value,self.min) or finf(self.max,value)",
"def is_gridlines_visible(self):\n return self.container['is_gridlines_visible']",
"def is_in_gutter(self, position, gutterDict):\n for boundary, gutterSize in gutterDict.iteritems():\n if boundary == 'left': \n return (position[0] < self.windowSize[0] + gutterSize)\n elif boundary == 'right': \n return (position[0] > self.windowSize[1] - gutterSize)\n elif boundary == 'top':\n return (position[1] > self.windowSize[2] - gutterSize)\n elif boundary == 'bottom':\n return (position[1] < self.windowSize[3] - gutterSize)",
"def showscale(self):\n return self['showscale']",
"def find_bluntcolor(num, lower_values, greater_values):\r\n i_lower = bisect.bisect_right(lower_values, num)\r\n if i_lower < len(lower_values):\r\n return COLORS_LOWER[i_lower]\r\n i_greater = bisect.bisect_left(greater_values, num)\r\n if i_greater == 0:\r\n return np.nan\r\n else:\r\n return COLORS_GREATER[i_greater - 1]",
"def is_win(my_board):\n return np.count_nonzero(my_board == CLOSED) == NUM_MINES",
"def hypercube(self, k):\r\n return np.all(k < 0.5, axis=1)",
"def check_edges(self):\n screen_rect = self.screen.get_rect()\n if self.rect.right >= screen_rect.right or self.rect.left <= 0:\n return True",
"def is_on_board(self, r, c):\r\n return 0 <= r <= 7 and 0 <= c <= 7",
"def exceeds_min(value, min_):\n\n if isinstance(value, (float, int)):\n val_ = value\n else:\n try:\n val_ = int(value)\n except:\n val_ = value\n if isinstance(min_, (float, int)):\n return (val_ < min_)\n else:\n if min_.isalnum():\n try:\n imin = int(min_)\n return (val_ < imin)\n except:\n pass\n \n return False",
"def _has_noise(self) -> bool:\n min = self.array.min()\n max = self.array.max()\n near_min, near_max = np.percentile(self.array, [0.5, 99.5])\n max_is_extreme = max > near_max * 1.25\n min_is_extreme = (min < near_min * 0.75) and (\n abs(min - near_min) > 0.1 * (near_max - near_min)\n )\n return max_is_extreme or min_is_extreme",
"def check_edges(self):\n screen_rect = self.screen.get_rect()\n if self.rect.right >= screen_rect.right:\n return True\n elif self.rect.left <= screen_rect.left:\n return True",
"def isWin(self):\n\n return self.tiles == self.winCdt",
"def _is_max(self, y0, y1, y2):\n return True if (y1 - y0 > 0) and (y2 - y1 < 0) else False",
"def _getRawBound(self):\n if self._colormap is None:\n return None\n elif self._index == 0:\n return self._colormap.getVMin()\n else: # self._index == 1\n return self._colormap.getVMax()",
"def _plot_one_value(\n data_matrix, grid_metadata_dict, colour_map_object, min_colour_value,\n max_colour_value, plot_cbar_min_arrow, plot_cbar_max_arrow,\n log_scale=False):\n\n figure_object, axes_object = pyplot.subplots(\n 1, 1, figsize=(FIGURE_WIDTH_INCHES, FIGURE_HEIGHT_INCHES)\n )\n\n basemap_object, basemap_x_matrix_metres, basemap_y_matrix_metres = (\n _get_basemap(grid_metadata_dict)\n )\n\n num_grid_rows = data_matrix.shape[0]\n num_grid_columns = data_matrix.shape[1]\n x_spacing_metres = (\n (basemap_x_matrix_metres[0, -1] - basemap_x_matrix_metres[0, 0]) /\n (num_grid_columns - 1)\n )\n y_spacing_metres = (\n (basemap_y_matrix_metres[-1, 0] - basemap_y_matrix_metres[0, 0]) /\n (num_grid_rows - 1)\n )\n\n data_matrix_at_edges, edge_x_coords_metres, edge_y_coords_metres = (\n grids.xy_field_grid_points_to_edges(\n field_matrix=data_matrix,\n x_min_metres=basemap_x_matrix_metres[0, 0],\n y_min_metres=basemap_y_matrix_metres[0, 0],\n x_spacing_metres=x_spacing_metres,\n y_spacing_metres=y_spacing_metres)\n )\n\n data_matrix_at_edges = numpy.ma.masked_where(\n numpy.isnan(data_matrix_at_edges), data_matrix_at_edges\n )\n\n # data_matrix_at_edges[numpy.isnan(data_matrix_at_edges)] = -1\n\n plotting_utils.plot_coastlines(\n basemap_object=basemap_object, axes_object=axes_object,\n line_colour=BORDER_COLOUR)\n\n plotting_utils.plot_countries(\n basemap_object=basemap_object, axes_object=axes_object,\n line_colour=BORDER_COLOUR)\n\n plotting_utils.plot_states_and_provinces(\n basemap_object=basemap_object, axes_object=axes_object,\n line_colour=BORDER_COLOUR)\n\n plotting_utils.plot_parallels(\n basemap_object=basemap_object, axes_object=axes_object,\n num_parallels=NUM_PARALLELS)\n\n plotting_utils.plot_meridians(\n basemap_object=basemap_object, axes_object=axes_object,\n num_meridians=NUM_MERIDIANS)\n\n basemap_object.pcolormesh(\n edge_x_coords_metres, edge_y_coords_metres,\n data_matrix_at_edges, cmap=colour_map_object,\n vmin=min_colour_value, vmax=max_colour_value, shading='flat',\n edgecolors='None', axes=axes_object, zorder=-1e12)\n\n colour_bar_object = plotting_utils.plot_linear_colour_bar(\n axes_object_or_matrix=axes_object, data_matrix=data_matrix,\n colour_map_object=colour_map_object, min_value=min_colour_value,\n max_value=max_colour_value, orientation_string='horizontal',\n extend_min=plot_cbar_min_arrow, extend_max=plot_cbar_max_arrow,\n padding=0.05)\n\n tick_values = colour_bar_object.get_ticks()\n\n if log_scale:\n tick_strings = [\n '{0:d}'.format(int(numpy.round(10 ** v))) for v in tick_values\n ]\n elif numpy.nanmax(data_matrix) >= 6:\n tick_strings = [\n '{0:d}'.format(int(numpy.round(v))) for v in tick_values\n ]\n else:\n tick_strings = ['{0:.2f}'.format(v) for v in tick_values]\n\n colour_bar_object.set_ticks(tick_values)\n colour_bar_object.set_ticklabels(tick_strings)\n\n return figure_object, axes_object",
"def IsMaximized(self):\r\n \r\n return self.HasFlag(self.optionMaximized)",
"def showscale(self):\n return self[\"showscale\"]",
"def compute_show_ac(self) -> bool:\n return self.value in (\"\", \"0\") and self.numbers == \"0\"",
"def out_of_bounds(self):\n return not 0 <= self.nodes[0].x < WIDTH * SCALE or not 0 <= self.nodes[0].y < HEIGHT * SCALE",
"def check_edges(self):\n if self.rect.right >= self.screen_rect.right or self.rect.left <= 0:\n return True",
"def compare_min(values, weights):\n return np.min(values.numpy())",
"def min_value_count(self, name, min=50, weight=None, condition=None,\n axis='y', verbose=True):\n for v in name:\n df = self.crosstab(v, w=weight, text=False, f=condition)[v]['@'][v]\n hide = []\n for i, c in zip(df.index, df.values):\n if c < min:\n hide.append(i)\n if hide:\n codes = self.codes(v)\n if verbose:\n if 'All' in hide or all(c in hide for c in codes):\n msg = '{}: All values have less counts than {}.'\n print(msg.format(v, min))\n else:\n print('{}: Hide values {}'.format(v, hide))\n hide = [h for h in hide if not h == 'All']\n self.hiding(v, hide, axis)\n return None",
"def test_min_vs_max(self, fig_test, fig_ref):\n ax = fig_test.add_subplot(projection=\"ternary\")\n ax.set_ternary_min(0.1, 0.2, 0.3)\n\n ax = fig_ref.add_subplot(projection=\"ternary\")\n ax.set_ternary_max(0.5, 0.6, 0.7)",
"def inrange(cc, point):\n return point.row in range(cc.top, cc.bottom+1) and point.col in range(cc.left, cc.right+1)",
"def _is_blank(im):\n \n # Take the r% center\n r = 0.2\n h1 = int(float(im.shape[0]) * r)\n h2 = im.shape[0] - h1\n w1 = int(float(im.shape[1]) * r) \n w2 = im.shape[1] - w1\n #\n im_center = im[h1:h2, w1:w2]\n \n if np.mean(im_center) < 0.06:\n return True\n else:\n return False",
"def BlueFilter(c):\n if (c[2] > c[1]) and (c[2] > c[0]) and (c[1] == c[0]): return True\n else: return False",
"def above(self,object):\n if( isinstance(object,Feature) ):\n return( self.maxY() < object.minY() )\n elif( isinstance(object,tuple) or isinstance(object,np.ndarray) ):\n return( self.maxY() < object[1] )\n elif( isinstance(object,float) or isinstance(object,int) ):\n return( self.maxY() < object )\n else:\n logger.warning(\"SimpleCV did not recognize the input type to feature.above(). This method only takes another feature, an (x,y) tuple, or a ndarray type.\")\n return None",
"def find_min(self):\n\n\n min_x = 1000\n min_y = 1000\n k = len(self.__col_lista)\n for i in range(k):\n x, y = self.__col_lista[i]\n if x < min_x:\n min_x = x\n if y < min_y:\n min_y = y\n return min_x, min_y",
"def check_edges(self):\n\t\tbottom_screen_limit = 2 * self.rect.height\n\t\tscreen_rect = self.screen.get_rect()\n\t\tif (self.rect.top <= 100) or (self.rect.bottom >= self.screen_rect.bottom):\n\t\t#self.rect.bottom >= self.screen_rect.bottom:\n\t\t\treturn True",
"def get_strict_valence_flag(self):\n d_min = self.fmodel.f_obs().d_min()\n return (d_min < self.params.d_min_strict_valence)",
"def getMinAbundanceOfClrSample(self):\n #try: minimum = min(self.clr_sample['abundance'])-0.001\n try: minimum = min(self.clr_sample)-0.01\n except: minimum = 0\n return minimum",
"def checkValue(c, m, y, k):\n MINVAL=0\n MAXVAL=255\n valueOk=True\n for val in c, m, y, k:\n if val >=MINVAL and val <=255:\n pass\n else:\n valueOk=False\n \n return valueOk",
"def u_min(self):\n if self._u_min is None:\n return np.abs(self.uvgrid).min()\n else:\n return self._u_min",
"def minmax(self):\n return self._data_lim[self._n_overlay - 1]",
"def _getColormapRange(self):\n item = self.item()\n if item is not None and self._colormap is not None:\n return self._colormap.getColormapRange(item)\n else:\n return 1, 100 # Fallback",
"def localmin(x):\r\n return (np.diff(np.sign(np.diff(x))) > 0).nonzero()[0] + 1",
"def valid_point(self, row, col):\n return self.topdown_view[row][col] == 1.0",
"def supports(self, x):\n return 0.0 < x"
] | [
"0.6101894",
"0.606636",
"0.5966878",
"0.59301066",
"0.5888018",
"0.58112633",
"0.58078593",
"0.5752245",
"0.5747372",
"0.57274777",
"0.57218593",
"0.5695929",
"0.55961335",
"0.5590847",
"0.5554003",
"0.5551317",
"0.55380684",
"0.5532524",
"0.55230325",
"0.5503854",
"0.55005634",
"0.54687977",
"0.54406214",
"0.5432614",
"0.54093766",
"0.54002166",
"0.53953683",
"0.53911763",
"0.5387343",
"0.5375715",
"0.53755474",
"0.5374518",
"0.5358248",
"0.5355967",
"0.5355576",
"0.5340683",
"0.5338968",
"0.5337671",
"0.5328756",
"0.53018254",
"0.52983576",
"0.52914023",
"0.5286525",
"0.52740216",
"0.5236433",
"0.52362734",
"0.5226275",
"0.52187526",
"0.52132165",
"0.5197322",
"0.51968044",
"0.51934505",
"0.51916194",
"0.51899517",
"0.51853275",
"0.5185204",
"0.51742494",
"0.5168107",
"0.5163639",
"0.515493",
"0.5152168",
"0.5150762",
"0.5144524",
"0.5142997",
"0.5135435",
"0.5133186",
"0.5131304",
"0.5130656",
"0.5129531",
"0.5125211",
"0.5117394",
"0.5115141",
"0.5112529",
"0.51076406",
"0.51016355",
"0.50978863",
"0.5092062",
"0.5089267",
"0.50827825",
"0.50815105",
"0.5080898",
"0.50806075",
"0.5080273",
"0.50793356",
"0.50761044",
"0.507309",
"0.50611955",
"0.5059959",
"0.5059198",
"0.505842",
"0.5057984",
"0.5057751",
"0.5057251",
"0.50563",
"0.50515425",
"0.5049565",
"0.50494987",
"0.5048595",
"0.5048424",
"0.50423586"
] | 0.7410294 | 0 |
Set whether to display values <= colormap min. | def setDisplayValuesBelowMin(self, display):
display = bool(display)
if display != self.getDisplayValuesBelowMin():
self._getPlane().colormap.displayValuesBelowMin = display
self._updated(ItemChangedType.ALPHA) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_colormap_full_range(self):\n if(self.plot.image is None):\n return\n \n cmin = self.settingsWidget.ui.colormap_min\n cmax = self.settingsWidget.ui.colormap_max\n data_min = numpy.min(self.plot.image)\n data_max = numpy.max(self.plot.image)\n cmin.setText(str(data_min))\n cmax.setText(str(data_max))\n self.set_colormap_range()",
"def getDisplayValuesBelowMin(self):\n return self._getPlane().colormap.displayValuesBelowMin",
"def _setBound(self, value):\n if self._colormap is not None:\n if self._index == 0:\n min_ = value\n max_ = self._colormap.getVMax()\n else: # self._index == 1\n min_ = self._colormap.getVMin()\n max_ = value\n\n if max_ is not None and min_ is not None and min_ > max_:\n min_, max_ = max_, min_\n self._colormap.setVRange(min_, max_)",
"def test_change_min_max(self):\n\n datarange = self.colormap.range\n\n # Perform a dummy mapping.\n a = ArrayDataSource(array([0.0, 0.5, 1.0]))\n datarange.add(a)\n b = self.colormap.map_screen(a.get_data())\n datarange.remove(a)\n\n # Update the min_value.\n datarange.low = -1.0\n\n # Test that the map still works.\n a = ArrayDataSource(array([-1.0, 0.0, 1.0]))\n datarange.add(a)\n b = self.colormap.map_screen(a.get_data())\n datarange.remove(a)\n expected = array([0.0, 0.5, 1.0])\n\n close = allclose(ravel(b[:,:1]), expected, atol=0.02)\n self.assert_(close,\n \"Changing min value broke map. Expected %s. Got %s\" % (expected, b[:,:1]))\n\n # Update the max_value.\n datarange.high = 0.0\n # Test that the map still works.\n a = ArrayDataSource(array([-1.0, -0.5, 0.0]))\n datarange.add(a)\n b = self.colormap.map_screen(a.get_data())\n datarange.remove(a)\n expected = array([0.0, 0.5, 1.0])\n\n close = allclose(ravel(b[:,:1]), expected, atol=0.02)\n self.assert_(close,\n \"Changing min value broke map. Expected %s. Got %s\" % (expected, b[:,:1]))\n\n\n return",
"def set_colormap_range(self):\n cmin = self.settingsWidget.ui.colormap_min\n cmax = self.settingsWidget.ui.colormap_max\n region = self.plot.getHistogramWidget().region\n\n if(self.sender() == region):\n cmin.setText(str(region.getRegion()[0]))\n cmax.setText(str(region.getRegion()[1]))\n return\n\n # Sometimes the values in the lineEdits are\n # not proper floats so we get ValueErrors\n try:\n # If necessary swap min and max\n if(float(cmin.text()) > float(cmax.text())):\n _tmp = cmin.text()\n cmin.setText(cmax.text())\n cmax.setText(_tmp)\n\n region = [float(cmin.text()), float(cmax.text())]\n self.plot.getHistogramWidget().region.setRegion(region)\n except ValueError:\n return",
"def rescale(self):\n low = self.datasource.data[\"values\"].min()\n high = self.datasource.data[\"values\"].max()\n\n # force color to be at lower end of the colormap if\n # data is all equal\n if low == high:\n high += 1\n\n self.set_limits_minmax(low, high)",
"def setMinValue(self, min_value):\r\n\t\tself.MinValue = min_value\r\n\t\tself.OutputValueIncrement = (self.MaxValue - self.MinValue)/(self.RampDuration/self.Ts)",
"def setMinValue(self, min_value):\r\n\t\tself.MinValue = min_value\r\n\t\tself.OutputValueIncrement = (self.MaxValue - self.MinValue)/(self.RampDuration/self.Ts)",
"def normalize_cmap(self):\n vmax, vmin = np.max(self.values), np.min(self.values)\n self.midpoint = 1 - vmax/(vmax + abs(vmin))\n if self.midpoint > 0.5:\n self.start, self.stop = 0, 0.5 + (1-self.midpoint)\n else:\n self.start, self.stop = 0.5 - self.midpoint, 1",
"def set_limits_minmax(self, zmin, zmax):\n self._color_mapper.update(low=zmin, high=zmax)\n self.update()",
"def set_view_min(self, view_min):\n try:\n view_min = float(view_min)\n self._view_min = view_min\n self.update_rgba()\n if self._cross_pos:\n self.update_orth_rgba()\n except ValueError:\n print \"view_min must be a number.\"",
"def set_low_high_value(self):\n # do not apply scaler norm on not scalable data\n self.range_dict.clear()\n\n for data_name in self.dict_to_plot.keys():\n if self.quantitative_normalization:\n # Quantitative normalization\n data_arr, _ = self.img_model_adv.param_quant_analysis.apply_quantitative_normalization(\n data_in=self.dict_to_plot[data_name],\n scaler_dict=self.scaler_norm_dict,\n scaler_name_default=self.get_selected_scaler_name(),\n data_name=data_name,\n ref_name=self.quantitative_ref_eline,\n name_not_scalable=self.name_not_scalable,\n )\n else:\n # Normalize by the selected scaler in a regular way\n data_arr = normalize_data_by_scaler(\n data_in=self.dict_to_plot[data_name],\n scaler=self.scaler_data,\n data_name=data_name,\n name_not_scalable=self.name_not_scalable,\n )\n\n lowv, highv = np.min(data_arr), np.max(data_arr)\n # Create some 'artificially' small range in case the array is constant\n if lowv == highv:\n lowv -= 0.005\n highv += 0.005\n self.range_dict[data_name] = {\"low\": lowv, \"low_default\": lowv, \"high\": highv, \"high_default\": highv}",
"def set_min(self, min):\n self.set_val((min, self.val[1]))",
"def vmin(self):\n return self._vmin",
"def __set_range_to_show(self) -> None:\n cantus_firmus_positions = [\n line_element.scale_element.position_in_semitones\n for line_element in self.cantus_firmus\n ]\n cantus_firmus_lower_bound = min(cantus_firmus_positions)\n cantus_firmus_upper_bound = max(cantus_firmus_positions)\n\n counterpoint_lower_bound = self.lowest_element.position_in_semitones\n counterpoint_upper_bound = self.highest_element.position_in_semitones\n\n self.lowest_row_to_show = min(\n cantus_firmus_lower_bound,\n counterpoint_lower_bound\n )\n self.highest_row_to_show = max(\n cantus_firmus_upper_bound,\n counterpoint_upper_bound\n )",
"def set_minVal(self, val):\n self.minVal = val",
"def _adjust_scale(self, value):\n if self._min_val <= value <= self._max_val:\n self._scale_var.set(value)\n self.update_label_text()",
"def set_limits_minmax(self, zmin, zmax):\n self.pixels.set_clim(zmin, zmax)\n self.autoscale = False",
"def _changeDisplayRange(self):\n try:\n newrange = float(str(self._wmin.text())), float(str(self._wmax.text()))\n except ValueError:\n return\n self._rc.setDisplayRange(*newrange)",
"def test_min_vs_max(self, fig_test, fig_ref):\n ax = fig_test.add_subplot(projection=\"ternary\")\n ax.set_ternary_min(0.1, 0.2, 0.3)\n\n ax = fig_ref.add_subplot(projection=\"ternary\")\n ax.set_ternary_max(0.5, 0.6, 0.7)",
"def view_limits(self, dmin, dmax):\n base = self._select_base(dmin, dmax)\n if mpl.rcParams['axes.autolimit_mode'] == 'round_numbers':\n vmin = base.le(dmin)\n vmax = base.ge(dmax)\n if vmin == vmax:\n vmin -= 1\n vmax += 1\n else:\n vmin = dmin\n vmax = dmax\n\n return mtransforms.nonsingular(vmin, vmax)",
"def set_limits_minmax(self, zmin, zmax):\n self.camera.set_clim(zmin, zmax)\n self.autoscale = False",
"def setMinValue(self, min_value):\r\n\t\tself.MinValue = min_value",
"def setMinValue(self, min_value):\r\n\t\tself.MinValue = min_value",
"def _clamp_rgb_coordinate(self, coord):\r\n\r\n if not self.is_upscaled:\r\n return min(max(coord, 0.0), 1.0)\r\n else:\r\n return min(max(coord, 1), 255)",
"def setColorBarRange(start=1,end=254):\n dislin.colran(start,end)",
"def clamp(self):\n self.threshold.data.clamp_(self.min_threshold)",
"def config_pbc_min(self):\n self._config_min()\n self.title = \"PBC Minimization\"\n self.cntrl[\"cut\"] = 8.0\n self.cntrl[\"igb\"] = 0",
"def min_value(self, min_value):\n\n self._min_value = min_value",
"def min_value(self, min_value):\n\n self._min_value = min_value",
"def min_value(self, min_value):\n\n self._min_value = min_value",
"def testMinMax(self, value):\n\t\tif value > self.oldmax:\n\t\t\tself.oldmax = value\n\t\t\tself.maxBox.SetValue(str(value).encode('utf-8'))\n\t\telif value < self.oldmin:\n\t\t\tself.oldmin = value\n\t\t\tself.minBox.SetValue(str(value).encode('utf-8'))",
"def set_mapping(self, value_min, value_min_raw, value_max, value_max_raw):\n assert value_min <= value_max\n # prevent division by zero.\n if value_min == value_max:\n value_max += 1.\n if value_min_raw == value_max_raw:\n value_max_raw += 1.\n self.value_min = value_min\n self.value_max = value_max\n self.value_min_raw = value_min_raw\n self.value_max_raw = value_max_raw\n self._value_scale = (self.value_max - self.value_min) / (self.value_max_raw - self.value_min_raw)",
"def resetMinZoomVisibility(self):\n self._min_zoom = None",
"def setMinZoomVisibility(self, zoom_level):\n self._min_zoom = zoom_level",
"def setLowerThreshold(self, lower_threshold):\r\n\t\tself.LowerThreshold = lower_threshold",
"def clamp_values(result,vmin=0.0, vmax=10.0):\n for entry in result:\n for ht in result[entry]:\n ht[ht<vmin] = vmin\n ht[ht>vmax] = vmax",
"def _onSetParameterLower(self, value):\n self._parameters['lower'] = min(value, self._parameters['upper']) # Limit at upper\n self._logger.info(\"Parameter 'lower' of function '{}' changed to {}\".format(self._function, value))\n self.functionChanged.emit(self._dim, self._function, self._parameters.copy())",
"def _updateDisplayRange(self, dmin, dmax):\n self._wmin.setText(\"%.4g\" % dmin)\n self._wmax.setText(\"%.4g\" % dmax)\n self._updateFullRangeIcon()",
"def set_limits_minmax(self, zmin, zmax):\n self.pixels.set_clim(zmin, zmax)\n self.autoscale = False\n self._update()",
"def SetThreshold (self,VolumeNode, min, max):\n DisplayNode = VolumeNode.GetScalarVolumeDisplayNode()\n DisplayNode.SetApplyThreshold(True)\n DisplayNode.SetThreshold(min,max)",
"def SetScalarDisplay(self, ScalarvolumeNode, MinThresh = 10, Max = False):\n if not ScalarvolumeNode or ScalarvolumeNode.GetScalarVolumeDisplayNode()==None:\n return\n SvD = ScalarvolumeNode.GetScalarVolumeDisplayNode()\n SvD.SetAndObserveColorNodeID('vtkMRMLColorTableNodeRainbow')\n SvD.SetAutoWindowLevel(True)\n SvD.SetApplyThreshold(True)\n MaxThresh = SvD.GetUpperThreshold()\n if Max:\n MaxThresh = Max\n SvD.SetThreshold(MinThresh, MaxThresh)",
"def setDisplayActiveRange(selforcls, newRange):\n newRange = (selforcls.toSi(min(newRange)),\n selforcls.toSi(max(newRange)))\n selforcls.setActiveRange(newRange)",
"def onAxisMinimumChanged(self, axis_name, value):\n if axis_name in self.axes:\n if value is None:\n self.axes[axis_name].setMin(self.min[axis_name])\n else:\n self.axes[axis_name].setMin(value)",
"def reset_low_high(self, name):\n self.range_dict[name][\"low\"] = self.range_dict[name][\"low_default\"]\n self.range_dict[name][\"high\"] = self.range_dict[name][\"high_default\"]\n self.limit_dict[name][\"low\"] = 0.0\n self.limit_dict[name][\"high\"] = 100.0\n self.show_image()",
"def set_weights(self,weights):\n for i,layer in enumerate(weights):\n #checking for any values equal to minval\n if np.any(layer==self.minval):\n weights[i]=np.where(weights[i]==self.minval,self.replace_min,weights[i])\n super().set_weights(list(weights))",
"def threshold(self, vmin, vmax=None, vcut=0.):\n if vmax == None:\n vmin = -np.abs(vmin)\n vmax = +np.abs(vmin)\n assert (vmin < vmax)\n\n ret = self.copy()\n for m in [ret.tmap, ret.qmap, ret.umap]:\n m[np.where(m < vmin)] = vcut\n m[np.where(m > vmax)] = vcut\n return ret",
"def set_contrast_range(self, zmin, zmax):\n self.get_contrast_panel().set_range(zmin, zmax)",
"def is_scale_enabled(self) -> bool:\r\n ...",
"def abs_min_cool_setpoint_limit(self) -> int:\n return self.cluster.get(\"abs_min_cool_setpoint_limit\", 1600)",
"def set_output_limits(self, min_value, max_value):\n self.out_min = min_value\n self.out_max = max_value\n if self.out_min > self.out_max:\n print(\"set_output_limits(): min must be smaller than max.\")\n self.iterm = self.clip_to_output_limits(self.iterm)\n self.output = self.clip_to_output_limits(self.output)",
"def ColorBarEnabled(self):\n sliceAnnotations = DataProbeLib.SliceAnnotations()\n sliceAnnotations.scalarBarEnabled = 1\n sliceAnnotations.updateSliceViewFromGUI()",
"def ignore_and_range(MAT, k=-9999.0):\n MAT[MAT==k]=0\n MAT = ((MAT-(MAT.min()))) / (MAT.max() - MAT.min())\n MAT *= 255\n \n return MAT",
"def set_min_confidence(self, new_min):\n self.__min_confidence = new_min",
"def cmin(self):\n return self['cmin']",
"def mip_slider_value_changed(self):\n min_value = float(self.min_slider.value()) / float(self.min_slider.maximum())\n max_value = float(self.max_slider.value()) / float(self.max_slider.maximum())\n\n self.render_widget.mipMin = self.render_widget.minimum \\\n + (self.render_widget.maximum - self.render_widget.minimum) * min_value\n self.render_widget.mipMax = self.render_widget.minimum \\\n + (self.render_widget.maximum - self.render_widget.minimum) * max_value\n\n self.render_widget.update()",
"def find_max_min(self, col):\n self.max = max(col)\n self.min = min(col)",
"def _single_value_min(data, threshold):\r\n amin = np.min(data)\r\n amax = np.max(data)\r\n limit = amin + (amax - amin) * threshold\r\n return data < limit",
"def min(self, min):\n\n self._min = min",
"def min(self, min):\n\n self._min = min",
"def set_min_position(self, min_us):\n raise NotImplementedError()",
"def unsafe(self): \n return self.distmin < self.distmax*0.5",
"def set_colorbar_limits(fld,cmin,cmax):\n\n # handle input\n if (cmin is None) and (cmax is not None):\n raise RuntimeError('Only cmax given, must provide both cmin and cmax')\n elif (cmin is not None) and (cmax is None):\n raise RuntimeError('Only cmin given, must provide both cmin and cmax')\n else:\n # handle colorbar limits accidentally passed as with xarray functions\n if type(cmin) is xr.DataArray:\n cmin = cmin.values()\n elif cmin is not None:\n raise TypeError('Unsure of cmin type: ',type(cmin))\n if type(cmax) is xr.DataArray:\n cmax = cmax.values()\n elif cmax is not None:\n raise TypeError('Unsure of cmax type: ',type(cmax))\n\n # compute fld limits\n fld_min = fld.min(skipna=True).values\n fld_max = fld.max(skipna=True).values\n\n # if cmin/cmax not set, compute\n if (cmin is None) and (cmax is None):\n\n cmin = fld_min\n cmax = fld_max\n\n # determine if divergent colorbar \n # Note: Not making divergent colorbar for temperature\n # in degC because still sequential even though +/-\n if (fld_max*fld_min < 0) and (fld.name is not 'THETA'):\n cmax = np.nanmax(np.abs(fld.values))\n cmin = -cmax\n\n # determine if colorbar needs to be extended\n if (cmin > fld_min) and (cmax < fld_max):\n extend_cbar = \"both\"\n elif cmin > fld_min:\n extend_cbar = \"min\"\n elif cmax < fld_max:\n extend_cbar = \"max\"\n else:\n extend_cbar = \"neither\"\n\n return cmin, cmax, extend_cbar",
"def _plot_one_value(\n data_matrix, grid_metadata_dict, colour_map_object, min_colour_value,\n max_colour_value, plot_cbar_min_arrow, plot_cbar_max_arrow,\n log_scale=False):\n\n figure_object, axes_object = pyplot.subplots(\n 1, 1, figsize=(FIGURE_WIDTH_INCHES, FIGURE_HEIGHT_INCHES)\n )\n\n basemap_object, basemap_x_matrix_metres, basemap_y_matrix_metres = (\n _get_basemap(grid_metadata_dict)\n )\n\n num_grid_rows = data_matrix.shape[0]\n num_grid_columns = data_matrix.shape[1]\n x_spacing_metres = (\n (basemap_x_matrix_metres[0, -1] - basemap_x_matrix_metres[0, 0]) /\n (num_grid_columns - 1)\n )\n y_spacing_metres = (\n (basemap_y_matrix_metres[-1, 0] - basemap_y_matrix_metres[0, 0]) /\n (num_grid_rows - 1)\n )\n\n data_matrix_at_edges, edge_x_coords_metres, edge_y_coords_metres = (\n grids.xy_field_grid_points_to_edges(\n field_matrix=data_matrix,\n x_min_metres=basemap_x_matrix_metres[0, 0],\n y_min_metres=basemap_y_matrix_metres[0, 0],\n x_spacing_metres=x_spacing_metres,\n y_spacing_metres=y_spacing_metres)\n )\n\n data_matrix_at_edges = numpy.ma.masked_where(\n numpy.isnan(data_matrix_at_edges), data_matrix_at_edges\n )\n\n # data_matrix_at_edges[numpy.isnan(data_matrix_at_edges)] = -1\n\n plotting_utils.plot_coastlines(\n basemap_object=basemap_object, axes_object=axes_object,\n line_colour=BORDER_COLOUR)\n\n plotting_utils.plot_countries(\n basemap_object=basemap_object, axes_object=axes_object,\n line_colour=BORDER_COLOUR)\n\n plotting_utils.plot_states_and_provinces(\n basemap_object=basemap_object, axes_object=axes_object,\n line_colour=BORDER_COLOUR)\n\n plotting_utils.plot_parallels(\n basemap_object=basemap_object, axes_object=axes_object,\n num_parallels=NUM_PARALLELS)\n\n plotting_utils.plot_meridians(\n basemap_object=basemap_object, axes_object=axes_object,\n num_meridians=NUM_MERIDIANS)\n\n basemap_object.pcolormesh(\n edge_x_coords_metres, edge_y_coords_metres,\n data_matrix_at_edges, cmap=colour_map_object,\n vmin=min_colour_value, vmax=max_colour_value, shading='flat',\n edgecolors='None', axes=axes_object, zorder=-1e12)\n\n colour_bar_object = plotting_utils.plot_linear_colour_bar(\n axes_object_or_matrix=axes_object, data_matrix=data_matrix,\n colour_map_object=colour_map_object, min_value=min_colour_value,\n max_value=max_colour_value, orientation_string='horizontal',\n extend_min=plot_cbar_min_arrow, extend_max=plot_cbar_max_arrow,\n padding=0.05)\n\n tick_values = colour_bar_object.get_ticks()\n\n if log_scale:\n tick_strings = [\n '{0:d}'.format(int(numpy.round(10 ** v))) for v in tick_values\n ]\n elif numpy.nanmax(data_matrix) >= 6:\n tick_strings = [\n '{0:d}'.format(int(numpy.round(v))) for v in tick_values\n ]\n else:\n tick_strings = ['{0:.2f}'.format(v) for v in tick_values]\n\n colour_bar_object.set_ticks(tick_values)\n colour_bar_object.set_ticklabels(tick_strings)\n\n return figure_object, axes_object",
"def abs_min_heat_setpoint_limit(self) -> int:\n return self.cluster.get(\"abs_min_heat_setpoint_limit\", 700)",
"def highlight_min_max(s, min_color=\"#5fba7d\", max_color=\"#e67575\"):\n is_max = s == s.max()\n is_min = s == s.min()\n max_mapping = [f'background-color: {max_color}' if v else '' for v in is_max]\n min_mapping = [f'background-color: {min_color}' if v else '' for v in is_min]\n return [min_mapping[i] if min_mapping[i] != '' else max_mapping[i] for i in range(len(min_mapping))]",
"def truncate_colormap(cmap, minval=0, maxval=1, n=256):\n if isinstance(cmap, str):\n cmap = plt.get_cmap(cmap)\n new_cmap = mpl.colors.LinearSegmentedColormap.from_list(\n 'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval),\n cmap(np.linspace(minval, maxval, n)))\n return new_cmap",
"def is_lower_limit(self):\n is_lower = self.get_raw_status() & self.STATUS_LLIM\n return bool(is_lower)",
"def view_limits(self, vmin, vmax):\n return vmin, vmax\n # return nonsingular(vmin, vmax)",
"def u_min(self):\n if self._u_min is None:\n return np.abs(self.uvgrid).min()\n else:\n return self._u_min",
"def replace_zeros(self):\n\n min_c = np.array(self.map[self.column])\n self.map.loc[self.map[self.column]==0, self.column] = np.min(min_c[np.nonzero(min_c)])",
"def cmin(self):\n return self[\"cmin\"]",
"def __init__(self, value):\n self.value = max(min(value,1.0),-1.0)",
"def __init__(self, value):\n self.value = max(min(value,1.0),-1.0)",
"def config_gb_min(self):\n\n self._config_min()\n self.title = \"GB Minimization\"\n self.cntrl[\"cut\"] = 999.0\n self.cntrl[\"igb\"] = 1",
"def _set_minimum(self):\n self._level_gen.minimum_length = self._minimum_length_spinbox.value()\n self._refresh_view()",
"def set_locked_temp_min(self, value: int = 0):\r\n if self._temperature_scale == \"F\":\r\n self._locked_temp_min = celsius_to_kelvin(\r\n fahrenheit_to_celsius(value)\r\n )\r\n elif self._temperature_scale == \"C\":\r\n self._locked_temp_min = celsius_to_kelvin(value)\r\n else:\r\n self._locked_temp_min = value\r\n\r\n self._logger.info(log_message_formatter(\r\n \"set\", f\"{self}\", \"locked_temp_min\", value))",
"def clamp(value, mn, mx):\n\n return max(min(value, mx), mn)",
"def setValues(self, values):\n if values is not None:\n self.scale_min, self.scale_max = values\n if self.scale_min is None:\n self.scale_min = self.start\n if self.scale_max is None:\n self.scale_max = self.end\n else:\n self.scale_min = self.start\n self.scale_max = self.end\n self.emitRange()\n self.updateDisplayValues()\n self.update()",
"def test_negative_ticks(self):\n fig = plt.figure()\n ax = fig.add_subplot(projection='ternary')\n ax.set_ternary_min(0, 3, -3)",
"def display_cmap_color_range(cmap_style='rainbow'):\n cmap = plt.get_cmap(cmap_style)\n for c in range(256):\n plt.scatter([c], [0], s=500, c=cmap(c), lw=0)\n plt.show()",
"def truncate_colormap(cmap_str, minval=0.0, maxval=1.0, n=100):\n cmap = plt.get_cmap(cmap_str)\n new_cmap = colors.LinearSegmentedColormap.from_list(\n 'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval),\n cmap(np.linspace(minval, maxval, n)))\n return new_cmap",
"def isLow(self):\n\t\treturn self.resolution == 'LOW'",
"def _truncate_cmap(cmap, Y_thresh=0.65, start_offN=100):\n\n cmap_func = plt.get_cmap(cmap)\n allcolors = cmap_func(np.linspace(0., 1., start_offN))\n mask = np.array([colorsys.rgb_to_yiq(*c[:-1])[0] <= Y_thresh for c in allcolors])\n if ~mask.any():\n return cmap # not truncated\n else:\n return colors.LinearSegmentedColormap.from_list('trunc_cmap', allcolors[mask])",
"def showscale(self):\n return self['showscale']",
"def update_pheromones_bounds(self):\n self.pheromones[self.pheromones < self.PH_MIN] = self.PH_MIN\n self.pheromones[self.pheromones > self.PH_MAX] = self.PH_MAX",
"def autoscale(self, A):\n self.vmin = ma.min(A)\n self.vmax = ma.max(A)",
"def userMinimum(self, new_min: float) -> None:\n self._user_minimum = new_min\n self.reset_limits()",
"def minmax_clipping(self, min_clip=None, max_clip=None):\n if min_clip is not None:\n mask = (self.data_arr < min_clip)\n self.data_arr.mask[mask] = True\n if max_clip is not None:\n mask = (self.data_arr > max_clip)\n self.data_arr.mask[mask] = True",
"def set_range(self, full_range, abs_range=None, suppress_feedback=False):\n self._enabled = full_range[0] != full_range[1]\n if not self._enabled:\n full_range = (\n full_range[0] - 1, full_range[0])\n self._suppress_feedback = suppress_feedback\n super(MarkerProperty, self).set_range(full_range, abs_range=abs_range)",
"def min():\n valid=result_alpha.F>0\n src_data.F[valid]=np.minimum( src_data.F[valid],result_data.F[valid] )",
"def above_threshold(self, value):\n # We use floating point number here so we have to take care\n return finf(value,self.min) or finf(self.max,value)",
"def min_values(self, lower, upper): \n if not self.lower_bounds is None:\n return self.lower_bounds\n\n minus = np.clip(self.coeffs,-math.inf,0)\n plus = np.clip(self.coeffs,0,math.inf)\n self.lower_bounds = plus.dot(lower) + minus.dot(upper) + self.const\n \n return self.lower_bounds",
"def set_minimum(self, min_value):\n\n self._progress.setMinimum(min_value)",
"def SetPixelValueMinMax(self, min: 'short', max: 'short') -> \"void\":\n return _itkScalarImageToRunLengthFeaturesFilterPython.itkScalarImageToRunLengthFeaturesFilterISS3_SetPixelValueMinMax(self, min, max)",
"def clip_to_output_limits(self, value):\n return max(self.out_min, min(self.out_max, value))",
"def Maximize(self):\r\n\r\n return self.SetFlag(self.optionMaximized, True)",
"def set_val(self, val):\n val = int(val)\n # valmax is not allowed, since it is out of the array.\n # valmin is allowed since 0 index is in depth array.\n if val < self.valmin or val >= self.valmax:\n # invalid, so ignore\n return\n # activate color is first since we still have access to self.val\n self.updatePageDepthColor(val)\n Slider.set_val(self, val)",
"def showscale(self):\n return self[\"showscale\"]",
"def isHittingLow(self):\n return not self.limLow.get()"
] | [
"0.6897544",
"0.68503755",
"0.6472414",
"0.64165556",
"0.6364885",
"0.6256008",
"0.6017445",
"0.6017445",
"0.5980322",
"0.5958103",
"0.59348243",
"0.58880955",
"0.58671427",
"0.5842141",
"0.580647",
"0.57340556",
"0.57123756",
"0.56508297",
"0.56315273",
"0.5560471",
"0.5549892",
"0.5542802",
"0.55403185",
"0.55403185",
"0.54861516",
"0.54762757",
"0.54597443",
"0.54445773",
"0.54432094",
"0.54432094",
"0.54432094",
"0.5441221",
"0.542877",
"0.54276305",
"0.5418674",
"0.5406932",
"0.5402066",
"0.5367152",
"0.53563213",
"0.5354864",
"0.5344467",
"0.5342328",
"0.5333022",
"0.53297997",
"0.531426",
"0.52960163",
"0.5290347",
"0.5288673",
"0.5287815",
"0.52703774",
"0.52619714",
"0.526102",
"0.5244387",
"0.5243458",
"0.5238343",
"0.5235204",
"0.52343875",
"0.5233531",
"0.52266216",
"0.52266216",
"0.5220103",
"0.5217105",
"0.521129",
"0.5202991",
"0.5202401",
"0.5196699",
"0.51802826",
"0.518024",
"0.5173164",
"0.51721275",
"0.5164997",
"0.5158756",
"0.51559",
"0.51559",
"0.5151435",
"0.5151431",
"0.5138951",
"0.5138105",
"0.5136024",
"0.51353174",
"0.51235956",
"0.51208746",
"0.51117915",
"0.51089764",
"0.51088166",
"0.5102604",
"0.5089171",
"0.50816363",
"0.5074479",
"0.50691676",
"0.50689673",
"0.50601965",
"0.5056774",
"0.5045603",
"0.50453854",
"0.50183356",
"0.50174946",
"0.5016484",
"0.50045484",
"0.50042224"
] | 0.77897596 | 0 |
Return the range of the data as a 3-tuple of values. The positive min is NaN if no data is positive. | def getDataRange(self):
return None if self._dataRange is None else tuple(self._dataRange) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _computeRangeFromData(data):\n if data is None:\n return None\n\n dataRange = min_max(data, min_positive=True, finite=True)\n if dataRange.minimum is None: # Only non-finite data\n return None\n\n if dataRange is not None:\n min_positive = dataRange.min_positive\n if min_positive is None:\n min_positive = float('nan')\n return dataRange.minimum, min_positive, dataRange.maximum",
"def get_range(cls, data: tuple or list) -> float:\n cls._data_validation(data)\n max_ = cls.get_max(data)\n min_ = cls.get_min(data)\n return float(max_ - min_)",
"def _full_value_range(self):\n min_value, max_value = self._raw_data.data_range\n return max_value - min_value",
"def data_range(x):\n return max(x)-min(x)",
"def data_range(xs: List[float]) -> float:\n return max(xs) - min(xs)",
"def get_range(self) -> tuple[int, int]:\n return self.range_from, self.range_to",
"def range(self) -> ty.Tuple[float, float]:\r\n ...",
"def range(self) -> Tuple[Union[int, float], Union[int, float]]:\n return self._range",
"def range(self):\n lows, highs = [], []\n for graph in self._graphs.values():\n low, high = graph.range()\n lows.append(low)\n highs.append(high)\n return (min(lows), max(highs))",
"def ex_range(data):\n a, b, step = _cleanse_range_args(data)\n return list(range(a, b+sign(step), step))",
"def getRange(self) -> Tuple[int, int]:\n return self.validator().bottom(), self.validator().top()",
"def getColorRange(self):\n vmax=self.data_matrix.max()\n vmin=self.data_matrix.min()\n\n if vmax * vmin < 0: # ie number range spans +ve and -ve\n vmax = max([vmax, abs(vmin)])\n vmin = -1*vmax\n\n return vmax,vmin",
"def range_(self):\n return tuple((e[0], e[-1]) for e in self.edges)",
"def range(series):\n return min(series), max(series)",
"def range(x):\n try:\n return (min(min(y) for y in x), max(max(y) for y in x))\n except ValueError:\n return (None, None)",
"def _get_range(self):\n return tuple((0, m, 1) for m in self.level_shapes[0])",
"def get_max_and_min(self):\n max_x = float('-inf')\n min_x = float('inf')\n max_y = float('-inf')\n min_y = float('inf')\n max_z = float('-inf')\n min_z = float('inf')\n ans = max_x, max_y, max_z, min_x, min_y, min_z\n counter = 0\n for src, node in self._graph.get_all_v().items():\n if node.location is not None:\n x = node.location.x\n y = node.location.y\n z = node.location.z\n counter += 1\n max_x = x if x > max_x else max_x\n min_x = x if x < min_x else min_x\n max_y = y if y > max_y else max_y\n min_y = y if y < min_y else min_y\n max_z = z if z > max_z else max_z\n min_z = z if z < min_z else min_z\n if counter > 4:\n ans = max_x, max_y, max_z, min_x, min_y, min_z\n return ans",
"def m_to_range(self, data):\n return (data - self._min_range_m) / self._total_range",
"def bounds(self):\n\n if self.size == 0:\n lo, hi = np.nan, np.nan\n elif self.is_monotonic:\n lo, hi = sorted([self.coordinates[0], self.coordinates[-1]])\n elif self.dtype is np.datetime64:\n lo, hi = np.min(self.coordinates), np.max(self.coordinates)\n else:\n lo, hi = np.nanmin(self.coordinates), np.nanmax(self.coordinates)\n\n return lo, hi",
"def _value_in_bounds(self, vals):\n return (self._min_in_bounds(vals[0]), self._max_in_bounds(vals[1]))",
"def yield_spectral_range(self) -> Tuple[float, float, float]:\n return [min(self.x), max(self.x), len(self.x)]",
"def scalar_range2tuple(sr: ScalarRange, defaults=(-np.inf, np.inf)):\n return (\n sr.min.value if sr.HasField(\"min\") else defaults[0],\n sr.max.value if sr.HasField(\"max\") else defaults[1],\n )",
"def minmax(data):\n smallest = data[0]\n largest = data[0]\n\n for i in range(0,len(data)):\n if data[i] < smallest:\n smallest = data[i]\n elif data[i] > largest:\n largest = data[i]\n\n return(smallest,largest)",
"def calcrange(a4lim,data):\r\n a4range=N.intersect1d(N.where(data>a4lim[0])[0],N.where(data<a4lim[1])[0])\r\n return a4range",
"def find_min_max(data):\n v = [i[1] for i in data]\n extremes = [min(v), max(v)]\n logging.info('Calculated extremes: %s', extremes)\n return extremes",
"def _get_min_max_value(min, max, value=None, step=None):\n # Either min and max need to be given, or value needs to be given\n if value is None:\n if min is None or max is None:\n raise ValueError('unable to infer range, value from: ({0}, {1}, {2})'.format(min, max, value))\n diff = max - min\n value = min + (diff / 2)\n # Ensure that value has the same type as diff\n if not isinstance(value, type(diff)):\n value = min + (diff // 2)\n else: # value is not None\n if not isinstance(value, Real):\n raise TypeError('expected a real number, got: %r' % value)\n # Infer min/max from value\n if value == 0:\n # This gives (0, 1) of the correct type\n vrange = (value, value + 1)\n elif value > 0:\n vrange = (-value, 3*value)\n else:\n vrange = (3*value, -value)\n if min is None:\n min = vrange[0]\n if max is None:\n max = vrange[1]\n if step is not None:\n # ensure value is on a step\n tick = int((value - min) / step)\n value = min + tick * step\n if not min <= value <= max:\n raise ValueError('value must be between min and max (min={0}, value={1}, max={2})'.format(min, value, max))\n return min, max, value",
"def _get_time_range(self, data):\n time = data.coords[self.time_field]\n if time.size == 0:\n raise ProviderNoDataError()\n else:\n start = _to_datetime_string(data[self.time_field].values.min())\n end = _to_datetime_string(data[self.time_field].values.max())\n return [start, end]",
"def get_bounds(self, value = None, index = None):\n\n if self._data is None or 0 in self._data.shape:\n return (0.0, 0.0)\n\n if type(value) == types.IntType:\n if self.value_dimension == 0:\n maxi = nanmax(self._data[value, ::])\n mini = nanmin(self._data[value, ::])\n else:\n # value_dimension == 1\n maxi = nanmax(self._data[::, value])\n mini = nanmin(self._data[::, value])\n elif type(index) == types.IntType:\n if self.index_dimension == 0:\n maxi = nanmax(self._data[index, ::])\n mini = nanmin(self._data[index, ::])\n else:\n # index_dimension == 1\n maxi = nanmax(self._data[::, index])\n mini = nanmin(self._data[::, index])\n else:\n # value is None and index is None:\n maxi = nanmax(self._data)\n mini = nanmin(self._data)\n\n return (mini, maxi)",
"def get_lims(data):\n return data[:, 0].min() - 1, data[:, 0].max() + 1, data[:, 1].min() - 1, data[:, 1].max() + 1",
"def get_range(lst):\n return float(max(lst)) - float(min(lst))",
"def get_range(df, col):\n return df[col].min(), df[col].max()",
"def get_bounds(self):\n return ([self.t_min] * self.dim,[self.t_max] * self.dim)",
"def findRanges(data_grouped):\n ranges = []\n for i in data_grouped.columns:\n theRange = (data_grouped[i].min(), data_grouped[i].max())\n ranges.append(theRange)\n return ranges",
"def range_(headers, data):\n\tcolumn_matrix=data.get_data(headers).getT() # get columns as rows, as this makes analysis much easier by just perfoming operations on column list directly\n\tif column_matrix==[]:\n\t\tprint \"wrong headers, not present in data Object\"\n\t\treturn []\n\tcolumn_max=column_matrix.max(1)\n\tcolumn_min=column_matrix.min(1)\n\tfinal=np.concatenate((column_min, column_max), axis=1)\n\t\n\trng=final.tolist()\n\treturn rng",
"def _parse_vrange(self, data):\n vmin = self.config.get('vmin', np.nanmin(data))\n vmax = self.config.get('vmax', np.nanmax(data))\n vrange = self.config.get('vrange', None)\n\n # Parse vmin, vmax\n if isinstance(vmin, str):\n vmin = np.nanquantile(data, q=float(vmin))\n if isinstance(vmax, str):\n vmax = np.nanquantile(data, q=float(vmax))\n\n # Parse vrange\n if vrange is True:\n vrange = max(abs(np.nanmin(data)), abs(np.nanmax(data)))\n elif isinstance(vrange, str):\n vrange = abs(np.nanquantile(data, q=(float(vrange), 1-float(vrange)))).max()\n\n if vrange is not None:\n if isinstance(vrange, (list, tuple, np.ndarray)):\n vmin, vmax = vrange\n else:\n vmin, vmax = -vrange, vrange\n return vmin, vmax",
"def get_ranges(self) -> typing.List[typing.Tuple[float, float]]:\n return self.ranges[:]",
"def get_bounds(self):\n x_max = self.data['x'].max()\n y_max = self.data['y'].max()\n z_max = self.data['z'].max()\n print(\"x={}; y={}; z={}\".format(x_max, y_max, z_max))\n return (x_max, y_max, z_max)",
"def fetchbounds(self):\n pnts = [x for x in [self.out_start, self.start, self.in_start, \\\n self.in_end, self.end, self.out_end] \\\n if x is not None]\n return min(pnts), max(pnts)",
"def get_xrange_indices(self, lower, upper) -> Tuple[int, int]:\n lower_index = np.argmax(self.x >= lower)\n upper_index = np.argmax(self.x >= upper)\n return int(lower_index), int(upper_index)",
"def in_range(data, minval=-np.inf, maxval=np.inf):\n return (minval <= data) & (data <= maxval)",
"def get_range(self, start=None, end=None):\n\n # handle the case of no data\n if self.data.shape[0] == 0 or self.source.data[\"index\"].shape[0] == 0:\n return None, None\n\n first_source_idx = self.source.data[\"index\"][0]\n last_source_idx = self.source.data[\"index\"][-1]\n\n # convert to timestamp if necessary\n if isinstance(self.data.index, pd.DatetimeIndex):\n start = pd.to_datetime(start, unit=\"ms\")\n end = pd.to_datetime(end, unit=\"ms\")\n first_source_idx = pd.to_datetime(first_source_idx, unit=\"ms\")\n last_source_idx = pd.to_datetime(last_source_idx, unit=\"ms\")\n\n # get new start and end\n if start is not None:\n if start < first_source_idx:\n start = max(self.data.index[0], start)\n elif start > last_source_idx:\n start = min(self.data.index[-1], start)\n elif start < self.data.index[0]:\n start = self.data.index[0]\n elif start > self.data.index[-1]:\n start = self.data.index[-1]\n elif len(self.source.data[\"index\"]) > 0:\n start = first_source_idx\n else:\n start = self.data.index[0]\n\n if end is not None:\n if end < first_source_idx:\n end = max(self.data.index[0], end)\n elif end > last_source_idx:\n end = min(self.data.index[-1], end)\n elif end < self.data.index[0]:\n end = self.data.index[0]\n elif end > self.data.index[-1]:\n end = self.data.index[-1]\n elif len(self.source.data[\"index\"]) > 0:\n end = last_source_idx\n else:\n end = self.data.index[-1]\n\n return start, end",
"def calcrange(data, log=False):\n xmin, xmax = None, None\n for x in data:\n if not log or x > 0.:\n if xmin is None or x < xmin: xmin = x\n if xmax is None or x > xmax: xmax = x\n\n if xmin is None and xmax is None:\n if log:\n return 0.1, 1.\n else:\n return 0., 1.\n else:\n return xmin, xmax",
"def min_range(self):\n return self._min_range",
"def limits(self):\n\n\t\treturn [\n\t\t\tmin(self.xvalues),\n\t\t\tmax(self.xvalues),\n\t\t\tmin(self.yvalues),\n\t\t\tmax(self.yvalues)]",
"def range(self):\n return (self._start, self._end)",
"def values(self):\n lower = float(self.lowerSpnbx.value())\n upper = float(self.upperSpnbx.value())\n return lower, upper",
"def min_values(self, lower, upper): \n if not self.lower_bounds is None:\n return self.lower_bounds\n\n minus = np.clip(self.coeffs,-math.inf,0)\n plus = np.clip(self.coeffs,0,math.inf)\n self.lower_bounds = plus.dot(lower) + minus.dot(upper) + self.const\n \n return self.lower_bounds",
"def range(df):\r\n\r\n\tdf_range_dict = dict()\r\n\r\n\tfor i, col in enumerate(df.columns):\r\n\t\tdf_range_dict[col] = [df[col].max(), df[col].min(), df[col].max() - df[col].min()]\r\n\r\n\tdf_range = pd.DataFrame(df_range_dict, index=['Max Value', 'Min Value', 'Range (Max - Min)'])\r\n\tpd.set_option('precision', 2) # set output display precision in 2 decimal places\r\n\r\n\treturn df_range",
"def regression_range(self):\n regression_range = detect_regression_range.DetectRegressionRange(\n self.historical_metadata)\n if regression_range is None: # pragma: no cover\n logging.warning('Got ``None`` for the regression range.')\n else:\n regression_range = tuple(regression_range)\n\n return regression_range",
"def interval(self):\n return (self.start, S.Infinity)",
"def getMinMax(self,arr):\n # not implemented for Template SED yet\n return arr[\"z\"], arr[\"z\"]",
"def xmin(self):\n return asarray([b[0] for b in self.bounds])",
"def get_index_range_inclusive(self):\n nx, ny, nz = self.get_mesh_size()\n return (1, nx, 1, ny, 1, nz)",
"def get_xrange(self):\n return self.xvec[0], self.xvec[-1]",
"def get_min_max(self) -> tuple:\r\n\r\n minimum = float(\"inf\")\r\n maximum = float(\"-inf\")\r\n\r\n for name, data in self.scatters_data.items():\r\n mapping = self.scatters[name][\"mapping\"]\r\n min_x = float(\"inf\")\r\n min_y = float(\"inf\")\r\n min_z = float(\"inf\")\r\n max_x = float(\"-inf\")\r\n max_y = float(\"-inf\")\r\n max_z = float(\"-inf\")\r\n\r\n if mapping[\"x\"] in data:\r\n min_x = min(data[mapping[\"x\"]])\r\n max_x = max(data[mapping[\"x\"]])\r\n\r\n if mapping[\"y\"] in data:\r\n min_y = min(data[mapping[\"y\"]])\r\n max_y = max(data[mapping[\"y\"]])\r\n\r\n if mapping[\"z\"] in data:\r\n min_z = min(data[mapping[\"z\"]])\r\n max_z = max(data[mapping[\"z\"]])\r\n\r\n minimum = min(minimum, min([min_x, min_y, min_z]))\r\n maximum = max(maximum, max([max_x, max_y, max_z]))\r\n\r\n for name, data in self.trees_data.items():\r\n if self.trees[name][\"point_helper\"] is None:\r\n mapping = self.trees[name][\"mapping\"]\r\n min_x = float(\"inf\")\r\n min_y = float(\"inf\")\r\n min_z = float(\"inf\")\r\n max_x = float(\"-inf\")\r\n max_y = float(\"-inf\")\r\n max_z = float(\"-inf\")\r\n\r\n if mapping[\"x\"] in data:\r\n min_x = min(data[mapping[\"x\"]])\r\n max_x = max(data[mapping[\"x\"]])\r\n\r\n if mapping[\"y\"] in data:\r\n min_y = min(data[mapping[\"y\"]])\r\n max_y = max(data[mapping[\"y\"]])\r\n\r\n if mapping[\"z\"] in data:\r\n min_z = min(data[mapping[\"z\"]])\r\n max_z = max(data[mapping[\"z\"]])\r\n\r\n minimum = min(minimum, min([min_x, min_y, min_z]))\r\n maximum = max(maximum, max([max_x, max_y, max_z]))\r\n\r\n return minimum, maximum",
"def range(self):\n return self.range_array",
"def range(self):\n lower, upper = sorted((self.y1, self.y2))\n return FloatRange(lower=lower, upper=upper)",
"def get_dyn_range(scale, zero_point, dtype):\n if dtype == torch.quint8:\n min_val, max_val = 0, 255\n elif dtype == torch.qint8:\n min_val, max_val = -128, 127\n else:\n raise RuntimeError(f\"Unsupported quantized dtype {dtype}\")\n\n return (min_val - zero_point) * scale, (max_val - zero_point) * scale",
"def minmax(xs):\n min_val = None\n max_val = None\n for x in xs:\n if min_val is None or x < min_val:\n min_val = x\n if max_val is None or x > max_val:\n max_val = x\n return (min_val, max_val)",
"def min_max(self, data, era):\n return 0, np.max(data)",
"def GetScalarRange(self):\n ...",
"def test_inclusive_intervals(self):\n dim = Integer(\"yolo\", \"uniform\", -3, 5.5)\n assert dim.interval() == (-3, 3)",
"def get_bounds():\n return [0.00], [1.00]",
"def get_statistics(data):\n v_min = None\n v_max = None\n v_avg = None\n v = None\n v_sum = .0\n count = 0\n for d in data:\n if d is None:\n continue\n try:\n v = float(d)\n except ValueError:\n print(pc.CRED, d, pc.CEND, end=',')\n continue\n if count == 0:\n v_min = v\n v_max = v\n else:\n if v < v_min:\n v_min = v\n if v > v_max:\n v_max = v\n v_sum += v\n count += 1\n if count > 0:\n v_avg = round(v_sum/count, 2)\n return v_min, v_max, v_avg",
"def _query_range_get(self):\n return (self.query_start, self.query_end)",
"def range_to_m(self, data):\n return data * self._total_range + self._min_range_m",
"def _rangeQueryFloatFeature(self):\n\n # create args\n minToGet = c_double()\n maxToGet = c_double()\n\n errorCode = VimbaDLL.featureFloatRangeQuery(self._handle,\n self._name,\n byref(minToGet),\n byref(maxToGet))\n if errorCode != 0:\n raise VimbaException(errorCode)\n\n return (minToGet.value, maxToGet.value)",
"def variable_range(examples, var):\n if var[1] == 'd':\n range = set()\n for datum in examples:\n range.add(datum[var[0]])\n return range\n else:\n range_min, range_max = 0, 0\n for datum in examples:\n data_val = float(datum[var[0]])\n range_min, range_max = min(range_min, data_val), max(range_max, data_val)\n return (range_min, range_max)",
"def getCellData(X, y, min0, max0, min1, max1):\n Xcell = []\n ycell = []\n\n for x,label in zip(X, y):\n if (x[0] >= min0) and (x[0] < max0) and (x[1] >= min1) and (x[1] < max1):\n Xcell.append(x)\n ycell.append(label)\n\n return np.array(Xcell), np.array(ycell)",
"def get_range(value):\n\n raw = value\n\n # If we find a '@' at the beginning of the range, we should invert\n # the match.\n\n invert = False\n\n if value.find('@') == 0:\n invert = True\n value = value.lstrip('@')\n\n # The : separates a max/min range. If it exists, there is at least\n # a minimum. We'll start our ranges at zero and infinity so we don't\n # have to worry about complex testing logic.\n\n bottom = 0\n top = float('infinity')\n\n if value.find(':') > 0:\n (bottom, top) = value.split(':')\n if top == '':\n top = float('infinity')\n else:\n top = float(top)\n\n if bottom == '':\n bottom = 0\n elif bottom == '~':\n bottom = -float('infinity')\n else:\n bottom = float(bottom)\n else:\n top = float(value)\n\n return (bottom, top, invert, raw)",
"def possible_vals(pp):\n\n if pp[\"type\"] == \"w\":\n vals = [0, pp[\"pmax\"]]\n\n elif pp[\"type\"] == \"windturbine\":\n vals = [0, pp[\"pmin\"]]\n for i in range(pp[\"pmin\"], pp[\"pmax\"] - pp[\"pmin\"] + 1):\n vals.append(pp[\"pmin\"] + i)\n\n else: # Turbojet\n vals = [0]\n for i in range(pp[\"pmin\"], pp[\"pmax\"] - pp[\"pmin\"]):\n vals.append(pp[\"pmin\"] + i)\n return vals",
"def _read_range(range: str) -> Tuple[str, List[Tuple[Union[int, None], Union[int, None]]]]:\n format, split_on_pairs = range.split('=', 1)\n split_on_pairs = split_on_pairs.split(',')\n pairs = []\n for pair_str in split_on_pairs:\n split_on_range = pair_str.split('-', 1)\n start = int(split_on_range[0]) if len(split_on_range[0]) > 0 else None\n stop = int(split_on_range[1]) if len(split_on_range[1]) > 0 else None\n pairs.append((start, stop))\n return format, pairs",
"def get_range(self):\n return time_to_range(self.get_time())",
"def _get_shear_vals(lower_bound: float,\n upper_bound: float,\n step: float) -> Tuple[float]:\n return tuple(np.arange(lower_bound, upper_bound + step, step))",
"def getDataRange(self):\n return self._dataRange",
"def get_xrange(self) -> np.array:\n # todo: ensure this functions work as well for y_values\n lower, upper = self.get_xrange_indices()\n return self.x[lower, upper + 1]",
"def Min(data):\n return data.min()",
"def _get_energy_range(self):\n\n e0_min = self.network.isomers[0].E0\n e0_max = e0_min\n\n for isomer in self.network.isomers[1:]:\n E0 = isomer.E0\n if E0 < e0_min:\n e0_min = E0\n if E0 > e0_max:\n e0_max = E0\n for reactant in self.network.reactants:\n E0 = reactant.E0\n if E0 < e0_min:\n e0_min = E0\n if E0 > e0_max:\n e0_max = E0\n for product in self.network.products:\n E0 = product.E0\n if E0 < e0_min:\n e0_min = E0\n if E0 > e0_max:\n e0_max = E0\n for rxn in self.network.path_reactions:\n E0 = rxn.transition_state.conformer.E0.value_si\n if E0 < e0_min:\n e0_min = E0\n if E0 > e0_max:\n e0_max = E0\n\n return e0_min, e0_max",
"def mins(self) -> Tensor:\n return self._ranges[:, 0]",
"def get_ranges(self, tchain, kw):\n (lo, hi) = (\"min\", \"max\")\n ran = None\n for t in tchain:\n rstmt = t.search_one(kw)\n if rstmt is None: continue\n ran = [ i.split(\"..\") for i in rstmt.arg.split(\"|\") ]\n if ran[0][0] != 'min': lo = ran[0][0]\n if ran[-1][-1] != 'max': hi = ran[-1][-1]\n if ran is None: return None\n if len(ran) == 1:\n return [(lo, hi)]\n else:\n return [(lo, ran[0][-1])] + ran[1:-1] + [(ran[-1][0], hi)]",
"def get_range(self, field, deep=False, axis=None):\n variables = list(self.vars(deep, with_name=field))\n\n if not variables:\n raise KeyError(\"No variable named '%s' was found!\" % field)\n\n start = [np.nanmin(self[var], axis).item(0) for var in variables]\n end = [np.nanmax(self[var], axis).item(0) for var in variables]\n return min(start), max(end)",
"def minimum(self) -> Union[int, float]:\n return self.range[0]",
"def eta_range(self):\n\t\tticks = self.eta_details.keys()\n\t\treturn min(ticks), max(ticks)",
"def get_min_max_tuple(min_max_tuple, value):\n min_v, max_v = min_max_tuple\n\n min_v = smart_min(min_v, value)\n max_v = smart_max(max_v, value)\n\n return (min_v, max_v)",
"def heckbert_interval(data_low, data_high, numticks=8, nicefunc=_nice, enclose=False):\n if data_high == data_low:\n return data_high, data_low, 0\n if numticks == 0:\n numticks = 1\n\n range = nicefunc(data_high - data_low)\n if numticks > 1:\n numticks -= 1\n d = nicefunc(range / numticks, round=True)\n if enclose:\n graphmin = ceil(data_low / d) * d\n graphmax = floor(data_high / d) * d\n else:\n graphmin = floor(data_low / d) * d\n graphmax = ceil(data_high / d) * d\n return graphmin, graphmax, d",
"def bounds(self):\n return self.xmin, self.xmax, self.ymin, self.ymax",
"def bounds(self, start=None, finish=None):\n lower = start if start is not None else self.limits[0]\n upper = finish if finish is not None else self.limits[1]\n\n lower = lower + self.offsets[0]\n upper = upper + self.offsets[1]\n\n return (lower, upper)",
"def get_min_max(ints):\n current_max = None\n current_min = None\n\n if (len(ints) == 0) or (ints is None):\n return tuple([current_min, current_max])\n\n for i, n in enumerate(ints):\n if i == 0:\n current_max = n\n current_min = n\n else:\n if n > current_max:\n current_max = n\n elif n < current_min:\n current_min = n\n\n return tuple([current_min, current_max])",
"def get_minx_maxx(self, normalized=True):\n minx = np.array([[0.0] * len(self.encoded_feature_names)])\n maxx = np.array([[1.0] * len(self.encoded_feature_names)])\n\n for idx, feature_name in enumerate(self.continuous_feature_names):\n max_value = self.train_df[feature_name].max()\n min_value = self.train_df[feature_name].min()\n\n if normalized:\n minx[0][idx] = (self.permitted_range[feature_name]\n [0] - min_value) / (max_value - min_value)\n maxx[0][idx] = (self.permitted_range[feature_name]\n [1] - min_value) / (max_value - min_value)\n else:\n minx[0][idx] = self.permitted_range[feature_name][0]\n maxx[0][idx] = self.permitted_range[feature_name][1]\n return minx, maxx",
"def get_minmax(self):\n x_minmax = [np.min(self.grid['x']), np.max(self.grid['x'].max())]\n z_minmax = [np.min(self.grid['z']), np.max(self.grid['z'].max())]\n return x_minmax, z_minmax",
"def range(self):\n return self.timerange()",
"def get_refrange(self):\n if np.all(np.isnan(self.par)):\n print( 'Run params() before')\n return\n if hasattr(self,'refranges'):\n return self.refranges\n ice_r = [r for r in xrange(len(self.ref)) if ~ np.isnan(self.par[1,r,10,0])]\n liq_r = [r for r in xrange(len(self.ref)) if ~ np.isnan(self.par[0,r,10,0])]\n return (liq_r,ice_r)",
"def min_max(xs):\n return min(xs), max(xs)",
"def GetTotalRange(vDataSet):\r\n dtype = GetType(vDataSet)\r\n if dtype == np.uint8 or dtype == np.uint16:\r\n info = np.iinfo(dtype)\r\n else:\r\n info = np.finfo(dtype)\r\n return info.min,info.max",
"def x_y_coor_min_max(x_y_coor):\n\tx_range = [np.min(x_y_coor[\"X\"]),np.max(x_y_coor[\"X\"])]\n\ty_range = [np.min(x_y_coor[\"Y\"]),np.max(x_y_coor[\"Y\"])]\n\treturn x_range, y_range",
"def _get_extremes(self, attr='values'):\n # calculate the maximum and minimum for all series\n series_max = [0]\n series_min = [0]\n for s in self:\n if s is not None:\n series_max.append(s.max(attr))\n series_min.append(s.min(attr))\n return min(series_min), max(series_max)",
"def get_min(cls, data: tuple or list) -> float:\n cls._data_validation(data)\n return min(data)",
"def _update_data_range(self):\r\n self._h_min = np.min(self.h)\r\n self._h_max = np.max(self.h)\r\n self._hr_min = np.min(self.hr)\r\n self._hr_max = np.max(self.hr)\r\n self._m_min = np.nanmin(self.m)\r\n self._m_max = np.nanmax(self.m)\r\n\r\n if self.temperature is None or np.all(np.isnan(self.temperature)):\r\n self._T_min = np.nan\r\n self._T_max = np.nan\r\n else:\r\n self._T_min = np.nanmin(self.temperature)\r\n self._T_max = np.nanmax(self.temperature)\r\n\r\n return",
"def _check_range(range_):\n try:\n if not isinstance(range_, list):\n range_ = list(range_)\n min_, max_ = range_\n except (ValueError, TypeError):\n raise TypeError(\"each range in ising_linear_ranges should be a list of length 2.\")\n if not isinstance(min_, Number) or not isinstance(max_, Number) or min_ > max_:\n raise ValueError((\"each range in ising_linear_ranges should be a 2-tuple \"\n \"(min, max) where min <= max\"))\n return range_",
"def _bi_range(start, end):\n if start == end:\n return (start,)\n\n elif end < start:\n return reversed(range(end, start + 1))\n\n else:\n return range(start, end + 1)"
] | [
"0.80794895",
"0.7623016",
"0.7399985",
"0.7339498",
"0.7240076",
"0.7086574",
"0.7079901",
"0.70793736",
"0.6944895",
"0.6782193",
"0.6766099",
"0.6677474",
"0.66592604",
"0.6654025",
"0.6621791",
"0.6613688",
"0.6587012",
"0.6554875",
"0.6525205",
"0.6514943",
"0.64271283",
"0.6386248",
"0.634553",
"0.63312197",
"0.63110703",
"0.62724155",
"0.62720484",
"0.62662834",
"0.6266116",
"0.6264186",
"0.62542087",
"0.6253586",
"0.62519306",
"0.6207791",
"0.61888206",
"0.6186835",
"0.616809",
"0.61655664",
"0.61417156",
"0.6118097",
"0.6116166",
"0.6101557",
"0.60865176",
"0.6076441",
"0.60661817",
"0.60623837",
"0.60571235",
"0.6052849",
"0.6051161",
"0.6039221",
"0.6036516",
"0.6025402",
"0.6013011",
"0.6007117",
"0.59838825",
"0.5983818",
"0.5980281",
"0.5965841",
"0.59648746",
"0.5953986",
"0.5951801",
"0.5950642",
"0.59454656",
"0.59387624",
"0.59369665",
"0.59355134",
"0.59345365",
"0.59342474",
"0.5926222",
"0.5923732",
"0.59197104",
"0.59195",
"0.59058934",
"0.5903206",
"0.587778",
"0.5869031",
"0.5863317",
"0.5834739",
"0.58343405",
"0.58285433",
"0.5815397",
"0.5813248",
"0.5812595",
"0.57993114",
"0.57934475",
"0.57877153",
"0.57704145",
"0.5766496",
"0.576398",
"0.5763425",
"0.57589126",
"0.57578534",
"0.57340467",
"0.5730579",
"0.5725628",
"0.5716928",
"0.57082254",
"0.5708046",
"0.57004607",
"0.56980264"
] | 0.7169807 | 5 |
Perform picking in this item at the given widget position. | def _pickFull(self, context):
rayObject = context.getPickingSegment(frame=self._getScenePrimitive())
if rayObject is None:
return None
points = utils.segmentPlaneIntersect(
rayObject[0, :3],
rayObject[1, :3],
planeNorm=self.getNormal(),
planePt=self.getPoint())
if len(points) == 1: # Single intersection
if numpy.any(points[0] < 0.):
return None # Outside volume
z, y, x = int(points[0][2]), int(points[0][1]), int(points[0][0])
data = self.getData(copy=False)
if data is None:
return None # No dataset
depth, height, width = data.shape
if z < depth and y < height and x < width:
return PickingResult(self,
positions=[points[0]],
indices=([z], [y], [x]))
else:
return None # Outside image
else: # Either no intersection or segment and image are coplanar
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def Point_Pick(self):\n self.vtkWidget.iren.AddObserver('RightButtonPressEvent', self.pick_loc)\n self.renWin.Render()",
"def pick_loc(self, event, x):\n #print(event, x)\n self.vtkWidget.iren.RemoveObservers('RightButtonPressEvent')\n loc = event.GetEventPosition()\n\n # Currently this only allow one pick points, but in the future, more reference points may be needed\n if self.pnt is None: # Check no points are already picked\n self.pnt = vtkRenWin.Pick_point(self.renWin, loc)\n else:\n show_message(\"A point is already set as the reference.\\n\"\n \"Clear the picked points to change reference\",\n message_type=\"info\")\n #vtkRenWin.mark(self.renWin,self.pnt[0],self.pnt[1],self.pnt[2])\n # print(self.pnt)",
"def _on_pick(self, event):\n pix_id = event.ind[-1]\n xx, yy, aa = u.Quantity(self.geom.pix_x[pix_id]).value, \\\n u.Quantity(self.geom.pix_y[pix_id]).value, \\\n u.Quantity(np.array(self.geom.pix_area)[pix_id])\n if self.geom.pix_type.startswith(\"hex\"):\n self._active_pixel.xy = (xx, yy)\n else:\n rr = sqrt(aa)\n self._active_pixel.xy = (xx - rr / 2., yy - rr / 2.)\n self._active_pixel.set_visible(True)\n self._active_pixel_label.set_x(xx)\n self._active_pixel_label.set_y(yy)\n self._active_pixel_label.set_text(f\"{pix_id:003d}\")\n self._active_pixel_label.set_visible(True)\n self._update()\n self.on_pixel_clicked(pix_id) # call user-function",
"def click(self, position):\n w, h = self.window.size\n sx, sy = self.tictactoe.size\n rx, ry = position\n x, y = sx * rx // w, sy * ry // h\n if self.tictactoe.available((x, y)):\n self.choice = (x, y)",
"def PerformPick(self, x, y, ignoreEntID = -1):\n pass",
"def onPick(self, event):\n\n modifiers = QtWidgets.QApplication.keyboardModifiers()\n isShift = modifiers == QtCore.Qt.ShiftModifier\n\n logger.info(f'isShift:{isShift}')\n line = event.artist\n\n # filter out clicks on 'Annotation' used by mplcursors\n try:\n # when Scatter, line is 'PathCollection', a list of (x,y)\n offsets = line.get_offsets()\n except (AttributeError) as e:\n return\n\n ind = event.ind # ind is a list []\n if len(ind)==0:\n return\n ind = ind[0]\n\n # ind is the ith element in (x,y) list of offsets\n # ind 10 (0 based) is index 11 (1 based) in table list\n logger.info(f' selected from plot ind:{ind}, offsets values are {offsets[ind]}')\n selectDict = self.getAnnotation(ind)\n\n # to do, just put copy of state dict ???\n selectDict['plotType'] = self.stateDict['plotType']\n selectDict['dataType'] = self.stateDict['dataType']\n\n selectDict['isShift'] = isShift\n\n #\n # emit\n logger.info(f' -->> signalSelectFromPlot.emit()')\n for _k, _v in selectDict.items():\n logger.info(f' {_k}: {_v}')\n self.signalSelectFromPlot.emit(selectDict)",
"def selectedWidget(self, p_int): # real signature unknown; restored from __doc__\n pass",
"def selected(self, point):\n local_point = (point[0] - self.x, point[1] - self.y)\n self.remove(self.slide.rect)\n self.slide.update(local_point)\n self.insert(1, self.slide.rect)\n self.slide.rect.fill = self.slide_color\n self.title.text = f\"{self.name}:{int(self.slide.value)}\"",
"def on_click(self, event):\n item = self.identify(\"item\", event.x, event.y)\n if item:\n if item in self.selection():\n self.selection_remove(item)\n else:\n self.selection_add(item)\n return \"break\"",
"def _press(self, event):\n # Check for selection of a tool handle.\n if ((self._selection_completed or 'move_vertex' in self._state)\n and len(self._xys) > 0):\n h_idx, h_dist = self._polygon_handles.closest(event.x, event.y)\n if h_dist < self.grab_range:\n self._active_handle_idx = h_idx\n # Save the vertex positions at the time of the press event (needed to\n # support the 'move_all' state modifier).\n self._xys_at_press = self._xys.copy()",
"def _updateSelectedItem(self):\n plot = self.plot\n if plot is not None:\n selected = plot.selection().getSelectedItems()\n # Give priority to image over scatter\n for klass in (items.ImageBase, items.Scatter):\n for item in selected:\n if isinstance(item, klass):\n # Found a matching item, use it\n self.getHistogramWidget().setItem(item)\n return\n self.getHistogramWidget().setItem(None)",
"def interact(self,mouseY):\n index = floor((mouseY+self.scroll-50)/150)-1\n if index >= -1 and index < len(self.itemList.items):\n self.selected = index\n #i*150+50-self.scroll",
"def set_piece_selected(self, uid, val):\n piece = self.get_piece_by_uid(uid)\n if piece:\n piece.selected = val",
"def _right_click(self, event, widget):\n self._currently_selected_widget = widget\n\n # need an actual mechanism for populating the menu, rather than this!!\n ### copied from edit_PO_in_currently...\n param_name = None\n for name,representation in self.representations.items():\n if self._currently_selected_widget is representation['widget']:\n param_name=name\n break\n # CEBALERT: should have used get_parameter_value(param_name)?\n PO_to_edit = self._string2object(param_name,self._tkvars[param_name].get())\n ###\n\n if hasattr(PO_to_edit,'params'):\n self.menu.tk_popup(event.x_root, event.y_root)",
"def slot_selectPoint(self, selectionDict):\n\t\tprint('bStackWidget.slot_selectPoint() selectionDict:', selectionDict)\n\t\tif selectionDict is None:\n\t\t\treturn\n\t\tif selectionDict['name'] == 'toggle rect roi':\n\t\t\treturn\n\t\ttype = selectionDict['type']\n\t\tidx = selectionDict['idx']\n\t\tif type == 'Nodes':\n\t\t\tnodeIdx = idx\n\t\t\tself.myStackView2.selectNode(nodeIdx, snapz=True, isShift=False, doEmit=True)\n\t\telif type == 'Edges':\n\t\t\tedgeIdx = idx\n\t\t\tself.myStackView2.selectEdge(edgeIdx, snapz=True, isShift=False, doEmit=True)",
"def select_me(self, mouse_pos):\r\n\t\t#self.active = self.rect.collidepoint(mouse_pos)\r\n\t\tself.active = True",
"def _left_button_release_event(self, obj, event):\n #self.OnLeftButtonUp()\n pixel_x, pixel_y = self.parent.vtk_interactor.GetEventPosition()\n #selector = vtk.vtkVisibleCellSelector()\n\n self.picker_points.append((pixel_x, pixel_y))\n\n #print(self.picker_points)\n if len(self.picker_points) == 2:\n p1x, p1y = self.picker_points[0]\n p2x, p2y = self.picker_points[1]\n self.picker_points = []\n xmin = min(p1x, p2x)\n ymin = min(p1y, p2y)\n xmax = max(p1x, p2x)\n ymax = max(p1y, p2y)\n #print(self.picker_points)\n #print('_area_pick_left_button_release', cell_id)\n\n dx = abs(p1x - p2x)\n dy = abs(p1y - p2y)\n self.picker_points = []\n if dx > 0 and dy > 0:\n if self._pick_visible:\n self._pick_visible_ids(xmin, ymin, xmax, ymax)\n else:\n self._pick_depth_ids(xmin, ymin, xmax, ymax)\n self.parent.vtk_interactor.Render()\n self.picker_points = []",
"def DoAction(self,event):\r\n selections = self.list.GetSelections()\r\n if not selections: return bell()\r\n itemDex = selections[0]\r\n item = self.items[itemDex]\r\n self.data.action(item)",
"def select(self,item):\r\n pass",
"def selectItem(*args):",
"def select_me(self, mouse_pos):\r\n\t\tself.active = self.rect.collidepoint(mouse_pos)",
"def pickUpAction(self, **kwargs):\n pose_offset = self.mm.default_values[self.mm.modes[self.mm.cur_mode]]\n colour = kwargs[\"fname\"]\n# pdb.set_trace()\n self.locator.update_pose() #get current pose of arm\n# x_offset = self.locator.pose[0] + pose_offset[0]\n# y_offset = self.locator.pose[1] + pose_offset[1]\n# goal_pose = (x_offset,y_offset,0,0,0,0)\n\n if self.exp_position_occupied:\n self.colour = 'blue'\n self.baxter.no()\n else:\n success = self.locator.locate(colour, pose_offset, 1)\n self.mm.loadMenu(\"actionMenu\")",
"def item_selected(self, _widget, _idx):\n # get item title\n self.sel_fmt = str(self.types_list.getItemNameAt(_idx))\n \n # enable \"ok\" button if any item selected\n self.button_ok.setEnabled(True)\n # update editor checkbox\n self.checkbox_edit.setStateCheck(False)\n self.checkbox_edit.setEnabled(self._formats[self.sel_fmt][1])",
"def select(self, position: Union[Sequence[int], int]) -> None:\n if isinstance(position, (list, tuple, np.ndarray)):\n y, x = round(position[0]), round(position[1])\n self._assert_valid(y, x)\n position = int(y * self.size[1] + x)\n\n if not isinstance(position, int):\n raise TypeError('`position` must be a integer, tuple or list.')\n\n if self.source != -1:\n self.cancel()\n self.current = self._opt_path(self.source, position)\n else:\n self.start = position\n\n self.destiny = position # must be after cancel",
"def select_items(self):\n\n self.listWidget.currentItem().setSelected(True)\n self.items_selected = self.listWidget.selectedItems()\n\n if self.frame_ordering == \"quality\":\n self.indices_selected = [self.quality_sorted_indices[self.listWidget.row(item)] for item\n in self.items_selected]\n self.frame_index = self.indices_selected[0]\n self.quality_index = self.rank_indices[self.frame_index]\n else:\n self.indices_selected = [self.listWidget.row(item) for item in self.items_selected]\n self.frame_index = self.indices_selected[0]\n self.quality_index = self.rank_indices[self.frame_index]\n\n self.synchronize_slider()",
"def _press(self, event):\n self._set_cursor(True)\n if self._interactive and self._selection_artist.get_visible():\n self._set_active_handle(event)\n else:\n self._active_handle = None\n\n if self._active_handle is None or not self._interactive:\n # Clear previous rectangle before drawing new rectangle.\n self.update()\n\n v = event.xdata if self.direction == 'horizontal' else event.ydata\n # self._pressv and self._prev are deprecated but we still need to\n # maintain them\n self._pressv = v\n self._prev = self._get_data(event)\n\n if self._active_handle is None and not self.ignore_event_outside:\n # when the press event outside the span, we initially set the\n # visibility to False and extents to (v, v)\n # update will be called when setting the extents\n self._visible = False\n self.extents = v, v\n # We need to set the visibility back, so the span selector will be\n # drawn when necessary (span width > 0)\n self._visible = True\n else:\n self.set_visible(True)\n\n return False",
"def select(self, _: int = 0) -> None:\n if not self.all_items:\n self._exit()\n return\n self.selected_option = self.current_option\n\n assert self.selected_item is not None\n self.selected_item.set_up()\n self.selected_item.action()\n self.selected_item.clean_up()\n\n self.returned_value = self.selected_item.get_return()\n self.should_exit = self.selected_item.should_exit\n\n if not self.should_exit:\n self.draw()",
"def withTouch(self, item, contentType=None, length=None, timeout=None, index=1, containerObject=None, relatedAreaEnd=None, doNotSelect=False):\r\n # Press (x, y) coordinate point when item is tuple\r\n if isinstance(item, tuple):\r\n self.phone._touch.press(item,length)\r\n self.phone._run('Press to coordinates: %s,%s' % item)\r\n return\r\n\r\n # Press HW key\r\n if item.startswith('KBD_KEY_'):\r\n self.phone._pressKey(item, length)\r\n self.phone._run('Press (%s) key' % item)\r\n if item == 'KBD_KEY_KEYLOCK_TOGGLE':\r\n self.phone.delay(100, False)\r\n return\r\n\r\n touchType=False#Fix touchable not working with images ,must be set not to false\r\n\r\n coordinates = self.phone.uiState.revealItem(item,timeout, index=index, containerObject=containerObject, relatedAreaEnd=relatedAreaEnd)\r\n\r\n if coordinates:\r\n if not doNotSelect:\r\n itemCommented = self.phone.uiState._getCommented(item) # get step commented here so teststep won't be messed up with sx queries\r\n\r\n if containerObject != None:\r\n containerCommented = self.phone.uiState._getCommented(containerObject) # get container in commented format before touching\r\n self.phone._touch.press(coordinates,length)\r\n self.phone._run('select(%s) (on same area as %s)' % (itemCommented, containerCommented))\r\n else:\r\n self.phone._touch.press(coordinates,length)\r\n self.phone._run('select(%s)' % itemCommented)\r\n elif containerObject != None:\r\n self.phone.fail(\"Cannot select %s, no item found related to \\\"%s\\\".\"%(self.phone.uiState._getCommented(item), self.phone.uiState._getCommented(containerObject)))\r\n else:\r\n self.phone.fail(\"Cannot select %s, item is not found from screen.\"%self.phone.uiState._getCommented(item))",
"def pick(self, x, y, pb=2, multiple=False):\n width = self.size().width()\n height = self.size().height()\n #print('coords: %d, %d' % (x, y))\n # constrain to within border 1 pix smaller than widget, for glReadPixels call\n if not (pb <= x < width-pb and pb <= y < height-pb): # cursor out of range\n return\n if self.npoints > 2**24-2: # the last one is the full white background used as a no hit\n raise OverflowError(\"Can't pick from more than 2**24-2 sids\")\n # draw encoded RGB values to back buffer\n #GL.glDrawBuffer(GL_BACK) # defaults to back\n GL.glClearColor(1.0, 1.0, 1.0, 1.0) # highest possible RGB means no hit\n GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n GL.glEnableClientState(GL.GL_COLOR_ARRAY)\n GL.glEnableClientState(GL.GL_VERTEX_ARRAY)\n GL.glColorPointerub(self.rgbsids) # unsigned byte, ie uint8\n GL.glVertexPointerf(self.points) # float32\n GL.glDrawArrays(GL.GL_POINTS, 0, self.npoints) # to back buffer\n GL.glClearColor(0.0, 0.0, 0.0, 1.0) # restore to default black\n # grab back buffer:\n #GL.glReadBuffer(GL.GL_BACK) # defaults to back\n # find rgb at or around cursor coords, decode sid:\n backbuffer = GL.glReadPixels(x=x-pb, y=y-pb, width=2*pb+1, height=2*pb+1,\n format=GL.GL_RGB, type=GL.GL_UNSIGNED_BYTE,\n array=None, outputType=None)\n # NOTE: outputType kwarg above must be set to something other than str to ensure\n # that an array is returned, instead of a string of bytes\n if (backbuffer == 255).all(): # no hit\n return\n if not multiple:\n sid = self.decodeRGB(backbuffer[pb, pb]) # check center of backbuffer\n if sid != None:\n #print('hit at exact cursor pos')\n return sid # hit at exact cursor position\n # 2D array with nonzero entries at hits:\n hitpix = (backbuffer != [255, 255, 255]).sum(axis=2)\n if not multiple:\n ri = np.where(hitpix.ravel())[0][0] # get ravelled index of first hit\n i, j = np.unravel_index(ri, dims=hitpix.shape) # unravel to 2D index\n #print('Hit at %d, %d' % (i, j))\n return self.decodeRGB(backbuffer[i, j]) # should be a valid sid\n ijs = zip(*np.where(hitpix)) # list of ij tuples\n return np.asarray([ self.decodeRGB(backbuffer[i, j]) for i, j in ijs ])",
"def pick(self, x, y, pb=2, multiple=False):\n width = self.size().width()\n height = self.size().height()\n #print('coords: %d, %d' % (x, y))\n # constrain to within border 1 pix smaller than widget, for glReadPixels call\n if not (pb <= x < width-pb and pb <= y < height-pb): # cursor out of range\n return\n if self.npoints > 2**24-2: # the last one is the full white background used as a no hit\n raise OverflowError(\"Can't pick from more than 2**24-2 sids\")\n # draw encoded RGB values to back buffer\n #GL.glDrawBuffer(GL_BACK) # defaults to back\n GL.glClearColor(1.0, 1.0, 1.0, 1.0) # highest possible RGB means no hit\n GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n GL.glEnableClientState(GL.GL_COLOR_ARRAY)\n GL.glEnableClientState(GL.GL_VERTEX_ARRAY)\n GL.glColorPointerub(self.rgbsids) # unsigned byte, ie uint8\n GL.glVertexPointerf(self.points) # float32\n GL.glDrawArrays(GL.GL_POINTS, 0, self.npoints) # to back buffer\n GL.glClearColor(0.0, 0.0, 0.0, 1.0) # restore to default black\n # grab back buffer:\n #GL.glReadBuffer(GL.GL_BACK) # defaults to back\n # find rgb at or around cursor coords, decode sid:\n backbuffer = GL.glReadPixels(x=x-pb, y=y-pb, width=2*pb+1, height=2*pb+1,\n format=GL.GL_RGB, type=GL.GL_UNSIGNED_BYTE,\n array=None, outputType=None)\n # NOTE: outputType kwarg above must be set to something other than str to ensure\n # that an array is returned, instead of a string of bytes\n if (backbuffer == 255).all(): # no hit\n return\n if not multiple:\n sid = self.decodeRGB(backbuffer[pb, pb]) # check center of backbuffer\n if sid != None:\n #print('hit at exact cursor pos')\n return sid # hit at exact cursor position\n # 2D array with nonzero entries at hits:\n hitpix = (backbuffer != [255, 255, 255]).sum(axis=2)\n if not multiple:\n ri = np.where(hitpix.ravel())[0][0] # get ravelled index of first hit\n i, j = np.unravel_index(ri, dims=hitpix.shape) # unravel to 2D index\n #print('hit at %d, %d' % (i, j))\n return self.decodeRGB(backbuffer[i, j]) # should be a valid sid\n ijs = zip(*np.where(hitpix)) # list of ij tuples\n return np.asarray([ self.decodeRGB(backbuffer[i, j]) for i, j in ijs ])",
"def getSelectedPosition(*args):",
"def onButtonPress(self, event):\n\n if event.xdata and event.ydata:\n self.emit(QtCore.SIGNAL(\"positionSelected(float, float)\"),\n float(event.xdata), float(event.ydata))",
"def apply_selection(self, rv, index, is_selected):\r\n self.selected = is_selected",
"def apply_selection(self, rv, index, is_selected):\n self.selected = is_selected",
"def select_entry(self):\n logging.debug(\"element selected\")\n if len(self.contents) > 0:\n self.to_background()\n self.contents[self.pointer][1]()\n self.to_foreground()\n if self.path_chosen:\n self.deactivate()\n else:\n self.to_foreground()",
"def on_click(self, event):\n if self.click_job is not None:\n self.after_cancel(self.click_job)\n item = self.identify(\"item\", event.x, event.y)\n if item:\n self.click_job = self.after(200, self.clicked, item)\n return \"break\"",
"def pickUp(self):\n pos = self.getRoverLocation()\n item = self.map[pos.y,pos.x]\n if type(item) == Part:\n self.inventory.addPart(str(item))\n self.map[pos.y,pos.x] = None",
"def _press(self, event):\n # make the drawn box/line visible get the click-coordinates,\n # button, ...\n if self._interactive and self._selection_artist.get_visible():\n self._set_active_handle(event)\n else:\n self._active_handle = None\n\n if ((self._active_handle is None or not self._interactive) and\n self._allow_creation):\n # Clear previous rectangle before drawing new rectangle.\n self.update()\n\n if (self._active_handle is None and not self.ignore_event_outside and\n self._allow_creation):\n x = event.xdata\n y = event.ydata\n self._visible = False\n self.extents = x, x, y, y\n self._visible = True\n else:\n self.set_visible(True)\n\n self._extents_on_press = self.extents\n self._rotation_on_press = self._rotation\n self._set_aspect_ratio_correction()\n\n return False",
"def selectPointsUnderCursor(self):\n spw = self.spw\n sw = spw.windows['Sort']\n #if clear:\n # sw.uslist.clearSelection()\n # sw.nlist.clearSelection()\n x, y = self.cursorPosGL()\n sids = self.pick(x, y, pb=10, multiple=True)\n if sids is None:\n return\n #t0 = time.time()\n spw.SelectSpikes(sids, on=self.selecting)\n #print('SelectSpikes took %.3f sec' % (time.time()-t0))\n if self.selecting == True:\n sat = 0.2 # desaturate\n else: # self.selecting == False\n sat = 1 # resaturate\n self.color(sids, sat=sat)\n self.updateGL()",
"def onDown():\n currentIndex = selector.currentRow()\n if currentIndex != selector.count() - 1 and currentIndex != -1:\n selector.blockSignals(True)\n currentItem = selector.takeItem(currentIndex)\n selector.insertItem(currentIndex + 1, currentItem)\n selector.setCurrentRow(currentIndex + 1)\n selector.blockSignals(False)\n position = []\n for index in range(selector.count()):\n position.append(selector.item(index).data(32))\n p.SetString(\"Position\", \",\".join(position))\n onItemChanged()",
"def act(self, **kwargs):\n source_entity = kwargs[action.SOURCE_ENTITY]\n item = self._get_item_on_floor(source_entity)\n if item is None:\n raise Exception(\"Could not find item on floor.\", source_entity, item)\n pickup_succeded = self.parent.inventory.try_add(item)\n if pickup_succeded:\n item.remove_component_of_type(\"player_auto_pick_up\")\n msg.send_visual_message(messenger.PICK_UP_MESSAGE % {\"item\": item.description.name},\n source_entity.position.value)\n self.parent.actor.newly_spent_energy += gametime.single_turn\n _item_flash_animation(source_entity, item)",
"def click(self, button, coord):\n if coord in self._ship_blocks:\n # selection doesn't work, if grid is blocked\n return\n\n if self._selection is None:\n # selection works, if not selected before\n self._selection = coord\n elif self._selection == coord:\n for coord in self._selection_buttons:\n self.remove_selection(coord)\n self._selection = None\n else:\n ship = list(self._selection_buttons)\n result = True\n\n if self.on_ship is not None:\n result = self.on_ship(ship)\n\n if result:\n self.ships.append(ship)\n self._ship_blocks += ship\n else:\n for coord in self._selection_buttons:\n self.remove_selection(coord)\n\n self._selection_buttons.clear()\n self._selection = None",
"def getItemAtClick(self, event):\n pos = event.pos()\n obj = self.itemAt(pos)\n return obj",
"def getItemAtClick(self, event):\n pos = event.pos()\n obj = self.itemAt(pos)\n return obj",
"def use_triggered(self):\n\n self.select_items()\n if self.items_selected:\n for index, item in enumerate(self.items_selected):\n index_selected = self.indices_selected[index]\n frame_selected = index_selected + 1\n item.setText(\"Frame %i included\" % frame_selected)\n item.setBackground(self.background_included)\n item.setForeground(QtGui.QColor(0, 0, 0))\n self.index_included[index_selected] = True\n self.frame_selector.setPhoto(self.frame_index)",
"def selectPointsUnderCursor(self):\n #spw = self.spw\n #sw = spw.windows['Sort']\n #if clear:\n # sw.uslist.clearSelection()\n # sw.nlist.clearSelection()\n x, y = self.cursorPosGL()\n sids = self.pick(x, y, pb=10, multiple=True)\n if sids == None:\n return\n #t0 = time.time()\n #if not sw.panel.maxed_out:\n # spw.SelectSpikes(sids, on=self.selecting)\n #else:\n # # for speed, while the mouse is held down and the sort panel is maxed out,\n # # don't call SelectSpikes, only call it once when the mouse is released\n self.collected_sids.append(sids)\n #print('SelectSpikes took %.3f sec' % (time.time()-t0))\n if self.selecting == True:\n sat = 0.2 # desaturate\n else: # self.selecting == False\n sat = 1 # resaturate\n self.color(sids, sat=sat)\n self.updateGL()",
"def __menu_item_chosen(self, m):\n # Get selected item\n self.__selected_robot = m.index\n\n # Update the checkboxes/sliders for the selected robot\n self.__ui_controls.get('chkbox_ref').checked = \\\n self.__robots[self.__selected_robot].ref_shown\n\n self.__ui_controls.get('chkbox_rob').checked = \\\n self.__robots[self.__selected_robot].rob_shown\n\n self.__ui_controls.get('sld_opc').value = \\\n self.__robots[self.__selected_robot].opacity",
"def on_mouse_press(self, x, y, button):\n\n pass",
"def select_hand_pointer(self):\n self.locator_finder_by_hover_item(self.select_hand_pointer_id)\n time.sleep(1)",
"def _on_point_selected(self, _event):\r\n selected_iid = self._tree.selection()\r\n self._index_of_sel_point = self._tree.index(selected_iid)\r\n self._edit_zone.set_editable_point(self._tree.item(selected_iid)[\"values\"][0])\r\n self._notify(\"focus\", {})",
"def paint_item(self, posx, index):\r\n raise NotImplementedError()",
"def set_selected_point(self, i):\n\n if i < len(self.poses):\n self.selected_point = min(len(self.poses), max(0, i))\n self.calibration_changed()",
"def draw(self):\n if context.click():\n self.place()",
"def items_picking_window(self,\n instance: Nobleman,\n name: str,\n variable: Any,\n widget=None):\n window = tk.Toplevel()\n window.title(f'Pick new {name.lstrip(\"_\")}')\n window.geometry('350x250')\n\n search_variable = StringVar()\n entry = Entry(window, textvariable=search_variable)\n entry.pack(side=TOP)\n\n listbox = self.create_items_listbox(variable, widget, window)\n for name in self.get_data_for_listbox(instance, name):\n listbox.insert(END, name)\n\n entry.bind(\n '<Key>', partial(input_match_search, search_variable,\n lambda: self.manager.lords if name in LORDS_SETS\n else self.manager.locations, listbox))\n\n TkButton(window, text='Confirm and close',\n command=window.destroy).pack(side=TOP)",
"def _left_button_press_event(self, obj, event):\n #print('area_picker - left_button_press_event')\n self.OnLeftButtonDown()\n pixel_x, pixel_y = self.parent.vtk_interactor.GetEventPosition()\n self.picker_points.append((pixel_x, pixel_y))",
"def add_selection(self, coord):\n button = self.grid[coord]\n button['bg'] = active\n button['activebackground'] = active",
"def pickup_item(self, ):\n if (self.charac_pos.x, self.charac_pos.y) == self.pos_item['ether']:\n self.pos_item['ether'] = (0, 0 * sprite_size)\n self.item_count += 1\n self.sound_item.play()\n if (self.charac_pos.x, self.charac_pos.y) == self.pos_item['tube']:\n self.pos_item['tube'] = (0, 1 * sprite_size)\n self.item_count += 1\n self.sound_item.play()\n if (self.charac_pos.x, self.charac_pos.y) == self.pos_item['needle']:\n self.pos_item['needle'] = (0, 2 * sprite_size)\n self.item_count += 1\n self.sound_item.play()",
"def selected(self, item):\n self.elementoSeleccionado = item",
"def set_selection(self, index, value):\n if not self._has_cbox[index]:\n return\n i = self._widgets[index][\"values\"].index( str(value) )\n self._widgets[index].current(i)",
"def onpick(cls, event):\n if cls.rate_limiting():\n return True\n\n if len(event.ind) != 1:\n print(\"Two or more points are too close! Please zoom in.\")\n print(\"Showing the one with higher fitness score\")\n\n cloud_plot = gs.canvas2cloud_plot[event.canvas]\n artist = event.artist\n ind = event.ind[-1]\n button = event.mouseevent.button\n\n if button == 1:\n cls.button_1(cloud_plot, artist, ind)\n elif button == 3:\n cls.button_3(cloud_plot, artist, ind)",
"def trigger_open(self):\n self.get_selected()\n if self.selected_item:\n self.controller.display_item(self.selected_item)",
"def paint_item(self, posy, index):\r\n order = self.items[index]\r\n if order in self.selected:\r\n marker = \"*\"\r\n if index == self.item_sel:\r\n attr = COLOR_PAIR[\"dialog_sel_sel\"]\r\n else:\r\n attr = COLOR_PAIR[\"dialog_sel_text\"] + curses.A_BOLD\r\n else:\r\n marker = \"\"\r\n if index == self.item_sel:\r\n attr = COLOR_PAIR[\"dialog_sel\"]\r\n else:\r\n attr = COLOR_PAIR[\"dialog_text\"]\r\n\r\n self.addstr(posy, 2, marker, attr)\r\n self.addstr(posy, 5, order.typ, attr)\r\n self.addstr(posy, 9, self.gox.quote2str(order.price), attr)\r\n self.addstr(posy, 22, self.gox.base2str(order.volume), attr)",
"def mousePressEvent(self, event):\n #sw = self.spw.windows['Sort']\n buttons = event.buttons()\n if buttons == QtCore.Qt.MiddleButton:\n #sw.on_actionSelectRandomSpikes_triggered()\n #sw.spykewindow.ui.plotButton.click() # same as hitting ENTER in nslist\n self.selecting = True\n self.setMouseTracking(True) # while selecting\n self.selectPointsUnderCursor()\n self.lastPressPos = QtCore.QPoint(event.pos())\n self.lastPos = QtCore.QPoint(event.pos())",
"def mousePressEvent(self, event):\n #sw = self.spw.windows['Sort']\n buttons = event.buttons()\n if buttons == QtCore.Qt.MiddleButton:\n #sw.on_actionSelectRandomSpikes_triggered()\n #sw.spykewindow.plotButton.click() # same as hitting ENTER in nslist\n self.selecting = True\n self.setMouseTracking(True) # while selecting\n self.selectPointsUnderCursor()\n self.lastPressPos = QtCore.QPoint(event.pos())\n self.lastPos = QtCore.QPoint(event.pos())",
"def _handleClick(self, x, y, btn):\n\n if btn == LEFT_BTN:\n result = self.plot._pickTopMost(x, y, lambda i: i.isSelectable())\n if result is None:\n return None\n\n item = result.getItem()\n\n if isinstance(item, items.MarkerBase):\n xData, yData = item.getPosition()\n if xData is None:\n xData = [0, 1]\n if yData is None:\n yData = [0, 1]\n\n eventDict = prepareMarkerSignal('markerClicked',\n 'left',\n item.getName(),\n 'marker',\n item.isDraggable(),\n item.isSelectable(),\n (xData, yData),\n (x, y), None)\n return eventDict\n\n elif isinstance(item, items.Curve):\n dataPos = self.plot.pixelToData(x, y)\n assert dataPos is not None\n\n xData = item.getXData(copy=False)\n yData = item.getYData(copy=False)\n\n indices = result.getIndices(copy=False)\n eventDict = prepareCurveSignal('left',\n item.getName(),\n 'curve',\n xData[indices],\n yData[indices],\n dataPos[0], dataPos[1],\n x, y)\n return eventDict\n\n elif isinstance(item, items.ImageBase):\n dataPos = self.plot.pixelToData(x, y)\n assert dataPos is not None\n\n indices = result.getIndices(copy=False)\n row, column = indices[0][0], indices[1][0]\n eventDict = prepareImageSignal('left',\n item.getName(),\n 'image',\n column, row,\n dataPos[0], dataPos[1],\n x, y)\n return eventDict\n\n return None",
"def setSelectedFromItem(self, item):\n row = self.model.indexFromItem(item).row()\n self.selectRow(row)",
"def move_mouse_to_and_click(self, selector, x=0, y=0):\n self.move_mouse_to(selector, x, y, return_action_chain=True).click().perform()",
"def grab(self, event):\n self.ypos = event.y\n self.xpos = event.x\n self.config(cursor='fleur')",
"def rightselectitem(self, pos):\n self._rightlist.select(pos)",
"def selectShot(self):\r\n shot = self.mapToShot(self.remainingCoordinates.pop())\r\n logging.debug(\"select shot: %s\" % (shot))\r\n return shot",
"def grab_slider(self, mouse_x, mouse_y):\n\n for slider in self._menu_items:\n if slider['menu_type'] == 'Slider':\n if slider['rect'].collidepoint(mouse_x, mouse_y):\n slider['grabbed'] = True",
"def toggle_select(self):\r\n if not len(self.items):\r\n return\r\n item = self.items[self.item_sel]\r\n if item in self.selected:\r\n self.selected.remove(item)\r\n else:\r\n self.selected.append(item)\r\n self.do_paint()",
"def Scale_Pick( self, event ):\r\n x = event.x - cb.xorigin\r\n y = event.y\r\n #Was the position within the scale?\r\n if x < 0 and x > -2:\r\n x = 0 #low adjust\r\n if x > cb.xtotal and x < cb.xtotal+2:\r\n x = cb.xtotal #high adjust\r\n if( x >= 0 and x <= cb.xtotal ):\r\n self.filter_distance = round((cb.xtotal - float(x))/cb.xtotal*cb.longx,3)\r\n self.Draw_Scale()\r\n return",
"def middleselectitem(self, pos):\n self._linklist.select(pos)",
"def mouseDoubleClickEvent(self, event):\n try:\n item = self.currentItem()\n self.clearSelection()\n item.setSelected(True)\n super(ListPreviewImages, self).mouseDoubleClickEvent(event)\n except:\n print('No item selected')",
"def on_pointPickerButton(self, key_t, button):\n\n result = self.tabs.pick_mpoint( 'random', key_t, \n self.cfgmgr, self.prefs.get('lts') )\n\n ctxt = result.log if not result.err else result.err \n\n if ctxt != None:\n self.consoleBuffer.insert_at_cursor(ctxt)\n\n self._refresh_ui()",
"def on_touch_down(self, touch):\n if super(SelectableButton, self).on_touch_down(touch):\n return True\n if self.collide_point(*touch.pos) and self.selectable:\n return self.parent.select_with_touch(self.index, touch)",
"def HitTest(self, x, y):\r\n\r\n result = None\r\n\r\n for item in self._uiparts:\r\n # we are not interested in typeDock, because this space \r\n # isn't used to draw anything, just for measurements\r\n # besides, the entire dock area is covered with other\r\n # rectangles, which we are interested in.\r\n if item.type == AuiDockUIPart.typeDock:\r\n continue\r\n\r\n # if we already have a hit on a more specific item, we are not\r\n # interested in a pane hit. If, however, we don't already have\r\n # a hit, returning a pane hit is necessary for some operations\r\n if item.type in [AuiDockUIPart.typePane, AuiDockUIPart.typePaneBorder] and result:\r\n continue\r\n \r\n # if the point is inside the rectangle, we have a hit\r\n if item.rect.Contains((x, y)):\r\n result = item\r\n \r\n return result",
"def button_press_cb(self, darea, event):\n x, y = event.x, event.y\n self.draw_pointer(self.cr, x, y)\n self.queue_draw()\n self.oldx, self.oldy = x, y\n rel_x, rel_y = self.absolute_to_relative(x, y)\n self.emit('dnd-value', rel_x, rel_y)\n self.emit('start-dnd')\n return True",
"def pick(layer, event):\n # on press\n layer.selected_label = layer._value or 0",
"def corner_click(position: Corner) -> None:\n actions.self.corner_hover(Corner)\n actions.mouse_click()",
"def radioButtonItem_Clicked( self, event ):\n\t\tself.activateTreasureBox(0)",
"def select_item(self, text):\n items = self.list_widget.findItems(text, Qt.MatchExactly)\n for item in items:\n item.setSelected(True)\n self.handle_item_changed()",
"def _selectInd(self, ind):\n logger.info(f'plotNumber:{self.plotNumber} ind: {ind}')\n if ind > len(self.plotDf)-1:\n return\n xVal = self.plotDf.at[ind, self.stateDict['xStat']]\n yVal = self.plotDf.at[ind, self.stateDict['yStat']]\n if self.scatterPlotSelection is not None:\n logger.info(f' setting scatterPlotSelection x:{xVal} y:{yVal}')\n self.scatterPlotSelection.set_data(xVal, yVal)\n self.fig.canvas.draw()",
"def handle_press( self, x, y ):\n self.pressed_flag = True\n self.first_point = (x, y)",
"def OnButtonClick(self):\n self.choice()",
"def getSelectedItem(*args):",
"def apply_selection(self, rv, index, is_selected):\n self.selected = is_selected\n if is_selected:\n SelectableLabel.selected_hotel = rv.data[index]['text']",
"def grab_point(self, pos):\n self.move_cartesian_frame_linear_interpolation(tfx.pose(np.array(pos), np.array(self.GRAB_ORIENTATION)), 0.1)\n self.grab_current_point()",
"def update_selection(self):\n raise NotImplementedError",
"def selectShot(self):\r\n raise NotImplementedError(\"Subclass needs to implement this\")",
"def set_item_selection(self, item):\n self._set_item_selection(item.encode())",
"def selection_board_maintenance(self,x_cor,y_cor):\t\t\r\n\t\tfor event in pygame.event.get():\r\n\t\t\tif event.type == pygame.QUIT:\r\n\t\t\t\tpygame.display.quit()\r\n\t\t\t\tpygame.quit()\r\n\t\t\t\tquit() \r\n\r\n\t\t\tif event.type == pygame.MOUSEBUTTONDOWN:\r\n\t\t\t\t#print(\"mouse is pressed\")\r\n\t\t\t\t#everything begins here\r\n\t\t\t\tx_adjusted,y_adjusted,who_is_clicked,piece = Helping_Class.convert_coordinate(x_cor,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t y_cor,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'selection_bar')\r\n\t\t\t\t#print(who_is_clicked)\r\n\t\t\t\tif (self.selected_from_selection_bar + self.selected_from_board):\r\n\t\t\t\t\t#print(\"inside selected item one\")\r\n\t\t\t\t\tif Helping_Class._check_if_clicked_on_his_own_piece_(self.whose_move,who_is_clicked):\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\tif self.pieces[piece].availability:\r\n\t\t\t\t\t\t\tself.selected_from_board = False\r\n\t\t\t\t\t\t\tself.selected_from_selection_bar = True\r\n\r\n\t\t\t\t\t\t\t#update \r\n\t\t\t\t\t\t\tself.selected_piece = piece\r\n\t\t\t\t\t\t\tself.selected_position =Helping_Class.selection_bar_reverse_mapping[piece]\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\t#nothing to do\r\n\t\t\t\t\t\tpass\r\n\t\t\t\telse:\r\n\t\t\t\t\t#print(\"nothing is selected\")\r\n\t\t\t\t\t#check if clicked on his piece change then select it\r\n\t\t\t\t\tif Helping_Class._check_if_clicked_on_his_own_piece_(self.whose_move,who_is_clicked):\r\n\r\n\t\t\t\t\t\tif self.pieces[piece].availability:\r\n\t\t\t\t\t\t\tself.selected_from_selection_bar = True\r\n\r\n\t\t\t\t\t\t\t#update \r\n\t\t\t\t\t\t\tself.selected_piece = piece\r\n\t\t\t\t\t\t\tself.selected_position =(x_adjusted,y_adjusted)\r\n\t\t\t\t\t\t\t#print(self.selected_piece,self.selected_position,self.selected_from_selection_bar)\r\n\r\n\t\t\t\t\t\t\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\t#nothing to do\r\n\t\t\t\t\t\tpass\r\n\t\t\t\t\t\t\r\n\t\t\t\t\r\n\t\t\telse:\r\n\t\t\t\t#color change\r\n\t\t\t\t#who_is_clicked is dummy variable as no click has occurred\r\n\t\t\t\tx_adjusted,y_adjusted,who_is_clicked,piece = Helping_Class.convert_coordinate(x_cor,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t y_cor,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'selection_bar')\r\n\r\n\t\t\t\tself.blit_piece = [(x_adjusted,y_adjusted),piece]",
"def select_multiple_item(self):\n time.sleep(2)\n self.wait_for_ajax()\n self.locator_finder_by_hover_item(self.row1_id)\n self.locator_finder_by_hover_item(self.row2_id)\n self.locator_finder_by_hover_item(self.row3_id)\n self.locator_finder_by_hover_item(self.row4_id)\n time.sleep(1)\n self.wait_for_ajax()",
"def _handle_select_event(self):\n selected_item = self.item_list[self.item_cursor.cursor]\n if selected_item == \"CANCEL\":\n self.is_dead = True\n\n # You can't sell key items.\n elif selected_item.type == ItemTypes.KEY_ITEMS:\n self.do_what_response_menu = \\\n Dialogue(\"29\", self.player, self.player,\n replace=[selected_item.name.upper()], show_curs=False)\n\n # Create a sell event with the selected item.\n else:\n self.active_sell_event = SellHowMany(self.player,\n selected_item)",
"def onclick_pick(self, click):\n from ..backend.util import _annot\n from ..backend.viz_raw import _plot_single_psd\n\n if self.plotType == 'All PSD':\n _annot(self, click, self.annot)\n # If double click, we plot the PSD\n if click.mouseevent.dblclick:\n ch = str(click.artist.get_label())\n index = self.psd.info['ch_names'].index(ch)\n index = self.psd.picks.index(index)\n _plot_single_psd(self, index + 1)",
"def emitPressEvent(self, clickLocation, button, currentKbKey, items):\n # emit the mousePressEvent signal\n self.mousePress.emit(self, clickLocation, button, currentKbKey, items)",
"def activate_item(self, index):\n item = index.model().listdata[index.row()]\n self.get_selected(item)\n self.controller.display_item(item)",
"def select_asset_from_items(self):\n\n # items = self.get_nodes(selection=False)\n # nodes = []\n # for item in items.values():\n # nodes.extend(item[\"nodes\"])\n #\n # commands.select(nodes)\n\n raise NotImplementedError",
"def pickUpActionAny(self, **kwargs):\n pose_offset = self.mm.default_values[self.mm.modes[self.mm.cur_mode]]\n colour = kwargs[\"fname\"]\n self.locator.recognise_grid()\n red = self.locator.detect_colour(0, 'red')\n rospy.loginfo(\"permutation(): looking for red object: %s\" % str(red))\n blue = self.locator.detect_colour(0, 'blue')\n rospy.loginfo(\"permutation(): looking for blue object: %s\" % str(blue))\n if red[0] < blue[0]:\n colour = 'blue'\n else:\n colour = 'red'\n\n self.locator.update_pose() #get current pose of arm\n\n success = self.locator.locate(colour, pose_offset, 1)\n self.mm.loadMenu(\"actionMenu\")",
"def operate_row(self, point: QtCore.QPoint, opt: str):\n raise NotImplementedError"
] | [
"0.66232896",
"0.58813524",
"0.5879792",
"0.5860174",
"0.5787622",
"0.57383424",
"0.570975",
"0.56717336",
"0.564384",
"0.5572894",
"0.55690295",
"0.5561806",
"0.55518645",
"0.5547201",
"0.55388415",
"0.5531264",
"0.5512377",
"0.5507659",
"0.54962397",
"0.5493375",
"0.5490328",
"0.54831296",
"0.5481948",
"0.5471091",
"0.54537976",
"0.5385567",
"0.53819734",
"0.5380689",
"0.5376581",
"0.5376581",
"0.53687215",
"0.5354697",
"0.53440946",
"0.5337128",
"0.53037",
"0.52962893",
"0.5296004",
"0.52738595",
"0.524684",
"0.5238567",
"0.5237964",
"0.52072847",
"0.5201069",
"0.5201069",
"0.5189809",
"0.5175269",
"0.51695746",
"0.5162868",
"0.51596415",
"0.51481307",
"0.5143322",
"0.51402134",
"0.51390123",
"0.5114692",
"0.51132935",
"0.51132095",
"0.51111865",
"0.51073176",
"0.5106252",
"0.50770056",
"0.5075046",
"0.5071524",
"0.5039546",
"0.5019741",
"0.5013485",
"0.50122666",
"0.50101805",
"0.5009396",
"0.50026864",
"0.49794713",
"0.49772796",
"0.49754158",
"0.4969919",
"0.4964197",
"0.49609694",
"0.4960346",
"0.49599373",
"0.4958935",
"0.49562597",
"0.4949249",
"0.49417773",
"0.4940599",
"0.49330366",
"0.49327916",
"0.49322525",
"0.4929804",
"0.4929371",
"0.4921385",
"0.49172404",
"0.49140868",
"0.4913035",
"0.49122664",
"0.49008182",
"0.49006447",
"0.49001527",
"0.48981848",
"0.48945206",
"0.48936823",
"0.48891103",
"0.48823547",
"0.4878011"
] | 0.0 | -1 |
Synchronize this instance data with that of its parent | def _syncDataWithParent(self):
parent = self.parent()
if parent is None:
self._data = None
else:
self._data = parent.getData(copy=False)
self._updateScenePrimitive() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _syncDataWithParent(self):\n parent = self.parent()\n if parent is None:\n data, range_ = None, None\n else:\n data = parent.getData(copy=False)\n range_ = parent.getDataRange()\n self._updateData(data, range_)",
"def _syncDataWithParent(self):\n parent = self.parent()\n if parent is None:\n data, range_ = None, None\n else:\n mode = self.getComplexMode()\n data = parent.getData(mode=mode, copy=False)\n range_ = parent.getDataRange(mode=mode)\n self._updateData(data, range_)",
"def _syncDataWithParent(self):\n parent = self.parent()\n if parent is None:\n self._data = None\n else:\n self._data = parent.getData(\n mode=parent.getComplexMode(), copy=False)\n\n if parent is None or self.getComplexMode() == self.ComplexMode.NONE:\n self._setColormappedData(None, copy=False)\n else:\n self._setColormappedData(\n parent.getData(mode=self.getComplexMode(), copy=False),\n copy=False)\n\n self._updateScenePrimitive()",
"def sync(self):\n pass",
"def sync(self):\n pass",
"def sync(self):\n return",
"def sync(self, other):\n pass # TODO",
"def do_sync(self):\n raise NotImplementedError() # pragma: no cover",
"def sync_local(self, other):\n pass # TODO",
"def doSync (self) :\r\n \r\n self.factory.getSyncFor(self)",
"def sync(self, **kwargs):\n pass",
"def update_original_data(self):\n pass",
"def _parentChanged(self, event):\n if event == ItemChangedType.DATA:\n self._syncDataWithParent()",
"def _parentChanged(self, event):\n if event == ItemChangedType.DATA:\n self._syncDataWithParent()",
"def sync(self):\n return self._sync",
"def update(self, parent):\r\n pass",
"def _post_sync(self):",
"def _reload(self):\n if self._ancestorModelSourceCreated:\n self._parent._reload()\n else:\n # beware this breaks parent/child links such as current selection / hierarchical groups\n dictSave = self.serialize()\n tmpRegion = self._createBlankCopy()\n tmpRegion.deserialize(dictSave)\n self._assign(tmpRegion)\n self._informRegionChange(True)",
"def lock(self):\n raise NotImplementedError",
"def sync() -> None:",
"def sync(self):\n if not self.is_readonly():\n deser = self._deserialize()\n orig = getattr(self.model, self.name)\n if (orig != deser):\n if isinstance(orig, list):\n # first remove the original triples, instead of doing sophisticated\n # set manipulations\n setattr(self.model, self.name, [])\n setattr(self.model, self.name, deser)",
"def SyncRoot(self) -> object:",
"def sync(self):\r\n\r\n # Ensure to rerun only once to avoid infinite loops\r\n # caused by a constantly changing state value at each run.\r\n #\r\n # Example: state.value += 1\r\n if self._state[\"is_rerun\"]:\r\n self._state[\"is_rerun\"] = False\r\n\r\n elif self._state[\"hash\"] is not None:\r\n if self._state[\"hash\"] != self._state[\"hasher\"].to_bytes(self._state[\"data\"], None):\r\n self._state[\"is_rerun\"] = True\r\n self._state[\"session\"].request_rerun()\r\n\r\n self._state[\"hash\"] = self._state[\"hasher\"].to_bytes(self._state[\"data\"], None)",
"def sync(self, sync):\n self._sync = sync",
"def sync(self):\n\n # Ensure to rerun only once to avoid infinite loops caused by a constantly changing state value at each run.\n # Example: state.value += 1\n\n if self._state[\"is_rerun\"]:\n self._state[\"is_rerun\"] = False\n\n elif self._state[\"hash\"] is not None:\n if self._state[\"hash\"] != self._state[\"hasher\"].to_bytes(\n self._state[\"data\"], None\n ):\n self._state[\"is_rerun\"] = True\n self._state[\"session\"].request_rerun()\n\n self._state[\"hash\"] = self._state[\"hasher\"].to_bytes(self._state[\"data\"], None)",
"def _pre_sync(self):",
"def sync(self):\r\n\r\n # Ensure to rerun only once to avoid infinite loops\r\n # caused by a constantly changing state value at each run.\r\n #\r\n # Example: state.value += 1\r\n if self._state[\"is_rerun\"]:\r\n self._state[\"is_rerun\"] = False\r\n \r\n elif self._state[\"hash\"] is not None:\r\n if self._state[\"hash\"] != self._state[\"hasher\"].to_bytes(self._state[\"data\"], None):\r\n self._state[\"is_rerun\"] = True\r\n self._state[\"session\"].request_rerun()\r\n\r\n self._state[\"hash\"] = self._state[\"hasher\"].to_bytes(self._state[\"data\"], None)",
"def sync(self):\n\n # Ensure to rerun only once to avoid infinite loops\n # caused by a constantly changing state value at each run.\n #\n # Example: state.value += 1\n if self._state[\"is_rerun\"]:\n self._state[\"is_rerun\"] = False\n\n elif self._state[\"hash\"] is not None:\n if self._state[\"hash\"] != self._state[\"hasher\"].to_bytes(self._state[\"data\"], None):\n self._state[\"is_rerun\"] = True\n self._state[\"session\"].request_rerun()\n\n self._state[\"hash\"] = self._state[\"hasher\"].to_bytes(\n self._state[\"data\"], None)",
"def sync(self):\n\n if self._inchild:\n os.read(self._pr_child, len(self.RELEASE_MSG))\n else:\n os.read(self._pr_parent, len(self.RELEASE_MSG))",
"def sync(self):\n\n # Ensure to rerun only once to avoid infinite loops\n # caused by a constantly changing state value at each run.\n #\n # Example: state.value += 1\n if self._state[\"is_rerun\"]:\n self._state[\"is_rerun\"] = False\n\n elif self._state[\"hash\"] is not None:\n if self._state[\"hash\"] != self._state[\"hasher\"].to_bytes(\n self._state[\"data\"], None\n ):\n self._state[\"is_rerun\"] = True\n self._state[\"session\"].request_rerun()\n\n self._state[\"hash\"] = self._state[\"hasher\"].to_bytes(self._state[\"data\"], None)",
"def update(self):\n\n raise NotImplementedError('Must be implemented by subclasses')",
"def freeze(self,):\n pass",
"def sync(self):\n # TODO: write better documentation: when would user need this?\n wait(self.proto.sync())",
"def sync(self, sync):\n\n self._sync = sync",
"def after_sync(self):\n self.title = self.c[\"title\"]\n self.body = self.c[\"body\"]\n self.state = self.c[\"state\"]\n self.base = self.c[\"base\"][\"ref\"]\n self.head = self.c[\"head\"][\"ref\"]\n self.maintainer_can_modify = self.c[\"maintainer_can_modify\"]",
"def sync(self) -> None:\n for parameter in self.data_to_sync:\n assert hasattr(self, parameter), \\\n \"Parameter: %s does not exist in: %s\" % (parameter, self)\n self.publish(self.key_gen(parameter), getattr(self, parameter))",
"def sync(self) -> None: #\n self.__target.load_state_dict(self.__policy.state_dict())",
"def sync(self):\n resp = yield self.do_sync()\n self.c = resp.data\n self.after_sync()\n raise gen.Return(self)",
"def sync_tree_cache(self) -> None:\n self.sync_tree_with_data(self.tree_cache, self.data_cache)",
"def __post_init__(self):\n # ------------------------------------------------------------ 01\n # if path exists load data dict from it\n # that is sync with contents on disk\n if self.path.exists():\n _hashable_dict_from_disk = \\\n m.FrozenDict.from_yaml(self.path.read_text())\n # update internal dict from HashableDict loaded from disk\n self.__dict__.update(\n _hashable_dict_from_disk.get()\n )\n\n # ------------------------------------------------------------ 02\n # start syncing i.e. any updates via __setattr__ will be synced\n # to disc\n self.internal.start_syncing = True",
"def _notify_parent_change(self):\n pass",
"def syncContents(self):\n self._contents.setState_TRY(self.temperature(),\n self.density(),\n self.massFractions())",
"def freeze(self):\n raise NotImplementedError()",
"def i_am_locking(self):\r\n pass",
"def update(self):\n\n pass",
"def update(self):\n return self",
"def update(self):\n raise NotImplementedError",
"def _update(self):\n pass",
"def update(self):\r\n pass",
"def on_parent_changed(self):\n pass",
"def update(self):\n if self._data_provider_state is not None:\n self._state = self._data_provider_state()\n \n if self._data_provider_attributes is not None:\n self._attributes = self._data_provider_attributes()",
"def sync_remote(self, other):\n pass # TODO",
"def update(self):\n pass",
"def update(self):\n pass",
"def update(self):\n pass",
"def update(self):\n pass",
"def update(self):\n pass",
"def update(self):\n pass",
"def update(self):\n pass",
"def update(self):\n pass",
"def update(self):\n pass",
"def update(self):\n pass",
"def update(self):\n pass",
"def update(self):\n pass",
"def update(self):\n pass",
"def update(self):\n pass",
"def update(self):\n pass",
"def update(self):\n raise NotImplementedError()",
"def lock (self):\n self.locked = True\n self._changed = False",
"def _parent_changed(self):\n raise NotImplementedError(\"shouldnt happen, Parentable objects need to be able to change their parent\")",
"def sync_after_remote_computation(self):\n\n # If this state was never initialized, it doesn't have any out-of-date\n # information, so there's no need to update anything.\n if not self.is_initialized:\n return\n\n assert self.should_persist\n\n # First, let's flush the stored entries in our cache accessor. Since we just\n # computed this entry in a subprocess, there should be a new cache entry that\n # isn't reflected yet in our local accessor.\n # (We don't just call self.refresh_cache_accessors() because we don't\n # particularly want to do the cache versioning check -- it's a little late to\n # do anything if it fails now.)\n self._cache_accessor.flush_stored_entries()\n\n # Then, populate the value hashes.\n if self._result_value_hash is None:\n self._load_value_hash()",
"def __getstate__(self) -> Dict[str, Any]:\n s = self.__dict__.copy()\n # Kill the parent ref. It won't pickle well.\n s[\"_parent\"] = None\n return s",
"def update(self):\n with managed_session() as session:\n session.merge(self)",
"def merge_from(self, other):\n assert not self.is_final\n if self.parent is not None:\n assert other.parent is not None\n self.parent.merge_from(other.parent)\n self.isolated_names.update(other.isolated_names)\n self.read.update(other.read)\n self.modified.update(other.modified)\n self.bound.update(other.bound)\n self.deleted.update(other.deleted)\n self.annotations.update(other.annotations)\n self.params.update(other.params)",
"def sync_tree_db(self) -> None:\n self.sync_tree_with_data(self.tree_db, self.data_db)",
"def update(self):\n # default implementation is to do nothing.",
"def sync_widgets(self):\n self.data_changed.emit(self.value)",
"def sync_to_ontology(self):\n self.ontology.sync_entity_to_graph(self)",
"def update_inplace_from(self, other):\n self.__dict__ = other.__dict__.copy()",
"def copy(self):\n return super().copy()",
"def lock(self):\n self.mtx.acquire()",
"def __init__(self):\n self._data_queue = []\n self._access_queue_lock = Lock()",
"def build(self):\n self.lock_built = True",
"def after_sync(self):\n pass",
"def update_data():\n pass",
"def __init__(self):\n self.data = {}\n self.refresh()",
"def __enter__(self):\n\n self.create()\n return super().__enter__()",
"def reparent(self, obj, parent):\n return self.update(obj, parent=parent)",
"async def async_update(self) -> None:\n await super().async_update()\n await self.async_get_state()",
"def restore_object(self):\n self.co_worker_list = self.original_co_worker_list",
"def _update_object(self, data_dict):\r\n pass",
"def update(self, other):\n b = self.hallucinate_merge(other)\n self.l_child = b.l_child\n self.r_child = b.r_child",
"def sync_info(self, sync_info):\n\n self._sync_info = sync_info",
"def update(self):\n self._xfinity_data.update()",
"def cambiar_parent(self):\r\n self.client.parent = self",
"def cambiar_parent(self):\r\n self.client.parent = self",
"def reload_data(self):\n super(UpdateMessage, self).reload_data()\n self._previous_avro_payload.reload_data()",
"def sync_end(self):",
"def __enter__(self):\n return self._get_storage().__enter__()",
"def __enter__(self):\n\n return self"
] | [
"0.80830806",
"0.7977319",
"0.7733652",
"0.71153784",
"0.71153784",
"0.70563495",
"0.7043718",
"0.674526",
"0.65729344",
"0.6530828",
"0.64562",
"0.6451494",
"0.6362502",
"0.6362502",
"0.6325555",
"0.63112843",
"0.6255493",
"0.6245364",
"0.6242624",
"0.619789",
"0.61851496",
"0.61773336",
"0.61612016",
"0.61592674",
"0.615458",
"0.61517084",
"0.6139571",
"0.61280423",
"0.6117423",
"0.6102235",
"0.60881495",
"0.6027518",
"0.6023041",
"0.6007091",
"0.5970572",
"0.5955212",
"0.59466237",
"0.5941309",
"0.59173286",
"0.5876683",
"0.58658206",
"0.5858481",
"0.58379585",
"0.583246",
"0.5813005",
"0.57983154",
"0.5788369",
"0.5780768",
"0.5767515",
"0.57577527",
"0.57572305",
"0.5747498",
"0.5747469",
"0.5747469",
"0.5747469",
"0.5747469",
"0.5747469",
"0.5747469",
"0.5747469",
"0.5747469",
"0.5747469",
"0.5747469",
"0.5747469",
"0.5747469",
"0.5747469",
"0.5747469",
"0.5747469",
"0.5747163",
"0.5744666",
"0.57408583",
"0.5727137",
"0.5714643",
"0.5712627",
"0.5683065",
"0.5666505",
"0.56472826",
"0.5630778",
"0.56155974",
"0.56155485",
"0.55971104",
"0.5590951",
"0.5575481",
"0.5554041",
"0.5548815",
"0.55483866",
"0.5545577",
"0.55447865",
"0.5542659",
"0.554085",
"0.5539797",
"0.55267113",
"0.5525976",
"0.5525867",
"0.5520475",
"0.5518965",
"0.5518965",
"0.55176026",
"0.5515576",
"0.5513838",
"0.5512805"
] | 0.8105605 | 0 |
Handle data change in the parent this isosurface belongs to | def _parentChanged(self, event):
if event == ItemChangedType.DATA:
self._syncDataWithParent() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _parentChanged(self, event):\n if event == ItemChangedType.COMPLEX_MODE:\n self._syncDataWithParent()\n super(ComplexIsosurface, self)._parentChanged(event)",
"def _updated(self, event=None):\n if event == ItemChangedType.COMPLEX_MODE:\n self._syncDataWithParent()\n\n elif event in (ItemChangedType.COLORMAP,\n Item3DChangedType.INTERPOLATION):\n self._updateScenePrimitive()\n super(ComplexIsosurface, self)._updated(event)",
"def _isosurfaceItemChanged(self, event):\n if event == Item3DChangedType.ISO_LEVEL:\n self._updateIsosurfaces()",
"def _updated(self, event=None):\n if event == ItemChangedType.COMPLEX_MODE:\n self._syncDataWithParent()\n super(ComplexCutPlane, self)._updated(event)",
"def _syncDataWithParent(self):\n parent = self.parent()\n if parent is None:\n self._data = None\n else:\n self._data = parent.getData(copy=False)\n self._updateScenePrimitive()",
"def _syncDataWithParent(self):\n parent = self.parent()\n if parent is None:\n self._data = None\n else:\n self._data = parent.getData(\n mode=parent.getComplexMode(), copy=False)\n\n if parent is None or self.getComplexMode() == self.ComplexMode.NONE:\n self._setColormappedData(None, copy=False)\n else:\n self._setColormappedData(\n parent.getData(mode=self.getComplexMode(), copy=False),\n copy=False)\n\n self._updateScenePrimitive()",
"def update_data(self):\n # Just set data_changed, the component should do the rest.\n self.data_changed = True",
"def data_changed(self):\n return",
"def XPLMDataChanged_f(inRefcon):",
"def data_changed(self):\n self.data_changed_signal.emit(self)",
"def _syncDataWithParent(self):\n parent = self.parent()\n if parent is None:\n data, range_ = None, None\n else:\n mode = self.getComplexMode()\n data = parent.getData(mode=mode, copy=False)\n range_ = parent.getDataRange(mode=mode)\n self._updateData(data, range_)",
"def _parent_changed(self):\n raise NotImplementedError(\"shouldnt happen, Parentable objects need to be able to change their parent\")",
"def MyDataChangedCallback(self, inRefcon):\r\n pass",
"def get_data(self, data):\n data = super().get_data(data)\n self.pid.update_layer1(data[self.pid_cols])\n return data",
"def fDataChanged(self):\n\n self._layerManager.getAimsFeatures()",
"def update_original_data(self):\n pass",
"def _syncDataWithParent(self):\n parent = self.parent()\n if parent is None:\n data, range_ = None, None\n else:\n data = parent.getData(copy=False)\n range_ = parent.getDataRange()\n self._updateData(data, range_)",
"def on_data_vars_change(self, change):\n if change['type'] == 'change' and change['name'] == 'value':\n self.left_ds = getattr(self.ts.data, change['new'])\n if self.mask is None:\n self.right_ds = self.left_ds.copy(deep=True)\n else:\n self.right_ds = self.left_ds * self.mask\n\n self.left_imshow.set_data(self.left_ds.data[0])\n self.right_imshow.set_data(self.right_ds.data[0])",
"def update_data():\n pass",
"def update(self, datain):\r\n self.arraydata = datain\r\n self.layoutChanged.emit()",
"def update(self, parent):\r\n pass",
"def update(self, data):\n if self.mode == 'image':\n data = self.preprocess(data)\n self.main_object.set_data(data)\n\n vmin, vmax = self._parse_vrange(data)\n self.main_object.set_clim([vmin, vmax])\n\n if self.mode == 'histogram':\n raise NotImplementedError(\"Updating layer data is not in supported in 'histogram' mode. \")\n\n if self.mode == 'curve':\n x_data, y_data = self.preprocess(data)\n self.main_object.set_data(x_data, y_data)\n self.update_lims()\n\n if self.mode == 'loss':\n raise NotImplementedError(\"Updating layer data is not in supported in 'loss' mode. \")",
"def dataGridView_CellValueChanged(self, sender, eventArgs):\r\n name = self.wf.dataGridView.Rows[eventArgs.RowIndex].Cells[0].Value\r\n newVal = self.wf.dataGridView.Rows[eventArgs.RowIndex].Cells[eventArgs.ColumnIndex].Value\r\n child = Application.ActiveSceneRoot.FindChild2( name, constants.siPolyMeshType, constants.siMeshFamily, True )\r\n if child:\r\n transform = child.Kinematics.Local.GetTransform2(None)\r\n translation = transform.Translation\r\n if eventArgs.ColumnIndex == 1:\r\n transform.Translation = XSIMath.CreateVector3( newVal, translation.Y, translation.Z )\r\n child.Kinematics.Local.PutTransform2(None,transform)\r\n elif eventArgs.ColumnIndex == 2:\r\n transform.Translation = XSIMath.CreateVector3( translation.X, newVal, translation.Z )\r\n child.Kinematics.Local.PutTransform2(None,transform)\r\n elif eventArgs.ColumnIndex == 3:\r\n transform.Translation = XSIMath.CreateVector3( translation.X, translation.Y, newVal )\r\n child.Kinematics.Local.PutTransform2(None,transform)\r\n else:\r\n print \"DataGridView_CellValueChanged: \" + child + \" not found!\"",
"def _levelChanged(self, event):\n if event == items.Item3DChangedType.ISO_LEVEL:\n model = self.model()\n if model is not None:\n index = self.index(column=1)\n model.dataChanged.emit(index, index)",
"def _notify_parent_change(self):\n pass",
"def on_parent_changed(self):\n pass",
"def updateGeometryInfo(self,*args):\r\n self.wf.dataGridView.Rows.Clear()\r\n sceneRoot = Application.ActiveSceneRoot\r\n children = sceneRoot.FindChildren2( \"\", constants.siPolyMeshType, constants.siMeshFamily, True )\r\n for child in children:\r\n vTrans = child.Kinematics.Local.GetTransform2(None).Translation\r\n self.wf.AddRow( child.FullName, vTrans.X, vTrans.Y, vTrans.Z )",
"def _notify_parent_change(self):\n for p in self.parameters:\n p._parent_changed(self)",
"def _load_data(self, event):\n if self.parent is not None:\n wx.PostEvent(self.parent, NewLoadDataEvent())",
"def _numberOfPoints_changed(self):\n self.reinitialiseData()",
"def _update_proxy(self, change):\n if change['type'] == 'container':\n #: Only update what's needed\n self.proxy.update_points(change)\n else:\n super(MapPolygon, self)._update_proxy(change)",
"def _metadata_changed(self, old, new):\n\n #self.cross_plot.value_range.low = self.minz\n #self.cross_plot.value_range.high = self.maxz\n #self.cross_plot2.value_range.low = self.minz\n #self.cross_plot2.value_range.high = self.maxz\n if self._imag_index.metadata.has_key(\"selections\"):\n x_ndx, y_ndx = self._imag_index.metadata[\"selections\"]\n if y_ndx and x_ndx:\n# xdata, ydata = self._image_index.get_data()\n# xdata, ydata = xdata.get_data(), ydata.get_data()\n self.pd_horiz.set_data(\"horiz\", self._image_value.data[y_ndx,:])\n self.pd_vert.set_data(\"vert\", self._image_value.data[:,x_ndx])",
"def _updateIsosurfaces(self):\n # Sorting using minus, this supposes data 'object' to be max values\n sortedIso = sorted(self.getIsosurfaces(),\n key=lambda isosurface: - isosurface.getLevel())\n self._isogroup.children = [iso._getScenePrimitive() for iso in sortedIso]",
"def on_new_data(self, data):\n raise NotImplementedError()",
"def _update_proxy(self, change):\n if change['type'] == 'container':\n #: Only update what's needed\n self.proxy.update_points(change)\n else:\n super(MapPolyline, self)._update_proxy(change)",
"def data_dict_update(self, change):\n self.data_dict = change['value']",
"def _update_data(self) -> None:\n data: SwitcherShutter = self.coordinator.data\n self._attr_current_cover_position = data.position\n self._attr_is_closed = data.position == 0\n self._attr_is_closing = data.direction == ShutterDirection.SHUTTER_DOWN\n self._attr_is_opening = data.direction == ShutterDirection.SHUTTER_UP",
"def _modelUpdated(self, *args, **kwargs):\n topLeft = self.index(column=0)\n bottomRight = self.index(column=1)\n model = self.model()\n if model is not None:\n model.dataChanged.emit(topLeft, bottomRight)",
"def change_data(self):\n\n if self.changed is not True:\n self.changed = True\n print('True')",
"def _data_updated_callback(self, attr, old, new):\n pass",
"def update_E(self):",
"def update_visualization(self) -> None:\n pass",
"def updateData(self):\n self.needsData.emit(self.property(\"number\"))",
"def update(self):\n # Find only unmasked data :\n xyz, sData, sColor, _ = self._select_unmasked()\n # xyz, sData, sColor = self.xyz, self.sData, self.sColor\n\n # Render as cloud points :\n if xyz.size:\n self.mesh.visible = True\n self.mesh.set_data(xyz, edge_color=self.edgecolor, size=sData,\n face_color=sColor, scaling=self.scaling,\n edge_width=self.edgewidth, symbol=self.symbol)\n # self.mesh.transform = self.transform\n self.mesh.update()\n else:\n self.mesh.visible = False",
"def rDataChanged(self):\n\n self._queues.uResolutionTab.refreshData()\n self._layerManager.updateReviewLayer()",
"def _update_ax(self):\n raise NotImplementedError(\"Implement _update_ax(self) in subclass\")",
"def update_data(self):\n\n # Update all plots in the figure\n self.data = self.model.measurements.get_bokeh_vis_data()\n self.source.stream(self.data, len(self.data))\n self.line_source.stream(self.data[self.data.agent_type == 'system'])\n self.school_dropdown_func()\n\n # Update the utility histograms\n self.update_histograms()\n\n # Update the composition histograms\n to_update = [self.neighbourhood_composition_quads, \n self.school_composition_quads, self.distance_quads]\n\n for quads in to_update:\n\n # Grab the new data\n if quads == self.neighbourhood_composition_quads:\n hist_data = self.composition_data(agent_type='neighbourhood')\n elif quads == self.school_composition_quads:\n hist_data = self.composition_data(agent_type='school')\n else:\n hist_data = self.composition_data(agent_type='household')\n\n # Update the bars and edges\n for group in hist_data.keys():\n\n hist, edges = np.histogram(hist_data[group],\n density=True,\n bins=20)\n\n # Update histogram\n quads[group].data_source.data['top'] = hist\n quads[group].data_source.data['left'] = edges[:-1]\n quads[group].data_source.data['right'] = edges[1:]",
"def setData(self,newData):\r\n pass",
"def onChange(self, parent):\r\n pass",
"def exogenous_change(self):\n pass",
"def exogenous_change(self):\n pass",
"def exogenous_change(self):\n pass",
"def reload_data(self):\n super(UpdateMessage, self).reload_data()\n self._previous_avro_payload.reload_data()",
"def _original_data(self, data: np.ndarray):\n if self._raw_data is None:\n self._raw_data = data",
"def onMarketUpdate(self, data):\n pass",
"def update(self, new_gameStateData):\r\n pass",
"def onFlowUpdate(self, event):",
"def on_edit(self, dataobj):",
"def update_graph(self, data):\n if (self.type == 'matplotlib'):\n pass\n else:\n pass",
"def update_all_data(self):\n self.dataChanged.emit(qtc.QModelIndex(), qtc.QModelIndex())",
"def update(self):",
"def update(self):",
"def update(self):",
"def update(self, data):\n pass",
"def update(self, data):\n pass",
"def update(self, data):\n pass",
"def update(self, data):\n pass",
"def update(self, i):\n\n self.current_position = self.mediaPlayer.position()\n \t\n \n\n \"\"\"\n \"Record mode\" and \"wide x-axis mode\" shouls not work together. Wide mode is only for reading data, not writing data. \n The user is not allowed to write data when 16 000 points are displayed (wide mode) on tha diagram. If he does so, the frequency of the graph points decreases with time. \n \"\"\"\n \n if self.checkbox.isChecked():\n self.wideRadio.setEnabled(False)\n if not self.checkbox.isChecked():\n self.wideRadio.setEnabled(True)\n if self.wideRadio.isChecked():\n self.checkbox.setEnabled(False)\n if not self.wideRadio.isChecked():\n self.checkbox.setEnabled(True)\n \n\n\n if self.checkbox.isChecked() and self.mediaPlayer.state() == QMediaPlayer.PlayingState:\n \n self.savedRecently = False\n\n\n self.current_position = self.mediaPlayer.position()\n\n \n if self.xValues == []:\n # \"If the list of xValues is empty\". This happens only in the start of the plotting process.\n self.xValues.append(self.current_position)\n self.yValues.append(self.mouseY)\n self.colors.append(self.currentColor)\n\n #self.position_index = self.xValues.index(self.current_position)\n \n\n if self.xValues != []:\n\n if self.current_position > max(self.xValues):\n # \"If the point is bigger than the last point\". I.e if the point will be plotted in the end of the current graph.\n\n self.xValues.append(self.current_position)\n self.yValues.append(self.mouseY)\n self.colors.append(self.currentColor)\n\n self.position_index = self.xValues.index(self.current_position)\n\n if self.current_position < max(self.xValues):\n # \"If the point is smaller than the last point\". I.e if the point will be plotted in the middle of the current graph.\n\n \n if self.mediaPlayer.position() < 100:\n # The program has a problem of removing a point if x=0. This if-statement solves the problem.\n self.xValues.pop(0)\n self.yValues.pop(0)\n self.colors.pop(0)\n \n\n\n # Clearing all the points that are 100 ms (or less) in front of the current position. \n for number in range(self.current_position, self.current_position + 100):\n if number in self.xValues:\n self.yValues.pop(self.xValues.index(number))\n self.colors.pop(self.xValues.index(number))\n self.xValues.remove(number)\n \n \n \n # Plot new points\n bisect.insort(self.xValues,self.current_position) # Through this method, the element is inserted in order.\n self.yValues.insert(self.xValues.index(self.current_position), self.mouseY)\n self.colors.insert(self.xValues.index(self.current_position), self.currentColor)\n\n self.position_index = self.xValues.index(self.current_position)\n \n\n\n # View modes: zoom or wide.\n\n if self.zoomRadio.isChecked():\n self.canvas.axes.set_ylim(0, 100)\n self.canvas.axes.set_xlim(self.current_position-5000, self.current_position+5000)\n\n self.update_tempLists()\n\n self.curve = self.canvas.axes.scatter(self.tempXList, self.tempYList, s=10 , c=self.tempCList)\n\n\n\n if self.wideRadio.isChecked():\n self.canvas.axes.set_ylim(0, 100)\n\n if self.mediaPlayer.duration() != 0:\n self.canvas.axes.set_xlim(0, self.mediaPlayer.duration())\n elif self.xValues != []:\n self.canvas.axes.set_xlim(0, max(self.xValues))\n\n self.curve = self.canvas.axes.scatter(self.xValues, self.yValues, s=10 , c=self.colors)\n\n \n\n # I remove the previous vertical and horizontal lines. 
If I do not remove them, the program gets slower and slower, and the frequency of the points decreases with time.\n self.hline.remove()\n self.vline.remove()\n \n # New vertical and horizontal lines are created and updated to the correct values.\n self.vline = self.canvas.axes.axvline(x=self.mediaPlayer.position(), color='gray',linestyle=\":\")\n self.hline = self.canvas.axes.axhline(y=self.mouseY, color='gray',linestyle=\":\")\n\n\n\n return [self.curve] + [self.vline] + [self.hline]",
"def __itemChanged(self, event):\n if event in (items.ItemChangedType.DATA, items.ItemChangedType.MASK):\n self._updateFromItem()",
"def process_data(self, windowed_data):\n return",
"def on_dataobj_create(self, dataobj):",
"def project_changed(self, day_idx):\n self.is_modified = True\n self.fire_project_changed(ChartProject.CHANGED)",
"def onFrameUpdated(self):\n pass",
"def _resolution_changed(self):\n self.reinitialiseData()",
"def process_IN_MODIFY(self, event):",
"def _measurement_update(self):\n pass",
"def updateData(self, fp, prop):\n return",
"def setData(self,newdata):\n self.record(inspect.currentframe())\n if np.shape(newdata) == np.shape(self.data):\n self.data = np.copy(newdata)",
"def onUpdated(self):",
"def slot_depth(self, dummy_sender, data):\r\n (typ, price, _voldiff, total_vol) = data\r\n if self._update_book(typ, price, total_vol):\r\n self.signal_changed(self, None)",
"def _update_proxy(self, change):\n # The superclass handler implementation is sufficient.\n super(AbstractItemView, self)._update_proxy(change)",
"def update_data(self, newData):\r\n self.AllData = newData",
"def onPropertiesChange(self, data):\n pass",
"def _itemChanged(self, event):\n if event == items.ItemChangedType.COLORMAP:\n self._sigColormapChanged.emit()\n if self._colormap is not None:\n self._colormap.sigChanged.disconnect(self._colormapChanged)\n\n item = self.item()\n if item is not None:\n self._colormap = item.getColormap()\n self._colormap.sigChanged.connect(self._colormapChanged)\n else:\n self._colormap = None\n\n elif event == items.ItemChangedType.DATA:\n self._sigColormapChanged.emit()",
"def _itemChanged(self, event):\n if event in self._EVENTS:\n model = self.model()\n if model is not None:\n index = self.index(column=0)\n model.dataChanged.emit(index, index)",
"def process_IN_ATTRIB(self, event):",
"def data_input_changed(self):\n self.message.data = self.dataInput.toPlainText()\n self.validate_data_input(self.message.dlc)",
"def update(self):\n self.data.update()\n\n sensor_type = self.entity_description.key\n if sensor_type == \"light\":\n self._attr_native_value = self.data.light\n elif sensor_type == \"light_red\":\n self._attr_native_value = self.data.light_red\n elif sensor_type == \"light_green\":\n self._attr_native_value = self.data.light_green\n elif sensor_type == \"light_blue\":\n self._attr_native_value = self.data.light_blue\n elif sensor_type == \"accelerometer_x\":\n self._attr_native_value = self.data.accelerometer_x\n elif sensor_type == \"accelerometer_y\":\n self._attr_native_value = self.data.accelerometer_y\n elif sensor_type == \"accelerometer_z\":\n self._attr_native_value = self.data.accelerometer_z\n elif sensor_type == \"magnetometer_x\":\n self._attr_native_value = self.data.magnetometer_x\n elif sensor_type == \"magnetometer_y\":\n self._attr_native_value = self.data.magnetometer_y\n elif sensor_type == \"magnetometer_z\":\n self._attr_native_value = self.data.magnetometer_z\n elif sensor_type == \"temperature\":\n self._attr_native_value = self.data.temperature\n elif sensor_type == \"pressure\":\n self._attr_native_value = self.data.pressure\n elif sensor_type == \"voltage_0\":\n self._attr_native_value = self.data.voltage_0\n elif sensor_type == \"voltage_1\":\n self._attr_native_value = self.data.voltage_1\n elif sensor_type == \"voltage_2\":\n self._attr_native_value = self.data.voltage_2\n elif sensor_type == \"voltage_3\":\n self._attr_native_value = self.data.voltage_3",
"def draw_data(self):\n\n return NotImplementedError",
"def _update_proxy(self, change):\n # The superclass implementation is sufficient.\n super(DockArea, self)._update_proxy(change)",
"def OnData(self, data):\n\n for k in self.assets_keys:\n cond1 = (self.Time > self.stop_time_dict[k])\n cond2 = self.Portfolio[k].Invested\n # self.Debug(f\"cond1 {cond1}, cond2 {cond2}\")\n if cond1 and cond2:\n self.Debug(f\"{self.Time}, {k} position {self.Portfolio[k].Quantity}\")\n self.Liquidate(k)\n self.Debug(f\"{k} position liquidated: {self.Portfolio[k].Quantity}\")\n\n for k in self.assets_keys:\n if not data.ContainsKey(k):\n continue\n\n dat = data[k]\n time = dat.Time\n\n try:\n # self.features.loc[time] = [data[\"GAZP\"].Fastmavg, data[\"GAZP\"].Slowmavg, data[\"GAZP\"].Close]\n # self.features.loc[time]\n self.features_dict[k].loc[time] = [dat.Logret, dat.Momone, dat.Momtwo, dat.Momthree, dat.Momfour,\n dat.Momfive, dat.Volatilityfifty, dat.Volatilitythirtyone,\n dat.Volatilityfifteen,\n dat.Autocorrone, dat.Autocorrtwo, dat.Autocorrthree,\n dat.Autocorrfour, dat.Autocorrfive,\n dat.Logtone, dat.Logttwo, dat.Logtthree, dat.Logtfour, dat.Logtfive,\n dat.Bin, dat.Side]\n # self.Debug(\"1\")\n except AttributeError as e:\n continue\n\n if self.clf_dict[k] is not None:\n X = self.features_dict[k].drop([\"Bin\"], axis=1).loc[time].values.reshape(1, -1)\n y_pred = self.clf_dict[k].predict(X)\n\n if y_pred > .8:\n\n if dat.Side == 1:\n if not self.Portfolio[k].IsLong:\n self.stop_time_dict[k] = self.Time + self.lifetime\n if self.Portfolio[k].Invested:\n self.Liquidate(k)\n self.SetHoldings(k, .5)\n # self.Debug(f\" long {k}, {self.Portfolio[k].Quantity}, till {self.stop_time_dict[k]}\")\n # self.Debug(f\" hol {self.Portfolio.TotalHoldingsValue}, cash {self.Portfolio.Cash}\")\n\n else:\n self.stop_time_dict[k] = self.Time + self.lifetime\n # self.Debug(f\" long_ {k}, {self.Portfolio[k].Quantity}, till {self.stop_time_dict[k]}\")\n\n elif dat.Side == -1:\n if self.Portfolio[k].IsLong:\n self.stop_time_dict[k] = self.Time + self.lifetime\n self.Liquidate(k)\n self.SetHoldings(k, -0.5)\n # self.Debug(f\" short {k}, {self.Portfolio[k].Quantity}, till {self.stop_time_dict[k]}\")\n # self.Debug(f\" hol {self.Portfolio.TotalHoldingsValue}, cash {self.Portfolio.Cash}\")\n else:\n self.stop_time_dict[k] = self.Time + self.lifetime\n # self.Liquidate(k)\n self.SetHoldings(k, -0.5)\n # self.Debug(f\" short_ {k}, {self.Portfolio[k].Quantity}, till {self.stop_time_dict[k]}\")\n # self.Debug(f\" hol {self.Portfolio.TotalHoldingsValue}, cash {self.Portfolio.Cash}\")",
"def _itemChanged(self, event):\n if event == items.ItemChangedType.VISUALIZATION_MODE:\n item = self.sender()\n if item is not None: # This occurs with PySide/python2.7\n self.__isEnabled = item.isPropertyEnabled(self.__propertyName)\n self.__updateFlags()\n\n # Notify model\n model = self.model()\n if model is not None:\n begin = self.index(column=0)\n end = self.index(column=1)\n model.dataChanged.emit(begin, end)",
"def changeFridge(self,*args):\n self.selectedADR = self.adrSelect.get()\n # clear temps plot\n self.stage60K.set_xdata([])\n self.stage60K.set_ydata([])\n self.stage03K.set_xdata([])\n self.stage03K.set_ydata([])\n self.stageGGG.set_xdata([])\n self.stageGGG.set_ydata([])\n self.stageFAA.set_xdata([])\n self.stageFAA.set_ydata([])\n # load saved temp data\n # We have to sleep for 0.5s here because it seems like it takes\n # a moment for the connected server to register in self.cxn, even\n # though all this starts because a message is received saying it\n # is connected :\\\n time.sleep(0.5)\n startDateTime = yield self.cxn[self.selectedADR].get_start_datetime()\n try:\n reg = self.cxn.registry\n yield reg.cd(ADR_SETTINGS_BASE_PATH + [self.selectedADR])\n logPath = yield reg.get('Log Path')\n tempDataChest = dataChest(logPath)\n ds = dateStamp()\n dset = '%s_temperatures'%ds.dateStamp(startDateTime.isoformat())\n tempDataChest.openDataset(dset)\n\n n = tempDataChest.getNumRows()\n # load approximately the last 6 hours of data\n pastTempData = tempDataChest.getData(max(0,n-6*60*60),None )\n for newRow in pastTempData:\n # change utc time to local\n utc = newRow[0] # (float)\n utc = datetime.datetime.utcfromtimestamp(utc)\n utc = utc.replace(tzinfo=tz.tzutc())\n newRow[0] = mpl.dates.date2num(utc)\n # add old data from file into plot\n self.stage60K.set_xdata(numpy.append(self.stage60K.get_xdata(),newRow[0]))\n self.stage60K.set_ydata(numpy.append(self.stage60K.get_ydata(),newRow[1]))\n self.stage03K.set_xdata(numpy.append(self.stage03K.get_xdata(),newRow[0]))\n self.stage03K.set_ydata(numpy.append(self.stage03K.get_ydata(),newRow[2]))\n self.stageGGG.set_xdata(numpy.append(self.stageGGG.get_xdata(),newRow[0]))\n self.stageGGG.set_ydata(numpy.append(self.stageGGG.get_ydata(),newRow[3]))\n self.stageFAA.set_xdata(numpy.append(self.stageFAA.get_xdata(),newRow[0]))\n self.stageFAA.set_ydata(numpy.append(self.stageFAA.get_ydata(),newRow[4]))\n except IOError:\n # file not created yet if adr server just opened\n print( 'temp file not created yet?' 
)\n self.updatePlot()\n # clear and reload last 20 messages of log\n self.log.clear()\n logMessages = yield self.cxn[self.selectedADR].get_log(20)\n for (t,m,a) in logMessages:\n self.updateLog(t,m,a)\n # update instrument status stuff: delete old, create new\n for widget in self.instrumentStatusFrame.winfo_children():\n widget.destroy()\n returnStatus = yield self.cxn[self.selectedADR].get_instrument_state()\n self.instrumentStatuses = {}\n for name,status in returnStatus:\n self.instrumentStatuses[name] = Tkinter.Label(self.instrumentStatusFrame,\n text=name,\n relief=Tkinter.RIDGE,\n bg='gray70')\n self.instrumentStatuses[name].pack(side=Tkinter.LEFT,\n expand=True,\n fill=Tkinter.X)\n # update field limits and button statuses\n self.setFieldLimits()\n self.magUpButton.configure(state=Tkinter.NORMAL)\n self.regulateButton.configure(state=Tkinter.NORMAL)\n self.compressorButton.configure(state=Tkinter.DISABLED)\n mUp = yield self.cxn[self.selectedADR].get_state_var('maggingUp')\n reg = yield self.cxn[self.selectedADR].get_state_var('regulating')\n if mUp:\n self.magUpButton.configure(text='Stop Magging Up',\n command=self.cancelMagUp)\n self.regulateButton.configure(state=Tkinter.DISABLED)\n if reg:\n self.regulateButton.configure(text='Stop Regulating',\n command=self.cancelRegulate)\n self.magUpButton.configure(state=Tkinter.DISABLED)\n # update heat switch buttons\n HSAvailable = yield self.cxn[self.selectedADR].get_instrument_state(['Heat Switch'])\n if HSAvailable[0][1][0]:\n self.HSCloseButton.configure(state=Tkinter.NORMAL)\n self.HSOpenButton.configure(state=Tkinter.NORMAL)\n else:\n self.HSCloseButton.configure(state=Tkinter.DISABLED)\n self.HSOpenButton.configure(state=Tkinter.DISABLED)\n # refresh interface\n self.updateInterface()",
"def __flight_data_handler(self, event, sender, data):\n self.battery = data.battery_percentage\n self.fly_mode = data.fly_mode\n self.throw_fly_timer = data.throw_fly_timer\n self.throw_ongoing = data.throw_fly_timer > 0\n\n if self.prev_flight_data != str(data):\n print(data)\n self.prev_flight_data = str(data)\n self.flight_data = data\n\n if self.is_flying != data.em_sky:\n self.is_flying = data.em_sky\n log.debug(f\"FLYING : {self.is_flying}\")\n if not self.is_flying:\n self.reset()\n else:\n if self.tracking_after_takeoff:\n log.info(\"Tracking on after takeoff\")\n self.toggle_tracking(True)\n\n # if self.write_header_log:\n # self.write_header_log = False\n # self.log_file_log.write(f\"{data.format_cvs_header()}\\n\")\n # self.log_file_log.write(f\"{data.format_cvs(0)}\\n\")",
"def updateData( Tables, Graph, LayersInfo, WarningMessage ):\n\n # clean the warning message\n LayersInfo.clean()\n WarningMessage.clean()\n\n LayerThicknessBuffer = Tables[ \"GeometryProperties\" ].getValue( 0, 2 )\n try:\n\n\n Layers = getLayersFromString( Tables[ \"GeometryProperties\" ].getValue( 0, 2 ) )\n\n LayersInfo.printMessage( str( len( Layers ) ) )\n\n # Homogenize the input data\n if len(Layers) != 1:\n\n makeMultiLayerMask( Tables )\n\n HomogenizedData = homogenize( Tables[ \"ElasticModulus\" ].getData( )[ 0 ],\n Tables[ \"ShearModulus\" ].getData( )[ 0 ],\n Tables[ \"PoissonRatios\" ].getData( ),\n Layers )\n\n #cangeMode( Tables, WarningMessage, Graph.getMode( ) )\n\n Tables[ \"ElasticModulus\" ].assignValuesSet( HomogenizedData[ \"ElasticModulus\" ] )\n Tables[ \"ShearModulus\" ].assignValuesSet( HomogenizedData[ \"ShearModulus\" ] )\n Tables[ \"PoissonRatios\" ].assignValuesSet( HomogenizedData[ \"PoissonRatios\" ] )\n Tables[ \"GeometryProperties\" ].assignValue( 0, 2, HomogenizedData[ \"TotalThickness\" ] )\n\n\n # Part of error handling.Function \"isInputNegative\" throws an error\n # if there is an element with its negetive value.\n isInputNegative( Tables [ \"ElasticModulus\" ].getData( ) )\n isInputNegative( Tables [ \"ShearModulus\" ].getData( ) )\n isInputNegative( Tables [ \"PoissonRatios\" ].getData( ) )\n isInputNegative( Tables [ \"MaterialProperties\" ].getData( ) )\n isInputNegative( Tables [ \"GeometryProperties\" ].getData( ) )\n\n # update the tables buffers\n makeMask( Tables, Graph.getMode() )\n\n # before calling user-define functions check the current mode\n cangeMode( Tables, WarningMessage, Graph.getMode() )\n\n precomputePoissonRatios( Tables )\n\n # get data from the corresponding tables\n ElasticModulusData = Tables [ \"ElasticModulus\" ].getData( )\n ShearModulusData = Tables [ \"ShearModulus\" ].getData( )\n PoissonRatiosData = Tables [ \"PoissonRatios\" ].getData( )\n MaterialPropertiesData = Tables [ \"MaterialProperties\" ].getData( )\n GeometryPropertiesData = Tables [ \"GeometryProperties\" ].getData( )\n\n\n #################### CALL USER-SPECIFIC FUNCTION ##########################\n\n testInputData( Graph.getMode(), PoissonRatiosData )\n\n Graph.Containers[ \"WaveVelocity\" ] = wave_speeds(\n ElasticModulusData,\n ShearModulusData,\n PoissonRatiosData,\n MaterialPropertiesData,\n GeometryPropertiesData,\n bool( Graph.getMode() ),\n Graph.getRange() )\n\n\n Graph.Containers[ \"ModesInBand\" ] = ModesInBand(\n ElasticModulusData,\n ShearModulusData,\n PoissonRatiosData,\n MaterialPropertiesData,\n GeometryPropertiesData,\n bool( Graph.getMode( ) ),\n Graph.getRange( ) )\n\n\n Graph.Containers[ \"ModalDensity\" ] = ModaleDichte(\n Graph.Containers[ \"WaveVelocity\" ][ \"c_L\" ],\n Graph.Containers[ \"WaveVelocity\" ][ \"c_S\" ],\n Graph.Containers[ \"WaveVelocity\" ][ \"c_B_eff\" ],\n Graph.Containers[ \"WaveVelocity\" ][ \"c_g_eff\" ],\n GeometryPropertiesData,\n bool( Graph.getMode( ) ),\n Graph.getRange( ) )\n\n\n Graph.Containers[ \"ModalOverlapFactor\" ] = ModalOverlapFactor(\n MaterialPropertiesData,\n Graph.Containers[ \"ModalDensity\" ],\n Graph.getRange( ) )\n\n\n Graph.Containers[ \"MaxElementSize\" ] = MaximumElementSize(\n Graph.Containers[ \"WaveVelocity\" ][ \"c_B\" ],\n Graph.Containers[ \"WaveVelocity\" ][ \"c_B_eff\" ],\n Graph.getRange( ) )\n\n\n Graph.Containers[ \"EigenFrequency\" ] = EigenfrequenciesPlate(\n ElasticModulusData,\n ShearModulusData,\n PoissonRatiosData,\n MaterialPropertiesData,\n 
GeometryPropertiesData,\n bool( Graph.getMode() ),\n Graph.getRange() )\n\n # Update the current graph with new data\n updateGraph( Graph, Graph.getCurrentGraphNumber( ) )\n\n WarningMessage.clean()\n\n\n except VibroP_DataCorrupted as Error:\n WarningMessage.printMessage( str(Error) )\n Tables[ \"GeometryProperties\" ].setValue( 0, 2, LayerThicknessBuffer, \"\" )\n\n\n except VibroP_WrongLayersThikness as Error:\n WarningMessage.printMessage( str(Error) )\n\n\n except VibroP_TableCorrupted as Error:\n WarningMessage.printMessage( str(Error) )\n\n #'''\n except:\n Message = \"Error: Unexpected error. Please, refer to the code\"\n WarningMessage.printMessage( Message )\n #'''",
"def on_new_data(self):\n\n if self.connected:\n tab_open = self.tab_open()\n\n # Update plot data\n for i, series in enumerate(self.measurements_list):\n if i == tab_open:\n self.plotted_data[i].setData(self.data_indices, self.measurements_list[i])",
"def _update_object(self, data_dict):\r\n pass",
"def __init__(self, parent): \n \n self.parent = parent\n \n self.custom_channel_name = _qstring(parent.rhd)\n self.native_channel_name = _qstring(parent.rhd)\n self.native_order = np.int16(struct.unpack('h', parent.rhd.read(2)))[0]\n self.custom_order = np.int16(struct.unpack('h', parent.rhd.read(2)))[0]\n self.signal_type = np.int16(struct.unpack('h', parent.rhd.read(2)))[0]\n self.channel_enabled = np.int16(struct.unpack('h', parent.rhd.read(2)))[0]\n self.chip_channel = np.int16(struct.unpack('h', parent.rhd.read(2)))[0]\n self.board_stream = np.int16(struct.unpack('h', parent.rhd.read(2)))[0]\n self.spike_scope_voltage_trigger_mode= np.int16(struct.unpack('h', parent.rhd.read(2)))[0]\n self.spike_scope_voltage_threshold = np.int16(struct.unpack('h', parent.rhd.read(2)))[0]\n self.spike_scope_digital_trigger_channel = np.int16(struct.unpack('h', parent.rhd.read(2)))[0]\n self.spike_scope_digital_edge_polarity = np.int16(struct.unpack('h', parent.rhd.read(2)))[0]\n self.electrode_impedance_magnitude = np.float32(struct.unpack('f', parent.rhd.read(4)))[0]\n self.electrode_impedance_phase = np.float32(struct.unpack('f', parent.rhd.read(4)))[0]\n\n if self.signal_type == 0 and self.channel_enabled:#Add name to the amplifier channel list\n parent._AMPLIFIER_CHANNELS.append(self.native_channel_name)\n\n if self.signal_type == 1 and self.channel_enabled:#Add name to the aux channel list\n parent._AUX_CHANNELS.append(self.native_channel_name)\n\n if self.signal_type == 2 and self.channel_enabled:#Supply voltage\n parent._SUPPLY_VOLTAGE_CHANNELS.append(self.native_channel_name)\n\n if self.signal_type == 3 and self.channel_enabled:#usb board adc input channel\n parent._ADC_INPUT_CHANNELS.append(self.native_channel_name)\n\n if self.signal_type == 4 and self.channel_enabled:#usb board digital input channel\n parent._DIGITAL_INPUT_CHANNELS.append(self.native_channel_name)",
"def updateVisualization(self):\n\t\tif self.visualization:\n\t\t\tif self.fixedVisualization:\n\t\t\t\tself.visualization.setFixedVisualization(self.fixedVisualization)\n\t\t\tif self.movingVisualization:\n\t\t\t\tself.visualization.setMovingVisualization(self.movingVisualization)\n\t\tself.multiRenderWidget.setVolumeVisualization(self.visualization)\n\t\tself.visualizationUpdated.emit(self.visualization)"
] | [
"0.78250813",
"0.7281809",
"0.6798219",
"0.663362",
"0.6617056",
"0.6495038",
"0.63203007",
"0.6309291",
"0.62671393",
"0.6239306",
"0.6219209",
"0.6177352",
"0.6040155",
"0.59989554",
"0.5994137",
"0.59767103",
"0.5967239",
"0.5934066",
"0.5895432",
"0.58403164",
"0.5837722",
"0.58095384",
"0.5787082",
"0.5745996",
"0.57370967",
"0.57274175",
"0.57120687",
"0.5686296",
"0.5668755",
"0.56500715",
"0.56143516",
"0.56101364",
"0.5600463",
"0.55783623",
"0.5572413",
"0.5542561",
"0.54469305",
"0.5445184",
"0.54451364",
"0.5436735",
"0.5423925",
"0.5405189",
"0.5404468",
"0.540398",
"0.53881997",
"0.5385559",
"0.53612375",
"0.53593886",
"0.53440094",
"0.5343246",
"0.5343246",
"0.5343246",
"0.53135335",
"0.53007156",
"0.5298004",
"0.5285347",
"0.5275803",
"0.52378005",
"0.521567",
"0.52154994",
"0.5215113",
"0.5215113",
"0.5215113",
"0.52099544",
"0.52099544",
"0.52099544",
"0.52099544",
"0.5205525",
"0.5201411",
"0.5200751",
"0.51920694",
"0.5163546",
"0.5162113",
"0.51564157",
"0.5156308",
"0.5154777",
"0.51547617",
"0.515394",
"0.5153451",
"0.51531476",
"0.5135554",
"0.5131347",
"0.5127962",
"0.51279086",
"0.5106883",
"0.5100003",
"0.50936526",
"0.5084411",
"0.50794625",
"0.5078345",
"0.5075342",
"0.5073403",
"0.5068132",
"0.5062168",
"0.5055551",
"0.50551134",
"0.5054878",
"0.50545305",
"0.5052645"
] | 0.63887423 | 6 |
Return the level of this isosurface (float) | def getLevel(self):
return self._level | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def level(self):\n return self.game_data['player stats']['Level']",
"def level(self):\n return self.init_v[2]",
"def getLevel(self):\n return _libsbml.SBasePlugin_getLevel(self)",
"def level(self):\n return self._level",
"def level(self):\n return self._level",
"def level(self):\n return self._level",
"def level(self):\n return self._level",
"def level(self):\n return self.__level",
"def level(self):\n return self.__level",
"def getLevel(self):\n return _libsbml.SBase_getLevel(self)",
"def _get_isis_level(self):\n return self.__isis_level",
"def get_level(self, channel=None):\n return int(self.getSensorData(\"FILLING_LEVEL\", channel))",
"def getLevel(self):\n return self.level",
"def volume_level(self):\n return self._volumeLevel/100",
"def level(self) -> int:\n return self._level",
"def level(self) -> int:\n return self._level",
"def get_level(self) -> int:\n return self.rstate.level()",
"def volume_level(self):\n return self._group.volume / 100",
"def volume_level(self):\n return int(self._volume) / MAX_VOL",
"def _do_get_level(self):\n logging.info(__name__ + ' : Read level of channel 1')\n result = self._execute('R1')\n return float(result.replace(\"R\", \"\")) / 10",
"def volume_level(self):\n return self._table.speed",
"def get_level(cls, curve_value):\n return curve_value & (2 ** cls.level_bits - 1)",
"def volume_level(self) -> float:\n return int(self._state.get(\"playback_volume\", 0)) / 100",
"def level(self) -> int:\n return self.__state.level()",
"def getLevel(self, *args):\n return _libsbml.FbcExtension_getLevel(self, *args)",
"def volume_level(self):\n if 'mixer volume' in self._status:\n return int(self._status['mixer volume']) / 100.0",
"def volume_level(self) -> str | None:\n return int(self.zone.Volume) / 100.0",
"def volume_level(self):\n return self._volume_level",
"def volume_level(self):\n return self._client.volume / 100",
"def volume_level(self):\n return self._volume",
"def volume_level(self):\n return self._volume",
"def volume_level(self):\n return self._volume",
"def volume_level(self):\n return self._volume",
"def volume_level(self):\n return self._volume",
"def getLevel(self, *args):\n return _libsbml.SBMLExtension_getLevel(self, *args)",
"def get_level(self, level):\n return",
"def level( self ):\n assert isinstance( self._level, int )\n assert isinstance( self._steps, list )\n assert isinstance( self._outter, Env ) or ( self._outter is None )\n\n return self._level",
"def get_level(self, channel=None):\n return int(self.getSensorData(\"VALVE_STATE\", channel))",
"def getLevel(self):\n return _libsbml.ASTBasePlugin_getLevel(self)",
"def getLevel(self, *args):\n return _libsbml.CompExtension_getLevel(self, *args)",
"def getLevel(self, *args):\n return _libsbml.LayoutExtension_getLevel(self, *args)",
"def getLevel(self, *args):\n return _libsbml.QualExtension_getLevel(self, *args)",
"def getLevel(self, *args):\n return _libsbml.MultiExtension_getLevel(self, *args)",
"def beatlevel_float(self):\n\n parts = self.header['BeatLevel'].split('/')\n return float(parts[0])/float(parts[1])",
"def level(self) -> pulumi.Input[Union[str, 'Level']]:\n return pulumi.get(self, \"level\")",
"def get_food_level(self):\n return self.plant",
"def getSupportResistanceLevels(self):\n return self.levels",
"def get_water_level(self):\n return self.water_level",
"def level(self):\n return self.__pin.pwm",
"def resolution(self, level):\n return 2 ** (level - 1)",
"def volume_level(self):\n volume = self._state.get(\"volume\", None)\n if volume is not None and volume != \"\":\n volume = int(volume) / 100\n return volume",
"def getThresholdLevel(self):\n return _libsbml.Input_getThresholdLevel(self)",
"def compute_volume(self) -> float:\n return (\n (1 if self.clockwise else -1)\n * np.sum(\n np.linalg.det(\n np.dstack(\n (\n self.vertices[self._faces[:, 0]],\n self.vertices[self._faces[:, 1]],\n self.vertices[self._faces[:, 2]],\n )\n )\n )\n )\n / 6\n )",
"def getSurfaceArea(self) -> float:\n return self.area()",
"def get_lux(self):\n\n svc = \"urn:micasaverde-com:serviceId:LightSensor1\"\n if not svc in self.services:\n raise RuntimeError, \"Device doesn't support the service\"\n\n return self.get_variable(svc, \"CurrentLevel\")",
"def _get_lsp_config_isis_level(self):\n return self.__lsp_config_isis_level",
"def level(self) -> int:\n return self.categorization.level(self)",
"def get_surface_level(game_object: GameObject) -> int:\n if game_object is None:\n return 0\n routing_surface = CommonObjectLocationUtils.get_routing_surface(game_object)\n if not routing_surface:\n return 0\n return routing_surface.secondary_id",
"def GetLevelSetValue(self) -> \"double\":\n return _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF3_GetLevelSetValue(self)",
"def calc_base_eff_and_infl(level):\n return 2 + (level - 1)",
"def GetLevelSetValue(self) -> \"double\":\n return _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF2_GetLevelSetValue(self)",
"def battery_level(self):\n return self.battery",
"def get_geologic_level(self, point: Point) -> int:\n if point == self.target:\n return 0\n if point.y == 0:\n return point.x * 16807\n if point.x == 0:\n return point.y * 48271\n return self.get_erosion_level(to_above(point)) * self.get_erosion_level(to_left(point))",
"def current_cover_position(self):\n return self._device.level * 100.0",
"def getAmbientLevel(self, channel, unitCode=0):\n resp = self.XAPCommand('AMBLVL', channel, unitCode=unitCode)\n return float(resp)",
"def get_num_levels(self):\n return len(self._Psi)",
"def getMaxLevel(self):\n return _libsbml.QualitativeSpecies_getMaxLevel(self)",
"def n_z(self, level):\n resolution = self.resolution(level)\n return (self.z_extent // resolution + 63) // 64",
"def ambient(self) -> float:\n return self.GetAmbient()",
"def getAudioLevel(self) :\n\t\tif not Util.AUDIO : return\n\t\treturn self.audioLevel",
"def set_level(self, x, level):\n return x * 10 ** ((level - self.ref_level) / 20)",
"def getLevel( self ):\n level = self.getEffectiveLevel()\n if level == logging.CRITICAL:\n return 'critical'\n elif level == logging.ERROR:\n return 'error'\n elif level == logging.WARNING:\n return 'warning'\n elif level == logging.INFO:\n return 'info'\n elif level == logging.DEBUG:\n return 'debug'\n elif level == logging.NOTSET:\n return 'notset'\n else:\n return 'unknown ({})'.format( level )",
"def get_channel_h_unit(self)->float: \n return self.__channel_h_unit",
"def get_level(rol):\n\treturn rol.level",
"def getLevels():",
"def getLevel(self, *args):\n return _libsbml.GroupsExtension_getLevel(self, *args)",
"def read_level(self):\n addresse = 0x48\n self.bus.write_byte(addresse,self.channel)\n value = self.bus.read_byte(addresse)\n time.sleep(1)\n volts = self.convert_volts(value,2)\n self.write_level(volts)\n alerteur = Alerteur()\n if volts < self.seuil_min:\n alerteur.add_alert(self.module_name, \"Batterie faible.\")\n else:\n alerteur.remove_alert(self.module_name)\n return volts",
"def get_logging_level(self):\n return self.logging_level",
"def surface_area(self) -> float:\n return 4 * np.pi * self.radius**2",
"def get_antenna_level(self):\n response = self.parent.rfid.get_antenna_level()\n response = response[0]\n return response",
"def min_level(self):\n return self.__min",
"def get_level(self):\n return self.debug_level, self.verbosity",
"def get_level(k):\r\n return int(log2(k))",
"def getLevel(unique_name):",
"def getLevel(self, channel, group=\"I\", stage=\"I\", unitCode=0):\n self.send(XAP800_CMD + str(unitCode) + \" LVL \" + str(channel) + \" \" +\n group + \" \" + stage + \" \" + EOM)\n return float(self.readResponse())",
"def surfaceArea(self):\n surfaceArea = self.sideLength**2 * 6\n return surfaceArea",
"def battery_level(self):\n return self._battery_level",
"def getLevel(self, *args):\n return _libsbml.SBMLNamespaces_getLevel(self, *args)",
"def get_levels(self):\n return self.levels[self.game]",
"def get_fluorescence(self):\n return self._lib.StGetFluorFlg()",
"def ambient_coefficient(self):\n return self._ambient_coefficient",
"def get_vol_lvl(self):\n global volume\n #output = subprocess.check_output(['amixer', 'sget', self.mixer_name]).decode('utf-8')\n return volume#int(output[(output.find('[') + 1):output.find('%]', (output.find('[') + 1))])",
"def max_level(self):\n return self.__max",
"def level(self):\n index = self._ordered_input_names.index('level')\n return self._inputs[index]",
"def critical_depth(self):\n crit_depth = math.pow((self.flow**2 /\n (self.width ** 2 * Channel.g)), (1/3))\n return crit_depth",
"def get_level(self):\n try:\n return self.root_node()['document_level']\n except KeyError:\n return None",
"def get_coverage_area(self) -> float:\n return math.sqrt(self.norm_hull.volume)",
"def read_level(self):\n current_level = 1\n\n try:\n if self.store.exists(LEVEL_STORE):\n current_level_str = self.store.get(LEVEL_STORE)['level']\n current_level = int(current_level_str)\n except:\n print 'Exception when reading Galaxy run level from JSON file!'\n current_level = 1\n\n return current_level",
"def getFov(self):\n return self.light.node().getLens().getFov()",
"def brightness(self):\n return to_hass_level(self._position)"
] | [
"0.692961",
"0.68555295",
"0.68304414",
"0.67786837",
"0.67786837",
"0.67786837",
"0.67786837",
"0.67447656",
"0.67447656",
"0.673165",
"0.6729187",
"0.67290646",
"0.67083573",
"0.6660453",
"0.66239667",
"0.66239667",
"0.65987796",
"0.6588329",
"0.65245295",
"0.65243083",
"0.65113395",
"0.6481602",
"0.64800906",
"0.64771783",
"0.64745146",
"0.6445088",
"0.64376295",
"0.6420487",
"0.64171034",
"0.63877404",
"0.63877404",
"0.63877404",
"0.63877404",
"0.63877404",
"0.6340208",
"0.63004327",
"0.6267936",
"0.6240504",
"0.62020516",
"0.61995",
"0.6188778",
"0.61362034",
"0.61095786",
"0.6100362",
"0.60919845",
"0.60883963",
"0.6009828",
"0.59998304",
"0.59921026",
"0.5945851",
"0.5941293",
"0.59361726",
"0.59153193",
"0.59091294",
"0.5887704",
"0.58862275",
"0.5885093",
"0.58558416",
"0.5854854",
"0.5828928",
"0.58041555",
"0.57913387",
"0.57900935",
"0.57848364",
"0.5782751",
"0.57803226",
"0.5771661",
"0.5769855",
"0.57647234",
"0.57635725",
"0.5718955",
"0.57154775",
"0.5713224",
"0.57089454",
"0.5699764",
"0.56974435",
"0.5694527",
"0.5657116",
"0.56560117",
"0.5650933",
"0.56452495",
"0.56405354",
"0.5639666",
"0.563852",
"0.5627681",
"0.5626314",
"0.5622937",
"0.56155443",
"0.56042993",
"0.5598692",
"0.55902606",
"0.5588103",
"0.5579196",
"0.55771035",
"0.55654645",
"0.55627286",
"0.5562703",
"0.5549301",
"0.55482763",
"0.55438995"
] | 0.68635625 | 1 |
Set the value at which to build the isosurface. Setting this value resets the autolevel function | def setLevel(self, level):
self._autoLevelFunction = None
level = float(level)
if level != self._level:
self._level = level
self._updateScenePrimitive()
self._updated(Item3DChangedType.ISO_LEVEL) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def SetLevelSetValue(self, _arg: 'double const') -> \"void\":\n return _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF3_SetLevelSetValue(self, _arg)",
"def SetLevelSetValue(self, _arg: 'double const') -> \"void\":\n return _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF2_SetLevelSetValue(self, _arg)",
"def isoslider(surface_dic, surface_value_dic, min_value=0):\r\n return \\\r\nf\"\"\"\r\n\\n\\nclass IsoLevel(tk.Variable):\r\n def __init__(self, master, name, level):\r\n tk.Variable.__init__(self, master, value=level)\r\n self.name = name\r\n self.trace('w', self.callback)\r\n\r\n def callback(self, *args):\r\n cmd.isolevel(self.name, self.get())\r\n\r\n def increment(self, event=None, delta=0.1):\r\n self.set(round(float(self.get()) + delta, 2))\r\n\r\n def decrement(self, event=None):\r\n self.increment(None, -0.1)\r\n\r\n\r\nsurface_list = {surface_dic}\r\nsurface_max_list = {surface_value_dic}\r\n\r\ntop = tk.Toplevel(plugins.get_tk_root())\r\n\r\nmaster = tk.Frame(top, padx=10, pady=10)\r\nmaster.pack(fill=\"both\", expand=1)\r\n\r\nfor child in list(master.children.values()):\r\n child.destroy()\r\n\r\n\r\nrow_counter = 0\r\nfor identifier, component_dic in surface_list.items():\r\n # add calculation identifier\r\n tk.Label(master, text=identifier).grid(row=row_counter, column=0, sticky=\"w\")\r\n row_counter += 1\r\n \r\n for component_id, surfaces in component_dic.items():\r\n # add collection label, e.g. superstar or hotspot etc.\r\n tk.Label(master, text=component_id).grid(row=row_counter, column=1, sticky='w')\r\n row_counter += 1\r\n \r\n for i, surface in enumerate(surfaces):\r\n # add grid type label\r\n probe = surface.split(\"_\")[-2]\r\n tk.Label(master, text=probe).grid(row=row_counter, column=2, sticky=\"w\")\r\n \r\n # slider code \r\n v = IsoLevel(master, surface, 5)\r\n e = tk.Scale(master, orient=tk.HORIZONTAL, from_={min_value}, to=surface_max_list[identifier][component_id],\r\n resolution=0.1, showvalue=0, variable=v)\r\n e.grid(row=row_counter, column=3, sticky=\"ew\")\r\n\r\n e = tk.Entry(master, textvariable=v, width=4)\r\n e.grid(row=row_counter, column=4, sticky=\"e\")\r\n master.columnconfigure(3, weight=1)\r\n row_counter += 1\r\n\\n\\n\r\n\"\"\"",
"def setvalue(self,num,name,val):\n self.M.reconfigure(num,{name:float(val)})",
"def init_game_setting(self):\n ##################\n # YOUR CODE HERE #\n ##################\n self.state = np.zeros((1, 80, 80))\n self.clear_action()",
"def set_setpoint(self, value):\n value = value * self.conf['PSICONV']\n log.debug(\"Set pressure regulator %d to %f\", self.id_, value)\n self.synth.cbox.set_dac(self.id_, value)",
"def setUseGizmos(value=True):\n global cc\n cc = not value",
"def set_initial(self, value):\n # TODO: Make an Initial Stock Adjust here\n pass",
"def update_electronic_settings(self, key, value):\n\n if key in self._electronic_settings:\n self._electronic_settings[key] = value\n else:\n print(\"key does not exist!! keys include: {prec_level, algo, encut , nelm,nelmin, ediff, sigma, lasph, lreal, addgrid, bmaxmix, bmix}\")",
"def _set_power(self, value: str):\n if value == STATE_ON:\n self.state[1] = self.state[1][:2] + '1' + self.state[1][3:]\n\n if value == STATE_OFF:\n self.state[1] = self.state[1][:2] + '0' + self.state[1][3:]",
"def setFlatImage(self, value=1.0):\n self.fimage = None\n self.image = numpy.zeros((self.ny, self.nx), 'float') + value\n return",
"def changeRingSetting(self):\n #Input code to accommodate function of Ring setting",
"def valuechange():\n\n tempmin.setMaximum(tempmax.value())\n tempmax.setMinimum(tempmin.value())\n hummin.setMaximum(hummax.value())\n hummax.setMinimum(hummin.value())\n\n self.variables.default_values_dict[\"settings\"][\n \"current_tempmin\"\n ] = tempmin.value()\n self.variables.default_values_dict[\"settings\"][\n \"current_tempmax\"\n ] = tempmax.value()\n self.variables.default_values_dict[\"settings\"][\n \"current_hummin\"\n ] = hummin.value()\n self.variables.default_values_dict[\"settings\"][\n \"current_hummax\"\n ] = hummax.value()\n\n max = build_command(\n self.variables.devices_dict[\"temphum_controller\"],\n (\"set_hummax\", hummax.value()),\n )\n min = build_command(\n self.variables.devices_dict[\"temphum_controller\"],\n (\"set_hummin\", hummin.value()),\n )\n\n self.variables.vcw.write(\n self.variables.devices_dict[\"temphum_controller\"], max\n )\n self.variables.vcw.write(\n self.variables.devices_dict[\"temphum_controller\"], min\n )",
"def Init(self):\r\n print(\"Initiating...\")\r\n if (self.Get_FullScale_Value() == self.FullScaleEnum[0]):\r\n self.gain = 0.00875\r\n elif (self.Get_FullScale_Value() == self.FullScaleEnum[1]):\r\n self.gain = 0.0175\r\n elif (self.Get_FullScale_Value() == self.FullScaleEnum[2]):\r\n self.gain = 0.07\r\n print(\"Gain set to:{0}\".format(self.gain))",
"def _isosurfaceItemChanged(self, event):\n if event == Item3DChangedType.ISO_LEVEL:\n self._updateIsosurfaces()",
"def trigger_level(self, value):\n self.lib.SetTriggerLevel(ct.c_float(value))",
"def async_set_level(self, value: int) -> None:\n value = max(0, min(255, value))\n self._position = int(value * 100 / 255)\n self.async_write_ha_state()",
"def setSolenoidCurrent(self, current):\n self.calc_level = min(self.calc_level, CALC_B_MAP - 1)\n self.solenoid.setSolenoidCurrent(current) # to reset solenoid calc",
"def set_z(self, value):\n\n # set the zero register if value is zero\n self.p &= ~(const.FLAG_ZERO)\n self.p |= const.FLAG_ZERO if value == 0b0 else 0b0",
"def set_volume(self, value):\n utils.set_volume(self.config[\"alsa\"][\"card\"], value) # Sets the actual volume level\n\n if value == 0:\n mode = \"muted\"\n elif value <= 25:\n mode = \"low\"\n elif value <= 75:\n mode = \"medium\"\n else:\n mode = \"high\"\n \n icon = utils.get_volume_icon(mode)\n self.settings_window.volume_label.setPixmap(icon)",
"def setAutoLevelFunction(self, autoLevel):\n assert callable(autoLevel)\n self._autoLevelFunction = autoLevel\n self._updateScenePrimitive()",
"def ft_sensor_set_zero(self):\r\n return self._arm.ft_sensor_set_zero()",
"def set_state(self, value):\n _LOGGER.debug(\"%s: Set state to %d\", self.entity_id, value)\n self._flag_state = True\n\n params = {ATTR_ENTITY_ID: self.entity_id}\n if value == 0:\n if self.char_current_state.value != value:\n self.char_current_state.set_value(3)\n self.call_service(DOMAIN, SERVICE_OPEN_COVER, params)\n elif value == 1:\n if self.char_current_state.value != value:\n self.char_current_state.set_value(2)\n self.call_service(DOMAIN, SERVICE_CLOSE_COVER, params)",
"def apply_settings(camera):\r\n camera.clear_mode = 0\r\n camera.exp_mode = \"Internal Trigger\"\r\n camera.readout_port = 0\r\n camera.speed_table_index = 0\r\n camera.gain = 1",
"def update_magnetic_settings(self, key, value):\n\n if self._magnetic_settings:\n if key in self._magnetic_settings:\n self._magnetic_settings[key] = value\n else:\n print(\"key does not exist!! keys include: {ispin, magmom, nupdown, saxis, lsorbit,noncollinear}\")\n else:\n print(\"magnetic settings not present!\")",
"def setImage(self, image=None, autoLevels=None, **kargs):\n profile = debug.Profiler()\n\n gotNewData = False\n if image is None:\n if self.image is None:\n return\n else:\n old_xp = self._xp\n cp = getCupy()\n self._xp = cp.get_array_module(image) if cp else numpy\n gotNewData = True\n processingSubstrateChanged = old_xp != self._xp\n if processingSubstrateChanged:\n self._processingBuffer = None\n shapeChanged = (processingSubstrateChanged or self.image is None or image.shape != self.image.shape)\n image = image.view()\n if self.image is None or image.dtype != self.image.dtype:\n self._effectiveLut = None\n self.image = image\n self._imageHasNans = None\n if self.image.shape[0] > 2**15-1 or self.image.shape[1] > 2**15-1:\n if 'autoDownsample' not in kargs:\n kargs['autoDownsample'] = True\n if shapeChanged:\n self.prepareGeometryChange()\n self.informViewBoundsChanged()\n\n profile()\n\n if autoLevels is None:\n if 'levels' in kargs:\n autoLevels = False\n else:\n autoLevels = True\n if autoLevels:\n level_samples = kargs.pop('levelSamples', 2**16) \n mn, mx = self.quickMinMax( targetSize=level_samples )\n # mn and mx can still be NaN if the data is all-NaN\n if mn == mx or self._xp.isnan(mn) or self._xp.isnan(mx):\n mn = 0\n mx = 255\n kargs['levels'] = [mn,mx]\n\n profile()\n\n self.setOpts(update=False, **kargs)\n\n profile()\n\n self._renderRequired = True\n self.update()\n\n profile()\n\n if gotNewData:\n self.sigImageChanged.emit()\n if self._defferedLevels is not None:\n levels = self._defferedLevels\n self._defferedLevels = None\n self.setLevels((levels))",
"def settemp(t=-10):\n print camera.SetTemperature(t)\n camera.status.update()",
"def __init__(self):\n super(SteklovBoundary, self).__init__()\n self.value = SteklovBoundary.value\n SteklovBoundary.value -= 1\n self.update(param=\"1\")",
"def set_value(self, on_level):\n if on_level in FanSpeedRange.OFF:\n fan_speed = FanSpeed.OFF\n elif on_level in FanSpeedRange.LOW:\n fan_speed = FanSpeed.LOW\n elif on_level in FanSpeedRange.MEDIUM:\n fan_speed = FanSpeed.MEDIUM\n else:\n fan_speed = FanSpeed.HIGH\n self.value = fan_speed",
"def initialize_dynamic_settings(self):\r\n self.alien_speed_factor = 0.1\r\n self.alien_bullet_speed_factor = 0.7",
"def Set(self,value):\n if value:\n onoff = 0x01\n else:\n onoff = 0x00\n self.Bus.Write_uInt8(self.Address,0x20+self.Pin, onoff)",
"def inversion_mode(self, value):\n if value:\n self._write(ST7789_INVON)\n else:\n self._write(ST7789_INVOFF)",
"def set_value(self, device_name, val):\n epics.caput(device_name, val)\n\n\t\t#mu = mu\n\t\t#sig = math.sqrt(abs(mu))\n\t\t#y = (float(x)-mu)/(sig)",
"def set_alpha(self, alpha=1.0):\r\n self.unif[17] = alpha",
"def set_power_management(value: int) -> None:",
"def iniitialize_dynamic_settings(self):\n\t\tself.speed_factor = 1.5\n\t\tself.bullet_speed_factor = 3\n\t\tself.alien_speed_factor = 1\n\n\t\t# fleet_direction of 1 represents right; -1 represents left. \n\t\tself.fleet_direction = 1",
"def setup_mode():\n status_label.color = WHITE\n status_label.text = \"-SET-\"\n\n ave_label.color = BLACK # Turn off average label and value display\n ave_value.color = BLACK\n\n max_value.text = str(MAX_RANGE_F) # Display maximum range value\n min_value.text = str(MIN_RANGE_F) # Display minimum range value\n\n time.sleep(0.8) # Show SET status text before setting parameters\n status_label.text = \"\" # Clear status text\n\n param_index = 0 # Reset index of parameter to set\n\n setup_state = \"SETUP\" # Set initial state\n while setup_state == \"SETUP\":\n # Select parameter to set\n setup_state = \"SELECT_PARAM\" # Parameter selection state\n while setup_state == \"SELECT_PARAM\":\n param_index = max(0, min(2, param_index))\n status_label.text = SETUP_COLORS[param_index][0]\n image_group[param_index + 226].color = BLACK\n status_label.color = BLACK\n time.sleep(0.25)\n image_group[param_index + 226].color = SETUP_COLORS[param_index][1]\n status_label.color = WHITE\n time.sleep(0.25)\n\n param_index -= get_joystick()\n\n _buttons = panel.events.get()\n if _buttons and _buttons.pressed:\n if _buttons.key_number == BUTTON_UP: # HOLD button pressed\n param_index = param_index - 1\n if _buttons.key_number == BUTTON_DOWN: # SET button pressed\n param_index = param_index + 1\n if _buttons.key_number == BUTTON_HOLD: # HOLD button pressed\n play_tone(1319, 0.030) # Musical note E6\n setup_state = \"ADJUST_VALUE\" # Next state\n if _buttons.key_number == BUTTON_SET: # SET button pressed\n play_tone(1319, 0.030) # Musical note E6\n setup_state = \"EXIT\" # Next state\n\n # Adjust parameter value\n param_value = int(image_group[param_index + 230].text)\n\n while setup_state == \"ADJUST_VALUE\":\n param_value = max(32, min(157, param_value))\n image_group[param_index + 230].text = str(param_value)\n image_group[param_index + 230].color = BLACK\n status_label.color = BLACK\n time.sleep(0.05)\n image_group[param_index + 230].color = SETUP_COLORS[param_index][1]\n status_label.color = WHITE\n time.sleep(0.2)\n\n param_value += get_joystick()\n\n _buttons = panel.events.get()\n if _buttons and _buttons.pressed:\n if _buttons.key_number == BUTTON_UP: # HOLD button pressed\n param_value = param_value + 1\n if _buttons.key_number == BUTTON_DOWN: # SET button pressed\n param_value = param_value - 1\n if _buttons.key_number == BUTTON_HOLD: # HOLD button pressed\n play_tone(1319, 0.030) # Musical note E6\n setup_state = \"SETUP\" # Next state\n if _buttons.key_number == BUTTON_SET: # SET button pressed\n play_tone(1319, 0.030) # Musical note E6\n setup_state = \"EXIT\" # Next state\n\n # Exit setup process\n status_label.text = \"RESUME\"\n time.sleep(0.5)\n status_label.text = \"\"\n\n # Display average label and value\n ave_label.color = YELLOW\n ave_value.color = YELLOW\n return int(alarm_value.text), int(max_value.text), int(min_value.text)",
"def set_Value(self, n_value):\n#Joerg S/Martin W advice\n self.StoredValue=n_value",
"def set_depth(self, depth):\n self._depth = depth\n if self._scaled_image_coordinates is None:\n self._initialise()\n else:\n self._update_depth()",
"def initialize_dynamic_settings(self):\n self.ship_speed = 5\n self.bullet_speed = 1.0\n self.alien_speed=1.0\n #fleet direction of 1 represents right -1 represents left\n self.fleet_direction = 1\n #scoring\n self.alien_points=50",
"def setUp(self):\n self.t = True\n self.f = False\n self.value = 25",
"def fill_tank(self):\r\n self.fuel_level = self.fuel_capacity",
"def set_z(self, value: int) -> None:\n assert -self.__max_value <= value and value <= self.__max_value\n\n should_sync = self.__z != value\n self.__z = value\n if should_sync:\n self.__sync_z()",
"def __set_mode(self, value):\n # update Nuke\n localization.setMode(str(value.lower()))\n # update panel UI\n logger.debug('disabling pause button: %s', value=='Off')\n # if the localization mode is off diasble pause and force widgets\n self.pauseBtn.setDisabled(value == 'Off')\n self.updateBtn.setDisabled(value == 'Off')\n self.__update_pause_icon()",
"def modify_sky(path, name, number, op, value):\n os.chdir(path)\n sky_levels = get(name, 'sky')\n sky_level = sky_levels[number]\n if op == '+':\n new_sky_level = sky_level + value\n elif op == '-':\n new_sky_level = sky_level - value\n sky_levels[number] = new_sky_level\n write(name, 'sky', sky_levels)\n generate_sky(name, number, new_sky_level)",
"async def set_init(self, value: int | float) -> bool:\n return await self.set_value(value, True)",
"def set_W0_unit(self, value):\n if self.lf_W0.text() != \"\":\n self.set_W0() # Update for deg if needed and call comp_output\n # Notify the machine GUI that the machine has changed\n self.saveNeeded.emit()",
"def initialize_dynamic_settings(self):\n self.ship_speed_factor = 1.5\n self.bullet_speed_factor = 3\n self.alien_speed_factor = 1\n self.fleet_direction = 1\n #Puntos\n self.alien_points = 50",
"def initial(self):\n\n self.var.Kfrost = loadmap('Kfrost')\n self.var.Afrost = loadmap('Afrost')\n self.var.FrostIndexThreshold = loadmap('FrostIndexThreshold')\n self.var.SnowWaterEquivalent = loadmap('SnowWaterEquivalent')\n\n # FrostIndexInit=ifthen(defined(self.var.MaskMap),scalar(loadmap('FrostIndexInitValue')))\n # self.var.FrostIndex=FrostIndexInit\n self.var.FrostIndex = loadmap('FrostIndexInitValue')\n # self.var.AfrostIndex=-(1-self.var.Afrost)*self.var.FrostIndex\n # initial Frost Index value",
"def set_value(self,x):\n self._value = x",
"def set_value(self,x):\n self._value = x",
"def setMyStatus(self):\n self.clearMyStatus()\n self.mass = self.myShipHull.mass\n for position, myQuad in self.quads.iteritems():\n self.maxBattery += myQuad.maxBattery\n self.currentPower += myQuad.maxPower\n self.thrust += myQuad.thrust\n self.rotation += myQuad.rotation\n self.radar += myQuad.radar\n self.jamming += myQuad.jamming\n self.repair += myQuad.repair\n self.mass += myQuad.mass\n self.maxAssault += myQuad.maxAssault\n\n # scale back attributes if internal structure has been hit\n ratio = self.currentISP/self.myShipHull.maxISP\n self.currentPower = self.currentPower * ratio\n self.thrust = self.thrust * ratio\n self.rotation = self.rotation * ratio\n\n self.accel = self.myDesign.getAccel(self.thrust, self.mass)\n self.accel = self.accel\n\n self.rotation = self.myDesign.getRotation(self.rotation, self.mass)\n self.rotation = self.rotation\n self.setMyStrength()\n self.setWeaponStatus()\n self.setRange()\n self.setAssaultStrength(ratio)",
"def antenna_set(self):",
"def light_set(self, pin='D13', value='0'):\n self.bridge.put(str(pin), str(value))",
"def autonomousInit(self):\n fieldState = self.driverStation.getGameSpecificMessage()\n self.fieldState = fieldState\n self.smartDashboard.putString(\"field state\", fieldState)\n fieldPosition = self.smartDashboard.getString(\"field position\", \"\")\n self.startingFieldPosition = self.parserobotFieldPosition(fieldPosition)\n self.smartDashboard.putNumber(\"position\", self.startingFieldPosition)\n \n #convert field states to our enum values \n self.ourSwitchSide = self.parserobotFieldPosition(self.fieldState[0])\n self.scaleSide = self.parserobotFieldPosition(self.fieldState[1])\n self.theirSwitchSide = self.parserobotFieldPosition(self.fieldState[2])\n if self.startingFieldPosition==self.kNothing:\n print(\"No field position set. Aborting\")\n return \n \n \n #self.Encoder.setMaxPeriod(.1)\n #self.Encoder.setMinRate(10)\n #self.Encoder.setDistancePerPulse(5)\n #self.Encoder.setReverseDirection(True)\n #self.Encoder.getDistance()\n \n \"\"\"self.Encoder.reset()\n while (self.Encoder.get() < value):\n drive\n delay\"\"\"\n \n \n \n \n \n \n \n #self.Encoder.getRawAxis()\n \n \n #todo change RRR to from fms, maybe parse it first\n \n self.autonomousProgram = commands.autonomousCommand.AutonomousProgram(self.startingFieldPosition)\n self.autonomousProgram.start()",
"def setPowerIfNecessary(self):\n if self.p.power == 0 and self.p.powerDensity > 0:\n self.setPowerFromDensity()",
"def __defaults__(self): \n self.tag = 'Constant-property atmosphere'\n self.composition = Data()\n self.composition.gas = 1.0",
"def ToggleAllGizmoLocalMode( self ):\n\n value = self.gizmoMgr.GetGizmoLocal( 'pos' )\n self.gizmoMgr.SetGizmoLocal( 'pos', not value )\n self.gizmoMgr.SetGizmoLocal( 'rot', not value )\n self.gizmoMgr.SetGizmoLocal( 'scl', not value )",
"def setLevels(self, levels, update=True):\n if self._xp is None:\n self.levels = levels\n self._defferedLevels = levels\n return\n if levels is not None:\n levels = self._xp.asarray(levels)\n self.levels = levels\n self._effectiveLut = None\n if update:\n self.updateImage()",
"def setHardness(self, hardness):\n self.__hardness = hardness\n self.scene.setShaderInput(\"props\", self.__ambient, self.__hardness, 0, 1)",
"def set_value(self, value: float):\n self.points[0, 0] = value\n return self",
"def set_Sensor(self, value):\n super(GetPathElevationInputSet, self)._set_input('Sensor', value)",
"def init_game_setting(self):\r\n pass",
"def zguider():\n gzero.gxoff = camera.status.guider[0] + gzero.gxoff\n gzero.gyoff = camera.status.guider[1] + gzero.gyoff\n guider(0,0)\n f = open('/data/guidezero','w')\n cPickle.dump(gzero,f)\n f.close()",
"def add_Cor_Value(self, input):\n self.corona_level = input",
"def gaindb(self, value):\n self._logger.debug(\"setting gain: %7.2f\", value)\n self._gaindb = value\n self._update()",
"def changeFlooring():\r\n\tif tileFloor.getVisible():\r\n\t\ttileFloor.visible(viz.OFF)\r\n\t\thardwoodFloor.visible(viz.ON)\r\n\telse:\r\n\t\ttileFloor.visible(viz.ON)\r\n\t\thardwoodFloor.visible(viz.OFF)",
"def fun_set(self):\n\n self.type.set(self.xtl._scattering_type)\n # self.energy_kev.set(8)\n self.theta_offset.set(self.xtl._scattering_theta_offset)\n self.theta_min.set(self.xtl._scattering_min_theta)\n self.theta_max.set(self.xtl._scattering_max_theta)\n self.twotheta_min.set(self.xtl._scattering_min_two_theta)\n self.twotheta_max.set(self.xtl._scattering_max_two_theta)\n\n if self.orientation.get() == 'Reflection':\n self.direction_h.set(self.xtl._scattering_specular_direction[0])\n self.direction_k.set(self.xtl._scattering_specular_direction[1])\n self.direction_l.set(self.xtl._scattering_specular_direction[2])\n else:\n self.direction_h.set(self.xtl._scattering_parallel_direction[0])\n self.direction_k.set(self.xtl._scattering_parallel_direction[1])\n self.direction_l.set(self.xtl._scattering_parallel_direction[2])",
"def set_value(self,x):\n self._value = float(x)",
"def vsSetValue(self, value):\n self._vs_value = float(value)",
"def _write_value(self, value):\n mapped_value = int(100.0 * (self.alpha * value + self.beta))\n speed = min(max(abs(mapped_value), 0), 100)\n self.set_speed(speed)\n if mapped_value > 0:\n self.rotate_forward()\n else:\n self.rotate_backward()",
"def viewerSettings():\n node = nuke.thisNode()\n node.knob('near').setValue(100)\n node.knob('far').setValue(500000)\n node.knob('grid_display').setValue(False)\n node.knob('gl_lighting').setValue(1)",
"def sketch(self, val):\n row, col = self.selected\n self.cubes[row][col].set_temp(val)",
"def setLevel(self, level):\n self.lvl = level",
"def vauto(self):\n # save our horizontal and trigger settings to restore later\n timebase = self.timebase\n trigger_type = self.trigger_type\n trigger_mode = self.trigger_mode\n # auto scale the whole scope\n self.auto()\n # restore the timebase and triggers so just the vertical is auto\n # scaled\n self.timebase = timebase\n self.trigger_type = trigger_type\n self.trigger_mode = trigger_mode",
"def _set_debug_mode(self, value):\n self.debug_mode = value\n self.l_info(\"_set_debug_mode\",\"%d\" % (self.debug_mode))\n self.set_driver('GV4', self.debug_mode, uom=25, report=True)\n self.logger.setLevel(self.debug_mode)\n return True",
"def set_W0(self):\n if self.c_W0_unit.currentIndex() == 0: # Rad\n self.slot.W0 = self.lf_W0.value()\n else:\n self.slot.W0 = self.lf_W0.value() / 180 * pi\n self.w_out.comp_output()\n # Notify the machine GUI that the machine has changed\n self.saveNeeded.emit()",
"def handlerSpinBoxLayerTransparencyValueChanged(self, newValue):\r\n self.sliderLayerTransparency.setValue(newValue)",
"def setValues(\n self,\n enforcement: SymbolicConstant = SURFACE_TO_SURFACE,\n thickness: Boolean = ON,\n smooth: float = 0,\n contactControls: str = \"\",\n ):\n pass",
"def _set_z_size(self):\n self._level_gen.size = (self._level_gen.size[X],\n self._level_gen.size[Y],\n self._level_size_z_spinbox.value())\n self._refresh_view()",
"def svn_info_t_depth_set(svn_info_t_self, svn_depth_t_depth): # real signature unknown; restored from __doc__\n pass",
"def auto(self):\n self.set_thermostat = 1 if self.desired_values[0] > self.data[0] else 0\n self.set_humidifier = 1 if self.desired_values[1] > self.data[1] else 0\n self.set_sprinklers = 1 if self.desired_values[2] > self.data[2] else 0\n self.set_ventilation = 1 if (self.desired_values[3] > self.data[3] or self.desired_values[4] < self.data[4]) else 0",
"def init_game_setting(self):\n self.state.state_counter_while_testing += 1",
"def setValue(self, value):\r\n # Clamp values to [0,1]\r\n self.__value = max(0, min(value, 1))",
"def elevation_servo_save(self, name: str = None, force: bool = False):\n self.servo_config.set(\n \"elevation\",\n {\n \"min\": self.elevation_servo.get_min_position(),\n \"max\": self.elevation_servo.get_max_position()\n }\n )\n self.servo_config.save(name, force=force)",
"def Set(self,value):\n self.Bus.Write_uInt8(self.Address,0x50+self.Pin,value)",
"def __set_max_value(self, value: int) -> None:\n self.__max_value = value * 2000\n half_value = self.__max_value // 2\n\n self.__x_spinbox.configure(from_=-half_value, to=half_value)\n self.__x_scale.configure(from_=half_value, to=-half_value)\n self.__y_spinbox.configure(from_=-half_value, to=half_value)\n self.__y_scale.configure(from_=-half_value, to=half_value)\n self.__z_spinbox.configure(from_=-half_value, to=half_value)\n self.__z_scale.configure(from_=half_value, to=-half_value)",
"def reset_energizer_flag(self): \r\n self.energizer_flag = False",
"def setPowerFromDensity(self):\n self.p.power = self.p.powerDensity * self.getHMMass()",
"def goToZero():\n #on remet tout à zero\n usr_choice = 0\n fonctions_pfc.x = 0\n fonctions_pfc.y = 0\n fonctions_pfc.result = \"\"\n fonctions_pfc.pc_score = \" SCORE DU PC : \"\n fonctions_pfc.usr_score = \" SCORE DU PC : \"\n #on ré-affiche tous les composants\n display_pack_component()",
"def z_flag(self):\n if self.calibrationlogflag:\n self.calibrationlog = DEFAULT_CALIBRATIONLOG_D3S",
"def __init__(self, value):\n if isinstance(value, Pressure):\n value = Pascal(value.base_value).atmosphere.value\n self.base_value = value * 101325.0\n self.value = float(value)",
"def _setEnergy(self, eflag):\n ie = 1\n if eflag == 'off' or eflag == 0:\n ie = 0\n if self._verbose:\n if ie:\n print 'enabling energy equation for reactor',self._name\n else:\n print 'disabling energy equation for reactor',self._name \n _cantera.reactor_setEnergy(self.__reactor_id, ie)",
"def set_level(self, x, level):\n return x * 10 ** ((level - self.ref_level) / 20)",
"def rec_default(self):\n self.phase_triggers.setText('(0,1,320)')\n self.phase_min.setText('-1.57')\n self.phase_max.setText('1.57')",
"def setupNextLevel(self):\r\n self.shield_indicator.image = self.greenShield\r\n self.captured = 0\r\n self.level += 1\r\n self.num_farmers += 1\r\n self.num_cows += 1",
"def setMode(self):\n if self.currentTarget != None and self.finishedAssault == 0:\n if self.isAssault == 1:\n if self.currentTarget != None:\n self.mode = 'assault'\n else:\n self.mode = 'escape'\n else:\n self.log.debug('COUNT: %s: %s TARGET-> %s' % (self.myGalaxy.count, self.name, self.currentTarget.name))\n ##self.myGalaxy.resultList.append('COUNT: %s: %s TARGET-> %s' % (self.myGalaxy.count, self.name, self.currentTarget.name))\n if ((len(self.activeWeapons) == 0 or (self.currentISP/self.myShipHull.maxISP) < 0.7)) and self.__module__ == 'anw.war.ship':\n self.mode = 'escape'\n else:\n range = funcs.getTargetRange(self.posX, self.posY, self.currentTarget.posX, self.currentTarget.posY)\n if range <= self.range:\n self.mode = 'engage'\n else:\n self.mode = 'close'\n else:\n self.mode == 'escape'\n if globals.serverMode == 0:\n self.shipsim.updateShipMode()",
"def set_invvar(self, filename):\n\tself._properties[\"var\"] = 1.0/pf.getdata(filename)",
"def extents(self, value):\n\n self._local = value\n if self.is_attached:\n if self._local is None:\n self[\"local\"] = self._global\n else:\n self[\"local\"] = self._local\n self[\"clipping\"] = self._clipping\n self[\"transform\"] = self._transform",
"def predeposition(self):\n# status = self.mks146.settings['read_MFC0_valve']\n# if status == 'O' or status == 'C':\n# self.mks146.settings['set_MFC0_valve'] = 'N'\n# time.sleep(1)\n# self.mks146.settings['set_MFC0_SP'] = 0.7\n self.settings['predeposition'] = True"
] | [
"0.59382164",
"0.58852255",
"0.5742166",
"0.5726637",
"0.57214034",
"0.571938",
"0.5709026",
"0.56838727",
"0.56748176",
"0.5579223",
"0.55515665",
"0.55400157",
"0.55338275",
"0.5522072",
"0.54870933",
"0.5481656",
"0.5459882",
"0.5447825",
"0.54363453",
"0.5433698",
"0.5425798",
"0.53781307",
"0.53757036",
"0.5367045",
"0.5360007",
"0.5359763",
"0.5359451",
"0.5346631",
"0.5334891",
"0.53202844",
"0.53075165",
"0.5303693",
"0.53026134",
"0.52968633",
"0.52914274",
"0.52810496",
"0.5279703",
"0.52769166",
"0.5272275",
"0.5259145",
"0.52551377",
"0.525475",
"0.5242064",
"0.52415174",
"0.52382255",
"0.5236581",
"0.52345014",
"0.52318746",
"0.52295136",
"0.5228005",
"0.5228005",
"0.52246284",
"0.52210265",
"0.52100205",
"0.5209788",
"0.52040243",
"0.51910144",
"0.51840264",
"0.5180016",
"0.517828",
"0.5175845",
"0.517496",
"0.51631534",
"0.51613915",
"0.5156591",
"0.51549685",
"0.51533604",
"0.5147965",
"0.5139623",
"0.51382965",
"0.51221436",
"0.5116141",
"0.5115904",
"0.5113176",
"0.51028585",
"0.50969064",
"0.5096086",
"0.50892645",
"0.50861037",
"0.5085118",
"0.50848895",
"0.50842726",
"0.5072547",
"0.5072545",
"0.5068626",
"0.5068431",
"0.5068301",
"0.5068279",
"0.5066216",
"0.5064847",
"0.5061413",
"0.5060422",
"0.50603753",
"0.5056436",
"0.5056338",
"0.5053836",
"0.50520045",
"0.50461155",
"0.5043831",
"0.50416815"
] | 0.6042807 | 0 |
True if the isolevel is rebuilt for each data set. | def isAutoLevel(self):
return self.getAutoLevelFunction() is not None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_flat(self):\n if self.master:\n return self.master.is_flat\n\n return len(self.levels) == 1",
"def is_dirty(self):\n return True in [n.is_dirty for n in self.nodes]",
"def is_populated(self) -> bool:\n return 0 < self.count_compounds()",
"def expand(self) -> bool:\n self.__build_iteration()\n return len(self.__trees.keys()) == 0",
"def is_map_updated(self):\r\n self.old_obs_len =0\r\n if len(self.obs_ls[0])!= self.old_obs_len:\r\n self.old_obs_len =len(self.obs_ls[0])\r\n return True\r\n return False",
"def is_reducing(self):\n return bool(set(self.kind) & set(\"XYZ\"))",
"def is_alld(self):\n g = self.get_gene().get_seq()\n for i in range(1, len(g)):\n if g[i] is 'c':\n return False\n return True",
"def is_allc(self):\n g = self.get_gene().get_seq()\n for i in range(1, len(g)):\n if g[i] is 'd':\n return False\n return True",
"def is_dirty(self):\n if self.sub_plugs:\n for sub_plug in self.sub_plugs.values():\n if sub_plug.is_dirty:\n return True\n return False\n return self._is_dirty",
"def overwrite_all ( self ):\n return self.value == self.OV_ALL",
"def is_full(self):\n return len(self.keys) == self.order",
"def has_oms(self) -> bool:\n if not self.has_metal:\n raise NoMetal(\"This structure does not contain a metal\")\n if self._has_oms is not None: # pylint:disable=no-else-return\n return self._has_oms\n else:\n for site_index in self.metal_indices:\n if self.is_site_open(site_index):\n self._has_oms = True\n return True\n self._has_oms = False\n return False",
"def needs_rebuild(self):\n for p in self.sys.particles:\n dr = p.r - self.old_pos[p.id]\n dr.apply_periodic(self.sys.box)\n if dr.length() >= 0.5*self.pad:\n return True \n return False",
"def _update_same(self, update_set):\n for upd in update_set:\n cupd = None\n for rd, wrt, inst in self.syncinfo.rd_wrt_list:\n log.debug(\" UPD0-CHK: %s - RD: %s - WRT: %s [%s]\" \\\n % (upd ,rd, wrt, inst))\n if wrt == \"\":\n continue\n if upd == wrt:\n if self._is_updated(rd):\n log.debug(\" UPD0-FAIL: %s - RD: %s - WRT: %s [%s]\"\\\n % (upd ,rd, wrt, inst))\n return False\n elif AsmParser.is_register(wrt):\n cupd = self.arch.expand_reg_expr(upd) if not cupd else cupd\n cwrt = self.arch.expand_reg_expr(wrt)\n if self._overlap_cvars(cupd, cwrt) != None:\n if self._is_updated(rd):\n log.debug(\" UPD1: %s - RD: %s - WRT: %s [%s]\" \\\n % (upd ,rd, wrt, inst))\n return False\n return True",
"def already_processed(self):\n # If the flag file has been created by a previous run\n # or if any of the rules have already been re-ordered\n # then we shouldn't make any more changes and instead\n # the system needs to be rebooted.\n return self.syspaths.flag_exists",
"def isOpen(self):\n return self.analyzed_digest != {}",
"def initialized(self):\n return len(self.ops) > 0",
"def _update_level_data(self):\n\t\t# taxes, inhabitants\n\t\tself.tax_base = self.session.db.get_settler_tax_income(self.level)\n\t\tself.inhabitants_max = self.session.db.get_settler_inhabitants_max(self.level)\n\t\tif self.inhabitants > self.inhabitants_max: # crop settlers at level down\n\t\t\tself.inhabitants = self.inhabitants_max\n\n\t\t# consumption:\n\t\t# Settler productions are specified to be disabled by default in the db, so we can enable\n\t\t# them here per level.\n\t\tcurrent_lines = self.get_production_lines()\n\t\tfor (prod_line,) in self.session.db.get_settler_production_lines(self.level):\n\t\t\tif not self.has_production_line(prod_line):\n\t\t\t\tself.add_production_by_id(prod_line)\n\t\t\t# cross out the new lines from the current lines, so only the old ones remain\n\t\t\tif prod_line in current_lines:\n\t\t\t\tcurrent_lines.remove(prod_line)\n\t\tfor line in current_lines[:]: # iterate over copy for safe removal\n\t\t\t# all lines, that were added here but are not used due to the current level\n\t\t\tself.remove_production_by_id(line)\n\t\t# update instance graphics\n\t\tself.update_action_set_level(self.level)",
"def is_populated(self):\n return len(self.__entries) > 0",
"def _computeIsosurface(self):\n data = self.getData(copy=False)\n\n if data is None:\n if self.isAutoLevel():\n self._level = float('nan')\n\n else:\n if self.isAutoLevel():\n st = time.time()\n try:\n level = float(self.getAutoLevelFunction()(data))\n\n except Exception:\n module_ = self.getAutoLevelFunction().__module__\n name = self.getAutoLevelFunction().__name__\n _logger.error(\n \"Error while executing iso level function %s.%s\",\n module_,\n name,\n exc_info=True)\n level = float('nan')\n\n else:\n _logger.info(\n 'Computed iso-level in %f s.', time.time() - st)\n\n if level != self._level:\n self._level = level\n self._updated(Item3DChangedType.ISO_LEVEL)\n\n if numpy.isfinite(self._level):\n st = time.time()\n vertices, normals, indices = MarchingCubes(\n data,\n isolevel=self._level)\n _logger.info('Computed iso-surface in %f s.', time.time() - st)\n\n if len(vertices) != 0:\n return vertices, normals, indices\n\n return None, None, None",
"def __is_modification_legal_in_current_mode(self):\n self.__is_collection_close()\n if self.__mode == 'r':\n from ir_log import IRLog\n IRLog.get_instance().println(\n 'Error! Cannot write to collection being opened in read mode.')\n assert False",
"def _updateIsosurfaces(self):\n # Sorting using minus, this supposes data 'object' to be max values\n sortedIso = sorted(self.getIsosurfaces(),\n key=lambda isosurface: - isosurface.getLevel())\n self._isogroup.children = [iso._getScenePrimitive() for iso in sortedIso]",
"def is_resumable(self, tag=\"current\"):\n\n flag_file = os.path.join(self._dir_name, tag, \"flag.p\")\n if os.path.isfile(flag_file):\n return True\n else:\n return False",
"def d_flag(self):\n if self.datalog:\n self.datalogflag = True",
"def d_flag(self):\n if self.datalog:\n self.datalogflag = True",
"def verify(self):\n D,S,I,C = False,False,False,False\n if self.geoData and os.path.exists(self.geoData):\n D = True\n if self.scales:\n S = True\n if type(self.idVariable) == int:\n I = True\n if self.cacheFile:\n C = True\n if D and S and I and C:\n return True\n return False",
"def __bool__(self):\n return len(self._states_) > 0",
"def master(self):\n return self.depth == 0",
"def is_full(self):\n core_full = self.drone.complete() and self.subject.complete()\n if self.peds is None:\n return core_full\n else:\n return core_full and all([p.complete() for p in self.peds.values()])",
"def converged(self):\n if len(self.rundir) >= 2:\n if io.ionic_steps(self.rundir[-1]) <= 3:\n return True\n if self.settings[\"nrg_convergence\"] != None:\n if io.job_complete(self.rundir[-1]) and io.job_complete(self.rundir[-2]):\n o1 = io.Oszicar(os.path.join(self.rundir[-1],\"OSZICAR\"))\n o2 = io.Oszicar(os.path.join(self.rundir[-2],\"OSZICAR\"))\n if abs( o1.E[-1] - o2.E[-1]) < self.settings[\"nrg_convergence\"]:\n return True\n\n return False",
"def is_full(self):\n return len(self.keys) > self.m",
"def is_full(self) -> bool:",
"def _check_integrity(self):\n root = self.root\n for scene_name in self.scene_list:\n if not(os.path.isdir(os.path.join(root,scene_name)) and \n os.path.isdir(os.path.join(root,scene_name, images_dir)) and\n os.path.isfile(os.path.join(root,scene_name,annotation_filename))):\n return False\n return True",
"def _check_integrity(self):\n root = self.root\n for scene_name in self.scene_list:\n if not(os.path.isdir(os.path.join(root,scene_name)) and \n os.path.isdir(os.path.join(root,scene_name, images_dir)) and\n os.path.isfile(os.path.join(root,scene_name,annotation_filename))):\n return False\n return True",
"def refresh(self):\n hasChanged = self.hasChanged()\n if hasChanged: self.loadIni()\n if len(self.loadFiles) > 255:\n del self.loadFiles[255:]\n self.safeSave()\n return hasChanged",
"def test_check_cds_5(self):\n self.eval_flags[\"check_locus_tag\"] = False\n self.eval_flags[\"check_gene\"] = False\n self.eval_flags[\"check_description\"] = False\n self.eval_flags[\"check_description_field\"] = False\n import_genome.check_cds(self.cds1, self.eval_flags)\n self.assertEqual(len(self.cds1.evaluations), 8)",
"def is_full(self):\n return self.name and self.variables and self.assumptions and self.guarantees",
"def fusable(self) -> bool:\n if not self._pre_check() or not self.has_crossing_len2_ob():\n return False\n new_tiling = self._tiling.add_obstructions(self.obstructions_to_add())\n\n return (\n self._tiling == new_tiling\n and self._check_isolation_level()\n and all(\n self._can_component_fuse_assumption(assumption)\n for assumption in self._tiling.assumptions\n )\n )",
"def isDirty(self):\n\t#@DEBUG christophe have to fix denoising optionnal issue prior to set isDirty() to True\n return False",
"def _check_by_changing():\n current_settings = read_from_archive(\n archive_path, TRAINING_SETTINGS_FILENAME\n )\n\n is_changed = False\n\n for key, obj in current_settings.items():\n if key == \"mark_up_source\":\n if obj != training_settings[key]:\n is_changed = True\n break\n elif key == \"bug_resolution\":\n current_metrics = {resolution[\"value\"] for resolution in obj}\n new_metrics = {\n resolution[\"value\"]\n for resolution in training_settings[\"bug_resolution\"]\n }\n if current_metrics.difference(new_metrics):\n is_changed = True\n break\n else:\n old_areas_of_testing = {\n entity[\"area_of_testing\"]: entity[\"entities\"]\n for entity in obj\n }\n new_areas_of_testing = {\n entity[\"area_of_testing\"]: entity[\"entities\"]\n for entity in training_settings[key]\n }\n for iteration, key_ in enumerate(old_areas_of_testing, 1):\n if key_ not in new_areas_of_testing or set(\n old_areas_of_testing[key_]\n ).difference(set(new_areas_of_testing[key_])):\n is_changed = True\n break\n\n if is_changed:\n delete_training_data(archive_path)",
"def cached(self, args) -> bool:\n return all([art.built for art in self.artifacts])",
"def isSetPersistent(self):\n return _libsbml.Trigger_isSetPersistent(self)",
"def is_clean(self):\n return not self._modified",
"def is_modified(self):\n return len(self.modified_fields) > 0",
"def is_full(self):\n return False",
"def valid_update_flags(self) -> bool:\n if CoronaCaseRaw.objects.all().count() < 2:\n return True\n return not CoronaCaseRaw.objects.filter(update_flag=(not self.latest_flag())).exists()",
"def isSceneModified(self):\n logger.debug(\"Func: isSceneModified\")\n return nuke.modified()",
"def is_multi_level(self):\n return self.is_flag_set(StatefulParser.FLAGS.MULTI_LEVEL)",
"def flagSet():\r\n for flag in flags:\r\n if flags[flag]:\r\n return True\r\n return False",
"def is_modified(self):\n return self._original_sections != self._sections",
"def is_saved(self):\n return self._slicerIsSaved",
"def has_data(self):\n return ([0] != self.__contexts) and ([0] != self.__weights)",
"def scene_is_modified():\n\n pass",
"def isdirty(self):\n\n return not not self._olddata",
"def __is_new_save(self):\n last_save = self.__get_last_save()\n new_save = self.__create_save()\n for signal in new_save:\n if signal in last_save:\n for attribut in new_save[signal]:\n if attribut in last_save[signal]:\n if new_save[signal][attribut] == last_save[signal][attribut]:\n return False\n else:\n return True\n else:\n return True\n else:\n return True",
"def needs_rebuild(self) -> bool:\n old_hash = self._cache.get(\"config\", None)\n new_hash = utilities.hash_object_sha256(self._get_config_raw())\n self._cache[\"config\"] = new_hash\n\n if not old_hash:\n return False\n return old_hash != new_hash",
"def tile_is_set(index, level_map):\n return level_map[index] != -1",
"def is_solution(self):\n # Only need to check the length because the configuration expansion assesses the feasibility.\n return len(self._path) == self._N",
"def isSet(self) -> bool:\n ...",
"def _can_update(self):\r\n if not self._is_persisted: return False\r\n pks = self._primary_keys.keys()\r\n return all([not self._values[k].changed for k in self._primary_keys])",
"def isSetInitialLevel(self):\n return _libsbml.QualitativeSpecies_isSetInitialLevel(self)",
"def hasChanged(self):\r\n if self.is_updated:\r\n self.is_updated = False\r\n return True\r\n else:\r\n return False\r\n\r\n # if not self.hasBeenUpdatedOnce:\r\n # self.hasBeenUpdatedOnce = True\r\n # return True\r\n # else:\r\n # if BLENDER_MODE == 'BPY':\r\n # # for e in dir(self.obj): print(e)\r\n # # print(self.obj, self.obj.name, self.obj.is_updated, self.obj.is_updated_data)\r\n # # return self.obj.is_updated # DOESN't UPDATE A THING!\r\n # # return True\r\n # return self.is_updated\r\n\r\n # return False # no update in BGE mode\r",
"def isComplete(self):\n for n in range(9):\n for m in range(9):\n if self.puzzle[n][m] == 0:\n return False\n return True",
"def checkIncToSets(_session, _el, _sets, _arc_type):\n for set in _sets:\n if _session.search_one_shot(_session.sc_constraint_new(sc_constants.CONSTR_3_f_a_f,\n set,\n sc.SC_ARC | _arc_type,\n _el), True, 3) is None:\n return False\n \n return True",
"def need_update(self):\n self.logging.debug( \"need_update()\" )\n\n for name in self.tables:\n\n md5 = self.dbs_tables[name]['md5']\n test = get_md5(self.dbs_tables[name]['path'])\n\n self.logging.debug('(%s) table:%s md5:[old: %s new: %s]' % \\\n (self.db,name,md5,test) )\n\n if test != md5: return True\n\n return False",
"def all_seen_fun(self):\n return self.get_all_j(self.id) and \\\n (set(self.get_fd_part_j(self.id)) <= (self.all_seen | {self.id}))",
"def _check_already_present(self, new_da):\n for da in self:\n self._id_of_DataArrays_equal(da, new_da)",
"def iso_equal(self):\n\n if date(self.time_stamp.year, 1, 1).weekday() in (0, 1, 2, 3, 6):\n return True\n return False",
"def update(self):\n self.haveCouncil = len(self.councils()) > 0",
"def stale_info_type_4(self):\n if self.get_fd_part_j(self.id) == []:\n type_4_a = False\n else:\n type_4_a = True\n for k in self.get_fd_part_j(self.id):\n different_fd = self.get_fd_j(self.id) != self.get_fd_j(k)\n different_fd_part = self.get_fd_part_j(self.id) != self.get_fd_part_j(k)\n if different_fd or different_fd_part:\n type_4_a = False\n type_4_b = self.get_config_j(self.id) not in [constants.BOTTOM, constants.NOT_PARTICIPANT]\n type_4_c = True\n for k in self.get_fd_part_j(self.id):\n if k in self.get_config_j(self.id):\n type_4_c = False\n type_4 = type_4_a and type_4_b and type_4_c\n if type_4:\n logger.debug(\"Stale info (type 4) found!\")\n return type_4",
"def open_workbooks(self):\n try:\n self.wb_alm = load_workbook(self.fn_alm)\n self.wb_defect = load_workbook(self.fn_defect)\n self.wb_enhancement = load_workbook(self.fn_enhancement)\n self.wb_incident = load_workbook(self.fn_incident)\n self.wb_destination = load_workbook(self.fn_destination)\n\n self.wb_alm.iso_dates = True\n self.wb_defect.iso_dates = True\n self.wb_enhancement.iso_dates = True\n self.wb_incident.iso_dates = True\n self.wb_destination.iso_dates = True\n except Exception as e:\n self.error(str(e))\n return False\n\n return True",
"def isInitialized(self):\n\t\tif self.isTypeSet and self.isCfgSet:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False",
"def check_if_reduce_needed(vars_to_modify):\n for var in vars_to_modify:\n if len(var.dimensions) > 2 and var[0,0,:].mask.all() and \\\n var[-1,1,:,:].mask.all():\n return True\n return False",
"def fcoe_dirty(self):\n return any(c.dirty or c.renames_remaining for c in self.fcoe_confs)",
"def stock_level_exposures(self) -> bool:\n return self.__stock_level_exposures",
"def alreadyStored(self, date_):\r\n for day in self._repo:\r\n if day.date == date_:\r\n return True\r\n return False",
"def _iswritemode(self, mode):\n\n # Currently only used to test the bz2 files.\n _writemodes = (\"w\", \"+\")\n for c in mode:\n if c in _writemodes:\n return True\n return False",
"def dirty(self):\n return not self.consistent",
"def equal_levels(self, other: MultiIndex) -> bool:\n if self.nlevels != other.nlevels:\n return False\n\n for i in range(self.nlevels):\n if not self.levels[i].equals(other.levels[i]):\n return False\n return True",
"def replication_log_archiving_enabled(self) -> bool:\n return pulumi.get(self, \"replication_log_archiving_enabled\")",
"def at_least_one_alive(self, containers):\n for container in self.get_standard_containers(containers):\n # Update container variables so that status is accurate.\n container.container.reload()\n if container.container.status != 'exited':\n return True\n return False",
"def is_full(self):",
"def dirty(self) -> bool:\n return len(self.detect_changed_files()) != 0",
"def exclusive_arch(pathogen_groups_set, collapse_pathogen_groups):\n if len(pathogen_groups_set) == 1:\n return True\n\n # Only check pathogen grouping when the flag is on\n if collapse_pathogen_groups:\n if len(pathogen_groups_set) > 2:\n return False\n if 0 in pathogen_groups_set and 1 in pathogen_groups_set:\n return True\n if 3 in pathogen_groups_set and 4 in pathogen_groups_set:\n return True\n return False",
"def HasRestored(self):\n\n return self.HasRestoredProp",
"def _copyDataSetsForPlates(self):\n\n # Get the plates (if some exist)\n plates = self._getAllPlates()\n if len(plates) == 0:\n return True\n\n # Now iterate over the plates, retrieve their datasets and fcs files\n # and copy them to the plate subfolders\n for plate in plates:\n if not self._copyDataSetsForPlate(plate):\n self._message = \"Could not retrieve datasets for plate.\"\n self._logger.error(self._message)\n return False\n\n # Return\n return True",
"def checkMetamodelLevel(cls):\n for mmd in cls.metamodelDependencies():\n mmd.check()",
"def refreshInstallersNeeded(self):\n for archive in dirs['installers'].list():\n apath = dirs['installers'].join(archive)\n if not apath.isfile() or not archive.cext in ('.7z','.zip','.rar'):\n continue\n installer = self.data.get(archive)\n if not installer or (installer.size,installer.modified) != (apath.size,apath.mtime):\n return True\n return False",
"def isDataChanged(self):\n return bool(self._dirty or self.ui.channelEditor.getQModel().isDataChanged() or self._dirtyMntGrps)",
"def is_infrastructure (self):\n return sum([1 for i in self.infras]) != 0",
"def test_check_cds_3(self):\n self.eval_flags[\"check_gene\"] = False\n import_genome.check_cds(self.cds1, self.eval_flags)\n self.assertEqual(len(self.cds1.evaluations), 11)",
"def _is_vessel_full(self):\n return np.size(np.where(self.end_of_lanes + (np.ones(self.lanes) * self.minimal_package) <= self.rows)) == 0",
"def stable(self):\n return(self.zeta > 0)",
"def built(self) -> bool:\n raise NotImplementedError()",
"def _checkIntegrity(self):\n return (\n os.path.isfile(os.path.join(self._root, 'relu5-3/train.pkl'))\n and os.path.isfile(os.path.join(self._root, 'relu5-3/test.pkl')))",
"def refreshRenamed(self):\n changed = False\n pRenamed = dirs['mods'].join('Mash','Official_Local.csv')\n if not pRenamed.exists():\n changed = bool(Installer.off_local)\n self.renamedSizeDate = (0,0)\n Installer.off_local.clear()\n elif self.renamedSizeDate != (pRenamed.size,pRenamed.mtime):\n self.renamedSizeDate = (pRenamed.size,pRenamed.mtime)\n off_local = {}\n reader = bolt.CsvReader(pRenamed)\n for fields in reader:\n if len(fields) < 2 or not fields[0] or not fields[1]: continue\n off,local = map(string.strip,fields[:2])\n if not reModExt.search(off) or not reModExt.search(local): continue\n off,local = map(GPath,(off,local))\n if off != local: off_local[off] = local\n reader.close()\n changed = (off_local != Installer.off_local)\n Installer.off_local = off_local\n #--Refresh Installer mappings\n if changed:\n for installer in self.data.itervalues():\n installer.refreshDataSizeCrc()\n #--Done\n return changed",
"def compile_levels():\n \n for ogmo_filename in [x for x in os.listdir(MAP_SRC_DIR) if x.endswith('.oel')]:\n ogmo_path = os.path.join(MAP_SRC_DIR, ogmo_filename)\n ogmo_flattened_path = os.path.join(MAP_COMPILED_DIR, ogmo_filename)\n\n if os.path.exists(ogmo_flattened_path):\n if os.path.getmtime(ogmo_flattened_path) > os.path.getmtime(ogmo_path):\n sys.stdout.write(\"--%s up to date\\n\" % ogmo_flattened_path)\n continue\n \n flatten_ogmo_tilemaps(ogmo_path, ogmo_flattened_path)",
"def _optimized(self):\n return False",
"def an_check(self):\n\t\tfor filles in self.xelt:\n\t\t\t# parcours rapide des branches niveau 1\n\t\t\tif search(r'analytic$', filles.tag):\n\t\t\t\treturn True\n\t\treturn False",
"def check_modified(self) -> bool:\n return bool(self._modified)"
] | [
"0.5513903",
"0.53375137",
"0.53244",
"0.5214366",
"0.52118033",
"0.51006144",
"0.509896",
"0.5054429",
"0.50461084",
"0.504185",
"0.5018518",
"0.5013366",
"0.4990533",
"0.49454755",
"0.49356946",
"0.4934179",
"0.49291185",
"0.49103767",
"0.49099454",
"0.49048275",
"0.48948747",
"0.4884365",
"0.48760533",
"0.48669827",
"0.48669827",
"0.48537818",
"0.48466",
"0.48430428",
"0.48398632",
"0.4832185",
"0.48225844",
"0.48129717",
"0.48066926",
"0.48066926",
"0.48054612",
"0.48027995",
"0.48003778",
"0.47995883",
"0.47980976",
"0.4792096",
"0.47876817",
"0.47842366",
"0.47816935",
"0.47780228",
"0.47771004",
"0.47745222",
"0.4773108",
"0.47730818",
"0.47582614",
"0.47558206",
"0.4750498",
"0.47478268",
"0.4743043",
"0.47427812",
"0.47425693",
"0.47369927",
"0.47284356",
"0.47273716",
"0.47263896",
"0.47256792",
"0.4712061",
"0.47057086",
"0.47027838",
"0.46972808",
"0.46954826",
"0.46933252",
"0.46877712",
"0.46732986",
"0.46707034",
"0.46664947",
"0.4664873",
"0.4658409",
"0.46542677",
"0.46525192",
"0.46508735",
"0.4650534",
"0.4647449",
"0.4645019",
"0.46404693",
"0.4638211",
"0.4637382",
"0.46362647",
"0.46355352",
"0.46318585",
"0.46273014",
"0.4622901",
"0.4620458",
"0.46193284",
"0.46186283",
"0.46127516",
"0.46109065",
"0.46106738",
"0.46033016",
"0.4600698",
"0.4595067",
"0.45942524",
"0.45936036",
"0.4591061",
"0.45897368",
"0.45806885"
] | 0.46711308 | 68 |
Return the function computing the isolevel (callable or None) | def getAutoLevelFunction(self):
return self._autoLevelFunction | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _func(self):\n return self._get_flint_func(self.domain)",
"def _get_isis_level(self):\n return self.__isis_level",
"def poly_level(f):\n if poly_univariate_p(f):\n return 1\n else:\n return 1 + poly_level(poly_LC(f))",
"def getFunction(self) -> ghidra.program.model.listing.Function:\n ...",
"def _computeIsosurface(self):\n data = self.getData(copy=False)\n\n if data is None:\n if self.isAutoLevel():\n self._level = float('nan')\n\n else:\n if self.isAutoLevel():\n st = time.time()\n try:\n level = float(self.getAutoLevelFunction()(data))\n\n except Exception:\n module_ = self.getAutoLevelFunction().__module__\n name = self.getAutoLevelFunction().__name__\n _logger.error(\n \"Error while executing iso level function %s.%s\",\n module_,\n name,\n exc_info=True)\n level = float('nan')\n\n else:\n _logger.info(\n 'Computed iso-level in %f s.', time.time() - st)\n\n if level != self._level:\n self._level = level\n self._updated(Item3DChangedType.ISO_LEVEL)\n\n if numpy.isfinite(self._level):\n st = time.time()\n vertices, normals, indices = MarchingCubes(\n data,\n isolevel=self._level)\n _logger.info('Computed iso-surface in %f s.', time.time() - st)\n\n if len(vertices) != 0:\n return vertices, normals, indices\n\n return None, None, None",
"def get_function(self):\n return Gumtree.gumtree.getFunction()",
"def func ( self ) :\n return self.__func",
"def define_scalar_functions(self):\n\n # Exit if functions have already been defined.\n # A function decorator might work better here...\n if hasattr(self, 'pressure'):\n return None\n\n if self.config['material']['incompressible']:\n self.pressure = dlf.Function(self.scalarSpace, name='p')\n\n if self.config['formulation']['time']['unsteady']:\n self.pressure0 = dlf.Function(self.scalarSpace, name='p0')\n else:\n self.pressure0 = 0\n\n self.test_scalar = dlf.TestFunction(self.scalarSpace)\n self.trial_scalar = dlf.TrialFunction(self.scalarSpace)\n else:\n self.pressure = 0\n self.pressure0 = 0\n self.test_scalar = None\n self.trial_scalar = None\n\n # Apply initial conditions if provided\n initial_condition = self.config['formulation']['initial_condition']\n if initial_condition['pressure'] is not None:\n init_pressure = initial_condition['pressure']\n self.apply_initial_conditions(init_pressure,\n self.pressure,\n self.pressure0)\n\n return None",
"def _get_impl(self, name: str) -> Optional[Callable]:\n if name in dir(operator):\n impl = getattr(operator, name)\n elif name in dir(builtins):\n impl = getattr(builtins, name)\n elif name in self['numeric/right']:\n impl = reverse_args(self._get_impl(name.lstrip('r')))\n else:\n impl = None\n return impl",
"def sem_function(self, parser, node, children):\n print (\"Function name\")\n print(children[1])\n \n if len(children) == 1:\n print(children[0]) \n return children[0]\n \n sign = -1 if children[0] == '-' else 1\n \n return sign * children[-1]",
"def get_function(self):\n return self.element.get_basis_functions()[self.n]",
"def my_function():\n\n\treturn None",
"def function(self):\n return self.generator.module.neumannz",
"def get_func(op):\n if op == \"-e\":\n return func\n elif op == \"-d\":\n return unfunc",
"def function(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"function\")",
"def func ( self ) :\n return self.__func",
"def get_unc_f(self):\n return self.uncf",
"def get_function(self):\n raise NotImplementedError()",
"def lin_o_func(self):\n return self.hx",
"def function(self):\n return self.generator.module.neumanny",
"def function(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"function\")",
"def function(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"function\")",
"def _call(self, x):\n return x.ufuncs.sign()",
"def get_function(self):\n return subs(self.f.get_function(), self.sub_pre, self.sub_post)",
"def calc_Uiso(self):\n if self.temp_factor is None:\n return None\n return numpy.identity(3, float) * (self.temp_factor * Constants.B2U)",
"def _isNullFunc():\n try:\n return vd.sheet.isNullFunc()\n except AttributeError:\n import visidata\n\n return visidata.isNullFunc()",
"def getFunction(self, name: unicode) -> ghidra.program.model.listing.Function:\n ...",
"def _call(self, x):\n if functional.prior is None:\n return np.exp(x)\n else:\n return functional.prior * np.exp(x)",
"def function(self):\n return self.generator.module.neumannx",
"def functional(self):\n return self.__functional",
"def function(self):\n raise NotImplementedError",
"def load_raw_wignetting_function():\n global rawvignfun\n if rawvignfun is None:\n vignfile = get_vigneting_by_urd(28)\n x = 23.5 + np.tan(vignfile[\"Offset angles\"].data[\"X\"]*pi/180/60.)*F/DL\n y = 23.5 + np.tan(vignfile[\"Offset angles\"].data[\"Y\"]*pi/180/60.)*F/DL\n rawvignfun = RegularGridInterpolator((vignfile[\"5 arcmin PSF\"].data[\"E\"], x, y), vignfile[\"5 arcmin PSF\"].data[\"EFFAREA\"])\n return rawvignfun",
"def intrinsic(*args, **kwargs):\n # Make inner function for the actual work\n def _intrinsic(func):\n name = getattr(func, '__name__', str(func))\n llc = _Intrinsic(name, func, **kwargs)\n llc._register()\n return llc\n\n if not kwargs:\n # No option is given\n return _intrinsic(*args)\n else:\n # options are given, create a new callable to recv the\n # definition function\n def wrapper(func):\n return _intrinsic(func)\n return wrapper",
"def indicator_func(self):\n if self.indicator_type == IndicatorType.Logistic:\n self.indicator_score = scipy.special.expit(\n (self.predict - self.kappa) / self.sigma)\n self.indicator_derivative = self.indicator_score * (\n 1 - self.indicator_score) / self.sigma\n elif self.indicator_type == IndicatorType.Relu:\n self.indicator_score = 0.5 * (1 + np.minimum(\n 1, np.maximum(-1, (self.predict - self.kappa) / self.delta)))\n self.indicator_derivative = (\n (self.kappa - self.delta < self.predict) &\n (self.predict < self.kappa + self.delta)) / (2 * self.delta)\n elif self.indicator_type == IndicatorType.Flat:\n raise NotImplementedError(\"Flat indicator not implemented\")\n elif self.indicator_type == IndicatorType.Hard:\n raise NotImplementedError(\"Hard indicator not implemented\")\n else:\n raise RuntimeError(\"Unkown inidicator function\")",
"def __call__(fun_name):",
"def _get_intermediate_simp(deffunc=lambda x: x, offfunc=lambda x: x,\n onfunc=_dotprodsimp, dotprodsimp=None):\n\n if dotprodsimp is False or _dotprodsimp_state.state is False:\n return offfunc\n if dotprodsimp is True or _dotprodsimp_state.state is True:\n return onfunc\n\n return deffunc # None, None",
"def get_func(k_center,enk,I,gamma,gamma_k):\n\n def lorentzian_k(k):\n return 1./np.pi * gamma_k / ( (k-k_center)**2 + gamma_k**2)\n\n def lorentzian(k,omega):\n return I * gamma / ( (omega-enk)**2 + gamma**2) * lorentzian_k(k)\n\n return lorentzian",
"def any_function(x):\n return x ** x # here we can hardcode any function",
"def f(self):\n\n if self._f is not None:\n return(self._f)\n if self.larmor is None:\n return(None)\n if self._ppm is not None:\n self._f = (self._ppm - self._ppmshift) * self.larmor * 1e-6;\n return(self._f)\n return(None)",
"def closure(Z):\r\n Z = np.array(Z)\r\n Z = Z/float(np.sum(Z))\r\n if any(Z < 0):\r\n return None\r\n else:\r\n return Z",
"def scalar_function(x, y):\n #Your code here\n if x<=y:\n fs = x*y\n else:\n fs = x/y\n return fs\n raise NotImplementedError",
"def ti_func(self):\n return self.ti.val - self.calc_ti()",
"def fun_exact(x):\n\n if func_type == \"sine\":\n return numpy.sin(x)\n elif func_type == \"tanh\":\n return 0.5*(1.0+numpy.tanh((x-1.0)/0.1))",
"def getFunction(self):\n return lambda x: self.slope * x + self.ordinate",
"def fn():",
"def _get_default(self):\n if callable(self.default):\n return self.default()\n else:\n return self.default",
"def main_function():\n return 1",
"def iso_flag(iso, flag_path=u''):\n\tfrom countries.utils.isoflag import iso_flag\n\treturn iso_flag(iso, flag_path)",
"def C_function_2 (x):\n\treturn 1/x",
"def test_level_width_is_callable(self):\n self.assertTrue(callable(level_width))",
"def my_function():\n\treturn 42",
"def ident_value(self, name: str, root_scope: bool = False) -> Result_Function:\n try:\n return cast(Result, self.activation.resolve_variable(name))\n except KeyError:\n return self.functions[name]",
"def fn(node):\n if not node: return 0\n h = ht(node.left)\n if h == ht(node.right): return 2**h + fn(node.right)\n else: return 2**(h-1) + fn(node.left)",
"def fn(self):\n return self._fn",
"def getFunction(self, key: long) -> ghidra.program.model.listing.Function:\n ...",
"def __set_operation_function(self):\n if self.operation_function is not None:\n return self.operation_function\n else:\n self.operation_function = symm_eval",
"def getFunction(self):\n return gpio.gpio_function(self.bcm_id)",
"def try_or_none(f):\n def f_or_none(x):\n try: return f(x)\n except: return None\n return f_or_none",
"def get_function(self):\n return SSAFunction(self.get_graph())",
"def _original_vlog_fn(level):\n return _LOG_MAPPING[level]",
"def poly_LC(f):\n if not f:\n return INT_ZERO\n else:\n return f[0]",
"def get_level(rol):\n\treturn rol.level",
"def _make_callable(func):\n try:\n return func.evaluator()\n except AttributeError:\n return func",
"def getLevel(unique_name):",
"def function(self):\n return self.generator.module.neumann",
"def resolution(self, level):\n return 2 ** (level - 1)",
"def f(self):\n return 1",
"def try_or_none(f):\n\n def f_or_none(x):\n try: return f(x)\n except: return None\n\n return f_or_none",
"def f(x):\n return x**2",
"def nn(self, ID=None, raw=False):\n if ID:\n pastInfo = self.getInfo(ID, 'wireVersion')\n if pastInfo:\n return pastInfo\n result = None, None\n callDict = self.getInfo(ID, 'callDict')\n if not callDict:\n # No callable set\n return result\n func = callDict['f']\n if isinstance(func, str):\n # A callable defined as a string can only be a function\n # name, return its FQN or None if that doesn't work\n result = None, self.cv.strToFQN(func)\n elif inspect.ismethod(func):\n # It's a method, so get its parent\n parent = getattr(func, '__self__', None)\n if parent:\n processed = self.cv.processObject(parent)\n if processed:\n # Pickle or FQN of parent, method name\n if raw:\n processed = parent\n result = processed, func.__name__\n if result == (None, None):\n # Couldn't get or process a parent, try processing the\n # callable itself\n processed = self.cv.processObject(func)\n if processed:\n # None, pickle or FQN of callable\n if raw:\n processed = func\n result = None, processed\n return self.saveInfo('wireVersion', result, ID)",
"def getFirstFunction(self) -> ghidra.program.model.listing.Function:\n ...",
"def fetch_operators_function(self, operator):\n operators_function = self.operators_dict[operator]['function']\n return operators_function",
"def get_iso(state):\n return len_states[np.where(len_states == state)[0][0], 2]",
"def f(self):\n return self._f",
"def code(self):\n if not self._code:\n filename = '<fluxtools function %s>' % self.tag\n self._code = compile(self.math, filename, mode='eval')\n return self._code",
"def get_now_func():\n return _now_func[0]",
"def unwrap_f(arg):\n return Corex.calculate_p_xi_given_y(*arg)",
"def get_U(self):\n if self.U is not None:\n return self.U\n return self.calc_Uiso()",
"def optional(tp: type) -> Callable:\n return lambda x: None if x is None else tp(x)",
"def test_func(self):\n self.rol_nu, self.functie_nu = rol_get_huidige_functie(self.request)\n return self.rol_nu == Rollen.ROL_RKO",
"def get_func(self):\n return self.get(COMMAND_UIC, 'GetFunc')",
"def piku():\n pass",
"def func():",
"def get_real_function( this, fn):\n\t\treturn this._get_native_function(fn)",
"def getTest(self):\n if self.applyTo == 'global':\n return lambda x, on: on\n fun = evaluate(self.check)[0]\n if self.on:\n return lambda x, on: fun(*x) and on\n else:\n return lambda x, on: fun(*x)",
"def calc_base_eff_and_infl(level):\n return 2 + (level - 1)",
"def function(self, state):\n if state is None:\n raise RuntimeError(\"state cannot be None!\")\n\n return getattr(self, 'state_' + state)",
"def circuit():\n np.random.seed(1967)\n for gates in gates_per_layers:\n for gate in gates:\n qml.apply(gate)\n return qml.expval(qml.PauliZ(0))",
"def _get_lsp_config_isis_level(self):\n return self.__lsp_config_isis_level",
"def get_fnllh(self):\n\n def fnllh(p):\n return 0.5 * anp.sum(self.get_fres()(p) ** 2)\n\n return fnllh",
"def __call__ (self, pts, coeff, function_type='no solution', default_function=True):\n ft = function_type\n if ft == 'no solution': coeff = [0,1]\n \n pts,coeff,no_solution = self._check_pts_coeff(pts,coeff) \n if no_solution: ft = 'no solution'\n \n if ft not in self._function_types:\n if default_function: ft = 'no solution'\n else:raise ValueError(\"Unknown function type:\"+str(function_type))\n \n if ft is 'pts': return self.wl_soln_pts(pts)\n \n func = self._function_types[ft][1]\n return func(pts,coeff)",
"def question_8():\n return None",
"def _call(self, x):\n if self.exponent == 0:\n return self.domain.one().inner(np.not_equal(x, 0))\n elif self.exponent == 1:\n return x.ufuncs.absolute().inner(self.domain.one())\n elif self.exponent == 2:\n return np.sqrt(x.inner(x))\n elif np.isfinite(self.exponent):\n tmp = x.ufuncs.absolute()\n tmp.ufuncs.power(self.exponent, out=tmp)\n return np.power(tmp.inner(self.domain.one()), 1 / self.exponent)\n elif self.exponent == np.inf:\n return x.ufuncs.absolute().ufuncs.max()\n elif self.exponent == -np.inf:\n return x.ufuncs.absolute().ufuncs.min()\n else:\n raise RuntimeError('unknown exponent')",
"def eurologic_backplane_function(self):\n return self._eurologic_backplane_function",
"def return_first(fn):\n def wrapped(*args, **kwargs):\n res = fn(*args, **kwargs)\n return res if _HVD.rank() == 0 else None\n return wrapped",
"def my_fuction():\n pass",
"def get_ufunc(ufunc_method: Optional[str] = None):\n if ufunc_method:\n if ufunc_method not in Replacements._ufunc_rep:\n return None\n return Replacements._ufunc_rep[ufunc_method]\n return Replacements._ufunc_rep['ufunc']",
"def lambda_func(self):\n air = self.air_alias.val\n fuel = self.fuel_alias.val\n\n m_air = 0\n m_fuel = 0\n\n for i in self.inl:\n m_air += (i.m.val_SI * i.fluid.val[air])\n m_fuel += (i.m.val_SI * i.fluid.val[fuel])\n\n return self.lamb.val - m_air / (m_fuel * self.air_min)",
"def get_initialize(tree):\n for node in ast.walk(tree):\n if isinstance(node, ast.FunctionDef):\n if node.name == \"initialize\":\n # check that there are no arguments\n if (len(node.args.args) == 0 and\n len(node.args.kwonlyargs) == 0 and\n len(node.args.kw_defaults) == 0 and\n len(node.args.defaults) == 0 and\n node.args.vararg is None and\n node.args.kwarg is None\n ): \n return node\n else:\n raise Exception(\"Initialization function may not contain arguments\")",
"def evaluate():\n\t\t\t\tif not hasattr(evaluate, 'value'):\n\t\t\t\t\tevaluate.value = func()\n\t\t\t\treturn evaluate.value"
] | [
"0.6047651",
"0.5671476",
"0.5463008",
"0.54624134",
"0.5348979",
"0.53289545",
"0.5268753",
"0.5257093",
"0.52481455",
"0.5203101",
"0.5194863",
"0.51774395",
"0.51627976",
"0.5157307",
"0.51524407",
"0.5147835",
"0.513994",
"0.51247656",
"0.5106995",
"0.5046533",
"0.50287354",
"0.50287354",
"0.50190294",
"0.50177026",
"0.5011827",
"0.5011127",
"0.49978614",
"0.49810368",
"0.49693674",
"0.49539286",
"0.49529058",
"0.4952385",
"0.49472934",
"0.49463502",
"0.49309623",
"0.49276736",
"0.4915752",
"0.49149472",
"0.49087223",
"0.49052972",
"0.4900581",
"0.48839906",
"0.48768735",
"0.48693755",
"0.48649523",
"0.48436695",
"0.48412985",
"0.48327446",
"0.48281425",
"0.4827673",
"0.48269588",
"0.4826911",
"0.48181304",
"0.48157713",
"0.48124674",
"0.48095152",
"0.48085192",
"0.4807057",
"0.47939235",
"0.47899354",
"0.4773122",
"0.47705144",
"0.47704354",
"0.47665143",
"0.47622004",
"0.47616047",
"0.47570723",
"0.47551733",
"0.47513592",
"0.47392294",
"0.47294796",
"0.47113204",
"0.47050375",
"0.470395",
"0.46901885",
"0.46894336",
"0.46872807",
"0.46866974",
"0.46821952",
"0.46806973",
"0.46797326",
"0.46753687",
"0.4672771",
"0.46620408",
"0.46610996",
"0.46584252",
"0.4653255",
"0.46532002",
"0.4652358",
"0.46504375",
"0.46474493",
"0.46435344",
"0.46416044",
"0.4636285",
"0.4634965",
"0.4633914",
"0.46334255",
"0.46325657",
"0.46235463",
"0.4623174"
] | 0.5753576 | 1 |
Set the function used to compute the isolevel. | def setAutoLevelFunction(self, autoLevel):
assert callable(autoLevel)
self._autoLevelFunction = autoLevel
self._updateScenePrimitive() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_func(self, function):\n self.get(COMMAND_UIC, 'SetFunc', [('function', function)])",
"def setLevel(self, level):\n self._autoLevelFunction = None\n level = float(level)\n if level != self._level:\n self._level = level\n self._updateScenePrimitive()\n self._updated(Item3DChangedType.ISO_LEVEL)",
"def _set_function(self):\n value = 0x20 | self.power | self.addressing | self.instr\n self.command([value])",
"def setFunction(self, function):\n self.ordinate = function(0)\n self.slope = function(1) - function(0)",
"def SetLevelSetValue(self, _arg: 'double const') -> \"void\":\n return _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF3_SetLevelSetValue(self, _arg)",
"def SetLevelSetValue(self, _arg: 'double const') -> \"void\":\n return _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF2_SetLevelSetValue(self, _arg)",
"def set_level(self, x, level):\n return x * 10 ** ((level - self.ref_level) / 20)",
"def set(self, Kv = -1.0, F = None):\n if F:\n self.setFunction(F)\n if Kv > 0.0:\n self.setValveCoeff(Kv)",
"def setLogFunction(function):\n _setLogFunction(function)",
"def _functionset(self):\n\t\n\t\t#Instruciton is set based on __init__ () arguments\n\t\tinstruction = 0b00100000\n\t\tinstruction = instruction | self.bit_mode\n\t\tinstruction = instruction | self.line_num\n\t\tinstruction = instruction | self.char_height\n\t\t\n\t\tself._send(instruction, RS_INSTRUCTION)",
"def getAutoLevelFunction(self):\n return self._autoLevelFunction",
"def SetLogicalFunction(*args, **kwargs):\n return _gdi_.PseudoDC_SetLogicalFunction(*args, **kwargs)",
"def set_objective_fn(self, objective_fn):\n self.objective_fn = objective_fn",
"def set_func(self, func):\n li_dim = self.space.dim\n from . import func_tools as ft\n if (self.operator in _cst.list_FIELD_OPERATORS) or (len(self.func_arguments) == 0) :\n from .utils import function\n self.func = function(func, space=self.space)\n else:\n raise(\"Not used anymore. Dead code\")",
"def set_unc_f(self, unc):\n self.uncf = unc",
"def setLogFunction(function):\n None",
"def set_opt_fuctions(self) -> None:\n self._ordered_opt_functions = self.get_rights()[1]",
"def __set_operation_function(self):\n if self.operation_function is not None:\n return self.operation_function\n else:\n self.operation_function = symm_eval",
"def set_funct(self, funct):\n if funct in ['LDA','GGA','PBEsol']:\n self.__funct = funct\n else:\n self.__funct = 'GGA'\n print \"Unknown type of functional! Please set xc functional to one either 'LDA','GGA' or 'PBEsol'. (set to default --> GGA)\"",
"def set_power(self, power, set=True):\n assert power in [self.POWER_UP, self.POWER_DOWN], \"Power must be POWER_UP or POWER_DOWN.\"\n self.power = power\n if set:\n self._set_function()",
"def setFunction(self, func: ghidra.program.model.listing.Function, entry: ghidra.program.model.address.Address, dbg: ghidra.app.decompiler.DecompileDebug) -> None:\n ...",
"def SetLogicalFunction(*args, **kwargs):\n return _gdi_.DC_SetLogicalFunction(*args, **kwargs)",
"def set_obj_fun(self):\n\n # disable button \"Edit Objective Function\"\n # self.ui.pb_edit_obj_func.setEnabled(False)\n a_str = str(self.le_a.text())\n state_a = self.is_le_addr_ok(self.le_a)\n b_str = str(self.le_b.text())\n state_b = self.is_le_addr_ok(self.le_b)\n c_str = str(self.le_c.text())\n state_c = self.is_le_addr_ok(self.le_c)\n func = str(self.le_of.text())\n def get_value_exp():\n A = 0.\n B = 0.\n C = 0.\n if state_a:\n A = self.mi.get_value(a_str)\n if state_b:\n B = self.mi.get_value(b_str)\n if state_c:\n C = self.mi.get_value(c_str)\n if func == \"\":\n return 0\n return eval(func)\n\n self.objective_func = get_value_exp\n\n return self.objective_func",
"def GetLevelSetValue(self) -> \"double\":\n return _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF3_GetLevelSetValue(self)",
"def define_scalar_functions(self):\n\n # Exit if functions have already been defined.\n # A function decorator might work better here...\n if hasattr(self, 'pressure'):\n return None\n\n if self.config['material']['incompressible']:\n self.pressure = dlf.Function(self.scalarSpace, name='p')\n\n if self.config['formulation']['time']['unsteady']:\n self.pressure0 = dlf.Function(self.scalarSpace, name='p0')\n else:\n self.pressure0 = 0\n\n self.test_scalar = dlf.TestFunction(self.scalarSpace)\n self.trial_scalar = dlf.TrialFunction(self.scalarSpace)\n else:\n self.pressure = 0\n self.pressure0 = 0\n self.test_scalar = None\n self.trial_scalar = None\n\n # Apply initial conditions if provided\n initial_condition = self.config['formulation']['initial_condition']\n if initial_condition['pressure'] is not None:\n init_pressure = initial_condition['pressure']\n self.apply_initial_conditions(init_pressure,\n self.pressure,\n self.pressure0)\n\n return None",
"def set_scaleup_fn(self, label, fn):\n\n assert type(label) is str, 'Name of scale-up function is not string'\n self.scaleup_fns[label] = fn",
"def _set_func(self, func):\n if callable(func):\n self._func = func\n else:\n raise TypeError(\"'func should be callable'\")",
"def GetLevelSetValue(self) -> \"double\":\n return _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF2_GetLevelSetValue(self)",
"def level(levelValue):\n def _decoration(fcn):\n fcn.level = levelValue\n return fcn\n return _decoration",
"def _function_set(self, data_length=self.data_length, number_of_lines=self.number_of_lines, character_font=self.character_font):\n function_set_mask = 32\n data = funtion_set_mask | (data_length << 4) | (number_of_lines << 3) | (character_font << 2)\n\n self.instruction(data)",
"def set_attrib(self, key, func, func_args):\n self.aux_attrib[key] = func\n self.aux_attrib_args[key] = func_args",
"def set_level(self, device_id, new_level):\n\t\treturn self.post(self.value_url % (ART_SERVER_HOST, device_id), {'value':new_level })",
"def add_custom(self, func: Callable, opset: OpsetVersion) -> None:\n self._functions.add_custom(opset, func)",
"def set_fermi_level(self, mu):\n self.mu = mu\n self.qptanalyzer.mu = mu",
"def __set_level(self,L):\n assert isinstance(L,level)\n self.__level = L",
"def pick(func):\n setattr(world, func.__name__, func)\n return func",
"def trigger_level(self, value):\n self.lib.SetTriggerLevel(ct.c_float(value))",
"def __change_level(self, level):\n self.level = level",
"def fun_set(self):\n\n self.type.set(self.xtl._scattering_type)\n # self.energy_kev.set(8)\n self.theta_offset.set(self.xtl._scattering_theta_offset)\n self.theta_min.set(self.xtl._scattering_min_theta)\n self.theta_max.set(self.xtl._scattering_max_theta)\n self.twotheta_min.set(self.xtl._scattering_min_two_theta)\n self.twotheta_max.set(self.xtl._scattering_max_two_theta)\n\n if self.orientation.get() == 'Reflection':\n self.direction_h.set(self.xtl._scattering_specular_direction[0])\n self.direction_k.set(self.xtl._scattering_specular_direction[1])\n self.direction_l.set(self.xtl._scattering_specular_direction[2])\n else:\n self.direction_h.set(self.xtl._scattering_parallel_direction[0])\n self.direction_k.set(self.xtl._scattering_parallel_direction[1])\n self.direction_l.set(self.xtl._scattering_parallel_direction[2])",
"def set_altfunc(self, port, bit, af):\n bit &= 15\n if bit < 8:\n hw = self.device.peripherals[port].AFRL\n else:\n hw = self.device.peripherals[port].AFRH\n bit -= 8\n shift = bit << 2\n val = hw.rd()\n val &= ~(15 << shift)\n val |= af << shift\n hw.wr(val)",
"def set_power_state(self, node, power_state):",
"def setter(self):\n def decorator(func):\n self._setf = func\n return self\n return decorator",
"def _levelChanged(self, event):\n if event == items.Item3DChangedType.ISO_LEVEL:\n model = self.model()\n if model is not None:\n index = self.index(column=1)\n model.dataChanged.emit(index, index)",
"def set_function(self, function, guard):\r\n self.function = function\r\n self.guard = guard",
"def set_mode(self, mode):\n self.write(\":FUNC {}\".format(mode))",
"def __init__(self, path=\"IdentityFunction\"):\n self.path = path\n self.heal = lambda X: X\n self.heal_tf = lambda X: X",
"def _func(self):\n return self._get_flint_func(self.domain)",
"def set_mask_key(self, func):\r\n self.get_mask_key = func",
"def my_settings_function():\n global level\n level += 1",
"def set_power(self, power):\n print('Setting santec power to %.4f mW' % power)\n self.santec1.write(\"LP %.2f\" % power)\n self.santec2.write(\"LP %.2f\" % power)\n self.santec3.write(\"LP %.2f\" % power)\n self.santec4.write(\"LP %.2f\" % power)",
"def _setValveCharacteristic(self, f):\n if type(f) == types.InstanceType:\n self.setFunction(f)\n else:\n raise CanteraError(\"Wrong type for valve characteristic function.\")",
"def _set_power(self, power: any) -> None:\n\n self.set_power(power, inplace=True)",
"def setter(self, func):\n self.fset = func\n self.set_setattr_mode(SetAttr.CallObject_ObjectValue, func)\n return self",
"def _set_default_func(self):\n dim = self.space.dim\n if dim == 1:\n func = lambda x : [ 1. ]\n if dim == 2:\n func = lambda x,y : [ 1. ]\n if dim == 3:\n func = lambda x,y,z : [ 1. ]\n return func",
"def set_alpha(self, alpha=1.0):\r\n self.unif[17] = alpha",
"def _set_power(self, value: str):\n if value == STATE_ON:\n self.state[1] = self.state[1][:2] + '1' + self.state[1][3:]\n\n if value == STATE_OFF:\n self.state[1] = self.state[1][:2] + '0' + self.state[1][3:]",
"def changeLossFunction(self,loss):\n self.loss_function = loss",
"def setPointInSourceFunction(self, int: int) -> 'SettingComparisonRequestBuilder':\n ...",
"def setLevel(self, level):\n self.lvl = level",
"def set_fitfunc(self):\n if self.amplitude != None:\n# print self.amplitude\n self.fitfunc = lambda p, x: (self.amplitude * exp(-x * p[0]))\n else: \n if self.offset:\n self.fitfunc = lambda p, x: (p[1] * exp(-x * p[0]) + p[2])\n else:\n self.fitfunc = lambda p, x: (p[1] * exp(-x * p[0]) + self.fixed_offset)",
"def set_level(self, level):\n\n self.sh.setLevel(level)\n\n if self.fh:\n self.fh.setLevel(level)",
"def load_function(self):\n self._fn = from_pickle(\n read_from_disk(os.path.join(self.location, FNCT_NM))\n )\n\n if self.farmer is not None:\n if self.farmer.fn is None:\n self.farmer.fn = self._fn\n else:\n # TODO: check equality?\n raise XYZError(\n \"Trying to load this Crop's function, {}, from \"\n \"disk but its farmer already has a function \"\n \"set: {}.\".format(self._fn, self.farmer.fn)\n )",
"def reinitialize_level_set_image_filter(*args, **kwargs):\n import itk\n instance = itk.ReinitializeLevelSetImageFilter.New(*args, **kwargs)\n return instance.__internal_call__()",
"def set_version(self, version=None, version_fun: Callable[[], str] = None):\n def version_compute():\n fun = version_fun\n if fun is None:\n fun = default_version_hash\n\n if version is None:\n return fun()\n else:\n return version\n\n self.version = version_compute\n return self",
"def get_setPower(self):\n self.read(\":POW?\")",
"def setMath(self, *args):\n return _libsbml.FunctionDefinition_setMath(self, *args)",
"def set_null(self, /, *defaults: Any, **kwargs: Any) -> \"fn\":\n return self._mod.set_null(self._func, *defaults, **kwargs)",
"def set_U_exp(self):\n self.set_T_lm()\n self.set_Qdot_exp()\n self.exh.U_exp = ( self.exh.Qdot_exp / (self.width *\n self.length * self.cool.ducts * self.delta_T_lm_array) ) \n self.cool.U_exp = ( self.cool.Qdot_exp / (self.width *\n self.length * self.cool.ducts * self.delta_T_lm_array) )",
"def add_update_function(self, update_function):\n self._update_function = update_function\n if self._update_function is not None:\n self.render_landmarks_checkbox.on_trait_change(\n self._update_function, 'value')\n self.group_dropdown.on_trait_change(self._update_function, 'value')\n self._add_function_to_labels_toggles(self._update_function)",
"def setLevel(self, level):\n self.level = level",
"def setFunctionName(self, function_name):\r\n self.actualFunction = function_name",
"def updatefunction(self):\n self.arcdisplay.ws.order=int(self.orderValueEdit.text())\n self.arcdisplay.ws.function=self.funcComboBox.currentText()\n self.arcdisplay.ws.set_func()\n self.arcdisplay.findfit()",
"def setLevelChangeCallback(self, callback):\n\n self.change_level_callback = callback",
"def set_Sensor(self, value):\n super(GetPathElevationInputSet, self)._set_input('Sensor', value)",
"def autolevel(image, selem, out=None, mask=None, shift_x=False, shift_y=False):\n\n return _apply(_crank8.autolevel, _crank16.autolevel, image, selem, out=out,\n mask=mask, shift_x=shift_x, shift_y=shift_y)",
"def __level(self, *args, **kwargs):\n pass",
"def add_extra_level(self, variable, xlevel):\n \n if variable not in [\"geopotential\", \"temperature\"]:\n raise Exception(\"variable should be one of [geopotential,temperature]\")\n \n if variable == \"geopotential\":\n # geopotential \n A = self.z.z[:, -1, :, :].to_dataset() # copy lowest pressure level\n A[\"level\"] = xlevel\n self.z = (xarray.concat([self.z, A], dim=\"level\"))\n \n # convert pressure to geopotential\n self.z.z[0, -1, :, :] = pres2alt(xlevel * 100) * g\n \n else: \n # temperature\n A = self.t.t[:, -1, :, :].to_dataset() # copy lowest pressure level\n A[\"level\"] = xlevel\n self.t = (xarray.concat([self.t, A], dim=\"level\"))",
"def add(self, func: Callable, opset: OpsetVersion) -> None:\n # FIXME(titaiwang): Check if the \"function\" is ducplicated.\n self._functions.set_base(opset, func)",
"def set_transform_function(self,transform_key):\n\n options = { 0:None,\n 90:self._r90 ,\n 180:self._r180,\n 270:self._r270}\n\n self._transform_function = options[transform_key]",
"def set_value(self, on_level):\n if on_level in FanSpeedRange.OFF:\n fan_speed = FanSpeed.OFF\n elif on_level in FanSpeedRange.LOW:\n fan_speed = FanSpeed.LOW\n elif on_level in FanSpeedRange.MEDIUM:\n fan_speed = FanSpeed.MEDIUM\n else:\n fan_speed = FanSpeed.HIGH\n self.value = fan_speed",
"def setGeolevel(self):\n #geocodeDict = {16:\"Block\",12:\"Block_Group\",11:\"Tract\",5:\"County\",2:\"State\",1:\"National\"}\n geocodeLen = len(self.geocode)\n try:\n self.geolevel = self.geocodeDict[geocodeLen]\n except KeyError:\n error_msg = \"No GeoLevel name for geocode of length {} (geocode:{}) in geocode dictionary \\\"{}\\\"\"\\\n .format(geocodeLen, self.geocode, self.geocodeDict)\n logging.error(error_msg)\n raise KeyError(error_msg)",
"def _original_vlog_fn(level):\n return _LOG_MAPPING[level]",
"def _get_isis_level(self):\n return self.__isis_level",
"def setMath(self, *args):\n return _libsbml.Trigger_setMath(self, *args)",
"def set_function_value(api_key, building_id, value,\n function_value=\"FLOOR_HEIGHT_M\", \n api_endpoint=(\"https://engine.tygron.com/api/session/\"\n \"event/editorbuilding/\"\n \"set_function_value/?\")):\n r = requests.post(url=api_endpoint+api_key, json=[building_id, function_value, value])\n return",
"def set_scene():\n function = LegacyFunctionSpecification()\n function.addParameter(\"scene_number\", dtype='int32', direction=function.IN)\n function.result_type = 'int32'\n return function",
"def set_powers(self, power_1, power_2):\n pass",
"def setLevels(self, levels, update=True):\n if self._xp is None:\n self.levels = levels\n self._defferedLevels = levels\n return\n if levels is not None:\n levels = self._xp.asarray(levels)\n self.levels = levels\n self._effectiveLut = None\n if update:\n self.updateImage()",
"def _setEnergy(self, eflag):\n ie = 1\n if eflag == 'off' or eflag == 0:\n ie = 0\n if self._verbose:\n if ie:\n print 'enabling energy equation for reactor',self._name\n else:\n print 'disabling energy equation for reactor',self._name \n _cantera.reactor_setEnergy(self.__reactor_id, ie)",
"def __init__(self, function):\n self.function = function",
"def Init(self):\r\n print(\"Initiating...\")\r\n if (self.Get_FullScale_Value() == self.FullScaleEnum[0]):\r\n self.gain = 0.00875\r\n elif (self.Get_FullScale_Value() == self.FullScaleEnum[1]):\r\n self.gain = 0.0175\r\n elif (self.Get_FullScale_Value() == self.FullScaleEnum[2]):\r\n self.gain = 0.07\r\n print(\"Gain set to:{0}\".format(self.gain))",
"def __init__(self, function=None):\n self._function = function",
"def set_fcn(self, bcprop, fcn):\n _pychidg.f90wrap_set_fcn(self=self._handle, bcprop=bcprop, fcn=fcn)",
"def set_level(self, level: LogLevel):\n pass",
"def svn_client_ctx_t_notify_func_set(svn_client_ctx_t_self, svn_wc_notify_func_t_notify_func): # real signature unknown; restored from __doc__\n pass",
"def set_invvar(self, filename):\n\tself._properties[\"var\"] = 1.0/pf.getdata(filename)",
"def set_power_management(value: int) -> None:",
"def set_setpoint(self, value):\n value = value * self.conf['PSICONV']\n log.debug(\"Set pressure regulator %d to %f\", self.id_, value)\n self.synth.cbox.set_dac(self.id_, value)",
"def __init__(self, function='sourcepfam/'):\n self.function = function",
"def on_lz_hardware_update(self, func):\n self._set_event_handler(\"lz\")\n self._events.on_lz_hardware_update(func)"
] | [
"0.6181312",
"0.6165592",
"0.6127846",
"0.5975614",
"0.59404004",
"0.588994",
"0.5565282",
"0.54807913",
"0.546307",
"0.54172516",
"0.5397693",
"0.5396863",
"0.539358",
"0.5385595",
"0.5355135",
"0.53504646",
"0.5310676",
"0.5305245",
"0.5295712",
"0.5280084",
"0.52643174",
"0.52617544",
"0.524106",
"0.5177632",
"0.51602966",
"0.51470864",
"0.5134829",
"0.5097155",
"0.5090193",
"0.50714344",
"0.50400805",
"0.50383663",
"0.503774",
"0.50325257",
"0.5018966",
"0.50169426",
"0.50101835",
"0.5008523",
"0.4991751",
"0.49864307",
"0.49776208",
"0.4974853",
"0.49719408",
"0.49547228",
"0.49499238",
"0.49225917",
"0.49199122",
"0.4917554",
"0.4905694",
"0.490359",
"0.4870127",
"0.48604286",
"0.4859267",
"0.48515707",
"0.48323593",
"0.48277846",
"0.48219562",
"0.48209718",
"0.4818588",
"0.48124948",
"0.48115674",
"0.48061377",
"0.48038268",
"0.48029152",
"0.47920853",
"0.47894222",
"0.47890383",
"0.47882017",
"0.47738883",
"0.47727892",
"0.47711518",
"0.47702298",
"0.4767596",
"0.47617814",
"0.4760184",
"0.4754674",
"0.47500938",
"0.47480357",
"0.4747987",
"0.4743664",
"0.47393215",
"0.4736283",
"0.4730339",
"0.47287846",
"0.47276962",
"0.47264498",
"0.47259018",
"0.4722296",
"0.47121197",
"0.47119576",
"0.47041062",
"0.46847856",
"0.4683067",
"0.46693492",
"0.46616444",
"0.46582916",
"0.46561915",
"0.46532702",
"0.46509853",
"0.46421212"
] | 0.612693 | 3 |
Return the color of this isosurface (QColor) | def getColor(self):
return qt.QColor.fromRgbF(*self._color) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getColor(self):\n return self.color",
"def getColor(self):\r\n return self.color",
"def get_color(self):\r\n return self._color",
"def getColor(self):\n return self.__color",
"def getColor(self):\n return self.__color",
"def getColor(self):\n return self.__color",
"def get_color(self):\n return self.color",
"def get_color(self):\n\n return self.color",
"def get_color(self):\n return self._color",
"def get_color(self):\n return self._color",
"def get_color(self):\r\n return self.__color",
"def get_color(self):\n\n return self._color",
"def color(self):\n return self.__color",
"def color(self):\n return self._color",
"def color(self):\n return self._color",
"def color(self):\n return self._rgba",
"def color(self):\n return self._rgba",
"def color(self):\n return rgba(self.value_of_css_property('color'))",
"def color(self):\n return self.container['color']",
"def getColor(self):\n return self._l[2]",
"def color(self):\n return self['color']",
"def color(self):\n return self.COLOR",
"def get_color(self):\n return COLOR_DICT[self.element]",
"def getColor(self):\n return self.side_color",
"def get_color(self) -> str:\n return self.color",
"def get_color(self) -> str:\r\n return self.color",
"def get_colour(self):\n return self.colour",
"def conseguir_color(self):\n return self.pluma.conseguir_color()",
"def get_color(self):\n return self._io.last_state['color']['front-center']",
"def color(self):\n return 0x2f3136",
"def color(self) -> Optional[str]:\n return self.colour",
"def get_color(self):\r\n if self.color:\r\n return \"RED\"\r\n else:\r\n return \"BLACK\"",
"def color(self):\n return self.settings['color']",
"def getColorModel(self):\n return self.getModel().getColorModel()",
"def color(self):\n if \"color\" in self._prop_dict:\n return self._prop_dict[\"color\"]\n else:\n return None",
"def _get_color(self):\n return self.__color",
"def _get_color(self):\n return self.__color",
"def _get_color(self):\n return self.__color",
"def _get_color(self):\n return self.__color",
"def get_color(self) -> Optional[str]:\n return self.color",
"def get_colour(self) -> str:\n return self.colour",
"def GetColour(self):\r\n\r\n return self._colour",
"def mesh_color(self):\n return self._mesh_color",
"def _get_color(self, color):\n qcolor = QtGui.QColor()\n qcolor.setRgb(int(color[:2], base=16),\n int(color[2:4], base=16),\n int(color[4:6], base=16))\n return qcolor",
"def rgb_color(self):\n return self._color",
"def colour(self) -> Optional[str]:\n return self._colour",
"def GetColor(self, *args):\n return _XCAFDoc.XCAFDoc_ColorTool_GetColor(self, *args)",
"def color(self):\n return self._zoom.color",
"def getColor(self):\n\n return self.pktColor",
"def color(self):\n if self._simplecell:\n self.fetch()\n return self._color",
"def get_color(self, widget, data=None):\n\t\tpygtk_color = widget.get_current_color()\n\t\tpygtk_color = pygtk_color.to_string()\n\t\t\n\t\tpygtk_red = int('0x'+pygtk_color[1:5],16)\n\t\tpygtk_green = int('0x'+pygtk_color[5:9],16)\n\t\tpygtk_blue = int('0x'+pygtk_color[9:12],16)\n\t\t\n\t\tcairo_color = '\"custom-'+str(self.color_counter)+'\"\\t: ('\n\t\tcairo_color += str(round(float(pygtk_red/65535.),1))\n\t\tcairo_color += (',')\n\t\tcairo_color += str(round(float(pygtk_green/65535.),1))\n\t\tcairo_color += (',')\n\t\tcairo_color += str(round(float(pygtk_blue/65535.),1))\n\t\tcairo_color += (',1.0)')\n\t\tprint cairo_color\n\n\t\tself.color_counter += 1",
"def hs_color(self):\n return self._hs_color",
"def get_color(self):\n return \"yellow\"",
"def get_colour(self, address):\n return idaapi.get_item_color(address)",
"def getColor(self):\n outValue = ColorRGBA()\n _res = self.mAPIContext.SDGraphObjectFrame_getColor(self.mHandle, ctypes.byref(outValue))\n if _res != SDApiError.NoError.value:\n if _res == SDApiError.NoErrorOutputParamNotSet.value:\n return None\n raise APIException(SDApiError(_res))\n return outValue",
"def get_color(self):\n _lib.caca_get_dither_color.argtypes = [_Dither]\n _lib.caca_get_dither_color.restype = ctypes.c_char_p\n\n return _lib.caca_get_dither_color(self)",
"def color(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"color\")",
"def color(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"color\")",
"def color(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"color\")",
"def color(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"color\")",
"def color(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"color\")",
"def color(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"color\")",
"def color(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"color\")",
"def color(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"color\")",
"def color(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"color\")",
"def color(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"color\")",
"def color(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"color\")",
"def color(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"color\")",
"def color(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"color\")",
"def color(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"color\")",
"def color(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"color\")",
"def color(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"color\")",
"def color(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"color\")",
"def color(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"color\")",
"def color(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"color\")",
"def color(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"color\")",
"def color(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"color\")",
"def color(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"color\")",
"def color(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"color\")",
"def color(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"color\")",
"def color(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"color\")",
"def color(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"color\")",
"def color(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"color\")",
"def color(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"color\")",
"def to_color(self):\n return (int(self.r * 255), int(self.g * 255), int(self.b * 255))",
"def color_in_rgb(self):\n return self._color_rgb",
"def get_rgba(self):\n\n return \"#%02X%02X%02X%02X\" % (self.r, self.g, self.b, self.a)",
"def get_color(self):\n return self._item.get(\"background_color\")",
"def get_graph_color ( self, object ):\n return self.graph_color_",
"def _rgbColor(self):\n item = self.item()\n if item is None:\n return None\n else:\n color = item.getColor()\n color.setAlpha(255)\n return color",
"def bgcolor(self):\n return self[\"bgcolor\"]",
"def color(self):\n assert False, 'Pen does not have a color; use pencolor or fillcolor'",
"def get_hex_color(self) -> str:\n return f'#{self.color.hex()}'",
"def penColor( self ):\n return self._penColor",
"def GetColor(self, id):\r\n\r\n if id == AUI_DOCKART_BACKGROUND_COLOUR:\r\n return self._background_brush.GetColour()\r\n elif id == AUI_DOCKART_BACKGROUND_GRADIENT_COLOUR:\r\n return self._background_gradient_colour\r\n elif id == AUI_DOCKART_SASH_COLOUR:\r\n return self._sash_brush.GetColour()\r\n elif id == AUI_DOCKART_INACTIVE_CAPTION_COLOUR:\r\n return self._inactive_caption_colour\r\n elif id == AUI_DOCKART_INACTIVE_CAPTION_GRADIENT_COLOUR:\r\n return self._inactive_caption_gradient_colour\r\n elif id == AUI_DOCKART_INACTIVE_CAPTION_TEXT_COLOUR:\r\n return self._inactive_caption_text_colour\r\n elif id == AUI_DOCKART_ACTIVE_CAPTION_COLOUR:\r\n return self._active_caption_colour\r\n elif id == AUI_DOCKART_ACTIVE_CAPTION_GRADIENT_COLOUR:\r\n return self._active_caption_gradient_colour\r\n elif id == AUI_DOCKART_ACTIVE_CAPTION_TEXT_COLOUR:\r\n return self._active_caption_text_colour \r\n elif id == AUI_DOCKART_BORDER_COLOUR:\r\n return self._border_pen.GetColour()\r\n elif id == AUI_DOCKART_GRIPPER_COLOUR:\r\n return self._gripper_brush.GetColour()\r\n else:\r\n raise Exception(\"Invalid Colour Ordinal.\")",
"def get_color(self, point):\n return self._color.dup()",
"def accent_colour(self) -> undefined.UndefinedNoneOr[colors.Color]:\n return self.accent_color",
"def pencolor(self):\n return self._pencolor",
"def colored(self):\n return colored(str(self), **self.color_opts)",
"def rgb_color(self):\n return self._COLORS[self._mystate]"
] | [
"0.70161194",
"0.6997709",
"0.69830304",
"0.6970522",
"0.6970522",
"0.6970522",
"0.69664246",
"0.6958327",
"0.69552636",
"0.69552636",
"0.69544834",
"0.6918692",
"0.6895953",
"0.68916535",
"0.68916535",
"0.68693244",
"0.68693244",
"0.68580174",
"0.680577",
"0.67948073",
"0.6764362",
"0.6753648",
"0.6735035",
"0.6709825",
"0.66705096",
"0.6656925",
"0.66178995",
"0.66061646",
"0.64928824",
"0.64739454",
"0.6473431",
"0.64164627",
"0.64061093",
"0.6372201",
"0.6352689",
"0.63445354",
"0.63445354",
"0.63445354",
"0.63445354",
"0.63239896",
"0.6321617",
"0.63132757",
"0.62777036",
"0.6275968",
"0.6257419",
"0.6243997",
"0.619587",
"0.6185876",
"0.61681277",
"0.6165878",
"0.61518866",
"0.61099476",
"0.6099706",
"0.60662556",
"0.6059516",
"0.60426617",
"0.60387594",
"0.60387594",
"0.60387594",
"0.60387594",
"0.60387594",
"0.60387594",
"0.60387594",
"0.60387594",
"0.60387594",
"0.60387594",
"0.60387594",
"0.60387594",
"0.60387594",
"0.60387594",
"0.60387594",
"0.60387594",
"0.60387594",
"0.60387594",
"0.60387594",
"0.60387594",
"0.60387594",
"0.60387594",
"0.60387594",
"0.60387594",
"0.60387594",
"0.60387594",
"0.60387594",
"0.60387594",
"0.60207206",
"0.60145617",
"0.60024726",
"0.60009277",
"0.59759796",
"0.5965536",
"0.5935273",
"0.59249556",
"0.58968073",
"0.5891844",
"0.5887238",
"0.58845043",
"0.5879818",
"0.58723676",
"0.58684963",
"0.5865142"
] | 0.77090704 | 0 |
Handle update of color | def _updateColor(self, color):
primitive = self._getScenePrimitive()
if len(primitive.children) != 0:
primitive.children[0].setAttribute('color', color) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _update_color(self, color):\n self.color = color",
"def _update_color(self, *args):\n\n if self._variable and 'w' in self._mode and not self._dnd_started:\n self._internal_color_change = True\n self.color_var.set(self._variable)",
"def update_color(self):\r\n \r\n \r\n colorset = self.colorset\r\n \r\n self.grfx[0].colorset = colorset\r\n pass",
"def refresh_color(self):\n self.color = max(0, int(math.sqrt(self.vx ** 2\n + self.vy ** 2)) + 100)",
"def changeColor( self ):\n\t\t\n\t\tx, y = self.position.xy\n\t\tself.color = ( int((x / WINDOW_X) * 128), int((x / WINDOW_X) * 128) + int((y / WINDOW_Y) * 128 ), int((y / WINDOW_Y) * 128))",
"def color(self, color):\n #self._color = color\n new_color = \"{0}{1}{2}\".format(hex(int(color[0]))[2:].zfill(2),\n hex(int(color[1]))[2:].zfill(2),\n hex(int(color[2]))[2:].zfill(2))\n #self.log.info(\"RASPLes.color(%s : %s -> %s)\" % (self.number, color, new_color))\n #print(\"color(%s -> %s)\" % (self.number, new_color))\n try:\n self.current_color = new_color\n #self.strip.setPixelColor(int(self.number), self.current_color)\n self.strip.setPixelColorRGB(int(self.number), color[0], color[1], color[2])\n\n self.strip.updated = True\n except Exception as e:\n self.log.error(\"led update error\" + str(e))",
"def _color_var_changed(self, *args):\n\n if not self._internal_color_change:\n self._variable = self.color_var.get()\n self._update()\n self._internal_color_change = False",
"def update_color(self):\n self.plot(update_traces=False, update_waveforms=True)",
"def process_color(self, color):\n self.controller.game.receive_color(color)\n self.parent.parent.update_stat_frame()\n self.parent.parent.update_table_frame()\n self.parent.parent.end_turn()",
"def update_r(color, new_r):\n\n color.update_r(new_r)",
"def _update_color(self):\n self._vertex_list.colors[:] = self._rgba * self._num_verts",
"def _update_color(self, rgb_tuple):\n for color in rgb_tuple._fields:\n pin = getattr(PINS, color)\n value = getattr(rgb_tuple, color)\n # Ensure color between 0 and 255\n value = max(min(value, 255), 0)\n # print(pin, value)\n self.pi.set_PWM_dutycycle(pin, value)",
"def update(self):\n super().update()\n time_since_start = self.time_since_start() \n curr_mod = time_since_start%self.game.time_cycle_secs\n grade = abs(curr_mod - self.game.time_cycle_secs/2) / (self.game.time_cycle_secs/2)\n color_value = grade*(255-self.game.max_darkness) + self.game.max_darkness\n for sprite in self.all_sprites:\n sprite.color = (color_value, color_value, color_value)",
"def _onEdit(self, event):\n index = self.colorlist.GetSelection()\n icol = self._indexTupleToColor(index)\n icd = wx.ColourData()\n icd.SetColour(icol)\n dialog = wx.ColourDialog(self, icd)\n\n if dialog.ShowModal() == wx.ID_OK:\n tup = _colorDataToTuple(dialog.GetColourData())\n self.graphColors[index] = tup\n self._tupleListToStrings()\n self._updateButtons(None)",
"def plot_color_changed(self):\n self.plot_color = self.plot_color_button.color()",
"def rgb_slider_moved(self, event):\n slider_red = int(self.slider_r.get_value())\n slider_green = int(self.slider_g.get_value())\n slider_blue = int(self.slider_b.get_value())\n\n self.change_color((slider_red, slider_green, slider_blue))",
"def _color(self, args):",
"def update_g(color, new_g):\n\n color.update_g(new_g)",
"def _update(self):\n\n if self.rgb:\n self._canvas['bg'] = tks.color_funcs.rgb_to_hex_string(self.rgb)\n self._text['text'] = self._color_info_text()\n else:\n self._canvas['bg'] = self._blank_label_color\n self._text['text'] = ''",
"def update(): # (12)\n with canvas(device) as draw:\n for led_pos in range(0, len(color_buffer)):\n color = color_buffer[led_pos]\n\n ## If your LED strip's colors are are not in the expected\n ## order, uncomment the following lines and adjust the indexes\n ## in the line color = (rgb[0], rgb[1], rgb[2])\n # rgb = getrgb(color)\n # color = (rgb[0], rgb[1], rgb[2])\n # if len(rgb) == 4:\n # color += (rgb[3],) # Add in Alpha\n\n draw.point((led_pos, 0), fill=color)",
"def _on_palette_change(self, palette_data: dict) -> None:\n # set the color from the metadata\n color = self._label_to_rgb[palette_data['label']]\n # if the selected color is different, queue a cursor update\n if not np.array_equal(self._color, color):\n self.is_cursor_change = True\n # store the color with the new value\n self._color[:] = color\n # set the is brush flag\n self.is_brush = palette_data['paint'] == 'brush'\n # store the brush size with the new value\n self.brush_size = palette_data['brush_size']\n # if the palette is in super pixel mode, get that data\n if palette_data['paint'] == 'super_pixel':\n # get the algorithm from the dictionary\n algorithm = palette_data['super_pixel']\n # get the arguments for the specific algorithm\n arguments = palette_data[algorithm]\n # get the segments using the given algorithm and arguments\n segs = segment(self._image, algorithm, **arguments)\n # apply the segmented image pixels and segments to local structures\n self._super_pixel_segments[:], self._super_pixel[:] = segs\n # otherwise set the super pixel data back to 0\n else:\n self._super_pixel_segments[:] = 0\n self._super_pixel[:] = 0",
"def set_color(self, new_color):\n self.color = new_color",
"def change_color(self, rgb):\n\n rgba = Gdk.RGBA()\n rgba.parse(\"rgb({},{},{})\".format(*rgb))\n self.square.override_background_color(Gtk.StateType.NORMAL, rgba)\n\n GObject.signal_handler_block(self.spinbutton_r, self.red_sb_id)\n self.spinbutton_r.set_value(rgb[0])\n GObject.signal_handler_unblock(self.spinbutton_r, self.red_sb_id)\n GObject.signal_handler_block(self.slider_r, self.red_s_id)\n self.slider_r.set_value(rgb[0])\n GObject.signal_handler_unblock(self.slider_r, self.red_s_id)\n\n GObject.signal_handler_block(self.spinbutton_g, self.green_sb_id)\n self.spinbutton_g.set_value(rgb[1])\n GObject.signal_handler_unblock(self.spinbutton_g, self.green_sb_id)\n GObject.signal_handler_block(self.slider_g, self.green_s_id)\n self.slider_g.set_value(rgb[1])\n GObject.signal_handler_unblock(self.slider_g, self.green_s_id)\n\n GObject.signal_handler_block(self.spinbutton_b, self.blue_sb_id)\n self.spinbutton_b.set_value(rgb[2])\n GObject.signal_handler_unblock(self.spinbutton_b, self.blue_sb_id)\n GObject.signal_handler_block(self.slider_b, self.blue_s_id)\n self.slider_b.set_value(rgb[2])\n GObject.signal_handler_unblock(self.slider_b, self.blue_s_id)\n\n GObject.signal_handler_block(self.output, self.output_id)\n self.output.set_text(rgb_to_hex(rgb))\n GObject.signal_handler_unblock(self.output, self.output_id)\n\n self.rgb_color = rgb\n self.change_output()",
"def test_update_r():\n\n color = Color(100, 142, 438)\n\n assert color.get_r() == 100\n assert color.get_g() == 142\n assert color.get_b() == 438\n\n update_r(color, 202)\n\n assert color.get_r() == 202\n assert color.get_g() == 142\n assert color.get_b() == 438",
"def test_update_g():\n color = Color(100, 142, 438)\n\n assert color.get_r() == 100\n assert color.get_g() == 142\n assert color.get_b() == 438\n\n update_g(color, 239)\n\n assert color.get_r() == 100\n assert color.get_g() == 239\n assert color.get_b() == 438",
"def _updateColor(self, color):\n primitive = self._getScenePrimitive()\n if (len(primitive.children) != 0 and\n isinstance(primitive.children[0], primitives.ColormapMesh3D)):\n primitive.children[0].alpha = self._color[3]\n else:\n super(ComplexIsosurface, self)._updateColor(color)",
"def update(self):\n try:\n if not self._light.connected:\n self._light.connect()\n # pylint: disable=invalid-name\n r, g, b, w = self._light.get_color()\n except pykulersky.PykulerskyException as exc:\n if self._available:\n _LOGGER.warning(\"Unable to connect to %s: %s\", self._light.address, exc)\n self._available = False\n return\n if not self._available:\n _LOGGER.info(\"Reconnected to %s\", self.entity_id)\n self._available = True\n\n hsv = color_util.color_RGB_to_hsv(r, g, b)\n self._hs_color = hsv[:2]\n self._brightness = int(round((hsv[2] / 100) * 255))\n self._white_value = w",
"def on_rgb_slide(self,r,g,b):\n if not self.active:\n return\n red = int(round(r / 100.0))\n green = int(round(g / 100.0))\n blue = int(round(b / 100.0))\n self.rgb = colormodel.RGB(red, green, blue)\n self.hsv = a3.rgb_to_hsv(self.rgb)\n assert (self.hsv == None or type(self.hsv) == colormodel.HSV), 'rgb_to_hsv does not return a HSV object'\n self.cmyk = a3.rgb_to_cmyk(self.rgb)\n assert (self.cmyk == None or type(self.cmyk) == colormodel.CMYK), 'rgb_to_cmyk does not return a CMYK object'\n self.update()",
"def update_b(color, new_b):\n\n color.update_b(new_b)",
"def changeColor(self):\n self.layer.new_colormap()",
"def change_color(self, color):\n self.color = color",
"async def update_image(new_value: str) -> None:\n img.apply(swatch, img.Handle.color(parse_color(new_value), size, size))",
"def message_colour_tick():\n global colour_count\n colour_count += 1",
"def updateColorFor(self, id, color):\n\n # find the good LED strip\n currentStrip = None\n index = 0\n for LEDStrip in self._LEDStrips:\n if LEDStrip._id == id:\n currentStrip = LEDStrip\n if currentStrip == None:\n index += 1\n \n if currentStrip == None:\n return\n\n self._colors[index] = color",
"def set_color(self, color):\n\t\tpass",
"def output_entry_changed(self, event):\n value = self.output.get_text().lstrip(\"#\")\n\n if len(value) == 6:\n rgb = hex_to_rgb(value)\n self.change_color(rgb)",
"def shadechanged(self, shadenum, newshade):\n\n if self.performingupdate or shadenum >= self.numcols or type(newshade) != int:\n return\n\n diff = newshade - self.currentshades[shadenum]\n if diff == 0:\n return\n\n incr = 1\n if diff < 0:\n incr = -1\n\n while newshade in self.currentshades:\n newshade += incr\n\n # If we've run off either end, we'll have to go back to where we were\n\n if newshade < 0 or newshade > 255:\n self.colspins[shadenum].setValue(self.currentshades[shadenum])\n return\n\n self.performingupdate = True\n self.currentshades[shadenum] = newshade\n self.currentshades.sort(reverse=not self.gs.inverse)\n for n in range(0, self.numcols):\n self.colspins[n].setValue(self.currentshades[n])\n self.performingupdate = False\n self.plotmap()",
"def slider_action(self, sender):\n self.r = self.rslider.value\n self.g = self.gslider.value\n self.b = self.bslider.value\n self.preview.background_color = self.rgb\n self.colorlabel.text = self.hexcode",
"def color_callback(self, data):\n cv_image = self.bridge.imgmsg_to_cv2(data, desired_encoding=\"passthrough\")\n self.color_mutex.acquire()\n self.color_image = cv_image\n self.color_mutex.release()",
"def test_update_b():\n color = Color(100, 142, 438)\n\n assert color.get_r() == 100\n assert color.get_g() == 142\n assert color.get_b() == 438\n\n update_b(color, 47)\n\n assert color.get_r() == 100\n assert color.get_g() == 142\n assert color.get_b() == 47",
"def updatePixelColor(self):\n height = self.frameGeometry().height()\n width = self.frameGeometry().width()\n pixel_pos = QtCore.QPoint(width/2, self.__selector_y*height)\n self.__color_selected = QtGui.QColor(self.__picker_image.pixel(pixel_pos))\n self.color_changed.emit(self.__color_selected)",
"def updateColors(self):\n self.negativeColor = (int(self.negativeRedTextField.get(\"1.0\", tk.END)),\n int(self.negativeGreenTextField.get(\"1.0\", tk.END)),\n int(self.negativeBlueTextField.get(\"1.0\", tk.END)))\n self.positiveColor = (int(self.positiveRedTextField.get(\"1.0\", tk.END)),\n int(self.positiveGreenTextField.get(\"1.0\", tk.END)),\n int(self.positiveBlueTextField.get(\"1.0\", tk.END)))\n # Update the positive and negative labels\n self.negativeLabel.config(background=self.negativeColorHex())\n self.positiveLabel.config(background=self.positiveColorHex())\n\n print(f\"Negative: {self.negativeColor}\")\n print(f\"Positive: {self.positiveColor}\")",
"def _on_edge_color_change(self, event=None):\n with self.layer.events.edge_color.blocker():\n index = self.edgeComboBox.findText(\n self.layer.edge_color, Qt.MatchFixedString\n )\n self.edgeComboBox.setCurrentIndex(index)\n color = Color(self.layer.edge_color).hex\n self.edgeColorSwatch.setStyleSheet(\"background-color: \" + color)",
"def update(self, rgb, cmyk, hsv):\n compRGB = a3.complement_rgb(rgb)\n if (compRGB is None):\n compRGB = rgb\n \n rgb_str = rgb_to_str(rgb)\n cmyk_str = '' if cmyk is None else str5_cmyk(cmyk) \n hsv_str = '' if hsv is None else str5_hsv(hsv)\n \n self.main.text = (\"Color\\nRGB: \" + rgb_str +\n \"\\nCMYK: \" + cmyk_str +\n \"\\nHSV: \" + hsv_str + \"\\n \\n\" +\n \"R,G,B sliders in: 0..255\\n\" +\n \"C,M,Y,K sliders: 0 to 100%\\n\" +\n \"H slider: 0 <= H < 360 degrees\\n\" +\n \"S,V sliders: 0 <= S,V <= 1\")\n self.main.background = rgb.glColor()\n self.main.foreground = compRGB.glColor()\n self.comp.text = (\"Color\\nRGB: \" + rgb_str +\n \"\\nCMYK: \" + cmyk_str +\n \"\\nHSV: \" + hsv_str + \"\\n \\n\" +\n \"R,G,B sliders in: 0..255\\n\" +\n \"C,M,Y,K sliders: 0 to 100%\\n\" +\n \"H slider: 0 <= H < 360 degrees\\n\" +\n \"S,V sliders: 0 <= S,V <= 1\" )\n self.comp.background = compRGB.glColor()\n self.comp.foreground = rgb.glColor()\n \n # set the sliders\n self.rSlider.value = rgb.red*100\n self.gSlider.value = rgb.green*100\n self.bSlider.value = rgb.blue*100\n self.cSlider.value = 0 if cmyk is None else cmyk.cyan*100 \n self.mSlider.value = 0 if cmyk is None else cmyk.magenta*100\n self.ySlider.value = 0 if cmyk is None else cmyk.yellow*100\n self.kSlider.value = 0 if cmyk is None else cmyk.black*100\n self.hSlider.value = 0 if hsv is None else hsv.hue*100\n self.sSlider.value = 0 if hsv is None else hsv.saturation*100\n self.vSlider.value = 0 if hsv is None else hsv.value*100",
"def on_material_color_btn_color_set(self,button,data=None):\n self.app.reload_job()",
"def set_color(self, color):\n pass",
"def rgb(self, value):\n\n self._variable = value\n self._update()",
"def _setColor(self, index):\n\n self.colorLabel.setStyleSheet(\"border: 1px solid black; background-color:rgb(%s, %s, %s);\" % (\n cControlColors[index][0] * 255, cControlColors[index][1] * 255,\n cControlColors[index][2] * 255))\n self.rgbColorDlg.setCurrentColor(QColor.fromRgb(\n cControlColors[index][0] * 255, cControlColors[index][1] * 255,\n cControlColors[index][2] * 255))\n self.colorSlider.setValue(index)",
"def onColorMenu(self, item):\n self.canvas.color = item.color\n return 1",
"def _color_change_mode(self):\r\n self.dlg.exec_()\r\n self.color = self.dlg.currentColor().name()\r\n self.colorPlate.setStyleSheet(\"background-color: %s;\" % self.color)\r\n self.input_scene.get_stk_color(self.color)\r\n return",
"def rgb_spin_changed(self, event):\n spin_red = self.spinbutton_r.get_value_as_int()\n spin_green = self.spinbutton_g.get_value_as_int()\n spin_blue = self.spinbutton_b.get_value_as_int()\n\n self.change_color((spin_red, spin_green, spin_blue))",
"def updateFromHsl ( self ):\n rgb = Colz.hslToRgb( self.h, self.s, self.l )\n self.r = rgb[0]\n self.g = rgb[1]\n self.b = rgb[2]\n self.rgb = rgb\n self.rgba = [ rgb[0], rgb[1], rgb[2], self.a ]\n # Updates Hex\n self.hex = Colz.rgbToHex( rgb[0], rgb[1], rgb[2] )",
"def colorFrame(self, _color):\n\t\tif self.frame:\n\t\t\tfor nr, i in enumerate(self.frame):\n\t\t\t\tself.frame[nr][1] = _color",
"def colorFrame(self, _color):\n\t\tif self.frame:\n\t\t\tfor nr, i in enumerate(self.frame):\n\t\t\t\tself.frame[nr][1] = _color",
"def change_color(mutated_genome):\n index = random.randint(0,max(0,len(mutated_genome)-1))\n if color_mode == 'RGB':\n color_red = random.randint(-25,25)\n color_green = random.randint(-25,25)\n color_blue = random.randint(-25,25)\n color = mutated_genome[index][0]\n newcolor = (color[0]+color_red,color[1]+color_green,color[2]+color_blue)\n else: #color_mode == 'L':\n color_diff = random.randint(-25,25)\n color = mutated_genome[index][0]\n newcolor = color+color_diff\n mutated_genome[index][0] = newcolor",
"def updateFromRgb ( self ):\n hsl = self.rgbToHsl( self.r, self.g, self.b )\n self.h = hsl[0]\n self.s = hsl[1]\n self.l = hsl[2]\n self.hsl = hsl\n self.hsla = [ hsl[0], hsl[1], hsl[2], self.a ]",
"def on_rgb_press(self,r,g,b):\n self.rgb = colormodel.RGB(r, g, b)\n self.hsv = a3.rgb_to_hsv(self.rgb)\n assert (self.hsv == None or type(self.hsv) == colormodel.HSV), 'rgb_to_hsv does not return a HSV object'\n self.cmyk = a3.rgb_to_cmyk(self.rgb)\n assert (self.cmyk == None or type(self.cmyk) == colormodel.CMYK), 'rgb_to_cmyk does not return a CMYK object'\n self.update()",
"def set_color(color):\r\n global _current_color\r\n _current_color = color",
"def red(self, new_value):\r\n if self.empty is True and self.yellow is False and self.red is False and new_value is True:\r\n self._red = new_value\r\n self.empty = False\r\n else:\r\n raise DomainError('Square already full! ')",
"def main():\n # color = rb.Color.BLUE.value\n # move_to_color(color)\n infared_sensor()\n\n # WHITE/RED does not work same with the BLUE/GREEN going down",
"def update(self, rgb, cmyk, hsv):\n # RGB Fields\n self.rField.text = `rgb.red`\n self.gField.text = `rgb.green`\n self.bField.text = `rgb.blue`\n # CMYK fields\n self.cField.text = \"\" if cmyk is None else `round(cmyk.cyan,2)`\n self.mField.text = \"\" if cmyk is None else `round(cmyk.magenta,2)`\n self.yField.text = \"\" if cmyk is None else `round(cmyk.yellow,2)`\n self.kField.text = \"\" if cmyk is None else `round(cmyk.black,2)`\n # HSV fields\n self.hField.text = \"\" if hsv is None else `round(hsv.hue,1)`\n self.sField.text = \"\" if hsv is None else `round(hsv.saturation,3)`\n self.vField.text = \"\" if hsv is None else `round(hsv.value,3)`",
"def update(self):\n self.active = False\n self.top.update(self.rgb,self.cmyk,self.hsv)\n self.bot.update(self.rgb,self.cmyk,self.hsv)\n self.active = True",
"def _on_change(self, *_):\n colour = self.on_colour if self.value else self.off_colour\n self.configure(bg=colour)\n if self.label:\n self.label.configure(bg=colour)",
"def set_color(self):\n self.image[self.x, self.y] = self.color\n if self.diffusion:\n r = g = b = 0\n for i in range(self.convolution_matrix.shape[0]):\n for j in range(self.convolution_matrix.shape[1]):\n r = g = b = 0\n for k in range(self.convolution_matrix.shape[0]):\n for l in range(self.convolution_matrix.shape[1]):\n m = (self.x + i + k - 2 + self.image.shape[0]) % self.image.shape[0]\n n = (self.y + j + l - 2 + self.image.shape[1]) % self.image.shape[1]\n r += self.convolution_matrix[k][l] * self.image[m, n][2]\n g += self.convolution_matrix[k][l] * self.image[m, n][1]\n b += self.convolution_matrix[k][l] * self.image[m, n][0]\n self.image[self.x, self.y] = (b, g, r)",
"def recolor(self, label_value: int, color: Tuple[float, float, float]) -> None:\n seginfo = self.infos[label_value]\n seginfo.color = color\n # propagate state changes\n self._update_state_from_infos()",
"def fill(self, color):",
"def change_lights_color(self, entity, attribute, oldUrl, newUrl, kwargs):\n if newUrl != oldUrl and newUrl is not None and self.can_change_colors():\n rgb_colors = self.get_colors(self.format_ha_url(newUrl))\n for i in range(len(self.lights)):\n threading.Thread(target=self.set_light_rgb, args=(self.lights[i], rgb_colors[i])).start()",
"def test_color(self):\n self._calibration_test(\"color_full\")",
"def set_color(self, color):\n self.light_color = color\n for f in self.color_change_cb:\n f(self)",
"def tween_rgb_at(self, progress, output):\n for cell_id in self.next.keys():\n next_color = self.next[cell_id]\n\n if cell_id in self.last:\n last_color = self.last[cell_id]\n else:\n last_color = color.BLACK\n\n r = tween.linear(last_color.r, next_color.r, progress)\n g = tween.linear(last_color.g, next_color.g, progress)\n b = tween.linear(last_color.b, next_color.b, progress)\n cell_color = color.RGB(r,g,b)\n output(cell_id, cell_color)",
"def color(self, color):\n\n self.container['color'] = color",
"def color(self, color_value):\n self.app.color = color_value",
"def drawChanges(self):\n self.draw(wait=False)\n draw(self.values,color='yellow',bbox=None,clear=False,shrink=self.shrink)",
"def set_red(self, x, y, newval):\n self.__check_dimensions(x, y)\n return self.pixels[(x, y)].set_red(newval)",
"def change(widget, colors): \n\t\n new_val = '#'\n for name in ('red', 'green', 'blue'):\n new_val += colors[name].get()\n widget['bg'] = new_val",
"def update(self):\n #self._light.update()\n #self._state = 'on' #self._light.is_on()\n #self._brightness = 80 #self._light.brightness\n _LOGGER.info(\"update() is called\")",
"def _set_color(self, r):\n c = COLORS[self.color]\n r.setLineColor(c[0], c[1], c[2])\n r.setColor(c[0], c[1], c[2])",
"def _update_brightness(self):\n while self.current_brightness != self.brightness:\n next_color = RGB(r=int(self.color.r * (self.current_brightness/100.0)),\n g=int(self.color.g * (self.current_brightness/100.0)),\n b=int(self.color.b * (self.current_brightness/100.0)))\n self._update_color(next_color)\n diff = self.brightness - self.current_brightness\n # adjust current brightness to +/- 1\n self.current_brightness = self.current_brightness + \\\n (diff) / abs(diff)\n time.sleep(.05)\n # Final update to exact brightness and default if no change in brightness setting\n final_color = RGB(r=int(self.color.r * (self.brightness/100.0)),\n g=int(self.color.g * (self.brightness/100.0)),\n b=int(self.color.b * (self.brightness/100.0)))\n self._update_color(final_color)",
"def shell_fgcolor_changed(self, color):\n self.set_fgcolor(color)",
"def update(self, grid, colRamp = ['white', 'blue']):\n \n # update the cell colors\n for y in range(len(grid)):\n yl = y + 1\n for x in range(len(grid[y])):\n xl = x + 1\n color = colRamp[int(grid[y][x])]\n self.displayWindow.update((xl, yl), color)\n\n # refresh the window\n self.displayWindow.tkupdate()",
"def color(self, value: tuple) -> None:\n if value in Color.PALETTE:\n self._color = value",
"def register(self):\n active = True\n self.rgb = colormodel.RGB(0, 255, 0)\n self.cmyk = a3.rgb_to_cmyk(self.rgb)\n assert (self.cmyk == None or type(self.cmyk) == colormodel.CMYK), 'rgb_to_cmyk does not return a CMYK object'\n self.hsv = a3.rgb_to_hsv(self.rgb)\n assert (self.hsv == None or type(self.hsv) == colormodel.HSV), 'rgb_to_hsv does not return a HSV object'\n self.update()",
"def _color(self,c):\n return self.colorlist[c%len(self.colorlist)]",
"def set_color(self):\n new_color = QColorDialog.getColor(QColor(self.config['color']))\n if not new_color.isValid():\n return\n self.config['color'] = new_color.rgb()\n self.paint()",
"def set_red(x, y, value, slot = 0):\r\n __g[slot].pixels_rgb[__g[slot].width * 3 * y + x * 3] = value",
"def update(self):\n self._brightness = self._lj.get_load_level(self._index) / 99 * 255",
"def shell_bgcolor_changed(self, color):\n self.set_bgcolor(color)",
"def set_color_rgb(r, g, b):\r\n global _current_color\r\n _current_color = (r, g, b)",
"def _get_color(self):\n return self.__color",
"def _get_color(self):\n return self.__color",
"def _get_color(self):\n return self.__color",
"def _get_color(self):\n return self.__color",
"def changecolor (color):\n valid_colors = (\"red\", \"grey\", \"yellow\", \"green\")\n if color in valid_colors:\n if changecolor.times:\n print(\"The color was last changed at \", changecolor.times[-1])\n print (color)\n changecolor.times.append(time.asctime())\n else:\n n = valid_colors.__len__()\n not_last = valid_colors[:n-1]\n last = valid_colors[-1]\n\n message = ', '.join(not_last) + ' and ' + last\n print (\"sorry, a color can only be\", message)",
"def updateColorItem(self, item, itemColor): \n self.avatarConfiguration[item] = itemColor\n self.paintAvatarItem(item)",
"def update(self):\n if self.state['enabled']:\n if not self.state['blue'] and not self.state['return']:\n self.update_normal()\n elif self.state['blue']:\n self.update_blue()\n elif self.state['return']:\n self.update_return()\n self.last_position = (self.rect.centerx, self.rect.centery)",
"def _calcColor(self, colorTuple):\n return milight.color_from_rgb(*colorTuple)",
"def change_color(self, x, y, state):\n if state == 1:\n color = self.tile_color\n else:\n color = self.background_color\n self.canvas.itemconfig(self.board[(x, y)], fill=color)",
"def _setRgbColor(self, color):\n item = self.item()\n if item is not None:\n color.setAlpha(item.getColor().alpha())\n item.setColor(color)",
"def setColor(self, color):\n color = rgba(color)\n if color != self._color:\n self._color = color\n self._updateColor(self._color)\n self._updated(ItemChangedType.COLOR)",
"def change_led_floor_color(update: 'Update', context: 'CallbackContext'):\n args = context.args\n message = \" \".join(args)\n\n try:\n publish.single(\"ledfloorupdates\", message, hostname=\"10.90.154.80\", port=1883, client_id=\"kolabbot\")\n update.message.reply_text('Changing LED floor color to \"{}\".'.format(message))\n except (ConnectionRefusedError, TimeoutError) as err:\n msg = \"Could not connect to LED-floor: {}\".format(err)\n print(msg)\n update.message.reply_text(msg)"
] | [
"0.792751",
"0.7712366",
"0.77086216",
"0.7582702",
"0.72797334",
"0.7251339",
"0.72495466",
"0.7238376",
"0.72019273",
"0.7178436",
"0.7135327",
"0.69967073",
"0.69614214",
"0.69462603",
"0.6907712",
"0.68813443",
"0.6823944",
"0.6733206",
"0.67252445",
"0.6687395",
"0.6653723",
"0.6634742",
"0.6625806",
"0.662149",
"0.6616557",
"0.659273",
"0.6581856",
"0.6546624",
"0.65457785",
"0.6538357",
"0.6502497",
"0.64979625",
"0.6493508",
"0.64900565",
"0.64847165",
"0.6460563",
"0.6456034",
"0.6430071",
"0.6428601",
"0.64193237",
"0.6414078",
"0.6398025",
"0.63827074",
"0.6357889",
"0.6353343",
"0.63296276",
"0.6328359",
"0.63124424",
"0.6298331",
"0.6284459",
"0.6268865",
"0.6255252",
"0.6252684",
"0.6252684",
"0.62485236",
"0.624581",
"0.6235886",
"0.62200403",
"0.6213656",
"0.6213357",
"0.621217",
"0.6204643",
"0.620115",
"0.6199151",
"0.6186757",
"0.6171659",
"0.61689895",
"0.61684155",
"0.61650395",
"0.61620325",
"0.6161217",
"0.61439455",
"0.6138982",
"0.61376494",
"0.61267763",
"0.61214495",
"0.6093968",
"0.607878",
"0.60768545",
"0.60765153",
"0.60698134",
"0.60603917",
"0.60493994",
"0.60486907",
"0.6045861",
"0.60455716",
"0.6044695",
"0.6036818",
"0.6029281",
"0.6029281",
"0.6029281",
"0.6029281",
"0.6028772",
"0.6017756",
"0.6013994",
"0.60078824",
"0.59962827",
"0.5993008",
"0.5984273",
"0.59812677"
] | 0.6820552 | 17 |
Set the color of the isosurface | def setColor(self, color):
color = rgba(color)
if color != self._color:
self._color = color
self._updateColor(self._color)
self._updated(ItemChangedType.COLOR) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _updateColor(self, color):\n primitive = self._getScenePrimitive()\n if (len(primitive.children) != 0 and\n isinstance(primitive.children[0], primitives.ColormapMesh3D)):\n primitive.children[0].alpha = self._color[3]\n else:\n super(ComplexIsosurface, self)._updateColor(color)",
"def update_color(self):\r\n \r\n \r\n colorset = self.colorset\r\n \r\n self.grfx[0].colorset = colorset\r\n pass",
"def set_color(self, color):\n\t\tpass",
"def set_color(self, color):\n pass",
"def setSurfaceMeshColor(clr=-1):\n dislin.mshclr(clr)",
"def changeColor(self):\n self.layer.new_colormap()",
"def change_colour_surface(surface, r, g, b):\n arr = pg.surfarray.pixels3d(surface)\n arr[:, :, 0] = r\n arr[:, :, 1] = g\n arr[:, :, 2] = b",
"def set_color(self, new_color):\n self.color = new_color",
"def set_color(self, color):\n self.color = color",
"def changeColor( self ):\n\t\t\n\t\tx, y = self.position.xy\n\t\tself.color = ( int((x / WINDOW_X) * 128), int((x / WINDOW_X) * 128) + int((y / WINDOW_Y) * 128 ), int((y / WINDOW_Y) * 128))",
"def color(self, value: tuple) -> None:\n if value in Color.PALETTE:\n self._color = value",
"def setColor(self, value):\n _res = self.mAPIContext.SDGraphObjectFrame_setColor(self.mHandle, ctypes.byref(value))\n if _res != SDApiError.NoError.value:\n if _res == SDApiError.NoErrorOutputParamNotSet.value:\n return None\n raise APIException(SDApiError(_res))\n return None",
"def setColorDiffuse(*args):",
"def color(self, color):\n\n self.container['color'] = color",
"def update_color(self):\n self.plot(update_traces=False, update_waveforms=True)",
"def change_color(self, color):\n self.color = color",
"def _set_color(self, r):\n c = COLORS[self.color]\n r.setLineColor(c[0], c[1], c[2])\n r.setColor(c[0], c[1], c[2])",
"def set_color(color):\r\n global _current_color\r\n _current_color = color",
"def UseColor(self, use_color):\n self.use_color = use_color",
"def set_color(self):\n new_color = QColorDialog.getColor(QColor(self.config['color']))\n if not new_color.isValid():\n return\n self.config['color'] = new_color.rgb()\n self.paint()",
"def set_color(self, color):\n self._color = color",
"def setSurfaceColors(topcolor=-1,bottomcolor=-1):\n dislin.surclr(topcolor, bottomcolor)",
"def set_color(self, color: str):\n self.color = color",
"def setColor(self, color):\n self.point_color = color\n self.side_color = color\n self.area_color = color",
"def set_colour(self, address, colour):\n idaapi.set_item_color(address, colour)",
"def _update_color(self):\n self._vertex_list.colors[:] = self._rgba * self._num_verts",
"def set_color(self, value):\n _lib.caca_set_dither_color.argtypes = [_Dither, ctypes.c_char_p]\n _lib.caca_set_dither_color.restype = ctypes.c_int\n\n return _lib.caca_set_dither_color(self, value)",
"def color(self, color_value):\n self.app.color = color_value",
"def color(self, color):\n #self._color = color\n new_color = \"{0}{1}{2}\".format(hex(int(color[0]))[2:].zfill(2),\n hex(int(color[1]))[2:].zfill(2),\n hex(int(color[2]))[2:].zfill(2))\n #self.log.info(\"RASPLes.color(%s : %s -> %s)\" % (self.number, color, new_color))\n #print(\"color(%s -> %s)\" % (self.number, new_color))\n try:\n self.current_color = new_color\n #self.strip.setPixelColor(int(self.number), self.current_color)\n self.strip.setPixelColorRGB(int(self.number), color[0], color[1], color[2])\n\n self.strip.updated = True\n except Exception as e:\n self.log.error(\"led update error\" + str(e))",
"def _update_color(self, color):\n self.color = color",
"def setColor(self, color):\n self.__color = color",
"def setColor(color):\n turtleTmp.color = color\n turtleTmp.penColor(color)",
"def set_color():\n function = LegacyFunctionSpecification()\n function.addParameter('index_of_the_particle', dtype='int32', direction=function.IN)\n for par in [\"red\", \"green\", \"blue\"]:\n function.addParameter(par, dtype='float64', direction=function.IN, \n description = \"The new RGB color vector of the particle\")\n function.addParameter('npoints', dtype='int32', direction=function.LENGTH)\n function.result_type = 'int32'\n function.must_handle_array = True\n return function",
"def setColor(self,value):\n\t\tself.politics = value if(type(value) is int)else int(value[1:],16)\n\t\tself.canvas.itemconfig('node_'+self.identifier,fill=self.toRGB())",
"def set_mesh_color(self, mesh_color):\n self.mesh_color = mesh_color\n self.update_mesh(self.all_meshes)",
"def setColour(self, col):\n\t\tself.colour = col",
"def setColorIndex(idx):\n dislin.setclr(idx)",
"def Set(*args, **kwargs):\n return _gdi_.Colour_Set(*args, **kwargs)",
"def fill(self, color):\n color = spyral.color._determine(color)\n self._surf.fill(color)",
"def Set(*args):\n return _XCAFDoc.XCAFDoc_ColorTool_Set(*args)",
"def set_color(self, color: str):\n self.color = bytes.fromhex(color.replace('#', ''))",
"def set_at(self, pos: Tuple2NumberType, color: ColorInputType) -> 'BaseImage':\n assert_vector(pos, 2)\n self._surface.set_at(pos, assert_color(color))\n return self",
"def setColor(self, color):\n color = QtGui.QColor(color)\n color.setAlpha(50)\n self.setBrush(QtGui.QBrush(color))\n color.setAlpha(255)\n self.setPen(QtGui.QPen(color, 1.0))",
"def SetAll(strip, color):\n for i in range(strip.numPixels()):\n strip.setPixelColor(i, color)",
"def set_color(self):\n self.image[self.x, self.y] = self.color\n if self.diffusion:\n r = g = b = 0\n for i in range(self.convolution_matrix.shape[0]):\n for j in range(self.convolution_matrix.shape[1]):\n r = g = b = 0\n for k in range(self.convolution_matrix.shape[0]):\n for l in range(self.convolution_matrix.shape[1]):\n m = (self.x + i + k - 2 + self.image.shape[0]) % self.image.shape[0]\n n = (self.y + j + l - 2 + self.image.shape[1]) % self.image.shape[1]\n r += self.convolution_matrix[k][l] * self.image[m, n][2]\n g += self.convolution_matrix[k][l] * self.image[m, n][1]\n b += self.convolution_matrix[k][l] * self.image[m, n][0]\n self.image[self.x, self.y] = (b, g, r)",
"def set_color(self, color):\n # type: (Color) -> None\n\n self.color = color",
"def fl_set_icm_color(colr, red, green, blue):\n _fl_set_icm_color = library.cfuncproto(\n library.load_so_libforms(), \"fl_set_icm_color\",\\\n None, [xfdata.FL_COLOR, cty.c_int, cty.c_int, cty.c_int],\\\n \"\"\"void fl_set_icm_color(FL_COLOR col, int r, int g, int b)\"\"\")\n #library.checknonfatal_allowed_value_in_list(colr, xfdata.COLOR_list)\n ul_colr = library.convert_to_FL_COLOR(colr)\n i_red = library.convert_to_intc(red)\n i_green = library.convert_to_intc(green)\n i_blue = library.convert_to_intc(blue)\n library.keep_elem_refs(colr, ul_colr, red, green, blue, i_red, \\\n i_green, i_blue)\n _fl_set_icm_color(ul_colr, i_red, i_green, i_blue)",
"def _updateColor(self, color):\n primitive = self._getScenePrimitive()\n if len(primitive.children) != 0:\n primitive.children[0].setAttribute('color', color)",
"def _set_hsv(self, color):\n\n self.qcolor.setHsv(color[0], color[1], color[2], 255)",
"def refresh_color(self):\n self.color = max(0, int(math.sqrt(self.vx ** 2\n + self.vy ** 2)) + 100)",
"def set_colors(self, ):\n try:\n odd = self._parent.settings.get_key('interface.odd_color')\n even = self._parent.settings.get_key('interface.even_color')\n self.dialog.instruments.set_odd_color(odd)\n self.dialog.accounts.set_odd_color(odd)\n self.dialog.instruments.set_even_color(even)\n self.dialog.accounts.set_even_color(even)\n except od_exception_config_key_error:\n pass",
"def set_param(self, name, val):\n # name will be 'colorR', 'colorG', 'colorB'\n rgb255 = int(val * 255)\n if name == 'colorR':\n self.color.r = rgb255\n elif name == 'colorG':\n self.color.g = rgb255\n elif name == 'colorB':\n self.color.b = rgb255",
"def the_user_changes_the_color_of_the_device(color):\n web_app.change_property_softassert(\"color\",color)",
"def set_style(_layer, _color=None):\n\tif _color is None:\n\t\t_color = QColor(\"white\")\n\t_layer.renderer().symbol().setColor(_color)",
"def rgb(self, value):\n\n self._variable = value\n self._update()",
"def fill(surface, color):\n w, h = surface.get_size()\n r, g, b, _ = color\n for x in range(w):\n for y in range(h):\n a = surface.get_at((x, y))[3]\n surface.set_at((x, y), pygame.Color(r, g, b, a))",
"def _set_hsvF(self, color):\n\n self.qcolor.setHsvF(color[0], color[1], color[2], 255)",
"def plot_color_changed(self):\n self.plot_color = self.plot_color_button.color()",
"def setColor(self):\n\n sel = cmds.ls(selection=True, type=['shape', 'transform'])\n if len(sel) > 0:\n for obj in sel:\n if cmds.nodeType(obj) == 'transform':\n shapes = cmds.listRelatives(obj, type='shape')\n if len(shapes) > 0 and self.shapeTypeCbx.isChecked():\n for shape in shapes:\n if cmds.attributeQuery('overrideEnabled', node=shape, exists=True):\n cmds.setAttr(shape + '.overrideEnabled', True)\n if self.colorsTab.currentIndex() == 0:\n if cmds.attributeQuery('overrideRGBColors', node=shape, exists=True):\n cmds.setAttr(shape + '.overrideRGBColors', False)\n if cmds.attributeQuery('overrideColor', node=shape, exists=True):\n cmds.setAttr(shape + '.overrideColor', self.colorSlider.value())\n else:\n if cmds.attributeQuery('overrideRGBColors', node=shape, exists=True):\n cmds.setAttr(shape + '.overrideRGBColors', True)\n if cmds.attributeQuery('overrideColorRGB', node=shape, exists=True):\n color = self.rgbColorDlg.currentColor()\n cmds.setAttr(shape + '.overrideColorRGB', color.red()/255.0, color.green()/255.0, color.blue()/255.0)\n\n if self.transformTypeCbx.isChecked():\n if cmds.attributeQuery('overrideEnabled', node=obj, exists=True):\n cmds.setAttr(obj + '.overrideEnabled', True)\n if self.colorsTab.currentIndex() == 0:\n if cmds.attributeQuery('overrideRGBColors', node=obj, exists=True):\n cmds.setAttr(obj + '.overrideRGBColors', False)\n if cmds.attributeQuery('overrideColor', node=obj, exists=True):\n cmds.setAttr(obj + '.overrideColor', self.colorSlider.value())\n else:\n if cmds.attributeQuery('overrideRGBColors', node=obj, exists=True):\n cmds.setAttr(obj + '.overrideRGBColors', True)\n if cmds.attributeQuery('overrideColorRGB', node=obj, exists=True):\n color = self.rgbColorDlg.currentColor()\n cmds.setAttr(obj + '.overrideColorRGB', color.red() / 255.0,\n color.green() / 255.0, color.blue() / 255.0)",
"def draw(self, color = Color.GREEN):\n self.image[self.x, self.y] = color",
"def put_color(self, _pos, _color):\n assert(((len(_pos) == 2) and (len(_color) == self.__resolution[2])) or\n ((len(_pos) == 3) and (len(_color) == 1)))\n self.__framebuffer[_pos] = _color",
"def setColor(self, color):\n for patch in self._patches:\n patch.setColor(color)",
"def _color_change_mode(self):\r\n self.dlg.exec_()\r\n self.color = self.dlg.currentColor().name()\r\n self.colorPlate.setStyleSheet(\"background-color: %s;\" % self.color)\r\n self.input_scene.get_stk_color(self.color)\r\n return",
"def XCAFDoc_ColorTool_Set(*args):\n return _XCAFDoc.XCAFDoc_ColorTool_Set(*args)",
"def setColor(pnj, color):\r\n\r\n assert isinstance(color, (int, tuple, str))\r\n pnj[\"color\"] = color",
"def setPointColor(self, color):\n for point in self.points:\n point.color = color",
"def set_color(self, red, green, blue, white):\n color_specs = [self._red_spec, self._green_spec, \n self._blue_spec, self._white_spec]\n\n for spec, color in zip(color_specs, [red, green, blue, white]):\n driver = DRIVERS[spec.addr]\n driver.set_time_off(spec.pin, color)",
"def change_color(self, color):\r\n if color == \"black\":\r\n self.color = \"white\"\r\n self.canvas.itemconfig(self.ball, fill='white')\r\n else:\r\n self.color = \"black\"\r\n self.canvas.itemconfig(self.ball, fill='black')",
"def SetColor(self, rgbtuple):\n if not rgbtuple:\n rgbtuple = wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNFACE).Get()\n col = [c/255.0 for c in rgbtuple]\n self.figure.set_facecolor(col)\n self.figure.set_edgecolor(col)\n self.canvas.SetBackgroundColour(wx.Colour(*rgbtuple))",
"def setColor(clr):\n if type(clr) == types.StringType:\n setColorString(clr)\n return \n if type(clr) == types.IntType:\n setColorIndex(clr)\n return\n if type(clr) == types.TupleType:\n setColorRGB(*clr)",
"def resetColor(self):\n self.setColor(255, 255, 255 ,255)",
"def color(self):\n assert False, 'Pen does not have a color; use pencolor or fillcolor'",
"def setColorMode(mode='full'):\n mdict = {'low':'NONE','full':'FULL'}\n dislin.clrmod(mdict[mode])",
"def set_trace_color(color): #py:set_trace_color\n RUR._set_trace_color_(color)",
"def set_window_colour(self, event):\n rgb_triplet, rgb_string = tkColorChooser.askcolor()\n self.canvas.config(bg = rgb_string)",
"def setColorString(clr):\n dislin.color(clr)",
"def set_color(self, r=0, g=0, b=0):\n r = clamp(r)\n g = clamp(g)\n b = clamp(b)\n self._state.color = (r, g, b)\n self.send_command(Command.SET_COLOR, [int(r), int(g), int(b)])",
"def fill(self, color):",
"def _update_color(self, *args):\n\n if self._variable and 'w' in self._mode and not self._dnd_started:\n self._internal_color_change = True\n self.color_var.set(self._variable)",
"def setPixelColor(self, n, color):\n self._logger.debug(\"setPixelColor\")",
"def test_color(self):\n self._calibration_test(\"color_full\")",
"def initialize_shade(self,shade_name,shade_color,alpha):\n\n self.shades[shade_name] = [0, pygame.Surface(self.image.get_size())]\n self.shades[shade_name][1].fill(shade_color)\n self.shades[shade_name][1].set_alpha(alpha)",
"def set_red(self, x, y, newval):\n self.__check_dimensions(x, y)\n return self.pixels[(x, y)].set_red(newval)",
"def set_red(x, y, value, slot = 0):\r\n __g[slot].pixels_rgb[__g[slot].width * 3 * y + x * 3] = value",
"def set_green(self):\n self.fill= Cell.FILLED_COLOR_BG\n self.draw()",
"def set_color(self, color):\n self.light_color = color\n for f in self.color_change_cb:\n f(self)",
"def show_vertex_colors():\n if bpy.app.version > (2, 80, 0):\n for area in bpy.context.screen.areas:\n if area.type == 'VIEW_3D':\n for space in area.spaces:\n if space.type == 'VIEW_3D':\n space.shading.type = 'SOLID'\n space.shading.color_type = 'VERTEX'",
"def _set_color_mode(self, mode):\n self._write(ST7789_COLMOD, bytes([mode & 0x77]))",
"def fill(self, color):\n self._surf.fill(color)\n self._version += 1\n spyral.util.scale_surface.clear(self._surf)\n return self",
"def set_color(self, color):\n with doc_ctrl.open_command():\n doc_ctrl.set_color(self.lbl, color)\n std_events.document_modified.emit()",
"def colorFrame(self, _color):\n\t\tif self.frame:\n\t\t\tfor nr, i in enumerate(self.frame):\n\t\t\t\tself.frame[nr][1] = _color",
"def colorFrame(self, _color):\n\t\tif self.frame:\n\t\t\tfor nr, i in enumerate(self.frame):\n\t\t\t\tself.frame[nr][1] = _color",
"def set_red_light(self, value):\n self.diffuse_light[0] = value\n self.redraw()",
"def set_color(objname, rgb):\r\n return f'\\ncmd.set_color(\"{objname}\", {(rgb[0], rgb[1], rgb[2])})'",
"def fill(self, value):\n self.fill_color = value",
"def setNewColor(self, color: QColor):\n self.drawNewColor = color",
"def _setRgbColor(self, color):\n item = self.item()\n if item is not None:\n color.setAlpha(item.getColor().alpha())\n item.setColor(color)",
"def setColor(self, color, group=None):\n group = group is None and self.group or group\n r = self.controller.send(self.light.color(milight.color_from_rgb(*color), group))\n logger.debug('Set color to %s (group: %s): %s' % (color, self.group, r))",
"def __setitem__(self, index, colour):\n self.desk[index] = colour",
"def change_color(icon, color):\n img = icon.convert(\"LA\")\n img = img.convert(\"RGBA\")\n image_np = np.array(icon)\n _, _, _, alpha = image_np.T\n mask = alpha > 0\n image_np[..., :-1][mask.T] = ImageColor.getcolor(color, \"RGB\")\n edited_image = Image.fromarray(image_np)\n return edited_image"
] | [
"0.6811779",
"0.67965776",
"0.6700545",
"0.6639459",
"0.66203904",
"0.6502433",
"0.647672",
"0.645605",
"0.64526445",
"0.64462304",
"0.64165473",
"0.627027",
"0.624905",
"0.623961",
"0.62221557",
"0.6202853",
"0.62005407",
"0.61851376",
"0.61728835",
"0.6150435",
"0.61454993",
"0.6145467",
"0.6136638",
"0.61366314",
"0.61061513",
"0.60856515",
"0.6073309",
"0.60676956",
"0.6067689",
"0.6053702",
"0.59976673",
"0.5995913",
"0.5992786",
"0.5989994",
"0.59734875",
"0.59569585",
"0.5923734",
"0.59234744",
"0.59181243",
"0.59115785",
"0.5891856",
"0.58700967",
"0.58688784",
"0.5851928",
"0.58029705",
"0.58016986",
"0.5791542",
"0.57736975",
"0.5746172",
"0.57382876",
"0.5734746",
"0.57136625",
"0.5711141",
"0.5706344",
"0.56957215",
"0.5681659",
"0.5678737",
"0.5671033",
"0.5665978",
"0.5661007",
"0.5659824",
"0.5651575",
"0.56464183",
"0.56120896",
"0.5581529",
"0.5580051",
"0.5569786",
"0.55596167",
"0.55534655",
"0.5552872",
"0.5548573",
"0.5545378",
"0.5534313",
"0.55248266",
"0.5513385",
"0.5510721",
"0.55106026",
"0.55091345",
"0.55059266",
"0.5504005",
"0.5499712",
"0.54847574",
"0.5478128",
"0.5474034",
"0.5468675",
"0.54635155",
"0.5456622",
"0.5453254",
"0.54508585",
"0.54502916",
"0.5447817",
"0.5447817",
"0.5424704",
"0.54241496",
"0.54136115",
"0.5413244",
"0.5409449",
"0.53975886",
"0.5394121",
"0.5389832"
] | 0.6050741 | 30 |
Compute isosurface for current state. | def _computeIsosurface(self):
data = self.getData(copy=False)
if data is None:
if self.isAutoLevel():
self._level = float('nan')
else:
if self.isAutoLevel():
st = time.time()
try:
level = float(self.getAutoLevelFunction()(data))
except Exception:
module_ = self.getAutoLevelFunction().__module__
name = self.getAutoLevelFunction().__name__
_logger.error(
"Error while executing iso level function %s.%s",
module_,
name,
exc_info=True)
level = float('nan')
else:
_logger.info(
'Computed iso-level in %f s.', time.time() - st)
if level != self._level:
self._level = level
self._updated(Item3DChangedType.ISO_LEVEL)
if numpy.isfinite(self._level):
st = time.time()
vertices, normals, indices = MarchingCubes(
data,
isolevel=self._level)
_logger.info('Computed iso-surface in %f s.', time.time() - st)
if len(vertices) != 0:
return vertices, normals, indices
return None, None, None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def isosurface(self):\n return self._isosurface()",
"def get_fsurface(self, path):\n raise NotImplementedError",
"def _isosurfaceItemChanged(self, event):\n if event == Item3DChangedType.ISO_LEVEL:\n self._updateIsosurfaces()",
"def isosurface(grd_name, isosurface_name, level, color):\r\n # pymol_out = PyMOLCommands.load(fname, grd_name)\r\n pymol_out = f'\\ncmd.isosurface(name=\"{isosurface_name}\", map=\"{grd_name}\", level=\"{level}\")\\n'\r\n pymol_out += f'\\ncmd.color(\"{color}\", \"{isosurface_name}\")'\r\n return pymol_out",
"def _updateIsosurfaces(self):\n # Sorting using minus, this supposes data 'object' to be max values\n sortedIso = sorted(self.getIsosurfaces(),\n key=lambda isosurface: - isosurface.getLevel())\n self._isogroup.children = [iso._getScenePrimitive() for iso in sortedIso]",
"def _get_surfaces(idf):\n surfaces = idf.getsurfaces() + idf.getshadingsurfaces() + idf.getsubsurfaces()\n return surfaces",
"def removeIsosurface(self, isosurface):\n if isosurface not in self.getIsosurfaces():\n _logger.warning(\n \"Try to remove isosurface that is not in the list: %s\",\n str(isosurface))\n else:\n isosurface.sigItemChanged.disconnect(self._isosurfaceItemChanged)\n self._isosurfaces.remove(isosurface)\n self._updateIsosurfaces()\n self.sigIsosurfaceRemoved.emit(isosurface)",
"def closed_v(self):\n sa = ShapeAnalysis_Surface(self.surface())\n return sa.IsVClosed()",
"def invert_simple(forward, meas, geom):\n\n surface = forward.surface\n RT = forward.RT\n instrument = forward.instrument\n\n vswir_present = False\n if any(forward.surface.wl < 2600):\n vswir_present = True \n\n tir_present = False\n if any(forward.surface.wl > 2600):\n tir_present = True \n\n # First step is to get the atmosphere. We start from the initial state\n # and estimate atmospheric terms using traditional heuristics.\n x = forward.init.copy()\n x_surface, x_RT, x_instrument = forward.unpack(x)\n\n if vswir_present:\n x[forward.idx_RT] = heuristic_atmosphere(RT, instrument, \n x_RT, x_instrument, meas, geom)\n\n # Now, with atmosphere fixed, we can invert the radiance algebraically\n # via Lambertian approximations to get reflectance\n x_surface, x_RT, x_instrument = forward.unpack(x)\n rfl_est, Ls_est, coeffs = invert_algebraic(surface, RT,\n instrument, x_surface, x_RT,\n x_instrument, meas, geom)\n\n # Condition thermal part on the VSWIR portion. Only works for\n # Multicomponent surfaces. Finds the cluster nearest the VSWIR heuristic\n # inversion and uses it for the TIR suface initialization.\n if tir_present:\n tir_idx = np.where(forward.surface.wl > 3000)[0]\n\n if vswir_present:\n x_surface_temp = x_surface.copy()\n x_surface_temp[:len(rfl_est)] = rfl_est\n mu = forward.surface.xa(x_surface_temp, geom)\n rfl_est[tir_idx] = mu[tir_idx]\n else:\n rfl_est = 0.03 * np.ones(len(forward.surface.wl))\n\n # Now we have an estimated reflectance. Fit the surface parameters.\n x_surface[forward.idx_surface] = forward.surface.fit_params(rfl_est, geom)\n\n # Find temperature of emissive surfaces\n if tir_present:\n\n # Estimate the total radiance at sensor, leaving out surface emission\n # Radiate transfer calculations could take place at high spectral resolution\n # so we upsample the surface reflectance\n rfl_hi = forward.upsample(forward.surface.wl, rfl_est)\n rhoatm, sphalb, transm, solar_irr, coszen, transup = coeffs\n\n L_atm = RT.get_L_atm(x_RT, geom)\n L_down_transmitted = RT.get_L_down_transmitted(x_RT, geom)\n L_total_without_surface_emission = \\\n L_atm + L_down_transmitted * rfl_hi / (1. 
- sphalb * rfl_hi)\n\n # These tend to have high transmission factors; the emissivity of most\n # materials is nearly 1 for these bands, so they are good for\n # initializing the surface temperature.\n clearest_wavelengths = [10125., 10390.00, 10690.00]\n\n # This is fragile if other instruments have different wavelength\n # spacing or range\n clearest_indices = [np.argmin(np.absolute(RT.wl - w))\n for w in clearest_wavelengths]\n\n # Error function for nonlinear temperature fit\n def err(z):\n T = z\n emissivity = forward.surface.emissivity_for_surface_T_init\n Ls_est, d = emissive_radiance(emissivity, T,\n forward.surface.wl[clearest_indices])\n resid = transup[clearest_indices] * Ls_est + \\\n L_total_without_surface_emission[clearest_indices] - \\\n meas[clearest_indices]\n return sum(resid**2)\n\n # Fit temperature, set bounds, and set the initial values\n idx_T = forward.surface.surf_temp_ind\n Tinit = np.array([forward.surface.init[idx_T]])\n Tbest = minimize(err, Tinit).x\n T = max(forward.surface.bounds[idx_T][0]+eps,\n min(Tbest, forward.surface.bounds[idx_T][1]-eps))\n x_surface[idx_T] = Tbest\n forward.surface.init[idx_T] = T\n\n # Update the full state vector\n x[forward.idx_surface] = x_surface\n\n # We record these initial values in the geometry object - the only\n # \"stateful\" part of the retrieval\n geom.x_surf_init = x[forward.idx_surface]\n geom.x_RT_init = x[forward.idx_RT]\n\n return x",
"def _compute_grid_state(self, for_id):\n own = np.zeros_like(self._map, float)\n own_pos = self._id2pos[for_id]\n own[own_pos] = 1\n\n thieves = (self._map == THIEF ).astype(float)\n guardians = (self._map == GUARDIAN).astype(float)\n\n own_team = self.id2team[for_id]\n if own_team == THIEF:\n teammates = thieves\n opponents = guardians\n else:\n teammates = guardians\n opponents = thieves\n\n treasure_channel = (self._map == TREASURE).astype(float)\n\n # Channels first\n return np.stack([own, teammates, opponents, self._walls_channel, treasure_channel])",
"def addIsosurface(self, level, color):\n isosurface = self._Isosurface(parent=self)\n isosurface.setColor(color)\n if callable(level):\n isosurface.setAutoLevelFunction(level)\n else:\n isosurface.setLevel(level)\n isosurface.sigItemChanged.connect(self._isosurfaceItemChanged)\n\n self._isosurfaces.append(isosurface)\n\n self._updateIsosurfaces()\n\n self.sigIsosurfaceAdded.emit(isosurface)\n return isosurface",
"def getItems(self):\n return self.getCutPlanes() + self.getIsosurfaces()",
"def drfl_dsurface(self, x_surface, geom):\n\n return np.zeros((self.n_wl, 1))",
"def dLs_dsurface(self, x_surface, geom):\n\n return np.zeros((self.n_wl, 1))",
"def update(self) -> pygame.Surface:\n return self.surface",
"def _get_state(self, obs_env):\n state = []\n obs_env = obs_env.reshape(self.n_agent, 2)\n for i in range(self.n_agent):\n local_obs = obs_env[i]\n if self.agent.startswith('ia2c'):\n imgs = [local_obs]\n\n if not self.agent == 'ia2c_fp': # ia2c\n for j in np.where(self.neighbor_mask[i] == 1)[0]:\n imgs.append(obs_env[j])\n imgs = np.array(imgs, dtype=np.float32)\n fps = np.array([], dtype=np.float32)\n\n else: # ia2c_fp\n fps = []\n for j in np.where(self.neighbor_mask[i] == 1)[0]:\n imgs.append(obs_env[j])\n fps.append(self.fp[j])\n imgs = np.array(imgs, dtype=np.float32)\n fps = np.concatenate(fps).astype(np.float32)\n\n agent_obs = [imgs, fps]\n\n else: # ma2c\n agent_obs = local_obs.astype(np.float32)\n\n state.append(agent_obs)\n\n return state\n # return [[obs_env, np.array([], dtype=np.float32)] for _ in range(self.n_agent)]",
"def project(self):\n # update positions compared to observer\n pos = self.pos.copy()\n\n # center coordinates around obs coords\n pos[:, 0] -= np.sin(self.theta) * self.V * self.time_elapsed\n pos[:, 2] -= np.cos(self.theta) * self.V * self.time_elapsed\n\n # wrap in a novel box around obs coords\n for i in range(3):\n pos[:, i] = self.bounds[2*i] + np.mod(pos[:, i], self.bounds[2*i + 1]-self.bounds[2*i])\n\n d = (pos**2).sum(axis=1)**.5\n # ind_visible = (pos[:, 2] > 0) * (self.d_min<d) * (d<self.d_max)\n ind_visible = (pos[:, 2] > self.d_min) * (d < self.d_max)\n N_visible = int(np.sum(ind_visible))\n\n # self.state = [X, Y, size]\n self.state = np.ones((N_visible, 7))\n for i in range(2):\n self.state[:, i] = self.mag * pos[ind_visible, i] / pos[ind_visible, 2]\n print(i, self.state[:, i].min(), self.state[:, i].max())\n self.state[:, 2] = self.size / d[ind_visible]\n\n # colors do not change\n self.state[:, 3:] = pos[ind_visible, 3:]\n\n # TODO: larger transparency at larger distance => too fancy :-)\n # self.state[:, 2] = self.size / d[ind_visible]\n\n # for i in range(3):\n # self.state[:, i] *= (self.bounds[2*i+1] - self.bounds[2*i])\n # self.state[:, i] -= self.bounds[2*i]",
"def getState(game):\n pixels = pygame.surfarray.array3d(game.screen)[:]\n pixels = np.array([pixels], dtype=float)\n\n # Here we will preprocess the pixel data\n bitsize = game.screen.get_bitsize() / 4\n pixels *= 1 / 2**bitsize # Normalize to [0..1]\n\n return pixels",
"def plot_fft_isosurfaces(description: str, omega: np.ndarray, \n ut: np.ndarray, filename: str) -> None:\n\n print(f'Plotting fft isosurfaces: {description}...')\n\n (omega_x_grid, omega_y_grid, omega_z_grid) = np.meshgrid(omega, omega, \n omega, indexing='ij')\n n = len(omega)\n\n num_slices = ut.shape[0]\n # We only want to plot the first, middle, and last time slices.\n slices = [0, num_slices//2, num_slices-1]\n\n titles = [f'{description}: slice {slice}' for slice in slices]\n\n num_rows = 1\n num_cols = len(slices)\n fig = make_subplots(\n rows=num_rows, \n cols=num_cols,\n specs=[\n [{'is_3d': True}]*num_cols,\n ]*num_rows,\n subplot_titles=titles,\n )\n for s in range(len(slices)):\n ut_slice = np.reshape(ut[slices[s],:], (n, n, n))\n fig.add_trace(\n go.Isosurface(\n x=omega_x_grid.flatten(), \n y=omega_y_grid.flatten(), \n z=omega_z_grid.flatten(), \n value=normalize(ut_slice).flatten(),\n opacity=0.5,\n isomin=0.6,\n isomax=0.9,\n surface_count=3,\n colorscale=\"Viridis\",\n ),\n row=1,\n col=s+1\n )\n fig.update_layout(\n scene_xaxis_title_text=\"omega_x\",\n scene_yaxis_title_text=\"omega_y\",\n scene_zaxis_title_text=\"omega_z\",\n scene2_xaxis_title_text=\"omega_x\",\n scene2_yaxis_title_text=\"omega_y\",\n scene2_zaxis_title_text=\"omega_z\",\n scene3_xaxis_title_text=\"omega_x\",\n scene3_yaxis_title_text=\"omega_y\",\n scene3_zaxis_title_text=\"omega_z\",\n )\n pio.write_html(fig, filename)",
"def active_surfaces(self):\n return [surface for surface in self._surfaces if surface.active]",
"def _dsurface_dbsf(self):\n # bare soil contribution\n I_bs = (self.I0 * self._mu_0\n * self.SRF.brdf(self.t_0, self.t_ex,\n self.p_0, self.p_ex,\n param_dict=self.param_dict))\n\n\n Isurf = (np.exp(-(self.V.tau / self._mu_0) -\n (self.V.tau / self._mu_ex))\n ) * I_bs * np.ones_like(self.t_0)\n\n return self.SRF.NormBRDF * (I_bs - Isurf)",
"def full_output_state(self):\n state = self.circuit.global_input_state\n for layer in range(self.circuit.element_layers):\n #TODO: a way to update the state one layer at a time\n #instead of one element at a time might be slightly faster\n for element in self.circuit.elements[layer]:\n state = self.evolve_element(state, element)\n return state",
"def _get_surfaces(idf):\n surface_types = [\n 'BUILDINGSURFACE:DETAILED',\n 'FENESTRATIONSURFACE:DETAILED',\n ]\n surfaces = []\n for surface_type in surface_types:\n surfaces.extend(idf.idfobjects[surface_type])\n\n return surfaces",
"def surface(self):\n return self._surface",
"def Surface(self, *args):\n return _Adaptor3d.Adaptor3d_HSurface_Surface(self, *args)",
"def pure_energy_state(self, i):\n return unvectorize(\n ketbra(self.s, i, i)\n for i in vectorize(i)\n )",
"def assemble_hyper_surface(self):\n def get_row_no(k0):\n hdf = pd.HDFStore(self.ewlibpath, 'r')\n hdf0 = hdf.get(k0)\n idx = np.where((np.abs(hdf0.th_wavelength-self.wavelength)<=0.025)\n & (np.abs(hdf0.th_EP - self.ep)<=0.02)\n & (hdf0.element == self.element))[0]\n if idx.size!=0:\n idx = idx[0]\n else:\n idx = -1\n hdf.close()\n return idx\n\n if self.interpolated:\n raise NotImplementedError(\"Interpolated model doesn't have such method!\")\n\n row_no = get_row_no(self._keys[0])\n if row_no == -1:\n warnings.warn(\"Data for interpolation is not enough!\")\n self._hyper_surface = None\n return self._hyper_surface\n else:\n f = h5py.File(self.ewlibpath, 'r')\n if self.cal == \"nlte\":\n ews = [np.array(f[k+\"/table\"])[row_no][1][3] for k in self._keys]\n else:\n ews = [np.array(f[k+\"/table\"])[row_no][1][2] for k in self._keys]\n f.close()\n\n\n datapoints = np.concatenate((np.array(self._atmos_pars), np.transpose([ews])), axis=1)\n datapoints = datapoints[~np.isnan(datapoints).any(axis=1)]\n\n if datapoints.shape[0] <= 3:\n warnings.warn(\"Data for interpolation is not enough!\")\n self._hyper_surface = None\n del datapoints\n return self._hyper_surface\n else:\n self._hyper_surface = datapoints\n print(\"Grid is prepared!\")\n del datapoints\n return self._hyper_surface",
"def image(self, state):\n valid_time = _to_datetime(state.valid_time)\n\n # 15 minute/1 hour slice of data?\n window = dt.timedelta(minutes=60) # 1 hour window\n paths = self.locator.find_period(valid_time, window)\n frame = self.loader.load(paths)\n frame = self.select_date(frame, valid_time, window)\n\n # Filter intra-cloud/cloud-ground rows\n if \"intra-cloud\" in state.variable.lower():\n frame = frame[frame[\"flash_type\"] == \"IC\"]\n elif \"cloud-ground\" in state.variable.lower():\n frame = frame[frame[\"flash_type\"] == \"CG\"]\n\n # EarthNetworks validity box (not needed if tiling algorithm)\n longitude_range = (26, 40)\n latitude_range = (-12, 4)\n x_range, y_range = geo.web_mercator(longitude_range, latitude_range)\n\n x, y = geo.web_mercator(frame[\"longitude\"], frame[\"latitude\"])\n frame[\"x\"] = x\n frame[\"y\"] = y\n pixels = 256\n canvas = datashader.Canvas(\n plot_width=pixels,\n plot_height=pixels,\n x_range=x_range,\n y_range=y_range,\n )\n\n if \"density\" in state.variable.lower():\n # N flashes per pixel\n agg = canvas.points(frame, \"x\", \"y\", datashader.count())\n else:\n frame[\"since_flash\"] = self.since_flash(frame[\"date\"], valid_time)\n agg = canvas.points(frame, \"x\", \"y\", datashader.max(\"since_flash\"))\n\n # Note: DataArray objects are not JSON serializable, .values is the\n # same data cast as a numpy array\n x = agg.x.values.min()\n y = agg.y.values.min()\n dw = agg.x.values.max() - x\n dh = agg.y.values.max() - y\n image = np.ma.masked_array(\n agg.values.astype(np.float), mask=np.isnan(agg.values)\n )\n if \"density\" in state.variable.lower():\n image[image == 0] = np.ma.masked # Remove pixels with no data\n\n # Update color_mapper\n color_mapper = self.color_mappers[\"image\"]\n if \"density\" in state.variable.lower():\n color_mapper.palette = bokeh.palettes.all_palettes[\"Spectral\"][8]\n color_mapper.low = 0\n color_mapper.high = agg.values.max()\n else:\n color_mapper.palette = bokeh.palettes.all_palettes[\"RdGy\"][8]\n color_mapper.low = 0\n color_mapper.high = 60 * 60 # 1 hour\n\n # Update tooltips\n for hover_tool in self.hover_tools[\"image\"]:\n hover_tool.tooltips = self.tooltips(state.variable)\n hover_tool.formatters = self.formatters(state.variable)\n\n if \"density\" in state.variable.lower():\n units = \"events\"\n else:\n units = \"seconds\"\n\n data = {\n \"x\": [x],\n \"y\": [y],\n \"dw\": [dw],\n \"dh\": [dh],\n \"image\": [image],\n }\n meta_data = {\n \"variable\": [state.variable],\n \"date\": [valid_time],\n \"units\": [units],\n \"window\": [window.total_seconds()],\n }\n data.update(meta_data)\n self.sources[\"image\"].data = data",
"def get_observation_(self):\n obs = np.zeros(self.STATE_SIZE, dtype=np.float)\n obs[:,:,0] = np.array(self.input_img_.data).reshape((self.STATE_SIZE[0:2]))\n\n if self.debug_:\n self.debugger_.show_input_occ_grid(self.input_img_)\n self.debugger_.show_input_image(obs[:,:,0])\n return obs",
"def inpaint(self):\n\n self._validate_inputs()\n self._initialize_attributes()\n\n start_time = time.time()\n keep_going = True\n while keep_going:\n self._find_front()\n print(self.front.shape)\n #imwrite('front.jpg',self.front)\n if self.plot_progress:\n self._plot_image()\n\n self._update_priority()\n\n target_pixel = self._find_highest_priority_pixel()\n find_start_time = time.time()\n source_patch = self._find_source_patch(target_pixel)\n #print('Time to find best: %f seconds'\n #% (time.time()-find_start_time))\n\n self._update_image(target_pixel, source_patch)\n\n keep_going = not self._finished()\n\n print('Took %f seconds to complete' % (time.time() - start_time))\n return self.working_image",
"def drfl_dsurfaceb(self, x_surface, geom):\n\n return np.zeros((self.n_wl, 1))",
"def getState(self) -> vector:\n return self._denormalizeState(self.Z)",
"def _get_fcoe_intf_port_state(self):\n return self.__fcoe_intf_port_state",
"def surface(self):\n # return sum(np.outer(basis_function, control_point) for basis_function, control_point in zip(self.basis_1, self.basis)).T\n # return sum(np.outer(basis_function_1, self.control_net[ii, jj]) for ((ii, basis_function_1), (jj, basis_function_2)) in zip(enumerate(self.basis_1), enumerate(self.basis_2))).T\n # return sum(np.outer(basis_function_1, self.control_net[ii, jj]) + np.outer(basis_function_2, self.control_net[ii, jj]) for ((ii, basis_function_1), (jj, basis_function_2)) in zip(enumerate(self.basis_1), enumerate(self.basis_2))).T\n\n # x = np.zeros_like(self.xi_1_mesh)\n # y = np.zeros_like(self.xi_1_mesh)\n # z = np.zeros_like(self.xi_1_mesh)\n xyz = np.zeros((*self.xi_1_mesh.shape, 3))\n for (i, basis_function_i), (j, basis_function_j) in itertools.product(enumerate(self.basis_1), enumerate(self.basis_2)):\n print(i, basis_function_i)\n print(j, basis_function_j)\n print(self.control_net[i, j])\n # b1, b2 = np.meshgrid(basis_function_i, basis_function_j, indexing = 'ij')\n control_x, control_y, control_z = self.control_net[i, j]\n # print(b1.shape, b2.shape, np.array(self.control_net[i, j]).shape)\n # print((b1 * b2).shape)\n # z += np.outer(b1 * b2, self.control_net[i, j])\n # print(np.shape(z))\n print(np.outer(basis_function_i, basis_function_j))\n # x += np.outer(basis_function_i, basis_function_j) * control_x\n # y += np.outer(basis_function_i, basis_function_j) * control_y\n # z += np.outer(basis_function_i, basis_function_j) * control_z\n print(np.outer(basis_function_i, basis_function_j).shape)\n print(np.outer(np.outer(basis_function_i, basis_function_j), self.control_net[i, j]).shape)\n print(np.outer(np.outer(basis_function_i, basis_function_j), np.array(self.control_net[i, j])).shape)\n r = np.einsum('i,j,k->ijk', basis_function_i, basis_function_j, np.array(self.control_net[i, j]))\n print(r.shape)\n xyz += r\n\n # print(x, y, z)\n\n # return x, y, z\n return xyz",
"def _removeClicked(self):\n isosurface = self.isosurface()\n if isosurface is not None:\n volume = isosurface.parent()\n if volume is not None:\n volume.removeIsosurface(isosurface)",
"def surface(self):\n return BRep_Tool_Surface(self.topods_shape())",
"def surface(self):\n # bare soil contribution\n I_bs = (self.I0 * self._mu_0\n * self.SRF.brdf(self.t_0, self.t_ex,\n self.p_0, self.p_ex,\n param_dict=self.param_dict))\n\n\n Isurf = (np.exp(-(self.V.tau / self._mu_0) -\n (self.V.tau / self._mu_ex))) * I_bs\n\n return self.SRF.NormBRDF * ((1. - self.bsf) * Isurf + self.bsf * I_bs)",
"def update_satellite_state(self, current_date):\n self.in_date = current_date\n self.spacecraft_state = self.state_observer.spacecraftState\n\n self.satPos_i = self.spacecraft_state.getPVCoordinates().getPosition()\n self.satVel_i = self.spacecraft_state.getPVCoordinates().getVelocity()",
"def exposedSurf(self):\n if self.precision:\n h = self.evaluations.exposedWing.edges[1].point1.x # height of trapezoid\n B = self.chordRootW # major base of trapezoid\n b = self.evaluations.chordIntersected.edges[1].length # minor base of trapezoid\n internalS = 2 * (0.5 * (b + B) * h) # wing surface internal at fuselage\n return self.surfaceW - internalS\n else:\n return self.surfaceW - self.fuselageDiameter * self.cMACW # first guess for a faster evaluation",
"def surfaceInfo(self, index):\n rawInfo = self.rawSurfaceInfo(index)\n if rawInfo is None:\n return None\n\n if \"GLAS\" in rawInfo:\n mat = Material.findByName(name=rawInfo[\"GLAS\"][0])\n else:\n mat = Air()\n\n curvature = float(rawInfo[\"CURV\"][0])\n if curvature == 0.0:\n radius = float(\"+inf\")\n else:\n radius = 1/curvature*self.factor\n \n if \"DIAM\" in rawInfo:\n diameter = 2*float(rawInfo[\"DIAM\"][0])*self.factor\n else:\n diameter = float(\"+inf\")\n\n spacing = float(rawInfo[\"DISZ\"][0])*self.factor\n\n return Surface(number=index, \n R=radius,\n mat=mat,\n spacing=spacing,\n diameter=diameter)",
"def get_surface(self, new: bool = True) -> 'pygame.Surface':\n if new:\n return self.get_crop_rect(self.get_rect())\n return self._surface",
"def update_satellite_state(self, integration_date):\n self.in_date = integration_date\n self.spacecraft_state = self.state_observer.spacecraftState\n\n self.satPos_i = self.spacecraft_state.getPVCoordinates().getPosition()\n self.satVel_i = self.spacecraft_state.getPVCoordinates().getVelocity()",
"def drawIsoSurfaces0(self):\r\n # research\r\n profbox()\r\n modelNodes = slicer.util.getNodes('vtkMRMLModelNode*')\r\n v = vtk.vtkAppendPolyData()\r\n\r\n for modelNode in modelNodes.values():\r\n if modelNode.GetAttribute(\"nth\") != None and modelNode.GetDisplayVisibility() == 1 :\r\n v.AddInput(modelNode.GetPolyData())\r\n\r\n modeller = vtk.vtkImplicitModeller()\r\n modeller.SetInput(v.GetOutput())\r\n modeller.SetSampleDimensions(self.dim.value, self.dim.value, self.dim.value)\r\n modeller.SetCapping(0)\r\n modeller.SetAdjustBounds(self.abonds.value)\r\n modeller.SetProcessModeToPerVoxel()\r\n modeller.SetAdjustDistance(self.adist.value / 100)\r\n modeller.SetMaximumDistance(self.maxdist.value / 100)\r\n\r\n contourFilter = vtk.vtkContourFilter()\r\n contourFilter.SetNumberOfContours(self.nb.value)\r\n contourFilter.SetInputConnection(modeller.GetOutputPort())\r\n contourFilter.ComputeNormalsOn()\r\n contourFilter.ComputeScalarsOn()\r\n contourFilter.UseScalarTreeOn()\r\n contourFilter.SetValue(self.contour.value, self.contourValue.value)\r\n contourFilter.SetValue(self.contour2.value, self.contourValue2.value)\r\n contourFilter.SetValue(self.contour3.value, self.contourValue3.value)\r\n contourFilter.SetValue(self.contour4.value, self.contourValue4.value)\r\n contourFilter.SetValue(self.contour5.value, self.contourValue5.value)\r\n\r\n isoSurface = contourFilter.GetOutput()\r\n self.AddContour(isoSurface)",
"def state_to_features(self, game_state: dict) -> np.array:\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n # This is the dict before the game begins and after it ends\n if game_state is None:\n return None\n\n \n #get global information as a 17x17 channel\n x = game_state['field']\n x = np.swapaxes(x,0,1)\n for i in range(len(game_state['coins'])):\n a = game_state['coins'][i][1]\n b = game_state['coins'][i][0]\n x[a][b] = 4\n for i in range(len(game_state['bombs'])):\n a = game_state['bombs'][i][0][1]\n b = game_state['bombs'][i][0][0]\n x[a][b] = -(5+game_state['bombs'][i][1])\n for i in game_state['others']:\n if i[2]:\n x[i[3][1]][i[3][0]] = -10\n else:\n x[i[3][1]][i[3][0]] = -11\n if game_state['self'][2]:\n x[game_state['self'][3][1]][game_state['self'][3][0]] = 5\n else:\n x[game_state['self'][3][1]][game_state['self'][3][0]] = 6\n expl_List = np.argwhere(game_state['explosion_map'] != 0)\n for i in expl_List:\n x[i[1]][i[0]] = -4\n channel1 = x.copy()\n \n \n #prep local channel\n if self.modelToUse != 0:\n #get simpele direction to and aways from closest coin or crate if no coin on the field\n x_axis,y_axis,coin_creat_encoding = directionToNearestCoin_Crate(game_state['coins'], game_state['self'][3], game_state['field'])\n if x_axis == \"left\":\n if x[game_state['self'][3][1]][game_state['self'][3][0]-1] == 0:\n x[game_state['self'][3][1]][game_state['self'][3][0]-1] = coin_creat_encoding\n if x[game_state['self'][3][1]][game_state['self'][3][0]+1] == 0:\n x[game_state['self'][3][1]][game_state['self'][3][0]+1] = -2\n if x_axis == \"right\":\n if x[game_state['self'][3][1]][game_state['self'][3][0]-1] == 0:\n x[game_state['self'][3][1]][game_state['self'][3][0]-1] = -2\n if x[game_state['self'][3][1]][game_state['self'][3][0]+1] == 0:\n x[game_state['self'][3][1]][game_state['self'][3][0]+1] = coin_creat_encoding\n if y_axis == \"up\":\n if x[game_state['self'][3][1]-1][game_state['self'][3][0]] == 0:\n x[game_state['self'][3][1]-1][game_state['self'][3][0]] = coin_creat_encoding\n if x[game_state['self'][3][1]+1][game_state['self'][3][0]] == 0:\n x[game_state['self'][3][1]+1][game_state['self'][3][0]] = -2\n if y_axis == \"down\":\n if x[game_state['self'][3][1]-1][game_state['self'][3][0]] == 0:\n x[game_state['self'][3][1]-1][game_state['self'][3][0]] = -2\n if x[game_state['self'][3][1]+1][game_state['self'][3][0]] == 0:\n x[game_state['self'][3][1]+1][game_state['self'][3][0]] = coin_creat_encoding\n \n \n \n #get information of bombs: on which position the explotion will be and how far away the bomb is\n bombs = game_state['bombs']\n bombs.sort(key=lambda x: x[1],reverse=True)\n x = np.pad(x, (3,3), 'constant', constant_values=(-1))\n for i in (bombs):\n y_bomb = i[0][1] + 3\n x_bomb = i[0][0] + 3\n for j in range(4):\n if abs(x[y_bomb,x_bomb+j]) != 1 and x[y_bomb,x_bomb+j] != -4:\n blocked = False\n for l in range(j):\n if x[y_bomb,x_bomb+j-l] == -1:\n blocked = True\n if blocked == False:\n x[y_bomb,x_bomb+j] = -(9-j)\n #print(\"test1\")\n if abs(x[y_bomb,x_bomb-j]) != 1 and x[y_bomb,x_bomb-j] != -4:\n blocked = False\n for l in range(j):\n if x[y_bomb,x_bomb-j+l] == -1:\n blocked = True\n if blocked == False:\n x[y_bomb,x_bomb-j] = -(9-j)\n #print(\"test2\")\n if abs(x[y_bomb+j,x_bomb]) != 1 and x[y_bomb+j,x_bomb] != -4:\n blocked = False\n for l in range(j):\n if x[y_bomb+j-l,x_bomb] == -1:\n blocked = True\n if blocked == False:\n x[y_bomb+j,x_bomb] = -(9-j)\n #print(\"test3\")\n if abs(x[y_bomb-j,x_bomb]) != 1 and x[y_bomb-j,x_bomb] != 
-4:\n blocked = False\n for l in range(j):\n if x[y_bomb-j+l,x_bomb] == -1:\n blocked = True\n if blocked == False:\n x[y_bomb-j,x_bomb] = -(9-j)\n #print(\"test4\")\n x = x[3:-3,3:-3]\n \n \n #get local view and concatenate it with channel 1 (will be sliced apart in the model later)\n z = np.zeros(17)\n y = x[game_state['self'][3][1]-1:game_state['self'][3][1]+2,game_state['self'][3][0]-1:game_state['self'][3][0]+2]\n y = y.flatten()\n z[0:9] = y\n #get correct input for the model used\n if self.modelToUse == 2:\n z = Variable(torch.from_numpy(z)).to(device).to(torch.float)\n z = z.unsqueeze(0).unsqueeze(0).unsqueeze(0)\n channel1 = Variable(torch.from_numpy(channel1)).to(device).to(torch.float)\n channel1 = channel1.unsqueeze(0).unsqueeze(0)\n return torch.cat((channel1,z),2)\n elif self.modelToUse == 1:\n y = Variable(torch.from_numpy(y)).to(device).to(torch.float)\n y = y.unsqueeze(0)\n return y\n else:\n channel1 = Variable(torch.from_numpy(channel1)).to(device).to(torch.float)\n channel1 = channel1.unsqueeze(0).unsqueeze(0)\n return channel1\n return",
"def circuit():\n np.random.seed(1967)\n for gates in gates_per_layers:\n for gate in gates:\n qml.apply(gate)\n return qml.expval(qml.PauliZ(0))",
"def get_surfaces(self):\n if not self.surfaces is None:\n return self.surfaces.copy()\n else:\n return None",
"def drawIsoSurfaces( self ):\n #research\n profprint()\n\n slicer.modules.NeedleFinderWidget.hideContourButton.setEnabled(1)\n modelNodes = slicer.util.getNodes('vtkMRMLModelNode*')\n \n v= vtk.vtkAppendPolyData()\n canContinue = 0\n for modelNode in modelNodes.values():\n print \"for\"\n if modelNode.GetAttribute(\"nth\")!=None and modelNode.GetDisplayVisibility()==1 :\n canContinue = 1\n v.AddInputData(modelNode.GetPolyData())\n \n if canContinue ==1:\n modeller = vtk.vtkImplicitModeller()\n modeller.SetInputConnection(v.GetOutputPort())\n modeller.SetSampleDimensions(60,60,60)\n modeller.SetCapping(0)\n modeller.AdjustBoundsOn()\n modeller.SetProcessModeToPerVoxel() \n modeller.SetAdjustDistance(1)\n modeller.SetMaximumDistance(1.0)\n modeller.Update()\n \n contourFilter = vtk.vtkContourFilter()\n contourFilter.SetNumberOfContours(1)\n contourFilter.SetInputConnection(modeller.GetOutputPort()) \n contourFilter.ComputeNormalsOn()\n contourFilter.ComputeScalarsOn()\n contourFilter.UseScalarTreeOn()\n contourFilter.SetValue(1,10)\n # contourFilter.SetValue(2,13)\n # contourFilter.SetValue(3,15)\n # contourFilter.SetValue(4,20)\n # contourFilter.SetValue(5,25)\n contourFilter.Update()\n isoSurface = contourFilter.GetOutputDataObject(0)\n\n self.AddContour(isoSurface)",
"def _repr_(self):\n return \"Projective hypersurface defined by %s in %s\"%(\n self.defining_polynomial(), self.ambient_space())",
"def surfaces(self):\n return self._surfaces",
"def check_visibility(self):\r\n\r\n for gs in self.ground_stations:\r\n if self.visible ^ (elevation_dot_product(self.r_ecef,self.ground_stations[gs][1],self.earth) > 0.0):\r\n self.visible ^= 1\r\n self.gs_id = self.ground_stations[gs][0]\r\n return True",
"def drawIsoSurfaces0( self ):\n #research\n profbox()\n modelNodes = slicer.util.getNodes('vtkMRMLModelNode*')\n v= vtk.vtkAppendPolyData()\n \n for modelNode in modelNodes.values():\n if modelNode.GetAttribute(\"nth\")!=None and modelNode.GetDisplayVisibility()==1 :\n v.AddInput(modelNode.GetPolyData())\n \n modeller = vtk.vtkImplicitModeller()\n modeller.SetInput(v.GetOutput())\n modeller.SetSampleDimensions(self.dim.value,self.dim.value,self.dim.value)\n modeller.SetCapping(0)\n modeller.SetAdjustBounds(self.abonds.value)\n modeller.SetProcessModeToPerVoxel() \n modeller.SetAdjustDistance(self.adist.value/100)\n modeller.SetMaximumDistance(self.maxdist.value/100) \n \n contourFilter = vtk.vtkContourFilter()\n contourFilter.SetNumberOfContours(self.nb.value)\n contourFilter.SetInputConnection(modeller.GetOutputPort()) \n contourFilter.ComputeNormalsOn()\n contourFilter.ComputeScalarsOn()\n contourFilter.UseScalarTreeOn()\n contourFilter.SetValue(self.contour.value,self.contourValue.value)\n contourFilter.SetValue(self.contour2.value,self.contourValue2.value)\n contourFilter.SetValue(self.contour3.value,self.contourValue3.value)\n contourFilter.SetValue(self.contour4.value,self.contourValue4.value)\n contourFilter.SetValue(self.contour5.value,self.contourValue5.value)\n\n isoSurface = contourFilter.GetOutput()\n self.AddContour(isoSurface)",
"def get_frame_ship_state_basic(self, obs, current_player):\n\n entities_map = np.zeros((self.MAP_SIZE, self.MAP_SIZE))\n\n for player, (_, sy, ships) in enumerate(obs.players):\n for ship in ships.values():\n ship_pos, halite = ship[0], ship[1]\n row = ship_pos // self.MAP_SIZE\n col = ship_pos % self.MAP_SIZE\n if player != current_player:\n entities_map[row, col] = -halite if halite > 0 else -1\n else:\n entities_map[row, col] = halite if halite > 0 else 1\n return entities_map",
"def get_allsky(self):\n band = self.get_band()\n septon = self.is_septon()\n if band == '10_90' or band == '30_90' or septon:\n allsky = True\n else:\n allsky = False\n return allsky",
"def energy_surf(dbf, comps, phases, mode=None, **kwargs):\n # Here we check for any keyword arguments that are special, i.e.,\n # there may be keyword arguments that aren't state variables\n pdens_dict = unpack_kwarg(kwargs.pop('pdens', 2000), default_arg=2000)\n model_dict = unpack_kwarg(kwargs.pop('model', Model), default_arg=Model)\n\n # Convert keyword strings to proper state variable objects\n # If we don't do this, sympy will get confused during substitution\n statevar_dict = \\\n dict((v.StateVariable(key), value) \\\n for (key, value) in kwargs.items())\n\n # Generate all combinations of state variables for 'map' calculation\n # Wrap single values of state variables in lists\n # Use 'kwargs' because we want state variable names to be stringified\n statevar_values = [_listify(val) for val in kwargs.values()]\n statevars_to_map = [dict(zip(kwargs.keys(), prod)) \\\n for prod in itertools.product(*statevar_values)]\n\n # Consider only the active phases\n active_phases = dict((name.upper(), dbf.phases[name.upper()]) \\\n for name in phases)\n comp_sets = {}\n # Construct a list to hold all the data\n all_phase_data = []\n for phase_name, phase_obj in sorted(active_phases.items()):\n # Build the symbolic representation of the energy\n mod = model_dict[phase_name]\n # if this is an object type, we need to construct it\n if isinstance(mod, type):\n try:\n mod = mod(dbf, comps, phase_name)\n except DofError:\n # we can't build the specified phase because the\n # specified components aren't found in every sublattice\n # we'll just skip it\n logger.warning(\"\"\"Suspending specified phase %s due to\n some sublattices containing only unspecified components\"\"\",\n phase_name)\n continue\n # As a last resort, treat undefined symbols as zero\n # But warn the user when we do this\n # This is consistent with TC's behavior\n undefs = list(mod.ast.atoms(Symbol) - mod.ast.atoms(v.StateVariable))\n for undef in undefs:\n mod.ast = mod.ast.xreplace({undef: float(0)})\n logger.warning('Setting undefined symbol %s for phase %s to zero',\n undef, phase_name)\n # Construct an ordered list of the variables\n variables, sublattice_dof = generate_dof(phase_obj, mod.components)\n\n # Build the \"fast\" representation of that model\n comp_sets[phase_name] = make_callable(mod.ast, \\\n list(statevar_dict.keys()) + variables, mode=mode)\n\n # Get the site ratios in each sublattice\n site_ratios = list(phase_obj.sublattices)\n\n # Eliminate pure vacancy endmembers from the calculation\n vacancy_indices = list()\n for idx, sublattice in enumerate(phase_obj.constituents):\n if 'VA' in sorted(sublattice) and 'VA' in sorted(comps):\n vacancy_indices.append(sorted(sublattice).index('VA'))\n if len(vacancy_indices) != len(phase_obj.constituents):\n vacancy_indices = None\n logger.debug('vacancy_indices: %s', vacancy_indices)\n # Add all endmembers to guarantee their presence\n points = endmember_matrix(sublattice_dof,\n vacancy_indices=vacancy_indices)\n\n # Sample composition space for more points\n if sum(sublattice_dof) > len(sublattice_dof):\n points = np.concatenate((points,\n point_sample(sublattice_dof,\n pdof=pdens_dict[phase_name])\n ))\n\n\n\n # If there are nontrivial sublattices with vacancies in them,\n # generate a set of points where their fraction is zero and renormalize\n for idx, sublattice in enumerate(phase_obj.constituents):\n if 'VA' in set(sublattice) and len(sublattice) > 1:\n var_idx = variables.index(v.SiteFraction(phase_name, idx, 'VA'))\n addtl_pts = np.copy(points)\n # set vacancy 
fraction to log-spaced between 1e-10 and 1e-6\n addtl_pts[:, var_idx] = np.power(10.0, -10.0*(1.0 - addtl_pts[:, var_idx]))\n # renormalize site fractions\n cur_idx = 0\n for ctx in sublattice_dof:\n end_idx = cur_idx + ctx\n addtl_pts[:, cur_idx:end_idx] /= \\\n addtl_pts[:, cur_idx:end_idx].sum(axis=1)[:, None]\n cur_idx = end_idx\n # add to points matrix\n points = np.concatenate((points, addtl_pts), axis=0)\n\n data_dict = {'Phase': phase_name}\n # Generate input d.o.f matrix for all state variable combinations\n for statevars in statevars_to_map:\n # Prefill the state variable arguments to the energy function\n energy_func = \\\n lambda *args: comp_sets[phase_name](\n *itertools.chain(list(statevars.values()),\n args))\n # Get the stable points and energies for this configuration\n # Set max refinements equal to the number of independent dof\n mxr = sum(phase_obj.sublattices) - len(phase_obj.sublattices)\n refined_points, energies = \\\n refine_energy_surf(points, None, phase_obj, comps,\n variables, energy_func, max_iterations=-1)\n try:\n data_dict['GM'].extend(energies)\n for statevar in kwargs.keys():\n data_dict[statevar].extend(\n list(np.repeat(list(statevars.values()),\n len(refined_points))))\n except KeyError:\n data_dict['GM'] = list(energies)\n for statevar in kwargs.keys():\n data_dict[statevar] = \\\n list(np.repeat(list(statevars.values()),\n len(refined_points)))\n\n # Map the internal degrees of freedom to global coordinates\n\n # Normalize site ratios\n # Normalize by the sum of site ratios times a factor\n # related to the site fraction of vacancies\n site_ratio_normalization = np.zeros(len(refined_points))\n for idx, sublattice in enumerate(phase_obj.constituents):\n vacancy_column = np.ones(len(refined_points))\n if 'VA' in set(sublattice):\n var_idx = variables.index(v.SiteFraction(phase_name, idx, 'VA'))\n vacancy_column -= refined_points[:, var_idx]\n site_ratio_normalization += site_ratios[idx] * vacancy_column\n\n for comp in sorted(comps):\n if comp == 'VA':\n continue\n avector = [float(cur_var.species == comp) * \\\n site_ratios[cur_var.sublattice_index] for cur_var in variables]\n try:\n data_dict['X('+comp+')'].extend(list(np.divide(np.dot(\n refined_points[:, :], avector), site_ratio_normalization)))\n except KeyError:\n data_dict['X('+comp+')'] = list(np.divide(np.dot(\n refined_points[:, :], avector), site_ratio_normalization))\n\n # Copy coordinate information into data_dict\n # TODO: Is there a more memory-efficient way to deal with this?\n # Perhaps with hierarchical indexing...\n try:\n for column_idx, data in enumerate(refined_points.T):\n data_dict[str(variables[column_idx])].extend(list(data))\n except KeyError:\n for column_idx, data in enumerate(refined_points.T):\n data_dict[str(variables[column_idx])] = list(data)\n\n all_phase_data.append(pd.DataFrame(data_dict))\n\n # all_phases_data now contains energy surface information for the system\n return pd.concat(all_phase_data, axis=0, join='outer', \\\n ignore_index=True, verify_integrity=False)",
"def __getstate__(self):\n state = composites.Composite.__getstate__(self)\n return state",
"def _get_igp_isis(self):\n return self.__igp_isis",
"def surface_mask(self):\n return np.vectorize(lambda name: name in self.nvertices.keys())(self.name)",
"def drawIsoSurfaces(self):\r\n # research\r\n profprint()\r\n\r\n slicer.modules.NeedleFinderWidget.hideContourButton.setEnabled(1)\r\n modelNodes = slicer.util.getNodes('vtkMRMLModelNode*')\r\n\r\n v = vtk.vtkAppendPolyData()\r\n canContinue = 0\r\n for modelNode in modelNodes.values():\r\n print \"for\"\r\n if modelNode.GetAttribute(\"nth\") != None and modelNode.GetDisplayVisibility() == 1 :\r\n canContinue = 1\r\n v.AddInputData(modelNode.GetPolyData())\r\n\r\n if canContinue == 1:\r\n modeller = vtk.vtkImplicitModeller()\r\n modeller.SetInputConnection(v.GetOutputPort())\r\n modeller.SetSampleDimensions(60, 60, 60)\r\n modeller.SetCapping(0)\r\n modeller.AdjustBoundsOn()\r\n modeller.SetProcessModeToPerVoxel()\r\n modeller.SetAdjustDistance(1)\r\n modeller.SetMaximumDistance(1.0)\r\n modeller.Update()\r\n\r\n contourFilter = vtk.vtkContourFilter()\r\n contourFilter.SetNumberOfContours(1)\r\n contourFilter.SetInputConnection(modeller.GetOutputPort())\r\n contourFilter.ComputeNormalsOn()\r\n contourFilter.ComputeScalarsOn()\r\n contourFilter.UseScalarTreeOn()\r\n contourFilter.SetValue(1, 10)\r\n # contourFilter.SetValue(2,13)\r\n # contourFilter.SetValue(3,15)\r\n # contourFilter.SetValue(4,20)\r\n # contourFilter.SetValue(5,25)\r\n contourFilter.Update()\r\n isoSurface = contourFilter.GetOutputDataObject(0)\r\n\r\n self.AddContour(isoSurface)",
"def squareSurface(*args, caching: bool=True, continuityType1: Union[int, bool]=2,\n continuityType2: Union[int, bool]=2, continuityType3: Union[int, bool]=2,\n continuityType4: Union[int, bool]=2, curveFitCheckpoints: Union[int, bool]=5,\n endPointTolerance: Union[float, bool]=0.1, nodeState: Union[int, bool]=0,\n rebuildCurve1: bool=False, rebuildCurve2: bool=False, rebuildCurve3:\n bool=False, rebuildCurve4: bool=False, constructionHistory: bool=True, name:\n AnyStr=\"\", object: bool=True, polygon: int=0, q=True, query=True, e=True,\n edit=True, **kwargs)->Union[List[AnyStr], Any]:\n pass",
"def closed_u(self):\n sa = ShapeAnalysis_Surface(self.surface())\n return sa.IsUClosed()",
"def surface():\n \"\"\"\n Get surface for plotting.\n\n :return fsaverage: surface locations as in nilearn\n :return surf: surface for plotting\n \"\"\"\n\n fsaverage = fetch_surf_fsaverage('fsaverage')\n surf = {}\n\n for key in [t + '_' + h for t in ['pial', 'infl'] for h in ['left', 'right']]:\n\n surf = load_surf_mesh(fsaverage[key])\n x, y, z = np.asarray(surf[0].T, dtype='<f4')\n i, j, k = np.asarray(surf[1].T, dtype='<i4')\n\n surf[key] = dict(x=x, y=y, z=z, i=i, j=j, k=k)\n\n return fsaverage, surf",
"def isi_calc(self):\n arg = erfinv(0.8)*1.0E6/(self.speedup*self.br_nominal)\n print('arg: ', arg)\n\n # calculate center eye opening with no additional impairments\n self.isi_center = 2.0*erf(arg/self.tc) - self.l_1 # column Z\n\n # calculate center eye opening with residual DJ (DJ - DCD)\n self.isi_dj_center = (erf(arg*(1.0+self.dj_ui)/self.tc) + erf(arg*(1.0-self.dj_ui)/self.tc) - self.l_1) # column AD\n\n # calculate eye closing induced by interferometric effects from link end reflections\n mean_reflection = math.pow(10.0,0.05*(self.rx_reflection + self.tx_reflection)) # cell AB5\n er_lin = math.pow(10.0,0.1*self.er_dB_min) # cell AB7\n\n\n arg1 = np.sqrt(2.0*er_lin*self.isi_dj_center*(er_lin-1.0) + (er_lin+1.0)*self.l_1)\n print('arg1: ', arg1)\n arg2 = np.divide(arg1,self.isi_dj_center)\n arg3 = (2.0*self.ref_nf*np.power(10.0,-0.1*self.chil)*mean_reflection)\n self.isi_reflection = self.l_1-np.multiply(arg2,arg3)\n\n # calculate center eye opening with both residual DJ and reflection degradations included\n self.isi_dj_refl_closed = np.multiply(self.isi_dj_center, self.isi_reflection) # column AA\n print('isi_dj_refl_closed (AA) : ', self.isi_dj_refl_closed)\n \n # calculate eye opening at the corners with no additional impairments\n eff_rx_eye = 2.0*(0.5-self.X2)*self.speedup\n self.isi_corners = (erf(arg*(1.0+eff_rx_eye)/self.tc) + erf(arg*(1.0-eff_rx_eye)/self.tc) - self.l_1) # column AB\n\n # calculate eye opening at the corners with residual DJ impairment\n self.isi_dj_corners = (erf(arg*(1.0+eff_rx_eye+self.dj_ui)/self.tc) + erf(arg*(1.0-eff_rx_eye-self.dj_ui)/self.tc) - self.l_1) # column AC\n self.isi_tp4_rx = (erf(arg*(1.0+eff_rx_eye)/self.rx_1090_rise) + erf(arg*(1.0-eff_rx_eye)/self.rx_1090_rise) - 1) # cell AG5\n\n # end of GbE10.isi_calcdef isi_calc(self):",
"def render(self):\n self.axial.Render()\n self.coronal.Render()\n self.sagittal.Render()\n #self.isosurface.Render()\n #self.rwi_pcp.Render()",
"def getstate(self):\r\n return GPBase.getstate(self) + [self.Z,\r\n self.num_inducing,\r\n self.has_uncertain_inputs,\r\n self.X_variance]",
"def mc_inv_iso(self, instafname=None, ref=True, phase=True, group=False, outdir='./workingdir', wdisp=0.2, rffactor=40.,\\\n monoc=True, verbose=False, step4uwalk=1500, numbrun=10000, subsize=1000, nprocess=None, parallel=True):\n if not os.path.isdir(outdir):\n os.makedirs(outdir)\n if instafname is None:\n stalst = self.waveforms.list()\n else:\n stalst = []\n with open(instafname, 'r') as fid:\n for line in fid.readlines():\n sline = line.split()\n if sline[2] == '1':\n stalst.append(sline[0])\n if not ref and wdisp != 1.:\n wdisp = 1.\n print 'wdisp is forced to be 1. for inversion without receiver function data'\n if phase and group:\n dispdtype = 'both'\n elif phase and not group:\n dispdtype = 'ph'\n else:\n dispdtype = 'gr'\n ista = 0\n Nsta = len(stalst)\n for staid in stalst:\n netcode, stacode = staid.split('.')\n staid_aux = netcode+'_'+stacode\n stla, elev, stlo = self.waveforms[staid].coordinates.values()\n #-----------------------------\n # get data\n #-----------------------------\n vpr = vprofile.vprofile1d()\n if phase:\n try:\n indisp = self.auxiliary_data['RayDISPcurve']['ray']['ph'][staid_aux].data.value\n vpr.get_disp(indata=indisp, dtype='ph', wtype='ray')\n except KeyError:\n print 'WARNING: No phase dispersion data for station: '+staid\n if group:\n try:\n indisp = self.auxiliary_data['RayDISPcurve']['ray']['gr'][staid_aux].data.value\n vpr.get_disp(indata=indisp, dtype='gr', wtype='ray')\n except KeyError:\n print 'WARNING: No group dispersion data for station: '+staid\n if vpr.data.dispR.npper == 0 and vpr.data.dispR.ngper == 0:\n print 'WARNING: No dispersion data for station: '+staid \n continue\n if ref:\n try:\n inrf = self.auxiliary_data['RefR'][staid_aux+'_P'].data.value\n N = self.auxiliary_data['RefR'][staid_aux+'_P'].parameters['npts']\n dt = self.auxiliary_data['RefR'][staid_aux+'_P'].parameters['delta']\n indata = np.zeros((3, N))\n indata[0, :]= np.arange(N)*dt\n indata[1, :]= inrf[0, :]\n indata[2, :]= inrf[3, :]\n vpr.get_rf(indata = indata)\n except KeyError:\n print 'WARNING: No receiver function data for station: '+staid\n #-----------------------------\n # initial model parameters\n #-----------------------------\n vsdata = self.auxiliary_data['ReferenceModel'][staid_aux].data.value\n crtthk = self.auxiliary_data['MohoDepth'][staid_aux].parameters['moho_depth']\n sedthk = self.auxiliary_data['SediDepth'][staid_aux].parameters['sedi_depth']\n vpr.model.isomod.parameterize_input(zarr=vsdata[:, 0], vsarr=vsdata[:, 1], crtthk=crtthk, sedthk=sedthk, maxdepth=200.)\n vpr.getpara()\n ista += 1\n # if staid != 'AK.HDA': continue\n # # # if np.random.rand() > 0.9:\n # # # print staid\n # # # return vpr, vsdata\n # # # else:\n # # # continue\n print '--- Joint MC inversion for station: '+staid+' '+str(ista)+'/'+str(Nsta)\n if parallel:\n vpr.mc_joint_inv_iso_mp(outdir=outdir, dispdtype=dispdtype, wdisp=wdisp, rffactor=rffactor,\\\n monoc=monoc, pfx=staid, verbose=verbose, step4uwalk=step4uwalk, numbrun=numbrun, subsize=subsize, nprocess=nprocess)\n else:\n vpr.mc_joint_inv_iso(outdir=outdir, dispdtype=dispdtype, wdisp=wdisp, rffactor=rffactor,\\\n monoc=monoc, pfx=staid, verbose=verbose, step4uwalk=step4uwalk, numbrun=numbrun)\n # vpr.mc_joint_inv_iso(outdir=outdir, wdisp=wdisp, rffactor=rffactor,\\\n # monoc=monoc, pfx=staid, verbose=verbose, step4uwalk=step4uwalk, numbrun=numbrun)\n # if staid == 'AK.COLD':\n # return vpr\n return",
"def gameToNetInput(g, current):\n\n gridCount = Q_GAME_NUM_GRIDS if current is None else Q_PIECE_NUM_GRIDS\n if Q_USE_CONVOLUTIONAL_LAYERS:\n states = np.zeros((1, g.height, g.width, gridCount))\n grid = g.currentGrid()\n for j, r in enumerate(grid):\n for i, s in enumerate(r):\n if s is not None:\n states[0][j][i][netInputIndex(s, current, i, j)] = 1\n\n return states\n else:\n # get the state of the environment\n state = g.toList()\n\n # set up an array which can be fed into a network\n size = g.area()\n states = np.zeros((1, size * gridCount), dtype=np.int)\n # go through each element in the state, and set the input array position, corresponding to the\n # index of the state and condition of the state\n for i, s in enumerate(state):\n x, y = g.singlePos(i)\n if s is not None:\n states[0][netInputIndex(s, current, x, y) * size + y * g.width + x] = 1\n\n return states",
"def state(self) -> np.ndarray:\n raise NotImplementedError(\n \"state() method has not been implemented in the environment {}.\".format(\n self.metadata.get(\"name\", self.__class__.__name__)\n )\n )",
"def state(self) -> np.ndarray:\n raise NotImplementedError(\n \"state() method has not been implemented in the environment {}.\".format(\n self.metadata.get(\"name\", self.__class__.__name__)\n )\n )",
"def is_stationary(self):\n return self.kernel.is_stationary()",
"def _updated(self, event=None):\n if event == ItemChangedType.COMPLEX_MODE:\n self._syncDataWithParent()\n\n elif event in (ItemChangedType.COLORMAP,\n Item3DChangedType.INTERPOLATION):\n self._updateScenePrimitive()\n super(ComplexIsosurface, self)._updated(event)",
"def current_state(self):\n return {\n \"cube_pos\": self.mujoco_simulation.get_qpos(\"cube_position\"),\n \"cube_quat\": self.mujoco_simulation.get_qpos(\"cube_rotation\"),\n \"cube_face_angle\": self.mujoco_simulation.get_face_angles(\"cube\"),\n }",
"def get_simple_surfaces(self, sub):\n cpt_arg_1 = 't1mri/default_acquisition/default_analysis/folds/3.1/default_session_auto'\n cpt_arg_2 = '_default_session_auto.arg'\n\n if os.path.isdir(os.path.join(self.data_dir, str(sub) + '/')):\n self.surfaces = dict()\n graph_file = os.path.join(self.data_dir, str(sub), cpt_arg_1,\n self.side + str(sub) + cpt_arg_2)\n skel_file = os.path.join(self.data_dir, str(sub), self.cpt_skel_1,\n self.side + self.cpt_skel_2 + str(sub) + self.cpt_skel_3)\n graph = aims.read(graph_file)\n self.skel = aims.read(skel_file)\n\n for v in graph.vertices():\n if 'label' in v:\n bbmin_surface = v['Tal_boundingbox_min']\n bbmax_surface = v['Tal_boundingbox_max']\n bck_map = v['aims_ss']\n\n if all([a >= b for (a, b) in zip(bbmin_surface, self.bbmin)]) and all([a <= b for (a, b) in zip(bbmax_surface, self.bbmax)]):\n for bucket in bck_map:\n if bucket.size() > self.ss_size: # In order to keep only large enough simple surfaces\n self.surfaces[len(self.surfaces)] = v\n\n return self.surfaces",
"def test_qft_reconstruction(self, interface):\n circuit = qft_circuit(3, interface=interface)\n bits, recipes = circuit()\n shadow = ClassicalShadow(bits, recipes)\n\n state = shadow.global_snapshots()\n assert state.shape == (10000, 8, 8)\n\n state = np.mean(state, axis=0)\n expected = np.exp(np.arange(8) * 2j * np.pi / 8) / np.sqrt(8)\n expected = np.outer(expected, np.conj(expected))\n\n assert qml.math.allclose(state, expected, atol=1e-1)",
"def get_oxygen_surface(self):\n\n # Fill the background as black\n self.oxygen_surface.fill(pygame.SRCALPHA)\n\n # Draw each cell onto the surface, but make it damaged if it is\n # damaged.\n\n for item in self.oxygen_cards[-self.count:]:\n item.draw()\n for item in self.damaged_oxygen_cards[:-self.count]:\n item.draw()\n if self.count == 0:\n for item in self.damaged_oxygen_cards[:]:\n item.draw()\n\n # Return the surface\n return self.oxygen_surface",
"def calculate_surface_heatflux(self, weather, spaces_dict, surface, temp_record, Coeff, space, h_surface, ShadowsFlag, ns, shadow_record, shade_surf_list, surfaces_dict, Aflag, terrain, areaDict, areaWinDict, shadowRatios, shadowRatioIndex):\r\n #print \"Reaching Surface function...\"\r\n\r\n # First get the As\r\n A_total = self.get_A(surface, areaDict, areaWinDict)\r\n if Aflag == 0:\r\n # If it is the first surface of the space, label the space ID in the log file:\r\n la = str(surface.obj_id)\r\n lb = str(surface.obj_type)\r\n #TM_user.info(\"%s,surface area,%s,%s\" % (la, A_total, lb))\r\n A_noWin = self.get_A_noWin(surface, areaDict, areaWinDict)\r\n A_noOp = self.get_A_noOp(surface, areaDict, areaWinDict)\r\n T_space = spaces_dict[space.obj_id][1]\r\n T1 = weather[\"t_outside\"]\r\n hc_external = float(self.get_hc_external(weather, surface, h_surface, terrain))\r\n transmitted_win = 0\r\n Q_flux = 0\r\n\r\n # need the surface related information, T_space, U, R3\r\n U = self.get_U_surface_e(A_total, A_noOp, surface, areaWinDict) # U = Infor_surface{11,i_surface}; Defined Below\r\n #print U\r\n R3 = 1/U\r\n # Using calculations from: self.surface.constr.layer.C # Infor_surface{10, i_surface} ; from gbXML\r\n C = self.get_C_surface(A_total, A_noOp, surface, Coeff, areaWinDict) # need to pass surface and opening ids\r\n #print C\r\n\r\n temperature = Temperature()\r\n\r\n #Sub-routines for each wall type based on the returned hc_external\r\n # This hc is different for each surface type so moved under this sub-routine area\r\n #hc = 3.076 sent this to the Temperature Object\r\n if surface.obj_type == \"ExteriorWall\":\r\n transmitted_win, Q_flux = temperature.exterior_wall(surface, hc_external, T1, A_total, A_noWin, weather, R3, C, T_space, temp_record, ShadowsFlag, ns, shadow_record, shade_surf_list, surfaces_dict, areaWinDict, shadowRatios, areaDict, shadowRatioIndex)\r\n #print Q_flux\r\n if surface.obj_type == \"Roof\":\r\n transmitted_win, Q_flux = temperature.roof(surface, hc_external, T1, A_total, A_noWin, weather, R3, C, A_noOp, T_space, temp_record, ShadowsFlag, ns, shadow_record, shade_surf_list, surfaces_dict, areaWinDict, shadowRatios, areaDict, shadowRatioIndex)\r\n #print Q_flux # Matches for Four Room\r\n if surface.obj_type == \"InteriorWall\":\r\n transmitted_win, Q_flux = temperature.interior_wall(surface, A_total, R3, C, spaces_dict, T_space, temp_record)\r\n #print Q_flux # Matches for Four Room\r\n if surface.obj_type == \"UndergroundWall\":\r\n transmitted_win, Q_flux = temperature.underground_wall(surface, A_total, R3, C, T_space, temp_record) # No instance of yet to test\r\n if surface.obj_type == \"RaisedFloor\":\r\n # This will eventually need some values when we start using raised floors\r\n transmitted_win, Q_flux = temperature.raised_floor(surface, hc_external, T1, A_total, A_noWin, weather, R3, C, A_noOp, T_space, temp_record) # Not instance of yet to test\r\n\r\n return transmitted_win, Q_flux",
"def __getitem__(self, i):\n T0, S0 = get_surface_ts(self.nc, i)\n \n # average the variables if we got multiple time elements\n if isinstance(i, slice):\n T0, S0, = T0.mean(axis=0), S0.mean(axis=0)\n if self.p == 0.:\n rho, drhodT, drhodS = jmd95.eos.state_surface(T0, S0)\n else:\n rho, drhodT, drhodS = jmd95.eos.state(self.p, T0, S0)\n return rho",
"def get_observation_verbose(self):\n state = {}\n for grid_id, grid in self.grids.items():\n o = grid.get_active_orders(self.city_time)\n d = list(grid.get_idle_drivers().values())\n state[grid_id] = [o,d]\n return state",
"def get_state(self):\r\n return self.currentObservation",
"def extract_goal_state(self):\n time = rospy.get_time()\n ref_time = time - self.last_time\n future_time = ref_time + self.update_rate\n\n # get state of future time in global trajectory\n return df.compute_output3D(self.global_solution, self.order, self.time[self.future_index], future_time)",
"def StateMachine(self):\n if self.mode is ALL:\n self.which_state()\n\n if self.current_state == FB:\n # print(\"FORWARD/BACKWARD\")\n self.FB()\n elif self.current_state == LAT:\n # print(\"LATERAL\")\n self.LAT()\n elif self.current_state == ROT:\n # print(\"ROTATION\")\n self.ROT()\n elif self.current_state == COMBI:\n # print(\"COMBINED\")\n self.COMBI()\n\n return self.return_bezier_params()",
"def get_state(self):\n return self.kf.x[:self.dim_z].squeeze()",
"def inactive_surfaces(self):\n return [surface for surface in self._surfaces if not surface.active]",
"def update(self):\n if self.api is None:\n return\n self.api.update()\n\n if self.var_type == 'Time':\n self.var_state = self.api.result['timeObservation']\n return\n result = self.api.result[self.var_type.lower()]\n if self.var_type == 'Sky':\n self.var_state = result['name']\n self.var_icon = get_sky_icon(result['code'])\n elif self.var_type == 'Temperature':\n self.var_state = round(float(result['tc']), 1)\n elif self.var_type == 'Humidity':\n self.var_state = result\n elif self.var_type == 'Wind':\n if self.var_detail == 'Direction':\n self.var_state = round(float(result['wdir']), 1)\n else:\n self.var_state = round(float(result['wspd']), 1)\n elif self.var_type == 'Precipitation':\n self.var_state = round(float(result['sinceOntime']), 1)\n p_type = result['type']\n if p_type == 0:\n self.var_units = 'mm'\n self.var_icon = 'mdi:weather-sunny'\n elif p_type == 1:\n self.var_units = 'mm'\n self.var_icon = 'mdi:weather-rainy'\n elif p_type == 2:\n self.var_units = 'mm'\n self.var_icon = 'mdi:weather-snowy'\n elif p_type == 3:\n self.var_units = 'cm'\n self.var_icon = 'mdi:weather-snowy-rainy'\n elif self.var_type == 'Pressure':\n if self.var_detail == 'Surface':\n self.var_state = round(float(result['surface']), 1)\n else:\n self.var_state = round(float(result['seaLevel']), 1)\n elif self.var_type == 'Lightning':\n if result == '1':\n self.var_state = 'Exist'\n else:\n self.var_state = 'None'",
"def Sa(self, x_surface, geom):\n\n return np.zeros((0, 0), dtype=float)",
"def hideIsoSurfaces(self):\n #research\n profprint()\n contourNode = slicer.util.getNode(self.contourNode)\n widget = slicer.modules.NeedleFinderWidget\n if contourNode != None:\n contourNode.SetDisplayVisibility(abs(widget.hideContourButton.isChecked()-1))\n contourNode.GetModelDisplayNode().SetSliceIntersectionVisibility(abs(widget.hideContourButton.isChecked()-1))",
"def observation_function(self, game_state):\n return game_state.make_observation(self.index)",
"def is_Hypersurface(self):\n return isinstance(self, (ProjectiveHypersurface, AffineHypersurface))",
"def _get_obs(self):\n # return np.concatenate((self.world.state[:6], self.world.state[7:13]))\n return np.concatenate((self.world.state, np.zeros(7)))\n # return self.world.state",
"def plot_state(self, **options):\n f = plt.gcf()\n if len(f.axes) < 2:\n f, _ = plt.subplots(1, 2, figsize=(\n 13, 6), sharex='row', sharey='row')\n\n gp = self.target_model\n\n # Draw the GP surface\n visin.draw_contour(\n gp.predict_mean,\n gp.bounds,\n self.target_model.parameter_names,\n title='GP target surface',\n points=gp.X,\n axes=f.axes[0],\n **options)\n\n # Draw the latest acquisitions\n if options.get('interactive'):\n point = gp.X[-1, :]\n if len(gp.X) > 1:\n f.axes[1].scatter(*point, color='red')\n\n displays = [gp.instance]\n\n if options.get('interactive'):\n from IPython import display\n displays.insert(\n 0,\n display.HTML('<span><b>Iteration {}:</b> Acquired {} at {}</span>'.format(\n len(gp.Y), gp.Y[-1][0], point)))\n\n # Update\n visin._update_interactive(displays, options)\n\n acq_index = self._get_acquisition_index(self.state['n_batches'])\n\n def acq(x):\n return self.acquisition_method.evaluate(x, acq_index)\n\n # Draw the acquisition surface\n visin.draw_contour(\n acq,\n gp.bounds,\n self.target_model.parameter_names,\n title='Acquisition surface',\n points=None,\n axes=f.axes[1],\n **options)\n\n if options.get('close'):\n plt.close()",
"def drawIsoSurface(xlist, ylist, zlist, values, levels):\n dislin.suriso(xlist, len(xlist), \n ylist, len(ylist),\n zlist, len(zlist), values, levels)",
"def update_state(self):\n # return initial state if no observation was yet\n if len(self.obs_history) == 0:\n return self.kf.initial_state_mean, self.kf.initial_state_covariance\n\n hist = np.ma.masked_array(self.obs_history, mask=np.zeros((1,)))\n for i in range(len(hist)):\n if hist[i] == -1e8:\n hist[i] = np.ma.masked\n\n # print(hist, hist.shape)\n return self.kf.filter(hist)",
"def goify(self, layout=None):\n xx,yy,zz = self.getXYZ(layout)\n surf = dict(\n type='surface',\n x=xx,\n y=yy,\n z=zz\n )\n return surf",
"def full_output_state(self):\n outcomes = self.fock_basis()\n return self.calculate_state_amplitudes(outcomes, reduce_state=False)",
"def _compute_state(self):\n # TODO: duplicated code with stock_picking.py\n for production in self:\n if not production.move_raw_ids:\n production.state = 'draft'\n elif all(move.state == 'draft' for move in production.move_raw_ids):\n production.state = 'draft'\n elif all(move.state == 'cancel' for move in production.move_raw_ids):\n production.state = 'cancel'\n elif all(move.state in ('cancel', 'done') for move in production.move_raw_ids):\n production.state = 'done'\n elif production.workorder_ids and all(wo_state in ('done', 'cancel') for wo_state in production.workorder_ids.mapped('state')):\n production.state = 'to_close'\n elif not production.workorder_ids and production.qty_producing >= production.product_qty:\n production.state = 'to_close'\n elif any(wo_state in ('progress', 'done') for wo_state in production.workorder_ids.mapped('state')):\n production.state = 'progress'\n elif not float_is_zero(production.qty_producing, precision_rounding=production.product_uom_id.rounding):\n production.state = 'progress'\n elif any(not float_is_zero(move.quantity_done, precision_rounding=move.product_uom.rounding or move.product_id.uom_id.rounding) for move in production.move_raw_ids):\n production.state = 'progress'\n else:\n production.state = 'confirmed'\n\n # Compute reservation state\n # State where the reservation does not matter.\n production.reservation_state = False\n # Compute reservation state according to its component's moves.\n if production.state not in ('draft', 'done', 'cancel'):\n relevant_move_state = production.move_raw_ids._get_relevant_state_among_moves()\n if relevant_move_state == 'partially_available':\n if production.bom_id.operation_ids and production.bom_id.ready_to_produce == 'asap':\n production.reservation_state = production._get_ready_to_produce_state()\n else:\n production.reservation_state = 'confirmed'\n elif relevant_move_state != 'draft':\n production.reservation_state = relevant_move_state",
"def __init__(self, config, forward, inverse, active_rows, active_cols):\n\n # Default IO configuration options\n self.input = {}\n self.output = {'plot_surface_components': False}\n\n self.iv = inverse\n self.fm = forward\n self.bbl = '[]'\n self.radiance_correction = None\n self.meas_wl = forward.instrument.wl_init\n self.meas_fwhm = forward.instrument.fwhm_init\n self.writes = 0\n self.n_rows = 1\n self.n_cols = 1\n self.n_sv = len(self.fm.statevec)\n self.n_chan = len(self.fm.instrument.wl_init)\n\n if 'input' in config:\n self.input.update(config['input'])\n if 'output' in config:\n self.output.update(config['output'])\n if 'logging' in config:\n logging.config.dictConfig(config)\n\n # A list of all possible input data sources\n self.possible_inputs = [\"measured_radiance_file\",\n \"reference_reflectance_file\",\n \"reflectance_file\",\n \"obs_file\",\n \"glt_file\",\n \"loc_file\",\n \"surface_prior_mean_file\",\n \"surface_prior_variance_file\",\n \"rt_prior_mean_file\",\n \"rt_prior_variance_file\",\n \"instrument_prior_mean_file\",\n \"instrument_prior_variance_file\",\n \"radiometry_correction_file\"]\n\n # A list of all possible outputs. There are several special cases\n # that we handle differently - the \"plot_directory\", \"summary_file\",\n # \"Data dump file\", etc.\n wl_names = [('Channel %i' % i) for i in range(self.n_chan)]\n sv_names = self.fm.statevec.copy()\n self.output_info = {\n \"estimated_state_file\":\n (sv_names,\n '{State Parameter, Value}',\n '{}'),\n \"estimated_reflectance_file\":\n (wl_names,\n '{Wavelength (nm), Lambertian Reflectance}',\n '{0.0,1.0}'),\n \"estimated_emission_file\":\n (wl_names,\n '{Wavelength (nm), Emitted Radiance (uW nm-1 cm-2 sr-1)}',\n '{}'),\n \"modeled_radiance_file\":\n (wl_names,\n '{Wavelength (nm), Modeled Radiance (uW nm-1 cm-2 sr-1)}',\n '{}'),\n \"apparent_reflectance_file\":\n (wl_names,\n '{Wavelength (nm), Apparent Surface Reflectance}',\n '{}'),\n \"path_radiance_file\":\n (wl_names,\n '{Wavelength (nm), Path Radiance (uW nm-1 cm-2 sr-1)}',\n '{}'),\n \"simulated_measurement_file\":\n (wl_names,\n '{Wavelength (nm), Simulated Radiance (uW nm-1 cm-2 sr-1)}',\n '{}'),\n \"algebraic_inverse_file\":\n (wl_names,\n '{Wavelength (nm), Apparent Surface Reflectance}',\n '{}'),\n \"atmospheric_coefficients_file\":\n (wl_names,\n '{Wavelength (nm), Atmospheric Optical Parameters}',\n '{}'),\n \"radiometry_correction_file\":\n (wl_names,\n '{Wavelength (nm), Radiometric Correction Factors}',\n '{}'),\n \"spectral_calibration_file\":\n (wl_names,\n '{}',\n '{}'),\n \"posterior_uncertainty_file\":\n (sv_names,\n '{State Parameter, Value}',\n '{}')}\n\n self.defined_outputs, self.defined_inputs = {}, {}\n self.infiles, self.outfiles, self.map_info = {}, {}, '{}'\n\n # Load input files and record relevant metadata\n for q in self.input:\n if q in self.possible_inputs:\n self.infiles[q] = SpectrumFile(self.input[q])\n\n if (self.infiles[q].n_rows > self.n_rows) or \\\n (self.infiles[q].n_cols > self.n_cols):\n self.n_rows = self.infiles[q].n_rows\n self.n_cols = self.infiles[q].n_cols\n\n for inherit in ['map info', 'bbl']:\n if inherit in self.infiles[q].meta:\n setattr(self, inherit.replace(' ', '_'),\n self.infiles[q].meta[inherit])\n\n for q in self.output:\n if q in self.output_info:\n band_names, ztitle, zrange = self.output_info[q]\n n_bands = len(band_names)\n self.outfiles[q] = SpectrumFile(self.output[q], write=True,\n n_rows=self.n_rows, n_cols=self.n_cols,\n n_bands=n_bands, interleave='bip', dtype=s.float32,\n 
wavelengths=self.meas_wl, fwhm=self.meas_fwhm,\n band_names=band_names, bad_bands=self.bbl,\n map_info=self.map_info, zrange=zrange,\n ztitles=ztitle)\n\n # Do we apply a radiance correction?\n if 'radiometry_correction_file' in self.input:\n filename = self.input['radiometry_correction_file']\n self.radiance_correction, wl = load_spectrum(filename)\n\n # Last thing is to define the active image area\n if active_rows is None:\n active_rows = s.arange(self.n_rows)\n if active_cols is None:\n active_cols = s.arange(self.n_cols)\n self.iter_inds = []\n for row in active_rows:\n for col in active_cols:\n self.iter_inds.append([row, col])\n self.iter_inds = s.array(self.iter_inds)\n\n # Dave Connelly adds this line to allow iteration outside for loops.\n self.iter = 0",
"def wireframe_only(self):\n return self._wireframe_only",
"def get_subsurface(self):\n w, h = self.rect.w, self.rect.h\n surface = pg.Surface((w, h))\n surface.set_colorkey((0, 0, 0))\n return pg.Surface((w, h))",
"def irs_method(state):\n\n # First, importing all variables from the dictionary 'state'\n theta_ein2cm = state['theta_ein2cm']\n beta_boundary = state['beta_boundary']\n beta_res = state['beta_res']\n epsilon = state['epsilon']\n mu_h = state['mu_h']\n mu_v = state['mu_v']\n m = state['m']\n zeta = state['zeta']\n max_memory = state['max_memory']\n rays_per_pixel = state['rays_per_pixel']\n\n pixel2cm = theta_ein2cm * beta_boundary / beta_res # size of 1 pixel in cm in the source plane\n print('The physical size of 1 pixel is ' + str(beta_boundary / beta_res) + ' Einstein radii\\nor ' + str(\n np.format_float_scientific(pixel2cm, 2)) + ' cm in the source plane\\n')\n\n theta_boundaries = [epsilon * mu_h * beta_boundary / 2,\n epsilon * mu_v * beta_boundary / 2]\n # The number of images to draw in IRS method, assuming an ellipse in the image plane\n num_of_img = int((beta_res * epsilon) ** 2 * mu_v * mu_h * rays_per_pixel)\n print('A total of ' + str(num_of_img) + ' images for IRS method')\n state['num_of_img'] = num_of_img\n print(str(num_of_img / beta_res ** 2) + ' rays per source plane pixels')\n # The area in (Einstein-radii)^2 that each ray uniquely occupies\n s_ray = (epsilon ** 2 * mu_h * mu_v * beta_boundary ** 2) / num_of_img\n\n l_tmp = int(max_memory / m.shape[0] * 10 ** 9 / 8) # the maximum number of images to vector-compute\n n_runs = max(int(num_of_img / l_tmp), 1) # the number of sub arrays to vector-compute\n print('Max memory for array: ' + str(l_tmp * m.shape[0] * 8 / 10 ** 9) + 'GB')\n mu_grid = np.zeros((beta_res, beta_res)) # this will save the total number of rays per cell in the source plane\n start_time = time.time()\n theta = []\n beta = []\n num_cores = multiprocessing.cpu_count()\n print(str(num_cores) + ' active CPU cores')\n # starting the parallel routine, the variable mu_grid_temp_array is just a placeholder.\n mu_grid_temp_array = Parallel(n_jobs=num_cores, require='sharedmem')\\\n (delayed(parallel_irs)(i,mu_grid,l_tmp,n_runs,s_ray,theta_boundaries,start_time,state) for i in range(n_runs))\n\n if n_runs * l_tmp < num_of_img: # if some values are left\n # Drawing images locations\n theta = random_image_draw(int(num_of_img - n_runs * l_tmp), theta_boundaries[0], theta_boundaries[1])\n # Calculating locations of sources and corresponding magnitudes\n beta = af.img2src(theta, m, zeta, state)\n # Binning sources magnification\n beta_grid_h, beta_grid_v, mu_grid_temp = af.mag_binning(beta, s_ray, beta_boundary, beta_res)\n mu_grid += mu_grid_temp\n print('Finished shooting in ' + str(time.time() - start_time) + 's')\n else:\n print('Finished shooting in ' + str(time.time() - start_time) + 's')\n beta = np.ones(2, 2) # Just so that the next line can run smoothly and return beta_grid_h and beta_grid_v\n beta_grid_h, beta_grid_v, mu_grid_temp = af.mag_binning(beta, s_ray, beta_boundary, beta_res)\n\n return beta_grid_h, beta_grid_v, mu_grid",
"def Gridding(vis,uvw,image_params,obs_params,pswf):\t\n\tref_freq = obs_params['ref_freq']/1e6\n\t#print 'ref freq =', ref_freq\n\tlat \t = obs_params['lat']\n\tch_width = obs_params['ch_width']\n\tDEC \t = obs_params['DEC']\n\tStokes = image_params['Stokes']\n\t\n\tprint '--------------Gridding X stokes--------------------'\n\txgrid_wt, xgrid_uv, N = gridder(vis[0],uvw,image_params,obs_params,pswf)\n\tprint '--------------Gridding Y stokes--------------------'\n\tygrid_wt, ygrid_uv, N = gridder(vis[1],uvw,image_params,obs_params,pswf)\n\n\tN = np.shape(xgrid_wt)[0]\n\tgrid_uv = np.zeros([N, N], dtype=complex)\n\tgrid_wt = np.zeros([N, N], dtype=complex)\n\t\n\tif Stokes == 'I':\n\t\t#combine X and Y gridded vis to create the I pol gridded vis\n\t\t# I = (XX+YY)/2\n\t\tgrid_uv.real = (ygrid_uv.real + xgrid_uv.real)/2\n\t\tgrid_uv.imag = (ygrid_uv.imag + xgrid_uv.imag)/2\n\n\t\t#combine X and Y gridded wt to create the I pol gridded wt\n\t\tgrid_wt.real = (ygrid_wt.real + xgrid_wt.real)/2\n\t\tgrid_wt.imag = (ygrid_wt.imag + xgrid_wt.imag)/2\n\n\telif Stokes == 'Q':\n\t\t#combine X and Y gridded vis to create the I pol gridded vis\n\t\t# Q = (XX-YY)/2\n\t\tgrid_uv.real = (ygrid_uv.real - xgrid_uv.real)/2\n\t\tgrid_uv.imag = (ygrid_uv.imag - xgrid_uv.imag)/2\n\n\t\t#combine X and Y gridded wt to create the I pol gridded wt\n\t\tgrid_wt.real = (ygrid_wt.real - xgrid_wt.real)/2\n\t\tgrid_wt.imag = (ygrid_wt.imag - xgrid_wt.imag)/2\n\n\tdty_image=np.fft.fftshift(np.fft.ifft2(np.fft.ifftshift(IF.pad_fft(grid_uv))))\n\tpsf_image=np.fft.fftshift(np.fft.ifft2(np.fft.ifftshift(IF.pad_fft(grid_wt))))\n\n\treturn dty_image, psf_image",
"def plot_fft_isosurface(title: str, omega: np.ndarray, \n ut: np.ndarray, filename: str) -> None:\n\n print(f'Plotting fft isosurface: {title}...')\n\n (omega_x_grid, omega_y_grid, omega_z_grid) = np.meshgrid(omega, omega, \n omega, indexing='ij')\n\n fig = go.Figure()\n fig.add_trace(\n go.Isosurface(\n x=omega_x_grid.flatten(), \n y=omega_y_grid.flatten(), \n z=omega_z_grid.flatten(), \n value=normalize(ut).flatten(),\n opacity=0.5,\n isomin=0.6,\n isomax=0.9,\n surface_count=3,\n colorscale=\"Viridis\",\n )\n )\n fig.update_layout(\n title_text=title,\n scene_xaxis_title_text='omega_x',\n scene_yaxis_title_text='omega_y',\n scene_zaxis_title_text='omega_z',\n )\n pio.write_html(fig, filename)"
] | [
"0.7749475",
"0.64308286",
"0.6121801",
"0.6089994",
"0.5459395",
"0.5374221",
"0.49754205",
"0.49631813",
"0.494308",
"0.49310347",
"0.49100575",
"0.48892054",
"0.4882994",
"0.48674196",
"0.4839715",
"0.47886744",
"0.4778599",
"0.476231",
"0.47320402",
"0.47290966",
"0.47155273",
"0.4709471",
"0.46993586",
"0.4696804",
"0.46892542",
"0.46792185",
"0.46694776",
"0.4662081",
"0.4648043",
"0.46425027",
"0.4641825",
"0.4627846",
"0.4619006",
"0.46120262",
"0.4602931",
"0.45998654",
"0.45917982",
"0.45896012",
"0.4580284",
"0.45754173",
"0.4563907",
"0.4560562",
"0.45577884",
"0.45565766",
"0.4533884",
"0.45297185",
"0.45183158",
"0.4516271",
"0.4511517",
"0.4511496",
"0.45105335",
"0.45054743",
"0.4504425",
"0.4504382",
"0.449948",
"0.4493323",
"0.44858178",
"0.44848213",
"0.44699198",
"0.44698787",
"0.4467449",
"0.44601297",
"0.4457738",
"0.4433271",
"0.4415557",
"0.44152093",
"0.44150037",
"0.44150037",
"0.43997544",
"0.43995366",
"0.439406",
"0.43889397",
"0.43807796",
"0.43790197",
"0.43782017",
"0.43768504",
"0.43746743",
"0.43577597",
"0.4351756",
"0.43459988",
"0.43449336",
"0.43449312",
"0.43432093",
"0.43420586",
"0.43402857",
"0.4337381",
"0.43336865",
"0.4331644",
"0.43290585",
"0.43280742",
"0.43249974",
"0.432369",
"0.4321703",
"0.43198198",
"0.43178663",
"0.43161657",
"0.43161568",
"0.43125194",
"0.43119624",
"0.43112347"
] | 0.71063083 | 1 |
Perform picking in this item at given widget position. | def _pickFull(self, context):
    rayObject = context.getPickingSegment(frame=self._getScenePrimitive())
    if rayObject is None:
        return None
    rayObject = rayObject[:, :3]

    data = self.getData(copy=False)
    bins = utils.segmentVolumeIntersect(
        rayObject, numpy.array(data.shape) - 1)
    if bins is None:
        return None

    # gather bin data
    offsets = [(i, j, k) for i in (0, 1) for j in (0, 1) for k in (0, 1)]
    indices = bins[:, numpy.newaxis, :] + offsets
    binsData = data[indices[:, :, 0], indices[:, :, 1], indices[:, :, 2]]
    # binsData.shape = nbins, 8
    # TODO up-to this point everything can be done once for all isosurfaces

    # check bin candidates
    level = self.getLevel()
    mask = numpy.logical_and(numpy.nanmin(binsData, axis=1) <= level,
                             level <= numpy.nanmax(binsData, axis=1))
    bins = bins[mask]
    binsData = binsData[mask]
    if len(bins) == 0:
        return None  # No bin candidate

    # do picking on candidates
    intersections = []
    depths = []
    for currentBin, data in zip(bins, binsData):
        mc = MarchingCubes(data.reshape(2, 2, 2), isolevel=level)
        points = mc.get_vertices() + currentBin
        triangles = points[mc.get_indices()]
        t = glu.segmentTrianglesIntersection(rayObject, triangles)[1]
        t = numpy.unique(t)  # Duplicates happen on triangle edges
        if len(t) != 0:
            # Compute intersection points and get closest data point
            points = t.reshape(-1, 1) * (rayObject[1] - rayObject[0]) + rayObject[0]
            # Get closest data points by rounding to int
            intersections.extend(points)
            depths.extend(t)

    if len(intersections) == 0:
        return None  # No intersected triangles

    intersections = numpy.array(intersections)[numpy.argsort(depths)]
    indices = numpy.transpose(numpy.round(intersections).astype(numpy.int64))
    return PickingResult(self, positions=intersections, indices=indices) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def Point_Pick(self):\n self.vtkWidget.iren.AddObserver('RightButtonPressEvent', self.pick_loc)\n self.renWin.Render()",
"def pick_loc(self, event, x):\n #print(event, x)\n self.vtkWidget.iren.RemoveObservers('RightButtonPressEvent')\n loc = event.GetEventPosition()\n\n # Currently this only allow one pick points, but in the future, more reference points may be needed\n if self.pnt is None: # Check no points are already picked\n self.pnt = vtkRenWin.Pick_point(self.renWin, loc)\n else:\n show_message(\"A point is already set as the reference.\\n\"\n \"Clear the picked points to change reference\",\n message_type=\"info\")\n #vtkRenWin.mark(self.renWin,self.pnt[0],self.pnt[1],self.pnt[2])\n # print(self.pnt)",
"def _on_pick(self, event):\n pix_id = event.ind[-1]\n xx, yy, aa = u.Quantity(self.geom.pix_x[pix_id]).value, \\\n u.Quantity(self.geom.pix_y[pix_id]).value, \\\n u.Quantity(np.array(self.geom.pix_area)[pix_id])\n if self.geom.pix_type.startswith(\"hex\"):\n self._active_pixel.xy = (xx, yy)\n else:\n rr = sqrt(aa)\n self._active_pixel.xy = (xx - rr / 2., yy - rr / 2.)\n self._active_pixel.set_visible(True)\n self._active_pixel_label.set_x(xx)\n self._active_pixel_label.set_y(yy)\n self._active_pixel_label.set_text(f\"{pix_id:003d}\")\n self._active_pixel_label.set_visible(True)\n self._update()\n self.on_pixel_clicked(pix_id) # call user-function",
"def click(self, position):\n w, h = self.window.size\n sx, sy = self.tictactoe.size\n rx, ry = position\n x, y = sx * rx // w, sy * ry // h\n if self.tictactoe.available((x, y)):\n self.choice = (x, y)",
"def PerformPick(self, x, y, ignoreEntID = -1):\n pass",
"def onPick(self, event):\n\n modifiers = QtWidgets.QApplication.keyboardModifiers()\n isShift = modifiers == QtCore.Qt.ShiftModifier\n\n logger.info(f'isShift:{isShift}')\n line = event.artist\n\n # filter out clicks on 'Annotation' used by mplcursors\n try:\n # when Scatter, line is 'PathCollection', a list of (x,y)\n offsets = line.get_offsets()\n except (AttributeError) as e:\n return\n\n ind = event.ind # ind is a list []\n if len(ind)==0:\n return\n ind = ind[0]\n\n # ind is the ith element in (x,y) list of offsets\n # ind 10 (0 based) is index 11 (1 based) in table list\n logger.info(f' selected from plot ind:{ind}, offsets values are {offsets[ind]}')\n selectDict = self.getAnnotation(ind)\n\n # to do, just put copy of state dict ???\n selectDict['plotType'] = self.stateDict['plotType']\n selectDict['dataType'] = self.stateDict['dataType']\n\n selectDict['isShift'] = isShift\n\n #\n # emit\n logger.info(f' -->> signalSelectFromPlot.emit()')\n for _k, _v in selectDict.items():\n logger.info(f' {_k}: {_v}')\n self.signalSelectFromPlot.emit(selectDict)",
"def selectedWidget(self, p_int): # real signature unknown; restored from __doc__\n pass",
"def selected(self, point):\n local_point = (point[0] - self.x, point[1] - self.y)\n self.remove(self.slide.rect)\n self.slide.update(local_point)\n self.insert(1, self.slide.rect)\n self.slide.rect.fill = self.slide_color\n self.title.text = f\"{self.name}:{int(self.slide.value)}\"",
"def on_click(self, event):\n item = self.identify(\"item\", event.x, event.y)\n if item:\n if item in self.selection():\n self.selection_remove(item)\n else:\n self.selection_add(item)\n return \"break\"",
"def _press(self, event):\n # Check for selection of a tool handle.\n if ((self._selection_completed or 'move_vertex' in self._state)\n and len(self._xys) > 0):\n h_idx, h_dist = self._polygon_handles.closest(event.x, event.y)\n if h_dist < self.grab_range:\n self._active_handle_idx = h_idx\n # Save the vertex positions at the time of the press event (needed to\n # support the 'move_all' state modifier).\n self._xys_at_press = self._xys.copy()",
"def _updateSelectedItem(self):\n plot = self.plot\n if plot is not None:\n selected = plot.selection().getSelectedItems()\n # Give priority to image over scatter\n for klass in (items.ImageBase, items.Scatter):\n for item in selected:\n if isinstance(item, klass):\n # Found a matching item, use it\n self.getHistogramWidget().setItem(item)\n return\n self.getHistogramWidget().setItem(None)",
"def interact(self,mouseY):\n index = floor((mouseY+self.scroll-50)/150)-1\n if index >= -1 and index < len(self.itemList.items):\n self.selected = index\n #i*150+50-self.scroll",
"def set_piece_selected(self, uid, val):\n piece = self.get_piece_by_uid(uid)\n if piece:\n piece.selected = val",
"def _right_click(self, event, widget):\n self._currently_selected_widget = widget\n\n # need an actual mechanism for populating the menu, rather than this!!\n ### copied from edit_PO_in_currently...\n param_name = None\n for name,representation in self.representations.items():\n if self._currently_selected_widget is representation['widget']:\n param_name=name\n break\n # CEBALERT: should have used get_parameter_value(param_name)?\n PO_to_edit = self._string2object(param_name,self._tkvars[param_name].get())\n ###\n\n if hasattr(PO_to_edit,'params'):\n self.menu.tk_popup(event.x_root, event.y_root)",
"def slot_selectPoint(self, selectionDict):\n\t\tprint('bStackWidget.slot_selectPoint() selectionDict:', selectionDict)\n\t\tif selectionDict is None:\n\t\t\treturn\n\t\tif selectionDict['name'] == 'toggle rect roi':\n\t\t\treturn\n\t\ttype = selectionDict['type']\n\t\tidx = selectionDict['idx']\n\t\tif type == 'Nodes':\n\t\t\tnodeIdx = idx\n\t\t\tself.myStackView2.selectNode(nodeIdx, snapz=True, isShift=False, doEmit=True)\n\t\telif type == 'Edges':\n\t\t\tedgeIdx = idx\n\t\t\tself.myStackView2.selectEdge(edgeIdx, snapz=True, isShift=False, doEmit=True)",
"def select_me(self, mouse_pos):\r\n\t\t#self.active = self.rect.collidepoint(mouse_pos)\r\n\t\tself.active = True",
"def _left_button_release_event(self, obj, event):\n #self.OnLeftButtonUp()\n pixel_x, pixel_y = self.parent.vtk_interactor.GetEventPosition()\n #selector = vtk.vtkVisibleCellSelector()\n\n self.picker_points.append((pixel_x, pixel_y))\n\n #print(self.picker_points)\n if len(self.picker_points) == 2:\n p1x, p1y = self.picker_points[0]\n p2x, p2y = self.picker_points[1]\n self.picker_points = []\n xmin = min(p1x, p2x)\n ymin = min(p1y, p2y)\n xmax = max(p1x, p2x)\n ymax = max(p1y, p2y)\n #print(self.picker_points)\n #print('_area_pick_left_button_release', cell_id)\n\n dx = abs(p1x - p2x)\n dy = abs(p1y - p2y)\n self.picker_points = []\n if dx > 0 and dy > 0:\n if self._pick_visible:\n self._pick_visible_ids(xmin, ymin, xmax, ymax)\n else:\n self._pick_depth_ids(xmin, ymin, xmax, ymax)\n self.parent.vtk_interactor.Render()\n self.picker_points = []",
"def DoAction(self,event):\r\n selections = self.list.GetSelections()\r\n if not selections: return bell()\r\n itemDex = selections[0]\r\n item = self.items[itemDex]\r\n self.data.action(item)",
"def select(self,item):\r\n pass",
"def selectItem(*args):",
"def select_me(self, mouse_pos):\r\n\t\tself.active = self.rect.collidepoint(mouse_pos)",
"def pickUpAction(self, **kwargs):\n pose_offset = self.mm.default_values[self.mm.modes[self.mm.cur_mode]]\n colour = kwargs[\"fname\"]\n# pdb.set_trace()\n self.locator.update_pose() #get current pose of arm\n# x_offset = self.locator.pose[0] + pose_offset[0]\n# y_offset = self.locator.pose[1] + pose_offset[1]\n# goal_pose = (x_offset,y_offset,0,0,0,0)\n\n if self.exp_position_occupied:\n self.colour = 'blue'\n self.baxter.no()\n else:\n success = self.locator.locate(colour, pose_offset, 1)\n self.mm.loadMenu(\"actionMenu\")",
"def item_selected(self, _widget, _idx):\n # get item title\n self.sel_fmt = str(self.types_list.getItemNameAt(_idx))\n \n # enable \"ok\" button if any item selected\n self.button_ok.setEnabled(True)\n # update editor checkbox\n self.checkbox_edit.setStateCheck(False)\n self.checkbox_edit.setEnabled(self._formats[self.sel_fmt][1])",
"def select(self, position: Union[Sequence[int], int]) -> None:\n if isinstance(position, (list, tuple, np.ndarray)):\n y, x = round(position[0]), round(position[1])\n self._assert_valid(y, x)\n position = int(y * self.size[1] + x)\n\n if not isinstance(position, int):\n raise TypeError('`position` must be a integer, tuple or list.')\n\n if self.source != -1:\n self.cancel()\n self.current = self._opt_path(self.source, position)\n else:\n self.start = position\n\n self.destiny = position # must be after cancel",
"def select_items(self):\n\n self.listWidget.currentItem().setSelected(True)\n self.items_selected = self.listWidget.selectedItems()\n\n if self.frame_ordering == \"quality\":\n self.indices_selected = [self.quality_sorted_indices[self.listWidget.row(item)] for item\n in self.items_selected]\n self.frame_index = self.indices_selected[0]\n self.quality_index = self.rank_indices[self.frame_index]\n else:\n self.indices_selected = [self.listWidget.row(item) for item in self.items_selected]\n self.frame_index = self.indices_selected[0]\n self.quality_index = self.rank_indices[self.frame_index]\n\n self.synchronize_slider()",
"def _press(self, event):\n self._set_cursor(True)\n if self._interactive and self._selection_artist.get_visible():\n self._set_active_handle(event)\n else:\n self._active_handle = None\n\n if self._active_handle is None or not self._interactive:\n # Clear previous rectangle before drawing new rectangle.\n self.update()\n\n v = event.xdata if self.direction == 'horizontal' else event.ydata\n # self._pressv and self._prev are deprecated but we still need to\n # maintain them\n self._pressv = v\n self._prev = self._get_data(event)\n\n if self._active_handle is None and not self.ignore_event_outside:\n # when the press event outside the span, we initially set the\n # visibility to False and extents to (v, v)\n # update will be called when setting the extents\n self._visible = False\n self.extents = v, v\n # We need to set the visibility back, so the span selector will be\n # drawn when necessary (span width > 0)\n self._visible = True\n else:\n self.set_visible(True)\n\n return False",
"def select(self, _: int = 0) -> None:\n if not self.all_items:\n self._exit()\n return\n self.selected_option = self.current_option\n\n assert self.selected_item is not None\n self.selected_item.set_up()\n self.selected_item.action()\n self.selected_item.clean_up()\n\n self.returned_value = self.selected_item.get_return()\n self.should_exit = self.selected_item.should_exit\n\n if not self.should_exit:\n self.draw()",
"def withTouch(self, item, contentType=None, length=None, timeout=None, index=1, containerObject=None, relatedAreaEnd=None, doNotSelect=False):\r\n # Press (x, y) coordinate point when item is tuple\r\n if isinstance(item, tuple):\r\n self.phone._touch.press(item,length)\r\n self.phone._run('Press to coordinates: %s,%s' % item)\r\n return\r\n\r\n # Press HW key\r\n if item.startswith('KBD_KEY_'):\r\n self.phone._pressKey(item, length)\r\n self.phone._run('Press (%s) key' % item)\r\n if item == 'KBD_KEY_KEYLOCK_TOGGLE':\r\n self.phone.delay(100, False)\r\n return\r\n\r\n touchType=False#Fix touchable not working with images ,must be set not to false\r\n\r\n coordinates = self.phone.uiState.revealItem(item,timeout, index=index, containerObject=containerObject, relatedAreaEnd=relatedAreaEnd)\r\n\r\n if coordinates:\r\n if not doNotSelect:\r\n itemCommented = self.phone.uiState._getCommented(item) # get step commented here so teststep won't be messed up with sx queries\r\n\r\n if containerObject != None:\r\n containerCommented = self.phone.uiState._getCommented(containerObject) # get container in commented format before touching\r\n self.phone._touch.press(coordinates,length)\r\n self.phone._run('select(%s) (on same area as %s)' % (itemCommented, containerCommented))\r\n else:\r\n self.phone._touch.press(coordinates,length)\r\n self.phone._run('select(%s)' % itemCommented)\r\n elif containerObject != None:\r\n self.phone.fail(\"Cannot select %s, no item found related to \\\"%s\\\".\"%(self.phone.uiState._getCommented(item), self.phone.uiState._getCommented(containerObject)))\r\n else:\r\n self.phone.fail(\"Cannot select %s, item is not found from screen.\"%self.phone.uiState._getCommented(item))",
"def pick(self, x, y, pb=2, multiple=False):\n width = self.size().width()\n height = self.size().height()\n #print('coords: %d, %d' % (x, y))\n # constrain to within border 1 pix smaller than widget, for glReadPixels call\n if not (pb <= x < width-pb and pb <= y < height-pb): # cursor out of range\n return\n if self.npoints > 2**24-2: # the last one is the full white background used as a no hit\n raise OverflowError(\"Can't pick from more than 2**24-2 sids\")\n # draw encoded RGB values to back buffer\n #GL.glDrawBuffer(GL_BACK) # defaults to back\n GL.glClearColor(1.0, 1.0, 1.0, 1.0) # highest possible RGB means no hit\n GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n GL.glEnableClientState(GL.GL_COLOR_ARRAY)\n GL.glEnableClientState(GL.GL_VERTEX_ARRAY)\n GL.glColorPointerub(self.rgbsids) # unsigned byte, ie uint8\n GL.glVertexPointerf(self.points) # float32\n GL.glDrawArrays(GL.GL_POINTS, 0, self.npoints) # to back buffer\n GL.glClearColor(0.0, 0.0, 0.0, 1.0) # restore to default black\n # grab back buffer:\n #GL.glReadBuffer(GL.GL_BACK) # defaults to back\n # find rgb at or around cursor coords, decode sid:\n backbuffer = GL.glReadPixels(x=x-pb, y=y-pb, width=2*pb+1, height=2*pb+1,\n format=GL.GL_RGB, type=GL.GL_UNSIGNED_BYTE,\n array=None, outputType=None)\n # NOTE: outputType kwarg above must be set to something other than str to ensure\n # that an array is returned, instead of a string of bytes\n if (backbuffer == 255).all(): # no hit\n return\n if not multiple:\n sid = self.decodeRGB(backbuffer[pb, pb]) # check center of backbuffer\n if sid != None:\n #print('hit at exact cursor pos')\n return sid # hit at exact cursor position\n # 2D array with nonzero entries at hits:\n hitpix = (backbuffer != [255, 255, 255]).sum(axis=2)\n if not multiple:\n ri = np.where(hitpix.ravel())[0][0] # get ravelled index of first hit\n i, j = np.unravel_index(ri, dims=hitpix.shape) # unravel to 2D index\n #print('Hit at %d, %d' % (i, j))\n return self.decodeRGB(backbuffer[i, j]) # should be a valid sid\n ijs = zip(*np.where(hitpix)) # list of ij tuples\n return np.asarray([ self.decodeRGB(backbuffer[i, j]) for i, j in ijs ])",
"def pick(self, x, y, pb=2, multiple=False):\n width = self.size().width()\n height = self.size().height()\n #print('coords: %d, %d' % (x, y))\n # constrain to within border 1 pix smaller than widget, for glReadPixels call\n if not (pb <= x < width-pb and pb <= y < height-pb): # cursor out of range\n return\n if self.npoints > 2**24-2: # the last one is the full white background used as a no hit\n raise OverflowError(\"Can't pick from more than 2**24-2 sids\")\n # draw encoded RGB values to back buffer\n #GL.glDrawBuffer(GL_BACK) # defaults to back\n GL.glClearColor(1.0, 1.0, 1.0, 1.0) # highest possible RGB means no hit\n GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n GL.glEnableClientState(GL.GL_COLOR_ARRAY)\n GL.glEnableClientState(GL.GL_VERTEX_ARRAY)\n GL.glColorPointerub(self.rgbsids) # unsigned byte, ie uint8\n GL.glVertexPointerf(self.points) # float32\n GL.glDrawArrays(GL.GL_POINTS, 0, self.npoints) # to back buffer\n GL.glClearColor(0.0, 0.0, 0.0, 1.0) # restore to default black\n # grab back buffer:\n #GL.glReadBuffer(GL.GL_BACK) # defaults to back\n # find rgb at or around cursor coords, decode sid:\n backbuffer = GL.glReadPixels(x=x-pb, y=y-pb, width=2*pb+1, height=2*pb+1,\n format=GL.GL_RGB, type=GL.GL_UNSIGNED_BYTE,\n array=None, outputType=None)\n # NOTE: outputType kwarg above must be set to something other than str to ensure\n # that an array is returned, instead of a string of bytes\n if (backbuffer == 255).all(): # no hit\n return\n if not multiple:\n sid = self.decodeRGB(backbuffer[pb, pb]) # check center of backbuffer\n if sid != None:\n #print('hit at exact cursor pos')\n return sid # hit at exact cursor position\n # 2D array with nonzero entries at hits:\n hitpix = (backbuffer != [255, 255, 255]).sum(axis=2)\n if not multiple:\n ri = np.where(hitpix.ravel())[0][0] # get ravelled index of first hit\n i, j = np.unravel_index(ri, dims=hitpix.shape) # unravel to 2D index\n #print('hit at %d, %d' % (i, j))\n return self.decodeRGB(backbuffer[i, j]) # should be a valid sid\n ijs = zip(*np.where(hitpix)) # list of ij tuples\n return np.asarray([ self.decodeRGB(backbuffer[i, j]) for i, j in ijs ])",
"def getSelectedPosition(*args):",
"def onButtonPress(self, event):\n\n if event.xdata and event.ydata:\n self.emit(QtCore.SIGNAL(\"positionSelected(float, float)\"),\n float(event.xdata), float(event.ydata))",
"def apply_selection(self, rv, index, is_selected):\r\n self.selected = is_selected",
"def apply_selection(self, rv, index, is_selected):\n self.selected = is_selected",
"def select_entry(self):\n logging.debug(\"element selected\")\n if len(self.contents) > 0:\n self.to_background()\n self.contents[self.pointer][1]()\n self.to_foreground()\n if self.path_chosen:\n self.deactivate()\n else:\n self.to_foreground()",
"def on_click(self, event):\n if self.click_job is not None:\n self.after_cancel(self.click_job)\n item = self.identify(\"item\", event.x, event.y)\n if item:\n self.click_job = self.after(200, self.clicked, item)\n return \"break\"",
"def pickUp(self):\n pos = self.getRoverLocation()\n item = self.map[pos.y,pos.x]\n if type(item) == Part:\n self.inventory.addPart(str(item))\n self.map[pos.y,pos.x] = None",
"def _press(self, event):\n # make the drawn box/line visible get the click-coordinates,\n # button, ...\n if self._interactive and self._selection_artist.get_visible():\n self._set_active_handle(event)\n else:\n self._active_handle = None\n\n if ((self._active_handle is None or not self._interactive) and\n self._allow_creation):\n # Clear previous rectangle before drawing new rectangle.\n self.update()\n\n if (self._active_handle is None and not self.ignore_event_outside and\n self._allow_creation):\n x = event.xdata\n y = event.ydata\n self._visible = False\n self.extents = x, x, y, y\n self._visible = True\n else:\n self.set_visible(True)\n\n self._extents_on_press = self.extents\n self._rotation_on_press = self._rotation\n self._set_aspect_ratio_correction()\n\n return False",
"def selectPointsUnderCursor(self):\n spw = self.spw\n sw = spw.windows['Sort']\n #if clear:\n # sw.uslist.clearSelection()\n # sw.nlist.clearSelection()\n x, y = self.cursorPosGL()\n sids = self.pick(x, y, pb=10, multiple=True)\n if sids is None:\n return\n #t0 = time.time()\n spw.SelectSpikes(sids, on=self.selecting)\n #print('SelectSpikes took %.3f sec' % (time.time()-t0))\n if self.selecting == True:\n sat = 0.2 # desaturate\n else: # self.selecting == False\n sat = 1 # resaturate\n self.color(sids, sat=sat)\n self.updateGL()",
"def onDown():\n currentIndex = selector.currentRow()\n if currentIndex != selector.count() - 1 and currentIndex != -1:\n selector.blockSignals(True)\n currentItem = selector.takeItem(currentIndex)\n selector.insertItem(currentIndex + 1, currentItem)\n selector.setCurrentRow(currentIndex + 1)\n selector.blockSignals(False)\n position = []\n for index in range(selector.count()):\n position.append(selector.item(index).data(32))\n p.SetString(\"Position\", \",\".join(position))\n onItemChanged()",
"def act(self, **kwargs):\n source_entity = kwargs[action.SOURCE_ENTITY]\n item = self._get_item_on_floor(source_entity)\n if item is None:\n raise Exception(\"Could not find item on floor.\", source_entity, item)\n pickup_succeded = self.parent.inventory.try_add(item)\n if pickup_succeded:\n item.remove_component_of_type(\"player_auto_pick_up\")\n msg.send_visual_message(messenger.PICK_UP_MESSAGE % {\"item\": item.description.name},\n source_entity.position.value)\n self.parent.actor.newly_spent_energy += gametime.single_turn\n _item_flash_animation(source_entity, item)",
"def click(self, button, coord):\n if coord in self._ship_blocks:\n # selection doesn't work, if grid is blocked\n return\n\n if self._selection is None:\n # selection works, if not selected before\n self._selection = coord\n elif self._selection == coord:\n for coord in self._selection_buttons:\n self.remove_selection(coord)\n self._selection = None\n else:\n ship = list(self._selection_buttons)\n result = True\n\n if self.on_ship is not None:\n result = self.on_ship(ship)\n\n if result:\n self.ships.append(ship)\n self._ship_blocks += ship\n else:\n for coord in self._selection_buttons:\n self.remove_selection(coord)\n\n self._selection_buttons.clear()\n self._selection = None",
"def getItemAtClick(self, event):\n pos = event.pos()\n obj = self.itemAt(pos)\n return obj",
"def getItemAtClick(self, event):\n pos = event.pos()\n obj = self.itemAt(pos)\n return obj",
"def use_triggered(self):\n\n self.select_items()\n if self.items_selected:\n for index, item in enumerate(self.items_selected):\n index_selected = self.indices_selected[index]\n frame_selected = index_selected + 1\n item.setText(\"Frame %i included\" % frame_selected)\n item.setBackground(self.background_included)\n item.setForeground(QtGui.QColor(0, 0, 0))\n self.index_included[index_selected] = True\n self.frame_selector.setPhoto(self.frame_index)",
"def selectPointsUnderCursor(self):\n #spw = self.spw\n #sw = spw.windows['Sort']\n #if clear:\n # sw.uslist.clearSelection()\n # sw.nlist.clearSelection()\n x, y = self.cursorPosGL()\n sids = self.pick(x, y, pb=10, multiple=True)\n if sids == None:\n return\n #t0 = time.time()\n #if not sw.panel.maxed_out:\n # spw.SelectSpikes(sids, on=self.selecting)\n #else:\n # # for speed, while the mouse is held down and the sort panel is maxed out,\n # # don't call SelectSpikes, only call it once when the mouse is released\n self.collected_sids.append(sids)\n #print('SelectSpikes took %.3f sec' % (time.time()-t0))\n if self.selecting == True:\n sat = 0.2 # desaturate\n else: # self.selecting == False\n sat = 1 # resaturate\n self.color(sids, sat=sat)\n self.updateGL()",
"def __menu_item_chosen(self, m):\n # Get selected item\n self.__selected_robot = m.index\n\n # Update the checkboxes/sliders for the selected robot\n self.__ui_controls.get('chkbox_ref').checked = \\\n self.__robots[self.__selected_robot].ref_shown\n\n self.__ui_controls.get('chkbox_rob').checked = \\\n self.__robots[self.__selected_robot].rob_shown\n\n self.__ui_controls.get('sld_opc').value = \\\n self.__robots[self.__selected_robot].opacity",
"def on_mouse_press(self, x, y, button):\n\n pass",
"def select_hand_pointer(self):\n self.locator_finder_by_hover_item(self.select_hand_pointer_id)\n time.sleep(1)",
"def _on_point_selected(self, _event):\r\n selected_iid = self._tree.selection()\r\n self._index_of_sel_point = self._tree.index(selected_iid)\r\n self._edit_zone.set_editable_point(self._tree.item(selected_iid)[\"values\"][0])\r\n self._notify(\"focus\", {})",
"def paint_item(self, posx, index):\r\n raise NotImplementedError()",
"def set_selected_point(self, i):\n\n if i < len(self.poses):\n self.selected_point = min(len(self.poses), max(0, i))\n self.calibration_changed()",
"def draw(self):\n if context.click():\n self.place()",
"def items_picking_window(self,\n instance: Nobleman,\n name: str,\n variable: Any,\n widget=None):\n window = tk.Toplevel()\n window.title(f'Pick new {name.lstrip(\"_\")}')\n window.geometry('350x250')\n\n search_variable = StringVar()\n entry = Entry(window, textvariable=search_variable)\n entry.pack(side=TOP)\n\n listbox = self.create_items_listbox(variable, widget, window)\n for name in self.get_data_for_listbox(instance, name):\n listbox.insert(END, name)\n\n entry.bind(\n '<Key>', partial(input_match_search, search_variable,\n lambda: self.manager.lords if name in LORDS_SETS\n else self.manager.locations, listbox))\n\n TkButton(window, text='Confirm and close',\n command=window.destroy).pack(side=TOP)",
"def _left_button_press_event(self, obj, event):\n #print('area_picker - left_button_press_event')\n self.OnLeftButtonDown()\n pixel_x, pixel_y = self.parent.vtk_interactor.GetEventPosition()\n self.picker_points.append((pixel_x, pixel_y))",
"def add_selection(self, coord):\n button = self.grid[coord]\n button['bg'] = active\n button['activebackground'] = active",
"def pickup_item(self, ):\n if (self.charac_pos.x, self.charac_pos.y) == self.pos_item['ether']:\n self.pos_item['ether'] = (0, 0 * sprite_size)\n self.item_count += 1\n self.sound_item.play()\n if (self.charac_pos.x, self.charac_pos.y) == self.pos_item['tube']:\n self.pos_item['tube'] = (0, 1 * sprite_size)\n self.item_count += 1\n self.sound_item.play()\n if (self.charac_pos.x, self.charac_pos.y) == self.pos_item['needle']:\n self.pos_item['needle'] = (0, 2 * sprite_size)\n self.item_count += 1\n self.sound_item.play()",
"def selected(self, item):\n self.elementoSeleccionado = item",
"def set_selection(self, index, value):\n if not self._has_cbox[index]:\n return\n i = self._widgets[index][\"values\"].index( str(value) )\n self._widgets[index].current(i)",
"def onpick(cls, event):\n if cls.rate_limiting():\n return True\n\n if len(event.ind) != 1:\n print(\"Two or more points are too close! Please zoom in.\")\n print(\"Showing the one with higher fitness score\")\n\n cloud_plot = gs.canvas2cloud_plot[event.canvas]\n artist = event.artist\n ind = event.ind[-1]\n button = event.mouseevent.button\n\n if button == 1:\n cls.button_1(cloud_plot, artist, ind)\n elif button == 3:\n cls.button_3(cloud_plot, artist, ind)",
"def trigger_open(self):\n self.get_selected()\n if self.selected_item:\n self.controller.display_item(self.selected_item)",
"def paint_item(self, posy, index):\r\n order = self.items[index]\r\n if order in self.selected:\r\n marker = \"*\"\r\n if index == self.item_sel:\r\n attr = COLOR_PAIR[\"dialog_sel_sel\"]\r\n else:\r\n attr = COLOR_PAIR[\"dialog_sel_text\"] + curses.A_BOLD\r\n else:\r\n marker = \"\"\r\n if index == self.item_sel:\r\n attr = COLOR_PAIR[\"dialog_sel\"]\r\n else:\r\n attr = COLOR_PAIR[\"dialog_text\"]\r\n\r\n self.addstr(posy, 2, marker, attr)\r\n self.addstr(posy, 5, order.typ, attr)\r\n self.addstr(posy, 9, self.gox.quote2str(order.price), attr)\r\n self.addstr(posy, 22, self.gox.base2str(order.volume), attr)",
"def mousePressEvent(self, event):\n #sw = self.spw.windows['Sort']\n buttons = event.buttons()\n if buttons == QtCore.Qt.MiddleButton:\n #sw.on_actionSelectRandomSpikes_triggered()\n #sw.spykewindow.ui.plotButton.click() # same as hitting ENTER in nslist\n self.selecting = True\n self.setMouseTracking(True) # while selecting\n self.selectPointsUnderCursor()\n self.lastPressPos = QtCore.QPoint(event.pos())\n self.lastPos = QtCore.QPoint(event.pos())",
"def mousePressEvent(self, event):\n #sw = self.spw.windows['Sort']\n buttons = event.buttons()\n if buttons == QtCore.Qt.MiddleButton:\n #sw.on_actionSelectRandomSpikes_triggered()\n #sw.spykewindow.plotButton.click() # same as hitting ENTER in nslist\n self.selecting = True\n self.setMouseTracking(True) # while selecting\n self.selectPointsUnderCursor()\n self.lastPressPos = QtCore.QPoint(event.pos())\n self.lastPos = QtCore.QPoint(event.pos())",
"def _handleClick(self, x, y, btn):\n\n if btn == LEFT_BTN:\n result = self.plot._pickTopMost(x, y, lambda i: i.isSelectable())\n if result is None:\n return None\n\n item = result.getItem()\n\n if isinstance(item, items.MarkerBase):\n xData, yData = item.getPosition()\n if xData is None:\n xData = [0, 1]\n if yData is None:\n yData = [0, 1]\n\n eventDict = prepareMarkerSignal('markerClicked',\n 'left',\n item.getName(),\n 'marker',\n item.isDraggable(),\n item.isSelectable(),\n (xData, yData),\n (x, y), None)\n return eventDict\n\n elif isinstance(item, items.Curve):\n dataPos = self.plot.pixelToData(x, y)\n assert dataPos is not None\n\n xData = item.getXData(copy=False)\n yData = item.getYData(copy=False)\n\n indices = result.getIndices(copy=False)\n eventDict = prepareCurveSignal('left',\n item.getName(),\n 'curve',\n xData[indices],\n yData[indices],\n dataPos[0], dataPos[1],\n x, y)\n return eventDict\n\n elif isinstance(item, items.ImageBase):\n dataPos = self.plot.pixelToData(x, y)\n assert dataPos is not None\n\n indices = result.getIndices(copy=False)\n row, column = indices[0][0], indices[1][0]\n eventDict = prepareImageSignal('left',\n item.getName(),\n 'image',\n column, row,\n dataPos[0], dataPos[1],\n x, y)\n return eventDict\n\n return None",
"def setSelectedFromItem(self, item):\n row = self.model.indexFromItem(item).row()\n self.selectRow(row)",
"def move_mouse_to_and_click(self, selector, x=0, y=0):\n self.move_mouse_to(selector, x, y, return_action_chain=True).click().perform()",
"def grab(self, event):\n self.ypos = event.y\n self.xpos = event.x\n self.config(cursor='fleur')",
"def rightselectitem(self, pos):\n self._rightlist.select(pos)",
"def selectShot(self):\r\n shot = self.mapToShot(self.remainingCoordinates.pop())\r\n logging.debug(\"select shot: %s\" % (shot))\r\n return shot",
"def grab_slider(self, mouse_x, mouse_y):\n\n for slider in self._menu_items:\n if slider['menu_type'] == 'Slider':\n if slider['rect'].collidepoint(mouse_x, mouse_y):\n slider['grabbed'] = True",
"def toggle_select(self):\r\n if not len(self.items):\r\n return\r\n item = self.items[self.item_sel]\r\n if item in self.selected:\r\n self.selected.remove(item)\r\n else:\r\n self.selected.append(item)\r\n self.do_paint()",
"def Scale_Pick( self, event ):\r\n x = event.x - cb.xorigin\r\n y = event.y\r\n #Was the position within the scale?\r\n if x < 0 and x > -2:\r\n x = 0 #low adjust\r\n if x > cb.xtotal and x < cb.xtotal+2:\r\n x = cb.xtotal #high adjust\r\n if( x >= 0 and x <= cb.xtotal ):\r\n self.filter_distance = round((cb.xtotal - float(x))/cb.xtotal*cb.longx,3)\r\n self.Draw_Scale()\r\n return",
"def middleselectitem(self, pos):\n self._linklist.select(pos)",
"def mouseDoubleClickEvent(self, event):\n try:\n item = self.currentItem()\n self.clearSelection()\n item.setSelected(True)\n super(ListPreviewImages, self).mouseDoubleClickEvent(event)\n except:\n print('No item selected')",
"def on_pointPickerButton(self, key_t, button):\n\n result = self.tabs.pick_mpoint( 'random', key_t, \n self.cfgmgr, self.prefs.get('lts') )\n\n ctxt = result.log if not result.err else result.err \n\n if ctxt != None:\n self.consoleBuffer.insert_at_cursor(ctxt)\n\n self._refresh_ui()",
"def on_touch_down(self, touch):\n if super(SelectableButton, self).on_touch_down(touch):\n return True\n if self.collide_point(*touch.pos) and self.selectable:\n return self.parent.select_with_touch(self.index, touch)",
"def HitTest(self, x, y):\r\n\r\n result = None\r\n\r\n for item in self._uiparts:\r\n # we are not interested in typeDock, because this space \r\n # isn't used to draw anything, just for measurements\r\n # besides, the entire dock area is covered with other\r\n # rectangles, which we are interested in.\r\n if item.type == AuiDockUIPart.typeDock:\r\n continue\r\n\r\n # if we already have a hit on a more specific item, we are not\r\n # interested in a pane hit. If, however, we don't already have\r\n # a hit, returning a pane hit is necessary for some operations\r\n if item.type in [AuiDockUIPart.typePane, AuiDockUIPart.typePaneBorder] and result:\r\n continue\r\n \r\n # if the point is inside the rectangle, we have a hit\r\n if item.rect.Contains((x, y)):\r\n result = item\r\n \r\n return result",
"def button_press_cb(self, darea, event):\n x, y = event.x, event.y\n self.draw_pointer(self.cr, x, y)\n self.queue_draw()\n self.oldx, self.oldy = x, y\n rel_x, rel_y = self.absolute_to_relative(x, y)\n self.emit('dnd-value', rel_x, rel_y)\n self.emit('start-dnd')\n return True",
"def pick(layer, event):\n # on press\n layer.selected_label = layer._value or 0",
"def corner_click(position: Corner) -> None:\n actions.self.corner_hover(Corner)\n actions.mouse_click()",
"def radioButtonItem_Clicked( self, event ):\n\t\tself.activateTreasureBox(0)",
"def select_item(self, text):\n items = self.list_widget.findItems(text, Qt.MatchExactly)\n for item in items:\n item.setSelected(True)\n self.handle_item_changed()",
"def _selectInd(self, ind):\n logger.info(f'plotNumber:{self.plotNumber} ind: {ind}')\n if ind > len(self.plotDf)-1:\n return\n xVal = self.plotDf.at[ind, self.stateDict['xStat']]\n yVal = self.plotDf.at[ind, self.stateDict['yStat']]\n if self.scatterPlotSelection is not None:\n logger.info(f' setting scatterPlotSelection x:{xVal} y:{yVal}')\n self.scatterPlotSelection.set_data(xVal, yVal)\n self.fig.canvas.draw()",
"def handle_press( self, x, y ):\n self.pressed_flag = True\n self.first_point = (x, y)",
"def OnButtonClick(self):\n self.choice()",
"def getSelectedItem(*args):",
"def apply_selection(self, rv, index, is_selected):\n self.selected = is_selected\n if is_selected:\n SelectableLabel.selected_hotel = rv.data[index]['text']",
"def grab_point(self, pos):\n self.move_cartesian_frame_linear_interpolation(tfx.pose(np.array(pos), np.array(self.GRAB_ORIENTATION)), 0.1)\n self.grab_current_point()",
"def update_selection(self):\n raise NotImplementedError",
"def selectShot(self):\r\n raise NotImplementedError(\"Subclass needs to implement this\")",
"def set_item_selection(self, item):\n self._set_item_selection(item.encode())",
"def selection_board_maintenance(self,x_cor,y_cor):\t\t\r\n\t\tfor event in pygame.event.get():\r\n\t\t\tif event.type == pygame.QUIT:\r\n\t\t\t\tpygame.display.quit()\r\n\t\t\t\tpygame.quit()\r\n\t\t\t\tquit() \r\n\r\n\t\t\tif event.type == pygame.MOUSEBUTTONDOWN:\r\n\t\t\t\t#print(\"mouse is pressed\")\r\n\t\t\t\t#everything begins here\r\n\t\t\t\tx_adjusted,y_adjusted,who_is_clicked,piece = Helping_Class.convert_coordinate(x_cor,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t y_cor,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'selection_bar')\r\n\t\t\t\t#print(who_is_clicked)\r\n\t\t\t\tif (self.selected_from_selection_bar + self.selected_from_board):\r\n\t\t\t\t\t#print(\"inside selected item one\")\r\n\t\t\t\t\tif Helping_Class._check_if_clicked_on_his_own_piece_(self.whose_move,who_is_clicked):\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\tif self.pieces[piece].availability:\r\n\t\t\t\t\t\t\tself.selected_from_board = False\r\n\t\t\t\t\t\t\tself.selected_from_selection_bar = True\r\n\r\n\t\t\t\t\t\t\t#update \r\n\t\t\t\t\t\t\tself.selected_piece = piece\r\n\t\t\t\t\t\t\tself.selected_position =Helping_Class.selection_bar_reverse_mapping[piece]\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\t#nothing to do\r\n\t\t\t\t\t\tpass\r\n\t\t\t\telse:\r\n\t\t\t\t\t#print(\"nothing is selected\")\r\n\t\t\t\t\t#check if clicked on his piece change then select it\r\n\t\t\t\t\tif Helping_Class._check_if_clicked_on_his_own_piece_(self.whose_move,who_is_clicked):\r\n\r\n\t\t\t\t\t\tif self.pieces[piece].availability:\r\n\t\t\t\t\t\t\tself.selected_from_selection_bar = True\r\n\r\n\t\t\t\t\t\t\t#update \r\n\t\t\t\t\t\t\tself.selected_piece = piece\r\n\t\t\t\t\t\t\tself.selected_position =(x_adjusted,y_adjusted)\r\n\t\t\t\t\t\t\t#print(self.selected_piece,self.selected_position,self.selected_from_selection_bar)\r\n\r\n\t\t\t\t\t\t\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\t#nothing to do\r\n\t\t\t\t\t\tpass\r\n\t\t\t\t\t\t\r\n\t\t\t\t\r\n\t\t\telse:\r\n\t\t\t\t#color change\r\n\t\t\t\t#who_is_clicked is dummy variable as no click has occurred\r\n\t\t\t\tx_adjusted,y_adjusted,who_is_clicked,piece = Helping_Class.convert_coordinate(x_cor,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t y_cor,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'selection_bar')\r\n\r\n\t\t\t\tself.blit_piece = [(x_adjusted,y_adjusted),piece]",
"def select_multiple_item(self):\n time.sleep(2)\n self.wait_for_ajax()\n self.locator_finder_by_hover_item(self.row1_id)\n self.locator_finder_by_hover_item(self.row2_id)\n self.locator_finder_by_hover_item(self.row3_id)\n self.locator_finder_by_hover_item(self.row4_id)\n time.sleep(1)\n self.wait_for_ajax()",
"def _handle_select_event(self):\n selected_item = self.item_list[self.item_cursor.cursor]\n if selected_item == \"CANCEL\":\n self.is_dead = True\n\n # You can't sell key items.\n elif selected_item.type == ItemTypes.KEY_ITEMS:\n self.do_what_response_menu = \\\n Dialogue(\"29\", self.player, self.player,\n replace=[selected_item.name.upper()], show_curs=False)\n\n # Create a sell event with the selected item.\n else:\n self.active_sell_event = SellHowMany(self.player,\n selected_item)",
"def onclick_pick(self, click):\n from ..backend.util import _annot\n from ..backend.viz_raw import _plot_single_psd\n\n if self.plotType == 'All PSD':\n _annot(self, click, self.annot)\n # If double click, we plot the PSD\n if click.mouseevent.dblclick:\n ch = str(click.artist.get_label())\n index = self.psd.info['ch_names'].index(ch)\n index = self.psd.picks.index(index)\n _plot_single_psd(self, index + 1)",
"def emitPressEvent(self, clickLocation, button, currentKbKey, items):\n # emit the mousePressEvent signal\n self.mousePress.emit(self, clickLocation, button, currentKbKey, items)",
"def activate_item(self, index):\n item = index.model().listdata[index.row()]\n self.get_selected(item)\n self.controller.display_item(item)",
"def select_asset_from_items(self):\n\n # items = self.get_nodes(selection=False)\n # nodes = []\n # for item in items.values():\n # nodes.extend(item[\"nodes\"])\n #\n # commands.select(nodes)\n\n raise NotImplementedError",
"def pickUpActionAny(self, **kwargs):\n pose_offset = self.mm.default_values[self.mm.modes[self.mm.cur_mode]]\n colour = kwargs[\"fname\"]\n self.locator.recognise_grid()\n red = self.locator.detect_colour(0, 'red')\n rospy.loginfo(\"permutation(): looking for red object: %s\" % str(red))\n blue = self.locator.detect_colour(0, 'blue')\n rospy.loginfo(\"permutation(): looking for blue object: %s\" % str(blue))\n if red[0] < blue[0]:\n colour = 'blue'\n else:\n colour = 'red'\n\n self.locator.update_pose() #get current pose of arm\n\n success = self.locator.locate(colour, pose_offset, 1)\n self.mm.loadMenu(\"actionMenu\")",
"def operate_row(self, point: QtCore.QPoint, opt: str):\n raise NotImplementedError"
] | [
"0.66232896",
"0.58813524",
"0.5879792",
"0.5860174",
"0.5787622",
"0.57383424",
"0.570975",
"0.56717336",
"0.564384",
"0.5572894",
"0.55690295",
"0.5561806",
"0.55518645",
"0.5547201",
"0.55388415",
"0.5531264",
"0.5512377",
"0.5507659",
"0.54962397",
"0.5493375",
"0.5490328",
"0.54831296",
"0.5481948",
"0.5471091",
"0.54537976",
"0.5385567",
"0.53819734",
"0.5380689",
"0.5376581",
"0.5376581",
"0.53687215",
"0.5354697",
"0.53440946",
"0.5337128",
"0.53037",
"0.52962893",
"0.5296004",
"0.52738595",
"0.524684",
"0.5238567",
"0.5237964",
"0.52072847",
"0.5201069",
"0.5201069",
"0.5189809",
"0.5175269",
"0.51695746",
"0.5162868",
"0.51596415",
"0.51481307",
"0.5143322",
"0.51402134",
"0.51390123",
"0.5114692",
"0.51132935",
"0.51132095",
"0.51111865",
"0.51073176",
"0.5106252",
"0.50770056",
"0.5075046",
"0.5071524",
"0.5039546",
"0.5019741",
"0.5013485",
"0.50122666",
"0.50101805",
"0.5009396",
"0.50026864",
"0.49794713",
"0.49772796",
"0.49754158",
"0.4969919",
"0.4964197",
"0.49609694",
"0.4960346",
"0.49599373",
"0.4958935",
"0.49562597",
"0.4949249",
"0.49417773",
"0.4940599",
"0.49330366",
"0.49327916",
"0.49322525",
"0.4929804",
"0.4929371",
"0.4921385",
"0.49172404",
"0.49140868",
"0.4913035",
"0.49122664",
"0.49008182",
"0.49006447",
"0.49001527",
"0.48981848",
"0.48945206",
"0.48936823",
"0.48891103",
"0.48823547",
"0.4878011"
] | 0.0 | -1 |
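The picking document above first narrows the search to voxels whose eight corner values bracket the isosurface level before intersecting any triangles. The following is a minimal NumPy sketch of that candidate-bin test only, with hypothetical names and no silx dependencies; it illustrates the idea rather than reproducing the library's implementation.

import numpy as np

def candidate_bins(data, bins, level):
    # Corner offsets of a 2x2x2 cell, in the same (i, j, k) order as above.
    offsets = np.array([(i, j, k) for i in (0, 1) for j in (0, 1) for k in (0, 1)])
    indices = bins[:, np.newaxis, :] + offsets                         # (nbins, 8, 3)
    corners = data[indices[..., 0], indices[..., 1], indices[..., 2]]  # (nbins, 8)
    # A cell can contain the isosurface only if the level lies between its
    # smallest and largest corner value (NaN-safe via nanmin/nanmax).
    mask = (np.nanmin(corners, axis=1) <= level) & (level <= np.nanmax(corners, axis=1))
    return bins[mask], corners[mask]

# Toy usage: test every cell of a random 8x8x8 field against level 0.5.
rng = np.random.default_rng(0)
field = rng.random((8, 8, 8))
all_bins = np.argwhere(np.ones((7, 7, 7), dtype=bool))
kept, corners = candidate_bins(field, all_bins, level=0.5)
print(kept.shape, corners.shape)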
Compute range info (min, min positive, max) from data | def _computeRangeFromData(data):
    if data is None:
        return None

    dataRange = min_max(data, min_positive=True, finite=True)
    if dataRange.minimum is None:  # Only non-finite data
        return None

    if dataRange is not None:
        min_positive = dataRange.min_positive
        if min_positive is None:
            min_positive = float('nan')
        return dataRange.minimum, min_positive, dataRange.maximum | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_range(cls, data: tuple or list) -> float:\n cls._data_validation(data)\n max_ = cls.get_max(data)\n min_ = cls.get_min(data)\n return float(max_ - min_)",
"def data_range(x):\n return max(x)-min(x)",
"def m_to_range(self, data):\n return (data - self._min_range_m) / self._total_range",
"def _full_value_range(self):\n min_value, max_value = self._raw_data.data_range\n return max_value - min_value",
"def calcrange(a4lim,data):\r\n a4range=N.intersect1d(N.where(data>a4lim[0])[0],N.where(data<a4lim[1])[0])\r\n return a4range",
"def get_range_info(self):\n with open(self.range_path, 'r') as _file:\n for line in _file.readlines():\n list0 = line.strip().split('-')\n range_dict = {\n 'min': int(list0[0], 16),\n 'max': int(list0[1], 16),\n 'max_offset': int(list0[1], 16) - int(list0[0], 16),\n }\n self.ranges.append(range_dict)",
"def data_range(xs: List[float]) -> float:\n return max(xs) - min(xs)",
"def summarize_ranges(self, ranges):\n if len(ranges) == 0: return []\n min_ = 'min'\n max_ = 'max'\n for r in ranges:\n if r[0][0] == \"min\":\n r[0][0] = min_\n else:\n min_ = r[0][0]\n if r[-1][1] == \"max\":\n r[-1][1] = max_\n else:\n max_ = r[-1][1]\n return ranges[-1]",
"def getColorRange(self):\n vmax=self.data_matrix.max()\n vmin=self.data_matrix.min()\n\n if vmax * vmin < 0: # ie number range spans +ve and -ve\n vmax = max([vmax, abs(vmin)])\n vmin = -1*vmax\n\n return vmax,vmin",
"def findRanges(data_grouped):\n ranges = []\n for i in data_grouped.columns:\n theRange = (data_grouped[i].min(), data_grouped[i].max())\n ranges.append(theRange)\n return ranges",
"def range_to_m(self, data):\n return data * self._total_range + self._min_range_m",
"def range(self):\n lows, highs = [], []\n for graph in self._graphs.values():\n low, high = graph.range()\n lows.append(low)\n highs.append(high)\n return (min(lows), max(highs))",
"def get_range(self) -> tuple[int, int]:\n return self.range_from, self.range_to",
"def _parse_vrange(self, data):\n vmin = self.config.get('vmin', np.nanmin(data))\n vmax = self.config.get('vmax', np.nanmax(data))\n vrange = self.config.get('vrange', None)\n\n # Parse vmin, vmax\n if isinstance(vmin, str):\n vmin = np.nanquantile(data, q=float(vmin))\n if isinstance(vmax, str):\n vmax = np.nanquantile(data, q=float(vmax))\n\n # Parse vrange\n if vrange is True:\n vrange = max(abs(np.nanmin(data)), abs(np.nanmax(data)))\n elif isinstance(vrange, str):\n vrange = abs(np.nanquantile(data, q=(float(vrange), 1-float(vrange)))).max()\n\n if vrange is not None:\n if isinstance(vrange, (list, tuple, np.ndarray)):\n vmin, vmax = vrange\n else:\n vmin, vmax = -vrange, vrange\n return vmin, vmax",
"def in_range(data, minval=-np.inf, maxval=np.inf):\n return (minval <= data) & (data <= maxval)",
"def ex_range(data):\n a, b, step = _cleanse_range_args(data)\n return list(range(a, b+sign(step), step))",
"def range(df):\r\n\r\n\tdf_range_dict = dict()\r\n\r\n\tfor i, col in enumerate(df.columns):\r\n\t\tdf_range_dict[col] = [df[col].max(), df[col].min(), df[col].max() - df[col].min()]\r\n\r\n\tdf_range = pd.DataFrame(df_range_dict, index=['Max Value', 'Min Value', 'Range (Max - Min)'])\r\n\tpd.set_option('precision', 2) # set output display precision in 2 decimal places\r\n\r\n\treturn df_range",
"def calcrange(data, log=False):\n xmin, xmax = None, None\n for x in data:\n if not log or x > 0.:\n if xmin is None or x < xmin: xmin = x\n if xmax is None or x > xmax: xmax = x\n\n if xmin is None and xmax is None:\n if log:\n return 0.1, 1.\n else:\n return 0., 1.\n else:\n return xmin, xmax",
"def range(self):\n\n return time_stat(self, stat=\"range\")",
"def get_range(df, col):\n return df[col].min(), df[col].max()",
"def _calc_range(self) -> np.ndarray:\n if self._is_ct25k():\n range_resolution = 30\n n_gates = 256\n else:\n n_gates = int(self.metadata[\"number_of_gates\"])\n range_resolution = int(self.metadata[\"range_resolution\"])\n return np.arange(n_gates) * range_resolution + range_resolution / 2",
"def scale_range(data, minTo, maxTo):\n minFrom = np.min(data)\n maxFrom = np.max(data)\n \n scaled_data = []\n \n for point in data:\n new_point = minTo + (maxTo - minTo) * ((point - minFrom)/(maxFrom - minFrom))\n scaled_data.append(new_point)\n \n return scaled_data",
"def map_to_range(val, old_min, old_max, new_min, new_max):\n return new_max - (val - old_min) * (new_max - new_min) / (old_max - old_min)",
"def normalizeToRange(data,max=255,min=0):\n if min: return (max-min)*normalize(data)+min\n else: return max*normalize2(data) # speeds up operation",
"def find_min_max(data):\n v = [i[1] for i in data]\n extremes = [min(v), max(v)]\n logging.info('Calculated extremes: %s', extremes)\n return extremes",
"def get_range(lst):\n return float(max(lst)) - float(min(lst))",
"def _get_range(self):\n return tuple((0, m, 1) for m in self.level_shapes[0])",
"def range(series):\n return min(series), max(series)",
"def range(self) -> ty.Tuple[float, float]:\r\n ...",
"def _get_min_max_value(min, max, value=None, step=None):\n # Either min and max need to be given, or value needs to be given\n if value is None:\n if min is None or max is None:\n raise ValueError('unable to infer range, value from: ({0}, {1}, {2})'.format(min, max, value))\n diff = max - min\n value = min + (diff / 2)\n # Ensure that value has the same type as diff\n if not isinstance(value, type(diff)):\n value = min + (diff // 2)\n else: # value is not None\n if not isinstance(value, Real):\n raise TypeError('expected a real number, got: %r' % value)\n # Infer min/max from value\n if value == 0:\n # This gives (0, 1) of the correct type\n vrange = (value, value + 1)\n elif value > 0:\n vrange = (-value, 3*value)\n else:\n vrange = (3*value, -value)\n if min is None:\n min = vrange[0]\n if max is None:\n max = vrange[1]\n if step is not None:\n # ensure value is on a step\n tick = int((value - min) / step)\n value = min + tick * step\n if not min <= value <= max:\n raise ValueError('value must be between min and max (min={0}, value={1}, max={2})'.format(min, value, max))\n return min, max, value",
"def get_bounds(self):\n return ([self.t_min] * self.dim,[self.t_max] * self.dim)",
"def _parse_requantization_ranges(self):\n res = {}\n\n print_suffix = \"__print__\"\n lines = self._get_valid_log()\n temp_min = {}\n temp_max = {}\n pattern_def = r\"{};{}:\\[\\-?\\d+\\.?\\d*e?-?\\+?\\d*\\]\".format(print_suffix, self.postfix)\n for i in lines:\n if not re.search(pattern_def, i):\n continue\n\n max_line_data = i.split(print_suffix + \";\" + self.postfix)[-1]\n min_value = max_line_data.split('][')[0].split('[')[1]\n max_value = max_line_data.split('][')[1].split(']')[0]\n name = i.split(';')[1].strip()[:-len(print_suffix)]\n if name not in temp_min:\n temp_min[name] = []\n if name not in temp_max:\n temp_max[name] = []\n\n temp_min[name].append(float(min_value))\n temp_max[name].append(float(max_value))\n\n for key in temp_min:\n target_min_index = int(np.ceil(len(temp_min[key]) * (1 - self.threshold)))\n\n if key not in res:\n res[key] = []\n\n if target_min_index > len(temp_min[key]) - 1:\n target_min_index = len(temp_min[key]) - 1\n res[key].append(sorted(temp_min[key])[target_min_index])\n\n for key in temp_max:\n target_max_index = int(np.floor(len(temp_max[key]) * self.threshold)) - 1\n\n if target_max_index > len(temp_max[key]) - 1:\n target_max_index = len(temp_max[key]) - 1\n\n res[key].append(sorted(temp_max[key])[target_max_index])\n\n if self.tensor_data:\n for k, v in self.tensor_data.items():\n if k in res:\n self.logger.debug(\"Update node {} min to {}, max to {}.\".format(k, v[2], v[3]))\n res[k] = [v[2], v[3]]\n return res",
"def get_bounds():\n return [0.00], [1.00]",
"def bounds(lines):\n min_x = bench_util.Max\n min_y = bench_util.Max\n max_x = bench_util.Min\n max_y = bench_util.Min\n \n for line in lines.itervalues():\n for x, y in line:\n min_x = min(min_x, x)\n min_y = min(min_y, y)\n max_x = max(max_x, x)\n max_y = max(max_y, y)\n \n return ((min_x, min_y), (max_x, max_y))",
"def test_get_range(self):\n pass",
"def get_max_and_min(self):\n max_x = float('-inf')\n min_x = float('inf')\n max_y = float('-inf')\n min_y = float('inf')\n max_z = float('-inf')\n min_z = float('inf')\n ans = max_x, max_y, max_z, min_x, min_y, min_z\n counter = 0\n for src, node in self._graph.get_all_v().items():\n if node.location is not None:\n x = node.location.x\n y = node.location.y\n z = node.location.z\n counter += 1\n max_x = x if x > max_x else max_x\n min_x = x if x < min_x else min_x\n max_y = y if y > max_y else max_y\n min_y = y if y < min_y else min_y\n max_z = z if z > max_z else max_z\n min_z = z if z < min_z else min_z\n if counter > 4:\n ans = max_x, max_y, max_z, min_x, min_y, min_z\n return ans",
"def scan_range(self, obj):\n detect_minmax = []\n for item in self._category:\n cat = item.replace(' ', '')\n has_minmax = False\n for k, v in obj.items():\n has_minmax = has_minmax or isinstance(v.get(cat), dict)\n in_k, in_v = list(v.items())[-1]\n while not isinstance(in_v, str):\n has_minmax = has_minmax or isinstance(v.get(cat), dict)\n in_k, in_v = list(in_v.items())[-1]\n \n if has_minmax:\n detect_minmax.append('Min ' + item)\n detect_minmax.append('Max ' + item)\n else:\n detect_minmax.append(item)\n \n self._category_aux = detect_minmax\n for c in self._category_aux:\n self._data[c] = []",
"def GetScalarRange(self):\n ...",
"def _update_data_range(self):\r\n self._h_min = np.min(self.h)\r\n self._h_max = np.max(self.h)\r\n self._hr_min = np.min(self.hr)\r\n self._hr_max = np.max(self.hr)\r\n self._m_min = np.nanmin(self.m)\r\n self._m_max = np.nanmax(self.m)\r\n\r\n if self.temperature is None or np.all(np.isnan(self.temperature)):\r\n self._T_min = np.nan\r\n self._T_max = np.nan\r\n else:\r\n self._T_min = np.nanmin(self.temperature)\r\n self._T_max = np.nanmax(self.temperature)\r\n\r\n return",
"def range_(headers, data):\n\tcolumn_matrix=data.get_data(headers).getT() # get columns as rows, as this makes analysis much easier by just perfoming operations on column list directly\n\tif column_matrix==[]:\n\t\tprint \"wrong headers, not present in data Object\"\n\t\treturn []\n\tcolumn_max=column_matrix.max(1)\n\tcolumn_min=column_matrix.min(1)\n\tfinal=np.concatenate((column_min, column_max), axis=1)\n\t\n\trng=final.tolist()\n\treturn rng",
"def heckbert_interval(data_low, data_high, numticks=8, nicefunc=_nice, enclose=False):\n if data_high == data_low:\n return data_high, data_low, 0\n if numticks == 0:\n numticks = 1\n\n range = nicefunc(data_high - data_low)\n if numticks > 1:\n numticks -= 1\n d = nicefunc(range / numticks, round=True)\n if enclose:\n graphmin = ceil(data_low / d) * d\n graphmax = floor(data_high / d) * d\n else:\n graphmin = floor(data_low / d) * d\n graphmax = ceil(data_high / d) * d\n return graphmin, graphmax, d",
"def define_range():\n\n def_range = {'lt': [0.0, 24.0],\n 'lon': [0.0, 360.0],\n 'angle': [0.0, 2.0 * np.pi]}\n\n return def_range",
"def getRangeMM(self) -> float:\n ...",
"def _value_in_bounds(self, vals):\n return (self._min_in_bounds(vals[0]), self._max_in_bounds(vals[1]))",
"def get_range(self):\n return time_to_range(self.get_time())",
"def compute_bounds(self, space):\n bounds = np.zeros((len(space), 2))\n\n for idx, param in enumerate(space):\n\n if TYPE[param[\"type\"]] is TYPE.FLOAT or \\\n TYPE[param[\"type\"]] is TYPE.INTEGER:\n bounds[idx] = (param[\"min\"], param[\"max\"])\n\n elif TYPE[param[\"type\"]] is TYPE.DISCRETE or \\\n TYPE[param[\"type\"]] is TYPE.DISCRETE:\n bounds[idx] = (0, len(param['values']))\n\n return bounds",
"def get_statistics(data):\n v_min = None\n v_max = None\n v_avg = None\n v = None\n v_sum = .0\n count = 0\n for d in data:\n if d is None:\n continue\n try:\n v = float(d)\n except ValueError:\n print(pc.CRED, d, pc.CEND, end=',')\n continue\n if count == 0:\n v_min = v\n v_max = v\n else:\n if v < v_min:\n v_min = v\n if v > v_max:\n v_max = v\n v_sum += v\n count += 1\n if count > 0:\n v_avg = round(v_sum/count, 2)\n return v_min, v_max, v_avg",
"def detect_range(self) -> Union[int, float]:\n return self.proto.detect_range",
"def range(self) -> Tuple[Union[int, float], Union[int, float]]:\n return self._range",
"def getRange(self) -> Tuple[int, int]:\n return self.validator().bottom(), self.validator().top()",
"def bounds(self):\n\n if self.size == 0:\n lo, hi = np.nan, np.nan\n elif self.is_monotonic:\n lo, hi = sorted([self.coordinates[0], self.coordinates[-1]])\n elif self.dtype is np.datetime64:\n lo, hi = np.min(self.coordinates), np.max(self.coordinates)\n else:\n lo, hi = np.nanmin(self.coordinates), np.nanmax(self.coordinates)\n\n return lo, hi",
"def _get_display_range(image): # pragma: no cover\n ip = _get_image_properties(image)\n immin, immax = np.min(image), np.max(image)\n if ip.signed:\n magnitude = max(abs(immin), abs(immax))\n lo, hi = -magnitude, magnitude\n cmap = _diverging_colormap\n elif any(ip):\n _raise_warnings(ip)\n lo, hi = immin, immax\n cmap = _nonstandard_colormap\n else:\n lo = 0\n imtype = image.dtype.type\n hi = dtype_range[imtype][1]\n cmap = _default_colormap\n return lo, hi, cmap",
"def bounds(self):\n return self.xmin, self.xmax, self.ymin, self.ymax",
"def get_range(self, start=None, end=None):\n\n # handle the case of no data\n if self.data.shape[0] == 0 or self.source.data[\"index\"].shape[0] == 0:\n return None, None\n\n first_source_idx = self.source.data[\"index\"][0]\n last_source_idx = self.source.data[\"index\"][-1]\n\n # convert to timestamp if necessary\n if isinstance(self.data.index, pd.DatetimeIndex):\n start = pd.to_datetime(start, unit=\"ms\")\n end = pd.to_datetime(end, unit=\"ms\")\n first_source_idx = pd.to_datetime(first_source_idx, unit=\"ms\")\n last_source_idx = pd.to_datetime(last_source_idx, unit=\"ms\")\n\n # get new start and end\n if start is not None:\n if start < first_source_idx:\n start = max(self.data.index[0], start)\n elif start > last_source_idx:\n start = min(self.data.index[-1], start)\n elif start < self.data.index[0]:\n start = self.data.index[0]\n elif start > self.data.index[-1]:\n start = self.data.index[-1]\n elif len(self.source.data[\"index\"]) > 0:\n start = first_source_idx\n else:\n start = self.data.index[0]\n\n if end is not None:\n if end < first_source_idx:\n end = max(self.data.index[0], end)\n elif end > last_source_idx:\n end = min(self.data.index[-1], end)\n elif end < self.data.index[0]:\n end = self.data.index[0]\n elif end > self.data.index[-1]:\n end = self.data.index[-1]\n elif len(self.source.data[\"index\"]) > 0:\n end = last_source_idx\n else:\n end = self.data.index[-1]\n\n return start, end",
"def fetchbounds(self):\n pnts = [x for x in [self.out_start, self.start, self.in_start, \\\n self.in_end, self.end, self.out_end] \\\n if x is not None]\n return min(pnts), max(pnts)",
"def get_dec_i_range(data_decs):\n data_dec_is = map_list(lambda x: (x - 1800)//10, data_decs)\n lower_range_i = data_dec_is[0]\n upper_range_i = data_dec_is[-1] + 1\n return (lower_range_i, upper_range_i)",
"def test_inclusive_intervals(self):\n dim = Integer(\"yolo\", \"uniform\", -3, 5.5)\n assert dim.interval() == (-3, 3)",
"def __init__(self, data):\n\t\t#######################################################################\n\t\t# ** START OF YOUR CODE **\n\t\t#######################################################################\n\t\tself.data_min = data.min(axis=0)\n\t\tself.data_max = data.max(axis=0)\n\t\t#######################################################################\n\t\t# ** END OF YOUR CODE **\n\t\t#######################################################################",
"def checkRange(x,y,w,h,maxW,maxH):\n if x < 0:\n x = 0\n if y < 0:\n y = 0\n if x + w >= maxW:\n w = maxW-x-1\n if y + h >= maxH:\n h = maxH-y-1\n return [x,y,w,h]",
"def min_values(self, lower, upper): \n if not self.lower_bounds is None:\n return self.lower_bounds\n\n minus = np.clip(self.coeffs,-math.inf,0)\n plus = np.clip(self.coeffs,0,math.inf)\n self.lower_bounds = plus.dot(lower) + minus.dot(upper) + self.const\n \n return self.lower_bounds",
"def getCellData(X, y, min0, max0, min1, max1):\n Xcell = []\n ycell = []\n\n for x,label in zip(X, y):\n if (x[0] >= min0) and (x[0] < max0) and (x[1] >= min1) and (x[1] < max1):\n Xcell.append(x)\n ycell.append(label)\n\n return np.array(Xcell), np.array(ycell)",
"def map_range(x, in_min, in_max, out_min, out_max):\n mapped = (x-in_min) * (out_max - out_min) / (in_max-in_min) + out_min\n if out_min <= out_max:\n return max(min(mapped, out_max), out_min)\n return min(max(mapped, out_max), out_min)",
"def get_continuum_in_range(w,s,low_low, low_high, high_low, high_high,\n pmin=12,pmax=88, only_correct_negative_values = False,\n fit_degree=2, plot = True, verbose = True, warnings=True) :\n s_low = s[np.where((w <= low_low))] \n s_high = s[np.where((w >= high_high))] \n \n w_fit = w[np.where((w > low_low) & (w < high_high))]\n w_fit_low = w[np.where((w > low_low) & (w < low_high))]\n w_fit_high = w[np.where((w > high_low) & (w < high_high))]\n\n y_fit = s[np.where((w > low_low) & (w < high_high))]\n y_fit_low = s[np.where((w > low_low) & (w < low_high))]\n y_fit_high = s[np.where((w > high_low) & (w < high_high))]\n\n # Remove outliers\n median_y_fit_low = np.nanmedian(y_fit_low)\n for i in range(len(y_fit_low)):\n if np.nanpercentile(y_fit_low,2) > y_fit_low[i] or y_fit_low[i] > np.nanpercentile(y_fit_low,98): y_fit_low[i] =median_y_fit_low\n\n median_y_fit_high = np.nanmedian(y_fit_high)\n for i in range(len(y_fit_high)):\n if np.nanpercentile(y_fit_high,2) > y_fit_high[i] or y_fit_high[i] > np.nanpercentile(y_fit_high,98): y_fit_high[i] =median_y_fit_high\n \n w_fit_cont = np.concatenate((w_fit_low,w_fit_high))\n y_fit_cont = np.concatenate((y_fit_low,y_fit_high))\n \n try:\n fit = np.polyfit(w_fit_cont,y_fit_cont, fit_degree)\n yfit = np.poly1d(fit)\n y_fitted = yfit(w_fit)\n \n y_fitted_low = yfit(w_fit_low)\n median_low = np.nanmedian(y_fit_low-y_fitted_low)\n rms=[]\n for i in range(len(y_fit_low)):\n rms.append(y_fit_low[i]-y_fitted_low[i]-median_low)\n \n # rms=y_fit-y_fitted\n lowlimit=np.nanpercentile(rms,pmin)\n highlimit=np.nanpercentile(rms,pmax)\n \n corrected_s_ =copy.deepcopy(y_fit)\n for i in range(len(w_fit)):\n if w_fit[i] >= low_high and w_fit[i] <= high_low: # ONLY CORRECT in [low_high,high_low] \n if only_correct_negative_values:\n if y_fit[i] <= 0 : \n corrected_s_[i] = y_fitted[i]\n else:\n if y_fit[i]-y_fitted[i] <= lowlimit or y_fit[i]-y_fitted[i] >= highlimit: corrected_s_[i] = y_fitted[i]\n \n \n corrected_s = np.concatenate((s_low,corrected_s_))\n corrected_s = np.concatenate((corrected_s,s_high))\n \n \n if plot:\n ptitle = \"CorrectionBase in range \"+np.str(np.round(low_low,2))+\" - [ \"+np.str(np.round(low_high,2))+\" - \"+np.str(np.round(high_low,2))+\" ] - \"+np.str(np.round(high_high,2))\n plot_plot(w_fit,[y_fit,y_fitted,y_fitted-highlimit,y_fitted-lowlimit,corrected_s_], color=[\"r\",\"b\", \"black\",\"black\",\"green\"], alpha=[0.3,0.7,0.2,0.2,0.5],xmin=low_low-40, xmax=high_high+40,vlines=[low_low,low_high,high_low,high_high],ptitle=ptitle, ylabel=\"Normalized flux\") \n #plot_plot(w,[s,corrected_s],xmin=low_low-40, xmax=high_high+40,vlines=[low_low,low_high,high_low,high_high])\n except Exception:\n if warnings: print(\" Fitting the continuum failed! Nothing done.\")\n corrected_s = s\n\n return corrected_s",
"def minmax(data, fields):\n vmin = min(data[field].min() for field in fields)\n vmax = max(data[field].max() for field in fields)\n return dict(vmin=vmin, vmax=vmax)",
"def range(self):\n return self.range_array",
"def get_dyn_range(scale, zero_point, dtype):\n if dtype == torch.quint8:\n min_val, max_val = 0, 255\n elif dtype == torch.qint8:\n min_val, max_val = -128, 127\n else:\n raise RuntimeError(f\"Unsupported quantized dtype {dtype}\")\n\n return (min_val - zero_point) * scale, (max_val - zero_point) * scale",
"def x_y_coor_min_max(x_y_coor):\n\tx_range = [np.min(x_y_coor[\"X\"]),np.max(x_y_coor[\"X\"])]\n\ty_range = [np.min(x_y_coor[\"Y\"]),np.max(x_y_coor[\"Y\"])]\n\treturn x_range, y_range",
"def _get_energy_range(self):\n\n e0_min = self.network.isomers[0].E0\n e0_max = e0_min\n\n for isomer in self.network.isomers[1:]:\n E0 = isomer.E0\n if E0 < e0_min:\n e0_min = E0\n if E0 > e0_max:\n e0_max = E0\n for reactant in self.network.reactants:\n E0 = reactant.E0\n if E0 < e0_min:\n e0_min = E0\n if E0 > e0_max:\n e0_max = E0\n for product in self.network.products:\n E0 = product.E0\n if E0 < e0_min:\n e0_min = E0\n if E0 > e0_max:\n e0_max = E0\n for rxn in self.network.path_reactions:\n E0 = rxn.transition_state.conformer.E0.value_si\n if E0 < e0_min:\n e0_min = E0\n if E0 > e0_max:\n e0_max = E0\n\n return e0_min, e0_max",
"def limits(self):\n\n\t\treturn [\n\t\t\tmin(self.xvalues),\n\t\t\tmax(self.xvalues),\n\t\t\tmin(self.yvalues),\n\t\t\tmax(self.yvalues)]",
"def minima_in_range(r, g_r, r_min, r_max):\n idx = np.where(np.logical_and(np.greater_equal(r, r_min), np.greater_equal(r_max, r)))\n g_r_slice = g_r[idx]\n g_r_min = g_r_slice[g_r_slice.argmin()]\n idx_min, _ = find_nearest(g_r, g_r_min)\n return r[idx_min], g_r[idx_min]",
"def range(x):\n try:\n return (min(min(y) for y in x), max(max(y) for y in x))\n except ValueError:\n return (None, None)",
"def bounds(self) -> Tensor:\n return torch.cat([self.mins, self.mins + self.ranges], dim=-2)",
"def minmax(data):\n smallest = data[0]\n largest = data[0]\n\n for i in range(0,len(data)):\n if data[i] < smallest:\n smallest = data[i]\n elif data[i] > largest:\n largest = data[i]\n\n return(smallest,largest)",
"def _adjustRange(self, start, end):\n adjusted_start = start\n if self._start:\n if end < self._start:\n return None\n adjusted_start = max(self._start, start)\n \n adjusted_end = end\n if self._end:\n if self._end < start:\n return None\n adjusted_end = min(self._end, end)\n \n return (adjusted_start, adjusted_end)",
"def min_range(self):\n return self._min_range",
"def min_max(self, data, era):\n return 0, np.max(data)",
"def get_range(value):\n\n raw = value\n\n # If we find a '@' at the beginning of the range, we should invert\n # the match.\n\n invert = False\n\n if value.find('@') == 0:\n invert = True\n value = value.lstrip('@')\n\n # The : separates a max/min range. If it exists, there is at least\n # a minimum. We'll start our ranges at zero and infinity so we don't\n # have to worry about complex testing logic.\n\n bottom = 0\n top = float('infinity')\n\n if value.find(':') > 0:\n (bottom, top) = value.split(':')\n if top == '':\n top = float('infinity')\n else:\n top = float(top)\n\n if bottom == '':\n bottom = 0\n elif bottom == '~':\n bottom = -float('infinity')\n else:\n bottom = float(bottom)\n else:\n top = float(value)\n\n return (bottom, top, invert, raw)",
"def get_range(min, max, intervals, log):\n if not log:\n min = float(min)\n max = float(max)\n difference = max-min\n step_size = difference/intervals\n output = [min + i*step_size for i in range(intervals+1)]\n return output\n else:\n from math import log10 as log\n log_min = log(min)\n log_max = log(max)\n log_difference = log_max - log_min\n step_size = log_difference/intervals\n output = [pow(10, log_min + i*step_size) for i in range(intervals+1)]\n return output",
"def calc_bounds(roi: np.ndarray) -> Dict[int, BoundInfo]:\n try:\n min_bounds, max_bounds = calc_bounds(roi)\n return {\n num: BoundInfo(lower=lower, upper=upper)\n for num, (lower, upper) in enumerate(zip(min_bounds, max_bounds))\n if num != 0 and upper[0] != -1\n }\n except KeyError:\n bound_info = {}\n points = np.nonzero(roi)\n comp_num = roi[points]\n point_dict = defaultdict(list)\n for num, point in zip(comp_num, np.transpose(points)):\n point_dict[num].append(point)\n for num, points_for_num in point_dict.items():\n lower = np.min(points_for_num, 0)\n upper = np.max(points_for_num, 0)\n bound_info[num] = BoundInfo(lower=lower, upper=upper)\n return bound_info",
"def bounds(self, start=None, finish=None):\n lower = start if start is not None else self.limits[0]\n upper = finish if finish is not None else self.limits[1]\n\n lower = lower + self.offsets[0]\n upper = upper + self.offsets[1]\n\n return (lower, upper)",
"def min_max_range(s):\n # note np.argmax, np.argmin returns the position of first occurence of global max, min\n sign = np.sign(np.argmax(s) - np.argmin(s))\n if sign == 0:\n return 0.0\n else:\n return sign*(np.max(s) - np.min(s))",
"def range_callback(data):\n global D\n D.ranges = data.ranges",
"def test_get_meta_range(self):\n pass",
"def view_limits(self, vmin, vmax):\n return vmin, vmax\n # return nonsingular(vmin, vmax)",
"def range_(self):\n return tuple((e[0], e[-1]) for e in self.edges)",
"def _bound(x, min_value, max_value):\n return np.maximum(min_value, np.minimum(x, max_value))",
"def _set_min_max_values(self):\n\n p_1, p_2 = self.points[0], self.points[1]\n nb_dim = len(p_1.values)\n self._min_values = []\n self._max_values = []\n for d in range(nb_dim):\n d_min = min(p_1[d], p_2[d])\n d_max = max(p_2[d], p_2[d])\n self._min_values.append(d_min)\n self._max_values.append(d_max)",
"def get_ranges(self) -> typing.List[typing.Tuple[float, float]]:\n return self.ranges[:]",
"def get_range(self, range, last):\n found = re.match('\\s*([+-]?)([0-9]+)(%?)(\\s*,\\s*([+-]?)([0-9]+)(%?))?\\s*', str(range))\n if not found:\n raise ConfigError('Failed to parse range \"%s\" in \"__size\"!' % str(range))\n (s1, n1, p1, v2, s2, n2, p2) = found.groups()\n if v2:\n d1 = last * int(n1) / 100.0 if p1 else int(n1)\n d2 = last * int(n2) / 100.0 if p2 else int(n2)\n m = last + d1 if s1 == '+' else last - d1 if s1 == '-' or p1 else d1\n M = last - d2 if s2 == '-' else last + d2 if s2 == '+' or p2 else d2\n else:\n d = last * int(n1) / 100.0 if p1 else int(n1)\n m = last - d\n M = last + d\n return m, M",
"def _check_range(range_):\n try:\n if not isinstance(range_, list):\n range_ = list(range_)\n min_, max_ = range_\n except (ValueError, TypeError):\n raise TypeError(\"each range in ising_linear_ranges should be a list of length 2.\")\n if not isinstance(min_, Number) or not isinstance(max_, Number) or min_ > max_:\n raise ValueError((\"each range in ising_linear_ranges should be a 2-tuple \"\n \"(min, max) where min <= max\"))\n return range_",
"def get_lims(data):\n return data[:, 0].min() - 1, data[:, 0].max() + 1, data[:, 1].min() - 1, data[:, 1].max() + 1",
"def mapRange(num, min1, max1, min2, max2, clamp=True):\n if(clamp and num < min1):\n return min2\n if(clamp and num > max1):\n return max2\n\n num1 = (num - min1) / (max1 - min1)\n num2 = (num1 * (max2 - min2)) + min2\n return num2",
"def _adjust_data_range_using_min_ratio(c_min, c_max, c_axis_range, *, min_ratio=0.01):\n c_range = c_max - c_min\n if c_range < c_axis_range * min_ratio:\n c_center = (c_max + c_min) / 2\n c_new_range = c_axis_range * min_ratio\n c_min = c_center - c_new_range / 2\n c_max = c_center + c_new_range / 2\n return c_min, c_max",
"def calculate_min_max_tiles(self):",
"def variable_range(examples, var):\n if var[1] == 'd':\n range = set()\n for datum in examples:\n range.add(datum[var[0]])\n return range\n else:\n range_min, range_max = 0, 0\n for datum in examples:\n data_val = float(datum[var[0]])\n range_min, range_max = min(range_min, data_val), max(range_max, data_val)\n return (range_min, range_max)",
"def get_minx_maxx(self, normalized=True):\n minx = np.array([[0.0] * len(self.encoded_feature_names)])\n maxx = np.array([[1.0] * len(self.encoded_feature_names)])\n\n for idx, feature_name in enumerate(self.continuous_feature_names):\n max_value = self.train_df[feature_name].max()\n min_value = self.train_df[feature_name].min()\n\n if normalized:\n minx[0][idx] = (self.permitted_range[feature_name]\n [0] - min_value) / (max_value - min_value)\n maxx[0][idx] = (self.permitted_range[feature_name]\n [1] - min_value) / (max_value - min_value)\n else:\n minx[0][idx] = self.permitted_range[feature_name][0]\n maxx[0][idx] = self.permitted_range[feature_name][1]\n return minx, maxx",
"def value_ranges(self, attributes):\n ranges = []\n for attribute in attributes:\n if not attribute.is_continuous():\n raise inv.InvalidDataError('Cannot discretise non continuous attribute ' + attribute.name)\n values = self.values_grouped_by_attribute(attributes)\n for value in values: #each entry in values is the range of values for a particular attribute\n value.sort()\n ranges.append(r.Range(value[0], value[-1], True))\n return ranges",
"def yield_spectral_range(self) -> Tuple[float, float, float]:\n return [min(self.x), max(self.x), len(self.x)]",
"def max_in_range(self, x, y, low, high):\r\n data = np.vstack((x,y)) \r\n y_values = data[1][np.logical_and(low < data[0], data[0] < high)]\r\n x_values = data[0][np.logical_and(low < data[0], data[0] < high)]\r\n index_max_y = y_values.argmax()\r\n max_y = y_values[index_max_y]\r\n max_x = x_values[index_max_y]\r\n return max_x, max_y",
"def standardize(data, params=None, rangeval=1.0):\n\t\n\tif params == None:\n\t\t(mins, maxs) = (np.min(data, axis=0), np.max(data, axis=0))\n\telse:\n\t\t(mins, maxs) = params\n\n\tdelta_vals = maxs-mins\n\tdelta_vals[delta_vals < 1e-10] = 1e-9\n\n\tstd_data = (data - mins)/delta_vals\n\tstd_data = -rangeval + 2.0*rangeval * std_data\n\t\n\tif params == None:\n\t\treturn (std_data, (mins, maxs))\n\telse:\n\t\treturn std_data"
] | [
"0.74131215",
"0.7375797",
"0.7371305",
"0.73698366",
"0.7079913",
"0.69400847",
"0.6933809",
"0.68565273",
"0.6837749",
"0.6828716",
"0.68116766",
"0.677769",
"0.6730659",
"0.66858673",
"0.6631129",
"0.661481",
"0.66129404",
"0.6605451",
"0.65986764",
"0.65552515",
"0.6548753",
"0.6524711",
"0.65024316",
"0.64580476",
"0.6457069",
"0.6452427",
"0.64333034",
"0.6412027",
"0.6397173",
"0.638291",
"0.63797027",
"0.6364414",
"0.63459617",
"0.63133013",
"0.6302742",
"0.6300528",
"0.6300048",
"0.6287227",
"0.6286539",
"0.6271436",
"0.62367195",
"0.6234636",
"0.6234574",
"0.62244546",
"0.62205666",
"0.6217337",
"0.6216446",
"0.62119436",
"0.62093484",
"0.620763",
"0.62004495",
"0.6177165",
"0.6166013",
"0.61641014",
"0.61449605",
"0.6142985",
"0.61324966",
"0.6126849",
"0.6126068",
"0.6120047",
"0.61181283",
"0.6116497",
"0.6110406",
"0.6103878",
"0.60987335",
"0.6098159",
"0.6088087",
"0.6082834",
"0.6082672",
"0.60765046",
"0.60750127",
"0.6074345",
"0.60702527",
"0.6065357",
"0.60548556",
"0.60504097",
"0.6037784",
"0.603454",
"0.60197985",
"0.6019715",
"0.60178894",
"0.6016931",
"0.601543",
"0.60120815",
"0.6005861",
"0.6003042",
"0.60015225",
"0.6001492",
"0.60002404",
"0.5990648",
"0.59905463",
"0.5978318",
"0.597661",
"0.59592265",
"0.5955927",
"0.595179",
"0.59468234",
"0.59426534",
"0.5931839",
"0.592978"
] | 0.80179286 | 0 |
Set the 3D scalar data represented by this item. Dataset order is zyx (i.e., first dimension is z). | def setData(self, data, copy=True):
if data is None:
self._data = None
self._boundedGroup.shape = None
else:
data = numpy.array(data, copy=copy, dtype=numpy.float32, order='C')
assert data.ndim == 3
assert min(data.shape) >= 2
self._data = data
self._boundedGroup.shape = self._data.shape
self._dataRange = self._computeRangeFromData(self._data)
self._updated(ItemChangedType.DATA) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _setitem3d(self, index, value):\n ix = index[0]\n iy = index[1]\n iz = index[2]\n\n lovects = self._getlovects()\n hivects = self._gethivects()\n fields = self._getfields()\n\n if len(index) > self.dim:\n if ncomps > 1:\n ic = index[-1]\n else:\n raise Exception('Too many indices given')\n else:\n ic = None\n\n nx = hivects[0,:].max() - self.nghosts\n ny = hivects[1,:].max() - self.nghosts\n nz = hivects[2,:].max() - self.nghosts\n\n # --- Add extra dimensions so that the input has the same number of\n # --- dimensions as array.\n if isinstance(value, np.ndarray):\n value3d = np.array(value, copy=False)\n sss = list(value3d.shape)\n if not isinstance(ix, slice): sss[0:0] = [1]\n if not isinstance(iy, slice): sss[1:1] = [1]\n if not isinstance(iz, slice): sss[2:2] = [1]\n value3d.shape = sss\n\n if isinstance(ix, slice):\n ixstart = max(ix.start or -self.nghosts, -self.nghosts)\n ixstop = min(ix.stop or nx + 1 + self.nghosts, nx + self.overlaps[0] + self.nghosts)\n else:\n ixstart = ix\n ixstop = ix + 1\n if isinstance(iy, slice):\n iystart = max(iy.start or -self.nghosts, -self.nghosts)\n iystop = min(iy.stop or ny + 1 + self.nghosts, ny + self.overlaps[1] + self.nghosts)\n else:\n iystart = iy\n iystop = iy + 1\n if isinstance(iz, slice):\n izstart = max(iz.start or -self.nghosts, -self.nghosts)\n izstop = min(iz.stop or nz + 1 + self.nghosts, nz + self.overlaps[2] + self.nghosts)\n else:\n izstart = iz\n izstop = iz + 1\n\n for i in range(len(fields)):\n\n # --- The ix1, 2 etc are relative to global indexing\n ix1 = max(ixstart, lovects[0,i])\n ix2 = min(ixstop, lovects[0,i] + fields[i].shape[0])\n iy1 = max(iystart, lovects[1,i])\n iy2 = min(iystop, lovects[1,i] + fields[i].shape[1])\n iz1 = max(izstart, lovects[2,i])\n iz2 = min(izstop, lovects[2,i] + fields[i].shape[2])\n\n if ix1 < ix2 and iy1 < iy2 and iz1 < iz2:\n\n sss = (slice(ix1 - lovects[0,i], ix2 - lovects[0,i]),\n slice(iy1 - lovects[1,i], iy2 - lovects[1,i]),\n slice(iz1 - lovects[2,i], iz2 - lovects[2,i]))\n if ic is not None:\n sss = tuple(list(sss) + [ic])\n\n if isinstance(value, np.ndarray):\n vslice = (slice(ix1 - ixstart, ix2 - ixstart),\n slice(iy1 - iystart, iy2 - iystart),\n slice(iz1 - izstart, iz2 - izstart))\n fields[i][sss] = value3d[vslice]\n else:\n fields[i][sss] = value",
"def SetDataSlice(vDataSet,arr,aIndexZ,aIndexC,aIndexT):\r\n nx = vDataSet.GetSizeX()\r\n ny = vDataSet.GetSizeY()\r\n nz = vDataSet.GetSizeZ()\r\n dtype = GetType(vDataSet)\r\n\r\n if DEBUG:\r\n print(\"SetDataVolume\")\r\n print(\"vDataSet:\",(nz,ny,nx),GetType(vDataSet))\r\n print(arr.shape)\r\n print(arr.dtype)\r\n print(aIndexC)\r\n print(aIndexT)\r\n\r\n #Make sure the data is in range and convert the array\r\n s = arr\r\n if dtype != arr.dtype:\r\n miset,maset = GetTotalRange(vDataSet)\r\n arr[arr<miset]=miset\r\n arr[arr>maset]=maset\r\n s = arr.astype(dtype)\r\n\r\n s = s.swapaxes(0,1)\r\n if dtype == np.uint8:\r\n SetData = vDataSet.SetDataSliceBytes\r\n elif dtype == np.uint16:\r\n SetData = vDataSet.SetDataSliceShorts\r\n elif dtype == np.float32:\r\n SetData = vDataSet.SetDataSliceFloat32\r\n\r\n SetData(s,aIndexZ,aIndexC,aIndexT)\r\n #vDataSet.SetChannelRange(aIndexC,miset,maset)\r",
"def full_3d(self, quantity):\n # The data just tells you what integer grid point you are on. Not what actual x,y coordinate you\n # are at\n x = np.arange(0, self.period, self.dx)\n y = np.arange(0, self.period, self.dy)\n z = np.arange(0, self.height + self.dz, self.dz)\n points = np.array(list(itertools.product(z, x, y)))\n # Get the scalar\n scalar = self.get_scalar_quantity(quantity)\n labels = ('X [um]', 'Y [um]', 'Z [um]', quantity)\n # Now plot!\n self.scatter3d(points[:, 1], points[:, 2], points[\n :, 0], scalar.flatten(), labels, 'full_3d')",
"def __setitem__(self, i, value):\n if i < X:\n raise IndexError(\"point3d::__setitem__: negative index {0}\".format(i))\n if i == X:\n self._x = value\n return\n if i == Y:\n self._y = value\n return\n if i == Z:\n self._z = value\n return\n # beyond Z\n raise IndexError(\"point3d::__setitem__: index too large {0}\".format(i))",
"def _set_data(self, value):\n if len(value.shape) == 1:\n if self.index_dimension == 0:\n value = value[:,newaxis]\n else:\n value = value[newaxis,:]\n\n if len(value.shape) != 2:\n msg = 'Input is %d dimensional, but it must be 1 or 2' \\\n 'dimensional.' % len(value.shape)\n raise ValueError, msg\n\n self._data = value",
"def set_voxel(self, x, y, z, value, ignore=True):\n try:\n if isinstance(y, list):\n y_trans = [self._y_shift - item for item in y]\n # check coordinate validation\n coord_list = [(x[i], y_trans[i], z[i]) for i in range(len(x))]\n coord_list = [c for c in coord_list if c[0]>=0 and \n c[0]<self.get_data_shape()[0] and\n c[1]>=0 and\n c[1]<self.get_data_shape()[1] and\n c[2]>=0 and\n c[2]<self.get_data_shape()[2]]\n x = [c[0] for c in coord_list]\n y_trans = [c[1] for c in coord_list]\n z = [c[2] for c in coord_list]\n if self.is_4d():\n orig_data = self._data[y_trans, x, z, self._time_point]\n else:\n orig_data = self._data[y_trans, x, z]\n if np.any(orig_data != 0) and not ignore:\n force = QMessageBox.question(None, \"Replace?\",\n \"Would you like to replace the original values?\",\n QMessageBox.Yes,\n QMessageBox.No)\n if force == QMessageBox.No:\n return\n if self.is_4d():\n self.undo_stack.push((x, y, z, self._data[y_trans, x, z,\n self._time_point]))\n self._data[y_trans, x, z, self._time_point] = value\n else:\n self.undo_stack.push((x, y, z, self._data[y_trans, x, z]))\n self._data[y_trans, x, z] = value\n try:\n for z_ in range(min(z), max(z)+1):\n self.update_rgba(z_)\n except TypeError:\n self.update_rgba(z)\n if self._cross_pos:\n self.update_orth_rgba()\n except:\n raise\n print \"Input coordinates are invalid.\"",
"def __setitem__(self, *args):\n return _itkSurfaceSpatialObjectPointPython.vectoritkSurfaceSpatialObjectPoint3___setitem__(self, *args)",
"def is3_d(self, is3_d):\n\n self.container['is3_d'] = is3_d",
"def SetData(self, data_):\n return _hypre.HypreParVector_SetData(self, data_)",
"def SetPyData(self, item, data):\r\n\r\n item.SetData(data)",
"def set_data(self, data):\n self.__data = np.asarray(data, dtype=np.float32)\n if data is not None:\n self.account(data)\n return self",
"def scale_on_3d(x3d, scaler):\n (n_segs, n_concat, n_freq) = x3d.shape\n x2d = x3d.reshape((n_segs * n_concat, n_freq))\n x2d = scaler.transform(x2d)\n x3d = x2d.reshape((n_segs, n_concat, n_freq))\n return x3d",
"def test_simple_3d(self):\r\n a = tt.dtensor3()\r\n increment = tt.dscalar()\r\n sl1 = slice(None)\r\n sl2_end = tt.lscalar()\r\n sl2 = slice(sl2_end)\r\n sl3 = 2\r\n\r\n for do_set in [True, False]:\r\n print \"Set\", do_set\r\n\r\n if do_set:\r\n resut = tt.set_subtensor(a[sl1, sl3, sl2], increment)\r\n else:\r\n resut = tt.inc_subtensor(a[sl1, sl3, sl2], increment)\r\n\r\n f = theano.function([a, increment, sl2_end], resut)\r\n\r\n val_a = numpy.ones((5, 3, 4))\r\n val_inc = 2.3\r\n val_sl2_end = 2\r\n\r\n expected_result = numpy.copy(val_a)\r\n result = f(val_a, val_inc, val_sl2_end)\r\n\r\n if do_set:\r\n expected_result[:, sl3, :val_sl2_end] = val_inc\r\n else:\r\n expected_result[:, sl3, :val_sl2_end] += val_inc\r\n\r\n utt.assert_allclose(result, expected_result)",
"def D3(self, *args):\n return _Adaptor3d.Adaptor3d_Surface_D3(self, *args)",
"def set_position(self, x, y, z):\n for sec in self.all:\n for i in range(int(nrn.n3d())):\n nrn.pt3dchange(i, \\\n x-self.x+nrn.x3d(i), \\\n y-self.y+nrn.y3d(i), \\\n z-self.z+nrn.z3d(i), \\\n nrn.diam3d(i))\n self.x = x; self.y = y; self.z = z",
"def test_add_get_tensor_3D(mock_data):\n dataset = Dataset(\"test-dataset\")\n\n # 3D tensors of all datatypes\n data_3D = mock_data.create_data((10, 10, 10))\n add_get_arrays(dataset, data_3D)",
"def __init__(self, x = np.float32(0.0), y = np.float32(0.0), z = np.float32(0.0)):\n\n self._x = np.float32( x )\n self._y = np.float32( y )\n self._z = np.float32( z )",
"def transform(self, data):\n self.cube = self.trf.transform(data)",
"def __init__(self, xx: float or Vec3 or 'Mat33' = 0.0, xy: float or Vec3 = 0.0, xz: float or Vec3 = 0.0,\n yx: float = 0.0, yy: float = 0.0, yz: float = 0.0, zx: float = 0.0, zy: float = 0.0, zz: float = 0.0):\n\n if isinstance(xx, Mat33):\n self.data = xx.data.copy()\n else:\n self.data = [xx, xy, xz, yx, yy, yz, zx, zy, zz]",
"def __call__(self, data):\n if data.x is None:\n c = torch.full((data.num_nodes, 1), self.value, dtype=torch.float)\n data.x = c\n return data",
"def MakeCoordinates3D(self):\n\n self.points = np.concatenate((self.points, np.zeros((self.points.shape[0],1)) ), axis=1)\n self.points = np.ascontiguousarray(self.points)",
"def setS3(self, num):\n self.space3 = num",
"def __setitem__(self, index, value):\n if index == Ellipsis:\n index = tuple(self.dim*[slice(None)])\n\n if len(index) < self.dim:\n # --- Add extra dims to index if needed\n index = list(index)\n for i in range(len(index), self.dim):\n index.append(slice(None))\n index = tuple(index)\n\n if self.dim == 2:\n return self._setitem2d(index, value)\n elif self.dim == 3:\n return self._setitem3d(index, value)",
"def __init__(self, w, x, y, z):\n self.__scalar = w\n self.__vector = np.array([x, y, z])",
"def test_3d():\n dic, data = ng.bruker.read(os.path.join(DATA_DIR, \"bruker_3d\"))\n assert dic['FILE_SIZE'] == 91226112\n assert data.shape == (116, 128, 768)\n assert round(data[0, 0, 40].real, 2) == 18.0\n assert round(data[0, 0, 40].imag, 2) == -66.0\n assert round(data[5, 13, 91].real, 2) == 1138.0\n assert round(data[5, 13, 91].imag, 2) == 3482.0\n write_readback(dic, data)",
"def z(self, value=None):\n if isinstance(value, (int, float)):\n self[2] = value\n else:\n if value is not None:\n raise TypeError(\"Cannot be set to {}\".format(type(value)))\n return self[2]",
"def f3z1(self, f3z1):\n\n self._f3z1 = f3z1",
"def planes_3d(self, quantity, xplane, yplane):\n xplane = int(xplane)\n yplane = int(yplane)\n # Get the scalar values\n # Get the data on the plane with a fixed x value. These means we'll\n # have changing (y, z) points\n xdata = self.get_plane(quantity, 'yz', xplane)\n # z first cuz we want y to be changing before z to correspond with the\n # way numpy flattens arrays. Note this means y points will be in the\n # 2nd column\n xplanepoints = np.array(list(itertools.product(self.Z, self.Y)))\n xdata = xdata.flatten()\n xplanexval = np.array(list(itertools.repeat(x[xplane], len(xdata))))\n xplanedata = np.zeros((xplanepoints.shape[0], 4))\n xplanedata[:, 0] = xplanexval\n xplanedata[:, 1] = xplanepoints[:, 1]\n xplanedata[:, 2] = xplanepoints[:, 0]\n xplanedata[:, 3] = xdata\n # Same procedure for fixed y plane\n ydata = self.get_plane(quantity, 'xz', yplane)\n yplanepoints = np.array(list(itertools.product(z, x)))\n ydata = ydata.flatten()\n yplaneyval = np.array(list(itertools.repeat(y[yplane], len(ydata))))\n yplanedata = np.zeros((yplanepoints.shape[0], 4))\n yplanedata[:, 0] = yplanepoints[:, 1]\n yplanedata[:, 1] = yplaneyval\n yplanedata[:, 2] = yplanepoints[:, 0]\n yplanedata[:, 3] = ydata\n labels = ('X [um]', 'Y [um]', 'Z [um]', quantity)\n # Now stack them vertically and plot!\n all_data = np.vstack((xplanedata, yplanedata))\n self.scatter3d(all_data[:, 0], all_data[:, 1], all_data[:, 2],\n all_data[:, 3], labels, 'planes_3d')",
"def assign(self, *args):\n return _itkSurfaceSpatialObjectPointPython.vectoritkSurfaceSpatialObjectPoint3_assign(self, *args)",
"def set_value(self, z):\n z = complex(z)\n self.points[0, :2] = (z.real, z.imag)\n return self",
"def Value(self, *args):\n return _Adaptor3d.Adaptor3d_InterFunc_Value(self, *args)",
"def setValue(self, *args):\n return _osgAnimation.Vec3Keyframe_setValue(self, *args)",
"def SetDataVolume(vDataSet,arr,aIndexC,aIndexT):\r\n nx = vDataSet.GetSizeX()\r\n ny = vDataSet.GetSizeY()\r\n nz = vDataSet.GetSizeZ()\r\n dtype = GetType(vDataSet)\r\n\r\n if DEBUG:\r\n print(\"SetDataVolume\")\r\n print(\"vDataSet:\",(nz,ny,nx),GetType(vDataSet))\r\n print(arr.shape)\r\n print(arr.dtype)\r\n print(aIndexC)\r\n print(aIndexT)\r\n\r\n #Make sure the data is in range and convert the array\r\n s = arr\r\n if dtype != arr.dtype:\r\n miset,maset = GetTotalRange(vDataSet)\r\n arr[arr<miset]=miset\r\n arr[arr>maset]=maset\r\n s = arr.astype(dtype)\r\n\r\n if dtype == np.uint8:\r\n SetData = vDataSet.SetDataVolumeAs1DArrayBytes\r\n s = s.tostring()\r\n elif dtype == np.uint16:\r\n SetData = vDataSet.SetDataVolumeAs1DArrayShorts\r\n s = np.ravel(s)\r\n elif dtype == np.float32:\r\n SetData = vDataSet.SetDataVolumeAs1DArrayFloats\r\n s = np.ravel(s)\r\n SetData(s,aIndexC,aIndexT)\r\n\r\n if 0:\r\n #Old method slice by slice\r\n if dtype == np.uint8:\r\n SetData = vDataSet.SetDataSubVolumeAs1DArrayBytes\r\n elif dtype == np.uint16:\r\n s = np.ravel(s)\r\n SetData = vDataSet.SetDataSubVolumeAs1DArrayShorts\r\n elif dtype == np.float32:\r\n s = np.ravel(s)\r\n SetData = vDataSet.SetDataSubVolumeAs1DArrayFloats\r\n\r\n for z in range(nz):\r\n t = time.time()\r\n l = arr[z,...].swapaxes(0,1).tostring()\r\n SetData(l,0,0,z,aIndexC,aIndexT,nx,ny,1)\r\n print z,time.time()-t\r\n\r\n #vDataSet.SetChannelRange(aIndexC,miset,maset)\r",
"def Value(self, *args):\n return _Adaptor3d.Adaptor3d_Surface_Value(self, *args)",
"def __setitem__(self, key: Tuple, value: np.array) -> np.array:\n\n if self.axis_order == AxisOrder.XYZ:\n key = (key[2], key[1], key[0])\n\n # Set experiment if unset:\n if self._exp is None:\n self._populate_exp()\n\n # Set cframe if unset:\n if self._coord_frame is None:\n self._populate_coord_frame()\n\n _normalize_units = (1, 1, 1)\n if isinstance(key[-1], str) and len(key) == 4:\n if key[-1] != self._coord_frame.voxel_unit:\n raise NotImplementedError(\n \"Can only reference voxels in native size format which is \"\n f\"{self._coord_frame.voxel_unit} for this dataset.\"\n )\n _normalize_units = self.voxel_size\n\n if isinstance(key[2], int):\n xs = (key[2], key[2] + 1)\n else:\n start = key[2].start if key[2].start else 0\n stop = key[2].stop if key[2].stop else self.shape[0]\n\n start = start / _normalize_units[0]\n stop = stop / _normalize_units[0]\n\n xs = (int(start), int(stop))\n\n if isinstance(key[1], int):\n ys = (key[1], key[1] + 1)\n else:\n start = key[1].start if key[1].start else 0\n stop = key[1].stop if key[1].stop else self.shape[1]\n\n start = start / _normalize_units[1]\n stop = stop / _normalize_units[1]\n\n ys = (int(start), int(stop))\n\n if isinstance(key[0], int):\n zs = (key[0], key[0] + 1)\n else:\n start = key[0].start if key[0].start else 0\n stop = key[0].stop if key[0].stop else self.shape[2]\n\n start = start / _normalize_units[2]\n stop = stop / _normalize_units[2]\n\n zs = (int(start), int(stop))\n\n if len(value.shape) == 2:\n # TODO: Support other 2D shapes as well\n value = np.array([value])\n\n cutout = self.volume_provider.create_cutout(\n self._channel, self.resolution, xs, ys, zs, value\n )",
"def setAxisLengths3D(x=2.,y=2.,z=2.):\n dislin.axis3d(x,y,z)",
"def _set_data(self, polyhedron, data):\n assert polyhedron.parent() is self._polyhedron_parent\n if len(data) != self._vector.degree():\n raise ValueError('V-representation data requires a list of length ambient_dim')\n\n self._vector[:] = data\n\n self._index = len(polyhedron._Vrepresentation)\n polyhedron._Vrepresentation.append(self)\n self._polyhedron = polyhedron",
"def transform3D(x: float, y: float, z: float, R: np.array) -> np.array:\n T = np.zeros((4, 4))\n T[:3, :3] = R\n T[:, 3] = [x, y, z, 1.0]\n\n return T",
"def setData(self, data, copy=True):\n if data is None:\n self._data = None\n self._dataRangeCache = None\n self._boundedGroup.shape = None\n\n else:\n data = numpy.array(data, copy=copy, dtype=numpy.complex64, order='C')\n assert data.ndim == 3\n assert min(data.shape) >= 2\n\n self._data = data\n self._dataRangeCache = {}\n self._boundedGroup.shape = self._data.shape\n\n self._updated(ItemChangedType.DATA)",
"def __setitem__(self, *args):\n return _itkLineSpatialObjectPointPython.vectoritkLineSpatialObjectPoint3___setitem__(self, *args)",
"def multiply( self, scalar ):\n self._coords[:3] *= scalar\n return self",
"def setData(self, data):\n self.data = data\n dagPath, components = self.__getGeometryComponents()\n self.setInfluenceWeights(dagPath, components)\n self.setBlendWeights(dagPath, components)\n\n for attr in ['skinningMethod', 'normalizeWeights']:\n cmds.setAttr('%s.%s' % (self.node, attr), self.data[attr])",
"def set_data(self, y: Iterable[torch.Tensor]):\n self._y = y\n\n return self",
"def test_3d_time():\n dic,data = ng.pipe.read(\"common_data/3d_pipe/data/test%03d.fid\")\n sdic,sdata = ng.pipe.read(\"common_data/3d_pipe/data/test001.fid\")\n assert data.shape == (128, 88, 1250)\n assert data.dtype == 'complex64'\n assert round(data[0,1,2].real,2) == -7.98\n assert round(data[0,1,2].imag,2) == 33.82\n assert round(data[10,22,5].real,2) == 15.71\n assert round(data[10,22,5].imag,2) == 15.1\n\n # and the first slice\n assert sdata.shape == (88, 1250)\n assert sdata.dtype == 'complex64'\n assert round(sdata[1,2].real,2) == -7.98\n assert round(sdata[1,2].imag,2) == 33.82\n assert round(sdata[22,5].real,2) == 22.65\n assert round(sdata[22,5].imag,2) == 13.65\n\n # slice/data matching\n assert_array_equal(data[0],sdata)\n\n write_readback_3D(dic,data)",
"def putdata(self, dat, scale=1.0, offset=0.0):\r\n data = np.array(dat)\r\n data = data * scale + offset\r\n channels, depth = self._get_channels_and_depth(self._mode)\r\n siz = self.size\r\n _im = np.ravel(self._instance)\r\n data = data[:len(_im)]\r\n _im = _im[:len(data)] = data\r\n self._instance = _im.reshape((siz[1], siz[0], channels))\r\n self._instance = self._instance.astype(depth)",
"def Compute3d(self, *args):\n return _BRepAlgo.BRepAlgo_NormalProjection_Compute3d(self, *args)",
"def three_dimensional(self, z): # Maybe I misunderstood the task. My method looks weird\n return (self.x, self.y, z)",
"def test_set_v3(self):\n\n test_vec = Vec3(1, 3, 5)\n test_vec.set(2, 4, 6)\n\n self.assertEqual(test_vec, Vec3(2, 4, 6))",
"def __init__(self, _x, _y, _z):\n self.position = Position3d(int(_x), int(_y), int(_z))\n self.velocity = Velocity3d(0, 0, 0)",
"def setView3D(x,y,z, viewtype='absolute'):\n vdict = {'absolute':'ABS','user':'USER','angle':'ANGLE'}\n dislin.view3d(x,y,z,vdict[viewtype])",
"def __init__(self, x, y, z, dx, dy, dz, *args, **kwargs):\n super().__init__((0, 0), (0, 0), *args, **kwargs)\n self.set_data(x, y, z, dx, dy, dz)",
"def z(self) -> np.ndarray:\n return self.array[:, 3] if self.scalar_vector else self.array[:, 2]",
"def set_data(self, data, *pos):\n r, c = pos\n self._grid[r][c] = data",
"def set_params3D(ima, p, xform = \"xform.align3d\"):\n\tt = Transform({\"type\":\"spider\",\"phi\":p[0],\"theta\":p[1],\"psi\":p[2],\"tx\":p[3],\"ty\":p[4],\"tz\":p[5],\"mirror\":p[6],\"scale\":p[7]})\n\tima.set_attr(xform, t)",
"def axis3D(xlow,xhigh,xfirst,xstep,ylow,yhigh,yfirst,ystep,\\\n zlow,zhigh,zfirst,zstep):\n dislin.graf3d(xlow,xhigh,xfirst,xstep,ylow,yhigh,yfirst,ystep,\\\n zlow,zhigh,zfirst,zstep)",
"def ipset_y_3d():\n return IPSet(x=np.linspace(0, 10, 11), y=np.random.randn(11, 2, 5), x_new=np.linspace(1, 4, 3))",
"def cube_data(self):\n cube_data = copy.deepcopy(self.data)\n cube_data.shape = [self.nints * self.ngroups, self.rows, self.columns]\n return cube_data",
"def SetRange3d(self, *args):\n return _ShapeBuild.ShapeBuild_Edge_SetRange3d(self, *args)",
"def Values(self, *args):\n return _Adaptor3d.Adaptor3d_InterFunc_Values(self, *args)",
"def initializePointCloudScalar(self, ctrs, scalar, label):\n self.ctrs = ctrs\n self.scalar = scalar\n self.Npts = len(scalar)\n self.label = label\n return",
"def coord(self, tensor: Union[Tensor, np.ndarray]) -> None:\n try:\n tensor = tensor.reshape(self.shape[0], 3)\n except (RuntimeError, ValueError): # for torch.Tensor and np.ndarray\n raise ValueError(f'got unexpected shape {tensor.shape}')\n if not isinstance(tensor, Tensor):\n tensor = self.tensor.new_tensor(tensor)\n self.tensor[:, :3] = tensor",
"def f_set(self, data):\n raise NotImplementedError(\"Should have implemented this.\")",
"def vector3(x, y, z):\n return np.array([x, y, z], dtype=np.float)",
"def _set_data(self, polyhedron, data):\n assert polyhedron.parent() is self._polyhedron_parent\n if len(data) != self._vector.degree():\n raise ValueError('H-representation data requires a list of length ambient_dim+1')\n\n self._vector[:] = data\n self._A[:] = data[1:]\n self._b = self._base_ring(data[0])\n\n self._index = len(polyhedron._Hrepresentation)\n polyhedron._Hrepresentation.append(self)\n self._polyhedron = polyhedron",
"def SetItem3StateValue(self, item, state):\r\n\r\n item.Set3StateValue(state)",
"def _init_data(self, data):\n assert type(data) is dict, \"dict expected: %r\" % type(data)\n assert len(data) is 1, \"size of dict should be 1: %r\" % len(data)\n self._name = data.keys()[0]\n self._data = np.asarray(data[self._name])\n self._set = True",
"def Set3dConversion(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_ShapeConvertToBezier_Set3dConversion(self, *args)",
"def __getitem__(self, item):\n data = np.random.normal(0, 1.0, self.width).astype(np.float32)\n return data",
"def plot3d(data_x, data_y, data_z, vol):\n fig = go.Figure(\n data = [\n go.Mesh3d(\n x = data_x,\n y = data_y,\n z = data_z,\n i = [7, 0, 0, 0, 4, 4, 6, 6, 4, 0, 3, 2], # These are needed, numbers from documentation\n j = [3, 4, 1, 2, 5, 6, 5, 2, 0, 1, 6, 3],\n k = [0, 7, 2, 3, 6, 7, 1, 1, 5, 5, 7, 6],\n colorscale=[[0, 'darkblue'],\n [0.5, 'lightskyblue'],\n [1, 'darkblue']],\n intensity = np.linspace(0, 1, 8, endpoint=True),\n showscale=False,\n opacity = 0.6\n )\n ],\n layout = go.Layout(\n title = \"Le volume est: \" + str(vol),\n autosize = True\n )\n )\n\n # This prints it\n pyo.iplot(fig, filename='Determinant-Volume')",
"def vector3(x, y, z):\n return np.array([x, y, z], dtype=float)",
"def test_3d_time_lowmem():\n dic,data = ng.pipe.read_lowmem(\"common_data/3d_pipe/data/test%03d.fid\")\n assert data.shape == (128, 88, 1250)\n assert data.dtype == 'complex64'\n assert round(data[0,1,2].real,2) == -7.98\n assert round(data[0,1,2].imag,2) == 33.82\n assert round(data[10,22,5].real,2) == 15.71\n assert round(data[10,22,5].imag,2) == 15.1\n lowmem_write_readback_3D(dic,data)",
"def readpil3d(self):\r\n\r\n # Read the data in as an array.\r\n res = np.loadtxt(self.name, delimiter=' ')\r\n\r\n # Split into useful chunks\r\n self.pos = res[:, 0:3] # Grid point locations\r\n self.Pn = res[:, 3:4] # Normal pressure [Pa]\r\n self.flux = res[:, -1] # Flux\r",
"def cubify(\n arr: xr.DataArray,\n *spatial_dims: str,\n pixel_dim: Hashable = 'pixel'\n ):\n if not spatial_dims:\n spatial_dims = ('x', 'y')\n cube = arr.set_index({pixel_dim: spatial_dims}).unstack(pixel_dim) # type: ignore[union-attr]\n for d in spatial_dims:\n cube.coords[d].attrs = arr.coords[d].attrs\n return cube",
"def plot3d(self):\n plot_rupture_wire3d(self)",
"def appendlistdata_f3xyzf3rgb(self, x, y, z, r, g, b):\n pass",
"def make_data(self): \n dims = numpy.array((64, 64, 64), 'i')\n\n # Create some scalars to render.\n dx, dy, dz = 10.0/(dims - 1)\n x = numpy.reshape(numpy.arange(-5.0, 5.0+dx*0.5, dx, 'f'),\n (dims[0], 1, 1))\n y = numpy.reshape(numpy.arange(-5.0, 5.0+dy*0.5, dy, 'f'),\n (1, dims[1], 1))\n z = numpy.reshape(numpy.arange(-5.0, 5.0+dz*0.5, dz, 'f'),\n (1, 1, dims[0]))\n scalars = numpy.sin(x*y*z)/(x*y*z)\n return scalars",
"def test_05_01_mask_of3D(self):\n x=cpi.Image()\n x.image = np.ones((10,10,3))\n self.assertTrue(x.mask.ndim==2)",
"def test_3d_tranpose(): \n dic,data = ng.pipe.read_lowmem(\"common_data/3d_pipe/ft/test%03d.ft3\")\n fdic,fdata = ng.pipe.read(\"common_data/3d_pipe/ft/test%03d.ft3\")\n\n assert_array_equal(data.transpose()[0,1,2],fdata.transpose()[0,1,2])\n assert_array_equal(data.transpose((2,0,1))[0,1,2],\n fdata.transpose((2,0,1))[0,1,2])\n assert_array_equal(data.swapaxes(0,1)[0,1,2],fdata.swapaxes(0,1)[0,1,2])\n assert_array_equal(data.swapaxes(2,0)[0,1,2],fdata.swapaxes(2,0)[0,1,2])",
"def __init__(self, channels):\n super(PositionalEncodingPermute3D, self).__init__()\n self.penc = PositionalEncoding3D(channels)",
"def _setitem2d(self, index, value):\n ix = index[0]\n iz = index[2]\n\n lovects = self._getlovects()\n hivects = self._gethivects()\n fields = self._getfields()\n\n if len(fields[0].shape) > self.dim:\n ncomps = fields[0].shape[-1]\n else:\n ncomps = 1\n\n if len(index) > self.dim:\n if ncomps > 1:\n ic = index[2]\n else:\n raise Exception('Too many indices given')\n else:\n ic = None\n\n nx = hivects[0,:].max() - self.nghosts\n nz = hivects[2,:].max() - self.nghosts\n\n # --- Add extra dimensions so that the input has the same number of\n # --- dimensions as array.\n if isinstance(value, np.ndarray):\n value3d = np.array(value, copy=False)\n sss = list(value3d.shape)\n if not isinstance(ix, slice): sss[0:0] = [1]\n if not isinstance(iz, slice): sss[1:1] = [1]\n value3d.shape = sss\n\n if isinstance(ix, slice):\n ixstart = max(ix.start or -self.nghosts, -self.nghosts)\n ixstop = min(ix.stop or nx + 1 + self.nghosts, nx + self.overlaps[0] + self.nghosts)\n else:\n ixstart = ix\n ixstop = ix + 1\n if isinstance(iz, slice):\n izstart = max(iz.start or -self.nghosts, -self.nghosts)\n izstop = min(iz.stop or nz + 1 + self.nghosts, nz + self.overlaps[2] + self.nghosts)\n else:\n izstart = iz\n izstop = iz + 1\n\n for i in range(len(fields)):\n\n # --- The ix1, 2 etc are relative to global indexing\n ix1 = max(ixstart, lovects[0,i])\n ix2 = min(ixstop, lovects[0,i] + fields[i].shape[0])\n iz1 = max(izstart, lovects[2,i])\n iz2 = min(izstop, lovects[2,i] + fields[i].shape[2])\n\n if ix1 < ix2 and iz1 < iz2:\n\n sss = (slice(ix1 - lovects[0,i], ix2 - lovects[0,i]),\n slice(iz1 - lovects[2,i], iz2 - lovects[2,i]))\n if ic is not None:\n sss = tuple(list(sss) + [ic])\n\n if isinstance(value, np.ndarray):\n vslice = (slice(ix1 - ixstart, ix2 - ixstart),\n slice(iz1 - izstart, iz2 - izstart))\n fields[i][sss] = value3d[vslice]\n else:\n fields[i][sss] = value",
"def _solve_3d_scalar(self, simu=None):\n # # Call fftw filter\n # self._output_field.data[0] = fftw2py.solve_poisson_3d_pressure(\n # self._input_field.data[0],\n # self._input_field.data[1],\n # self._input_field.data[2])\n pass",
"def __init__(self, x, y, z):\n self.x = float(x)\n self.y = float(y)\n self.z = float(z)",
"def _setLayer(items, layer):\n for i in items:\n i.setZValue(layer)",
"def item(self):\n if self.data.shape != ():\n raise RuntimeError(\"Cannot call item on non-scalar type!\")\n return self.data",
"def test_set_scale():\n data = io.create_sample_Dataset()\n tmp = data.piv.set_scale(1.0)\n assert np.allclose(tmp[\"x\"], data[\"x\"])\n\n tmp = data.copy()\n tmp.piv.set_scale(2.0)\n tmp_mean = tmp[\"u\"].mean(dim=(\"t\", \"x\", \"y\")).values\n data_mean = data[\"u\"].mean(dim=(\"t\", \"x\", \"y\")).values\n assert np.allclose(tmp_mean / data_mean, 2.0)",
"def scale(self, x, y, z) -> None:\n ...",
"def z(self, value: Number):\n self._translation[2, 0] = value",
"def initializeMeshScalar(self, mesh, scalar, label):\n self.mesh = mesh\n self.scalar = scalar\n self.Npts = len(scalar)\n self.label = label\n return",
"def EncodeMorton3D(x, y, z):\r\n return Expand3D(x) + (Expand3D(y) << 1) + (Expand3D(z) << 2)",
"def __setslice__(self, *args):\n return _itkSurfaceSpatialObjectPointPython.vectoritkSurfaceSpatialObjectPoint3___setslice__(self, *args)",
"def custom_data_3(self, custom_data_3):\n # type: (string_types) -> None\n\n if custom_data_3 is not None:\n if not isinstance(custom_data_3, string_types):\n raise TypeError(\"Invalid type for `custom_data_3`, type has to be `string_types`\")\n\n self._custom_data_3 = custom_data_3",
"def visualise_data_pca_3d(self, component1, component2, component3, input_data=False):\n if input_data:\n self.__generate_input_data()\n pca_3d(array(self.input_data), component1, component2, component3, self.class_indices, self.path,\n 'high_dimension_data', self.legend)\n else:\n self.__generate_output_data()\n pca_3d(array(self.output_data), component1, component2, component3, self.class_indices, self.path,\n 'low_dimension_data', self.legend)",
"def scale(self, k_x, k_y = None, k_z = None):\r\n if (k_y is None):\r\n return vec3(k_x*self.x, k_x*self.y, k_x*self.z)\r\n else:\r\n return vec3(k_x*self.x, k_y*self.y, k_z*self.z)",
"def on_plot_3d(self, event):\n data = self._get_data_selection(event)\n from sas.sasgui.guiframe.local_perspectives.plotting.masking \\\n import FloatPanel as Float3dDialog\n\n panel = Float3dDialog(base=self, data=data,\n dimension=3, id=wx.NewId())\n panel.ShowModal()",
"def D3(self, *args):\n return _Adaptor3d.Adaptor3d_Curve_D3(self, *args)",
"def _scalar_update(self, d_t, **kwargs):\n for key, val in kwargs.items():\n if isinstance(val, GPUArray):\n kwargs[key] = val.get()\n self.solver(d_t, **kwargs)\n self.post()",
"def test_scalar_index(self):\n dset = self.f.create_dataset('x', shape=(), dtype='f')\n out = dset[...]\n self.assertIsInstance(out, np.ndarray)\n self.assertEqual(out.shape, ())",
"def render_vertices_3d(self, **kwds):\n return point3d(self.coordinates_of(self.points), **kwds)",
"def place(self, x: _vector_like = _null_vector, y: _vector_like = _null_vector,\n z: _vector_like = _null_vector):\n transform = Matrix3D.create()\n transform.translation = Vector3D.create(x.x, y.y, z.z)\n self._local_transform.transformBy(transform)\n self._reset_cache()\n return self",
"def test_write_element(self):\n dt = np.dtype('(3,)f8')\n dset = self.f.create_dataset('x', (10,), dtype=dt)\n\n data = np.array([1,2,3.0])\n dset[4] = data\n\n out = dset[4]\n self.assertTrue(np.all(out == data))"
] | [
"0.6446806",
"0.611566",
"0.59838897",
"0.5964819",
"0.59601384",
"0.59502226",
"0.58907306",
"0.58347994",
"0.58287907",
"0.5692124",
"0.5668796",
"0.5661337",
"0.5661213",
"0.5655243",
"0.56467724",
"0.5642596",
"0.56412905",
"0.56365776",
"0.5629091",
"0.5627743",
"0.5624125",
"0.5615374",
"0.5607968",
"0.56051767",
"0.5530916",
"0.5522781",
"0.5513383",
"0.5492536",
"0.54846233",
"0.54831505",
"0.54815614",
"0.5477183",
"0.5455957",
"0.54354495",
"0.5407473",
"0.5405901",
"0.539349",
"0.53869236",
"0.53858167",
"0.53661394",
"0.53536344",
"0.5314084",
"0.53119254",
"0.5308982",
"0.53032565",
"0.52998847",
"0.5277549",
"0.52661765",
"0.52661306",
"0.52534753",
"0.5252519",
"0.5248727",
"0.52482516",
"0.5244089",
"0.52360183",
"0.5225819",
"0.522459",
"0.5212746",
"0.5211904",
"0.51960826",
"0.5195525",
"0.5195227",
"0.51919985",
"0.51915437",
"0.5189277",
"0.5180733",
"0.51792514",
"0.5168208",
"0.5165092",
"0.51613975",
"0.51513463",
"0.51504433",
"0.5145166",
"0.5140357",
"0.512573",
"0.51209235",
"0.5106859",
"0.5098848",
"0.509798",
"0.5088655",
"0.50827223",
"0.5082447",
"0.50816536",
"0.5079081",
"0.5075261",
"0.5074075",
"0.5073526",
"0.5071901",
"0.50682676",
"0.5062161",
"0.5061924",
"0.50582445",
"0.50545454",
"0.5051875",
"0.50459445",
"0.50407934",
"0.5040651",
"0.5038147",
"0.5034389",
"0.50325555"
] | 0.5278795 | 46 |
Return the range of the data as a 3-tuple of values. Positive min is NaN if no data is positive. | def getDataRange(self):
return self._dataRange | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _computeRangeFromData(data):\n if data is None:\n return None\n\n dataRange = min_max(data, min_positive=True, finite=True)\n if dataRange.minimum is None: # Only non-finite data\n return None\n\n if dataRange is not None:\n min_positive = dataRange.min_positive\n if min_positive is None:\n min_positive = float('nan')\n return dataRange.minimum, min_positive, dataRange.maximum",
"def get_range(cls, data: tuple or list) -> float:\n cls._data_validation(data)\n max_ = cls.get_max(data)\n min_ = cls.get_min(data)\n return float(max_ - min_)",
"def _full_value_range(self):\n min_value, max_value = self._raw_data.data_range\n return max_value - min_value",
"def data_range(x):\n return max(x)-min(x)",
"def data_range(xs: List[float]) -> float:\n return max(xs) - min(xs)",
"def getDataRange(self):\n return None if self._dataRange is None else tuple(self._dataRange)",
"def get_range(self) -> tuple[int, int]:\n return self.range_from, self.range_to",
"def range(self) -> ty.Tuple[float, float]:\r\n ...",
"def range(self) -> Tuple[Union[int, float], Union[int, float]]:\n return self._range",
"def range(self):\n lows, highs = [], []\n for graph in self._graphs.values():\n low, high = graph.range()\n lows.append(low)\n highs.append(high)\n return (min(lows), max(highs))",
"def ex_range(data):\n a, b, step = _cleanse_range_args(data)\n return list(range(a, b+sign(step), step))",
"def getRange(self) -> Tuple[int, int]:\n return self.validator().bottom(), self.validator().top()",
"def getColorRange(self):\n vmax=self.data_matrix.max()\n vmin=self.data_matrix.min()\n\n if vmax * vmin < 0: # ie number range spans +ve and -ve\n vmax = max([vmax, abs(vmin)])\n vmin = -1*vmax\n\n return vmax,vmin",
"def range_(self):\n return tuple((e[0], e[-1]) for e in self.edges)",
"def range(series):\n return min(series), max(series)",
"def range(x):\n try:\n return (min(min(y) for y in x), max(max(y) for y in x))\n except ValueError:\n return (None, None)",
"def _get_range(self):\n return tuple((0, m, 1) for m in self.level_shapes[0])",
"def get_max_and_min(self):\n max_x = float('-inf')\n min_x = float('inf')\n max_y = float('-inf')\n min_y = float('inf')\n max_z = float('-inf')\n min_z = float('inf')\n ans = max_x, max_y, max_z, min_x, min_y, min_z\n counter = 0\n for src, node in self._graph.get_all_v().items():\n if node.location is not None:\n x = node.location.x\n y = node.location.y\n z = node.location.z\n counter += 1\n max_x = x if x > max_x else max_x\n min_x = x if x < min_x else min_x\n max_y = y if y > max_y else max_y\n min_y = y if y < min_y else min_y\n max_z = z if z > max_z else max_z\n min_z = z if z < min_z else min_z\n if counter > 4:\n ans = max_x, max_y, max_z, min_x, min_y, min_z\n return ans",
"def m_to_range(self, data):\n return (data - self._min_range_m) / self._total_range",
"def bounds(self):\n\n if self.size == 0:\n lo, hi = np.nan, np.nan\n elif self.is_monotonic:\n lo, hi = sorted([self.coordinates[0], self.coordinates[-1]])\n elif self.dtype is np.datetime64:\n lo, hi = np.min(self.coordinates), np.max(self.coordinates)\n else:\n lo, hi = np.nanmin(self.coordinates), np.nanmax(self.coordinates)\n\n return lo, hi",
"def _value_in_bounds(self, vals):\n return (self._min_in_bounds(vals[0]), self._max_in_bounds(vals[1]))",
"def yield_spectral_range(self) -> Tuple[float, float, float]:\n return [min(self.x), max(self.x), len(self.x)]",
"def scalar_range2tuple(sr: ScalarRange, defaults=(-np.inf, np.inf)):\n return (\n sr.min.value if sr.HasField(\"min\") else defaults[0],\n sr.max.value if sr.HasField(\"max\") else defaults[1],\n )",
"def minmax(data):\n smallest = data[0]\n largest = data[0]\n\n for i in range(0,len(data)):\n if data[i] < smallest:\n smallest = data[i]\n elif data[i] > largest:\n largest = data[i]\n\n return(smallest,largest)",
"def calcrange(a4lim,data):\r\n a4range=N.intersect1d(N.where(data>a4lim[0])[0],N.where(data<a4lim[1])[0])\r\n return a4range",
"def find_min_max(data):\n v = [i[1] for i in data]\n extremes = [min(v), max(v)]\n logging.info('Calculated extremes: %s', extremes)\n return extremes",
"def _get_min_max_value(min, max, value=None, step=None):\n # Either min and max need to be given, or value needs to be given\n if value is None:\n if min is None or max is None:\n raise ValueError('unable to infer range, value from: ({0}, {1}, {2})'.format(min, max, value))\n diff = max - min\n value = min + (diff / 2)\n # Ensure that value has the same type as diff\n if not isinstance(value, type(diff)):\n value = min + (diff // 2)\n else: # value is not None\n if not isinstance(value, Real):\n raise TypeError('expected a real number, got: %r' % value)\n # Infer min/max from value\n if value == 0:\n # This gives (0, 1) of the correct type\n vrange = (value, value + 1)\n elif value > 0:\n vrange = (-value, 3*value)\n else:\n vrange = (3*value, -value)\n if min is None:\n min = vrange[0]\n if max is None:\n max = vrange[1]\n if step is not None:\n # ensure value is on a step\n tick = int((value - min) / step)\n value = min + tick * step\n if not min <= value <= max:\n raise ValueError('value must be between min and max (min={0}, value={1}, max={2})'.format(min, value, max))\n return min, max, value",
"def _get_time_range(self, data):\n time = data.coords[self.time_field]\n if time.size == 0:\n raise ProviderNoDataError()\n else:\n start = _to_datetime_string(data[self.time_field].values.min())\n end = _to_datetime_string(data[self.time_field].values.max())\n return [start, end]",
"def get_bounds(self, value = None, index = None):\n\n if self._data is None or 0 in self._data.shape:\n return (0.0, 0.0)\n\n if type(value) == types.IntType:\n if self.value_dimension == 0:\n maxi = nanmax(self._data[value, ::])\n mini = nanmin(self._data[value, ::])\n else:\n # value_dimension == 1\n maxi = nanmax(self._data[::, value])\n mini = nanmin(self._data[::, value])\n elif type(index) == types.IntType:\n if self.index_dimension == 0:\n maxi = nanmax(self._data[index, ::])\n mini = nanmin(self._data[index, ::])\n else:\n # index_dimension == 1\n maxi = nanmax(self._data[::, index])\n mini = nanmin(self._data[::, index])\n else:\n # value is None and index is None:\n maxi = nanmax(self._data)\n mini = nanmin(self._data)\n\n return (mini, maxi)",
"def get_lims(data):\n return data[:, 0].min() - 1, data[:, 0].max() + 1, data[:, 1].min() - 1, data[:, 1].max() + 1",
"def get_range(lst):\n return float(max(lst)) - float(min(lst))",
"def get_range(df, col):\n return df[col].min(), df[col].max()",
"def get_bounds(self):\n return ([self.t_min] * self.dim,[self.t_max] * self.dim)",
"def findRanges(data_grouped):\n ranges = []\n for i in data_grouped.columns:\n theRange = (data_grouped[i].min(), data_grouped[i].max())\n ranges.append(theRange)\n return ranges",
"def range_(headers, data):\n\tcolumn_matrix=data.get_data(headers).getT() # get columns as rows, as this makes analysis much easier by just perfoming operations on column list directly\n\tif column_matrix==[]:\n\t\tprint \"wrong headers, not present in data Object\"\n\t\treturn []\n\tcolumn_max=column_matrix.max(1)\n\tcolumn_min=column_matrix.min(1)\n\tfinal=np.concatenate((column_min, column_max), axis=1)\n\t\n\trng=final.tolist()\n\treturn rng",
"def _parse_vrange(self, data):\n vmin = self.config.get('vmin', np.nanmin(data))\n vmax = self.config.get('vmax', np.nanmax(data))\n vrange = self.config.get('vrange', None)\n\n # Parse vmin, vmax\n if isinstance(vmin, str):\n vmin = np.nanquantile(data, q=float(vmin))\n if isinstance(vmax, str):\n vmax = np.nanquantile(data, q=float(vmax))\n\n # Parse vrange\n if vrange is True:\n vrange = max(abs(np.nanmin(data)), abs(np.nanmax(data)))\n elif isinstance(vrange, str):\n vrange = abs(np.nanquantile(data, q=(float(vrange), 1-float(vrange)))).max()\n\n if vrange is not None:\n if isinstance(vrange, (list, tuple, np.ndarray)):\n vmin, vmax = vrange\n else:\n vmin, vmax = -vrange, vrange\n return vmin, vmax",
"def get_ranges(self) -> typing.List[typing.Tuple[float, float]]:\n return self.ranges[:]",
"def get_bounds(self):\n x_max = self.data['x'].max()\n y_max = self.data['y'].max()\n z_max = self.data['z'].max()\n print(\"x={}; y={}; z={}\".format(x_max, y_max, z_max))\n return (x_max, y_max, z_max)",
"def fetchbounds(self):\n pnts = [x for x in [self.out_start, self.start, self.in_start, \\\n self.in_end, self.end, self.out_end] \\\n if x is not None]\n return min(pnts), max(pnts)",
"def get_xrange_indices(self, lower, upper) -> Tuple[int, int]:\n lower_index = np.argmax(self.x >= lower)\n upper_index = np.argmax(self.x >= upper)\n return int(lower_index), int(upper_index)",
"def in_range(data, minval=-np.inf, maxval=np.inf):\n return (minval <= data) & (data <= maxval)",
"def get_range(self, start=None, end=None):\n\n # handle the case of no data\n if self.data.shape[0] == 0 or self.source.data[\"index\"].shape[0] == 0:\n return None, None\n\n first_source_idx = self.source.data[\"index\"][0]\n last_source_idx = self.source.data[\"index\"][-1]\n\n # convert to timestamp if necessary\n if isinstance(self.data.index, pd.DatetimeIndex):\n start = pd.to_datetime(start, unit=\"ms\")\n end = pd.to_datetime(end, unit=\"ms\")\n first_source_idx = pd.to_datetime(first_source_idx, unit=\"ms\")\n last_source_idx = pd.to_datetime(last_source_idx, unit=\"ms\")\n\n # get new start and end\n if start is not None:\n if start < first_source_idx:\n start = max(self.data.index[0], start)\n elif start > last_source_idx:\n start = min(self.data.index[-1], start)\n elif start < self.data.index[0]:\n start = self.data.index[0]\n elif start > self.data.index[-1]:\n start = self.data.index[-1]\n elif len(self.source.data[\"index\"]) > 0:\n start = first_source_idx\n else:\n start = self.data.index[0]\n\n if end is not None:\n if end < first_source_idx:\n end = max(self.data.index[0], end)\n elif end > last_source_idx:\n end = min(self.data.index[-1], end)\n elif end < self.data.index[0]:\n end = self.data.index[0]\n elif end > self.data.index[-1]:\n end = self.data.index[-1]\n elif len(self.source.data[\"index\"]) > 0:\n end = last_source_idx\n else:\n end = self.data.index[-1]\n\n return start, end",
"def calcrange(data, log=False):\n xmin, xmax = None, None\n for x in data:\n if not log or x > 0.:\n if xmin is None or x < xmin: xmin = x\n if xmax is None or x > xmax: xmax = x\n\n if xmin is None and xmax is None:\n if log:\n return 0.1, 1.\n else:\n return 0., 1.\n else:\n return xmin, xmax",
"def min_range(self):\n return self._min_range",
"def limits(self):\n\n\t\treturn [\n\t\t\tmin(self.xvalues),\n\t\t\tmax(self.xvalues),\n\t\t\tmin(self.yvalues),\n\t\t\tmax(self.yvalues)]",
"def range(self):\n return (self._start, self._end)",
"def values(self):\n lower = float(self.lowerSpnbx.value())\n upper = float(self.upperSpnbx.value())\n return lower, upper",
"def min_values(self, lower, upper): \n if not self.lower_bounds is None:\n return self.lower_bounds\n\n minus = np.clip(self.coeffs,-math.inf,0)\n plus = np.clip(self.coeffs,0,math.inf)\n self.lower_bounds = plus.dot(lower) + minus.dot(upper) + self.const\n \n return self.lower_bounds",
"def range(df):\r\n\r\n\tdf_range_dict = dict()\r\n\r\n\tfor i, col in enumerate(df.columns):\r\n\t\tdf_range_dict[col] = [df[col].max(), df[col].min(), df[col].max() - df[col].min()]\r\n\r\n\tdf_range = pd.DataFrame(df_range_dict, index=['Max Value', 'Min Value', 'Range (Max - Min)'])\r\n\tpd.set_option('precision', 2) # set output display precision in 2 decimal places\r\n\r\n\treturn df_range",
"def regression_range(self):\n regression_range = detect_regression_range.DetectRegressionRange(\n self.historical_metadata)\n if regression_range is None: # pragma: no cover\n logging.warning('Got ``None`` for the regression range.')\n else:\n regression_range = tuple(regression_range)\n\n return regression_range",
"def interval(self):\n return (self.start, S.Infinity)",
"def getMinMax(self,arr):\n # not implemented for Template SED yet\n return arr[\"z\"], arr[\"z\"]",
"def xmin(self):\n return asarray([b[0] for b in self.bounds])",
"def get_index_range_inclusive(self):\n nx, ny, nz = self.get_mesh_size()\n return (1, nx, 1, ny, 1, nz)",
"def get_xrange(self):\n return self.xvec[0], self.xvec[-1]",
"def get_min_max(self) -> tuple:\r\n\r\n minimum = float(\"inf\")\r\n maximum = float(\"-inf\")\r\n\r\n for name, data in self.scatters_data.items():\r\n mapping = self.scatters[name][\"mapping\"]\r\n min_x = float(\"inf\")\r\n min_y = float(\"inf\")\r\n min_z = float(\"inf\")\r\n max_x = float(\"-inf\")\r\n max_y = float(\"-inf\")\r\n max_z = float(\"-inf\")\r\n\r\n if mapping[\"x\"] in data:\r\n min_x = min(data[mapping[\"x\"]])\r\n max_x = max(data[mapping[\"x\"]])\r\n\r\n if mapping[\"y\"] in data:\r\n min_y = min(data[mapping[\"y\"]])\r\n max_y = max(data[mapping[\"y\"]])\r\n\r\n if mapping[\"z\"] in data:\r\n min_z = min(data[mapping[\"z\"]])\r\n max_z = max(data[mapping[\"z\"]])\r\n\r\n minimum = min(minimum, min([min_x, min_y, min_z]))\r\n maximum = max(maximum, max([max_x, max_y, max_z]))\r\n\r\n for name, data in self.trees_data.items():\r\n if self.trees[name][\"point_helper\"] is None:\r\n mapping = self.trees[name][\"mapping\"]\r\n min_x = float(\"inf\")\r\n min_y = float(\"inf\")\r\n min_z = float(\"inf\")\r\n max_x = float(\"-inf\")\r\n max_y = float(\"-inf\")\r\n max_z = float(\"-inf\")\r\n\r\n if mapping[\"x\"] in data:\r\n min_x = min(data[mapping[\"x\"]])\r\n max_x = max(data[mapping[\"x\"]])\r\n\r\n if mapping[\"y\"] in data:\r\n min_y = min(data[mapping[\"y\"]])\r\n max_y = max(data[mapping[\"y\"]])\r\n\r\n if mapping[\"z\"] in data:\r\n min_z = min(data[mapping[\"z\"]])\r\n max_z = max(data[mapping[\"z\"]])\r\n\r\n minimum = min(minimum, min([min_x, min_y, min_z]))\r\n maximum = max(maximum, max([max_x, max_y, max_z]))\r\n\r\n return minimum, maximum",
"def range(self):\n return self.range_array",
"def range(self):\n lower, upper = sorted((self.y1, self.y2))\n return FloatRange(lower=lower, upper=upper)",
"def get_dyn_range(scale, zero_point, dtype):\n if dtype == torch.quint8:\n min_val, max_val = 0, 255\n elif dtype == torch.qint8:\n min_val, max_val = -128, 127\n else:\n raise RuntimeError(f\"Unsupported quantized dtype {dtype}\")\n\n return (min_val - zero_point) * scale, (max_val - zero_point) * scale",
"def minmax(xs):\n min_val = None\n max_val = None\n for x in xs:\n if min_val is None or x < min_val:\n min_val = x\n if max_val is None or x > max_val:\n max_val = x\n return (min_val, max_val)",
"def min_max(self, data, era):\n return 0, np.max(data)",
"def GetScalarRange(self):\n ...",
"def test_inclusive_intervals(self):\n dim = Integer(\"yolo\", \"uniform\", -3, 5.5)\n assert dim.interval() == (-3, 3)",
"def get_bounds():\n return [0.00], [1.00]",
"def get_statistics(data):\n v_min = None\n v_max = None\n v_avg = None\n v = None\n v_sum = .0\n count = 0\n for d in data:\n if d is None:\n continue\n try:\n v = float(d)\n except ValueError:\n print(pc.CRED, d, pc.CEND, end=',')\n continue\n if count == 0:\n v_min = v\n v_max = v\n else:\n if v < v_min:\n v_min = v\n if v > v_max:\n v_max = v\n v_sum += v\n count += 1\n if count > 0:\n v_avg = round(v_sum/count, 2)\n return v_min, v_max, v_avg",
"def _query_range_get(self):\n return (self.query_start, self.query_end)",
"def range_to_m(self, data):\n return data * self._total_range + self._min_range_m",
"def _rangeQueryFloatFeature(self):\n\n # create args\n minToGet = c_double()\n maxToGet = c_double()\n\n errorCode = VimbaDLL.featureFloatRangeQuery(self._handle,\n self._name,\n byref(minToGet),\n byref(maxToGet))\n if errorCode != 0:\n raise VimbaException(errorCode)\n\n return (minToGet.value, maxToGet.value)",
"def variable_range(examples, var):\n if var[1] == 'd':\n range = set()\n for datum in examples:\n range.add(datum[var[0]])\n return range\n else:\n range_min, range_max = 0, 0\n for datum in examples:\n data_val = float(datum[var[0]])\n range_min, range_max = min(range_min, data_val), max(range_max, data_val)\n return (range_min, range_max)",
"def getCellData(X, y, min0, max0, min1, max1):\n Xcell = []\n ycell = []\n\n for x,label in zip(X, y):\n if (x[0] >= min0) and (x[0] < max0) and (x[1] >= min1) and (x[1] < max1):\n Xcell.append(x)\n ycell.append(label)\n\n return np.array(Xcell), np.array(ycell)",
"def get_range(value):\n\n raw = value\n\n # If we find a '@' at the beginning of the range, we should invert\n # the match.\n\n invert = False\n\n if value.find('@') == 0:\n invert = True\n value = value.lstrip('@')\n\n # The : separates a max/min range. If it exists, there is at least\n # a minimum. We'll start our ranges at zero and infinity so we don't\n # have to worry about complex testing logic.\n\n bottom = 0\n top = float('infinity')\n\n if value.find(':') > 0:\n (bottom, top) = value.split(':')\n if top == '':\n top = float('infinity')\n else:\n top = float(top)\n\n if bottom == '':\n bottom = 0\n elif bottom == '~':\n bottom = -float('infinity')\n else:\n bottom = float(bottom)\n else:\n top = float(value)\n\n return (bottom, top, invert, raw)",
"def possible_vals(pp):\n\n if pp[\"type\"] == \"w\":\n vals = [0, pp[\"pmax\"]]\n\n elif pp[\"type\"] == \"windturbine\":\n vals = [0, pp[\"pmin\"]]\n for i in range(pp[\"pmin\"], pp[\"pmax\"] - pp[\"pmin\"] + 1):\n vals.append(pp[\"pmin\"] + i)\n\n else: # Turbojet\n vals = [0]\n for i in range(pp[\"pmin\"], pp[\"pmax\"] - pp[\"pmin\"]):\n vals.append(pp[\"pmin\"] + i)\n return vals",
"def _read_range(range: str) -> Tuple[str, List[Tuple[Union[int, None], Union[int, None]]]]:\n format, split_on_pairs = range.split('=', 1)\n split_on_pairs = split_on_pairs.split(',')\n pairs = []\n for pair_str in split_on_pairs:\n split_on_range = pair_str.split('-', 1)\n start = int(split_on_range[0]) if len(split_on_range[0]) > 0 else None\n stop = int(split_on_range[1]) if len(split_on_range[1]) > 0 else None\n pairs.append((start, stop))\n return format, pairs",
"def get_range(self):\n return time_to_range(self.get_time())",
"def _get_shear_vals(lower_bound: float,\n upper_bound: float,\n step: float) -> Tuple[float]:\n return tuple(np.arange(lower_bound, upper_bound + step, step))",
"def get_xrange(self) -> np.array:\n # todo: ensure this functions work as well for y_values\n lower, upper = self.get_xrange_indices()\n return self.x[lower, upper + 1]",
"def Min(data):\n return data.min()",
"def _get_energy_range(self):\n\n e0_min = self.network.isomers[0].E0\n e0_max = e0_min\n\n for isomer in self.network.isomers[1:]:\n E0 = isomer.E0\n if E0 < e0_min:\n e0_min = E0\n if E0 > e0_max:\n e0_max = E0\n for reactant in self.network.reactants:\n E0 = reactant.E0\n if E0 < e0_min:\n e0_min = E0\n if E0 > e0_max:\n e0_max = E0\n for product in self.network.products:\n E0 = product.E0\n if E0 < e0_min:\n e0_min = E0\n if E0 > e0_max:\n e0_max = E0\n for rxn in self.network.path_reactions:\n E0 = rxn.transition_state.conformer.E0.value_si\n if E0 < e0_min:\n e0_min = E0\n if E0 > e0_max:\n e0_max = E0\n\n return e0_min, e0_max",
"def mins(self) -> Tensor:\n return self._ranges[:, 0]",
"def get_ranges(self, tchain, kw):\n (lo, hi) = (\"min\", \"max\")\n ran = None\n for t in tchain:\n rstmt = t.search_one(kw)\n if rstmt is None: continue\n ran = [ i.split(\"..\") for i in rstmt.arg.split(\"|\") ]\n if ran[0][0] != 'min': lo = ran[0][0]\n if ran[-1][-1] != 'max': hi = ran[-1][-1]\n if ran is None: return None\n if len(ran) == 1:\n return [(lo, hi)]\n else:\n return [(lo, ran[0][-1])] + ran[1:-1] + [(ran[-1][0], hi)]",
"def get_range(self, field, deep=False, axis=None):\n variables = list(self.vars(deep, with_name=field))\n\n if not variables:\n raise KeyError(\"No variable named '%s' was found!\" % field)\n\n start = [np.nanmin(self[var], axis).item(0) for var in variables]\n end = [np.nanmax(self[var], axis).item(0) for var in variables]\n return min(start), max(end)",
"def minimum(self) -> Union[int, float]:\n return self.range[0]",
"def eta_range(self):\n\t\tticks = self.eta_details.keys()\n\t\treturn min(ticks), max(ticks)",
"def get_min_max_tuple(min_max_tuple, value):\n min_v, max_v = min_max_tuple\n\n min_v = smart_min(min_v, value)\n max_v = smart_max(max_v, value)\n\n return (min_v, max_v)",
"def heckbert_interval(data_low, data_high, numticks=8, nicefunc=_nice, enclose=False):\n if data_high == data_low:\n return data_high, data_low, 0\n if numticks == 0:\n numticks = 1\n\n range = nicefunc(data_high - data_low)\n if numticks > 1:\n numticks -= 1\n d = nicefunc(range / numticks, round=True)\n if enclose:\n graphmin = ceil(data_low / d) * d\n graphmax = floor(data_high / d) * d\n else:\n graphmin = floor(data_low / d) * d\n graphmax = ceil(data_high / d) * d\n return graphmin, graphmax, d",
"def bounds(self):\n return self.xmin, self.xmax, self.ymin, self.ymax",
"def bounds(self, start=None, finish=None):\n lower = start if start is not None else self.limits[0]\n upper = finish if finish is not None else self.limits[1]\n\n lower = lower + self.offsets[0]\n upper = upper + self.offsets[1]\n\n return (lower, upper)",
"def get_min_max(ints):\n current_max = None\n current_min = None\n\n if (len(ints) == 0) or (ints is None):\n return tuple([current_min, current_max])\n\n for i, n in enumerate(ints):\n if i == 0:\n current_max = n\n current_min = n\n else:\n if n > current_max:\n current_max = n\n elif n < current_min:\n current_min = n\n\n return tuple([current_min, current_max])",
"def get_minx_maxx(self, normalized=True):\n minx = np.array([[0.0] * len(self.encoded_feature_names)])\n maxx = np.array([[1.0] * len(self.encoded_feature_names)])\n\n for idx, feature_name in enumerate(self.continuous_feature_names):\n max_value = self.train_df[feature_name].max()\n min_value = self.train_df[feature_name].min()\n\n if normalized:\n minx[0][idx] = (self.permitted_range[feature_name]\n [0] - min_value) / (max_value - min_value)\n maxx[0][idx] = (self.permitted_range[feature_name]\n [1] - min_value) / (max_value - min_value)\n else:\n minx[0][idx] = self.permitted_range[feature_name][0]\n maxx[0][idx] = self.permitted_range[feature_name][1]\n return minx, maxx",
"def get_minmax(self):\n x_minmax = [np.min(self.grid['x']), np.max(self.grid['x'].max())]\n z_minmax = [np.min(self.grid['z']), np.max(self.grid['z'].max())]\n return x_minmax, z_minmax",
"def range(self):\n return self.timerange()",
"def get_refrange(self):\n if np.all(np.isnan(self.par)):\n print( 'Run params() before')\n return\n if hasattr(self,'refranges'):\n return self.refranges\n ice_r = [r for r in xrange(len(self.ref)) if ~ np.isnan(self.par[1,r,10,0])]\n liq_r = [r for r in xrange(len(self.ref)) if ~ np.isnan(self.par[0,r,10,0])]\n return (liq_r,ice_r)",
"def min_max(xs):\n return min(xs), max(xs)",
"def GetTotalRange(vDataSet):\r\n dtype = GetType(vDataSet)\r\n if dtype == np.uint8 or dtype == np.uint16:\r\n info = np.iinfo(dtype)\r\n else:\r\n info = np.finfo(dtype)\r\n return info.min,info.max",
"def x_y_coor_min_max(x_y_coor):\n\tx_range = [np.min(x_y_coor[\"X\"]),np.max(x_y_coor[\"X\"])]\n\ty_range = [np.min(x_y_coor[\"Y\"]),np.max(x_y_coor[\"Y\"])]\n\treturn x_range, y_range",
"def _get_extremes(self, attr='values'):\n # calculate the maximum and minimum for all series\n series_max = [0]\n series_min = [0]\n for s in self:\n if s is not None:\n series_max.append(s.max(attr))\n series_min.append(s.min(attr))\n return min(series_min), max(series_max)",
"def get_min(cls, data: tuple or list) -> float:\n cls._data_validation(data)\n return min(data)",
"def _update_data_range(self):\r\n self._h_min = np.min(self.h)\r\n self._h_max = np.max(self.h)\r\n self._hr_min = np.min(self.hr)\r\n self._hr_max = np.max(self.hr)\r\n self._m_min = np.nanmin(self.m)\r\n self._m_max = np.nanmax(self.m)\r\n\r\n if self.temperature is None or np.all(np.isnan(self.temperature)):\r\n self._T_min = np.nan\r\n self._T_max = np.nan\r\n else:\r\n self._T_min = np.nanmin(self.temperature)\r\n self._T_max = np.nanmax(self.temperature)\r\n\r\n return",
"def _check_range(range_):\n try:\n if not isinstance(range_, list):\n range_ = list(range_)\n min_, max_ = range_\n except (ValueError, TypeError):\n raise TypeError(\"each range in ising_linear_ranges should be a list of length 2.\")\n if not isinstance(min_, Number) or not isinstance(max_, Number) or min_ > max_:\n raise ValueError((\"each range in ising_linear_ranges should be a 2-tuple \"\n \"(min, max) where min <= max\"))\n return range_",
"def _bi_range(start, end):\n if start == end:\n return (start,)\n\n elif end < start:\n return reversed(range(end, start + 1))\n\n else:\n return range(start, end + 1)"
] | [
"0.80794895",
"0.7623016",
"0.7399985",
"0.7339498",
"0.7240076",
"0.7169807",
"0.7086574",
"0.7079901",
"0.70793736",
"0.6944895",
"0.6782193",
"0.6766099",
"0.6677474",
"0.66592604",
"0.6654025",
"0.6621791",
"0.6613688",
"0.6587012",
"0.6554875",
"0.6525205",
"0.6514943",
"0.64271283",
"0.6386248",
"0.634553",
"0.63312197",
"0.63110703",
"0.62724155",
"0.62720484",
"0.62662834",
"0.6266116",
"0.6264186",
"0.62542087",
"0.6253586",
"0.62519306",
"0.6207791",
"0.61888206",
"0.6186835",
"0.616809",
"0.61655664",
"0.61417156",
"0.6118097",
"0.6116166",
"0.6101557",
"0.60865176",
"0.6076441",
"0.60661817",
"0.60623837",
"0.60571235",
"0.6052849",
"0.6051161",
"0.6039221",
"0.6036516",
"0.6025402",
"0.6013011",
"0.6007117",
"0.59838825",
"0.5983818",
"0.5980281",
"0.5965841",
"0.59648746",
"0.5953986",
"0.5951801",
"0.5950642",
"0.59454656",
"0.59387624",
"0.59369665",
"0.59355134",
"0.59345365",
"0.59342474",
"0.5926222",
"0.5923732",
"0.59197104",
"0.59195",
"0.59058934",
"0.5903206",
"0.5869031",
"0.5863317",
"0.5834739",
"0.58343405",
"0.58285433",
"0.5815397",
"0.5813248",
"0.5812595",
"0.57993114",
"0.57934475",
"0.57877153",
"0.57704145",
"0.5766496",
"0.576398",
"0.5763425",
"0.57589126",
"0.57578534",
"0.57340467",
"0.5730579",
"0.5725628",
"0.5716928",
"0.57082254",
"0.5708046",
"0.57004607",
"0.56980264"
] | 0.587778 | 75 |