body
stringlengths
26
98.2k
body_hash
int64
-9,222,864,604,528,158,000
9,221,803,474B
docstring
stringlengths
1
16.8k
path
stringlengths
5
230
name
stringlengths
1
96
repository_name
stringlengths
7
89
lang
stringclasses
1 value
body_without_docstring
stringlengths
20
98.2k
def ValidateSingleFile(input_api, output_api, file_obj, cwd, results):
    """Run validations when a histograms.xml or enums.xml file changes.

    Args:
      input_api: An input_api instance that contains information about changes.
      output_api: An output_api instance to create results of the PRESUBMIT check.
      file_obj: A file object of one of the changed files.
      cwd: Path to current working directory.
      results: The returned variable which is a list of output_api results.

    Returns:
      A boolean that is True if a histograms.xml or enums.xml file is changed.
    """
    abs_path = file_obj.AbsoluteLocalPath()
    # Ignore files outside the presubmit directory.
    if input_api.os_path.commonprefix([abs_path, cwd]) != cwd:
        return False
    rel_path = input_api.os_path.relpath(abs_path, cwd)
    if 'test_data' in rel_path:
        return False
    # Must be checked first: 'histograms.xml' is a substring of
    # 'obsolete_histograms.xml'.
    if 'obsolete_histograms.xml' in rel_path:
        GetObsoleteXmlErrors(input_api, output_api, cwd, results)
        return False
    for marker in ('histograms.xml', 'histogram_suffixes_list.xml', 'enums.xml'):
        if marker in rel_path:
            GetPrettyPrintErrors(input_api, output_api, cwd, rel_path, results)
            return True
    return False
8,915,752,243,759,788,000
Does corresponding validations if histograms.xml or enums.xml is changed. Args: input_api: An input_api instance that contains information about changes. output_api: An output_api instance to create results of the PRESUBMIT check. file_obj: A file object of one of the changed files. cwd: Path to current working directory. results: The returned variable which is a list of output_api results. Returns: A boolean that is True if a histograms.xml or enums.xml file is changed.
tools/metrics/histograms/PRESUBMIT.py
ValidateSingleFile
Ron423c/chromium
python
def ValidateSingleFile(input_api, output_api, file_obj, cwd, results):
    """Run validations when a histograms.xml or enums.xml file changes.

    Args:
      input_api: An input_api instance that contains information about changes.
      output_api: An output_api instance to create results of the PRESUBMIT check.
      file_obj: A file object of one of the changed files.
      cwd: Path to current working directory.
      results: The returned variable which is a list of output_api results.

    Returns:
      A boolean that is True if a histograms.xml or enums.xml file is changed.
    """
    abs_path = file_obj.AbsoluteLocalPath()
    # Ignore files outside the presubmit directory.
    if input_api.os_path.commonprefix([abs_path, cwd]) != cwd:
        return False
    rel_path = input_api.os_path.relpath(abs_path, cwd)
    if 'test_data' in rel_path:
        return False
    # Must be checked first: 'histograms.xml' is a substring of
    # 'obsolete_histograms.xml'.
    if 'obsolete_histograms.xml' in rel_path:
        GetObsoleteXmlErrors(input_api, output_api, cwd, results)
        return False
    for marker in ('histograms.xml', 'histogram_suffixes_list.xml', 'enums.xml'):
        if marker in rel_path:
            GetPrettyPrintErrors(input_api, output_api, cwd, rel_path, results)
            return True
    return False
def CheckChange(input_api, output_api):
    """Checks that histograms.xml is pretty-printed and well-formatted."""
    results = []
    cwd = input_api.PresubmitLocalPath()
    # Validate every affected file; no short-circuiting, since each call may
    # append errors to `results`.
    changed_flags = [
        ValidateSingleFile(input_api, output_api, file_obj, cwd, results)
        for file_obj in input_api.AffectedTextFiles()
    ]
    # Run the full histograms validation only when an XML file changed.
    if any(changed_flags):
        GetValidateHistogramsError(input_api, output_api, cwd, results)
    return results
-6,462,003,374,697,380,000
Checks that histograms.xml is pretty-printed and well-formatted.
tools/metrics/histograms/PRESUBMIT.py
CheckChange
Ron423c/chromium
python
def CheckChange(input_api, output_api):
    results = []
    cwd = input_api.PresubmitLocalPath()
    # Validate every affected file; no short-circuiting, since each call may
    # append errors to `results`.
    changed_flags = [
        ValidateSingleFile(input_api, output_api, file_obj, cwd, results)
        for file_obj in input_api.AffectedTextFiles()
    ]
    # Run the full histograms validation only when an XML file changed.
    if any(changed_flags):
        GetValidateHistogramsError(input_api, output_api, cwd, results)
    return results
def test_randomizer_basic(self):
    """Test functionality of basic randomizer."""
    rnd = EnvParameterRandomizer(DummyEnvParameter())
    assert len(rnd.get_parameters()) == 3
    # Registering a duplicate name must fail; a fresh name must succeed.
    with self.assertRaises(AssertionError):
        rnd.register_parameter(DummyRandomizerParameter('a', 1))
    rnd.register_parameter(DummyRandomizerParameter('d', 1))
    assert len(rnd.get_parameters()) == 4
    for name, value in (('a', 1), ('b', 0.5), ('c', 2)):
        rnd.get_parameter(name).set_value(value)
    parameters = rnd.randomize(DummyEnvParameter(), self.random_state)
    assert parameters.a == 1
    assert parameters.b == 0.5
    assert parameters.nested.c == 2
    # A disabled randomizer must ignore parameter values.
    rnd.disable()
    parameters = rnd.randomize(DummyEnvParameter(), self.random_state)
    rnd.get_parameter('a').set_value(1)
    assert parameters.a == 0
6,277,896,458,699,098,000
Test functionality of basic randomizer.
robogym/randomization/tests/test_randomization.py
test_randomizer_basic
0xflotus/robogym
python
def test_randomizer_basic(self):
    rnd = EnvParameterRandomizer(DummyEnvParameter())
    assert len(rnd.get_parameters()) == 3
    # Registering a duplicate name must fail; a fresh name must succeed.
    with self.assertRaises(AssertionError):
        rnd.register_parameter(DummyRandomizerParameter('a', 1))
    rnd.register_parameter(DummyRandomizerParameter('d', 1))
    assert len(rnd.get_parameters()) == 4
    for name, value in (('a', 1), ('b', 0.5), ('c', 2)):
        rnd.get_parameter(name).set_value(value)
    parameters = rnd.randomize(DummyEnvParameter(), self.random_state)
    assert parameters.a == 1
    assert parameters.b == 0.5
    assert parameters.nested.c == 2
    # A disabled randomizer must ignore parameter values.
    rnd.disable()
    parameters = rnd.randomize(DummyEnvParameter(), self.random_state)
    rnd.get_parameter('a').set_value(1)
    assert parameters.a == 0
def __init__(self, model, name=None, category=None, endpoint=None, url=None,
             static_folder=None, menu_class_name=None, menu_icon_type=None,
             menu_icon_value=None):
    """Constructor.

    :param model:
        Model class
    :param name:
        Display name
    :param category:
        Display category
    :param endpoint:
        Endpoint
    :param url:
        Custom URL
    :param menu_class_name:
        Optional class name for the menu item.
    :param menu_icon_type:
        Optional icon. Possible icon types:

        - `flask_admin.consts.ICON_TYPE_GLYPH` - Bootstrap glyph icon
        - `flask_admin.consts.ICON_TYPE_FONT_AWESOME` - Font Awesome icon
        - `flask_admin.consts.ICON_TYPE_IMAGE` - Image relative to Flask static directory
        - `flask_admin.consts.ICON_TYPE_IMAGE_URL` - Image with full URL

    :param menu_icon_value:
        Icon glyph name or URL, depending on `menu_icon_type` setting
    """
    # Must exist before the base constructor runs, since base-class
    # scaffolding may read it (e.g. via init_search).
    self._search_fields = []
    super(ModelView, self).__init__(model, name, category, endpoint, url,
                                    static_folder,
                                    menu_class_name=menu_class_name,
                                    menu_icon_type=menu_icon_type,
                                    menu_icon_value=menu_icon_value)
    self._primary_key = self.scaffold_pk()
-1,822,298,911,502,670,000
Constructor :param model: Model class :param name: Display name :param category: Display category :param endpoint: Endpoint :param url: Custom URL :param menu_class_name: Optional class name for the menu item. :param menu_icon_type: Optional icon. Possible icon types: - `flask_admin.consts.ICON_TYPE_GLYPH` - Bootstrap glyph icon - `flask_admin.consts.ICON_TYPE_FONT_AWESOME` - Font Awesome icon - `flask_admin.consts.ICON_TYPE_IMAGE` - Image relative to Flask static directory - `flask_admin.consts.ICON_TYPE_IMAGE_URL` - Image with full URL :param menu_icon_value: Icon glyph name or URL, depending on `menu_icon_type` setting
sleepyenv/lib/python2.7/site-packages/Flask_Admin-1.2.0-py2.7.egg/flask_admin/contrib/mongoengine/view.py
__init__
hexlism/css_platform
python
def __init__(self, model, name=None, category=None, endpoint=None, url=None,
             static_folder=None, menu_class_name=None, menu_icon_type=None,
             menu_icon_value=None):
    """Constructor.

    :param model:
        Model class
    :param name:
        Display name
    :param category:
        Display category
    :param endpoint:
        Endpoint
    :param url:
        Custom URL
    :param menu_class_name:
        Optional class name for the menu item.
    :param menu_icon_type:
        Optional icon. Possible icon types:

        - `flask_admin.consts.ICON_TYPE_GLYPH` - Bootstrap glyph icon
        - `flask_admin.consts.ICON_TYPE_FONT_AWESOME` - Font Awesome icon
        - `flask_admin.consts.ICON_TYPE_IMAGE` - Image relative to Flask static directory
        - `flask_admin.consts.ICON_TYPE_IMAGE_URL` - Image with full URL

    :param menu_icon_value:
        Icon glyph name or URL, depending on `menu_icon_type` setting
    """
    # Must exist before the base constructor runs, since base-class
    # scaffolding may read it (e.g. via init_search).
    self._search_fields = []
    super(ModelView, self).__init__(model, name, category, endpoint, url,
                                    static_folder,
                                    menu_class_name=menu_class_name,
                                    menu_icon_type=menu_icon_type,
                                    menu_icon_value=menu_icon_value)
    self._primary_key = self.scaffold_pk()
def _refresh_cache(self):
    """Refresh cache."""
    # Normalise an unset subdocument config to an empty mapping so the
    # converter always receives a dict.
    if self.form_subdocuments is None:
        self.form_subdocuments = {}
    self._form_subdocuments = convert_subdocuments(self.form_subdocuments)
    super(ModelView, self)._refresh_cache()
3,943,640,994,616,751,000
Refresh cache.
sleepyenv/lib/python2.7/site-packages/Flask_Admin-1.2.0-py2.7.egg/flask_admin/contrib/mongoengine/view.py
_refresh_cache
hexlism/css_platform
python
def _refresh_cache(self):
    """Refresh cache."""
    # Normalise an unset subdocument config to an empty mapping so the
    # converter always receives a dict.
    if self.form_subdocuments is None:
        self.form_subdocuments = {}
    self._form_subdocuments = convert_subdocuments(self.form_subdocuments)
    super(ModelView, self)._refresh_cache()
def _process_ajax_references(self):
    """Precompute AJAX references, including those inside subdocuments.

    The AJAX endpoint is exposed by the top-level admin view class, but
    subdocuments might have AJAX references too.  Recursively walk the
    subdocument configuration so their ReferenceFields can also use AJAX.
    """
    references = super(ModelView, self)._process_ajax_references()
    return process_ajax_references(references, self)
6,137,773,626,474,018,000
AJAX endpoint is exposed by top-level admin view class, but subdocuments might have AJAX references too. This method will recursively go over subdocument configuration and will precompute AJAX references for them ensuring that subdocuments can also use AJAX to populate their ReferenceFields.
sleepyenv/lib/python2.7/site-packages/Flask_Admin-1.2.0-py2.7.egg/flask_admin/contrib/mongoengine/view.py
_process_ajax_references
hexlism/css_platform
python
def _process_ajax_references(self):
    """Precompute AJAX references, including those inside subdocuments.

    The AJAX endpoint is exposed by the top-level admin view class, but
    subdocuments might have AJAX references too.  Recursively walk the
    subdocument configuration so their ReferenceFields can also use AJAX.
    """
    references = super(ModelView, self)._process_ajax_references()
    return process_ajax_references(references, self)
def _get_model_fields(self, model=None):
    """Inspect model and return list of model fields.

    :param model:
        Model to inspect; defaults to ``self.model``.
    """
    target = self.model if model is None else model
    # Sort by declaration order (MongoEngine tracks it via creation_counter).
    return sorted(iteritems(target._fields),
                  key=lambda item: item[1].creation_counter)
707,176,458,598,894,100
Inspect model and return list of model fields :param model: Model to inspect
sleepyenv/lib/python2.7/site-packages/Flask_Admin-1.2.0-py2.7.egg/flask_admin/contrib/mongoengine/view.py
_get_model_fields
hexlism/css_platform
python
def _get_model_fields(self, model=None):
    """Inspect model and return list of model fields.

    :param model:
        Model to inspect; defaults to ``self.model``.
    """
    target = self.model if model is None else model
    # Sort by declaration order (MongoEngine tracks it via creation_counter).
    return sorted(iteritems(target._fields),
                  key=lambda item: item[1].creation_counter)
def get_pk_value(self, model):
    """Return the primary key value from the model instance.

    :param model:
        Model instance
    """
    return model.pk
3,359,469,410,447,919,600
Return the primary key value from the model instance :param model: Model instance
sleepyenv/lib/python2.7/site-packages/Flask_Admin-1.2.0-py2.7.egg/flask_admin/contrib/mongoengine/view.py
get_pk_value
hexlism/css_platform
python
def get_pk_value(self, model):
    """Return the primary key value from the model instance.

    :param model:
        Model instance
    """
    return model.pk
def scaffold_list_columns(self):
    """Scaffold list columns."""
    def _listable(field):
        # Embedded documents (and lists of them) cannot be shown as columns.
        field_class = type(field)
        if field_class == mongoengine.EmbeddedDocumentField:
            return False
        if (field_class == mongoengine.ListField
                and isinstance(field.field, mongoengine.EmbeddedDocumentField)):
            return False
        # Hide the ObjectId primary key unless explicitly requested.
        return self.column_display_pk or field_class != mongoengine.ObjectIdField

    return [name for name, field in self._get_model_fields() if _listable(field)]
-1,257,183,833,292,583,000
Scaffold list columns
sleepyenv/lib/python2.7/site-packages/Flask_Admin-1.2.0-py2.7.egg/flask_admin/contrib/mongoengine/view.py
scaffold_list_columns
hexlism/css_platform
python
def scaffold_list_columns(self):
    """Scaffold list columns."""
    def _listable(field):
        # Embedded documents (and lists of them) cannot be shown as columns.
        field_class = type(field)
        if field_class == mongoengine.EmbeddedDocumentField:
            return False
        if (field_class == mongoengine.ListField
                and isinstance(field.field, mongoengine.EmbeddedDocumentField)):
            return False
        # Hide the ObjectId primary key unless explicitly requested.
        return self.column_display_pk or field_class != mongoengine.ObjectIdField

    return [name for name, field in self._get_model_fields() if _listable(field)]
def scaffold_sortable_columns(self):
    """Return a dictionary of sortable columns (name, field)."""
    return {
        name: field
        for name, field in self._get_model_fields()
        if type(field) in SORTABLE_FIELDS
        # Hide the ObjectId primary key unless explicitly requested.
        and (self.column_display_pk or type(field) != mongoengine.ObjectIdField)
    }
-1,914,602,255,142,035,000
Return a dictionary of sortable columns (name, field)
sleepyenv/lib/python2.7/site-packages/Flask_Admin-1.2.0-py2.7.egg/flask_admin/contrib/mongoengine/view.py
scaffold_sortable_columns
hexlism/css_platform
python
def scaffold_sortable_columns(self):
    """Return a dictionary of sortable columns (name, field)."""
    return {
        name: field
        for name, field in self._get_model_fields()
        if type(field) in SORTABLE_FIELDS
        # Hide the ObjectId primary key unless explicitly requested.
        and (self.column_display_pk or type(field) != mongoengine.ObjectIdField)
    }
def init_search(self):
    """Init search."""
    for item in (self.column_searchable_list or ()):
        # Accept either a field name or a field instance.
        field = self.model._fields.get(item) if isinstance(item, string_types) else item
        if field is None:
            raise Exception('Invalid search field')
        if type(field) not in self.allowed_search_types:
            raise Exception('Can only search on text columns. '
                            + ('Failed to setup search for "%s"' % field))
        self._search_fields.append(field)
    return bool(self._search_fields)
-2,954,653,945,820,696,600
Init search
sleepyenv/lib/python2.7/site-packages/Flask_Admin-1.2.0-py2.7.egg/flask_admin/contrib/mongoengine/view.py
init_search
hexlism/css_platform
python
def init_search(self):
    """Init search."""
    for item in (self.column_searchable_list or ()):
        # Accept either a field name or a field instance.
        field = self.model._fields.get(item) if isinstance(item, string_types) else item
        if field is None:
            raise Exception('Invalid search field')
        if type(field) not in self.allowed_search_types:
            raise Exception('Can only search on text columns. '
                            + ('Failed to setup search for "%s"' % field))
        self._search_fields.append(field)
    return bool(self._search_fields)
def scaffold_filters(self, name):
    """Return filter object(s) for the field.

    :param name:
        Either field name or field instance
    """
    given_name = isinstance(name, string_types)
    attr = self.model._fields.get(name) if given_name else name
    if attr is None:
        raise Exception('Failed to find field for filter: %s' % name)
    # Prefer the field's own name for display when a field instance was given.
    visible_name = None if given_name else self.get_column_name(attr.name)
    if not visible_name:
        visible_name = self.get_column_name(name)
    return self.filter_converter.convert(type(attr).__name__, attr, visible_name)
-1,826,513,185,149,695,500
Return filter object(s) for the field :param name: Either field name or field instance
sleepyenv/lib/python2.7/site-packages/Flask_Admin-1.2.0-py2.7.egg/flask_admin/contrib/mongoengine/view.py
scaffold_filters
hexlism/css_platform
python
def scaffold_filters(self, name):
    """Return filter object(s) for the field.

    :param name:
        Either field name or field instance
    """
    given_name = isinstance(name, string_types)
    attr = self.model._fields.get(name) if given_name else name
    if attr is None:
        raise Exception('Failed to find field for filter: %s' % name)
    # Prefer the field's own name for display when a field instance was given.
    visible_name = None if given_name else self.get_column_name(attr.name)
    if not visible_name:
        visible_name = self.get_column_name(name)
    return self.filter_converter.convert(type(attr).__name__, attr, visible_name)
def is_valid_filter(self, filter):
    """Validate if the provided filter is a valid MongoEngine filter.

    :param filter:
        Filter object
    """
    # NOTE: `filter` shadows the builtin; name kept for interface compatibility.
    return isinstance(filter, BaseMongoEngineFilter)
-6,644,047,060,746,446,000
Validate if the provided filter is a valid MongoEngine filter :param filter: Filter object
sleepyenv/lib/python2.7/site-packages/Flask_Admin-1.2.0-py2.7.egg/flask_admin/contrib/mongoengine/view.py
is_valid_filter
hexlism/css_platform
python
def is_valid_filter(self, filter):
    """Validate if the provided filter is a valid MongoEngine filter.

    :param filter:
        Filter object
    """
    # NOTE: `filter` shadows the builtin; name kept for interface compatibility.
    return isinstance(filter, BaseMongoEngineFilter)
def scaffold_form(self):
    """Create form from the model."""
    return get_form(
        self.model,
        self.model_form_converter(self),
        base_class=self.form_base_class,
        only=self.form_columns,
        exclude=self.form_excluded_columns,
        field_args=self.form_args,
        extra_fields=self.form_extra_fields,
    )
-5,255,561,762,624,256,000
Create form from the model.
sleepyenv/lib/python2.7/site-packages/Flask_Admin-1.2.0-py2.7.egg/flask_admin/contrib/mongoengine/view.py
scaffold_form
hexlism/css_platform
python
def scaffold_form(self):
    """Create form from the model."""
    return get_form(
        self.model,
        self.model_form_converter(self),
        base_class=self.form_base_class,
        only=self.form_columns,
        exclude=self.form_excluded_columns,
        field_args=self.form_args,
        extra_fields=self.form_extra_fields,
    )
def scaffold_list_form(self, custom_fieldlist=ListEditableFieldList, validators=None):
    """Create form for the `index_view` using only the columns from
    `self.column_editable_list`.

    :param validators:
        `form_args` dict with only validators
        {'name': {'validators': [required()]}}
    :param custom_fieldlist:
        A WTForm FieldList class. By default, `ListEditableFieldList`.
    """
    form_class = get_form(
        self.model,
        self.model_form_converter(self),
        base_class=self.form_base_class,
        only=self.column_editable_list,
        field_args=validators,
    )
    return wrap_fields_in_fieldlist(self.form_base_class, form_class,
                                    custom_fieldlist)
1,362,413,955,699,600,400
Create form for the `index_view` using only the columns from `self.column_editable_list`. :param validators: `form_args` dict with only validators {'name': {'validators': [required()]}} :param custom_fieldlist: A WTForm FieldList class. By default, `ListEditableFieldList`.
sleepyenv/lib/python2.7/site-packages/Flask_Admin-1.2.0-py2.7.egg/flask_admin/contrib/mongoengine/view.py
scaffold_list_form
hexlism/css_platform
python
def scaffold_list_form(self, custom_fieldlist=ListEditableFieldList, validators=None):
    """Create form for the `index_view` using only the columns from
    `self.column_editable_list`.

    :param validators:
        `form_args` dict with only validators
        {'name': {'validators': [required()]}}
    :param custom_fieldlist:
        A WTForm FieldList class. By default, `ListEditableFieldList`.
    """
    form_class = get_form(
        self.model,
        self.model_form_converter(self),
        base_class=self.form_base_class,
        only=self.column_editable_list,
        field_args=validators,
    )
    return wrap_fields_in_fieldlist(self.form_base_class, form_class,
                                    custom_fieldlist)
def get_query(self):
    """Returns the QuerySet for this view. By default, it returns all the
    objects for the current model.
    """
    return self.model.objects
-4,986,645,289,561,858,000
Returns the QuerySet for this view. By default, it returns all the objects for the current model.
sleepyenv/lib/python2.7/site-packages/Flask_Admin-1.2.0-py2.7.egg/flask_admin/contrib/mongoengine/view.py
get_query
hexlism/css_platform
python
def get_query(self):
    """Returns the QuerySet for this view. By default, it returns all the
    objects for the current model.
    """
    return self.model.objects
def get_list(self, page, sort_column, sort_desc, search, filters, execute=True):
    """Get list of objects from MongoEngine.

    :param page:
        Page number
    :param sort_column:
        Sort column
    :param sort_desc:
        Sort descending
    :param search:
        Search criteria
    :param filters:
        List of applied filters
    :param execute:
        Run query immediately or not
    """
    query = self.get_query()
    # Apply activated filters.
    if self._filters:
        for flt_index, _flt_name, value in filters:
            flt = self._filters[flt_index]
            query = flt.apply(query, flt.clean(value))
    # Full-text search.
    if self._search_supported and search:
        query = self._search(query, search)
    # Count is skipped for the simple pager (it avoids an extra query).
    count = None if self.simple_list_pager else query.count()
    # Sorting: explicit column wins; otherwise fall back to the default order.
    if sort_column:
        query = query.order_by('%s%s' % ('-' if sort_desc else '', sort_column))
    else:
        order = self._get_default_order()
        if order:
            query = query.order_by('%s%s' % ('-' if order[1] else '', order[0]))
    # Pagination.
    if page is not None:
        query = query.skip(page * self.page_size)
    query = query.limit(self.page_size)
    if execute:
        query = query.all()
    return count, query
-6,554,609,987,189,399,000
Get list of objects from MongoEngine :param page: Page number :param sort_column: Sort column :param sort_desc: Sort descending :param search: Search criteria :param filters: List of applied filters :param execute: Run query immediately or not
sleepyenv/lib/python2.7/site-packages/Flask_Admin-1.2.0-py2.7.egg/flask_admin/contrib/mongoengine/view.py
get_list
hexlism/css_platform
python
def get_list(self, page, sort_column, sort_desc, search, filters, execute=True):
    """Get list of objects from MongoEngine.

    :param page:
        Page number
    :param sort_column:
        Sort column
    :param sort_desc:
        Sort descending
    :param search:
        Search criteria
    :param filters:
        List of applied filters
    :param execute:
        Run query immediately or not
    """
    query = self.get_query()
    # Apply activated filters.
    if self._filters:
        for flt_index, _flt_name, value in filters:
            flt = self._filters[flt_index]
            query = flt.apply(query, flt.clean(value))
    # Full-text search.
    if self._search_supported and search:
        query = self._search(query, search)
    # Count is skipped for the simple pager (it avoids an extra query).
    count = None if self.simple_list_pager else query.count()
    # Sorting: explicit column wins; otherwise fall back to the default order.
    # BUG FIX: the conditional expressions were missing their else-value
    # ('' empty string), which is a SyntaxError; restored from the intact
    # copy of this function.
    if sort_column:
        query = query.order_by('%s%s' % ('-' if sort_desc else '', sort_column))
    else:
        order = self._get_default_order()
        if order:
            query = query.order_by('%s%s' % ('-' if order[1] else '', order[0]))
    # Pagination.
    if page is not None:
        query = query.skip(page * self.page_size)
    query = query.limit(self.page_size)
    if execute:
        query = query.all()
    return count, query
def get_one(self, id):
    """Return a single model instance by its ID.

    :param id:
        Model ID
    """
    try:
        return self.get_query().filter(pk=id).first()
    except mongoengine.ValidationError as ex:
        # Malformed ObjectId etc.: surface a flash message instead of a 500.
        flash(gettext('Failed to get model. %(error)s', error=format_error(ex)),
              'error')
        return None
7,872,424,681,330,008,000
Return a single model instance by its ID :param id: Model ID
sleepyenv/lib/python2.7/site-packages/Flask_Admin-1.2.0-py2.7.egg/flask_admin/contrib/mongoengine/view.py
get_one
hexlism/css_platform
python
def get_one(self, id):
    """Return a single model instance by its ID.

    :param id:
        Model ID
    """
    try:
        return self.get_query().filter(pk=id).first()
    except mongoengine.ValidationError as ex:
        # Malformed ObjectId etc.: surface a flash message instead of a 500.
        flash(gettext('Failed to get model. %(error)s', error=format_error(ex)),
              'error')
        return None
def create_model(self, form):
    """Create model helper.

    :param form:
        Form instance
    """
    try:
        model = self.model()
        form.populate_obj(model)
        self._on_model_change(form, model, True)
        model.save()
    except Exception as ex:
        # Let the view-level handler decide; otherwise report and log.
        if not self.handle_view_exception(ex):
            flash(gettext('Failed to create record. %(error)s',
                          error=format_error(ex)),
                  'error')
            log.exception('Failed to create record.')
        return False
    else:
        self.after_model_change(form, model, True)
        return model
903,978,773,780,675,300
Create model helper :param form: Form instance
sleepyenv/lib/python2.7/site-packages/Flask_Admin-1.2.0-py2.7.egg/flask_admin/contrib/mongoengine/view.py
create_model
hexlism/css_platform
python
def create_model(self, form):
    """Create model helper.

    :param form:
        Form instance
    """
    try:
        model = self.model()
        form.populate_obj(model)
        self._on_model_change(form, model, True)
        model.save()
    except Exception as ex:
        # Let the view-level handler decide; otherwise report and log.
        if not self.handle_view_exception(ex):
            flash(gettext('Failed to create record. %(error)s',
                          error=format_error(ex)),
                  'error')
            log.exception('Failed to create record.')
        return False
    else:
        self.after_model_change(form, model, True)
        return model
def update_model(self, form, model):
    """Update model helper.

    :param form:
        Form instance
    :param model:
        Model instance to update
    """
    try:
        form.populate_obj(model)
        self._on_model_change(form, model, False)
        model.save()
    except Exception as ex:
        # Let the view-level handler decide; otherwise report and log.
        if not self.handle_view_exception(ex):
            flash(gettext('Failed to update record. %(error)s',
                          error=format_error(ex)),
                  'error')
            log.exception('Failed to update record.')
        return False
    else:
        self.after_model_change(form, model, False)
        return True
764,570,345,739,556,700
Update model helper :param form: Form instance :param model: Model instance to update
sleepyenv/lib/python2.7/site-packages/Flask_Admin-1.2.0-py2.7.egg/flask_admin/contrib/mongoengine/view.py
update_model
hexlism/css_platform
python
def update_model(self, form, model):
    """Update model helper.

    :param form:
        Form instance
    :param model:
        Model instance to update
    """
    try:
        form.populate_obj(model)
        self._on_model_change(form, model, False)
        model.save()
    except Exception as ex:
        # Let the view-level handler decide; otherwise report and log.
        if not self.handle_view_exception(ex):
            flash(gettext('Failed to update record. %(error)s',
                          error=format_error(ex)),
                  'error')
            log.exception('Failed to update record.')
        return False
    else:
        self.after_model_change(form, model, False)
        return True
def delete_model(self, model):
    """Delete model helper.

    :param model:
        Model instance
    """
    try:
        self.on_model_delete(model)
        model.delete()
    except Exception as ex:
        # Let the view-level handler decide; otherwise report and log.
        if not self.handle_view_exception(ex):
            flash(gettext('Failed to delete record. %(error)s',
                          error=format_error(ex)),
                  'error')
            log.exception('Failed to delete record.')
        return False
    else:
        self.after_model_delete(model)
        return True
3,090,824,996,892,932,000
Delete model helper :param model: Model instance
sleepyenv/lib/python2.7/site-packages/Flask_Admin-1.2.0-py2.7.egg/flask_admin/contrib/mongoengine/view.py
delete_model
hexlism/css_platform
python
def delete_model(self, model):
    """Delete model helper.

    :param model:
        Model instance
    """
    try:
        self.on_model_delete(model)
        model.delete()
    except Exception as ex:
        # Let the view-level handler decide; otherwise report and log.
        if not self.handle_view_exception(ex):
            flash(gettext('Failed to delete record. %(error)s',
                          error=format_error(ex)),
                  'error')
            log.exception('Failed to delete record.')
        return False
    else:
        self.after_model_delete(model)
        return True
def estimate(particles, weights):
    """Return mean and variance of the weighted particles."""
    mean = np.average(particles, weights=weights, axis=0)
    variance = np.average((particles - mean) ** 2, weights=weights, axis=0)
    return mean, variance
1,993,265,891,749,680,400
returns mean and variance of the weighted particles
002_Particle_Filter/Particle_Filter.py
estimate
zhyongquan/Automotive-Software-Blog
python
def estimate(particles, weights):
    # Weighted mean and weighted variance over the particle axis.
    mean = np.average(particles, weights=weights, axis=0)
    variance = np.average((particles - mean) ** 2, weights=weights, axis=0)
    return mean, variance
def test_builder_is_pickled(self):
    """Unlike most tree builders, HTMLParserTreeBuilder will be restored
    after pickling.
    """
    tree = self.soup('<a><b>foo</a>')
    restored = pickle.loads(pickle.dumps(tree, 2))
    self.assertTrue(isinstance(restored.builder, type(tree.builder)))
-6,423,651,160,975,675,000
Unlike most tree builders, HTMLParserTreeBuilder will be restored after pickling.
virtual/lib/python3.6/site-packages/bs4/tests/test_htmlparser.py
test_builder_is_pickled
AG371/bus-reservation-system
python
def test_builder_is_pickled(self):
    """Unlike most tree builders, HTMLParserTreeBuilder will be restored
    after pickling.
    """
    tree = self.soup('<a><b>foo</a>')
    restored = pickle.loads(pickle.dumps(tree, 2))
    self.assertTrue(isinstance(restored.builder, type(tree.builder)))
def test_error(self):
    """Verify that our HTMLParser subclass implements error() in a way
    that doesn't cause a crash.
    """
    BeautifulSoupHTMLParser().error("don't crash")
-6,519,268,211,641,346,000
Verify that our HTMLParser subclass implements error() in a way that doesn't cause a crash.
virtual/lib/python3.6/site-packages/bs4/tests/test_htmlparser.py
test_error
AG371/bus-reservation-system
python
def test_error(self):
    """Verify that our HTMLParser subclass implements error() in a way
    that doesn't cause a crash.
    """
    BeautifulSoupHTMLParser().error("don't crash")
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up the inverter select entities from a config entry."""
    entry_data = hass.data[DOMAIN][config_entry.entry_id]
    inverter = entry_data[KEY_INVERTER]
    device_info = entry_data[KEY_DEVICE_INFO]
    entities = []
    for description in NUMBERS:
        try:
            current_value = await description.getter(inverter)
        except InverterError:
            # Skip settings this inverter model cannot report.
            _LOGGER.debug('Could not read inverter setting %s', description.key)
            continue
        entities.append(
            InverterNumberEntity(device_info, description, inverter,
                                 current_value))
    async_add_entities(entities)
-6,143,632,953,803,178,000
Set up the inverter select entities from a config entry.
homeassistant/components/goodwe/number.py
async_setup_entry
kubawolanin/core
python
async def async_setup_entry(hass, config_entry, async_add_entities):
    entry_data = hass.data[DOMAIN][config_entry.entry_id]
    inverter = entry_data[KEY_INVERTER]
    device_info = entry_data[KEY_DEVICE_INFO]
    entities = []
    for description in NUMBERS:
        try:
            current_value = await description.getter(inverter)
        except InverterError:
            # Skip settings this inverter model cannot report.
            _LOGGER.debug('Could not read inverter setting %s', description.key)
            continue
        entities.append(
            InverterNumberEntity(device_info, description, inverter,
                                 current_value))
    async_add_entities(entities)
def __init__(self, device_info: DeviceInfo, description: GoodweNumberEntityDescription, inverter: Inverter, current_value: int) -> None:
    """Initialize the number inverter setting entity."""
    self.entity_description = description
    self._attr_device_info = device_info
    self._attr_unique_id = f'{DOMAIN}-{description.key}-{inverter.serial_number}'
    # Number entities expose floats, even though the inverter reports ints.
    self._attr_value = float(current_value)
    self._inverter: Inverter = inverter
-6,500,139,910,043,170,000
Initialize the number inverter setting entity.
homeassistant/components/goodwe/number.py
__init__
kubawolanin/core
python
def __init__(self, device_info: DeviceInfo, description: GoodweNumberEntityDescription, inverter: Inverter, current_value: int) -> None:
    self.entity_description = description
    self._attr_device_info = device_info
    self._attr_unique_id = f'{DOMAIN}-{description.key}-{inverter.serial_number}'
    # Number entities expose floats, even though the inverter reports ints.
    self._attr_value = float(current_value)
    self._inverter: Inverter = inverter
async def async_set_value(self, value: float) -> None:
    """Set new value."""
    setter = self.entity_description.setter
    # NOTE(review): flattened source is ambiguous about statement grouping;
    # reconstructed with state update guarded by the setter — confirm
    # against the original repository.
    if setter:
        await setter(self._inverter, int(value))
        self._attr_value = value
        self.async_write_ha_state()
6,044,406,391,341,202,000
Set new value.
homeassistant/components/goodwe/number.py
async_set_value
kubawolanin/core
python
async def async_set_value(self, value: float) -> None:
    setter = self.entity_description.setter
    # NOTE(review): flattened source is ambiguous about statement grouping;
    # reconstructed with state update guarded by the setter — confirm
    # against the original repository.
    if setter:
        await setter(self._inverter, int(value))
        self._attr_value = value
        self.async_write_ha_state()
@runnable def run_targets(*args): 'Run targets for Python.' Options.show_coverage = ('coverage' in args) count = 0 for (count, (command, title, retry)) in enumerate(Options.targets, start=1): success = call(command, title, retry) if (not success): message = (('✅ ' * (count - 1)) + '❌') show_notification(message, title) return False message = ('✅ ' * count) title = 'All Targets' show_notification(message, title) show_coverage() return True
-73,758,141,938,422,160
Run targets for Python.
scent.py
run_targets
EazeAI/AI-WS
python
@runnable def run_targets(*args): Options.show_coverage = ('coverage' in args) count = 0 for (count, (command, title, retry)) in enumerate(Options.targets, start=1): success = call(command, title, retry) if (not success): message = (('✅ ' * (count - 1)) + '❌') show_notification(message, title) return False message = ('✅ ' * count) title = 'All Targets' show_notification(message, title) show_coverage() return True
def call(command, title, retry): 'Run a command-line program and display the result.' if Options.rerun_args: (command, title, retry) = Options.rerun_args Options.rerun_args = None success = call(command, title, retry) if (not success): return False print('') print(('$ %s' % ' '.join(command))) failure = subprocess.call(command) if (failure and retry): Options.rerun_args = (command, title, retry) return (not failure)
3,937,803,438,086,842,000
Run a command-line program and display the result.
scent.py
call
EazeAI/AI-WS
python
def call(command, title, retry): if Options.rerun_args: (command, title, retry) = Options.rerun_args Options.rerun_args = None success = call(command, title, retry) if (not success): return False print() print(('$ %s' % ' '.join(command))) failure = subprocess.call(command) if (failure and retry): Options.rerun_args = (command, title, retry) return (not failure)
def show_notification(message, title): 'Show a user notification.' if (notify and title): notify(message, title=title, group=Options.group)
1,002,222,407,043,525,500
Show a user notification.
scent.py
show_notification
EazeAI/AI-WS
python
def show_notification(message, title): if (notify and title): notify(message, title=title, group=Options.group)
def show_coverage(): 'Launch the coverage report.' if Options.show_coverage: subprocess.call(['make', 'read-coverage']) Options.show_coverage = False
2,950,738,002,091,552,300
Launch the coverage report.
scent.py
show_coverage
EazeAI/AI-WS
python
def show_coverage(): if Options.show_coverage: subprocess.call(['make', 'read-coverage']) Options.show_coverage = False
def download_scripts(destination_dir: Path=Path('ptlflow_scripts')) -> None: 'Download the main scripts and configs to start working with PTLFlow.' github_url = 'https://raw.githubusercontent.com/hmorimitsu/ptlflow/main/' script_names = ['datasets.yml', 'infer.py', 'test.py', 'train.py', 'validate.py'] destination_dir.mkdir(parents=True, exist_ok=True) for sname in script_names: script_url = (github_url + sname) data = requests.get(script_url) if (data.status_code == 200): with open((destination_dir / sname), 'wb') as f: f.write(data.content) else: logging.warning('Script %s was not found.', script_url) logging.info('Downloaded scripts to %s.', str(destination_dir))
-5,417,779,943,224,005,000
Download the main scripts and configs to start working with PTLFlow.
ptlflow/__init__.py
download_scripts
hmorimitsu/ptlflow
python
def download_scripts(destination_dir: Path=Path('ptlflow_scripts')) -> None: github_url = 'https://raw.githubusercontent.com/hmorimitsu/ptlflow/main/' script_names = ['datasets.yml', 'infer.py', 'test.py', 'train.py', 'validate.py'] destination_dir.mkdir(parents=True, exist_ok=True) for sname in script_names: script_url = (github_url + sname) data = requests.get(script_url) if (data.status_code == 200): with open((destination_dir / sname), 'wb') as f: f.write(data.content) else: logging.warning('Script %s was not found.', script_url) logging.info('Downloaded scripts to %s.', str(destination_dir))
def get_model(model_name: str, pretrained_ckpt: Optional[str]=None, args: Optional[Namespace]=None) -> BaseModel: 'Return an instance of a chosen model.\n\n The instance can have configured by he arguments, and load some existing pretrained weights.\n\n Note that this is different from get_model_reference(), which returns a reference to the model class. The instance,\n returned by this function, is a class already instantiated. Therefore, the return of this function is equivalent to\n "return get_model_reference()()", which looks confusing. This can be rewritten as\n "model_ref = get_model_reference(); return model_ref()".\n\n Parameters\n ----------\n model_name : str\n Name of the model to get an instance of.\n pretrained_ckpt : Optional[str], optional\n Name of the pretrained weight to load or a path to a local checkpoint file.\n args : Optional[Namespace], optional\n Some arguments that ill be provided to the model.\n\n Returns\n -------\n BaseModel\n The instance of the chosen model.\n\n Raises\n ------\n ValueError\n If the given checkpoint name is not a valid choice.\n ValueError\n If a checkpoint name is given, but the model does not have any pretrained weights available.\n\n See Also\n --------\n get_model_reference : To get a reference to the class of a model.\n ' model_ref = get_model_reference(model_name) if (args is None): parser = model_ref.add_model_specific_args() args = parser.parse_args([]) model = model_ref(args) if ((pretrained_ckpt is None) and (args is not None) and (args.pretrained_ckpt is not None)): pretrained_ckpt = args.pretrained_ckpt if (pretrained_ckpt is not None): if Path(pretrained_ckpt).exists(): ckpt_path = pretrained_ckpt elif hasattr(model_ref, 'pretrained_checkpoints'): ckpt_path = model_ref.pretrained_checkpoints.get(pretrained_ckpt) if (ckpt_path is None): raise ValueError(f"Invalid checkpoint name {pretrained_ckpt}. 
Choose one from {{{','.join(model.pretrained_checkpoints.keys())}}}") else: raise ValueError(f'Cannot find checkpoint {pretrained_ckpt} for model {model_name}') device = ('cuda' if torch.cuda.is_available() else 'cpu') if Path(ckpt_path).exists(): ckpt = torch.load(ckpt_path, map_location=torch.device(device)) else: model_dir = ((Path(hub.get_dir()) / 'ptlflow') / 'checkpoints') ckpt = hub.load_state_dict_from_url(ckpt_path, model_dir=model_dir, map_location=torch.device(device), check_hash=True) state_dict = ckpt['state_dict'] model.load_state_dict(state_dict) return model
-7,413,552,895,945,898,000
Return an instance of a chosen model. The instance can have configured by he arguments, and load some existing pretrained weights. Note that this is different from get_model_reference(), which returns a reference to the model class. The instance, returned by this function, is a class already instantiated. Therefore, the return of this function is equivalent to "return get_model_reference()()", which looks confusing. This can be rewritten as "model_ref = get_model_reference(); return model_ref()". Parameters ---------- model_name : str Name of the model to get an instance of. pretrained_ckpt : Optional[str], optional Name of the pretrained weight to load or a path to a local checkpoint file. args : Optional[Namespace], optional Some arguments that ill be provided to the model. Returns ------- BaseModel The instance of the chosen model. Raises ------ ValueError If the given checkpoint name is not a valid choice. ValueError If a checkpoint name is given, but the model does not have any pretrained weights available. See Also -------- get_model_reference : To get a reference to the class of a model.
ptlflow/__init__.py
get_model
hmorimitsu/ptlflow
python
def get_model(model_name: str, pretrained_ckpt: Optional[str]=None, args: Optional[Namespace]=None) -> BaseModel: 'Return an instance of a chosen model.\n\n The instance can have configured by he arguments, and load some existing pretrained weights.\n\n Note that this is different from get_model_reference(), which returns a reference to the model class. The instance,\n returned by this function, is a class already instantiated. Therefore, the return of this function is equivalent to\n "return get_model_reference()()", which looks confusing. This can be rewritten as\n "model_ref = get_model_reference(); return model_ref()".\n\n Parameters\n ----------\n model_name : str\n Name of the model to get an instance of.\n pretrained_ckpt : Optional[str], optional\n Name of the pretrained weight to load or a path to a local checkpoint file.\n args : Optional[Namespace], optional\n Some arguments that ill be provided to the model.\n\n Returns\n -------\n BaseModel\n The instance of the chosen model.\n\n Raises\n ------\n ValueError\n If the given checkpoint name is not a valid choice.\n ValueError\n If a checkpoint name is given, but the model does not have any pretrained weights available.\n\n See Also\n --------\n get_model_reference : To get a reference to the class of a model.\n ' model_ref = get_model_reference(model_name) if (args is None): parser = model_ref.add_model_specific_args() args = parser.parse_args([]) model = model_ref(args) if ((pretrained_ckpt is None) and (args is not None) and (args.pretrained_ckpt is not None)): pretrained_ckpt = args.pretrained_ckpt if (pretrained_ckpt is not None): if Path(pretrained_ckpt).exists(): ckpt_path = pretrained_ckpt elif hasattr(model_ref, 'pretrained_checkpoints'): ckpt_path = model_ref.pretrained_checkpoints.get(pretrained_ckpt) if (ckpt_path is None): raise ValueError(f"Invalid checkpoint name {pretrained_ckpt}. 
Choose one from {{{','.join(model.pretrained_checkpoints.keys())}}}") else: raise ValueError(f'Cannot find checkpoint {pretrained_ckpt} for model {model_name}') device = ('cuda' if torch.cuda.is_available() else 'cpu') if Path(ckpt_path).exists(): ckpt = torch.load(ckpt_path, map_location=torch.device(device)) else: model_dir = ((Path(hub.get_dir()) / 'ptlflow') / 'checkpoints') ckpt = hub.load_state_dict_from_url(ckpt_path, model_dir=model_dir, map_location=torch.device(device), check_hash=True) state_dict = ckpt['state_dict'] model.load_state_dict(state_dict) return model
def get_model_reference(model_name: str) -> BaseModel: 'Return a reference to the class of a chosen model.\n\n Note that this is different from get_model(), which returns an instance of a model. The reference, returned by this\n function, is a class before instantiation. Therefore, the return of this function can be used to instantiate a model as\n "model_ref = get_model_reference(); model_instance = model_ref()".\n\n Parameters\n ----------\n model_name : str\n Name of the model to get a reference of.\n\n Returns\n -------\n BaseModel\n A reference to the chosen model.\n\n Raises\n ------\n ValueError\n If the given name is not a valid choice.\n\n See Also\n --------\n get_model : To get an instance of a model.\n ' try: return models_dict[model_name] except KeyError: raise ValueError(f"Unknown model name: {model_name}. Choose from [{', '.join(models_dict.keys())}]")
-1,848,291,867,390,122,500
Return a reference to the class of a chosen model. Note that this is different from get_model(), which returns an instance of a model. The reference, returned by this function, is a class before instantiation. Therefore, the return of this function can be used to instantiate a model as "model_ref = get_model_reference(); model_instance = model_ref()". Parameters ---------- model_name : str Name of the model to get a reference of. Returns ------- BaseModel A reference to the chosen model. Raises ------ ValueError If the given name is not a valid choice. See Also -------- get_model : To get an instance of a model.
ptlflow/__init__.py
get_model_reference
hmorimitsu/ptlflow
python
def get_model_reference(model_name: str) -> BaseModel: 'Return a reference to the class of a chosen model.\n\n Note that this is different from get_model(), which returns an instance of a model. The reference, returned by this\n function, is a class before instantiation. Therefore, the return of this function can be used to instantiate a model as\n "model_ref = get_model_reference(); model_instance = model_ref()".\n\n Parameters\n ----------\n model_name : str\n Name of the model to get a reference of.\n\n Returns\n -------\n BaseModel\n A reference to the chosen model.\n\n Raises\n ------\n ValueError\n If the given name is not a valid choice.\n\n See Also\n --------\n get_model : To get an instance of a model.\n ' try: return models_dict[model_name] except KeyError: raise ValueError(f"Unknown model name: {model_name}. Choose from [{', '.join(models_dict.keys())}]")
def get_trainable_model_names() -> List[str]: 'Return a list of model names that are able to be trained.\n \n This function return the names of the model that have a loss function defined.\n\n Returns\n =======\n List[str]\n The list of the model names that can be trained.\n ' return [mname for mname in models_dict.keys() if (get_model(mname).loss_fn is not None)]
2,144,936,162,105,759,000
Return a list of model names that are able to be trained. This function return the names of the model that have a loss function defined. Returns ======= List[str] The list of the model names that can be trained.
ptlflow/__init__.py
get_trainable_model_names
hmorimitsu/ptlflow
python
def get_trainable_model_names() -> List[str]: 'Return a list of model names that are able to be trained.\n \n This function return the names of the model that have a loss function defined.\n\n Returns\n =======\n List[str]\n The list of the model names that can be trained.\n ' return [mname for mname in models_dict.keys() if (get_model(mname).loss_fn is not None)]
def blackbody_specific_intensity(wl_nm, T_K): 'Get the monochromatic specific intensity for a blackbody -\n wl_nm = wavelength [nm]\n T_K = temperature [K]\n This is the energy radiated per second per unit wavelength per unit solid angle.\n Reference - Shu, eq. 4.6, p. 78.' a = ((PLANCK_CONSTANT * SPEED_OF_LIGHT) / BOLTZMAN_CONSTANT) b = (((2.0 * PLANCK_CONSTANT) * SPEED_OF_LIGHT) * SPEED_OF_LIGHT) wl_m = (wl_nm * 1e-09) try: exponent = (a / (wl_m * T_K)) except ZeroDivisionError: return 0.0 if (exponent > 500.0): return 0.0 specific_intensity = (b / (math.pow(wl_m, 5) * (math.exp(exponent) - 1.0))) return specific_intensity
2,590,742,495,800,728,600
Get the monochromatic specific intensity for a blackbody - wl_nm = wavelength [nm] T_K = temperature [K] This is the energy radiated per second per unit wavelength per unit solid angle. Reference - Shu, eq. 4.6, p. 78.
colorpy/colorpy-0.1.0/blackbody.py
blackbody_specific_intensity
gmweir/QuasiOptics
python
def blackbody_specific_intensity(wl_nm, T_K): 'Get the monochromatic specific intensity for a blackbody -\n wl_nm = wavelength [nm]\n T_K = temperature [K]\n This is the energy radiated per second per unit wavelength per unit solid angle.\n Reference - Shu, eq. 4.6, p. 78.' a = ((PLANCK_CONSTANT * SPEED_OF_LIGHT) / BOLTZMAN_CONSTANT) b = (((2.0 * PLANCK_CONSTANT) * SPEED_OF_LIGHT) * SPEED_OF_LIGHT) wl_m = (wl_nm * 1e-09) try: exponent = (a / (wl_m * T_K)) except ZeroDivisionError: return 0.0 if (exponent > 500.0): return 0.0 specific_intensity = (b / (math.pow(wl_m, 5) * (math.exp(exponent) - 1.0))) return specific_intensity
def blackbody_spectrum(T_K): 'Get the spectrum of a blackbody, as a numpy array.' spectrum = ciexyz.empty_spectrum() (num_rows, num_cols) = spectrum.shape for i in xrange(0, num_rows): specific_intensity = blackbody_specific_intensity(spectrum[i][0], T_K) spectrum[i][1] = ((specific_intensity * ciexyz.delta_wl_nm) * 1e-09) return spectrum
-4,814,021,675,059,225,000
Get the spectrum of a blackbody, as a numpy array.
colorpy/colorpy-0.1.0/blackbody.py
blackbody_spectrum
gmweir/QuasiOptics
python
def blackbody_spectrum(T_K): spectrum = ciexyz.empty_spectrum() (num_rows, num_cols) = spectrum.shape for i in xrange(0, num_rows): specific_intensity = blackbody_specific_intensity(spectrum[i][0], T_K) spectrum[i][1] = ((specific_intensity * ciexyz.delta_wl_nm) * 1e-09) return spectrum
def blackbody_color(T_K): 'Given a temperature (K), return the xyz color of a thermal blackbody.' spectrum = blackbody_spectrum(T_K) xyz = ciexyz.xyz_from_spectrum(spectrum) return xyz
-5,026,878,936,742,433,000
Given a temperature (K), return the xyz color of a thermal blackbody.
colorpy/colorpy-0.1.0/blackbody.py
blackbody_color
gmweir/QuasiOptics
python
def blackbody_color(T_K): spectrum = blackbody_spectrum(T_K) xyz = ciexyz.xyz_from_spectrum(spectrum) return xyz
def blackbody_patch_plot(T_list, title, filename): 'Draw a patch plot of blackbody colors for the given temperature range.' xyz_colors = [] color_names = [] for Ti in T_list: xyz = blackbody_color(Ti) xyz_colors.append(xyz) name = ('%g K' % Ti) color_names.append(name) plots.xyz_patch_plot(xyz_colors, color_names, title, filename)
687,483,277,840,238,200
Draw a patch plot of blackbody colors for the given temperature range.
colorpy/colorpy-0.1.0/blackbody.py
blackbody_patch_plot
gmweir/QuasiOptics
python
def blackbody_patch_plot(T_list, title, filename): xyz_colors = [] color_names = [] for Ti in T_list: xyz = blackbody_color(Ti) xyz_colors.append(xyz) name = ('%g K' % Ti) color_names.append(name) plots.xyz_patch_plot(xyz_colors, color_names, title, filename)
def blackbody_color_vs_temperature_plot(T_list, title, filename): 'Draw a color vs temperature plot for the given temperature range.' num_T = len(T_list) rgb_list = numpy.empty((num_T, 3)) for i in xrange(0, num_T): T_i = T_list[i] xyz = blackbody_color(T_i) rgb_list[i] = colormodels.rgb_from_xyz(xyz) plots.color_vs_param_plot(T_list, rgb_list, title, filename, plotfunc=pylab.semilogy, tight=True, xlabel='Temperature (K)', ylabel='RGB Color')
3,642,871,054,975,440,400
Draw a color vs temperature plot for the given temperature range.
colorpy/colorpy-0.1.0/blackbody.py
blackbody_color_vs_temperature_plot
gmweir/QuasiOptics
python
def blackbody_color_vs_temperature_plot(T_list, title, filename): num_T = len(T_list) rgb_list = numpy.empty((num_T, 3)) for i in xrange(0, num_T): T_i = T_list[i] xyz = blackbody_color(T_i) rgb_list[i] = colormodels.rgb_from_xyz(xyz) plots.color_vs_param_plot(T_list, rgb_list, title, filename, plotfunc=pylab.semilogy, tight=True, xlabel='Temperature (K)', ylabel='RGB Color')
def blackbody_spectrum_plot(T_K): 'Draw the spectrum of a blackbody at the given temperature.' spectrum = blackbody_spectrum(T_K) title = ('Blackbody Spectrum - T %d K' % int(T_K)) filename = ('BlackbodySpectrum-%dK' % int(T_K)) plots.spectrum_plot(spectrum, title, filename, xlabel='Wavelength (nm)', ylabel='Specific Intensity')
-2,097,509,183,976,489,700
Draw the spectrum of a blackbody at the given temperature.
colorpy/colorpy-0.1.0/blackbody.py
blackbody_spectrum_plot
gmweir/QuasiOptics
python
def blackbody_spectrum_plot(T_K): spectrum = blackbody_spectrum(T_K) title = ('Blackbody Spectrum - T %d K' % int(T_K)) filename = ('BlackbodySpectrum-%dK' % int(T_K)) plots.spectrum_plot(spectrum, title, filename, xlabel='Wavelength (nm)', ylabel='Specific Intensity')
def figures(): 'Create some blackbody plots.' T_list_0 = plots.log_interpolate(1200.0, 20000.0, 48) T_list_hot = plots.log_interpolate(10000.0, 40000.0, 24) T_list_cool = plots.log_interpolate(950.0, 1200.0, 24) blackbody_patch_plot(T_list_0, 'Blackbody Colors', 'Blackbody-Patch') blackbody_patch_plot(T_list_hot, 'Hot Blackbody Colors', 'Blackbody-HotPatch') blackbody_patch_plot(T_list_cool, 'Cool Blackbody Colors', 'Blackbody-CoolPatch') blackbody_color_vs_temperature_plot(range(1200, 16000, 50), 'Blackbody Colors', 'Blackbody-Colors') blackbody_color_vs_temperature_plot(range(10000, 40000, 100), 'Hot Blackbody Colors', 'Blackbody-HotColors') blackbody_color_vs_temperature_plot(range(950, 1200, 1), 'Cool Blackbody Colors', 'Blackbody-CoolColors') blackbody_spectrum_plot(2000.0) blackbody_spectrum_plot(3000.0) blackbody_spectrum_plot(SUN_TEMPERATURE) blackbody_spectrum_plot(11000.0) blackbody_spectrum_plot(15000.0)
5,560,260,944,193,342,000
Create some blackbody plots.
colorpy/colorpy-0.1.0/blackbody.py
figures
gmweir/QuasiOptics
python
def figures(): T_list_0 = plots.log_interpolate(1200.0, 20000.0, 48) T_list_hot = plots.log_interpolate(10000.0, 40000.0, 24) T_list_cool = plots.log_interpolate(950.0, 1200.0, 24) blackbody_patch_plot(T_list_0, 'Blackbody Colors', 'Blackbody-Patch') blackbody_patch_plot(T_list_hot, 'Hot Blackbody Colors', 'Blackbody-HotPatch') blackbody_patch_plot(T_list_cool, 'Cool Blackbody Colors', 'Blackbody-CoolPatch') blackbody_color_vs_temperature_plot(range(1200, 16000, 50), 'Blackbody Colors', 'Blackbody-Colors') blackbody_color_vs_temperature_plot(range(10000, 40000, 100), 'Hot Blackbody Colors', 'Blackbody-HotColors') blackbody_color_vs_temperature_plot(range(950, 1200, 1), 'Cool Blackbody Colors', 'Blackbody-CoolColors') blackbody_spectrum_plot(2000.0) blackbody_spectrum_plot(3000.0) blackbody_spectrum_plot(SUN_TEMPERATURE) blackbody_spectrum_plot(11000.0) blackbody_spectrum_plot(15000.0)
@cached_property def additional_properties_type(): '\n This must be a method because a model may have properties that are\n of type self, this must run after the class is loaded\n ' lazy_import() return (bool, date, datetime, dict, float, int, list, str, none_type)
1,702,168,743,392,494,600
This must be a method because a model may have properties that are of type self, this must run after the class is loaded
cryptoapis/model/coins_forwarding_success_data.py
additional_properties_type
Crypto-APIs/Crypto_APIs_2.0_SDK_Python
python
@cached_property def additional_properties_type(): '\n This must be a method because a model may have properties that are\n of type self, this must run after the class is loaded\n ' lazy_import() return (bool, date, datetime, dict, float, int, list, str, none_type)
@cached_property def openapi_types(): '\n This must be a method because a model may have properties that are\n of type self, this must run after the class is loaded\n\n Returns\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n ' lazy_import() return {'product': (str,), 'event': (str,), 'item': (CoinsForwardingSuccessDataItem,)}
-2,012,564,197,808,641,800
This must be a method because a model may have properties that are of type self, this must run after the class is loaded Returns openapi_types (dict): The key is attribute name and the value is attribute type.
cryptoapis/model/coins_forwarding_success_data.py
openapi_types
Crypto-APIs/Crypto_APIs_2.0_SDK_Python
python
@cached_property def openapi_types(): '\n This must be a method because a model may have properties that are\n of type self, this must run after the class is loaded\n\n Returns\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n ' lazy_import() return {'product': (str,), 'event': (str,), 'item': (CoinsForwardingSuccessDataItem,)}
@classmethod @convert_js_args_to_python_args def _from_openapi_data(cls, product, event, item, *args, **kwargs): 'CoinsForwardingSuccessData - a model defined in OpenAPI\n\n Args:\n product (str): Represents the Crypto APIs 2.0 product which sends the callback.\n event (str): Defines the specific event, for which a callback subscription is set.\n item (CoinsForwardingSuccessDataItem):\n\n Keyword Args:\n _check_type (bool): if True, values for parameters in openapi_types\n will be type checked and a TypeError will be\n raised if the wrong type is input.\n Defaults to True\n _path_to_item (tuple/list): This is a list of keys or values to\n drill down to the model in received_data\n when deserializing a response\n _spec_property_naming (bool): True if the variable names in the input data\n are serialized names, as specified in the OpenAPI document.\n False if the variable names in the input data\n are pythonic names, e.g. snake case (default)\n _configuration (Configuration): the instance to use when\n deserializing a file_type parameter.\n If passed, type conversion is attempted\n If omitted no type conversion is done.\n _visited_composed_classes (tuple): This stores a tuple of\n classes that we have traveled through so that\n if we see that class again we will not use its\n discriminator again.\n When traveling through a discriminator, the\n composed schema that is\n is traveled through is added to this set.\n For example if Animal has a discriminator\n petType and we pass in "Dog", and the class Dog\n allOf includes Animal, we move through Animal\n once using the discriminator, and pick Dog.\n Then in Dog, we will make an instance of the\n Animal class but this time we won\'t travel\n through its discriminator because we passed in\n _visited_composed_classes = (Animal,)\n ' _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = 
kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) self = super(OpenApiModel, cls).__new__(cls) if args: raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,)) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = (_visited_composed_classes + (self.__class__,)) self.product = product self.event = event self.item = item for (var_name, var_value) in kwargs.items(): if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)): continue setattr(self, var_name, var_value) return self
-2,732,593,822,327,209,000
CoinsForwardingSuccessData - a model defined in OpenAPI Args: product (str): Represents the Crypto APIs 2.0 product which sends the callback. event (str): Defines the specific event, for which a callback subscription is set. item (CoinsForwardingSuccessDataItem): Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. _visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is is traveled through is added to this set. For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,)
cryptoapis/model/coins_forwarding_success_data.py
_from_openapi_data
Crypto-APIs/Crypto_APIs_2.0_SDK_Python
python
@classmethod @convert_js_args_to_python_args def _from_openapi_data(cls, product, event, item, *args, **kwargs): 'CoinsForwardingSuccessData - a model defined in OpenAPI\n\n Args:\n product (str): Represents the Crypto APIs 2.0 product which sends the callback.\n event (str): Defines the specific event, for which a callback subscription is set.\n item (CoinsForwardingSuccessDataItem):\n\n Keyword Args:\n _check_type (bool): if True, values for parameters in openapi_types\n will be type checked and a TypeError will be\n raised if the wrong type is input.\n Defaults to True\n _path_to_item (tuple/list): This is a list of keys or values to\n drill down to the model in received_data\n when deserializing a response\n _spec_property_naming (bool): True if the variable names in the input data\n are serialized names, as specified in the OpenAPI document.\n False if the variable names in the input data\n are pythonic names, e.g. snake case (default)\n _configuration (Configuration): the instance to use when\n deserializing a file_type parameter.\n If passed, type conversion is attempted\n If omitted no type conversion is done.\n _visited_composed_classes (tuple): This stores a tuple of\n classes that we have traveled through so that\n if we see that class again we will not use its\n discriminator again.\n When traveling through a discriminator, the\n composed schema that is\n is traveled through is added to this set.\n For example if Animal has a discriminator\n petType and we pass in "Dog", and the class Dog\n allOf includes Animal, we move through Animal\n once using the discriminator, and pick Dog.\n Then in Dog, we will make an instance of the\n Animal class but this time we won\'t travel\n through its discriminator because we passed in\n _visited_composed_classes = (Animal,)\n ' _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = 
kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) self = super(OpenApiModel, cls).__new__(cls) if args: raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,)) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = (_visited_composed_classes + (self.__class__,)) self.product = product self.event = event self.item = item for (var_name, var_value) in kwargs.items(): if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)): continue setattr(self, var_name, var_value) return self
@convert_js_args_to_python_args def __init__(self, product, event, item, *args, **kwargs): 'CoinsForwardingSuccessData - a model defined in OpenAPI\n\n Args:\n product (str): Represents the Crypto APIs 2.0 product which sends the callback.\n event (str): Defines the specific event, for which a callback subscription is set.\n item (CoinsForwardingSuccessDataItem):\n\n Keyword Args:\n _check_type (bool): if True, values for parameters in openapi_types\n will be type checked and a TypeError will be\n raised if the wrong type is input.\n Defaults to True\n _path_to_item (tuple/list): This is a list of keys or values to\n drill down to the model in received_data\n when deserializing a response\n _spec_property_naming (bool): True if the variable names in the input data\n are serialized names, as specified in the OpenAPI document.\n False if the variable names in the input data\n are pythonic names, e.g. snake case (default)\n _configuration (Configuration): the instance to use when\n deserializing a file_type parameter.\n If passed, type conversion is attempted\n If omitted no type conversion is done.\n _visited_composed_classes (tuple): This stores a tuple of\n classes that we have traveled through so that\n if we see that class again we will not use its\n discriminator again.\n When traveling through a discriminator, the\n composed schema that is\n is traveled through is added to this set.\n For example if Animal has a discriminator\n petType and we pass in "Dog", and the class Dog\n allOf includes Animal, we move through Animal\n once using the discriminator, and pick Dog.\n Then in Dog, we will make an instance of the\n Animal class but this time we won\'t travel\n through its discriminator because we passed in\n _visited_composed_classes = (Animal,)\n ' _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) 
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) if args: raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,)) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = (_visited_composed_classes + (self.__class__,)) self.product = product self.event = event self.item = item for (var_name, var_value) in kwargs.items(): if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)): continue setattr(self, var_name, var_value) if (var_name in self.read_only_vars): raise ApiAttributeError(f'`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate class with read only attributes.')
-791,863,898,841,504,600
CoinsForwardingSuccessData - a model defined in OpenAPI Args: product (str): Represents the Crypto APIs 2.0 product which sends the callback. event (str): Defines the specific event, for which a callback subscription is set. item (CoinsForwardingSuccessDataItem): Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. _visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is is traveled through is added to this set. For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,)
cryptoapis/model/coins_forwarding_success_data.py
__init__
Crypto-APIs/Crypto_APIs_2.0_SDK_Python
python
@convert_js_args_to_python_args def __init__(self, product, event, item, *args, **kwargs): 'CoinsForwardingSuccessData - a model defined in OpenAPI\n\n Args:\n product (str): Represents the Crypto APIs 2.0 product which sends the callback.\n event (str): Defines the specific event, for which a callback subscription is set.\n item (CoinsForwardingSuccessDataItem):\n\n Keyword Args:\n _check_type (bool): if True, values for parameters in openapi_types\n will be type checked and a TypeError will be\n raised if the wrong type is input.\n Defaults to True\n _path_to_item (tuple/list): This is a list of keys or values to\n drill down to the model in received_data\n when deserializing a response\n _spec_property_naming (bool): True if the variable names in the input data\n are serialized names, as specified in the OpenAPI document.\n False if the variable names in the input data\n are pythonic names, e.g. snake case (default)\n _configuration (Configuration): the instance to use when\n deserializing a file_type parameter.\n If passed, type conversion is attempted\n If omitted no type conversion is done.\n _visited_composed_classes (tuple): This stores a tuple of\n classes that we have traveled through so that\n if we see that class again we will not use its\n discriminator again.\n When traveling through a discriminator, the\n composed schema that is\n is traveled through is added to this set.\n For example if Animal has a discriminator\n petType and we pass in "Dog", and the class Dog\n allOf includes Animal, we move through Animal\n once using the discriminator, and pick Dog.\n Then in Dog, we will make an instance of the\n Animal class but this time we won\'t travel\n through its discriminator because we passed in\n _visited_composed_classes = (Animal,)\n ' _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) 
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) if args: raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,)) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = (_visited_composed_classes + (self.__class__,)) self.product = product self.event = event self.item = item for (var_name, var_value) in kwargs.items(): if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)): continue setattr(self, var_name, var_value) if (var_name in self.read_only_vars): raise ApiAttributeError(f'`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate class with read only attributes.')
def _activation_summary(x): 'Helper to create summaries for activations.\n\n Creates a summary that provides a histogram of activations.\n Creates a summary that measures the sparsity of activations.\n\n Args:\n x: Tensor\n Returns:\n nothing\n ' tensor_name = re.sub(('%s_[0-9]*/' % TOWER_NAME), '', x.op.name) tf.summary.histogram((tensor_name + '/activations'), x) tf.summary.scalar((tensor_name + '/sparsity'), tf.nn.zero_fraction(x))
553,231,555,851,507,140
Helper to create summaries for activations. Creates a summary that provides a histogram of activations. Creates a summary that measures the sparsity of activations. Args: x: Tensor Returns: nothing
examples/cifar10/cifar10.py
_activation_summary
13927729580/TensorFlowOnSpark
python
def _activation_summary(x): 'Helper to create summaries for activations.\n\n Creates a summary that provides a histogram of activations.\n Creates a summary that measures the sparsity of activations.\n\n Args:\n x: Tensor\n Returns:\n nothing\n ' tensor_name = re.sub(('%s_[0-9]*/' % TOWER_NAME), , x.op.name) tf.summary.histogram((tensor_name + '/activations'), x) tf.summary.scalar((tensor_name + '/sparsity'), tf.nn.zero_fraction(x))
def _variable_on_cpu(name, shape, initializer): 'Helper to create a Variable stored on CPU memory.\n\n Args:\n name: name of the variable\n shape: list of ints\n initializer: initializer for Variable\n\n Returns:\n Variable Tensor\n ' with tf.device('/cpu:0'): dtype = (tf.float16 if FLAGS.use_fp16 else tf.float32) var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype) return var
2,365,013,275,469,490,700
Helper to create a Variable stored on CPU memory. Args: name: name of the variable shape: list of ints initializer: initializer for Variable Returns: Variable Tensor
examples/cifar10/cifar10.py
_variable_on_cpu
13927729580/TensorFlowOnSpark
python
def _variable_on_cpu(name, shape, initializer): 'Helper to create a Variable stored on CPU memory.\n\n Args:\n name: name of the variable\n shape: list of ints\n initializer: initializer for Variable\n\n Returns:\n Variable Tensor\n ' with tf.device('/cpu:0'): dtype = (tf.float16 if FLAGS.use_fp16 else tf.float32) var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype) return var
def _variable_with_weight_decay(name, shape, stddev, wd): 'Helper to create an initialized Variable with weight decay.\n\n Note that the Variable is initialized with a truncated normal distribution.\n A weight decay is added only if one is specified.\n\n Args:\n name: name of the variable\n shape: list of ints\n stddev: standard deviation of a truncated Gaussian\n wd: add L2Loss weight decay multiplied by this float. If None, weight\n decay is not added for this Variable.\n\n Returns:\n Variable Tensor\n ' dtype = (tf.float16 if FLAGS.use_fp16 else tf.float32) var = _variable_on_cpu(name, shape, tf.truncated_normal_initializer(stddev=stddev, dtype=dtype)) if (wd is not None): weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss') tf.add_to_collection('losses', weight_decay) return var
8,132,621,707,787,137,000
Helper to create an initialized Variable with weight decay. Note that the Variable is initialized with a truncated normal distribution. A weight decay is added only if one is specified. Args: name: name of the variable shape: list of ints stddev: standard deviation of a truncated Gaussian wd: add L2Loss weight decay multiplied by this float. If None, weight decay is not added for this Variable. Returns: Variable Tensor
examples/cifar10/cifar10.py
_variable_with_weight_decay
13927729580/TensorFlowOnSpark
python
def _variable_with_weight_decay(name, shape, stddev, wd): 'Helper to create an initialized Variable with weight decay.\n\n Note that the Variable is initialized with a truncated normal distribution.\n A weight decay is added only if one is specified.\n\n Args:\n name: name of the variable\n shape: list of ints\n stddev: standard deviation of a truncated Gaussian\n wd: add L2Loss weight decay multiplied by this float. If None, weight\n decay is not added for this Variable.\n\n Returns:\n Variable Tensor\n ' dtype = (tf.float16 if FLAGS.use_fp16 else tf.float32) var = _variable_on_cpu(name, shape, tf.truncated_normal_initializer(stddev=stddev, dtype=dtype)) if (wd is not None): weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss') tf.add_to_collection('losses', weight_decay) return var
def distorted_inputs(): 'Construct distorted input for CIFAR training using the Reader ops.\n\n Returns:\n images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.\n labels: Labels. 1D tensor of [batch_size] size.\n\n Raises:\n ValueError: If no data_dir\n ' if (not FLAGS.data_dir): raise ValueError('Please supply a data_dir') data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin') (images, labels) = cifar10_input.distorted_inputs(data_dir=data_dir, batch_size=FLAGS.batch_size) if FLAGS.use_fp16: images = tf.cast(images, tf.float16) labels = tf.cast(labels, tf.float16) return (images, labels)
5,244,124,816,898,738,000
Construct distorted input for CIFAR training using the Reader ops. Returns: images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size. labels: Labels. 1D tensor of [batch_size] size. Raises: ValueError: If no data_dir
examples/cifar10/cifar10.py
distorted_inputs
13927729580/TensorFlowOnSpark
python
def distorted_inputs(): 'Construct distorted input for CIFAR training using the Reader ops.\n\n Returns:\n images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.\n labels: Labels. 1D tensor of [batch_size] size.\n\n Raises:\n ValueError: If no data_dir\n ' if (not FLAGS.data_dir): raise ValueError('Please supply a data_dir') data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin') (images, labels) = cifar10_input.distorted_inputs(data_dir=data_dir, batch_size=FLAGS.batch_size) if FLAGS.use_fp16: images = tf.cast(images, tf.float16) labels = tf.cast(labels, tf.float16) return (images, labels)
def inputs(eval_data): 'Construct input for CIFAR evaluation using the Reader ops.\n\n Args:\n eval_data: bool, indicating if one should use the train or eval data set.\n\n Returns:\n images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.\n labels: Labels. 1D tensor of [batch_size] size.\n\n Raises:\n ValueError: If no data_dir\n ' if (not FLAGS.data_dir): raise ValueError('Please supply a data_dir') data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin') (images, labels) = cifar10_input.inputs(eval_data=eval_data, data_dir=data_dir, batch_size=FLAGS.batch_size) if FLAGS.use_fp16: images = tf.cast(images, tf.float16) labels = tf.cast(labels, tf.float16) return (images, labels)
5,745,744,370,990,534,000
Construct input for CIFAR evaluation using the Reader ops. Args: eval_data: bool, indicating if one should use the train or eval data set. Returns: images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size. labels: Labels. 1D tensor of [batch_size] size. Raises: ValueError: If no data_dir
examples/cifar10/cifar10.py
inputs
13927729580/TensorFlowOnSpark
python
def inputs(eval_data): 'Construct input for CIFAR evaluation using the Reader ops.\n\n Args:\n eval_data: bool, indicating if one should use the train or eval data set.\n\n Returns:\n images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.\n labels: Labels. 1D tensor of [batch_size] size.\n\n Raises:\n ValueError: If no data_dir\n ' if (not FLAGS.data_dir): raise ValueError('Please supply a data_dir') data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin') (images, labels) = cifar10_input.inputs(eval_data=eval_data, data_dir=data_dir, batch_size=FLAGS.batch_size) if FLAGS.use_fp16: images = tf.cast(images, tf.float16) labels = tf.cast(labels, tf.float16) return (images, labels)
def inference(images): 'Build the CIFAR-10 model.\n\n Args:\n images: Images returned from distorted_inputs() or inputs().\n\n Returns:\n Logits.\n ' with tf.variable_scope('conv1') as scope: kernel = _variable_with_weight_decay('weights', shape=[5, 5, 3, 64], stddev=0.05, wd=0.0) conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME') biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0)) pre_activation = tf.nn.bias_add(conv, biases) conv1 = tf.nn.relu(pre_activation, name=scope.name) _activation_summary(conv1) pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool1') norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=(0.001 / 9.0), beta=0.75, name='norm1') with tf.variable_scope('conv2') as scope: kernel = _variable_with_weight_decay('weights', shape=[5, 5, 64, 64], stddev=0.05, wd=0.0) conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME') biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1)) pre_activation = tf.nn.bias_add(conv, biases) conv2 = tf.nn.relu(pre_activation, name=scope.name) _activation_summary(conv2) norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=(0.001 / 9.0), beta=0.75, name='norm2') pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool2') with tf.variable_scope('local3') as scope: reshape = tf.reshape(pool2, [FLAGS.batch_size, (- 1)]) dim = reshape.get_shape()[1].value weights = _variable_with_weight_decay('weights', shape=[dim, 384], stddev=0.04, wd=0.004) biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1)) local3 = tf.nn.relu((tf.matmul(reshape, weights) + biases), name=scope.name) _activation_summary(local3) with tf.variable_scope('local4') as scope: weights = _variable_with_weight_decay('weights', shape=[384, 192], stddev=0.04, wd=0.004) biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1)) local4 = tf.nn.relu((tf.matmul(local3, weights) + biases), name=scope.name) 
_activation_summary(local4) with tf.variable_scope('softmax_linear') as scope: weights = _variable_with_weight_decay('weights', [192, NUM_CLASSES], stddev=(1 / 192.0), wd=0.0) biases = _variable_on_cpu('biases', [NUM_CLASSES], tf.constant_initializer(0.0)) softmax_linear = tf.add(tf.matmul(local4, weights), biases, name=scope.name) _activation_summary(softmax_linear) return softmax_linear
4,760,979,126,026,873,000
Build the CIFAR-10 model. Args: images: Images returned from distorted_inputs() or inputs(). Returns: Logits.
examples/cifar10/cifar10.py
inference
13927729580/TensorFlowOnSpark
python
def inference(images): 'Build the CIFAR-10 model.\n\n Args:\n images: Images returned from distorted_inputs() or inputs().\n\n Returns:\n Logits.\n ' with tf.variable_scope('conv1') as scope: kernel = _variable_with_weight_decay('weights', shape=[5, 5, 3, 64], stddev=0.05, wd=0.0) conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME') biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0)) pre_activation = tf.nn.bias_add(conv, biases) conv1 = tf.nn.relu(pre_activation, name=scope.name) _activation_summary(conv1) pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool1') norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=(0.001 / 9.0), beta=0.75, name='norm1') with tf.variable_scope('conv2') as scope: kernel = _variable_with_weight_decay('weights', shape=[5, 5, 64, 64], stddev=0.05, wd=0.0) conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME') biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1)) pre_activation = tf.nn.bias_add(conv, biases) conv2 = tf.nn.relu(pre_activation, name=scope.name) _activation_summary(conv2) norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=(0.001 / 9.0), beta=0.75, name='norm2') pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool2') with tf.variable_scope('local3') as scope: reshape = tf.reshape(pool2, [FLAGS.batch_size, (- 1)]) dim = reshape.get_shape()[1].value weights = _variable_with_weight_decay('weights', shape=[dim, 384], stddev=0.04, wd=0.004) biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1)) local3 = tf.nn.relu((tf.matmul(reshape, weights) + biases), name=scope.name) _activation_summary(local3) with tf.variable_scope('local4') as scope: weights = _variable_with_weight_decay('weights', shape=[384, 192], stddev=0.04, wd=0.004) biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1)) local4 = tf.nn.relu((tf.matmul(local3, weights) + biases), name=scope.name) 
_activation_summary(local4) with tf.variable_scope('softmax_linear') as scope: weights = _variable_with_weight_decay('weights', [192, NUM_CLASSES], stddev=(1 / 192.0), wd=0.0) biases = _variable_on_cpu('biases', [NUM_CLASSES], tf.constant_initializer(0.0)) softmax_linear = tf.add(tf.matmul(local4, weights), biases, name=scope.name) _activation_summary(softmax_linear) return softmax_linear
def loss(logits, labels): 'Add L2Loss to all the trainable variables.\n\n Add summary for "Loss" and "Loss/avg".\n Args:\n logits: Logits from inference().\n labels: Labels from distorted_inputs or inputs(). 1-D tensor\n of shape [batch_size]\n\n Returns:\n Loss tensor of type float.\n ' labels = tf.cast(labels, tf.int64) cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits, name='cross_entropy_per_example') cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy') tf.add_to_collection('losses', cross_entropy_mean) return tf.add_n(tf.get_collection('losses'), name='total_loss')
2,034,962,917,631,843,600
Add L2Loss to all the trainable variables. Add summary for "Loss" and "Loss/avg". Args: logits: Logits from inference(). labels: Labels from distorted_inputs or inputs(). 1-D tensor of shape [batch_size] Returns: Loss tensor of type float.
examples/cifar10/cifar10.py
loss
13927729580/TensorFlowOnSpark
python
def loss(logits, labels): 'Add L2Loss to all the trainable variables.\n\n Add summary for "Loss" and "Loss/avg".\n Args:\n logits: Logits from inference().\n labels: Labels from distorted_inputs or inputs(). 1-D tensor\n of shape [batch_size]\n\n Returns:\n Loss tensor of type float.\n ' labels = tf.cast(labels, tf.int64) cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits, name='cross_entropy_per_example') cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy') tf.add_to_collection('losses', cross_entropy_mean) return tf.add_n(tf.get_collection('losses'), name='total_loss')
def _add_loss_summaries(total_loss): 'Add summaries for losses in CIFAR-10 model.\n\n Generates moving average for all losses and associated summaries for\n visualizing the performance of the network.\n\n Args:\n total_loss: Total loss from loss().\n Returns:\n loss_averages_op: op for generating moving averages of losses.\n ' loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg') losses = tf.get_collection('losses') loss_averages_op = loss_averages.apply((losses + [total_loss])) for l in (losses + [total_loss]): tf.summary.scalar((l.op.name + ' (raw)'), l) tf.summary.scalar(l.op.name, loss_averages.average(l)) return loss_averages_op
3,010,989,842,750,706,000
Add summaries for losses in CIFAR-10 model. Generates moving average for all losses and associated summaries for visualizing the performance of the network. Args: total_loss: Total loss from loss(). Returns: loss_averages_op: op for generating moving averages of losses.
examples/cifar10/cifar10.py
_add_loss_summaries
13927729580/TensorFlowOnSpark
python
def _add_loss_summaries(total_loss): 'Add summaries for losses in CIFAR-10 model.\n\n Generates moving average for all losses and associated summaries for\n visualizing the performance of the network.\n\n Args:\n total_loss: Total loss from loss().\n Returns:\n loss_averages_op: op for generating moving averages of losses.\n ' loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg') losses = tf.get_collection('losses') loss_averages_op = loss_averages.apply((losses + [total_loss])) for l in (losses + [total_loss]): tf.summary.scalar((l.op.name + ' (raw)'), l) tf.summary.scalar(l.op.name, loss_averages.average(l)) return loss_averages_op
def train(total_loss, global_step): 'Train CIFAR-10 model.\n\n Create an optimizer and apply to all trainable variables. Add moving\n average for all trainable variables.\n\n Args:\n total_loss: Total loss from loss().\n global_step: Integer Variable counting the number of training steps\n processed.\n Returns:\n train_op: op for training.\n ' num_batches_per_epoch = (NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size) decay_steps = int((num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)) lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE, global_step, decay_steps, LEARNING_RATE_DECAY_FACTOR, staircase=True) tf.summary.scalar('learning_rate', lr) loss_averages_op = _add_loss_summaries(total_loss) with tf.control_dependencies([loss_averages_op]): opt = tf.train.GradientDescentOptimizer(lr) grads = opt.compute_gradients(total_loss) apply_gradient_op = opt.apply_gradients(grads, global_step=global_step) for var in tf.trainable_variables(): tf.summary.histogram(var.op.name, var) for (grad, var) in grads: if (grad is not None): tf.summary.histogram((var.op.name + '/gradients'), grad) variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step) variables_averages_op = variable_averages.apply(tf.trainable_variables()) with tf.control_dependencies([apply_gradient_op, variables_averages_op]): train_op = tf.no_op(name='train') return train_op
-1,121,517,191,392,497,900
Train CIFAR-10 model. Create an optimizer and apply to all trainable variables. Add moving average for all trainable variables. Args: total_loss: Total loss from loss(). global_step: Integer Variable counting the number of training steps processed. Returns: train_op: op for training.
examples/cifar10/cifar10.py
train
13927729580/TensorFlowOnSpark
python
def train(total_loss, global_step): 'Train CIFAR-10 model.\n\n Create an optimizer and apply to all trainable variables. Add moving\n average for all trainable variables.\n\n Args:\n total_loss: Total loss from loss().\n global_step: Integer Variable counting the number of training steps\n processed.\n Returns:\n train_op: op for training.\n ' num_batches_per_epoch = (NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size) decay_steps = int((num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)) lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE, global_step, decay_steps, LEARNING_RATE_DECAY_FACTOR, staircase=True) tf.summary.scalar('learning_rate', lr) loss_averages_op = _add_loss_summaries(total_loss) with tf.control_dependencies([loss_averages_op]): opt = tf.train.GradientDescentOptimizer(lr) grads = opt.compute_gradients(total_loss) apply_gradient_op = opt.apply_gradients(grads, global_step=global_step) for var in tf.trainable_variables(): tf.summary.histogram(var.op.name, var) for (grad, var) in grads: if (grad is not None): tf.summary.histogram((var.op.name + '/gradients'), grad) variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step) variables_averages_op = variable_averages.apply(tf.trainable_variables()) with tf.control_dependencies([apply_gradient_op, variables_averages_op]): train_op = tf.no_op(name='train') return train_op
def maybe_download_and_extract(): "Download and extract the tarball from Alex's website." dest_directory = FLAGS.data_dir if (not os.path.exists(dest_directory)): os.makedirs(dest_directory) filename = DATA_URL.split('/')[(- 1)] filepath = os.path.join(dest_directory, filename) if (not os.path.exists(filepath)): def _progress(count, block_size, total_size): sys.stdout.write(('\r>> Downloading %s %.1f%%' % (filename, ((float((count * block_size)) / float(total_size)) * 100.0)))) sys.stdout.flush() (filepath, _) = urllib.request.urlretrieve(DATA_URL, filepath, _progress) print() statinfo = os.stat(filepath) print('Successfully downloaded', filename, statinfo.st_size, 'bytes.') extracted_dir_path = os.path.join(dest_directory, 'cifar-10-batches-bin') if (not os.path.exists(extracted_dir_path)): tarfile.open(filepath, 'r:gz').extractall(dest_directory)
-304,177,207,173,734,000
Download and extract the tarball from Alex's website.
examples/cifar10/cifar10.py
maybe_download_and_extract
13927729580/TensorFlowOnSpark
python
def maybe_download_and_extract(): dest_directory = FLAGS.data_dir if (not os.path.exists(dest_directory)): os.makedirs(dest_directory) filename = DATA_URL.split('/')[(- 1)] filepath = os.path.join(dest_directory, filename) if (not os.path.exists(filepath)): def _progress(count, block_size, total_size): sys.stdout.write(('\r>> Downloading %s %.1f%%' % (filename, ((float((count * block_size)) / float(total_size)) * 100.0)))) sys.stdout.flush() (filepath, _) = urllib.request.urlretrieve(DATA_URL, filepath, _progress) print() statinfo = os.stat(filepath) print('Successfully downloaded', filename, statinfo.st_size, 'bytes.') extracted_dir_path = os.path.join(dest_directory, 'cifar-10-batches-bin') if (not os.path.exists(extracted_dir_path)): tarfile.open(filepath, 'r:gz').extractall(dest_directory)
@unavailable((not _has_matplotlib), 'matplotlib') def plot_ellipsoid_3D(p, q, ax, n_points=100): ' Plot an ellipsoid in 3D\n\n Based on\n https://stackoverflow.com/questions/7819498/plotting-ellipsoid-with-matplotlib\n\n TODO: Untested!\n\n Parameters\n ----------\n p: 3x1 array[float]\n Center of the ellipsoid\n q: 3x3 array[float]\n Shape matrix of the ellipsoid\n ax: matplotlib.Axes object\n Ax on which to plot the ellipsoid\n\n Returns\n -------\n ax: matplotlib.Axes object\n The Ax containing the ellipsoid\n\n ' assert (np.shape(p) == (3, 1)), 'p needs to be a 3x1 vector' assert (np.shape(q) == (3, 3)), 'q needs to be a spd 3x3 matrix' assert np.allclose(q, (0.5 * (q + q.T)), 'q needs to be spd') (U, s, rotation) = linalg.svd(q) assert np.all((s > 0)), 'q needs to be positive definite' radii = (1.0 / np.sqrt(s)) u = np.linspace(0.0, (2.0 * np.pi), n_points) v = np.linspace(0.0, np.pi, n_points) x = (radii[0] * np.outer(np.cos(u), np.sin(v))) y = (radii[1] * np.outer(np.sin(u), np.sin(v))) z = (radii[2] * np.outer(np.ones_like(u), np.cos(v))) for i in range(len(x)): for j in range(len(x)): [x[(i, j)], y[(i, j)], z[(i, j)]] = (np.dot([x[(i, j)], y[(i, j)], z[(i, j)]], rotation) + center) ax.plot_wireframe(x, y, z, rstride=4, cstride=4, color='b', alpha=0.2) return ax
-1,664,333,436,179,161,300
Plot an ellipsoid in 3D Based on https://stackoverflow.com/questions/7819498/plotting-ellipsoid-with-matplotlib TODO: Untested! Parameters ---------- p: 3x1 array[float] Center of the ellipsoid q: 3x3 array[float] Shape matrix of the ellipsoid ax: matplotlib.Axes object Ax on which to plot the ellipsoid Returns ------- ax: matplotlib.Axes object The Ax containing the ellipsoid
safe_exploration/visualization/utils_visualization.py
plot_ellipsoid_3D
Pathetiue/safe-exploration
python
@unavailable((not _has_matplotlib), 'matplotlib') def plot_ellipsoid_3D(p, q, ax, n_points=100): ' Plot an ellipsoid in 3D\n\n Based on\n https://stackoverflow.com/questions/7819498/plotting-ellipsoid-with-matplotlib\n\n TODO: Untested!\n\n Parameters\n ----------\n p: 3x1 array[float]\n Center of the ellipsoid\n q: 3x3 array[float]\n Shape matrix of the ellipsoid\n ax: matplotlib.Axes object\n Ax on which to plot the ellipsoid\n\n Returns\n -------\n ax: matplotlib.Axes object\n The Ax containing the ellipsoid\n\n ' assert (np.shape(p) == (3, 1)), 'p needs to be a 3x1 vector' assert (np.shape(q) == (3, 3)), 'q needs to be a spd 3x3 matrix' assert np.allclose(q, (0.5 * (q + q.T)), 'q needs to be spd') (U, s, rotation) = linalg.svd(q) assert np.all((s > 0)), 'q needs to be positive definite' radii = (1.0 / np.sqrt(s)) u = np.linspace(0.0, (2.0 * np.pi), n_points) v = np.linspace(0.0, np.pi, n_points) x = (radii[0] * np.outer(np.cos(u), np.sin(v))) y = (radii[1] * np.outer(np.sin(u), np.sin(v))) z = (radii[2] * np.outer(np.ones_like(u), np.cos(v))) for i in range(len(x)): for j in range(len(x)): [x[(i, j)], y[(i, j)], z[(i, j)]] = (np.dot([x[(i, j)], y[(i, j)], z[(i, j)]], rotation) + center) ax.plot_wireframe(x, y, z, rstride=4, cstride=4, color='b', alpha=0.2) return ax
@unavailable((not _has_matplotlib), 'matplotlib') def plot_ellipsoid_2D(p, q, ax, n_points=100, color='r'): ' Plot an ellipsoid in 2D\n\n TODO: Untested!\n\n Parameters\n ----------\n p: 3x1 array[float]\n Center of the ellipsoid\n q: 3x3 array[float]\n Shape matrix of the ellipsoid\n ax: matplotlib.Axes object\n Ax on which to plot the ellipsoid\n\n Returns\n -------\n ax: matplotlib.Axes object\n The Ax containing the ellipsoid\n ' plt.sca(ax) r = nLa.cholesky(q).T t = np.linspace(0, (2 * np.pi), n_points) z = [np.cos(t), np.sin(t)] ellipse = (np.dot(r, z) + p) (handle,) = ax.plot(ellipse[0, :], ellipse[1, :], color) return (ax, handle)
-6,846,811,146,890,000,000
Plot an ellipsoid in 2D TODO: Untested! Parameters ---------- p: 3x1 array[float] Center of the ellipsoid q: 3x3 array[float] Shape matrix of the ellipsoid ax: matplotlib.Axes object Ax on which to plot the ellipsoid Returns ------- ax: matplotlib.Axes object The Ax containing the ellipsoid
safe_exploration/visualization/utils_visualization.py
plot_ellipsoid_2D
Pathetiue/safe-exploration
python
@unavailable((not _has_matplotlib), 'matplotlib') def plot_ellipsoid_2D(p, q, ax, n_points=100, color='r'): ' Plot an ellipsoid in 2D\n\n TODO: Untested!\n\n Parameters\n ----------\n p: 3x1 array[float]\n Center of the ellipsoid\n q: 3x3 array[float]\n Shape matrix of the ellipsoid\n ax: matplotlib.Axes object\n Ax on which to plot the ellipsoid\n\n Returns\n -------\n ax: matplotlib.Axes object\n The Ax containing the ellipsoid\n ' plt.sca(ax) r = nLa.cholesky(q).T t = np.linspace(0, (2 * np.pi), n_points) z = [np.cos(t), np.sin(t)] ellipse = (np.dot(r, z) + p) (handle,) = ax.plot(ellipse[0, :], ellipse[1, :], color) return (ax, handle)
def __init__(self, p): 'Defines the modulus p which must be a prime\n ' self.F = self self.p = gmpy.mpz(p) self.char = self.p self.q = (self.p + 1) assert gmpy.is_prime(p) self.rep = None self.g = None '\n g is a random quadratic residue used to compute square roots and it is\n initialized the first time a square root is computed\n ' self.to_fingerprint = ['p'] self.to_export = {'fingerprint': [], 'value': ['p']} super(Field, self).__init__()
3,367,812,278,402,845,000
Defines the modulus p which must be a prime
mathTools/field.py
__init__
ecuvelier/PPAT
python
def __init__(self, p): '\n ' self.F = self self.p = gmpy.mpz(p) self.char = self.p self.q = (self.p + 1) assert gmpy.is_prime(p) self.rep = None self.g = None '\n g is a random quadratic residue used to compute square roots and it is\n initialized the first time a square root is computed\n ' self.to_fingerprint = ['p'] self.to_export = {'fingerprint': [], 'value': ['p']} super(Field, self).__init__()
def one(self): 'unit element for multiplication' return FieldElem(1, self)
-5,955,065,089,090,498,000
unit element for multiplication
mathTools/field.py
one
ecuvelier/PPAT
python
def one(self): return FieldElem(1, self)
def zero(self): 'unit element for addition' return FieldElem(0, self)
3,455,417,634,989,473,300
unit element for addition
mathTools/field.py
zero
ecuvelier/PPAT
python
def zero(self): return FieldElem(0, self)
def elem(self, x): ' return an element of value x\n ' if isinstance(x, FieldElem): assert (x.F == self) return x m = gmpy.mpz(1) assert (isinstance(x, int) or isinstance(x, long) or (type(x) == type(m))) return FieldElem(x, self)
-8,415,939,500,760,490,000
return an element of value x
mathTools/field.py
elem
ecuvelier/PPAT
python
def elem(self, x): ' \n ' if isinstance(x, FieldElem): assert (x.F == self) return x m = gmpy.mpz(1) assert (isinstance(x, int) or isinstance(x, long) or (type(x) == type(m))) return FieldElem(x, self)
def random(self, low=1, high=None): ' Return a random element of the Field\n ' if (high == None): high = int((self.p - 1)) rand = randint(low, high) return self.elem(rand)
-6,226,670,074,696,561,000
Return a random element of the Field
mathTools/field.py
random
ecuvelier/PPAT
python
def random(self, low=1, high=None): ' \n ' if (high == None): high = int((self.p - 1)) rand = randint(low, high) return self.elem(rand)
def __eq__(self, other): 'testing if we are working in the same field' try: return (self.p == other.p) except: return False
-4,467,265,234,563,478,500
testing if we are working in the same field
mathTools/field.py
__eq__
ecuvelier/PPAT
python
def __eq__(self, other): try: return (self.p == other.p) except: return False
def add(self, a, b): '\n field operation: addition mod p\n ' return FieldElem(((a.val + b.val) % self.p), self)
-6,150,732,098,473,979,000
field operation: addition mod p
mathTools/field.py
add
ecuvelier/PPAT
python
def add(self, a, b): '\n \n ' return FieldElem(((a.val + b.val) % self.p), self)
def sub(self, a, b): '\n field operation: substraction mod p\n ' return FieldElem(((a.val - b.val) % self.p), self)
-921,403,297,015,566,600
field operation: substraction mod p
mathTools/field.py
sub
ecuvelier/PPAT
python
def sub(self, a, b): '\n \n ' return FieldElem(((a.val - b.val) % self.p), self)
def neg(self, a): '\n field operation: opposite mod p\n ' return FieldElem(((self.p - a.val) % self.p), self)
6,358,035,490,914,622,000
field operation: opposite mod p
mathTools/field.py
neg
ecuvelier/PPAT
python
def neg(self, a): '\n \n ' return FieldElem(((self.p - a.val) % self.p), self)
def mul(self, a, b): '\n field operation: multiplication of field elements\n ' '\n if isinstance(a,FieldElem) and isinstance(b, FieldElem) and not a.F == b.F :\n raise Exception("multiplication between elements of different fields")\n ' if (not isinstance(b, FieldElem)): if (b < 0): return self.smul((- a), (- b)) return self.smul(a, b) else: return self.pmul(a, b)
-6,066,617,828,850,303,000
field operation: multiplication of field elements
mathTools/field.py
mul
ecuvelier/PPAT
python
def mul(self, a, b): '\n \n ' '\n if isinstance(a,FieldElem) and isinstance(b, FieldElem) and not a.F == b.F :\n raise Exception("multiplication between elements of different fields")\n ' if (not isinstance(b, FieldElem)): if (b < 0): return self.smul((- a), (- b)) return self.smul(a, b) else: return self.pmul(a, b)
def smul(self, a, b): ' Return a*b where a or b is scalar\n ' if (not isinstance(b, FieldElem)): return FieldElem(((gmpy.mpz(b) * a.val) % self.p), self) else: return self.smul(b, a)
4,092,931,305,875,793,000
Return a*b where a or b is scalar
mathTools/field.py
smul
ecuvelier/PPAT
python
def smul(self, a, b): ' \n ' if (not isinstance(b, FieldElem)): return FieldElem(((gmpy.mpz(b) * a.val) % self.p), self) else: return self.smul(b, a)
def sm(self, b, a): ' Quick multiplication between a field element a and a scalar b\n ' return FieldElem(((gmpy.mpz(b) * a.val) % self.p), self)
1,369,684,940,761,127,000
Quick multiplication between a field element a and a scalar b
mathTools/field.py
sm
ecuvelier/PPAT
python
def sm(self, b, a): ' \n ' return FieldElem(((gmpy.mpz(b) * a.val) % self.p), self)
def pmul(self, a, b): ' product between two field element in Fp\n ' return FieldElem(((a.val * b.val) % self.p), self)
-1,657,878,191,410,303,200
product between two field element in Fp
mathTools/field.py
pmul
ecuvelier/PPAT
python
def pmul(self, a, b): ' \n ' return FieldElem(((a.val * b.val) % self.p), self)
def dbleAndAdd(self, P, Pp, n): 'return n*P using double and add technique' if (n == 0): return self.zero() if (n == 1): return P elif ((n % 2) == 1): Q = self.dbleAndAdd(P, Pp, ((n - 1) / 2)) return ((P + Q) + Q) elif ((n % 2) == 0): Q = self.dbleAndAdd(P, Pp, (n / 2)) return (Q + Q)
304,071,089,569,589,100
return n*P using double and add technique
mathTools/field.py
dbleAndAdd
ecuvelier/PPAT
python
def dbleAndAdd(self, P, Pp, n): if (n == 0): return self.zero() if (n == 1): return P elif ((n % 2) == 1): Q = self.dbleAndAdd(P, Pp, ((n - 1) / 2)) return ((P + Q) + Q) elif ((n % 2) == 0): Q = self.dbleAndAdd(P, Pp, (n / 2)) return (Q + Q)
def powop(self, a, b): 'return a**b' m = gmpy.mpz(1) 'exponentiation by a scalar' if ((not isinstance(b, int)) and (not isinstance(b, long)) and (not (type(b) == type(m)))): raise Exception('Exponentation by a non integer, long or mpz') c = b if ((c > (self.char - 1)) or (c < 0)): c = (b % (self.char - 1)) if (c == 0): assert (not ((a.val % self.char) == 0)) return self.one() elif (c == 1): return a else: return self.sqrtAndMultply(a, a, c)
-2,940,484,699,847,163,000
return a**b
mathTools/field.py
powop
ecuvelier/PPAT
python
def powop(self, a, b): m = gmpy.mpz(1) 'exponentiation by a scalar' if ((not isinstance(b, int)) and (not isinstance(b, long)) and (not (type(b) == type(m)))): raise Exception('Exponentation by a non integer, long or mpz') c = b if ((c > (self.char - 1)) or (c < 0)): c = (b % (self.char - 1)) if (c == 0): assert (not ((a.val % self.char) == 0)) return self.one() elif (c == 1): return a else: return self.sqrtAndMultply(a, a, c)
def sqrtAndMultply(self, P, Pp, n): 'return P**n using square and multiply technique' if (n == 0): return self.one() elif (n == 1): return P elif ((n % 2) == 1): Q = self.sqrtAndMultply(P, Pp, ((n - 1) / 2)) return (P * self.square(Q)) elif ((n % 2) == 0): Q = self.sqrtAndMultply(P, Pp, (n / 2)) return self.square(Q)
781,381,006,290,647,300
return P**n using square and multiply technique
mathTools/field.py
sqrtAndMultply
ecuvelier/PPAT
python
def sqrtAndMultply(self, P, Pp, n): if (n == 0): return self.one() elif (n == 1): return P elif ((n % 2) == 1): Q = self.sqrtAndMultply(P, Pp, ((n - 1) / 2)) return (P * self.square(Q)) elif ((n % 2) == 0): Q = self.sqrtAndMultply(P, Pp, (n / 2)) return self.square(Q)
def square(self, a): '\n This method returns the square of a\n ' return FieldElem(pow(a.val, 2, self.p), self)
-7,983,734,526,397,552,000
This method returns the square of a
mathTools/field.py
square
ecuvelier/PPAT
python
def square(self, a): '\n \n ' return FieldElem(pow(a.val, 2, self.p), self)
def findnonresidue(self): '\n find a random non quadratic residue in the Field F,\n that is, find g that is not a square in F, this is\n needed to compute square roots\n ' g = self.random() while g.isquadres(): g = self.random() return g
1,602,797,222,163,183,900
find a random non quadratic residue in the Field F, that is, find g that is not a square in F, this is needed to compute square roots
mathTools/field.py
findnonresidue
ecuvelier/PPAT
python
def findnonresidue(self): '\n find a random non quadratic residue in the Field F,\n that is, find g that is not a square in F, this is\n needed to compute square roots\n ' g = self.random() while g.isquadres(): g = self.random() return g
def __init__(self, val, F): 'Creating a new field element.\n ' self.F = F self.val = gmpy.mpz(val) self.poly = polynom(self.F, [self])
5,470,238,425,599,405,000
Creating a new field element.
mathTools/field.py
__init__
ecuvelier/PPAT
python
def __init__(self, val, F): '\n ' self.F = F self.val = gmpy.mpz(val) self.poly = polynom(self.F, [self])
def isquadres(self): ' This method return True if the element is a quadratic residue mod q\n different than zero\n it returns False otherwhise\n ' if (self + self.F.zero()).iszero(): return False else: c = (self ** ((self.F.q - 1) / 2)) return (c == self.F.one())
-1,876,804,704,010,010,000
This method return True if the element is a quadratic residue mod q different than zero it returns False otherwhise
mathTools/field.py
isquadres
ecuvelier/PPAT
python
def isquadres(self): ' This method return True if the element is a quadratic residue mod q\n different than zero\n it returns False otherwhise\n ' if (self + self.F.zero()).iszero(): return False else: c = (self ** ((self.F.q - 1) / 2)) return (c == self.F.one())
def squareroot(self): ' This method returns the positive square root of\n an element of the field\n using the Tonelli-Shanks algorithm\n\n Carefull : if the element has no square root, the method does not\n check this case and raises an error. Verification has to be done\n before calling the method.\n ' g = self.F.g if (g == None): g = self.F.findnonresidue() self.F.g = g q = self.F.q s = 0 t = (self.F.q - 1) while ((t % 2) == 0): s = (s + 1) t = (t / 2) e = 0 for i in range(2, (s + 1)): b = (2 ** (i - 1)) b1 = (b * 2) c = ((self * (g ** (- e))) ** ((q - 1) / b1)) if (not (c == self.F.one())): e = (e + b) h = (self * (g ** (- e))) b = ((g ** (e / 2)) * (h ** ((t + 1) / 2))) assert ((b ** 2) == self) return b
-3,225,873,158,965,586,000
This method returns the positive square root of an element of the field using the Tonelli-Shanks algorithm Carefull : if the element has no square root, the method does not check this case and raises an error. Verification has to be done before calling the method.
mathTools/field.py
squareroot
ecuvelier/PPAT
python
def squareroot(self): ' This method returns the positive square root of\n an element of the field\n using the Tonelli-Shanks algorithm\n\n Carefull : if the element has no square root, the method does not\n check this case and raises an error. Verification has to be done\n before calling the method.\n ' g = self.F.g if (g == None): g = self.F.findnonresidue() self.F.g = g q = self.F.q s = 0 t = (self.F.q - 1) while ((t % 2) == 0): s = (s + 1) t = (t / 2) e = 0 for i in range(2, (s + 1)): b = (2 ** (i - 1)) b1 = (b * 2) c = ((self * (g ** (- e))) ** ((q - 1) / b1)) if (not (c == self.F.one())): e = (e + b) h = (self * (g ** (- e))) b = ((g ** (e / 2)) * (h ** ((t + 1) / 2))) assert ((b ** 2) == self) return b
def __init__(self, F, irpoly, g=None, rep=None): "Define the base Field or extension Field and the irreducible polynomial\n F is the base field on top of which the extension\n field is built\n irpoly is the irreducible polynomial used to build\n the extension field as F/irpoly\n g is a non quadratic residue used to compute square\n roots, if it is set to None, computing a square root\n will initialize g\n rep is the representation of the root of irpoly\n (note that letter 'A' is reserved for the Complex extension field)\n " self.F = F self.irpoly = irpoly self.deg = len(irpoly.coef) assert (self.deg > 0) self.q = (self.F.q ** (self.deg - 1)) self.tabular = self.table() if (rep == None): self.rep = rd.choice(['B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K', 'L']) else: self.rep = rep self.char = F.char self.primefield = gmpy.is_prime(self.char) self.g = g self.to_fingerprint = ['F', 'irpoly'] self.to_export = {'fingerprint': [], 'value': ['F', 'irpoly']}
3,613,551,871,637,081,600
Define the base Field or extension Field and the irreducible polynomial F is the base field on top of which the extension field is built irpoly is the irreducible polynomial used to build the extension field as F/irpoly g is a non quadratic residue used to compute square roots, if it is set to None, computing a square root will initialize g rep is the representation of the root of irpoly (note that letter 'A' is reserved for the Complex extension field)
mathTools/field.py
__init__
ecuvelier/PPAT
python
def __init__(self, F, irpoly, g=None, rep=None): "Define the base Field or extension Field and the irreducible polynomial\n F is the base field on top of which the extension\n field is built\n irpoly is the irreducible polynomial used to build\n the extension field as F/irpoly\n g is a non quadratic residue used to compute square\n roots, if it is set to None, computing a square root\n will initialize g\n rep is the representation of the root of irpoly\n (note that letter 'A' is reserved for the Complex extension field)\n " self.F = F self.irpoly = irpoly self.deg = len(irpoly.coef) assert (self.deg > 0) self.q = (self.F.q ** (self.deg - 1)) self.tabular = self.table() if (rep == None): self.rep = rd.choice(['B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K', 'L']) else: self.rep = rep self.char = F.char self.primefield = gmpy.is_prime(self.char) self.g = g self.to_fingerprint = ['F', 'irpoly'] self.to_export = {'fingerprint': [], 'value': ['F', 'irpoly']}
def one(self): 'unit element for multiplication' One = ([self.F.zero()] * (self.deg - 1)) One[(self.deg - 2)] = self.F.one() return ExtensionFieldElem(self, polynom(self.F, One))
8,906,210,674,035,437,000
unit element for multiplication
mathTools/field.py
one
ecuvelier/PPAT
python
def one(self): One = ([self.F.zero()] * (self.deg - 1)) One[(self.deg - 2)] = self.F.one() return ExtensionFieldElem(self, polynom(self.F, One))
def zero(self): 'unit element for addition' Zero = ([self.F.zero()] * (self.deg - 1)) return ExtensionFieldElem(self, polynom(self.F, Zero))
-6,735,012,592,271,312,000
unit element for addition
mathTools/field.py
zero
ecuvelier/PPAT
python
def zero(self): Zero = ([self.F.zero()] * (self.deg - 1)) return ExtensionFieldElem(self, polynom(self.F, Zero))
def unit(self): ' root of the irreducible polynomial\n e.g. return element 1*A+0 (or the complex value i) if the irpoly is X**2+1\n ' I = self.zero() I.poly.coef[(- 2)] = self.F.one() return I
8,046,764,757,817,881,000
root of the irreducible polynomial e.g. return element 1*A+0 (or the complex value i) if the irpoly is X**2+1
mathTools/field.py
unit
ecuvelier/PPAT
python
def unit(self): ' root of the irreducible polynomial\n e.g. return element 1*A+0 (or the complex value i) if the irpoly is X**2+1\n ' I = self.zero() I.poly.coef[(- 2)] = self.F.one() return I
def elem(self, x): ' Provided that x belongs to F, return an element of the extension field\n of value x\n ' P = self.zero() P.poly.coef[(- 1)] = x return P
7,193,894,804,630,438,000
Provided that x belongs to F, return an element of the extension field of value x
mathTools/field.py
elem
ecuvelier/PPAT
python
def elem(self, x): ' Provided that x belongs to F, return an element of the extension field\n of value x\n ' P = self.zero() P.poly.coef[(- 1)] = x return P
def random(self): ' Return a random element of the Extension Field\n ' polycoef = ([0] * (self.deg - 1)) for i in range((self.deg - 1)): polycoef[i] = self.F.random() poly = polynom(self.F, polycoef) return ExtensionFieldElem(self, poly)
2,462,468,952,993,113,000
Return a random element of the Extension Field
mathTools/field.py
random
ecuvelier/PPAT
python
def random(self): ' \n ' polycoef = ([0] * (self.deg - 1)) for i in range((self.deg - 1)): polycoef[i] = self.F.random() poly = polynom(self.F, polycoef) return ExtensionFieldElem(self, poly)
def __eq__(self, other): 'testing if we are working in the same extension field' try: return ((self.F == other.F) and (self.irpoly == other.irpoly)) except: return False
-6,034,864,863,400,111,000
testing if we are working in the same extension field
mathTools/field.py
__eq__
ecuvelier/PPAT
python
def __eq__(self, other): try: return ((self.F == other.F) and (self.irpoly == other.irpoly)) except: return False
def add(self, a, b): '\n field operation: addition of polynomial > addition of coefficients in the appropriate field\n ' if (not (a.deg == b.deg)): a = self.reduc(a) b = self.reduc(b) polysum = ([0] * a.deg) for i in range(a.deg): polysum[i] = (a.poly.coef[i] + b.poly.coef[i]) P = polynom(self.F, polysum) return ExtensionFieldElem(self, P)
-4,685,965,467,103,141,000
field operation: addition of polynomial > addition of coefficients in the appropriate field
mathTools/field.py
add
ecuvelier/PPAT
python
def add(self, a, b): '\n \n ' if (not (a.deg == b.deg)): a = self.reduc(a) b = self.reduc(b) polysum = ([0] * a.deg) for i in range(a.deg): polysum[i] = (a.poly.coef[i] + b.poly.coef[i]) P = polynom(self.F, polysum) return ExtensionFieldElem(self, P)
def sub(self, a, b): '\n field operation: substraction of polynomials > substraction of each coefficient in the appropriate field\n ' if (not (a.deg == b.deg)): a = self.reduc(a) b = self.reduc(b) c = self.neg(b) return self.add(a, c)
-7,821,902,978,157,330,000
field operation: substraction of polynomials > substraction of each coefficient in the appropriate field
mathTools/field.py
sub
ecuvelier/PPAT
python
def sub(self, a, b): '\n \n ' if (not (a.deg == b.deg)): a = self.reduc(a) b = self.reduc(b) c = self.neg(b) return self.add(a, c)
def neg(self, a): '\n field operation: opposite of a polynomial > opposite of each coefficient in appropriate field\n ' ap = ([0] * a.deg) for i in range(a.deg): ap[i] = (- a.poly.coef[i]) P = polynom(self.F, ap) return ExtensionFieldElem(self, P)
-7,593,053,574,250,158,000
field operation: opposite of a polynomial > opposite of each coefficient in appropriate field
mathTools/field.py
neg
ecuvelier/PPAT
python
def neg(self, a): '\n \n ' ap = ([0] * a.deg) for i in range(a.deg): ap[i] = (- a.poly.coef[i]) P = polynom(self.F, ap) return ExtensionFieldElem(self, P)
def smul(self, a, b): ' Return a*b where a or b is scalar\n ' if (not isinstance(b, FieldElem)): A = a.poly.coef Pc = ([0] * len(A)) for i in range(len(Pc)): Pc[i] = (A[i] * gmpy.mpz(b)) return ExtensionFieldElem(self, polynom(self.F, Pc)) else: return self.smul(b, a)
-2,841,629,317,917,324,300
Return a*b where a or b is scalar
mathTools/field.py
smul
ecuvelier/PPAT
python
def smul(self, a, b): ' \n ' if (not isinstance(b, FieldElem)): A = a.poly.coef Pc = ([0] * len(A)) for i in range(len(Pc)): Pc[i] = (A[i] * gmpy.mpz(b)) return ExtensionFieldElem(self, polynom(self.F, Pc)) else: return self.smul(b, a)
def pmul(self, a, b): 'Multiplication between polynomials\n ' if (not (a.deg == b.deg)): a = self.reduc(a) b = self.reduc(b) A = a.poly.coef B = b.poly.coef k = (self.deg - 1) if ((k == 2) and (self.F.rep == 'A')): (a0, a1, b0, b1) = (A[0].val, A[1].val, B[0].val, B[1].val) p = self.char v0 = (a0 * b0) v1 = (a1 * b1) c0 = (((((a0 + a1) * (b0 + b1)) - v0) - v1) % p) c1 = ((v1 - v0) % p) c0e = FieldElem(c0, self.F) c1e = FieldElem(c1, self.F) cp = polynom(self.F, [c0e, c1e]) C = ExtensionFieldElem(self, cp) return C elif (k == 2): a0 = A[0] a1 = A[1] b0 = B[0] b1 = B[1] beta = (- self.irpoly.coef[(- 1)]) v0 = self.F.pmul(a0, b0) v1 = self.F.pmul(a1, b1) c0 = ((self.F.pmul((a0 + a1), (b0 + b1)) - v0) - v1) c1 = (v1 + self.F.pmul(v0, beta)) cp = polynom(self.F, [c0, c1]) C = ExtensionFieldElem(self, cp) return C elif (k == 3): (a0, a1, a2) = A (b0, b1, b2) = B beta = (- self.irpoly.coef[(- 1)]) (v0, v1, v2) = (self.F.pmul(a0, b0), self.F.pmul(a1, b1), self.F.pmul(a2, b2)) c0 = (((self.F.pmul((a0 + a2), (b0 + b2)) - v0) + v1) - v2) c1 = (((self.F.pmul((a2 + a1), (b2 + b1)) - v2) - v1) + self.F.pmul(beta, v0)) c2 = (v2 + self.F.pmul(beta, ((self.F.pmul((a1 + a0), (b1 + b0)) - v1) - v0))) cp = polynom(self.F, [c0, c1, c2]) C = ExtensionFieldElem(self, cp) return C else: prod = convolve(A, B) return self.reduc2(prod)
-429,399,036,203,879,040
Multiplication between polynomials
mathTools/field.py
pmul
ecuvelier/PPAT
python
def pmul(self, a, b): '\n ' if (not (a.deg == b.deg)): a = self.reduc(a) b = self.reduc(b) A = a.poly.coef B = b.poly.coef k = (self.deg - 1) if ((k == 2) and (self.F.rep == 'A')): (a0, a1, b0, b1) = (A[0].val, A[1].val, B[0].val, B[1].val) p = self.char v0 = (a0 * b0) v1 = (a1 * b1) c0 = (((((a0 + a1) * (b0 + b1)) - v0) - v1) % p) c1 = ((v1 - v0) % p) c0e = FieldElem(c0, self.F) c1e = FieldElem(c1, self.F) cp = polynom(self.F, [c0e, c1e]) C = ExtensionFieldElem(self, cp) return C elif (k == 2): a0 = A[0] a1 = A[1] b0 = B[0] b1 = B[1] beta = (- self.irpoly.coef[(- 1)]) v0 = self.F.pmul(a0, b0) v1 = self.F.pmul(a1, b1) c0 = ((self.F.pmul((a0 + a1), (b0 + b1)) - v0) - v1) c1 = (v1 + self.F.pmul(v0, beta)) cp = polynom(self.F, [c0, c1]) C = ExtensionFieldElem(self, cp) return C elif (k == 3): (a0, a1, a2) = A (b0, b1, b2) = B beta = (- self.irpoly.coef[(- 1)]) (v0, v1, v2) = (self.F.pmul(a0, b0), self.F.pmul(a1, b1), self.F.pmul(a2, b2)) c0 = (((self.F.pmul((a0 + a2), (b0 + b2)) - v0) + v1) - v2) c1 = (((self.F.pmul((a2 + a1), (b2 + b1)) - v2) - v1) + self.F.pmul(beta, v0)) c2 = (v2 + self.F.pmul(beta, ((self.F.pmul((a1 + a0), (b1 + b0)) - v1) - v0))) cp = polynom(self.F, [c0, c1, c2]) C = ExtensionFieldElem(self, cp) return C else: prod = convolve(A, B) return self.reduc2(prod)
def square(self, a): ' This algortihm returns the square of a in the field\n using different methods if the degree of the extension\n is 2,3 or more\n ' assert (a.F == self) if (not (a.deg == (self.deg - 1))): a = self.reduc(a) A = a.poly.coef k = (self.deg - 1) if ((k == 2) and (self.F.rep == 'A')): (a1, a0) = (A[0].val, A[1].val) p = self.char v0 = (a0 * a1) c0 = (((a0 + a1) * (a0 - a1)) % p) c1 = ((v0 + v0) % p) c0e = FieldElem(c0, self.F) c1e = FieldElem(c1, self.F) cp = polynom(self.F, [c1e, c0e]) C = ExtensionFieldElem(self, cp) return C elif (k == 2): (a1, a0) = A beta = (- self.irpoly.coef[(- 1)]) v0 = self.F.pmul(a0, a1) c0 = ((self.F.pmul((a0 + a1), (a0 + self.F.pmul(a1, beta))) - v0) - self.F.pmul(beta, v0)) c1 = (v0 + v0) cp = polynom(self.F, [c1, c0]) return ExtensionFieldElem(self, cp) elif (k == 3): (a2, a1, a0) = A assert (a0.F == self.F) beta = (- self.irpoly.coef[(- 1)]) s0 = self.F.square(a0) t1 = self.F.pmul(a0, a1) s1 = (t1 + t1) s2 = self.F.square(((a0 - a1) + a2)) t3 = (a1 * a2) s3 = (t3 + t3) s4 = self.F.square(a2) c0 = (s0 + self.F.pmul(beta, s3)) c1 = (s1 + self.F.pmul(beta, s4)) c2 = ((((s1 + s2) + s3) - s0) - s4) cp = polynom(self.F, [c2, c1, c0]) return ExtensionFieldElem(self, cp) else: return self.F.pmul(a, a)
392,524,527,384,159,500
This algortihm returns the square of a in the field using different methods if the degree of the extension is 2,3 or more
mathTools/field.py
square
ecuvelier/PPAT
python
def square(self, a): ' This algortihm returns the square of a in the field\n using different methods if the degree of the extension\n is 2,3 or more\n ' assert (a.F == self) if (not (a.deg == (self.deg - 1))): a = self.reduc(a) A = a.poly.coef k = (self.deg - 1) if ((k == 2) and (self.F.rep == 'A')): (a1, a0) = (A[0].val, A[1].val) p = self.char v0 = (a0 * a1) c0 = (((a0 + a1) * (a0 - a1)) % p) c1 = ((v0 + v0) % p) c0e = FieldElem(c0, self.F) c1e = FieldElem(c1, self.F) cp = polynom(self.F, [c1e, c0e]) C = ExtensionFieldElem(self, cp) return C elif (k == 2): (a1, a0) = A beta = (- self.irpoly.coef[(- 1)]) v0 = self.F.pmul(a0, a1) c0 = ((self.F.pmul((a0 + a1), (a0 + self.F.pmul(a1, beta))) - v0) - self.F.pmul(beta, v0)) c1 = (v0 + v0) cp = polynom(self.F, [c1, c0]) return ExtensionFieldElem(self, cp) elif (k == 3): (a2, a1, a0) = A assert (a0.F == self.F) beta = (- self.irpoly.coef[(- 1)]) s0 = self.F.square(a0) t1 = self.F.pmul(a0, a1) s1 = (t1 + t1) s2 = self.F.square(((a0 - a1) + a2)) t3 = (a1 * a2) s3 = (t3 + t3) s4 = self.F.square(a2) c0 = (s0 + self.F.pmul(beta, s3)) c1 = (s1 + self.F.pmul(beta, s4)) c2 = ((((s1 + s2) + s3) - s0) - s4) cp = polynom(self.F, [c2, c1, c0]) return ExtensionFieldElem(self, cp) else: return self.F.pmul(a, a)
def invert(self, a): " Ths method returns the inverse of a in the field\n The inverse is computed by determining the Bezout coefficient using the\n extended Euclide's algorithm or by specialized algorithms depending\n on the degree of the extension (2 or 3)\n " assert (a.F == self) k = (self.deg - 1) if ((k == 2) and (self.F.rep == 'A')): A = a.poly.coef (a1, a0) = (A[0].val, A[1].val) p = self.char norm = ((a0 * a0) + (a1 * a1)) invnorm = gmpy.invert(norm, p) c0 = ((a0 * invnorm) % p) c1 = (((- a1) * invnorm) % p) c0e = FieldElem(c0, self.F) c1e = FieldElem(c1, self.F) invap = polynom(self.F, [c1e, c0e]) inva = ExtensionFieldElem(self, invap) return inva elif (k == 2): A = a.poly.coef (a1, a0) = (A[0], A[1]) mod = self.irpoly.coef[(- 1)] a12 = self.F.square(a1) mid = self.F.pmul(a12, mod) norm = (self.F.square(a0) + mid) invnorm = self.F.invert(norm) c = self.F.pmul(a0, invnorm) d = (- self.F.pmul(a1, invnorm)) invap = polynom(self.F, [d, c]) inva = ExtensionFieldElem(self, invap) return inva elif (k == 3): A = a.poly.coef (a2, a1, a0) = (A[0], A[1], A[2]) mod = (- self.irpoly.coef[(- 1)]) z0 = self.F.zero() z1 = self.F.one() if (a0 == z0): if (a1 == z0): (c0, c1, c2) = (z0, self.F.invert(self.F.pmul(a2, mod)), z0) elif (a2 == z0): (c0, c1, c2) = (z0, z0, self.F.invert(self.F.pmul(a1, mod))) else: a22 = self.F.square(a2) a12 = self.F.square(a1) c2 = self.F.pmul(a12, self.F.invert((self.F.pmul(self.F.pmul(a22, a2), mod) + self.F.pmul(self.F.pmul(a12, a1), mod)))) c1 = self.F.pmul((z1 - self.F.pmul(self.F.pmul(a1, c2), mod)), self.F.invert(self.F.pmul(a2, mod))) c0 = self.F.pmul((- self.F.pmul(self.F.pmul(a2, mod), c2)), self.F.invert(a1)) elif ((a1 == z0) and (a2 == z0)): (c0, c1, c2) = (self.F.invert(a0), z0, z0) else: a12 = self.F.pmul(a1, a2) a12m = self.F.pmul(a12, mod) a00 = self.F.square(a0) abis = (a00 - a12m) if (abis == z0): a11 = self.F.square(a1) a22 = self.F.square(a2) a02 = self.F.pmul(a0, a2) a01 = self.F.pmul(a0, a1) c2 = self.F.pmul((- a), 
self.F.invert(self.F.pmul((a02 - a11), mod))) c1 = self.F.pmul((- a2), self.F.invert((a01 - self.F.pmul(a22, mod)))) a1c2 = self.F.pmul(a1, c2) a2c1 = self.F.pmul(a2, c1) c0 = self.F.pmul((z1 - self.F.pmul((a1c2 + a2c1), mod)), self.F.invert(a0)) elif (a1 == z0): inva0 = self.F.invert(a0) a02 = self.F.pmul(a0, a2) a000 = self.F.pmul(a00, a0) a22 = self.F.square(a2) a222 = self.F.pmul(a22, a2) mm = self.F.square(mod) a222mm = self.F.pmul(a222, mm) c2 = self.F.pmul((- a02), self.F.invert((a000 + a222mm))) a02m = self.F.pmul(a02, mod) a02mc2 = self.F.pmul(a02m, c2) inva00 = self.F.square(inva0) c1 = self.F.pmul((- a02mc2), inva00) a2m = self.F.pmul(a2, mod) a2mc1 = self.F.pmul(a2m, c1) c0 = self.F.pmul((z1 - a2mc1), inva0) elif (a2 == z0): a11 = self.F.square(a1) a111 = self.F.pmul(a11, a1) a000 = self.F.pmul(a00, a0) a111m = self.F.pmul(a111, mod) inva0 = self.F.invert(a0) c2 = self.F.pmul(a11, self.F.invert((a111m + a000))) a11m = self.F.pmul(a11, mod) a11mc2 = self.F.pmul(a11m, c2) inva00 = self.F.square(inva0) c1 = self.F.pmul((a11mc2 - a1), inva00) a1m = self.F.pmul(a1, mod) a1mc2 = self.F.pmul(a1m, c2) c0 = self.F.pmul((z1 - a1mc2), inva0) else: a01 = self.F.pmul(a0, a1) a22 = self.F.square(a2) a22m = self.F.pmul(a22, mod) a02 = self.F.pmul(a0, a2) a11 = self.F.square(a1) abus = (a01 - a22m) abos = self.F.pmul((a02 - a11), mod) invabis = self.F.invert(abis) abb = self.F.pmul(abus, invabis) abb1 = self.F.pmul(abb, a1) abbbos = self.F.pmul(abb, abos) c2 = self.F.pmul((abb1 - a2), self.F.invert((abis - abbbos))) abosc2 = self.F.pmul(abos, c2) c1 = self.F.pmul(((- a1) - abosc2), invabis) a1c2 = self.F.pmul(a1, c2) a2c1 = self.F.pmul(a2, c1) c0 = self.F.pmul((z1 - self.F.pmul((a1c2 + a2c1), mod)), self.F.invert(a0)) invap = polynom(self.F, [c2, c1, c0]) inva = ExtensionFieldElem(self, invap) return inva else: P = ExtensionFieldElem(self, self.irpoly) (r, u, v) = self.extendedeuclide(P, a) (n, d) = r.poly.truedeg() assert (n == (self.deg - 2)) c = 
r.poly.coef[(len(r.poly.coef) - 1)].invert() cp = polynom(self.F, [c]) ce = ExtensionFieldElem(self, cp) return (ce * v)
1,190,864,874,066,247,400
Ths method returns the inverse of a in the field The inverse is computed by determining the Bezout coefficient using the extended Euclide's algorithm or by specialized algorithms depending on the degree of the extension (2 or 3)
mathTools/field.py
invert
ecuvelier/PPAT
python
def invert(self, a): " Ths method returns the inverse of a in the field\n The inverse is computed by determining the Bezout coefficient using the\n extended Euclide's algorithm or by specialized algorithms depending\n on the degree of the extension (2 or 3)\n " assert (a.F == self) k = (self.deg - 1) if ((k == 2) and (self.F.rep == 'A')): A = a.poly.coef (a1, a0) = (A[0].val, A[1].val) p = self.char norm = ((a0 * a0) + (a1 * a1)) invnorm = gmpy.invert(norm, p) c0 = ((a0 * invnorm) % p) c1 = (((- a1) * invnorm) % p) c0e = FieldElem(c0, self.F) c1e = FieldElem(c1, self.F) invap = polynom(self.F, [c1e, c0e]) inva = ExtensionFieldElem(self, invap) return inva elif (k == 2): A = a.poly.coef (a1, a0) = (A[0], A[1]) mod = self.irpoly.coef[(- 1)] a12 = self.F.square(a1) mid = self.F.pmul(a12, mod) norm = (self.F.square(a0) + mid) invnorm = self.F.invert(norm) c = self.F.pmul(a0, invnorm) d = (- self.F.pmul(a1, invnorm)) invap = polynom(self.F, [d, c]) inva = ExtensionFieldElem(self, invap) return inva elif (k == 3): A = a.poly.coef (a2, a1, a0) = (A[0], A[1], A[2]) mod = (- self.irpoly.coef[(- 1)]) z0 = self.F.zero() z1 = self.F.one() if (a0 == z0): if (a1 == z0): (c0, c1, c2) = (z0, self.F.invert(self.F.pmul(a2, mod)), z0) elif (a2 == z0): (c0, c1, c2) = (z0, z0, self.F.invert(self.F.pmul(a1, mod))) else: a22 = self.F.square(a2) a12 = self.F.square(a1) c2 = self.F.pmul(a12, self.F.invert((self.F.pmul(self.F.pmul(a22, a2), mod) + self.F.pmul(self.F.pmul(a12, a1), mod)))) c1 = self.F.pmul((z1 - self.F.pmul(self.F.pmul(a1, c2), mod)), self.F.invert(self.F.pmul(a2, mod))) c0 = self.F.pmul((- self.F.pmul(self.F.pmul(a2, mod), c2)), self.F.invert(a1)) elif ((a1 == z0) and (a2 == z0)): (c0, c1, c2) = (self.F.invert(a0), z0, z0) else: a12 = self.F.pmul(a1, a2) a12m = self.F.pmul(a12, mod) a00 = self.F.square(a0) abis = (a00 - a12m) if (abis == z0): a11 = self.F.square(a1) a22 = self.F.square(a2) a02 = self.F.pmul(a0, a2) a01 = self.F.pmul(a0, a1) c2 = self.F.pmul((- a), 
self.F.invert(self.F.pmul((a02 - a11), mod))) c1 = self.F.pmul((- a2), self.F.invert((a01 - self.F.pmul(a22, mod)))) a1c2 = self.F.pmul(a1, c2) a2c1 = self.F.pmul(a2, c1) c0 = self.F.pmul((z1 - self.F.pmul((a1c2 + a2c1), mod)), self.F.invert(a0)) elif (a1 == z0): inva0 = self.F.invert(a0) a02 = self.F.pmul(a0, a2) a000 = self.F.pmul(a00, a0) a22 = self.F.square(a2) a222 = self.F.pmul(a22, a2) mm = self.F.square(mod) a222mm = self.F.pmul(a222, mm) c2 = self.F.pmul((- a02), self.F.invert((a000 + a222mm))) a02m = self.F.pmul(a02, mod) a02mc2 = self.F.pmul(a02m, c2) inva00 = self.F.square(inva0) c1 = self.F.pmul((- a02mc2), inva00) a2m = self.F.pmul(a2, mod) a2mc1 = self.F.pmul(a2m, c1) c0 = self.F.pmul((z1 - a2mc1), inva0) elif (a2 == z0): a11 = self.F.square(a1) a111 = self.F.pmul(a11, a1) a000 = self.F.pmul(a00, a0) a111m = self.F.pmul(a111, mod) inva0 = self.F.invert(a0) c2 = self.F.pmul(a11, self.F.invert((a111m + a000))) a11m = self.F.pmul(a11, mod) a11mc2 = self.F.pmul(a11m, c2) inva00 = self.F.square(inva0) c1 = self.F.pmul((a11mc2 - a1), inva00) a1m = self.F.pmul(a1, mod) a1mc2 = self.F.pmul(a1m, c2) c0 = self.F.pmul((z1 - a1mc2), inva0) else: a01 = self.F.pmul(a0, a1) a22 = self.F.square(a2) a22m = self.F.pmul(a22, mod) a02 = self.F.pmul(a0, a2) a11 = self.F.square(a1) abus = (a01 - a22m) abos = self.F.pmul((a02 - a11), mod) invabis = self.F.invert(abis) abb = self.F.pmul(abus, invabis) abb1 = self.F.pmul(abb, a1) abbbos = self.F.pmul(abb, abos) c2 = self.F.pmul((abb1 - a2), self.F.invert((abis - abbbos))) abosc2 = self.F.pmul(abos, c2) c1 = self.F.pmul(((- a1) - abosc2), invabis) a1c2 = self.F.pmul(a1, c2) a2c1 = self.F.pmul(a2, c1) c0 = self.F.pmul((z1 - self.F.pmul((a1c2 + a2c1), mod)), self.F.invert(a0)) invap = polynom(self.F, [c2, c1, c0]) inva = ExtensionFieldElem(self, invap) return inva else: P = ExtensionFieldElem(self, self.irpoly) (r, u, v) = self.extendedeuclide(P, a) (n, d) = r.poly.truedeg() assert (n == (self.deg - 2)) c = 
r.poly.coef[(len(r.poly.coef) - 1)].invert() cp = polynom(self.F, [c]) ce = ExtensionFieldElem(self, cp) return (ce * v)
def invertible(self, a): ' Return True if a is invertible\n ' return (not (self.reduc(a) == self.zero()))
8,636,644,282,939,494,000
Return True if a is invertible
mathTools/field.py
invertible
ecuvelier/PPAT
python
def invertible(self, a): ' \n ' return (not (self.reduc(a) == self.zero()))
def eucldiv(self, a, b): ' Return a/b and a%b\n a and b are of length d-1 where d is the degree of the irreducible polynomial\n ' zero = self.F.zero() izero = self.zero() d = self.deg assert (not b.poly.iszero()) if a.poly.iszero(): return (izero, izero) elif (a == b): return (self.one(), izero) A = a.poly.coef B = b.poly.coef (n, da) = a.poly.truedeg() (m, db) = b.poly.truedeg() if (da < db): return (izero, a) elif (da == db): deg = max((d - 1), da) rc = ([zero] * deg) qc = ([zero] * deg) q = (A[n] / B[m]) for i in range(1, deg): rc[i] = (A[(n + i)] - (q * B[(m + i)])) qc[(deg - 1)] = q rp = polynom(self.F, rc) qp = polynom(self.F, qc) remain = ExtensionFieldElem(self, rp) quotient = ExtensionFieldElem(self, qp) return (quotient, remain) else: deg = max((d - 1), da) p = (deg - da) rc = ([zero] * deg) qc = ([zero] * deg) rc[(deg - da):] = A[n:] pm = 0 while (((p + pm) + db) < (deg + 1)): k = (((deg - (da - db)) - 1) + pm) qc[k] = (rc[(p + pm)] / B[m]) for i in range(db): rc[((i + p) + pm)] = (rc[((i + p) + pm)] - (qc[k] * B[(m + i)])) pm = (pm + 1) rp = polynom(self.F, rc) qp = polynom(self.F, qc) remain = ExtensionFieldElem(self, rp) quotient = ExtensionFieldElem(self, qp) return (quotient, remain)
-7,539,930,612,907,431,000
Return a/b and a%b a and b are of length d-1 where d is the degree of the irreducible polynomial
mathTools/field.py
eucldiv
ecuvelier/PPAT
python
def eucldiv(self, a, b): ' Return a/b and a%b\n a and b are of length d-1 where d is the degree of the irreducible polynomial\n ' zero = self.F.zero() izero = self.zero() d = self.deg assert (not b.poly.iszero()) if a.poly.iszero(): return (izero, izero) elif (a == b): return (self.one(), izero) A = a.poly.coef B = b.poly.coef (n, da) = a.poly.truedeg() (m, db) = b.poly.truedeg() if (da < db): return (izero, a) elif (da == db): deg = max((d - 1), da) rc = ([zero] * deg) qc = ([zero] * deg) q = (A[n] / B[m]) for i in range(1, deg): rc[i] = (A[(n + i)] - (q * B[(m + i)])) qc[(deg - 1)] = q rp = polynom(self.F, rc) qp = polynom(self.F, qc) remain = ExtensionFieldElem(self, rp) quotient = ExtensionFieldElem(self, qp) return (quotient, remain) else: deg = max((d - 1), da) p = (deg - da) rc = ([zero] * deg) qc = ([zero] * deg) rc[(deg - da):] = A[n:] pm = 0 while (((p + pm) + db) < (deg + 1)): k = (((deg - (da - db)) - 1) + pm) qc[k] = (rc[(p + pm)] / B[m]) for i in range(db): rc[((i + p) + pm)] = (rc[((i + p) + pm)] - (qc[k] * B[(m + i)])) pm = (pm + 1) rp = polynom(self.F, rc) qp = polynom(self.F, qc) remain = ExtensionFieldElem(self, rp) quotient = ExtensionFieldElem(self, qp) return (quotient, remain)
def reduc(self, a): ' Return a % self.irpoly\n The polynomial a = [a_0,...,a_n-1] is returned modulo the irreducible polynomial\n The reduced polynomial has length at most d-1 where d is the length\n of the irreducible polynomial\n ' assert (a.F.F == self.F) if a.poly.iszero(): return self.zero() elif (a.poly == self.irpoly): return self.zero() elif (a.deg < self.deg): c = ([self.F.zero()] * ((self.deg - 1) - a.deg)) newacoef = (c + a.poly.coef) newapoly = polynom(self.F, newacoef) newaelem = ExtensionFieldElem(self, newapoly) return newaelem else: (q, r) = self.eucldiv(a, ExtensionFieldElem(self, self.irpoly)) r = self.trunc(r) return self.reduc(r)
-6,581,890,971,430,029,000
Return a % self.irpoly The polynomial a = [a_0,...,a_n-1] is returned modulo the irreducible polynomial The reduced polynomial has length at most d-1 where d is the length of the irreducible polynomial
mathTools/field.py
reduc
ecuvelier/PPAT
python
def reduc(self, a): ' Return a % self.irpoly\n The polynomial a = [a_0,...,a_n-1] is returned modulo the irreducible polynomial\n The reduced polynomial has length at most d-1 where d is the length\n of the irreducible polynomial\n ' assert (a.F.F == self.F) if a.poly.iszero(): return self.zero() elif (a.poly == self.irpoly): return self.zero() elif (a.deg < self.deg): c = ([self.F.zero()] * ((self.deg - 1) - a.deg)) newacoef = (c + a.poly.coef) newapoly = polynom(self.F, newacoef) newaelem = ExtensionFieldElem(self, newapoly) return newaelem else: (q, r) = self.eucldiv(a, ExtensionFieldElem(self, self.irpoly)) r = self.trunc(r) return self.reduc(r)
def reduc2(self, a): ' a is a list of length (d-1)*2-1 (polynomial length)\n this method returns the equivalent element of length d-1\n using the table of equivalences (build from the irreducible polynomial)\n in the function self.table()\n ' As = a[:(self.deg - 2)] Ad = a[(self.deg - 2):] b = list((dot(As, self.tabular) + Ad)) newapoly = polynom(self.F, b) newa = ExtensionFieldElem(self, newapoly) return newa
8,261,773,372,050,492,000
a is a list of length (d-1)*2-1 (polynomial length) this method returns the equivalent element of length d-1 using the table of equivalences (build from the irreducible polynomial) in the function self.table()
mathTools/field.py
reduc2
ecuvelier/PPAT
python
def reduc2(self, a): ' a is a list of length (d-1)*2-1 (polynomial length)\n this method returns the equivalent element of length d-1\n using the table of equivalences (build from the irreducible polynomial)\n in the function self.table()\n ' As = a[:(self.deg - 2)] Ad = a[(self.deg - 2):] b = list((dot(As, self.tabular) + Ad)) newapoly = polynom(self.F, b) newa = ExtensionFieldElem(self, newapoly) return newa
def trunc(self, a): 'Return an ExtensionFieldElem of length d-1 where d = deg(irpoly)\n ' d = self.deg if (a.deg == (d - 1)): return a c = a.poly.coef[((a.deg - d) + 1):] cp = polynom(self.F, c) return ExtensionFieldElem(self, cp)
-675,842,933,097,962,800
Return an ExtensionFieldElem of length d-1 where d = deg(irpoly)
mathTools/field.py
trunc
ecuvelier/PPAT
python
def trunc(self, a): '\n ' d = self.deg if (a.deg == (d - 1)): return a c = a.poly.coef[((a.deg - d) + 1):] cp = polynom(self.F, c) return ExtensionFieldElem(self, cp)
def table(self): ' This method returns a table (usually) stored in self.tabular\n which is used to compute reduction after a multiplication\n between two elements\n ' d = self.deg T = zeros(((d - 2), (d - 1)), dtype=object_) Pc = self.irpoly.coef[1:] for i in range(0, (d - 2)): Qc = ([self.F.zero()] * ((2 * (d - 1)) - 1)) Qc[(i + 1):(i + d)] = Pc Qp = polynom(self.F, Qc) Qe = ExtensionFieldElem(self, Qp) Q = self.reduc((- Qe)) T[i] = array(Q.poly.coef) return T
1,688,632,231,972,374,500
This method returns a table (usually) stored in self.tabular which is used to compute reduction after a multiplication between two elements
mathTools/field.py
table
ecuvelier/PPAT
python
def table(self): ' This method returns a table (usually) stored in self.tabular\n which is used to compute reduction after a multiplication\n between two elements\n ' d = self.deg T = zeros(((d - 2), (d - 1)), dtype=object_) Pc = self.irpoly.coef[1:] for i in range(0, (d - 2)): Qc = ([self.F.zero()] * ((2 * (d - 1)) - 1)) Qc[(i + 1):(i + d)] = Pc Qp = polynom(self.F, Qc) Qe = ExtensionFieldElem(self, Qp) Q = self.reduc((- Qe)) T[i] = array(Q.poly.coef) return T
def extendedeuclide(self, a, b): 'Return s,u,v such as s = ua + vb, s is the gcd of a and b\n This method is used to compute the inverse of a mod b (when s=1)\n ' one = self.one() zero = self.zero() s = a u = one v = zero sp = b up = zero vp = one while (not sp.poly.iszero()): (q, r) = self.eucldiv(s, sp) (s, u, v, sp, up, vp) = (sp, up, vp, r, (u - (up * q)), (v - (vp * q))) return (self.reduc(s), self.reduc(u), self.reduc(v))
2,513,439,641,807,605,000
Return s,u,v such as s = ua + vb, s is the gcd of a and b This method is used to compute the inverse of a mod b (when s=1)
mathTools/field.py
extendedeuclide
ecuvelier/PPAT
python
def extendedeuclide(self, a, b): 'Return s,u,v such as s = ua + vb, s is the gcd of a and b\n This method is used to compute the inverse of a mod b (when s=1)\n ' one = self.one() zero = self.zero() s = a u = one v = zero sp = b up = zero vp = one while (not sp.poly.iszero()): (q, r) = self.eucldiv(s, sp) (s, u, v, sp, up, vp) = (sp, up, vp, r, (u - (up * q)), (v - (vp * q))) return (self.reduc(s), self.reduc(u), self.reduc(v))