Columns: code (string), signature (string), docstring (string), loss_without_docstring (float64), loss_with_docstring (float64), factor (float64)
return _get_bucket_attribute(bucket, 'storageClass', 'StorageClass', retry_params=retry_params, _account_id=_account_id)
def get_storage_class(bucket, retry_params=None, _account_id=None)
Returns the storage class for the given bucket. https://cloud.google.com/storage/docs/storage-classes Args: bucket: A Google Cloud Storage bucket of form '/bucket'. retry_params: An api_utils.RetryParams for this call to GCS. If None, the default one is used. _account_id: Internal-use only. Returns: The storage class as a string. Raises: errors.AuthorizationError: if authorization failed. errors.NotFoundError: if the bucket does not exist.
3.450047
4.451004
0.775117
api = storage_api._get_storage_api(retry_params=retry_params, account_id=_account_id) common.validate_bucket_path(bucket) status, headers, content = api.get_bucket('%s?%s' % (bucket, query_param)) errors.check_status(status, [200], bucket, resp_headers=headers, body=content) root = ET.fromstring(content) if root.tag == xml_response_tag and root.text: return root.text return None
def _get_bucket_attribute(bucket, query_param, xml_response_tag, retry_params=None, _account_id=None)
Helper method to request a bucket parameter and parse the response. Args: bucket: A Google Cloud Storage bucket of form '/bucket'. query_param: The query parameter to include in the get bucket request. xml_response_tag: The expected tag in the xml response. retry_params: An api_utils.RetryParams for this call to GCS. If None, the default one is used. _account_id: Internal-use only. Returns: The xml value as a string. None if the returned xml does not match expected format. Raises: errors.AuthorizationError: if authorization failed. errors.NotFoundError: if the bucket does not exist.
3.854723
3.786155
1.01811
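A standalone sketch of the XML handling this helper performs, using a canned GET bucket ?storageClass response; the response body here is illustrative, not captured from GCS.

import xml.etree.ElementTree as ET

content = '<StorageClass>STANDARD</StorageClass>'  # illustrative response body
root = ET.fromstring(content)
# _get_bucket_attribute returns root.text only when the tag matches the expected one.
value = root.text if root.tag == 'StorageClass' and root.text else None
print(value)  # STANDARD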
common.validate_file_path(filename) api = storage_api._get_storage_api(retry_params=retry_params, account_id=_account_id) status, headers, content = api.head_object( api_utils._quote_filename(filename)) errors.check_status(status, [200], filename, resp_headers=headers, body=content) file_stat = common.GCSFileStat( filename=filename, st_size=common.get_stored_content_length(headers), st_ctime=common.http_time_to_posix(headers.get('last-modified')), etag=headers.get('etag'), content_type=headers.get('content-type'), metadata=common.get_metadata(headers)) return file_stat
def stat(filename, retry_params=None, _account_id=None)
Get GCSFileStat of a Google Cloud storage file. Args: filename: A Google Cloud Storage filename of form '/bucket/filename'. retry_params: An api_utils.RetryParams for this call to GCS. If None, the default one is used. _account_id: Internal-use only. Returns: a GCSFileStat object containing info about this file. Raises: errors.AuthorizationError: if authorization failed. errors.NotFoundError: if an object that's expected to exist doesn't.
3.703864
3.600425
1.02873
common.validate_file_path(src) common.validate_file_path(dst) if metadata is None: metadata = {} copy_meta = 'COPY' else: copy_meta = 'REPLACE' metadata.update({'x-goog-copy-source': src, 'x-goog-metadata-directive': copy_meta}) api = storage_api._get_storage_api(retry_params=retry_params) status, resp_headers, content = api.put_object( api_utils._quote_filename(dst), headers=metadata) errors.check_status(status, [200], src, metadata, resp_headers, body=content)
def copy2(src, dst, metadata=None, retry_params=None)
Copy the file content from src to dst. Args: src: /bucket/filename dst: /bucket/filename metadata: a dict of metadata for this copy. If None, old metadata is copied. For example, {'x-goog-meta-foo': 'bar'}. retry_params: An api_utils.RetryParams for this call to GCS. If None, the default one is used. Raises: errors.AuthorizationError: if authorization failed. errors.NotFoundError: if an object that's expected to exist doesn't.
3.693357
3.591774
1.028282
if prefix: common.validate_bucket_path(path_prefix) bucket = path_prefix else: bucket, prefix = common._process_path_prefix(path_prefix) if marker and marker.startswith(bucket): marker = marker[len(bucket) + 1:] api = storage_api._get_storage_api(retry_params=retry_params, account_id=_account_id) options = {} if marker: options['marker'] = marker if max_keys: options['max-keys'] = max_keys if prefix: options['prefix'] = prefix if delimiter: options['delimiter'] = delimiter return _Bucket(api, bucket, options)
def listbucket(path_prefix, marker=None, prefix=None, max_keys=None, delimiter=None, retry_params=None, _account_id=None)
Returns a GCSFileStat iterator over a bucket. Optional arguments can limit the result to a subset of files under bucket. This function has two modes: 1. List bucket mode: Lists all files in the bucket without any concept of hierarchy. GCS doesn't have real directory hierarchies. 2. Directory emulation mode: If you specify the 'delimiter' argument, it is used as a path separator to emulate a hierarchy of directories. In this mode, the "path_prefix" argument should end in the delimiter specified (thus designates a logical directory). The logical directory's contents, both files and subdirectories, are listed. The names of subdirectories returned will end with the delimiter. So listbucket can be called with the subdirectory name to list the subdirectory's contents. Args: path_prefix: A Google Cloud Storage path of format "/bucket" or "/bucket/prefix". Only objects whose fullpath starts with the path_prefix will be returned. marker: Another path prefix. Only objects whose fullpath starts lexicographically after marker will be returned (exclusive). prefix: Deprecated. Use path_prefix. max_keys: The limit on the number of objects to return. int. For best performance, specify max_keys only if you know how many objects you want. Otherwise, this method requests large batches and handles pagination for you. delimiter: Use to turn on directory mode. str of one or multiple chars that your bucket uses as its directory separator. retry_params: An api_utils.RetryParams for this call to GCS. If None, the default one is used. _account_id: Internal-use only. Examples: For files "/bucket/a", "/bucket/bar/1", "/bucket/foo", "/bucket/foo/1", "/bucket/foo/2/1", "/bucket/foo/3/1", Regular mode: listbucket("/bucket/f", marker="/bucket/foo/1") will match "/bucket/foo/2/1", "/bucket/foo/3/1". Directory mode: listbucket("/bucket/", delimiter="/") will match "/bucket/a", "/bucket/bar/", "/bucket/foo", "/bucket/foo/". listbucket("/bucket/foo/", delimiter="/") will match "/bucket/foo/1", "/bucket/foo/2/", "/bucket/foo/3/". Returns: Regular mode: A GCSFileStat iterator over matched files ordered by filename. The iterator returns GCSFileStat objects. filename, etag, st_size, st_ctime, and is_dir are set. Directory emulation mode: A GCSFileStat iterator over matched files and directories ordered by name. The iterator returns GCSFileStat objects. For directories, only the filename and is_dir fields are set. The last name yielded can be used as next call's marker.
2.426115
3.077791
0.788265
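A minimal usage sketch for the two listbucket modes described above, assuming the library is imported as gcs (as in the create_file example later in this listing); the bucket name is made up.

import cloudstorage as gcs

# Regular mode: every object whose full path starts with the prefix.
for stat in gcs.listbucket('/my-bucket/foo'):
    print('%s %s bytes' % (stat.filename, stat.st_size))

# Directory emulation mode: '/' acts as the separator, so logical
# subdirectories come back as GCSFileStat objects with is_dir=True.
for stat in gcs.listbucket('/my-bucket/', delimiter='/'):
    kind = 'dir ' if stat.is_dir else 'file'
    print('%s %s' % (kind, stat.filename))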
api = storage_api._get_storage_api(retry_params=retry_params, account_id=_account_id) if os.getenv('SERVER_SOFTWARE').startswith('Dev'): def _temp_func(file_list, destination_file, content_type): bucket = '/' + destination_file.split('/')[1] + '/' with open(destination_file, 'w', content_type=content_type) as gcs_merge: for source_file in file_list: with open(bucket + source_file['Name'], 'r') as gcs_source: gcs_merge.write(gcs_source.read()) compose_object = _temp_func else: compose_object = api.compose_object file_list, _ = _validate_compose_list(destination_file, list_of_files, files_metadata, 32) compose_object(file_list, destination_file, content_type)
def compose(list_of_files, destination_file, files_metadata=None, content_type=None, retry_params=None, _account_id=None)
Runs the GCS Compose on the given files. Merges between 2 and 32 files into one file. Composite files may even be built from other existing composites, provided that the total component count does not exceed 1024. See here for details: https://cloud.google.com/storage/docs/composite-objects Args: list_of_files: List of file name strings with no leading slashes or bucket. destination_file: Path to the output file. Must have the bucket in the path. files_metadata: Optional, file metadata, order must match list_of_files, see link for available options: https://cloud.google.com/storage/docs/composite-objects#_Xml content_type: Optional, used to specify the content-type header of the output file. retry_params: Optional, an api_utils.RetryParams for this call to GCS. If None, the default one is used. _account_id: Internal-use only. Raises: ValueError: If the number of files is outside the range of 2-32.
3.441729
3.571709
0.963608
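A hedged usage sketch for compose, assuming it is exported at the package level as the signature above suggests; the bucket and object names are illustrative.

import cloudstorage as gcs

# Component names carry no leading slash and no bucket; only the
# destination carries the full /bucket/object path.
gcs.compose(['part-0', 'part-1', 'part-2'],
            '/my-bucket/merged.txt',
            content_type='text/plain')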
common.validate_file_path(destination_file) bucket = destination_file[0:(destination_file.index('/', 1) + 1)] try: if isinstance(file_list, types.StringTypes): raise TypeError list_len = len(file_list) except TypeError: raise TypeError('file_list must be a list') if list_len > number_of_files: raise ValueError( 'Compose attempted to create composite with too many' '(%i) components; limit is (%i).' % (list_len, number_of_files)) if list_len <= 0: raise ValueError('Compose operation requires at' ' least one component; 0 provided.') if files_metadata is None: files_metadata = [] elif len(files_metadata) > list_len: raise ValueError('files_metadata contains more entries(%i)' ' than file_list(%i)' % (len(files_metadata), list_len)) list_of_files = [] for source_file, meta_data in itertools.izip_longest(file_list, files_metadata): if not isinstance(source_file, str): raise TypeError('Each item of file_list must be a string') if source_file.startswith('/'): logging.warn('Detected a "/" at the start of the file, ' 'Unless the file name contains a "/" it ' ' may cause files to be misread') if source_file.startswith(bucket): logging.warn('Detected bucket name at the start of the file, ' 'must not specify the bucket when listing file_names.' ' May cause files to be misread') common.validate_file_path(bucket + source_file) list_entry = {} if meta_data is not None: list_entry.update(meta_data) list_entry['Name'] = source_file list_of_files.append(list_entry) return list_of_files, bucket
def _validate_compose_list(destination_file, file_list, files_metadata=None, number_of_files=32)
Validates the file_list and merges it with files_metadata. Args: destination_file: Path to the destination file (i.e. /destination_bucket/destination_file). file_list: List of files to compose, see compose for details. files_metadata: Meta details for each file in the file_list. number_of_files: Maximum number of files allowed in the list. Returns: A tuple (list_of_files, bucket): list_of_files: Ready-to-use dict version of the list. bucket: bucket name extracted from the file paths.
3.422943
3.371963
1.015119
for e in root.getiterator(common._T_CONTENTS): st_ctime, size, etag, key = None, None, None, None for child in e.getiterator('*'): if child.tag == common._T_LAST_MODIFIED: st_ctime = common.dt_str_to_posix(child.text) elif child.tag == common._T_ETAG: etag = child.text elif child.tag == common._T_SIZE: size = child.text elif child.tag == common._T_KEY: key = child.text yield common.GCSFileStat(self._path + '/' + key, size, etag, st_ctime) e.clear() yield None
def _next_file_gen(self, root)
Generator for next file element in the document. Args: root: root element of the XML tree. Yields: GCSFileStat for the next file.
3.397257
3.037824
1.118319
for e in root.getiterator(common._T_COMMON_PREFIXES): yield common.GCSFileStat( self._path + '/' + e.find(common._T_PREFIX).text, st_size=None, etag=None, st_ctime=None, is_dir=True) e.clear() yield None
def _next_dir_gen(self, root)
Generator for next directory element in the document. Args: root: root element in the XML tree. Yields: GCSFileStat for the next directory.
10.036906
7.393932
1.357452
if ('max-keys' in self._options and self._options['max-keys'] <= common._MAX_GET_BUCKET_RESULT): return False elements = self._find_elements( content, set([common._T_IS_TRUNCATED, common._T_NEXT_MARKER])) if elements.get(common._T_IS_TRUNCATED, 'false').lower() != 'true': return False next_marker = elements.get(common._T_NEXT_MARKER) if next_marker is None: self._options.pop('marker', None) return False self._options['marker'] = next_marker return True
def _should_get_another_batch(self, content)
Whether to issue another GET bucket call. Args: content: response XML. Returns: True if another batch should be fetched, in which case self._options is updated for the next request. False otherwise.
4.070587
3.385119
1.202495
element_mapping = {} result = StringIO.StringIO(result) for _, e in ET.iterparse(result, events=('end',)): if not elements: break if e.tag in elements: element_mapping[e.tag] = e.text elements.remove(e.tag) return element_mapping
def _find_elements(self, result, elements)
Find interesting elements from XML. This function tries to only look for specified elements without parsing the entire XML. The specified elements are best located near the beginning of the document. Args: result: response XML. elements: a set of interesting element tags. Returns: A dict from element tag to element value.
3.617611
3.193244
1.132895
self.response.write('Creating file %s\n' % filename) write_retry_params = gcs.RetryParams(backoff_factor=1.1) gcs_file = gcs.open(filename, 'w', content_type='text/plain', options={'x-goog-meta-foo': 'foo', 'x-goog-meta-bar': 'bar'}, retry_params=write_retry_params) gcs_file.write('abcde\n') gcs_file.write('f'*1024*4 + '\n') gcs_file.close() self.tmp_filenames_to_clean_up.append(filename)
def create_file(self, filename)
Create a file. The retry_params specified in the open call will override the default retry params for this particular file handle. Args: filename: filename.
2.32423
2.505136
0.927786
self.response.write('Listbucket result:\n') page_size = 1 stats = gcs.listbucket(bucket + '/foo', max_keys=page_size) while True: count = 0 for stat in stats: count += 1 self.response.write(repr(stat)) self.response.write('\n') if count != page_size or count == 0: break stats = gcs.listbucket(bucket + '/foo', max_keys=page_size, marker=stat.filename)
def list_bucket(self, bucket)
Create several files and paginate through them. Production apps should set page_size to a practical value. Args: bucket: bucket.
3.443981
3.45461
0.996923
with gcs.open(filename, 'w') as f: f.write('abcde\n') blobstore_filename = '/gs' + filename return blobstore.create_gs_key(blobstore_filename)
def CreateFile(filename)
Create a GCS file with GCS client lib. Args: filename: GCS filename. Returns: The corresponding string blobkey for this GCS file.
5.038405
5.055385
0.996641
rpc = app_identity.create_rpc() app_identity.make_get_access_token_call(rpc, scopes, service_account_id) token, expires_at = yield rpc raise ndb.Return((token, expires_at))
def _make_token_async(scopes, service_account_id)
Get a fresh authentication token. Args: scopes: A list of scopes. service_account_id: Internal-use only. Raises: An ndb.Return with a tuple (token, expiration_time) where expiration_time is seconds since the epoch.
5.061475
4.384845
1.154311
def sync_wrapper(self, *args, **kwds): method = getattr(self, name) future = method(*args, **kwds) return future.get_result() return sync_wrapper
def _make_sync_method(name)
Helper to synthesize a synchronous method from an async method name. Used by the @add_sync_methods class decorator below. Args: name: The name of the synchronous method. Returns: A method (with first argument 'self') that retrieves and calls self.<name>, passing its own arguments, expects it to return a Future, and then waits for and returns that Future's result.
3.605031
2.816468
1.279983
for name in cls.__dict__.keys(): if name.endswith('_async'): sync_name = name[:-6] if not hasattr(cls, sync_name): setattr(cls, sync_name, _make_sync_method(name)) return cls
def add_sync_methods(cls)
Class decorator to add synchronous methods corresponding to async methods. This modifies the class in place, adding additional methods to it. If a synchronous method of a given name already exists it is not replaced. Args: cls: A class. Returns: The same class, modified in place.
2.732586
2.74543
0.995321
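A self-contained sketch of the pattern the two helpers above implement, with a stand-in Future class instead of ndb so it runs anywhere.

class FakeFuture(object):
    # Stand-in for an ndb Future: wraps a precomputed value.
    def __init__(self, value):
        self._value = value
    def get_result(self):
        return self._value

def _make_sync_method(name):
    def sync_wrapper(self, *args, **kwds):
        return getattr(self, name)(*args, **kwds).get_result()
    return sync_wrapper

def add_sync_methods(cls):
    for name in list(cls.__dict__.keys()):
        if name.endswith('_async') and not hasattr(cls, name[:-6]):
            setattr(cls, name[:-6], _make_sync_method(name))
    return cls

@add_sync_methods
class Client(object):
    def fetch_async(self, key):
        return FakeFuture('value-for-' + key)

print(Client().fetch('k'))  # 'value-for-k', via the synthesized sync wrapper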
retry_wrapper = api_utils._RetryWrapper( self.retry_params, retriable_exceptions=api_utils._RETRIABLE_EXCEPTIONS, should_retry=api_utils._should_retry) resp = yield retry_wrapper.run( self.urlfetch_async, url=url, method=method, headers=headers, payload=payload, deadline=deadline, callback=callback, follow_redirects=False) raise ndb.Return((resp.status_code, resp.headers, resp.content))
def do_request_async(self, url, method='GET', headers=None, payload=None, deadline=None, callback=None)
Issue one HTTP request. It performs async retries using tasklets. Args: url: the url to fetch. method: the method in which to fetch. headers: the http headers. payload: the data to submit in the fetch. deadline: the deadline in which to make the call. callback: the call to make once completed. Yields: The async fetch of the url.
2.846779
3.277542
0.868571
key = '%s,%s' % (self.service_account_id, ','.join(self.scopes)) ts = yield _AE_TokenStorage_.get_by_id_async( key, use_cache=True, use_memcache=self.retry_params.memcache_access_token, use_datastore=self.retry_params.save_access_token) if refresh or ts is None or ts.expires < ( time.time() + self.expiration_headroom): token, expires_at = yield self.make_token_async( self.scopes, self.service_account_id) timeout = int(expires_at - time.time()) ts = _AE_TokenStorage_(id=key, token=token, expires=expires_at) if timeout > 0: yield ts.put_async(memcache_timeout=timeout, use_datastore=self.retry_params.save_access_token, force_writes=True, use_cache=True, use_memcache=self.retry_params.memcache_access_token) raise ndb.Return(ts.token)
def get_token_async(self, refresh=False)
Get an authentication token. The token is cached in memcache, keyed by the scopes argument. Uses a random token expiration headroom value generated in the constructor to eliminate a burst of GET_ACCESS_TOKEN API requests. Args: refresh: If True, ignore a cached token; default False. Yields: An authentication token. This token is guaranteed to be non-expired.
3.604033
3.473031
1.03772
headers = {} if headers is None else dict(headers) headers.update(self.user_agent) try: self.token = yield self.get_token_async() except app_identity.InternalError, e: if os.environ.get('DATACENTER', '').endswith('sandman'): self.token = None logging.warning('Could not fetch an authentication token in sandman ' 'based Appengine devel setup; proceeding without one.') else: raise e if self.token: headers['authorization'] = 'OAuth ' + self.token deadline = deadline or self.retry_params.urlfetch_timeout ctx = ndb.get_context() resp = yield ctx.urlfetch( url, payload=payload, method=method, headers=headers, follow_redirects=follow_redirects, deadline=deadline, callback=callback) raise ndb.Return(resp)
def urlfetch_async(self, url, method='GET', headers=None, payload=None, deadline=None, callback=None, follow_redirects=False)
Make an async urlfetch() call. This is an async wrapper around urlfetch(). It adds an authentication header. Args: url: the url to fetch. method: the method in which to fetch. headers: the http headers. payload: the data to submit in the fetch. deadline: the deadline in which to make the call. callback: the call to make once completed. follow_redirects: whether or not to follow redirects. Yields: This returns a Future despite not being decorated with @ndb.tasklet!
4.416503
4.719441
0.935811
length = headers.get('x-goog-stored-content-length') if length is None: length = headers.get('content-length') return length
def get_stored_content_length(headers)
Return the content length (in bytes) of the object as stored in GCS. x-goog-stored-content-length should always be present except when called via the local dev_appserver. Therefore if it is not present we default to the standard content-length header. Args: headers: a dict of headers from the http response. Returns: the stored content length.
3.657306
2.891211
1.264974
return dict((k, v) for k, v in headers.iteritems() if any(k.lower().startswith(valid) for valid in _GCS_METADATA))
def get_metadata(headers)
Get user defined options from HTTP response headers.
6.434036
5.866315
1.096776
_validate_path(path_prefix) if not _GCS_PATH_PREFIX_REGEX.match(path_prefix): raise ValueError('Path prefix should have format /bucket, /bucket/, ' 'or /bucket/prefix but got %s.' % path_prefix) bucket_name_end = path_prefix.find('/', 1) bucket = path_prefix prefix = None if bucket_name_end != -1: bucket = path_prefix[:bucket_name_end] prefix = path_prefix[bucket_name_end + 1:] or None return bucket, prefix
def _process_path_prefix(path_prefix)
Validate and process a Google Cloud Storage path prefix. Args: path_prefix: a Google Cloud Storage path prefix of format '/bucket/prefix' or '/bucket/' or '/bucket'. Raises: ValueError: if path is invalid. Returns: a tuple of /bucket and prefix. prefix can be None.
2.894457
2.686888
1.077253
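Illustrative input/output pairs for the splitting logic above (a sketch, not taken from the library's own tests):

# '/bucket'        -> ('/bucket', None)
# '/bucket/'       -> ('/bucket', None)
# '/bucket/photos' -> ('/bucket', 'photos')
path_prefix = '/bucket/photos'
end = path_prefix.find('/', 1)
bucket = path_prefix if end == -1 else path_prefix[:end]
prefix = None if end == -1 else (path_prefix[end + 1:] or None)
print((bucket, prefix))  # ('/bucket', 'photos')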
if not path: raise ValueError('Path is empty') if not isinstance(path, basestring): raise TypeError('Path should be a string but is %s (%s).' % (path.__class__, path))
def _validate_path(path)
Basic validation of Google Storage paths. Args: path: a Google Storage path. It should have form '/bucket/filename' or '/bucket'. Raises: ValueError: if path is invalid. TypeError: if path is not of type basestring.
3.826698
3.547414
1.078729
if not options: return for k, v in options.iteritems(): if not isinstance(k, str): raise TypeError('option %r should be a str.' % k) if not any(k.lower().startswith(valid) for valid in _GCS_OPTIONS): raise ValueError('option %s is not supported.' % k) if not isinstance(v, basestring): raise TypeError('value %r for option %s should be of type basestring.' % (v, k))
def validate_options(options)
Validate Google Cloud Storage options. Args: options: a str->basestring dict of options to pass to Google Cloud Storage. Raises: ValueError: if option is not supported. TypeError: if option is not of type str or value of an option is not of type basestring.
3.370697
2.768247
1.217629
parsable, _ = dt_str.split('.') dt = datetime.datetime.strptime(parsable, _DT_FORMAT) return calendar.timegm(dt.utctimetuple())
def dt_str_to_posix(dt_str)
Convert a datetime str to a posix timestamp. datetime str is of format %Y-%m-%dT%H:%M:%S.%fZ, e.g. 2013-04-12T00:22:27.978Z. According to ISO 8601, T is a separator between date and time when they are on the same line. Z indicates UTC (zero meridian). A pointer: http://www.cl.cam.ac.uk/~mgk25/iso-time.html This is used to parse the LastModified node from GCS's GET bucket XML response. Args: dt_str: A datetime str. Returns: A float of secs from unix epoch. By posix definition, epoch is midnight 1970/1/1 UTC.
4.912409
5.991446
0.819904
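A standalone sketch of the same conversion using only the standard library; the format string is assumed to be what _DT_FORMAT holds ('%Y-%m-%dT%H:%M:%S').

import calendar
import datetime

dt_str = '2013-04-12T00:22:27.978Z'         # format used in GCS bucket listings
parsable, _ = dt_str.split('.')             # drop fractional seconds and the trailing 'Z'
dt = datetime.datetime.strptime(parsable, '%Y-%m-%dT%H:%M:%S')
print(calendar.timegm(dt.utctimetuple()))   # 1365726147, treating the naive datetime as UTC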
dt = datetime.datetime.utcfromtimestamp(posix) dt_str = dt.strftime(_DT_FORMAT) return dt_str + '.000Z'
def posix_to_dt_str(posix)
Reverse of dt_str_to_posix. This is used by the GCS stub to generate the GET bucket XML response. Args: posix: A float of secs from unix epoch. Returns: A datetime str.
3.906369
4.81231
0.811745
server_software = os.environ.get('SERVER_SOFTWARE') if server_software is None: return True if 'remote_api' in server_software: return False if server_software.startswith(('Development', 'testutil')): return True return False
def local_run()
Whether we should hit GCS dev appserver stub.
5.130584
3.593247
1.427841
def wrapper(*args, **kwargs): logging.info('Memory before method %s is %s.', method.__name__, runtime.memory_usage().current()) result = method(*args, **kwargs) logging.info('Memory after method %s is %s', method.__name__, runtime.memory_usage().current()) return result return wrapper
def memory_usage(method)
Log memory usage before and after a method.
2.701719
2.520803
1.071769
api = _StorageApi(_StorageApi.full_control_scope, service_account_id=account_id, retry_params=retry_params) # when running local unit tests, the service account is test@localhost # from google.appengine.api.app_identity.app_identity_stub.APP_SERVICE_ACCOUNT_NAME service_account = app_identity.get_service_account_name() if (common.local_run() and not common.get_access_token() and (not service_account or service_account.endswith('@localhost'))): api.api_url = common.local_api_url() if common.get_access_token(): api.token = common.get_access_token() return api
def _get_storage_api(retry_params, account_id=None)
Returns a storage_api instance for API methods. Args: retry_params: An instance of api_utils.RetryParams. If None, the thread's default will be used. account_id: Internal-use only. Returns: A storage_api instance to handle urlfetch work to GCS. On dev appserver, this instance will talk to a local stub by default. However, if you pass the arguments --appidentity_email_address and --appidentity_private_key_path to dev_appserver.py it will attempt to use the real GCS with these credentials. Alternatively, you can set a specific access token with common.set_access_token. You can also pass --default_gcs_bucket_name to set the default bucket.
5.154644
4.571634
1.127528
if headers is None: headers = {} if 'x-goog-api-version' not in headers: headers['x-goog-api-version'] = '2' headers['accept-encoding'] = 'gzip, *' try: resp_tuple = yield super(_StorageApi, self).do_request_async( url, method=method, headers=headers, payload=payload, deadline=deadline, callback=callback) except urlfetch.DownloadError as e: raise errors.TimeoutError( 'Request to Google Cloud Storage timed out.', e) raise ndb.Return(resp_tuple)
def do_request_async(self, url, method='GET', headers=None, payload=None, deadline=None, callback=None)
Inherit docs. This method translates urlfetch exceptions to more service specific ones.
2.800537
2.821513
0.992565
return self.do_request_async(self.api_url + path, 'POST', **kwds)
def post_object_async(self, path, **kwds)
POST to an object.
5.456117
5.637441
0.967836
return self.do_request_async(self.api_url + path, 'PUT', **kwds)
def put_object_async(self, path, **kwds)
PUT an object.
5.949553
6.113973
0.973107
return self.do_request_async(self.api_url + path, 'GET', **kwds)
def get_object_async(self, path, **kwds)
GET an object. Note: No payload argument is supported.
5.987063
6.904568
0.867116
return self.do_request_async(self.api_url + path, 'DELETE', **kwds)
def delete_object_async(self, path, **kwds)
DELETE an object. Note: No payload argument is supported.
5.536346
7.445181
0.743615
return self.do_request_async(self.api_url + path, 'HEAD', **kwds)
def head_object_async(self, path, **kwds)
HEAD an object. Depending on request headers, HEAD returns various object properties, e.g. Content-Length, Last-Modified, and ETag. Note: No payload argument is supported.
5.77278
8.221437
0.702162
return self.do_request_async(self.api_url + path, 'GET', **kwds)
def get_bucket_async(self, path, **kwds)
GET a bucket.
6.618447
6.612217
1.000942
xml_setting_list = ['<ComposeRequest>'] for meta_data in file_list: xml_setting_list.append('<Component>') for key, val in meta_data.iteritems(): xml_setting_list.append('<%s>%s</%s>' % (key, val, key)) xml_setting_list.append('</Component>') xml_setting_list.append('</ComposeRequest>') xml = ''.join(xml_setting_list) if content_type is not None: headers = {'Content-Type': content_type} else: headers = None status, resp_headers, content = self.put_object( api_utils._quote_filename(destination_file) + '?compose', payload=xml, headers=headers) errors.check_status(status, [200], destination_file, resp_headers, body=content)
def compose_object(self, file_list, destination_file, content_type)
COMPOSE multiple objects together. Using the given list of files, calls put_object with the compose flag. This call merges all the files into the destination file. Args: file_list: list of dicts with the file name. destination_file: Path to the destination file. content_type: Content type for the destination file.
3.321434
3.067461
1.082796
self._check_open() if size == 0 or not self._remaining(): return '' data_list = [] newline_offset = self._buffer.find_newline(size) while newline_offset < 0: data = self._buffer.read(size) size -= len(data) self._offset += len(data) data_list.append(data) if size == 0 or not self._remaining(): return ''.join(data_list) self._buffer.reset(self._buffer_future.get_result()) self._request_next_buffer() newline_offset = self._buffer.find_newline(size) data = self._buffer.read_to_offset(newline_offset + 1) self._offset += len(data) data_list.append(data) return ''.join(data_list)
def readline(self, size=-1)
Read one line delimited by '\n' from the file. A trailing newline character is kept in the string. It may be absent when a file ends with an incomplete line. If the size argument is non-negative, it specifies the maximum string size (counting the newline) to return. A negative size is the same as unspecified. Empty string is returned only when EOF is encountered immediately. Args: size: Maximum number of bytes to read. If not specified, readline stops only on '\n' or EOF. Returns: The data read as a string. Raises: IOError: When this buffer is closed.
2.777022
2.867131
0.968572
self._check_open() if not self._remaining(): return '' data_list = [] while True: remaining = self._buffer.remaining() if size >= 0 and size < remaining: data_list.append(self._buffer.read(size)) self._offset += size break else: size -= remaining self._offset += remaining data_list.append(self._buffer.read()) if self._buffer_future is None: if size < 0 or size >= self._remaining(): needs = self._remaining() else: needs = size data_list.extend(self._get_segments(self._offset, needs)) self._offset += needs break if self._buffer_future: self._buffer.reset(self._buffer_future.get_result()) self._buffer_future = None if self._buffer_future is None: self._request_next_buffer() return ''.join(data_list)
def read(self, size=-1)
Read data from RAW file. Args: size: Number of bytes to read as integer. Actual number of bytes read is always equal to size unless EOF is reached. If size is negative or unspecified, read the entire file. Returns: data read as str. Raises: IOError: When this buffer is closed.
3.068032
3.113316
0.985455
self._buffer_future = None next_offset = self._offset + self._buffer.remaining() if next_offset != self._file_size: self._buffer_future = self._get_segment(next_offset, self._buffer_size)
def _request_next_buffer(self)
Request the next buffer. Requires that self._offset and self._buffer are in a consistent state.
4.973589
4.33041
1.148526
if not request_size: return [] end = start + request_size futures = [] while request_size > self._max_request_size: futures.append(self._get_segment(start, self._max_request_size)) request_size -= self._max_request_size start += self._max_request_size if start < end: futures.append(self._get_segment(start, end - start)) return [fut.get_result() for fut in futures]
def _get_segments(self, start, request_size)
Get segments of the file from Google Storage as a list. A large request is broken into segments to avoid hitting urlfetch response size limit. Each segment is returned from a separate urlfetch. Args: start: start offset to request. Inclusive. Have to be within the range of the file. request_size: number of bytes to request. Returns: A list of file segments in order
2.292042
2.426096
0.944745
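A standalone sketch of how a large byte range gets chopped into per-fetch segments; the 1 MB cap is assumed here purely for illustration.

MAX_REQUEST_SIZE = 1024 * 1024  # assumed per-urlfetch cap, illustration only

def split_range(start, request_size, max_size=MAX_REQUEST_SIZE):
    # Yield (offset, length) pairs covering [start, start + request_size).
    end = start + request_size
    while start + max_size < end:
        yield (start, max_size)
        start += max_size
    if start < end:
        yield (start, end - start)

print(list(split_range(0, 2621440)))
# [(0, 1048576), (1048576, 1048576), (2097152, 524288)]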
end = start + request_size - 1 content_range = '%d-%d' % (start, end) headers = {'Range': 'bytes=' + content_range} status, resp_headers, content = yield self._api.get_object_async( self._path, headers=headers) def _checker(): errors.check_status(status, [200, 206], self._path, headers, resp_headers, body=content) self._check_etag(resp_headers.get('etag')) if check_response: _checker() raise ndb.Return(content) raise ndb.Return(content, _checker)
def _get_segment(self, start, request_size, check_response=True)
Get a segment of the file from Google Storage. Args: start: start offset of the segment. Inclusive. Have to be within the range of the file. request_size: number of bytes to request. Have to be small enough for a single urlfetch request. May go over the logical range of the file. check_response: True to check the validity of GCS response automatically before the future returns. False otherwise. See Yields section. Yields: If check_response is True, the segment [start, start + request_size) of the file. Otherwise, a tuple. The first element is the unverified file segment. The second element is a closure that checks the response. Caller should first invoke the closure before consuming the file segment. Raises: ValueError: if the file has changed while reading.
3.716991
3.835196
0.969179
if etag is None: return elif self._etag is None: self._etag = etag elif self._etag != etag: raise ValueError('File on GCS has changed while reading.')
def _check_etag(self, etag)
Check if etag is the same across requests to GCS. If self._etag is None, set it. If etag is set, check that the new etag equals the old one. In the __init__ method, we fire one HEAD and one GET request using ndb tasklets. Whichever returns first sets the initial value. Args: etag: etag from a GCS HTTP response. None if etag is not part of the response header. It could be None for example in the case of a GCS composite file. Raises: ValueError: if the two etags are not equal.
4.645007
4.335534
1.07138
self._check_open() self._buffer.reset() self._buffer_future = None if whence == os.SEEK_SET: self._offset = offset elif whence == os.SEEK_CUR: self._offset += offset elif whence == os.SEEK_END: self._offset = self._file_size + offset else: raise ValueError('Whence mode %s is invalid.' % str(whence)) self._offset = min(self._offset, self._file_size) self._offset = max(self._offset, 0) if self._remaining(): self._request_next_buffer()
def seek(self, offset, whence=os.SEEK_SET)
Set the file's current offset. Note if the new offset is out of bound, it is adjusted to either 0 or EOF. Args: offset: seek offset as number. whence: seek mode. Supported modes are os.SEEK_SET (absolute seek), os.SEEK_CUR (seek relative to the current position), and os.SEEK_END (seek relative to the end, offset should be negative). Raises: IOError: When this buffer is closed. ValueError: When whence is invalid.
2.522865
2.507135
1.006274
if size < 0: offset = len(self._buffer) else: offset = self._offset + size return self.read_to_offset(offset)
def read(self, size=-1)
Returns bytes from self._buffer and updates related offsets. Args: size: number of bytes to read starting from the current offset. Read the entire buffer if negative. Returns: Requested bytes from buffer.
4.657034
3.889977
1.197188
assert offset >= self._offset result = self._buffer[self._offset: offset] self._offset += len(result) return result
def read_to_offset(self, offset)
Returns bytes from self._buffer and updates related offsets. Args: offset: read from the current offset to this offset, exclusive. Returns: Requested bytes from buffer.
4.391972
3.69275
1.18935
if size < 0: return self._buffer.find('\n', self._offset) return self._buffer.find('\n', self._offset, self._offset + size)
def find_newline(self, size=-1)
Search for a newline char in the buffer starting from the current offset. Args: size: number of bytes to search. -1 means all. Returns: offset of the newline char in the buffer. -1 if it doesn't exist.
2.827333
2.647459
1.067942
self._check_open() if not isinstance(data, str): raise TypeError('Expected str but got %s.' % type(data)) if not data: return self._buffer.append(data) self._buffered += len(data) self._offset += len(data) if self._buffered >= self._flushsize: self._flush()
def write(self, data)
Write some bytes. Args: data: data to write. str. Raises: TypeError: if data is not of type str.
3.157082
3.195366
0.988019
if not self.closed: self.closed = True self._flush(finish=True) self._buffer = None
def close(self)
Flush the buffer and finalize the file. When this returns the new file is available for reading.
7.605606
7.033398
1.081356
while ((finish and self._buffered >= 0) or (not finish and self._buffered >= self._blocksize)): tmp_buffer = [] tmp_buffer_len = 0 excess = 0 while self._buffer: buf = self._buffer.popleft() size = len(buf) self._buffered -= size tmp_buffer.append(buf) tmp_buffer_len += size if tmp_buffer_len >= self._maxrequestsize: excess = tmp_buffer_len - self._maxrequestsize break if not finish and ( tmp_buffer_len % self._blocksize + self._buffered < self._blocksize): excess = tmp_buffer_len % self._blocksize break if excess: over = tmp_buffer.pop() size = len(over) assert size >= excess tmp_buffer_len -= size head, tail = over[:-excess], over[-excess:] self._buffer.appendleft(tail) self._buffered += len(tail) if head: tmp_buffer.append(head) tmp_buffer_len += len(head) data = ''.join(tmp_buffer) file_len = '*' if finish and not self._buffered: file_len = self._written + len(data) self._send_data(data, self._written, file_len) self._written += len(data) if file_len != '*': break
def _flush(self, finish=False)
Internal API to flush. Buffer is flushed to GCS only when the total amount of buffered data is at least self._blocksize, or to flush the final (incomplete) block of the file with finish=True.
2.915126
2.848913
1.023242
headers = {} end_offset = start_offset + len(data) - 1 if data: headers['content-range'] = ('bytes %d-%d/%s' % (start_offset, end_offset, file_len)) else: headers['content-range'] = ('bytes */%s' % file_len) status, response_headers, content = self._api.put_object( self._path_with_token, payload=data, headers=headers) if file_len == '*': expected = 308 else: expected = 200 errors.check_status(status, [expected], self._path, headers, response_headers, content, {'upload_path': self._path_with_token})
def _send_data(self, data, start_offset, file_len)
Send the block to the storage service. This is a utility method that does not modify self. Args: data: data to send in str. start_offset: start offset of the data in relation to the file. file_len: an int if this is the last data to append to the file. Otherwise '*'.
3.614523
3.467521
1.042394
headers = {'content-range': 'bytes */*'} status, response_headers, content = self._api.put_object( self._path_with_token, headers=headers) errors.check_status(status, [308], self._path, headers, response_headers, content, {'upload_path': self._path_with_token}) val = response_headers.get('range') if val is None: return -1 _, offset = val.rsplit('-', 1) return int(offset)
def _get_offset_from_gcs(self)
Get the last offset that has been written to GCS. This is a utility method that does not modify self. Returns: an int of the last offset written to GCS by this upload, inclusive. -1 means nothing has been written.
5.102049
4.577492
1.114595
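The offset comes out of the Range header of a 308 response; a tiny sketch of the parsing step, with an illustrative header value.

val = 'bytes=0-42'              # illustrative 'range' response header
_, offset = val.rsplit('-', 1)
print(int(offset))              # 42: bytes 0..42 inclusive are already stored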
if file_length is None: file_length = self._get_offset_from_gcs() + 1 self._send_data('', 0, file_length)
def _force_close(self, file_length=None)
Close this buffer on file_length. Finalize this upload immediately on file_length. Contents that are still in memory will not be uploaded. This is a utility method that does not modify self. Args: file_length: file length. Must match what has been uploaded. If None, it will be queried from GCS.
6.477224
6.40016
1.012041
import copy import operator from tarfile import ExtractError directories = [] if members is None: members = self for tarinfo in members: if tarinfo.isdir(): directories.append(tarinfo) tarinfo = copy.copy(tarinfo) tarinfo.mode = 448 self.extract(tarinfo, path) if sys.version_info < (2, 4): def sorter(dir1, dir2): return cmp(dir1.name, dir2.name) directories.sort(sorter) directories.reverse() else: directories.sort(key=operator.attrgetter('name'), reverse=True) for tarinfo in directories: dirpath = os.path.join(path, tarinfo.name) try: self.chown(tarinfo, dirpath) self.utime(tarinfo, dirpath) self.chmod(tarinfo, dirpath) except ExtractError: e = sys.exc_info()[1] if self.errorlevel > 1: raise else: self._dbg(1, "tarfile: %s" % e)
def _extractall(self, path=".", members=None)
Extract all members from the archive to the current working directory and set owner, modification time and permissions on directories afterwards. `path' specifies a different directory to extract to. `members' is optional and must be a subset of the list returned by getmembers().
2.487439
2.386001
1.042514
old_states = dict(self._cstate.states) self._cstate.start_next_cycle() self._request_method = None # self.their_http_version gets left alone, since it presumably lasts # beyond a single request/response cycle assert not self.client_is_waiting_for_100_continue self._respond_to_state_changes(old_states)
def start_next_cycle(self)
Attempt to reset our connection state for a new request/response cycle. If both client and server are in :data:`DONE` state, then resets them both to :data:`IDLE` state in preparation for a new request/response cycle on this same connection. Otherwise, raises a :exc:`LocalProtocolError`. See :ref:`keepalive-and-pipelining`.
11.579619
10.009425
1.156872
if data: if self._receive_buffer_closed: raise RuntimeError( "received close, then received more data?") self._receive_buffer += data else: self._receive_buffer_closed = True
def receive_data(self, data)
Add data to our internal receive buffer. This does not actually do any processing on the data, just stores it. To trigger processing, you have to call :meth:`next_event`. Args: data (:term:`bytes-like object`): The new data that was just received. Special case: If *data* is an empty byte-string like ``b""``, then this indicates that the remote side has closed the connection (end of file). Normally this is convenient, because standard Python APIs like :meth:`file.read` or :meth:`socket.recv` use ``b""`` to indicate end-of-file, while other failures to read are indicated using other mechanisms like raising :exc:`TimeoutError`. When using such an API you can just blindly pass through whatever you get from ``read`` to :meth:`receive_data`, and everything will work. But, if you have an API where reading an empty string is a valid non-EOF condition, then you need to be aware of this and make sure to check for such strings and avoid passing them to :meth:`receive_data`. Returns: Nothing, but after calling this you should call :meth:`next_event` to parse the newly received data. Raises: RuntimeError: Raised if you pass an empty *data*, indicating EOF, and then pass a non-empty *data*, indicating more data that somehow arrived after the EOF. (Calling ``receive_data(b"")`` multiple times is fine, and equivalent to calling it once.)
6.531369
6.36
1.026945
if self.their_state is ERROR: raise RemoteProtocolError( "Can't receive data when peer state is ERROR") try: event = self._extract_next_receive_event() if event not in [NEED_DATA, PAUSED]: self._process_event(self.their_role, event) self._receive_buffer.compress() if event is NEED_DATA: if len(self._receive_buffer) > self._max_incomplete_event_size: # 431 is "Request header fields too large" which is pretty # much the only situation where we can get here raise RemoteProtocolError("Receive buffer too long", error_status_hint=431) if self._receive_buffer_closed: # We're still trying to complete some event, but that's # never going to happen because no more data is coming raise RemoteProtocolError( "peer unexpectedly closed connection") return event except BaseException as exc: self._process_error(self.their_role) if isinstance(exc, LocalProtocolError): exc._reraise_as_remote_protocol_error() else: raise
def next_event(self)
Parse the next event out of our receive buffer, update our internal state, and return it. This is a mutating operation -- think of it like calling :func:`next` on an iterator. Returns: : One of three things: 1) An event object -- see :ref:`events`. 2) The special constant :data:`NEED_DATA`, which indicates that you need to read more data from your socket and pass it to :meth:`receive_data` before this method will be able to return any more events. 3) The special constant :data:`PAUSED`, which indicates that we are not in a state where we can process incoming data (usually because the peer has finished their part of the current request/response cycle, and you have not yet called :meth:`start_next_cycle`). See :ref:`flow-control` for details. Raises: RemoteProtocolError: The peer has misbehaved. You should close the connection (possibly after sending some kind of 4xx response). Once this method returns :class:`ConnectionClosed` once, then all subsequent calls will also return :class:`ConnectionClosed`. If this method raises any exception besides :exc:`RemoteProtocolError` then that's a bug -- if it happens please file a bug report! If this method raises any exception then it also sets :attr:`Connection.their_state` to :data:`ERROR` -- see :ref:`error-handling` for discussion.
6.114956
4.866131
1.256636
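A hedged sketch of the usual receive loop built on receive_data and next_event, using h11's documented constants; the socket handling is illustrative and the PAUSED state is ignored for brevity.

import h11

def read_request(sock):
    # Drive a server-side h11.Connection until one complete request arrives.
    conn = h11.Connection(our_role=h11.SERVER)
    while True:
        event = conn.next_event()
        if event is h11.NEED_DATA:
            conn.receive_data(sock.recv(4096))  # b'' here signals EOF to h11
            continue
        if isinstance(event, h11.Request):
            return event
        if isinstance(event, h11.ConnectionClosed):
            return None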
data_list = self.send_with_data_passthrough(event) if data_list is None: return None else: return b"".join(data_list)
def send(self, event)
Convert a high-level event into bytes that can be sent to the peer, while updating our internal state machine. Args: event: The :ref:`event <events>` to send. Returns: If ``type(event) is ConnectionClosed``, then returns ``None``. Otherwise, returns a :term:`bytes-like object`. Raises: LocalProtocolError: Sending this event at this time would violate our understanding of the HTTP/1.1 protocol. If this method raises any exception then it also sets :attr:`Connection.our_state` to :data:`ERROR` -- see :ref:`error-handling` for discussion.
5.664944
5.609054
1.009964
if self.our_state is ERROR: raise LocalProtocolError( "Can't send data when our state is ERROR") try: if type(event) is Response: self._clean_up_response_headers_for_sending(event) # We want to call _process_event before calling the writer, # because if someone tries to do something invalid then this will # give a sensible error message, while our writers all just assume # they will only receive valid events. But, _process_event might # change self._writer. So we have to do a little dance: writer = self._writer self._process_event(self.our_role, event) if type(event) is ConnectionClosed: return None else: # In any situation where writer is None, process_event should # have raised ProtocolError assert writer is not None data_list = [] writer(event, data_list.append) return data_list except: self._process_error(self.our_role) raise
def send_with_data_passthrough(self, event)
Identical to :meth:`send`, except that in situations where :meth:`send` returns a single :term:`bytes-like object`, this instead returns a list of them -- and when sending a :class:`Data` event, this list is guaranteed to contain the exact object you passed in as :attr:`Data.data`. See :ref:`sendfile` for discussion.
7.061586
6.828357
1.034156
for xstart, ystart, xstep, ystep in adam7: if xstart >= width: continue yield ((xstart, y, xstep) for y in range(ystart, height, ystep))
def adam7_generate(width, height)
Generate the coordinates for the reduced scanlines of an Adam7 interlaced image of size `width` by `height` pixels. Yields a generator for each pass, and each pass generator yields a series of (x, y, xstep) triples, each one identifying a reduced scanline consisting of pixels starting at (x, y) and taking every xstep pixel to the right.
5.106574
3.745416
1.36342
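For reference, a self-contained version with the conventional Adam7 pass parameters written out; the adam7 constant itself does not appear in this listing, so treat the tuple below as an assumption.

adam7 = ((0, 0, 8, 8), (4, 0, 8, 8), (0, 4, 4, 8),
         (2, 0, 4, 4), (0, 2, 2, 4), (1, 0, 2, 2), (0, 1, 1, 2))

def adam7_generate(width, height):
    # Yield one generator of (x, y, xstep) triples per interlace pass.
    for xstart, ystart, xstep, ystep in adam7:
        if xstart >= width:
            continue
        yield ((xstart, y, xstep) for y in range(ystart, height, ystep))

# In an 8x8 image, pass 1 touches only pixel (0, 0), stepping 8 pixels at a time.
for n, rows in enumerate(adam7_generate(8, 8), 1):
    print('pass %d: %s' % (n, list(rows)))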
# None is the default and is allowed. if palette is None: return None p = list(palette) if not (0 < len(p) <= 256): raise ProtocolError( "a palette must have between 1 and 256 entries," " see https://www.w3.org/TR/PNG/#11PLTE") seen_triple = False for i, t in enumerate(p): if len(t) not in (3, 4): raise ProtocolError( "palette entry %d: entries must be 3- or 4-tuples." % i) if len(t) == 3: seen_triple = True if seen_triple and len(t) == 4: raise ProtocolError( "palette entry %d: all 4-tuples must precede all 3-tuples" % i) for x in t: if int(x) != x or not(0 <= x <= 255): raise ProtocolError( "palette entry %d: " "values must be integer: 0 <= x <= 255" % i) return p
def check_palette(palette)
Check a palette argument (to the :class:`Writer` class) for validity. Returns the palette as a list if okay; raises an exception otherwise.
2.881241
2.867483
1.004798
if not size: return width, height if len(size) != 2: raise ProtocolError( "size argument should be a pair (width, height)") if width is not None and width != size[0]: raise ProtocolError( "size[0] (%r) and width (%r) should match when both are used." % (size[0], width)) if height is not None and height != size[1]: raise ProtocolError( "size[1] (%r) and height (%r) should match when both are used." % (size[1], height)) return size
def check_sizes(size, width, height)
Check that these arguments, if supplied, are consistent. Return a (width, height) pair.
2.306082
2.121224
1.087147
if c is None: return c if greyscale: try: len(c) except TypeError: c = (c,) if len(c) != 1: raise ProtocolError("%s for greyscale must be 1-tuple" % which) if not is_natural(c[0]): raise ProtocolError( "%s colour for greyscale must be integer" % which) else: if not (len(c) == 3 and is_natural(c[0]) and is_natural(c[1]) and is_natural(c[2])): raise ProtocolError( "%s colour must be a triple of integers" % which) return c
def check_color(c, greyscale, which)
Checks that a colour argument for transparent or background options is the right form. Returns the colour (which, if it's a bare integer, is "corrected" to a 1-tuple).
2.536611
2.36282
1.073552
data = bytes(data) # http://www.w3.org/TR/PNG/#5Chunk-layout outfile.write(struct.pack("!I", len(data))) outfile.write(tag) outfile.write(data) checksum = zlib.crc32(tag) checksum = zlib.crc32(data, checksum) checksum &= 2 ** 32 - 1 outfile.write(struct.pack("!I", checksum))
def write_chunk(outfile, tag, data=b'')
Write a PNG chunk to the output file, including length and checksum.
2.464453
2.053153
1.200326
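A self-contained sketch of the chunk layout being written (big-endian length, tag, data, CRC over tag plus data), handy for inspecting the resulting bytes.

import struct
import zlib
from io import BytesIO

def write_chunk(outfile, tag, data=b''):
    # Layout per http://www.w3.org/TR/PNG/#5Chunk-layout
    outfile.write(struct.pack('!I', len(data)))
    outfile.write(tag)
    outfile.write(data)
    checksum = zlib.crc32(data, zlib.crc32(tag)) & 0xffffffff
    outfile.write(struct.pack('!I', checksum))

buf = BytesIO()
write_chunk(buf, b'IEND')
print(repr(buf.getvalue()))  # 12 bytes: zero length, b'IEND', then its CRC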
out.write(signature) for chunk in chunks: write_chunk(out, *chunk)
def write_chunks(out, chunks)
Create a PNG file by writing out the chunks.
5.476696
5.100472
1.073763
# One factor for each channel fs = [float(2 ** s[1] - 1)/float(2 ** s[0] - 1) for s in rescale] # Assume all target_bitdepths are the same target_bitdepths = set(s[1] for s in rescale) assert len(target_bitdepths) == 1 (target_bitdepth, ) = target_bitdepths typecode = 'BH'[target_bitdepth > 8] # Number of channels n_chans = len(rescale) for row in rows: rescaled_row = array(typecode, iter(row)) for i in range(n_chans): channel = array( typecode, (int(round(fs[i] * x)) for x in row[i::n_chans])) rescaled_row[i::n_chans] = channel yield rescaled_row
def rescale_rows(rows, rescale)
Take each row in rows (an iterator) and yield a fresh row with the pixels scaled according to the rescale parameters in the list `rescale`. Each element of `rescale` is a tuple of (source_bitdepth, target_bitdepth), with one element per channel.
3.494182
2.853627
1.224471
assert bitdepth < 8 assert 8 % bitdepth == 0 # samples per byte spb = int(8 / bitdepth) def make_byte(block): res = 0 for v in block: res = (res << bitdepth) + v return res for row in rows: a = bytearray(row) # Adding padding bytes so we can group into a whole # number of spb-tuples. n = float(len(a)) extra = math.ceil(n / spb) * spb - n a.extend([0] * int(extra)) # Pack into bytes. # Each block is the samples for one byte. blocks = group(a, spb) yield bytearray(make_byte(block) for block in blocks)
def pack_rows(rows, bitdepth)
Yield packed rows that are a byte array. Each byte is packed with the values from several pixels.
5.232723
4.983342
1.050043
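A worked example of the packing for bitdepth 2, where four 2-bit samples fill one byte, most significant first; a sketch mirroring the logic above for a single row.

import math

def pack_row(row, bitdepth):
    # Pack low-bitdepth samples into whole bytes, padding the tail with zeros.
    spb = 8 // bitdepth                     # samples per byte
    a = bytearray(row)
    extra = int(math.ceil(len(a) / float(spb)) * spb) - len(a)
    a.extend([0] * extra)
    packed = bytearray()
    for i in range(0, len(a), spb):
        byte = 0
        for v in a[i:i + spb]:
            byte = (byte << bitdepth) + v
        packed.append(byte)
    return packed

print(list(pack_row([3, 0, 1, 2, 3], 2)))  # [198, 192] == [0b11000110, 0b11000000]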
for row in rows: fmt = '!%dH' % len(row) yield bytearray(struct.pack(fmt, *row))
def unpack_rows(rows)
Unpack each row from being 16-bits per value, to being a sequence of bytes.
4.60766
4.100169
1.123773
p = bytearray() t = bytearray() for x in palette: p.extend(x[0:3]) if len(x) > 3: t.append(x[3]) if t: return p, t return p, None
def make_palette_chunks(palette)
Create the byte sequences for a ``PLTE`` and if necessary a ``tRNS`` chunk. Returned as a pair (*p*, *t*). *t* will be ``None`` if no ``tRNS`` chunk is necessary.
3.530527
2.637214
1.338734
if palette: if len(bitdepth) != 1: raise ProtocolError( "with palette, only a single bitdepth may be used") (bitdepth, ) = bitdepth if bitdepth not in (1, 2, 4, 8): raise ProtocolError( "with palette, bitdepth must be 1, 2, 4, or 8") if transparent is not None: raise ProtocolError("transparent and palette not compatible") if alpha: raise ProtocolError("alpha and palette not compatible") if greyscale: raise ProtocolError("greyscale and palette not compatible") return bitdepth, None # No palette, check for sBIT chunk generation. if greyscale and not alpha: # Single channel, L. (bitdepth,) = bitdepth if bitdepth in (1, 2, 4, 8, 16): return bitdepth, None if bitdepth > 8: targetbitdepth = 16 elif bitdepth == 3: targetbitdepth = 4 else: assert bitdepth in (5, 6, 7) targetbitdepth = 8 return targetbitdepth, [(bitdepth, targetbitdepth)] assert alpha or not greyscale depth_set = tuple(set(bitdepth)) if depth_set in [(8,), (16,)]: # No sBIT required. (bitdepth, ) = depth_set return bitdepth, None targetbitdepth = (8, 16)[max(bitdepth) > 8] return targetbitdepth, [(b, targetbitdepth) for b in bitdepth]
def check_bitdepth_rescale( palette, bitdepth, transparent, alpha, greyscale)
Returns (bitdepth, rescale) pair.
2.898846
2.905487
0.997714
# Currently, with no max_length parameter to decompress, # this routine will do one yield per IDAT chunk: Not very # incremental. d = zlib.decompressobj() # Each IDAT chunk is passed to the decompressor, then any # remaining state is decompressed out. for data in data_blocks: # :todo: add a max_length argument here to limit output size. yield bytearray(d.decompress(data)) yield bytearray(d.flush())
def decompress(data_blocks)
`data_blocks` should be an iterable that yields the compressed data (from the ``IDAT`` chunks). This yields decompressed byte strings.
11.327638
10.320704
1.097564
if bitdepth not in (1, 2, 4, 8, 16): raise FormatError("invalid bit depth %d" % bitdepth) if colortype not in (0, 2, 3, 4, 6): raise FormatError("invalid colour type %d" % colortype) # Check indexed (palettized) images have 8 or fewer bits # per pixel; check only indexed or greyscale images have # fewer than 8 bits per pixel. if colortype & 1 and bitdepth > 8: raise FormatError( "Indexed images (colour type %d) cannot" " have bitdepth > 8 (bit depth %d)." " See http://www.w3.org/TR/2003/REC-PNG-20031110/#table111 ." % (bitdepth, colortype)) if bitdepth < 8 and colortype not in (0, 3): raise FormatError( "Illegal combination of bit depth (%d)" " and colour type (%d)." " See http://www.w3.org/TR/2003/REC-PNG-20031110/#table111 ." % (bitdepth, colortype))
def check_bitdepth_colortype(bitdepth, colortype)
Check that `bitdepth` and `colortype` are both valid, and specified in a valid combination. Returns None if valid; raises an exception if not.
2.708316
2.789125
0.971027
try: is_integer = int(x) == x except (TypeError, ValueError): return False return is_integer and x >= 0
def is_natural(x)
A non-negative integer.
3.162153
3.178486
0.994861
ai = 0 # Loops starts at index fu. Observe that the initial part # of the result is already filled in correctly with # scanline. for i in range(filter_unit, len(result)): x = scanline[i] a = result[ai] result[i] = (x + a) & 0xff ai += 1
def undo_filter_sub(filter_unit, scanline, previous, result)
Undo sub filter.
7.730202
7.585195
1.019117
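A small worked example of undoing the Sub filter for 1-byte pixels (filter_unit = 1): each reconstructed byte is the filtered byte plus the reconstructed byte one pixel to the left, modulo 256.

def undo_sub(filter_unit, scanline):
    # Recon(x) = Filt(x) + Recon(x - bpp), modulo 256.
    result = bytearray(scanline)
    for i in range(filter_unit, len(result)):
        result[i] = (result[i] + result[i - filter_unit]) & 0xff
    return result

print(list(undo_sub(1, bytearray([10, 5, 5, 250]))))  # [10, 15, 20, 14]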
for i in range(len(result)): x = scanline[i] b = previous[i] result[i] = (x + b) & 0xff
def undo_filter_up(filter_unit, scanline, previous, result)
Undo up filter.
3.334425
3.262263
1.02212
ai = -filter_unit for i in range(len(result)): x = scanline[i] if ai < 0: a = 0 else: a = result[ai] b = previous[i] result[i] = (x + ((a + b) >> 1)) & 0xff ai += 1
def undo_filter_average(filter_unit, scanline, previous, result)
Undo average filter.
3.350786
3.274351
1.023344
# Also used for ci. ai = -filter_unit for i in range(len(result)): x = scanline[i] if ai < 0: a = c = 0 else: a = result[ai] c = previous[ai] b = previous[i] p = a + b - c pa = abs(p - a) pb = abs(p - b) pc = abs(p - c) if pa <= pb and pa <= pc: pr = a elif pb <= pc: pr = b else: pr = c result[i] = (x + pr) & 0xff ai += 1
def undo_filter_paeth(filter_unit, scanline, previous, result)
Undo Paeth filter.
2.70942
2.638993
1.026687
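The heart of the Paeth filter is the predictor below, shown standalone: it picks whichever of the left, above, or upper-left neighbours is closest to a + b - c.

def paeth_predictor(a, b, c):
    # a = left, b = above, c = upper-left.
    p = a + b - c
    pa, pb, pc = abs(p - a), abs(p - b), abs(p - c)
    if pa <= pb and pa <= pc:
        return a
    elif pb <= pc:
        return b
    return c

print(paeth_predictor(100, 120, 110))  # 110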
# First there is a Python3 issue. try: stdout = sys.stdout.buffer except AttributeError: # Probably Python 2, where bytes are strings. stdout = sys.stdout # On Windows the C runtime file orientation needs changing. if sys.platform == "win32": import msvcrt import os msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) return stdout
def binary_stdout()
A sys.stdout that accepts bytes.
5.605894
5.073099
1.105024
# Values per row vpr = self.width * self.planes def check_rows(rows): for i, row in enumerate(rows): try: wrong_length = len(row) != vpr except TypeError: # When using an itertools.ichain object or # other generator not supporting __len__, # we set this to False to skip the check. wrong_length = False if wrong_length: # Note: row numbers start at 0. raise ProtocolError( "Expected %d values but got %d value, in row %d" % (vpr, len(row), i)) yield row if self.interlace: fmt = 'BH'[self.bitdepth > 8] a = array(fmt, itertools.chain(*check_rows(rows))) return self.write_array(outfile, a) nrows = self.write_passes(outfile, check_rows(rows)) if nrows != self.height: raise ProtocolError( "rows supplied (%d) does not match height (%d)" % (nrows, self.height))
def write(self, outfile, rows)
Write a PNG image to the output file. `rows` should be an iterable that yields each row (each row is a sequence of values). The rows should be the rows of the original image, so there should be ``self.height`` rows of ``self.width * self.planes`` values. If `interlace` is specified (when creating the instance), then an interlaced PNG file will be written. Supply the rows in the normal image order; the interlacing is carried out internally. .. note :: Interlacing requires the entire image to be in working memory.
5.553526
4.838789
1.14771
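A sketch of typical use with an RGB image, assuming the module is importable as `png`; each row must carry width * planes values (here 3 * 3 = 9):

    import png  # assumed module name

    w = png.Writer(width=3, height=2, greyscale=False, bitdepth=8)
    rows = [
        [255, 0, 0,   0, 255, 0,   0, 0, 255],        # red, green, blue
        [0, 0, 0,   128, 128, 128,   255, 255, 255],  # black, grey, white
    ]
    with open('tiny.png', 'wb') as f:
        w.write(f, rows)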
# Ensure rows are scaled (to 4-/8-/16-bit), # and packed into bytes. if self.rescale: rows = rescale_rows(rows, self.rescale) if self.bitdepth < 8: rows = pack_rows(rows, self.bitdepth) elif self.bitdepth == 16: rows = unpack_rows(rows) return self.write_packed(outfile, rows)
def write_passes(self, outfile, rows)
Write a PNG image to the output file. Most users are expected to find the :meth:`write` or :meth:`write_array` method more convenient. The rows should be given to this method in the order that they appear in the output file. For straightlaced images, this is the usual top to bottom ordering. For interlaced images the rows should have been interlaced before passing them to this function. `rows` should be an iterable that yields each row (each row being a sequence of values).
4.729245
4.713798
1.003277
self.write_preamble(outfile) # http://www.w3.org/TR/PNG/#11IDAT if self.compression is not None: compressor = zlib.compressobj(self.compression) else: compressor = zlib.compressobj() # data accumulates bytes to be compressed for the IDAT chunk; # it's compressed when sufficiently large. data = bytearray() for i, row in enumerate(rows): # Add "None" filter type. # Currently, it's essential that this filter type be used # for every scanline as # we do not mark the first row of a reduced pass image; # that means we could accidentally compute # the wrong filtered scanline if we used # "up", "average", or "paeth" on such a line. data.append(0) data.extend(row) if len(data) > self.chunk_limit: # :todo: bytes() only necessary in Python 2 compressed = compressor.compress(bytes(data)) if len(compressed): write_chunk(outfile, b'IDAT', compressed) data = bytearray() compressed = compressor.compress(bytes(data)) flushed = compressor.flush() if len(compressed) or len(flushed): write_chunk(outfile, b'IDAT', compressed + flushed) # http://www.w3.org/TR/PNG/#11IEND write_chunk(outfile, b'IEND') return i + 1
def write_packed(self, outfile, rows)
Write PNG file to `outfile`. `rows` should be an iterator that yields each packed row; a packed row being a sequence of packed bytes. The rows have a filter byte prefixed and are then compressed into one or more IDAT chunks. They are not processed any further, so if bitdepth is other than 1, 2, 4, 8, 16, the pixel values should have been scaled before passing them to this method. This method does work for interlaced images but it is best avoided. For interlaced images, the rows should be presented in the order that they appear in the file.
4.73825
4.507591
1.051171
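The write_chunk helper used above is not shown here; the on-disk layout it must produce is fixed by the PNG spec (4-byte big-endian length, 4-byte type, data, CRC-32 over type + data). A stand-alone sketch of that layout, not the module's own implementation:

    import struct
    import zlib

    def emit_chunk(outfile, tag, data=b''):
        outfile.write(struct.pack('!I', len(data)))   # length counts data only
        outfile.write(tag)                            # 4-byte chunk type
        outfile.write(data)
        checksum = zlib.crc32(data, zlib.crc32(tag)) & 0xffffffff
        outfile.write(struct.pack('!I', checksum))    # CRC over type + data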
if self.interlace: if type(pixels) != array: # Coerce to array type fmt = 'BH'[self.bitdepth > 8] pixels = array(fmt, pixels) self.write_passes(outfile, self.array_scanlines_interlace(pixels)) else: self.write_passes(outfile, self.array_scanlines(pixels))
def write_array(self, outfile, pixels)
Write an array holding all the image values as a PNG file to the output file. See also the :meth:`write` method.
4.990457
4.803427
1.038937
# Values per row vpr = self.width * self.planes stop = 0 for y in range(self.height): start = stop stop = start + vpr yield pixels[start:stop]
def array_scanlines(self, pixels)
Generates rows (each a sequence of values) from a single array of values.
5.373328
4.974947
1.080077
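The slicing is straightforward; the same idea by hand for a hypothetical 3x2 greyscale image (planes = 1, so vpr = 3):

    pixels = bytearray([1, 2, 3, 4, 5, 6])   # flat array of values
    width, height, planes = 3, 2, 1
    vpr = width * planes
    rows = [pixels[i:i + vpr] for i in range(0, height * vpr, vpr)]
    assert rows == [bytearray([1, 2, 3]), bytearray([4, 5, 6])]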
# http://www.w3.org/TR/PNG/#8InterlaceMethods # Array type. fmt = 'BH'[self.bitdepth > 8] # Values per row vpr = self.width * self.planes # Each iteration generates a scanline starting at (x, y) # and consisting of every xstep'th pixel. for lines in adam7_generate(self.width, self.height): for x, y, xstep in lines: # Pixels per row (of reduced image) ppr = int(math.ceil((self.width - x) / float(xstep))) # Values per row (of reduced image) reduced_row_len = ppr * self.planes if xstep == 1: # Easy case: line is a simple slice. offset = y * vpr yield pixels[offset: offset + vpr] continue # We have to step by xstep, # which we can do one plane at a time # using the step in Python slices. row = array(fmt) # There's no easier way to set the length of an array row.extend(pixels[0:reduced_row_len]) offset = y * vpr + x * self.planes end_offset = (y + 1) * vpr skip = self.planes * xstep for i in range(self.planes): row[i::self.planes] = \ pixels[offset + i: end_offset: skip] yield row
def array_scanlines_interlace(self, pixels)
Generator for interlaced scanlines from an array. `pixels` is the full source image as a single array of values. The generator yields each scanline of the reduced passes in turn, each scanline being a sequence of values.
5.691334
5.576667
1.020562
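For reference, the Adam7 geometry that adam7_generate walks: each pass has an (x, y) start and an (xstep, ystep) stride, and the seven passes together cover every pixel exactly once. A small sketch (the pass table is the standard one from the PNG spec; pass_sizes is an illustrative helper, not part of the module):

    import math

    ADAM7_PASSES = [
        (0, 0, 8, 8), (4, 0, 8, 8), (0, 4, 4, 8), (2, 0, 4, 4),
        (0, 2, 2, 4), (1, 0, 2, 2), (0, 1, 1, 2),
    ]

    def pass_sizes(width, height):
        # Pixels contributed by each reduced pass for a given image size.
        for xs, ys, xstep, ystep in ADAM7_PASSES:
            cols = int(math.ceil(max(width - xs, 0) / float(xstep)))
            rows = int(math.ceil(max(height - ys, 0) / float(ystep)))
            yield cols * rows

    assert sum(pass_sizes(8, 8)) == 64   # every pixel of an 8x8 image appears once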
w = Writer(**self.info) with open(file, 'wb') as fd: w.write(fd, self.rows)
def save(self, file)
Save the image to the named *file*. See `.write()` if you already have an open file object. In general, you can only call this method once; after it has been called the first time the PNG image is written, the source data will have been streamed, and cannot be streamed again.
8.073407
8.311494
0.971355
w = Writer(**self.info) w.write(file, self.rows)
def write(self, file)
Write the image to the open file object. See `.save()` if you have a filename. In general, you can only call this method once; after it has been called the first time the PNG image is written, the source data will have been streamed, and cannot be streamed again.
16.045704
17.060797
0.940501
self.validate_signature() # http://www.w3.org/TR/PNG/#5Chunk-layout if not self.atchunk: self.atchunk = self._chunk_len_type() if not self.atchunk: raise ChunkError("No more chunks.") length, type = self.atchunk self.atchunk = None data = self.file.read(length) if len(data) != length: raise ChunkError( 'Chunk %s too short for required %i octets.' % (type, length)) checksum = self.file.read(4) if len(checksum) != 4: raise ChunkError('Chunk %s too short for checksum.' % type) verify = zlib.crc32(type) verify = zlib.crc32(data, verify) # Whether the output from zlib.crc32 is signed or not varies # according to hideous implementation details, see # http://bugs.python.org/issue1202 . # We coerce it to be positive here (in a way which works on # Python 2.3 and older). verify &= 2**32 - 1 verify = struct.pack('!I', verify) if checksum != verify: (a, ) = struct.unpack('!I', checksum) (b, ) = struct.unpack('!I', verify) message = ("Checksum error in %s chunk: 0x%08X != 0x%08X." % (type.decode('ascii'), a, b)) if lenient: warnings.warn(message, RuntimeWarning) else: raise ChunkError(message) return type, data
def chunk(self, lenient=False)
Read the next PNG chunk from the input file; returns a (*type*, *data*) tuple. *type* is the chunk's type as a byte string (all PNG chunk types are 4 bytes long). *data* is the chunk's data content, as a byte string. If the optional `lenient` argument evaluates to `True`, checksum failures will raise warnings rather than exceptions.
3.475635
3.180253
1.09288
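The CRC check above can be reproduced by hand on a hand-built IHDR chunk; the mask keeps zlib.crc32 unsigned across Python versions, as the comment in the code notes:

    import struct
    import zlib

    tag = b'IHDR'
    data = struct.pack('!IIBBBBB', 1, 1, 8, 0, 0, 0, 0)   # 1x1, 8-bit greyscale header
    stored = struct.pack('!I', zlib.crc32(data, zlib.crc32(tag)) & 0xffffffff)

    verify = zlib.crc32(tag)
    verify = zlib.crc32(data, verify)
    verify &= 2**32 - 1
    assert struct.pack('!I', verify) == stored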
while True: t, v = self.chunk() yield t, v if t == b'IEND': break
def chunks(self)
Return an iterator that will yield each chunk as a (*chunktype*, *content*) pair.
10.64927
7.14708
1.490017
# :todo: Would it be better to update scanline in place? result = scanline if filter_type == 0: return result if filter_type not in (1, 2, 3, 4): raise FormatError( 'Invalid PNG Filter Type. ' 'See http://www.w3.org/TR/2003/REC-PNG-20031110/#9Filters .') # Filter unit. The stride from one pixel to the corresponding # byte from the previous pixel. Normally this is the pixel # size in bytes, but when this is smaller than 1, the previous # byte is used instead. fu = max(1, self.psize) # For the first line of a pass, synthesize a dummy previous # line. An alternative approach would be to observe that on the # first line 'up' is the same as 'null', 'paeth' is the same # as 'sub', with only 'average' requiring any special case. if not previous: previous = bytearray([0] * len(scanline)) # Call appropriate filter algorithm. Note that 0 has already # been dealt with. fn = (None, undo_filter_sub, undo_filter_up, undo_filter_average, undo_filter_paeth)[filter_type] fn(fu, scanline, previous, result) return result
def undo_filter(self, filter_type, scanline, previous)
Undo the filter for a scanline. `scanline` is a sequence of bytes that does not include the initial filter type byte. `previous` is the decoded previous scanline (for straightlaced images this is the previous pixel row, but for interlaced images, it is the previous scanline in the reduced image, which in general is not the previous pixel row in the final image). When there is no previous scanline (the first row of a straightlaced image, or the first row in one of the passes in an interlaced image), then this argument should be ``None``. The scanline will have the effects of filtering removed; the result will be returned as a fresh sequence of bytes.
6.213413
6.324096
0.982498
# Values per row (of the target image) vpr = self.width * self.planes # Values per image vpi = vpr * self.height # Interleaving writes to the output array randomly # (well, not quite), so the entire output array must be in memory. # Make a result array, and make it big enough. if self.bitdepth > 8: a = array('H', [0] * vpi) else: a = bytearray([0] * vpi) source_offset = 0 for lines in adam7_generate(self.width, self.height): # The previous (reconstructed) scanline. # `None` at the beginning of a pass # to indicate that there is no previous line. recon = None for x, y, xstep in lines: # Pixels per row (reduced pass image) ppr = int(math.ceil((self.width - x) / float(xstep))) # Row size in bytes for this pass. row_size = int(math.ceil(self.psize * ppr)) filter_type = raw[source_offset] source_offset += 1 scanline = raw[source_offset: source_offset + row_size] source_offset += row_size recon = self.undo_filter(filter_type, scanline, recon) # Convert so that there is one element per pixel value flat = self._bytes_to_values(recon, width=ppr) if xstep == 1: assert x == 0 offset = y * vpr a[offset: offset + vpr] = flat else: offset = y * vpr + x * self.planes end_offset = (y + 1) * vpr skip = self.planes * xstep for i in range(self.planes): a[offset + i: end_offset: skip] = \ flat[i:: self.planes] return a
def _deinterlace(self, raw)
Read raw pixel data, undo filters, deinterlace, and flatten. Return a single array of values.
5.163463
4.919772
1.049533
if self.bitdepth == 8: return bytearray(bs) if self.bitdepth == 16: return array('H', struct.unpack('!%dH' % (len(bs) // 2), bs)) assert self.bitdepth < 8 if width is None: width = self.width # Samples per byte spb = 8 // self.bitdepth out = bytearray() mask = 2**self.bitdepth - 1 shifts = [self.bitdepth * i for i in reversed(list(range(spb)))] for o in bs: out.extend([mask & (o >> i) for i in shifts]) return out[:width]
def _bytes_to_values(self, bs, width=None)
Convert a packed row of bytes into a row of values. Result will be a freshly allocated object, not shared with the argument.
3.236434
3.228019
1.002607
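The shift-and-mask unpacking used for sub-byte depths, done by hand for a single byte at bitdepth 2 (four samples per byte, most significant sample first):

    bitdepth = 2
    spb = 8 // bitdepth                                    # 4 samples per byte
    mask = 2**bitdepth - 1
    shifts = [bitdepth * i for i in reversed(range(spb))]  # [6, 4, 2, 0]
    byte = 0b11100100                                      # packs the samples 3, 2, 1, 0
    assert [mask & (byte >> s) for s in shifts] == [3, 2, 1, 0]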
# length of row, in bytes rb = self.row_bytes a = bytearray() # The previous (reconstructed) scanline. # None indicates first line of image. recon = None for some_bytes in byte_blocks: a.extend(some_bytes) while len(a) >= rb + 1: filter_type = a[0] scanline = a[1: rb + 1] del a[: rb + 1] recon = self.undo_filter(filter_type, scanline, recon) yield recon if len(a) != 0: # :file:format We get here with a file format error: # when the available bytes (after decompressing) do not # pack into exact rows. raise FormatError('Wrong size for decompressed IDAT chunk.') assert len(a) == 0
def _iter_straight_packed(self, byte_blocks)
Iterator that undoes the effect of filtering; yields each row as a sequence of packed bytes. Assumes input is straightlaced. `byte_blocks` should be an iterable that yields the raw bytes in blocks of arbitrary size.
7.794652
7.373907
1.057059
self.validate_signature() while True: if not self.atchunk: self.atchunk = self._chunk_len_type() if self.atchunk is None: raise FormatError('This PNG file has no IDAT chunks.') if self.atchunk[1] == b'IDAT': return self.process_chunk(lenient=lenient)
def preamble(self, lenient=False)
Extract the image metadata by reading the initial part of the PNG file up to the start of the ``IDAT`` chunk. All the chunks that precede the ``IDAT`` chunk are read and either processed for metadata or discarded. If the optional `lenient` argument evaluates to `True`, checksum failures will raise warnings rather than exceptions.
7.142844
5.767161
1.238537
x = self.file.read(8) if not x: return None if len(x) != 8: raise FormatError( 'End of file whilst reading chunk length and type.') length, type = struct.unpack('!I4s', x) if length > 2 ** 31 - 1: raise FormatError('Chunk %s is too large: %d.' % (type, length)) # Check that all bytes are in valid ASCII range. # https://www.w3.org/TR/2003/REC-PNG-20031110/#5Chunk-layout type_bytes = set(bytearray(type)) if not(type_bytes <= set(range(65, 91)) | set(range(97, 123))): raise FormatError( 'Chunk %r has invalid Chunk Type.' % list(type)) return length, type
def _chunk_len_type(self)
Reads just enough of the input to determine the next chunk's length and type; return a (*length*, *type*) pair where *type* is a byte sequence. If there are no more chunks, ``None`` is returned.
3.831939
3.373599
1.135861
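The 8-byte header parsed above, built and unpacked by hand for a 13-byte IHDR chunk; the type bytes must be ASCII letters, which is what the set comparison in the code enforces:

    import struct

    header = struct.pack('!I4s', 13, b'IHDR')
    length, chunk_type = struct.unpack('!I4s', header)
    assert (length, chunk_type) == (13, b'IHDR')
    assert set(bytearray(chunk_type)) <= set(range(65, 91)) | set(range(97, 123))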
type, data = self.chunk(lenient=lenient) method = '_process_' + type.decode('ascii') m = getattr(self, method, None) if m: m(data)
def process_chunk(self, lenient=False)
Process the next chunk and its data. This only processes the following chunk types: ``IHDR``, ``PLTE``, ``bKGD``, ``tRNS``, ``gAMA``, ``sBIT``, ``pHYs``. All other chunk types are ignored. If the optional `lenient` argument evaluates to `True`, checksum failures will raise warnings rather than exceptions.
5.063302
4.682735
1.08127
def iteridat(): while True: type, data = self.chunk(lenient=lenient) if type == b'IEND': # http://www.w3.org/TR/PNG/#11IEND break if type != b'IDAT': continue # type == b'IDAT' # http://www.w3.org/TR/PNG/#11IDAT if self.colormap and not self.plte: warnings.warn("PLTE chunk is required before IDAT chunk") yield data self.preamble(lenient=lenient) raw = decompress(iteridat()) if self.interlace: def rows_from_interlace(): # It's important that this iterator doesn't read # IDAT chunks until it yields the first row. bs = bytearray(itertools.chain(*raw)) arraycode = 'BH'[self.bitdepth > 8] # Like :meth:`group` but # producing an array.array object for each row. values = self._deinterlace(bs) vpr = self.width * self.planes for i in range(0, len(values), vpr): row = array(arraycode, values[i:i+vpr]) yield row rows = rows_from_interlace() else: rows = self._iter_bytes_to_values(self._iter_straight_packed(raw)) info = dict() for attr in 'greyscale alpha planes bitdepth interlace'.split(): info[attr] = getattr(self, attr) info['size'] = (self.width, self.height) for attr in 'gamma transparent background'.split(): a = getattr(self, attr, None) if a is not None: info[attr] = a if getattr(self, 'x_pixels_per_unit', None): info['physical'] = Resolution(self.x_pixels_per_unit, self.y_pixels_per_unit, self.unit_is_meter) if self.plte: info['palette'] = self.palette() return self.width, self.height, rows, info
def read(self, lenient=False)
Read the PNG file and decode it. Returns (`width`, `height`, `rows`, `info`). May use excessive memory. `rows` is a sequence of rows; each row is a sequence of values. If the optional `lenient` argument evaluates to True, checksum failures will raise warnings rather than exceptions.
4.608234
4.354334
1.05831
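A sketch of typical use, assuming the Reader class of this module is importable as `png.Reader` and that 'tiny.png' exists:

    import png  # assumed module name

    r = png.Reader(filename='tiny.png')
    width, height, rows, info = r.read()
    print(width, height, info['bitdepth'], info['greyscale'])
    for row in rows:              # rows is an iterator of per-row value sequences
        print(list(row))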
width, height, pixels, info = self.asDirect() if info['alpha']: raise Error("will not convert image with alpha channel to RGB") if not info['greyscale']: return width, height, pixels, info info['greyscale'] = False info['planes'] = 3 if info['bitdepth'] > 8: def newarray(): return array('H', [0]) else: def newarray(): return bytearray([0]) def iterrgb(): for row in pixels: a = newarray() * 3 * width for i in range(3): a[i::3] = row yield a return width, height, iterrgb(), info
def asRGB(self)
Return image as RGB pixels. RGB colour images are passed through unchanged; greyscales are expanded into RGB triplets (there is a small speed overhead for doing this). An alpha channel in the source image will raise an exception. The return values are as for the :meth:`read` method except that the *info* reflect the returned pixels, not the source image. In particular, for this method ``info['greyscale']`` will be ``False``.
4.69467
4.256019
1.103066
width, height, pixels, info = self.asDirect() if info['alpha'] and not info['greyscale']: return width, height, pixels, info typecode = 'BH'[info['bitdepth'] > 8] maxval = 2**info['bitdepth'] - 1 maxbuffer = struct.pack('=' + typecode, maxval) * 4 * width if info['bitdepth'] > 8: def newarray(): return array('H', maxbuffer) else: def newarray(): return bytearray(maxbuffer) if info['alpha'] and info['greyscale']: # LA to RGBA def convert(): for row in pixels: # Create a fresh target row, then copy L channel # into first three target channels, and A channel # into fourth channel. a = newarray() convert_la_to_rgba(row, a) yield a elif info['greyscale']: # L to RGBA def convert(): for row in pixels: a = newarray() convert_l_to_rgba(row, a) yield a else: assert not info['alpha'] and not info['greyscale'] # RGB to RGBA def convert(): for row in pixels: a = newarray() convert_rgb_to_rgba(row, a) yield a info['alpha'] = True info['greyscale'] = False info['planes'] = 4 return width, height, convert(), info
def asRGBA(self)
Return image as RGBA pixels. Greyscales are expanded into RGB triplets; an alpha channel is synthesized if necessary. The return values are as for the :meth:`read` method except that the *info* reflect the returned pixels, not the source image. In particular, for this method ``info['greyscale']`` will be ``False``, and ``info['alpha']`` will be ``True``.
3.490437
3.420701
1.020387
import re import binascii # Remove all non-hexadecimal digits s = re.sub(br'[^a-fA-F\d]', b'', s) # binascii.unhexlify works in Python 2 and Python 3 (unlike # thing.decode('hex')). return binascii.unhexlify(s)
def _dehex(s)
Liberally convert from hex string to binary string.
7.33954
7.009093
1.047145
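What "liberally" means here: anything that is not a hex digit is stripped before decoding, so whitespace and punctuation are tolerated. A stand-alone equivalent:

    import re
    import binascii

    def dehex(s):
        s = re.sub(br'[^a-fA-F\d]', b'', s)
        return binascii.unhexlify(s)

    # The PNG file signature, written with spaces and a newline:
    assert dehex(b'89 50 4e 47\n0d 0a 1a 0a') == b'\x89PNG\r\n\x1a\n'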