code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
# First check if gzipping is allowed by the global setting if not getattr(settings, 'BAKERY_GZIP', False): return False # Then check if the content type of this particular file is gzippable whitelist = getattr( settings, 'GZIP_CONTENT_TYPES', DEFAULT_GZIP_CONTENT_TYPES ) return mimetypes.guess_type(path)[0] in whitelist
def is_gzippable(self, path)
Returns a boolean indicating if the provided file path is a candidate for gzipping.
4.533026
4.350202
1.042026
logger.debug("Gzipping to {}{}".format(self.fs_name, target_path)) # Write GZIP data to an in-memory buffer data_buffer = six.BytesIO() kwargs = dict( filename=path.basename(target_path), mode='wb', fileobj=data_buffer ) if float(sys.version[:3]) >= 2.7: kwargs['mtime'] = 0 with gzip.GzipFile(**kwargs) as f: f.write(six.binary_type(html)) # Write that buffer out to the filesystem with self.fs.open(smart_text(target_path), 'wb') as outfile: outfile.write(data_buffer.getvalue()) outfile.close()
def gzip_file(self, target_path, html)
Gzips the provided HTML as a companion for the provided path. Intended to take advantage of the peculiarities of Amazon S3's GZIP service. The mtime option, which writes a timestamp to the output file, is set to 0 to keep s3cmd from doing unnecessary uploads caused by timestamp differences.
3.532718
3.466937
1.018974
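To make the in-memory gzip trick above concrete, here is a minimal Python 3 sketch that compresses bytes into a BytesIO buffer with mtime=0 so the output is byte-for-byte reproducible; gzip_html is a hypothetical helper name, not part of bakery.

import gzip
import io

def gzip_html(html_bytes, filename="index.html"):
    # mtime=0 pins the gzip header timestamp so repeated builds of the
    # same content produce identical bytes and sync tools skip re-uploads.
    buffer = io.BytesIO()
    with gzip.GzipFile(filename=filename, mode="wb", fileobj=buffer, mtime=0) as f:
        f.write(html_bytes)
    return buffer.getvalue()

# The same input always yields the same compressed payload.
assert gzip_html(b"<p>hello</p>") == gzip_html(b"<p>hello</p>")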
if self.url: url = self.url % kwargs elif self.pattern_name: try: url = reverse(self.pattern_name, args=args, kwargs=kwargs) except NoReverseMatch: return None else: return None return url
def get_redirect_url(self, *args, **kwargs)
Return the URL redirect to. Keyword arguments from the URL pattern match generating the redirect request are provided as kwargs to this method.
2.385841
2.25999
1.055687
for detail_view in self.detail_views: view = self._get_view(detail_view) view().build_object(self) self._build_extra() self._build_related()
def build(self)
Iterates through the views pointed to by self.detail_views, runs build_object with `self`, and calls _build_extra() and _build_related().
6.660937
2.546853
2.61536
for detail_view in self.detail_views: view = self._get_view(detail_view) view().unbuild_object(self) self._unbuild_extra() # _build_related again to kill the object from RSS etc. self._build_related()
def unbuild(self)
Iterates through the views pointed to by self.detail_views, runs unbuild_object with `self`, and calls _unbuild_extra() and _build_related().
10.804381
5.638133
1.916305
from bakery import tasks from django.contrib.contenttypes.models import ContentType # if obj.save(publish=False) has been passed, we skip everything. if not kwargs.pop('publish', True): super(AutoPublishingBuildableModel, self).save(*args, **kwargs) # Otherwise, for the standard obj.save(), here we go... else: # First figure out if the record is an addition, or an edit of # a preexisting record. try: preexisting = self.__class__.objects.get(pk=self.pk) except self.__class__.DoesNotExist: preexisting = None # If this is an addition... if not preexisting: # We will publish if the publication status allows it if self.get_publication_status(): action = 'publish' # Otherwise we will do nothing else: action = None # If this is an edit... else: # If it's being unpublished... if not self.get_publication_status() and \ preexisting.get_publication_status(): action = 'unpublish' # If it's being published... elif self.get_publication_status(): action = 'publish' # If it's remaining unpublished... else: action = None # Now, no matter what, save it normally inside of a dedicated # database transaction so that we are sure that the save will # be complete before we trigger any task with transaction.atomic(): super(AutoPublishingBuildableModel, self).save(*args, **kwargs) # Finally, depending on the action, fire off a task ct = ContentType.objects.get_for_model(self.__class__) if action == 'publish': tasks.publish_object.delay(ct.pk, self.pk) elif action == 'unpublish': tasks.unpublish_object.delay(ct.pk, self.pk)
def save(self, *args, **kwargs)
A custom save that publishes or unpublishes the object where appropriate. Save with keyword argument obj.save(publish=False) to skip the process.
3.698284
3.513919
1.052467
from bakery import tasks from django.contrib.contenttypes.models import ContentType # if obj.delete(unpublish=False) has been passed, we skip the task. unpublish = kwargs.pop('unpublish', True) # Delete it from the database super(AutoPublishingBuildableModel, self).delete(*args, **kwargs) if unpublish: ct = ContentType.objects.get_for_model(self.__class__) tasks.unpublish_object.delay(ct.pk, self.pk)
def delete(self, *args, **kwargs)
Triggers a task that will unpublish the object after it is deleted. Call with keyword argument obj.delete(unpublish=False) to skip it.
4.670563
3.499438
1.334661
logger.info("Build started") # Set options self.set_options(*args, **options) # Get the build directory ready if not options.get("keep_build_dir"): self.init_build_dir() # Build up static files if not options.get("skip_static"): self.build_static() # Build the media directory if not options.get("skip_media"): self.build_media() # Build views self.build_views() # Close out logger.info("Build finished")
def handle(self, *args, **options)
Making it happen: configure options, prepare the build directory, then build the static files, media and views.
3.724975
3.591064
1.03729
self.verbosity = int(options.get('verbosity', 1)) # Figure out what build directory to use if options.get("build_dir"): self.build_dir = options.get("build_dir") settings.BUILD_DIR = self.build_dir else: if not hasattr(settings, 'BUILD_DIR'): raise CommandError(self.build_unconfig_msg) self.build_dir = settings.BUILD_DIR # Get the datatypes right so fs will be happy self.build_dir = smart_text(self.build_dir) self.static_root = smart_text(settings.STATIC_ROOT) self.media_root = smart_text(settings.MEDIA_ROOT) # Connect the BUILD_DIR with our filesystem backend self.app = apps.get_app_config("bakery") self.fs = self.app.filesystem self.fs_name = self.app.filesystem_name # If the build dir doesn't exist make it if not self.fs.exists(self.build_dir): self.fs.makedirs(self.build_dir) # Figure out what views we'll be using if options.get('view_list'): self.view_list = options['view_list'] else: if not hasattr(settings, 'BAKERY_VIEWS'): raise CommandError(self.views_unconfig_msg) self.view_list = settings.BAKERY_VIEWS # Are we pooling? self.pooling = options.get('pooling')
def set_options(self, *args, **options)
Configure a few global options before things get going.
2.911044
2.864971
1.016082
# Destroy the build directory, if it exists logger.debug("Initializing %s" % self.build_dir) if self.verbosity > 1: self.stdout.write("Initializing build directory") if self.fs.exists(self.build_dir): self.fs.removetree(self.build_dir) # Then recreate it from scratch self.fs.makedirs(self.build_dir)
def init_build_dir(self)
Clear out the build directory and create a new one.
3.445897
3.350342
1.028521
logger.debug("Building static directory") if self.verbosity > 1: self.stdout.write("Building static directory") management.call_command( "collectstatic", interactive=False, verbosity=0 ) # Set the target directory inside the filesystem. target_dir = path.join( self.build_dir, settings.STATIC_URL.lstrip('/') ) target_dir = smart_text(target_dir) if os.path.exists(self.static_root) and settings.STATIC_URL: if getattr(settings, 'BAKERY_GZIP', False): self.copytree_and_gzip(self.static_root, target_dir) # if gzip isn't enabled, just copy the tree straight over else: logger.debug("Copying {}{} to {}{}".format("osfs://", self.static_root, self.fs_name, target_dir)) copy.copy_dir("osfs:///", self.static_root, self.fs, target_dir) # If they exist in the static directory, copy the robots.txt # and favicon.ico files down to the root so they will work # on the live website. robots_src = path.join(target_dir, 'robots.txt') if self.fs.exists(robots_src): robots_target = path.join(self.build_dir, 'robots.txt') logger.debug("Copying {}{} to {}{}".format(self.fs_name, robots_src, self.fs_name, robots_target)) self.fs.copy(robots_src, robots_target) favicon_src = path.join(target_dir, 'favicon.ico') if self.fs.exists(favicon_src): favicon_target = path.join(self.build_dir, 'favicon.ico') logger.debug("Copying {}{} to {}{}".format(self.fs_name, favicon_src, self.fs_name, favicon_target)) self.fs.copy(favicon_src, favicon_target)
def build_static(self, *args, **options)
Builds the static files directory as well as robots.txt and favicon.ico
2.732702
2.658718
1.027827
logger.debug("Building media directory") if self.verbosity > 1: self.stdout.write("Building media directory") if os.path.exists(self.media_root) and settings.MEDIA_URL: target_dir = path.join(self.fs_name, self.build_dir, settings.MEDIA_URL.lstrip('/')) logger.debug("Copying {}{} to {}{}".format("osfs://", self.media_root, self.fs_name, target_dir)) copy.copy_dir("osfs:///", smart_text(self.media_root), self.fs, smart_text(target_dir))
def build_media(self)
Build the media files.
4.638543
4.472936
1.037024
# Then loop through and run them all for view_str in self.view_list: logger.debug("Building %s" % view_str) if self.verbosity > 1: self.stdout.write("Building %s" % view_str) view = get_callable(view_str) self.get_view_instance(view).build_method()
def build_views(self)
Bake out specified buildable views.
4.375302
4.229039
1.034585
# Figure out what we're building... build_list = [] # Walk through the source directory... for (dirpath, dirnames, filenames) in os.walk(source_dir): for f in filenames: # Figure out what is going where source_path = os.path.join(dirpath, f) rel_path = os.path.relpath(dirpath, source_dir) target_path = os.path.join(target_dir, rel_path, f) # Add it to our list to build build_list.append((source_path, target_path)) logger.debug("Gzipping {} files".format(len(build_list))) # Build em all if not getattr(self, 'pooling', False): [self.copyfile_and_gzip(*u) for u in build_list] else: cpu_count = multiprocessing.cpu_count() logger.debug("Pooling build on {} CPUs".format(cpu_count)) pool = ThreadPool(processes=cpu_count) pool.map(self.pooled_copyfile_and_gzip, build_list)
def copytree_and_gzip(self, source_dir, target_dir)
Copies the provided source directory to the provided target directory. Gzips JavaScript, CSS, HTML and other eligible files along the way.
2.821781
2.911229
0.969275
# And then where we want to copy it to. target_dir = path.dirname(target_path) if not self.fs.exists(target_dir): try: self.fs.makedirs(target_dir) except OSError: pass # determine the mimetype of the file guess = mimetypes.guess_type(source_path) content_type = guess[0] encoding = guess[1] # If it isn't a file we want to gzip... if content_type not in self.gzip_file_match: # just copy it to the target. logger.debug("Copying {}{} to {}{} because its filetype isn't on the whitelist".format( "osfs://", source_path, self.fs_name, target_path )) copy.copy_file("osfs:///", smart_text(source_path), self.fs, smart_text(target_path)) # If the file is already gzipped elif encoding == 'gzip': logger.debug("Copying {}{} to {}{} because it's already gzipped".format( "osfs://", source_path, self.fs_name, target_path )) copy.copy_file("osfs:///", smart_text(source_path), self.fs, smart_text(target_path)) # If it is one we want to gzip... else: # ... let the world know ... logger.debug("Gzipping {}{} to {}{}".format( "osfs://", source_path, self.fs_name, target_path )) # Open up the source file from the OS with open(source_path, 'rb') as source_file: # Write GZIP data to an in-memory buffer data_buffer = six.BytesIO() kwargs = dict( filename=path.basename(target_path), mode='wb', fileobj=data_buffer ) if float(sys.version[:3]) >= 2.7: kwargs['mtime'] = 0 with gzip.GzipFile(**kwargs) as f: f.write(six.binary_type(source_file.read())) # Write that buffer out to the filesystem with self.fs.open(smart_text(target_path), 'wb') as outfile: outfile.write(data_buffer.getvalue()) outfile.close()
def copyfile_and_gzip(self, source_path, target_path)
Copies the provided file to the provided target directory. Gzips JavaScript, CSS, HTML and other eligible files along the way.
2.902219
2.950756
0.983551
# Counts and such we can use to keep tabs on this as they progress self.uploaded_files = 0 self.uploaded_file_list = [] self.deleted_files = 0 self.deleted_file_list = [] self.start_time = time.time() # Configure all the options we're going to use self.set_options(options) # Initialize the boto connection logger.debug("Connecting to s3") if self.verbosity > 2: self.stdout.write("Connecting to s3") self.s3_client, self.s3_resource = get_s3_client() # Grab our bucket logger.debug("Retrieving bucket {}".format(self.aws_bucket_name)) if self.verbosity > 2: self.stdout.write("Retrieving bucket {}".format(self.aws_bucket_name)) self.bucket = self.s3_resource.Bucket(self.aws_bucket_name) # Get a list of all keys in our s3 bucket ... # ...unless this is the case where we're blindly pushing if self.force_publish and self.no_delete: self.blind_upload = True logger.debug("Skipping object retrieval. We won't need to because we're blindly uploading everything.") self.s3_obj_dict = {} else: self.blind_upload = False logger.debug("Retrieving objects now published in bucket") if self.verbosity > 2: self.stdout.write("Retrieving objects now published in bucket") self.s3_obj_dict = {} self.s3_obj_dict = self.get_bucket_file_list() # Get a list of all the local files in our build directory logger.debug("Retrieving files built locally") if self.verbosity > 2: self.stdout.write("Retrieving files built locally") self.local_file_list = self.get_local_file_list() # Sync local files with s3 bucket logger.debug("Syncing local files with bucket") if self.verbosity > 2: self.stdout.write("Syncing local files with bucket") self.sync_with_s3() # Delete anything that's left in our keys dict if not self.dry_run and not self.no_delete: self.deleted_file_list = list(self.s3_obj_dict.keys()) self.deleted_files = len(self.deleted_file_list) if self.deleted_files: logger.debug("Deleting %s keys" % self.deleted_files) if self.verbosity > 0: self.stdout.write("Deleting %s keys" % self.deleted_files) self.batch_delete_s3_objects( self.deleted_file_list, self.aws_bucket_name ) # Run any post publish hooks on the views if not hasattr(settings, 'BAKERY_VIEWS'): raise CommandError(self.views_unconfig_msg) for view_str in settings.BAKERY_VIEWS: view = get_callable(view_str)() if hasattr(view, 'post_publish'): getattr(view, 'post_publish')(self.bucket) # We're finished, print the final output elapsed_time = time.time() - self.start_time msg = "Publish completed, %d uploaded and %d deleted files in %.2f seconds" % ( self.uploaded_files, self.deleted_files, elapsed_time ) logger.info(msg) if self.verbosity > 0: self.stdout.write(msg) if self.dry_run: logger.info("Publish executed with the --dry-run option. No content was changed on S3.") if self.verbosity > 0: self.stdout.write("Publish executed with the --dry-run option. No content was changed on S3.")
def handle(self, *args, **options)
Sync files in the build directory to a specified S3 bucket
2.93516
2.90912
1.008951
self.verbosity = int(options.get('verbosity')) # Will we be gzipping? self.gzip = getattr(settings, 'BAKERY_GZIP', False) # And if so what content types will we be gzipping? self.gzip_content_types = getattr( settings, 'GZIP_CONTENT_TYPES', DEFAULT_GZIP_CONTENT_TYPES ) # What ACL (i.e. security permissions) will we be giving the files on S3? self.acl = getattr(settings, 'DEFAULT_ACL', self.DEFAULT_ACL) # Should we set cache-control headers? self.cache_control = getattr(settings, 'BAKERY_CACHE_CONTROL', {}) # If the user specifies a build directory... if options.get('build_dir'): # ... validate that it is good. if not os.path.exists(options.get('build_dir')): raise CommandError(self.build_missing_msg) # Go ahead and use it self.build_dir = options.get("build_dir") # If the user does not specify a build dir... else: # Check if it is set in settings.py if not hasattr(settings, 'BUILD_DIR'): raise CommandError(self.build_unconfig_msg) # Then make sure it actually exists if not os.path.exists(settings.BUILD_DIR): raise CommandError(self.build_missing_msg) # Go ahead and use it self.build_dir = settings.BUILD_DIR # If the user provides a bucket name, use that. if options.get("aws_bucket_name"): self.aws_bucket_name = options.get("aws_bucket_name") else: # Otherwise try to find it in the settings if not hasattr(settings, 'AWS_BUCKET_NAME'): raise CommandError(self.bucket_unconfig_msg) self.aws_bucket_name = settings.AWS_BUCKET_NAME # The bucket prefix, if it exists self.aws_bucket_prefix = options.get("aws_bucket_prefix") # If the user sets the --force option if options.get('force'): self.force_publish = True else: self.force_publish = False # set the --dry-run option if options.get('dry_run'): self.dry_run = True if self.verbosity > 0: logger.info("Executing with the --dry-run option set.") else: self.dry_run = False self.no_delete = options.get('no_delete') self.no_pooling = options.get('no_pooling')
def set_options(self, options)
Configure all the many options we'll need to make this happen.
2.77672
2.745122
1.011511
logger.debug("Retrieving bucket object list") paginator = self.s3_client.get_paginator('list_objects') options = { 'Bucket': self.aws_bucket_name } if self.aws_bucket_prefix: logger.debug("Adding prefix {} to bucket list as a filter".format(self.aws_bucket_prefix)) options['Prefix'] = self.aws_bucket_prefix page_iterator = paginator.paginate(**options) obj_dict = {} for page in page_iterator: obj_dict.update(get_bucket_page(page)) return obj_dict
def get_bucket_file_list(self)
Little utility method that handles pagination and returns all objects in the given bucket.
2.663989
2.545085
1.046719
file_list = [] for (dirpath, dirnames, filenames) in os.walk(self.build_dir): for fname in filenames: # relative path, to sync with the S3 key local_key = os.path.join( os.path.relpath(dirpath, self.build_dir), fname ) if local_key.startswith('./'): local_key = local_key[2:] file_list.append(local_key) return file_list
def get_local_file_list(self)
Walk the local build directory and create a list of relative paths to the files, suitable for matching against S3 keys.
2.632604
2.454062
1.072754
# Create a list to put all the files we're going to update self.update_list = [] # Figure out which files need to be updated and upload all these files logger.debug("Comparing {} local files with {} bucket files".format( len(self.local_file_list), len(self.s3_obj_dict.keys()) )) if self.no_pooling: [self.compare_local_file(f) for f in self.local_file_list] else: cpu_count = multiprocessing.cpu_count() logger.debug("Pooling local file comparison on {} CPUs".format(cpu_count)) pool = ThreadPool(processes=cpu_count) pool.map(self.compare_local_file, self.local_file_list) logger.debug("Uploading {} new or updated files to bucket".format(len(self.update_list))) if self.no_pooling: [self.upload_to_s3(*u) for u in self.update_list] else: logger.debug("Pooling s3 uploads on {} CPUs".format(cpu_count)) pool = ThreadPool(processes=cpu_count) pool.map(self.pooled_upload_to_s3, self.update_list)
def sync_with_s3(self)
Walk through our self.local_file_list and match each file with the list of keys in the S3 bucket.
2.679834
2.672928
1.002584
with open(filename, 'rb') as f: m = hashlib.md5(f.read()) return m.hexdigest()
def get_md5(self, filename)
Returns the md5 checksum of the provided file name.
2.253101
2.193779
1.027041
# Loop through the file contents ... md5s = [] with open(filename, 'rb') as fp: while True: # Break it into chunks data = fp.read(chunk_size) # Finish when there are no more if not data: break # Generate a md5 hash for each chunk md5s.append(hashlib.md5(data)) # Combine the chunks digests = b"".join(m.digest() for m in md5s) # Generate a new hash using them new_md5 = hashlib.md5(digests) # Create the ETag as Amazon will new_etag = '"%s-%s"' % (new_md5.hexdigest(), len(md5s)) # Trim it down and pass it back for comparison return new_etag.strip('"').strip("'")
def get_multipart_md5(self, filename, chunk_size=8 * 1024 * 1024)
Returns the md5 checksum of the provided file name after breaking it into chunks. This is done to mirror the method used by Amazon S3 after a multipart upload.
4.034317
3.915908
1.030238
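As a standalone illustration of the multipart ETag logic above, the sketch below hashes a file in chunks and reproduces the "md5-of-digests"-"part count" format S3 reports for multipart uploads; multipart_etag is a hypothetical name, and the 8 MB chunk size mirrors the default in the method above.

import hashlib

def multipart_etag(filename, chunk_size=8 * 1024 * 1024):
    # Hash each chunk on its own, then hash the concatenation of the
    # binary digests and append the number of parts.
    part_hashes = []
    with open(filename, "rb") as fp:
        while True:
            data = fp.read(chunk_size)
            if not data:
                break
            part_hashes.append(hashlib.md5(data))
    combined = hashlib.md5(b"".join(h.digest() for h in part_hashes))
    return "{}-{}".format(combined.hexdigest(), len(part_hashes))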
# Where is the file? file_path = os.path.join(self.build_dir, file_key) # If we're in force_publish mode just add it if self.force_publish: self.update_list.append((file_key, file_path)) # And quit now return # Does it exist in our s3 object list? if file_key in self.s3_obj_dict: # Get the md5 stored in Amazon's header s3_md5 = self.s3_obj_dict[file_key].get('ETag').strip('"').strip("'") # If there is a multipart ETag on S3, compare that to our local file after it's chunked up. # We are presuming this file was uploaded in multiple parts. if "-" in s3_md5: local_md5 = self.get_multipart_md5(file_path) # Otherwise, do it straight for the whole file else: local_md5 = self.get_md5(file_path) # If their md5 hexdigests match, do nothing if s3_md5 == local_md5: pass # If they don't match, we want to add it else: logger.debug("{} has changed".format(file_key)) self.update_list.append((file_key, file_path)) # Remove the file from the s3 dict, we don't need it anymore del self.s3_obj_dict[file_key] # If the file doesn't exist, queue it for creation else: logger.debug("{} has been added".format(file_key)) self.update_list.append((file_key, file_path))
def compare_local_file(self, file_key)
Compares a local version of a file with what's already published. If an update is needed, the file's key is added to self.update_list.
3.545638
3.406978
1.040699
extra_args = {'ACL': self.acl} # determine the mimetype of the file guess = mimetypes.guess_type(filename) content_type = guess[0] encoding = guess[1] if content_type: extra_args['ContentType'] = content_type # add the gzip headers, if necessary if (self.gzip and content_type in self.gzip_content_types) or encoding == 'gzip': extra_args['ContentEncoding'] = 'gzip' # add the cache-control headers if necessary if content_type in self.cache_control: extra_args['CacheControl'] = ''.join(( 'max-age=', str(self.cache_control[content_type]) )) # access and write the contents from the file if not self.dry_run: logger.debug("Uploading %s" % filename) if self.verbosity > 0: self.stdout.write("Uploading %s" % filename) s3_obj = self.s3_resource.Object(self.aws_bucket_name, key) s3_obj.upload_file(filename, ExtraArgs=extra_args) # Update counts self.uploaded_files += 1 self.uploaded_file_list.append(filename)
def upload_to_s3(self, key, filename)
Set the content type and gzip headers if applicable and upload the item to S3
2.709645
2.642754
1.025311
try: attr = getattr(self, attname) except AttributeError: return default if callable(attr) or args: args = args[:] if args else [] # Check co_argcount rather than try/excepting the function and # catching the TypeError, because something inside the function # may raise the TypeError. This technique is more accurate. try: code = six.get_function_code(attr) except AttributeError: code = six.get_function_code(attr.__call__) if code.co_argcount == 2 + len(args): # one argument is 'self' args.append(obj) return attr(*args) return attr
def _get_bakery_dynamic_attr(self, attname, obj, args=None, default=None)
Allows subclasses to provide an attribute (say, 'foo') in three different ways: as a fixed class-level property, as a method foo(self), or as a method foo(self, obj). The second argument 'obj' is the "subject" of the current Feed invocation. See the Django Feed documentation for details. This method was shamelessly stolen from the Feed class and extended with the ability to pass additional arguments to subclass methods.
4.334276
4.413666
0.982013
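To illustrate the three lookup styles described in the docstring above, here is a hedged sketch with made-up names (resolve_dynamic_attr, ExampleFeed); it mirrors the resolution order using inspect.signature rather than the co_argcount check used in the original.

import inspect

def resolve_dynamic_attr(instance, attname, obj, default=None):
    # Plain value -> return it; foo(self) -> call with no args;
    # foo(self, obj) -> call with the current object.
    attr = getattr(instance, attname, default)
    if not callable(attr):
        return attr
    params = inspect.signature(attr).parameters
    return attr(obj) if len(params) == 1 else attr()

class ExampleFeed(object):
    title = "Latest entries"          # fixed class-level value

    def link(self):                   # foo(self)
        return "/feed/"

    def description(self, obj):       # foo(self, obj)
        return "Entries for %s" % obj

feed = ExampleFeed()
print(resolve_dynamic_attr(feed, "title", "site"))        # Latest entries
print(resolve_dynamic_attr(feed, "link", "site"))         # /feed/
print(resolve_dynamic_attr(feed, "description", "site"))  # Entries for site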
year = super(BuildableYearArchiveView, self).get_year() fmt = self.get_year_format() return date(int(year), 1, 1).strftime(fmt)
def get_year(self)
Return the year from the database in the format expected by the URL.
5.569639
4.581638
1.215643
self.year = str(dt.year) logger.debug("Building %s" % self.year) self.request = self.create_request(self.get_url()) target_path = self.get_build_path() self.build_file(target_path, self.get_content())
def build_year(self, dt)
Build the page for the provided year.
4.42801
3.762913
1.176751
qs = self.get_dated_queryset() years = self.get_date_list(qs) [self.build_year(dt) for dt in years]
def build_dated_queryset(self)
Build pages for all years in the queryset.
6.491217
4.687826
1.384697
year = super(BuildableMonthArchiveView, self).get_year() month = super(BuildableMonthArchiveView, self).get_month() fmt = self.get_month_format() return date(int(year), int(month), 1).strftime(fmt)
def get_month(self)
Return the month from the database in the format expected by the URL.
3.436751
2.993573
1.148043
qs = self.get_dated_queryset() months = self.get_date_list(qs) [self.build_month(dt) for dt in months]
def build_dated_queryset(self)
Build pages for all months in the queryset.
6.790594
5.256399
1.291872
self.year = str(dt.year) self.month = str(dt.month) logger.debug("Building %s-%s" % (self.year, self.month)) target_path = os.path.split(self.get_build_path())[0] if self.fs.exists(target_path): logger.debug("Removing {}".format(target_path)) self.fs.removetree(target_path)
def unbuild_month(self, dt)
Deletes the directory at self.get_build_path.
2.898975
2.461246
1.177848
year = super(BuildableDayArchiveView, self).get_year() fmt = self.get_year_format() dt = date(int(year), 1, 1) return dt.strftime(fmt)
def get_year(self)
Return the year from the database in the format expected by the URL.
5.710858
4.551948
1.254597
year = super(BuildableDayArchiveView, self).get_year() month = super(BuildableDayArchiveView, self).get_month() fmt = self.get_month_format() dt = date(int(year), int(month), 1) return dt.strftime(fmt)
def get_month(self)
Return the month from the database in the format expected by the URL.
3.61426
3.113543
1.160819
year = super(BuildableDayArchiveView, self).get_year() month = super(BuildableDayArchiveView, self).get_month() day = super(BuildableDayArchiveView, self).get_day() fmt = self.get_day_format() dt = date(int(year), int(month), int(day)) return dt.strftime(fmt)
def get_day(self)
Return the day from the database in the format expected by the URL.
2.811006
2.513987
1.118147
return os.path.join( '/archive', self.get_year(), self.get_month(), self.get_day() )
def get_url(self)
The URL at which the detail page should appear. By default it is /archive/ + the year in self.year_format + the month in self.month_format + the day in self.day_format. An example would be /archive/2016/01/01/.
4.499724
3.430537
1.311668
target_path = path.join(settings.BUILD_DIR, self.get_url().lstrip('/')) if not self.fs.exists(target_path): logger.debug("Creating {}".format(target_path)) self.fs.makedirs(target_path) return os.path.join(target_path, 'index.html')
def get_build_path(self)
Used to determine where to build the page. Override this if you would like your page at a different location. By default it will be built at self.get_url() + "/index.html"
3.223191
2.583098
1.2478
self.month = str(dt.month) self.year = str(dt.year) self.day = str(dt.day) logger.debug("Building %s-%s-%s" % (self.year, self.month, self.day)) self.request = self.create_request(self.get_url()) path = self.get_build_path() self.build_file(path, self.get_content())
def build_day(self, dt)
Build the page for the provided day.
2.998078
2.663666
1.125546
qs = self.get_dated_queryset() days = self.get_date_list(qs, date_type='day') [self.build_day(dt) for dt in days]
def build_dated_queryset(self)
Build pages for all days in the queryset.
6.537938
5.077915
1.287524
session_kwargs = {} if hasattr(settings, 'AWS_ACCESS_KEY_ID'): session_kwargs['aws_access_key_id'] = settings.AWS_ACCESS_KEY_ID if hasattr(settings, 'AWS_SECRET_ACCESS_KEY'): session_kwargs['aws_secret_access_key'] = settings.AWS_SECRET_ACCESS_KEY boto3.setup_default_session(**session_kwargs) s3_kwargs = {} if hasattr(settings, 'AWS_S3_ENDPOINT'): s3_kwargs['endpoint_url'] = settings.AWS_S3_ENDPOINT elif hasattr(settings, 'AWS_S3_HOST'): if hasattr(settings, 'AWS_S3_USE_SSL') and settings.AWS_S3_USE_SSL is False: protocol = "http://" else: protocol = "https://" s3_kwargs['endpoint_url'] = "{}{}".format( protocol, settings.AWS_S3_HOST ) if hasattr(settings, "AWS_REGION"): s3_kwargs['region_name'] = settings.AWS_REGION s3_client = boto3.client('s3', **s3_kwargs) s3_resource = boto3.resource('s3', **s3_kwargs) return s3_client, s3_resource
def get_s3_client()
A DRY place to make sure AWS credentials in settings override environment-based credentials. Boto3 will fall back to: http://boto3.readthedocs.io/en/latest/guide/configuration.html
1.548764
1.516829
1.021054
key_list = page.get('Contents', []) logger.debug("Retrieving page with {} keys".format( len(key_list), )) return dict((k.get('Key'), k) for k in key_list)
def get_bucket_page(page)
Returns all the keys in an S3 bucket paginator page.
4.393278
4.035185
1.088743
logger.debug("Retrieving bucket object list") if not s3_client: s3_client, s3_resource = get_s3_client() obj_dict = {} paginator = s3_client.get_paginator('list_objects') page_iterator = paginator.paginate(Bucket=aws_bucket_name) for page in page_iterator: key_list = page.get('Contents', []) logger.debug("Loading page with {} keys".format(len(key_list))) for obj in key_list: obj_dict[obj.get('Key')] = obj return obj_dict
def get_all_objects_in_bucket( aws_bucket_name, s3_client=None, max_keys=1000 )
Little utility method that handles pagination and returns all objects in the given bucket.
2.134431
2.140934
0.996963
if s3_client is None: s3_client, s3_resource = get_s3_client() key_chunks = [] for i in range(0, len(keys), chunk_size): chunk = [] for key in (list(keys)[i:i+chunk_size]): chunk.append({'Key': key}) key_chunks.append(chunk) for chunk in key_chunks: s3_client.delete_objects( Bucket=aws_bucket_name, Delete={'Objects': chunk} )
def batch_delete_s3_objects( keys, aws_bucket_name, chunk_size=100, s3_client=None )
Utility method that batch deletes objects in the given bucket.
2.100204
2.102268
0.999018
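The chunking step above is easy to check in isolation; this small sketch (chunk_keys is a hypothetical name) shows how a key list is split into delete_objects-sized batches before each request.

def chunk_keys(keys, chunk_size=100):
    # Yield successive batches of {'Key': ...} entries no larger than
    # chunk_size, matching the payload shape delete_objects expects.
    keys = list(keys)
    for i in range(0, len(keys), chunk_size):
        yield [{'Key': key} for key in keys[i:i + chunk_size]]

print(list(chunk_keys(['a', 'b', 'c'], chunk_size=2)))
# [[{'Key': 'a'}, {'Key': 'b'}], [{'Key': 'c'}]]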
if isinstance(p, str): p = string(p) return regex(r'\s*') >> p << regex(r'\s*')
def lexeme(p)
From a parser (or string), make a parser that consumes whitespace on either side.
5.255884
4.095832
1.283227
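Assuming the combinators come from the parsy library (which provides regex, string and the >>/<< sequencing operators used above), a quick usage sketch:

from parsy import regex, string

def lexeme(p):
    # Accept a bare string for convenience, then strip surrounding whitespace.
    if isinstance(p, str):
        p = string(p)
    return regex(r'\s*') >> p << regex(r'\s*')

print(lexeme('(').parse('   (   '))  # '('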
return lexeme(p).optional().map(lambda v: False if v is None else True)
def is_present(p)
Given a parser or string, make a parser that returns True if the parser matches, False otherwise
13.77759
14.270865
0.965435
(result, _) = (self << eof).parse_partial(stream) return result
def parse(self, stream)
Parse a string or list of tokens and return the result or raise a ParseError.
31.281385
20.271044
1.543156
result = self(stream, 0) if result.status: return (result.value, stream[result.index:]) else: raise ParseError(result.expected, stream, result.furthest)
def parse_partial(self, stream)
Parse the longest possible prefix of a given string. Return a tuple of the result and the rest of the string, or raise a ParseError.
6.23097
4.684433
1.330144
items_sep, fields_sep, keys_sep = separators return items_sep.join(fields_sep.join(keys_sep.join(x) for x in sorted(it.items())) for it in array_value)
def extract_key_values(array_value, separators=(';', ',', ':'), **kwargs)
Serialize array of objects with simple key-values
4.587841
4.205057
1.091029
with open(schemafile) as f: return cls(json.load(f))
def from_schemafile(cls, schemafile)
Create a Flatson instance from a schemafile
3.612632
3.453533
1.046069
if name in self._default_serialization_methods: raise ValueError("Can't replace original %s serialization method" % name) self._serialization_methods[name] = serialize_func
def register_serialization_method(self, name, serialize_func)
Register a custom serialization method that can be used via schema configuration
4.583811
5.113591
0.896398
return [self._serialize(f, obj) for f in self.fields]
def flatten(self, obj)
Return a list with the field values
7.083009
5.452821
1.298962
return OrderedDict(zip(self.fieldnames, self.flatten(obj)))
def flatten_dict(self, obj)
Return an OrderedDict preserving the order of keys in fieldnames.
8.879423
4.413944
2.011675
LOGGER.debug('Connection %s closing', self.id) if self.busy: raise ConnectionBusyError(self) with self._lock: if not self.handle.closed: try: self.handle.close() except psycopg2.InterfaceError as error: LOGGER.error('Error closing socket: %s', error)
def close(self)
Close the connection :raises: ConnectionBusyError
4.308752
3.749168
1.149255
if self.handle.isexecuting(): return True elif self.used_by is None: return False return self.used_by() is not None
def busy(self)
Return if the connection is currently executing a query or is locked by a session that still exists. :rtype: bool
7.951653
7.770044
1.023373
LOGGER.debug('Connection %s freeing', self.id) if self.handle.isexecuting(): raise ConnectionBusyError(self) with self._lock: self.used_by = None LOGGER.debug('Connection %s freed', self.id)
def free(self)
Remove the lock on the connection if the connection is not active :raises: ConnectionBusyError
6.246918
4.950298
1.261928
if self.busy: raise ConnectionBusyError(self) with self._lock: self.used_by = weakref.ref(session) LOGGER.debug('Connection %s locked', self.id)
def lock(self, session)
Lock the connection, ensuring that it is not busy and storing a weakref for the session. :param queries.Session session: The session to lock the connection with :raises: ConnectionBusyError
6.705616
5.364222
1.250063
if id(connection) in self.connections: raise ValueError('Connection already exists in pool') if len(self.connections) == self.max_size: LOGGER.warning('Race condition found when adding new connection') try: connection.close() except (psycopg2.Error, psycopg2.Warning) as error: LOGGER.error("Error closing the conn that can't be used: %s", error) raise PoolFullError(self) with self._lock: self.connections[id(connection)] = Connection(connection) LOGGER.debug('Pool %s added connection %s', self.id, id(connection))
def add(self, connection)
Add a new connection to the pool :param connection: The connection to add to the pool :type connection: psycopg2.extensions.connection :raises: PoolFullError
4.01137
4.001751
1.002404
return [c for c in self.connections.values() if c.busy and not c.closed]
def busy_connections(self)
Return a list of active/busy connections :rtype: list
4.929905
5.680995
0.867789
LOGGER.debug('Cleaning the pool') for connection in [self.connections[k] for k in self.connections if self.connections[k].closed]: LOGGER.debug('Removing %s', connection.id) self.remove(connection.handle) if self.idle_duration > self.idle_ttl: self.close() LOGGER.debug('Pool %s cleaned', self.id)
def clean(self)
Clean the pool by removing any closed connections and, if the pool's idle time has exceeded its idle TTL, remove all connections.
5.151305
3.736154
1.378772
for cid in list(self.connections.keys()): self.remove(self.connections[cid].handle) LOGGER.debug('Pool %s closed', self.id)
def close(self)
Close the pool by closing and removing all of the connections
8.066469
5.722838
1.409523
LOGGER.debug('Pool %s freeing connection %s', self.id, id(connection)) try: self.connection_handle(connection).free() except KeyError: raise ConnectionNotFoundError(self.id, id(connection)) if self.idle_connections == list(self.connections.values()): with self._lock: self.idle_start = self.time_method() LOGGER.debug('Pool %s freed connection %s', self.id, id(connection))
def free(self, connection)
Free the connection from use by the session that was using it. :param connection: The connection to free :type connection: psycopg2.extensions.connection :raises: ConnectionNotFoundError
4.480501
4.201571
1.066387
idle = self.idle_connections if idle: connection = idle.pop(0) connection.lock(session) if self.idle_start: with self._lock: self.idle_start = None return connection.handle raise NoIdleConnectionsError(self.id)
def get(self, session)
Return an idle connection and assign the session to the connection :param queries.Session session: The session to assign :rtype: psycopg2.extensions.connection :raises: NoIdleConnectionsError
6.700688
5.512594
1.215524
return [c for c in self.connections.values() if not c.busy and not c.closed]
def idle_connections(self)
Return a list of idle connections :rtype: list
5.079988
5.847279
0.868778
cid = id(connection) try: self.connection_handle(connection).lock(session) except KeyError: raise ConnectionNotFoundError(self.id, cid) else: if self.idle_start: with self._lock: self.idle_start = None LOGGER.debug('Pool %s locked connection %s', self.id, cid)
def lock(self, connection, session)
Explicitly lock the specified connection :type connection: psycopg2.extensions.connection :param connection: The connection to lock :param queries.Session session: The session to hold the lock
5.750189
6.523287
0.881486
cid = id(connection) if cid not in self.connections: raise ConnectionNotFoundError(self.id, cid) self.connection_handle(connection).close() with self._lock: del self.connections[cid] LOGGER.debug('Pool %s removed connection %s', self.id, cid)
def remove(self, connection)
Remove the connection from the pool :param connection: The connection to remove :type connection: psycopg2.extensions.connection :raises: ConnectionNotFoundError :raises: ConnectionBusyError
4.558841
4.086958
1.115461
return { 'connections': { 'busy': len(self.busy_connections), 'closed': len(self.closed_connections), 'executing': len(self.executing_connections), 'idle': len(self.idle_connections), 'locked': len(self.busy_connections) }, 'exceptions': sum([c.exceptions for c in self.connections.values()]), 'executions': sum([c.executions for c in self.connections.values()]), 'full': self.is_full, 'idle': { 'duration': self.idle_duration, 'ttl': self.idle_ttl }, 'max_size': self.max_size }
def report(self)
Return a report about the pool state and configuration. :rtype: dict
2.932977
2.734581
1.07255
with self._lock: for cid in list(self.connections.keys()): if self.connections[cid].executing: raise ConnectionBusyError(cid) if self.connections[cid].locked: self.connections[cid].free() self.connections[cid].close() del self.connections[cid]
def shutdown(self)
Forcefully shutdown the entire pool, closing all non-executing connections. :raises: ConnectionBusyError
3.865802
3.085761
1.252787
with cls._lock: cls._ensure_pool_exists(pid) cls._pools[pid].add(connection)
def add(cls, pid, connection)
Add a new connection and session to a pool. :param str pid: The pool id :type connection: psycopg2.extensions.connection :param connection: The connection to add to the pool
5.563025
6.372959
0.872911
with cls._lock: try: cls._ensure_pool_exists(pid) except KeyError: LOGGER.debug('Pool clean invoked against missing pool %s', pid) return cls._pools[pid].clean() cls._maybe_remove_pool(pid)
def clean(cls, pid)
Clean the specified pool, removing any closed connections or stale locks. :param str pid: The pool id to clean
5.338863
5.922016
0.901528
if pid in cls._pools: raise KeyError('Pool %s already exists' % pid) with cls._lock: LOGGER.debug("Creating Pool: %s (%i/%i)", pid, idle_ttl, max_size) cls._pools[pid] = Pool(pid, idle_ttl, max_size, time_method)
def create(cls, pid, idle_ttl=DEFAULT_IDLE_TTL, max_size=DEFAULT_MAX_SIZE, time_method=None)
Create a new pool, with the ability to pass in values to override the default idle TTL and the default maximum size. A pool's idle TTL defines the amount of time that a pool can be open without any sessions before it is removed. A pool's max size defines the maximum number of connections that can be added to the pool to prevent unbounded open connections. :param str pid: The pool ID :param int idle_ttl: Time in seconds for the idle TTL :param int max_size: The maximum pool size :param callable time_method: Override the use of :py:meth:`time.time` method for time values. :raises: KeyError
2.900817
3.156017
0.919138
with cls._lock: LOGGER.debug('Freeing %s from pool %s', id(connection), pid) cls._ensure_pool_exists(pid) cls._pools[pid].free(connection)
def free(cls, pid, connection)
Free a connection that was locked by a session :param str pid: The pool ID :param connection: The connection to remove :type connection: psycopg2.extensions.connection
5.112678
5.155088
0.991773
with cls._lock: cls._ensure_pool_exists(pid) return cls._pools[pid].get(session)
def get(cls, pid, session)
Get an idle, unused connection from the pool. Once a connection has been retrieved, it will be marked as in-use until it is freed. :param str pid: The pool ID :param queries.Session session: The session to assign to the connection :rtype: psycopg2.extensions.connection
5.511879
7.309681
0.754052
with cls._lock: return cls._pools[pid].connection_handle(connection)
def get_connection(cls, pid, connection)
Return the specified :class:`~queries.pool.Connection` from the pool. :param str pid: The pool ID :param connection: The connection to return for :type connection: psycopg2.extensions.connection :rtype: queries.pool.Connection
11.869633
16.281431
0.729029
with cls._lock: cls._ensure_pool_exists(pid) return connection in cls._pools[pid]
def has_connection(cls, pid, connection)
Check to see if a pool has the specified connection :param str pid: The pool ID :param connection: The connection to check for :type connection: psycopg2.extensions.connection :rtype: bool
6.414604
7.117787
0.901208
with cls._lock: cls._ensure_pool_exists(pid) return bool(cls._pools[pid].idle_connections)
def has_idle_connection(cls, pid)
Check to see if a pool has an idle connection :param str pid: The pool ID :rtype: bool
5.979589
6.410991
0.932709
with cls._lock: cls._ensure_pool_exists(pid) return cls._pools[pid].is_full
def is_full(cls, pid)
Return a bool indicating if the specified pool is full :param str pid: The pool id :rtype: bool
5.664246
5.839606
0.96997
with cls._lock: cls._ensure_pool_exists(pid) cls._pools[pid].lock(connection, session)
def lock(cls, pid, connection, session)
Explicitly lock the specified connection in the pool :param str pid: The pool id :type connection: psycopg2.extensions.connection :param connection: The connection to add to the pool :param queries.Session session: The session to hold the lock
5.319861
8.255533
0.644399
with cls._lock: cls._ensure_pool_exists(pid) cls._pools[pid].close() del cls._pools[pid]
def remove(cls, pid)
Remove a pool, closing all connections :param str pid: The pool ID
4.157514
4.755346
0.874282
cls._ensure_pool_exists(pid) cls._pools[pid].remove(connection)
def remove_connection(cls, pid, connection)
Remove a connection from the pool, closing it if is open. :param str pid: The pool ID :param connection: The connection to remove :type connection: psycopg2.extensions.connection :raises: ConnectionNotFoundError
6.788774
6.873895
0.987617
with cls._lock: cls._ensure_pool_exists(pid) cls._pools[pid].set_idle_ttl(ttl)
def set_idle_ttl(cls, pid, ttl)
Set the idle TTL for a pool, after which it will be destroyed. :param str pid: The pool id :param int ttl: The TTL for an idle pool
4.12344
5.411238
0.762014
with cls._lock: cls._ensure_pool_exists(pid) cls._pools[pid].set_max_size(size)
def set_max_size(cls, pid, size)
Set the maximum number of connections for the specified pool :param str pid: The pool to set the size for :param int size: The maximum number of connections
4.33354
6.610533
0.655551
for pid in list(cls._pools.keys()): cls._pools[pid].shutdown() LOGGER.info('Shutdown complete, all pooled connections closed')
def shutdown(cls)
Close all connections in all pools
9.21684
6.086092
1.51441
with cls._lock: cls._ensure_pool_exists(pid) return len(cls._pools[pid])
def size(cls, pid)
Return the number of connections in the pool :param str pid: The pool id :rtype int
5.461527
5.971561
0.914589
return { 'timestamp': datetime.datetime.utcnow().isoformat(), 'process': os.getpid(), 'pools': dict([(i, p.report()) for i, p in cls._pools.items()]) }
def report(cls)
Return the state of all of the registered pools. :rtype: dict
4.593814
3.668878
1.252103
if not len(cls._pools[pid]): del cls._pools[pid]
def _maybe_remove_pool(cls, pid)
If the pool has no open connections, remove it :param str pid: The pool id to clean
5.482966
8.059567
0.680305
try: self._cursor.callproc(name, args) except psycopg2.Error as err: self._incr_exceptions() raise err finally: self._incr_executions() return results.Results(self._cursor)
def callproc(self, name, args=None)
Call a stored procedure on the server, returning the results in a :py:class:`queries.Results` instance. :param str name: The procedure name :param list args: The list of arguments to pass in :rtype: queries.Results :raises: queries.DataError :raises: queries.DatabaseError :raises: queries.IntegrityError :raises: queries.InternalError :raises: queries.InterfaceError :raises: queries.NotSupportedError :raises: queries.OperationalError :raises: queries.ProgrammingError
4.958943
5.029267
0.986017
if not self._conn: raise psycopg2.InterfaceError('Connection not open') LOGGER.info('Closing connection %r in %s', self._conn, self.pid) self._pool_manager.free(self.pid, self._conn) self._pool_manager.remove_connection(self.pid, self._conn) # Un-assign the connection and cursor self._conn, self._cursor = None, None
def close(self)
Explicitly close the connection and remove it from the connection pool if pooling is enabled. :raises: psycopg2.InterfaceError if the connection is already closed
4.583012
3.975775
1.152734
return hashlib.md5(':'.join([self.__class__.__name__, self._uri]).encode('utf-8')).hexdigest()
def pid(self)
Return the pool ID used for connection pooling. :rtype: str
7.41974
7.860576
0.943918
try: self._cursor.execute(sql, parameters) except psycopg2.Error as err: self._incr_exceptions() raise err finally: self._incr_executions() return results.Results(self._cursor)
def query(self, sql, parameters=None)
A generator to issue a query on the server, mogrifying the parameters against the sql statement. Results are returned as a :py:class:`queries.Results` object which can act as an iterator and has multiple ways to access the result data. :param str sql: The SQL statement :param dict parameters: A dictionary of query parameters :rtype: queries.Results :raises: queries.DataError :raises: queries.DatabaseError :raises: queries.IntegrityError :raises: queries.InternalError :raises: queries.InterfaceError :raises: queries.NotSupportedError :raises: queries.OperationalError :raises: queries.ProgrammingError
4.650568
4.925261
0.944228
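The callproc/query/close methods above appear to come from the queries library's Session class; a hedged usage sketch based on that library's documented pattern, with a made-up connection URI:

import queries

# Open a pooled session, run a query and iterate the Results object.
with queries.Session('postgresql://postgres@localhost:5432/postgres') as session:
    results = session.query('SELECT 1 AS answer')
    for row in results:
        print(row['answer'])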
if self._conn.encoding != value: self._conn.set_client_encoding(value)
def set_encoding(self, value=DEFAULT_ENCODING)
Set the client encoding for the session if the value specified is different than the current client encoding. :param str value: The encoding value to use
5.017162
6.387079
0.785518
if self._cursor: LOGGER.debug('Closing the cursor on %s', self.pid) self._cursor.close() self._cursor = None if self._conn: LOGGER.debug('Freeing %s in the pool', self.pid) try: pool.PoolManager.instance().free(self.pid, self._conn) except pool.ConnectionNotFoundError: pass self._conn = None
def _cleanup(self)
Remove the connection from the stack, closing out the cursor
4.16141
3.578808
1.162792
# Attempt to get a cached connection from the connection pool try: connection = self._pool_manager.get(self.pid, self) LOGGER.debug("Re-using connection for %s", self.pid) except pool.NoIdleConnectionsError: if self._pool_manager.is_full(self.pid): raise # Create a new PostgreSQL connection kwargs = utils.uri_to_kwargs(self._uri) LOGGER.debug("Creating a new connection for %s", self.pid) connection = self._psycopg2_connect(kwargs) self._pool_manager.add(self.pid, connection) self._pool_manager.lock(self.pid, connection, self) # Added in because psycopg2ct connects and leaves the connection in # a weird state: consts.STATUS_DATESTYLE, returning from # Connection._setup without setting the state as const.STATUS_OK if utils.PYPY: connection.reset() # Register the custom data types self._register_unicode(connection) self._register_uuid(connection) return connection
def _connect(self)
Connect to PostgreSQL, either by reusing a connection from the pool if possible, or by creating the new connection. :rtype: psycopg2.extensions.connection :raises: pool.NoIdleConnectionsError
6.395816
5.738999
1.114448
cursor = connection.cursor(name=name, cursor_factory=self._cursor_factory) if name is not None: cursor.scrollable = True cursor.withhold = True return cursor
def _get_cursor(self, connection, name=None)
Return a cursor for the given cursor_factory. Specify a name to use server-side cursors. :param connection: The connection to create a cursor on :type connection: psycopg2.extensions.connection :param str name: A cursor name for a server side cursor :rtype: psycopg2.extensions.cursor
4.010459
4.650095
0.862447
self._pool_manager.get_connection(self.pid, self._conn).exceptions += 1
def _incr_exceptions(self)
Increment the number of exceptions for the current connection.
16.788525
10.289918
1.631551
self._pool_manager.get_connection(self.pid, self._conn).executions += 1
def _incr_executions(self)
Increment the number of executions for the current connection.
18.4039
10.162116
1.81103
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE, connection) psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY, connection)
def _register_unicode(connection)
Register the cursor to be able to receive Unicode strings. :type connection: psycopg2.extensions.connection :param connection: Where to register things
2.01908
2.349557
0.859345
if self._conn.status == psycopg2.extensions.STATUS_BEGIN: return self.READY return self._conn.status
def _status(self)
Return the current connection status as an integer value. The status should match one of the following constants: - queries.Session.INTRANS: Connection established, in transaction - queries.Session.PREPARED: Prepared for second phase of transaction - queries.Session.READY: Connected, no active transaction :rtype: int
7.788153
6.305507
1.235135
if not self.cursor.rowcount: return {} self._rewind() if self.cursor.rowcount == 1: return dict(self.cursor.fetchone()) else: raise ValueError('More than one row')
def as_dict(self)
Return a single row result as a dictionary. If the results contain multiple rows, a :py:class:`ValueError` will be raised. :return: dict :raises: ValueError
4.633496
4.008274
1.155983
if not self.cursor.rowcount: return [] self.cursor.scroll(0, 'absolute') return self.cursor.fetchall()
def items(self)
Return all of the rows that are in the result set. :rtype: list
4.90019
4.43229
1.105566
if pwd is None: return getpass.getuser() else: try: return pwd.getpwuid(os.getuid())[0] except KeyError as error: LOGGER.error('Could not get logged-in user: %s', error)
def get_current_user()
Return the current username for the logged in user :rtype: str
3.639249
3.658193
0.994821
if port: host = '%s:%s' % (host, port) if password: return 'postgresql://%s:%s@%s/%s' % (user, password, host, dbname) return 'postgresql://%s@%s/%s' % (user, host, dbname)
def uri(host='localhost', port=5432, dbname='postgres', user='postgres', password=None)
Return a PostgreSQL connection URI for the specified values. :param str host: Host to connect to :param int port: Port to connect on :param str dbname: The database name :param str user: User to connect as :param str password: The password to use, None for no password :return str: The PostgreSQL connection URI
1.733076
2.025685
0.855551
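For example, with made-up connection values, the helper above produces URIs like these:

print(uri())
# postgresql://postgres@localhost:5432/postgres
print(uri('db.example.com', 5433, 'reporting', 'alice', password='s3cret'))
# postgresql://alice:s3cret@db.example.com:5433/reporting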