code
string | signature
string | docstring
string | loss_without_docstring
float64 | loss_with_docstring
float64 | factor
float64 |
---|---|---|---|---|---|
def get_hash_for_filename(filename, hashfile_path):
    """Return hash for filename in the hashfile.

    Raises AttributeError when no line in the hashfile ends with *filename*
    (or when the matching line carries no leading hash).
    """
    with open(hashfile_path, 'r') as stream:
        for line in stream:
            if line.rstrip().endswith(filename):
                # The hash is the leading alphanumeric run on the line.
                found = re.match(r'^[A-Za-z0-9]*', line).group(0)
                if found:
                    return found
                break  # matching line but empty hash -> treat as not found
    raise AttributeError("Filename %s not found in hash file" % filename)
def fix_windows_command_list(commands):
    # type: (List[str]) -> List[str]
    """Return command list with working Windows commands.

    npm on windows is npm.cmd, which will blow up
    subprocess.check_call(['npm', '...'])

    Similar issues arise when calling python apps like pipenv that will have
    a windows-only suffix applied to them.
    """
    resolved = which(commands[0])
    # Rewrite only when the bare name resolves solely via a win suffix.
    if resolved and not which(commands[0], add_win_suffixes=False):
        commands[0] = os.path.basename(resolved)
    return commands
# Run each step of `commands` sequentially. A step may be a string, a list
# of args, or a dict with 'command' and optional 'cwd' keys.
env = os.environ.copy()
for step in commands:
if isinstance(step, (list, six.string_types)):
# Plain steps run in the base directory.
execution_dir = directory
raw_command = step
elif step.get('command'): # dictionary
# Dict steps may override the working directory via 'cwd'.
execution_dir = os.path.join(directory,
step.get('cwd')) if step.get('cwd') else directory # noqa pylint: disable=line-too-long
raw_command = step['command']
else:
raise AttributeError("Invalid command step: %s" % step)
# String commands are naively split on spaces (no shell quoting support).
command_list = raw_command.split(' ') if isinstance(raw_command, six.string_types) else raw_command # noqa pylint: disable=line-too-long
if platform.system().lower() == 'windows':
command_list = fix_windows_command_list(command_list)
with change_dir(execution_dir):
check_call(command_list, env=env) | def run_commands(commands, # type: List[Union[str, List[str], Dict[str, Union[str, List[str]]]]]
directory, # type: str
env=None # type: Optional[Dict[str, Union[str, int]]]
): # noqa
# type: (...) -> None
if env is None | Run list of commands. | 2.749706 | 2.485514 | 1.106293 |
def sha256sum(filename):
    """Return SHA256 hash of file."""
    digest = hashlib.sha256()
    # Reusable 128 KiB buffer; readinto avoids a fresh allocation per chunk.
    view = memoryview(bytearray(128 * 1024))
    with open(filename, 'rb', buffering=0) as stream:
        count = stream.readinto(view)
        while count:
            digest.update(view[:count])
            count = stream.readinto(view)
    return digest.hexdigest()
# Temporarily prepend the embedded package directory to sys.path, restoring
# the original path on exit.
# NOTE(review): the bare `yield` implies this is decorated with
# @contextlib.contextmanager at its (off-view) definition -- confirm.
if embedded_lib_path is None:
embedded_lib_path = get_embedded_lib_path()
old_sys_path = list(sys.path)
sys.path.insert(
1, # https://stackoverflow.com/a/10097543
embedded_lib_path
)
try:
yield
finally:
# Always restore, even if the body raised.
sys.path = old_sys_path | def use_embedded_pkgs(embedded_lib_path=None) | Temporarily prepend embedded packages to sys.path. | 2.545405 | 2.382625 | 1.06832 |
def which(program, add_win_suffixes=True):
    """Mimic 'which' command behavior.

    Adapted from https://stackoverflow.com/a/377028
    """
    def _executable(candidate):
        """Return True when candidate is an existing executable file."""
        return os.path.isfile(candidate) and os.access(candidate, os.X_OK)

    dirname, basename = os.path.split(program)
    # On Windows, also try the .exe/.cmd variants of a bare name.
    if (add_win_suffixes and platform.system().lower() == 'windows'
            and not basename.endswith(('.exe', '.cmd'))):
        candidates = [basename + '.exe', basename + '.cmd']
    else:
        candidates = [basename]

    for name in candidates:
        if dirname:
            # Explicit directory given; check only there.
            candidate = os.path.join(dirname, name)
            if _executable(candidate):
                return candidate
        else:
            # Bare name; search PATH left to right.
            for search_dir in os.environ['PATH'].split(os.pathsep):
                candidate = os.path.join(search_dir, name)
                if _executable(candidate):
                    return candidate
    return None
# Build Terraform backend options for a module: start from any explicit
# terraform_backend_config, then optionally fill keys from CloudFormation
# stack outputs referenced as "<stack name>::<output name>".
backend_opts = {}
if module_opts.get('terraform_backend_config'):
backend_opts['config'] = merge_nested_environment_dicts(
module_opts.get('terraform_backend_config'),
env_name
)
if module_opts.get('terraform_backend_cfn_outputs'):
if not backend_opts.get('config'):
backend_opts['config'] = {}
if not backend_opts['config'].get('region'):
# Default the lookup region from the environment.
backend_opts['config']['region'] = env_vars['AWS_DEFAULT_REGION']
boto_args = extract_boto_args_from_env(env_vars)
cfn_client = boto3.client(
'cloudformation',
region_name=backend_opts['config']['region'],
**boto_args
)
# Resolve each "<stack>::<output>" reference via describe_stacks.
for (key, val) in merge_nested_environment_dicts(module_opts.get('terraform_backend_cfn_outputs'), # noqa pylint: disable=line-too-long
env_name).items():
backend_opts['config'][key] = find_cfn_output(
val.split('::')[1],
cfn_client.describe_stacks(
StackName=val.split('::')[0]
)['Stacks'][0]['Outputs']
)
return backend_opts | def create_config_backend_options(module_opts, env_name, env_vars) | Return backend options defined in module options. | 2.242419 | 2.208537 | 1.015341 |
def get_backend_init_list(backend_vals):
    """Turn backend config dict into command line items."""
    cmd_list = []
    # Each entry becomes a '-backend-config key=value' option pair.
    for key, val in backend_vals.items():
        cmd_list.extend(['-backend-config', key + '=' + val])
    return cmd_list
def get_backend_tfvars_file(path, environment, region):
    """Determine Terraform backend file."""
    candidates = gen_backend_tfvars_files(environment, region)
    for candidate in candidates:
        if os.path.isfile(os.path.join(path, candidate)):
            return candidate
    # Nothing on disk; fall back to the least-specific candidate name.
    return candidates[-1]
def get_module_defined_tf_var(terraform_version_opts, env_name):
    """Return version of Terraform requested in module options."""
    # A plain string applies to every environment.
    if isinstance(terraform_version_opts, six.string_types):
        return terraform_version_opts
    # Environment-specific pin wins over the '*' wildcard.
    for key in (env_name, '*'):
        if terraform_version_opts.get(key):
            return terraform_version_opts.get(key)
    return None
def get_workspace_tfvars_file(path, environment, region):
    """Determine Terraform workspace-specific tfvars file name."""
    for candidate in gen_workspace_tfvars_files(environment, region):
        if os.path.isfile(os.path.join(path, candidate)):
            return candidate
    # No candidate exists; default to the environment-named file.
    return "%s.tfvars" % environment
# Compare the backend config recorded in .terraform/terraform.tfstate with
# the currently desired backend config; when they differ, trash the local
# .terraform directory and re-run `terraform init`.
terraform_dir = os.path.join(module_path, '.terraform')
local_tfstate_path = os.path.join(terraform_dir, 'terraform.tfstate')
current_backend_config = {}
desired_backend_config = {}
LOGGER.debug('Comparing previous & desired Terraform backend configs')
if os.path.isfile(local_tfstate_path):
with open(local_tfstate_path, 'r') as stream:
current_backend_config = hcl.load(stream).get('backend',
{}).get('config',
{})
if backend_options.get('config'):
# Explicit config in runway options takes precedence.
desired_backend_config = backend_options.get('config')
elif os.path.isfile(os.path.join(module_path,
backend_options.get('filename'))):
with open(os.path.join(module_path,
backend_options.get('filename')),
'r') as stream:
desired_backend_config = hcl.load(stream)
# Can't solely rely on the backend info defined in runway options or
# backend files; merge in the values defined in main.tf
# (or whatever tf file)
# NOTE(review): the bare 'main.tf' entry is resolved relative to the
# current working directory, not module_path -- confirm this is intended.
for filename in ['main.tf'] + glob.glob(os.path.join(module_path, '*.tf')):
if os.path.isfile(filename):
with open(filename, 'r') as stream:
tf_config = hcl.load(stream)
if tf_config.get('terraform', {}).get('backend'):
# Only the first file declaring a backend block is used.
[(_s3key, tffile_backend_config)] = tf_config['terraform']['backend'].items() # noqa pylint: disable=line-too-long
desired_backend_config = merge_dicts(
desired_backend_config,
tffile_backend_config
)
break
if current_backend_config != desired_backend_config:
LOGGER.info("Desired and previously initialized TF backend config is "
"out of sync; trashing local TF state directory %s",
terraform_dir)
send2trash(terraform_dir)
run_terraform_init(
tf_bin=tf_bin,
module_path=module_path,
backend_options=backend_options,
env_name=env_name,
env_region=env_region,
env_vars=env_vars
) | def reinit_on_backend_changes(tf_bin, # pylint: disable=too-many-arguments
module_path, backend_options, env_name,
env_region, env_vars) | Clean terraform directory and run init if necessary.
If deploying a TF module to multiple regions (or any scenario requiring
multiple backend configs), switching the backend will cause TF to
compare the old and new backends. This will frequently cause an access
error as the creds/role for the new backend won't always have access to
the old one.
This method compares the defined & initialized backend configs and
trashes the terraform directory & re-inits if they're out of sync. | 2.90085 | 2.791023 | 1.03935 |
# Run `terraform init`, choosing backend config from (in order): explicit
# config values, a backend tfvars file, or nothing (bare init).
init_cmd = [tf_bin, 'init']
cmd_opts = {'env_vars': env_vars, 'exit_on_error': False}
if backend_options.get('config'):
LOGGER.info('Using provided backend values "%s"',
str(backend_options.get('config')))
cmd_opts['cmd_list'] = init_cmd + get_backend_init_list(backend_options.get('config')) # noqa pylint: disable=line-too-long
elif os.path.isfile(os.path.join(module_path,
backend_options.get('filename'))):
LOGGER.info('Using backend config file %s',
backend_options.get('filename'))
cmd_opts['cmd_list'] = init_cmd + ['-backend-config=%s' % backend_options.get('filename')] # noqa pylint: disable=line-too-long
else:
LOGGER.info(
"No backend tfvars file found -- looking for one "
"of \"%s\" (proceeding with bare 'terraform "
"init')",
', '.join(gen_backend_tfvars_files(
env_name,
env_region)))
cmd_opts['cmd_list'] = init_cmd
try:
run_module_command(**cmd_opts)
except subprocess.CalledProcessError as shelloutexc:
# An error during initialization can leave things in an inconsistent
# state (e.g. backend configured but no providers downloaded). Marking
# this with a file so it will be deleted on the next run.
if os.path.isdir(os.path.join(module_path, '.terraform')):
with open(os.path.join(module_path,
'.terraform',
FAILED_INIT_FILENAME), 'w') as stream:
stream.write('1')
sys.exit(shelloutexc.returncode) | def run_terraform_init(tf_bin, # pylint: disable=too-many-arguments
module_path, backend_options, env_name, env_region,
env_vars) | Run Terraform init. | 3.261315 | 3.267748 | 0.998031 |
# Scan ALL_COMMANDS_MODULE for the first attribute matching one of the
# candidate names and return the first non-base class defined there.
for command_name in possible_command_names:
if hasattr(ALL_COMMANDS_MODULE, command_name):
command_module = getattr(ALL_COMMANDS_MODULE, command_name)
command_class_hierarchy = getmembers(command_module, isclass)
# Assumes at least one non-base class exists in the module
# (IndexError otherwise) -- presumably guaranteed by convention.
command_class_tuple = list(filter(_not_base_class, command_class_hierarchy))[0]
return command_class_tuple[1]
return None | def find_command_class(possible_command_names) | Try to find a class for one of the given command names. | 2.939044 | 2.874884 | 1.022317 |
def generate_sample_module(module_dir):
    """Generate skeleton sample module."""
    if not os.path.isdir(module_dir):
        os.mkdir(module_dir)
        return
    # Refuse to clobber an existing directory.
    LOGGER.error("Error generating sample module -- directory %s "
                 "already exists!",
                 module_dir)
    sys.exit(1)
def generate_sample_sls_module(env_root, module_dir=None):
    """Generate skeleton Serverless sample module."""
    if module_dir is None:
        module_dir = os.path.join(env_root, 'sampleapp.sls')
    generate_sample_module(module_dir)
    template_dir = os.path.join(ROOT, 'templates', 'serverless')
    # Copy each bundled template file into the new module directory.
    for name in ('config-dev-us-east-1.json', 'handler.py', 'package.json',
                 'serverless.yml'):
        shutil.copyfile(os.path.join(template_dir, name),
                        os.path.join(module_dir, name))
    LOGGER.info("Sample Serverless module created at %s",
                module_dir)
def generate_sample_sls_tsc_module(env_root, module_dir=None):
    """Generate skeleton Serverless TypeScript sample module."""
    if module_dir is None:
        module_dir = os.path.join(env_root, 'sampleapp.sls')
    generate_sample_module(module_dir)
    template_dir = os.path.join(ROOT, 'templates', 'sls-tsc')
    # Top-level project files.
    for name in ('package.json', 'serverless.yml', 'tsconfig.json',
                 'webpack.config.js'):
        shutil.copyfile(os.path.join(template_dir, name),
                        os.path.join(module_dir, name))
    # TypeScript sources live under src/.
    os.mkdir(os.path.join(module_dir, 'src'))
    for name in ('handler.spec.ts', 'handler.ts'):
        shutil.copyfile(os.path.join(template_dir, 'src', name),
                        os.path.join(module_dir, 'src', name))
    LOGGER.info("Sample Serverless TypeScript module created at %s",
                module_dir)
def generate_sample_cdk_tsc_module(env_root, module_dir=None):
    """Generate skeleton CDK TS sample module."""
    if module_dir is None:
        module_dir = os.path.join(env_root, 'sampleapp.cdk')
    generate_sample_module(module_dir)
    template_dir = os.path.join(ROOT, 'templates', 'cdk-tsc')
    for name in ('.npmignore', 'cdk.json', 'package.json',
                 'runway.module.yml', 'tsconfig.json', 'README.md'):
        shutil.copyfile(os.path.join(template_dir, name),
                        os.path.join(module_dir, name))
    # bin/ holds the app entry point, lib/ the stack definition.
    for subdir, name in (('bin', 'sample.ts'), ('lib', 'sample-stack.ts')):
        os.mkdir(os.path.join(module_dir, subdir))
        shutil.copyfile(os.path.join(template_dir, subdir, name),
                        os.path.join(module_dir, subdir, name))
    with open(os.path.join(module_dir, '.gitignore'), 'w') as stream:
        # Keep compiled JS artifacts and deps out of version control.
        stream.write('*.js\n')
        stream.write('*.d.ts\n')
        stream.write('node_modules\n')
    LOGGER.info("Sample CDK module created at %s", module_dir)
    LOGGER.info('To finish its setup, change to the %s directory and execute '
                '"npm install" to generate its lockfile.', module_dir)
def generate_sample_cdk_cs_module(env_root, module_dir=None):
    """Generate skeleton CDK C# sample module."""
    if module_dir is None:
        module_dir = os.path.join(env_root, 'sampleapp.cdk')
    generate_sample_module(module_dir)
    template_dir = os.path.join(ROOT, 'templates', 'cdk-csharp')
    for name in ('add-project.hook.d.ts', 'cdk.json', 'package.json',
                 'runway.module.yml', 'README.md'):
        shutil.copyfile(os.path.join(template_dir, name),
                        os.path.join(module_dir, name))
    # The template ships as 'dot_gitignore' so it isn't treated as a real
    # .gitignore inside this package; rename it on copy.
    shutil.copyfile(os.path.join(template_dir, 'dot_gitignore'),
                    os.path.join(module_dir, '.gitignore'))
    os.mkdir(os.path.join(module_dir, 'src'))
    shutil.copyfile(os.path.join(template_dir, 'src', 'HelloCdk.sln'),
                    os.path.join(module_dir, 'src', 'HelloCdk.sln'))
    os.mkdir(os.path.join(module_dir, 'src', 'HelloCdk'))
    for name in ('HelloCdk.csproj', 'HelloConstruct.cs', 'HelloStack.cs',
                 'Program.cs'):
        shutil.copyfile(
            os.path.join(template_dir, 'src', 'HelloCdk', name),
            os.path.join(module_dir, 'src', 'HelloCdk', name))
    LOGGER.info("Sample C# CDK module created at %s", module_dir)
    LOGGER.info('To finish its setup, change to the %s directory and execute '
                '"npm install" to generate its lockfile.', module_dir)
def generate_sample_cdk_py_module(env_root, module_dir=None):
    """Generate skeleton CDK python sample module."""
    if module_dir is None:
        module_dir = os.path.join(env_root, 'sampleapp.cdk')
    generate_sample_module(module_dir)
    template_dir = os.path.join(ROOT, 'templates', 'cdk-py')
    for name in ('app.py', 'cdk.json', 'lambda-index.py', 'package.json',
                 'runway.module.yml', 'Pipfile'):
        shutil.copyfile(os.path.join(template_dir, name),
                        os.path.join(module_dir, name))
    with open(os.path.join(module_dir, '.gitignore'), 'w') as stream:
        stream.write('node_modules')
    LOGGER.info("Sample CDK module created at %s", module_dir)
    LOGGER.info('To finish its setup, change to the %s directory and execute '
                '"npm install" and "pipenv update -d --three" to generate its '
                'lockfiles.', module_dir)
def generate_sample_cfn_module(env_root, module_dir=None):
    """Generate skeleton CloudFormation sample module."""
    if module_dir is None:
        module_dir = os.path.join(env_root, 'sampleapp.cfn')
    generate_sample_module(module_dir)
    template_dir = os.path.join(ROOT, 'templates', 'cfn')
    for name in ('stacks.yaml', 'dev-us-east-1.env'):
        shutil.copyfile(os.path.join(template_dir, name),
                        os.path.join(module_dir, name))
    os.mkdir(os.path.join(module_dir, 'templates'))
    blueprint = os.path.join(ROOT, 'templates', 'stacker',
                             'tfstate_blueprints', 'tf_state.py')
    with open(os.path.join(module_dir,
                           'templates',
                           'tf_state.yml'), 'w') as stream:
        # Execute the blueprint and convert its JSON output to YAML.
        stream.write(cfn_flip.flip(check_output([sys.executable, blueprint])))
    LOGGER.info("Sample CloudFormation module created at %s",
                module_dir)
def generate_sample_stacker_module(env_root, module_dir=None):
    """Generate skeleton Stacker sample module."""
    if module_dir is None:
        module_dir = os.path.join(env_root,
                                  'runway-sample-tfstate.cfn')
    generate_sample_module(module_dir)
    template_dir = os.path.join(ROOT, 'templates', 'stacker')
    for name in ('stacks.yaml', 'dev-us-east-1.env'):
        shutil.copyfile(os.path.join(template_dir, name),
                        os.path.join(module_dir, name))
    blueprint_dir = os.path.join(module_dir, 'tfstate_blueprints')
    os.mkdir(blueprint_dir)
    for name in ('__init__.py', 'tf_state.py'):
        shutil.copyfile(
            os.path.join(template_dir, 'tfstate_blueprints', name),
            os.path.join(blueprint_dir, name))
    # Make the blueprint directly executable (adds u/g/o execute bits).
    blueprint = os.path.join(blueprint_dir, 'tf_state.py')
    os.chmod(blueprint, os.stat(blueprint).st_mode | 0o0111)
    LOGGER.info("Sample Stacker module created at %s",
                module_dir)
def generate_sample_tf_module(env_root, module_dir=None):
    """Generate skeleton Terraform sample module."""
    if module_dir is None:
        module_dir = os.path.join(env_root, 'sampleapp.tf')
    generate_sample_module(module_dir)
    template_dir = os.path.join(ROOT, 'templates', 'terraform')
    for name in ('backend-us-east-1.tfvars', 'dev-us-east-1.tfvars',
                 'main.tf'):
        shutil.copyfile(os.path.join(template_dir, name),
                        os.path.join(module_dir, name))
    tf_ver_template = os.path.join(template_dir, '.terraform-version')
    if os.path.isfile(tf_ver_template):
        shutil.copyfile(tf_ver_template,
                        os.path.join(module_dir, '.terraform-version'))
    else:  # running directly from git
        # No bundled version pin; resolve the latest release instead.
        with open(os.path.join(module_dir,
                               '.terraform-version'), 'w') as stream:
            stream.write(get_latest_tf_version())
    LOGGER.info("Sample Terraform app created at %s",
                module_dir)
def execute(self):
    """Run selected module generator."""
    # Dispatch table preserves the original precedence order; the first
    # truthy CLI flag wins.
    generators = (('cfn', generate_sample_cfn_module),
                  ('sls', generate_sample_sls_module),
                  ('sls-tsc', generate_sample_sls_tsc_module),
                  ('stacker', generate_sample_stacker_module),
                  ('tf', generate_sample_tf_module),
                  ('cdk-tsc', generate_sample_cdk_tsc_module),
                  ('cdk-py', generate_sample_cdk_py_module),
                  ('cdk-csharp', generate_sample_cdk_cs_module))
    for arg, generator in generators:
        if self._cli_arguments[arg]:
            generator(self.env_root)
            return
def execute(self):  # pylint: disable=no-self-use
    """Generate runway.yml.

    Exits with status 1 if a runway.yml already exists in the current
    directory; otherwise writes a skeleton config.
    """
    if os.path.isfile('runway.yml'):
        print('Runway config already present')
        sys.exit(1)
    with open('runway.yml', 'w') as stream:
        # BUG FIX: stream.write() was previously called with no argument,
        # which raises TypeError. Write a minimal skeleton config instead.
        stream.write(
            "---\n"
            "# See full syntax at "
            "https://docs.onica.com/projects/runway\n"
            "deployments:\n"
            "  - modules:\n"
            "      - nochange.cfn\n"
            "    regions:\n"
            "      - us-east-1\n"
        )
    print('runway.yml generated')
    print('See additional getting started information at '
          'https://docs.onica.com/projects/runway/en/latest/how_to_use.html')
# Build the "Static Website - Dependencies" CFN template: a private,
# versioned AWS-logs bucket (with a policy letting AWS write logs) and a
# private, versioned artifacts bucket with noncurrent-version expiry.
template = self.template
# variables = self.get_variables()
template.add_version('2010-09-09')
template.add_description('Static Website - Dependencies')
# Resources
awslogbucket = template.add_resource(
s3.Bucket(
'AWSLogBucket',
AccessControl=s3.Private,
VersioningConfiguration=s3.VersioningConfiguration(
Status='Enabled'
)
)
)
template.add_output(Output(
'AWSLogBucketName',
Description='Name of bucket storing AWS logs',
Value=awslogbucket.ref()
))
# Allow the account root to write log objects into the bucket.
template.add_resource(
s3.BucketPolicy(
'AllowAWSLogWriting',
Bucket=awslogbucket.ref(),
PolicyDocument=Policy(
Version='2012-10-17',
Statement=[
Statement(
Action=[awacs.s3.PutObject],
Effect=Allow,
Principal=AWSPrincipal(Join(':',
['arn:aws:iam:',
AccountId,
'root'])),
Resource=[
Join('', ['arn:aws:s3:::',
awslogbucket.ref(),
'/*'])
]
)
]
)
)
)
# Artifacts bucket: expire noncurrent versions after 90 days.
artifacts = template.add_resource(
s3.Bucket(
'Artifacts',
AccessControl=s3.Private,
LifecycleConfiguration=s3.LifecycleConfiguration(
Rules=[
s3.LifecycleRule(
NoncurrentVersionExpirationInDays=90,
Status='Enabled'
)
]
),
VersioningConfiguration=s3.VersioningConfiguration(
Status='Enabled'
)
)
)
template.add_output(Output(
'ArtifactsBucketName',
Description='Name of bucket storing artifacts',
Value=artifacts.ref()
)) | def create_template(self) | Create template (main function called by Stacker). | 2.224766 | 2.058438 | 1.080803 |
def does_s3_object_exist(bucket_name, key, session=None):
    """Determine if object exists on s3."""
    s3_resource = session.resource('s3') if session else boto3.resource('s3')
    try:
        # load() issues a HEAD request; missing keys raise a 404 ClientError.
        s3_resource.Object(bucket_name, key).load()
    except ClientError as exc:
        if exc.response['Error']['Code'] == '404':
            return False
        raise  # any other error (403, throttling, ...) propagates
    return True
def download_and_extract_to_mkdtemp(bucket, key, session=None):
    """Download zip archive and extract it to temporary directory.

    Returns the path of the directory containing the extracted files.
    """
    if session:
        s3_client = session.client('s3')
    else:
        s3_client = boto3.client('s3')
    transfer = S3Transfer(s3_client)
    filedes, temp_file = tempfile.mkstemp()
    os.close(filedes)
    try:
        transfer.download_file(bucket, key, temp_file)
        output_dir = tempfile.mkdtemp()
        # Context manager closes the archive even if extraction fails
        # (the original leaked the handle on error).
        with zipfile.ZipFile(temp_file, 'r') as zip_ref:
            zip_ref.extractall(output_dir)
    finally:
        # Always clean up the downloaded archive.
        os.remove(temp_file)
    return output_dir
# Archive app_dir into a temp zip (paths stored relative to app_dir) and
# upload it to s3://bucket/key; the temp file is removed afterwards.
if session:
s3_client = session.client('s3')
else:
s3_client = boto3.client('s3')
transfer = S3Transfer(s3_client)
filedes, temp_file = tempfile.mkstemp()
os.close(filedes)
LOGGER.info("staticsite: archiving app at %s to s3://%s/%s",
app_dir, bucket, key)
with zipfile.ZipFile(temp_file, 'w', zipfile.ZIP_DEFLATED) as filehandle:
with change_dir(app_dir):
# Walk from './' so archive member names are relative.
for dirname, _subdirs, files in os.walk('./'):
if dirname != './':
filehandle.write(dirname)
for filename in files:
filehandle.write(os.path.join(dirname, filename))
transfer.upload_file(temp_file, bucket, key)
os.remove(temp_file) | def zip_and_upload(app_dir, bucket, key, session=None) | Zip built static site and upload to S3. | 2.334283 | 2.283164 | 1.02239 |
# Build the static site if its content hash differs from the last deploy:
# run pre-build steps, hash sources, compare against the SSM-tracked hash,
# and either reuse the archived build from S3 or build & upload a new one.
build_output = os.path.join(
options['path'],
options['build_output']
)
else:
build_output = options['path']
context_dict['artifact_bucket_name'] = RxrefLookup.handle(
kwargs.get('artifact_bucket_rxref_lookup'),
provider=provider,
context=context
)
if options.get('pre_build_steps'):
run_commands(options['pre_build_steps'], options['path'])
context_dict['hash'] = get_hash_of_files(
root_path=options['path'],
directories=options.get('source_hashing', {}).get('directories')
)
# Now determine if the current staticsite has already been deployed
if options.get('source_hashing', {}).get('enabled', True):
context_dict['hash_tracking_parameter'] = options.get(
'source_hashing', {}).get('parameter', default_param_name)
ssm_client = session.client('ssm')
try:
old_parameter_value = ssm_client.get_parameter(
Name=context_dict['hash_tracking_parameter']
)['Parameter']['Value']
except ssm_client.exceptions.ParameterNotFound:
# First deploy for this parameter name.
old_parameter_value = None
else:
context_dict['hash_tracking_disabled'] = True
old_parameter_value = None
context_dict['current_archive_filename'] = (
context_dict['artifact_key_prefix'] + context_dict['hash'] + '.zip'
)
if old_parameter_value:
context_dict['old_archive_filename'] = (
context_dict['artifact_key_prefix'] + old_parameter_value + '.zip'
)
if old_parameter_value == context_dict['hash']:
# Unchanged sources: skip both build and upload.
LOGGER.info("staticsite: skipping build; app hash %s already deployed "
"in this environment",
context_dict['hash'])
context_dict['deploy_is_current'] = True
return context_dict
if does_s3_object_exist(context_dict['artifact_bucket_name'],
context_dict['current_archive_filename'],
session):
# Archive for this hash already exists; reuse it instead of rebuilding.
context_dict['app_directory'] = download_and_extract_to_mkdtemp(
context_dict['artifact_bucket_name'],
context_dict['current_archive_filename'], session
)
else:
if options.get('build_steps'):
LOGGER.info('staticsite: executing build commands')
run_commands(options['build_steps'], options['path'])
zip_and_upload(build_output, context_dict['artifact_bucket_name'],
context_dict['current_archive_filename'], session)
context_dict['app_directory'] = build_output
context_dict['deploy_is_current'] = False
return context_dict | def build(context, provider, **kwargs): # pylint: disable=unused-argument
session = get_session(provider.region)
options = kwargs.get('options', {})
context_dict = {}
context_dict['artifact_key_prefix'] = "%s-%s-" % (options['namespace'], options['name']) # noqa
default_param_name = "%shash" % context_dict['artifact_key_prefix']
if options.get('build_output') | Build static site. | 2.741966 | 2.655968 | 1.032379 |
def ensure_valid_environment_config(module_name, config):
    """Exit if config is invalid."""
    if config.get('namespace'):
        return
    # A namespace is mandatory for staticsite modules.
    LOGGER.fatal("staticsite: module %s's environment configuration is "
                 "missing a namespace definition!",
                 module_name)
    sys.exit(1)
# Only plan when this environment/region has configuration for the module.
if self.options.get('environments', {}).get(self.context.env_name):
self.setup_website_module(command='plan')
else:
LOGGER.info("Skipping staticsite plan of %s; no environment "
"config found for this environment/region",
self.options['path']) | def plan(self) | Create website CFN module and run stacker diff. | 12.704318 | 9.755619 | 1.302256 |
def format_npm_command_for_logging(command):
    """Convert npm command list to string for display to user."""
    joined = " ".join(command)
    if platform.system().lower() != 'windows':
        # Strip out redundant npx quotes not needed when executing the
        # command directly.
        return joined.replace("''", "'")
    if command[0] == 'npx.cmd' and command[1] == '-c':
        return 'npx.cmd -c "%s"' % " ".join(command[2:])
    return joined
# Prefer npx (npm v5.2+) to run a node CLI; otherwise fall back to the
# module's shell shim under node_modules/.bin.
if which(NPX_BIN):
# Use npx if available (npm v5.2+)
LOGGER.debug("Using npx to invoke %s.", command)
if platform.system().lower() == 'windows':
cmd_list = [NPX_BIN,
'-c',
"%s %s" % (command, ' '.join(command_opts))]
else:
# The nested app-through-npx-via-subprocess command invocation
# requires this redundant quoting
cmd_list = [NPX_BIN,
'-c',
"''%s %s''" % (command, ' '.join(command_opts))]
else:
LOGGER.debug('npx not found; falling back invoking %s shell script '
'directly.', command)
cmd_list = [
os.path.join(path,
'node_modules',
'.bin',
command)
] + command_opts
return cmd_list | def generate_node_command(command, command_opts, path) | Return node bin command list for subprocess execution. | 4.959436 | 4.807028 | 1.031705 |
def run_module_command(cmd_list, env_vars, exit_on_error=True):
    """Shell out to provisioner command."""
    if not exit_on_error:
        subprocess.check_call(cmd_list, env=env_vars)
        return
    try:
        subprocess.check_call(cmd_list, env=env_vars)
    except subprocess.CalledProcessError as shelloutexc:
        # Mirror the failed command's exit code as our own.
        sys.exit(shelloutexc.returncode)
# Use `npm ci` only when a lockfile exists AND the installed npm supports
# the `ci` subcommand (probed via `npm ci -h` exit status).
# https://docs.npmjs.com/cli/ci#description
with open(os.devnull, 'w') as fnull:
if ((os.path.isfile(os.path.join(path,
'package-lock.json')) or
os.path.isfile(os.path.join(path,
'npm-shrinkwrap.json'))) and
subprocess.call(
[NPM_BIN, 'ci', '-h'],
stdout=fnull,
stderr=subprocess.STDOUT
) == 0):
return True
return False | def use_npm_ci(path) | Return true if npm ci should be used in lieu of npm install. | 2.605403 | 2.544407 | 1.023973 |
# Install node dependencies: honor the skip option, prefer `npm ci` in CI
# environments when supported, else fall back to `npm install`.
# Use npm ci if available (npm v5.7+)
if options.get('skip_npm_ci'):
LOGGER.info("Skipping npm ci or npm install on %s...",
os.path.basename(path))
elif context.env_vars.get('CI') and use_npm_ci(path): # noqa
LOGGER.info("Running npm ci on %s...",
os.path.basename(path))
subprocess.check_call([NPM_BIN, 'ci'])
else:
LOGGER.info("Running npm install on %s...",
os.path.basename(path))
subprocess.check_call([NPM_BIN, 'install']) | def run_npm_install(path, options, context) | Run npm install/ci. | 3.069918 | 2.928067 | 1.048445 |
def cdk_module_matches_env(env_name, env_config, env_vars):
    """Return bool on whether cdk command should continue in current env.

    env_config values per environment may be:
    - bool True: always deploy
    - "<account_id>/<region>" string: deploy only in that account & region
    - dict: always deploy (values handled elsewhere as CDK context)
    """
    current_env_config = env_config.get(env_name)
    if not current_env_config:
        return False
    # isinstance(x, bool) replaces the unidiomatic isinstance(x, type(True)).
    if isinstance(current_env_config, bool) and current_env_config:
        return True
    if isinstance(current_env_config, six.string_types):
        (account_id, region) = current_env_config.split('/')
        if region == env_vars['AWS_DEFAULT_REGION']:
            boto_args = extract_boto_args_from_env(env_vars)
            sts_client = boto3.client(
                'sts',
                region_name=env_vars['AWS_DEFAULT_REGION'],
                **boto_args
            )
            # Confirm the active credentials belong to the pinned account.
            if sts_client.get_caller_identity()['Account'] == account_id:
                return True
    if isinstance(current_env_config, dict):
        return True
    return False
def get_cdk_stacks(module_path, env_vars, context_opts):
    """Return list of CDK stacks."""
    LOGGER.debug('Listing stacks in the CDK app prior to '
                 'diff')
    output = subprocess.check_output(
        generate_node_command(
            command='cdk',
            command_opts=['list'] + context_opts,
            path=module_path),
        env=env_vars
    )
    # check_output returns bytes on python3; decode so .strip()/.split('\n')
    # work and callers always get a list of str (py2 passes through).
    if isinstance(output, bytes):
        output = output.decode('utf-8')
    return output.strip().split('\n')
LOGGER.error('"npm" not found in path or is not executable; '
'please ensure it is installed correctly.')
sys.exit(1)
if 'DEBUG' in self.context.env_vars:
cdk_opts.append('-v') # Increase logging if requested
warn_on_boto_env_vars(self.context.env_vars)
# Only proceed when this environment is configured for the module.
if cdk_module_matches_env(self.context.env_name,
self.options.get('environments', {}),
self.context.env_vars):
if os.path.isfile(os.path.join(self.path, 'package.json')):
with change_dir(self.path):
run_npm_install(self.path, self.options, self.context)
if self.options.get('options', {}).get('build_steps',
[]):
LOGGER.info("Running build steps for %s...",
os.path.basename(self.path))
run_commands(
commands=self.options.get('options',
{}).get('build_steps',
[]),
directory=self.path,
env=self.context.env_vars
)
# Dict environment config becomes '-c key=value' CDK context opts.
cdk_context_opts = []
if isinstance(self.options.get('environments',
{}).get(self.context.env_name), # noqa
dict):
for (key, val) in self.options['environments'][self.context.env_name].items(): # noqa pylint: disable=line-too-long
cdk_context_opts.extend(['-c', "%s=%s" % (key, val)])
cdk_opts.extend(cdk_context_opts)
if command == 'diff':
# `cdk diff` is invoked once per stack.
LOGGER.info("Running cdk %s on each stack in %s",
command,
os.path.basename(self.path))
for i in get_cdk_stacks(self.path,
self.context.env_vars,
cdk_context_opts):
subprocess.call(
generate_node_command(
'cdk',
cdk_opts + [i], # 'diff <stack>'
self.path
),
env=self.context.env_vars
)
else:
if command == 'deploy':
if 'CI' in self.context.env_vars:
# Non-interactive deploys in CI.
cdk_opts.append('--ci')
cdk_opts.append('--require-approval=never')
bootstrap_command = generate_node_command(
'cdk',
['bootstrap'] + cdk_context_opts,
self.path
)
LOGGER.info('Running cdk bootstrap...')
run_module_command(cmd_list=bootstrap_command,
env_vars=self.context.env_vars)
elif command == 'destroy' and 'CI' in self.context.env_vars: # noqa
cdk_opts.append('-f') # Don't prompt
cdk_command = generate_node_command(
'cdk',
cdk_opts,
self.path
)
LOGGER.info("Running cdk %s on %s (\"%s\")",
command,
os.path.basename(self.path),
format_npm_command_for_logging(cdk_command)) # noqa
run_module_command(cmd_list=cdk_command,
env_vars=self.context.env_vars)
else:
LOGGER.info(
"Skipping cdk %s of %s; no \"package.json\" "
"file was found (need a package file specifying "
"aws-cdk in devDependencies)",
command,
os.path.basename(self.path))
else:
LOGGER.info(
"Skipping cdk %s of %s; no config for "
"this environment found or current account/region does not "
"match configured environment",
command,
os.path.basename(self.path))
response['skipped_configs'] = True
return response | def run_cdk(self, command='deploy'): # pylint: disable=too-many-branches
response = {'skipped_configs': False}
cdk_opts = [command]
if not which('npm') | Run CDK. | 2.875694 | 2.846808 | 1.010147 |
# Assume the given IAM role via STS and return the temporary credentials
# as environment-variable-style keys.
if session_name is None:
session_name = 'runway'
assume_role_opts = {'RoleArn': role_arn,
'RoleSessionName': session_name}
if duration_seconds:
assume_role_opts['DurationSeconds'] = int(duration_seconds)
# Seed the STS client with any credentials present in env_vars.
boto_args = {}
if env_vars:
for i in ['aws_access_key_id', 'aws_secret_access_key',
'aws_session_token']:
if env_vars.get(i.upper()):
boto_args[i] = env_vars[i.upper()]
sts_client = boto3.client('sts', region_name=region, **boto_args)
LOGGER.info("Assuming role %s...", role_arn)
response = sts_client.assume_role(**assume_role_opts)
return {'AWS_ACCESS_KEY_ID': response['Credentials']['AccessKeyId'],
'AWS_SECRET_ACCESS_KEY': response['Credentials']['SecretAccessKey'], # noqa
'AWS_SESSION_TOKEN': response['Credentials']['SessionToken']} | def assume_role(role_arn, session_name=None, duration_seconds=None,
region='us-east-1', env_vars=None) | Assume IAM role. | 1.845746 | 1.835968 | 1.005325 |
def determine_module_class(path, class_path):
    """Determine type of module and return deployment module class."""
    if not class_path:
        # First check directory name for a type-indicating suffix.
        basename = os.path.basename(path)
        suffix_map = (
            ('.sls', 'runway.module.serverless.Serverless'),
            ('.tf', 'runway.module.terraform.Terraform'),
            ('.cdk', 'runway.module.cdk.CloudDevelopmentKit'),
            ('.cfn', 'runway.module.cloudformation.CloudFormation'),
        )
        for suffix, candidate in suffix_map:
            if basename.endswith(suffix):
                class_path = candidate
                break
    if not class_path:
        # Fallback to autodetection from the module's files.
        if os.path.isfile(os.path.join(path, 'serverless.yml')):
            class_path = 'runway.module.serverless.Serverless'
        elif glob.glob(os.path.join(path, '*.tf')):
            class_path = 'runway.module.terraform.Terraform'
        elif (os.path.isfile(os.path.join(path, 'cdk.json'))
              and os.path.isfile(os.path.join(path, 'package.json'))):
            class_path = 'runway.module.cdk.CloudDevelopmentKit'
        elif (glob.glob(os.path.join(path, '*.env'))
              or glob.glob(os.path.join(path, '*.yaml'))
              or glob.glob(os.path.join(path, '*.yml'))):
            class_path = 'runway.module.cloudformation.CloudFormation'
    if not class_path:
        LOGGER.error('No module class found for %s', os.path.basename(path))
        sys.exit(1)
    return load_object_from_string(class_path)
module_options_file = os.path.join(path,
'runway.module.yml')
if os.path.isfile(module_options_file):
with open(module_options_file, 'r') as stream:
module_options = merge_dicts(module_options,
yaml.safe_load(stream))
return module_options | def load_module_opts_from_file(path, module_options) | Update module_options with any options defined in module path. | 2.323276 | 2.209484 | 1.051501 |
if isinstance(assume_role_config, dict):
if assume_role_config.get('post_deploy_env_revert'):
context.restore_existing_iam_env_vars() | def post_deploy_assume_role(assume_role_config, context) | Revert to previous credentials, if necessary. | 6.830565 | 5.476017 | 1.24736 |
if isinstance(assume_role_config, dict):
assume_role_arn = ''
if assume_role_config.get('post_deploy_env_revert'):
context.save_existing_iam_env_vars()
if assume_role_config.get('arn'):
assume_role_arn = assume_role_config['arn']
assume_role_duration = assume_role_config.get('duration')
elif assume_role_config.get(context.env_name):
if isinstance(assume_role_config[context.env_name], dict):
assume_role_arn = assume_role_config[context.env_name]['arn'] # noqa
assume_role_duration = assume_role_config[context.env_name].get('duration') # noqa pylint: disable=line-too-long
else:
assume_role_arn = assume_role_config[context.env_name]
assume_role_duration = None
else:
LOGGER.info('Skipping assume-role; no role found for '
'environment %s...',
context.env_name)
if assume_role_arn:
context.env_vars = merge_dicts(
context.env_vars,
assume_role(
role_arn=assume_role_arn,
session_name=assume_role_config.get('session_name', None),
duration_seconds=assume_role_duration,
region=context.env_region,
env_vars=context.env_vars
)
)
else:
context.env_vars = merge_dicts(
context.env_vars,
assume_role(role_arn=assume_role_config,
region=context.env_region,
env_vars=context.env_vars)
) | def pre_deploy_assume_role(assume_role_config, context) | Assume role (prior to deployment). | 2.128294 | 2.105375 | 1.010886 |
# Super overkill here using pagination when an account can only
# have a single alias, but at least this implementation should be
# future-proof
current_account_aliases = []
paginator = iam_client.get_paginator('list_account_aliases')
response_iterator = paginator.paginate()
for page in response_iterator:
current_account_aliases.extend(page.get('AccountAliases', []))
if account_alias in current_account_aliases:
LOGGER.info('Verified current AWS account alias matches required '
'alias %s.',
account_alias)
else:
LOGGER.error('Current AWS account aliases "%s" do not match '
'required account alias %s in Runway config.',
','.join(current_account_aliases),
account_alias)
sys.exit(1) | def validate_account_alias(iam_client, account_alias) | Exit if list_account_aliases doesn't include account_alias. | 3.516149 | 3.431661 | 1.02462 |
resp = sts_client.get_caller_identity()
if 'Account' in resp:
if resp['Account'] == account_id:
LOGGER.info('Verified current AWS account matches required '
'account id %s.',
account_id)
else:
LOGGER.error('Current AWS account %s does not match '
'required account %s in Runway config.',
resp['Account'],
account_id)
sys.exit(1)
else:
LOGGER.error('Error checking current account ID')
sys.exit(1) | def validate_account_id(sts_client, account_id) | Exit if get_caller_identity doesn't match account_id. | 2.837175 | 2.693268 | 1.053432 |
boto_args = {'region_name': context.env_vars['AWS_DEFAULT_REGION']}
for i in ['aws_access_key_id', 'aws_secret_access_key',
'aws_session_token']:
if context.env_vars.get(i.upper()):
boto_args[i] = context.env_vars[i.upper()]
if isinstance(deployment.get('account-id'), (int, six.string_types)):
account_id = str(deployment['account-id'])
elif deployment.get('account-id', {}).get(context.env_name):
account_id = str(deployment['account-id'][context.env_name])
else:
account_id = None
if account_id:
validate_account_id(boto3.client('sts', **boto_args), account_id)
if isinstance(deployment.get('account-alias'), six.string_types):
account_alias = deployment['account-alias']
elif deployment.get('account-alias', {}).get(context.env_name):
account_alias = deployment['account-alias'][context.env_name]
else:
account_alias = None
if account_alias:
validate_account_alias(boto3.client('iam', **boto_args),
account_alias) | def validate_account_credentials(deployment, context) | Exit if requested deployment account doesn't match credentials. | 1.746946 | 1.739395 | 1.004341 |
env_override_name = 'DEPLOY_ENVIRONMENT'
LOGGER.info("")
if env_override_name in env_vars:
LOGGER.info("Environment \"%s\" was determined from the %s environment variable.",
env_name,
env_override_name)
LOGGER.info("If this is not correct, update "
"the value (or unset it to fall back to the name of "
"the current git branch or parent directory).")
else:
LOGGER.info("Environment \"%s\" was determined from the current "
"git branch or parent directory.",
env_name)
LOGGER.info("If this is not the environment name, update the branch/folder name or "
"set an override value via the %s environment variable",
env_override_name)
LOGGER.info("") | def echo_detected_environment(env_name, env_vars) | Print a helper note about how the environment was determined. | 3.973051 | 3.760117 | 1.05663 |
name = _module_name_for_display(module)
if isinstance(module, dict):
environment_config = module.get('environments', {}).get(environment_name)
if environment_config:
return "%s (%s)" % (name, environment_config)
return "%s" % (name) | def _module_menu_entry(module, environment_name) | Build a string to display in the 'select module' menu. | 3.282195 | 2.981407 | 1.100888 |
paths = ", ".join([_module_name_for_display(module) for module in deployment['modules']])
regions = ", ".join(deployment.get('regions', []))
return "%s - %s (%s)" % (deployment.get('name'), paths, regions) | def _deployment_menu_entry(deployment) | Build a string to display in the 'select deployment' menu. | 4.278879 | 4.140367 | 1.033454 |
deployments = self.runway_config['deployments']
context = Context(env_name=get_env(self.env_root,
self.runway_config.get('ignore_git_branch', False)),
env_region=None,
env_root=self.env_root,
env_vars=os.environ.copy())
echo_detected_environment(context.env_name, context.env_vars)
# set default names if needed
for i, deployment in enumerate(deployments):
if not deployment.get('name'):
deployment['name'] = 'deployment_' + str(i+1)
if command == 'destroy':
LOGGER.info('WARNING!')
LOGGER.info('Runway is running in DESTROY mode.')
if context.env_vars.get('CI', None):
if command == 'destroy':
deployments_to_run = self.reverse_deployments(deployments)
else:
deployments_to_run = deployments
else:
if command == 'destroy':
LOGGER.info('Any/all deployment(s) selected will be '
'irrecoverably DESTROYED.')
deployments_to_run = self.reverse_deployments(
self.select_deployment_to_run(
context.env_name,
deployments,
command=command
)
)
else:
deployments_to_run = self.select_deployment_to_run(
context.env_name,
deployments
)
LOGGER.info("Found %d deployment(s)", len(deployments_to_run))
for i, deployment in enumerate(deployments_to_run):
LOGGER.info("")
LOGGER.info("")
LOGGER.info("======= Processing deployment '%s' ===========================",
deployment.get('name'))
if deployment.get('regions'):
if deployment.get('env_vars'):
deployment_env_vars = merge_nested_environment_dicts(
deployment.get('env_vars'), env_name=context.env_name,
env_root=self.env_root
)
if deployment_env_vars:
LOGGER.info("OS environment variable overrides being "
"applied this deployment: %s",
str(deployment_env_vars))
context.env_vars = merge_dicts(context.env_vars, deployment_env_vars)
LOGGER.info("")
LOGGER.info("Attempting to deploy '%s' to region(s): %s",
context.env_name,
", ".join(deployment['regions']))
for region in deployment['regions']:
LOGGER.info("")
LOGGER.info("======= Processing region %s ================"
"===========", region)
context.env_region = region
context.env_vars = merge_dicts(
context.env_vars,
{'AWS_DEFAULT_REGION': context.env_region,
'AWS_REGION': context.env_region}
)
if deployment.get('assume-role'):
pre_deploy_assume_role(deployment['assume-role'], context)
if deployment.get('account-id') or (deployment.get('account-alias')):
validate_account_credentials(deployment, context)
modules = deployment.get('modules', [])
if deployment.get('current_dir'):
modules.append('.' + os.sep)
for module in modules:
self._deploy_module(module, deployment, context, command)
if deployment.get('assume-role'):
post_deploy_assume_role(deployment['assume-role'], context)
else:
LOGGER.error('No region configured for any deployment')
sys.exit(1) | def run(self, deployments=None, command='plan'): # noqa pylint: disable=too-many-branches,too-many-statements
if deployments is None | Execute apps/code command. | 3.001072 | 2.954471 | 1.015773 |
if deployments is None:
deployments = []
reversed_deployments = []
for i in deployments[::-1]:
deployment = copy.deepcopy(i)
for config in ['modules', 'regions']:
if deployment.get(config):
deployment[config] = deployment[config][::-1]
reversed_deployments.append(deployment)
return reversed_deployments | def reverse_deployments(deployments=None) | Reverse deployments and the modules/regions in them. | 3.132722 | 2.654006 | 1.180375 |
return []
deployments_to_run = []
num_deployments = len(deployments)
if num_deployments == 1:
selected_deployment_index = 1
else:
print('')
print('Configured deployments:')
for i, deployment in enumerate(deployments):
print(" %d: %s" % (i+1, _deployment_menu_entry(deployment)))
print('')
print('')
if command == 'destroy':
print('(Operating in destroy mode -- "all" will destroy all '
'deployments in reverse order)')
selected_deployment_index = input('Enter number of deployment to run (or "all"): ')
if selected_deployment_index == 'all':
return deployments
if selected_deployment_index == '':
LOGGER.error('Please select a valid number (or "all")')
sys.exit(1)
selected_deployment = deployments[int(selected_deployment_index) - 1]
if selected_deployment.get('current_dir', False):
deployments_to_run.append(selected_deployment)
elif not selected_deployment.get('modules', []):
LOGGER.error('No modules configured in selected deployment')
sys.exit(1)
elif len(selected_deployment['modules']) == 1:
# No need to select a module in the deployment - there's only one
if command == 'destroy':
LOGGER.info('(only one deployment detected; all modules '
'automatically selected for termination)')
if not strtobool(input('Proceed?: ')):
sys.exit(0)
deployments_to_run.append(selected_deployment)
else:
modules = selected_deployment['modules']
print('')
print('Configured modules in deployment \'%s\':' % selected_deployment.get('name'))
for i, module in enumerate(modules):
print(" %s: %s" % (i+1, _module_menu_entry(module, env_name)))
print('')
print('')
if command == 'destroy':
print('(Operating in destroy mode -- "all" will destroy all '
'deployments in reverse order)')
selected_module_index = input('Enter number of module to run (or "all"): ')
if selected_module_index == 'all':
deployments_to_run.append(selected_deployment)
elif selected_module_index == '' or (
not selected_module_index.isdigit() or (
not 0 < int(selected_module_index) <= len(modules))):
LOGGER.error('Please select a valid number (or "all")')
sys.exit(1)
else:
selected_deployment['modules'] = [modules[int(selected_module_index) - 1]]
deployments_to_run.append(selected_deployment)
LOGGER.debug('Selected deployment is %s...', deployments_to_run)
return deployments_to_run | def select_deployment_to_run(env_name, deployments=None, command='build'): # noqa pylint: disable=too-many-branches,too-many-statements,too-many-locals
if deployments is None or not deployments | Query user for deployments to run. | 2.406192 | 2.379627 | 1.011164 |
clean_cmd = ['git', 'clean', '-X', '-d']
if 'CI' not in os.environ:
LOGGER.info('The following files/directories will be deleted:')
LOGGER.info('')
LOGGER.info(check_output(clean_cmd + ['-n']).decode())
if not strtobool(input('Proceed?: ')):
return False
check_call(clean_cmd + ['-f'])
empty_dirs = self.get_empty_dirs(self.env_root)
if empty_dirs != []:
LOGGER.info('Now removing empty directories:')
for directory in empty_dirs:
LOGGER.info("Removing %s/", directory)
shutil.rmtree(os.path.join(self.env_root, directory))
return True | def execute(self) | Execute git clean to remove untracked/build files. | 3.909364 | 3.548608 | 1.101661 |
names = []
for ext in ['yml', 'json']:
# Give preference to explicit stage-region files
names.append(
os.path.join('env',
"%s-%s.%s" % (stage, region, ext))
)
names.append("config-%s-%s.%s" % (stage, region, ext))
# Fallback to stage name only
names.append(
os.path.join('env',
"%s.%s" % (stage, ext))
)
names.append("config-%s.%s" % (stage, ext))
return names | def gen_sls_config_files(stage, region) | Generate possible SLS config files names. | 2.997783 | 2.708347 | 1.106868 |
for name in gen_sls_config_files(stage, region):
if os.path.isfile(os.path.join(path, name)):
return name
return "config-%s.json" % stage | def get_sls_config_file(path, stage, region) | Determine Serverless config file name. | 3.701288 | 3.253278 | 1.13771 |
sls_process = subprocess.Popen(sls_cmd,
stdout=subprocess.PIPE,
env=env_vars)
stdoutdata, _stderrdata = sls_process.communicate()
sls_return = sls_process.wait()
print(stdoutdata)
if sls_return != 0 and (sls_return == 1 and not (
re.search(r"Stack '.*' does not exist", stdoutdata))):
sys.exit(sls_return) | def run_sls_remove(sls_cmd, env_vars) | Run sls remove command. | 2.657156 | 2.696808 | 0.985297 |
response = {'skipped_configs': False}
sls_opts = [command]
if not which('npm'):
LOGGER.error('"npm" not found in path or is not executable; '
'please ensure it is installed correctly.')
sys.exit(1)
if 'CI' in self.context.env_vars and command != 'remove':
sls_opts.append('--conceal') # Hide secrets from serverless output
if 'DEBUG' in self.context.env_vars:
sls_opts.append('-v') # Increase logging if requested
warn_on_boto_env_vars(self.context.env_vars)
sls_opts.extend(['-r', self.context.env_region])
sls_opts.extend(['--stage', self.context.env_name])
sls_env_file = get_sls_config_file(self.path,
self.context.env_name,
self.context.env_region)
sls_cmd = generate_node_command(command='sls',
command_opts=sls_opts,
path=self.path)
if (not self.options.get('environments') and os.path.isfile(os.path.join(self.path, sls_env_file))) or ( # noqa pylint: disable=line-too-long
self.options.get('environments', {}).get(self.context.env_name)): # noqa
if os.path.isfile(os.path.join(self.path, 'package.json')):
with change_dir(self.path):
run_npm_install(self.path, self.options, self.context)
LOGGER.info("Running sls %s on %s (\"%s\")",
command,
os.path.basename(self.path),
format_npm_command_for_logging(sls_cmd))
if command == 'remove':
# Need to account for exit code 1 on any removals after
# the first
run_sls_remove(sls_cmd, self.context.env_vars)
else:
run_module_command(cmd_list=sls_cmd,
env_vars=self.context.env_vars)
else:
LOGGER.warning(
"Skipping serverless %s of %s; no \"package.json\" "
"file was found (need a package file specifying "
"serverless in devDependencies)",
command,
os.path.basename(self.path))
else:
response['skipped_configs'] = True
LOGGER.info(
"Skipping serverless %s of %s; no config file for "
"this stage/region found (looking for one of \"%s\")",
command,
os.path.basename(self.path),
', '.join(gen_sls_config_files(self.context.env_name,
self.context.env_region)))
return response | def run_serverless(self, command='deploy') | Run Serverless. | 3.542039 | 3.509007 | 1.009414 |
if not outline and not dump and hooks:
util.handle_hooks(
stage=stage,
hooks=hooks,
provider=provider,
context=context
) | def handle_hooks(stage, hooks, provider, context, dump, outline) | Handle pre/post hooks.
Args:
stage (str): The name of the hook stage - pre_build/post_build.
hooks (list): A list of dictionaries containing the hooks to execute.
provider (:class:`stacker.provider.base.BaseProvider`): The provider
the current stack is using.
context (:class:`stacker.context.Context`): The current stacker
context.
dump (bool): Whether running with dump set or not.
outline (bool): Whether running with outline set or not. | 3.82305 | 4.551764 | 0.839905 |
post_destroy = self.context.config.post_destroy
if not outline and post_destroy:
util.handle_hooks(
stage="post_destroy",
hooks=post_destroy,
provider=self.provider,
context=self.context) | def post_run(self, outline=False, *args, **kwargs) | Any steps that need to be taken after running the action. | 6.484006 | 6.356529 | 1.020054 |
old_env = dict(os.environ)
try:
# Environment
env = os.environ.copy()
env['LC_CTYPE'] = u'en_US.UTF'
os.environ.update(env)
# Run awscli in the same process
exit_code = create_clidriver().main(*cmd)
# Deal with problems
if exit_code > 0:
raise RuntimeError('AWS CLI exited with code {}'.format(exit_code))
finally:
os.environ.clear()
os.environ.update(old_env) | def aws_cli(*cmd) | Invoke aws command. | 3.601314 | 3.577616 | 1.006624 |
files_to_skip = []
for i in ['current_archive_filename', 'old_archive_filename']:
if hook_data.get(i):
files_to_skip.append(hook_data[i])
archives.sort(key=itemgetter('LastModified'),
reverse=False) # sort from oldest to newest
# Drop all but last 15 files
return [i['Key'] for i in archives[:-15] if i['Key'] not in files_to_skip] | def get_archives_to_prune(archives, hook_data) | Return list of keys to delete. | 3.960316 | 3.549896 | 1.115615 |
LOGGER.info('staticsite: skipping upload; latest version already '
'deployed')
else:
distribution_id = OutputLookup.handle(
kwargs.get('distributionid_output_lookup'),
provider=provider,
context=context
)
distribution_domain = OutputLookup.handle(
kwargs.get('distributiondomain_output_lookup'),
provider=provider,
context=context
)
# Using the awscli for s3 syncing is incredibly suboptimal, but on
# balance it's probably the most stable/efficient option for syncing
# the files until https://github.com/boto/boto3/issues/358 is resolved
aws_cli(['s3',
'sync',
context.hook_data['staticsite']['app_directory'],
"s3://%s/" % bucket_name,
'--delete'])
cf_client = session.client('cloudfront')
cf_client.create_invalidation(
DistributionId=distribution_id,
InvalidationBatch={'Paths': {'Quantity': 1, 'Items': ['/*']},
'CallerReference': str(time.time())}
)
LOGGER.info("staticsite: sync & CF invalidation of %s (domain %s) "
"complete",
distribution_id,
distribution_domain)
if not context.hook_data['staticsite'].get('hash_tracking_disabled'):
LOGGER.info("staticsite: updating environment SSM parameter %s "
"with hash %s",
context.hook_data['staticsite']['hash_tracking_parameter'], # noqa
context.hook_data['staticsite']['hash'])
ssm_client = session.client('ssm')
ssm_client.put_parameter(
Name=context.hook_data['staticsite']['hash_tracking_parameter'], # noqa
Description='Hash of currently deployed static website source',
Value=context.hook_data['staticsite']['hash'],
Type='String',
Overwrite=True
)
LOGGER.info("staticsite: cleaning up old site archives...")
archives = []
s3_client = session.client('s3')
list_objects_v2_paginator = s3_client.get_paginator('list_objects_v2')
response_iterator = list_objects_v2_paginator.paginate(
Bucket=context.hook_data['staticsite']['artifact_bucket_name'],
Prefix=context.hook_data['staticsite']['artifact_key_prefix']
)
for page in response_iterator:
archives.extend(page.get('Contents', []))
archives_to_prune = get_archives_to_prune(
archives,
context.hook_data['staticsite']
)
# Iterate in chunks of 1000 to match delete_objects limit
for objects in [archives_to_prune[i:i + 1000]
for i in range(0, len(archives_to_prune), 1000)]:
s3_client.delete_objects(
Bucket=context.hook_data['staticsite']['artifact_bucket_name'],
Delete={'Objects': [{'Key': i} for i in objects]}
)
return True | def sync(context, provider, **kwargs): # pylint: disable=too-many-locals
session = get_session(provider.region)
bucket_name = OutputLookup.handle(kwargs.get('bucket_output_lookup'),
provider=provider,
context=context)
if context.hook_data['staticsite']['deploy_is_current'] | Sync static website to S3 bucket. | 2.55337 | 2.528939 | 1.009661 |
try:
with open(config_filename, 'r') as stream:
yaml.safe_load(stream)
except yaml.constructor.ConstructorError as yaml_error:
if yaml_error.problem.startswith(
'could not determine a constructor for the tag \'!'):
LOGGER.error('"%s" appears to be a CloudFormation template, '
'but is located in the top level of a module '
'alongside the CloudFormation config files (i.e. '
'the file or files indicating the stack names & '
'parameters). Please move the template to a '
'subdirectory.',
config_filename)
sys.exit(1) | def ensure_stacker_compat_config(config_filename) | Ensure config file can be loaded by Stacker. | 5.51327 | 5.308287 | 1.038616 |
for name in gen_stacker_env_files(environment, region):
if os.path.isfile(os.path.join(path, name)):
return name
return "%s-%s.env" % (environment, region) | def get_stacker_env_file(path, environment, region) | Determine Stacker environment file name. | 3.072023 | 2.799067 | 1.097517 |
if platform.system().lower() == 'windows':
# Because this will be run via subprocess, the backslashes on Windows
# will cause command errors
lib_path = lib_path.replace('\\', '/')
return ("import sys;"
"sys.argv = ['stacker'] + {args};"
"sys.path.insert(1, '{lib_path}');"
"from stacker.logger import setup_logging;"
"from stacker.commands import Stacker;"
"stacker = Stacker(setup_logging=setup_logging);"
"args = stacker.parse_args({args});"
"stacker.configure(args);args.run(args)".format(args=str(args),
lib_path=lib_path)) | def make_stacker_cmd_string(args, lib_path) | Generate stacker invocation script from command line arg list.
This is the standard stacker invocation script, with the following changes:
* Adding our explicit arguments to parse_args (instead of leaving it empty)
* Overriding sys.argv
* Adding embedded runway lib directory to sys.path | 3.785534 | 3.710772 | 1.020147 |
stacker_cmd.append('--force')
elif command == 'build':
if 'CI' in self.context.env_vars:
stacker_cmd.append('--recreate-failed')
else:
stacker_cmd.append('--interactive')
if 'DEBUG' in self.context.env_vars:
stacker_cmd.append('--verbose') # Increase logging if requested
stacker_env_file = get_stacker_env_file(self.path,
self.context.env_name,
self.context.env_region)
stacker_env_file_present = os.path.isfile(
os.path.join(self.path, stacker_env_file)
)
if isinstance(self.options.get('environments',
{}).get(self.context.env_name),
dict):
for (key, val) in self.options['environments'][self.context.env_name].items(): # noqa
stacker_cmd.extend(['-e', "%s=%s" % (key, val)])
if stacker_env_file_present:
stacker_cmd.append(stacker_env_file)
if not (stacker_env_file_present or self.options.get(
'environments',
{}).get(self.context.env_name)):
response['skipped_configs'] = True
LOGGER.info(
"Skipping stacker %s; no environment "
"file found for this environment/region "
"(looking for one of \"%s\")",
command,
', '.join(
gen_stacker_env_files(self.context.env_name, # noqa
self.context.env_region)) # noqa
)
else:
with change_dir(self.path):
# Iterate through any stacker yaml configs to deploy them in order
# or destroy them in reverse order
for _root, _dirs, files in os.walk(self.path):
sorted_files = sorted(files)
if command == 'destroy':
sorted_files = reversed(sorted_files)
for name in sorted_files:
if re.match(r"runway(\..*)?\.yml", name) or (
name.startswith('.')):
# Hidden files (e.g. .gitlab-ci.yml) or runway configs
# definitely aren't stacker config files
continue
if os.path.splitext(name)[1] in ['.yaml', '.yml']:
ensure_stacker_compat_config(
os.path.join(self.path, name)
)
LOGGER.info("Running stacker %s on %s in region %s",
command,
name,
self.context.env_region)
stacker_cmd_str = make_stacker_cmd_string(
stacker_cmd + [name],
get_embedded_lib_path()
)
stacker_cmd_list = [sys.executable, '-c']
LOGGER.debug(
"Stacker command being executed: %s \"%s\"",
' '.join(stacker_cmd_list),
stacker_cmd_str
)
run_module_command(
cmd_list=stacker_cmd_list + [stacker_cmd_str],
env_vars=self.context.env_vars
)
break # only need top level files
return response | def run_stacker(self, command='diff'): # pylint: disable=too-many-branches,too-many-locals
response = {'skipped_configs': False}
stacker_cmd = [command, "--region=%s" % self.context.env_region]
if command == 'destroy' | Run Stacker. | 3.128194 | 3.083379 | 1.014534 |
if LooseVersion(troposphere.__version__) == LooseVersion('2.4.0'):
cf_dist = cloudfront.Distribution
cf_dist.props['DistributionConfig'] = (DistributionConfig, True)
return cf_dist
return cloudfront.Distribution | def get_cf_distribution_class() | Return the correct troposphere CF distribution class. | 4.991154 | 4.050284 | 1.232297 |
if LooseVersion(troposphere.__version__) > LooseVersion('2.4.0'):
return cloudfront.S3OriginConfig
if LooseVersion(troposphere.__version__) == LooseVersion('2.4.0'):
return S3OriginConfig
return cloudfront.S3Origin | def get_s3_origin_conf_class() | Return the correct S3 Origin Config class for troposphere. | 2.933389 | 2.353656 | 1.246312 |
show = self.state.show if show is None else show
self.set_presence(PresenceState(available=True, show=show)) | def set_available(self, show=None) | Sets the agent availability to True.
Args:
show (aioxmpp.PresenceShow, optional): the show state of the presence (Default value = None) | 6.176632 | 7.113417 | 0.868307 |
show = PresenceShow.NONE
self.set_presence(PresenceState(available=False, show=show)) | def set_unavailable(self) | Sets the agent availability to False. | 14.193696 | 12.374624 | 1.147 |
state = state if state is not None else self.state
status = status if status is not None else self.status
priority = priority if priority is not None else self.priority
self.presenceserver.set_presence(state, status, priority) | def set_presence(self, state=None, status=None, priority=None) | Change the presence broadcast by the client.
If the client is currently connected, the new presence is broadcast immediately.
Args:
state(aioxmpp.PresenceState, optional): New presence state to broadcast (Default value = None)
status(dict or str, optional): New status information to broadcast (Default value = None)
priority (int, optional): New priority for the resource (Default value = None) | 2.315158 | 2.820235 | 0.82091 |
for jid, item in self.roster.items.items():
try:
self._contacts[jid.bare()].update(item.export_as_json())
except KeyError:
self._contacts[jid.bare()] = item.export_as_json()
return self._contacts | def get_contacts(self) | Returns list of contacts
Returns:
dict: the roster of contacts | 4.299954 | 3.753046 | 1.145724 |
try:
return self.get_contacts()[jid.bare()]
except KeyError:
raise ContactNotFound
except AttributeError:
raise AttributeError("jid must be an aioxmpp.JID object") | def get_contact(self, jid) | Returns a contact
Args:
jid (aioxmpp.JID): jid of the contact
Returns:
dict: the roster of contacts | 5.441932 | 4.689806 | 1.160375 |
self.roster.subscribe(aioxmpp.JID.fromstr(peer_jid).bare()) | def subscribe(self, peer_jid) | Asks for subscription
Args:
peer_jid (str): the JID you ask for subscription | 5.807233 | 6.943385 | 0.836369 |
self.roster.unsubscribe(aioxmpp.JID.fromstr(peer_jid).bare()) | def unsubscribe(self, peer_jid) | Asks for unsubscription
Args:
peer_jid (str): the JID you ask for unsubscription | 6.062028 | 6.901679 | 0.878341 |
self.roster.approve(aioxmpp.JID.fromstr(peer_jid).bare()) | def approve(self, peer_jid) | Approve a subscription request from jid
Args:
peer_jid (str): the JID to approve | 6.659089 | 7.640571 | 0.871543 |
date = datetime.datetime.now()
self.store.insert(0, (date, event, category))
if len(self.store) > self.size:
del self.store[-1] | def append(self, event, category=None) | Adds a new event to the trace store.
The event may have a category
Args:
event (spade.message.Message): the event to be stored
category (str, optional): a category to classify the event (Default value = None) | 3.232672 | 3.584825 | 0.901766 |
return list(itertools.islice((itertools.filterfalse(lambda x: x[1].sent, self.store)), limit))[::-1] | def received(self, limit=None) | Returns all the events that have been received (excluding sent events), until a limit if defined
Args:
limit (int, optional): the max length of the events to return (Default value = None)
Returns:
list: a list of received events | 7.820938 | 10.311879 | 0.75844 |
if category and not to:
msg_slice = itertools.islice((x for x in self.store if x[2] == category), limit)
elif to and not category:
to = JID.fromstr(to)
msg_slice = itertools.islice((x for x in self.store if _agent_in_msg(to, x[1])), limit)
elif to and category:
to = JID.fromstr(to)
msg_slice = itertools.islice((x for x in self.store if _agent_in_msg(to, x[1]) and x[2] == category), limit)
else:
msg_slice = self.all(limit=limit)
return msg_slice
return list(msg_slice)[::-1] | def filter(self, limit=None, to=None, category=None) | Returns the events that match the filters
Args:
limit (int, optional): the max length of the events to return (Default value = None)
to (str, optional): only events that have been sent or received by 'to' (Default value = None)
category (str, optional): only events belonging to the category (Default value = None)
Returns:
list: a list of filtered events | 2.67437 | 2.832073 | 0.944316 |
return self.container.start_agent(agent=self, auto_register=auto_register) | def start(self, auto_register=True) | Tells the container to start this agent.
It returns a coroutine or a future depending on whether it is called from a coroutine or a synchronous method.
Args:
auto_register (bool): register the agent in the server (Default value = True) | 6.538774 | 4.803213 | 1.361333 |
if auto_register:
await self._async_register()
self.client = aioxmpp.PresenceManagedClient(self.jid,
aioxmpp.make_security_layer(self.password,
no_verify=not self.verify_security),
loop=self.loop,
logger=logging.getLogger(self.jid.localpart))
# obtain an instance of the service
self.message_dispatcher = self.client.summon(SimpleMessageDispatcher)
# Presence service
self.presence = PresenceManager(self)
await self._async_connect()
# register a message callback here
self.message_dispatcher.register_callback(
aioxmpp.MessageType.CHAT,
None,
self._message_received,
)
await self.setup()
self._alive.set()
for behaviour in self.behaviours:
if not behaviour.is_running:
behaviour.start() | async def _async_start(self, auto_register=True) | Starts the agent from a coroutine. This fires some actions:
* if auto_register: register the agent in the server
* runs the event loop
* connects the agent to the server
* runs the registered behaviours
Args:
auto_register (bool, optional): register the agent in the server (Default value = True) | 5.649726 | 5.511497 | 1.02508 |
self.conn_coro = self.client.connected()
aenter = type(self.conn_coro).__aenter__(self.conn_coro)
self.stream = await aenter
logger.info(f"Agent {str(self.jid)} connected and authenticated.")
except aiosasl.AuthenticationFailure:
raise AuthenticationFailure(
"Could not authenticate the agent. Check user and password or use auto_register=True") | async def _async_connect(self): # pragma: no cover
try | connect and authenticate to the XMPP server. Async mode. | 9.553401 | 7.78177 | 1.227664 |
async def _async_register(self): # pragma: no cover
metadata = aioxmpp.make_security_layer(None, no_verify=not self.verify_security)
query = ibr.Query(self.jid.localpart, self.password)
_, stream, features = await aioxmpp.node.connect_xmlstream(self.jid, metadata, loop=self.loop)
await ibr.register(stream, query) | Register the agent in the XMPP server from a coroutine. | null | null | null |
|
digest = md5(str(jid).encode("utf-8")).hexdigest()
return "http://www.gravatar.com/avatar/{md5}?d=monsterid".format(md5=digest) | def build_avatar_url(jid) | Static method to build a gravatar url with the agent's JID
Args:
jid (aioxmpp.JID): an XMPP identifier
Returns:
str: a URL for the gravatar | 3.325062 | 3.8483 | 0.864034 |
behaviour.set_agent(self)
if issubclass(type(behaviour), FSMBehaviour):
for _, state in behaviour.get_states().items():
state.set_agent(self)
behaviour.set_template(template)
self.behaviours.append(behaviour)
if self.is_alive():
behaviour.start() | def add_behaviour(self, behaviour, template=None) | Adds and starts a behaviour to the agent.
If template is not None it is used to match
new messages and deliver them to the behaviour.
Args:
behaviour (spade.behaviour.CyclicBehaviour): the behaviour to be started
template (spade.template.Template, optional): the template to match messages with (Default value = None) | 3.046382 | 3.577422 | 0.851558 |
if not self.has_behaviour(behaviour):
raise ValueError("This behaviour is not registered")
index = self.behaviours.index(behaviour)
self.behaviours[index].kill()
self.behaviours.pop(index) | def remove_behaviour(self, behaviour) | Removes a behaviour from the agent.
The behaviour is first killed.
Args:
behaviour (spade.behaviour.CyclicBehaviour): the behaviour instance to be removed | 2.502238 | 2.629166 | 0.951723 |
if self.presence:
self.presence.set_unavailable()
for behav in self.behaviours:
behav.kill()
if self.web.is_started():
await self.web.runner.cleanup()
if self.is_alive():
# Disconnect from XMPP server
self.client.stop()
aexit = self.conn_coro.__aexit__(*sys.exc_info())
await aexit
logger.info("Client disconnected.")
self._alive.clear() | async def _async_stop(self) | Stops an agent and kills all its behaviours. | 7.392517 | 6.083404 | 1.215194 |
msg = Message.from_node(msg)
return self.dispatch(msg) | def _message_received(self, msg) | Callback run when an XMPP Message is received.
This callback delivers the message to every behaviour
that is waiting for it. First, the aioxmpp.Message is
converted to spade.message.Message
Args:
msg (aioxmpp.Message): the message just received.
Returns:
list(asyncio.Future): a list of futures of the append of the message at each matched behaviour. | 12.757799 | 14.828102 | 0.86038 |
logger.debug(f"Got message: {msg}")
futures = []
matched = False
for behaviour in (x for x in self.behaviours if x.match(msg)):
futures.append(self.submit(behaviour.enqueue(msg)))
logger.debug(f"Message enqueued to behaviour: {behaviour}")
self.traces.append(msg, category=str(behaviour))
matched = True
if not matched:
logger.warning(f"No behaviour matched for message: {msg}")
self.traces.append(msg)
return futures | def dispatch(self, msg) | Dispatch the message to every behaviour that is waiting for
it using their templates match.
Args:
msg (spade.message.Message): the message to dispatch.
Returns:
list(asyncio.Future): a list of futures of the append of the message at each matched behaviour. | 3.867875 | 3.543564 | 1.091521 |
if not isinstance(node, aioxmpp.stanza.Message):
raise AttributeError("node must be a aioxmpp.stanza.Message instance")
msg = cls()
msg._to = node.to
msg._sender = node.from_
if None in node.body:
msg.body = node.body[None]
else:
for key in node.body.keys():
msg.body = node.body[key]
break
for data in node.xep0004_data:
if data.title == SPADE_X_METADATA:
for field in data.fields:
if field.var != "_thread_node":
msg.set_metadata(field.var, field.values[0])
else:
msg.thread = field.values[0]
return msg | def from_node(cls, node) | Creates a new spade.message.Message from an aioxmpp.stanza.Message
Args:
node (aioxmpp.stanza.Message): an aioxmpp Message
Returns:
spade.message.Message: a new spade Message | 3.951018 | 3.383694 | 1.167664 |
if jid is not None and not isinstance(jid, str):
raise TypeError("'to' MUST be a string")
self._to = aioxmpp.JID.fromstr(jid) if jid is not None else None | def to(self, jid: str) | Set jid of the receiver.
Args:
jid (str): the jid of the receiver. | 4.595086 | 4.382261 | 1.048565 |
if jid is not None and not isinstance(jid, str):
raise TypeError("'sender' MUST be a string")
self._sender = aioxmpp.JID.fromstr(jid) if jid is not None else None | def sender(self, jid: str) | Set jid of the sender
Args:
jid (str): jid of the sender | 4.461597 | 4.361254 | 1.023008 |
if body is not None and not isinstance(body, str):
raise TypeError("'body' MUST be a string")
self._body = body | def body(self, body: str) | Set body of the message
Args:
body (str): The body of the message | 4.96685 | 4.844141 | 1.025331 |
if value is not None and not isinstance(value, str):
raise TypeError("'thread' MUST be a string")
self._thread = value | def thread(self, value: str) | Set thread id of the message
Args:
value (str): the thread id | 4.709389 | 5.284321 | 0.8912 |
if not isinstance(key, str) or not isinstance(value, str):
raise TypeError("'key' and 'value' of metadata MUST be strings")
self.metadata[key] = value | def set_metadata(self, key: str, value: str) | Add a new metadata to the message
Args:
key (str): name of the metadata
value (str): value of the metadata | 3.787806 | 4.04006 | 0.937562 |
return self.metadata[key] if key in self.metadata else None | def get_metadata(self, key) -> str | Get the value of a metadata. Returns None if metadata does not exist.
Args:
key (str): name of the metadata
Returns:
str: the value of the metadata (or None) | 6.375807 | 7.079661 | 0.900581 |
if self.to and message.to != self.to:
return False
if self.sender and message.sender != self.sender:
return False
if self.body and message.body != self.body:
return False
if self.thread and message.thread != self.thread:
return False
for key, value in self.metadata.items():
if message.get_metadata(key) != value:
return False
logger.debug(f"message matched {self} == {message}")
return True | def match(self, message) -> bool | Returns whether a message matches with this message or not.
The message can be a Message object or a Template object.
Args:
message (spade.message.Message): the message to match to
Returns:
bool: whether the message matches or not | 2.312118 | 2.221649 | 1.040721 |
return Message(
to=str(self.sender),
sender=str(self.to),
body=self.body,
thread=self.thread,
metadata=self.metadata
) | def make_reply(self) | Creates a copy of the message, exchanging sender and receiver
Returns:
spade.message.Message: a new message with exchanged sender and receiver | 4.143881 | 4.138209 | 1.001371 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.