Dataset schema (per-record fields, with observed value ranges):

- body: string, 26 to 98.2k chars (full function source, including docstring)
- body_hash: int64, approx. -9.22e18 to 9.22e18
- docstring: string, 1 to 16.8k chars
- path: string, 5 to 230 chars
- name: string, 1 to 96 chars
- repository_name: string, 7 to 89 chars
- lang: categorical, 1 class ("python")
- body_without_docstring: string, 20 to 98.2k chars

Each record below is listed as "repository_name :: path :: name [lang]" followed by the function source.
# Juspem1980/privacy :: tensorflow_privacy/privacy/dp_query/gaussian_query.py :: __init__ [python]

def __init__(self, l2_norm_clip, stddev):
    """Initializes the GaussianSumQuery.

    Args:
      l2_norm_clip: The clipping norm to apply to the global norm of each
        record.
      stddev: The stddev of the noise added to the sum.
    """
    self._l2_norm_clip = l2_norm_clip
    self._stddev = stddev
    self._ledger = None

# Juspem1980/privacy :: tensorflow_privacy/privacy/dp_query/gaussian_query.py :: make_global_state [python]

def make_global_state(self, l2_norm_clip, stddev):
    """Creates a global state from the given parameters."""
    return self._GlobalState(
        tf.cast(l2_norm_clip, tf.float32), tf.cast(stddev, tf.float32))

# Juspem1980/privacy :: tensorflow_privacy/privacy/dp_query/gaussian_query.py :: preprocess_record_impl [python]

def preprocess_record_impl(self, params, record):
    """Clips the l2 norm, returning the clipped record and the l2 norm.

    Args:
      params: The parameters for the sample.
      record: The record to be processed.

    Returns:
      A tuple (preprocessed_records, l2_norm) where `preprocessed_records` is
      the structure of preprocessed tensors, and l2_norm is the total l2 norm
      before clipping.
    """
    l2_norm_clip = params
    record_as_list = tf.nest.flatten(record)
    clipped_as_list, norm = tf.clip_by_global_norm(record_as_list, l2_norm_clip)
    return tf.nest.pack_sequence_as(record, clipped_as_list), norm

# Juspem1980/privacy :: tensorflow_privacy/privacy/dp_query/gaussian_query.py :: get_noised_result [python]

def get_noised_result(self, sample_state, global_state):
    """See base class."""
    if LooseVersion(tf.__version__) < LooseVersion('2.0.0'):
        def add_noise(v):
            return v + tf.random.normal(tf.shape(input=v), stddev=global_state.stddev)
    else:
        random_normal = tf.random_normal_initializer(stddev=global_state.stddev)

        def add_noise(v):
            return v + random_normal(tf.shape(input=v))

    if self._ledger:
        dependencies = [
            self._ledger.record_sum_query(global_state.l2_norm_clip, global_state.stddev)
        ]
    else:
        dependencies = []
    with tf.control_dependencies(dependencies):
        return tf.nest.map_structure(add_noise, sample_state), global_state

# Juspem1980/privacy :: tensorflow_privacy/privacy/dp_query/gaussian_query.py :: __init__ [python]

def __init__(self, l2_norm_clip, sum_stddev, denominator):
    """Initializes the GaussianAverageQuery.

    Args:
      l2_norm_clip: The clipping norm to apply to the global norm of each
        record.
      sum_stddev: The stddev of the noise added to the sum (before
        normalization).
      denominator: The normalization constant (applied after noise is added to
        the sum).
    """
    super(GaussianAverageQuery, self).__init__(
        numerator_query=GaussianSumQuery(l2_norm_clip, sum_stddev),
        denominator=denominator)

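The five gaussian_query.py entries above implement pieces of the tensorflow_privacy DPQuery protocol. Below is a minimal sketch of how a caller would drive a GaussianSumQuery end to end; the driver methods not shown above (initial_global_state, derive_sample_params, initial_sample_state, accumulate_record) are assumed from the DPQuery base class, and their exact signatures vary between tensorflow_privacy releases.

import tensorflow as tf
from tensorflow_privacy.privacy.dp_query.gaussian_query import GaussianSumQuery

# Clip each record to L2 norm 1.0, then add N(0, 0.5**2) noise to the sum.
query = GaussianSumQuery(l2_norm_clip=1.0, stddev=0.5)
global_state = query.initial_global_state()
params = query.derive_sample_params(global_state)

records = [tf.constant([3.0, 4.0]),   # L2 norm 5.0 -> clipped down to norm 1.0
           tf.constant([0.3, 0.4])]   # L2 norm 0.5 -> kept as-is

sample_state = query.initial_sample_state(records[0])
for record in records:
    sample_state = query.accumulate_record(params, sample_state, record)

# In the version shown above, this returns (noised_sum, global_state).
noised_sum, global_state = query.get_noised_result(sample_state, global_state)
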
# AdityaDubey0/oppia :: core/domain/suggestion_registry_test.py :: _assert_community_contribution_stats_is_in_default_state [python]

def _assert_community_contribution_stats_is_in_default_state(self):
    """Checks if the community contribution stats is in its default
    state.
    """
    community_contribution_stats = (
        suggestion_services.get_community_contribution_stats())
    self.assertEqual(
        community_contribution_stats.translation_reviewer_counts_by_lang_code, {})
    self.assertEqual(
        community_contribution_stats.translation_suggestion_counts_by_lang_code, {})
    self.assertEqual(community_contribution_stats.question_reviewer_count, 0)
    self.assertEqual(community_contribution_stats.question_suggestion_count, 0)

# AdityaDubey0/oppia :: core/domain/suggestion_registry_test.py :: conversion_fn [python]

def conversion_fn():
    """Temporary function."""
    pass

# actboy/espnet :: espnet2/gan_tts/espnet_model.py :: __init__ [python]

def __init__(
    self,
    feats_extract: Optional[AbsFeatsExtract],
    normalize: Optional[AbsNormalize and InversibleInterface],
    tts: AbsGANTTS,
):
    """Initialize ESPnetGANTTSModel module."""
    assert check_argument_types()
    super().__init__()
    self.feats_extract = feats_extract
    self.normalize = normalize
    self.tts = tts
    assert hasattr(tts, 'generator'), 'generator module must be registered as tts.generator'
    assert hasattr(tts, 'discriminator'), 'discriminator module must be registered as tts.discriminator'

# actboy/espnet :: espnet2/gan_tts/espnet_model.py :: forward [python]

def forward(
    self,
    text: torch.Tensor,
    text_lengths: torch.Tensor,
    speech: torch.Tensor,
    speech_lengths: torch.Tensor,
    spembs: Optional[torch.Tensor] = None,
    sids: Optional[torch.Tensor] = None,
    lids: Optional[torch.Tensor] = None,
    forward_generator: bool = True,
) -> Dict[str, Any]:
    """Return generator or discriminator loss with dict format.

    Args:
        text (Tensor): Text index tensor (B, T_text).
        text_lengths (Tensor): Text length tensor (B,).
        speech (Tensor): Speech waveform tensor (B, T_wav).
        speech_lengths (Tensor): Speech length tensor (B,).
        spembs (Optional[Tensor]): Speaker embedding tensor (B, D).
        sids (Optional[Tensor]): Speaker ID tensor (B, 1).
        lids (Optional[Tensor]): Language ID tensor (B, 1).
        forward_generator (bool): Whether to forward generator.

    Returns:
        Dict[str, Any]:
            - loss (Tensor): Loss scalar tensor.
            - stats (Dict[str, float]): Statistics to be monitored.
            - weight (Tensor): Weight tensor to summarize losses.
            - optim_idx (int): Optimizer index (0 for G and 1 for D).

    """
    with autocast(False):
        feats = None
        if self.feats_extract is not None:
            feats, feats_lengths = self.feats_extract(speech, speech_lengths)
        if self.normalize is not None:
            feats, feats_lengths = self.normalize(feats, feats_lengths)
    batch = {}
    batch.update(text=text, text_lengths=text_lengths)
    batch.update(forward_generator=forward_generator)
    if feats is not None:
        batch.update(feats=feats, feats_lengths=feats_lengths)
    if self.tts.require_raw_speech:
        batch.update(speech=speech, speech_lengths=speech_lengths)
    if spembs is not None:
        batch.update(spembs=spembs)
    if sids is not None:
        batch.update(sids=sids)
    if lids is not None:
        batch.update(lids=lids)
    return self.tts(**batch)

# actboy/espnet :: espnet2/gan_tts/espnet_model.py :: collect_feats [python]

def collect_feats(
    self,
    text: torch.Tensor,
    text_lengths: torch.Tensor,
    speech: torch.Tensor,
    speech_lengths: torch.Tensor,
    spembs: Optional[torch.Tensor] = None,
    sids: Optional[torch.Tensor] = None,
    lids: Optional[torch.Tensor] = None,
) -> Dict[str, torch.Tensor]:
    """Calculate features and return them as a dict.

    Args:
        text (Tensor): Text index tensor (B, T_text).
        text_lengths (Tensor): Text length tensor (B,).
        speech (Tensor): Speech waveform tensor (B, T_wav).
        speech_lengths (Tensor): Speech length tensor (B, 1).
        spembs (Optional[Tensor]): Speaker embedding tensor (B, D).
        sids (Optional[Tensor]): Speaker index tensor (B, 1).
        lids (Optional[Tensor]): Language ID tensor (B, 1).

    Returns:
        Dict[str, Tensor]: Dict of features.

    """
    feats = None
    if self.feats_extract is not None:
        feats, feats_lengths = self.feats_extract(speech, speech_lengths)
    feats_dict = {}
    if feats is not None:
        feats_dict.update(feats=feats, feats_lengths=feats_lengths)
    return feats_dict

# cliftbar/flask_app_template :: flask_app/utilities/DataInterfaces/SqlInterface.py :: factory [python]

@staticmethod
def factory(sql_connection_type: SqlDialect, **kwargs) -> 'SqlConnectionOptions':
    """
    Function signatures for factory method

    Postgres: (dialect: SqlDialects, host: str, port: int, username: str, password: str,
               database_name: str, timeout: int = None)
    """
    return SqlConnectionFactories.get_factory(sql_connection_type)(**kwargs)

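As a usage sketch, a Postgres connection-options object would be built through the factory roughly as below. The SqlDialect.POSTGRES member name is an assumption; the enum's members are not shown in this record.

# Hypothetical call; SqlDialect.POSTGRES is assumed, not confirmed by this file.
options = SqlConnectionOptions.factory(
    SqlDialect.POSTGRES,
    host='localhost',
    port=5432,
    username='app',
    password='secret',
    database_name='app_db',
    timeout=30,
)
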
# aotuai/brainframe-cli :: brainframe/cli/docker_compose.py :: get_latest_version [python]

def get_latest_version() -> str:
    """
    :return: The latest available version in the format "vX.Y.Z"
    """
    subdomain = 'staging.' if config.is_staging.value else ''
    credentials = config.staging_credentials()
    url = BRAINFRAME_LATEST_TAG_URL.format(subdomain=subdomain)
    response = requests.get(url, auth=credentials)
    return response.text

# aotuai/brainframe-cli :: brainframe/cli/docker_compose.py :: _assert_has_docker_permissions [python]

def _assert_has_docker_permissions() -> None:
    """Fails if the user does not have permissions to interact with Docker"""
    if not (os_utils.is_root() or os_utils.currently_in_group('docker')):
        error_message = (i18n.t('general.docker-bad-permissions') + '\n'
                         + _group_recommendation_message('docker'))
        print_utils.fail(error_message)

# aotuai/brainframe-cli :: brainframe/cli/docker_compose.py :: _assert_has_write_permissions [python]

def _assert_has_write_permissions(path: Path) -> None:
    """Fails if the user does not have write access to the given path."""
    if os.access(path, os.W_OK):
        return
    error_message = i18n.t('general.file-bad-write-permissions', path=path)
    error_message += '\n'
    if path.stat().st_gid == os_utils.BRAINFRAME_GROUP_ID:
        error_message += ' ' + _group_recommendation_message('brainframe')
    else:
        error_message += ' ' + i18n.t(
            'general.unexpected-group-for-file', path=path, group='brainframe')
    print_utils.fail(error_message)

# SoftwareAG/cumulocity-remote-access-local-proxy :: c8ylp/cli/core.py :: configure_logger [python]

def configure_logger(path: pathlib.Path, verbose: bool = False) -> logging.Logger:
    """Configure logger

    Args:
        path (pathlib.Path): Path where the persistent logger should write to.
        verbose (bool, optional): Use verbose logging. Defaults to False.

    Returns:
        logging.Logger: Created logger
    """
    path.parent.mkdir(parents=True, exist_ok=True)
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    log_file_formatter = logging.Formatter(
        '%(asctime)s %(threadName)s %(levelname)s %(name)s %(message)s')
    if verbose:
        log_console_formatter = logging.Formatter('[c8ylp] %(levelname)-5s %(message)s')
        console_loglevel = logging.INFO
        if len(logger.handlers) == 0:
            console_handler = logging.StreamHandler()
            console_handler.setFormatter(log_console_formatter)
            console_handler.setLevel(console_loglevel)
            logger.addHandler(console_handler)
        else:
            handler = logger.handlers[0]
            handler.setLevel(console_loglevel)
            handler.setFormatter(log_console_formatter)
    else:
        logger.handlers = []
    rotate_handler = RotatingFileHandler(
        filename=str(path), maxBytes=10000000, backupCount=5)
    rotate_handler.setFormatter(log_file_formatter)
    rotate_handler.setLevel(logging.INFO)
    logger.addHandler(rotate_handler)
    return logger

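A short usage sketch: because the function configures the root logger, any module-level logging call made afterwards is written to the rotating file (and echoed to the console when verbose=True).

import logging
import pathlib

log_file = pathlib.Path.home() / '.c8ylp' / 'localproxy.log'
configure_logger(log_file, verbose=True)
logging.getLogger(__name__).info('local proxy starting')  # file + console
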
# SoftwareAG/cumulocity-remote-access-local-proxy :: c8ylp/cli/core.py :: signal_handler [python]

def signal_handler(_signal, _frame):
    """Signal handler"""
    sys.exit(ExitCodes.TERMINATE)

# SoftwareAG/cumulocity-remote-access-local-proxy :: c8ylp/cli/core.py :: register_signals [python]

def register_signals():
    """Register signal handlers"""
    signal.signal(signal.SIGINT, signal_handler)

# SoftwareAG/cumulocity-remote-access-local-proxy :: c8ylp/cli/core.py :: create_client [python]

def create_client(ctx: click.Context, opts: ProxyContext) -> CumulocityClient:
    """Create Cumulocity client and prompt for missing credentials
    if necessary.

    Args:
        ctx (click.Context): Click context
        opts (ProxyContext): Proxy options

    Returns:
        CumulocityClient: Configured Cumulocity client
    """
    if not opts.disable_prompts and not opts.host:
        opts.host = click.prompt(text='Enter the Cumulocity Host/URL')
    client = CumulocityClient(
        hostname=opts.host,
        tenant=opts.tenant,
        user=opts.user,
        password=opts.password,
        tfacode=opts.tfa_code,
        token=opts.token,
        ignore_ssl_validate=opts.ignore_ssl_validate,
    )
    if not client.url:
        opts.show_error(
            'No Cumulocity host was provided. The host can be set via '
            'environment variables, arguments or the env-file')
        ctx.exit(ExitCodes.NO_SESSION)
    logging.info('Checking tenant id')
    client.validate_tenant_id()

    # Retry logging in (up to 3 times), prompting for missing credentials.
    retries = 3
    success = False
    while retries:
        try:
            if client.token:
                client.validate_credentials()
            else:
                client.login()
            if opts.env_file and opts.store_token:
                store_credentials(opts, client)
            success = True
            break
        except CumulocityMissingTFAToken:
            client.tfacode = click.prompt(
                text='Enter your Cumulocity TFA-Token', hide_input=False)
        except Exception as ex:
            logging.info('unknown exception: %s', ex)
            if not opts.disable_prompts:
                if not client.user:
                    client.user = click.prompt(text='Enter your Cumulocity Username')
                if not client.password:
                    client.password = click.prompt(
                        text='Enter your Cumulocity Password [input hidden]',
                        hide_input=True)
        retries -= 1
    if not success:
        logging.info('Could not create client')
        ctx.exit(ExitCodes.NO_SESSION)
    return client

# SoftwareAG/cumulocity-remote-access-local-proxy :: c8ylp/cli/core.py :: store_credentials [python]

def store_credentials(opts: ProxyContext, client: CumulocityClient):
    """Store credentials to the environment file. It creates
    the file if it does not already exist.

    The file will only be written to if it has changed.

    Args:
        opts (ProxyContext): Proxy options
        client (CumulocityClient): Cumulocity client containing valid
            credentials
    """
    changed = save_env(opts.env_file, {
        'C8Y_HOST': client.url,
        'C8Y_USER': client.user,
        'C8Y_TENANT': client.tenant,
        'C8Y_TOKEN': client.token,
    })
    if changed:
        opts.show_message(f'Env file was updated: {opts.env_file}')
    else:
        opts.show_info(f'Env file is already up to date: {opts.env_file}')

# SoftwareAG/cumulocity-remote-access-local-proxy :: c8ylp/cli/core.py :: get_config_id [python]

def get_config_id(ctx: click.Context, mor: Dict[str, Any], config: str) -> str:
    """Get the remote access configuration id matching a specific type
    from a device managed object

    Args:
        mor (Dict[str, Any]): Device managed object
        config (str): Expected configuration type

    Returns:
        str: Remote access configuration id
    """
    device_name = mor.get('name', '<<empty_name>>')
    if REMOTE_ACCESS_FRAGMENT not in mor:
        logging.error(
            'No Remote Access Configuration has been found for device "%s"', device_name)
        ctx.exit(ExitCodes.DEVICE_MISSING_REMOTE_ACCESS_FRAGMENT)
    valid_configs = [
        item for item in mor.get(REMOTE_ACCESS_FRAGMENT, [])
        if item.get('protocol') == PASSTHROUGH
    ]
    if not valid_configs:
        logging.error(
            'No config with protocol set to "%s" has been found for device "%s"',
            PASSTHROUGH, device_name)
        ctx.exit(ExitCodes.DEVICE_NO_PASSTHROUGH_CONFIG)

    def extract_config_id(matching_config):
        logging.info(
            'Using Configuration with Name "%s" and Remote Port %s',
            matching_config.get('name'), matching_config.get('port'))
        return matching_config.get('id')

    if not config:
        # Use the first passthrough config if no name is specified.
        return extract_config_id(valid_configs[0])
    matches = [
        item for item in valid_configs
        if item.get('name', '').casefold() == config.casefold()
    ]
    if not matches:
        logging.error(
            'Provided config name "%s" for "%s" was not found or none with protocol set to "%s"',
            config, device_name, PASSTHROUGH)
        ctx.exit(ExitCodes.DEVICE_NO_MATCHING_PASSTHROUGH_CONFIG)
    return extract_config_id(matches[0])

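To illustrate the lookup, here is a minimal managed object the function would accept, called from inside a click command where ctx is available. The fragment and protocol literals ('c8y_RemoteAccessList', 'PASSTHROUGH') are assumptions about the module constants REMOTE_ACCESS_FRAGMENT and PASSTHROUGH, based on the Cumulocity remote-access API; they are not confirmed by this record.

# Hypothetical managed object; fragment/protocol literals are assumed.
mor = {
    'id': '12345',
    'name': 'device-01',
    'c8y_RemoteAccessList': [
        {'id': '42', 'name': 'ssh', 'protocol': 'PASSTHROUGH', 'port': 22},
        {'id': '43', 'name': 'vnc', 'protocol': 'VNC', 'port': 5900},
    ],
}
config_id = get_config_id(ctx, mor, 'ssh')  # matches by name, returns '42'
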
# SoftwareAG/cumulocity-remote-access-local-proxy :: c8ylp/cli/core.py :: run_proxy_in_background [python]

def run_proxy_in_background(
    ctx: click.Context,
    opts: ProxyContext,
    connection_data: RemoteAccessConnectionData,
    ready_signal: threading.Event = None,
):
    """Run the proxy in a background thread

    Args:
        ctx (click.Context): Click context
        opts (ProxyContext): Proxy options
        connection_data (RemoteAccessConnectionData): Remote access connection data
    """
    stop_signal = threading.Event()
    _local_ready_signal = threading.Event()
    register_signals()
    background = threading.Thread(
        target=start_proxy,
        args=(ctx, opts),
        kwargs=dict(
            connection_data=connection_data,
            stop_signal=stop_signal,
            ready_signal=_local_ready_signal,
        ),
        daemon=True,
    )
    background.start()
    if not _local_ready_signal.wait(opts.wait_port_timeout):
        opts.exit_server_not_ready()
    opts.set_env()
    timer = CommandTimer('Duration', on_exit=click.echo).start()

    @ctx.call_on_close
    def _shutdown_server_thread():
        stop_signal.set()
        background.join()
        timer.stop_with_message()

    if ready_signal:
        ready_signal.set()

# SoftwareAG/cumulocity-remote-access-local-proxy :: c8ylp/cli/core.py :: pre_start_checks [python]

def pre_start_checks(ctx: click.Context, opts: ProxyContext) -> Optional[RemoteAccessConnectionData]:
    """Run prestart checks before starting the local proxy

    Args:
        ctx (click.Context): Click context
        opts (ProxyContext): Proxy options

    Returns:
        Optional[RemoteAccessConnectionData]: Remote access connection data
    """
    try:
        client = create_client(ctx, opts)
        mor = client.get_managed_object(opts.device, opts.external_type)
        config_id = get_config_id(ctx, mor, opts.config)
        device_id = mor.get('id')
        is_authorized = client.validate_remote_access_role()
        if not is_authorized:
            opts.show_error(
                'The user is not authorized to use Cloud Remote Access. '
                f'Contact your Cumulocity Admin. user={opts.user}')
            ctx.exit(ExitCodes.MISSING_ROLE_REMOTE_ACCESS_ADMIN)
    except Exception as ex:
        if isinstance(ex, click.exceptions.Exit):
            opts.show_error(f'Could not retrieve device information. reason={ex}')
            raise
        error_context = ''
        extra_details = []
        if opts.host and opts.host not in str(ex):
            extra_details.append(f"host={opts.host or ''}")
        if opts.user and opts.user not in str(ex):
            extra_details.append(f"user={opts.user or ''}")
        if extra_details:
            error_context = '. settings: ' + ', '.join(extra_details)
        opts.show_error(
            'Unexpected error when retrieving device information from Cumulocity. '
            f'error_details={ex}{error_context}')
        ctx.exit(ExitCodes.NOT_AUTHORIZED)
    return RemoteAccessConnectionData(
        client=client, managed_object_id=device_id, remote_config_id=config_id)

# SoftwareAG/cumulocity-remote-access-local-proxy :: c8ylp/cli/core.py :: start_proxy [python]

def start_proxy(
    ctx: click.Context,
    opts: ProxyContext,
    connection_data: RemoteAccessConnectionData,
    stop_signal: threading.Event = None,
    ready_signal: threading.Event = None,
) -> NoReturn:
    """Start the local proxy

    Args:
        ctx (click.Context): Click context
        opts (ProxyContext): Proxy options
    """
    is_main_thread = threading.current_thread() is threading.main_thread()
    if is_main_thread:
        register_signals()
    client_opts = {
        'host': opts.host,
        'config_id': connection_data.remote_config_id,
        'device_id': connection_data.managed_object_id,
        'session': connection_data.client.session,
        'token': opts.token,
        'ignore_ssl_validate': opts.ignore_ssl_validate,
        'ping_interval': opts.ping_interval,
        'max_retries': 2,
    }
    tcp_server = None
    background = None
    try:
        tcp_server = TCPProxyServer(
            opts.port, WebsocketClient(**client_opts), opts.tcp_size, opts.tcp_timeout)
        exit_code = ExitCodes.OK
        click.secho(BANNER1)
        logging.info('Starting tcp server')
        background = threading.Thread(target=tcp_server.serve_forever, daemon=True)
        background.start()
        if not tcp_server.wait_for_running(opts.wait_port_timeout):
            opts.exit_server_not_ready()
        if tcp_server.server.socket:
            opts.used_port = tcp_server.server.socket.getsockname()[1]
        if is_main_thread:
            opts.show_info(
                f'\nc8ylp is listening for device (ext_id) {opts.device} '
                f'({opts.host}) on localhost:{opts.used_port}')
            ssh_username = opts.ssh_user or '<device_username>'
            opts.show_message(
                f'\nFor example, if you are running a ssh proxy, you connect to '
                f'{opts.device} by executing the following in a new tab/console:\n\n'
                f'    ssh -p {opts.used_port} {ssh_username}@localhost')
            opts.show_info('\nPress ctrl-c to shutdown the server')
        if ready_signal:
            ready_signal.set()
        while background.is_alive():
            if stop_signal and stop_signal.is_set():
                break
            time.sleep(1)
            logging.debug('Waiting in background: alive=%s', background.is_alive())
    except Exception as ex:
        if isinstance(ex, click.exceptions.Exit):
            exit_code = getattr(ex, 'exit_code')
            raise
        if str(ex):
            opts.show_error(
                'The local proxy TCP Server experienced an unexpected error. '
                f'port={opts.port}, error={ex}')
        exit_code = ExitCodes.UNKNOWN
    finally:
        if tcp_server:
            tcp_server.shutdown()
        if background:
            background.join()
        if is_main_thread:
            if int(exit_code) == 0:
                opts.show_message(f'Exiting: {str(exit_code)} ({int(exit_code)})')
            else:
                opts.show_error(f'Exiting: {str(exit_code)} ({int(exit_code)})')
            ctx.exit(exit_code)
        else:
            opts.show_info('Exiting')

# SoftwareAG/cumulocity-remote-access-local-proxy :: c8ylp/cli/core.py :: used_port [python]

@property
def used_port(self) -> int:
    """Get the port used by the local proxy

    Returns:
        int: Port number
    """
    return self._root_context.get('used_port', self.port)

# SoftwareAG/cumulocity-remote-access-local-proxy :: c8ylp/cli/core.py :: used_port [python]

@used_port.setter
def used_port(self, value: int):
    """Store the port used by the local proxy for later reference

    Args:
        value (int): Port number
    """
    self._root_context['used_port'] = value

# SoftwareAG/cumulocity-remote-access-local-proxy :: c8ylp/cli/core.py :: exit_server_not_ready [python]

def exit_server_not_ready(self) -> NoReturn:
    """Exit with a server not ready error

    Returns:
        NoReturn: The function does not return
    """
    self.show_error(
        'Timed out waiting for local port to open: '
        f'port={self.used_port}, timeout={self.wait_port_timeout}s')
    self._ctx.exit(ExitCodes.TIMEOUT_WAIT_FOR_PORT)

# SoftwareAG/cumulocity-remote-access-local-proxy :: c8ylp/cli/core.py :: fromdict [python]

def fromdict(self, src_dict: Dict[str, Any]) -> 'ProxyContext':
    """Load proxy settings from a dictionary

    Args:
        src_dict (Dict[str, Any]): [description]

    Returns:
        ProxyContext: Proxy options after the values have been set
        via the dictionary
    """
    logging.info('Loading from dictionary')
    assert isinstance(src_dict, dict)
    for key, value in src_dict.items():
        logging.info('reading key: %s=%s', key, value)
        if hasattr(self, key):
            setattr(self, key, value)
    return self

# SoftwareAG/cumulocity-remote-access-local-proxy :: c8ylp/cli/core.py :: start_background [python]

def start_background(self, ctx: click.Context = None) -> 'ProxyContext':
    """Start the local proxy in the background

    Returns:
        ProxyContext: Reference to the proxy context so it can be chained
        with other commands or used after the initialization of the class.
    """
    cur_ctx = ctx or self._ctx
    connection_data = pre_start_checks(cur_ctx, self)
    ready_signal = threading.Event()
    run_proxy_in_background(
        cur_ctx, self, connection_data=connection_data, ready_signal=ready_signal)
    if not ready_signal.wait(self.wait_port_timeout):
        self.exit_server_not_ready()
    return self

# SoftwareAG/cumulocity-remote-access-local-proxy :: c8ylp/cli/core.py :: start [python]

def start(self, ctx: click.Context = None) -> None:
    """Start the local proxy in the foreground (blocks until the proxy exits)."""
    cur_ctx = ctx or self._ctx
    connection_data = pre_start_checks(cur_ctx, self)
    start_proxy(cur_ctx, self, connection_data=connection_data)

# SoftwareAG/cumulocity-remote-access-local-proxy :: c8ylp/cli/core.py :: show_message [python]

@classmethod
def show_message(cls, msg: str, *args, **kwargs):
    """Show a message to the user and log it

    Args:
        msg (str): User message to print on the console
    """
    click.secho(msg, fg='green')
    logging.info(msg, *args, **kwargs)

# SoftwareAG/cumulocity-remote-access-local-proxy :: c8ylp/cli/core.py :: show_error [python]

def show_error(self, msg: str, *args, **kwargs):
    """Show an error to the user and log it

    Args:
        msg (str): User message to print on the console
    """
    if not self.verbose:
        click.secho(msg, fg='red')
    logging.warning(msg, *args, **kwargs)

# SoftwareAG/cumulocity-remote-access-local-proxy :: c8ylp/cli/core.py :: show_info [python]

def show_info(self, msg: str, *args, **kwargs):
    """Show an info message to the user and log it

    Args:
        msg (str): User message to print on the console
    """
    if not self.verbose:
        click.secho(msg)
    logging.warning(msg, *args, **kwargs)

# SoftwareAG/cumulocity-remote-access-local-proxy :: c8ylp/cli/core.py :: show_warning [python]

def show_warning(self, msg: str, *args, **kwargs):
    """Show a warning to the user and log it

    Args:
        msg (str): User message to print on the console
    """
    if not self.verbose:
        click.secho(msg, fg='yellow')
    logging.warning(msg, *args, **kwargs)

# SoftwareAG/cumulocity-remote-access-local-proxy :: c8ylp/cli/core.py :: set_env [python]

def set_env(self):
    """Set environment variables so information about the proxy can
    be accessed by plugins
    """
    os.environ['C8Y_HOST'] = str(self.host)
    os.environ['PORT'] = str(self.used_port)
    os.environ['DEVICE'] = self.device
    # WSLENV forwards these variables into WSL subprocesses.
    os.environ['WSLENV'] = 'PORT/u:DEVICE/u:C8Y_HOST/u'

# SoftwareAG/cumulocity-remote-access-local-proxy :: c8ylp/cli/core.py :: log_path [python]

@classmethod
def log_path(cls) -> pathlib.Path:
    """Get the log path"""
    return pathlib.Path(
        os.getenv('C8YLP_LOG_DIR', '~/.c8ylp/')).expanduser() / 'localproxy.log'

# jaimeaguilera/Investing-projects :: kit.py :: get_ffme_returns [python]

def get_ffme_returns():
    """
    Load the Fama-French Dataset for the returns of the Top and Bottom Deciles by MarketCap
    """
    me_m = pd.read_csv('data/Portfolios_Formed_on_ME_monthly_EW.csv',
                       header=0, index_col=0, na_values=-99.99)
    rets = me_m[['Lo 10', 'Hi 10']]
    rets.columns = ['SmallCap', 'LargeCap']
    rets = rets / 100
    rets.index = pd.to_datetime(rets.index, format='%Y%m').to_period('M')
    return rets

# jaimeaguilera/Investing-projects :: kit.py :: get_fff_returns [python]

def get_fff_returns():
    """
    Load the Fama-French Research Factor Monthly Dataset
    """
    rets = pd.read_csv('data/F-F_Research_Data_Factors_m.csv',
                       header=0, index_col=0, na_values=-99.99) / 100
    rets.index = pd.to_datetime(rets.index, format='%Y%m').to_period('M')
    return rets

# jaimeaguilera/Investing-projects :: kit.py :: get_hfi_returns [python]

def get_hfi_returns():
    """
    Load and format the EDHEC Hedge Fund Index Returns
    """
    hfi = pd.read_csv('data/edhec-hedgefundindices.csv',
                      header=0, index_col=0, parse_dates=True)
    hfi = hfi / 100
    hfi.index = hfi.index.to_period('M')
    return hfi

# jaimeaguilera/Investing-projects :: kit.py :: get_ind_file [python]

def get_ind_file(filetype, weighting='vw', n_inds=30):
    """
    Load and format the Ken French Industry Portfolios files
    Variant is a tuple of (weighting, size) where:
        weighting is one of "ew", "vw"
        number of inds is 30 or 49
    """
    # '==' instead of 'is': identity comparison of strings is unreliable.
    if filetype == 'returns':
        name = f'{weighting}_rets'
        divisor = 100
    elif filetype == 'nfirms':
        name = 'nfirms'
        divisor = 1
    elif filetype == 'size':
        name = 'size'
        divisor = 1
    else:
        raise ValueError('filetype must be one of: returns, nfirms, size')
    ind = pd.read_csv(f'data/ind{n_inds}_m_{name}.csv',
                      header=0, index_col=0, na_values=-99.99) / divisor
    ind.index = pd.to_datetime(ind.index, format='%Y%m').to_period('M')
    ind.columns = ind.columns.str.strip()
    return ind

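For reference, the calls below show how the arguments map onto the expected file names; the CSV files themselves are not bundled with this listing.

# Reads data/ind49_m_ew_rets.csv and divides by 100 (returns are in percent).
ew_rets_49 = get_ind_file('returns', weighting='ew', n_inds=49)
# Reads data/ind30_m_nfirms.csv; firm counts are used as-is (divisor 1).
nfirms_30 = get_ind_file('nfirms', n_inds=30)
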
# jaimeaguilera/Investing-projects :: kit.py :: get_ind_returns [python]

def get_ind_returns(weighting='vw', n_inds=30):
    """
    Load and format the Ken French Industry Portfolios Monthly Returns
    """
    return get_ind_file('returns', weighting=weighting, n_inds=n_inds)

# jaimeaguilera/Investing-projects :: kit.py :: get_ind_nfirms [python]

def get_ind_nfirms(n_inds=30):
    """
    Load and format the Ken French 30 Industry Portfolios Average number of Firms
    """
    return get_ind_file('nfirms', n_inds=n_inds)

# jaimeaguilera/Investing-projects :: kit.py :: get_ind_size [python]

def get_ind_size(n_inds=30):
    """
    Load and format the Ken French 30 Industry Portfolios Average size (market cap)
    """
    return get_ind_file('size', n_inds=n_inds)

# jaimeaguilera/Investing-projects :: kit.py :: get_ind_market_caps [python]

def get_ind_market_caps(n_inds=30, weights=False):
    """
    Load the industry portfolio data and derive the market caps
    """
    ind_nfirms = get_ind_nfirms(n_inds=n_inds)
    ind_size = get_ind_size(n_inds=n_inds)
    ind_mktcap = ind_nfirms * ind_size
    if weights:
        total_mktcap = ind_mktcap.sum(axis=1)
        ind_capweight = ind_mktcap.divide(total_mktcap, axis='rows')
        return ind_capweight
    return ind_mktcap

def get_total_market_index_returns(n_inds=30): '\n Load the industry portfolio data and derive the returns of a capweighted total market index\n ' ind_capweight = get_ind_market_caps(n_inds=n_inds, weights=True) ind_return = get_ind_returns(weighting='vw', n_inds=n_inds) total_market_return = (ind_capweight * ind_return).sum(axis='columns') return total_market_return
-2,314,792,494,135,799,300
Load the industry portfolio data and derive the returns of a capweighted total market index
kit.py
get_total_market_index_returns
jaimeaguilera/Investing-projects
python
def get_total_market_index_returns(n_inds=30): '\n \n ' ind_capweight = get_ind_market_caps(n_inds=n_inds, weights=True) ind_return = get_ind_returns(weighting='vw', n_inds=n_inds) total_market_return = (ind_capweight * ind_return).sum(axis='columns') return total_market_return
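Putting the loaders together; this sketch assumes the matching nfirms and size CSVs are also present under data/. Note the weights=True flag, so the industry weights sum to 1 each month:

import kit

caps = kit.get_ind_market_caps(n_inds=30, weights=True)  # cap weights per month
tmi = kit.get_total_market_index_returns(n_inds=30)      # cap-weighted market return
print(caps.sum(axis=1).head())  # ~1.0 every month
print(tmi.head())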
def skewness(r): '\n Alternative to scipy.stats.skew()\n Computes the skewness of the supplied Series or DataFrame\n Returns a float or a Series\n ' r = r[((r != 0) & r.notnull())] demeaned_r = (r - r.mean()) sigma_r = r.std(ddof=0) exp = (demeaned_r ** 3).mean() return (exp / (sigma_r ** 3))
7,755,191,297,281,593,000
Alternative to scipy.stats.skew() Computes the skewness of the supplied Series or DataFrame Returns a float or a Series
kit.py
skewness
jaimeaguilera/Investing-projects
python
def skewness(r): '\n Alternative to scipy.stats.skew()\n Computes the skewness of the supplied Series or DataFrame\n Returns a float or a Series\n ' r = r[((r != 0) & r.notnull())] demeaned_r = (r - r.mean()) sigma_r = r.std(ddof=0) exp = (demeaned_r ** 3).mean() return (exp / (sigma_r ** 3))
def kurtosis(r): '\n Alternative to scipy.stats.kurtosis()\n Computes the kurtosis of the supplied Series or DataFrame\n Returns a float or a Series\n ' r = r[((r != 0) & r.notnull())] demeaned_r = (r - r.mean()) sigma_r = r.std(ddof=0) exp = (demeaned_r ** 4).mean() return (exp / (sigma_r ** 4))
-8,853,748,143,048,105,000
Alternative to scipy.stats.kurtosis() Computes the kurtosis of the supplied Series or DataFrame Returns a float or a Series
kit.py
kurtosis
jaimeaguilera/Investing-projects
python
def kurtosis(r): '\n Alternative to scipy.stats.kurtosis()\n Computes the kurtosis of the supplied Series or DataFrame\n Returns a float or a Series\n ' r = r[((r != 0) & r.notnull())] demeaned_r = (r - r.mean()) sigma_r = r.std(ddof=0) exp = (demeaned_r ** 4).mean() return (exp / (sigma_r ** 4))
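A quick sanity check of the two moment helpers against scipy; both use the population standard deviation (ddof=0) and raw rather than excess kurtosis, so scipy is called with matching options:

import numpy as np
import pandas as pd
import scipy.stats
import kit

r = pd.Series(np.random.default_rng(0).normal(0.01, 0.05, 1000))
print(kit.skewness(r), scipy.stats.skew(r))                    # should agree closely
print(kit.kurtosis(r), scipy.stats.kurtosis(r, fisher=False))  # raw kurtosis, ~3 for a normal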
def compound(r): '\n returns the result of compounding the set of returns in r\n ' return np.expm1(np.log1p(r).sum())
-5,716,126,220,503,370,000
returns the result of compounding the set of returns in r
kit.py
compound
jaimeaguilera/Investing-projects
python
def compound(r): '\n \n ' return np.expm1(np.log1p(r).sum())
def annualize_rets(r): '\n Annualizes a set of returns, inferring the elapsed calendar time\n from the first and last valid entries of the index\n ' r_valid = r[((r != 0) & r.notnull())] date_beg = r_valid.agg((lambda x: x.first_valid_index())) date_end = r_valid.agg((lambda x: x.last_valid_index())) try: years_fraction = ((date_end - date_beg).dt.days / 365.2425) except AttributeError: years_fraction = ((date_end - date_beg).days / 365.2425) compounded_growth = (1 + r_valid).prod() return ((compounded_growth ** (1 / years_fraction)) - 1)
444,043,545,948,035,400
Annualizes a set of returns, inferring the elapsed calendar time from the first and last valid entries of the index
kit.py
annualize_rets
jaimeaguilera/Investing-projects
python
def annualize_rets(r): '\n Annualizes a set of returns, inferring the elapsed calendar time\n from the first and last valid entries of the index\n ' r_valid = r[((r != 0) & r.notnull())] date_beg = r_valid.agg((lambda x: x.first_valid_index())) date_end = r_valid.agg((lambda x: x.last_valid_index())) try: years_fraction = ((date_end - date_beg).dt.days / 365.2425) except AttributeError: years_fraction = ((date_end - date_beg).days / 365.2425) compounded_growth = (1 + r_valid).prod() return ((compounded_growth ** (1 / years_fraction)) - 1)
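A worked example of the annualization: compound the returns, then take the root of the elapsed years. Note the span is measured first-to-last observation, so 24 monthly returns cover 23 months of calendar time:

import pandas as pd
import kit

# 24 months of 1% returns on a monthly DatetimeIndex
r = pd.Series([0.01] * 24, index=pd.date_range('2020-01-01', periods=24, freq='MS'))
# (1.01**24) ** (1 / (700 / 365.2425)) - 1  ~  13.3% per year
print(kit.annualize_rets(r))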
def annualize_vol(r): '\n Annualizes the vol of a set of returns, inferring the periods per year\n from the number of observations and the elapsed calendar time\n ' r_valid = r[((r != 0) & r.notnull())] total_num_periods = r_valid.count() date_beg = r_valid.agg((lambda x: x.first_valid_index())) date_end = r_valid.agg((lambda x: x.last_valid_index())) try: years_fraction = ((date_end - date_beg).dt.days / 365.2425) except AttributeError: years_fraction = ((date_end - date_beg).days / 365.2425) periods_per_year = (total_num_periods / years_fraction) return (r_valid.std() * (periods_per_year ** 0.5))
4,409,763,613,660,388,000
Annualizes the vol of a set of returns, inferring the periods per year from the number of observations and the elapsed calendar time
kit.py
annualize_vol
jaimeaguilera/Investing-projects
python
def annualize_vol(r): '\n Annualizes the vol of a set of returns, inferring the periods per year\n from the number of observations and the elapsed calendar time\n ' r_valid = r[((r != 0) & r.notnull())] total_num_periods = r_valid.count() date_beg = r_valid.agg((lambda x: x.first_valid_index())) date_end = r_valid.agg((lambda x: x.last_valid_index())) try: years_fraction = ((date_end - date_beg).dt.days / 365.2425) except AttributeError: years_fraction = ((date_end - date_beg).days / 365.2425) periods_per_year = (total_num_periods / years_fraction) return (r_valid.std() * (periods_per_year ** 0.5))
def sharpe_ratio(r, riskfree_rate): '\n Computes the annualized Sharpe ratio of a set of returns\n ' r_valid = r[((r != 0) & r.notnull())] total_num_periods = r_valid.count() date_beg = r_valid.agg((lambda x: x.first_valid_index())) date_end = r_valid.agg((lambda x: x.last_valid_index())) try: years_fraction = ((date_end - date_beg).dt.days / 365.2425) except AttributeError: years_fraction = ((date_end - date_beg).days / 365.2425) periods_per_year = (total_num_periods / years_fraction) rf_per_period = (((1 + riskfree_rate) ** (1 / periods_per_year)) - 1) excess_ret = (r - rf_per_period) ann_ex_ret = annualize_rets(excess_ret) ann_vol = annualize_vol(r) return (ann_ex_ret / ann_vol)
2,533,955,272,572,073,000
Computes the annualized Sharpe ratio of a set of returns
kit.py
sharpe_ratio
jaimeaguilera/Investing-projects
python
def sharpe_ratio(r, riskfree_rate): '\n \n ' r_valid = r[((r != 0) & r.notnull())] total_num_periods = r_valid.count() date_beg = r_valid.agg((lambda x: x.first_valid_index())) date_end = r_valid.agg((lambda x: x.last_valid_index())) try: years_fraction = ((date_end - date_beg).dt.days / 365.2425) except AttributeError: years_fraction = ((date_end - date_beg).days / 365.2425) periods_per_year = (total_num_periods / years_fraction) rf_per_period = (((1 + riskfree_rate) ** (1 / periods_per_year)) - 1) excess_ret = (r - rf_per_period) ann_ex_ret = annualize_rets(excess_ret) ann_vol = annualize_vol(r) return (ann_ex_ret / ann_vol)
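A short usage sketch; it assumes a return series on a datetime index so the helper can infer the periodicity:

import numpy as np
import pandas as pd
import kit

rng = np.random.default_rng(1)
r = pd.Series(rng.normal(0.008, 0.04, 120),
              index=pd.date_range('2010-01-01', periods=120, freq='MS'))
print(kit.sharpe_ratio(r, riskfree_rate=0.03))  # annualized excess return over annualized vol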
def is_normal(r, level=0.01): '\n Applies the Jarque-Bera test to determine if a Series is normal or not\n Test is applied at the 1% level by default\n Returns True if the hypothesis of normality is not rejected, False otherwise\n ' if isinstance(r, pd.DataFrame): return r.aggregate(is_normal) else: (statistic, p_value) = scipy.stats.jarque_bera(r) return (p_value > level)
4,814,920,000,768,772,000
Applies the Jarque-Bera test to determine if a Series is normal or not Test is applied at the 1% level by default Returns True if the hypothesis of normality is not rejected, False otherwise
kit.py
is_normal
jaimeaguilera/Investing-projects
python
def is_normal(r, level=0.01): '\n Applies the Jarque-Bera test to determine if a Series is normal or not\n Test is applied at the 1% level by default\n Returns True if the hypothesis of normality is not rejected, False otherwise\n ' if isinstance(r, pd.DataFrame): return r.aggregate(is_normal) else: (statistic, p_value) = scipy.stats.jarque_bera(r) return (p_value > level)
def drawdown(return_series: pd.Series): 'Takes a time series of asset returns.\n returns a DataFrame with columns for\n the wealth index, \n the previous peaks, and \n the percentage drawdown\n ' wealth_index = (1000 * (1 + return_series).cumprod()) previous_peaks = wealth_index.cummax() drawdowns = ((wealth_index - previous_peaks) / previous_peaks) return pd.DataFrame({'Wealth': wealth_index, 'Previous Peak': previous_peaks, 'Drawdown': drawdowns})
1,925,900,058,201,987,300
Takes a time series of asset returns. returns a DataFrame with columns for the wealth index, the previous peaks, and the percentage drawdown
kit.py
drawdown
jaimeaguilera/Investing-projects
python
def drawdown(return_series: pd.Series): 'Takes a time series of asset returns.\n returns a DataFrame with columns for\n the wealth index, \n the previous peaks, and \n the percentage drawdown\n ' wealth_index = (1000 * (1 + return_series).cumprod()) previous_peaks = wealth_index.cummax() drawdowns = ((wealth_index - previous_peaks) / previous_peaks) return pd.DataFrame({'Wealth': wealth_index, 'Previous Peak': previous_peaks, 'Drawdown': drawdowns})
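A minimal usage sketch of the drawdown accounting:

import pandas as pd
import kit

r = pd.Series([0.10, -0.05, 0.03, -0.12, 0.08])
dd = kit.drawdown(r)
print(dd['Drawdown'].min())   # deepest peak-to-trough loss, a negative fraction
print(dd['Wealth'].iloc[-1])  # ending value of the 1000-unit wealth index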
def semideviation(r): '\n Returns the semideviation aka negative semideviation of r\n r must be a Series or a DataFrame, else raises a TypeError\n ' if isinstance(r, pd.Series): is_negative = (r < 0) return r[is_negative].std(ddof=0) elif isinstance(r, pd.DataFrame): return r.aggregate(semideviation) else: raise TypeError('Expected r to be a Series or DataFrame')
-1,785,064,964,747,070,700
Returns the semideviation aka negative semideviation of r r must be a Series or a DataFrame, else raises a TypeError
kit.py
semideviation
jaimeaguilera/Investing-projects
python
def semideviation(r): '\n Returns the semideviation aka negative semideviation of r\n r must be a Series or a DataFrame, else raises a TypeError\n ' if isinstance(r, pd.Series): is_negative = (r < 0) return r[is_negative].std(ddof=0) elif isinstance(r, pd.DataFrame): return r.aggregate(semideviation) else: raise TypeError('Expected r to be a Series or DataFrame')
def var_historic(r, level=5): '\n Returns the historic Value at Risk at a specified level\n i.e. returns the number such that "level" percent of the returns\n fall below that number, and the (100-level) percent are above\n ' r = r[((r != 0) & r.notnull())] if isinstance(r, pd.DataFrame): return r.aggregate(var_historic, level=level) elif isinstance(r, pd.Series): return (- np.percentile(r, level)) else: raise TypeError('Expected r to be a Series or DataFrame')
8,531,678,315,750,730,000
Returns the historic Value at Risk at a specified level i.e. returns the number such that "level" percent of the returns fall below that number, and the (100-level) percent are above
kit.py
var_historic
jaimeaguilera/Investing-projects
python
def var_historic(r, level=5): '\n Returns the historic Value at Risk at a specified level\n i.e. returns the number such that "level" percent of the returns\n fall below that number, and the (100-level) percent are above\n ' r = r[((r != 0) & r.notnull())] if isinstance(r, pd.DataFrame): return r.aggregate(var_historic, level=level) elif isinstance(r, pd.Series): return (- np.percentile(r, level)) else: raise TypeError('Expected r to be a Series or DataFrame')
def cvar_historic(r, level=5): '\n Computes the Conditional VaR of Series or DataFrame\n ' r = r[((r != 0) & r.notnull())] if isinstance(r, pd.Series): is_beyond = (r <= (- var_historic(r, level=level))) return (- r[is_beyond].mean()) elif isinstance(r, pd.DataFrame): return r.aggregate(cvar_historic, level=level) else: raise TypeError('Expected r to be a Series or DataFrame')
-3,315,997,735,787,485,000
Computes the Conditional VaR of Series or DataFrame
kit.py
cvar_historic
jaimeaguilera/Investing-projects
python
def cvar_historic(r, level=5): '\n \n ' r = r[((r != 0) & r.notnull())] if isinstance(r, pd.Series): is_beyond = (r <= (- var_historic(r, level=level))) return (- r[is_beyond].mean()) elif isinstance(r, pd.DataFrame): return r.aggregate(cvar_historic, level=level) else: raise TypeError('Expected r to be a Series or DataFrame')
def var_gaussian(r, level=5, modified=False): '\n Returns the Parametric Gaussian VaR of a Series or DataFrame\n If "modified" is True, then the modified VaR is returned,\n using the Cornish-Fisher modification\n ' r = r[((r != 0) & r.notnull())] z = norm.ppf((level / 100)) if modified: s = skewness(r) k = kurtosis(r) z = (((z + ((((z ** 2) - 1) * s) / 6)) + ((((z ** 3) - (3 * z)) * (k - 3)) / 24)) - ((((2 * (z ** 3)) - (5 * z)) * (s ** 2)) / 36)) return (- (r.mean() + (z * r.std(ddof=0))))
4,522,035,123,193,760,300
Returns the Parametric Gaussian VaR of a Series or DataFrame If "modified" is True, then the modified VaR is returned, using the Cornish-Fisher modification
kit.py
var_gaussian
jaimeaguilera/Investing-projects
python
def var_gaussian(r, level=5, modified=False): '\n Returns the Parametric Gaussian VaR of a Series or DataFrame\n If "modified" is True, then the modified VaR is returned,\n using the Cornish-Fisher modification\n ' r = r[((r != 0) & r.notnull())] z = norm.ppf((level / 100)) if modified: s = skewness(r) k = kurtosis(r) z = (((z + ((((z ** 2) - 1) * s) / 6)) + ((((z ** 3) - (3 * z)) * (k - 3)) / 24)) - ((((2 * (z ** 3)) - (5 * z)) * (s ** 2)) / 36)) return (- (r.mean() + (z * r.std(ddof=0))))
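Comparing the three VaR estimators on a fat-tailed sample; the Cornish-Fisher variant adjusts the Gaussian quantile for skewness and kurtosis, so it often lands closer to the historic figure:

import numpy as np
import pandas as pd
import kit

rng = np.random.default_rng(2)
r = pd.Series(rng.standard_t(df=5, size=5000) * 0.01)  # fat-tailed returns
print(kit.var_historic(r, level=5))
print(kit.var_gaussian(r, level=5))
print(kit.var_gaussian(r, level=5, modified=True))  # Cornish-Fisher adjusted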
def portfolio_return(weights, returns): '\n Computes the return on a portfolio from constituent returns and weights\n weights are a numpy array or Nx1 matrix and returns are a numpy array or Nx1 matrix\n ' return (weights.T @ returns)
-1,847,745,247,972,241,200
Computes the return on a portfolio from constituent returns and weights weights are a numpy array or Nx1 matrix and returns are a numpy array or Nx1 matrix
kit.py
portfolio_return
jaimeaguilera/Investing-projects
python
def portfolio_return(weights, returns): '\n Computes the return on a portfolio from constituent returns and weights\n weights are a numpy array or Nx1 matrix and returns are a numpy array or Nx1 matrix\n ' return (weights.T @ returns)
def portfolio_vol(weights, covmat): '\n Computes the vol of a portfolio from a covariance matrix and constituent weights\n weights are a numpy array or N x 1 matrix and covmat is an N x N matrix\n ' vol = (((weights.T @ covmat) @ weights) ** 0.5) return vol
-3,955,678,057,595,877,400
Computes the vol of a portfolio from a covariance matrix and constituent weights weights are a numpy array or N x 1 matrix and covmat is an N x N matrix
kit.py
portfolio_vol
jaimeaguilera/Investing-projects
python
def portfolio_vol(weights, covmat): '\n Computes the vol of a portfolio from a covariance matrix and constituent weights\n weights are a numpy array or N x 1 matrix and covmat is an N x N matrix\n ' vol = (((weights.T @ covmat) @ weights) ** 0.5) return vol
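A worked two-asset example of the quadratic form w'Σw:

import numpy as np
import kit

w = np.array([0.6, 0.4])
er = np.array([0.08, 0.05])
cov = np.array([[0.040, 0.006],
                [0.006, 0.010]])
print(kit.portfolio_return(w, er))  # 0.6*0.08 + 0.4*0.05 = 0.068
print(kit.portfolio_vol(w, cov))    # sqrt(0.01888) ~ 0.137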
def plot_ef2(n_points, er, cov): '\n Plots the 2-asset efficient frontier\n ' if ((er.shape[0] != 2) or (cov.shape[0] != 2)): raise ValueError('plot_ef2 can only plot 2-asset frontiers') weights = [np.array([w, (1 - w)]) for w in np.linspace(0, 1, n_points)] rets = [portfolio_return(w, er) for w in weights] vols = [portfolio_vol(w, cov) for w in weights] ef = pd.DataFrame({'Returns': rets, 'Volatility': vols}) return ef.plot.line(x='Volatility', y='Returns', style='.-')
6,113,814,886,680,996,000
Plots the 2-asset efficient frontier
kit.py
plot_ef2
jaimeaguilera/Investing-projects
python
def plot_ef2(n_points, er, cov): '\n \n ' if ((er.shape[0] != 2) or (cov.shape[0] != 2)): raise ValueError('plot_ef2 can only plot 2-asset frontiers') weights = [np.array([w, (1 - w)]) for w in np.linspace(0, 1, n_points)] rets = [portfolio_return(w, er) for w in weights] vols = [portfolio_vol(w, cov) for w in weights] ef = pd.DataFrame({'Returns': rets, 'Volatility': vols}) return ef.plot.line(x='Volatility', y='Returns', style='.-')
def minimize_vol(target_return, er, cov): '\n Returns the optimal weights that achieve the target return\n given a set of expected returns and a covariance matrix\n ' n = er.shape[0] init_guess = np.repeat((1 / n), n) bounds = (((0.0, 1.0),) * n) weights_sum_to_1 = {'type': 'eq', 'fun': (lambda weights: (np.sum(weights) - 1))} return_is_target = {'type': 'eq', 'args': (er,), 'fun': (lambda weights, er: (target_return - portfolio_return(weights, er)))} weights = minimize(portfolio_vol, init_guess, args=(cov,), method='SLSQP', options={'disp': False}, constraints=(weights_sum_to_1, return_is_target), bounds=bounds) return weights.x
7,147,474,082,142,541,000
Returns the optimal weights that achieve the target return given a set of expected returns and a covariance matrix
kit.py
minimize_vol
jaimeaguilera/Investing-projects
python
def minimize_vol(target_return, er, cov): '\n Returns the optimal weights that achieve the target return\n given a set of expected returns and a covariance matrix\n ' n = er.shape[0] init_guess = np.repeat((1 / n), n) bounds = (((0.0, 1.0),) * n) weights_sum_to_1 = {'type': 'eq', 'fun': (lambda weights: (np.sum(weights) - 1))} return_is_target = {'type': 'eq', 'args': (er,), 'fun': (lambda weights, er: (target_return - portfolio_return(weights, er)))} weights = minimize(portfolio_vol, init_guess, args=(cov,), method='SLSQP', options={'disp': False}, constraints=(weights_sum_to_1, return_is_target), bounds=bounds) return weights.x
def tracking_error(r_a, r_b): '\n Returns the Tracking Error between the two return series\n ' return np.sqrt(((r_a - r_b) ** 2).sum())
-6,295,209,354,440,144,000
Returns the Tracking Error between the two return series
kit.py
tracking_error
jaimeaguilera/Investing-projects
python
def tracking_error(r_a, r_b): '\n \n ' return np.sqrt(((r_a - r_b) ** 2).sum())
def msr(riskfree_rate, er, cov): '\n Returns the weights of the portfolio that gives you the maximum sharpe ratio\n given the riskfree rate and expected returns and a covariance matrix\n ' n = er.shape[0] init_guess = np.repeat((1 / n), n) bounds = (((0.0, 1.0),) * n) weights_sum_to_1 = {'type': 'eq', 'fun': (lambda weights: (np.sum(weights) - 1))} def neg_sharpe(weights, riskfree_rate, er, cov): '\n Returns the negative of the sharpe ratio\n of the given portfolio\n ' r = portfolio_return(weights, er) vol = portfolio_vol(weights, cov) return ((- (r - riskfree_rate)) / vol) weights = minimize(neg_sharpe, init_guess, args=(riskfree_rate, er, cov), method='SLSQP', options={'disp': False}, constraints=(weights_sum_to_1,), bounds=bounds) return weights.x
3,631,754,018,628,217,300
Returns the weights of the portfolio that gives you the maximum sharpe ratio given the riskfree rate and expected returns and a covariance matrix
kit.py
msr
jaimeaguilera/Investing-projects
python
def msr(riskfree_rate, er, cov): '\n Returns the weights of the portfolio that gives you the maximum sharpe ratio\n given the riskfree rate and expected returns and a covariance matrix\n ' n = er.shape[0] init_guess = np.repeat((1 / n), n) bounds = (((0.0, 1.0),) * n) weights_sum_to_1 = {'type': 'eq', 'fun': (lambda weights: (np.sum(weights) - 1))} def neg_sharpe(weights, riskfree_rate, er, cov): '\n Returns the negative of the sharpe ratio\n of the given portfolio\n ' r = portfolio_return(weights, er) vol = portfolio_vol(weights, cov) return ((- (r - riskfree_rate)) / vol) weights = minimize(neg_sharpe, init_guess, args=(riskfree_rate, er, cov), method='SLSQP', options={'disp': False}, constraints=(weights_sum_to_1,), bounds=bounds) return weights.x
def gmv(cov): '\n Returns the weights of the Global Minimum Volatility portfolio\n given a covariance matrix\n ' n = cov.shape[0] return msr(0, np.repeat(1, n), cov)
239,850,363,951,659,140
Returns the weights of the Global Minimum Volatility portfolio given a covariance matrix
kit.py
gmv
jaimeaguilera/Investing-projects
python
def gmv(cov): '\n Returns the weights of the Global Minimum Volatility portfolio\n given a covariance matrix\n ' n = cov.shape[0] return msr(0, np.repeat(1, n), cov)
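A sketch tying the three optimizers above together on a small problem; the numbers are purely illustrative:

import numpy as np
import pandas as pd
import kit

er = pd.Series([0.08, 0.05, 0.10])
cov = pd.DataFrame([[0.040, 0.006, 0.010],
                    [0.006, 0.010, 0.004],
                    [0.010, 0.004, 0.090]])
print(kit.minimize_vol(0.07, er, cov))  # min-vol weights hitting a 7% target return
print(kit.msr(0.03, er, cov))           # max Sharpe ratio weights at a 3% risk-free rate
print(kit.gmv(cov))                     # global minimum-variance weights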
def optimal_weights(n_points, er, cov): '\n Returns a list of weights that represent a grid of n_points on the efficient frontier\n ' target_rs = np.linspace(er.min(), er.max(), n_points) weights = [minimize_vol(target_return, er, cov) for target_return in target_rs] return weights
-8,565,410,215,275,897,000
Returns a list of weights that represent a grid of n_points on the efficient frontier
kit.py
optimal_weights
jaimeaguilera/Investing-projects
python
def optimal_weights(n_points, er, cov): '\n \n ' target_rs = np.linspace(er.min(), er.max(), n_points) weights = [minimize_vol(target_return, er, cov) for target_return in target_rs] return weights
def plot_ef(n_points, er, cov, style='.-', legend=False, show_cml=False, riskfree_rate=0, show_ew=False, show_gmv=False): '\n Plots the multi-asset efficient frontier\n ' weights = optimal_weights(n_points, er, cov) rets = [portfolio_return(w, er) for w in weights] vols = [portfolio_vol(w, cov) for w in weights] ef = pd.DataFrame({'Returns': rets, 'Volatility': vols}) ax = ef.plot.line(x='Volatility', y='Returns', style=style, legend=legend) if show_cml: ax.set_xlim(left=0) w_msr = msr(riskfree_rate, er, cov) r_msr = portfolio_return(w_msr, er) vol_msr = portfolio_vol(w_msr, cov) cml_x = [0, vol_msr] cml_y = [riskfree_rate, r_msr] ax.plot(cml_x, cml_y, color='green', marker='o', linestyle='dashed', linewidth=2, markersize=10) if show_ew: n = er.shape[0] w_ew = np.repeat((1 / n), n) r_ew = portfolio_return(w_ew, er) vol_ew = portfolio_vol(w_ew, cov) ax.plot([vol_ew], [r_ew], color='goldenrod', marker='o', markersize=10) if show_gmv: w_gmv = gmv(cov) r_gmv = portfolio_return(w_gmv, er) vol_gmv = portfolio_vol(w_gmv, cov) ax.plot([vol_gmv], [r_gmv], color='midnightblue', marker='o', markersize=10) return ax
-588,327,934,387,513,900
Plots the multi-asset efficient frontier
kit.py
plot_ef
jaimeaguilera/Investing-projects
python
def plot_ef(n_points, er, cov, style='.-', legend=False, show_cml=False, riskfree_rate=0, show_ew=False, show_gmv=False): '\n \n ' weights = optimal_weights(n_points, er, cov) rets = [portfolio_return(w, er) for w in weights] vols = [portfolio_vol(w, cov) for w in weights] ef = pd.DataFrame({'Returns': rets, 'Volatility': vols}) ax = ef.plot.line(x='Volatility', y='Returns', style=style, legend=legend) if show_cml: ax.set_xlim(left=0) w_msr = msr(riskfree_rate, er, cov) r_msr = portfolio_return(w_msr, er) vol_msr = portfolio_vol(w_msr, cov) cml_x = [0, vol_msr] cml_y = [riskfree_rate, r_msr] ax.plot(cml_x, cml_y, color='green', marker='o', linestyle='dashed', linewidth=2, markersize=10) if show_ew: n = er.shape[0] w_ew = np.repeat((1 / n), n) r_ew = portfolio_return(w_ew, er) vol_ew = portfolio_vol(w_ew, cov) ax.plot([vol_ew], [r_ew], color='goldenrod', marker='o', markersize=10) if show_gmv: w_gmv = gmv(cov) r_gmv = portfolio_return(w_gmv, er) vol_gmv = portfolio_vol(w_gmv, cov) ax.plot([vol_gmv], [r_gmv], color='midnightblue', marker='o', markersize=10) return ax
def run_cppi(risky_r, safe_r=None, m=3, start=1000, floor=0.8, riskfree_rate=0.03, drawdown=None): '\n Run a backtest of the CPPI strategy, given a set of returns for the risky asset\n Returns a dictionary containing: Asset Value History, Risk Budget History, Risky Weight History\n ' dates = risky_r.index n_steps = len(dates) account_value = start floor_value = (start * floor) peak = account_value if isinstance(risky_r, pd.Series): risky_r = pd.DataFrame(risky_r, columns=['R']) if (safe_r is None): safe_r = pd.DataFrame().reindex_like(risky_r) safe_r.values[:] = (riskfree_rate / 12) account_history = pd.DataFrame().reindex_like(risky_r) risky_w_history = pd.DataFrame().reindex_like(risky_r) cushion_history = pd.DataFrame().reindex_like(risky_r) floorval_history = pd.DataFrame().reindex_like(risky_r) peak_history = pd.DataFrame().reindex_like(risky_r) for step in range(n_steps): if (drawdown is not None): peak = np.maximum(peak, account_value) floor_value = (peak * (1 - drawdown)) cushion = ((account_value - floor_value) / account_value) risky_w = (m * cushion) risky_w = np.minimum(risky_w, 1) risky_w = np.maximum(risky_w, 0) safe_w = (1 - risky_w) risky_alloc = (account_value * risky_w) safe_alloc = (account_value * safe_w) account_value = ((risky_alloc * (1 + risky_r.iloc[step])) + (safe_alloc * (1 + safe_r.iloc[step]))) cushion_history.iloc[step] = cushion risky_w_history.iloc[step] = risky_w account_history.iloc[step] = account_value floorval_history.iloc[step] = floor_value peak_history.iloc[step] = peak risky_wealth = (start * (1 + risky_r).cumprod()) backtest_result = {'Wealth': account_history, 'Risky Wealth': risky_wealth, 'Risk Budget': cushion_history, 'Risky Allocation': risky_w_history, 'm': m, 'start': start, 'risky_r': risky_r, 'safe_r': safe_r, 'drawdown': drawdown, 'peak': peak_history, 'floor': floorval_history} return backtest_result
1,973,406,296,599,866,400
Run a backtest of the CPPI strategy, given a set of returns for the risky asset Returns a dictionary containing: Asset Value History, Risk Budget History, Risky Weight History
kit.py
run_cppi
jaimeaguilera/Investing-projects
python
def run_cppi(risky_r, safe_r=None, m=3, start=1000, floor=0.8, riskfree_rate=0.03, drawdown=None): '\n Run a backtest of the CPPI strategy, given a set of returns for the risky asset\n Returns a dictionary containing: Asset Value History, Risk Budget History, Risky Weight History\n ' dates = risky_r.index n_steps = len(dates) account_value = start floor_value = (start * floor) peak = account_value if isinstance(risky_r, pd.Series): risky_r = pd.DataFrame(risky_r, columns=['R']) if (safe_r is None): safe_r = pd.DataFrame().reindex_like(risky_r) safe_r.values[:] = (riskfree_rate / 12) account_history = pd.DataFrame().reindex_like(risky_r) risky_w_history = pd.DataFrame().reindex_like(risky_r) cushion_history = pd.DataFrame().reindex_like(risky_r) floorval_history = pd.DataFrame().reindex_like(risky_r) peak_history = pd.DataFrame().reindex_like(risky_r) for step in range(n_steps): if (drawdown is not None): peak = np.maximum(peak, account_value) floor_value = (peak * (1 - drawdown)) cushion = ((account_value - floor_value) / account_value) risky_w = (m * cushion) risky_w = np.minimum(risky_w, 1) risky_w = np.maximum(risky_w, 0) safe_w = (1 - risky_w) risky_alloc = (account_value * risky_w) safe_alloc = (account_value * safe_w) account_value = ((risky_alloc * (1 + risky_r.iloc[step])) + (safe_alloc * (1 + safe_r.iloc[step]))) cushion_history.iloc[step] = cushion risky_w_history.iloc[step] = risky_w account_history.iloc[step] = account_value floorval_history.iloc[step] = floor_value peak_history.iloc[step] = peak risky_wealth = (start * (1 + risky_r).cumprod()) backtest_result = {'Wealth': account_history, 'Risky Wealth': risky_wealth, 'Risk Budget': cushion_history, 'Risky Allocation': risky_w_history, 'm': m, 'start': start, 'risky_r': risky_r, 'safe_r': safe_r, 'drawdown': drawdown, 'peak': peak_history, 'floor': floorval_history} return backtest_result
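A backtest sketch feeding gbm scenarios (defined just below) into the CPPI loop; note gbm returns a raw numpy array when prices=False, so it is wrapped in a DataFrame to give run_cppi an index:

import pandas as pd
import kit

risky_r = pd.DataFrame(kit.gbm(n_years=10, n_scenarios=5, mu=0.07, sigma=0.15, prices=False))
btr = kit.run_cppi(risky_r, m=3, start=1000, floor=0.8)
print(btr['Wealth'].iloc[-1])  # terminal account value of each scenario
print(btr['floor'].iloc[-1])   # floor value history at the last step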
def summary_stats(r, riskfree_rate=0.03): '\n Return a DataFrame that contains aggregated summary stats for the returns in the columns of r\n ' ann_r = annualize_rets(r) ann_vol = annualize_vol(r) ann_sr = sharpe_ratio(r, riskfree_rate=riskfree_rate) dd = r.aggregate((lambda r: drawdown(r).Drawdown.min())) skew = r.aggregate(skewness) kurt = r.aggregate(kurtosis) cf_var5 = r.aggregate(var_gaussian, modified=True) hist_cvar5 = r.aggregate(cvar_historic) return pd.DataFrame({'Annualized Return': ann_r, 'Annualized Vol': ann_vol, 'Skewness': skew, 'Kurtosis': kurt, 'Cornish-Fisher VaR (5%)': cf_var5, 'Historic CVaR (5%)': hist_cvar5, 'Sharpe Ratio': ann_sr, 'Max Drawdown': dd})
-3,695,334,458,142,436,400
Return a DataFrame that contains aggregated summary stats for the returns in the columns of r
kit.py
summary_stats
jaimeaguilera/Investing-projects
python
def summary_stats(r, riskfree_rate=0.03): '\n \n ' ann_r = annualize_rets(r) ann_vol = annualize_vol(r) ann_sr = sharpe_ratio(r, riskfree_rate=riskfree_rate) dd = r.aggregate((lambda r: drawdown(r).Drawdown.min())) skew = r.aggregate(skewness) kurt = r.aggregate(kurtosis) cf_var5 = r.aggregate(var_gaussian, modified=True) hist_cvar5 = r.aggregate(cvar_historic) return pd.DataFrame({'Annualized Return': ann_r, 'Annualized Vol': ann_vol, 'Skewness': skew, 'Kurtosis': kurt, 'Cornish-Fisher VaR (5%)': cf_var5, 'Historic CVaR (5%)': hist_cvar5, 'Sharpe Ratio': ann_sr, 'Max Drawdown': dd})
def gbm(n_years=10, n_scenarios=1000, mu=0.07, sigma=0.15, steps_per_year=12, s_0=100.0, prices=True): '\n Evolution of Geometric Brownian Motion trajectories, such as for Stock Prices through Monte Carlo\n :param n_years: The number of years to generate data for\n :param n_scenarios: The number of scenarios/trajectories\n :param mu: Annualized Drift, e.g. Market Return\n :param sigma: Annualized Volatility\n :param steps_per_year: granularity of the simulation\n :param s_0: initial value\n :param prices: if True return prices, else return the period returns\n :return: a DataFrame of n_scenarios columns and n_years*steps_per_year + 1 rows (a numpy array of returns if prices is False)\n ' dt = (1 / steps_per_year) n_steps = (int((n_years * steps_per_year)) + 1) rets_plus_1 = np.random.normal(loc=((1 + mu) ** dt), scale=(sigma * np.sqrt(dt)), size=(n_steps, n_scenarios)) rets_plus_1[0] = 1 ret_val = ((s_0 * pd.DataFrame(rets_plus_1).cumprod()) if prices else (rets_plus_1 - 1)) return ret_val
-5,941,378,999,358,949,000
Evolution of Geometric Brownian Motion trajectories, such as for Stock Prices through Monte Carlo :param n_years: The number of years to generate data for :param n_scenarios: The number of scenarios/trajectories :param mu: Annualized Drift, e.g. Market Return :param sigma: Annualized Volatility :param steps_per_year: granularity of the simulation :param s_0: initial value :param prices: if True return prices, else return the period returns :return: a DataFrame of n_scenarios columns and n_years*steps_per_year + 1 rows (a numpy array of returns if prices is False)
kit.py
gbm
jaimeaguilera/Investing-projects
python
def gbm(n_years=10, n_scenarios=1000, mu=0.07, sigma=0.15, steps_per_year=12, s_0=100.0, prices=True): '\n Evolution of Geometric Brownian Motion trajectories, such as for Stock Prices through Monte Carlo\n :param n_years: The number of years to generate data for\n :param n_scenarios: The number of scenarios/trajectories\n :param mu: Annualized Drift, e.g. Market Return\n :param sigma: Annualized Volatility\n :param steps_per_year: granularity of the simulation\n :param s_0: initial value\n :param prices: if True return prices, else return the period returns\n :return: a DataFrame of n_scenarios columns and n_years*steps_per_year + 1 rows (a numpy array of returns if prices is False)\n ' dt = (1 / steps_per_year) n_steps = (int((n_years * steps_per_year)) + 1) rets_plus_1 = np.random.normal(loc=((1 + mu) ** dt), scale=(sigma * np.sqrt(dt)), size=(n_steps, n_scenarios)) rets_plus_1[0] = 1 ret_val = ((s_0 * pd.DataFrame(rets_plus_1).cumprod()) if prices else (rets_plus_1 - 1)) return ret_val
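A quick look at the simulated price paths:

import kit

prices = kit.gbm(n_years=5, n_scenarios=3, mu=0.07, sigma=0.15, s_0=100.0)
print(prices.shape)    # (61, 3): 5*12 steps plus the initial row
print(prices.iloc[0])  # every scenario starts at s_0 = 100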
def regress(dependent_variable, explanatory_variables, alpha=True): "\n Runs a linear regression to decompose the dependent variable into the explanatory variables\n returns a statsmodels RegressionResults object on which you can call\n .summary() to print a full summary\n .params for the coefficients\n .tvalues and .pvalues for the significance levels\n .rsquared_adj and .rsquared for quality of fit\n " if alpha: explanatory_variables = explanatory_variables.copy() explanatory_variables['Alpha'] = 1 lm = sm.OLS(dependent_variable, explanatory_variables).fit() return lm
-2,629,055,977,376,220,700
Runs a linear regression to decompose the dependent variable into the explanatory variables returns a statsmodels RegressionResults object on which you can call .summary() to print a full summary .params for the coefficients .tvalues and .pvalues for the significance levels .rsquared_adj and .rsquared for quality of fit
kit.py
regress
jaimeaguilera/Investing-projects
python
def regress(dependent_variable, explanatory_variables, alpha=True): "\n Runs a linear regression to decompose the dependent variable into the explanatory variables\n returns a statsmodels RegressionResults object on which you can call\n .summary() to print a full summary\n .params for the coefficients\n .tvalues and .pvalues for the significance levels\n .rsquared_adj and .rsquared for quality of fit\n " if alpha: explanatory_variables = explanatory_variables.copy() explanatory_variables['Alpha'] = 1 lm = sm.OLS(dependent_variable, explanatory_variables).fit() return lm
def portfolio_tracking_error(weights, ref_r, bb_r): '\n returns the tracking error between the reference returns\n and a portfolio of building block returns held with given weights\n ' return tracking_error(ref_r, (weights * bb_r).sum(axis=1))
-5,504,051,318,243,047,000
returns the tracking error between the reference returns and a portfolio of building block returns held with given weights
kit.py
portfolio_tracking_error
jaimeaguilera/Investing-projects
python
def portfolio_tracking_error(weights, ref_r, bb_r): '\n returns the tracking error between the reference returns\n and a portfolio of building block returns held with given weights\n ' return tracking_error(ref_r, (weights * bb_r).sum(axis=1))
def style_analysis(dependent_variable, explanatory_variables): '\n Returns the optimal weights that minimize the Tracking error between\n a portfolio of the explanatory variables and the dependent variable\n ' n = explanatory_variables.shape[1] init_guess = np.repeat((1 / n), n) bounds = (((0.0, 1.0),) * n) weights_sum_to_1 = {'type': 'eq', 'fun': (lambda weights: (np.sum(weights) - 1))} solution = minimize(portfolio_tracking_error, init_guess, args=(dependent_variable, explanatory_variables), method='SLSQP', options={'disp': False}, constraints=(weights_sum_to_1,), bounds=bounds) weights = pd.Series(solution.x, index=explanatory_variables.columns) return weights
-7,954,644,322,848,577,000
Returns the optimal weights that minimize the Tracking error between a portfolio of the explanatory variables and the dependent variable
kit.py
style_analysis
jaimeaguilera/Investing-projects
python
def style_analysis(dependent_variable, explanatory_variables): '\n Returns the optimal weights that minimize the Tracking error between\n a portfolio of the explanatory variables and the dependent variable\n ' n = explanatory_variables.shape[1] init_guess = np.repeat((1 / n), n) bounds = (((0.0, 1.0),) * n) weights_sum_to_1 = {'type': 'eq', 'fun': (lambda weights: (np.sum(weights) - 1))} solution = minimize(portfolio_tracking_error, init_guess, args=(dependent_variable, explanatory_variables), method='SLSQP', options={'disp': False}, constraints=(weights_sum_to_1,), bounds=bounds) weights = pd.Series(solution.x, index=explanatory_variables.columns) return weights
def ff_analysis(r, factors): '\n Returns the loadings of r on the Fama French Factors\n which can be read in using get_fff_returns()\n the index of r must be a (not necessarily proper) subset of the index of factors\n r is either a Series or a DataFrame\n ' if isinstance(r, pd.Series): dependent_variable = r explanatory_variables = factors.loc[r.index] tilts = regress(dependent_variable, explanatory_variables).params elif isinstance(r, pd.DataFrame): tilts = pd.DataFrame({col: ff_analysis(r[col], factors) for col in r.columns}) else: raise TypeError('r must be a Series or a DataFrame') return tilts
5,331,380,417,579,062,000
Returns the loadings of r on the Fama French Factors which can be read in using get_fff_returns() the index of r must be a (not necessarily proper) subset of the index of factors r is either a Series or a DataFrame
kit.py
ff_analysis
jaimeaguilera/Investing-projects
python
def ff_analysis(r, factors): '\n Returns the loadings of r on the Fama French Factors\n which can be read in using get_fff_returns()\n the index of r must be a (not necessarily proper) subset of the index of factors\n r is either a Series or a DataFrame\n ' if isinstance(r, pd.Series): dependent_variable = r explanatory_variables = factors.loc[r.index] tilts = regress(dependent_variable, explanatory_variables).params elif isinstance(r, pd.DataFrame): tilts = pd.DataFrame({col: ff_analysis(r[col], factors) for col in r.columns}) else: raise TypeError('r must be a Series or a DataFrame') return tilts
def weight_ew(r, cap_weights=None, max_cw_mult=None, microcap_threshold=None, **kwargs): '\n Returns the weights of the EW portfolio based on the asset returns "r" as a DataFrame\n If cap weights are supplied, microcaps below the threshold are zeroed out and\n weights are capped at max_cw_mult times the cap weight, then renormalized\n ' n = len(r.columns) ew = pd.Series((1 / n), index=r.columns) if (cap_weights is not None): cw = cap_weights.loc[r.index[0]] if ((microcap_threshold is not None) and (microcap_threshold > 0)): microcap = (cw < microcap_threshold) ew[microcap] = 0 ew = (ew / ew.sum()) if ((max_cw_mult is not None) and (max_cw_mult > 0)): ew = np.minimum(ew, (cw * max_cw_mult)) ew = (ew / ew.sum()) return ew
8,770,652,885,608,347,000
Returns the weights of the EW portfolio based on the asset returns "r" as a DataFrame If cap weights are supplied, microcaps below the threshold are zeroed out and weights are capped at max_cw_mult times the cap weight, then renormalized
kit.py
weight_ew
jaimeaguilera/Investing-projects
python
def weight_ew(r, cap_weights=None, max_cw_mult=None, microcap_threshold=None, **kwargs): '\n Returns the weights of the EW portfolio based on the asset returns "r" as a DataFrame\n If cap weights are supplied, microcaps below the threshold are zeroed out and\n weights are capped at max_cw_mult times the cap weight, then renormalized\n ' n = len(r.columns) ew = pd.Series((1 / n), index=r.columns) if (cap_weights is not None): cw = cap_weights.loc[r.index[0]] if ((microcap_threshold is not None) and (microcap_threshold > 0)): microcap = (cw < microcap_threshold) ew[microcap] = 0 ew = (ew / ew.sum()) if ((max_cw_mult is not None) and (max_cw_mult > 0)): ew = np.minimum(ew, (cw * max_cw_mult)) ew = (ew / ew.sum()) return ew
def weight_cw(r, cap_weights, **kwargs): '\n Returns the weights of the CW portfolio based on the time series of capweights\n ' w = cap_weights.loc[r.index[1]] return (w / w.sum())
345,298,233,300,992,060
Returns the weights of the CW portfolio based on the time series of capweights
kit.py
weight_cw
jaimeaguilera/Investing-projects
python
def weight_cw(r, cap_weights, **kwargs): '\n \n ' w = cap_weights.loc[r.index[1]] return (w / w.sum())
def backtest_ws(r, estimation_window=60, weighting=weight_ew, verbose=False, **kwargs): '\n Backtests a given weighting scheme, given some parameters:\n r : asset returns to use to build the portfolio\n estimation_window: the window to use to estimate parameters\n weighting: the weighting scheme to use, must be a function that takes "r", and a variable number of keyword-value arguments\n ' n_periods = r.shape[0] windows = [(start, (start + estimation_window)) for start in range((n_periods - estimation_window))] weights = [weighting(r.iloc[win[0]:win[1]], **kwargs) for win in windows] weights = pd.DataFrame(weights, index=r.iloc[estimation_window:].index, columns=r.columns) returns = (weights * r).sum(axis='columns', min_count=1) return returns
4,342,074,272,339,228,000
Backtests a given weighting scheme, given some parameters: r : asset returns to use to build the portfolio estimation_window: the window to use to estimate parameters weighting: the weighting scheme to use, must be a function that takes "r", and a variable number of keyword-value arguments
kit.py
backtest_ws
jaimeaguilera/Investing-projects
python
def backtest_ws(r, estimation_window=60, weighting=weight_ew, verbose=False, **kwargs): '\n Backtests a given weighting scheme, given some parameters:\n r : asset returns to use to build the portfolio\n estimation_window: the window to use to estimate parameters\n weighting: the weighting scheme to use, must be a function that takes "r", and a variable number of keyword-value arguments\n ' n_periods = r.shape[0] windows = [(start, (start + estimation_window)) for start in range((n_periods - estimation_window))] weights = [weighting(r.iloc[win[0]:win[1]], **kwargs) for win in windows] weights = pd.DataFrame(weights, index=r.iloc[estimation_window:].index, columns=r.columns) returns = (weights * r).sum(axis='columns', min_count=1) return returns
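A rolling backtest sketch comparing the equal-weight scheme with the GMV scheme defined further down; it assumes the industry data files are available, and the first estimation_window months come back as NaN:

import pandas as pd
import kit

inds = kit.get_ind_returns(weighting='vw', n_inds=30)
ew_r = kit.backtest_ws(inds, estimation_window=36, weighting=kit.weight_ew)
gmv_r = kit.backtest_ws(inds, estimation_window=36, weighting=kit.weight_gmv)
print(pd.DataFrame({'EW': ew_r, 'GMV': gmv_r}).dropna().head())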
def sample_cov(r, **kwargs): '\n Returns the sample covariance of the supplied returns\n ' return r.cov()
3,398,896,989,843,861,500
Returns the sample covariance of the supplied returns
kit.py
sample_cov
jaimeaguilera/Investing-projects
python
def sample_cov(r, **kwargs): '\n \n ' return r.cov()
def weight_gmv(r, cov_estimator=sample_cov, **kwargs): '\n Produces the weights of the GMV portfolio given a covariance matrix of the returns \n ' est_cov = cov_estimator(r, **kwargs) return gmv(est_cov)
-4,044,872,262,371,711,500
Produces the weights of the GMV portfolio given a covariance matrix of the returns
kit.py
weight_gmv
jaimeaguilera/Investing-projects
python
def weight_gmv(r, cov_estimator=sample_cov, **kwargs): '\n \n ' est_cov = cov_estimator(r, **kwargs) return gmv(est_cov)
def cc_cov(r, **kwargs): '\n Estimates a covariance matrix by using the Elton/Gruber Constant Correlation model\n ' rhos = r.corr() n = rhos.shape[0] rho_bar = ((rhos.values.sum() - n) / (n * (n - 1))) ccor = np.full_like(rhos, rho_bar) np.fill_diagonal(ccor, 1.0) sd = r.std() return pd.DataFrame((ccor * np.outer(sd, sd)), index=r.columns, columns=r.columns)
-2,987,201,178,320,407,000
Estimates a covariance matrix by using the Elton/Gruber Constant Correlation model
kit.py
cc_cov
jaimeaguilera/Investing-projects
python
def cc_cov(r, **kwargs): '\n \n ' rhos = r.corr() n = rhos.shape[0] rho_bar = ((rhos.values.sum() - n) / (n * (n - 1))) ccor = np.full_like(rhos, rho_bar) np.fill_diagonal(ccor, 1.0) sd = r.std() return pd.DataFrame((ccor * np.outer(sd, sd)), index=r.columns, columns=r.columns)
def shrinkage_cov(r, delta=0.5, **kwargs): '\n Covariance estimator that shrinks between the Sample Covariance and the Constant Correlation Estimators\n ' prior = cc_cov(r, **kwargs) sample = sample_cov(r, **kwargs) return ((delta * prior) + ((1 - delta) * sample))
7,209,318,898,781,462,000
Covariance estimator that shrinks between the Sample Covariance and the Constant Correlation Estimators
kit.py
shrinkage_cov
jaimeaguilera/Investing-projects
python
def shrinkage_cov(r, delta=0.5, **kwargs): '\n \n ' prior = cc_cov(r, **kwargs) sample = sample_cov(r, **kwargs) return ((delta * prior) + ((1 - delta) * sample))
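A sketch of the shrinkage blend; with delta=0.5 each off-diagonal entry is the midpoint of the sample and constant-correlation estimates:

import numpy as np
import pandas as pd
import kit

rng = np.random.default_rng(3)
r = pd.DataFrame(rng.normal(0.01, 0.05, size=(120, 4)), columns=list('ABCD'))
print(kit.sample_cov(r).iloc[0, 1])
print(kit.cc_cov(r).iloc[0, 1])
print(kit.shrinkage_cov(r, delta=0.5).iloc[0, 1])  # midpoint of the two values above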
def risk_contribution(w, cov): '\n Compute the contributions to risk of the constituents of a portfolio, given a set of portfolio weights and a covariance matrix\n ' total_portfolio_var = (portfolio_vol(w, cov) ** 2) marginal_contrib = (cov @ w) risk_contrib = (np.multiply(marginal_contrib, w.T) / total_portfolio_var) return risk_contrib
499,167,234,481,604,100
Compute the contributions to risk of the constituents of a portfolio, given a set of portfolio weights and a covariance matrix
kit.py
risk_contribution
jaimeaguilera/Investing-projects
python
def risk_contribution(w, cov): '\n \n ' total_portfolio_var = (portfolio_vol(w, cov) ** 2) marginal_contrib = (cov @ w) risk_contrib = (np.multiply(marginal_contrib, w.T) / total_portfolio_var) return risk_contrib
def target_risk_contributions(target_risk, cov): '\n Returns the portfolio weights such that the contributions\n to portfolio risk are as close as possible to\n the target_risk, given the covariance matrix\n ' n = cov.shape[0] init_guess = np.repeat((1 / n), n) bounds = (((0.0, 1.0),) * n) weights_sum_to_1 = {'type': 'eq', 'fun': (lambda weights: (np.sum(weights) - 1))} def msd_risk(weights, target_risk, cov): '\n Returns the Mean Squared Difference in risk contributions\n between weights and target_risk\n ' w_contribs = risk_contribution(weights, cov) return ((w_contribs - target_risk) ** 2).sum() weights = minimize(msd_risk, init_guess, args=(target_risk, cov), method='SLSQP', options={'disp': False}, constraints=(weights_sum_to_1,), bounds=bounds) return weights.x
-6,586,840,443,301,521,000
Returns the portfolio weights such that the contributions to portfolio risk are as close as possible to the target_risk, given the covariance matrix
kit.py
target_risk_contributions
jaimeaguilera/Investing-projects
python
def target_risk_contributions(target_risk, cov): '\n Returns the portfolio weights such that the contributions\n to portfolio risk are as close as possible to\n the target_risk, given the covariance matrix\n ' n = cov.shape[0] init_guess = np.repeat((1 / n), n) bounds = (((0.0, 1.0),) * n) weights_sum_to_1 = {'type': 'eq', 'fun': (lambda weights: (np.sum(weights) - 1))} def msd_risk(weights, target_risk, cov): '\n Returns the Mean Squared Difference in risk contributions\n between weights and target_risk\n ' w_contribs = risk_contribution(weights, cov) return ((w_contribs - target_risk) ** 2).sum() weights = minimize(msd_risk, init_guess, args=(target_risk, cov), method='SLSQP', options={'disp': False}, constraints=(weights_sum_to_1,), bounds=bounds) return weights.x
def equal_risk_contributions(cov): '\n Returns the weights of the portfolio that equalizes the contributions\n of the constituents based on the given covariance matrix\n ' n = cov.shape[0] return target_risk_contributions(target_risk=np.repeat((1 / n), n), cov=cov)
-4,143,323,565,454,961,700
Returns the weights of the portfolio that equalizes the contributions of the constituents based on the given covariance matrix
kit.py
equal_risk_contributions
jaimeaguilera/Investing-projects
python
def equal_risk_contributions(cov): '\n Returns the weights of the portfolio that equalizes the contributions\n of the constituents based on the given covariance matrix\n ' n = cov.shape[0] return target_risk_contributions(target_risk=np.repeat((1 / n), n), cov=cov)
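A sketch verifying the equal-risk-contribution solution; each asset should end up contributing about a third of total portfolio risk:

import numpy as np
import kit

cov = np.array([[0.040, 0.006, 0.010],
                [0.006, 0.010, 0.004],
                [0.010, 0.004, 0.090]])
w = kit.equal_risk_contributions(cov)
print(w)
print(kit.risk_contribution(w, cov))  # each entry ~ 1/3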
def weight_erc(r, cov_estimator=sample_cov, **kwargs): '\n Produces the weights of the ERC portfolio given a covariance matrix of the returns \n ' est_cov = cov_estimator(r, **kwargs) return equal_risk_contributions(est_cov)
1,028,696,242,090,410,500
Produces the weights of the ERC portfolio given a covariance matrix of the returns
kit.py
weight_erc
jaimeaguilera/Investing-projects
python
def weight_erc(r, cov_estimator=sample_cov, **kwargs): '\n \n ' est_cov = cov_estimator(r, **kwargs) return equal_risk_contributions(est_cov)
def implied_returns(delta, sigma, w): '\nObtain the implied expected returns by reverse engineering the weights\nInputs:\ndelta: Risk Aversion Coefficient (scalar)\nsigma: Variance-Covariance Matrix (N x N) as DataFrame\n w: Portfolio weights (N x 1) as Series\nReturns an N x 1 vector of Returns as Series\n ' ir = (delta * sigma.dot(w).squeeze()) ir.name = 'Implied Returns' return ir
-2,040,071,156,509,215,700
Obtain the implied expected returns by reverse engineering the weights Inputs: delta: Risk Aversion Coefficient (scalar) sigma: Variance-Covariance Matrix (N x N) as DataFrame w: Portfolio weights (N x 1) as Series Returns an N x 1 vector of Returns as Series
kit.py
implied_returns
jaimeaguilera/Investing-projects
python
def implied_returns(delta, sigma, w): '\nObtain the implied expected returns by reverse engineering the weights\nInputs:\ndelta: Risk Aversion Coefficient (scalar)\nsigma: Variance-Covariance Matrix (N x N) as DataFrame\n w: Portfolio weights (N x 1) as Series\nReturns an N x 1 vector of Returns as Series\n ' ir = (delta * sigma.dot(w).squeeze()) ir.name = 'Implied Returns' return ir
def proportional_prior(sigma, tau, p): '\n Returns the He-Litterman simplified Omega\n Inputs:\n sigma: N x N Covariance Matrix as DataFrame\n tau: a scalar\n p: a K x N DataFrame linking Q and Assets\n returns a K x K DataFrame, a Matrix representing Prior Uncertainties\n ' helit_omega = p.dot((tau * sigma)).dot(p.T) return pd.DataFrame(np.diag(np.diag(helit_omega.values)), index=p.index, columns=p.index)
-3,693,253,497,267,448,300
Returns the He-Litterman simplified Omega Inputs: sigma: N x N Covariance Matrix as DataFrame tau: a scalar p: a K x N DataFrame linking Q and Assets returns a K x K DataFrame, a Matrix representing Prior Uncertainties
kit.py
proportional_prior
jaimeaguilera/Investing-projects
python
def proportional_prior(sigma, tau, p): '\n Returns the He-Litterman simplified Omega\n Inputs:\n sigma: N x N Covariance Matrix as DataFrame\n tau: a scalar\n p: a K x N DataFrame linking Q and Assets\n returns a K x K DataFrame, a Matrix representing Prior Uncertainties\n ' helit_omega = p.dot((tau * sigma)).dot(p.T) return pd.DataFrame(np.diag(np.diag(helit_omega.values)), index=p.index, columns=p.index)
def bl(w_prior, sigma_prior, p, q, omega=None, delta=2.5, tau=0.02): '\n Computes the posterior expected returns based on\n the original Black-Litterman reference model\n w_prior must be an N x 1 vector of weights, a Series\n sigma_prior is an N x N covariance matrix, a DataFrame\n p must be a K x N matrix linking q and the assets, a DataFrame\n q must be a K x 1 vector of views, a Series\n omega must be a K x K matrix as a DataFrame, or None;\n if omega is None, it is assumed proportional to the variance of the prior\n delta and tau are scalars\n ' if (omega is None): omega = proportional_prior(sigma_prior, tau, p) N = w_prior.shape[0] K = q.shape[0] pi = implied_returns(delta, sigma_prior, w_prior) sigma_prior_scaled = (tau * sigma_prior) mu_bl = (pi + sigma_prior_scaled.dot(p.T).dot(inv((p.dot(sigma_prior_scaled).dot(p.T) + omega)).dot((q - p.dot(pi).values)))) sigma_bl = ((sigma_prior + sigma_prior_scaled) - sigma_prior_scaled.dot(p.T).dot(inv((p.dot(sigma_prior_scaled).dot(p.T) + omega))).dot(p).dot(sigma_prior_scaled)) return (mu_bl, sigma_bl)
-185,691,400,770,407,680
Computes the posterior expected returns based on the original Black-Litterman reference model w_prior must be an N x 1 vector of weights, a Series sigma_prior is an N x N covariance matrix, a DataFrame p must be a K x N matrix linking q and the assets, a DataFrame q must be a K x 1 vector of views, a Series omega must be a K x K matrix as a DataFrame, or None; if omega is None, it is assumed proportional to the variance of the prior delta and tau are scalars
kit.py
bl
jaimeaguilera/Investing-projects
python
def bl(w_prior, sigma_prior, p, q, omega=None, delta=2.5, tau=0.02): '\n Computes the posterior expected returns based on\n the original Black-Litterman reference model\n w_prior must be an N x 1 vector of weights, a Series\n sigma_prior is an N x N covariance matrix, a DataFrame\n p must be a K x N matrix linking q and the assets, a DataFrame\n q must be a K x 1 vector of views, a Series\n omega must be a K x K matrix as a DataFrame, or None;\n if omega is None, it is assumed proportional to the variance of the prior\n delta and tau are scalars\n ' if (omega is None): omega = proportional_prior(sigma_prior, tau, p) N = w_prior.shape[0] K = q.shape[0] pi = implied_returns(delta, sigma_prior, w_prior) sigma_prior_scaled = (tau * sigma_prior) mu_bl = (pi + sigma_prior_scaled.dot(p.T).dot(inv((p.dot(sigma_prior_scaled).dot(p.T) + omega)).dot((q - p.dot(pi).values)))) sigma_bl = ((sigma_prior + sigma_prior_scaled) - sigma_prior_scaled.dot(p.T).dot(inv((p.dot(sigma_prior_scaled).dot(p.T) + omega))).dot(p).dot(sigma_prior_scaled)) return (mu_bl, sigma_bl)
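A tiny Black-Litterman sketch with two assets and a single relative view; the numbers are purely illustrative:

import pandas as pd
import kit

tickers = ['A', 'B']
sigma = pd.DataFrame([[0.04, 0.01], [0.01, 0.09]], index=tickers, columns=tickers)
w_prior = pd.Series([0.6, 0.4], index=tickers)    # e.g. cap weights
p = pd.DataFrame([[1.0, -1.0]], columns=tickers)  # the view: A minus B...
q = pd.Series([0.02])                             # ...outperforms by 2%
mu_bl, sigma_bl = kit.bl(w_prior, sigma, p, q, delta=2.5, tau=0.02)
print(mu_bl)     # posterior expected returns tilted toward the view
print(sigma_bl)  # posterior covariance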
def inverse(d): '\n Invert the dataframe by inverting the underlying matrix\n ' return pd.DataFrame(inv(d.values), index=d.columns, columns=d.index)
-3,257,535,559,224,084,500
Invert the dataframe by inverting the underlying matrix
kit.py
inverse
jaimeaguilera/Investing-projects
python
def inverse(d): '\n \n ' return pd.DataFrame(inv(d.values), index=d.columns, columns=d.index)
def weight_msr(sigma, mu, scale=True): '\n Optimal (Tangent/Max Sharpe Ratio) Portfolio weights\n by using the Markowitz Optimization Procedure\n Mu is the vector of Excess expected Returns\n Sigma must be an N x N matrix as a DataFrame and Mu a column vector as a Series\n This implements page 188 Equation 5.2.28 of\n "The econometrics of financial markets" Campbell, Lo and Mackinlay.\n ' w = inverse(sigma).dot(mu) if scale: w = (w / sum(w)) return w
1,760,906,879,167,205,600
Optimal (Tangent/Max Sharpe Ratio) Portfolio weights by using the Markowitz Optimization Procedure Mu is the vector of Excess expected Returns Sigma must be an N x N matrix as a DataFrame and Mu a column vector as a Series This implements page 188 Equation 5.2.28 of "The econometrics of financial markets" Campbell, Lo and Mackinlay.
kit.py
weight_msr
jaimeaguilera/Investing-projects
python
def weight_msr(sigma, mu, scale=True): '\n Optimal (Tangent/Max Sharpe Ratio) Portfolio weights\n by using the Markowitz Optimization Procedure\n Mu is the vector of Excess expected Returns\n Sigma must be an N x N matrix as a DataFrame and Mu a column vector as a Series\n This implements page 188 Equation 5.2.28 of\n "The econometrics of financial markets" Campbell, Lo and Mackinlay.\n ' w = inverse(sigma).dot(mu) if scale: w = (w / sum(w)) return w
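The closed-form tangency weights on the same toy inputs; mu here is excess expected returns, and scale=True normalizes the weights to sum to 1:

import pandas as pd
import kit

tickers = ['A', 'B']
sigma = pd.DataFrame([[0.04, 0.01], [0.01, 0.09]], index=tickers, columns=tickers)
mu = pd.Series([0.05, 0.03], index=tickers)  # excess expected returns
print(kit.weight_msr(sigma, mu))  # Sigma^{-1} mu rescaled, ~ [0.86, 0.14]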
def neg_sharpe(weights, riskfree_rate, er, cov): '\n Returns the negative of the sharpe ratio\n of the given portfolio\n ' r = portfolio_return(weights, er) vol = portfolio_vol(weights, cov) return ((- (r - riskfree_rate)) / vol)
-5,140,026,034,260,675,000
Returns the negative of the sharpe ratio of the given portfolio
kit.py
neg_sharpe
jaimeaguilera/Investing-projects
python
def neg_sharpe(weights, riskfree_rate, er, cov): '\n Returns the negative of the sharpe ratio\n of the given portfolio\n ' r = portfolio_return(weights, er) vol = portfolio_vol(weights, cov) return ((- (r - riskfree_rate)) / vol)
def msd_risk(weights, target_risk, cov): '\n Returns the Mean Squared Difference in risk contributions\n between weights and target_risk\n ' w_contribs = risk_contribution(weights, cov) return ((w_contribs - target_risk) ** 2).sum()
-1,417,795,964,434,329,000
Returns the Mean Squared Difference in risk contributions between weights and target_risk
kit.py
msd_risk
jaimeaguilera/Investing-projects
python
def msd_risk(weights, target_risk, cov): '\n Returns the Mean Squared Difference in risk contributions\n between weights and target_risk\n ' w_contribs = risk_contribution(weights, cov) return ((w_contribs - target_risk) ** 2).sum()
def __init__(self, datasetPathList, nodeList, group, level='dataset', priority='normal', move='n', static='n', custodial='n', request_only='y', blocks=None, subscriptionId=(- 1), comments=''): '\n Initialize PhEDEx subscription with default value\n ' if isinstance(datasetPathList, basestring): datasetPathList = [datasetPathList] if isinstance(nodeList, basestring): nodeList = [nodeList] self.datasetPaths = set(datasetPathList) self.nodes = set(nodeList) self.level = level.lower() self.priority = priority.lower() self.move = move.lower() self.static = static.lower() self.group = group self.custodial = custodial.lower() self.request_only = request_only.lower() self.requesterID = None self.status = 'New' self.comments = comments self.subscriptionIds = set([subscriptionId]) self.blocks = blocks try: for option in (self.static, self.custodial, self.request_only, self.move): assert (option in ('y', 'n')) assert (self.priority in PhEDEx_VALID_SUBSCRIPTION_PRIORITIES) assert (self.level in ('dataset', 'block')) if (self.level == 'block'): assert (self.blocks is not None) except AssertionError: msg = 'The subscription is not a valid PhEDEx subscription.\n' msg += 'Check the options for this subscription: \n' msg += ('level: %s\n' % self.level) msg += ('priority: %s\n' % self.priority) msg += ('static: %s\n' % self.static) msg += ('move: %s\n' % self.move) msg += ('custodial: %s\n' % self.custodial) msg += ('blocks: %s\n' % str(self.blocks)) raise PhEDExSubscriptionException(msg)
-6,030,256,932,251,470,000
Initialize PhEDEx subscription with default value
src/python/WMCore/Services/PhEDEx/DataStructs/SubscriptionList.py
__init__
cbbrainerd/WMCore
python
def __init__(self, datasetPathList, nodeList, group, level='dataset', priority='normal', move='n', static='n', custodial='n', request_only='y', blocks=None, subscriptionId=(- 1), comments=''): '\n \n ' if isinstance(datasetPathList, basestring): datasetPathList = [datasetPathList] if isinstance(nodeList, basestring): nodeList = [nodeList] self.datasetPaths = set(datasetPathList) self.nodes = set(nodeList) self.level = level.lower() self.priority = priority.lower() self.move = move.lower() self.static = static.lower() self.group = group self.custodial = custodial.lower() self.request_only = request_only.lower() self.requesterID = None self.status = 'New' self.comments = comments self.subscriptionIds = set([subscriptionId]) self.blocks = blocks try: for option in (self.static, self.custodial, self.request_only, self.move): assert (option in ('y', 'n')) assert (self.priority in PhEDEx_VALID_SUBSCRIPTION_PRIORITIES) assert (self.level in ('dataset', 'block')) if (self.level == 'block'): assert (self.blocks is not None) except AssertionError: msg = 'The subscription is not a valid PhEDEx subscription.\n' msg += 'Check the options for this subscription: \n' msg += ('level: %s\n' % self.level) msg += ('priority: %s\n' % self.priority) msg += ('static: %s\n' % self.static) msg += ('move: %s\n' % self.move) msg += ('custodial: %s\n' % self.custodial) msg += ('blocks: %s\n' % str(self.blocks)) raise PhEDExSubscriptionException(msg)
def __str__(self): '\n Write out useful information for this object\n :return:\n ' res = {'datasetPaths': self.datasetPaths, 'nodes': self.nodes, 'priority': self.priority, 'move': self.move, 'group': self.group, 'custodial': self.custodial, 'request_only': self.request_only, 'blocks': self.blocks} return str(res)
-1,481,429,788,718,934,000
Write out useful information for this object
src/python/WMCore/Services/PhEDEx/DataStructs/SubscriptionList.py
__str__
cbbrainerd/WMCore
python
def __str__(self): '\n Write out useful information for this object\n :return:\n ' res = {'datasetPaths': self.datasetPaths, 'nodes': self.nodes, 'priority': self.priority, 'move': self.move, 'group': self.group, 'custodial': self.custodial, 'request_only': self.request_only, 'blocks': self.blocks} return str(res)
def getDatasetsAndBlocks(self): '\n _getDatasetsAndBlocks_\n\n Get the block structure\n with datasets and blocks\n ' return self.blocks
7,009,327,071,037,417,000
_getDatasetsAndBlocks_ Get the block structure with datasets and blocks
src/python/WMCore/Services/PhEDEx/DataStructs/SubscriptionList.py
getDatasetsAndBlocks
cbbrainerd/WMCore
python
def getDatasetsAndBlocks(self): '\n _getDatasetsAndBlocks_\n\n Get the block structure\n with datasets and blocks\n ' return self.blocks
def matchesExistingTransferRequest(self, phedexDataSvc): '\n _matchesExistingTransferRequest_\n\n Check the given phedex data service to verify if an unapproved\n transfer request equal to this subscription is already in the system.\n ' if ((len(self.datasetPaths) != 1) or (len(self.nodes) != 1)): msg = 'matchesExistingTransferRequest can only run in single node/dataset subscriptions' raise PhEDExSubscriptionException(msg) if (self.level != 'dataset'): msg = 'matchesExistingTransferRequest is only supported by dataset subscriptions' raise PhEDExSubscriptionException(msg) node = next(iter(self.nodes)) dataset = next(iter(self.datasetPaths)) existingRequests = phedexDataSvc.getRequestList(dataset=dataset, node=node, decision='pending')['phedex']['request'] for request in existingRequests: requestId = request['id'] requestInfo = phedexDataSvc.getTransferRequests(request=requestId)['phedex']['request'] if (not requestInfo): logging.error("Transfer request %s doesn't exist in PhEDEx", requestId) continue requestInfo = requestInfo[0] destinations = requestInfo['destinations']['node'] for nodeInfo in destinations: if (nodeInfo['name'] == node): break else: continue phedexRequest = PhEDExSubscription(self.datasetPaths, self.nodes, self.group, self.level, requestInfo['priority'], requestInfo['move'], requestInfo['static'], requestInfo['custodial'], self.request_only) if self.isEqualOptions(phedexRequest): return True return False
6,305,325,705,504,872,000
_matchesExistingTransferRequest_ Check the given phedex data service to verify if an unapproved transfer request equal to this subscription is already in the system.
src/python/WMCore/Services/PhEDEx/DataStructs/SubscriptionList.py
matchesExistingTransferRequest
cbbrainerd/WMCore
python
def matchesExistingTransferRequest(self, phedexDataSvc): '\n _matchesExistingTransferRequest_\n\n Check the given phedex data service to verify if an unapproved\n transfer request equal to this subscription is already in the system.\n ' if ((len(self.datasetPaths) != 1) or (len(self.nodes) != 1)): msg = 'matchesExistingTransferRequest can only run in single node/dataset subscriptions' raise PhEDExSubscriptionException(msg) if (self.level != 'dataset'): msg = 'matchesExistingTransferRequest is only supported by dataset subscriptions' raise PhEDExSubscriptionException(msg) node = next(iter(self.nodes)) dataset = next(iter(self.datasetPaths)) existingRequests = phedexDataSvc.getRequestList(dataset=dataset, node=node, decision='pending')['phedex']['request'] for request in existingRequests: requestId = request['id'] requestInfo = phedexDataSvc.getTransferRequests(request=requestId)['phedex']['request'] if (not requestInfo): logging.error("Transfer request %s doesn't exist in PhEDEx", requestId) continue requestInfo = requestInfo[0] destinations = requestInfo['destinations']['node'] for nodeInfo in destinations: if (nodeInfo['name'] == node): break else: continue phedexRequest = PhEDExSubscription(self.datasetPaths, self.nodes, self.group, self.level, requestInfo['priority'], requestInfo['move'], requestInfo['static'], requestInfo['custodial'], self.request_only) if self.isEqualOptions(phedexRequest): return True return False
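To see the matching logic in isolation, one can feed it a stub that mimics the data-service JSON shape unpacked above; isEqualOptions is a sibling method not shown in these records, so the True result below assumes it compares the subscription options in the obvious way:

class StubRequestSvc(object):
    # Minimal stand-in for the PhEDEx data-service client, returning the
    # same nested structure matchesExistingTransferRequest unpacks.
    def getRequestList(self, dataset, node, decision):
        return {'phedex': {'request': [{'id': 42}]}}

    def getTransferRequests(self, request):
        return {'phedex': {'request': [{
            'destinations': {'node': [{'name': 'T1_US_FNAL_Disk'}]},
            'priority': 'normal', 'move': 'n',
            'static': 'n', 'custodial': 'n'}]}}

sub = PhEDExSubscription('/Primary/Processed-v1/RAW',
                         'T1_US_FNAL_Disk', 'DataOps')
print(sub.matchesExistingTransferRequest(StubRequestSvc()))  # expect True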
def matchesExistingSubscription(self, phedexDataSvc): '\n _matchesExistingSubscription_\n\n Check the given phedex data service to verify if a PhEDEx subscription\n equal to this subscription is already in the system.\n ' if ((len(self.datasetPaths) != 1) or (len(self.nodes) != 1)): msg = 'matchesExistingSubscription can only run in single node/dataset subscriptions' raise PhEDExSubscriptionException(msg) if (self.level != 'dataset'): msg = 'matchesExistingSubscription is only supported by dataset subscriptions' raise PhEDExSubscriptionException(msg) node = next(iter(self.nodes)) dataset = next(iter(self.datasetPaths)) existingSubscription = phedexDataSvc.subscriptions(dataset=dataset, node=node)['phedex']['dataset'] if (len(existingSubscription) < 1): return False datasetInfo = existingSubscription[0] for subscriptionInfo in datasetInfo['subscription']: if (node != subscriptionInfo['node']): continue phedexSub = PhEDExSubscription(self.datasetPaths, self.nodes, self.group, subscriptionInfo['level'], subscriptionInfo['priority'], subscriptionInfo['move'], self.static, subscriptionInfo['custodial'], self.request_only) if self.isEqualOptions(phedexSub): return True return False
1,436,007,145,053,543,200
_matchesExistingSubscription_ Check the given phedex data service to verify if a PhEDEx subscription equal to this subscription is already in the system.
src/python/WMCore/Services/PhEDEx/DataStructs/SubscriptionList.py
matchesExistingSubscription
cbbrainerd/WMCore
python
def matchesExistingSubscription(self, phedexDataSvc): '\n _matchesExistingSubscription_\n\n Check the given phedex data service to verify if a PhEDEx subscription\n equal to this subscription is already in the system.\n ' if ((len(self.datasetPaths) != 1) or (len(self.nodes) != 1)): msg = 'matchesExistingSubscription can only run in single node/dataset subscriptions' raise PhEDExSubscriptionException(msg) if (self.level != 'dataset'): msg = 'matchesExistingSubscription is only supported by dataset subscriptions' raise PhEDExSubscriptionException(msg) node = next(iter(self.nodes)) dataset = next(iter(self.datasetPaths)) existingSubscription = phedexDataSvc.subscriptions(dataset=dataset, node=node)['phedex']['dataset'] if (len(existingSubscription) < 1): return False datasetInfo = existingSubscription[0] for subscriptionInfo in datasetInfo['subscription']: if (node != subscriptionInfo['node']): continue phedexSub = PhEDExSubscription(self.datasetPaths, self.nodes, self.group, subscriptionInfo['level'], subscriptionInfo['priority'], subscriptionInfo['move'], self.static, subscriptionInfo['custodial'], self.request_only) if self.isEqualOptions(phedexSub): return True return False
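The companion check runs the same way against approved subscriptions; a stub mirroring the subscriptions() response shape makes the flow concrete (again assuming the usual isEqualOptions semantics):

class StubSubscriptionSvc(object):
    # Stand-in for the client's subscriptions() call.
    def subscriptions(self, dataset, node):
        return {'phedex': {'dataset': [{'subscription': [{
            'node': 'T1_US_FNAL_Disk', 'level': 'dataset',
            'priority': 'normal', 'move': 'n', 'custodial': 'n'}]}]}}

sub = PhEDExSubscription('/Primary/Processed-v1/RAW',
                         'T1_US_FNAL_Disk', 'DataOps')
print(sub.matchesExistingSubscription(StubSubscriptionSvc()))  # expect True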
def addSubscription(self, subObj): '\n _addSubscription_\n Add a new subscription to the subscription policy.\n If the same subscription key exist just add the node list\n ' for subscription in self._subList: if subscription.isEqualOptions(subObj): if subscription.isEqualNode(subObj): subscription.addDatasetPaths(subObj) return self._subList.append(subObj) return
5,781,099,127,773,631,000
_addSubscription_ Add a new subscription to the subscription policy. If the same subscription key exist just add the node list
src/python/WMCore/Services/PhEDEx/DataStructs/SubscriptionList.py
addSubscription
cbbrainerd/WMCore
python
def addSubscription(self, subObj): '\n _addSubscription_\n Add a new subscription to the subscription policy.\n If the same subscription key exist just add the node list\n ' for subscription in self._subList: if subscription.isEqualOptions(subObj): if subscription.isEqualNode(subObj): subscription.addDatasetPaths(subObj) return self._subList.append(subObj) return
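A sketch of the dedup behaviour; SubscriptionList stands in for the container class that owns _subList (its definition is not shown in these records), and isEqualOptions/isEqualNode/addDatasetPaths are PhEDExSubscription methods defined elsewhere in the module:

policy = SubscriptionList()
subA = PhEDExSubscription('/DatasetA/Run1-v1/RAW', 'T1_US_FNAL_Disk', 'DataOps')
subB = PhEDExSubscription('/DatasetB/Run1-v1/RAW', 'T1_US_FNAL_Disk', 'DataOps')
policy.addSubscription(subA)
policy.addSubscription(subB)  # same options and node: folded into subA's paths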
def compact(self): '\n _compact_\n\n Compact the subscription list by aggregating the subscriptions where the nodes\n share a list of dataset paths.\n ' bags = [] baggedIndexes = set() for (i, subscriptionA) in enumerate(self._subList): if (i in baggedIndexes): continue bags.append([subscriptionA]) for (j, subscriptionB) in enumerate(self._subList[(i + 1):], (i + 1)): if (j in baggedIndexes): continue if (subscriptionA.isEqualOptions(subscriptionB) and subscriptionA.isEqualDatasetPaths(subscriptionB)): bags[(- 1)].append(subscriptionB) baggedIndexes.add(j) newSubList = [] for bag in bags: anchorSubscription = bag[0] for subscription in bag[1:]: anchorSubscription.addNodes(subscription) newSubList.append(anchorSubscription) self._subList = newSubList
2,831,347,083,717,089,300
_compact_ Compact the subscription list by aggregating the subscriptions where the nodes share a list of dataset paths.
src/python/WMCore/Services/PhEDEx/DataStructs/SubscriptionList.py
compact
cbbrainerd/WMCore
python
def compact(self): '\n _compact_\n\n Compact the subscription list by aggregating the subscriptions where the nodes\n share a list of dataset paths.\n ' bags = [] baggedIndexes = set() for (i, subscriptionA) in enumerate(self._subList): if (i in baggedIndexes): continue bags.append([subscriptionA]) for (j, subscriptionB) in enumerate(self._subList[(i + 1):], (i + 1)): if (j in baggedIndexes): continue if (subscriptionA.isEqualOptions(subscriptionB) and subscriptionA.isEqualDatasetPaths(subscriptionB)): bags[(- 1)].append(subscriptionB) baggedIndexes.add(j) newSubList = [] for bag in bags: anchorSubscription = bag[0] for subscription in bag[1:]: anchorSubscription.addNodes(subscription) newSubList.append(anchorSubscription) self._subList = newSubList
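The complementary case: two subscriptions that share options and dataset paths but target different nodes survive addSubscription as separate entries, and compact() then merges their node sets (addNodes is another PhEDExSubscription method not shown here):

subA = PhEDExSubscription('/DatasetA/Run1-v1/RAW', 'T1_US_FNAL_Disk', 'DataOps')
subB = PhEDExSubscription('/DatasetA/Run1-v1/RAW', 'T2_CH_CERN', 'DataOps')
policy = SubscriptionList()
policy.addSubscription(subA)   # kept as-is
policy.addSubscription(subB)   # same options, different node: appended
policy.compact()               # merged into one entry spanning both nodes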
def get_source(category): '\n function that gets the json response to our url request\n ' get_source_url = NEWS_API_BASE_URL.format(category, api_key) print(get_source_url) with urllib.request.urlopen(get_source_url) as url: get_source_data = url.read() get_source_response = json.loads(get_source_data) sources_result = None if get_source_response['sources']: sources_results_list = get_source_response['sources'] sources_result = process_sources(sources_results_list) print(sources_result) return sources_result
2,996,768,572,009,040,400
Function that gets the JSON response to our URL request
app/requests.py
get_source
ClarisseU/newsHighlight
python
def get_source(category): '\n \n ' get_source_url = NEWS_API_BASE_URL.format(category, api_key) print(get_source_url) with urllib.request.urlopen(get_source_url) as url: get_source_data = url.read() get_source_response = json.loads(get_source_data) sources_result = None if get_source_response['sources']: sources_results_list = get_source_response['sources'] sources_result = process_sources(sources_results_list) print(sources_result) return sources_result
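Hypothetical usage of the helper above; NEWS_API_BASE_URL and api_key are module-level values configured elsewhere in app/requests.py, and the Sources attribute names are assumed to mirror the constructor arguments used in process_sources:

sources = get_source('technology')
if sources:
    for source in sources:
        print(source.id, source.url)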
def process_sources(sources_list): '\n Function that checks the news results and turn them into objects\n \n Args:\n sources_list: A list of dictionaries that contain sources details\n ' sources_result = [] for source_item in sources_list: author = source_item.get('author') title = source_item.get('title') imageurl = source_item.get('urltoimage') description = source_item.get('description') url = source_item.get('url') id = source_item.get('id') sources_object = Sources(author, title, imageurl, description, url, id) sources_result.append(sources_object) return sources_result
5,628,184,623,351,458,000
Function that checks the news results and turns them into objects Args: sources_list: A list of dictionaries that contain source details
app/requests.py
process_sources
ClarisseU/newsHighlight
python
def process_sources(sources_list): '\n Function that checks the news results and turn them into objects\n \n Args:\n sources_list: A list of dictionaries that contain sources details\n ' sources_result = [] for source_item in sources_list: author = source_item.get('author') title = source_item.get('title') imageurl = source_item.get('urltoimage') description = source_item.get('description') url = source_item.get('url') id = source_item.get('id') sources_object = Sources(author, title, imageurl, description, url, id) sources_result.append(sources_object) return sources_result
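A hand-built item makes the mapping concrete; any key the dict lacks (here 'author' and 'urltoimage') simply comes back as None via dict.get:

sample = [{'id': 'bbc-news', 'title': 'BBC News',
           'description': 'World news coverage',
           'url': 'https://www.bbc.co.uk/news'}]
objects = process_sources(sample)  # one Sources object; author is None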
def get_articles(id): '\n Function that processes the articles and returns a list of articles objects\n ' get_articles_url = ARTICLE.format(id, api_key) print(get_articles_url) with urllib.request.urlopen(get_articles_url) as url: article_data = url.read() articles_response = json.loads(article_data) articles_object = None if articles_response['articles']: response_list = articles_response['articles'] articles_object = process_articles(response_list) return articles_object
-5,579,780,585,189,511,000
Function that processes the articles and returns a list of article objects
app/requests.py
get_articles
ClarisseU/newsHighlight
python
def get_articles(id): '\n \n ' get_articles_url = ARTICLE.format(id, api_key) print(get_articles_url) with urllib.request.urlopen(get_articles_url) as url: article_data = url.read() articles_response = json.loads(article_data) articles_object = None if articles_response['articles']: response_list = articles_response['articles'] articles_object = process_articles(response_list) return articles_object
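An illustrative chain of the two request helpers (this flow is not part of the original module, and it assumes the Sources objects expose an id attribute): resolve the sources for a category, then fetch articles for the first source id:

sources = get_source('business')
if sources:
    articles = get_articles(sources[0].id)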