body (string, 26 to 98.2k chars) | body_hash (int64) | docstring (string, 1 to 16.8k chars) | path (string, 5 to 230 chars) | name (string, 1 to 96 chars) | repository_name (string, 7 to 89 chars) | lang (1 class: python) | body_without_docstring (string, 20 to 98.2k chars)
---|---|---|---|---|---|---|---|
def inference(net, img_path='', output_path='./', output_name='f', use_gpu=True):
'\n\n :param net:\n :param img_path:\n :param output_path:\n :return:\n '
adj2_ = torch.from_numpy(graph.cihp2pascal_nlp_adj).float()
adj2_test = adj2_.unsqueeze(0).unsqueeze(0).expand(1, 1, 7, 20).cuda().transpose(2, 3)
adj1_ = Variable(torch.from_numpy(graph.preprocess_adj(graph.pascal_graph)).float())
adj3_test = adj1_.unsqueeze(0).unsqueeze(0).expand(1, 1, 7, 7).cuda()
cihp_adj = graph.preprocess_adj(graph.cihp_graph)
adj3_ = Variable(torch.from_numpy(cihp_adj).float())
adj1_test = adj3_.unsqueeze(0).unsqueeze(0).expand(1, 1, 20, 20).cuda()
scale_list = [1, 0.5, 0.75, 1.25, 1.5, 1.75]
img = read_img(img_path)
testloader_list = []
testloader_flip_list = []
for pv in scale_list:
composed_transforms_ts = transforms.Compose([tr.Scale_only_img(pv), tr.Normalize_xception_tf_only_img(), tr.ToTensor_only_img()])
composed_transforms_ts_flip = transforms.Compose([tr.Scale_only_img(pv), tr.HorizontalFlip_only_img(), tr.Normalize_xception_tf_only_img(), tr.ToTensor_only_img()])
testloader_list.append(img_transform(img, composed_transforms_ts))
testloader_flip_list.append(img_transform(img, composed_transforms_ts_flip))
start_time = timeit.default_timer()
net.eval()
for (iii, sample_batched) in enumerate(zip(testloader_list, testloader_flip_list)):
(inputs, labels) = (sample_batched[0]['image'], sample_batched[0]['label'])
(inputs_f, _) = (sample_batched[1]['image'], sample_batched[1]['label'])
inputs = inputs.unsqueeze(0)
inputs_f = inputs_f.unsqueeze(0)
inputs = torch.cat((inputs, inputs_f), dim=0)
if (iii == 0):
(_, _, h, w) = inputs.size()
inputs = Variable(inputs, requires_grad=False)
with torch.no_grad():
if (use_gpu >= 0):
inputs = inputs.cuda()
outputs = net.forward(inputs, adj1_test.cuda(), adj3_test.cuda(), adj2_test.cuda())
outputs = ((outputs[0] + flip(flip_cihp(outputs[1]), dim=(- 1))) / 2)
outputs = outputs.unsqueeze(0)
if (iii > 0):
outputs = F.upsample(outputs, size=(h, w), mode='bilinear', align_corners=True)
outputs_final = (outputs_final + outputs)
else:
outputs_final = outputs.clone()
predictions = torch.max(outputs_final, 1)[1]
results = predictions.cpu().numpy()
vis_res = decode_labels(results)
parsing_im = Image.fromarray(vis_res[0])
parsing_im.save((output_path + '/{}.png'.format(output_name)))
end_time = timeit.default_timer()
print((('time used for the multi-scale image inference' + ' is :') + str((end_time - start_time)))) | 7,895,869,433,386,588,000 | :param net:
:param img_path:
:param output_path:
:return: | exp/inference/inference_dir.py | inference | ericwang0701/Graphonomy | python | def inference(net, img_path='', output_path='./', output_name='f', use_gpu=True):
'\n\n :param net:\n :param img_path:\n :param output_path:\n :return:\n '
adj2_ = torch.from_numpy(graph.cihp2pascal_nlp_adj).float()
adj2_test = adj2_.unsqueeze(0).unsqueeze(0).expand(1, 1, 7, 20).cuda().transpose(2, 3)
adj1_ = Variable(torch.from_numpy(graph.preprocess_adj(graph.pascal_graph)).float())
adj3_test = adj1_.unsqueeze(0).unsqueeze(0).expand(1, 1, 7, 7).cuda()
cihp_adj = graph.preprocess_adj(graph.cihp_graph)
adj3_ = Variable(torch.from_numpy(cihp_adj).float())
adj1_test = adj3_.unsqueeze(0).unsqueeze(0).expand(1, 1, 20, 20).cuda()
scale_list = [1, 0.5, 0.75, 1.25, 1.5, 1.75]
img = read_img(img_path)
testloader_list = []
testloader_flip_list = []
for pv in scale_list:
composed_transforms_ts = transforms.Compose([tr.Scale_only_img(pv), tr.Normalize_xception_tf_only_img(), tr.ToTensor_only_img()])
composed_transforms_ts_flip = transforms.Compose([tr.Scale_only_img(pv), tr.HorizontalFlip_only_img(), tr.Normalize_xception_tf_only_img(), tr.ToTensor_only_img()])
testloader_list.append(img_transform(img, composed_transforms_ts))
testloader_flip_list.append(img_transform(img, composed_transforms_ts_flip))
start_time = timeit.default_timer()
net.eval()
for (iii, sample_batched) in enumerate(zip(testloader_list, testloader_flip_list)):
(inputs, labels) = (sample_batched[0]['image'], sample_batched[0]['label'])
(inputs_f, _) = (sample_batched[1]['image'], sample_batched[1]['label'])
inputs = inputs.unsqueeze(0)
inputs_f = inputs_f.unsqueeze(0)
inputs = torch.cat((inputs, inputs_f), dim=0)
if (iii == 0):
(_, _, h, w) = inputs.size()
inputs = Variable(inputs, requires_grad=False)
with torch.no_grad():
if (use_gpu >= 0):
inputs = inputs.cuda()
outputs = net.forward(inputs, adj1_test.cuda(), adj3_test.cuda(), adj2_test.cuda())
outputs = ((outputs[0] + flip(flip_cihp(outputs[1]), dim=(- 1))) / 2)
outputs = outputs.unsqueeze(0)
if (iii > 0):
outputs = F.upsample(outputs, size=(h, w), mode='bilinear', align_corners=True)
outputs_final = (outputs_final + outputs)
else:
outputs_final = outputs.clone()
predictions = torch.max(outputs_final, 1)[1]
results = predictions.cpu().numpy()
vis_res = decode_labels(results)
parsing_im = Image.fromarray(vis_res[0])
parsing_im.save((output_path + '/{}.png'.format(output_name)))
end_time = timeit.default_timer()
print((('time used for the multi-scale image inference' + ' is :') + str((end_time - start_time)))) |
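A minimal usage sketch for the `inference` record above. The network builder, checkpoint path, and image path below are hypothetical placeholders; only the call signature comes from the function itself.

```python
# Usage sketch only: build_graphonomy_net and all paths are hypothetical.
import torch

net = build_graphonomy_net()                              # assumed helper returning the Graphonomy parsing model
net.load_state_dict(torch.load('inference.pth', map_location='cpu'))  # hypothetical checkpoint
net.cuda()                                                # the function moves inputs to CUDA when use_gpu=True

inference(net,
          img_path='demo/person.jpg',                     # hypothetical input image
          output_path='./outputs',                        # directory is expected to exist
          output_name='person_parsing',
          use_gpu=True)
# The parsing map is written to ./outputs/person_parsing.png, averaged over
# every scale in scale_list and its horizontally flipped counterpart.
```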
def chunk_date_range(start_date: DateTime, interval=pendulum.duration(days=1)) -> Iterable[Period]:
'\n Yields a list of the beginning and ending timestamps of each day between the start date and now.\n The return value is a pendulum.period\n '
now = pendulum.now()
while (start_date <= now):
end_date = (start_date + interval)
(yield pendulum.period(start_date, end_date))
start_date = end_date | 3,912,673,268,839,119,000 | Yields a list of the beginning and ending timestamps of each day between the start date and now.
The return value is a pendulum.period | airbyte-integrations/connectors/source-slack/source_slack/source.py | chunk_date_range | AetherUnbound/airbyte | python | def chunk_date_range(start_date: DateTime, interval=pendulum.duration(days=1)) -> Iterable[Period]:
'\n Yields a list of the beginning and ending timestamps of each day between the start date and now.\n The return value is a pendulum.period\n '
now = pendulum.now()
while (start_date <= now):
end_date = (start_date + interval)
(yield pendulum.period(start_date, end_date))
start_date = end_date |
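A short usage sketch for `chunk_date_range`; the start date is an arbitrary example.

```python
import pendulum

# Walk day-sized periods from an arbitrary start date up to "now".
start = pendulum.datetime(2021, 1, 1)
for period in chunk_date_range(start, interval=pendulum.duration(days=1)):
    print(period.start, '->', period.end)
# Note that the last yielded period may extend up to one interval past now,
# since the loop only checks start_date <= now.
```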
@property
@abstractmethod
def data_field(self) -> str:
'The name of the field in the response which contains the data' | 4,770,618,322,837,041,000 | The name of the field in the response which contains the data | airbyte-integrations/connectors/source-slack/source_slack/source.py | data_field | AetherUnbound/airbyte | python | @property
@abstractmethod
def data_field(self) -> str:
|
def stream_slices(self, stream_state: Mapping[(str, Any)]=None, **kwargs) -> Iterable[Optional[Mapping[(str, any)]]]:
"\n The logic for incrementally syncing threads is not very obvious, so buckle up.\n\n To get all messages in a thread, one must specify the channel and timestamp of the parent (first) message of that thread, basically its ID.\n\n One complication is that threads can be updated at any time in the future. Therefore, if we wanted to comprehensively sync data i.e: get every\n single response in a thread, we'd have to read every message in the slack instance every time we ran a sync, because otherwise there is no\n way to guarantee that a thread deep in the past didn't receive a new message.\n\n A pragmatic workaround is to say we want threads to be at least N days fresh i.e: look back N days into the past, get every message since,\n and read all of the thread responses. This is essentially the approach we're taking here via slicing: create slices from N days into the\n past and read all messages in threads since then. We could optionally filter out records we have already read, but that's omitted to keep\n the logic simple to reason about.\n\n Good luck.\n "
stream_state = (stream_state or {})
channels_stream = Channels(authenticator=self.authenticator)
if (self.cursor_field in stream_state):
messages_start_date = (pendulum.from_timestamp(stream_state[self.cursor_field]) - self.messages_lookback_window)
else:
messages_start_date = pendulum.from_timestamp(self._start_ts)
messages_stream = ChannelMessages(authenticator=self.authenticator, default_start_date=messages_start_date)
for message_chunk in messages_stream.stream_slices(stream_state={self.cursor_field: messages_start_date.timestamp()}):
self.logger.info(f'Syncing replies {message_chunk}')
for channel in channels_stream.read_records(sync_mode=SyncMode.full_refresh):
message_chunk['channel'] = channel['id']
for message in messages_stream.read_records(sync_mode=SyncMode.full_refresh, stream_slice=message_chunk):
(yield {'channel': channel['id'], self.cursor_field: message[self.primary_key]}) | -7,067,957,066,529,734,000 | The logic for incrementally syncing threads is not very obvious, so buckle up.
To get all messages in a thread, one must specify the channel and timestamp of the parent (first) message of that thread, basically its ID.
One complication is that threads can be updated at any time in the future. Therefore, if we wanted to comprehensively sync data i.e: get every
single response in a thread, we'd have to read every message in the slack instance every time we ran a sync, because otherwise there is no
way to guarantee that a thread deep in the past didn't receive a new message.
A pragmatic workaround is to say we want threads to be at least N days fresh i.e: look back N days into the past, get every message since,
and read all of the thread responses. This is essentially the approach we're taking here via slicing: create slices from N days into the
past and read all messages in threads since then. We could optionally filter out records we have already read, but that's omitted to keep
the logic simple to reason about.
Good luck. | airbyte-integrations/connectors/source-slack/source_slack/source.py | stream_slices | AetherUnbound/airbyte | python | def stream_slices(self, stream_state: Mapping[(str, Any)]=None, **kwargs) -> Iterable[Optional[Mapping[(str, any)]]]:
"\n The logic for incrementally syncing threads is not very obvious, so buckle up.\n\n To get all messages in a thread, one must specify the channel and timestamp of the parent (first) message of that thread, basically its ID.\n\n One complication is that threads can be updated at any time in the future. Therefore, if we wanted to comprehensively sync data i.e: get every\n single response in a thread, we'd have to read every message in the slack instance every time we ran a sync, because otherwise there is no\n way to guarantee that a thread deep in the past didn't receive a new message.\n\n A pragmatic workaround is to say we want threads to be at least N days fresh i.e: look back N days into the past, get every message since,\n and read all of the thread responses. This is essentially the approach we're taking here via slicing: create slices from N days into the\n past and read all messages in threads since then. We could optionally filter out records we have already read, but that's omitted to keep\n the logic simple to reason about.\n\n Good luck.\n "
stream_state = (stream_state or {})
channels_stream = Channels(authenticator=self.authenticator)
if (self.cursor_field in stream_state):
messages_start_date = (pendulum.from_timestamp(stream_state[self.cursor_field]) - self.messages_lookback_window)
else:
messages_start_date = pendulum.from_timestamp(self._start_ts)
messages_stream = ChannelMessages(authenticator=self.authenticator, default_start_date=messages_start_date)
for message_chunk in messages_stream.stream_slices(stream_state={self.cursor_field: messages_start_date.timestamp()}):
self.logger.info(f'Syncing replies {message_chunk}')
for channel in channels_stream.read_records(sync_mode=SyncMode.full_refresh):
message_chunk['channel'] = channel['id']
for message in messages_stream.read_records(sync_mode=SyncMode.full_refresh, stream_slice=message_chunk):
(yield {'channel': channel['id'], self.cursor_field: message[self.primary_key]}) |
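A small sketch of the lookback computation described in the docstring above: the saved cursor is rewound by the freshness window before threads are re-read. The cursor field name and the 7-day window are assumptions for illustration.

```python
import pendulum

messages_lookback_window = pendulum.duration(days=7)          # assumed freshness window
stream_state = {'float_ts': pendulum.now().subtract(days=2).timestamp()}  # hypothetical saved cursor

# Rewind the cursor so threads updated within the window are re-read in full.
messages_start_date = pendulum.from_timestamp(stream_state['float_ts']) - messages_lookback_window
print(messages_start_date)  # roughly 9 days ago in this example
```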
def run_dask_function(config):
'Start a Dask Cluster using dask-kubernetes and run a function.\n\n Talks to kubernetes to create `n` amount of new `pods` with a dask worker inside of each\n forming a `dask` cluster. Then, a function specified from `config` is being imported and\n run with the given arguments. The tasks created by this `function` are being run on the\n `dask` cluster for distributed computation.\n\n The config dict must contain the following sections:\n * run\n * dask_cluster\n * output\n\n Args:\n config (dict):\n Config dictionary.\n '
output_conf = config.get('output')
if output_conf:
path = output_conf.get('path')
if (not path):
raise ValueError('An output path must be provided when providing `output`.')
cluster_spec = _generate_cluster_spec(config, kubernetes=False)
cluster = KubeCluster.from_dict(cluster_spec)
workers = config['dask_cluster'].get('workers')
if (not workers):
cluster.adapt()
elif isinstance(workers, int):
cluster.scale(workers)
else:
cluster.adapt(**workers)
client = Client(cluster)
client.get_versions(check=True)
try:
run = _import_function(config['run'])
kwargs = config['run']['args']
results = run(**kwargs)
finally:
client.close()
cluster.close()
if output_conf:
bucket = output_conf.get('bucket')
try:
if bucket:
aws_key = output_conf.get('key')
aws_secret = output_conf.get('secret_key')
_upload_to_s3(bucket, path, results, aws_key, aws_secret)
else:
os.makedirs(os.path.dirname(path), exist_ok=True)
results.to_csv(path)
except Exception:
print('Error storing results. Falling back to console dump.')
print(_df_to_csv_str(results))
else:
return results | -2,584,494,591,846,567,000 | Start a Dask Cluster using dask-kubernetes and run a function.
Talks to kubernetes to create `n` amount of new `pods` with a dask worker inside of each
forming a `dask` cluster. Then, a function specified from `config` is being imported and
run with the given arguments. The tasks created by this `function` are being run on the
`dask` cluster for distributed computation.
The config dict must contain the following sections:
* run
* dask_cluster
* output
Args:
config (dict):
Config dictionary. | benchmark/btb_benchmark/kubernetes.py | run_dask_function | HDI-Project/BTB | python | def run_dask_function(config):
'Start a Dask Cluster using dask-kubernetes and run a function.\n\n Talks to kubernetes to create `n` amount of new `pods` with a dask worker inside of each\n forming a `dask` cluster. Then, a function specified from `config` is being imported and\n run with the given arguments. The tasks created by this `function` are being run on the\n `dask` cluster for distributed computation.\n\n The config dict must contain the following sections:\n * run\n * dask_cluster\n * output\n\n Args:\n config (dict):\n Config dictionary.\n '
output_conf = config.get('output')
if output_conf:
path = output_conf.get('path')
if (not path):
raise ValueError('An output path must be provided when providing `output`.')
cluster_spec = _generate_cluster_spec(config, kubernetes=False)
cluster = KubeCluster.from_dict(cluster_spec)
workers = config['dask_cluster'].get('workers')
if (not workers):
cluster.adapt()
elif isinstance(workers, int):
cluster.scale(workers)
else:
cluster.adapt(**workers)
client = Client(cluster)
client.get_versions(check=True)
try:
run = _import_function(config['run'])
kwargs = config['run']['args']
results = run(**kwargs)
finally:
client.close()
cluster.close()
if output_conf:
bucket = output_conf.get('bucket')
try:
if bucket:
aws_key = output_conf.get('key')
aws_secret = output_conf.get('secret_key')
_upload_to_s3(bucket, path, results, aws_key, aws_secret)
else:
os.makedirs(os.path.dirname(path), exist_ok=True)
results.to_csv(path)
except Exception:
print('Error storing results. Falling back to console dump.')
print(_df_to_csv_str(results))
else:
return results |
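An illustrative config dictionary with the three sections the docstring names (`run`, `dask_cluster`, `output`). The key holding the import path inside `run`, the worker count, and the output location are assumptions, not values taken from the project.

```python
# Hypothetical config for run_dask_function; only the section names come from the docstring.
config = {
    'run': {
        'function': 'btb_benchmark.main.run_benchmark',  # assumed key/path consumed by _import_function
        'args': {'iterations': 10},                       # keyword arguments forwarded to the function
    },
    'dask_cluster': {
        'workers': 4,        # int -> cluster.scale(4); dict -> cluster.adapt(**dict); omit to auto-adapt
    },
    'output': {
        'path': 'results/benchmark.csv',  # required whenever an "output" section is present
    },
}

run_dask_function(config)
# With an "output" section the results are written to disk (or S3 when a
# bucket is given) and nothing is returned; omit "output" to get the
# results back directly.
```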
def run_on_kubernetes(config, namespace='default'):
'Run dask function inside a pod using the given config.\n\n Create a pod, using the local kubernetes configuration that starts a Dask Cluster\n using dask-kubernetes and runs a function specified within the `config` dictionary.\n\n Args:\n config (dict):\n Config dictionary.\n namespace (str):\n Kubernetes namespace were the pod will be created.\n '
load_kube_config()
c = Configuration()
Configuration.set_default(c)
core_v1 = core_v1_api.CoreV1Api()
spec = _generate_cluster_spec(config, kubernetes=True)
core_v1.create_namespaced_pod(body=spec, namespace=namespace)
print('Pod created.') | 2,622,271,829,564,217,300 | Run dask function inside a pod using the given config.
Create a pod, using the local kubernetes configuration that starts a Dask Cluster
using dask-kubernetes and runs a function specified within the `config` dictionary.
Args:
config (dict):
Config dictionary.
namespace (str):
Kubernetes namespace where the pod will be created. | benchmark/btb_benchmark/kubernetes.py | run_on_kubernetes | HDI-Project/BTB | python | def run_on_kubernetes(config, namespace='default'):
'Run dask function inside a pod using the given config.\n\n Create a pod, using the local kubernetes configuration that starts a Dask Cluster\n using dask-kubernetes and runs a function specified within the `config` dictionary.\n\n Args:\n config (dict):\n Config dictionary.\n namespace (str):\n Kubernetes namespace were the pod will be created.\n '
load_kube_config()
c = Configuration()
Configuration.set_default(c)
core_v1 = core_v1_api.CoreV1Api()
spec = _generate_cluster_spec(config, kubernetes=True)
core_v1.create_namespaced_pod(body=spec, namespace=namespace)
print('Pod created.') |
def generate_corpus_seeds(*, fuzz_pool, build_dir, seed_dir, targets):
'Generates new corpus seeds.\n\n Run {targets} without input, and outputs the generated corpus seeds to\n {seed_dir}.\n '
logging.info('Generating corpus seeds to {}'.format(seed_dir))
def job(command):
logging.debug("Running '{}'\n".format(' '.join(command)))
logging.debug("Command '{}' output:\n'{}'\n".format(' '.join(command), subprocess.run(command, check=True, stderr=subprocess.PIPE, universal_newlines=True).stderr))
futures = []
for target in targets:
target_seed_dir = os.path.join(seed_dir, target)
os.makedirs(target_seed_dir, exist_ok=True)
command = [os.path.join(build_dir, 'src', 'test', 'fuzz', target), '-runs=100000', target_seed_dir]
futures.append(fuzz_pool.submit(job, command))
for future in as_completed(futures):
future.result() | -8,586,212,390,711,142,000 | Generates new corpus seeds.
Run {targets} without input, and outputs the generated corpus seeds to
{seed_dir}. | test/fuzz/test_runner.py | generate_corpus_seeds | BlockMechanic/crown | python | def generate_corpus_seeds(*, fuzz_pool, build_dir, seed_dir, targets):
'Generates new corpus seeds.\n\n Run {targets} without input, and outputs the generated corpus seeds to\n {seed_dir}.\n '
logging.info('Generating corpus seeds to {}'.format(seed_dir))
def job(command):
logging.debug("Running '{}'\n".format(' '.join(command)))
logging.debug("Command '{}' output:\n'{}'\n".format(' '.join(command), subprocess.run(command, check=True, stderr=subprocess.PIPE, universal_newlines=True).stderr))
futures = []
for target in targets:
target_seed_dir = os.path.join(seed_dir, target)
os.makedirs(target_seed_dir, exist_ok=True)
command = [os.path.join(build_dir, 'src', 'test', 'fuzz', target), '-runs=100000', target_seed_dir]
futures.append(fuzz_pool.submit(job, command))
for future in as_completed(futures):
future.result() |
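A usage sketch for `generate_corpus_seeds`; the thread pool size, build directory, and target names are placeholders, and the fuzz binaries are assumed to already exist under `<build_dir>/src/test/fuzz/`.

```python
from concurrent.futures import ThreadPoolExecutor

# Hypothetical invocation: generate seeds for two fuzz targets built under ./build.
with ThreadPoolExecutor(max_workers=4) as fuzz_pool:
    generate_corpus_seeds(
        fuzz_pool=fuzz_pool,
        build_dir='build',
        seed_dir='qa-assets/fuzz_seed_corpus',
        targets=['process_message', 'script_flags'],  # example target names
    )
```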
def get_event_categories(source_type: Optional[str]=None, opts: Optional[pulumi.InvokeOptions]=None) -> AwaitableGetEventCategoriesResult:
'\n ## Example Usage\n\n List the event categories of all the RDS resources.\n\n ```python\n import pulumi\n import pulumi_aws as aws\n\n example_event_categories = aws.rds.get_event_categories()\n pulumi.export("example", example_event_categories.event_categories)\n ```\n\n List the event categories specific to the RDS resource `db-snapshot`.\n\n ```python\n import pulumi\n import pulumi_aws as aws\n\n example_event_categories = aws.rds.get_event_categories(source_type="db-snapshot")\n pulumi.export("example", example_event_categories.event_categories)\n ```\n\n\n :param str source_type: The type of source that will be generating the events. Valid options are db-instance, db-security-group, db-parameter-group, db-snapshot, db-cluster or db-cluster-snapshot.\n '
__args__ = dict()
__args__['sourceType'] = source_type
if (opts is None):
opts = pulumi.InvokeOptions()
if (opts.version is None):
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws:rds/getEventCategories:getEventCategories', __args__, opts=opts, typ=GetEventCategoriesResult).value
return AwaitableGetEventCategoriesResult(event_categories=__ret__.event_categories, id=__ret__.id, source_type=__ret__.source_type) | 6,330,624,011,440,082,000 | ## Example Usage
List the event categories of all the RDS resources.
```python
import pulumi
import pulumi_aws as aws
example_event_categories = aws.rds.get_event_categories()
pulumi.export("example", example_event_categories.event_categories)
```
List the event categories specific to the RDS resource `db-snapshot`.
```python
import pulumi
import pulumi_aws as aws
example_event_categories = aws.rds.get_event_categories(source_type="db-snapshot")
pulumi.export("example", example_event_categories.event_categories)
```
:param str source_type: The type of source that will be generating the events. Valid options are db-instance, db-security-group, db-parameter-group, db-snapshot, db-cluster or db-cluster-snapshot. | sdk/python/pulumi_aws/rds/get_event_categories.py | get_event_categories | mdop-wh/pulumi-aws | python | def get_event_categories(source_type: Optional[str]=None, opts: Optional[pulumi.InvokeOptions]=None) -> AwaitableGetEventCategoriesResult:
'\n ## Example Usage\n\n List the event categories of all the RDS resources.\n\n ```python\n import pulumi\n import pulumi_aws as aws\n\n example_event_categories = aws.rds.get_event_categories()\n pulumi.export("example", example_event_categories.event_categories)\n ```\n\n List the event categories specific to the RDS resource `db-snapshot`.\n\n ```python\n import pulumi\n import pulumi_aws as aws\n\n example_event_categories = aws.rds.get_event_categories(source_type="db-snapshot")\n pulumi.export("example", example_event_categories.event_categories)\n ```\n\n\n :param str source_type: The type of source that will be generating the events. Valid options are db-instance, db-security-group, db-parameter-group, db-snapshot, db-cluster or db-cluster-snapshot.\n '
__args__ = dict()
__args__['sourceType'] = source_type
if (opts is None):
opts = pulumi.InvokeOptions()
if (opts.version is None):
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws:rds/getEventCategories:getEventCategories', __args__, opts=opts, typ=GetEventCategoriesResult).value
return AwaitableGetEventCategoriesResult(event_categories=__ret__.event_categories, id=__ret__.id, source_type=__ret__.source_type) |
@property
@pulumi.getter(name='eventCategories')
def event_categories(self) -> List[str]:
'\n A list of the event categories.\n '
return pulumi.get(self, 'event_categories') | 7,065,916,001,102,644,000 | A list of the event categories. | sdk/python/pulumi_aws/rds/get_event_categories.py | event_categories | mdop-wh/pulumi-aws | python | @property
@pulumi.getter(name='eventCategories')
def event_categories(self) -> List[str]:
'\n \n '
return pulumi.get(self, 'event_categories') |
@property
@pulumi.getter
def id(self) -> str:
'\n The provider-assigned unique ID for this managed resource.\n '
return pulumi.get(self, 'id') | 3,214,403,723,836,065,300 | The provider-assigned unique ID for this managed resource. | sdk/python/pulumi_aws/rds/get_event_categories.py | id | mdop-wh/pulumi-aws | python | @property
@pulumi.getter
def id(self) -> str:
'\n \n '
return pulumi.get(self, 'id') |
def model_fn(model_dir):
'Load the PyTorch model from the `model_dir` directory.'
print('Loading model.')
model_info = {}
model_info_path = os.path.join(model_dir, 'model_info.pth')
with open(model_info_path, 'rb') as f:
model_info = torch.load(f)
print('model_info: {}'.format(model_info))
device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'))
model = BinaryClassifier(model_info['input_features'], model_info['hidden_dim'], model_info['output_dim'])
model_path = os.path.join(model_dir, 'model.pth')
with open(model_path, 'rb') as f:
model.load_state_dict(torch.load(f))
model.to(device).eval()
print('Done loading model.')
return model | 7,045,043,767,301,961,000 | Load the PyTorch model from the `model_dir` directory. | Project_Plagiarism_Detection/source_pytorch/train.py | model_fn | ngocpc/Project_Plagiarism_Detection | python | def model_fn(model_dir):
print('Loading model.')
model_info = {}
model_info_path = os.path.join(model_dir, 'model_info.pth')
with open(model_info_path, 'rb') as f:
model_info = torch.load(f)
print('model_info: {}'.format(model_info))
device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'))
model = BinaryClassifier(model_info['input_features'], model_info['hidden_dim'], model_info['output_dim'])
model_path = os.path.join(model_dir, 'model.pth')
with open(model_path, 'rb') as f:
model.load_state_dict(torch.load(f))
model.to(device).eval()
print('Done loading model.')
return model |
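For context, a hedged sketch of the artifacts `model_fn` expects: a training entry point would typically write `model_info.pth` and `model.pth` along these lines. The helper name and argument names are assumptions; only the file names and dictionary keys come from `model_fn` itself.

```python
import os
import torch

def save_model_artifacts(model, model_dir, input_features, hidden_dim, output_dim):
    """Sketch of the save step that produces the files model_fn loads."""
    # Hyperparameters needed to rebuild BinaryClassifier at load time.
    model_info = {
        'input_features': input_features,
        'hidden_dim': hidden_dim,
        'output_dim': output_dim,
    }
    with open(os.path.join(model_dir, 'model_info.pth'), 'wb') as f:
        torch.save(model_info, f)
    # Weights only; model_fn rebuilds the architecture from model_info.
    with open(os.path.join(model_dir, 'model.pth'), 'wb') as f:
        torch.save(model.cpu().state_dict(), f)
```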
def train(model, train_loader, epochs, criterion, optimizer, device):
'\n This is the training method that is called by the PyTorch training script. The parameters\n passed are as follows:\n model - The PyTorch model that we wish to train.\n train_loader - The PyTorch DataLoader that should be used during training.\n epochs - The total number of epochs to train for.\n criterion - The loss function used for training. \n optimizer - The optimizer to use during training.\n device - Where the model and data should be loaded (gpu or cpu).\n '
for epoch in range(1, (epochs + 1)):
model.train()
total_loss = 0
for batch in train_loader:
(batch_x, batch_y) = batch
batch_x = batch_x.to(device)
batch_y = batch_y.to(device)
optimizer.zero_grad()
y_pred = model(batch_x)
loss = criterion(y_pred, batch_y)
loss.backward()
optimizer.step()
total_loss += loss.data.item()
print('Epoch: {}, Loss: {}'.format(epoch, (total_loss / len(train_loader)))) | -4,570,357,159,223,916,000 | This is the training method that is called by the PyTorch training script. The parameters
passed are as follows:
model - The PyTorch model that we wish to train.
train_loader - The PyTorch DataLoader that should be used during training.
epochs - The total number of epochs to train for.
criterion - The loss function used for training.
optimizer - The optimizer to use during training.
device - Where the model and data should be loaded (gpu or cpu). | Project_Plagiarism_Detection/source_pytorch/train.py | train | ngocpc/Project_Plagiarism_Detection | python | def train(model, train_loader, epochs, criterion, optimizer, device):
'\n This is the training method that is called by the PyTorch training script. The parameters\n passed are as follows:\n model - The PyTorch model that we wish to train.\n train_loader - The PyTorch DataLoader that should be used during training.\n epochs - The total number of epochs to train for.\n criterion - The loss function used for training. \n optimizer - The optimizer to use during training.\n device - Where the model and data should be loaded (gpu or cpu).\n '
for epoch in range(1, (epochs + 1)):
model.train()
total_loss = 0
for batch in train_loader:
(batch_x, batch_y) = batch
batch_x = batch_x.to(device)
batch_y = batch_y.to(device)
optimizer.zero_grad()
y_pred = model(batch_x)
loss = criterion(y_pred, batch_y)
loss.backward()
optimizer.step()
total_loss += loss.data.item()
print('Epoch: {}, Loss: {}'.format(epoch, (total_loss / len(train_loader)))) |
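A minimal sketch of wiring up `train` with standard PyTorch pieces. The random tensors stand in for the plagiarism feature vectors, and the choice of `BCELoss` assumes the classifier ends in a sigmoid.

```python
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Random stand-in data: 100 samples, 3 similarity features, binary labels.
features = torch.randn(100, 3)
labels = torch.randint(0, 2, (100, 1)).float()
train_loader = DataLoader(TensorDataset(features, labels), batch_size=10, shuffle=True)

model = BinaryClassifier(3, 10, 1).to(device)     # input_features, hidden_dim, output_dim
criterion = nn.BCELoss()                          # assumption: model output is already a probability
optimizer = optim.Adam(model.parameters(), lr=0.001)

train(model, train_loader, epochs=10, criterion=criterion, optimizer=optimizer, device=device)
```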
def available(self):
'True if the solver is available'
return self.executable(self.path) | 4,336,824,989,999,015,000 | True if the solver is available | pulp/apis/gurobi_api.py | available | KCachel/pulp | python | def available(self):
return self.executable(self.path) |
def actualSolve(self, lp):
'Solve a well formulated lp problem'
if ('GUROBI_HOME' in os.environ):
if ('LD_LIBRARY_PATH' not in os.environ):
os.environ['LD_LIBRARY_PATH'] = ''
os.environ['LD_LIBRARY_PATH'] += ((':' + os.environ['GUROBI_HOME']) + '/lib')
if (not self.executable(self.path)):
raise PulpSolverError(('PuLP: cannot execute ' + self.path))
if (not self.keepFiles):
uuid = uuid4().hex
tmpLp = os.path.join(self.tmpDir, ('%s-pulp.lp' % uuid))
tmpSol = os.path.join(self.tmpDir, ('%s-pulp.sol' % uuid))
tmpMst = os.path.join(self.tmpDir, ('%s-pulp.mst' % uuid))
else:
tmpLp = (lp.name + '-pulp.lp')
tmpSol = (lp.name + '-pulp.sol')
tmpMst = (lp.name + '-pulp.mst')
vs = lp.writeLP(tmpLp, writeSOS=1)
try:
os.remove(tmpSol)
except:
pass
cmd = self.path
cmd += (' ' + ' '.join([('%s=%s' % (key, value)) for (key, value) in self.options]))
cmd += (' ResultFile=%s' % tmpSol)
if self.mip_start:
self.writesol(filename=tmpMst, vs=vs)
cmd += (' InputFile=%s' % tmpMst)
if lp.isMIP():
if (not self.mip):
warnings.warn('GUROBI_CMD does not allow a problem to be relaxed')
cmd += (' %s' % tmpLp)
if self.msg:
pipe = None
else:
pipe = open(os.devnull, 'w')
return_code = subprocess.call(cmd.split(), stdout=pipe, stderr=pipe)
if (pipe is not None):
pipe.close()
if (return_code != 0):
raise PulpSolverError(('PuLP: Error while trying to execute ' + self.path))
if (not os.path.exists(tmpSol)):
warnings.warn('GUROBI_CMD does provide good solution status of non optimal solutions')
status = constants.LpStatusNotSolved
values = reducedCosts = shadowPrices = slacks = None
else:
(status, values, reducedCosts, shadowPrices, slacks) = self.readsol(tmpSol)
if (not self.keepFiles):
for f in [tmpSol, tmpMst, tmpLp, 'gurobi.log']:
try:
os.remove(f)
except:
pass
if (status != constants.LpStatusInfeasible):
lp.assignVarsVals(values)
lp.assignVarsDj(reducedCosts)
lp.assignConsPi(shadowPrices)
lp.assignConsSlack(slacks)
lp.assignStatus(status)
return status | -8,829,734,950,521,837,000 | Solve a well formulated lp problem | pulp/apis/gurobi_api.py | actualSolve | KCachel/pulp | python | def actualSolve(self, lp):
if ('GUROBI_HOME' in os.environ):
if ('LD_LIBRARY_PATH' not in os.environ):
os.environ['LD_LIBRARY_PATH'] = ''
os.environ['LD_LIBRARY_PATH'] += ((':' + os.environ['GUROBI_HOME']) + '/lib')
if (not self.executable(self.path)):
raise PulpSolverError(('PuLP: cannot execute ' + self.path))
if (not self.keepFiles):
uuid = uuid4().hex
tmpLp = os.path.join(self.tmpDir, ('%s-pulp.lp' % uuid))
tmpSol = os.path.join(self.tmpDir, ('%s-pulp.sol' % uuid))
tmpMst = os.path.join(self.tmpDir, ('%s-pulp.mst' % uuid))
else:
tmpLp = (lp.name + '-pulp.lp')
tmpSol = (lp.name + '-pulp.sol')
tmpMst = (lp.name + '-pulp.mst')
vs = lp.writeLP(tmpLp, writeSOS=1)
try:
os.remove(tmpSol)
except:
pass
cmd = self.path
cmd += (' ' + ' '.join([('%s=%s' % (key, value)) for (key, value) in self.options]))
cmd += (' ResultFile=%s' % tmpSol)
if self.mip_start:
self.writesol(filename=tmpMst, vs=vs)
cmd += (' InputFile=%s' % tmpMst)
if lp.isMIP():
if (not self.mip):
warnings.warn('GUROBI_CMD does not allow a problem to be relaxed')
cmd += (' %s' % tmpLp)
if self.msg:
pipe = None
else:
pipe = open(os.devnull, 'w')
return_code = subprocess.call(cmd.split(), stdout=pipe, stderr=pipe)
if (pipe is not None):
pipe.close()
if (return_code != 0):
raise PulpSolverError(('PuLP: Error while trying to execute ' + self.path))
if (not os.path.exists(tmpSol)):
warnings.warn('GUROBI_CMD does provide good solution status of non optimal solutions')
status = constants.LpStatusNotSolved
values = reducedCosts = shadowPrices = slacks = None
else:
(status, values, reducedCosts, shadowPrices, slacks) = self.readsol(tmpSol)
if (not self.keepFiles):
for f in [tmpSol, tmpMst, tmpLp, 'gurobi.log']:
try:
os.remove(f)
except:
pass
if (status != constants.LpStatusInfeasible):
lp.assignVarsVals(values)
lp.assignVarsDj(reducedCosts)
lp.assignConsPi(shadowPrices)
lp.assignConsSlack(slacks)
lp.assignStatus(status)
return status |
def readsol(self, filename):
'Read a Gurobi solution file'
with open(filename) as my_file:
try:
next(my_file)
except StopIteration:
warnings.warn('GUROBI_CMD does provide good solution status of non optimal solutions')
status = constants.LpStatusNotSolved
return (status, {}, {}, {}, {})
status = constants.LpStatusOptimal
shadowPrices = {}
slacks = {}
shadowPrices = {}
slacks = {}
values = {}
reducedCosts = {}
for line in my_file:
if (line[0] != '#'):
(name, value) = line.split()
values[name] = float(value)
return (status, values, reducedCosts, shadowPrices, slacks) | -1,829,265,237,492,473,000 | Read a Gurobi solution file | pulp/apis/gurobi_api.py | readsol | KCachel/pulp | python | def readsol(self, filename):
with open(filename) as my_file:
try:
next(my_file)
except StopIteration:
warnings.warn('GUROBI_CMD does provide good solution status of non optimal solutions')
status = constants.LpStatusNotSolved
return (status, {}, {}, {}, {})
status = constants.LpStatusOptimal
shadowPrices = {}
slacks = {}
shadowPrices = {}
slacks = {}
values = {}
reducedCosts = {}
for line in my_file:
if (line[0] != '#'):
(name, value) = line.split()
values[name] = float(value)
return (status, values, reducedCosts, shadowPrices, slacks) |
def writesol(self, filename, vs):
'Writes a GUROBI solution file'
values = [(v.name, v.value()) for v in vs if (v.value() is not None)]
rows = []
for (name, value) in values:
rows.append('{} {}'.format(name, value))
with open(filename, 'w') as f:
f.write('\n'.join(rows))
return True | -5,954,528,396,085,368,000 | Writes a GUROBI solution file | pulp/apis/gurobi_api.py | writesol | KCachel/pulp | python | def writesol(self, filename, vs):
values = [(v.name, v.value()) for v in vs if (v.value() is not None)]
rows = []
for (name, value) in values:
rows.append('{} {}'.format(name, value))
with open(filename, 'w') as f:
f.write('\n'.join(rows))
return True |
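Both helpers assume a plain `name value` layout: `readsol` skips the first line and `#` comments, and `writesol` emits one `name value` pair per line. A rough illustration, assuming `GUROBI_CMD` can be instantiated without the `gurobi_cl` binary present; the file contents are fabricated.

```python
# Fabricated solution file in the layout readsol expects.
sample_sol = """# Objective value = 34
x_1 1
x_2 0
y_total 3.5
"""
with open('example-pulp.sol', 'w') as f:
    f.write(sample_sol)

status, values, _, _, _ = GUROBI_CMD().readsol('example-pulp.sol')
print(values)  # {'x_1': 1.0, 'x_2': 0.0, 'y_total': 3.5}
```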
def __init__(self, mip=True, msg=True, timeLimit=None, epgap=None, **solverParams):
'\n Initializes the Gurobi solver.\n\n @param mip: if False the solver will solve a MIP as an LP\n @param msg: displays information from the solver to stdout\n @param timeLimit: sets the maximum time for solution\n @param epgap: sets the integer bound gap\n '
LpSolver.__init__(self, mip, msg)
self.timeLimit = timeLimit
self.epgap = epgap
if (not self.msg):
gurobipy.setParam('OutputFlag', 0)
for (key, value) in solverParams.items():
gurobipy.setParam(key, value) | -6,638,805,552,735,199,000 | Initializes the Gurobi solver.
@param mip: if False the solver will solve a MIP as an LP
@param msg: displays information from the solver to stdout
@param timeLimit: sets the maximum time for solution
@param epgap: sets the integer bound gap | pulp/apis/gurobi_api.py | __init__ | KCachel/pulp | python | def __init__(self, mip=True, msg=True, timeLimit=None, epgap=None, **solverParams):
'\n Initializes the Gurobi solver.\n\n @param mip: if False the solver will solve a MIP as an LP\n @param msg: displays information from the solver to stdout\n @param timeLimit: sets the maximum time for solution\n @param epgap: sets the integer bound gap\n '
LpSolver.__init__(self, mip, msg)
self.timeLimit = timeLimit
self.epgap = epgap
if (not self.msg):
gurobipy.setParam('OutputFlag', 0)
for (key, value) in solverParams.items():
gurobipy.setParam(key, value) |
def available(self):
'True if the solver is available'
return True | -8,466,514,769,147,015,000 | True if the solver is available | pulp/apis/gurobi_api.py | available | KCachel/pulp | python | def available(self):
return True |
def callSolver(self, lp, callback=None):
'Solves the problem with gurobi\n '
self.solveTime = (- clock())
lp.solverModel.optimize(callback=callback)
self.solveTime += clock() | 8,155,221,425,289,192,000 | Solves the problem with gurobi | pulp/apis/gurobi_api.py | callSolver | KCachel/pulp | python | def callSolver(self, lp, callback=None):
'\n '
self.solveTime = (- clock())
lp.solverModel.optimize(callback=callback)
self.solveTime += clock() |
def buildSolverModel(self, lp):
'\n Takes the pulp lp model and translates it into a gurobi model\n '
log.debug('create the gurobi model')
lp.solverModel = gurobipy.Model(lp.name)
log.debug('set the sense of the problem')
if (lp.sense == constants.LpMaximize):
lp.solverModel.setAttr('ModelSense', (- 1))
if self.timeLimit:
lp.solverModel.setParam('TimeLimit', self.timeLimit)
if self.epgap:
lp.solverModel.setParam('MIPGap', self.epgap)
log.debug('add the variables to the problem')
for var in lp.variables():
lowBound = var.lowBound
if (lowBound is None):
lowBound = (- gurobipy.GRB.INFINITY)
upBound = var.upBound
if (upBound is None):
upBound = gurobipy.GRB.INFINITY
obj = lp.objective.get(var, 0.0)
varType = gurobipy.GRB.CONTINUOUS
if ((var.cat == constants.LpInteger) and self.mip):
varType = gurobipy.GRB.INTEGER
var.solverVar = lp.solverModel.addVar(lowBound, upBound, vtype=varType, obj=obj, name=var.name)
lp.solverModel.update()
log.debug('add the Constraints to the problem')
for (name, constraint) in lp.constraints.items():
expr = gurobipy.LinExpr(list(constraint.values()), [v.solverVar for v in constraint.keys()])
if (constraint.sense == constants.LpConstraintLE):
relation = gurobipy.GRB.LESS_EQUAL
elif (constraint.sense == constants.LpConstraintGE):
relation = gurobipy.GRB.GREATER_EQUAL
elif (constraint.sense == constants.LpConstraintEQ):
relation = gurobipy.GRB.EQUAL
else:
raise PulpSolverError('Detected an invalid constraint type')
constraint.solverConstraint = lp.solverModel.addConstr(expr, relation, (- constraint.constant), name)
lp.solverModel.update() | -7,679,258,590,792,931,000 | Takes the pulp lp model and translates it into a gurobi model | pulp/apis/gurobi_api.py | buildSolverModel | KCachel/pulp | python | def buildSolverModel(self, lp):
'\n \n '
log.debug('create the gurobi model')
lp.solverModel = gurobipy.Model(lp.name)
log.debug('set the sense of the problem')
if (lp.sense == constants.LpMaximize):
lp.solverModel.setAttr('ModelSense', (- 1))
if self.timeLimit:
lp.solverModel.setParam('TimeLimit', self.timeLimit)
if self.epgap:
lp.solverModel.setParam('MIPGap', self.epgap)
log.debug('add the variables to the problem')
for var in lp.variables():
lowBound = var.lowBound
if (lowBound is None):
lowBound = (- gurobipy.GRB.INFINITY)
upBound = var.upBound
if (upBound is None):
upBound = gurobipy.GRB.INFINITY
obj = lp.objective.get(var, 0.0)
varType = gurobipy.GRB.CONTINUOUS
if ((var.cat == constants.LpInteger) and self.mip):
varType = gurobipy.GRB.INTEGER
var.solverVar = lp.solverModel.addVar(lowBound, upBound, vtype=varType, obj=obj, name=var.name)
lp.solverModel.update()
log.debug('add the Constraints to the problem')
for (name, constraint) in lp.constraints.items():
expr = gurobipy.LinExpr(list(constraint.values()), [v.solverVar for v in constraint.keys()])
if (constraint.sense == constants.LpConstraintLE):
relation = gurobipy.GRB.LESS_EQUAL
elif (constraint.sense == constants.LpConstraintGE):
relation = gurobipy.GRB.GREATER_EQUAL
elif (constraint.sense == constants.LpConstraintEQ):
relation = gurobipy.GRB.EQUAL
else:
raise PulpSolverError('Detected an invalid constraint type')
constraint.solverConstraint = lp.solverModel.addConstr(expr, relation, (- constraint.constant), name)
lp.solverModel.update() |
def actualSolve(self, lp, callback=None):
'\n Solve a well formulated lp problem\n\n creates a gurobi model, variables and constraints and attaches\n them to the lp model which it then solves\n '
self.buildSolverModel(lp)
log.debug('Solve the Model using gurobi')
self.callSolver(lp, callback=callback)
solutionStatus = self.findSolutionValues(lp)
for var in lp.variables():
var.modified = False
for constraint in lp.constraints.values():
constraint.modified = False
return solutionStatus | 5,262,698,481,114,730,000 | Solve a well formulated lp problem
creates a gurobi model, variables and constraints and attaches
them to the lp model which it then solves | pulp/apis/gurobi_api.py | actualSolve | KCachel/pulp | python | def actualSolve(self, lp, callback=None):
'\n Solve a well formulated lp problem\n\n creates a gurobi model, variables and constraints and attaches\n them to the lp model which it then solves\n '
self.buildSolverModel(lp)
log.debug('Solve the Model using gurobi')
self.callSolver(lp, callback=callback)
solutionStatus = self.findSolutionValues(lp)
for var in lp.variables():
var.modified = False
for constraint in lp.constraints.values():
constraint.modified = False
return solutionStatus |
def actualResolve(self, lp, callback=None):
'\n Solve a well formulated lp problem\n\n uses the old solver and modifies the rhs of the modified constraints\n '
log.debug('Resolve the Model using gurobi')
for constraint in lp.constraints.values():
if constraint.modified:
constraint.solverConstraint.setAttr(gurobipy.GRB.Attr.RHS, (- constraint.constant))
lp.solverModel.update()
self.callSolver(lp, callback=callback)
solutionStatus = self.findSolutionValues(lp)
for var in lp.variables():
var.modified = False
for constraint in lp.constraints.values():
constraint.modified = False
return solutionStatus | 2,901,966,951,382,373,400 | Solve a well formulated lp problem
uses the old solver and modifies the rhs of the modified constraints | pulp/apis/gurobi_api.py | actualResolve | KCachel/pulp | python | def actualResolve(self, lp, callback=None):
'\n Solve a well formulated lp problem\n\n uses the old solver and modifies the rhs of the modified constraints\n '
log.debug('Resolve the Model using gurobi')
for constraint in lp.constraints.values():
if constraint.modified:
constraint.solverConstraint.setAttr(gurobipy.GRB.Attr.RHS, (- constraint.constant))
lp.solverModel.update()
self.callSolver(lp, callback=callback)
solutionStatus = self.findSolutionValues(lp)
for var in lp.variables():
var.modified = False
for constraint in lp.constraints.values():
constraint.modified = False
return solutionStatus |
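An end-to-end sketch of driving the gurobipy-backed solver above from pulp (requires gurobipy and a Gurobi license); the model is a toy. The constructor arguments match the `__init__` signature shown earlier.

```python
import pulp

# Toy LP solved through the direct gurobipy interface.
prob = pulp.LpProblem('toy', pulp.LpMaximize)
x = pulp.LpVariable('x', lowBound=0, upBound=10)
y = pulp.LpVariable('y', lowBound=0, upBound=10)
prob += 3 * x + 2 * y        # objective
prob += x + y <= 12          # constraint

status = prob.solve(pulp.GUROBI(msg=True, timeLimit=60, epgap=0.01))
print(pulp.LpStatus[status], x.value(), y.value())
```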
def available(self):
'True if the solver is available'
return False | 3,812,336,950,956,385,300 | True if the solver is available | pulp/apis/gurobi_api.py | available | KCachel/pulp | python | def available(self):
return False |
def actualSolve(self, lp, callback=None):
'Solve a well formulated lp problem'
raise PulpSolverError('GUROBI: Not Available') | 7,349,007,719,746,268,000 | Solve a well formulated lp problem | pulp/apis/gurobi_api.py | actualSolve | KCachel/pulp | python | def actualSolve(self, lp, callback=None):
raise PulpSolverError('GUROBI: Not Available') |
def model_architecture(self, num_features, num_actions, max_history_len):
'Build a keras model and return a compiled model.\n\n :param max_history_len: The maximum number of historical\n turns used to decide on next action\n '
from keras.layers import LSTM, Activation, Masking, Dense
from keras.models import Sequential
n_hidden = 32
batch_shape = (None, max_history_len, num_features)
model = Sequential()
model.add(Masking((- 1), batch_input_shape=batch_shape))
model.add(LSTM(n_hidden, batch_input_shape=batch_shape, dropout=0.2))
model.add(Dense(input_dim=n_hidden, units=num_actions))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
logger.debug(model.summary())
return model | 2,872,324,262,310,207,500 | Build a keras model and return a compiled model.
:param max_history_len: The maximum number of historical
turns used to decide on next action | rasa_core/policies/keras_policy.py | model_architecture | AdrianAdamiec/rasa_core | python | def model_architecture(self, num_features, num_actions, max_history_len):
'Build a keras model and return a compiled model.\n\n :param max_history_len: The maximum number of historical\n turns used to decide on next action\n '
from keras.layers import LSTM, Activation, Masking, Dense
from keras.models import Sequential
n_hidden = 32
batch_shape = (None, max_history_len, num_features)
model = Sequential()
model.add(Masking((- 1), batch_input_shape=batch_shape))
model.add(LSTM(n_hidden, batch_input_shape=batch_shape, dropout=0.2))
model.add(Dense(input_dim=n_hidden, units=num_actions))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
logger.debug(model.summary())
return model |
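A quick sketch instantiating the model this method builds; the feature/action counts are arbitrary and it is assumed the policy's default constructor is usable here.

```python
# Hypothetical dimensions for a small dialogue domain.
policy = KerasPolicy()  # assumption: default constructor suffices for building the graph
model = policy.model_architecture(num_features=30, num_actions=10, max_history_len=5)
model.summary()
# Each sample is a (5, 30) window of featurized turns; the output is a
# softmax over the 10 possible actions.
```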
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
'3x3 convolution with padding'
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=dilation, groups=groups, bias=False, dilation=dilation) | 5,146,063,946,393,382,000 | 3x3 convolution with padding | segmentation_models_pytorch/encoders/zerocenter.py | conv3x3 | vinnamkim/segmentation_models.pytorch | python | def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=dilation, groups=groups, bias=False, dilation=dilation) |
def conv1x1(in_planes, out_planes, stride=1):
'1x1 convolution'
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) | 2,748,212,586,768,409,000 | 1x1 convolution | segmentation_models_pytorch/encoders/zerocenter.py | conv1x1 | vinnamkim/segmentation_models.pytorch | python | def conv1x1(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) |
def resnet18(pretrained=False, progress=True, **kwargs):
'ResNet-18 model from\n `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n '
return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress, **kwargs) | 3,628,755,405,227,026,400 | ResNet-18 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr | segmentation_models_pytorch/encoders/zerocenter.py | resnet18 | vinnamkim/segmentation_models.pytorch | python | def resnet18(pretrained=False, progress=True, **kwargs):
'ResNet-18 model from\n `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n '
return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress, **kwargs) |
def resnet34(pretrained=False, progress=True, **kwargs):
'ResNet-34 model from\n `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n '
return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress, **kwargs) | 2,970,724,482,834,467,300 | ResNet-34 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr | segmentation_models_pytorch/encoders/zerocenter.py | resnet34 | vinnamkim/segmentation_models.pytorch | python | def resnet34(pretrained=False, progress=True, **kwargs):
'ResNet-34 model from\n `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n '
return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress, **kwargs) |
def resnet50(pretrained=False, progress=True, **kwargs):
'ResNet-50 model from\n `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n '
return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs) | 8,498,227,001,201,233,000 | ResNet-50 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr | segmentation_models_pytorch/encoders/zerocenter.py | resnet50 | vinnamkim/segmentation_models.pytorch | python | def resnet50(pretrained=False, progress=True, **kwargs):
'ResNet-50 model from\n `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n '
return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs) |
def resnet101(pretrained=False, progress=True, **kwargs):
'ResNet-101 model from\n `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n '
return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs) | -4,235,029,202,871,245,000 | ResNet-101 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr | segmentation_models_pytorch/encoders/zerocenter.py | resnet101 | vinnamkim/segmentation_models.pytorch | python | def resnet101(pretrained=False, progress=True, **kwargs):
'ResNet-101 model from\n `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n '
return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs) |
def resnet152(pretrained=False, progress=True, **kwargs):
'ResNet-152 model from\n `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n '
return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress, **kwargs) | -1,663,645,882,722,182,100 | ResNet-152 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr | segmentation_models_pytorch/encoders/zerocenter.py | resnet152 | vinnamkim/segmentation_models.pytorch | python | def resnet152(pretrained=False, progress=True, **kwargs):
'ResNet-152 model from\n `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n '
return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress, **kwargs) |
def resnext50_32x4d(pretrained=False, progress=True, **kwargs):
'ResNeXt-50 32x4d model from\n `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n '
kwargs['groups'] = 32
kwargs['width_per_group'] = 4
return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs) | -4,696,768,546,196,444,000 | ResNeXt-50 32x4d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr | segmentation_models_pytorch/encoders/zerocenter.py | resnext50_32x4d | vinnamkim/segmentation_models.pytorch | python | def resnext50_32x4d(pretrained=False, progress=True, **kwargs):
'ResNeXt-50 32x4d model from\n `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n '
kwargs['groups'] = 32
kwargs['width_per_group'] = 4
return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs) |
def resnext101_32x8d(pretrained=False, progress=True, **kwargs):
'ResNeXt-101 32x8d model from\n `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n '
kwargs['groups'] = 32
kwargs['width_per_group'] = 8
return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs) | -3,328,597,472,209,807,400 | ResNeXt-101 32x8d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr | segmentation_models_pytorch/encoders/zerocenter.py | resnext101_32x8d | vinnamkim/segmentation_models.pytorch | python | def resnext101_32x8d(pretrained=False, progress=True, **kwargs):
'ResNeXt-101 32x8d model from\n `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n '
kwargs['groups'] = 32
kwargs['width_per_group'] = 8
return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs) |
def wide_resnet50_2(pretrained=False, progress=True, **kwargs):
'Wide ResNet-50-2 model from\n `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_\n\n The model is the same as ResNet except for the bottleneck number of channels\n which is twice larger in every block. The number of channels in outer 1x1\n convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048\n channels, and in Wide ResNet-50-2 has 2048-1024-2048.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n '
kwargs['width_per_group'] = (64 * 2)
return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs) | 2,511,253,809,351,651,300 | Wide ResNet-50-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr | segmentation_models_pytorch/encoders/zerocenter.py | wide_resnet50_2 | vinnamkim/segmentation_models.pytorch | python | def wide_resnet50_2(pretrained=False, progress=True, **kwargs):
'Wide ResNet-50-2 model from\n `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_\n\n The model is the same as ResNet except for the bottleneck number of channels\n which is twice larger in every block. The number of channels in outer 1x1\n convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048\n channels, and in Wide ResNet-50-2 has 2048-1024-2048.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n '
kwargs['width_per_group'] = (64 * 2)
return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs) |
def wide_resnet101_2(pretrained=False, progress=True, **kwargs):
'Wide ResNet-101-2 model from\n `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_\n\n The model is the same as ResNet except for the bottleneck number of channels\n which is twice larger in every block. The number of channels in outer 1x1\n convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048\n channels, and in Wide ResNet-50-2 has 2048-1024-2048.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n '
kwargs['width_per_group'] = (64 * 2)
return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs) | -1,834,030,766,958,855,400 | Wide ResNet-101-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr | segmentation_models_pytorch/encoders/zerocenter.py | wide_resnet101_2 | vinnamkim/segmentation_models.pytorch | python | def wide_resnet101_2(pretrained=False, progress=True, **kwargs):
'Wide ResNet-101-2 model from\n `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_\n\n The model is the same as ResNet except for the bottleneck number of channels\n which is twice larger in every block. The number of channels in outer 1x1\n convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048\n channels, and in Wide ResNet-50-2 has 2048-1024-2048.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n '
kwargs['width_per_group'] = (64 * 2)
return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs) |
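The four constructors above differ only in the block layout and the groups/width_per_group overrides they pass to _resnet. As a hedged sanity check, the equivalent upstream torchvision constructor (same signature and layers as the vendored copy here) can be exercised like this; the 224x224 input and 1000-class head are torchvision defaults, not something stated in this file.
import torch
from torchvision.models import resnext101_32x8d as tv_resnext101_32x8d  # upstream equivalent of the vendored constructor

model = tv_resnext101_32x8d()          # pretrained weights off by default, so nothing is downloaded
model.eval()
with torch.no_grad():
    logits = model(torch.randn(1, 3, 224, 224))   # one ImageNet-sized RGB image
print(logits.shape)                    # torch.Size([1, 1000])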
@staticmethod
def _parse(results):
'Parse the test output.\n\n See also https://github.com/axboe/fio/blob/master/HOWTO\n '
stats = defaultdict(int)
for (host, output) in results.items():
for job in output.split():
stats[host] += int(job.split(';')[7])
stats[host] += int(job.split(';')[48])
return stats | 3,945,342,443,185,179,000 | Parse the test output.
See also https://github.com/axboe/fio/blob/master/HOWTO | perfrunner/tests/fio.py | _parse | agyryk/perfrunner | python | @staticmethod
def _parse(results):
'Parse the test output.\n\n See also https://github.com/axboe/fio/blob/master/HOWTO\n '
stats = defaultdict(int)
for (host, output) in results.items():
for job in output.split():
stats[host] += int(job.split(';')[7])
stats[host] += int(job.split(';')[48])
return stats |
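_parse sums two columns of fio's semicolon-separated terse output per job and accumulates them per host. A standalone sketch with fabricated terse lines (field indices 7 and 48 are assumed to be the read and write bandwidth columns, per the linked HOWTO):
from collections import defaultdict

fake_job = ';'.join(str(i) for i in range(60))                 # fabricated terse record: field k simply holds k
results = {'node-1': fake_job, 'node-2': ' '.join([fake_job, fake_job])}  # node-2 ran two jobs

stats = defaultdict(int)
for host, output in results.items():
    for job in output.split():
        stats[host] += int(job.split(';')[7])                  # assumed read bandwidth field
        stats[host] += int(job.split(';')[48])                 # assumed write bandwidth field
print(dict(stats))                                             # {'node-1': 55, 'node-2': 110}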
def forward(self, outputs, batch):
'\n :param outputs:\n :param batch:\n :return:\n '
opt = self.opt
(hm_loss, wh_loss, off_loss, id_loss) = (0.0, 0.0, 0.0, 0.0)
for s in range(opt.num_stacks):
output = outputs[s]
if (not opt.mse_loss):
output['hm'] = _sigmoid(output['hm'])
hm_loss += (self.crit(output['hm'], batch['hm']) / opt.num_stacks)
if (opt.wh_weight > 0):
if opt.dense_wh:
mask_weight = (batch['dense_wh_mask'].sum() + 0.0001)
wh_loss += ((self.crit_wh((output['wh'] * batch['dense_wh_mask']), (batch['dense_wh'] * batch['dense_wh_mask'])) / mask_weight) / opt.num_stacks)
else:
wh_loss += (self.crit_reg(output['wh'], batch['reg_mask'], batch['ind'], batch['wh']) / opt.num_stacks)
if (opt.reg_offset and (opt.off_weight > 0)):
off_loss += (self.crit_reg(output['reg'], batch['reg_mask'], batch['ind'], batch['reg']) / opt.num_stacks)
if (opt.id_weight > 0):
id_head = _tranpose_and_gather_feat(output['id'], batch['ind'])
id_head = id_head[(batch['reg_mask'] > 0)].contiguous()
id_head = (self.emb_scale * F.normalize(id_head))
id_target = batch['ids'][(batch['reg_mask'] > 0)]
id_output = self.classifier.forward(id_head).contiguous()
id_loss += self.IDLoss(id_output, id_target)
det_loss = (((opt.hm_weight * hm_loss) + (opt.wh_weight * wh_loss)) + (opt.off_weight * off_loss))
loss = (((torch.exp((- self.s_det)) * det_loss) + (torch.exp((- self.s_id)) * id_loss)) + (self.s_det + self.s_id))
loss *= 0.5
loss_stats = {'loss': loss, 'hm_loss': hm_loss, 'wh_loss': wh_loss, 'off_loss': off_loss, 'id_loss': id_loss}
return (loss, loss_stats) | 1,284,021,264,384,355,000 | :param outputs:
:param batch:
:return: | src/lib/trains/mot.py | forward | CaptainEven/FairMOTVehicle | python | def forward(self, outputs, batch):
'\n :param outputs:\n :param batch:\n :return:\n '
opt = self.opt
(hm_loss, wh_loss, off_loss, id_loss) = (0.0, 0.0, 0.0, 0.0)
for s in range(opt.num_stacks):
output = outputs[s]
if (not opt.mse_loss):
output['hm'] = _sigmoid(output['hm'])
hm_loss += (self.crit(output['hm'], batch['hm']) / opt.num_stacks)
if (opt.wh_weight > 0):
if opt.dense_wh:
mask_weight = (batch['dense_wh_mask'].sum() + 0.0001)
wh_loss += ((self.crit_wh((output['wh'] * batch['dense_wh_mask']), (batch['dense_wh'] * batch['dense_wh_mask'])) / mask_weight) / opt.num_stacks)
else:
wh_loss += (self.crit_reg(output['wh'], batch['reg_mask'], batch['ind'], batch['wh']) / opt.num_stacks)
if (opt.reg_offset and (opt.off_weight > 0)):
off_loss += (self.crit_reg(output['reg'], batch['reg_mask'], batch['ind'], batch['reg']) / opt.num_stacks)
if (opt.id_weight > 0):
id_head = _tranpose_and_gather_feat(output['id'], batch['ind'])
id_head = id_head[(batch['reg_mask'] > 0)].contiguous()
id_head = (self.emb_scale * F.normalize(id_head))
id_target = batch['ids'][(batch['reg_mask'] > 0)]
id_output = self.classifier.forward(id_head).contiguous()
id_loss += self.IDLoss(id_output, id_target)
det_loss = (((opt.hm_weight * hm_loss) + (opt.wh_weight * wh_loss)) + (opt.off_weight * off_loss))
loss = (((torch.exp((- self.s_det)) * det_loss) + (torch.exp((- self.s_id)) * id_loss)) + (self.s_det + self.s_id))
loss *= 0.5
loss_stats = {'loss': loss, 'hm_loss': hm_loss, 'wh_loss': wh_loss, 'off_loss': off_loss, 'id_loss': id_loss}
return (loss, loss_stats) |
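The closing lines implement uncertainty weighting of the detection and re-identification losses through the learnable s_det and s_id parameters. A tiny numeric sketch of just that formula, with made-up loss values:
import torch

det_loss = torch.tensor(2.0)                       # hypothetical detection loss
id_loss = torch.tensor(5.0)                        # hypothetical re-ID classification loss
s_det = torch.nn.Parameter(torch.tensor(-1.85))    # learnable log-variance terms
s_id = torch.nn.Parameter(torch.tensor(-1.05))

loss = 0.5 * (torch.exp(-s_det) * det_loss + torch.exp(-s_id) * id_loss + (s_det + s_id))
print(loss.item())                                 # same expression as the last lines of forward()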
def _get_metadata(self, result: Dict[(str, Any)]) -> List[EventMetadataEntry]:
'\n Here, we run queries against our output Snowflake database tables to add additional context\n to our asset materializations.\n '
table_name = result['unique_id'].split('.')[(- 1)]
with connect_snowflake(config=self._snowflake_config, schema=self._dbt_schema) as con:
n_rows = pandas.read_sql_query(f'SELECT COUNT(*) FROM {table_name}', con)
sample_rows = pandas.read_sql_query(f'SELECT * FROM {table_name} SAMPLE ROW (10 rows)', con)
return (super()._get_metadata(result) + [EventMetadataEntry.int(int(n_rows.iloc[0][0]), 'dbt Model Number of Rows'), EventMetadataEntry.md(sample_rows.astype('str').to_markdown(), 'dbt Model Sample Rows')]) | -9,076,837,992,323,186,000 | Here, we run queries against our output Snowflake database tables to add additional context
to our asset materializations. | examples/hacker_news/hacker_news/resources/dbt_asset_resource.py | _get_metadata | AndreaGiardini/dagster | python | def _get_metadata(self, result: Dict[(str, Any)]) -> List[EventMetadataEntry]:
'\n Here, we run queries against our output Snowflake database tables to add additional context\n to our asset materializations.\n '
table_name = result['unique_id'].split('.')[(- 1)]
with connect_snowflake(config=self._snowflake_config, schema=self._dbt_schema) as con:
n_rows = pandas.read_sql_query(f'SELECT COUNT(*) FROM {table_name}', con)
sample_rows = pandas.read_sql_query(f'SELECT * FROM {table_name} SAMPLE ROW (10 rows)', con)
return (super()._get_metadata(result) + [EventMetadataEntry.int(int(n_rows.iloc[0][0]), 'dbt Model Number of Rows'), EventMetadataEntry.md(sample_rows.astype('str').to_markdown(), 'dbt Model Sample Rows')]) |
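The extra metadata comes from two plain SQL queries against the freshly built dbt table. The same pattern can be tried locally with SQLite standing in for Snowflake; note that SAMPLE ROW (...) is Snowflake syntax, so a LIMIT is used in this sketch and the table name is invented:
import sqlite3
import pandas

con = sqlite3.connect(':memory:')
con.execute('CREATE TABLE comment_stories (id INTEGER, title TEXT)')
con.executemany('INSERT INTO comment_stories VALUES (?, ?)', [(1, 'a'), (2, 'b'), (3, 'c')])

table_name = 'comment_stories'   # hypothetical dbt model name
n_rows = pandas.read_sql_query(f'SELECT COUNT(*) FROM {table_name}', con)
sample_rows = pandas.read_sql_query(f'SELECT * FROM {table_name} LIMIT 10', con)
print(int(n_rows.iloc[0, 0]))    # 3
print(sample_rows.astype('str').to_string(index=False))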
def get_form_instance_from_request(request):
' Get the form instance from the request. '
form_id = request.POST.get('form_id')
if (form_id and form_id.isdigit()):
try:
return Form.objects.get(pk=int(form_id))
except Form.DoesNotExist:
pass
return None | -7,506,938,458,354,489,000 | Get the form class from the request. | wagtailstreamforms/utils/requests.py | get_form_instance_from_request | AsankaL/wagtailstreamforms | python | def get_form_instance_from_request(request):
' '
form_id = request.POST.get('form_id')
if (form_id and form_id.isdigit()):
try:
return Form.objects.get(pk=int(form_id))
except Form.DoesNotExist:
pass
return None |
def min_ea():
'\n Return the lowest mapped address of the IDB.\n Wrapper on :meth:`BipIdb.min_ea`.\n '
return BipIdb.min_ea() | 3,291,513,428,824,812,000 | Return the lowest mapped address of the IDB.
Wrapper on :meth:`BipIdb.min_ea`. | bip/base/bipidb.py | min_ea | BrunoPujos/bip | python | def min_ea():
'\n Return the lowest mapped address of the IDB.\n Wrapper on :meth:`BipIdb.min_ea`.\n '
return BipIdb.min_ea() |
def max_ea():
'\n Return the highest mapped address of the IDB.\n Wrapper on :meth:`BipIdb.max_ea`.\n '
return BipIdb.max_ea() | 1,639,269,415,500,825,000 | Return the highest mapped address of the IDB.
Wrapper on :meth:`BipIdb.max_ea`. | bip/base/bipidb.py | max_ea | BrunoPujos/bip | python | def max_ea():
'\n Return the highest mapped address of the IDB.\n Wrapper on :meth:`BipIdb.max_ea`.\n '
return BipIdb.max_ea() |
def Here():
'\n Return current screen address.\n\n :return: The current address.\n '
return BipIdb.current_addr() | 2,565,687,747,275,942,400 | Return current screen address.
:return: The current address. | bip/base/bipidb.py | Here | BrunoPujos/bip | python | def Here():
'\n Return current screen address.\n\n :return: The current address.\n '
return BipIdb.current_addr() |
@staticmethod
def ptr_size():
'\n Return the number of bits in a pointer.\n \n :rtype: int\n '
info = idaapi.get_inf_structure()
if info.is_64bit():
bits = 64
elif info.is_32bit():
bits = 32
else:
bits = 16
return bits | 8,048,461,866,271,615,000 | Return the number of bits in a pointer.
:rtype: int | bip/base/bipidb.py | ptr_size | BrunoPujos/bip | python | @staticmethod
def ptr_size():
'\n Return the number of bits in a pointer.\n \n :rtype: int\n '
info = idaapi.get_inf_structure()
if info.is_64bit():
bits = 64
elif info.is_32bit():
bits = 32
else:
bits = 16
return bits |
@staticmethod
def min_ea():
'\n Return the lowest mapped address of the IDB.\n '
return idc.get_inf_attr(idc.INF_MIN_EA) | -6,052,441,912,866,462,000 | Return the lowest mapped address of the IDB. | bip/base/bipidb.py | min_ea | BrunoPujos/bip | python | @staticmethod
def min_ea():
'\n \n '
return idc.get_inf_attr(idc.INF_MIN_EA) |
@staticmethod
def max_ea():
'\n Return the highest mapped address of the IDB.\n '
return idc.get_inf_attr(idc.INF_MAX_EA) | -9,029,114,986,457,386,000 | Return the highest mapped address of the IDB. | bip/base/bipidb.py | max_ea | BrunoPujos/bip | python | @staticmethod
def max_ea():
'\n \n '
return idc.get_inf_attr(idc.INF_MAX_EA) |
@staticmethod
def image_base():
'\n Return the base address of the image loaded in the IDB.\n \n This is different from :meth:`~BipIdb.min_ea` which is the lowest\n *mapped* address.\n '
return idaapi.get_imagebase() | 434,545,259,254,879,040 | Return the base address of the image loaded in the IDB.
This is different from :meth:`~BipIdb.min_ea` which is the lowest
*mapped* address. | bip/base/bipidb.py | image_base | BrunoPujos/bip | python | @staticmethod
def image_base():
'\n Return the base address of the image loaded in the IDB.\n \n This is different from :meth:`~BipIdb.min_ea` which is the lowest\n *mapped* address.\n '
return idaapi.get_imagebase() |
@staticmethod
def current_addr():
'\n Return current screen address.\n\n :return: The current address selected.\n '
return ida_kernwin.get_screen_ea() | -2,356,439,664,913,423,400 | Return current screen address.
:return: The current address selected. | bip/base/bipidb.py | current_addr | BrunoPujos/bip | python | @staticmethod
def current_addr():
'\n Return current screen address.\n\n :return: The current address selected.\n '
return ida_kernwin.get_screen_ea() |
@staticmethod
def relea(addr):
'\n Calculate the relative address compare to the IDA image base.\n The calcul done is ``ADDR - IMGBASE``.\n \n The opposite of this function is :func:`absea`.\n \n :param int addr: The absolute address to translate.\n :return: The offset from image base corresponding to ``addr``.\n :rtype: int\n '
return (addr - idaapi.get_imagebase()) | -1,530,190,527,725,429,000 | Calculate the relative address compared to the IDA image base.
The calculation done is ``ADDR - IMGBASE``.
The opposite of this function is :func:`absea`.
:param int addr: The absolute address to translate.
:return: The offset from image base corresponding to ``addr``.
:rtype: int | bip/base/bipidb.py | relea | BrunoPujos/bip | python | @staticmethod
def relea(addr):
'\n Calculate the relative address compare to the IDA image base.\n The calcul done is ``ADDR - IMGBASE``.\n \n The opposite of this function is :func:`absea`.\n \n :param int addr: The absolute address to translate.\n :return: The offset from image base corresponding to ``addr``.\n :rtype: int\n '
return (addr - idaapi.get_imagebase()) |
@staticmethod
def absea(offset):
'\n Calculate the absolute address from an offset of the image base.\n The calcul done is ``OFFSET + IMGBASE`` .\n \n The opposite of this function is :func:`relea`.\n \n :param int offset: The offset from the beginning of the image base\n to translate.\n :return: The absolute address corresponding to the offset.\n :rtype: int\n '
return (offset + idaapi.get_imagebase()) | 3,687,362,688,392,563,000 | Calculate the absolute address from an offset of the image base.
The calculation done is ``OFFSET + IMGBASE``.
The opposite of this function is :func:`relea`.
:param int offset: The offset from the beginning of the image base
to translate.
:return: The absolute address corresponding to the offset.
:rtype: int | bip/base/bipidb.py | absea | BrunoPujos/bip | python | @staticmethod
def absea(offset):
'\n Calculate the absolute address from an offset of the image base.\n The calcul done is ``OFFSET + IMGBASE`` .\n \n The opposite of this function is :func:`relea`.\n \n :param int offset: The offset from the beginning of the image base\n to translate.\n :return: The absolute address corresponding to the offset.\n :rtype: int\n '
return (offset + idaapi.get_imagebase()) |
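relea and absea are simple inverses around the image base. With a hypothetical load address the round trip can be checked without IDA at all:
image_base = 0x00400000                      # made-up image base

def relea(addr):                             # mirrors BipIdb.relea: ADDR - IMGBASE
    return addr - image_base

def absea(offset):                           # mirrors BipIdb.absea: OFFSET + IMGBASE
    return offset + image_base

assert relea(0x00401234) == 0x1234
assert absea(relea(0x00401234)) == 0x00401234   # the two helpers undo each other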
def __init__(self, name=None, run_id=None, start_time=None, end_time=None, succeeded=None, local_vars_configuration=None):
'AuditProcess - a model defined in OpenAPI"\n \n :param name: (required)\n :type name: str\n :param run_id: (required)\n :type run_id: str\n :param start_time: (required)\n :type start_time: datetime\n :param end_time: \n :type end_time: datetime\n :param succeeded: \n :type succeeded: bool\n\n '
if (local_vars_configuration is None):
local_vars_configuration = Configuration.get_default_copy()
self.local_vars_configuration = local_vars_configuration
self._name = None
self._run_id = None
self._start_time = None
self._end_time = None
self._succeeded = None
self.discriminator = None
self.name = name
self.run_id = run_id
self.start_time = start_time
self.end_time = end_time
self.succeeded = succeeded | -2,539,799,742,336,256,000 | AuditProcess - a model defined in OpenAPI"
:param name: (required)
:type name: str
:param run_id: (required)
:type run_id: str
:param start_time: (required)
:type start_time: datetime
:param end_time:
:type end_time: datetime
:param succeeded:
:type succeeded: bool | sdk/finbourne_insights/models/audit_process.py | __init__ | finbourne/finbourne-insights-sdk-python | python | def __init__(self, name=None, run_id=None, start_time=None, end_time=None, succeeded=None, local_vars_configuration=None):
'AuditProcess - a model defined in OpenAPI"\n \n :param name: (required)\n :type name: str\n :param run_id: (required)\n :type run_id: str\n :param start_time: (required)\n :type start_time: datetime\n :param end_time: \n :type end_time: datetime\n :param succeeded: \n :type succeeded: bool\n\n '
if (local_vars_configuration is None):
local_vars_configuration = Configuration.get_default_copy()
self.local_vars_configuration = local_vars_configuration
self._name = None
self._run_id = None
self._start_time = None
self._end_time = None
self._succeeded = None
self.discriminator = None
self.name = name
self.run_id = run_id
self.start_time = start_time
self.end_time = end_time
self.succeeded = succeeded |
@property
def name(self):
'Gets the name of this AuditProcess. # noqa: E501\n\n\n :return: The name of this AuditProcess. # noqa: E501\n :rtype: str\n '
return self._name | -8,446,075,018,189,653,000 | Gets the name of this AuditProcess. # noqa: E501
:return: The name of this AuditProcess. # noqa: E501
:rtype: str | sdk/finbourne_insights/models/audit_process.py | name | finbourne/finbourne-insights-sdk-python | python | @property
def name(self):
'Gets the name of this AuditProcess. # noqa: E501\n\n\n :return: The name of this AuditProcess. # noqa: E501\n :rtype: str\n '
return self._name |
@name.setter
def name(self, name):
'Sets the name of this AuditProcess.\n\n\n :param name: The name of this AuditProcess. # noqa: E501\n :type name: str\n '
if (self.local_vars_configuration.client_side_validation and (name is None)):
raise ValueError('Invalid value for `name`, must not be `None`')
if (self.local_vars_configuration.client_side_validation and (name is not None) and (len(name) > 128)):
raise ValueError('Invalid value for `name`, length must be less than or equal to `128`')
if (self.local_vars_configuration.client_side_validation and (name is not None) and (len(name) < 0)):
raise ValueError('Invalid value for `name`, length must be greater than or equal to `0`')
self._name = name | -1,908,435,027,012,927,500 | Sets the name of this AuditProcess.
:param name: The name of this AuditProcess. # noqa: E501
:type name: str | sdk/finbourne_insights/models/audit_process.py | name | finbourne/finbourne-insights-sdk-python | python | @name.setter
def name(self, name):
'Sets the name of this AuditProcess.\n\n\n :param name: The name of this AuditProcess. # noqa: E501\n :type name: str\n '
if (self.local_vars_configuration.client_side_validation and (name is None)):
raise ValueError('Invalid value for `name`, must not be `None`')
if (self.local_vars_configuration.client_side_validation and (name is not None) and (len(name) > 128)):
raise ValueError('Invalid value for `name`, length must be less than or equal to `128`')
if (self.local_vars_configuration.client_side_validation and (name is not None) and (len(name) < 0)):
raise ValueError('Invalid value for `name`, length must be greater than or equal to `0`')
self._name = name |
@property
def run_id(self):
'Gets the run_id of this AuditProcess. # noqa: E501\n\n\n :return: The run_id of this AuditProcess. # noqa: E501\n :rtype: str\n '
return self._run_id | 8,597,131,932,081,543,000 | Gets the run_id of this AuditProcess. # noqa: E501
:return: The run_id of this AuditProcess. # noqa: E501
:rtype: str | sdk/finbourne_insights/models/audit_process.py | run_id | finbourne/finbourne-insights-sdk-python | python | @property
def run_id(self):
'Gets the run_id of this AuditProcess. # noqa: E501\n\n\n :return: The run_id of this AuditProcess. # noqa: E501\n :rtype: str\n '
return self._run_id |
@run_id.setter
def run_id(self, run_id):
'Sets the run_id of this AuditProcess.\n\n\n :param run_id: The run_id of this AuditProcess. # noqa: E501\n :type run_id: str\n '
if (self.local_vars_configuration.client_side_validation and (run_id is None)):
raise ValueError('Invalid value for `run_id`, must not be `None`')
self._run_id = run_id | 5,772,166,214,027,327,000 | Sets the run_id of this AuditProcess.
:param run_id: The run_id of this AuditProcess. # noqa: E501
:type run_id: str | sdk/finbourne_insights/models/audit_process.py | run_id | finbourne/finbourne-insights-sdk-python | python | @run_id.setter
def run_id(self, run_id):
'Sets the run_id of this AuditProcess.\n\n\n :param run_id: The run_id of this AuditProcess. # noqa: E501\n :type run_id: str\n '
if (self.local_vars_configuration.client_side_validation and (run_id is None)):
raise ValueError('Invalid value for `run_id`, must not be `None`')
self._run_id = run_id |
@property
def start_time(self):
'Gets the start_time of this AuditProcess. # noqa: E501\n\n\n :return: The start_time of this AuditProcess. # noqa: E501\n :rtype: datetime\n '
return self._start_time | -1,591,834,829,298,791,700 | Gets the start_time of this AuditProcess. # noqa: E501
:return: The start_time of this AuditProcess. # noqa: E501
:rtype: datetime | sdk/finbourne_insights/models/audit_process.py | start_time | finbourne/finbourne-insights-sdk-python | python | @property
def start_time(self):
'Gets the start_time of this AuditProcess. # noqa: E501\n\n\n :return: The start_time of this AuditProcess. # noqa: E501\n :rtype: datetime\n '
return self._start_time |
@start_time.setter
def start_time(self, start_time):
'Sets the start_time of this AuditProcess.\n\n\n :param start_time: The start_time of this AuditProcess. # noqa: E501\n :type start_time: datetime\n '
if (self.local_vars_configuration.client_side_validation and (start_time is None)):
raise ValueError('Invalid value for `start_time`, must not be `None`')
self._start_time = start_time | 4,984,940,772,512,307,000 | Sets the start_time of this AuditProcess.
:param start_time: The start_time of this AuditProcess. # noqa: E501
:type start_time: datetime | sdk/finbourne_insights/models/audit_process.py | start_time | finbourne/finbourne-insights-sdk-python | python | @start_time.setter
def start_time(self, start_time):
'Sets the start_time of this AuditProcess.\n\n\n :param start_time: The start_time of this AuditProcess. # noqa: E501\n :type start_time: datetime\n '
if (self.local_vars_configuration.client_side_validation and (start_time is None)):
raise ValueError('Invalid value for `start_time`, must not be `None`')
self._start_time = start_time |
@property
def end_time(self):
'Gets the end_time of this AuditProcess. # noqa: E501\n\n\n :return: The end_time of this AuditProcess. # noqa: E501\n :rtype: datetime\n '
return self._end_time | 8,328,583,657,415,390,000 | Gets the end_time of this AuditProcess. # noqa: E501
:return: The end_time of this AuditProcess. # noqa: E501
:rtype: datetime | sdk/finbourne_insights/models/audit_process.py | end_time | finbourne/finbourne-insights-sdk-python | python | @property
def end_time(self):
'Gets the end_time of this AuditProcess. # noqa: E501\n\n\n :return: The end_time of this AuditProcess. # noqa: E501\n :rtype: datetime\n '
return self._end_time |
@end_time.setter
def end_time(self, end_time):
'Sets the end_time of this AuditProcess.\n\n\n :param end_time: The end_time of this AuditProcess. # noqa: E501\n :type end_time: datetime\n '
self._end_time = end_time | -809,817,271,008,857,500 | Sets the end_time of this AuditProcess.
:param end_time: The end_time of this AuditProcess. # noqa: E501
:type end_time: datetime | sdk/finbourne_insights/models/audit_process.py | end_time | finbourne/finbourne-insights-sdk-python | python | @end_time.setter
def end_time(self, end_time):
'Sets the end_time of this AuditProcess.\n\n\n :param end_time: The end_time of this AuditProcess. # noqa: E501\n :type end_time: datetime\n '
self._end_time = end_time |
@property
def succeeded(self):
'Gets the succeeded of this AuditProcess. # noqa: E501\n\n\n :return: The succeeded of this AuditProcess. # noqa: E501\n :rtype: bool\n '
return self._succeeded | 5,928,420,784,814,148,000 | Gets the succeeded of this AuditProcess. # noqa: E501
:return: The succeeded of this AuditProcess. # noqa: E501
:rtype: bool | sdk/finbourne_insights/models/audit_process.py | succeeded | finbourne/finbourne-insights-sdk-python | python | @property
def succeeded(self):
'Gets the succeeded of this AuditProcess. # noqa: E501\n\n\n :return: The succeeded of this AuditProcess. # noqa: E501\n :rtype: bool\n '
return self._succeeded |
@succeeded.setter
def succeeded(self, succeeded):
'Sets the succeeded of this AuditProcess.\n\n\n :param succeeded: The succeeded of this AuditProcess. # noqa: E501\n :type succeeded: bool\n '
self._succeeded = succeeded | 8,987,236,863,677,922,000 | Sets the succeeded of this AuditProcess.
:param succeeded: The succeeded of this AuditProcess. # noqa: E501
:type succeeded: bool | sdk/finbourne_insights/models/audit_process.py | succeeded | finbourne/finbourne-insights-sdk-python | python | @succeeded.setter
def succeeded(self, succeeded):
'Sets the succeeded of this AuditProcess.\n\n\n :param succeeded: The succeeded of this AuditProcess. # noqa: E501\n :type succeeded: bool\n '
self._succeeded = succeeded |
def to_dict(self, serialize=False):
'Returns the model properties as a dict'
result = {}
def convert(x):
if hasattr(x, 'to_dict'):
args = getfullargspec(x.to_dict).args
if (len(args) == 1):
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for (attr, _) in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = (self.attribute_map.get(attr, attr) if serialize else attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: convert(x)), value))
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: (item[0], convert(item[1]))), value.items()))
else:
result[attr] = convert(value)
return result | -1,664,115,404,714,547,500 | Returns the model properties as a dict | sdk/finbourne_insights/models/audit_process.py | to_dict | finbourne/finbourne-insights-sdk-python | python | def to_dict(self, serialize=False):
result = {}
def convert(x):
if hasattr(x, 'to_dict'):
args = getfullargspec(x.to_dict).args
if (len(args) == 1):
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for (attr, _) in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = (self.attribute_map.get(attr, attr) if serialize else attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: convert(x)), value))
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: (item[0], convert(item[1]))), value.items()))
else:
result[attr] = convert(value)
return result |
def to_str(self):
'Returns the string representation of the model'
return pprint.pformat(self.to_dict()) | 5,849,158,643,760,736,000 | Returns the string representation of the model | sdk/finbourne_insights/models/audit_process.py | to_str | finbourne/finbourne-insights-sdk-python | python | def to_str(self):
return pprint.pformat(self.to_dict()) |
def __repr__(self):
'For `print` and `pprint`'
return self.to_str() | -8,960,031,694,814,905,000 | For `print` and `pprint` | sdk/finbourne_insights/models/audit_process.py | __repr__ | finbourne/finbourne-insights-sdk-python | python | def __repr__(self):
return self.to_str() |
def __eq__(self, other):
'Returns true if both objects are equal'
if (not isinstance(other, AuditProcess)):
return False
return (self.to_dict() == other.to_dict()) | -5,086,045,539,816,114,000 | Returns true if both objects are equal | sdk/finbourne_insights/models/audit_process.py | __eq__ | finbourne/finbourne-insights-sdk-python | python | def __eq__(self, other):
if (not isinstance(other, AuditProcess)):
return False
return (self.to_dict() == other.to_dict()) |
def __ne__(self, other):
'Returns true if both objects are not equal'
if (not isinstance(other, AuditProcess)):
return True
return (self.to_dict() != other.to_dict()) | 2,383,757,369,715,803,600 | Returns true if both objects are not equal | sdk/finbourne_insights/models/audit_process.py | __ne__ | finbourne/finbourne-insights-sdk-python | python | def __ne__(self, other):
if (not isinstance(other, AuditProcess)):
return True
return (self.to_dict() != other.to_dict()) |
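Assuming the generated AuditProcess class is assembled from the methods above and importable from the SDK package implied by the file path, a round trip through the model might look like this (all field values are invented):
from datetime import datetime
from finbourne_insights.models.audit_process import AuditProcess   # assumed import path

proc = AuditProcess(
    name='nightly-load',                     # required, at most 128 characters
    run_id='run-0001',                       # required
    start_time=datetime(2021, 1, 1, 2, 0),   # required
    succeeded=True,                          # optional, as is end_time
)
print(proc.to_dict())                        # plain dict used for serialisation
same = AuditProcess(name='nightly-load', run_id='run-0001',
                    start_time=datetime(2021, 1, 1, 2, 0), succeeded=True)
print(proc == same)                          # True: __eq__ compares to_dict() output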
def _get_sparse_matrixes(X):
'Create csc, csr and coo sparse matrix from any of the above\n\n Arguments:\n X {array-like, csc, csr or coo sparse matrix}\n\n Returns:\n csc, csr, coo\n '
X_coo = X_csc = X_csr = None
if scipy.sparse.isspmatrix_coo(X):
X_coo = X
X_csr = X_coo.tocsr(True)
X_csc = X_coo.tocsc(True)
elif scipy.sparse.isspmatrix_csr(X):
X_csr = X
X_csc = X_csr.tocsc(True)
X_coo = X_csr.tocoo(True)
elif scipy.sparse.isspmatrix_csc(X):
X_csc = X
X_csr = X_csc.tocsr(True)
X_coo = X_csc.tocoo(True)
else:
assert False, 'only coo, csc and csr sparse matrixes are supported'
return (X_csc, X_csr, X_coo) | -8,066,853,925,303,291,000 | Create csc, csr and coo sparse matrix from any of the above
Arguments:
X {array-like, csc, csr or coo sparse matrix}
Returns:
csc, csr, coo | src/interface_py/h2o4gpu/solvers/factorization.py | _get_sparse_matrixes | aaron8tang/h2o4gpu | python | def _get_sparse_matrixes(X):
'Create csc, csr and coo sparse matrix from any of the above\n\n Arguments:\n X {array-like, csc, csr or coo sparse matrix}\n\n Returns:\n csc, csr, coo\n '
X_coo = X_csc = X_csr = None
if scipy.sparse.isspmatrix_coo(X):
X_coo = X
X_csr = X_coo.tocsr(True)
X_csc = X_coo.tocsc(True)
elif scipy.sparse.isspmatrix_csr(X):
X_csr = X
X_csc = X_csr.tocsc(True)
X_coo = X_csr.tocoo(True)
elif scipy.sparse.isspmatrix_csc(X):
X_csc = X
X_csr = X_csc.tocsr(True)
X_coo = X_csc.tocoo(True)
else:
assert False, 'only coo, csc and csr sparse matrixes are supported'
return (X_csc, X_csr, X_coo) |
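With the helper above in scope (it lives in h2o4gpu's factorization module), the conversion can be checked on a small SciPy matrix; any of the three sparse formats is accepted as input:
import numpy as np
import scipy.sparse

rows = np.array([0, 0, 1, 2])
cols = np.array([0, 2, 1, 2])
vals = np.array([5.0, 3.0, 4.0, 1.0], dtype=np.float32)
X = scipy.sparse.coo_matrix((vals, (rows, cols)), shape=(3, 3))

X_csc, X_csr, X_coo = _get_sparse_matrixes(X)
print(scipy.sparse.isspmatrix_csc(X_csc),
      scipy.sparse.isspmatrix_csr(X_csr),
      scipy.sparse.isspmatrix_coo(X_coo))    # True True True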
def fit(self, X, y=None, X_test=None, X_BATCHES=1, THETA_BATCHES=1, early_stopping_rounds=None, verbose=False, scores=None):
'Learn model from rating matrix X.\n\n Parameters\n ----------\n X {array-like, sparse matrix}, shape (m, n)\n Data matrix to be decomposed.\n y None\n Ignored\n X_test {array-like, coo sparse matrix}, shape (m, n)\n Data matrix for cross validation.\n X_BATCHES int, default: 1\n Batches to split XT, increase this parameter in case out of memory error.\n THETA_BATCHES int, default: 1\n Batches to split theta, increase this parameter in case out of memory error.\n early_stopping_rounds int, default: None\n Activates early stopping. Cross validation error needs to decrease\n at least every <early_stopping_rounds> round(s) to continue training. Requires <X_test>.\n Returns the model from the last iteration (not the best one). If early stopping occurs,\n the model will have three additional fields: best_cv_score, best_train_score and best_iteration.\n verbose bool, default: False\n Prints training and validation score(if applicable) on each iteration.\n scores {list}\n List of tuples with train, cv score for every iteration.\n\n Returns\n -------\n self : returns an instance of self.\n\n '
(csc_X, csr_X, coo_X) = _get_sparse_matrixes(X)
if (early_stopping_rounds is not None):
assert (X_test is not None), 'X_test is mandatory with early stopping'
if (X_test is not None):
assert scipy.sparse.isspmatrix_coo(X_test), 'X_test must be a coo sparse scipy matrix'
assert (X.shape == X_test.shape)
assert (X_test.dtype == self.dtype)
assert (X.dtype == self.dtype)
coo_X_test = X_test
lib = self._load_lib()
if self.double_precision:
make_data = lib.make_factorization_data_double
run_step = lib.run_factorization_step_double
factorization_score = lib.factorization_score_double
copy_fecatorization_result = lib.copy_fecatorization_result_double
free_data = lib.free_data_double
else:
make_data = lib.make_factorization_data_float
run_step = lib.run_factorization_step_float
factorization_score = lib.factorization_score_float
copy_fecatorization_result = lib.copy_fecatorization_result_float
free_data = lib.free_data_float
m = coo_X.shape[0]
n = coo_X.shape[1]
nnz = csc_X.nnz
if (coo_X_test is None):
nnz_test = 0
else:
nnz_test = coo_X_test.nnz
rs = np.random.RandomState(self.random_state)
if (self.thetaT is None):
self.thetaT = rs.rand(n, self.f).astype(self.dtype)
else:
assert (self.thetaT.dtype == self.dtype)
if (self.XT is None):
self.XT = rs.rand(m, self.f).astype(self.dtype)
else:
assert (self.XT.dtype == self.dtype)
csrRowIndexDevicePtr = None
csrColIndexDevicePtr = None
csrValDevicePtr = None
cscRowIndexDevicePtr = None
cscColIndexDevicePtr = None
cscValDevicePtr = None
cooRowIndexDevicePtr = None
cooColIndexDevicePtr = None
cooValDevicePtr = None
thetaTDevice = None
XTDevice = None
cooRowIndexTestDevicePtr = None
cooColIndexTestDevicePtr = None
cooValTestDevicePtr = None
(status, csrRowIndexDevicePtr, csrColIndexDevicePtr, csrValDevicePtr, cscRowIndexDevicePtr, cscColIndexDevicePtr, cscValDevicePtr, cooRowIndexDevicePtr, cooColIndexDevicePtr, cooValDevicePtr, thetaTDevice, XTDevice, cooRowIndexTestDevicePtr, cooColIndexTestDevicePtr, cooValTestDevicePtr) = make_data(m, n, self.f, nnz, nnz_test, csr_X.indptr, csr_X.indices, csr_X.data, csc_X.indices, csc_X.indptr, csc_X.data, coo_X.row, coo_X.col, coo_X.data, self.thetaT, self.XT, (coo_X_test.row if (coo_X_test is not None) else None), (coo_X_test.col if (coo_X_test is not None) else None), (coo_X_test.data if (coo_X_test is not None) else None), csrRowIndexDevicePtr, csrColIndexDevicePtr, csrValDevicePtr, cscRowIndexDevicePtr, cscColIndexDevicePtr, cscValDevicePtr, cooRowIndexDevicePtr, cooColIndexDevicePtr, cooValDevicePtr, thetaTDevice, XTDevice, cooRowIndexTestDevicePtr, cooColIndexTestDevicePtr, cooValTestDevicePtr)
assert (status == 0), 'Failure uploading the data'
self.best_train_score = np.inf
self.best_cv_score = np.inf
self.best_iteration = (- 1)
cv_score = train_score = np.inf
for i in range(self.max_iter):
status = run_step(m, n, self.f, nnz, self.lambda_, csrRowIndexDevicePtr, csrColIndexDevicePtr, csrValDevicePtr, cscRowIndexDevicePtr, cscColIndexDevicePtr, cscValDevicePtr, thetaTDevice, XTDevice, X_BATCHES, THETA_BATCHES)
if (verbose or (scores is not None)):
result = factorization_score(m, n, self.f, nnz, self.lambda_, thetaTDevice, XTDevice, cooRowIndexDevicePtr, cooColIndexDevicePtr, cooValDevicePtr)
train_score = result[0]
if ((X_test is not None) and (verbose or (early_stopping_rounds is not None) or (scores is not None))):
result = factorization_score(m, n, self.f, nnz_test, self.lambda_, thetaTDevice, XTDevice, cooRowIndexTestDevicePtr, cooColIndexTestDevicePtr, cooValTestDevicePtr)
cv_score = result[0]
if verbose:
print('iteration {0} train: {1} cv: {2}'.format(i, train_score, cv_score))
if (scores is not None):
scores.append((train_score, cv_score))
if (early_stopping_rounds is not None):
if (self.best_cv_score > cv_score):
self.best_cv_score = cv_score
self.best_train_score = train_score
self.best_iteration = i
if ((i - self.best_iteration) > early_stopping_rounds):
if verbose:
print('best iteration:{0} train: {1} cv: {2}'.format(self.best_iteration, self.best_train_score, self.best_cv_score))
break
lib.free_data_int(csrRowIndexDevicePtr)
lib.free_data_int(csrColIndexDevicePtr)
free_data(csrValDevicePtr)
lib.free_data_int(cscRowIndexDevicePtr)
lib.free_data_int(cscColIndexDevicePtr)
free_data(cscValDevicePtr)
lib.free_data_int(cooRowIndexDevicePtr)
lib.free_data_int(cooColIndexDevicePtr)
free_data(cooValDevicePtr)
lib.free_data_int(cooRowIndexTestDevicePtr)
lib.free_data_int(cooColIndexTestDevicePtr)
free_data(cooValTestDevicePtr)
copy_fecatorization_result(self.XT, XTDevice, (m * self.f))
copy_fecatorization_result(self.thetaT, thetaTDevice, (n * self.f))
free_data(thetaTDevice)
free_data(XTDevice)
return self | 1,335,698,133,788,022,000 | Learn model from rating matrix X.
Parameters
----------
X {array-like, sparse matrix}, shape (m, n)
Data matrix to be decomposed.
y None
Ignored
X_test {array-like, coo sparse matrix}, shape (m, n)
Data matrix for cross validation.
X_BATCHES int, default: 1
Batches to split XT, increase this parameter in case out of memory error.
THETA_BATCHES int, default: 1
Batches to split theta, increase this parameter in case out of memory error.
early_stopping_rounds int, default: None
Activates early stopping. Cross validation error needs to decrease
at least every <early_stopping_rounds> round(s) to continue training. Requires <X_test>.
Returns the model from the last iteration (not the best one). If early stopping occurs,
the model will have three additional fields: best_cv_score, best_train_score and best_iteration.
verbose bool, default: False
Prints training and validation score(if applicable) on each iteration.
scores {list}
List of tuples with train, cv score for every iteration.
Returns
-------
self : returns an instance of self. | src/interface_py/h2o4gpu/solvers/factorization.py | fit | aaron8tang/h2o4gpu | python | def fit(self, X, y=None, X_test=None, X_BATCHES=1, THETA_BATCHES=1, early_stopping_rounds=None, verbose=False, scores=None):
'Learn model from rating matrix X.\n\n Parameters\n ----------\n X {array-like, sparse matrix}, shape (m, n)\n Data matrix to be decomposed.\n y None\n Ignored\n X_test {array-like, coo sparse matrix}, shape (m, n)\n Data matrix for cross validation.\n X_BATCHES int, default: 1\n Batches to split XT, increase this parameter in case out of memory error.\n THETA_BATCHES int, default: 1\n Batches to split theta, increase this parameter in case out of memory error.\n early_stopping_rounds int, default: None\n Activates early stopping. Cross validation error needs to decrease\n at least every <early_stopping_rounds> round(s) to continue training. Requires <X_test>.\n Returns the model from the last iteration (not the best one). If early stopping occurs,\n the model will have three additional fields: best_cv_score, best_train_score and best_iteration.\n verbose bool, default: False\n Prints training and validation score(if applicable) on each iteration.\n scores {list}\n List of tuples with train, cv score for every iteration.\n\n Returns\n -------\n self : returns an instance of self.\n\n '
(csc_X, csr_X, coo_X) = _get_sparse_matrixes(X)
if (early_stopping_rounds is not None):
assert (X_test is not None), 'X_test is mandatory with early stopping'
if (X_test is not None):
assert scipy.sparse.isspmatrix_coo(X_test), 'X_test must be a coo sparse scipy matrix'
assert (X.shape == X_test.shape)
assert (X_test.dtype == self.dtype)
assert (X.dtype == self.dtype)
coo_X_test = X_test
lib = self._load_lib()
if self.double_precision:
make_data = lib.make_factorization_data_double
run_step = lib.run_factorization_step_double
factorization_score = lib.factorization_score_double
copy_fecatorization_result = lib.copy_fecatorization_result_double
free_data = lib.free_data_double
else:
make_data = lib.make_factorization_data_float
run_step = lib.run_factorization_step_float
factorization_score = lib.factorization_score_float
copy_fecatorization_result = lib.copy_fecatorization_result_float
free_data = lib.free_data_float
m = coo_X.shape[0]
n = coo_X.shape[1]
nnz = csc_X.nnz
if (coo_X_test is None):
nnz_test = 0
else:
nnz_test = coo_X_test.nnz
rs = np.random.RandomState(self.random_state)
if (self.thetaT is None):
self.thetaT = rs.rand(n, self.f).astype(self.dtype)
else:
assert (self.thetaT.dtype == self.dtype)
if (self.XT is None):
self.XT = rs.rand(m, self.f).astype(self.dtype)
else:
assert (self.XT.dtype == self.dtype)
csrRowIndexDevicePtr = None
csrColIndexDevicePtr = None
csrValDevicePtr = None
cscRowIndexDevicePtr = None
cscColIndexDevicePtr = None
cscValDevicePtr = None
cooRowIndexDevicePtr = None
cooColIndexDevicePtr = None
cooValDevicePtr = None
thetaTDevice = None
XTDevice = None
cooRowIndexTestDevicePtr = None
cooColIndexTestDevicePtr = None
cooValTestDevicePtr = None
(status, csrRowIndexDevicePtr, csrColIndexDevicePtr, csrValDevicePtr, cscRowIndexDevicePtr, cscColIndexDevicePtr, cscValDevicePtr, cooRowIndexDevicePtr, cooColIndexDevicePtr, cooValDevicePtr, thetaTDevice, XTDevice, cooRowIndexTestDevicePtr, cooColIndexTestDevicePtr, cooValTestDevicePtr) = make_data(m, n, self.f, nnz, nnz_test, csr_X.indptr, csr_X.indices, csr_X.data, csc_X.indices, csc_X.indptr, csc_X.data, coo_X.row, coo_X.col, coo_X.data, self.thetaT, self.XT, (coo_X_test.row if (coo_X_test is not None) else None), (coo_X_test.col if (coo_X_test is not None) else None), (coo_X_test.data if (coo_X_test is not None) else None), csrRowIndexDevicePtr, csrColIndexDevicePtr, csrValDevicePtr, cscRowIndexDevicePtr, cscColIndexDevicePtr, cscValDevicePtr, cooRowIndexDevicePtr, cooColIndexDevicePtr, cooValDevicePtr, thetaTDevice, XTDevice, cooRowIndexTestDevicePtr, cooColIndexTestDevicePtr, cooValTestDevicePtr)
assert (status == 0), 'Failure uploading the data'
self.best_train_score = np.inf
self.best_cv_score = np.inf
self.best_iteration = (- 1)
cv_score = train_score = np.inf
for i in range(self.max_iter):
status = run_step(m, n, self.f, nnz, self.lambda_, csrRowIndexDevicePtr, csrColIndexDevicePtr, csrValDevicePtr, cscRowIndexDevicePtr, cscColIndexDevicePtr, cscValDevicePtr, thetaTDevice, XTDevice, X_BATCHES, THETA_BATCHES)
if (verbose or (scores is not None)):
result = factorization_score(m, n, self.f, nnz, self.lambda_, thetaTDevice, XTDevice, cooRowIndexDevicePtr, cooColIndexDevicePtr, cooValDevicePtr)
train_score = result[0]
if ((X_test is not None) and (verbose or (early_stopping_rounds is not None) or (scores is not None))):
result = factorization_score(m, n, self.f, nnz_test, self.lambda_, thetaTDevice, XTDevice, cooRowIndexTestDevicePtr, cooColIndexTestDevicePtr, cooValTestDevicePtr)
cv_score = result[0]
if verbose:
print('iteration {0} train: {1} cv: {2}'.format(i, train_score, cv_score))
if (scores is not None):
scores.append((train_score, cv_score))
if (early_stopping_rounds is not None):
if (self.best_cv_score > cv_score):
self.best_cv_score = cv_score
self.best_train_score = train_score
self.best_iteration = i
if ((i - self.best_iteration) > early_stopping_rounds):
if verbose:
print('best iteration:{0} train: {1} cv: {2}'.format(self.best_iteration, self.best_train_score, self.best_cv_score))
break
lib.free_data_int(csrRowIndexDevicePtr)
lib.free_data_int(csrColIndexDevicePtr)
free_data(csrValDevicePtr)
lib.free_data_int(cscRowIndexDevicePtr)
lib.free_data_int(cscColIndexDevicePtr)
free_data(cscValDevicePtr)
lib.free_data_int(cooRowIndexDevicePtr)
lib.free_data_int(cooColIndexDevicePtr)
free_data(cooValDevicePtr)
lib.free_data_int(cooRowIndexTestDevicePtr)
lib.free_data_int(cooColIndexTestDevicePtr)
free_data(cooValTestDevicePtr)
copy_fecatorization_result(self.XT, XTDevice, (m * self.f))
copy_fecatorization_result(self.thetaT, thetaTDevice, (n * self.f))
free_data(thetaTDevice)
free_data(XTDevice)
return self |
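Preparing the inputs for fit is ordinary SciPy work: the ratings go in as a float32 COO matrix, optionally with a held-out COO matrix of the same shape for early stopping. The sketch below only builds those inputs; the commented-out call is hypothetical usage of the surrounding FactorizationH2O class and needs the h2o4gpu GPU build to actually run:
import numpy as np
import scipy.sparse

rng = np.random.RandomState(0)
m, n, nnz = 100, 80, 500
rows = rng.randint(0, m, nnz)
cols = rng.randint(0, n, nnz)
vals = rng.rand(nnz).astype(np.float32)

cut = int(nnz * 0.9)                          # 90/10 train/validation split of the observed entries
X_train = scipy.sparse.coo_matrix((vals[:cut], (rows[:cut], cols[:cut])), shape=(m, n))
X_test = scipy.sparse.coo_matrix((vals[cut:], (rows[cut:], cols[cut:])), shape=(m, n))

# scores = []
# model = FactorizationH2O(40, 0.01, max_iter=50)   # assumed signature: f latent factors, lambda_, max_iter
# model.fit(X_train, X_test=X_test, early_stopping_rounds=5, verbose=True, scores=scores)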
def predict(self, X):
'Predict none zero elements of coo sparse matrix X according to the fitted model.\n\n Parameters\n ----------\n X {array-like, sparse coo matrix} shape (m, n)\n Data matrix in coo format. Values are ignored.\n\n Returns\n -------\n {array-like, sparse coo matrix} shape (m, n)\n Predicted values.\n\n '
assert ((self.XT is not None) and (self.thetaT is not None)), 'transform is invoked on an unfitted model'
assert scipy.sparse.isspmatrix_coo(X), 'convert X to coo sparse matrix'
assert (X.dtype == self.dtype)
a = np.take(self.XT, X.row, axis=0)
b = np.take(self.thetaT, X.col, axis=0)
val = np.sum((a * b), axis=1)
return scipy.sparse.coo_matrix((val, (X.row, X.col)), shape=X.shape) | 1,088,738,667,691,376,400 | Predict none zero elements of coo sparse matrix X according to the fitted model.
Parameters
----------
X {array-like, sparse coo matrix} shape (m, n)
Data matrix in coo format. Values are ignored.
Returns
-------
{array-like, sparse coo matrix} shape (m, n)
Predicted values. | src/interface_py/h2o4gpu/solvers/factorization.py | predict | aaron8tang/h2o4gpu | python | def predict(self, X):
'Predict none zero elements of coo sparse matrix X according to the fitted model.\n\n Parameters\n ----------\n X {array-like, sparse coo matrix} shape (m, n)\n Data matrix in coo format. Values are ignored.\n\n Returns\n -------\n {array-like, sparse coo matrix} shape (m, n)\n Predicted values.\n\n '
assert ((self.XT is not None) and (self.thetaT is not None)), 'transform is invoked on an unfitted model'
assert scipy.sparse.isspmatrix_coo(X), 'convert X to coo sparse matrix'
assert (X.dtype == self.dtype)
a = np.take(self.XT, X.row, axis=0)
b = np.take(self.thetaT, X.col, axis=0)
val = np.sum((a * b), axis=1)
return scipy.sparse.coo_matrix((val, (X.row, X.col)), shape=X.shape) |
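Each predicted entry is just the dot product of the matching user and item factor rows; a NumPy check of that identity, independent of the class:
import numpy as np

f = 4
XT = np.random.rand(6, f).astype(np.float32)       # user factors, one row per user
thetaT = np.random.rand(5, f).astype(np.float32)   # item factors, one row per item

row, col = np.array([0, 3]), np.array([2, 4])      # arbitrary (user, item) pairs
a = np.take(XT, row, axis=0)
b = np.take(thetaT, col, axis=0)
val = np.sum(a * b, axis=1)                        # same computation as predict()

assert np.allclose(val[0], XT[0] @ thetaT[2])
assert np.allclose(val[1], XT[3] @ thetaT[4])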
def testAppsV1beta1DeploymentList(self):
'\n Test AppsV1beta1DeploymentList\n '
pass | -6,776,409,037,605,836,000 | Test AppsV1beta1DeploymentList | kubernetes/test/test_apps_v1beta1_deployment_list.py | testAppsV1beta1DeploymentList | dix000p/kubernetes-client | python | def testAppsV1beta1DeploymentList(self):
'\n \n '
pass |
def iscoroutinefunction(func):
'\n Return True if func is a coroutine function (a function defined with async\n def syntax, and doesn\'t contain yield), or a function decorated with\n @asyncio.coroutine.\n\n Note: copied and modified from Python 3.5\'s builtin couroutines.py to avoid\n importing asyncio directly, which in turns also initializes the "logging"\n module as a side-effect (see issue #8).\n '
return (inspect.iscoroutinefunction(func) or getattr(func, '_is_coroutine', False)) | 6,053,619,501,236,065,000 | Return True if func is a coroutine function (a function defined with async
def syntax, and doesn't contain yield), or a function decorated with
@asyncio.coroutine.
Note: copied and modified from Python 3.5's builtin coroutines.py to avoid
importing asyncio directly, which in turn also initializes the "logging"
module as a side-effect (see issue #8). | src/_pytest/compat.py | iscoroutinefunction | robholt/pytest | python | def iscoroutinefunction(func):
'\n Return True if func is a coroutine function (a function defined with async\n def syntax, and doesn\'t contain yield), or a function decorated with\n @asyncio.coroutine.\n\n Note: copied and modified from Python 3.5\'s builtin couroutines.py to avoid\n importing asyncio directly, which in turns also initializes the "logging"\n module as a side-effect (see issue #8).\n '
return (inspect.iscoroutinefunction(func) or getattr(func, '_is_coroutine', False)) |
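A small standard-library demonstration of the first branch; the getattr fallback exists for generator-based functions decorated with asyncio.coroutine on older Python versions, which is not reproduced here:
import inspect

async def native():
    return 1

def plain():
    return 1

print(inspect.iscoroutinefunction(native))   # True, caught by the first branch
print(inspect.iscoroutinefunction(plain))    # False, and plain carries no _is_coroutine marker either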
def num_mock_patch_args(function):
' return number of arguments used up by mock arguments (if any) '
patchings = getattr(function, 'patchings', None)
if (not patchings):
return 0
mock_sentinel = getattr(sys.modules.get('mock'), 'DEFAULT', object())
ut_mock_sentinel = getattr(sys.modules.get('unittest.mock'), 'DEFAULT', object())
return len([p for p in patchings if ((not p.attribute_name) and ((p.new is mock_sentinel) or (p.new is ut_mock_sentinel)))]) | -4,451,766,268,141,289,000 | return number of arguments used up by mock arguments (if any) | src/_pytest/compat.py | num_mock_patch_args | robholt/pytest | python | def num_mock_patch_args(function):
' '
patchings = getattr(function, 'patchings', None)
if (not patchings):
return 0
mock_sentinel = getattr(sys.modules.get('mock'), 'DEFAULT', object())
ut_mock_sentinel = getattr(sys.modules.get('unittest.mock'), 'DEFAULT', object())
return len([p for p in patchings if ((not p.attribute_name) and ((p.new is mock_sentinel) or (p.new is ut_mock_sentinel)))]) |
def getfuncargnames(function, *, name: str='', is_method=False, cls=None):
"Returns the names of a function's mandatory arguments.\n\n This should return the names of all function arguments that:\n * Aren't bound to an instance or type as in instance or class methods.\n * Don't have default values.\n * Aren't bound with functools.partial.\n * Aren't replaced with mocks.\n\n The is_method and cls arguments indicate that the function should\n be treated as a bound method even though it's not unless, only in\n the case of cls, the function is a static method.\n\n The name parameter should be the original name in which the function was collected.\n\n @RonnyPfannschmidt: This function should be refactored when we\n revisit fixtures. The fixture mechanism should ask the node for\n the fixture names, and not try to obtain directly from the\n function object well after collection has occurred.\n "
try:
parameters = signature(function).parameters
except (ValueError, TypeError) as e:
fail('Could not determine arguments of {!r}: {}'.format(function, e), pytrace=False)
arg_names = tuple((p.name for p in parameters.values() if (((p.kind is Parameter.POSITIONAL_OR_KEYWORD) or (p.kind is Parameter.KEYWORD_ONLY)) and (p.default is Parameter.empty))))
if (not name):
name = function.__name__
if (is_method or (cls and (not isinstance(cls.__dict__.get(name, None), staticmethod)))):
arg_names = arg_names[1:]
if hasattr(function, '__wrapped__'):
arg_names = arg_names[num_mock_patch_args(function):]
return arg_names | 7,358,649,675,907,133,000 | Returns the names of a function's mandatory arguments.
This should return the names of all function arguments that:
* Aren't bound to an instance or type as in instance or class methods.
* Don't have default values.
* Aren't bound with functools.partial.
* Aren't replaced with mocks.
The is_method and cls arguments indicate that the function should
be treated as a bound method even though it's not unless, only in
the case of cls, the function is a static method.
The name parameter should be the original name in which the function was collected.
@RonnyPfannschmidt: This function should be refactored when we
revisit fixtures. The fixture mechanism should ask the node for
the fixture names, and not try to obtain directly from the
function object well after collection has occurred. | src/_pytest/compat.py | getfuncargnames | robholt/pytest | python | def getfuncargnames(function, *, name: str=, is_method=False, cls=None):
"Returns the names of a function's mandatory arguments.\n\n This should return the names of all function arguments that:\n * Aren't bound to an instance or type as in instance or class methods.\n * Don't have default values.\n * Aren't bound with functools.partial.\n * Aren't replaced with mocks.\n\n The is_method and cls arguments indicate that the function should\n be treated as a bound method even though it's not unless, only in\n the case of cls, the function is a static method.\n\n The name parameter should be the original name in which the function was collected.\n\n @RonnyPfannschmidt: This function should be refactored when we\n revisit fixtures. The fixture mechanism should ask the node for\n the fixture names, and not try to obtain directly from the\n function object well after collection has occurred.\n "
try:
parameters = signature(function).parameters
except (ValueError, TypeError) as e:
fail('Could not determine arguments of {!r}: {}'.format(function, e), pytrace=False)
arg_names = tuple((p.name for p in parameters.values() if (((p.kind is Parameter.POSITIONAL_OR_KEYWORD) or (p.kind is Parameter.KEYWORD_ONLY)) and (p.default is Parameter.empty))))
if (not name):
name = function.__name__
if (is_method or (cls and (not isinstance(cls.__dict__.get(name, None), staticmethod)))):
arg_names = arg_names[1:]
if hasattr(function, '__wrapped__'):
arg_names = arg_names[num_mock_patch_args(function):]
return arg_names |
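The core of the helper is the signature filter: keep positional-or-keyword and keyword-only parameters that have no default. The same filter in isolation, applied to a made-up function:
from inspect import signature, Parameter

def fixture_like(request, tmp_path, scope='function', *, indirect=False, marker):
    pass

params = signature(fixture_like).parameters
mandatory = tuple(
    p.name for p in params.values()
    if p.kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY)
    and p.default is Parameter.empty
)
print(mandatory)   # ('request', 'tmp_path', 'marker')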
def ascii_escaped(val):
'If val is pure ascii, returns it as a str(). Otherwise, escapes\n bytes objects into a sequence of escaped bytes:\n\n b\'ôÅÖ\' -> \'\\xc3\\xb4\\xc5\\xd6\'\n\n and escapes unicode objects into a sequence of escaped unicode\n ids, e.g.:\n\n \'4\\nV\\U00043efa\\x0eMXWB\\x1e\\u3028\\u15fd\\xcd\\U0007d944\'\n\n note:\n the obvious "v.decode(\'unicode-escape\')" will return\n valid utf-8 unicode if it finds them in bytes, but we\n want to return escaped bytes for any byte, even if they match\n a utf-8 string.\n\n '
if isinstance(val, bytes):
ret = _bytes_to_ascii(val)
else:
ret = val.encode('unicode_escape').decode('ascii')
return _translate_non_printable(ret) | -5,234,808,399,818,805,000 | If val is pure ascii, returns it as a str(). Otherwise, escapes
bytes objects into a sequence of escaped bytes:
b'ôÅÖ' -> '\xc3\xb4\xc5\xd6'
and escapes unicode objects into a sequence of escaped unicode
ids, e.g.:
'4\nV\U00043efa\x0eMXWB\x1e\u3028\u15fd\xcd\U0007d944'
note:
the obvious "v.decode('unicode-escape')" will return
valid utf-8 unicode if it finds them in bytes, but we
want to return escaped bytes for any byte, even if they match
a utf-8 string. | src/_pytest/compat.py | ascii_escaped | robholt/pytest | python | def ascii_escaped(val):
'If val is pure ascii, returns it as a str(). Otherwise, escapes\n bytes objects into a sequence of escaped bytes:\n\n b\'ôÅÖ\' -> \'\\xc3\\xb4\\xc5\\xd6\'\n\n and escapes unicode objects into a sequence of escaped unicode\n ids, e.g.:\n\n \'4\\nV\\U00043efa\\x0eMXWB\\x1e\\u3028\\u15fd\\xcd\\U0007d944\'\n\n note:\n the obvious "v.decode(\'unicode-escape\')" will return\n valid utf-8 unicode if it finds them in bytes, but we\n want to return escaped bytes for any byte, even if they match\n a utf-8 string.\n\n '
if isinstance(val, bytes):
ret = _bytes_to_ascii(val)
else:
ret = val.encode('unicode_escape').decode('ascii')
return _translate_non_printable(ret) |
def get_real_func(obj):
' gets the real function object of the (possibly) wrapped object by\n functools.wraps or functools.partial.\n '
start_obj = obj
for i in range(100):
new_obj = getattr(obj, '__pytest_wrapped__', None)
if isinstance(new_obj, _PytestWrapper):
obj = new_obj.obj
break
new_obj = getattr(obj, '__wrapped__', None)
if (new_obj is None):
break
obj = new_obj
else:
raise ValueError('could not find real function of {start}\nstopped at {current}'.format(start=saferepr(start_obj), current=saferepr(obj)))
if isinstance(obj, functools.partial):
obj = obj.func
return obj | 7,137,254,283,143,825,000 | gets the real function object of the (possibly) wrapped object by
functools.wraps or functools.partial. | src/_pytest/compat.py | get_real_func | robholt/pytest | python | def get_real_func(obj):
' gets the real function object of the (possibly) wrapped object by\n functools.wraps or functools.partial.\n '
start_obj = obj
for i in range(100):
new_obj = getattr(obj, '__pytest_wrapped__', None)
if isinstance(new_obj, _PytestWrapper):
obj = new_obj.obj
break
new_obj = getattr(obj, '__wrapped__', None)
if (new_obj is None):
break
obj = new_obj
else:
raise ValueError('could not find real function of {start}\nstopped at {current}'.format(start=saferepr(start_obj), current=saferepr(obj)))
if isinstance(obj, functools.partial):
obj = obj.func
return obj |
def get_real_method(obj, holder):
'\n Attempts to obtain the real function object that might be wrapping ``obj``, while at the same time\n returning a bound method to ``holder`` if the original object was a bound method.\n '
try:
is_method = hasattr(obj, '__func__')
obj = get_real_func(obj)
except Exception:
return obj
if (is_method and hasattr(obj, '__get__') and callable(obj.__get__)):
obj = obj.__get__(holder)
return obj | 3,975,037,658,141,571,600 | Attempts to obtain the real function object that might be wrapping ``obj``, while at the same time
returning a bound method to ``holder`` if the original object was a bound method. | src/_pytest/compat.py | get_real_method | robholt/pytest | python | def get_real_method(obj, holder):
'\n Attempts to obtain the real function object that might be wrapping ``obj``, while at the same time\n returning a bound method to ``holder`` if the original object was a bound method.\n '
try:
is_method = hasattr(obj, '__func__')
obj = get_real_func(obj)
except Exception:
return obj
if (is_method and hasattr(obj, '__get__') and callable(obj.__get__)):
obj = obj.__get__(holder)
return obj |
def safe_getattr(object, name, default):
" Like getattr but return default upon any Exception or any OutcomeException.\n\n Attribute access can potentially fail for 'evil' Python objects.\n See issue #214.\n It catches OutcomeException because of #2490 (issue #580), new outcomes are derived from BaseException\n instead of Exception (for more details check #2707)\n "
try:
return getattr(object, name, default)
except TEST_OUTCOME:
return default | -6,607,776,573,387,889,000 | Like getattr but return default upon any Exception or any OutcomeException.
Attribute access can potentially fail for 'evil' Python objects.
See issue #214.
It catches OutcomeException because of #2490 (issue #580), new outcomes are derived from BaseException
instead of Exception (for more details check #2707) | src/_pytest/compat.py | safe_getattr | robholt/pytest | python | def safe_getattr(object, name, default):
" Like getattr but return default upon any Exception or any OutcomeException.\n\n Attribute access can potentially fail for 'evil' Python objects.\n See issue #214.\n It catches OutcomeException because of #2490 (issue #580), new outcomes are derived from BaseException\n instead of Exception (for more details check #2707)\n "
try:
return getattr(object, name, default)
except TEST_OUTCOME:
return default |
def safe_isclass(obj):
'Ignore any exception via isinstance on Python 3.'
try:
return inspect.isclass(obj)
except Exception:
return False | 886,407,819,187,287,400 | Ignore any exception via isinstance on Python 3. | src/_pytest/compat.py | safe_isclass | robholt/pytest | python | def safe_isclass(obj):
try:
return inspect.isclass(obj)
except Exception:
return False |
@property
def funcargnames(self):
' alias attribute for ``fixturenames`` for pre-2.3 compatibility'
import warnings
from _pytest.deprecated import FUNCARGNAMES
warnings.warn(FUNCARGNAMES, stacklevel=2)
return self.fixturenames | -2,417,199,118,803,160,000 | alias attribute for ``fixturenames`` for pre-2.3 compatibility | src/_pytest/compat.py | funcargnames | robholt/pytest | python | @property
def funcargnames(self):
' '
import warnings
from _pytest.deprecated import FUNCARGNAMES
warnings.warn(FUNCARGNAMES, stacklevel=2)
return self.fixturenames |
def main(argv=None):
'Run the operators-filter with the specified command line arguments.\n '
return OperatorsFilter().main(argv) | 2,750,703,961,454,874,600 | Run the operators-filter with the specified command line arguments. | src/cosmic_ray/tools/filters/operators_filter.py | main | Smirenost/cosmic-ray | python | def main(argv=None):
'\n '
return OperatorsFilter().main(argv) |
def filter(self, work_db: WorkDB, args: Namespace):
'Mark as skipped all work item with filtered operator\n '
if (args.config is None):
config = work_db.get_config()
else:
config = load_config(args.config)
exclude_operators = config.sub('filters', 'operators-filter').get('exclude-operators', ())
self._skip_filtered(work_db, exclude_operators) | 7,362,884,219,123,820,000 | Mark as skipped all work item with filtered operator | src/cosmic_ray/tools/filters/operators_filter.py | filter | Smirenost/cosmic-ray | python | def filter(self, work_db: WorkDB, args: Namespace):
'\n '
if (args.config is None):
config = work_db.get_config()
else:
config = load_config(args.config)
exclude_operators = config.sub('filters', 'operators-filter').get('exclude-operators', ())
self._skip_filtered(work_db, exclude_operators) |
def get_package_author_name() -> str:
'Return the package author name to be used.'
return userinput(name='python_package_author_name', label='Enter the python package author name to use.', default=load_repository_author_name(), validator='non_empty', sanitizer=['strip'], cache=False) | 3,065,983,685,440,111,000 | Return the package author name to be used. | setup_python_package/queries/get_package_author_name.py | get_package_author_name | LucaCappelletti94/setup_python_package | python | def get_package_author_name() -> str:
return userinput(name='python_package_author_name', label='Enter the python package author name to use.', default=load_repository_author_name(), validator='non_empty', sanitizer=['strip'], cache=False) |
def conv3x3(in_planes, out_planes, stride=1, padding=1, dilation=1):
'3x3 convolution with padding'
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=padding, dilation=dilation, bias=False) | -7,125,971,413,056,351,000 | 3x3 convolution with padding | libs/networks/resnet_dilation.py | conv3x3 | Kinpzz/RCRNet-Pytorch | python | def conv3x3(in_planes, out_planes, stride=1, padding=1, dilation=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=padding, dilation=dilation, bias=False) |
def conv1x1(in_planes, out_planes, stride=1):
'1x1 convolution'
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) | 2,748,212,586,768,409,000 | 1x1 convolution | libs/networks/resnet_dilation.py | conv1x1 | Kinpzz/RCRNet-Pytorch | python | def conv1x1(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) |
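
Editor's note: a small, hedged illustration of how the conv3x3 helper above behaves for different stride and dilation settings; the helper is restated so the snippet runs on its own, and the input sizes are arbitrary.

import torch
import torch.nn as nn

def conv3x3(in_planes, out_planes, stride=1, padding=1, dilation=1):
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=padding, dilation=dilation, bias=False)

x = torch.randn(1, 64, 56, 56)                              # NCHW feature map
print(conv3x3(64, 128, stride=2)(x).shape)                  # torch.Size([1, 128, 28, 28])
print(conv3x3(64, 64, padding=2, dilation=2)(x).shape)      # stays [1, 64, 56, 56] when padding == dilation
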
def resnet18(pretrained=False, **kwargs):
'Constructs a ResNet-18 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n '
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
return model | 2,710,881,011,384,566,300 | Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet | libs/networks/resnet_dilation.py | resnet18 | Kinpzz/RCRNet-Pytorch | python | def resnet18(pretrained=False, **kwargs):
'Constructs a ResNet-18 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n '
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
return model |
def resnet34(pretrained=False, **kwargs):
'Constructs a ResNet-34 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n '
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
return model | 321,186,425,817,952,300 | Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet | libs/networks/resnet_dilation.py | resnet34 | Kinpzz/RCRNet-Pytorch | python | def resnet34(pretrained=False, **kwargs):
'Constructs a ResNet-34 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n '
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
return model |
def resnet50(pretrained=False, **kwargs):
'Constructs a ResNet-50 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n '
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
return model | -4,884,347,836,839,471,000 | Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet | libs/networks/resnet_dilation.py | resnet50 | Kinpzz/RCRNet-Pytorch | python | def resnet50(pretrained=False, **kwargs):
'Constructs a ResNet-50 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n '
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
return model |
def resnet101(pretrained=False, **kwargs):
'Constructs a ResNet-101 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n '
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
return model | -5,899,972,026,593,623,000 | Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet | libs/networks/resnet_dilation.py | resnet101 | Kinpzz/RCRNet-Pytorch | python | def resnet101(pretrained=False, **kwargs):
'Constructs a ResNet-101 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n '
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
return model |
def resnet152(pretrained=False, **kwargs):
'Constructs a ResNet-152 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n '
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
return model | 5,878,302,975,223,905,000 | Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet | libs/networks/resnet_dilation.py | resnet152 | Kinpzz/RCRNet-Pytorch | python | def resnet152(pretrained=False, **kwargs):
'Constructs a ResNet-152 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n '
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
return model |
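
Editor's note: a hedged usage sketch for the constructors above, assuming the file is importable as libs.networks.resnet_dilation (per the path column); what the model's forward returns depends on the ResNet class defined elsewhere in that file, so the example only builds the network.

from libs.networks.resnet_dilation import resnet50          # module path taken from the rows above

model = resnet50(pretrained=False)                           # pretrained=True downloads ImageNet weights via model_zoo
n_params = sum(p.numel() for p in model.parameters())
print(f'resnet50 parameters: {n_params / 1e6:.1f}M')
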
def parse_domain_date(domain_date: Union[(List[str], str)], date_format: str='%Y-%m-%dT%H:%M:%S.000Z') -> Optional[str]:
"Converts whois date format to an ISO8601 string\n\n Converts the HelloWorld domain WHOIS date (YYYY-mm-dd HH:MM:SS) format\n in a datetime. If a list is returned with multiple elements, takes only\n the first one.\n\n :type domain_date: ``Union[List[str],str]``\n :param date_format:\n a string or list of strings with the format 'YYYY-mm-DD HH:MM:SS'\n\n :return: Parsed time in ISO8601 format\n :rtype: ``Optional[str]``\n "
if isinstance(domain_date, str):
domain_date_dt = dateparser.parse(domain_date)
if domain_date_dt:
return domain_date_dt.strftime(date_format)
elif (isinstance(domain_date, list) and (len(domain_date) > 0) and isinstance(domain_date[0], str)):
domain_date_dt = dateparser.parse(domain_date[0])
if domain_date_dt:
return domain_date_dt.strftime(date_format)
return None | 533,124,122,959,102,500 | Converts whois date format to an ISO8601 string
Converts the HelloWorld domain WHOIS date (YYYY-mm-dd HH:MM:SS) format
in a datetime. If a list is returned with multiple elements, takes only
the first one.
:type domain_date: ``Union[List[str],str]``
:param date_format:
a string or list of strings with the format 'YYYY-mm-DD HH:MM:SS'
:return: Parsed time in ISO8601 format
:rtype: ``Optional[str]`` | Packs/HelloWorld/Integrations/HelloWorld/HelloWorld.py | parse_domain_date | DeanArbel/content | python | def parse_domain_date(domain_date: Union[(List[str], str)], date_format: str='%Y-%m-%dT%H:%M:%S.000Z') -> Optional[str]:
"Converts whois date format to an ISO8601 string\n\n Converts the HelloWorld domain WHOIS date (YYYY-mm-dd HH:MM:SS) format\n in a datetime. If a list is returned with multiple elements, takes only\n the first one.\n\n :type domain_date: ``Union[List[str],str]``\n :param date_format:\n a string or list of strings with the format 'YYYY-mm-DD HH:MM:SS'\n\n :return: Parsed time in ISO8601 format\n :rtype: ``Optional[str]``\n "
if isinstance(domain_date, str):
domain_date_dt = dateparser.parse(domain_date)
if domain_date_dt:
return domain_date_dt.strftime(date_format)
elif (isinstance(domain_date, list) and (len(domain_date) > 0) and isinstance(domain_date[0], str)):
domain_date_dt = dateparser.parse(domain_date[0])
if domain_date_dt:
return domain_date_dt.strftime(date_format)
return None |
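
Editor's note: the core conversion parse_domain_date performs, shown with dateparser directly; the sample WHOIS-style date is made up, and the format string is the function's default.

import dateparser

dt = dateparser.parse('2019-01-01 00:00:00')                 # WHOIS-style 'YYYY-mm-dd HH:MM:SS'
print(dt.strftime('%Y-%m-%dT%H:%M:%S.000Z'))                 # 2019-01-01T00:00:00.000Z
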
def convert_to_demisto_severity(severity: str) -> int:
"Maps HelloWorld severity to Cortex XSOAR severity\n\n Converts the HelloWorld alert severity level ('Low', 'Medium',\n 'High', 'Critical') to Cortex XSOAR incident severity (1 to 4)\n for mapping.\n\n :type severity: ``str``\n :param severity: severity as returned from the HelloWorld API (str)\n\n :return: Cortex XSOAR Severity (1 to 4)\n :rtype: ``int``\n "
return {'Low': IncidentSeverity.LOW, 'Medium': IncidentSeverity.MEDIUM, 'High': IncidentSeverity.HIGH, 'Critical': IncidentSeverity.CRITICAL}[severity] | -3,912,506,415,638,290,400 | Maps HelloWorld severity to Cortex XSOAR severity
Converts the HelloWorld alert severity level ('Low', 'Medium',
'High', 'Critical') to Cortex XSOAR incident severity (1 to 4)
for mapping.
:type severity: ``str``
:param severity: severity as returned from the HelloWorld API (str)
:return: Cortex XSOAR Severity (1 to 4)
:rtype: ``int`` | Packs/HelloWorld/Integrations/HelloWorld/HelloWorld.py | convert_to_demisto_severity | DeanArbel/content | python | def convert_to_demisto_severity(severity: str) -> int:
"Maps HelloWorld severity to Cortex XSOAR severity\n\n Converts the HelloWorld alert severity level ('Low', 'Medium',\n 'High', 'Critical') to Cortex XSOAR incident severity (1 to 4)\n for mapping.\n\n :type severity: ``str``\n :param severity: severity as returned from the HelloWorld API (str)\n\n :return: Cortex XSOAR Severity (1 to 4)\n :rtype: ``int``\n "
return {'Low': IncidentSeverity.LOW, 'Medium': IncidentSeverity.MEDIUM, 'High': IncidentSeverity.HIGH, 'Critical': IncidentSeverity.CRITICAL}[severity] |
def test_module(client: Client, first_fetch_time: int) -> str:
"Tests API connectivity and authentication'\n\n Returning 'ok' indicates that the integration works like it is supposed to.\n Connection to the service is successful.\n Raises exceptions if something goes wrong.\n\n :type client: ``Client``\n :param Client: HelloWorld client to use\n\n :type name: ``str``\n :param name: name to append to the 'Hello' string\n\n :return: 'ok' if test passed, anything else will fail the test.\n :rtype: ``str``\n "
try:
client.search_alerts(max_results=1, start_time=first_fetch_time, alert_status=None, alert_type=None, severity=None)
except DemistoException as e:
if ('Forbidden' in str(e)):
return 'Authorization Error: make sure API Key is correctly set'
else:
raise e
return 'ok' | -6,083,236,003,950,006,000 | Tests API connectivity and authentication'
Returning 'ok' indicates that the integration works like it is supposed to.
Connection to the service is successful.
Raises exceptions if something goes wrong.
:type client: ``Client``
:param Client: HelloWorld client to use
:type name: ``str``
:param name: name to append to the 'Hello' string
:return: 'ok' if test passed, anything else will fail the test.
:rtype: ``str`` | Packs/HelloWorld/Integrations/HelloWorld/HelloWorld.py | test_module | DeanArbel/content | python | def test_module(client: Client, first_fetch_time: int) -> str:
"Tests API connectivity and authentication'\n\n Returning 'ok' indicates that the integration works like it is supposed to.\n Connection to the service is successful.\n Raises exceptions if something goes wrong.\n\n :type client: ``Client``\n :param Client: HelloWorld client to use\n\n :type name: ``str``\n :param name: name to append to the 'Hello' string\n\n :return: 'ok' if test passed, anything else will fail the test.\n :rtype: ``str``\n "
try:
client.search_alerts(max_results=1, start_time=first_fetch_time, alert_status=None, alert_type=None, severity=None)
except DemistoException as e:
if ('Forbidden' in str(e)):
return 'Authorization Error: make sure API Key is correctly set'
else:
raise e
return 'ok' |
def say_hello_command(client: Client, args: Dict[(str, Any)]) -> CommandResults:
"helloworld-say-hello command: Returns Hello {somename}\n\n :type client: ``Client``\n :param Client: HelloWorld client to use\n\n :type args: ``str``\n :param args:\n all command arguments, usually passed from ``demisto.args()``.\n ``args['name']`` is used as input name\n\n :return:\n A ``CommandResults`` object that is then passed to ``return_results``,\n that contains the hello world message\n\n :rtype: ``CommandResults``\n "
name = args.get('name', None)
if (not name):
raise ValueError('name not specified')
result = client.say_hello(name)
readable_output = f'## {result}'
return CommandResults(readable_output=readable_output, outputs_prefix='hello', outputs_key_field='', outputs=result) | -4,154,078,156,561,007,600 | helloworld-say-hello command: Returns Hello {somename}
:type client: ``Client``
:param Client: HelloWorld client to use
:type args: ``str``
:param args:
all command arguments, usually passed from ``demisto.args()``.
``args['name']`` is used as input name
:return:
A ``CommandResults`` object that is then passed to ``return_results``,
that contains the hello world message
:rtype: ``CommandResults`` | Packs/HelloWorld/Integrations/HelloWorld/HelloWorld.py | say_hello_command | DeanArbel/content | python | def say_hello_command(client: Client, args: Dict[(str, Any)]) -> CommandResults:
"helloworld-say-hello command: Returns Hello {somename}\n\n :type client: ``Client``\n :param Client: HelloWorld client to use\n\n :type args: ``str``\n :param args:\n all command arguments, usually passed from ``demisto.args()``.\n ``args['name']`` is used as input name\n\n :return:\n A ``CommandResults`` object that is then passed to ``return_results``,\n that contains the hello world message\n\n :rtype: ``CommandResults``\n "
name = args.get('name', None)
if (not name):
raise ValueError('name not specified')
result = client.say_hello(name)
readable_output = f'## {result}'
return CommandResults(readable_output=readable_output, outputs_prefix='hello', outputs_key_field='', outputs=result)
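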
def fetch_incidents(client: Client, max_results: int, last_run: Dict[(str, int)], first_fetch_time: Optional[int], alert_status: Optional[str], min_severity: str, alert_type: Optional[str]) -> Tuple[(Dict[(str, int)], List[dict])]:
'This function retrieves new alerts every interval (default is 1 minute).\n\n This function has to implement the logic of making sure that incidents are\n fetched only onces and no incidents are missed. By default it\'s invoked by\n XSOAR every minute. It will use last_run to save the timestamp of the last\n incident it processed. If last_run is not provided, it should use the\n integration parameter first_fetch_time to determine when to start fetching\n the first time.\n\n :type client: ``Client``\n :param Client: HelloWorld client to use\n\n :type max_results: ``int``\n :param max_results: Maximum numbers of incidents per fetch\n\n :type last_run: ``Optional[Dict[str, int]]``\n :param last_run:\n A dict with a key containing the latest incident created time we got\n from last fetch\n\n :type first_fetch_time: ``Optional[int]``\n :param first_fetch_time:\n If last_run is None (first time we are fetching), it contains\n the timestamp in milliseconds on when to start fetching incidents\n\n :type alert_status: ``Optional[str]``\n :param alert_status:\n status of the alert to search for. Options are: \'ACTIVE\'\n or \'CLOSED\'\n\n :type min_severity: ``str``\n :param min_severity:\n minimum severity of the alert to search for.\n Options are: "Low", "Medium", "High", "Critical"\n\n :type alert_type: ``Optional[str]``\n :param alert_type:\n type of alerts to search for. There is no list of predefined types\n\n :return:\n A tuple containing two elements:\n next_run (``Dict[str, int]``): Contains the timestamp that will be\n used in ``last_run`` on the next fetch.\n incidents (``List[dict]``): List of incidents that will be created in XSOAR\n\n :rtype: ``Tuple[Dict[str, int], List[dict]]``\n '
last_fetch = last_run.get('last_fetch', None)
if (last_fetch is None):
last_fetch = first_fetch_time
else:
last_fetch = int(last_fetch)
latest_created_time = cast(int, last_fetch)
incidents: List[Dict[(str, Any)]] = []
severity = ','.join(HELLOWORLD_SEVERITIES[HELLOWORLD_SEVERITIES.index(min_severity):])
alerts = client.search_alerts(alert_type=alert_type, alert_status=alert_status, max_results=max_results, start_time=last_fetch, severity=severity)
for alert in alerts:
incident_created_time = int(alert.get('created', '0'))
incident_created_time_ms = (incident_created_time * 1000)
if last_fetch:
if (incident_created_time <= last_fetch):
continue
incident_name = alert['name']
incident = {'name': incident_name, 'occurred': timestamp_to_datestring(incident_created_time_ms), 'rawJSON': json.dumps(alert), 'severity': convert_to_demisto_severity(alert.get('severity', 'Low'))}
incidents.append(incident)
if (incident_created_time > latest_created_time):
latest_created_time = incident_created_time
next_run = {'last_fetch': latest_created_time}
return (next_run, incidents) | 439,226,341,109,222,000 | This function retrieves new alerts every interval (default is 1 minute).
This function has to implement the logic of making sure that incidents are
fetched only onces and no incidents are missed. By default it's invoked by
XSOAR every minute. It will use last_run to save the timestamp of the last
incident it processed. If last_run is not provided, it should use the
integration parameter first_fetch_time to determine when to start fetching
the first time.
:type client: ``Client``
:param Client: HelloWorld client to use
:type max_results: ``int``
:param max_results: Maximum numbers of incidents per fetch
:type last_run: ``Optional[Dict[str, int]]``
:param last_run:
A dict with a key containing the latest incident created time we got
from last fetch
:type first_fetch_time: ``Optional[int]``
:param first_fetch_time:
If last_run is None (first time we are fetching), it contains
the timestamp in milliseconds on when to start fetching incidents
:type alert_status: ``Optional[str]``
:param alert_status:
status of the alert to search for. Options are: 'ACTIVE'
or 'CLOSED'
:type min_severity: ``str``
:param min_severity:
minimum severity of the alert to search for.
Options are: "Low", "Medium", "High", "Critical"
:type alert_type: ``Optional[str]``
:param alert_type:
type of alerts to search for. There is no list of predefined types
:return:
A tuple containing two elements:
next_run (``Dict[str, int]``): Contains the timestamp that will be
used in ``last_run`` on the next fetch.
incidents (``List[dict]``): List of incidents that will be created in XSOAR
:rtype: ``Tuple[Dict[str, int], List[dict]]`` | Packs/HelloWorld/Integrations/HelloWorld/HelloWorld.py | fetch_incidents | DeanArbel/content | python | def fetch_incidents(client: Client, max_results: int, last_run: Dict[(str, int)], first_fetch_time: Optional[int], alert_status: Optional[str], min_severity: str, alert_type: Optional[str]) -> Tuple[(Dict[(str, int)], List[dict])]:
'This function retrieves new alerts every interval (default is 1 minute).\n\n This function has to implement the logic of making sure that incidents are\n fetched only onces and no incidents are missed. By default it\'s invoked by\n XSOAR every minute. It will use last_run to save the timestamp of the last\n incident it processed. If last_run is not provided, it should use the\n integration parameter first_fetch_time to determine when to start fetching\n the first time.\n\n :type client: ``Client``\n :param Client: HelloWorld client to use\n\n :type max_results: ``int``\n :param max_results: Maximum numbers of incidents per fetch\n\n :type last_run: ``Optional[Dict[str, int]]``\n :param last_run:\n A dict with a key containing the latest incident created time we got\n from last fetch\n\n :type first_fetch_time: ``Optional[int]``\n :param first_fetch_time:\n If last_run is None (first time we are fetching), it contains\n the timestamp in milliseconds on when to start fetching incidents\n\n :type alert_status: ``Optional[str]``\n :param alert_status:\n status of the alert to search for. Options are: \'ACTIVE\'\n or \'CLOSED\'\n\n :type min_severity: ``str``\n :param min_severity:\n minimum severity of the alert to search for.\n Options are: "Low", "Medium", "High", "Critical"\n\n :type alert_type: ``Optional[str]``\n :param alert_type:\n type of alerts to search for. There is no list of predefined types\n\n :return:\n A tuple containing two elements:\n next_run (``Dict[str, int]``): Contains the timestamp that will be\n used in ``last_run`` on the next fetch.\n incidents (``List[dict]``): List of incidents that will be created in XSOAR\n\n :rtype: ``Tuple[Dict[str, int], List[dict]]``\n '
last_fetch = last_run.get('last_fetch', None)
if (last_fetch is None):
last_fetch = first_fetch_time
else:
last_fetch = int(last_fetch)
latest_created_time = cast(int, last_fetch)
incidents: List[Dict[(str, Any)]] = []
severity = ','.join(HELLOWORLD_SEVERITIES[HELLOWORLD_SEVERITIES.index(min_severity):])
alerts = client.search_alerts(alert_type=alert_type, alert_status=alert_status, max_results=max_results, start_time=last_fetch, severity=severity)
for alert in alerts:
incident_created_time = int(alert.get('created', '0'))
incident_created_time_ms = (incident_created_time * 1000)
if last_fetch:
if (incident_created_time <= last_fetch):
continue
incident_name = alert['name']
incident = {'name': incident_name, 'occurred': timestamp_to_datestring(incident_created_time_ms), 'rawJSON': json.dumps(alert), 'severity': convert_to_demisto_severity(alert.get('severity', 'Low'))}
incidents.append(incident)
if (incident_created_time > latest_created_time):
latest_created_time = incident_created_time
next_run = {'last_fetch': latest_created_time}
return (next_run, incidents) |
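
Editor's note: a self-contained sketch of the last_run bookkeeping implemented by fetch_incidents above: alerts created at or before the saved timestamp are skipped, and the newest creation time becomes the next last_fetch. The alert dicts are fabricated for the example and the XSOAR-specific fields are omitted.

import json

def fetch(alerts, last_run, first_fetch_time=0):
    last_fetch = int(last_run.get('last_fetch', first_fetch_time))
    latest, incidents = last_fetch, []
    for alert in alerts:
        created = int(alert.get('created', '0'))
        if created <= last_fetch:                            # already ingested on an earlier cycle
            continue
        incidents.append({'name': alert['name'], 'rawJSON': json.dumps(alert)})
        latest = max(latest, created)
    return {'last_fetch': latest}, incidents

run1, _ = fetch([{'name': 'a', 'created': '10'}], {})
run2, new = fetch([{'name': 'a', 'created': '10'}, {'name': 'b', 'created': '20'}], run1)
assert [i['name'] for i in new] == ['b']                     # the old alert is not re-created
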
def ip_reputation_command(client: Client, args: Dict[(str, Any)], default_threshold: int) -> List[CommandResults]:
"ip command: Returns IP reputation for a list of IPs\n\n :type client: ``Client``\n :param Client: HelloWorld client to use\n\n :type args: ``Dict[str, Any]``\n :param args:\n all command arguments, usually passed from ``demisto.args()``.\n ``args['ip']`` is a list of IPs or a single IP\n ``args['threshold']`` threshold to determine whether an IP is malicious\n\n :type default_threshold: ``int``\n :param default_threshold:\n default threshold to determine whether an IP is malicious\n if threshold is not specified in the XSOAR arguments\n\n :return:\n A ``CommandResults`` object that is then passed to ``return_results``,\n that contains IPs\n\n :rtype: ``CommandResults``\n "
ips = argToList(args.get('ip'))
if (len(ips) == 0):
raise ValueError('IP(s) not specified')
threshold = int(args.get('threshold', default_threshold))
command_results: List[CommandResults] = []
for ip in ips:
ip_data = client.get_ip_reputation(ip)
ip_data['ip'] = ip
score = 0
reputation = int(ip_data.get('score', 0))
if (reputation == 0):
score = Common.DBotScore.NONE
elif (reputation >= threshold):
score = Common.DBotScore.BAD
elif (reputation >= (threshold / 2)):
score = Common.DBotScore.SUSPICIOUS
else:
score = Common.DBotScore.GOOD
dbot_score = Common.DBotScore(indicator=ip, indicator_type=DBotScoreType.IP, integration_name='HelloWorld', score=score, malicious_description=f'Hello World returned reputation {reputation}')
ip_standard_context = Common.IP(ip=ip, asn=ip_data.get('asn'), dbot_score=dbot_score)
ip_context_excluded_fields = ['objects', 'nir']
ip_data = {k: ip_data[k] for k in ip_data if (k not in ip_context_excluded_fields)}
readable_output = tableToMarkdown('IP', ip_data)
command_results.append(CommandResults(readable_output=readable_output, outputs_prefix='HelloWorld.IP', outputs_key_field='ip', outputs=ip_data, indicator=ip_standard_context))
return command_results | 6,297,315,567,717,406,000 | ip command: Returns IP reputation for a list of IPs
:type client: ``Client``
:param Client: HelloWorld client to use
:type args: ``Dict[str, Any]``
:param args:
all command arguments, usually passed from ``demisto.args()``.
``args['ip']`` is a list of IPs or a single IP
``args['threshold']`` threshold to determine whether an IP is malicious
:type default_threshold: ``int``
:param default_threshold:
default threshold to determine whether an IP is malicious
if threshold is not specified in the XSOAR arguments
:return:
A ``CommandResults`` object that is then passed to ``return_results``,
that contains IPs
:rtype: ``CommandResults`` | Packs/HelloWorld/Integrations/HelloWorld/HelloWorld.py | ip_reputation_command | DeanArbel/content | python | def ip_reputation_command(client: Client, args: Dict[(str, Any)], default_threshold: int) -> List[CommandResults]:
"ip command: Returns IP reputation for a list of IPs\n\n :type client: ``Client``\n :param Client: HelloWorld client to use\n\n :type args: ``Dict[str, Any]``\n :param args:\n all command arguments, usually passed from ``demisto.args()``.\n ``args['ip']`` is a list of IPs or a single IP\n ``args['threshold']`` threshold to determine whether an IP is malicious\n\n :type default_threshold: ``int``\n :param default_threshold:\n default threshold to determine whether an IP is malicious\n if threshold is not specified in the XSOAR arguments\n\n :return:\n A ``CommandResults`` object that is then passed to ``return_results``,\n that contains IPs\n\n :rtype: ``CommandResults``\n "
ips = argToList(args.get('ip'))
if (len(ips) == 0):
raise ValueError('IP(s) not specified')
threshold = int(args.get('threshold', default_threshold))
command_results: List[CommandResults] = []
for ip in ips:
ip_data = client.get_ip_reputation(ip)
ip_data['ip'] = ip
score = 0
reputation = int(ip_data.get('score', 0))
if (reputation == 0):
score = Common.DBotScore.NONE
elif (reputation >= threshold):
score = Common.DBotScore.BAD
elif (reputation >= (threshold / 2)):
score = Common.DBotScore.SUSPICIOUS
else:
score = Common.DBotScore.GOOD
dbot_score = Common.DBotScore(indicator=ip, indicator_type=DBotScoreType.IP, integration_name='HelloWorld', score=score, malicious_description=f'Hello World returned reputation {reputation}')
ip_standard_context = Common.IP(ip=ip, asn=ip_data.get('asn'), dbot_score=dbot_score)
ip_context_excluded_fields = ['objects', 'nir']
ip_data = {k: ip_data[k] for k in ip_data if (k not in ip_context_excluded_fields)}
readable_output = tableToMarkdown('IP', ip_data)
command_results.append(CommandResults(readable_output=readable_output, outputs_prefix='HelloWorld.IP', outputs_key_field='ip', outputs=ip_data, indicator=ip_standard_context))
return command_results |
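
Editor's note: the reputation-to-DBotScore thresholding used by ip_reputation_command (and by domain_reputation_command below), reduced to plain numbers; 0-3 are assumed to correspond to Common.DBotScore NONE/GOOD/SUSPICIOUS/BAD, and the threshold value 65 is arbitrary for the example.

def to_dbot_score(reputation, threshold=65):
    # mirrors the branch order in ip_reputation_command above
    if reputation == 0:
        return 0            # NONE
    if reputation >= threshold:
        return 3            # BAD
    if reputation >= threshold / 2:
        return 2            # SUSPICIOUS
    return 1                # GOOD

assert [to_dbot_score(r) for r in (0, 10, 40, 80)] == [0, 1, 2, 3]
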
def domain_reputation_command(client: Client, args: Dict[(str, Any)], default_threshold: int) -> List[CommandResults]:
"domain command: Returns domain reputation for a list of domains\n\n :type client: ``Client``\n :param Client: HelloWorld client to use\n\n :type args: ``Dict[str, Any]``\n :param args:\n all command arguments, usually passed from ``demisto.args()``.\n ``args['domain']`` list of domains or a single domain\n ``args['threshold']`` threshold to determine whether a domain is malicious\n\n :type default_threshold: ``int``\n :param default_threshold:\n default threshold to determine whether an domain is malicious\n if threshold is not specified in the XSOAR arguments\n\n :return:\n A ``CommandResults`` object that is then passed to ``return_results``,\n that contains Domains\n\n :rtype: ``CommandResults``\n "
domains = argToList(args.get('domain'))
if (len(domains) == 0):
raise ValueError('domain(s) not specified')
threshold = int(args.get('threshold', default_threshold))
command_results: List[CommandResults] = []
for domain in domains:
domain_data = client.get_domain_reputation(domain)
domain_data['domain'] = domain
if ('creation_date' in domain_data):
domain_data['creation_date'] = parse_domain_date(domain_data['creation_date'])
if ('expiration_date' in domain_data):
domain_data['expiration_date'] = parse_domain_date(domain_data['expiration_date'])
if ('updated_date' in domain_data):
domain_data['updated_date'] = parse_domain_date(domain_data['updated_date'])
score = 0
reputation = int(domain_data.get('score', 0))
if (reputation == 0):
score = Common.DBotScore.NONE
elif (reputation >= threshold):
score = Common.DBotScore.BAD
elif (reputation >= (threshold / 2)):
score = Common.DBotScore.SUSPICIOUS
else:
score = Common.DBotScore.GOOD
dbot_score = Common.DBotScore(indicator=domain, integration_name='HelloWorld', indicator_type=DBotScoreType.DOMAIN, score=score, malicious_description=f'Hello World returned reputation {reputation}')
domain_standard_context = Common.Domain(domain=domain, creation_date=domain_data.get('creation_date', None), expiration_date=domain_data.get('expiration_date', None), updated_date=domain_data.get('updated_date', None), organization=domain_data.get('org', None), name_servers=domain_data.get('name_servers', None), registrant_name=domain_data.get('name', None), registrant_country=domain_data.get('country', None), registrar_name=domain_data.get('registrar', None), dbot_score=dbot_score)
readable_output = tableToMarkdown('Domain', domain_data)
command_results.append(CommandResults(readable_output=readable_output, outputs_prefix='HelloWorld.Domain', outputs_key_field='domain', outputs=domain_data, indicator=domain_standard_context))
return command_results | 4,025,089,852,044,071,000 | domain command: Returns domain reputation for a list of domains
:type client: ``Client``
:param Client: HelloWorld client to use
:type args: ``Dict[str, Any]``
:param args:
all command arguments, usually passed from ``demisto.args()``.
``args['domain']`` list of domains or a single domain
``args['threshold']`` threshold to determine whether a domain is malicious
:type default_threshold: ``int``
:param default_threshold:
default threshold to determine whether an domain is malicious
if threshold is not specified in the XSOAR arguments
:return:
A ``CommandResults`` object that is then passed to ``return_results``,
that contains Domains
:rtype: ``CommandResults`` | Packs/HelloWorld/Integrations/HelloWorld/HelloWorld.py | domain_reputation_command | DeanArbel/content | python | def domain_reputation_command(client: Client, args: Dict[(str, Any)], default_threshold: int) -> List[CommandResults]:
"domain command: Returns domain reputation for a list of domains\n\n :type client: ``Client``\n :param Client: HelloWorld client to use\n\n :type args: ``Dict[str, Any]``\n :param args:\n all command arguments, usually passed from ``demisto.args()``.\n ``args['domain']`` list of domains or a single domain\n ``args['threshold']`` threshold to determine whether a domain is malicious\n\n :type default_threshold: ``int``\n :param default_threshold:\n default threshold to determine whether an domain is malicious\n if threshold is not specified in the XSOAR arguments\n\n :return:\n A ``CommandResults`` object that is then passed to ``return_results``,\n that contains Domains\n\n :rtype: ``CommandResults``\n "
domains = argToList(args.get('domain'))
if (len(domains) == 0):
raise ValueError('domain(s) not specified')
threshold = int(args.get('threshold', default_threshold))
command_results: List[CommandResults] = []
for domain in domains:
domain_data = client.get_domain_reputation(domain)
domain_data['domain'] = domain
if ('creation_date' in domain_data):
domain_data['creation_date'] = parse_domain_date(domain_data['creation_date'])
if ('expiration_date' in domain_data):
domain_data['expiration_date'] = parse_domain_date(domain_data['expiration_date'])
if ('updated_date' in domain_data):
domain_data['updated_date'] = parse_domain_date(domain_data['updated_date'])
score = 0
reputation = int(domain_data.get('score', 0))
if (reputation == 0):
score = Common.DBotScore.NONE
elif (reputation >= threshold):
score = Common.DBotScore.BAD
elif (reputation >= (threshold / 2)):
score = Common.DBotScore.SUSPICIOUS
else:
score = Common.DBotScore.GOOD
dbot_score = Common.DBotScore(indicator=domain, integration_name='HelloWorld', indicator_type=DBotScoreType.DOMAIN, score=score, malicious_description=f'Hello World returned reputation {reputation}')
domain_standard_context = Common.Domain(domain=domain, creation_date=domain_data.get('creation_date', None), expiration_date=domain_data.get('expiration_date', None), updated_date=domain_data.get('updated_date', None), organization=domain_data.get('org', None), name_servers=domain_data.get('name_servers', None), registrant_name=domain_data.get('name', None), registrant_country=domain_data.get('country', None), registrar_name=domain_data.get('registrar', None), dbot_score=dbot_score)
readable_output = tableToMarkdown('Domain', domain_data)
command_results.append(CommandResults(readable_output=readable_output, outputs_prefix='HelloWorld.Domain', outputs_key_field='domain', outputs=domain_data, indicator=domain_standard_context))
return command_results |
def search_alerts_command(client: Client, args: Dict[(str, Any)]) -> CommandResults:
"helloworld-search-alerts command: Search alerts in HelloWorld\n\n :type client: ``Client``\n :param Client: HelloWorld client to use\n\n :type args: ``Dict[str, Any]``\n :param args:\n all command arguments, usually passed from ``demisto.args()``.\n ``args['status']`` alert status. Options are 'ACTIVE' or 'CLOSED'\n ``args['severity']`` alert severity CSV\n ``args['alert_type']`` alert type\n ``args['start_time']`` start time as ISO8601 date or seconds since epoch\n ``args['max_results']`` maximum number of results to return\n\n :return:\n A ``CommandResults`` object that is then passed to ``return_results``,\n that contains alerts\n\n :rtype: ``CommandResults``\n "
status = args.get('status')
severities: List[str] = HELLOWORLD_SEVERITIES
severity = args.get('severity', None)
if severity:
severities = severity.split(',')
if (not all(((s in HELLOWORLD_SEVERITIES) for s in severities))):
raise ValueError(f"severity must be a comma-separated value with the following options: {','.join(HELLOWORLD_SEVERITIES)}")
alert_type = args.get('alert_type')
start_time = arg_to_datetime(arg=args.get('start_time'), arg_name='start_time', required=False)
max_results = arg_to_number(arg=args.get('max_results'), arg_name='max_results', required=False)
alerts = client.search_alerts(severity=','.join(severities), alert_status=status, alert_type=alert_type, start_time=(int(start_time.timestamp()) if start_time else None), max_results=max_results)
for alert in alerts:
if ('created' not in alert):
continue
created_time_ms = (int(alert.get('created', '0')) * 1000)
alert['created'] = timestamp_to_datestring(created_time_ms)
return CommandResults(outputs_prefix='HelloWorld.Alert', outputs_key_field='alert_id', outputs=alerts) | -8,565,637,857,912,481,000 | helloworld-search-alerts command: Search alerts in HelloWorld
:type client: ``Client``
:param Client: HelloWorld client to use
:type args: ``Dict[str, Any]``
:param args:
all command arguments, usually passed from ``demisto.args()``.
``args['status']`` alert status. Options are 'ACTIVE' or 'CLOSED'
``args['severity']`` alert severity CSV
``args['alert_type']`` alert type
``args['start_time']`` start time as ISO8601 date or seconds since epoch
``args['max_results']`` maximum number of results to return
:return:
A ``CommandResults`` object that is then passed to ``return_results``,
that contains alerts
:rtype: ``CommandResults`` | Packs/HelloWorld/Integrations/HelloWorld/HelloWorld.py | search_alerts_command | DeanArbel/content | python | def search_alerts_command(client: Client, args: Dict[(str, Any)]) -> CommandResults:
"helloworld-search-alerts command: Search alerts in HelloWorld\n\n :type client: ``Client``\n :param Client: HelloWorld client to use\n\n :type args: ``Dict[str, Any]``\n :param args:\n all command arguments, usually passed from ``demisto.args()``.\n ``args['status']`` alert status. Options are 'ACTIVE' or 'CLOSED'\n ``args['severity']`` alert severity CSV\n ``args['alert_type']`` alert type\n ``args['start_time']`` start time as ISO8601 date or seconds since epoch\n ``args['max_results']`` maximum number of results to return\n\n :return:\n A ``CommandResults`` object that is then passed to ``return_results``,\n that contains alerts\n\n :rtype: ``CommandResults``\n "
status = args.get('status')
severities: List[str] = HELLOWORLD_SEVERITIES
severity = args.get('severity', None)
if severity:
severities = severity.split(',')
if (not all(((s in HELLOWORLD_SEVERITIES) for s in severities))):
raise ValueError(f"severity must be a comma-separated value with the following options: {','.join(HELLOWORLD_SEVERITIES)}")
alert_type = args.get('alert_type')
start_time = arg_to_datetime(arg=args.get('start_time'), arg_name='start_time', required=False)
max_results = arg_to_number(arg=args.get('max_results'), arg_name='max_results', required=False)
alerts = client.search_alerts(severity=','.join(severities), alert_status=status, alert_type=alert_type, start_time=(int(start_time.timestamp()) if start_time else None), max_results=max_results)
for alert in alerts:
if ('created' not in alert):
continue
created_time_ms = (int(alert.get('created', '0')) * 1000)
alert['created'] = timestamp_to_datestring(created_time_ms)
return CommandResults(outputs_prefix='HelloWorld.Alert', outputs_key_field='alert_id', outputs=alerts) |
def get_alert_command(client: Client, args: Dict[(str, Any)]) -> CommandResults:
"helloworld-get-alert command: Returns a HelloWorld alert\n\n :type client: ``Client``\n :param Client: HelloWorld client to use\n\n :type args: ``Dict[str, Any]``\n :param args:\n all command arguments, usually passed from ``demisto.args()``.\n ``args['alert_id']`` alert ID to return\n\n :return:\n A ``CommandResults`` object that is then passed to ``return_results``,\n that contains an alert\n\n :rtype: ``CommandResults``\n "
alert_id = args.get('alert_id', None)
if (not alert_id):
raise ValueError('alert_id not specified')
alert = client.get_alert(alert_id=alert_id)
if ('created' in alert):
created_time_ms = (int(alert.get('created', '0')) * 1000)
alert['created'] = timestamp_to_datestring(created_time_ms)
readable_output = tableToMarkdown(f'HelloWorld Alert {alert_id}', alert)
return CommandResults(readable_output=readable_output, outputs_prefix='HelloWorld.Alert', outputs_key_field='alert_id', outputs=alert) | -4,416,676,729,715,666,400 | helloworld-get-alert command: Returns a HelloWorld alert
:type client: ``Client``
:param Client: HelloWorld client to use
:type args: ``Dict[str, Any]``
:param args:
all command arguments, usually passed from ``demisto.args()``.
``args['alert_id']`` alert ID to return
:return:
A ``CommandResults`` object that is then passed to ``return_results``,
that contains an alert
:rtype: ``CommandResults`` | Packs/HelloWorld/Integrations/HelloWorld/HelloWorld.py | get_alert_command | DeanArbel/content | python | def get_alert_command(client: Client, args: Dict[(str, Any)]) -> CommandResults:
"helloworld-get-alert command: Returns a HelloWorld alert\n\n :type client: ``Client``\n :param Client: HelloWorld client to use\n\n :type args: ``Dict[str, Any]``\n :param args:\n all command arguments, usually passed from ``demisto.args()``.\n ``args['alert_id']`` alert ID to return\n\n :return:\n A ``CommandResults`` object that is then passed to ``return_results``,\n that contains an alert\n\n :rtype: ``CommandResults``\n "
alert_id = args.get('alert_id', None)
if (not alert_id):
raise ValueError('alert_id not specified')
alert = client.get_alert(alert_id=alert_id)
if ('created' in alert):
created_time_ms = (int(alert.get('created', '0')) * 1000)
alert['created'] = timestamp_to_datestring(created_time_ms)
readable_output = tableToMarkdown(f'HelloWorld Alert {alert_id}', alert)
return CommandResults(readable_output=readable_output, outputs_prefix='HelloWorld.Alert', outputs_key_field='alert_id', outputs=alert) |
def update_alert_status_command(client: Client, args: Dict[(str, Any)]) -> CommandResults:
"helloworld-update-alert-status command: Changes the status of an alert\n\n Changes the status of a HelloWorld alert and returns the updated alert info\n\n :type client: ``Client``\n :param Client: HelloWorld client to use\n\n :type args: ``Dict[str, Any]``\n :param args:\n all command arguments, usually passed from ``demisto.args()``.\n ``args['alert_id']`` alert ID to update\n ``args['status']`` new status, either ACTIVE or CLOSED\n\n :return:\n A ``CommandResults`` object that is then passed to ``return_results``,\n that contains an updated alert\n\n :rtype: ``CommandResults``\n "
alert_id = args.get('alert_id', None)
if (not alert_id):
raise ValueError('alert_id not specified')
status = args.get('status', None)
if (status not in ('ACTIVE', 'CLOSED')):
raise ValueError('status must be either ACTIVE or CLOSED')
alert = client.update_alert_status(alert_id, status)
if ('updated' in alert):
updated_time_ms = (int(alert.get('updated', '0')) * 1000)
alert['updated'] = timestamp_to_datestring(updated_time_ms)
readable_output = tableToMarkdown(f'HelloWorld Alert {alert_id}', alert)
return CommandResults(readable_output=readable_output, outputs_prefix='HelloWorld.Alert', outputs_key_field='alert_id', outputs=alert) | 2,405,045,081,726,955,500 | helloworld-update-alert-status command: Changes the status of an alert
Changes the status of a HelloWorld alert and returns the updated alert info
:type client: ``Client``
:param Client: HelloWorld client to use
:type args: ``Dict[str, Any]``
:param args:
all command arguments, usually passed from ``demisto.args()``.
``args['alert_id']`` alert ID to update
``args['status']`` new status, either ACTIVE or CLOSED
:return:
A ``CommandResults`` object that is then passed to ``return_results``,
that contains an updated alert
:rtype: ``CommandResults`` | Packs/HelloWorld/Integrations/HelloWorld/HelloWorld.py | update_alert_status_command | DeanArbel/content | python | def update_alert_status_command(client: Client, args: Dict[(str, Any)]) -> CommandResults:
"helloworld-update-alert-status command: Changes the status of an alert\n\n Changes the status of a HelloWorld alert and returns the updated alert info\n\n :type client: ``Client``\n :param Client: HelloWorld client to use\n\n :type args: ``Dict[str, Any]``\n :param args:\n all command arguments, usually passed from ``demisto.args()``.\n ``args['alert_id']`` alert ID to update\n ``args['status']`` new status, either ACTIVE or CLOSED\n\n :return:\n A ``CommandResults`` object that is then passed to ``return_results``,\n that contains an updated alert\n\n :rtype: ``CommandResults``\n "
alert_id = args.get('alert_id', None)
if (not alert_id):
raise ValueError('alert_id not specified')
status = args.get('status', None)
if (status not in ('ACTIVE', 'CLOSED')):
raise ValueError('status must be either ACTIVE or CLOSED')
alert = client.update_alert_status(alert_id, status)
if ('updated' in alert):
updated_time_ms = (int(alert.get('updated', '0')) * 1000)
alert['updated'] = timestamp_to_datestring(updated_time_ms)
readable_output = tableToMarkdown(f'HelloWorld Alert {alert_id}', alert)
return CommandResults(readable_output=readable_output, outputs_prefix='HelloWorld.Alert', outputs_key_field='alert_id', outputs=alert) |
def scan_start_command(client: Client, args: Dict[(str, Any)]) -> CommandResults:
"helloworld-start-scan command: Starts a HelloWorld scan\n\n :type client: ``Client``\n :param Client: HelloWorld client to use\n\n :type args: ``Dict[str, Any]``\n :param args:\n all command arguments, usually passed from ``demisto.args()``.\n ``args['hostname']`` hostname to run the scan on\n\n :return:\n A ``CommandResults`` object that is then passed to ``return_results``,\n that contains a scan job\n\n :rtype: ``CommandResults``\n "
hostname = args.get('hostname', None)
if (not hostname):
raise ValueError('hostname not specified')
scan = client.scan_start(hostname=hostname)
scan['hostname'] = hostname
scan_id = scan.get('scan_id')
readable_output = f'Started scan {scan_id}'
return CommandResults(readable_output=readable_output, outputs_prefix='HelloWorld.Scan', outputs_key_field='scan_id', outputs=scan) | -6,920,022,741,749,217,000 | helloworld-start-scan command: Starts a HelloWorld scan
:type client: ``Client``
:param Client: HelloWorld client to use
:type args: ``Dict[str, Any]``
:param args:
all command arguments, usually passed from ``demisto.args()``.
``args['hostname']`` hostname to run the scan on
:return:
A ``CommandResults`` object that is then passed to ``return_results``,
that contains a scan job
:rtype: ``CommandResults`` | Packs/HelloWorld/Integrations/HelloWorld/HelloWorld.py | scan_start_command | DeanArbel/content | python | def scan_start_command(client: Client, args: Dict[(str, Any)]) -> CommandResults:
"helloworld-start-scan command: Starts a HelloWorld scan\n\n :type client: ``Client``\n :param Client: HelloWorld client to use\n\n :type args: ``Dict[str, Any]``\n :param args:\n all command arguments, usually passed from ``demisto.args()``.\n ``args['hostname']`` hostname to run the scan on\n\n :return:\n A ``CommandResults`` object that is then passed to ``return_results``,\n that contains a scan job\n\n :rtype: ``CommandResults``\n "
hostname = args.get('hostname', None)
if (not hostname):
raise ValueError('hostname not specified')
scan = client.scan_start(hostname=hostname)
scan['hostname'] = hostname
scan_id = scan.get('scan_id')
readable_output = f'Started scan {scan_id}'
return CommandResults(readable_output=readable_output, outputs_prefix='HelloWorld.Scan', outputs_key_field='scan_id', outputs=scan) |