import asyncio
import logging
from functools import wraps
from typing import List, Optional, SupportsInt, Union, TYPE_CHECKING # noqa
import asyncpg
from pypika import PostgreSQLQuery
from tortoise.backends.asyncpg.executor import AsyncpgExecutor
from tortoise.backends.asyncpg.schema_generator import AsyncpgSchemaGenerator
from tortoise.backends.base.client import (
BaseDBAsyncClient,
BaseTransactionWrapper,
Capabilities,
ConnectionWrapper,
)
from tortoise.exceptions import (
DBConnectionError,
IntegrityError,
OperationalError,
TransactionManagementError,
)
if TYPE_CHECKING:
from typing import AsyncContextManager
from contextvars import ContextVar
# Default of None lets .get() mean "no active transaction" instead of raising LookupError
current_transaction = ContextVar("Current TransactionWrapper obj", default=None)
def retry_connection(func):
@wraps(func)
async def retry_connection_(self, *args):
try:
return await func(self, *args)
except (
asyncpg.PostgresConnectionError,
asyncpg.ConnectionDoesNotExistError,
asyncpg.ConnectionFailureError,
asyncpg.InterfaceError,
):
# Here we assume that a connection error has happened
# Re-create connection and re-try the function call once only.
if getattr(self, "transaction", None):
self._finalized = True
raise TransactionManagementError("Connection gone away during transaction")
logging.info("Attempting reconnect")
try:
await self._close()
await self.create_connection(with_db=True)
logging.info("Reconnected")
except Exception as e:
logging.info("Failed to reconnect: %s", str(e))
raise
return await func(self, *args)
return retry_connection_
def translate_exceptions(func):
@wraps(func)
async def translate_exceptions_(self, *args):
try:
return await func(self, *args)
except asyncpg.SyntaxOrAccessError as exc:
raise OperationalError(exc)
except asyncpg.IntegrityConstraintViolationError as exc:
raise IntegrityError(exc)
return translate_exceptions_
class PoolConnectionDispatcher:
__slots__ = ("pool", "connection")
log = logging.getLogger("db_client")
def __init__(self, pool: asyncpg.pool.Pool) -> None:
self.pool = pool
self.connection = None
async def __aenter__(self):
self.connection = await self.pool.acquire()
self.log.debug("Acquired connection %s", self.connection)
return self.connection
async def __aexit__(self, exc_type, exc_val, exc_tb) -> None:
self.log.debug("Released connection %s", self.connection)
await self.pool.release(self.connection)
class AsyncpgDBClient(BaseDBAsyncClient):
DSN_TEMPLATE = "postgres://{user}:{password}@{host}:{port}/{database}"
query_class = PostgreSQLQuery
executor_class = AsyncpgExecutor
schema_generator = AsyncpgSchemaGenerator
capabilities = Capabilities("postgres", pooling=True)
def __init__(
self,
user: str,
password: str,
database: str,
host: str,
port: SupportsInt,
min_size: SupportsInt = 10,
max_size: SupportsInt = 10000,
max_inactive_connection_lifetime=0,
**kwargs
) -> None:
super().__init__(**kwargs)
self.user = user
self.password = password
self.database = database
self.host = host
self.port = int(port) # make sure port is int type
self.extra = kwargs.copy()
self.extra.pop("connection_name", None)
self.extra.pop("fetch_inserted", None)
self.extra.pop("loop", None)
self.extra.pop("connection_class", None)
self._pool = None # type: Optional[asyncpg.pool.Pool]
self.min_size = int(min_size)
self.max_size = int(max_size)
self.max_inactive_connection_lifetime = int(max_inactive_connection_lifetime)
self._template = {} # type: dict
self._transaction_class = type(
"TransactionWrapper", (TransactionWrapper, self.__class__), {}
)
async def create_connection(self, with_db: bool) -> None:
self._template = {
"host": self.host,
"port": self.port,
"user": self.user,
"database": self.database if with_db else None,
"min_size": self.min_size,
"max_size": self.max_size,
"max_inactive_connection_lifetime": self.max_inactive_connection_lifetime,
**self.extra,
}
try:
self._pool = await asyncpg.create_pool(None, password=self.password, **self._template)
self.log.debug("Created pool %s with params: %s", self._pool, self._template)
except asyncpg.InvalidCatalogNameError:
raise DBConnectionError(
"Can't establish connection to database {}".format(self.database)
)
async def _close(self) -> None:
if self._pool: # pragma: nobranch
try:
await asyncio.wait_for(self._pool.close(), 10)
except asyncio.TimeoutError:
await self._pool.terminate()
self.log.debug("Closed pool %s with params: %s", self._pool, self._template)
self._template.clear()
async def close(self) -> None:
await self._close()
self._pool = None
async def db_create(self) -> None:
await self.create_connection(with_db=False)
await self.execute_script(
'CREATE DATABASE "{}" OWNER "{}"'.format(self.database, self.user)
)
await self.close()
async def db_delete(self) -> None:
await self.create_connection(with_db=False)
try:
await self.execute_script('DROP DATABASE "{}"'.format(self.database))
except asyncpg.InvalidCatalogNameError: # pragma: nocoverage
pass
await self.close()
def acquire_connection(self) -> "AsyncContextManager":
return PoolConnectionDispatcher(self._pool)
def _in_transaction(self) -> "TransactionWrapper":
transaction_wrapper = self._transaction_class(None, self)
current_transaction.set(transaction_wrapper)
return transaction_wrapper
@translate_exceptions
@retry_connection
async def execute_insert(self, query: str, values: list) -> Optional[asyncpg.Record]:
transaction_wrapper = current_transaction.get()
if transaction_wrapper:
stmt = await transaction_wrapper._connection.prepare(query)
return await stmt.fetchrow(*values)
async with self.acquire_connection() as connection:
self.log.debug("%s: %s", query, values)
# TODO: Cache prepared statement
stmt = await connection.prepare(query)
return await stmt.fetchrow(*values)
@translate_exceptions
@retry_connection
async def execute_many(self, query: str, values: list) -> None:
transaction_wrapper = current_transaction.get()
if transaction_wrapper:
self.log.debug("%s: %s", query, values)
# TODO: Consider using copy_records_to_table instead
await transaction_wrapper._connection.executemany(query, values)
return  # do not fall through and execute again on a pooled connection
async with self.acquire_connection() as connection:
self.log.debug("%s: %s", query, values)
# TODO: Consider using copy_records_to_table instead
await connection.executemany(query, values)
@translate_exceptions
@retry_connection
async def execute_query(self, query: str) -> List[dict]:
transaction_wrapper = current_transaction.get()
if transaction_wrapper:
self.log.debug(query)
return await transaction_wrapper._connection.fetch(query)
async with self.acquire_connection() as connection:
self.log.debug(query)
return await connection.fetch(query)
@translate_exceptions
@retry_connection
async def execute_script(self, query: str) -> None:
transaction_wrapper = current_transaction.get()
if transaction_wrapper:
self.log.debug(query)
await transaction_wrapper._connection.execute(query)
return  # do not fall through and execute again on a pooled connection
async with self.acquire_connection() as connection:
self.log.debug(query)
await connection.execute(query)
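# --- Usage sketch (not part of the original module) ---------------------------------
# A minimal illustration of how the pooled client above is meant to be used; the
# connection parameters below are placeholders, not values taken from the source.
async def _example_usage() -> None:
    client = AsyncpgDBClient(
        user="postgres",
        password="secret",
        database="test",
        host="127.0.0.1",
        port=5432,
        connection_name="default",
    )
    await client.create_connection(with_db=True)   # builds the asyncpg pool
    rows = await client.execute_query("SELECT 1 AS one")  # plain pooled query
    print(rows)
    await client.close()                           # closes (or terminates) the pool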
class TransactionWrapper(BaseTransactionWrapper, AsyncpgDBClient):
def __init__(
self, connection: asyncpg.Connection, parent: Union[AsyncpgDBClient, "TransactionWrapper"]
) -> None:
self._pool = parent._pool # type: asyncpg.pool.Pool
self._current_transaction = current_transaction  # module-level ContextVar; AsyncpgDBClient itself has no _current_transaction attribute
self.log = logging.getLogger("db_client")
self._transaction_class = self.__class__
self._old_context_value = None # type: Optional[BaseDBAsyncClient]
self.connection_name = parent.connection_name
self.transaction = None
self._finalized = False
self._parent = parent
self._connection = connection
if isinstance(parent, TransactionWrapper):
self._lock = parent._lock # type: asyncio.Lock
else:
self._lock = asyncio.Lock()
async def create_connection(self, with_db: bool) -> None:
await self._parent.create_connection(with_db)
self._pool = self._parent._pool
async def _close(self) -> None:
await self._parent._close()
self._pool = self._parent._pool
def acquire_connection(self) -> "AsyncContextManager":
return ConnectionWrapper(self._connection, self._lock)
def _in_transaction(self) -> "TransactionWrapper":
return self._transaction_class(self._connection, self)
@retry_connection
async def start(self):
if not self._connection:
self._connection = await self._pool.acquire()
self.transaction = self._connection.transaction()
await self.transaction.start()
self._old_context_value = self._current_transaction.get()
self._current_transaction.set(self)
async def finalize(self) -> None:
if not self._old_context_value:
raise OperationalError("Finalize was called before transaction start")
self._finalized = True
self._current_transaction.set(self._old_context_value)
await self._pool.release(self._connection)
self._connection = None
async def commit(self):
if self._finalized:
raise TransactionManagementError("Transaction already finalised")
await self.transaction.commit()
await self.finalize()
async def rollback(self):
if self._finalized:
raise TransactionManagementError("Transaction already finalised")
await self.transaction.rollback()
await self.finalize() |
from .model import protonet_graph
from .loss import build_loss_fn
|
# ROY AND TRAINS
from math import ceil
#take input
for _ in range(int(input())):
t0,t1,t2,v1,v2,d = list(map(int, input().split()))
if t0<=t1 or t0<=t2:
if t0>t1:
t2 += int(ceil((d/v2)*60))
print(t2)
elif t0>t2:
t1 += int(ceil((d/v1)*60))
print(t1)
else:
t1 += int(ceil((d/v1)*60))
#print(t1)
t2 += int(ceil((d/v2)*60))
#print(t2)
if t1<t2:
print(t1)
elif t2<t1:
print(t2)
else:
print(t1)
else:
print(-1)
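# Worked example (hypothetical input, assuming t1/t2 are departure times in minutes,
# v1/v2 speeds and d the distance): for "5 5 8 320 90 320" Roy catches both trains;
# train 1 arrives at 5 + ceil(320/320*60) = 65, train 2 at 8 + ceil(320/90*60) = 222,
# so the program prints 65.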
|
import modelexp
from modelexp.experiments.sas import Saxs
from modelexp.models.sas import Cube
from modelexp.data import XyeData
from modelexp.fit import LevenbergMarquardt
from modelexp.models.sas import InstrumentalResolution
app = modelexp.App()
app.setExperiment(Saxs)
dataRef = app.setData(XyeData)
dataRef.loadFromFile('./saxsCubeData.xye')
dataRef.plotData()
modelRef = app.setModel(Cube, InstrumentalResolution)
modelRef.setParam("a", 49.900000000000006, minVal = 0, maxVal = 100, vary = True)
modelRef.setParam("sldCube", 4.5e-05, minVal = 0, maxVal = 0.00045000000000000004, vary = False)
modelRef.setParam("sldSolvent", 1e-05, minVal = 0, maxVal = 0.0001, vary = False)
modelRef.setParam("sigA", 0.049800000000000004, minVal = 0, maxVal = 0.2, vary = True)
modelRef.setParam("i0", 1.02, minVal = 0, maxVal = 10, vary = True)
modelRef.setParam("bg", 0.0, minVal = 0, maxVal = 1, vary = False)
app.setFit(LevenbergMarquardt)
app.show() |
# Copyright 2016 Netherlands eScience Center
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from datetime import datetime
from os.path import isfile
import logging
from six.moves.urllib.parse import urlparse
from .format import object2jekyll
from .utils import retrying_http_session
LOGGER = logging.getLogger('estep')
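# Both fetch_* helpers below rely on DOI content negotiation: the DOI URL is requested
# with an Accept header, and the resolver returns either CSL JSON metadata or a
# formatted bibliography entry.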
def fetch_csljson(doi):
headers = {'Accept': 'application/vnd.citationstyles.csl+json'}
http_session = retrying_http_session()
response = http_session.get(doi, headers=headers)
response.raise_for_status()
return response.json()
def fetch_bibliography(doi):
# 'ieee-with-url' is another style the resolver understands; APA is used here
style = 'apa'
headers = {'Accept': 'text/bibliography; style=' + style}
http_session = retrying_http_session()
response = http_session.get(doi, headers=headers)
response.raise_for_status()
return response.text
def doi2fn(doi, collection_dir='_publication'):
fn = urlparse(doi).path.lstrip('/').replace('/', '_')
return '{0}/{1}.md'.format(collection_dir, fn)
def issued(csl):
date_parts = csl['issued']['date-parts'][0]
year = date_parts[0]
try:
month = date_parts[1]
except IndexError:
month = 1
try:
day = date_parts[2]
except IndexError:
day = 1
dt = datetime(year, month, day)
return dt.isoformat()
def docs_with_doi(doi, docs):
"""look in document list for doi fields
and return project ids with the given doi
"""
return [p[1]['@id'] for p in docs if 'doi' in p[1] and doi in p[1]['doi']]
def generate_publication(doi, endorsers, projects, docs):
publication_fn = doi2fn(doi)
if isfile(publication_fn):
raise IOError(1, '`{0}` file already exists, remove it before regeneration'.format(publication_fn), publication_fn)
uniq_projects = set(projects) | set(docs_with_doi(doi, docs))
csl = fetch_csljson(doi)
publication = {
'@id': doi,
'description': fetch_bibliography(doi),
'author': list(uniq_projects),
'inGroup': endorsers,
'publishedIn': csl['container-title'],
'type': csl['type'],
'date': issued(csl),
}
publication_md = object2jekyll(publication, 'description')
LOGGER.info('Writing {0}'.format(publication_fn))
with open(publication_fn, 'w') as fn:
fn.write(publication_md)
|
import cv2
import numpy as np
import os
import sys
from samples.coco import coco
from mrcnn import utils
from mrcnn import model as modellib
import PIL.Image
import sys
# Load the pre-trained model data
ROOT_DIR = os.getcwd()
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
if not os.path.exists(COCO_MODEL_PATH):
utils.download_trained_weights(COCO_MODEL_PATH)
# Change the config information
class InferenceConfig(coco.CocoConfig):
GPU_COUNT = 1
# Number of images to train with on each GPU. A 12GB GPU can typically
# handle 2 images of 1024x1024px.
# Adjust based on your GPU memory and image sizes. Use the highest
# number that your GPU can handle for best performance.
IMAGES_PER_GPU = 1
config = InferenceConfig()
# COCO dataset object names
model = modellib.MaskRCNN(
mode="inference", model_dir=MODEL_DIR, config=config
)
model.load_weights(COCO_MODEL_PATH, by_name=True)
class_names = [
'BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
'bus', 'train', 'truck', 'boat', 'traffic light',
'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird',
'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',
'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',
'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
'kite', 'baseball bat', 'baseball glove', 'skateboard',
'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',
'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',
'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
'keyboard', 'cell phone', 'microwave', 'oven', 'toaster',
'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',
'teddy bear', 'hair drier', 'toothbrush'
]
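# apply_mask keeps the pixels covered by the mask and paints every background pixel
# (where mask == 0) with a fixed colour: (125, 12, 15) in the channel order produced
# by cv2.imread, i.e. BGR.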
def apply_mask(image, mask):
image[:, :, 0] = np.where(
mask == 0,
125,
image[:, :, 0]
)
image[:, :, 1] = np.where(
mask == 0,
12,
image[:, :, 1]
)
image[:, :, 2] = np.where(
mask == 0,
15,
image[:, :, 2]
)
return image
# This function is used to show the object detection result on the original image.
def display_instances(image, boxes, masks, ids, names, scores):
# max_area will save the largest object for all the detection results
max_area = 0
# n_instances is the number of detected objects
n_instances = boxes.shape[0]
if not n_instances:
print('NO INSTANCES TO DISPLAY')
else:
assert boxes.shape[0] == masks.shape[-1] == ids.shape[0]
for i in range(n_instances):
if not np.any(boxes[i]):
continue
# compute the area of each bounding box
y1, x1, y2, x2 = boxes[i]
square = (y2 - y1) * (x2 - x1)
# use label to select person object from all the 80 classes in COCO dataset
label = names[ids[i]]
if label == 'person':
# save the largest object in the image as main character
# other people will be regarded as background
if square > max_area:
max_area = square
mask = masks[:, :, i]
else:
continue
else:
continue
# apply the selected person mask to the image, turning everything
# outside the largest detected person into a flat background colour
image = apply_mask(image, mask)
return image
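# transparent_back converts the image to RGBA and makes every pixel that matches the
# colour of the top-left corner fully transparent, so the painted background becomes
# see-through in the saved PNG.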
def transparent_back(image):
image = image.convert('RGBA')
L,H = image.size
color_0 = image.getpixel((0, 0))
for h in range(H):
for l in range(L):
dot = (l, h)
color_1 = image.getpixel(dot)
if color_1 == color_0:
color_1 = color_1[:-1] + (0,)
image.putpixel(dot, color_1)
return image
if __name__ == "__main__":
image = cv2.imread(sys.argv[1], -1)
height, width, channels = image.shape
results = model.detect([image], verbose=0)
r = results[0]
frame = display_instances(
image, r['rois'], r['masks'], r['class_ids'], class_names, r['scores']
)
cv2.imwrite('temp.png', frame)  # frame is the masked image returned by display_instances
image = PIL.Image.open("./temp.png")
image = transparent_back(image)
image.save("removed_back.png")
|
"""
lolbot rewrite - (c) 2020 tilda under MIT License
This module contains code needed to start the bot, including other services,
such as the API
"""
from discord.ext import commands
from logbook import Logger, StreamHandler, INFO
from logbook.compat import redirect_logging
from sys import stdout
from core.config import Config
import os
import asyncio
from core.bot import Lolbot
from rich import traceback
redirect_logging()
traceback.install()
StreamHandler(stdout, level=INFO).push_application()
log = Logger("lolbot")
try:
import uvloop
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
except ModuleNotFoundError:
log.warning(
"uvloop was not found. This is probably due to platform incompatibility."
)
except Exception:
log.warning("unable to setup uvloop", exc_info=True)
config = Config("config.yaml").config
help_command = commands.help.DefaultHelpCommand(dm_help=None)
bot = Lolbot(
command_prefix=commands.when_mentioned_or(config["bot"]["prefix"]),
description="hi im a bot",
help_command=help_command,
logger=log,
)
if __name__ == "__main__":
for ext in os.listdir("ext"):
if ext.endswith(".py"):
try:
log.info(f"attempting to load {ext}")
ext = ext.replace(".py", "")
bot.load_extension(f"ext.{ext}")
except Exception:
log.error(f"failed to load {ext}", exc_info=True)
else:
log.info(f"successfully loaded {ext}")
log.info("loading jishaku")
bot.load_extension("jishaku")
bot.run(config["tokens"]["discord"])
|
from share import models
from api.base import ShareSerializer
# TODO make an endpoint for SourceConfigs
class SourceConfigSerializer(ShareSerializer):
class Meta:
model = models.SourceConfig
fields = ('label',)
|
def print_result(time, collection):
if collection:
print("Not all customers were driven to their destinations")
print(f'Customers left: {", ".join(map(str,collection))}')
else:
print("All customers were driven to their destinations")
print(f"Total time: {total_time} minutes")
customers = [int(el) for el in input().split(", ")]
taxi_drivers = [int(el) for el in input().split(", ")]
total_time = 0
while customers and taxi_drivers:
customer = customers[0]
taxi_driver = taxi_drivers[-1]
if taxi_driver >= customer:
total_time += customer
customers.pop(0)
taxi_drivers.pop()
else:
taxi_drivers.pop()
print_result(total_time, customers) |
print("vikranth datta")
print(" roll number:AM.EN.U4ECE19157")
print("ECE")
print("marvel rocks")
|
# importing packages
from pytube import YouTube
import os
# url input from user
audio_object = YouTube(
str(input("Enter the URL of the video you want to download: \n>> ")))
# extract only audio
video = audio_object.streams.filter(only_audio=True).first()
# check for destination to save file
print("Enter the destination (leave blank for current directory)")
destination = str(input(">> ")) or '.'
# download the file
out_file = video.download(output_path=destination)
# save the file
base, ext = os.path.splitext(out_file)
new_file = base + '.mp3'
os.rename(out_file, new_file)
# result of success
print(audio_object.title + " has been successfully downloaded.")
|
import pickledb
import re
import yaml
from datetime import *
_ignoreUsage = "Sorry, I don't understand. The correct usage is '!ignore <-me | @user> [minutes]'."
_dmsUsage = "Sorry, I dom't understand. The correct usage is '!dms <-enable | -disable>'."
with open("./conf.yaml") as conf:
config = yaml.load(conf, Loader=yaml.BaseLoader)
ignoreDb = pickledb.load("./data/ignore.db", True)
slideDb = pickledb.load("./data/slide.db", True)
async def ignore(ctx, args):
if (len(args) < 1 or len(args) > 2):
return _ignoreUsage
try:
if (len(args) == 2):
mins = int(args[1])
else:
mins = 5
except ValueError:
return _ignoreUsage
ignoreTime = datetime.now() + timedelta(minutes=mins)
if (args[0] == "-me"):
ignoreDb.set(str(ctx.author.id), ignoreTime.isoformat())
await ctx.message.add_reaction(config["emotions"]["_zipit"])
return None
else:
author = ctx.author.name
if (author != "Isaac" and author != "MercWorks"):
return "Mama Mia! Only Dave can do that!"
nick = re.sub("<@!*|>", "", args[0])
ignoreDb.set(nick, ignoreTime.isoformat())
return f"Got it, I'll ignore {args[0]} for {mins} minutes. They must have been *naughty!*"
def dms(userId, flag):
userId = str(userId)
if (flag == "-enable"):
if (slideDb.get(userId)):
slideDb.rem(userId)
return "Got it, I'll be sliding into those dms sometime soon."
elif (flag == "-disable"):
slideDb.set(userId, True)
return "Okay, I won't send you any direct messages."
else:
return _dmsUsage
def shouldIgnore(userId):
userId = str(userId)
timeStr = ignoreDb.get(userId)
if (timeStr):
ignoreTime = datetime.fromisoformat(timeStr)
if (datetime.now() > ignoreTime):
ignoreDb.rem(userId)
return False
return True
return False
def canDm(userId):
userId = str(userId)
return slideDb.get(userId) != True |
"""Generate www tables."""
import io
import ast
from datetime import datetime
from os.path import isdir,isfile,basename
from os import mkdir,remove,write,rmdir
import numpy as np
from astropy.table import Table
from astropy.utils import xml
import mysql.connector
import jinja2
import bokeh.resources
from . import db
from . import www
from . import utils
from . import config as cfg
def sample_tables(samples=None):
"""Generate tables for all samples."""
# set up connection
cnx = db.get_cnx(cfg.mysql['user'], cfg.mysql['passwd'],
cfg.mysql['host'], cfg.mysql['db_sdb'])
cursor = cnx.cursor(buffered=True)
# get a list of samples and generate their pages
if samples is None:
samples = db.get_samples()
for sample in samples:
print(" sample:",sample)
sample_table_www(cursor,sample)
sample_table_votable(cursor,sample)
sample_table_photometry(cursor,sample)
cursor.close()
cnx.close()
def sample_table_www(cursor,sample,file='index.html',
absolute_paths=True,rel_loc=None):
"""Generate an HTML page with a sample table.
Extract the necessary information from the database and create HTML
pages with the desired tables, one for each sample. These are
generated using astropy's XMLWriter the jsviewer, which makes tables
that are searchable and sortable.
Change sdf.config.mysql to specify db tables to look in.
Parameters
----------
cursor : mysql.connector.connect.cursor
Cursor used to execute database query.
sample : str
Name of sample.
file : str, optional
File name of html table
absolute_paths : bool, optional
Use absolute paths to sdf files, set by cfg.www and structure
set by sdb_getphot.py. Otherwise files are in relative location
set by rel_loc keyword.
rel_loc : str, optional
Relative location of sdf files to the created table, used only
if absolute_paths is False. This string goes in the middle of an
sql CONCAT statement, so could have sql in it, in which case
double and single-quotes must be used
(e.g. "folder1/',sql_column,'/file.html")
"""
wwwroot = cfg.www['site_root']
sample_root = cfg.file['www_root']+'samples/'
# create temporary tables we want to join on
sample_table_temp_tables(cursor)
# get link for sed and create dir and .htaccess if
# needed, absolute is used for main sample pages, otherwise these
# are for special cases
if absolute_paths:
www.create_dir(sample_root,sample)
url_str = wwwroot+"seds/masters/',sdbid,'/public"
file = sample_root+sample+'/'+file
else:
if rel_loc is None:
url_str = "',sdbid,'.html"
else:
url_str = rel_loc
sel = ("SELECT "
"CONCAT('<a target=\"_blank\" href=\""+url_str+"\">',"
"COALESCE(main_id,hd.xid,hip.xid,gj.xid,tmass.xid,sdbid),"
"'<span><img src=\""+url_str+"/',sdbid,'_thumb.png\"></span></a>') as id,"
"hd.xid as HD,"
"hip.xid as HIP,"
"gj.xid as GJ,"
"ROUND(Vmag,1) as Vmag,"
"ROUND(raj2000/15.,1) as `RA/h`,"
"ROUND(dej2000,1) as `Dec`,"
"sp_type as SpType,"
"ROUND(star.teff,0) as Teff,"
"ROUND(log10(star.lstar),2) as `LogL*`,"
"ROUND(1/star.plx_arcsec,1) AS Dist,"
"ROUND(log10(disk_r.ldisk_lstar)) as Log_f,"
"disk_r.temp as T_disk")
# here we decide which samples get all targets, for now "everything"
# and "public" get everything, but this could be changed so that
# "public" is some subset of "everything"
if sample == 'everything' or sample == 'public':
sel += " FROM sdb_pm"
else:
sel += (" FROM "+cfg.mysql['db_samples']+"."+sample+" "
"LEFT JOIN sdb_pm USING (sdbid)")
sel += (" LEFT JOIN simbad USING (sdbid)"
" LEFT JOIN star on sdbid=star.id"
" LEFT JOIN disk_r on sdbid=disk_r.id"
" LEFT JOIN hd USING (sdbid)"
" LEFT JOIN hip USING (sdbid)"
" LEFT JOIN gj USING (sdbid)"
" LEFT JOIN tmass USING (sdbid)"
" LEFT JOIN phot USING (sdbid)"
" WHERE sdb_pm.sdbid IS NOT NULL"
" ORDER by raj2000")
# limit table sizes
if sample != 'everything':
sel += " LIMIT "+str(cfg.www['tablemax'])+";"
cursor.execute(sel)
tsamp = Table(names=cursor.column_names,
dtype=('S1000','S50','S50','S50',
'S4','S8','S8','S10','S5','S4','S6','S4','S12'))
for row in cursor:
add = [str(x) for x in row]
tsamp.add_row(add)
for n in tsamp.colnames:
none = np.where(tsamp[n] == 'None')
tsamp[n][none] = '-'
print(" got ",len(tsamp)," rows for html table")
# get the table as xml
s = io.StringIO()
w = xml.writer.XMLWriter(s)
with w.xml_cleaning_method('bleach_clean',tags=['a','img','span'],
attributes=['src','href','target']):
with w.tag('table',attrib={'class':'display compact','id':sample}):
with w.tag('thead',attrib={'class':'datatable_header'}):
with w.tag('tr'):
for col in tsamp.colnames:
w.element('td',text=col)
for i in range(len(tsamp)):
with w.tag('tr'):
for j,txt in enumerate(tsamp[i]):
if j == 0:
w.element('td',text=txt,
attrib={'class':'td_img_hover'})
else:
w.element('td',text=txt)
# write the table out to html
env = jinja2.Environment(autoescape=False,
loader=jinja2.PackageLoader('sdf',package_path='www/templates'))
template = env.get_template("sample_table.html")
html = template.render(title=sample,table=s.getvalue(),
creation_time=datetime.utcnow().strftime("%d/%m/%y %X"))
with io.open(file, mode='w', encoding='utf-8') as f:
f.write(html)
def sample_table_votable(cursor, sample, file_path=None):
"""Generate a votable of the results.
Change sdf.config.mysql to specify db tables to look in.
Parameters
----------
cursor : mysql.connector.connect.cursor
Cursor pointing to main sdb database.
sample : str
Name of sample.
file_path : str, optional
Where to put the file, defaults set by www config.
"""
# create dir and .htaccess if needed
if file_path is None:
wwwroot = cfg.file['www_root']+'samples/'
www.create_dir(wwwroot,sample)
file_path = wwwroot+sample+'/'
# generate the mysql statement
sel = "SELECT *"
if sample == 'everything' or sample == 'public':
sel += " FROM sdb_pm"
else:
sel += (" FROM "+cfg.mysql['db_samples']+"."+sample+" "
"LEFT JOIN sdb_pm USING (sdbid)")
sel += (" LEFT JOIN simbad USING (sdbid)"
" LEFT JOIN "+cfg.mysql['db_results']+".star ON sdbid=star.id"
" LEFT JOIN "+cfg.mysql['db_results']+".disk_r USING (id)"
" LEFT JOIN "+cfg.mysql['db_results']+".model USING (id)"
" WHERE sdb_pm.sdbid IS NOT NULL"
" ORDER by raj2000, disk_r.temp")
# limit table sizes
if sample != 'everything':
sel += " LIMIT "+str(cfg.www['votmax'])+";"
cursor.execute(sel)
rows = cursor.fetchall()
tsamp = Table(rows=rows,names=cursor.column_names)
# add some url columns with links
tsamp['url'] = np.core.defchararray.add(
np.core.defchararray.add(
np.repeat(cfg.www['site_url']+'seds/masters/',len(tsamp)),tsamp['sdbid']
),
np.repeat('/public/',len(tsamp))
)
# to photometry file
tsamp['phot_url'] = np.core.defchararray.add(
np.core.defchararray.add(
tsamp['url'],tsamp['sdbid']
),
np.repeat('-rawphot.txt',len(tsamp))
)
# to mnest folder
tsamp['mnest_url'] = np.core.defchararray.add(
np.core.defchararray.add(
tsamp['url'],tsamp['sdbid']
),
np.repeat(cfg.fitting['pmn_dir_suffix'], len(tsamp))
)
# to best fit model json
tsamp['model_url'] = np.empty(len(tsamp),dtype='U150')
for i,comps in enumerate(tsamp['model_comps']):
if comps is not None:
try:
tsamp['model_url'][i] = (
cfg.www['site_url'] + 'seds/masters/' +
tsamp['sdbid'][i] + '/public/' +
tsamp['sdbid'][i] + cfg.fitting['pmn_dir_suffix'] + '/' +
cfg.fitting['model_join'].join(ast.literal_eval(comps)) +
cfg.fitting['pmn_model_suffix'] + '.json'
)
except ValueError:
raise utils.SdfError("{}".format(comps))
print(" got ",len(tsamp)," rows for votable")
# this may get written to the votable in the future...
tsamp.meta = {'updated':datetime.utcnow().strftime("%d/%m/%y %X")}
tsamp.write(file_path+sample+'.xml',
format='votable',overwrite=True)
def sample_table_photometry(cursor, sample, file_path=None):
"""Generate a table of the photometry.
Seems that votable can't have a blank entry where there needn't be
one (e.g. here an observed flux when none was observed). So write
as a csv.
Change sdf.config.mysql to specify db tables to look in.
Parameters
----------
cursor : mysql.connector.connect.cursor
Cursor pointing to main sdb database.
sample : str
Name of sample.
file_path : str, optional
Where to put the file, defaults set by www config.
"""
# create dir and .htaccess if needed
if file_path is None:
wwwroot = cfg.file['www_root']+'samples/'
www.create_dir(wwwroot,sample)
file_path = wwwroot+sample+'/'
# generate the mysql statement
sel = "SELECT name, phot.*"
if sample == 'everything' or sample == 'public':
sel += " FROM sdb_pm"
else:
sel += (" FROM "+cfg.mysql['db_samples']+"."+sample+" "
"LEFT JOIN "+cfg.mysql['db_results']+".phot ON sdbid=id"
" WHERE comp_no=-1 ORDER BY filter")
# these are large, so don't limit table sizes
# if sample != 'everything':
# sel += " LIMIT "+str(cfg.www['votmax'])+";"
cursor.execute(sel)
rows = cursor.fetchall()
tsamp = Table(rows=rows,names=cursor.column_names,masked=True)
print(" got ",len(tsamp)," rows for photometry table")
# mask blank entries
for n in tsamp.colnames:
no = tsamp[n] == None
if np.any(no):
tsamp[n].mask = no
tsamp.write(file_path+sample+'_photometry.csv',
format='csv',overwrite=True)
def sample_table_temp_tables(cursor):
"""Create temporary tables for creating sample tables."""
cursor.execute("DROP TABLE IF EXISTS tmass;")
cursor.execute("CREATE TEMPORARY TABLE tmass SELECT sdbid,GROUP_CONCAT(xid) as xid"
" FROM sdb_pm LEFT JOIN xids USING (sdbid) WHERE xid REGEXP('^2MASS')"
" GROUP BY sdbid;")
cursor.execute("ALTER TABLE tmass ADD INDEX sdbid_tmass (sdbid);")
cursor.execute("DROP TABLE IF EXISTS hd;")
cursor.execute("CREATE TEMPORARY TABLE hd SELECT sdbid,GROUP_CONCAT(xid) as xid"
" FROM sdb_pm LEFT JOIN xids USING (sdbid) WHERE xid REGEXP('^HD')"
" GROUP BY sdbid;")
cursor.execute("ALTER TABLE hd ADD INDEX sdbid_hd (sdbid);")
cursor.execute("DROP TABLE IF EXISTS hip;")
cursor.execute("CREATE TEMPORARY TABLE hip SELECT sdbid,GROUP_CONCAT(xid) as xid"
" FROM sdb_pm LEFT JOIN xids USING (sdbid) WHERE xid REGEXP('^HIP')"
" GROUP BY sdbid;")
cursor.execute("ALTER TABLE hip ADD INDEX sdbid_hip (sdbid);")
cursor.execute("DROP TABLE IF EXISTS gj;")
cursor.execute("CREATE TEMPORARY TABLE gj SELECT sdbid,GROUP_CONCAT(xid) as xid"
" FROM sdb_pm LEFT JOIN xids USING (sdbid) WHERE xid REGEXP('^GJ')"
" GROUP BY sdbid;")
cursor.execute("ALTER TABLE gj ADD INDEX sdbid_gj (sdbid);")
cursor.execute("DROP TABLE IF EXISTS phot;")
cursor.execute("CREATE TEMPORARY TABLE phot SELECT"
" id as sdbid,ROUND(-2.5*log10(ANY_VALUE(model_jy)/3882.37),1) as Vmag"
" FROM "+cfg.mysql['db_results']+".phot WHERE filter='VJ' GROUP BY id;")
cursor.execute("ALTER TABLE phot ADD INDEX sdbid_phot (sdbid);")
cursor.execute("DROP TABLE IF EXISTS star;")
cursor.execute("CREATE TEMPORARY TABLE star SELECT"
" id,ROUND(ANY_VALUE("+cfg.mysql['db_results']+".star.teff),0) as teff,"
" ANY_VALUE(plx_arcsec) as plx_arcsec, SUM(lstar) as lstar"
" from "+cfg.mysql['db_results']+".star"
" GROUP BY id;")
cursor.execute("ALTER TABLE star ADD INDEX id_star (id);")
cursor.execute("DROP TABLE IF EXISTS disk_r;")
cursor.execute("CREATE TEMPORARY TABLE disk_r SELECT"
" id,GROUP_CONCAT(ROUND(temp,1)) as temp,SUM(ldisk_lstar) as ldisk_lstar"
" from "+cfg.mysql['db_results']+".disk_r"
" GROUP BY id;")
cursor.execute("ALTER TABLE disk_r ADD INDEX id_dr (id);")
|
# flake8: NOQA
from cupyx.scipy.linalg.special_matrices import (
tri, tril, triu, toeplitz, circulant, hankel,
hadamard, leslie, kron, block_diag, companion,
helmert, hilbert, dft,
fiedler, fiedler_companion, convolution_matrix
)
from cupyx.scipy.linalg.solve_triangular import solve_triangular # NOQA
from cupyx.scipy.linalg.decomp_lu import lu, lu_factor, lu_solve # NOQA
|
# -*- coding: utf-8 -*-
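# Board convention (inferred from the functions below): coordinates are (x, y) on an
# 8x8 board with 0 <= x, y <= 7; pieces in cord_list_of_bottom_pieces move towards
# increasing y, all other pieces move towards decreasing y. The returned dicts map a
# destination square to 0 for a plain move, or to the captured piece's square for a jump.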
def possible_moves(cordy_pionka, cord_list_of_bottom_pieces, cord_list_of_top_pieces):
return_dict = dict()
if cordy_pionka in cord_list_of_bottom_pieces:
if (cordy_pionka[0]-1, cordy_pionka[1]+1) not in cord_list_of_top_pieces + cord_list_of_bottom_pieces:
if cordy_pionka[0]-1 >= 0 and cordy_pionka[1]+1 <= 7:
return_dict[(cordy_pionka[0] - 1, cordy_pionka[1] + 1)] = 0
if (cordy_pionka[0]+1, cordy_pionka[1]+1) not in cord_list_of_top_pieces + cord_list_of_bottom_pieces:
if cordy_pionka[0]+1 <= 7 and cordy_pionka[1]+1 <= 7:
return_dict[(cordy_pionka[0] + 1, cordy_pionka[1] + 1)] = 0
else:
if (cordy_pionka[0]-1, cordy_pionka[1]-1) not in cord_list_of_top_pieces + cord_list_of_bottom_pieces:
if cordy_pionka[0]-1 >= 0 and cordy_pionka[1]-1 >= 0:
return_dict[(cordy_pionka[0] - 1, cordy_pionka[1] - 1)] = 0
if (cordy_pionka[0]+1, cordy_pionka[1]-1) not in cord_list_of_top_pieces + cord_list_of_bottom_pieces:
if cordy_pionka[0]+1 <= 7 and cordy_pionka[1]-1 >= 0:
return_dict[(cordy_pionka[0] + 1, cordy_pionka[1] - 1)] = 0
return return_dict
def possible_attacks(cordy_pionka, cord_list_of_bottom_pieces, cord_list_of_top_pieces):
if cordy_pionka in cord_list_of_bottom_pieces:
przeciwnik = cord_list_of_top_pieces
else:
przeciwnik = cord_list_of_bottom_pieces
wszystkie = cord_list_of_bottom_pieces + cord_list_of_top_pieces
return_dict = dict()
if (cordy_pionka[0] - 1, cordy_pionka[1] + 1) in przeciwnik and (cordy_pionka[0] - 2, cordy_pionka[1] + 2) not in wszystkie:
if cordy_pionka[0] - 2 >= 0 and cordy_pionka[1] + 2 <= 7:
return_dict[(cordy_pionka[0] - 2, cordy_pionka[1] + 2)] = (cordy_pionka[0] - 1, cordy_pionka[1] + 1)
if (cordy_pionka[0] + 1, cordy_pionka[1] + 1) in przeciwnik and (cordy_pionka[0] + 2, cordy_pionka[1] + 2) not in wszystkie:
if cordy_pionka[0] + 2 <= 7 and cordy_pionka[1] + 2 <= 7:
return_dict[(cordy_pionka[0] + 2, cordy_pionka[1] + 2)] = (cordy_pionka[0] + 1, cordy_pionka[1] + 1)
if (cordy_pionka[0] + 1, cordy_pionka[1] - 1) in przeciwnik and (cordy_pionka[0] + 2, cordy_pionka[1] - 2) not in wszystkie:
if cordy_pionka[0] + 2 <= 7 and cordy_pionka[1]-2 >= 0:
return_dict[(cordy_pionka[0] + 2, cordy_pionka[1] - 2)] = (cordy_pionka[0] + 1, cordy_pionka[1] - 1)
if (cordy_pionka[0] - 1, cordy_pionka[1] - 1) in przeciwnik and (cordy_pionka[0] - 2, cordy_pionka[1] - 2) not in wszystkie:
if cordy_pionka[0] - 2 >= 0 and cordy_pionka[1] - 2 >= 0:
return_dict[(cordy_pionka[0] - 2, cordy_pionka[1] - 2)] = (cordy_pionka[0] - 1, cordy_pionka[1] - 1)
return return_dict
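# Example (hypothetical coordinates): with bottom = [(2, 2)] and top = [(3, 3)],
# possible_moves((2, 2), bottom, top)   -> {(1, 3): 0}          (the (3, 3) diagonal is blocked)
# possible_attacks((2, 2), bottom, top) -> {(4, 4): (3, 3)}     (land on (4, 4), capturing (3, 3))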
|
RSA_MOD = 104890018807986556874007710914205443157030159668034197186125678960287470894290830530618284943118405110896322835449099433232093151168250152146023319326491587651685252774820340995950744075665455681760652136576493028733914892166700899109836291180881063097461175643998356321993663868233366705340758102567742483097 # noqa
RSA_KEY = 257
ENROLL_HOSTS = {
"CN": "mobile-service.battlenet.com.cn",
# "EU": "m.eu.mobileservice.blizzard.com",
# "US": "m.us.mobileservice.blizzard.com",
# "EU": "eu.mobile-service.blizzard.com",
# "US": "us.mobile-service.blizzard.com",
"default": "mobile-service.blizzard.com",
}
|
#!/usr/bin/env python
# Copyright (C) 2015 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import atexit
from distutils import spawn
import hashlib
import os
import shutil
import subprocess
import sys
import tarfile
import tempfile
def extract(path, outdir, bin):
if os.path.exists(os.path.join(outdir, bin)):
return # Another process finished extracting, ignore.
# Use a temp directory adjacent to outdir so shutil.move can use the same
# device atomically.
tmpdir = tempfile.mkdtemp(dir=os.path.dirname(outdir))
def cleanup():
try:
shutil.rmtree(tmpdir)
except OSError:
pass # Too late now
atexit.register(cleanup)
def extract_one(mem):
dest = os.path.join(outdir, mem.name)
tar.extract(mem, path=tmpdir)
try:
os.makedirs(os.path.dirname(dest))
except OSError:
pass # Either exists, or will fail on the next line.
shutil.move(os.path.join(tmpdir, mem.name), dest)
with tarfile.open(path, 'r:gz') as tar:
for mem in tar.getmembers():
if mem.name != bin:
extract_one(mem)
# Extract bin last so other processes only short circuit when extraction is
# finished.
extract_one(tar.getmember(bin))
def main(args):
path = args[0]
suffix = '.npm_binary.tgz'
tgz = os.path.basename(path)
parts = tgz[:-len(suffix)].split('@')
if not tgz.endswith(suffix) or len(parts) != 2:
print('usage: %s <path/to/npm_binary>' % sys.argv[0], file=sys.stderr)
return 1
name, _ = parts
# Avoid importing from gerrit because we don't want to depend on the right CWD.
sha1 = hashlib.sha1(open(path, 'rb').read()).hexdigest()
outdir = '%s-%s' % (path[:-len(suffix)], sha1)
rel_bin = os.path.join('package', 'bin', name)
bin = os.path.join(outdir, rel_bin)
if not os.path.isfile(bin):
extract(path, outdir, rel_bin)
nodejs = spawn.find_executable('nodejs')
if nodejs:
# Debian installs Node.js as 'nodejs', due to a conflict with another
# package.
subprocess.check_call([nodejs, bin] + args[1:])
else:
subprocess.check_call([bin] + args[1:])
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
# Generated by Django 3.1.5 on 2021-03-05 02:17
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('imageais', '0005_auto_20210304_2050'),
]
operations = [
migrations.RenameModel(
old_name='ImageFaceAnalyis',
new_name='ImageFaceAnalysis',
),
]
|
import logging
import fabric.api
import fabric.operations
import cloudenvy.envy
class Ssh(cloudenvy.envy.Command):
def _build_subparser(self, subparsers):
help_str = 'SSH into your Envy.'
subparser = subparsers.add_parser('ssh', help=help_str,
description=help_str)
subparser.set_defaults(func=self.run)
subparser.add_argument('-n', '--name', action='store', default='',
help='Specify custom name for an Envy.')
return subparser
def run(self, config, args):
envy = cloudenvy.envy.Envy(config)
if envy.ip():
disable_known_hosts = ('-o UserKnownHostsFile=/dev/null'
' -o StrictHostKeyChecking=no')
forward_agent = '-o ForwardAgent=yes'
options = [disable_known_hosts]
if envy.forward_agent:
options.append(forward_agent)
fabric.operations.local('ssh %s %s@%s' % (' '.join(options),
envy.remote_user,
envy.ip()))
else:
logging.error('Could not determine IP.')
|
# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Various helper utils for the core
"""
def split_stack(stack):
"""
Splits the stack into two, before and after the framework
"""
stack_before, stack_after = list(), list()
in_before = True
for frame in stack:
if 'flow/core' in frame[0]:
in_before = False
else:
if in_before:
stack_before.append(frame)
else:
stack_after.append(frame)
return stack_before, stack_after
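# Illustration (paths invented): for frames such as
#   [('/app/main.py', ...), ('/site-packages/flow/core/engine.py', ...), ('/app/task.py', ...)]
# split_stack returns ([main.py frame], [task.py frame]); the 'flow/core' frames are
# dropped and mark the boundary between the two halves.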
def filter_framework_frames(stack):
"""
Returns a stack clean of framework frames
"""
result = list()
for frame in stack:
# XXX Windows?
if 'flow/core' not in frame[0]:
result.append(frame)
return result
def get_context_with_traceback(context):
"""
Returns the very first context that contains traceback information
"""
if context.tb_list:
return context
if context.parent:
return get_context_with_traceback(context.parent)
def extract_stacks_from_contexts(context, stacks_list=None):
"""
Returns a list of stacks extracted from the AsyncTaskContext in the right
order
"""
if stacks_list is None:
stacks_list = list()
if context.stack_list:
stacks_list.append(context.stack_list)
if context.parent is not None:
return extract_stacks_from_contexts(context.parent, stacks_list)
else:
stacks_list.reverse()
return stacks_list
def log_task_context(context, logger):
"""
A helper for printing contexts as a tree
"""
from .async_root_task_context import AsyncRootTaskContext
root_context = None
while root_context is None:
if isinstance(context, AsyncRootTaskContext):
root_context = context
context = context.parent
_log_task_context(root_context, logger)
def _log_task_context(context, logger, indent=0):
logger.debug(" " * indent + '%r', context)
indent += 2
if not hasattr(context, 'children'):
return
for sub_context in context.children.union(context.daemon_children):
_log_task_context(sub_context, logger, indent)
|
import six
from rowboat.types import SlottedModel
class PluginConfig(SlottedModel):
def load(self, obj, *args, **kwargs):
obj = {
k: v for k, v in six.iteritems(obj)
if k in self._fields and not self._fields[k].metadata.get('private')
}
return super(PluginConfig, self).load(obj, *args, **kwargs)
|
# -*- coding: utf-8 -*-
from App import App
from PyQt5.Qt import *
from pyqtgraph.Qt import QtCore, QtGui
# Operating system
import sys
import time
def main(args):
global app
app = App(args)
app.exec_()
if __name__ == "__main__":
main(sys.argv)
|
### Title: Converts mathematical expressions
### Purpose: This program converts basic mathematical expressions to Python
### Author: Valmor Mantelli Jr.
### Date: 24/11/20148
### Version: 0.0.1
10 / 20 * 30
4 ** 2 / 30
(9 ** 2 + 2) * 6 - 1
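# Evaluated values, for reference: 10 / 20 * 30 -> 15.0; 4 ** 2 / 30 -> 0.5333...;
# (9 ** 2 + 2) * 6 - 1 -> 497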
|
from Test import Test, Test as test
'''
Timmy & Sarah think they are in love, but around where they live, they will only know once they pick a flower each. If one of the flowers has an even number of petals and the other has an odd number of petals it means they are in love.
Write a function that will take the number of petals of each flower and return true if they are in love and false if they aren't.
'''
def lovefunc( flower1, flower2 ):
if (flower1 % 2 == 0 and flower2 % 2 == 1) or (flower1 % 2 == 1 and flower2 % 2 == 0):
return True
return False
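# Equivalent one-liner: the pair is "in love" exactly when the petal counts have
# different parity, i.e. return (flower1 + flower2) % 2 == 1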
test.assert_equals(lovefunc(1, 4), True)
test.assert_equals(lovefunc(2, 2), False)
test.assert_equals(lovefunc(0, 1), True)
test.assert_equals(lovefunc(0, 0), False) |
# coding=utf-8
# Copyright 2017-2019 The THUMT Authors
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
_ENGINE = None
def enable_distributed_training():
global _ENGINE
try:
import horovod.tensorflow as hvd
_ENGINE = hvd
hvd.init()
except ImportError:
sys.stderr.write("Error: You must install horovod first in order to"
" enable distributed training.\n")
exit()
def is_distributed_training_mode():
return _ENGINE is not None
def rank():
return _ENGINE.rank()
def local_rank():
return _ENGINE.local_rank()
def size():
return _ENGINE.size()
def all_reduce(tensor):
return _ENGINE.allreduce(tensor, compression=_ENGINE.Compression.fp16)
def get_broadcast_hook():
return _ENGINE.BroadcastGlobalVariablesHook(0)
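# Typical flow (sketch): call enable_distributed_training() once at startup, then use
# rank()/local_rank()/size() to shard data and pin GPUs, wrap gradient tensors with
# all_reduce(), and add get_broadcast_hook() to the session hooks so variables are
# broadcast from rank 0.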
|
from loguru import logger
from data.dbase.djconn import start_connection
try:
dbname, schema = start_connection()
except Exception as e:
logger.warning(f"Failed to connect to datajoint database:\n{e}")
connected = False
else:
logger.debug(f"Connected to database: {dbname}")
connected = True
from data.dbase import db_tables
|
# -*- coding: utf-8 -*-
"""
The steps file contains various generally reusable image processing steps.
"""
import hashlib
from base64 import b64encode
import numpy as np
import scipy.ndimage as ndi
from pilyso_io.imagestack import Dimensions
from ..application.application import Meta
from mfisp_boxdetection import find_box
from molyso.generic.rotation import find_rotation, rotate_image
from molyso.generic.registration import translation_2x1d, shift_image
from ..pipeline.executor import Skip, Collected
from ..misc.h5writer import CompressedObject
class Delete(object):
pass
class Compress(object):
pass
def set_result(**kwargs):
def _inner(result):
for k, v in kwargs.items():
if v is Delete:
del result[k]
elif v is Compress:
result[k] = CompressedObject(result[k])
else:
result[k] = v
return result
return _inner
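# Example (key names illustrative): set_result(image=Delete, crop=Compress, done=True)
# returns a callable that deletes result['image'], replaces result['crop'] with a
# CompressedObject, and sets result['done'] = True.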
# noinspection PyUnusedLocal
def image_source(ims, meta, image=None):
return ims.view(Dimensions.Position, Dimensions.Time)[meta.pos, meta.t]
# noinspection PyUnusedLocal
def calculate_image_sha256_hash(image, image_sha256_hash=None):
hasher = hashlib.sha256()
hasher.update(image.tobytes())
hash_value = b64encode(hasher.digest()).decode()
return hash_value
def image_to_ndarray(image):
return np.array(image)
# noinspection PyUnusedLocal
def pull_metadata_from_image(image, timepoint=None, position=None, calibration=None):
return image, image.meta.time, image.meta.position, image.meta.calibration
_substract_start_frame_start_images = {}
# noinspection PyUnusedLocal
def substract_start_frame(meta, ims, reference_timepoint, image, subtracted_image=None):
gaussian_blur_radius = 15.0
if meta.pos not in _substract_start_frame_start_images:
reference = image_source(ims, Meta(t=reference_timepoint, pos=meta.pos))
blurred = ndi.gaussian_filter(reference, gaussian_blur_radius)
_substract_start_frame_start_images[meta.pos] = blurred
else:
blurred = _substract_start_frame_start_images[meta.pos]
image = image.astype(np.float32)
image /= blurred
image -= image.min()
image /= image.max()
return image
_box_cache_fft_cache = {}
_box_cache_boxes = {}
_box_cache_angles = {}
def _box_detection_get_parameters(ims, timepoint, pos):
reference = image_source(ims, Meta(t=timepoint, pos=pos))
angle = find_rotation(reference)
reference = rotate_image(reference, angle)
shift, fft_a = translation_2x1d(image_a=reference, image_b=reference, return_a=True)
try:
reference = reference.astype(np.float32)
cleaned_reference = reference - ndi.uniform_filter(reference, 25) # "box" blur
cleaned_reference[cleaned_reference < 0] = 0
cleaned_reference /= cleaned_reference.max()
# qimshow(cleaned_reference)
crop = find_box(cleaned_reference, throw=True, subsample=2)
t, b, l, r = crop
cleaned_reference[t, :] = 1
cleaned_reference[b, :] = 1
cleaned_reference[:, l] = 1
cleaned_reference[:, r] = 1
except RuntimeError:
# noinspection PyCompatibility
raise Skip(Meta(pos=pos, t=Collected)) from None
return angle, fft_a, crop
# noinspection PyUnusedLocal
def box_detection(ims, image, meta, reference_timepoint, shift=None, crop=None, angle=None):
# probably implement a voting scheme?
if meta.pos not in _box_cache_boxes:
_box_cache_angles[meta.pos], _box_cache_fft_cache[meta.pos], _box_cache_boxes[meta.pos] = \
_box_detection_get_parameters(ims, reference_timepoint, meta.pos)
angle = _box_cache_angles[meta.pos]
image = rotate_image(image, angle)
shift, = translation_2x1d(image_a=None, image_b=image, ffts_a=_box_cache_fft_cache[meta.pos])
crop = _box_cache_boxes[meta.pos]
return shift, crop, angle
def create_boxcrop_from_subtracted_image(subtracted_image, shift, angle, crop, result):
result.shift_x = shift[0]
result.shift_y = shift[1]
result.crop_t, result.crop_b, result.crop_l, result.crop_r = crop
t, b, l, r = crop
subtracted_image = rotate_image(subtracted_image, angle)
subtracted_image = shift_image(subtracted_image, shift, background='blank')
box = subtracted_image[t:b, l:r]
result.image = box
return result
def rescale_image_to_uint8(image):
image = image.astype(np.float32)
image -= image.min()
image /= image.max()
image *= 255.0
return image.astype(np.uint8)
def copy_calibration(collected, result):
calibrations = np.array([item['calibration'] for item in collected.values()])
calibration = calibrations[0]
if not (calibrations == calibration).all():
print("WARNING: Calibration of individual timepoints is not identical. This is highly unexpected behavior.")
result.calibration = calibration
return result
|
'''
# coding: utf-8
Based on https://github.com/iunullc/machine-learning-sdk/blob/master/docs/model_testing_methodology.md
'''
import os
import iuml.tools.auxiliary as iutools
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
from multiprocessing import pool
import multiprocessing
from datetime import datetime
from iuml.tools.train_utils import create_trainer
from iuml.tools.visual import *
from iuml.tools.net import download_images
from iuml.tools.image_utils import *
def run_test(config, common, logger):
root_path = config['root_path']
# Need a `config` object which is a dictionary:
# {
# 'creds': 'username:password'
# 'mongo_collection': 'sensordata',
# 'mongo_connect': 'mongodb://{}@candidate.21.mongolayer.com:11857/app80974971',
# 'mongo_db': 'app80974971'
# }
# Alternatively, `creds` may be omitted; in that case pass the name of an environment variable holding the credentials:
# iutools.connect_prod_collection(env='MONGO_CREDS', **config)
collection = iutools.connect_prod_collection(**common)
# ### Parameterize Query ###
#
# title, facility, spaces - fixed
# threshold - what is the minimum number of datasets extracted over the given date range to be considered testable
# days - over how many days (starting @ midnight of the next day)
# Relevant query fields
title = 'CV_bud_grid'
facility = 'sanysabel'
spaces = config['spaces']
# collect over...
days = config['days']
# ### Fix Date Range ###
now = datetime.utcnow() + timedelta(days=1)
# set to the midnight of the next day
endtime = datetime(now.year, now.month, now.day)
date_range = (now - timedelta(days=days), endtime)
# ### Query for the desired date range ###
def retrieve_dataset(space):
df_raw = iutools.get_cv_datasets_for_date_range(title, facility, space, collection, date_range)
if df_raw.size == 0:
return None
df_zero_buds, df_nonzero_buds, _ = iutools.process_sensor_data(df_raw)
df = pd.concat([df_nonzero_buds, df_zero_buds], ignore_index=True)
df['num_buds'] = df['value_size']
df.drop(['_id', 'title', 'value_size'], inplace=True, axis=1)
return df
# Retrieve datasets in the desired range from the DB
print("Running query on {} days of datasets".format(days))
retriever = pool.ThreadPool(multiprocessing.cpu_count())
datasets = retriever.map(retrieve_dataset, spaces)
datasets = [d for d in datasets if d is not None]
if len(datasets) == 0:
print("Nothing could be retrieved")
return
# filter out images
dfs = pd.concat(datasets)
dfs['hour'] = dfs['timestamp'].dt.hour
dfs.loc[dfs['hour'] == 0, 'hour'] = 24  # treat midnight as hour 24
dfs = dfs.set_index(['space', 'dataset_id'])
start_hour, end_hour = eval(config['filter_utc_hours'])
if start_hour >= end_hour:
raise ValueError("start hour shoulr be greater than end hour")
# Filter out by UTC time (e.g: 3, 23)
dfs = dfs[dfs["hour"] >= start_hour]
dfs = dfs[dfs["hour"] <= end_hour]
# ### Aggregate by Space & Dataset ###
group = dfs.groupby(level=[0, 1])
d_agg_count = group['num_buds'].agg({'total': 'sum', 'count' : 'count'})
metadata = group[['dataset_time']].first()
merged = d_agg_count.merge(metadata, left_index=True, right_index=True)
# ### Create trainer & instantiate model###
# We need image shape
net = 'Unet'
batch_size = 1
model_file_name = os.path.join(root_path, config['model_file'])
params = dict(batch_size = batch_size, model_file=model_file_name)
# not using the trainer for actual training, pass '' for root_train parameter
trainer = create_trainer(net, '', **params)
trainer.instantiate_model(predict=True)
input_shape = trainer.model.input_shape[1:3][::-1]
# ### Sample URLs from each dataset ###
#
# A directory will be created: "sample_<current date time >" and everything will be saved there:
# - images
# - test configuration data
total_samples = config['total_samples']
samples_per_dataset = config['samples_per_dataset']
total_datasets = int(np.ceil(total_samples / samples_per_dataset))
have_spaces = set(map(lambda v: v[0], merged.index.values))
datasets_per_space = int(np.ceil(total_datasets / len(have_spaces)))
print("total datasets: {}, datasets_per_space: {}".format(total_datasets, datasets_per_space))
sampled = []
for space in have_spaces:
# get a list of datasets
df_merged_space = merged.loc[space]
dataset_ids = df_merged_space.index
if total_datasets - datasets_per_space >= 0:
sample_size = datasets_per_space
total_datasets -= datasets_per_space
else:
sample_size = total_datasets
keep_sampling = True
# sometimes, there are fewer datasets than sample size!
while keep_sampling and sample_size > 0:
try:
dataset_ids = np.random.choice(dataset_ids, size=sample_size, replace=False)
keep_sampling = False
except:
sample_size -= 1
# from each datset, sample images
if sample_size > 0:
im_samples = []
for d_idx in dataset_ids:
try:
im_samples.append(dfs.loc[(space, d_idx)].sample(n=samples_per_dataset))
except:
continue
# flatten
if len(im_samples) > 0:
sampled.append(pd.concat(im_samples))
if len(sampled) == 0:
print("nothing sampled")
return
df_sampled = pd.concat(sampled)
# ### Output the Test Configuration File ###
# #### Download Samples ####
now = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
sampled_path = os.path.join(root_path, r'sampled_{}'.format(now))
if not os.path.exists(sampled_path):
os.makedirs(sampled_path)
print("Starting async download of images... to {}".format(sampled_path))
images = download_images(df_sampled.loc[:, 'images'], img_shape = input_shape, out_dir=sampled_path)
# #### Validate download and remove entries that did not download correctly ####
bad_idxs = [i for i, im in enumerate(images) if len(np.nonzero(im)[0]) == 0]
df_reset = df_sampled.reset_index()
df_reset = df_reset.drop(bad_idxs)
df_sampled = df_reset.set_index(['space', 'dataset_id'])
df_sampled.shape
# #### Create Test Config and Serialize ####
print("Creating test config in: {}".format(sampled_path))
sampled_list = []
for space in have_spaces:
try:
for ds_id in set(df_sampled.loc[space].index):
total_buds_predicted = merged.loc[(space, ds_id)]['total']
dataset_size = merged.loc[(space, ds_id)]['count']
dataset_time = merged.loc[(space, ds_id)]['dataset_time'].strftime('%Y-%m-%d %H:%M:%S')
# get images
sampled = df_sampled.loc[(space, ds_id)]
sampled['file'] = sampled['images'].map(lambda x: x[x.rfind("/") + 1: ])
sampled['total_predicted'] = total_buds_predicted
sampled['dataset_time'] = dataset_time
sampled['dataset_size'] =dataset_size
sampled = sampled.drop('value', axis=1)
sampled_list.append(sampled)
except:
continue
df_sampled = pd.concat(sampled_list)
test_config_file = os.path.join(sampled_path, 'test_config.csv')
df_sampled.to_csv(test_config_file)
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'initialMessagePage.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(691, 557)
Dialog.setStyleSheet("QDialog{\n"
" \n"
" background-color: rgb(255, 255, 255);\n"
"}")
self.label = QtWidgets.QLabel(Dialog)
self.label.setGeometry(QtCore.QRect(20, 20, 81, 61))
self.label.setStyleSheet("QLabel{\n"
" background-image: url(:/newPrefix/win11.png);\n"
"}")
self.label.setText("")
self.label.setPixmap(QtGui.QPixmap(":/newPrefix/win11.png"))
self.label.setScaledContents(True)
self.label.setObjectName("label")
self.label_2 = QtWidgets.QLabel(Dialog)
self.label_2.setGeometry(QtCore.QRect(90, 30, 331, 41))
font = QtGui.QFont()
font.setFamily("Noto Sans")
font.setPointSize(32)
font.setItalic(False)
self.label_2.setFont(font)
self.label_2.setStyleSheet("QLabel{\n"
" color : rgb(47, 47, 212) \n"
"}")
self.label_2.setObjectName("label_2")
self.labelInitialMessageTitle = QtWidgets.QLabel(Dialog)
self.labelInitialMessageTitle.setGeometry(QtCore.QRect(40, 120, 581, 61))
font = QtGui.QFont()
font.setFamily("Noto Sans")
font.setPointSize(20)
font.setItalic(False)
self.labelInitialMessageTitle.setFont(font)
self.labelInitialMessageTitle.setStyleSheet("QLabel{\n"
" color: black\n"
"}")
self.labelInitialMessageTitle.setText("")
self.labelInitialMessageTitle.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.labelInitialMessageTitle.setWordWrap(True)
self.labelInitialMessageTitle.setObjectName("labelInitialMessageTitle")
self.pushButtonInitialNext = QtWidgets.QPushButton(Dialog)
self.pushButtonInitialNext.setGeometry(QtCore.QRect(580, 510, 91, 31))
font = QtGui.QFont()
font.setFamily("Noto Sans")
font.setPointSize(13)
self.pushButtonInitialNext.setFont(font)
self.pushButtonInitialNext.setStyleSheet("QPushButton{\n"
" background-color: rgb(255, 254, 226);\n"
" border-radius: 0px;\n"
" color: black;\n"
" border: 1px dotted rgb(60,96,255)\n"
"}\n"
"\n"
"QPushButton:hover{\n"
" color: rgb(60, 96, 255)\n"
"}")
self.pushButtonInitialNext.setObjectName("pushButtonInitialNext")
self.labelInitialMessage = QtWidgets.QLabel(Dialog)
self.labelInitialMessage.setGeometry(QtCore.QRect(40, 200, 621, 281))
font = QtGui.QFont()
font.setFamily("Noto Sans")
font.setPointSize(15)
self.labelInitialMessage.setFont(font)
self.labelInitialMessage.setStyleSheet("QLabel{\n"
" color: black\n"
"}")
self.labelInitialMessage.setText("")
self.labelInitialMessage.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.labelInitialMessage.setWordWrap(True)
self.labelInitialMessage.setObjectName("labelInitialMessage")
self.pushButtonQuit = QtWidgets.QPushButton(Dialog)
self.pushButtonQuit.setGeometry(QtCore.QRect(470, 510, 91, 31))
font = QtGui.QFont()
font.setPointSize(13)
self.pushButtonQuit.setFont(font)
self.pushButtonQuit.setStyleSheet("QPushButton{\n"
" background-color: rgb(255, 254, 226);\n"
" border-radius: 0px;\n"
" color: black;\n"
" border: 1px dotted rgb(60, 96, 255)\n"
"}\n"
"\n"
"QPushButton:hover{\n"
" color: rgb(60, 96, 255)\n"
"}")
self.pushButtonQuit.setObjectName("pushButtonQuit")
self.label_3 = QtWidgets.QLabel(Dialog)
self.label_3.setGeometry(QtCore.QRect(20, 507, 141, 31))
self.label_3.setStyleSheet("QLabel{\n"
" \n"
" background-image: url(:/newPrefix/microsoft.png);\n"
"}")
self.label_3.setText("")
self.label_3.setPixmap(QtGui.QPixmap(":/newPrefix/microsoft.png"))
self.label_3.setScaledContents(True)
self.label_3.setObjectName("label_3")
self.label_4 = QtWidgets.QLabel(Dialog)
self.label_4.setGeometry(QtCore.QRect(190, 510, 71, 21))
font = QtGui.QFont()
font.setPointSize(12)
self.label_4.setFont(font)
self.label_4.setObjectName("label_4")
self.label_5 = QtWidgets.QLabel(Dialog)
self.label_5.setGeometry(QtCore.QRect(270, 500, 71, 41))
font = QtGui.QFont()
font.setPointSize(12)
self.label_5.setFont(font)
self.label_5.setObjectName("label_5")
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
self.label_2.setText(_translate("Dialog", "Windows 11"))
self.pushButtonInitialNext.setText(_translate("Dialog", "Next"))
self.pushButtonQuit.setText(_translate("Dialog", "Quit"))
self.label_4.setText(_translate("Dialog", "<a style=\"color:black; text-decoration:none\" href=\"https://www.aka.ms/support\">Support</a>"))
        self.label_5.setText(_translate("Dialog", "<a style=\"color:black; text-decoration:none\" href=\"https://www.microsoft.com/en-us/legal\">Legal</a>"))
|
# serial data dumper for Jack and Dirty Dan
# Assumes that all formatting (comma separated) happens on the arduino
# and assumes that the arduino is probably using serial.print() to send
# the data over the serial port and assumes that the OS is windows
import serial
from datetime import datetime
import msvcrt
# change to match COM port
port = 'COM3'
# change to match baud rate
br = 115200
# open serial port (pyserial opens the port automatically when a port name is given,
# so a second ser.open() call would raise "Port is already open")
ser = serial.Serial(port, br)
# change if you want
extension = '.csv'
# create timestamped file (%H%M%S instead of %X, since ':' is not allowed in Windows filenames)
filename = 'data_' + datetime.now().strftime('%Y%m%d_%H%M%S') + extension
# open said file
fop = open(filename, 'w')
# create header if you want
# uncomment and change to suit your needs
# fop.write('DATA1,DATA2,')
print 'Press any key to exit...'
while not msvcrt.kbhit():
if ser.inWaiting():
data = ser.read(ser.inWaiting())
# display received data in console since you won't be
# able to use the arduino console at the same time
print data,
# write data to file assumes all formatting takes place on arduino
# like commas, newlines, and crap
fop.write(data)
# clean up
fop.close()
ser.close()
|
from pando.testing.client import FileUpload
def test_test_client_can_override_headers(harness):
harness.fs.www.mk(('foo.spt', '''
[---]
host = request.headers[b'Host'].decode('idna')
[---] text/html via stdlib_format
{host}'''))
response = harness.client.POST('/foo', HTTP_HOST=b'example.org')
assert response.body == b'example.org'
def test_test_client_handles_body(harness):
harness.fs.www.mk(('foo.spt', '''
[---]
bar = request.body['bar']
[---] text/html via stdlib_format
{bar}'''))
response = harness.client.POST('/foo', body={b'bar': b'42'})
assert response.body == b'42'
def test_test_client_sends_cookies(harness):
harness.fs.www.mk(('foo.spt', '''
[---]
miam = request.headers.cookie[str('miam')].value
[---] text/plain via stdlib_format
{miam}'''))
response = harness.client.POST('/foo', cookies={'miam': 'a_cookie'})
assert response.body == b'a_cookie'
def test_test_client_handles_file_upload(harness):
harness.fs.www.mk(('foo.spt', '''
[---]
bar = request.body['bar']
bar.value = bar.value.decode()
[---] text/plain via stdlib_format
{bar.filename}
{bar.type}
{bar.value}'''))
file_upload = FileUpload(data=b'Greetings, program!', filename=b'greetings.txt')
response = harness.client.POST('/foo', body={b'bar': file_upload})
assert response.body == b'greetings.txt\ntext/plain\nGreetings, program!'
def test_test_client_can_have_file_upload_content_type_overriden(harness):
harness.fs.www.mk(('foo.spt', '''
[---]
bar = request.body['bar']
[---] text/plain via stdlib_format
{bar.type}'''))
file_upload = FileUpload(
data=b'Greetings, program!',
filename=b'greetings.txt',
content_type=b'something/else',
)
response = harness.client.POST('/foo', body={b'bar': file_upload})
assert response.body == b'something/else'
def test_stateful_test_client_passes_cookies(harness):
harness.fs.www.mk(('foo.spt', '''
[---]
csrf_token = request.headers.cookie[str('csrf_token')].value
session = request.headers.cookie[str('session')].value
[---] text/plain via stdlib_format
{csrf_token} and {session}'''))
with harness.client.get_session() as sess:
sess.cookie['csrf_token'] = 'a_csrf_token'
response = sess.POST('/foo', cookies={'session': 'a_session_token'})
assert response.body == b'a_csrf_token and a_session_token'
|
num = int(input("Enter your percentage:"))
if num < 0 or num > 100:
    print("You entered wrong percentage")
elif num < 40:
    print("Failed")
elif num < 55:
    print("Fair")
elif num < 65:
    print("Good")
else:
    print("Excellent")
|
# IoU above which unmatched predictions/anchors are ignored when computing the training loss
TRAIN_YOLO_IOU_IGNORE_THRES = .7
# objectness confidence threshold applied during training
TRAIN_YOLO_CONF_THRESHOLD = .5
# objectness confidence threshold applied at test time
TEST_YOLO_CONF_THRESHOLD = .7
|
'''This task calls the `request` method of a `grizzly.users` implementation.
This is the most essential task in `grizzly`: it defines the requests that the specified load user is going to execute
against the target under test.
Instances of this task are created with the step expressions:
* [`step_task_request_text_with_name_to_endpoint`](/grizzly/framework/usage/steps/scenario/tasks/#step_task_request_text_with_name_to_endpoint)
* [`step_task_request_file_with_name_endpoint`](/grizzly/framework/usage/steps/scenario/tasks/#step_task_request_file_with_name_endpoint)
* [`step_task_request_file_with_name`](/grizzly/framework/usage/steps/scenario/tasks/#step_task_request_file_with_name)
* [`step_task_request_text_with_name`](/grizzly/framework/usage/steps/scenario/tasks/#step_task_request_text_with_name)
'''
from typing import TYPE_CHECKING, List, Optional, Any, Callable
from jinja2.environment import Template
from grizzly_extras.transformer import TransformerContentType
from grizzly_extras.arguments import parse_arguments, split_value, unquote
from ..types import RequestMethod
# need to rename to avoid unused-import collision due to RequestTask.template ?!
from . import GrizzlyTask, template # pylint: disable=unused-import
if TYPE_CHECKING: # pragma: no cover
from ..context import GrizzlyContextScenario
from ..scenarios import GrizzlyScenario
from ..users.base.response_handler import ResponseHandlerAction
class RequestTaskHandlers:
metadata: List['ResponseHandlerAction']
payload: List['ResponseHandlerAction']
def __init__(self) -> None:
self.metadata = []
self.payload = []
def add_metadata(self, handler: 'ResponseHandlerAction') -> None:
self.metadata.append(handler)
def add_payload(self, handler: 'ResponseHandlerAction') -> None:
self.payload.append(handler)
class RequestTaskResponse:
status_codes: List[int]
content_type: TransformerContentType
handlers: RequestTaskHandlers
def __init__(self) -> None:
self.status_codes = [200]
self.content_type = TransformerContentType.UNDEFINED
self.handlers = RequestTaskHandlers()
def add_status_code(self, status: int) -> None:
absolute_status = abs(status)
if absolute_status not in self.status_codes or status not in self.status_codes:
if absolute_status == status:
self.status_codes.append(status)
else:
index = self.status_codes.index(absolute_status)
self.status_codes.pop(index)
@template('name', 'endpoint', 'source')
class RequestTask(GrizzlyTask):
method: RequestMethod
name: str
endpoint: str
_template: Optional[Template]
_source: Optional[str]
response: RequestTaskResponse
def __init__(self, method: RequestMethod, name: str, endpoint: str, source: Optional[str] = None, scenario: Optional['GrizzlyContextScenario'] = None) -> None:
super().__init__(scenario)
self.method = method
self.name = name
self.endpoint = endpoint
self._template = None
self._source = source
self.response = RequestTaskResponse()
content_type: TransformerContentType = TransformerContentType.UNDEFINED
if '|' in self.endpoint:
value, value_arguments = split_value(self.endpoint)
arguments = parse_arguments(value_arguments, unquote=False)
if 'content_type' in arguments:
content_type = TransformerContentType.from_string(unquote(arguments['content_type']))
del arguments['content_type']
value_arguments = ', '.join([f'{key}={value}' for key, value in arguments.items()])
if len(value_arguments) > 0:
self.endpoint = f'{value} | {value_arguments}'
else:
self.endpoint = value
self.response.content_type = content_type
@property
def source(self) -> Optional[str]:
return self._source
@source.setter
def source(self, value: Optional[str]) -> None:
self._template = None
self._source = value
@property
def template(self) -> Optional[Template]:
if self._source is None:
return None
if self._template is None:
self._template = Template(self._source)
return self._template
def __call__(self) -> Callable[['GrizzlyScenario'], Any]:
def task(parent: 'GrizzlyScenario') -> Any:
return parent.user.request(self)
return task
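# A minimal usage sketch (illustrative only; the endpoint, name and payload below are invented,
# and it is assumed that the imported RequestMethod and TransformerContentType enums provide
# POST and JSON members). An endpoint written as '<path> | content_type=<type>' has its
# content_type argument stripped by __init__ and stored on response.content_type:
#
#     task = RequestTask(RequestMethod.POST, name='create-item',
#                        endpoint='/api/v1/items | content_type=json',
#                        source='{"name": "{{ item_name }}"}')
#     # task.endpoint               -> '/api/v1/items'
#     # task.response.content_type  -> TransformerContentType.JSON
#     # task.template.render(item_name='example')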
|
class PingbackFormulationError(Exception):
def __init__(self, *args):
# *args is used to get a list of the parameters passed in
self.args = [a for a in args] |
#HEADER
import random
from src.utils import create, search
from game.gamesrc.objects.world.items import Weapon, Armor, Potion
#CODE (Generate all items for loot tables)
#begin artifact item creation
storage = search.objects('storage')[0]
hammer = create.create_object(Weapon, key="An\'Karith's Hammer", location=storage, aliases=['epic_hammer'])
hammer.db.damage = "4d10"
hammer.db.crit_range = "18-20"
hammer.db.attribute_bonuses = {'strength': 20, 'constitution': 15, 'dexterity': 15, 'intelligence': 10}
hammer.db.desc = "This giant two handed hammer looks impossible to lift, but is quite light in hand. Runes are carved up and down the stone handle"
hammer.db.desc += " and hammer head. They periodically flash a bright red."
hammer.db.value = 3490
hammer.db.item_level = "artifact"
hammer.db.weapon_type = 'hammer'
hammer.db.lootset = 'boss'
hammer.db.skill_used = "bludgeon"
caller.msg("An\'Karith's hammer created, id: %s" % hammer.dbref)
aot = create.create_object(Weapon, key="Arm of the Tyrant", location=storage, aliases=['epic_sword'])
aot.db.damage = "3d12"
aot.db.crit_range = "18-20"
aot.db.attribute_bonuses = {'strength': 25, 'constitution': 10, 'dexterity': 10, 'intelligence': 5}
aot.db.desc = "A giant two handed sword that looks near impossible to lift, though you do notice the presence of magical runes on the hilt."
aot.db.value = 4890
aot.db.item_level = "artifact"
aot.db.weapon_type = "sword"
aot.db.lootset = 'boss'
aot.db.skill_used = "blades"
caller.msg("Arm of the Tyrant created, id: %s" % aot.dbref)
#begin rare weapon item creation
axe = create.create_object(Weapon, key="Bloodletter Axe", location=storage, aliases=['rare_axe'])
axe.db.damage = "2d8"
axe.db.attribute_bonuses = {'strength': 5, 'constitution': 5, 'intelligence': 0, 'dexterity': 5 }
axe.db.desc = "A ferocious looking axe, the blade having a layer of dried blood caked on to it."
axe.db.value = 500
axe.db.item_level = "rare"
axe.db.weapon_type = "axe"
axe.db.lootset = "miniboss;rare"
axe.db.skill_used = "blades"
caller.msg("Bloodletter Axe Created, id: %s" % axe.dbref)
sword = create.create_object(Weapon, key="Masterforged Short Sword", location=storage, aliases=['rare_sword'])
sword.db.damage = "2d6"
sword.db.attribute_bonuses = { 'strength': 5, 'constitution': 2, 'dexterity': 0, 'intelligence': 0 }
sword.db.desc = "A master work short sword with an extremely sharp edge."
sword.db.value = 289
sword.db.item_level = "rare"
sword.db.weapon_type = "sword"
sword.db.lootset = "rare"
sword.db.skill_used = "blades"
caller.msg("Masterforged Short Sword created, id: %s" % sword.dbref)
dagger = create.create_object(Weapon, key="Gut Ripper", location=storage, aliases=['rare', 'rare_dagger'])
dagger.db.damage = "2d4"
dagger.db.attribute_bonuses = { 'strength': 0, 'constitution': 4, 'dexterity': 3, 'intelligence': 0}
dagger.db.desc = "A small weapon with a deadly curved blade. The blade glows faintly, pulsing gently."
dagger.db.value = 309
dagger.db.item_level = "rare"
dagger.db.weapon_type = "dagger"
dagger.db.lootset = "rare"
dagger.db.skill_used = "blades"
caller.msg("Gut Ripper created, id: %s" % dagger.dbref)
pa = create.create_object(Weapon, key="Heroic Polearm", location=storage, aliases=['rare', 'rare_polearm'])
pa.db.damage = "2d10"
pa.db.attribute_bonuses = { 'strength': 4, 'constitution': 3, 'dexterity': 0, 'intelligence': 0 }
pa.db.desc = "A large spear with a trident style tip that slightly glows gold in the sunlight."
pa.db.value = 330
pa.db.item_level = "rare"
pa.db.weapon_type = "polearm"
pa.db.lootset = "rare"
pa.db.skill_used = "heavy"
caller.msg("Heroic Polearm created, id: %s" % pa.dbref)
#begin common weapon item creation
sword = create.create_object(Weapon, key="Longsword of the Bear", location=storage, aliases=['uncommon_sword'])
sword.db.damage = "1d8"
sword.db.attribute_bonuses = { 'strength': 3, 'constitution': 3, 'dexterity': 0, 'intelligence': 0 }
sword.db.desc = "A typical looking longsword, however there is a bear symbol carved into the shaft of the blade."
sword.db.value = random.randrange(13, 39)
sword.db.item_level = "uncommon"
sword.db.weapon_type = "sword"
sword.db.lootset = "uncommon"
sword.db.skill_used = "blades"
caller.msg("Longsword of the Bear create, id: %s" % sword.dbref)
#Merchant weapons creation
sword = create.create_object(Weapon, key="Longsword", location=storage, aliases=['storage_weapons'])
sword.db.damage = '1d8'
sword.db.attribute_bonuses = {'strength': 0, 'constitution': 0, 'dexterity': 0, 'intelligence': 0 }
sword.db.desc = "A normal, run of the mill longsword."
sword.db.value = random.randrange(2,6)
sword.db.item_level = "common"
sword.db.weapon_type = "sword"
sword.db.skill_used = "blades"
mob_sword = create.create_object(Weapon, key="Cruel Blade", location=storage, aliases=['mob_weapons', 'mob_blade'])
mob_sword.db.damage = '1d8'
mob_sword.db.attribute_bonuses = {}
mob_sword.db.desc = "A typical sword used by the mean spirited denizens of Avaloria."
mob_sword.db.value = random.randrange(2,6)
mob_sword.db.item_level = 'common'
mob_sword.db.weapon_type = "sword"
mob_sword.db.skill_used = "blades"
|
# coding: utf-8
import unittest
from problems.add_two_numbers import Solution
from problems.utils.leetcode import list_to_listnode
from problems.utils.leetcode import listnode_to_list
class TestCase(unittest.TestCase):
def setUp(self):
self.solution = Solution()
def test(self):
l1 = list_to_listnode([2, 4, 3])
l2 = list_to_listnode([5, 6, 4])
expected_head = self.solution.addTwoNumbers(l1, l2)
expected = [7, 0, 8]
self.assertEqual(listnode_to_list(expected_head), expected)
def test2(self):
l1 = list_to_listnode([0, 1, 0, 9])
l2 = list_to_listnode([1, 9, 9])
expected_head = self.solution.addTwoNumbers(l1, l2)
expected = [1, 0, 0, 0, 1]
self.assertEqual(listnode_to_list(expected_head), expected)
if __name__ == '__main__':
unittest.main()
|
# Copyright 2014 Citrix Systems
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import requests
from neutron.services.loadbalancer.drivers.netscaler import (
ncc_client, netscaler_driver
)
from neutron.tests.unit import testlib_api
NCC_CLIENT_CLASS = ('neutron.services.loadbalancer.drivers'
'.netscaler.ncc_client.NSClient')
TESTURI_SCHEME = 'http'
TESTURI_HOSTNAME = '1.1.1.1'
TESTURI_PORT = 4433
TESTURI_PATH = '/ncc_service/1.0'
TESTURI = '%s://%s:%s%s' % (TESTURI_SCHEME, TESTURI_HOSTNAME,
TESTURI_PORT, TESTURI_PATH)
TEST_USERNAME = 'user211'
TEST_PASSWORD = '@30xHl5cT'
TEST_TENANT_ID = '9c5245a2-0432-9d4c-4829-9bd7028603a1'
TESTVIP_ID = '52ab5d71-6bb2-457f-8414-22a4ba55efec'
class TestNSClient(testlib_api.WebTestCase):
"""A Unit test for the NetScaler NCC client module."""
def setUp(self):
self.log = mock.patch.object(ncc_client, 'LOG').start()
super(TestNSClient, self).setUp()
# mock the requests.request function call
self.request_method_mock = mock.Mock()
requests.request = self.request_method_mock
self.testclient = self._get_nsclient()
self.addCleanup(mock.patch.stopall)
def test_instantiate_nsclient_with_empty_uri(self):
"""Asserts that a call with empty URI will raise an exception."""
self.assertRaises(ncc_client.NCCException, ncc_client.NSClient,
'', TEST_USERNAME, TEST_PASSWORD)
def test_create_resource_with_no_connection(self):
"""Asserts that a call with no connection will raise an exception."""
# mock a connection object that fails to establish a connection
self.request_method_mock.side_effect = (
requests.exceptions.ConnectionError())
resource_path = netscaler_driver.VIPS_RESOURCE
resource_name = netscaler_driver.VIP_RESOURCE
resource_body = self._get_testvip_httpbody_for_create()
# call method under test: create_resource() and assert that
# it raises an exception
self.assertRaises(ncc_client.NCCException,
self.testclient.create_resource,
TEST_TENANT_ID, resource_path,
resource_name, resource_body)
def test_create_resource_with_error(self):
"""Asserts that a failed create call raises an exception."""
# create a mock object to represent a valid http response
# with a failure status code.
fake_response = requests.Response()
fake_response.status_code = requests.codes.unauthorized
fake_response.headers = []
requests.request.return_value = fake_response
resource_path = netscaler_driver.VIPS_RESOURCE
resource_name = netscaler_driver.VIP_RESOURCE
resource_body = self._get_testvip_httpbody_for_create()
# call method under test: create_resource
# and assert that it raises the expected exception.
self.assertRaises(ncc_client.NCCException,
self.testclient.create_resource,
TEST_TENANT_ID, resource_path,
resource_name, resource_body)
def test_create_resource(self):
"""Asserts that a correct call will succeed."""
# obtain the mock object that corresponds to the call of request()
fake_response = requests.Response()
fake_response.status_code = requests.codes.created
fake_response.headers = []
self.request_method_mock.return_value = fake_response
resource_path = netscaler_driver.VIPS_RESOURCE
resource_name = netscaler_driver.VIP_RESOURCE
resource_body = self._get_testvip_httpbody_for_create()
# call method under test: create_resource()
self.testclient.create_resource(TEST_TENANT_ID, resource_path,
resource_name, resource_body)
# assert that request() was called
# with the expected params.
resource_url = "%s/%s" % (self.testclient.service_uri, resource_path)
self.request_method_mock.assert_called_once_with(
'POST',
url=resource_url,
headers=mock.ANY,
data=mock.ANY)
def test_update_resource_with_error(self):
"""Asserts that a failed update call raises an exception."""
# create a valid http response with a failure status code.
fake_response = requests.Response()
fake_response.status_code = requests.codes.unauthorized
fake_response.headers = []
# obtain the mock object that corresponds to the call of request()
self.request_method_mock.return_value = fake_response
resource_path = "%s/%s" % (netscaler_driver.VIPS_RESOURCE,
TESTVIP_ID)
resource_name = netscaler_driver.VIP_RESOURCE
resource_body = self._get_testvip_httpbody_for_update()
# call method under test: update_resource() and
# assert that it raises the expected exception.
self.assertRaises(ncc_client.NCCException,
self.testclient.update_resource,
TEST_TENANT_ID, resource_path,
resource_name, resource_body)
def test_update_resource(self):
"""Asserts that a correct update call will succeed."""
# create a valid http response with a successful status code.
fake_response = requests.Response()
fake_response.status_code = requests.codes.ok
fake_response.headers = []
# obtain the mock object that corresponds to the call of request()
self.request_method_mock.return_value = fake_response
resource_path = "%s/%s" % (netscaler_driver.VIPS_RESOURCE,
TESTVIP_ID)
resource_name = netscaler_driver.VIP_RESOURCE
resource_body = self._get_testvip_httpbody_for_update()
# call method under test: update_resource.
self.testclient.update_resource(TEST_TENANT_ID, resource_path,
resource_name, resource_body)
resource_url = "%s/%s" % (self.testclient.service_uri, resource_path)
# assert that requests.request() was called with the
# expected params.
self.request_method_mock.assert_called_once_with(
'PUT',
url=resource_url,
headers=mock.ANY,
data=mock.ANY)
def test_delete_resource_with_error(self):
"""Asserts that a failed delete call raises an exception."""
# create a valid http response with a failure status code.
fake_response = requests.Response()
fake_response.status_code = requests.codes.unauthorized
fake_response.headers = []
resource_path = "%s/%s" % (netscaler_driver.VIPS_RESOURCE,
TESTVIP_ID)
        # call method under test: remove_resource() and assert that it raises the expected exception
self.assertRaises(ncc_client.NCCException,
self.testclient.remove_resource,
TEST_TENANT_ID, resource_path)
def test_delete_resource(self):
"""Asserts that a correct delete call will succeed."""
        # create a valid http response with a successful status code.
fake_response = requests.Response()
fake_response.status_code = requests.codes.ok
fake_response.headers = []
# obtain the mock object that corresponds to the call of request()
self.request_method_mock.return_value = fake_response
resource_path = "%s/%s" % (netscaler_driver.VIPS_RESOURCE,
TESTVIP_ID)
resource_url = "%s/%s" % (self.testclient.service_uri, resource_path)
        # call method under test: remove_resource()
        self.testclient.remove_resource(TEST_TENANT_ID, resource_path)
        # assert that requests.request() was called with the
        # expected params
self.request_method_mock.assert_called_once_with(
'DELETE',
url=resource_url,
headers=mock.ANY,
data=mock.ANY)
def _get_nsclient(self):
return ncc_client.NSClient(TESTURI, TEST_USERNAME, TEST_PASSWORD)
def _get_testvip_httpbody_for_create(self):
body = {
'name': 'vip1',
'address': '10.0.0.3',
'pool_id': 'da477c13-24cd-4c9f-8c19-757a61ef3b9d',
'protocol': 'HTTP',
'protocol_port': 80,
'admin_state_up': True,
}
return body
def _get_testvip_httpbody_for_update(self):
body = {}
body['name'] = 'updated vip1'
body['admin_state_up'] = False
return body
|
from osgeo import ogr
import sys
import psycopg2
from ede.credentials import DB_NAME, DB_PASS, DB_PORT, DB_USER, DB_HOST
import json
def main(shapefile):
## Connection to the database ##
conn = psycopg2.connect(database=DB_NAME, user=DB_USER, password=DB_PASS,
host=DB_HOST, port=DB_PORT)
cur = conn.cursor()
reader = ogr.Open(shapefile)
layer = reader.GetLayer(0)
layer_name = layer.GetName()
layer_defn = layer.GetLayerDefn()
attrs = []
for i in range(layer_defn.GetFieldCount()):
field_defn = layer_defn.GetFieldDefn(i)
attrs.append(field_defn.GetName())
attrs = '{\"' + '\",\"'.join(attrs) + '\"}'
# (1) Insert into regions_meta + return uid as meta_id
query = "insert into regions_meta (name, attributes) values (\'%s\', \'%s\') returning uid" % (layer_name, attrs)
cur.execute(query)
rows = cur.fetchall()
for row in rows:
meta_id = int(row[0])
# (2) Iterate over features
for i in range(layer.GetFeatureCount()):
print "ingesting feature no. %s" % i
feature = layer.GetFeature(i).ExportToJson(as_object=True)
geom = feature['geometry']['coordinates']
depth_fnc = lambda L: isinstance(L, list) and max(map(depth_fnc, L))+1
depth = depth_fnc(geom)
# The case of non-multi-polygons
if depth == 3:
geom_str = "POLYGON("
num_rings = len(geom)
for i_ring in range(num_rings):
geom_str += '('
ring = geom[i_ring]
num_pts = len(ring)
for i_pt in range(num_pts):
geom_str += ' '.join(map(str, ring[i_pt]))
if i_pt < num_pts-1:
geom_str += ','
geom_str += ')'
if i_ring < num_rings-1:
geom_str += ','
geom_str += ')'
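            # Illustrative example of the WKT built above: a single square ring becomes
            #   POLYGON((0 0,0 1,1 1,1 0,0 0))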
# The case of multi-polygons
elif depth == 4:
geom_str = "MULTIPOLYGON("
num_polys = len(geom)
for i_poly in range(num_polys):
geom_str += '('
poly = geom[i_poly]
num_rings = len(poly)
for i_ring in range(num_rings):
geom_str += '('
ring = poly[i_ring]
num_pts = len(ring)
for i_pt in range(num_pts):
geom_str += ' '.join(map(str, ring[i_pt]))
if i_pt < num_pts-1:
geom_str += ','
geom_str += ')'
if i_ring < num_rings-1:
geom_str += ','
geom_str += ')'
if i_poly < num_polys-1:
geom_str += ','
geom_str += ')'
else:
sys.exit("got unexpected nestedness depth of %s in feature" % depth)
meta_data = json.dumps(feature['properties'])
meta_data = meta_data.replace("'", "''")
# (2) Ingest the feature with its geom + meta_data into the regions table
query = "insert into regions (meta_id, geom, meta_data) values (%s, ST_GeomFromText(\'%s\', 4326), \'%s\')" % (meta_id, geom_str, meta_data)
cur.execute(query)
conn.commit()
if __name__ == "__main__":
shapefile = sys.argv[1]
main(shapefile) |
# This python script is just for reference, do not import this file
# The same _EVE class is integrated into the CircuitPython binary
# Documented at: https://circuitpython.readthedocs.io/en/latest/shared-bindings/_eve/index.html
import struct
class _EVE:
def cc(self, s):
assert (len(s) % 4) == 0
self.buf += s
while len(self.buf) > 512:
self.write(self.buf[:512])
self.buf = self.buf[512:]
def register(self, sub):
self.buf = b''
getattr(sub, 'write') # Confirm that there is a write method
def flush(self):
self.write(self.buf)
self.buf = b''
def c4(self, i):
"""Send a 32-bit value to the GD2."""
self.cc(struct.pack("I", i))
def cmd0(self, num):
self.c4(0xffffff00 | num)
def cmd(self, num, fmt, args):
self.c4(0xffffff00 | num)
self.cc(struct.pack(fmt, *args))
# The basic graphics instructions
def AlphaFunc(self, func,ref):
self.c4((9 << 24) | ((func & 7) << 8) | ((ref & 255)))
def Begin(self, prim):
self.c4((31 << 24) | ((prim & 15)))
def BitmapHandle(self, handle):
self.c4((5 << 24) | ((handle & 31)))
def BitmapLayout(self, format,linestride,height):
self.c4((7 << 24) | ((format & 31) << 19) | ((linestride & 1023) << 9) | ((height & 511)))
def BitmapSize(self, filter,wrapx,wrapy,width,height):
self.c4((8 << 24) | ((filter & 1) << 20) | ((wrapx & 1) << 19) | ((wrapy & 1) << 18) | ((width & 511) << 9) | ((height & 511)))
def BitmapSource(self, addr):
self.c4((1 << 24) | ((addr & 0xffffff)))
def BitmapTransformA(self, a, p = 0):
self.c4((21 << 24) | ((p & 1) << 17) | ((a & 131071)))
def BitmapTransformB(self, b, p = 0):
self.c4((22 << 24) | ((p & 1) << 17) | ((b & 131071)))
def BitmapTransformC(self, c, p = 0):
self.c4((23 << 24) | ((p & 1) << 17) | ((c & 16777215)))
def BitmapTransformD(self, d, p = 0):
self.c4((24 << 24) | ((p & 1) << 17) | ((d & 131071)))
def BitmapTransformE(self, e, p = 0):
self.c4((25 << 24) | ((p & 1) << 17) | ((e & 131071)))
def BitmapTransformF(self, f, p = 0):
self.c4((26 << 24) | ((p & 1) << 17) | ((f & 16777215)))
def BlendFunc(self, src,dst):
self.c4((11 << 24) | ((src & 7) << 3) | ((dst & 7)))
def Call(self, dest):
self.c4((29 << 24) | ((dest & 65535)))
def Cell(self, cell):
self.c4((6 << 24) | ((cell & 127)))
def ClearColorA(self, alpha):
self.c4((15 << 24) | ((alpha & 255)))
def ClearColorRGB(self, red,green,blue):
self.c4((2 << 24) | ((red & 255) << 16) | ((green & 255) << 8) | ((blue & 255)))
def Clear(self, c = 1,s = 1,t = 1):
self.c4((38 << 24) | ((c & 1) << 2) | ((s & 1) << 1) | ((t & 1)))
def ClearStencil(self, s):
self.c4((17 << 24) | ((s & 255)))
def ClearTag(self, s):
self.c4((18 << 24) | ((s & 255)))
def ColorA(self, alpha):
self.c4((16 << 24) | ((alpha & 255)))
def ColorMask(self, r,g,b,a):
self.c4((32 << 24) | ((r & 1) << 3) | ((g & 1) << 2) | ((b & 1) << 1) | ((a & 1)))
def ColorRGB(self, red,green,blue):
self.c4((4 << 24) | ((red & 255) << 16) | ((green & 255) << 8) | ((blue & 255)))
def Display(self):
self.c4((0 << 24))
def End(self):
self.c4((33 << 24))
def Jump(self, dest):
self.c4((30 << 24) | ((dest & 65535)))
def LineWidth(self, width): # "width" is width of lines in diameter
self.c4((14 << 24) | ((int(width * 8) & 4095)))
def Line_Width(self, width): # "width" is width of lines in radius
self.LineWidth(2 * width)
def Macro(self, m):
self.c4((37 << 24) | ((m & 1)))
def PointSize(self, size): # "size" is diameter of rasterized points
self.c4((13 << 24) | ((int(size * 8) & 8191)))
def Point_Size(self, size): # "size" is radius of points
self.PointSize(2 * size)
def RestoreContext(self):
self.c4((35 << 24))
def Return(self):
self.c4((36 << 24))
def SaveContext(self):
self.c4((34 << 24))
def ScissorSize(self, width,height):
self.c4((28 << 24) | ((width & 4095) << 12) | ((height & 4095)))
def ScissorXY(self, x,y):
self.c4((27 << 24) | ((x & 2047) << 11) | ((y & 2047)))
def StencilFunc(self, func,ref,mask):
self.c4((10 << 24) | ((func & 7) << 16) | ((ref & 255) << 8) | ((mask & 255)))
def StencilMask(self, mask):
self.c4((19 << 24) | ((mask & 255)))
def StencilOp(self, sfail,spass):
self.c4((12 << 24) | ((sfail & 7) << 3) | ((spass & 7)))
def TagMask(self, mask):
self.c4((20 << 24) | ((mask & 1)))
def Tag(self, s):
self.c4((3 << 24) | ((s & 255)))
def Vertex2f_1(self, x, y):
x = int(x)
y = int(y)
self.c4(0x40000000 | ((x & 32767) << 15) | (y & 32767))
def Vertex2f_2(self, x, y):
x = int(2 * x)
y = int(2 * y)
self.c4(0x40000000 | ((x & 32767) << 15) | (y & 32767))
def Vertex2f_4(self, x, y):
x = int(4 * x)
y = int(4 * y)
self.c4(0x40000000 | ((x & 32767) << 15) | (y & 32767))
def Vertex2f_8(self, x, y):
x = int(8 * x)
y = int(8 * y)
self.c4(0x40000000 | ((x & 32767) << 15) | (y & 32767))
def Vertex2f_16(self, x, y):
x = int(16 * x)
y = int(16 * y)
self.c4(0x40000000 | ((x & 32767) << 15) | (y & 32767))
Vertex2f = Vertex2f_16
def Vertex2ii(self, x, y, handle = 0, cell = 0):
self.c4((2 << 30) | ((x & 511) << 21) | ((y & 511) << 12) | ((handle & 31) << 7) | ((cell & 127)))
def VertexFormat(self, frac):
self.c4((39 << 24) | (frac & 7))
self.Vertex2f = [
self.Vertex2f_1,
self.Vertex2f_2,
self.Vertex2f_4,
self.Vertex2f_8,
self.Vertex2f_16][frac]
def BitmapLayoutH(self, linestride,height):
self.c4((40 << 24) | (((linestride) & 3) << 2) | (((height) & 3)))
def BitmapSizeH(self, width,height):
self.c4((41 << 24) | (((width) & 3) << 2) | (((height) & 3)))
def PaletteSource(self, addr):
self.c4((42 << 24) | (((addr) & 4194303)))
def VertexTranslateX(self, x):
self.c4((43 << 24) | (((int(16 * x)) & 131071)))
def VertexTranslateY(self, y):
self.c4((44 << 24) | (((int(16 * y)) & 131071)))
def Nop(self):
self.c4((45 << 24))
def BitmapExtFormat(self, fmt):
self.c4((46 << 24) | (fmt & 65535))
def BitmapSwizzle(self, r, g, b, a):
self.c4((47 << 24) | ((r & 7) << 9) | ((g & 7) << 6) | ((b & 7) << 3) | ((a & 7)))
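# Minimal usage sketch (illustrative only): a subclass that provides the write()
# method _EVE expects, capturing the packed command words in memory instead of
# sending them to a display. The command values below are arbitrary.
if __name__ == "__main__":
    class _CaptureEVE(_EVE):
        def __init__(self):
            self.sent = b''
            self.register(self)  # initialises self.buf and checks that write() exists

        def write(self, data):
            self.sent += data

    eve = _CaptureEVE()
    eve.ClearColorRGB(0x20, 0x40, 0x80)
    eve.Clear()
    eve.Begin(2)              # primitive type code (2 is POINTS on EVE parts)
    eve.PointSize(16)
    eve.Vertex2ii(100, 100)
    eve.End()
    eve.Display()
    eve.flush()
    print("%d bytes of display list captured" % len(eve.sent))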
|
from django.conf import settings
from django.template import loader, TemplateDoesNotExist
from livesettings.functions import config_value
from satchmo_store.shop.signals import rendering_store_mail, sending_store_mail
from socket import error as SocketError
import logging
log = logging.getLogger('satchmo_store.mail')
if "mailer" in settings.INSTALLED_APPS:
from mailer import send_mail
else:
from django.core.mail import send_mail
from django.core.mail import EmailMultiAlternatives
class NoRecipientsException(Exception):
pass
class ShouldNotSendMail(Exception):
pass
def send_store_mail_template_decorator(template_base):
"""
This decorator sets the arguments ``template`` and ``template_html``
when the decorated function is called.
"""
def dec(func):
def newfunc(*args, **kwargs):
default_kwargs = {
'template': '%s.txt' % template_base,
'template_html': '%s.html' % template_base
}
default_kwargs.update(kwargs)
return func(*args, **default_kwargs)
return newfunc
return dec
def send_html_email(sender, send_mail_args=None, context=None, template_html=None, **kwargs):
send_html = config_value('SHOP', 'HTML_EMAIL')
if not send_html:
return
# perhaps send_store_mail() was not passed the *template_html* argument
if not template_html:
return
if settings.DEBUG:
log.info("Attempting to send html mail.")
try:
t = loader.get_template(template_html)
html_body = t.render(context)
except TemplateDoesNotExist:
log.warn('Unable to find html email template %s. Falling back to text only email.' % template_html)
return
# just like send_store_mail() does
if not send_mail_args.get('recipient_list'):
raise NoRecipientsException
# prepare kwargs for EmailMultiAlternatives()
multi_mail_args = send_mail_args.copy()
fail_silently = multi_mail_args.pop('fail_silently')
multi_mail_args['body'] = multi_mail_args.pop('message') # the plain text part
multi_mail_args['to'] = multi_mail_args.pop('recipient_list')
msg = EmailMultiAlternatives(**multi_mail_args)
msg.attach_alternative(html_body, "text/html")
# don't have to handle any errors, as send_store_mail() does so for us.
msg.send(fail_silently=fail_silently)
# tell send_store_mail() to abort sending plain text mail
raise ShouldNotSendMail
def send_store_mail(subject, context, template='', recipients_list=None,
format_subject=False, send_to_store=False,
fail_silently=False, sender=None, **kwargs):
"""
:param subject: A string.
:param format_subject: Determines whether the *subject* parameter is
formatted. Only the %(shop_name)s specifier is supported now.
:param context: A dictionary to use when rendering the message body. This
overwrites an internal dictionary with a single entry, `shop_name`.
:param template: The path of the plain text template to use when rendering
the message body.
:param `**kwargs`: Additional arguments that are passed to listeners of the
signal :data:`satchmo_store.shop.signals.sending_store_mail`.
"""
from satchmo_store.shop.models import Config
shop_config = Config.objects.get_current()
shop_email = shop_config.store_email
shop_name = shop_config.store_name
if not shop_email:
log.warn('No email address configured for the shop. Using admin settings.')
shop_email = settings.ADMINS[0][1]
if shop_name:
shop_email = "%s <%s>" % (shop_name, shop_email)
c_dict = {'shop_name': shop_name}
if format_subject:
subject = subject % c_dict
c_dict.update(context)
recipients = recipients_list or []
if send_to_store:
recipients.append(shop_email)
# match send_mail's signature
send_mail_args = {
'subject': subject,
'from_email': shop_email,
'recipient_list': recipients,
'fail_silently': fail_silently,
}
# let listeners modify context
rendering_store_mail.send(sender, send_mail_args=send_mail_args, context=c_dict,
**kwargs)
# render text email, regardless of whether html email is used.
t = loader.get_template(template)
body = t.render(c_dict)
# listeners may have set this entry
    if 'message' not in send_mail_args:
send_mail_args['message'] = body
try:
# We inform listeners before checking recipients list, as they may
# modify it.
# Listeners may also choose to send mail themselves, so we place this
# call in the SocketError try block to handle errors for them.
try:
sending_store_mail.send(sender, send_mail_args=send_mail_args, \
context=c_dict, **kwargs)
except ShouldNotSendMail:
return
if not send_mail_args.get('recipient_list'):
raise NoRecipientsException
send_mail(**send_mail_args)
except SocketError as e:
if settings.DEBUG:
log.error('Error sending mail: %s' % e)
log.warn("""Ignoring email error, since you are running in DEBUG mode. Email was:
To: %s
Subject: %s
---
%s""" % (",".join(send_mail_args['recipient_list']), send_mail_args['subject'], send_mail_args['message']))
else:
log.fatal('Error sending mail: %s' % e)
raise IOError('Could not send email. Please make sure your email settings are correct and that you are not being blocked by your ISP.')
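# A minimal usage sketch (illustrative only, assuming a configured Satchmo project and a
# hypothetical pair of templates 'shop/email/order_placed.txt' / '.html'; the order object
# and its contact email are likewise made up). The decorator fills in the *template* and
# *template_html* keyword arguments, and forwarding **kwargs lets send_html_email() pick up
# *template_html* through the store-mail signals:
#
#     @send_store_mail_template_decorator('shop/email/order_placed')
#     def notify_order_placed(order, **kwargs):
#         send_store_mail("Thanks for your order from %(shop_name)s",
#                         context={'order': order},
#                         recipients_list=[order.contact.email],
#                         format_subject=True, send_to_store=True, **kwargs)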
|
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for wind site modeling within system design tool.
This module is intended to model the properties associated with a wind site that
impact the determination of the levelized cost of energy at the wind farm level.
In its current state there are no functions, so it is just a container for the
associated characteristics.
"""
class Site(object):
"""Models a wind site.
This class contains characteristics that are specific to an individual wind
site. This includes the probability distribution of specific wind velocities,
the probability distribution of the wind shear coefficient, and the initial
capital costs.
Attributes:
site_name: A string containing the name of the site.
velocity_distribution: A dictionary containing velocity and probability
information specific to the site.
    shear_distribution: A dictionary containing shear coefficient and probability
information specific to the site.
capital_costs: A float describing the initial capital cost for the site.
"""
def __init__(self, site_name=None, velocity_distribution=None,
shear_distribution=None, capital_costs=None):
"""Constructor.
Args:
site_name: (Optional) String containing name of site.
velocity_distribution: (Optional) Dictionary with keys 'velocity' and
'probability' that have values of 1D lists of equal length containing
sets of velocity and the associated probability of that velocity.
Velocity values must be floats monotonically increasing from 0.0, and
probability values must be floats that are zero or greater and sum to
1.0.
shear_distribution: (Optional) Dictionary with keys 'shear_coefficient'
and 'probability' that have 1D lists of equal length containing sets
of shear coefficient and the associated probability of that shear
coefficient. Shear coefficient values must be floats monotonically
increasing from 0.0, and probability values must be floats that are
zero or greater and sum to 1.0.
capital_costs: (Optional) Initial capital cost [$USD/m^2] for the site.
"""
# TODO: Allow for wind class as a method of definition.
# TODO: Add description wind shear with reference height.
# TODO: Add input checking.
self._site_name = 'Default Site Name' if site_name is None else site_name
if velocity_distribution is None:
self._velocity_distribution = {
'velocity': [0.0, 10.0, 20.0],
'probability': [0.2, 0.6, 0.2]
}
else:
self._velocity_distribution = velocity_distribution
if shear_distribution is None:
self._shear_distribution = {
'shear_coefficient': [0.0, 0.1, 0.2],
'probability': [0.0, 0.9, 0.1]
}
else:
self._shear_distribution = shear_distribution
self._capital_costs = 1.0 if capital_costs is None else capital_costs
@property
def site_name(self):
return self._site_name
@property
def velocity_distribution(self):
return self._velocity_distribution
@property
def shear_distribution(self):
return self._shear_distribution
@property
def capital_costs(self):
return self._capital_costs
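# Minimal usage sketch (illustrative only): construct a Site with the default
# distributions and derive a probability-weighted mean velocity from them.
# The weighted-mean calculation is not part of this module; it is shown only to
# illustrate the expected structure of velocity_distribution.
if __name__ == '__main__':
  site = Site(site_name='Example Site', capital_costs=2.5)
  dist = site.velocity_distribution
  mean_velocity = sum(v * p for v, p in zip(dist['velocity'], dist['probability']))
  print('%s: probability-weighted mean velocity %.1f, capital cost %.1f $USD/m^2'
        % (site.site_name, mean_velocity, site.capital_costs))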
|
from sofi.ui import Code
def test_basic():
assert(str(Code()) == "<code></code>")
def test_text():
assert(str(Code("text")) == "<code>text</code>")
def test_custom_class_ident_style_and_attrs():
assert(str(Code("text", cl='abclass', ident='123', style="font-size:0.9em;", attrs={"data-test": 'abc'}))
== "<code id=\"123\" class=\"abclass\" style=\"font-size:0.9em;\" data-test=\"abc\">text</code>")
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappé and contributors
# For license information, please see license.txt
import frappe
from frappe.model.document import Document
from subprocess import Popen, PIPE, STDOUT
import re, shlex
from frappe.utils import flt, cstr
from frappe.utils.background_jobs import enqueue
def run_command(commands, doctype, key, cwd='..', docname=' ', after_command=None,site_request=None):
verify_whitelisted_call()
start_time = frappe.utils.time.time()
console_dump = ""
logged_command = " && ".join(commands)
logged_command += " " #to make sure passwords at the end of the commands are also hidden
sensitive_data = ["--mariadb-root-password", "--admin-password", "--root-password"]
for password in sensitive_data:
logged_command = re.sub("{password} .*? ".format(password=password), '', logged_command, flags=re.DOTALL)
doc = frappe.get_doc({'doctype': 'Bench Manager Command', 'key': key, 'source': doctype+': '+docname,'site_request':site_request,
'command': logged_command, 'console': console_dump, 'status': 'Ongoing'})
doc.insert(ignore_permissions=True)
frappe.db.commit()
frappe.publish_realtime(key, "Executing Command:\n{logged_command}\n\n".format(logged_command=logged_command), user=frappe.session.user)
try:
for command in commands:
terminal = Popen(shlex.split(command), stdin=PIPE, stdout=PIPE, stderr=STDOUT, cwd=cwd)
for c in iter(lambda: safe_decode(terminal.stdout.read(1)), ''):
frappe.publish_realtime(key, c, user=frappe.session.user)
console_dump += c
main_domain = frappe.db.get_value("SAAS Settings", None, "main_domain")
site_name = frappe.db.get_value('Site Request', site_request, 'subdomain') +"."+main_domain
lets_encrypt_command = "sudo -H bench setup lets-encrypt {site_name}".format(site_name=site_name)
p = Popen(lets_encrypt_command, stdin=PIPE, stderr=STDOUT, stdout=PIPE, shell=True)
p.communicate(input=b'Y\nY\n')[0]
if terminal.wait():
_close_the_doc(start_time, key, console_dump, status='Failed', user=frappe.session.user,site_request=site_request)
else:
_close_the_doc(start_time, key, console_dump, status='Success', user=frappe.session.user,site_request=site_request)
except Exception as e:
_close_the_doc(start_time, key, "{} \n\n{}".format(e, console_dump), status='Failed', user=frappe.session.user,site_request=site_request)
finally:
frappe.db.commit()
# hack: frappe.db.commit() to make sure the log created is robust,
# and the _refresh throws an error if the doc is deleted
frappe.enqueue('bench_manager.bench_manager.utils._refresh',
doctype=doctype, docname=docname, commands=commands)
def _close_the_doc(start_time, key, console_dump, status, user,site_request=None):
time_taken = frappe.utils.time.time() - start_time
final_console_dump = ''
console_dump = console_dump.split('\n\r')
for i in console_dump:
i = i.split('\r')
final_console_dump += '\n'+i[-1]
frappe.db.set_value('Bench Manager Command', key, 'console', final_console_dump)
frappe.db.set_value('Bench Manager Command', key, 'status', status)
frappe.db.set_value('Bench Manager Command', key, 'time_taken', time_taken)
frappe.db.commit()
if site_request:
main_domain = frappe.db.get_value("SAAS Settings", None, "main_domain")
mysql_password = frappe.db.get_value("SAAS Settings", None, "mysql_password")
admin_password = frappe.db.get_value("SAAS Settings", None, "admin_password")
email = frappe.db.get_value("Site Request", site_request, "email")
customers = frappe.db.get_list("Customer",{"customer_email":email}, ignore_permissions=True)
        if customers:
            customer = frappe.get_doc("Customer", {"customer_email": email})
# site_name = frappe.db.get_value('Site Request', site_request, 'subdomain')+"."+main_domain
site_name = "https://"+frappe.db.get_value('Site Request', site_request, 'subdomain') +"."+main_domain
user = 'admin@'+cstr(frappe.db.get_value('Site Request', site_request, 'subdomain') +"."+main_domain)
password = 'back2track741'
msg = frappe.render_template('bench_manager/templates/emails/site_request.html', context={"page_link": site_name, "user": user, "passwored": password})
email_args = {
"recipients": email,
"sender": None,
"subject": "Your site is ready!",
"content": msg
}
enqueue(method=frappe.sendmail, queue='short', timeout=300, is_async=True, **email_args)
frappe.publish_realtime(key, '\n\n'+status+'!\nThe operation took '+str(time_taken)+' seconds', user=user)
def _refresh(doctype, docname, commands):
try:
frappe.get_doc(doctype, docname).run_method('after_command', commands=commands)
    except Exception:
        pass
@frappe.whitelist()
def verify_whitelisted_call():
if 'bench_manager' not in frappe.get_installed_apps():
raise ValueError("This site does not have bench manager installed.")
def safe_decode(string, encoding = 'utf-8'):
try:
string = string.decode(encoding)
except Exception:
pass
return string
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""
===========================
TracRemote.tests.needs_mock
===========================
Contains a superclass for test cases that need a mock Trac server running.
"""
import unittest
import os
import stat
import subprocess
import time
from pkg_resources import resource_filename
from ..connection import Connection
class NeedsMock(unittest.TestCase):
"""Superclass for test cases that need a mock Trac server running.
"""
@classmethod
def setUpClass(cls):
cls.url = 'http://localhost:8888'
cls.netrc_data = """machine localhost:8888
login foo
password bar
"""
cls.netrc_file = os.path.join(os.environ['HOME'], '.netrc')
cls.password_file = resource_filename('TracRemote.tests',
't/password.txt')
cls.existing_netrc = os.path.exists(cls.netrc_file)
if not cls.existing_netrc:
with open(cls.netrc_file, 'w') as n:
n.write(cls.netrc_data)
os.chmod(cls.netrc_file, stat.S_IRUSR | stat.S_IWUSR)
cls.trac = subprocess.Popen(['python', '-m',
'TracRemote.tests.mock_trac_server'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
time.sleep(5)
@classmethod
def tearDownClass(cls):
cls.trac.kill()
if not cls.existing_netrc:
if os.path.exists(cls.netrc_file):
os.remove(cls.netrc_file)
def setUp(self):
self.conn = Connection(self.url)
def tearDown(self):
pass
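# A minimal usage sketch (illustrative only): a concrete test case inherits the mock Trac
# server plus the Connection created in setUp(), so individual tests only need to exercise
# self.conn (the assertion below is deliberately trivial):
#
#     class TestWithMockTrac(NeedsMock):
#         def test_connection_exists(self):
#             self.assertIsNotNone(self.conn)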
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright INRIA
# Contributors: Wahiba Taouali ([email protected])
# Nicolas P. Rougier ([email protected])
#
# This software is governed by the CeCILL license under French law and abiding
# by the rules of distribution of free software. You can use, modify and/ or
# redistribute the software under the terms of the CeCILL license as circulated
# by CEA, CNRS and INRIA at the following URL
# http://www.cecill.info/index.en.html.
#
# As a counterpart to the access to the source code and rights to copy, modify
# and redistribute granted by the license, users are provided only with a
# limited warranty and the software's author, the holder of the economic
# rights, and the successive licensors have only limited liability.
#
# In this respect, the user's attention is drawn to the risks associated with
# loading, using, modifying and/or developing or reproducing the software by
# the user in light of its specific status of free software, that may mean that
# it is complicated to manipulate, and that also therefore means that it is
# reserved for developers and experienced professionals having in-depth
# computer knowledge. Users are therefore encouraged to load and test the
# software's suitability as regards their requirements in conditions enabling
# the security of their systems and/or data to be ensured and, more generally,
# to use and operate it in the same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL license and that you accept its terms.
# -----------------------------------------------------------------------------
import os
import numpy as np
from parameters import *
def cartesian_to_polar(x, y):
''' Cartesian to polar coordinates. '''
rho = np.sqrt(x**2+y**2)
theta = np.arctan2(y,x)
return rho,theta
def polar_to_cartesian(rho, theta):
''' Polar to cartesian coordinates. '''
x = rho*np.cos(theta)
y = rho*np.sin(theta)
return x,y
def polar_to_logpolar(rho, theta):
''' Polar to logpolar coordinates. '''
    # Shift in the SC mapping function in deg
    A = 3.0
    # Collicular magnification along the u axis in mm/rad
    Bx = 1.4
    # Collicular magnification along the v axis in mm/rad
    By = 1.8
xmin, xmax = 0.0, 4.80743279742
ymin, ymax = -2.76745559565, 2.76745559565
rho = rho*90.0
x = Bx*np.log(np.sqrt(rho*rho+2*A*rho*np.cos(theta)+A*A)/A)
y = By*np.arctan(rho*np.sin(theta)/(rho*np.cos(theta)+A))
x = (x-xmin)/(xmax-xmin)
y = (y-ymin)/(ymax-ymin)
return x, y
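# Worked example (illustrative only): the point (x, y) = (3, 4) maps to
# (rho, theta) = (5.0, atan2(4, 3), i.e. approximately 0.9273 rad), and
# polar_to_cartesian(5.0, 0.9273) recovers approximately (3.0, 4.0).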
def retina_projection(Rs=retina_shape, Ps=projection_shape):
'''
Compute the projection indices from retina to colliculus
Parameters
----------
Rs : (int,int)
Half-retina shape
Ps : (int,int)
Retina projection shape (might be different from colliculus)
'''
filename = "retina (%d,%d) - colliculus (%d,%d).npy" % (Rs[0],Rs[1],Ps[0],Ps[1])
if os.path.exists(filename):
return np.load(filename)
s = 4
rho = ((np.logspace(start=0, stop=1, num=s*Rs[1],base=10)-1)/9.)
theta = np.linspace(start=-np.pi/2,stop=np.pi/2, num=s*Rs[0])
rho = rho.reshape((s*Rs[1],1))
rho = np.repeat(rho,s*Rs[0], axis=1)
theta = theta.reshape((1,s*Rs[0]))
theta = np.repeat(theta,s*Rs[1], axis=0)
y,x = polar_to_cartesian(rho,theta)
xmin,xmax = x.min(), x.max()
x = (x-xmin)/(xmax-xmin)
ymin,ymax = y.min(), y.max()
y = (y-ymin)/(ymax-ymin)
P = np.zeros((Ps[0],Ps[1],2), dtype=int)
xi = np.rint(x*(Rs[0]-1)).astype(int)
yi = np.rint((0.0+1.0*y)*(Rs[1]-1)).astype(int)
yc,xc = polar_to_logpolar(rho,theta)
xmin,xmax = xc.min(), xc.max()
xc = (xc-xmin)/(xmax-xmin)
ymin,ymax = yc.min(), yc.max()
yc = (yc-ymin)/(ymax-ymin)
xc = np.rint(xc*(Ps[0]-1)).astype(int)
yc = np.rint((.0+yc*1.0)*(Ps[1]-1)).astype(int)
P[xc,yc,0] = xi
P[xc,yc,1] = yi
np.save(filename, P)
return P
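# A minimal usage sketch (illustrative only, assuming `parameters` defines retina_shape and
# projection_shape as used in the defaults above; `half_retina` is a hypothetical activity
# array of shape retina_shape). P maps each collicular cell to a retinal index pair, so a
# half-retina array can be resampled into collicular (log-polar) space with fancy indexing:
#
#     P = retina_projection()
#     colliculus = half_retina[P[..., 0], P[..., 1]]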
|
# coding= utf-8
import random
from Crypto.PublicKey import RSA
# Function that draws the secret-santa pairs
def sorteiaPares(listaDeParticipantes):  # Receives a list with the participants' names
                                         # and the value is their public key
    dictSorteado = {}  # Dict to be returned
    numeroDeParticipantes = len(listaDeParticipantes)  # Just to keep the code cleaner and more readable
    if numeroDeParticipantes < 2:
        print "You must have at least two participants!!"
        return
    # We then generate a list of N random numbers from 0 to N-1, where N is the number of participants
    # To avoid problems in the distribution, the first number cannot be 0
    # If it is, swap it with some other number in the list
sorteio = random.sample(xrange(numeroDeParticipantes), numeroDeParticipantes)
if sorteio[0] == 0:
rand = random.randint(1, numeroDeParticipantes-1)
sorteio[0] = sorteio[rand]
sorteio[rand] = 0
    # Performs a distribution in which each participant receives another random participant
    iterator = 0
    for numero in sorteio:
        if iterator == numero:  # The person drew themselves
            # In that case, swap with the person before them in the list
            dictSorteado[listaDeParticipantes[iterator]] = dictSorteado[listaDeParticipantes[iterator-1]]
            dictSorteado[listaDeParticipantes[iterator-1]] = listaDeParticipantes[numero]
        else:
            dictSorteado[listaDeParticipantes[iterator]] = listaDeParticipantes[numero]
        iterator += 1
return dictSorteado
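# Example (illustrative only): sorteiaPares(['Ana', 'Bruno', 'Carla']) might return
# {'Ana': 'Carla', 'Bruno': 'Ana', 'Carla': 'Bruno'}; the swap logic above keeps
# anyone from being assigned to themselves.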
# Function that encrypts the drawn dict
def criptografaSorteio(dictDeChaves, dictSorteado):  # Receives dicts Giver -> Public Key and Giver -> Drawn Receiver
    dictCriptografado = {}
    for participante in dictDeChaves:
        pubKeyObj = RSA.importKey(dictDeChaves[participante])  # Get the participant's public key
        msg = dictSorteado[participante]  # Get the receiver drawn for them
        emsg = pubKeyObj.encrypt(msg, 'x')[0]  # Encrypt the receiver's name
        caminho = "sorteio/" + participante
        with open(caminho, "w") as text_file:
            text_file.write(emsg)
# Program start:
# Build your list of participants however you prefer
# The most basic way is:
listaDeParticipantes = []  # A list of participants
# Reading it from a file or directory is also an option, though
dictDeParticipantes = {}  # An empty dict
# For each participant, read their key and map Participant -> Public Key
for participante in listaDeParticipantes:
with open("chaves/pubKey" + participante, mode='r') as file:
key = file.read()
dictDeParticipantes[participante] = key
dictSorteado = sorteiaPares(listaDeParticipantes)  # Gets the dictionary that maps giver -> drawn receiver
criptografaSorteio(dictDeParticipantes, dictSorteado)
|
def test__db_implementation_selector():
pass
|
__version__ = """1.30.0""" |
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 13 09:36:43 2017
@author: Administrator
"""
from numpy import *  # import numpy's function library
import numpy as np
import scipy.io as scio
from scipy import interp
import matplotlib.pyplot as plt
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn import cross_validation
from matplotlib import colors
from sklearn.lda import LDA
import h5py as h5
from sklearn.metrics import roc_curve,auc
from sklearn.cross_validation import StratifiedKFold
from sklearn import neighbors as ng
from sklearn.svm import SVC
def getTrainAndTestSet(path,trainNum, dataName, lion_type, huamn_type):
'get trainset and testset'
Data = scio.loadmat(path)
All = Data[dataName]
# if (dataName == 'GLCMData') and (huamn_type == 'glcm_SIT' ) :
# Data = scio.loadmat(path)
# All = Data[dataName]
# else:
# Data = h5.File(path)
# All = Data[dataName]
# All = np.array(All)
# All = All.T
# if huamn_type == "glcm_human_lamp":
# Data = h5.File(path)
# All = Data[dataName] #
# All = np.array(All)
# All = All.T
#
# else:
# Data = scio.loadmat(path)
# All = Data[dataName]
#Data = scio.loadmat(path)
#All = Data[dataName]
#this part for lions: mat v.7.3
# Data = h5.File(path)
# All = Data[dataName] #
# All = np.array(All)
# All = All.T
#lgbpLionsData = h5.File(path,'r')
#print (lgbpLionsData)
#All = Data[dataName] #
#All = np.array(All)
#print(type(All))
#print(shape(All))
#All = All.T
    # get the LGBP features from the matrix
    Features = All[0:shape(All)[0],:] #384x1
    # get the last column of the matrix; -1 means the last one. shape(lgbpLionsAll)[0] gives the row count, shape(lgbpLionsAll)[1] the column count
#lgbpLionsName = lgbpLionsAll[0:shape(lgbpLionsAll)[0],-1:shape(lgbpLionsAll)[1]]
#lions train
trainSet = Features[0:trainNum,:] #200x1
#lion test
testSet = Features[trainNum:shape(All)[0],:] #183x1
#lionsTestSetName = lgbpLionsName[trainNum+1:shape(lgbpLionsAll)[0],:]
return trainSet,testSet
def getTrainAndTestSetRace(path,trainNum, dataName):
'get trainset and testset'
Data = h5.File(path)
All = Data[dataName] #
All = np.array(All)
All = All.T
Features = All[0:shape(All)[0],:] #384x1
    # get the last column of the matrix; -1 means the last one. shape(lgbpLionsAll)[0] gives the row count, shape(lgbpLionsAll)[1] the column count
#lgbpLionsName = lgbpLionsAll[0:shape(lgbpLionsAll)[0],-1:shape(lgbpLionsAll)[1]]
#lions train
trainSet = Features[0:trainNum,:] #200x1
#lion test
testSet = Features[trainNum:shape(All)[0],:] #183x1
#lionsTestSetName = lgbpLionsName[trainNum+1:shape(lgbpLionsAll)[0],:]
return trainSet,testSet
def LDAClassificationForIris(trainNum, _type, *dataSet):
'This function is for LDA classification'
print('kkkkkkkkkkkkkkkkkkkkkkkk')
#print(dataSet[0])
trainSet = np.concatenate((dataSet[0],dataSet[2]),axis=0) #lion + all human
trainLabelOne = np.zeros((shape(dataSet[0])[0],1)) #first class label for lions
trainLabelTwo = np.ones((shape(dataSet[2])[0],1)) #second class label for all human
trainLabel = np.concatenate((trainLabelOne,trainLabelTwo),axis=0)
testLabelOne = np.zeros((shape(dataSet[1])[0],1))
testLabelTwo = np.ones((shape(dataSet[3])[0],1))
testLabel = np.concatenate((testLabelOne, testLabelTwo),axis=0)
#print (shape(testLabel)) #417x1
testSetOne = np.array(dataSet[1])
testSetTwo = np.array(dataSet[3])
testSet = np.concatenate((testSetOne,testSetTwo),axis=0) #testSet : 417x2360
# print ('++++++++++++++++++++')
# print (shape(trainSet))
# print (shape(trainLabel))
print ('------------------------------')
#print (trainSet.shape)
#print (trainLabel.shape)
clf = LDA()
clf.fit(trainSet, trainLabel)
LDA(n_components=None, priors=None, shrinkage=None, solver='svd', store_covariance=False, tol=0.0001)
# SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,\
# decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',\
# max_iter=-1, probability=False, random_state=None, shrinking=True,\
# tol=0.001, verbose=False)
print ('=========================The classification results are :=======================')
classificationResult = clf.predict(testSet)
    # store the classification results: true label, predicted label
#print(testLabel.shape), print(classificationResult.shape)
#classificationResult.shape=(testLabel.shape[0],1)
#print(testLabel.shape), print(classificationResult.shape)
origin_class = testLabel
clf_class = classificationResult
clf_class.shape = (testLabel.shape[0],1)
all_class = np.concatenate((origin_class, clf_class), axis=1)
    print('--------------- number of test samples --------------')
print(all_class.shape[0])
#label_name = _type + '_SVM_ORIGIN_CLF_LABEL.csv'
#np.savetxt(label_name, all_class, delimiter = ',')
    print ('========= the classification results ==============') #1x417;
print (classificationResult)
#save the classificationResult: the first column is true label, the second is classification label
    # testLabel.T: transpose; testLabel is 1x417 while classificationResult is 417x1, so their shapes must be made consistent
trueLabelAndClassifyLabel = np.concatenate((testLabel,classificationResult),axis=0)
trueLabelAndClassifyLabel = trueLabelAndClassifyLabel.T
#print (trueLabelAndClassifyLabel.shape)
count = 0
wrong_num = 0
    for i in range(shape(classificationResult)[0]):
if testLabel[i] == classificationResult[i]:
count = count + 1
else:
wrong_num += 1
            print('============= misclassified sample index: ' + str(i))
    print('A total of ' + str(wrong_num) + ' images were misclassified')
accurcay = count/classificationResult.shape[0]
    print ('======================The accuracy of LDA:==========================================')
print (accurcay)
print ('======================The scores:===============================================')
weight = [0.0001 for i in range(classificationResult.shape[0])]
for x in range(1,classificationResult.shape[0]):
weight[x-1] = random.uniform(0,1)
print(clf.score(testSet, testLabel,weight))
#print ('======================The Estimate probability=================================')
#estimate_pro = clf.predict_proba(testSet) # for get ROC
#print (estimate_pro)
#print (estimate_pro.shape)
    print ('======================Predicted confidence scores for samples:============================')
predicit_confidence = clf.decision_function(testSet)
print (predicit_confidence)
#print (predicit_confidence.shape)
#call ROC
#yLabel = np.concatenate((trainLabel,testLabel),axis=0)
#getROCCurve(testLabel, predicit_confidence)
    # cross-validation
X = np.concatenate((trainSet,testSet),axis=0)
Y = np.concatenate((trainLabel,testLabel),axis=0)
YY = Y
YY.shape = (YY.shape[0],)
#kFold = cross_validation.KFold(len(X),6, shuffle=True)
kFold = StratifiedKFold(YY, n_folds=6)
acc_y = getROCCurve(clf,X, Y, kFold, _type)
print ('======================The terminal acc_y of LDA:==========================================')
print(acc_y)
#return acc_y #for CCR
return accurcay
def lgbpForIrisLDA(lion_path, human_path,trainNum, _type, lion_type, huamn_type):
trainNum = 10000
    # ../ refers to the parent directory
lgbpLions = lion_path#'../../big_data_feature_extraction/LGBP/matrixLGBP/LGBPRotateLions/LGBPRotateLionsFeature.mat'
lgbpHuman = human_path#'../../big_data_feature_extraction/LGBP/matrixLGBP/LGBPThousand/LGBPThousandFeature.mat'
#lgbpHumanGlass = '../../feature_extraction/matrixLGBP/LGBPHumanGlass.mat'
#label
lionLabel = 0;
humanLabel = 1;
#humanGlassLabel = 1;
#print(shape(lgbpLions))
#for lions
(lionsTrainSet,lionsTestSet) = getTrainAndTestSet(lgbpLions,trainNum,'LGBPData',lion_type, huamn_type)
print('=====================================================================')
print(shape(lionsTrainSet))
print(type(lionsTrainSet))
#for human
(humanTrainSet,humanTestSet) = getTrainAndTestSet(lgbpHuman,trainNum,'LGBPData',lion_type, huamn_type)
#shape(lionsTrainSet)
#for humanglass
#(humanGlassTrainSet,humanGlassTestSet) = getTrainAndTestSet(lgbpHumanGlass,trainNum,'LGBPHumanGlass')
#print (type(humanGlassTrainSet))
#print (shape(humanGlassTrainSet))
accurcay = LDAClassificationForIris(trainNum,_type, lionsTrainSet,lionsTestSet, \
humanTrainSet, humanTestSet)
return accurcay
def glcmForIrisLDA(lion_path, human_path,trainNum, _type, lion_type, huamn_type):
    # ../ refers to the parent directory
glcmLions = lion_path#'../../big_data_feature_extraction/GLCM/matrixGLCM/GLCMRotateLions/GLCMRotateLionsFeature.mat'
glcmHuman = human_path#'../../big_data_feature_extraction/GLCM/matrixGLCM/GLCMThousand/GLCMThousandFeature.mat'
#glcmHumanGlass = '../../feature_extraction/matrixGLCM/GLCMHumanGlass.mat'
#for lions
(glcmLionsTrainSet,glcmLionsTestSet) = getTrainAndTestSet(glcmLions,trainNum,'GLCMData', lion_type, huamn_type)
# print('=======================kenanananana--------------------')
# print(glcmLionsTrainSet.shape)
# print(glcmLionsTestSet.shape)
#for human
(glcmHumanTrainSet,glcmHumanTestSet) = getTrainAndTestSet(glcmHuman,trainNum,'GLCMData', lion_type, '123')
# print(glcmHumanTrainSet.shape)
# print(glcmHumanTestSet.shape)
#for humanglass
#(glcmHumanGlassTrainSet,glcmHumanGlassTestSet) = getTrainAndTestSet(glcmHumanGlass,trainNum,'GLCMHumanGlass')
#print (type(humanGlassTrainSet))
#print (shape(humanGlassTrainSet))
accurcay = LDAClassificationForIris(trainNum,_type, glcmLionsTrainSet,glcmLionsTestSet, \
glcmHumanTrainSet, glcmHumanTestSet)
return accurcay
def forRaceLDA(asian_path, white_path,train_num, _type, file_type):
(AsianTrainSet,AsianTestSet) = getTrainAndTestSetRace(asian_path,train_num, _type+'AsianTrain')
(WhiteTrainSet,WhiteTestSet) = getTrainAndTestSetRace(white_path,train_num, _type+'WhiteTrain')
acc_y = LDAClassificationForIris(train_num,file_type, AsianTrainSet,AsianTestSet, \
WhiteTrainSet, WhiteTestSet)
return acc_y
def getROCCurve(clf, X, Y, kFold, _type):
print ('====================================get ROC ====================')
    # cross-validation
mean_tpr = 0.0
mean_fpr = np.linspace(0,1,100)
#the accuracy
acc_y = []
for i, (trn,tst) in enumerate(kFold):
#print (tst)
proBas = clf.fit(X[trn], Y[trn]).predict_proba(X[tst])
        # use roc_curve() to obtain fpr, tpr and the decision thresholds
fpr,tpr,thresholds = roc_curve(Y[tst], proBas[:,1])
mean_tpr += interp(mean_fpr,fpr,tpr)
mean_tpr[0] = 0.0
roc_auc = auc(fpr,tpr)
plt.plot(fpr, tpr, lw=1, label='ROC fold %d (area = %0.2f)' % (i, roc_auc))
outVal = clf.score(X[tst], Y[tst])
acc_y.append(outVal)
#print (outVal)
# plt.plot(fpr, tpr, lw=1, label='ROC')
#acc_y = np.mean(acc_y)
    print('======== acc_y for each fold ========')
print(acc_y)
plt.plot([0, 1], [0, 1], '--', color=(0.6, 0.6, 0.6), label='Luck')
#plt.plot(fpr, tpr, lw=1, color='#FF0000', label='ROC fold %d (area = %0.2f)' % (i, roc_auc))
mean_tpr /= len(kFold)
    mean_tpr[-1] = 1.0  # force the last point of the mean curve to (1, 1)
    mean_auc = auc(mean_fpr, mean_tpr)  # compute the mean AUC
    # plot the mean ROC curve
#print mean_fpr,len(mean_fpr)
    # print mean_tpr: stores fpr/tpr; EER: the x-coordinate where the ROC curve crosses the anti-diagonal of ROC space
print('------------fpr, tpr-----------------')
mean_fpr1 = mean_fpr
mean_fpr1.shape = (mean_fpr.shape[0],1)
#print(mean_fpr1.shape)
mean_tpr1 = mean_tpr
mean_tpr1.shape = (mean_tpr.shape[0],1)
#print(mean_tpr1.shape)
fpr_tpr = np.concatenate((mean_fpr1,mean_tpr1),axis=1)
#roc_data_name = _type + '_FPR_TPR.csv'
#np.savetxt(roc_data_name, fpr_tpr, delimiter = ',')
#print(mean_fpr.shape)
#print(mean_tpr.shape)
    # for EER: the operating point where FPR equals 1 - TPR (i.e. FPR = FNR)
    eer_index = np.argmin(np.abs(mean_fpr + mean_tpr - 1.0))
    eer = float(mean_fpr[eer_index])
print('--------------------eer------------' )
print(eer)
plt.plot(mean_fpr, mean_tpr, '--',color='#0000FF',
label='Mean ROC (area = %0.2f)' % mean_auc, lw=1)
plt.xlim([-0.02, 1.02])
plt.ylim([-0.02, 1.02])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
return np.mean(acc_y)
if __name__ == "__main__":
loop_num=np.array([100,1000,2000,4000,6000,8000,10000])
acc=[]
#first: lions:thousand
#LGBP
# for i in range(len(loop_num)):
    # print ('++++++++++++++++ LGBP IRIS test-set size: ' + str(loop_num[i]) + ' +++++++++++++++++++++++')
# #glcmLions = '../../big_data_feature_extraction/GLCM/matrixGLCM/GLCMRotateLions/GLCMRotateLionsFeature.mat'
# #glcmHuman = '../../big_data_feature_extraction/GLCM/matrixGLCM/GLCMThousand/GLCMThousandFeature.mat'
# lgbp_lions_path = '../big_data_feature_extraction/LGBP/matrixLGBP/LGBPRotateLions/LGBPRotateLionsFeature.mat' #19200
# lgbp_human_path = '../big_data_feature_extraction/LGBP/matrixLGBP/LGBPThousand/LGBPThousandFeature.mat' #20000
# trainNum = loop_num[i]
# accurcay = lgbpForIrisLDA(lgbp_lions_path, lgbp_human_path,train_num, 'LGBP_Lions_Thousand','lgbp_lions', 'lgbp_human')
# print('==============accurcay:')
# print(accurcay)
# acc.append(accurcay)
# np.savetxt('LGBP_Lions_Thousand_CCR_LDA.csv', acc, delimiter = ',')
#GLCM
# for i in range(len(loop_num)):
    # print ('++++++++++++++++ GLCM IRIS test-set size: ' + str(loop_num[i]) + ' +++++++++++++++++++++++')
# #glcmLions = '../../big_data_feature_extraction/GLCM/matrixGLCM/GLCMRotateLions/GLCMRotateLionsFeature.mat'
# #glcmHuman = '../../big_data_feature_extraction/GLCM/matrixGLCM/GLCMThousand/GLCMThousandFeature.mat'
# glcm_lions_path = '../big_data_feature_extraction/GLCM/matrixGLCM/GLCMRotateLions/GLCMRotateLionsFeature.mat' #19200
# glcm_human_path = '../big_data_feature_extraction/GLCM/matrixGLCM/GLCMThousand/GLCMThousandFeature.mat' #20000
# trainNum = loop_num[i]
# accurcay = glcmForIrisLDA(glcm_lions_path, glcm_human_path, trainNum,'GLCM_Lions_Thousand','glcm_lions', 'glcm_thousand')
# print('==============accurcay:')
# print(accurcay)
# acc.append(accurcay)
# np.savetxt('GLCM_Lions_Thousand_CCR_LDA.csv', acc, delimiter = ',')
#second: lamp:lions
#LGBP
# for i in range(len(loop_num)):
    # print ('++++++++++++++++ LGBP IRIS test-set size: ' + str(loop_num[i]) + ' +++++++++++++++++++++++')
# #glcmLions = '../../big_data_feature_extraction/GLCM/matrixGLCM/GLCMRotateLions/GLCMRotateLionsFeature.mat'
# #glcmHuman = '../../big_data_feature_extraction/GLCM/matrixGLCM/GLCMThousand/GLCMThousandFeature.mat'
# lgbp_lions_path = '../big_data_feature_extraction/LGBP/matrixLGBP/LGBPRotateLions/LGBPRotateLionsFeature.mat' #19200
# lgbp_human_path = '../big_data_feature_extraction/LGBP/matrixLGBP/LGBPLamp/LGBPLampFeature.mat'
# trainNum = loop_num[i]
# accurcay=lgbpForIrisLDA(lgbp_lions_path, lgbp_human_path,train_num, 'LGBP_Lions_Lamp','lgbp_lions', 'lgbp_lamp')
# print('==============accurcay:')
# print(accurcay)
# acc.append(accurcay)
# np.savetxt('LGBP_Lions_Lamp_CCR_LDA.csv', acc, delimiter = ',')
#GLCM
# for i in range(len(loop_num)):
    # print ('++++++++++++++++ GLCM IRIS test-set size: ' + str(loop_num[i]) + ' +++++++++++++++++++++++')
# glcm_lions_path = '../big_data_feature_extraction/GLCM/matrixGLCM/GLCMRotateLions/GLCMRotateLionsFeature.mat'
# glcm_human_path = '../big_data_feature_extraction/GLCM/matrixGLCM/GLCMLamp/GLCMLampFeature.mat'
# trainNum = loop_num[i]
# accurcay = glcmForIrisLDA(glcm_lions_path, glcm_human_path, trainNum,'GLCM_Lions_Lamp','glcm_lions', 'glcm_lamp')
# print('==============accurcay:')
# print(accurcay)
# acc.append(accurcay)
# np.savetxt('GLCM_Lions_Lamp_CCR_LDA.csv', acc, delimiter = ',')
#
#third:
#LGBP
# for i in range(len(loop_num)):
    # print ('++++++++++++++++ LGBP IRIS test-set size: ' + str(loop_num[i]) + ' +++++++++++++++++++++++')
# lgbp_lions_path = '../big_data_feature_extraction/LGBP/matrixLGBP/LGBPRotateLions/LGBPRotateLionsFeature.mat' #19200
# lgbp_human_path = '../big_data_feature_extraction/LGBP/matrixLGBP/LGBPSIT/LGBPSITFeature.mat'
# trainNum = loop_num[i]
# accurcay=lgbpForIrisLDA(lgbp_lions_path, lgbp_human_path,train_num, 'LGBP_Lions_SIT','lgbp_lions', 'lgbp_SIT')
# print('==============accurcay:')
# print(accurcay)
# acc.append(accurcay)
# np.savetxt('LGBP_Lions_SIT_CCR_LDA.csv', acc, delimiter = ',')
#GLCM
# for i in range(len(loop_num)):
    # print ('++++++++++++++++ GLCM IRIS test-set size: ' + str(loop_num[i]) + ' +++++++++++++++++++++++')
# glcm_lions_path = '../big_data_feature_extraction/GLCM/matrixGLCM/GLCMRotateLions/GLCMRotateLionsFeature.mat'
# glcm_human_path = '../big_data_feature_extraction/GLCM/matrixGLCM/GLCMSIT/GLCMSITFeature.mat'
# trainNum = loop_num[i]
# accurcay = glcmForIrisLDA(glcm_lions_path, glcm_human_path, trainNum,'GLCM_Lions_SIT','glcm_lions', 'glcm_SIT')
# print('==============accurcay:')
# print(accurcay)
# acc.append(accurcay)
# np.savetxt('GLCM_Lions_SIT_CCR_LDA.csv', acc, delimiter = ',')
#
    # print('=====ccr=====')  # a training size of 10000 means 10000 images per class are used for training
# print(acc)
# plt.plot(loop_num*2,acc, 'r--*')
    # plt.xlabel('Number of iris images used for training per each class')
# plt.ylabel('Correct Classification Rate')
#
#fourth:
    loop_num1 = np.array([100, 200, 300, 400, 500, 600, 700])
#LGBP
for i in range(len(loop_num1)):
        print ('++++++++++++++++ LGBP RACE test-set size: ' + str(loop_num1[i]) + ' +++++++++++++++++++++++')
lgbp_asian_path = '../RACE_classification/Race_Data/LGBPAsian.mat'
lgbp_white_path = '../RACE_classification/Race_Data/LGBPWhite.mat'
trainNum = loop_num1[i]
accurcay = forRaceLDA(lgbp_asian_path,lgbp_white_path, trainNum,'LGBP','LGBP_Asian_White')
print('==============accurcay:')
print(accurcay)
acc.append(accurcay)
np.savetxt('LGBP_RACE_CCR_LDA.csv', acc, delimiter = ',')
#GLCM
# for i in range(len(loop_num1)):
    # print ('++++++++++++++++ GLCM RACE test-set size: ' + str(loop_num1[i]) + ' +++++++++++++++++++++++')
# glcm_asian_path = '../RACE_classification/Race_Data/GLCMAsian.mat'
# glcm_white_path = '../RACE_classification/Race_Data/GLCMWhite.mat'
# trainNum = loop_num1[i]
# accurcay = forRaceLDA(glcm_asian_path,glcm_white_path, trainNum,'GLCM','GLCM_Asian_White')
# print('==============accurcay:')
# print(accurcay)
# acc.append(accurcay)
# np.savetxt('GLCM_RACE_CCR_LDA.csv', acc, delimiter = ',')
#
# print('=====ccr=====')
# print(acc)
# plt.plot(loop_num1*2,acc, 'r--*')
    # plt.xlabel('Number of iris images used for training per each class')
# plt.ylabel('Correct Classification Rate')
|
print("HEMANTH NAIDU ABBURI")
PRINT("AM.EN.U4CSE19223")
PRINT("CSE-D")
|
# -*- coding: utf-8 -*-
import unittest
from cubes import __version__
import json
from .common import CubesTestCaseBase
from sqlalchemy import MetaData, Table, Column, Integer, String
from werkzeug.test import Client
from werkzeug.wrappers import BaseResponse
from cubes.server import create_server
from cubes import compat
from cubes import Workspace
import csv
TEST_DB_URL = "sqlite:///"
class SlicerTestCaseBase(CubesTestCaseBase):
def setUp(self):
super(SlicerTestCaseBase, self).setUp()
self.config = compat.ConfigParser()
self.slicer = create_server(self.config)
self.slicer.debug = True
self.server = Client(self.slicer, BaseResponse)
self.logger = self.slicer.logger
self.logger.setLevel("DEBUG")
def get(self, path, *args, **kwargs):
if not path.startswith("/"):
path = "/" + path
response = self.server.get(path, *args, **kwargs)
try:
result = json.loads(compat.to_str(response.data))
except ValueError:
result = response.data
return (result, response.status_code)
def assertHasKeys(self, d, keys):
for key in keys:
self.assertIn(key, d)
class SlicerTestCase(SlicerTestCaseBase):
def test_version(self):
response, status = self.get("version")
self.assertEqual(200, status)
self.assertIsInstance(response, dict)
self.assertIn("version", response)
self.assertEqual(__version__, response["version"])
def test_unknown(self):
response, status = self.get("this_is_unknown")
self.assertEqual(404, status)
@unittest.skip("We need to fix the model")
class SlicerModelTestCase(SlicerTestCaseBase):
def setUp(self):
super(SlicerModelTestCase, self).setUp()
ws = Workspace()
ws.register_default_store("sql", url=TEST_DB_URL)
self.ws = ws
self.slicer.cubes_workspace = ws
# Satisfy browser with empty tables
# TODO: replace this once we have data
store = ws.get_store("default")
table = Table("sales", store.metadata)
table.append_column(Column("id", Integer))
table.create()
ws.import_model(self.model_path("model.json"))
ws.import_model(self.model_path("sales_no_date.json"))
def test_cube_list(self):
response, status = self.get("cubes")
self.assertIsInstance(response, list)
self.assertEqual(2, len(response))
for info in response:
self.assertIn("name", info)
self.assertIn("label", info)
self.assertNotIn("dimensions", info)
names = [c["name"] for c in response]
self.assertCountEqual(["contracts", "sales"], names)
def test_no_cube(self):
response, status = self.get("cube/unknown_cube/model")
self.assertEqual(404, status)
self.assertIsInstance(response, dict)
self.assertIn("error", response)
# self.assertRegexpMatches(response["error"]["message"], "Unknown cube")
def test_get_cube(self):
response, status = self.get("cube/sales/model")
        # import pdb; pdb.set_trace()
self.assertEqual(200, status)
self.assertIsInstance(response, dict)
self.assertNotIn("error", response)
self.assertIn("name", response)
self.assertIn("measures", response)
self.assertIn("aggregates", response)
self.assertIn("dimensions", response)
# We should not get internal info
self.assertNotIn("mappings", response)
self.assertNotIn("joins", response)
self.assertNotIn("options", response)
self.assertNotIn("browser_options", response)
self.assertNotIn("fact", response)
        # Proper content
aggregates = response["aggregates"]
self.assertIsInstance(aggregates, list)
self.assertEqual(4, len(aggregates))
names = [a["name"] for a in aggregates]
self.assertCountEqual(["amount_sum", "amount_min", "discount_sum",
"record_count"], names)
def test_cube_dimensions(self):
response, status = self.get("cube/sales/model")
# Dimensions
dims = response["dimensions"]
self.assertIsInstance(dims, list)
self.assertIsInstance(dims[0], dict)
for dim in dims:
self.assertIn("name", dim)
self.assertIn("levels", dim)
self.assertIn("default_hierarchy_name", dim)
self.assertIn("hierarchies", dim)
self.assertIn("is_flat", dim)
self.assertIn("has_details", dim)
names = [d["name"] for d in dims]
self.assertCountEqual(["date", "flag", "product"], names)
# Test dim flags
self.assertEqual(True, dims[1]["is_flat"])
self.assertEqual(False, dims[1]["has_details"])
self.assertEqual(False, dims[0]["is_flat"])
self.assertEqual(True, dims[0]["has_details"])
class SlicerAggregateTestCase(SlicerTestCaseBase):
sql_engine = "sqlite:///"
def setUp(self):
super(SlicerAggregateTestCase, self).setUp()
self.workspace = self.create_workspace(model="server.json")
self.cube = self.workspace.cube("aggregate_test")
self.slicer.cubes_workspace = self.workspace
self.facts = Table("facts", self.metadata,
Column("id", Integer),
Column("id_date", Integer),
Column("id_item", Integer),
Column("amount", Integer)
)
self.dim_date = Table("date", self.metadata,
Column("id", Integer),
Column("year", Integer),
Column("month", Integer),
Column("day", Integer)
)
self.dim_item = Table("item", self.metadata,
Column("id", Integer),
Column("name", String)
)
self.metadata.create_all()
data = [
# Master-detail Match
( 1, 20130901, 1, 20),
( 2, 20130902, 1, 20),
( 3, 20130903, 1, 20),
( 4, 20130910, 1, 20),
( 5, 20130915, 1, 20),
# --------
# ∑ 100
# No city dimension
( 6, 20131001, 2, 200),
( 7, 20131002, 2, 200),
( 8, 20131004, 2, 200),
( 9, 20131101, 3, 200),
(10, 20131201, 3, 200),
# --------
# ∑ 1000
# ========
# ∑ 1100
]
self.load_data(self.facts, data)
data = [
(1, "apple"),
(2, "pear"),
(3, "garlic"),
(4, "carrod")
]
self.load_data(self.dim_item, data)
data = []
for day in range(1, 31):
row = (20130900+day, 2013, 9, day)
data.append(row)
self.load_data(self.dim_date, data)
def test_aggregate_csv_headers(self):
# Default = labels
url = "cube/aggregate_test/aggregate?drilldown=date&format=csv"
response, status = self.get(url)
response = compat.to_str(response)
reader = csv.reader(response.splitlines())
header = next(reader)
self.assertSequenceEqual(["Year", "Total Amount", "Item Count"],
header)
# Labels - explicit
url = "cube/aggregate_test/aggregate?drilldown=date&format=csv&header=labels"
response, status = self.get(url)
response = compat.to_str(response)
reader = csv.reader(response.splitlines())
header = next(reader)
self.assertSequenceEqual(["Year", "Total Amount", "Item Count"],
header)
# Names
url = "cube/aggregate_test/aggregate?drilldown=date&format=csv&header=names"
response, status = self.get(url)
response = compat.to_str(response)
reader = csv.reader(response.splitlines())
header = next(reader)
self.assertSequenceEqual(["date.year", "amount_sum", "count"],
header)
# None
url = "cube/aggregate_test/aggregate?drilldown=date&format=csv&header=none"
response, status = self.get(url)
response = compat.to_str(response)
reader = csv.reader(response.splitlines())
header = next(reader)
self.assertSequenceEqual(["2013", "100", "5"],
header)
|
#
# ovirt-engine-setup -- ovirt engine setup
#
# Copyright oVirt Authors
# SPDX-License-Identifier: Apache-2.0
#
#
"""websocket-proxy plugin."""
import gettext
from otopi import plugin
from otopi import util
from ovirt_engine_setup import constants as osetupcons
from ovirt_engine_setup.websocket_proxy import constants as owspcons
def _(m):
return gettext.dgettext(message=m, domain='ovirt-engine-setup')
@util.export
class Plugin(plugin.PluginBase):
"""websocket-proxy plugin."""
def __init__(self, context):
super(Plugin, self).__init__(context=context)
@plugin.event(
stage=plugin.Stages.STAGE_SETUP,
condition=lambda self: self.environment.get(
owspcons.ConfigEnv.WEBSOCKET_PROXY_CONFIG
),
)
def _setup(self):
self.environment[
osetupcons.RenameEnv.PKI_ENTITIES
].append(
{
'name': 'websocket-proxy',
'display_name': 'WebSocket Proxy',
'ca_cert': None,
'extract_key': True,
'extra_action': None,
'shortLife': True,
}
)
# vim: expandtab tabstop=4 shiftwidth=4
|
#import numpy as np # linear algebra
#import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import NearestCentroid
from sklearn import model_selection
from sklearn.model_selection import train_test_split
#from sklearn.metrics import classification_report
#from sklearn.metrics import confusion_matrix
#from sklearn.metrics import accuracy_score
#load the csv file
iris = datasets.load_iris()
X = iris.data[:, :]  # take all four features
y = iris.target
#Initialize Gaussian Naive Bayes
NB_clf = GaussianNB()
# One-third of data as a part of test set
validation_size = 0.33
seed = 7
X_train, X_validation, Y_train, Y_validation = train_test_split(X, y, test_size=validation_size, random_state=seed)
# Test options and evaluation metric
scoring = 'accuracy'
#Naive Bayes Classifier
#Fitting the training set
NB_clf.fit(X_train, Y_train)
#Predicting for the Test Set
pred_clf = NB_clf.predict(X_validation)
#Prediction Probability
prob_pos_clf = NB_clf.predict_proba(X_validation)[:, 1]
#Model Performance
#setting performance parameters
kfold = model_selection.KFold(n_splits=10, shuffle=True, random_state=seed)
#calling the cross validation function
cv_results = model_selection.cross_val_score(GaussianNB(), X_train, Y_train, cv=kfold, scoring=scoring)
#displaying the mean and standard deviation of the prediction
print()
print("NB Classifier")
msg = "%s: %f (%f)" % ('NB accuracy', cv_results.mean(), cv_results.std())
print(msg)
#minimum distance classifier (nearest centroid)
MDC_clf = NearestCentroid()
#fitting the training set
MDC_clf.fit(X_train, Y_train)
#predicting for test set
pred_MDC_clf = MDC_clf.predict(X_validation)
#Model performance
kfold = model_selection.KFold(n_splits=10, shuffle=True, random_state=seed)
cv_results = model_selection.cross_val_score(NearestCentroid(), X_train, Y_train, cv=kfold, scoring=scoring)
print()
print("MDC Classifier")
msg = "%s: %f (%f)" % ('NB accuracy', cv_results.mean(), cv_results.std())
print(msg)
print()
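# Hedged follow-up sketch (not part of the original script): the metrics imports
# commented out at the top can be used to inspect per-class performance of the
# Naive Bayes predictions on the held-out validation split.
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
print("NB validation accuracy:", accuracy_score(Y_validation, pred_clf))
print(confusion_matrix(Y_validation, pred_clf))
print(classification_report(Y_validation, pred_clf))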
|
# Copyright 2018 Geobeyond Srl
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .models import COG
from cog.settings.development import RASTERIO_COGEO_PROFILE
from PIL import Image
import rasterio
import os
from rio_cogeo.cogeo import cog_translate
from rio_cogeo.profiles import cog_profiles
from rasterio.io import MemoryFile
from django.shortcuts import get_object_or_404
from django.core.files import File
from rest_framework.exceptions import ParseError
from rest_framework.parsers import FileUploadParser
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework import status
class ImageUploadParser(FileUploadParser):
media_type = 'image/*'
class COGListCreateView(APIView):
parser_class = (ImageUploadParser,)
def get(self, request, format=None):
cogs = [{
"id": cog.id,
"name": cog.name,
"bucket_name": cog.bucket_name,
"resource_uri": cog.resource_uri,
"created_at": cog.created_at,
"updated_at": cog.updated_at
} for cog in COG.objects.all()]
return Response(cogs)
def post(self, request, format=None):
"""
Example
-------
Example of request for creating a COG resource.
$ curl -X POST \
http://localhost:5000/api/cogs/ \
-u 'cog:cog' \
-H 'Content-Disposition: attachment; filename=example.tif' \
-H 'Content-Type: image/tif' \
-H 'content-type: multipart/form-data; boundary=----WebKitFormBoundary7MA4YWxkTrZu0gW' \
-F image=@/Users/geobart/example.tif \
-F name=example.tif
-F compression=raw
"""
if 'image' not in request.data.keys():
raise ParseError("Empty content")
img = request.data['image']
name = request.data['name']
if 'compression' in request.data.keys():
compression = request.data['compression']
else:
compression = RASTERIO_COGEO_PROFILE
cog_img_name = "cog" + "_" + name
f_name = COG.objects.filter(name=cog_img_name)
if f_name.count() > 0:
return Response(status=status.HTTP_409_CONFLICT)
# try to open file with rasterio and validate cog
# see https://pythonexample.com/code/validate%20cloud%20optimized%20geotiff/
# PR to exploit when merged
# https://github.com/mapbox/rio-cogeo/pull/6
with rasterio.open(img) as dataset:
is_cog = True
try:
assert dataset.driver == "GTiff"
assert dataset.is_tiled
assert dataset.overviews(1)
except (
AttributeError,
KeyError
):
raise ParseError(
"Unsupported image type opened by Rasterio"
)
except AssertionError:
# @TODO add logging if it isn't COG
is_cog = False
inpt_profile = dataset.profile
block_size = 512
config = dict(
NUM_THREADS=8,
GDAL_TIFF_INTERNAL_MASK=os.environ.get("GDAL_TIFF_INTERNAL_MASK", True),
GDAL_TIFF_OVR_BLOCKSIZE=os.environ.get("GDAL_TIFF_OVR_BLOCKSIZE", block_size),
)
if not is_cog:
cog_profile = cog_profiles.get(compression)
cog_profile.update(dict(BIGTIFF=os.environ.get("BIGTIFF", "IF_SAFER")))
with MemoryFile(filename=cog_img_name) as dst:
with dst.open(**inpt_profile) as cog_img:
cog_translate(
dataset.files[0],
cog_img.files[0],
cog_profile,
indexes=None,
nodata=None,
alpha=None,
overview_level=6,
config=config
)
dst.seek(0)
ci_file = File(dst)
ci_file.name = os.path.basename(ci_file.name)
cog = COG.objects.create(name=ci_file.name, image=ci_file)
else:
cog = COG.objects.create(name=name, image=img)
cog.save()
return Response(status=status.HTTP_201_CREATED)
class COGDetailView(APIView):
"""
This view should return the Cloud Optimized Geotiff queryset
as determined by the uuid portion of the URL.
"""
parser_class = (ImageUploadParser,)
def get_queryset(self):
uuid = self.kwargs['uuid']
return COG.objects.filter(id=uuid)
def get_object(self):
queryset = self.get_queryset()
obj = get_object_or_404(queryset)
# self.check_object_permissions(self.request, obj)
return obj
def get(self, request, format=None, *args, **kwargs):
cog_item = self.get_object()
cog = {
"id": cog_item.id,
"name": cog_item.name,
"bucket_name": cog_item.bucket_name,
"resource_uri": cog_item.resource_uri,
"created_at": cog_item.created_at,
"updated_at": cog_item.updated_at
}
return Response(cog)
def put(self, request, format=None, *args, **kwargs):
"""
Example
-------
Example of request for creating a COG resource.
$ curl -X PUT \
http://localhost:5000/api/cogs/<id> \
-u 'cog:cog' \
-H 'Content-Disposition: attachment; filename=example.tif' \
-H 'Content-Type: image/tif' \
-H 'content-type: multipart/form-data; boundary=----WebKitFormBoundary7MA4YWxkTrZu0gW' \
-F image=@/Users/geobart/example.tif \
-F name=example.tif
"""
if 'image' not in request.data.keys():
raise ParseError("Empty content")
img = request.data['image']
name = request.data['name']
# try to open file with rasterio
# see https://github.com/mapbox/rio-glui/blob/master/rio_glui/raster.py
with rasterio.open(img) as dataset:
is_cog = True
try:
assert dataset.driver == "GTiff"
assert dataset.is_tiled
assert dataset.overviews(1)
            except (
                AttributeError,
                KeyError
            ):
                raise ParseError(
                    "Unsupported image type opened by Rasterio"
                )
            except AssertionError:
                # @TODO add logging if it isn't COG
                is_cog = False
data_array = dataset.read()
if not is_cog:
pass
cog_item = self.get_object()
        # .update() persists the change directly and returns the number of rows affected
        COG.objects.filter(
            id=cog_item.id
        ).update(name=name, image=img)
return Response(status=status.HTTP_200_OK)
def delete(self, request, format=None, *args, **kwargs):
"""
"""
cog_item = self.get_object()
cog = COG.objects.filter(
id=cog_item.id
)
cog.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
|
import numpy as np
from code.pytorch.LAMPO.core.model import sum_logs_np
from sklearn.cluster import KMeans
from scipy.stats import multivariate_normal as scipy_normal
from sklearn.covariance import ledoit_wolf
def stable_cov(cov, reg):
reg_new = np.copy(reg)
cov_new = np.copy(cov)
while True:
try:
np.linalg.inv(cov_new)
break
        except np.linalg.LinAlgError:
            print("singular!")
cov_new += reg_new
reg_new *= 2.
return cov_new
class IRWRGMM:
"""
    Iterative Reward Weighted Responsibility Gaussian Mixture Model.
Colome and Torras 2018.
"""
def __init__(self, n_componente=1, tol=1E-5, n_init=100, max_iter=100, discount=0.98, cov_regularization=1E-15):
self._n_components = n_componente
self._tol = tol
self._data = None
self._dim = None
self._n_i = 0
self._mus = None
self._covs = None
self._log_pi = None
self._n_init = n_init
self._max_iter = max_iter
self._discount = discount
self._reg = cov_regularization
def _initialize(self, X):
n_samples, observed_dimensions = X.shape
kmeans = KMeans(self._n_components, n_init=self._n_init)
lab = kmeans.fit(X).predict(X)
self._covs = []
for i in range(self._n_components):
cl_indxs = np.where(lab == i)[0]
rnd_indxs = np.random.choice(range(n_samples), size=5)
indx = np.concatenate([cl_indxs, rnd_indxs])
# Avoid non-singular covariance
self._covs.append(ledoit_wolf(X[indx])[0])
self._pi = np.ones(self._n_components) / self._n_components
self._log_pi = np.log(self._pi)
self._mus = np.array(kmeans.cluster_centers_)
def fit_new_data(self, X, w):
"""
:param X: (n_samples x dim)
:param w: (n_samples)
:return:
"""
first = False
if self._mus is None:
first = True
self._initialize(X)
# w = w/np.sum(w)
old_log_likelihood = np.inf
log_resp, log_likelihood = self.get_log_responsability(X, w)
it = 0
old_mu = np.copy(self._mus)
old_cov = np.copy(self._covs)
old_n_i = np.copy(self._n_i)
reg = self._reg * np.eye(X.shape[1])
while np.abs(old_log_likelihood - log_likelihood) > self._tol and it < self._max_iter:
print("iter", it, log_likelihood)
n_i = []
for i in range(self._n_components):
d = w * np.exp(log_resp[i])
if first:
n = np.sum(d)
n_i.append(n)
self._mus[i] = np.einsum("i,ij->j", d, X)/n # eq 20
Y = X - self._mus[i]
cov = np.einsum('k,ki,kj->ij', d, Y, Y)
self._covs[i] = stable_cov(cov/n, reg) # eq 21
else:
n = np.sum(d) + old_n_i[i] # eq 25
n_i.append(n)
if np.sum(d) >= 1E-10:
self._mus[i] = (old_n_i[i]*old_mu[i] + np.einsum("i,ij->j", d, X))/n # eq 27
Y = X - self._mus[i]#np.einsum("i,ij->j", d, X)/np.sum(d) # np.einsum("i,ij->j", d, X)/np.sum(d) #self._mus[i]
cov = np.einsum('k,ki,kj->ij', d, Y, Y)
self._covs[i] = stable_cov(old_n_i[i]/n * old_cov[i] + cov/n,
reg)
# eq 21
self._n_i = np.copy(n_i)
# print("n_i", self._n_i)
# print("n", np.sum(self._n_i))
self._log_pi = np.log(np.array(n_i)) - np.log(np.sum(n_i)) # eq 22
old_log_likelihood = np.copy(log_likelihood)
log_resp, log_likelihood = self.get_log_responsability(X, w)
it += 1
self._n_i = self._n_i * self._discount # eq 29
def get_log_responsability(self, X, w):
log_p = []
for i in range(self._n_components):
dist = scipy_normal(self._mus[i], self._covs[i], allow_singular=True)
log_p.append(dist.logpdf(X) + self._log_pi[i])
# log_p = np.log(np.exp(log_p) + 1E-10) # avoid collapse
z = sum_logs_np(log_p, axis=0)
return np.array(log_p) - z, np.sum(w*z)/np.sum(w)
def predict(self, x, dim):
mus = []
covs = []
resp = []
for i in range(self._n_components):
cov_xx = self._covs[i][:dim, :dim]
cov_yy = self._covs[i][dim:, dim:]
cov_xy = self._covs[i][:dim, dim:]
mu_x = self._mus[i][:dim]
mu_y = self._mus[i][dim:]
cov_xx_i = np.linalg.inv(cov_xx)
new_mu = mu_y + cov_xy.T @ cov_xx_i @ (x - mu_x)
new_cov = cov_yy - cov_xy.T @ cov_xx_i @ cov_xy
mus.append(new_mu)
covs.append(new_cov)
gauss = scipy_normal(mu_x, cov_xx, allow_singular=True)
resp = gauss.logpdf(x) + self._log_pi
select_p = np.exp(np.array(resp) - sum_logs_np(resp))
cluster = np.random.choice(range(self._n_components), p=select_p/np.sum(select_p))
return np.random.multivariate_normal(mus[cluster], covs[cluster]), cluster
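if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): fit the mixture on
    # synthetic joint (context, parameter) samples with uniform reward weights,
    # then condition on a new context. Dimensions and sizes here are arbitrary.
    rng = np.random.RandomState(0)
    X_demo = rng.randn(200, 4)  # columns 0-1: context, columns 2-3: parameters
    w_demo = np.ones(200)       # uniform "reward" weights
    model = IRWRGMM(n_componente=2, max_iter=20)
    model.fit_new_data(X_demo, w_demo)
    sample, cluster = model.predict(X_demo[0, :2], dim=2)
    print("sampled parameters", sample, "from cluster", cluster)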
|
DEBUG = False
BCRYPT_LOG_ROUNDS = 12 |
#!/usr/bin/python2
# Python3-like changes
from __future__ import absolute_import, division, print_function
# Comment on documentation:
# When reading the doc strings if "Pre:" is present then this stands for "precondition", or the conditions in order to invoke something.
# Oppositely, "Post:" stands for "postcondition" and states what is returned by the method.
__author__ = "Tristan J. Hillis"
# Imports
import wx
import evora.common.utils.logs as log_utils
class logBox(wx.Panel):
"""
Sets up log text control
"""
def __init__(self, parent):
wx.Panel.__init__(self, parent)
# Main Sizers
# self.vertSizer = wx.BoxSizer(wx.VERTICAL)
# subsizers
# widgets
self.logFrame = wx.StaticBox(self, label="Event Log", size=(500, 300), style=wx.ALIGN_CENTER)
self.logFrameSizer = wx.StaticBoxSizer(self.logFrame, wx.VERTICAL)
self.logBox = wx.TextCtrl(self, size=(500, 300), style=wx.TE_READONLY | wx.TE_MULTILINE)
# adjust subsizers
self.logFrameSizer.Add(self.logBox, proportion=1, flag=wx.ALIGN_CENTER | wx.EXPAND)
# adjust main sizers
# self.vertSizer.Add(self.logFrameSizer, flag=wx.ALIGN_CENTER)
self.SetSizer(self.logFrameSizer)
self.logFrameSizer.Fit(self)
def threadSafeLogStatus(self, string):
"""
        Note: This should be called with wx.CallAfter to update a GUI element.
Pre: Takes in a string.
Post: Displays that string in the log status box in the log tab of the gui.
"""
msg = log_utils.time_stamp() + " " + string
val = self.logBox.GetValue()
self.logBox.SetValue(val + msg + "\n")
self.logBox.SetInsertionPointEnd()
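if __name__ == "__main__":
    # Hedged demo (not part of the original module): host the panel in a bare frame
    # and push a message from a worker thread via wx.CallAfter, as recommended by
    # the threadSafeLogStatus docstring.
    import threading

    app = wx.App(False)
    frame = wx.Frame(None, title="logBox demo")
    panel = logBox(frame)
    frame.Show()
    threading.Thread(
        target=lambda: wx.CallAfter(panel.threadSafeLogStatus, "camera initialized")
    ).start()
    app.MainLoop()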
|
from nextsong import Playlist as p
p(
"01.mp3",
p(
p("02.mp3", weight=1 / 4),
p(weight=3 / 4),
count=1,
),
"03.mp3",
"04.mp3",
loop=True,
).save_xml()
|
import logging
import os
import inspect
from Utils.cli import CliArgs
class PhaazeLoggerFormatter(logging.Formatter):
""" custom logging colors """
def __init__(self, fmt:str):
super().__init__(fmt)
self.default_color:str = "\033[00m"
self.colors:dict = dict(
DEBUG="\033[90m",
INFO="\033[36m",
WARNING="\033[93m",
ERROR="\033[33m",
CRITICAL="\033[31m",
)
def formatMessage(self, Record:logging.LogRecord) -> str:
lvl:str = Record.levelname
wanted_color:str = self.colors.get(lvl, self.default_color)
Record.levelname = f"{wanted_color}{lvl}{self.default_color}"
return self._style.format(Record)
class PhaazeLogger(object):
""" Logger for project, sends to systemd or console """
def __init__(self):
self.Log:logging.Logger = logging.getLogger("Phaazebot")
self.Log.setLevel(logging.DEBUG)
        # check whether handlers are already attached; handlers are not bound to this Logger
        # object but to the shared 'logging' module state, so logging.getLogger("Phaazebot")
        # may return an object that already has handlers.
        # This most likely happens when the main program starts protocol executions that copy a clean Phaazebot() from .phaazebot
if not self.Log.handlers:
self.Formatter:PhaazeLoggerFormatter = PhaazeLoggerFormatter("[%(levelname)s]: %(message)s")
self.active_debugs:list = [a.lower() for a in CliArgs.get("debug", "").split(",")]
# default stream handler
PhaazeStreamHandler:logging.StreamHandler = logging.StreamHandler()
PhaazeStreamHandler.setFormatter(self.Formatter)
self.Log.addHandler(PhaazeStreamHandler)
def info(self, msg:str) -> None:
self.Log.info(msg)
def warning(self, msg:str) -> None:
self.Log.warning(msg)
def error(self, msg:str) -> None:
self.Log.error(msg)
def critical(self, msg:str) -> None:
self.Log.critical(msg)
def debug(self, msg:str, require:str="all") -> None:
show:bool = False
if require == "": show = True
for ad in self.active_debugs:
if ad == "all":
show = True
break
if require == ad:
show = True
break
if require.split(":")[0] == ad:
show = True
break
if show:
            # Caller traces back to the call site of this function;
            # we only keep the line number and the file in which it happened
Caller:inspect.Traceback = inspect.getframeinfo(inspect.stack()[1][0])
location:str = Caller.filename.replace(os.getcwd(), "")
self.Log.debug(f"{location}:{Caller.lineno} | {msg}")
def printSQL(self, statement:str) -> None:
"""
        Pretty-print a SQL statement.
        (The statements in this codebase are indented with tabs, so they are
        stripped before printing to avoid a staircase of leading whitespace.)
"""
# just remove leading whitespaces and put back together
statement = '\n'.join([lt.lstrip("\t") for lt in statement.splitlines()])
self.debug(f"{'+'*10}\n{statement}\n{'-'*10}", require="")
|
import logging
import pprint
import boto3
import json
from src.utils import awsutils
from botocore.exceptions import ClientError
#logger = logging.getLogger(__name__)
#ec2 = boto3.resource('ec2')
def get_tag_specifications(dct, name_prefix):
dct["Name"] = name_prefix + dct["Name"]
tags = []
for k, v in dct.items():
tags.append({ "Key": k, "Value": v })
ts = [{ "ResourceType": "instance", "Tags": tags },
{"ResourceType": "volume", "Tags": tags}]
return ts
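# Illustration (hedged, not part of the original script) of the structure this helper
# produces, using hypothetical tag values:
#   get_tag_specifications({"Name": "web", "Env": "dev"}, "restored-")
#   -> [{"ResourceType": "instance",
#        "Tags": [{"Key": "Name", "Value": "restored-web"},
#                 {"Key": "Env", "Value": "dev"}]},
#       {"ResourceType": "volume", "Tags": [...same tags...]}]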
def launch_ec2s(client, lst, name_prefix, key_name, security_group_ids, subnet_id, DryRun=True):
res_lst = []
#pprint.pprint(lst)
for elt in lst:
tags = elt[1]
image_id = elt[4]
instance_type = tags["InstanceType"]
tag_specifications = get_tag_specifications(tags, name_prefix)
res = client.run_instances(ImageId = image_id,
InstanceType = instance_type,
KeyName = key_name,
MaxCount = 1,
MinCount = 1,
SecurityGroupIds = security_group_ids,
SubnetId = subnet_id,
TagSpecifications=tag_specifications,
DryRun = DryRun)
res_lst.append((elt[0], elt[1], elt[2], elt[3], elt[4],
res["Instances"][0]["InstanceId"]))
return res_lst
# main
def main():
pprint.pprint("Entering launch_target_instances.main()")
vars = awsutils.read_vars()
client = awsutils.get_ec2_client('us-east-1')
with open('input/copied_target_amis.txt') as infile:
lst = json.load(infile)
#pprint.pprint(vars['KmsKeyId'])
instances = launch_ec2s(client,
lst,
vars["EcsRestoredPrefix"],
vars["EC2KeyName"],
vars["EC2SecurityGroupIds"],
vars["EC2SubnetId"],
DryRun=False)
pprint.pprint(instances)
with open('input/launched_target_ec2s.txt', 'w') as outfile:
json.dump(instances, outfile, indent=4)
pprint.pprint("Leaving launch_target_instances.main()")
if __name__ == '__main__':
main()
|
from __future__ import absolute_import
from rest_framework.exceptions import NotFound, ValidationError
from rest_framework.utils.urls import remove_query_param, replace_query_param
from collections import OrderedDict
from rest_framework.response import Response
from infi.django_rest_utils import pagination
from django.utils import six
from .filters import ClickhouseRestFilter, ClickhouseOrderingFilter
from django.core.paginator import InvalidPage
from rest_framework.viewsets import ReadOnlyModelViewSet
from infi.django_rest_utils.views import ViewDescriptionMixin
class ClickhousePaginator(pagination.InfinidatPaginationSerializer):
def get_paginated_response(self, data):
return Response(OrderedDict([
('number_of_objects', self.number_of_objects),
('page_size', self.page_size),
('pages_total', self.pages_total),
('page', self.page_number),
('next', self.get_next_link()),
('previous', self.get_previous_link()),
('results', data)
]))
def get_next_link(self):
if not self.has_next():
return None
url = self.request.build_absolute_uri()
page_number = int(self.page_number) + 1
return replace_query_param(url, self.page_query_param, page_number)
def get_previous_link(self):
if not self.has_previous():
return None
url = self.request.build_absolute_uri()
page_number = int(self.page_number) - 1
if page_number == 1:
return remove_query_param(url, self.page_query_param)
return replace_query_param(url, self.page_query_param, page_number)
def has_next(self):
return int(self.page_number) < self.pages_total
def has_previous(self):
return int(self.page_number) > 1
def paginate_queryset(self, queryset, request, view=None):
self.request = request
page_size = self.get_page_size(request)
if not page_size:
return None
page_number = request.query_params.get(self.page_query_param, 1)
self.page = queryset.paginate(page_num=int(page_number),
page_size=int(page_size))
if self.page.pages_total and int(page_number) > int(self.page.pages_total):
msg = self.invalid_page_message.format(
page_number=page_number, message=six.text_type(InvalidPage)
)
raise NotFound(msg)
self.number_of_objects = self.page.number_of_objects
self.page_size = self.page.page_size
self.pages_total = self.page.pages_total
self.page_number = self.page.number
return self.page.objects
class ClickhouseViewSet(ViewDescriptionMixin, ReadOnlyModelViewSet):
pagination_class = ClickhousePaginator
# to be used in the ViewDescriptionMixin
filter_backends = [ClickhouseRestFilter, ClickhouseOrderingFilter]
def filter_queryset(self, queryset):
for backend in list(self.filter_backends):
queryset = backend().filter_queryset(self.request, queryset, self)
return queryset
def list(self, request, *args, **kwargs):
self.request = request
queryset = self.get_queryset()
queryset = self.filter_queryset(queryset)
self.page = self.paginate_queryset(queryset)
if self.page is not None:
serializer = self.get_serializer(self.page, many=True)
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(queryset, many=True)
return Response(serializer.data) |
from huey import SqliteHuey
from cloudcopy.server.config import settings
# if ASYNC_TASKS is False (e.g. in testing)
# async tasks will be executed immediately
# or added to an in-memory schedule registry
app = SqliteHuey(
'tasks',
filename=settings.INTERNAL_DATABASE_FILE,
immediate=not settings.ASYNC_TASKS
)
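# Hedged usage sketch (not part of the original module): a task registered on this
# instance. Because immediate mode is enabled when ASYNC_TASKS is False, calling
# ping() in tests executes it in-process instead of enqueueing it for a worker.
@app.task()
def ping():
    # trivial task used only to illustrate registration on `app`
    return "pong"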
|
import re
from iofree import schema
HTTP_LINE = re.compile(b"([^ ]+) +(.+?) +(HTTP/[^ ]+)")
class HTTPResponse(schema.BinarySchema):
head = schema.EndWith(b"\r\n\r\n")
def __post_init__(self):
first_line, *header_lines = self.head.split(b"\r\n")
self.ver, self.code, *status = first_line.split(None, 2)
self.status = status[0] if status else b""
self.header_lines = header_lines
class HTTPRequest(schema.BinarySchema):
head = schema.EndWith(b"\r\n\r\n")
def __post_init__(self):
first_line, *header_lines = self.head.split(b"\r\n")
self.method, self.path, self.ver = HTTP_LINE.fullmatch(first_line).groups()
self.headers = dict([line.split(b": ", 1) for line in header_lines])
|
from PyQt4 import QtCore, QtGui, QtSql
from sm_ui import Ui_MainWindowSM
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
import sqlite3
import datetime
import time
from data_models import *
#some useful tutorials:
#http://www.qgisworkshop.org/html/workshop/python_in_qgis_tutorial2.html
#http://www.qgis.org/api/classQgsVectorDataProvider.html
#http://www.qgis.org/pyqgis-cookbook/index.html
# create the dialog for the plugin builder
class SMDialog(QtGui.QMainWindow):
def __init__(self):
QtGui.QMainWindow.__init__(self)
# Set up the user interface from Designer.
self.ui = Ui_MainWindowSM()
self.ui.setupUi(self)
self.conn = sqlite3.connect("sm.sqlite")
self.ui.addRunButton.clicked.connect(self.addRun)
self.ui.addStudyButton.clicked.connect(self.addStudy)
self.study = None
self.qtdb = QtSql.QSqlDatabase.addDatabase("QSQLITE")
self.qtdb.setDatabaseName("sm.sqlite")
print self.qtdb.open()
self.studyModel = studyModel(); self.ui.studyView.setModel(self.studyModel)
self.ui.studyView.hideColumn(2)
self.ui.studyView.hideColumn(4)
self.runModel = runModel(); self.ui.runView.setModel(self.runModel)
self.ui.runView.hideColumn(0)
self.ui.runView.hideColumn(5)
self.ui.runView.hideColumn(6)
self.connect(self.studyModel, QtCore.SIGNAL("primeInsert(QSqlRecord)"), self.insertStudyRow)
def addStudy(self):
max_id = self.conn.execute("select max(id) from study").fetchone()[0]
if max_id is None:
max_id = 0
d = str(datetime.datetime.now())[0:16]
# self.conn.execute("insert into study values (?,?,?,?,?)",(max_id+1, max_id+1, None, d, None,))
# self.conn.commit()
self.study = max_id+1
# self.ui.runView.dataChanged()
        record = self.studyModel.record()
        record.setValue(1, QtCore.QVariant(max_id+1))
        record.setValue(2, QtCore.QVariant(max_id+1))
        record.setValue(2, QtCore.QVariant(self.tr("")))
        record.setValue(2, QtCore.QVariant(self.tr(d)))
self.studyModel.insertRows(self.studyModel.rowCount(), 1, record)
# self.studyModel.record(1).value("id")
# index = QtCore.QModelIndex()
# index.column = 1;
# print self.studyModel.data(,1)
def insertStudyRow(self, record):
print "LALALA"
# print record
def addRun(self):
if self.study is None:
self.addStudy()
scenario = self.conn.execute("select default_scenario from study where id=?",(self.study,)).fetchone()[0]
max_id = self.conn.execute("select max(id) from run").fetchone()[0]
if max_id is None:
max_id = 1
d = str(datetime.datetime.now())[0:16]
self.conn.execute("insert into run values (?,?,?,?,?,?,?)",(max_id+1,self.study,None,max_id+1, d, scenario,None,))
self.conn.commit()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# (c) 2020, Bodo Schulz <[email protected]>
# BSD 2-clause (see LICENSE or https://opensource.org/licenses/BSD-2-Clause)
from __future__ import print_function
import re
from ansible.module_utils import distro
from ansible.module_utils.basic import AnsibleModule
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '0.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = """
---
module: package_version.py
author:
- 'Bodo Schulz'
short_description: tries to determine the version of a package to be installed.
description: ''
"""
EXAMPLES = """
- name: get version of installed php-fpm
package_version:
package_name: "php-fpm"
register: package_version
"""
class PackageVersion(object):
def __init__(self, module):
self.module = module
self.package_name = module.params.get("package_name")
def run(self):
result = dict(
changed=False,
failed=True,
msg="initial"
)
(distribution, version, codename) = distro.linux_distribution(full_distribution_name=False)
self.module.log(msg="distribution : '{0}'".format(distribution))
if(distribution.lower() in ["centos", "oracle", "redhat", "fedora"]):
"""
redhat based
"""
package_mgr = self.module.get_bin_path('yum', False)
if(not package_mgr):
package_mgr = self.module.get_bin_path('dnf', True)
if(not package_mgr):
return True, "", "no valid package manager (yum or dnf) found"
self.module.log(msg=" '{0}'".format(package_mgr))
rc, out, err = self.module.run_command(
[package_mgr, "list", "installed", "--cacheonly", "*{0}*".format(self.package_name)],
check_rc=False)
pattern = re.compile(r".*{0}.*(?P<version>[0-9]+\.[0-9]+)\..*@(?P<repo>.*)".format(self.package_name))
version = re.search(pattern, out)
if(version):
version_string = version.group('version')
version_string_compressed = version_string.replace('.', '')
result = dict(
failed=False,
version=version_string,
version_compressed=version_string_compressed
)
else:
result = dict(
failed=False,
msg="package {0} is not installed".format(self.package_name),
)
elif(distribution.lower() in ["debian", "ubuntu"]):
"""
            debian based
"""
import apt
cache = apt.cache.Cache()
cache.update()
cache.open()
try:
pkg = cache[self.package_name]
# debian:10 / buster:
# [php-fpm=2:7.3+69]
# ubuntu:20.04 / focal
# [php-fpm=2:7.4+75]
if(pkg):
pkg_version = pkg.versions[0]
version = pkg_version.version
pattern = re.compile(r'^\d:(?P<version>\d.+)\+.*')
version = re.search(pattern, version)
version_string = version.group('version')
version_string_compressed = version_string.replace('.', '')
result = dict(
failed=False,
version=version_string,
version_compressed=version_string_compressed
)
except KeyError as error:
self.module.log(msg="error : {0}".format(error))
result = dict(
failed=False,
msg="package {0} is not installed".format(self.package_name),
)
else:
"""
all others
"""
result = dict(
failed=False,
msg="unknown distribution: {0}".format(distribution),
version=""
)
return result
# ---------------------------------------------------------------------------------------
# Module execution.
#
def main():
''' ... '''
module = AnsibleModule(
argument_spec=dict(
package_name=dict(required=True, type='str'),
),
supports_check_mode=False,
)
result = PackageVersion(module).run()
module.exit_json(**result)
# import module snippets
if __name__ == '__main__':
main()
|
def write_out_sam(header, outsam, outpath):
print('Writing trimmed sam to: %s' % outpath)
with open(outpath, 'w') as OUT:
OUT.write('\n'.join(header + outsam))
def trim_sam_by_windows(sam, positions):
header = []
output = []
with open(sam, 'r') as FILE:
for line in FILE:
if line[0] == '@':
header.append(line.strip())
else:
if '_'.join(line.strip().split('\t')[2:4]) in positions:
output.append(line.strip())
return header, output
def write_to_bed(windows, outpath):
print('Writing windows to: %s' % outpath)
with open(outpath, 'w') as OUT:
OUT.write('\n'.join(windows))
def collapse_windows(sortwindows):
# this function is janky
# sortwindows is a naturally sorted list of strings
# the strings inside the list are formatted as scaffold\tstartposition\tendposition
collapsedsortwindows = []
collapsewindepth = []
overlapgate = 1 # record start position of first window in overlap
for count, win in enumerate(sortwindows):
scaff = win.split('\t')[0]
winstart = int(win.split('\t')[1])
winend = int(win.split('\t')[2])
windepth = float(win.split('\t')[3])
if count == 0:
pass
elif (scaff == prevscaff) and (winstart <= prevwinend) and (prevwinstart <= winend):
# some type of overlap # also equivalent to (i think) a[1] > b[0] and a[0] < b[1]
# determine if there is overlap between previous window and current window, then combine them
if overlapgate == 1:
collapsewindow = '%s\t%s' % (scaff, prevwinstart) # will add end coordinate when i find end of overlap
overlapgate = 0
collapsewindepth.append(prevwindepth)
collapsewindepth.append(windepth)
elif overlapgate == 0:
# if it makes it to this point, there is no overlap with previous window
# but if overlapgate == 0 then there was overlap between at least two windows and we combine coordinates
collapsedsortwindows.append('%s\t%s\t%.2f' % (collapsewindow, prevwinend, sum(collapsewindepth) / len(collapsewindepth)))
collapsewindepth = [] # reset for next series of overlapping windows
overlapgate = 1 # reset overlapgate to allow for first start coordinates to be recorded
elif overlapgate == 1:
# if no overlap with previous, and overlapgate == 1 then no windows were overlapping
# check if this window WILL overlap with the NEXT window and add it to collapsedsortwindows if NO
if win != sortwindows[-1]: # need to have this to avoid list indexing error
if (scaff == sortwindows[count+1].split('\t')[0]) and (winstart <= int(sortwindows[count+1].split('\t')[2])) and (int(sortwindows[count+1].split('\t')[1]) <= winend):
pass
else:
# no overlap with NEXT window
collapsedsortwindows.append(win)
collapsewindepth = []
else: # if it is the last window here output the window
collapsedsortwindows.append(win)
collapsewindepth = []
prevscaff = win.split('\t')[0]
prevwinstart = int(win.split('\t')[1])
prevwinend = int(win.split('\t')[2])
prevwindepth = float(win.split('\t')[3])
return collapsedsortwindows
def mean_with_denominator(lst, denominator):
mean = sum(lst) / denominator
return mean
def calculate_depth_with_pysam_sliding_windows(bamfile, dscaff_max, step=25, winsize=100, cutoff=100):
import pysam
windows = []
positions = set()
# d = {} # i don't keep all read depth values
samfile = pysam.AlignmentFile(bamfile, "rb") #pysam.AlignmentFile("ex1.bam", "rb")
for scaff, maxcoord in dscaff_max.items():
print(scaff, end=', ')
for i in range(0, maxcoord, step):
# tempdepth = [] # temppos = []
# .pileup skips bases when depth = 0 # need to figure out mpileup -a option in pysam
tempdepth = [pileupcolumn.nsegments for pileupcolumn in samfile.pileup(scaff, i, i + winsize)]
#for pileupcolumn in samfile.pileup(scaff, i, i + winsize):
# #print("\ncoverage at base %s = %s" % (pileupcolumn.pos, pileupcolumn.n))
            # #temppos.append(pileupcolumn.reference_pos + 1) # pos deprecated
            # tempdepth.append(pileupcolumn.nsegments) # n deprecated
if len(tempdepth) != 0:
average_depth = mean_with_denominator(tempdepth, denominator=winsize)
else:
average_depth = 0
if average_depth >= cutoff:
# print(str(average_depth), end=', ')
# scaff, start, end, averagedepth
windows.append([scaff, str(i + 1), str(i + winsize), str(average_depth)])
for j in range(i + 1, i + winsize):
if j <= maxcoord:
positions.add('_'.join([scaff, str(j)]))
samfile.close()
return windows, positions
def read_fasta_as_dict(f):
d = {} # fasta names are key, values are sequences as string
namelist = []
with open(f, 'r') as FILE:
for line in FILE:
if line[0] == '>':
if ' ' in line:
name = line.strip().split()[0][1:-len('_with_IES')]
namelist.append(name)
d[name] = []
else:
name = line.strip()[1:]
namelist.append(name)
d[name] = []
            elif line.strip() != '':  # skip blank lines (guards against a trailing empty line)
d[name].append(line.strip())
for name in namelist:
d[name] = ''.join(d[name]) # join list of partial sequences. Useful if interleaved fasta
return d, namelist
def length_of_fasta_sequences(genomefile):
import os
print('Counting lengths of all scaffolds')
path, f = os.path.split(genomefile)
dgenome, names = read_fasta_as_dict(genomefile)
d = {k: len(v) for k, v in dgenome.items()}
return d, names
def max_coords_per_scaffold(d):
dmax_coords = {}
dscaff_max = {}
for n in list(d.keys()):
# collect all coordinates in list, for each scaffold
dmax_coords.setdefault('_'.join(n.split('_')[:-1]), []).append(int(n.split('_')[-1]))
for scaff in list(dmax_coords.keys()):
# get maximum coordinate per scaffold
dscaff_max[scaff] = max(dmax_coords[scaff])
return dscaff_max
def main(bamfiles, genomefile, step=25, winsize=100, cutoff=100, maxdepth=0):
# input is sam file # output does not include positions of 0 coverage
# key is scafffold_position # value is depth
import os
from natsort import natsorted, ns
for bam in bamfiles:
path, file = os.path.split(bam)
prefix = os.path.join(path, '.'.join(file.split('.')[:-1]))
print('step: %d, window size: %d, cutoff: %d' % (step, winsize, cutoff))
#d, names, nseqs = calculate_depth_of_coverage(sam, maxdepth)
# dscaff_max = max_coords_per_scaffold(d)
dscafflengths, names = length_of_fasta_sequences(genomefile)
windows, positions = calculate_depth_with_pysam_sliding_windows(bam, dscafflengths, step, winsize, cutoff)
#d = fill_in_zero_depth(d, dscaff_max)
#windows, positions = sliding_windows(d, dscaff_max, interval, cutoff)
sortwindows = natsorted(['\t'.join(w) for w in windows], key=lambda y: y.lower())
write_to_bed(sortwindows, '%s.windows.s%d.w%d.d%d.pileup.bed' % (prefix, step, winsize, cutoff))
collapsedsortwindows = collapse_windows(sortwindows)
write_to_bed(collapsedsortwindows, '%s.collapsedwindows.s%d.w%d.d%d.pileup.bed' % (prefix, step, winsize, cutoff))
#header, outsam = trim_sam_by_windows(sam, positions)
#write_out_sam(header, outsam, '%s.windows.w%d.d%d.sam' % (prefix, interval, cutoff))
# d_for, names_for, nseqs_for = calculate_depth_of_coverage(forward, maxdepth)
# d_rev, names_rev, nseqs_rev = calculate_depth_of_coverage(reverse, maxdepth)
|
"""Testing basic ways to setup a dataset."""
import unittest
import uuid
from copy import copy
import numpy as np
import pandas as pd
from torch.utils.data import DataLoader
from pytoda.datasets import (
ConcatKeyDataset,
DatasetDelegator,
KeyDataset,
indexed,
keyed,
)
from pytoda.types import Hashable
class Indexed(KeyDataset):
"""As DataFrameDataset but only implementing necessary methods."""
def __init__(self, df):
self.df = df
super().__init__()
def __len__(self):
return len(self.df)
def __getitem__(self, index):
return self.df.iloc[index].values
def get_key(self, index: int) -> Hashable:
"""Get key from integer index."""
return self.df.index[index]
def get_index(self, key: Hashable) -> int:
"""Get index for first datum mapping to the given key."""
# item will raise if not single value (deprecated in pandas)
try:
indexes = np.nonzero(self.df.index == key)[0]
return indexes.item()
except ValueError:
if len(indexes) == 0:
raise KeyError
else:
# key not unique, return first as ConcatKeyDataset
return indexes[0]
class Delegating(DatasetDelegator):
"""NOT implementing methods (and only built-ins from inheritance)."""
def __init__(self, data):
self.dataset = data
class TestBaseDatasets(unittest.TestCase):
"""Testing dataset for base methods."""
length = 11 # of a single dataset
dims = 5
def random_data(self, length, dims):
an_array = np.random.randn(self.length, self.dims)
keys = [str(uuid.uuid4()) for _ in range(self.length)]
a_df = pd.DataFrame(an_array, index=keys)
return keys, a_df, Indexed(a_df)
def setUp(self):
(self.a_1st_keys, self.a_1st_df, self.a_1st_ds) = self.random_data(
self.length, self.dims
)
(self.a_2nd_keys, self.a_2nd_df, self.a_2nd_ds) = self.random_data(
self.length, self.dims
)
self.delegating_ds = Delegating(self.a_1st_ds)
# KeyDataset.__add__ i.e. ConcatKeyDataset
self.concat_ds = self.delegating_ds + self.a_2nd_ds
self.concat_keys = self.a_1st_keys + self.a_2nd_keys
def assertListedEqual(self, listable1, listable2):
"""Easier comparison between arrays, series and or lists."""
self.assertListEqual(list(listable1), list(listable2))
def test_delegation_dir(self):
# stacking delegation
ds_dir = dir(Delegating(Delegating(self.delegating_ds)))
# delegated to Indexed
self.assertIn('get_key', ds_dir)
self.assertIn('get_index', ds_dir)
# delegated to KeyDataset
self.assertIn('get_item_from_key', ds_dir)
self.assertIn('keys', ds_dir)
self.assertIn('has_duplicate_keys', ds_dir)
# futile, as built-ins delegation needed hardcoding in DatasetDelegator
self.assertIn('__len__', ds_dir) # see test___len__
self.assertIn('__getitem__', ds_dir) # see test___getitem__
self.assertIn('__add__', ds_dir) # see tests on self.concat_ds
# no tests on implementation specific attributes here
def test___len__(self) -> None:
"""Test __len__."""
self.assertEqual(len(self.a_1st_ds), self.length)
self.assertEqual(len(self.a_2nd_ds), self.length)
self.assertEqual(len(self.delegating_ds), self.length)
self.assertEqual(len(self.concat_ds), 2 * self.length)
def test___getitem__(self) -> None:
"""Test __getitem__."""
i = 0
self.assertListedEqual(self.a_1st_ds[i], self.a_1st_df.iloc[i])
self.assertListedEqual(self.a_2nd_ds[i], self.a_2nd_df.iloc[i])
self.assertListedEqual(self.delegating_ds[i], self.a_1st_df.iloc[i])
# first in datasets
self.assertListedEqual(self.concat_ds[i], self.a_1st_df.iloc[i])
i = -1
self.assertListedEqual(self.a_1st_ds[i], self.a_1st_df.iloc[i])
self.assertListedEqual(self.a_2nd_ds[i], self.a_2nd_df.iloc[i])
self.assertListedEqual(self.delegating_ds[i], self.a_1st_df.iloc[i])
# last in datasets
self.assertListedEqual(self.concat_ds[i], self.a_2nd_df.iloc[i])
def _test__getitem__modified(self, mutate_copy) -> None:
"""Test __getitem__ returning tuple with item first."""
for i in [0, -1]:
self.assertListedEqual(
mutate_copy(self.a_1st_ds)[i][0], self.a_1st_df.iloc[i]
)
self.assertListedEqual(
mutate_copy(self.a_2nd_ds)[i][0], self.a_2nd_df.iloc[i]
)
self.assertListedEqual(
mutate_copy(self.delegating_ds)[i][0], self.a_1st_df.iloc[i]
)
# first in datasets
self.assertListedEqual(mutate_copy(self.concat_ds)[0][0], self.a_1st_df.iloc[0])
# last in datasets
self.assertListedEqual(
mutate_copy(self.concat_ds)[-1][0], self.a_2nd_df.iloc[-1]
)
def test__getitem__mutating_utils(self):
self._test__getitem__modified(mutate_copy=indexed)
self._test__getitem__modified(mutate_copy=keyed)
def test_data_loader(self) -> None:
"""Test data_loader."""
batch_size = 4
a_1st_dl = DataLoader(self.a_1st_ds, batch_size=batch_size, shuffle=True)
full_batches = self.length // batch_size
for batch_index, batch in enumerate(a_1st_dl):
if batch_index >= full_batches: # if drop_last
self.assertEqual(batch.shape, (self.length % batch_size, self.dims))
else:
self.assertEqual(batch.shape, (batch_size, self.dims))
# concatenated
concat_dl = DataLoader(self.concat_ds, batch_size=batch_size, shuffle=True)
full_batches = (2 * self.length) // batch_size
for batch_index, batch in enumerate(concat_dl):
if batch_index >= full_batches: # if drop_last
self.assertEqual(
batch.shape, ((2 * self.length) % batch_size, self.dims)
)
else:
self.assertEqual(batch.shape, (batch_size, self.dims))
def _test_item_independent(self, ds, keys, index):
key = keys[index]
positive_index = index % len(ds)
# get_key (support for negative index?)
self.assertEqual(key, ds.get_key(positive_index))
self.assertEqual(key, ds.get_key(index))
# get_index
self.assertEqual(positive_index, ds.get_index(key))
# keys
self.assertListedEqual(keys, ds.keys())
# duplicate keys
self.assertFalse(ds.has_duplicate_keys)
def _test_base_methods(self, ds, keys, index):
key = keys[index]
self._test_item_independent(ds, keys, index)
# get_item_from_key
self.assertListedEqual(ds[index], ds.get_item_from_key(key))
# in case of returning a tuple:
# for from_index, from_key in zip(ds[index], ds.get_item_from_key(key)): # noqa
# self.assertListedEqual(from_index, from_key)
def _test_keyed_util(self, ds, keys, index):
ds_ = keyed(ds)
key = keys[index]
self._test_item_independent(ds_, keys, index)
# modified methods
item_of_i, k_of_i = ds_[index]
item_of_k, k_of_k = ds_.get_item_from_key(key)
# get_item_from_key
self.assertListedEqual(item_of_i, item_of_k)
self.assertTrue(key == k_of_i and key == k_of_k)
def _test_indexed_util(self, ds, keys, index):
ds_ = indexed(ds)
key = keys[index]
positive_index = index % len(ds_)
self._test_item_independent(ds_, keys, index)
# modified methods
item_of_i, i_of_i = ds_[index]
item_of_k, i_of_k = ds_.get_item_from_key(key)
# get_item_from_key
self.assertListedEqual(item_of_i, item_of_k)
self.assertTrue(positive_index == i_of_i and positive_index == i_of_k)
def _test_stacked_indexed_keyed_util(self, ds, keys, index):
ds_ = indexed(keyed(indexed(ds)))
key = keys[index]
positive_index = index % len(ds_)
self._test_item_independent(ds_, keys, index)
# modified methods
(((item_of_i, i_of_i0), k_of_i), i_of_i1) = ds_[index]
(((item_of_k, i_of_k0), k_of_k), i_of_k1) = ds_.get_item_from_key(key)
# get_item_from_key
self.assertListedEqual(item_of_i, item_of_k)
self.assertTrue(key == k_of_i and key == k_of_k)
self.assertTrue(
positive_index == i_of_i0
and positive_index == i_of_i1
and positive_index == i_of_k0
and positive_index == i_of_k1
)
def test_all_base_for_indexed_methods_and_copy(self):
(other_keys, _, other_ds) = self.random_data(self.length, self.dims)
for ds, keys in [
(self.a_1st_ds, self.a_1st_keys),
(self.a_2nd_ds, self.a_2nd_keys),
(self.delegating_ds, self.a_1st_keys),
(self.concat_ds, self.concat_keys),
# test shallow copy (not trivial with delegation)
(copy(self.a_1st_ds), self.a_1st_keys),
(copy(self.a_2nd_ds), self.a_2nd_keys),
(copy(self.delegating_ds), self.a_1st_keys),
(copy(self.concat_ds), self.concat_keys),
]:
index = -1
self._test_indexed_util(ds, keys, index)
self._test_keyed_util(ds, keys, index)
self._test_stacked_indexed_keyed_util(ds, keys, index)
self._test_base_methods(ds, keys, index)
# again with self delegation and concatenation
ds = ConcatKeyDataset([Delegating(other_ds), Delegating(ds)])
index = self.length + 1 # dataset_index == 1
keys = other_keys + keys
self._test_indexed_util(ds, keys, index)
self._test_keyed_util(ds, keys, index)
self._test_stacked_indexed_keyed_util(ds, keys, index)
self._test_base_methods(ds, keys, index)
# get_index_pair
self.assertTupleEqual((1, index - self.length), ds.get_index_pair(index))
# get_key_pair
self.assertTupleEqual((1, keys[index]), ds.get_key_pair(index))
# ConcatKeyDataset is not a DatasetDelegator
self.assertNotIn('df', dir(ds))
            index = self.length + 1  # index pointing into the second dataset of the concat
duplicate_ds = other_ds + other_ds
self.assertTrue(duplicate_ds.has_duplicate_keys)
# duplicate keys lookup returns first in this case
self.assertNotEqual(index, duplicate_ds.get_index(duplicate_ds.get_key(index)))
if __name__ == '__main__':
unittest.main()
|
import pandas as pd
from desafio_iafront.jobs.pedidos.contants import KEPT_COLUNS, COLUMN_RENAMES
def _prepare(pedidos_joined: pd.DataFrame) -> pd.DataFrame:
# Remove colunas resultantes do merge
result_dataset = drop_merged_columns(pedidos_joined)
# Remove colunas que não serão usadas
result_dataset = result_dataset[KEPT_COLUNS]
# Renomeia colunas
result_dataset = result_dataset.rename(columns=COLUMN_RENAMES)
return result_dataset
def drop_merged_columns(data_frame: pd.DataFrame) -> pd.DataFrame:
result_dataset = data_frame.copy(deep=True)
for column in data_frame.columns:
if column.endswith("_off"):
            result_dataset = result_dataset.drop(column, axis=1)
return result_dataset
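# Minimal usage sketch (added for illustration; the column names below are made up and
# not taken from the original job) showing that drop_merged_columns removes the
# "_off" suffixed columns a merge can produce.
if __name__ == "__main__":
    _df = pd.DataFrame({"id": [1], "valor": [10.0], "valor_off": [99.0]})
    print(list(drop_merged_columns(_df).columns))  # -> ['id', 'valor']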
|
"""
Additional functions for the movement algorithms
"""
import numpy as np
FRICCION = 0.995
def normalize(vector: np.ndarray) -> np.ndarray:
"""
    Normalizes input vector to have a length of 1 if
the vector is not a 0-vector. Otherwise, it is left unchanged
Args:
vector: vector to normalize
Returns:
vector: normalized vector
"""
vec_norm = np.linalg.norm(vector)
if vec_norm != 0:
vector = vector / vec_norm
return vector
def random_binomial() -> float:
"""
Creates a random number between -1 and 1 where the numbers close
to 0 are more likely to appear
Returns:
rand_num: random number between -1 and 1
"""
rand_num = np.random.rand() - np.random.rand()
return rand_num
def as_vector(orientation: float) -> np.ndarray:
"""
    Transforms the orientation from float type to vector shape
Args:
orientation: orientation in float type (must be degrees)
Returns:
orientation_vector: orientation in type vector
"""
orientation_vector = np.array(
[np.sin(np.deg2rad(orientation)), -np.cos(np.deg2rad(orientation))]
)
return orientation_vector
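# Minimal usage sketch (added for illustration; the input values below are arbitrary
# and not part of the original module).
if __name__ == "__main__":
    print(normalize(np.array([3.0, 4.0])))  # -> [0.6 0.8]
    print(random_binomial())                # value in (-1, 1), biased toward 0
    print(as_vector(90.0))                  # 90 degrees -> approximately [1, 0]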
|
import os
import base64
import json
from pathlib import Path
class EnvironmentReferenceError(Exception):
pass
def decode_base64(input_str: str) -> str:
base64_bytes = input_str.encode("utf-8")
message_bytes = base64.b64decode(base64_bytes)
return message_bytes.decode("utf-8")
def read_json(directory: Path, filename: str):
with open(directory.joinpath(filename)) as f:
return json.load(f)
def walk_dir(directory: str, callback, ending_with=None):
for subdir, dirs, files in os.walk(directory):
for filename in files:
filepath = subdir + os.sep + filename
if ending_with and not filepath.endswith(ending_with):
continue
callback(filepath, filename)
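# Usage sketch (added comment; `print_path` is a hypothetical callback, not part of
# this module):
#   def print_path(filepath, filename):
#       print(filepath, filename)
#   walk_dir("configs", print_path, ending_with=".json")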
|
# AutoTransform
# Large scale, component based code modification library
#
# Licensed under the MIT License <http://opensource.org/licenses/MIT>
# SPDX-License-Identifier: MIT
# Copyright (c) 2022-present Nathan Rockenbach <http://github.com/nathro>
# @black_format
"""A change represents a submission from a run of AutoTransform on a particular Batch. They
are used for managing submissions to code review/source control systems. A pull request is
an example of a potential change."""
|
import sqlite3
from sqlite3 import Error
import cmd2web
import sys
import datetime
class DatabaseConnection:
def __init__(self,db_file):
try:
self.conn = sqlite3.connect(db_file)
# return self.conn
except Error as e:
print(e)
# return None
def close(self):
self.conn.close()
def get_restricted_access(self,group_list):
cur = self.conn.cursor()
query='SELECT RestrictedGroup FROM Groups WHERE GroupName IN (%s)' % ','.join('"{}"'.format(i) for i in group_list)
sys.stderr.write("\n\n\nQuery: {0}\n\n\n".format(query))
cur.execute(query)
rows = cur.fetchall()
give_access=False
if(len(rows) > 0):
for i in rows:
if i[0]==1:
give_access=True
break
else:
give_access=False
            return give_access
# [restricted_acess]=rows[0]
# # 0 means restricted group so token will be needed
# if(restricted_acess == 1):
# return True
# else:
# return False
else:
# //Error Record does not exist for the servicename
return cmd2web.Server.error('Record does not exist for the service')
def check_token_access(self,group_list,token):
cur = self.conn.cursor()
        query = 'select k.groupID, k.Expiry from Keys k join Groups s on s.GroupID = k.GroupID where s.GroupName in (%s)' % ','.join('"{}"'.format(i) for i in group_list) + " and k.token='{0}'".format(token)
sys.stderr.write("\n\n\nQuery: {0}\n\n\n".format(query))
cur.execute(query)
rows = cur.fetchall()
if(len(rows) > 0):
group_id, date_expiry_str = rows[0]
format_str = "%m-%d-%Y"
expiry_date = datetime.datetime.strptime(date_expiry_str, format_str)
current_date = datetime.datetime.now()
if(current_date<=expiry_date):
return True
else:
return cmd2web.Server.error('Wrong or expired token. Access Denied')
else:
# //Error
return False
# if __name__ == '__main__':
# s = DatabaseConnection("../DBScript/CMD2WEB.sqlite")
# print(s.get_restricted_access("rmsk"))
# print(s.get_restricted_access("rmsk2"))
# print(s.check_token_access("rmsk","12223453445")) |
import math
import matplotlib.pyplot as plt
from merl import Merl
def main():
brdf = Merl('aluminium.binary')
samples = 1024
theta_h = [math.pi / 2 * x / samples for x in range(0, samples)]
reflectance_raw_ndf = [brdf.eval_raw(th, 0, 0) for th in theta_h]
reflectance_interp_ndf = [brdf.eval_interp(th, 0, 0) for th in theta_h]
plot_raw_ndf = plt.subplot(121)
plot_raw_ndf.plot(theta_h, reflectance_raw_ndf)
plot_raw_ndf.grid(True)
plot_raw_ndf.set_title("NDF Raw")
plot_interp_ndf = plt.subplot(122)
plot_interp_ndf.plot(theta_h, reflectance_interp_ndf)
plot_interp_ndf.grid(True)
plot_interp_ndf.set_title("NDF Interpolated")
plt.show()
if __name__ == '__main__':
main() |
import pytest
import numpy as np
from numpy.testing import assert_almost_equal
from acrolib.quaternion import Quaternion
from acrolib.sampling import SampleMethod
from acrolib.geometry import rotation_matrix_to_rpy
from acrobotics.robot import Robot, IKResult
from acrobotics.path.tolerance import (
Tolerance,
NoTolerance,
SymmetricTolerance,
QuaternionTolerance,
)
from acrobotics.path.path_pt_base import PathPt
from acrobotics.path.path_pt import TolEulerPt, TolPositionPt, TolQuatPt
from acrobotics.path.sampling import SamplingSetting, SearchStrategy
IK_RESULT = IKResult(True, [np.ones(6), 2 * np.ones(6)])
class DummyRobot(Robot):
def __init__(self, is_colliding=False):
self.is_colliding = is_colliding
self.ndof = 6
def is_in_collision(self, joint_position, scene=None):
return self.is_colliding
def ik(self, transform):
return IK_RESULT
def assert_in_range(x, lower, upper):
assert x <= upper
assert x >= lower
class TestPathPt:
def test_calc_ik(self):
samples = [[], []]
joint_solutions = PathPt._calc_ik(DummyRobot(), samples)
assert len(joint_solutions) == 4
assert_almost_equal(joint_solutions[0], IK_RESULT.solutions[0])
assert_almost_equal(joint_solutions[1], IK_RESULT.solutions[1])
assert_almost_equal(joint_solutions[2], IK_RESULT.solutions[0])
assert_almost_equal(joint_solutions[3], IK_RESULT.solutions[1])
class TestTolPositionPt:
def test_create(self):
TolPositionPt([1, 2, 3], Quaternion(), 3 * [NoTolerance()])
def test_sample_grid(self):
q = Quaternion()
pos_tol = [Tolerance(-0.5, 0.5, 2), Tolerance(-0.5, 0.5, 2), NoTolerance()]
point = TolPositionPt([0.5, 0.5, 1], q, pos_tol)
grid = point.sample_grid()
position_samples = [[0, 0, 1], [1, 0, 1], [0, 1, 1], [1, 1, 1]]
for pos, T in zip(position_samples, grid):
assert_almost_equal(pos, T[:3, 3])
def test_sample_grid_2(self):
"""
Rotate with pi / 2 around z, give a tolerance along x,
expect a grid sampled along y
"""
tol = [SymmetricTolerance(0.1, 3), NoTolerance(), NoTolerance()]
pt = TolPositionPt(
[1, 2, 3], Quaternion(angle=np.pi / 2, axis=np.array([0, 0, 1])), tol
)
tf_samples = pt.sample_grid()
g = [tf[:3, 3] for tf in tf_samples]
g_desired = np.array([[1, 1.9, 3], [1, 2, 3], [1, 2.1, 3]])
assert_almost_equal(g, g_desired)
def test_sample_incremental_1(self):
"""
Rotate with pi / 2 around z, give a tolerance along x,
expect a grid sampled along y
"""
tol = [SymmetricTolerance(0.1, 3), NoTolerance(), NoTolerance()]
pt = TolPositionPt(
[1, 2, 3], Quaternion(angle=np.pi / 2, axis=np.array([0, 0, 1])), tol
)
g = pt.sample_incremental(10, SampleMethod.random_uniform)
pos_samples = [tf[:3, 3] for tf in g]
for sample in pos_samples:
assert_almost_equal(sample[0], 1)
assert_almost_equal(sample[2], 3)
assert_in_range(sample[1], 1.9, 2.1)
def test_sample_incremental_2(self):
tol = [SymmetricTolerance(0.1, 3), Tolerance(0.2, 1.0, 3), NoTolerance()]
pt = TolPositionPt(
[1, 2, 3], Quaternion(angle=np.pi / 2, axis=np.array([0, 0, 1])), tol
)
g = pt.sample_incremental(10, SampleMethod.random_uniform)
pos_samples = [tf[:3, 3] for tf in g]
for sample in pos_samples:
assert_almost_equal(sample[2], 3)
assert_in_range(sample[0], 0, 3) # 1 + (-1, 2)
assert_in_range(sample[1], 1.9, 2.1) # 2 + (-0.1, 0.1)
def test_transform_to_rel_tolerance_deviation(self):
tol = [SymmetricTolerance(0.1, 3), Tolerance(0.2, 1.0, 3), NoTolerance()]
quat = Quaternion(angle=np.pi / 2, axis=np.array([0, 0, 1]))
pt = TolPositionPt([1, 2, 3], quat, tol)
tf = quat.transformation_matrix
tf[:3, 3] = np.array([1.06, 1.91, 3])
p_rel = pt.transform_to_rel_tolerance_deviation(tf)
p_desired = np.array([-0.09, -0.06, 0.0])
assert_almost_equal(p_rel, p_desired)
class TestEulerPt:
def test_grid(self):
pos = [1, 2, 3]
pos_tol = [NoTolerance()] * 3
rot_tol = [NoTolerance(), NoTolerance(), SymmetricTolerance(np.pi, 3)]
pt = TolEulerPt(pos, Quaternion(), pos_tol, rot_tol)
tf_samples = pt.sample_grid()
assert_almost_equal(tf_samples[0], tf_samples[2])
assert_almost_equal(tf_samples[1][:3, :3], np.eye(3))
def test_incremental(self):
pos = [1, 2, 3]
pos_tol = [NoTolerance()] * 3
rot_tol = [NoTolerance(), NoTolerance(), SymmetricTolerance(np.pi, 3)]
pt = TolEulerPt(pos, Quaternion(), pos_tol, rot_tol)
tf_samples = pt.sample_incremental(10, SampleMethod.random_uniform)
euler = [rotation_matrix_to_rpy(tf[:3, :3]) for tf in tf_samples]
for i in range(10):
assert_almost_equal(euler[i][0], 0)
assert_almost_equal(euler[i][1], 0)
assert_in_range(euler[i][2], -np.pi, np.pi)
def test_transform_to_rel_tolerance_deviation(self):
tol = [SymmetricTolerance(0.1, 3), Tolerance(0.2, 1.0, 3), NoTolerance()]
rot_tol = [NoTolerance(), NoTolerance(), SymmetricTolerance(0.1, 3)]
quat = Quaternion(angle=np.pi / 2, axis=np.array([0, 0, 1]))
pt = TolEulerPt([1, 2, 3], quat, tol, rot_tol)
quat2 = Quaternion(angle=np.pi / 2 - 0.05, axis=np.array([0, 0, 1]))
tf = quat2.transformation_matrix
tf[:3, 3] = np.array([1.06, 1.91, 3])
p_rel = pt.transform_to_rel_tolerance_deviation(tf)
assert p_rel.shape == (6,)
p_desired = np.array([-0.09, -0.06, 0.0, 0.0, 0.0, -0.05])
assert_almost_equal(p_rel, p_desired)
class TestTolQuatPt:
def test_create(self):
q = Quaternion()
pos_tol = 3 * [NoTolerance()]
TolQuatPt([1, 2, 3], q, pos_tol, QuaternionTolerance(0.5))
def test_sample_incremental(self):
method = SampleMethod.random_uniform
q = Quaternion()
pos_tol = 3 * [NoTolerance()]
distance = 0.1
point = TolQuatPt([1, 2, 3], q, pos_tol, QuaternionTolerance(distance))
samples = point.sample_incremental(100, method)
for tf in samples:
newquat = Quaternion(matrix=tf)
assert Quaternion.distance(q, newquat) <= distance
def test_to_transform(self):
        distance = 0.1
        q = Quaternion()
        pos_tol = 3 * [NoTolerance()]
point = TolQuatPt([1, 2, 3], q, pos_tol, QuaternionTolerance(distance))
tf = point.to_transform([1, 2, 3], q)
assert_almost_equal(
[[1, 0, 0, 1], [0, 1, 0, 2], [0, 0, 1, 3], [0, 0, 0, 1]], tf
)
def setting_generator(search_strategy):
if search_strategy == SearchStrategy.GRID:
return SamplingSetting(
SearchStrategy.GRID, 1, SampleMethod.random_uniform, 10, 10, 10, 2
)
if search_strategy == SearchStrategy.INCREMENTAL:
return SamplingSetting(
SearchStrategy.INCREMENTAL, 1, SampleMethod.random_uniform, 10, 10, 10, 2
)
if search_strategy == SearchStrategy.MIN_INCREMENTAL:
return SamplingSetting(
SearchStrategy.MIN_INCREMENTAL,
1,
SampleMethod.random_uniform,
10,
10,
10,
2,
)
def test_to_joint_solutions():
for search_strategy in SearchStrategy:
tol = [SymmetricTolerance(0.1, 3), NoTolerance(), NoTolerance()]
pt = TolPositionPt(
[1, 2, 3], Quaternion(angle=np.pi / 2, axis=np.array([0, 0, 1])), tol
)
robot = DummyRobot(is_colliding=True)
settings = setting_generator(search_strategy)
if search_strategy is not SearchStrategy.MIN_INCREMENTAL:
joint_solutions = pt.to_joint_solutions(robot, settings)
assert len(joint_solutions) == 0
else:
with pytest.raises(Exception) as info:
joint_solutions = pt.to_joint_solutions(robot, settings)
msg = "Maximum iterations reached in to_joint_solutions."
assert msg in str(info.value)
|
class ImageStore:
pass
|
# LOCUS log parser
# (c) 2013 Don Coleman
import sys
from pprint import pformat
from datetime import datetime
# turn a string of bytes into a byte array
def toByteArray(str):
bytes = []
while len(str) > 1:
byte = str[:2]
bytes.append(int(byte, 16))
str = str[2::]
return bytes
def parseInt(bytes):
if len(bytes) != 2:
        print("WARNING: expecting 2 bytes got %s" % bytes, file=sys.stderr)
number = ((0xFF & bytes[1]) << 8) | (0xFF & bytes[0])
return number
def parseLong(bytes):
if len(bytes) != 4:
        print("WARNING: expecting 4 bytes got %s" % bytes, file=sys.stderr)
number = ((0xFF & bytes[3]) << 24) | ((0xFF & bytes[2]) << 16) | ((0xFF & bytes[1]) << 8) | (0xFF & bytes[0])
return number
def parseFloat(bytes):
longValue = parseLong(bytes)
# borrowed code from https://github.com/douggilliland/Dougs-Arduino-Stuff/blob/master/Host%20code/parseLOCUS/parseLOCUS.cpp
exponent = ((longValue >> 23) & 0xff) # float
exponent -= 127.0
exponent = pow(2,exponent)
mantissa = (longValue & 0x7fffff)
mantissa = 1.0 + (mantissa/8388607.0)
floatValue = mantissa * exponent
if ((longValue & 0x80000000) == 0x80000000):
floatValue = -floatValue
return floatValue
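# Worked example (added comment): the little-endian bytes [0x00, 0x00, 0x80, 0x3F]
# form the long 0x3F800000; its exponent field is 127 (so 2**0 == 1) and its mantissa
# is 0, so parseFloat returns 1.0.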
def parseLine(line):
"""Returns an array of Coordinates"""
if line.startswith("$PMTKLOX,1"):
data, actual_checksum = line.split("*")
generated_checksum = checksum(data)
actual_checksum = actual_checksum.strip()
if generated_checksum != actual_checksum:
# TODO stop processing?
            print("WARNING: Checksum failed. Expected %s but calculated %s for %s" % (actual_checksum, generated_checksum, line), file=sys.stderr)
parts = data.split(",")
# remove the first 3 parts - command, type, line_number
# following this 8 byte hex strings (max 24)
dataFields = parts[3:]
# turn the remaining data into a byte array
bytes = toByteArray("".join(dataFields)) # could call in a loop appending instead of join
# Slice into chunks based on the record size
records = []
chunksize = 16 # Basic logging
while len(bytes) >= chunksize:
record = parseBasicRecord(bytes[:chunksize])
records.append(record)
bytes = bytes[chunksize::]
return records
# http://www.hhhh.org/wiml/proj/nmeaxor.html
def checksum(line):
check = 0
# XOR all the chars in the line except leading $
for char in line[1:]:
check = check ^ ord(char)
# convert to hex string, remove 0x, 0 pad
return hex(check)[2:].upper().zfill(2)
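# Worked example (added comment; the sentence is a hypothetical input): for
# checksum("$PMTKLOX,0,1") every character after the leading '$' is XORed together,
# giving 0x58, so the function returns "58".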
# See "LOCUS logging content.pdf"
# http://learn.adafruit.com/adafruit-ultimate-gps/downloads-and-resources
#
# Basic Record - 16 bytes
# 0 - 3 timestamp
# 4 fix flag
# 5 - 8 latitude
# 9 - 12 longitude
# 13 - 14 height
def parseBasicRecord(bytes):
timestamp = parseLong(bytes[0:4])
# if timestamp > 4290000000: # skip bad values
# continue
date = datetime.fromtimestamp(timestamp)
fix = bytes[4] # TODO bit flag unsigned char u1VALID = 0x00; // 0:NoFix , 1: Fix, 2: DGPS, 6: Estimated
latitude = parseFloat(bytes[5:9])
longitude = parseFloat(bytes[9:13])
height = parseInt(bytes[13:15])
return Coordinates(date, fix, latitude, longitude, height)
def parseFile(filename):
f = open(filename, "r")
coords = []
for line in f.readlines():
results = parseLine(line)
if (results):
coords += (results)
return coords
# TODO Is Coordinates the right name? Or would it be better to have a
# Position object that contains a TimeStamp and Coordinates?
# Or just replace this class with a dictionary?
class Coordinates:
def __init__(self, datetime, fix, latitude, longitude, height):
self.datetime = datetime
self.fix = fix
self.latitude = latitude
self.longitude = longitude
self.height = height
def __repr__(self):
return pformat(self.__dict__)
# TODO fix formatting
def __str__(self):
return """
datetime: %s
fix: %s
latitude: %3.14f
longitude: %3.14f
height: %i
""" % (self.datetime, self.fix, self.latitude, self.longitude, self.height)
def __eq__(self, other):
return self.__dict__ == other.__dict__
if __name__ == "__main__":
from pprint import pprint
coords = parseFile("sample.log")
# remove invalid and print
coords = [c for c in coords if c.fix < 5]
pprint(coords)
|
from rest_framework.routers import DefaultRouter
class OptionalSlashRouter(DefaultRouter):
def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
self.trailing_slash = '/?'
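# Usage sketch (added comment; `ItemViewSet` is a hypothetical viewset):
#   router = OptionalSlashRouter()
#   router.register(r'items', ItemViewSet)
#   urlpatterns = router.urls  # URLs now match both /items and /items/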
|
"""This module handles the auto-file using Geofabrik's download service.
"""
import logging
import os
import shutil
import subprocess
import helpers
def get_region_filename(region, subregion):
"""Returns the filename needed to download/manage PBF files.
Parameters
----------------------
region : str
subregion : str
Returns
----------------------
filename : str
"""
base_name = '{}-latest.osm.pbf'
if subregion is None:
filename = base_name.format(region)
else:
filename = base_name.format(subregion)
return filename
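# Examples (added comment, values illustrative):
#   get_region_filename('north-america', None)          -> 'north-america-latest.osm.pbf'
#   get_region_filename('north-america', 'us-colorado') -> 'us-colorado-latest.osm.pbf'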
def prepare_data(region, subregion, pgosm_date, out_path):
"""Ensures the PBF file is available.
Checks if it already exists locally, download if needed,
and verify MD5 checksum.
Parameters
----------------------
region : str
subregion : str
pgosm_date : str
out_path : str
Returns
----------------------
pbf_file : str
Full path to PBF file
"""
pbf_filename = get_region_filename(region, subregion)
pbf_file = os.path.join(out_path, pbf_filename)
pbf_file_with_date = pbf_file.replace('latest', pgosm_date)
md5_file = f'{pbf_file}.md5'
md5_file_with_date = f'{pbf_file_with_date}.md5'
if pbf_download_needed(pbf_file_with_date, md5_file_with_date, pgosm_date):
logging.getLogger('pgosm-flex').info('Downloading PBF and MD5 files...')
download_data(region, subregion, pbf_file, md5_file)
archive_data(pbf_file, md5_file, pbf_file_with_date, md5_file_with_date)
else:
logging.getLogger('pgosm-flex').info('Copying Archived files')
unarchive_data(pbf_file, md5_file, pbf_file_with_date, md5_file_with_date)
helpers.verify_checksum(md5_file, out_path)
return pbf_file
def pbf_download_needed(pbf_file_with_date, md5_file_with_date, pgosm_date):
"""Decides if the PBF/MD5 files need to be downloaded.
Parameters
-------------------------------
    pbf_file_with_date : str
    md5_file_with_date : str
    pgosm_date : str
Returns
--------------------------
download_needed : bool
"""
logger = logging.getLogger('pgosm-flex')
# If the PBF file exists, check for the MD5 file too.
if os.path.exists(pbf_file_with_date):
logger.info(f'PBF File exists {pbf_file_with_date}')
if os.path.exists(md5_file_with_date):
logger.info('PBF & MD5 files exist. Download not needed')
download_needed = False
else:
if pgosm_date == helpers.get_today():
                logger.info('PBF for today available but not MD5... download needed')
download_needed = True
else:
err = f'Missing MD5 file for {pgosm_date}. Cannot validate.'
logger.error(err)
raise FileNotFoundError(err)
else:
if not pgosm_date == helpers.get_today():
err = f'Missing PBF file for {pgosm_date}. Cannot proceed.'
logger.error(err)
raise FileNotFoundError(err)
logger.info('PBF file not found locally. Download required')
download_needed = True
return download_needed
def get_pbf_url(region, subregion):
"""Returns the URL to the PBF for the region / subregion.
Parameters
----------------------
region : str
subregion : str
Returns
----------------------
pbf_url : str
"""
base_url = 'https://download.geofabrik.de'
if subregion is None:
pbf_url = f'{base_url}/{region}-latest.osm.pbf'
else:
pbf_url = f'{base_url}/{region}/{subregion}-latest.osm.pbf'
return pbf_url
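# Example (added comment, values illustrative):
#   get_pbf_url('north-america', 'us-colorado')
#   -> 'https://download.geofabrik.de/north-america/us-colorado-latest.osm.pbf'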
def download_data(region, subregion, pbf_file, md5_file):
"""Downloads PBF and MD5 file using wget.
Parameters
---------------------
region : str
subregion : str
pbf_file : str
md5_file : str
"""
logger = logging.getLogger('pgosm-flex')
logger.info(f'Downloading PBF data to {pbf_file}')
pbf_url = get_pbf_url(region, subregion)
subprocess.run(
['/usr/bin/wget', pbf_url,
"-O", pbf_file , "--quiet"
],
capture_output=True,
text=True,
check=True
)
logger.info(f'Downloading MD5 checksum to {md5_file}')
subprocess.run(
['/usr/bin/wget', f'{pbf_url}.md5',
"-O", md5_file , "--quiet"
],
capture_output=True,
text=True,
check=True
)
def archive_data(pbf_file, md5_file, pbf_file_with_date, md5_file_with_date):
"""Copies `pbf_file` and `md5_file` to `pbf_file_with_date` and
`md5_file_with_date`.
If either file exists, does nothing.
Parameters
--------------------------------
pbf_file : str
md5_file : str
pbf_file_with_date : str
md5_file_with_date : str
"""
    if not os.path.exists(pbf_file_with_date):
        shutil.copy2(pbf_file, pbf_file_with_date)
    if not os.path.exists(md5_file_with_date):
        shutil.copy2(md5_file, md5_file_with_date)
def unarchive_data(pbf_file, md5_file, pbf_file_with_date, md5_file_with_date):
"""Copies `pbf_file_with_date` and `md5_file_with_date`
to `pbf_file` and `md5_file`.
Always copies, will overwrite a -latest file if it is in the way.
Parameters
--------------------------------
pbf_file : str
md5_file : str
pbf_file_with_date : str
md5_file_with_date : str
"""
logger = logging.getLogger('pgosm-flex')
if os.path.exists(pbf_file):
logger.debug(f'{pbf_file} exists. Overwriting.')
logger.info(f'Copying {pbf_file_with_date} to {pbf_file}')
shutil.copy2(pbf_file_with_date, pbf_file)
if os.path.exists(md5_file):
logger.debug(f'{md5_file} exists. Overwriting.')
logger.info(f'Copying {md5_file_with_date} to {md5_file}')
shutil.copy2(md5_file_with_date, md5_file)
def remove_latest_files(region, subregion, paths):
"""Removes the PBF and MD5 file with -latest in the name.
Files are archived via prepare_data() before processing starts
Parameters
-------------------------
region : str
subregion : str
paths : dict
"""
pbf_filename = get_region_filename(region, subregion)
pbf_file = os.path.join(paths['out_path'], pbf_filename)
md5_file = f'{pbf_file}.md5'
logging.info(f'Done with {pbf_file}, removing.')
os.remove(pbf_file)
logging.info(f'Done with {md5_file}, removing.')
os.remove(md5_file)
|
import json
from enum import Enum
import meilisearch
import structlog
from django.conf import settings
from zoo.analytics.models import Dependency
from zoo.base import redis
from zoo.services.models import Service
from zoo.utils import model_instance_to_json_object
log = structlog.get_logger()
class IndexType(Enum):
Service = "services"
Dependency = "analytics"
class Indexer:
def __init__(self):
self.meiliclient = meilisearch.Client(
settings.MEILI_HOST, settings.MEILI_MASTER_KEY
)
self.models_to_index = [
(Service, IndexType.Service.value),
(Dependency, IndexType.Dependency.value),
]
def index_specified_models(self):
for model, index_name in self.models_to_index:
for instance in model.objects.all():
serialized_model_instance = model_instance_to_json_object(instance)
try:
serialized_model_instance["fields"][
"id"
] = serialized_model_instance["pk"]
self.meiliclient.get_or_create_index(
index_name, {"name": model.__name__}
).update_documents([serialized_model_instance["fields"]])
# deepcode ignore W0703: Multiple Possible Exceptions
except Exception as err:
log.info(
"Failed to Index Model Instance",
error=err,
model=model,
instance=instance,
)
def index_openapi(self):
redis_conn = redis.get_connection()
definition_keys = redis_conn.keys()
for key in definition_keys:
definition = redis_conn.get(key)
try:
json_definition = json.loads(definition)
except json.JSONDecodeError as err:
log.info(
"Failed to decode definition",
error=err,
definition=definition,
key=key,
)
continue
try:
json_definition["id"] = key
                self.meiliclient.get_or_create_index("open-api").update_documents(
                    [json_definition]
                )
# deepcode ignore W0703: Multiple Possible Exceptions
except Exception as err:
log.info("Failed to Index OpenAPI", error=err, key=key)
@staticmethod
def _get_all_paths(definitions):
paths = []
for definition in definitions:
for path in list(definition["paths"].keys()):
paths.append(path)
return paths
|
from .login import Login, LoginForm
from .logout import Logout
from .signup import Signup, SignupForm
from .password_reset import PasswordReset, PasswordResetForm
from .password_reset_token import PasswordResetToken, PasswordResetTokenForm
from .update import FlexibleUpdate
from .delete import MultipleDelete |
""" define the linechart dataset tag """
from __future__ import absolute_import, unicode_literals
import json
from django import template
from saef.models import DatasetProfileHistory
register = template.Library()
@register.inclusion_tag('dataset/linechart_dataset.html')
def linechart_dataset(ds_id=-1, amount=5, profile_name="row count"):
    if ds_id == -1:
        return None
    data = []
    labels = []
    profile_history = DatasetProfileHistory.objects.filter(dataset_id=ds_id).order_by('-create_timestamp')[:amount]
for row in profile_history:
batch_time = row.create_timestamp.strftime('%Y-%m-%d %H:%M:%S')
profile = json.loads(row.profile_json)
profile_value = profile[profile_name]
labels.append(str(batch_time))
data.append(profile_value)
labels.reverse()
data.reverse()
return {'data': data, 'labels': labels}
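# Illustration (added comment; the numbers and timestamps are hypothetical): for a
# dataset with three profile history rows the tag returns chart-ready data such as
#   {'data': [120, 118, 131],
#    'labels': ['2021-01-01 00:00:00', '2021-01-02 00:00:00', '2021-01-03 00:00:00']}
# with both lists ordered oldest to newest.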
|
import os
import csv
import re
from os import listdir, path
from os.path import isfile, join
from shutil import copyfile
import random
import string
from datetime import datetime
cur = os.getcwd()
prefix = "TARGET"
# vol_num = 1
vol_nums = [1,2]
# [1,2,3,4,5,6,7,8]
prod_nums = [1,2,3,4,5,6,7,8]
# prod_num = 4
include_if_no_date = True
search_email_recipients_senders_only = False
def get_random_string(length):
letters = string.ascii_lowercase
result_str = ''.join(random.choice(letters) for i in range(length))
return result_str
for prod_num in prod_nums:
# THEY MAY CHANGE BELOW from PRO to PROD
prod_fol = f"{prefix}_PROD{format(prod_num, '03d')}"
folders = []
texts = []
for vol_num in vol_nums:
vol_fol = f"VOL{format(vol_num, '05d')}"
if os.path.exists(f"{cur}\\{prod_fol}\\{vol_fol}"):
n_path = f"\\{prod_fol}\\{vol_fol}\\NATIVES\\NATIVE00001"
i_path = f"\\{prod_fol}\\{vol_fol}\\IMAGES\\IMAGES00001"
m_path = f"\\{prod_fol}\\{vol_fol}\\TEXT\\TEXT00001"
images = f"{cur}{i_path}"
natives = f"{cur}{n_path}"
text = f"{cur}{m_path}"
texts.append(text)
folders.append(text)
folders.append(images)
folders.append(natives)
# t_path = f"\\temp"
search_after = datetime.strptime("04/13/2018", "%m/%d/%Y")
# "mark tritton","tritton","marktritton","mtritton","[email protected]"
# "hunter boot","owner brand","ob goods","hunterboot","ownerbrand","obgoods"
# "kelly","[email protected]","tara.kelly"
# "lerdal","[email protected]","keli.lerdal"
search_terms = ["kelly","[email protected]","tara.kelly"]
# User Prompts
search_fol = f"RESULTS_{format(prod_num, '03d')}_{search_terms[0].upper().replace(' ', '_')}_{get_random_string(5)}"
dest = f"{cur}\\DillonProdApp\\SearchProduction\\{search_fol}"
print(search_fol)
filtered_docs = []
# //CREATE A LIST OF FILENAMES TO SEARCH FOR IN PRODUCTION BASED ON DAT FILE
dat_path = f"{cur}\\{prod_fol}\\{prefix}_PRO{format(prod_num, '03d')}.dat"
if os.path.isfile(dat_path):
with open(dat_path) as csvfile:
spamreader = csvfile.readlines()
list_of_docs = []
index = 0
for rowz in spamreader:
row = rowz.split("þ")
if len(row) == 1:
row = rowz.split("þ")
start_index = 1
end_index = 3
startatt_index = 5
endatt_index = 7
mdate_index = 31
sdate_index = 29
cust_index = 11
fname_index = 13
from_index = 17
to_index = 19
cc_index = 21
bcc_index = 23
subject_index = 25
if index != 0:
# print(row)
docs = []
if row[startatt_index] == '':
start = row[start_index]
end = row[end_index]
else:
start = row[startatt_index]
end = row[endatt_index]
m = re.search(r"\d", start)
if m is not None:
start_num = int(start[m.start():])
end_num = int(end[m.start():])
pre = start[:m.start()]
# print("start end:",start_num,end_num)
for a in range(start_num,end_num+1):
tmp_name = pre + str(format(a, '08d'))
docs.append(tmp_name)
# print(docs)
file_date_str = ""
if prod_num == 2:
if row[35] == '':
file_date_str = row[39]
else:
file_date_str = row[35]
else:
if row[29] == '':
file_date_str = row[31]
else:
file_date_str = row[29]
if file_date_str != "":
# print("tmp hide becasue just added")
file_date = datetime.strptime(file_date_str, "%m/%d/%Y")
if file_date >= search_after:
is_filtered = False
if prod_num == 2:
file_details = [row[21],row[27],row[9],row[11],row[13],row[15],row[17]]
else:
file_details = [row[11],row[13],row[17],row[19],row[21],row[23],row[25]]
# print(file_details)
for term in search_terms:
for detail in file_details:
if term in detail.lower():
if docs not in filtered_docs:
filtered_docs.append(docs)
is_filtered = True
if is_filtered == False:
list_of_docs.append(docs)
elif include_if_no_date:
list_of_docs.append(docs)
else:
                    attstart_index = 5
                    cur_index = 0
                    for title in row:
                        if "Begin Bates" in title:
                            start_index = cur_index
                        if "End Bates" in title:
                            end_index = cur_index
                        if "Begin Attachment" in title:
                            startatt_index = cur_index
                        if "End Attachment" in title:
                            endatt_index = cur_index
                        cur_index += 1
index += 1
print("done with .dat")
# print(list_of_docs)
# GO THROUGH FILES IN PRODUCTION AND COPY OUT FILES IN LIST OF NAME (list_of_docs)
# ["1 start","3 end","5 att_start","7 att_end","9 imageCount", "11 Cusodian","13 FileName","15 Folder","17 From","19 To","21 CC","23 Bcc","25 Subject","27 Created", "29 Modified", "31 Sent", "33 TXT", "35 FILEPATH"]
# Begin Bates,End Bates,Begin Attachment,End Attachment,9 Email From,11 Email To, 13 Email CC, 15 Email BCC, 17 Email Subject, 19 Confidential Designation, 21 Custodian, 23 Author, 25 Document Title, 27 File Name, 29 Document Extension, 31 Date Created, 33 Time Created, 35 Date Last Modified, 37 Time Last Modified, 39 Date Sent, 41 Time Sent, 43 Date Received, 45 Time Received, 47 HC Folder/Binder Name, 49 File Size, 51 Page Count, 53 MD5 Hash, 55 Text Path, 57 Native File
# START
filtered_list = []
for docs in filtered_docs:
for doc in docs:
filtered_list.append(doc)
# iterate through text and search
for texter in texts:
dirs = [f for f in listdir(texter) if isfile(join(texter, f))]
for file_batch in list_of_docs:
for file_pre in file_batch:
for img in dirs:
if file_pre in img:
with open(f"{texter}\\{img}","r",encoding="utf-8") as txtfile:
text_in_file = txtfile.read()
for term in search_terms:
if term in text_in_file.lower():
pre, _ = os.path.splitext(img)
if pre not in filtered_list:
file_batch_pre = []
for filer in file_batch:
filer_pre, _ = os.path.splitext(filer)
file_batch_pre.append(filer_pre)
# print(file_batch_pre)
filtered_list=filtered_list+file_batch_pre
print("done with search")
# print(filtered_list)
if not path.exists(f"{dest}"):
os.mkdir(f"{dest}")
# copy texts
for folder in folders:
dirs = [f for f in listdir(folder) if isfile(join(folder, f))]
for pre in filtered_list:
for img in dirs:
if pre in img:
copyfile(f"{folder}\\{img}", f"{dest}\\{img}")
print("done")
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
import os
import pyfasta
import allel
import seaborn as sns
import petl as etl
import h5py
import pandas
title = 'Phase 1 AR3 release'
pop_ids = 'AOM', 'BFM', 'GWA', 'GNS', 'BFS', 'CMS', 'GAS', 'UGS', 'KES'
pop_labels = {
'AOM': 'AO $coluzzii$',
'BFM': 'BF $coluzzii$',
'GWA': 'GW',
'GNS': 'GN $gambiae$',
'BFS': 'BF $gambiae$',
'CMS': 'CM $gambiae$',
'UGS': 'UG $gambiae$',
'GAS': 'GA $gambiae$',
'KES': 'KE',
'colony': 'colony',
}
pop_colors = {
'AOM': sns.color_palette('YlOrBr', 5)[4],
'BFM': sns.color_palette('Reds', 3)[1],
'GWA': sns.color_palette('YlOrBr', 5)[1],
'GNS': sns.color_palette('Blues', 3)[0],
'BFS': sns.color_palette('Blues', 3)[1],
'CMS': sns.color_palette('Blues', 3)[2],
'UGS': sns.color_palette('Greens', 2)[0],
'GAS': sns.color_palette('Greens', 2)[1],
'KES': sns.color_palette('Greys', 5)[2],
'colony': sns.color_palette('Greys', 5)[-1]
}
# convert to hex notation for ease of use elsewhere
for p in pop_colors:
    pop_colors[p] = '#%02x%02x%02x' % tuple(int(255*c) for c in pop_colors[p])
# chromatin
_data_chromatin = b"""CHX chro X 20009764 24393108
CH2R chro 2R 58984778 61545105
CH2L chro 2L 1 2431617
PEU2L chro 2L 2487770 5042389
IH2L chro 2L 5078962 5788875
IH3R chro 3R 38988757 41860198
CH3R chro 3R 52161877 53200684
CH3L chro 3L 1 1815119
PEU3L chro 3L 1896830 4235209
IH3L chro 3L 4264713 5031692
"""
tbl_chromatin = (
etl
.fromtext(etl.MemorySource(_data_chromatin))
    .split('lines', r'\s+', ['name', 'type', 'chrom', 'start', 'stop'])
.convert(('start', 'stop'), int)
.cutout('type')
)
# genome regions
region_X_speciation = 'X-speciation', 'X', 15000000, 24000000
region_X_free = 'X-free', 'X', 1, 14000000
region_3L_free = '3L-free', '3L', 15000000, 41000000
region_3R_free = '3R-free', '3R', 1, 37000000
# noinspection PyGlobalUndefined
def init(release_dir, load_geneset=False):
"""Initialise data resources.
Parameters
----------
release_dir : string
Local filesystem path where data from the release are stored.
    load_geneset : bool
        If True, load geneset into memory.
"""
# reference sequence
####################
global genome_fn, genome
genome_dir = os.path.join(release_dir, 'genome')
genome_fn = os.path.join(genome_dir, 'Anopheles-gambiae-PEST_CHROMOSOMES_AgamP3.fa')
if os.path.exists(genome_fn):
genome = pyfasta.Fasta(genome_fn)
# genome annotations
####################
global geneset_agamp42_fn, geneset_agamp42
geneset_dir = os.path.join(release_dir, 'geneset')
geneset_agamp42_fn = os.path.join(
geneset_dir,
'Anopheles-gambiae-PEST_BASEFEATURES_AgamP4.2.sorted.gff3.gz')
if os.path.exists(geneset_agamp42_fn) and load_geneset:
geneset_agamp42 = allel.FeatureTable.from_gff3(geneset_agamp42_fn)
# variant callsets
##################
global callset, callset_pass
variation_dir = os.path.join(release_dir, 'variation')
# main callset
callset_h5_fn = os.path.join(variation_dir, 'main', 'hdf5', 'ag1000g.phase1.ar3.h5')
if os.path.exists(callset_h5_fn):
callset = h5py.File(callset_h5_fn, mode='r')
# main callset, PASS variants only
callset_pass_h5_fn = os.path.join(variation_dir, 'main', 'hdf5', 'ag1000g.phase1.ar3.pass.h5')
if os.path.exists(callset_pass_h5_fn):
callset_pass = h5py.File(callset_pass_h5_fn, mode='r')
# accessibility
###############
global accessibility
accessibility_dir = os.path.join(release_dir, 'accessibility')
accessibility_fn = os.path.join(accessibility_dir, 'accessibility.h5')
if os.path.exists(accessibility_fn):
accessibility = h5py.File(accessibility_fn, mode='r')
# sample metadata
#################
global samples_fn, tbl_samples, lkp_samples, sample_ids, df_samples
samples_dir = os.path.join(release_dir, 'samples')
samples_fn = os.path.join(samples_dir, 'samples.all.txt')
if os.path.exists(samples_fn):
tbl_samples = (
etl
.fromtsv(samples_fn)
.convert(('index', 'year', 'n_sequences', 'kt_2la', 'kt_2rb'), int)
.convert(('mean_coverage', 'latitude', 'longitude') + tuple(range(20, 36)), float)
)
lkp_samples = tbl_samples.recordlookupone('ox_code')
sample_ids = tbl_samples.values('ox_code').list()
df_samples = pandas.read_csv(samples_fn, sep='\t', index_col='index')
# extras
########
global allele_counts, allele_counts_gq10, outgroup_alleles, outgroup_allele_counts, \
outgroup_species
extras_dir = os.path.join(release_dir, 'extras')
# allele counts
allele_counts_fn = os.path.join(extras_dir, 'allele_counts.h5')
if os.path.exists(allele_counts_fn):
allele_counts = h5py.File(allele_counts_fn, mode='r')
allele_counts_gq10_fn = os.path.join(extras_dir, 'allele_counts.gq10.h5')
if os.path.exists(allele_counts_gq10_fn):
allele_counts_gq10 = h5py.File(allele_counts_gq10_fn, mode='r')
# outgroup data
outgroup_species = 'arab', 'meru', 'mela', 'quad', 'epir', 'chri'
outgroup_alleles_fn = os.path.join(extras_dir, 'outgroup_alleles.h5')
if os.path.exists(outgroup_alleles_fn):
outgroup_alleles = h5py.File(outgroup_alleles_fn, mode='r')
outgroup_allele_counts_fn = os.path.join(extras_dir, 'outgroup_allele_counts.h5')
if os.path.exists(outgroup_allele_counts_fn):
outgroup_allele_counts = h5py.File(outgroup_allele_counts_fn, mode='r')
|
def birthday(s, d, m):
count = 0
for i in range(0, len(s)+1-m):
if sum(s[i:i+m]) == d:
count += 1
return count
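# Minimal usage sketch (added for illustration; this is the classic sample input and
# is not part of the original file).
if __name__ == "__main__":
    # contiguous windows of length 2 summing to 3: [1, 2] and [2, 1]
    print(birthday([1, 2, 1, 3, 2], 3, 2))  # -> 2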
|
import random
import requests
import time
import logging
import json
from urllib.parse import urlencode, quote
from playwright.sync_api import sync_playwright
import string
import logging
import os
from .utilities import update_messager
from .exceptions import *
os.environ["no_proxy"] = "127.0.0.1,localhost"
BASE_URL = "https://m.tiktok.com/"
def parse_script_tag_contents(html):
nonce_start = '<head nonce="'
nonce_end = '">'
nonce = html.split(nonce_start)[1].split(nonce_end)[0]
j_raw = html.split(
'<script id="__NEXT_DATA__" type="application/json" nonce="%s" crossorigin="anonymous">' % nonce
)[1].split("</script>")[0]
return j_raw
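# Sketch of intended use (added comment; the HTML string is hypothetical):
#   html = ('<head nonce="abc">...'
#           '<script id="__NEXT_DATA__" type="application/json" nonce="abc" '
#           'crossorigin="anonymous">{"props": {}}</script>')
#   parse_script_tag_contents(html)  # -> '{"props": {}}'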
class TikTokApi:
__instance = None
def __init__(self, **kwargs):
"""The TikTokApi class. Used to interact with TikTok, use get_instance NOT this."""
# Forces Singleton
if TikTokApi.__instance is None:
TikTokApi.__instance = self
else:
raise Exception("Only one TikTokApi object is allowed")
logging.basicConfig(level=kwargs.get("logging_level", logging.WARNING))
logging.info("Class initalized")
# Some Instance Vars
self.executablePath = kwargs.get("executablePath", None)
self.custom_did = kwargs.get("custom_did", None)
self.userAgent = (
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
"AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/86.0.4240.111 Safari/537.36"
)
self.proxy = kwargs.get("proxy", None)
self.custom_verifyFp = kwargs.get("custom_verifyFp")
self.signer_url = kwargs.get("external_signer", None)
self.request_delay = kwargs.get("request_delay", None)
if kwargs.get("use_test_endpoints", False):
global BASE_URL
BASE_URL = "https://t.tiktok.com/"
if kwargs.get("use_selenium", False):
from .browser_selenium import browser
else:
from .browser import browser
if kwargs.get("generate_static_did", False):
self.custom_did = "".join(random.choice(string.digits) for num in range(19))
if self.signer_url is None:
self.browser = browser(**kwargs)
self.userAgent = self.browser.userAgent
try:
self.timezone_name = self.__format_new_params__(self.browser.timezone_name)
self.browser_language = self.__format_new_params__(
self.browser.browser_language
)
self.browser_platform = self.__format_new_params__(
self.browser.browser_platform
)
self.browser_name = self.__format_new_params__(self.browser.browser_name)
self.browser_version = self.__format_new_params__(
self.browser.browser_version
)
self.width = self.browser.width
self.height = self.browser.height
except Exception as e:
logging.error(e)
logging.warning(
"An error ocurred while opening your browser but it was ignored."
)
self.timezone_name = ""
self.browser_language = ""
self.browser_platform = ""
self.browser_name = ""
self.browser_version = ""
self.width = "1920"
self.height = "1080"
@staticmethod
def get_instance(**kwargs):
"""The TikTokApi class. Used to interact with TikTok. This is a singleton
class to prevent issues from arising with playwright
Parameters
----------
logging_level: The logging level you want the program to run at, optional
These are the standard python logging module's levels.
request_delay: The amount of time to wait before making a request, optional
This is used to throttle your own requests as you may end up making too
many requests to TikTok for your IP.
custom_did: A TikTok parameter needed to download videos, optional
The code generates these and handles these pretty well itself, however
for some things such as video download you will need to set a consistent
one of these.
            All the methods take this as an optional parameter; however, it's cleaner code
to store this at the instance level. You can override this at the specific
methods.
generate_static_did: A parameter that generates a custom_did at the instance level
Use this if you want to download videos from a script but don't want to generate
your own custom_did parameter.
custom_verifyFp: A TikTok parameter needed to work most of the time, optional
To get this parameter look at [this video](https://youtu.be/zwLmLfVI-VQ?t=117)
I recommend watching the entire thing, as it will help setup this package.
            All the methods take this as an optional parameter; however, it's cleaner code
to store this at the instance level. You can override this at the specific
methods.
You can use the following to generate `"".join(random.choice(string.digits)
for num in range(19))`
use_test_endpoints: Send requests to TikTok's test endpoints, optional
This parameter when set to true will make requests to TikTok's testing
endpoints instead of the live site. I can't guarantee this will work
in the future, however currently basically any custom_verifyFp will
work here which is helpful.
proxy: A string containing your proxy address, optional
If you want to do a lot of scraping of TikTok endpoints you'll likely
need a proxy.
Ex: "https://0.0.0.0:8080"
            All the methods take this as an optional parameter; however, it's cleaner code
to store this at the instance level. You can override this at the specific
methods.
use_selenium: Option to use selenium over playwright, optional
Playwright is selected by default and is the one that I'm designing the
package to be compatable for, however if playwright doesn't work on
your machine feel free to set this to True.
executablePath: The location of the driver, optional
This shouldn't be needed if you're using playwright
**kwargs
Parameters that are passed on to basically every module and methods
that interact with this main class. These may or may not be documented
in other places.
"""
if not TikTokApi.__instance:
TikTokApi(**kwargs)
return TikTokApi.__instance
def clean_up(self):
"""A basic cleanup method, called automatically from the code"""
self.__del__()
def __del__(self):
"""A basic cleanup method, called automatically from the code"""
try:
self.browser.clean_up()
except Exception:
pass
try:
get_playwright().stop()
except Exception:
pass
TikTokApi.__instance = None
def external_signer(self, url, custom_did=None, verifyFp=None):
"""Makes requests to an external signer instead of using a browser.
Parameters
----------
url: The server to make requests to
This server is designed to sign requests. You can find an example
of this signature server in the examples folder.
custom_did: A TikTok parameter needed to download videos
The code generates these and handles these pretty well itself, however
for some things such as video download you will need to set a consistent
one of these.
custom_verifyFp: A TikTok parameter needed to work most of the time,
To get this parameter look at [this video](https://youtu.be/zwLmLfVI-VQ?t=117)
I recommend watching the entire thing, as it will help setup this package.
"""
if custom_did is not None:
query = {"url": url, "custom_did": custom_did, "verifyFp": verifyFp}
else:
query = {"url": url, "verifyFp": verifyFp}
data = requests.get(self.signer_url + "?{}".format(urlencode(query)))
parsed_data = data.json()
return (
parsed_data["verifyFp"],
parsed_data["did"],
parsed_data["_signature"],
parsed_data["userAgent"],
parsed_data["referrer"],
)
def get_data(self, **kwargs) -> dict:
"""Makes requests to TikTok and returns their JSON.
This is all handled by the package so it's unlikely
you will need to use this.
"""
(
region,
language,
proxy,
maxCount,
did,
) = self.__process_kwargs__(kwargs)
kwargs["custom_did"] = did
if self.request_delay is not None:
time.sleep(self.request_delay)
if self.proxy is not None:
proxy = self.proxy
if kwargs.get("custom_verifyFp") == None:
if self.custom_verifyFp != None:
verifyFp = self.custom_verifyFp
else:
verifyFp = "verify_khr3jabg_V7ucdslq_Vrw9_4KPb_AJ1b_Ks706M8zIJTq"
else:
verifyFp = kwargs.get("custom_verifyFp")
if self.signer_url is None:
kwargs["custom_verifyFp"] = verifyFp
verify_fp, did, signature = self.browser.sign_url(**kwargs)
userAgent = self.browser.userAgent
referrer = self.browser.referrer
else:
verify_fp, did, signature, userAgent, referrer = self.external_signer(
kwargs["url"],
custom_did=kwargs.get("custom_did"),
verifyFp=kwargs.get("custom_verifyFp", verifyFp),
)
query = {"verifyFp": verify_fp, "did": did, "_signature": signature}
url = "{}&{}".format(kwargs["url"], urlencode(query))
r = requests.get(
url,
headers={
"authority": "m.tiktok.com",
"method": "GET",
"path": url.split("tiktok.com")[1],
"scheme": "https",
"accept": "application/json, text/plain, */*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "en-US,en;q=0.9",
"cache-control": "no-cache",
"dnt": "1",
"origin": referrer,
"pragma": "no-cache",
"referer": referrer,
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": userAgent,
},
cookies=self.get_cookies(**kwargs),
proxies=self.__format_proxy(proxy),
)
try:
json = r.json()
if json.get("type") == "verify":
logging.error(
"Tiktok wants to display a catcha. Response is:\n" + r.text
)
logging.error(self.get_cookies(**kwargs))
raise TikTokCaptchaError()
if json.get("statusCode", 200) == 10201:
# Invalid Entity
raise TikTokNotFoundError(
"TikTok returned a response indicating the entity is invalid"
)
return r.json()
except ValueError as e:
text = r.text
logging.error("TikTok response: " + text)
if len(text) == 0:
raise EmptyResponseError(
"Empty response from Tiktok to " + url
) from None
else:
logging.error("Converting response to JSON failed")
logging.error(e)
raise JSONDecodeFailure() from e
def get_cookies(self, **kwargs):
"""Extracts cookies from the kwargs passed to the function for get_data"""
did = kwargs.get(
"custom_did", "".join(random.choice(string.digits) for num in range(19))
)
        if kwargs.get("custom_verifyFp") is None:
            if self.custom_verifyFp is not None:
verifyFp = self.custom_verifyFp
else:
verifyFp = "verify_khr3jabg_V7ucdslq_Vrw9_4KPb_AJ1b_Ks706M8zIJTq"
else:
verifyFp = kwargs.get("custom_verifyFp")
if kwargs.get("force_verify_fp_on_cookie_header", False):
return {
"tt_webid": did,
"tt_webid_v2": did,
"tt_csrf_token": "".join(
random.choice(string.ascii_uppercase + string.ascii_lowercase)
for i in range(16)
),
"s_v_web_id": verifyFp,
}
else:
return {
"tt_webid": did,
"tt_webid_v2": did,
"tt_csrf_token": "".join(
random.choice(string.ascii_uppercase + string.ascii_lowercase)
for i in range(16)
),
}
def get_bytes(self, **kwargs) -> bytes:
"""Returns TikTok's response as bytes, similar to get_data"""
(
region,
language,
proxy,
maxCount,
did,
) = self.__process_kwargs__(kwargs)
kwargs["custom_did"] = did
if self.signer_url is None:
verify_fp, did, signature = self.browser.sign_url(**kwargs)
userAgent = self.browser.userAgent
referrer = self.browser.referrer
else:
verify_fp, did, signature, userAgent, referrer = self.external_signer(
kwargs["url"], custom_did=kwargs.get("custom_did", None)
)
query = {"verifyFp": verify_fp, "_signature": signature}
url = "{}&{}".format(kwargs["url"], urlencode(query))
r = requests.get(
url,
headers={
"Accept": "*/*",
"Accept-Encoding": "identity;q=1, *;q=0",
"Accept-Language": "en-US;en;q=0.9",
"Cache-Control": "no-cache",
"Connection": "keep-alive",
"Host": url.split("/")[2],
"Pragma": "no-cache",
"Range": "bytes=0-",
"Referer": "https://www.tiktok.com/",
"User-Agent": userAgent,
},
proxies=self.__format_proxy(proxy),
cookies=self.get_cookies(**kwargs),
)
return r.content
def by_trending(self, count=30, **kwargs) -> dict:
"""
Gets trending TikToks
Parameters
----------
count: The amount of TikToks you want returned, optional
Note: TikTok seems to only support at MOST ~2000 TikToks
from a single endpoint.
"""
(
region,
language,
proxy,
maxCount,
did,
) = self.__process_kwargs__(kwargs)
kwargs["custom_did"] = did
response = []
first = True
while len(response) < count:
if count < maxCount:
realCount = count
else:
realCount = maxCount
query = {
"count": realCount,
"id": 1,
"secUid": "",
"sourceType": 12,
"appId": 1233,
"itemID": 1,
"insertedItemID": "",
"region": region,
"priority_region": region,
"language": language,
}
api_url = "{}api/recommend/item_list/?{}&{}".format(
BASE_URL, self.__add_new_params__(), urlencode(query)
)
res = self.getData(url=api_url, **kwargs)
for t in res.get("itemList", []):
response.append(t)
if not res["hasMore"] and not first:
logging.info("TikTok isn't sending more TikToks beyond this point.")
return response[:count]
realCount = count - len(response)
first = False
return response[:count]
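    # Hedged usage sketch (not part of the original library): assuming TikTokApi
    # can be constructed with no arguments, the trending feed could be fetched
    # roughly like this. Result keys depend on TikTok's response.
    #
    #     api = TikTokApi()
    #     for tiktok in api.by_trending(count=50):
    #         print(tiktok.get("id"), tiktok.get("desc"))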
def search_for_users(self, search_term, count=28, **kwargs) -> list:
"""Returns a list of users that match the search_term
Parameters
----------
search_term: The string to search for users by
This string is the term you want to search for users by.
count: The number of users to return
Note: maximum is around 28 for this type of endpoint.
"""
return self.discover_type(search_term, prefix="user", count=count, **kwargs)
def search_for_music(self, search_term, count=28, **kwargs) -> list:
"""Returns a list of music that match the search_term
Parameters
----------
search_term: The string to search for music by
This string is the term you want to search for music by.
count: The number of music to return
Note: maximum is around 28 for this type of endpoint.
"""
return self.discover_type(search_term, prefix="music", count=count, **kwargs)
def search_for_hashtags(self, search_term, count=28, **kwargs) -> list:
"""Returns a list of hashtags that match the search_term
Parameters
----------
        search_term: The string to search for hashtags by
            This string is the term you want to search for hashtags by.
        count: The number of hashtags to return
Note: maximum is around 28 for this type of endpoint.
"""
return self.discover_type(
search_term, prefix="challenge", count=count, **kwargs
)
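    # Illustrative sketch: the three search_for_* helpers above all delegate to
    # discover_type() with a different prefix. A hypothetical set of calls
    # (api is an assumed TikTokApi instance; keys depend on TikTok's response):
    #
    #     users = api.search_for_users("therock", count=10)
    #     sounds = api.search_for_music("pop", count=10)
    #     tags = api.search_for_hashtags("funny", count=10)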
def discover_type(self, search_term, prefix, count=28, offset=0, **kwargs) -> list:
"""Returns a list of whatever the prefix type you pass in
Parameters
----------
search_term: The string to search by
This string is the term you want to search by.
prefix: The prefix of what to search for
Valid options are user/music/challenge
count: The number search results to return
Note: maximum is around 28 for this type of endpoint.
"""
(
region,
language,
proxy,
maxCount,
did,
) = self.__process_kwargs__(kwargs)
kwargs["custom_did"] = did
response = []
while len(response) < count:
query = {
"discoverType": 1,
"needItemList": False,
"keyWord": search_term,
"offset": offset,
"count": 20,
"useRecommend": False,
"language": "en",
}
api_url = "{}api/discover/{}/?{}&{}".format(
BASE_URL, prefix, self.__add_new_params__(), urlencode(query)
)
data = self.getData(url=api_url, **kwargs)
if "userInfoList" in data.keys():
for x in data["userInfoList"]:
response.append(x)
elif "musicInfoList" in data.keys():
for x in data["musicInfoList"]:
response.append(x)
elif "challengeInfoList" in data.keys():
for x in data["challengeInfoList"]:
response.append(x)
else:
logging.info("TikTok is not sending videos beyond this point.")
break
offset += maxCount
return response[:count]
def user_posts(self, userID, secUID, count=30, cursor=0, **kwargs) -> dict:
"""Returns an array of dictionaries representing TikToks for a user.
Parameters
----------
userID: The userID of the user, which TikTok assigns
You can find this from utilizing other methods or
just use by_username to find it.
secUID: The secUID of the user, which TikTok assigns
You can find this from utilizing other methods or
just use by_username to find it.
count: The number of posts to return
Note: seems to only support up to ~2,000
"""
(
region,
language,
proxy,
maxCount,
did,
) = self.__process_kwargs__(kwargs)
kwargs["custom_did"] = did
response = []
first = True
while len(response) < count:
if count < maxCount:
realCount = count
else:
realCount = maxCount
query = {
"count": realCount,
"id": userID,
"cursor": cursor,
"type": 1,
"secUid": secUID,
"sourceType": 8,
"appId": 1233,
"region": region,
"priority_region": region,
"language": language,
}
api_url = "{}api/post/item_list/?{}&{}".format(
BASE_URL, self.__add_new_params__(), urlencode(query)
)
res = self.getData(url=api_url, **kwargs)
if "itemList" in res.keys():
for t in res["itemList"]:
response.append(t)
if not res["hasMore"] and not first:
logging.info("TikTok isn't sending more TikToks beyond this point.")
return response
realCount = count - len(response)
cursor = res["cursor"]
first = False
return response[:count]
def by_username(self, username, count=30, **kwargs) -> dict:
"""Returns a dictionary listing TikToks given a user's username.
Parameters
----------
username: The username of the TikTok user
This is just the username of the user you want to
get videos from.
count: The number of posts to return
Note: seems to only support up to ~2,000
"""
(
region,
language,
proxy,
maxCount,
did,
) = self.__process_kwargs__(kwargs)
kwargs["custom_did"] = did
data = self.getUserObject(username, **kwargs)
return self.userPosts(
data["id"],
data["secUid"],
count=count,
**kwargs,
)
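    # Hedged example: fetching a user's recent posts by username. `api` is an
    # assumed TikTokApi instance; the username is illustrative.
    #
    #     posts = api.by_username("therock", count=30)
    #     for post in posts:
    #         print(post.get("desc"))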
def user_page(self, userID, secUID, page_size=30, cursor=0, **kwargs) -> dict:
"""Returns a dictionary listing of one page of TikToks given a user's ID and secUID
Parameters
----------
userID: The userID of the user, which TikTok assigns
You can find this from utilizing other methods or
just use by_username to find it.
secUID: The secUID of the user, which TikTok assigns
You can find this from utilizing other methods or
just use by_username to find it.
page_size: The number of posts to return per page
Gets a specific page of a user, doesn't iterate.
cursor: The offset of a page
The offset to return new videos from
"""
(
region,
language,
proxy,
maxCount,
did,
) = self.__process_kwargs__(kwargs)
kwargs["custom_did"] = did
api_url = (
BASE_URL + "api/post/item_list/?{}&count={}&id={}&type=1&secUid={}"
"&cursor={}&sourceType=8&appId=1233®ion={}&language={}".format(
self.__add_new_params__(),
page_size,
str(userID),
str(secUID),
cursor,
region,
language,
)
)
return self.getData(url=api_url, **kwargs)
def get_user_pager(self, username, page_size=30, cursor=0, **kwargs):
"""Returns a generator to page through a user's feed
Parameters
----------
username: The username of the user
page_size: The number of posts to return in a page
cursor: The offset of a page
The offset to return new videos from
"""
(
region,
language,
proxy,
maxCount,
did,
) = self.__process_kwargs__(kwargs)
kwargs["custom_did"] = did
data = self.getUserObject(username, **kwargs)
while True:
resp = self.userPage(
data["id"],
data["secUid"],
page_size=page_size,
cursor=cursor,
**kwargs,
)
try:
page = resp["itemList"]
except KeyError:
                # No more results
return
cursor = resp["cursor"]
yield page
if not resp["hasMore"]:
return # all done
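    # Hedged example of paging with the generator above; `api` is an assumed
    # TikTokApi instance. Each yielded page is the raw "itemList" for one cursor.
    #
    #     for page in api.get_user_pager("therock", page_size=30):
    #         for post in page:
    #             print(post.get("id"))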
def user_liked(self, userID, secUID, count=30, cursor=0, **kwargs) -> dict:
"""Returns a dictionary listing TikToks that a given a user has liked.
Note: The user's likes must be public
Parameters
----------
userID: The userID of the user, which TikTok assigns
secUID: The secUID of the user, which TikTok assigns
count: The number of posts to return
Note: seems to only support up to ~2,000
cursor: The offset of a page
The offset to return new videos from
"""
(
region,
language,
proxy,
maxCount,
did,
) = self.__process_kwargs__(kwargs)
kwargs["custom_did"] = did
response = []
first = True
while len(response) < count:
if count < maxCount:
realCount = count
else:
realCount = maxCount
query = {
"count": realCount,
"id": userID,
"type": 2,
"secUid": secUID,
"cursor": cursor,
"sourceType": 9,
"appId": 1233,
"region": region,
"priority_region": region,
"language": language,
}
api_url = "{}api/favorite/item_list/?{}&{}".format(
BASE_URL, self.__add_new_params__(), urlencode(query)
)
res = self.getData(url=api_url, **kwargs)
try:
res["itemList"]
except Exception:
logging.error("User's likes are most likely private")
return []
for t in res["itemList"]:
response.append(t)
if not res["hasMore"] and not first:
logging.info("TikTok isn't sending more TikToks beyond this point.")
return response
realCount = count - len(response)
cursor = res["cursor"]
first = False
return response[:count]
def user_liked_by_username(self, username, count=30, **kwargs) -> dict:
"""Returns a dictionary listing TikToks a user has liked by username.
Note: The user's likes must be public
Parameters
----------
username: The username of the user
count: The number of posts to return
Note: seems to only support up to ~2,000
"""
(
region,
language,
proxy,
maxCount,
did,
) = self.__process_kwargs__(kwargs)
kwargs["custom_did"] = did
data = self.getUserObject(username, **kwargs)
return self.userLiked(
data["id"],
data["secUid"],
count=count,
**kwargs,
)
def by_sound(self, id, count=30, offset=0, **kwargs) -> dict:
"""Returns a dictionary listing TikToks with a specific sound.
Parameters
----------
id: The sound id to search by
Note: Can be found in the URL of the sound specific page or with other methods.
count: The number of posts to return
Note: seems to only support up to ~2,000
"""
(
region,
language,
proxy,
maxCount,
did,
) = self.__process_kwargs__(kwargs)
kwargs["custom_did"] = did
response = []
while len(response) < count:
if count < maxCount:
realCount = count
else:
realCount = maxCount
query = {
"secUid": "",
"musicID": str(id),
"count": str(realCount),
"cursor": offset,
"shareUid": "",
"language": language,
}
api_url = "{}api/music/item_list/?{}&{}".format(
BASE_URL, self.__add_new_params__(), urlencode(query)
)
res = self.getData(url=api_url, **kwargs)
try:
for t in res["items"]:
response.append(t)
except KeyError:
for t in res["itemList"]:
response.append(t)
if not res["hasMore"]:
logging.info("TikTok isn't sending more TikToks beyond this point.")
return response
realCount = count - len(response)
offset = res["cursor"]
return response[:count]
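    # Hedged example: listing TikToks that use a given sound. The id below is a
    # placeholder, not a real sound; `api` is an assumed TikTokApi instance.
    #
    #     tiktoks = api.by_sound("1234567890123456789", count=30)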
def get_music_object(self, id, **kwargs) -> dict:
"""Returns a music object for a specific sound id.
Parameters
----------
id: The sound id to get the object for
This can be found by using other methods.
"""
return self.getMusicObjectFull(id, **kwargs)["music"]
def get_music_object_full(self, id, **kwargs):
"""Returns a music object for a specific sound id.
Parameters
----------
id: The sound id to get the object for
This can be found by using other methods.
"""
(
region,
language,
proxy,
maxCount,
did,
) = self.__process_kwargs__(kwargs)
r = requests.get(
"https://www.tiktok.com/music/-{}".format(id),
headers={
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"authority": "www.tiktok.com",
"Accept-Encoding": "gzip, deflate",
"Connection": "keep-alive",
"Host": "www.tiktok.com",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36",
},
proxies=self.__format_proxy(kwargs.get("proxy", None)),
cookies=self.get_cookies(**kwargs),
)
t = r.text
j_raw = parse_script_tag_contents(t)
return json.loads(j_raw)["props"]["pageProps"]["musicInfo"]
def by_hashtag(self, hashtag, count=30, offset=0, **kwargs) -> dict:
"""Returns a dictionary listing TikToks with a specific hashtag.
Parameters
----------
hashtag: The hashtag to search by
Without the # symbol
A valid string is "funny"
count: The number of posts to return
Note: seems to only support up to ~2,000
"""
(
region,
language,
proxy,
maxCount,
did,
) = self.__process_kwargs__(kwargs)
kwargs["custom_did"] = did
id = self.getHashtagObject(hashtag)["challengeInfo"]["challenge"]["id"]
response = []
required_count = count
while len(response) < required_count:
if count > maxCount:
count = maxCount
query = {
"count": count,
"challengeID": id,
"type": 3,
"secUid": "",
"cursor": offset,
"priority_region": "",
}
api_url = "{}api/challenge/item_list/?{}&{}".format(
BASE_URL, self.__add_new_params__(), urlencode(query)
)
res = self.getData(url=api_url, **kwargs)
for t in res["itemList"]:
response.append(t)
if not res["hasMore"]:
logging.info("TikTok isn't sending more TikToks beyond this point.")
return response
offset += maxCount
return response[:required_count]
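    # Hedged example: listing TikToks under a hashtag, passed without the '#'
    # symbol as the docstring above notes. `api` is an assumed TikTokApi instance.
    #
    #     funny = api.by_hashtag("funny", count=30)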
def get_hashtag_object(self, hashtag, **kwargs) -> dict:
"""Returns a hashtag object.
Parameters
----------
hashtag: The hashtag to search by
Without the # symbol
"""
(
region,
language,
proxy,
maxCount,
did,
) = self.__process_kwargs__(kwargs)
kwargs["custom_did"] = did
query = {"name": hashtag, "isName": True, "lang": language}
api_url = "{}node/share/tag/{}?{}&{}".format(
BASE_URL, quote(hashtag), self.__add_new_params__(), urlencode(query)
)
data = self.getData(url=api_url, **kwargs)
if data["challengeInfo"].get("challenge") is None:
raise TikTokNotFoundError("Challenge {} does not exist".format(hashtag))
return data
def get_recommended_tiktoks_by_video_id(
self, id, count=30, minCursor=0, maxCursor=0, **kwargs
) -> dict:
"""Returns a dictionary listing reccomended TikToks for a specific TikTok video.
Parameters
----------
id: The id of the video to get suggestions for
Can be found using other methods
"""
(
region,
language,
proxy,
maxCount,
did,
) = self.__process_kwargs__(kwargs)
kwargs["custom_did"] = did
response = []
first = True
while len(response) < count:
if count < maxCount:
realCount = count
else:
realCount = maxCount
query = {
"count": realCount,
"id": 1,
"secUid": "",
"maxCursor": maxCursor,
"minCursor": minCursor,
"sourceType": 12,
"appId": 1233,
"region": region,
"priority_region": region,
"language": language,
}
api_url = "{}api/recommend/item_list/?{}&{}".format(
BASE_URL, self.__add_new_params__(), urlencode(query)
)
res = self.getData(url=api_url, **kwargs)
for t in res.get("items", []):
response.append(t)
if not res["hasMore"] and not first:
logging.info("TikTok isn't sending more TikToks beyond this point.")
return response[:count]
realCount = count - len(response)
maxCursor = res["maxCursor"]
first = False
return response[:count]
def get_tiktok_by_id(self, id, **kwargs) -> dict:
"""Returns a dictionary of a specific TikTok.
Parameters
----------
id: The id of the TikTok you want to get the object for
"""
(
region,
language,
proxy,
maxCount,
did,
) = self.__process_kwargs__(kwargs)
kwargs["custom_did"] = did
did = kwargs.get("custom_did", None)
query = {
"itemId": id,
"language": language,
}
api_url = "{}api/item/detail/?{}&{}".format(
BASE_URL, self.__add_new_params__(), urlencode(query)
)
return self.getData(url=api_url, **kwargs)
def get_tiktok_by_url(self, url, **kwargs) -> dict:
"""Returns a dictionary of a TikTok object by url.
Parameters
----------
url: The TikTok url you want to retrieve
This currently doesn't support the shortened TikTok
url links.
"""
(
region,
language,
proxy,
maxCount,
did,
) = self.__process_kwargs__(kwargs)
kwargs["custom_did"] = did
custom_did = kwargs.get("custom_did", None)
if "@" in url and "/video/" in url:
post_id = url.split("/video/")[1].split("?")[0]
else:
raise Exception(
"URL format not supported. Below is an example of a supported url.\n"
"https://www.tiktok.com/@therock/video/6829267836783971589"
)
return self.getTikTokById(
post_id,
**kwargs,
)
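    # Hedged example: resolving a full (non-shortened) TikTok URL to its object.
    # The URL mirrors the supported format shown in the error message above;
    # `api` is an assumed TikTokApi instance.
    #
    #     tiktok = api.get_tiktok_by_url(
    #         "https://www.tiktok.com/@therock/video/6829267836783971589"
    #     )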
def get_tiktok_by_html(self, url, **kwargs) -> dict:
"""This method retrieves a TikTok using the html
endpoints rather than the API based ones.
Parameters
----------
url: The url of the TikTok to get
"""
(
region,
language,
proxy,
maxCount,
did,
) = self.__process_kwargs__(kwargs)
r = requests.get(
url,
headers={
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"authority": "www.tiktok.com",
"path": url.split("tiktok.com")[1],
"Accept-Encoding": "gzip, deflate",
"Connection": "keep-alive",
"Host": "www.tiktok.com",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36",
},
proxies=self.__format_proxy(kwargs.get("proxy", None)),
cookies=self.get_cookies(**kwargs),
)
t = r.text
try:
j_raw = parse_script_tag_contents(t)
except IndexError:
if not t:
logging.error("TikTok response is empty")
else:
logging.error("TikTok response: \n " + t)
raise TikTokCaptchaError()
data = json.loads(j_raw)["props"]["pageProps"]
if data["serverCode"] == 404:
raise TikTokNotFoundError(
"TikTok with that url doesn't exist".format(username)
)
return data
def discover_hashtags(self, **kwargs) -> dict:
"""Discover page, consists challenges (hashtags)"""
(
region,
language,
proxy,
maxCount,
did,
) = self.__process_kwargs__(kwargs)
kwargs["custom_did"] = did
query = {"noUser": 1, "userCount": 30, "scene": 0}
api_url = "{}node/share/discover?{}&{}".format(
BASE_URL, self.__add_new_params__(), urlencode(query)
)
return self.getData(url=api_url, **kwargs)["body"][1]["exploreList"]
def discover_music(self, **kwargs) -> dict:
"""Discover page, consists of music"""
(
region,
language,
proxy,
maxCount,
did,
) = self.__process_kwargs__(kwargs)
kwargs["custom_did"] = did
query = {"noUser": 1, "userCount": 30, "scene": 0}
api_url = "{}node/share/discover?{}&{}".format(
BASE_URL, self.__add_new_params__(), urlencode(query)
)
return self.getData(url=api_url, **kwargs)["body"][2]["exploreList"]
def get_user_object(self, username, **kwargs) -> dict:
"""Gets a user object (dictionary)
Parameters
----------
username: The username of the user
"""
(
region,
language,
proxy,
maxCount,
did,
) = self.__process_kwargs__(kwargs)
kwargs["custom_did"] = did
return self.getUser(username, **kwargs)["userInfo"]["user"]
def get_user(self, username, **kwargs) -> dict:
"""Gets the full exposed user object
Parameters
----------
username: The username of the user
"""
(
region,
language,
proxy,
maxCount,
did,
) = self.__process_kwargs__(kwargs)
r = requests.get(
"https://tiktok.com/@{}?lang=en".format(quote(username)),
headers={
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"authority": "www.tiktok.com",
"path": "/@{}".format(quote(username)),
"Accept-Encoding": "gzip, deflate",
"Connection": "keep-alive",
"Host": "www.tiktok.com",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36",
},
proxies=self.__format_proxy(kwargs.get("proxy", None)),
cookies=self.get_cookies(**kwargs),
)
t = r.text
try:
j_raw = parse_script_tag_contents(t)
except IndexError:
if not t:
logging.error("Tiktok response is empty")
else:
logging.error("Tiktok response: \n " + t)
raise TikTokCaptchaError()
user = json.loads(j_raw)["props"]["pageProps"]
if user["serverCode"] == 404:
raise TikTokNotFoundError(
"TikTok user with username {} does not exist".format(username)
)
return user
def get_suggested_users_by_id(
self, userId="6745191554350760966", count=30, **kwargs
) -> list:
"""Returns suggested users given a different TikTok user.
Parameters
----------
userId: The id of the user to get suggestions for
count: The amount of users to return, optional
"""
(
region,
language,
proxy,
maxCount,
did,
) = self.__process_kwargs__(kwargs)
kwargs["custom_did"] = did
query = {
"noUser": 0,
"pageId": userId,
"userId": userId,
"userCount": count,
"scene": 15,
}
api_url = "{}node/share/discover?{}&{}".format(
BASE_URL, self.__add_new_params__(), urlencode(query)
)
res = []
for x in self.getData(url=api_url, **kwargs)["body"][0]["exploreList"]:
res.append(x["cardItem"])
return res[:count]
def get_suggested_users_by_id_crawler(
self, count=30, startingId="6745191554350760966", **kwargs
) -> list:
"""Crawls for listing of all user objects it can find.
Parameters
----------
count: The amount of users to crawl for
startingId: The ID of a TikTok user to start at, optional
            Optional; a static default is used, so you may get more
            unique results by setting your own.
"""
(
region,
language,
proxy,
maxCount,
did,
) = self.__process_kwargs__(kwargs)
kwargs["custom_did"] = did
users = []
unusedIDS = [startingId]
while len(users) < count:
userId = random.choice(unusedIDS)
newUsers = self.getSuggestedUsersbyID(userId=userId, **kwargs)
unusedIDS.remove(userId)
for user in newUsers:
if user not in users:
users.append(user)
unusedIDS.append(user["id"])
return users[:count]
def get_suggested_hashtags_by_id(
self, count=30, userId="6745191554350760966", **kwargs
) -> list:
"""Returns suggested hashtags given a TikTok user.
Parameters
----------
userId: The id of the user to get suggestions for
"""
(
region,
language,
proxy,
maxCount,
did,
) = self.__process_kwargs__(kwargs)
kwargs["custom_did"] = did
query = {
"noUser": 0,
"pageId": userId,
"userId": userId,
"userCount": count,
"scene": 15,
}
api_url = "{}node/share/discover?{}&{}".format(
BASE_URL, self.__add_new_params__(), urlencode(query)
)
res = []
for x in self.getData(url=api_url, **kwargs)["body"][1]["exploreList"]:
res.append(x["cardItem"])
return res[:count]
def get_suggested_hashtags_by_id_crawler(
self, count=30, startingId="6745191554350760966", **kwargs
) -> list:
"""Crawls for as many hashtags as it can find.
Parameters
----------
        count: The amount of hashtags to crawl for
startingId: The ID of a TikTok user to start at
"""
(
region,
language,
proxy,
maxCount,
did,
) = self.__process_kwargs__(kwargs)
kwargs["custom_did"] = did
hashtags = []
ids = self.getSuggestedUsersbyIDCrawler(
count=count, startingId=startingId, **kwargs
)
while len(hashtags) < count and len(ids) != 0:
userId = random.choice(ids)
newTags = self.getSuggestedHashtagsbyID(userId=userId["id"], **kwargs)
ids.remove(userId)
for hashtag in newTags:
if hashtag not in hashtags:
hashtags.append(hashtag)
return hashtags[:count]
def get_suggested_music_by_id(
self, count=30, userId="6745191554350760966", **kwargs
) -> list:
"""Returns suggested music given a TikTok user.
Parameters
----------
userId: The id of the user to get suggestions for
count: The amount of users to return
proxy: The IP address of a proxy to make requests from
"""
(
region,
language,
proxy,
maxCount,
did,
) = self.__process_kwargs__(kwargs)
kwargs["custom_did"] = did
query = {
"noUser": 0,
"pageId": userId,
"userId": userId,
"userCount": count,
"scene": 15,
}
api_url = "{}node/share/discover?{}&{}".format(
BASE_URL, self.__add_new_params__(), urlencode(query)
)
res = []
for x in self.getData(url=api_url, **kwargs)["body"][2]["exploreList"]:
res.append(x["cardItem"])
return res[:count]
def get_suggested_music_id_crawler(
self, count=30, startingId="6745191554350760966", **kwargs
) -> list:
"""Crawls for hashtags.
Parameters
----------
        count: The amount of music to crawl for
startingId: The ID of a TikTok user to start at
"""
(
region,
language,
proxy,
maxCount,
did,
) = self.__process_kwargs__(kwargs)
kwargs["custom_did"] = did
musics = []
ids = self.getSuggestedUsersbyIDCrawler(
count=count, startingId=startingId, **kwargs
)
while len(musics) < count and len(ids) != 0:
userId = random.choice(ids)
newTags = self.getSuggestedMusicbyID(userId=userId["id"], **kwargs)
ids.remove(userId)
for music in newTags:
if music not in musics:
musics.append(music)
return musics[:count]
def get_video_by_tiktok(self, data, **kwargs) -> bytes:
"""Downloads video from TikTok using a TikTok object.
You will need to set a custom_did to do this for anything but trending.
        You can either generate one yourself or pass
        generate_static_did=True to the constructor of the TikTokApi class.
Parameters
----------
data: A TikTok object
A TikTok JSON object from any other method.
"""
(
region,
language,
proxy,
maxCount,
did,
) = self.__process_kwargs__(kwargs)
kwargs["custom_did"] = did
try:
api_url = data["video"]["downloadAddr"]
except Exception:
try:
api_url = data["itemInfos"]["video"]["urls"][0]
except Exception:
api_url = data["itemInfo"]["itemStruct"]["video"]["playAddr"]
return self.get_Video_By_DownloadURL(api_url, **kwargs)
def get_video_by_download_url(self, download_url, **kwargs) -> bytes:
"""Downloads video from TikTok using download url in a TikTok object
Parameters
----------
download_url: The download url key value in a TikTok object
"""
(
region,
language,
proxy,
maxCount,
did,
) = self.__process_kwargs__(kwargs)
kwargs["custom_did"] = did
return self.getBytes(url=download_url, **kwargs)
def get_video_by_url(self, video_url, **kwargs) -> bytes:
"""Downloads a TikTok video by a URL
Parameters
----------
video_url: The TikTok url to download the video from
"""
(
region,
language,
proxy,
maxCount,
did,
) = self.__process_kwargs__(kwargs)
kwargs["custom_did"] = did
tiktok_schema = self.getTikTokByUrl(video_url, **kwargs)
download_url = tiktok_schema["itemInfo"]["itemStruct"]["video"]["downloadAddr"]
return self.getBytes(url=download_url, **kwargs)
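    # Hedged example: downloading the video bytes for a TikTok URL and writing
    # them to disk. `api` is an assumed TikTokApi instance; the output path is
    # illustrative.
    #
    #     video_bytes = api.get_video_by_url(
    #         "https://www.tiktok.com/@therock/video/6829267836783971589"
    #     )
    #     with open("video.mp4", "wb") as out:
    #         out.write(video_bytes)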
def get_video_no_watermark(self, video_url, return_bytes=1, **kwargs) -> bytes:
"""Gets the video with no watermark
.. deprecated::
Deprecated due to TikTok fixing this
Parameters
----------
video_url: The url of the video you want to download
return_bytes: Set this to 0 if you want url, 1 if you want bytes
"""
(
region,
language,
proxy,
maxCount,
did,
) = self.__process_kwargs__(kwargs)
raise Exception("Deprecated method, TikTok fixed this.")
kwargs["custom_did"] = did
tiktok_html = self.get_tiktok_by_html(video_url)
# Thanks to @HasibulKabir for pointing this out on #448
cleanVideo = (
"https://api2-16-h2.musical.ly/aweme/v1/play/?video_id={}&line=0&ratio=default"
"&media_type=4&vr_type=0"
).format(tiktok_html["itemInfo"]["itemStruct"]["video"]["id"])
if return_bytes == 0:
return cleanVideo
r = requests.get(
cleanVideo,
headers={
"method": "GET",
"accept-encoding": "utf-8",
"user-agent": "okhttp",
},
proxies=self.__format_proxy(proxy),
)
if r.text[0] == "{":
raise TikTokCaptchaError()
return r.content
def get_music_title(self, id, **kwargs):
"""Retrieves a music title given an ID
Parameters
----------
id: The music id to get the title for
"""
r = requests.get(
"https://www.tiktok.com/music/-{}".format(id),
headers={
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"authority": "www.tiktok.com",
"Accept-Encoding": "gzip, deflate",
"Connection": "keep-alive",
"Host": "www.tiktok.com",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36",
},
proxies=self.__format_proxy(kwargs.get("proxy", None)),
cookies=self.get_cookies(**kwargs),
)
t = r.text
j_raw = parse_script_tag_contents(t)
music_object = json.loads(j_raw)["props"]["pageProps"]["musicInfo"]
if not music_object.get("title", None):
raise TikTokNotFoundError("Song of {} id does not exist".format(str(id)))
return music_object["title"]
def get_secuid(self, username, **kwargs):
"""Gets the secUid for a specific username
Parameters
----------
username: The username to get the secUid for
"""
r = requests.get(
"https://tiktok.com/@{}?lang=en".format(quote(username)),
headers={
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"authority": "www.tiktok.com",
"path": "/@{}".format(quote(username)),
"Accept-Encoding": "gzip, deflate",
"Connection": "keep-alive",
"Host": "www.tiktok.com",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36",
},
            proxies=self.__format_proxy(kwargs.get("proxy", None)),
            cookies=self.get_cookies(**kwargs),
)
try:
return r.text.split('"secUid":"')[1].split('","secret":')[0]
except IndexError as e:
logging.info(r.text)
logging.error(e)
raise Exception(
"Retrieving the user secUid failed. Likely due to TikTok wanting captcha validation. Try to use a proxy."
)
def generate_did(self):
"""Generates a valid did for other methods. Pass this as the custom_did field to download videos"""
return "".join(random.choice(string.digits) for num in range(19))
#
# PRIVATE METHODS
#
def __format_proxy(self, proxy) -> dict:
"""
Formats the proxy object
"""
if proxy is None and self.proxy is not None:
proxy = self.proxy
if proxy is not None:
return {"http": proxy, "https": proxy}
else:
return None
def __get_js(self, proxy=None) -> str:
return requests.get(
"https://sf16-muse-va.ibytedtos.com/obj/rc-web-sdk-gcs/acrawler.js",
proxies=self.__format_proxy(proxy),
).text
def __format_new_params__(self, parm) -> str:
return parm.replace("/", "%2F").replace(" ", "+").replace(";", "%3B")
def __add_new_params__(self) -> str:
query = {
"aid": 1988,
"app_name": "tiktok_web",
"device_platform": "web",
"referer": "",
"root_referer": "",
"user_agent": self.__format_new_params__(self.userAgent),
"cookie_enabled": "true",
"screen_width": self.width,
"screen_height": self.height,
"browser_language": self.browser_language,
"browser_platform": self.browser_platform,
"browser_name": self.browser_name,
"browser_version": self.browser_version,
"browser_online": "true",
"ac": "4g",
"timezone_name": self.timezone_name,
"appId": 1233,
"appType": "m",
"isAndroid": False,
"isMobile": False,
"isIOS": False,
"OS": "windows",
}
return urlencode(query)
# Process the kwargs
def __process_kwargs__(self, kwargs):
region = kwargs.get("region", "US")
language = kwargs.get("language", "en")
proxy = kwargs.get("proxy", None)
maxCount = kwargs.get("maxCount", 35)
if kwargs.get("custom_did", None) != None:
did = kwargs.get("custom_did")
else:
            if self.custom_did is not None:
did = self.custom_did
else:
did = "".join(random.choice(string.digits) for num in range(19))
return region, language, proxy, maxCount, did
#
# Backwards compatibility of the naming scheme
#
getData = get_data
getBytes = get_bytes
userPosts = user_posts
byUsername = by_username
userPage = user_page
getUserPager = get_user_pager
userLiked = user_liked
userLikedbyUsername = user_liked_by_username
bySound = by_sound
getMusicObject = get_music_object
getMusicObjectFull = get_music_object_full
byHashtag = by_hashtag
getHashtagObject = get_hashtag_object
getRecommendedTikToksByVideoID = get_recommended_tiktoks_by_video_id
getTikTokById = get_tiktok_by_id
getTikTokByUrl = get_tiktok_by_url
discoverHashtags = discover_hashtags
discoverMusic = discover_music
getUserObject = get_user_object
getUser = get_user
getSuggestedUsersbyID = get_suggested_users_by_id
getSuggestedUsersbyIDCrawler = get_suggested_users_by_id_crawler
getSuggestedHashtagsbyID = get_suggested_hashtags_by_id
getSuggestedHashtagsbyIDCrawler = get_suggested_hashtags_by_id_crawler
getSuggestedMusicbyID = get_suggested_music_by_id
getSuggestedMusicIDCrawler = get_suggested_music_id_crawler
get_Video_By_TikTok = get_video_by_tiktok
get_Video_By_DownloadURL = get_video_by_download_url
get_Video_By_Url = get_video_by_url
get_secUid = get_secuid
trending = by_trending
# pdoc ignore old naming scheme
__pdoc__ = {
"TikTokApi.getData": False,
"TikTokApi.getBytes": False,
"TikTokApi.userPosts": False,
"TikTokApi.byUsername": False,
"TikTokApi.userPage": False,
"TikTokApi.getUserPager": False,
"TikTokApi.userLiked": False,
"TikTokApi.userLikedbyUsername": False,
"TikTokApi.bySound": False,
"TikTokApi.getMusicObject": False,
"TikTokApi.getMusicObjectFull": False,
"TikTokApi.byHashtag": False,
"TikTokApi.getHashtagObject": False,
"TikTokApi.getRecommendedTikToksByVideoID": False,
"TikTokApi.getTikTokById": False,
"TikTokApi.getTikTokByUrl": False,
"TikTokApi.discoverHashtags": False,
"TikTokApi.discoverMusic": False,
"TikTokApi.getUserObject": False,
"TikTokApi.getUser": False,
"TikTokApi.getSuggestedUsersbyID": False,
"TikTokApi.getSuggestedUsersbyIDCrawler": False,
"TikTokApi.getSuggestedHashtagsbyID": False,
"TikTokApi.getSuggestedHashtagsbyIDCrawler": False,
"TikTokApi.getSuggestedMusicbyID": False,
"TikTokApi.getSuggestedMusicIDCrawler": False,
"TikTokApi.get_Video_By_TikTok": False,
"TikTokApi.get_Video_By_DownloadURL": False,
"TikTokApi.get_Video_By_Url": False,
"TikTokApi.get_secUid": False,
"TikTokApi.trending": False,
}
|
#!/usr/bin/env python3
import argparse
import sys
from common import *
parser = argparse.ArgumentParser(description='Transform CSV to i18n for use in epsilon.')
parser.add_argument('file', help='Input file, without the language part.')
args = parser.parse_args()
data, langs = load_csv(args.file + ".csv")
if (data, langs) == (None, None):
print("Invalid input " + args.file)
sys.exit(1)
data, langs = clean_data(data, langs)
print(data, langs)
save_i18n(args.file, data, langs)
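# Hedged usage sketch (assumes this script is saved as csv2i18n.py and that a
# matching <name>.csv readable by load_csv() sits next to it):
#
#     python3 csv2i18n.py toolbox
#
# would read toolbox.csv, clean it, and write the i18n files via save_i18n().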
|
import os
from functools import partial
import numpy as np
import fiona
import pyproj
from osgeo import gdal as gd
import geopandas as gpd
from shapely.ops import transform
from shapely.geometry import Point, Polygon
## add fiona support
fiona.drvsupport.supported_drivers['kml'] = 'rw' # enable KML support which is disabled by default
fiona.drvsupport.supported_drivers['KML'] = 'rw' # enable KML support which is disabled by default
fiona.drvsupport.supported_drivers['LIBKML'] = 'rw'
# define projection
proj_wgs84 = pyproj.Proj(init='epsg:4326')
class NdviAnalysis:
def __init__(self, ms_path, object_shp_path, radius, size_x, size_y):
self.ms_path = ms_path
self.object_shp_path = object_shp_path
self.radius = radius
self.size_x = size_x
self.size_y = size_y
# read ndvi tiff file
def read_ms(self, path):
ds_ms = gd.Open(path)
return ds_ms
# convert latlong coordinates as pixels
def convert_latlong_as_pixel(self, ds_ms, point):
gt = ds_ms.GetGeoTransform()
row = int((point.x - gt[0])/gt[1])
col = int((point.y - gt[3])/gt[5])
return row, col
# create a circle around a lat long
def geodesic_point_buffer(self, lat, lon):
# Azimuthal equidistant projection
aeqd_proj = '+proj=aeqd +lat_0={lat} +lon_0={lon} +x_0=0 +y_0=0'
project = partial(
pyproj.transform,
pyproj.Proj(aeqd_proj.format(lat=lat, lon=lon)),
proj_wgs84)
buf = Point(0, 0).buffer(self.radius) # radius in metres
return transform(project, buf).exterior.coords[:]
# return average ndvi based on xy offset
def return_avg_ndvi(self, ds_ms, xoff, yoff):
red = ds_ms.GetRasterBand(5).ReadAsArray(xoff, yoff, self.size_x, self.size_y).astype('int16')
nir1 = ds_ms.GetRasterBand(7).ReadAsArray(xoff, yoff, self.size_x, self.size_y).astype('int16')
# calculate ndvi
ndvi_array = (nir1 - red)/(nir1 + red)
ndvi_array[~ np.isfinite(ndvi_array)] = 0
return np.average(ndvi_array)
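    # Worked illustration of the formula used above, NDVI = (NIR - Red) / (NIR + Red),
    # on a tiny hypothetical 2x2 window (the reflectance values are made up):
    #
    #     red  = np.array([[100, 120], [ 90, 110]], dtype='int16')
    #     nir1 = np.array([[300, 240], [270, 110]], dtype='int16')
    #     ndvi = (nir1 - red) / (nir1 + red)   # -> [[0.5, 0.333], [0.5, 0.0]]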
def encroachment_analysis(self, save_path):
ds_ms = self.read_ms(self.ms_path)
# read predicted hv towers kml/shp
df_hv = gpd.read_file(self.object_shp_path)
# print (len(df_hv), df_hv.crs)
df_hv = df_hv[['id', 'geometry']]
df_hv['centroid'] = df_hv.centroid
# loop through all towers and create a circle of radius and convert as shapely polygon
geom_circle = []
for i in range(len(df_hv)):
pt = df_hv['centroid'].iloc[i]
lat = pt.y
lon = pt.x
polycircle = Polygon(self.geodesic_point_buffer(lat, lon))
geom_circle.append(polycircle)
# update geometry column
df_hv['geometry'] = geom_circle
        # loop through all towers and calculate average ndvi
avg_ndvi = []
for j in range(len(df_hv)):
row, col = self.convert_latlong_as_pixel(ds_ms, df_hv['centroid'].iloc[j])
xoff = row
yoff = col
try:
avg_ndvi.append(self.return_avg_ndvi(ds_ms, xoff, yoff))
            except Exception:
avg_ndvi.append(0)
continue
# add average ndvi column in dataframe
df_hv['avg_ndvi'] = avg_ndvi
# save as shapefile
df_hv = df_hv[['id', 'avg_ndvi', 'geometry']]
df_hv.to_file(os.path.join(save_path, 'vegetation_encroachment.shp'), driver='ESRI Shapefile')
df_hv.to_file(os.path.join(save_path, 'vegetation_encroachment.kml'), driver='KML')
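    # Hedged usage sketch: instantiating the class and running the analysis with
    # hypothetical paths and parameters (none of these file names come from this file).
    #
    #     analysis = NdviAnalysis(
    #         ms_path="data/ms_image.tif",
    #         object_shp_path="data/towers.kml",
    #         radius=30,    # metres
    #         size_x=20,    # window width in pixels
    #         size_y=20,    # window height in pixels
    #     )
    #     analysis.encroachment_analysis("output/")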
|
from django.shortcuts import render, render_to_response
from . import scrape1, scrape2
from . import trends
from .models import *
from django.http.response import HttpResponseRedirect
from django.http import HttpResponse
from django.db.models import Max
from .form import upFile, fixbrowse, fixscrape
from .tests import todlnn, getPost, usedvar, handle_uploaded_file, getFile, getKeyJson, uploadtoarray, getall, gettweet,gettweet1, handle, downloadfile, downloadfile1, toJst,downloadprepros
import os
from . import urls
import sqlite3
import json, csv, string
import numpy as np
from _io import StringIO
from wsgiref.util import FileWrapper
from .static.img.graph.visualisation import *
from jst.models import *
from preprocess.form import PostForm
from preprocess.form2 import PostForm2
from preprocess.ED_rule import *
from preprocess.ED import *
from dlnn.listFunction import *
from dlnn.listFunction2 import *
from dlnn.tests import *
# Create your views here.
def delet(request):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
db_temp = os.path.join(BASE_DIR, 'db.sqlite3')
conn = sqlite3.connect(db_temp)
cursor = conn.cursor()
cursor.execute("DELETE from social_media_crawling_TwitterCrawl;")
conn.commit()
return HttpResponseRedirect('../search2')
def delet2(request):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
db_temp = os.path.join(BASE_DIR, 'db.sqlite3')
conn = sqlite3.connect(db_temp)
cursor = conn.cursor()
cursor.execute("DELETE from social_media_crawling_FacebookCrawl;")
conn.commit()
return HttpResponseRedirect('../search2')
# def changemetod(mth):
# BASE_DIR = os.path.dirname((__file__))
# var = os.path.join(BASE_DIR, 'var.txt')
# f = open(var,'w')
# f.write(mth)
# usedvar.method = mth
# f.close()
def index(request):
return render(request, 'WMMS/index.html')
# def crawl(request):
# #variabel
# m1 = request.session.get('Method')
# m2 = request.session.get('Lang')
# m3 = request.session.get('Tapdown')
# if m1==None:
# ad = usedvar.method
# else:
# ad = m1
# form1 = advOpt(request.POST)
# form = browse_id(request.POST)
# form2 = option(request.POST)
# form3 = ScrapeOpt(request.POST)
# #trend
# #countryL = trends.getCountry() #hapus commen jika siap pake
# #bad = trends.getAllTrending()
#
# #trend temp
# countryL = ['Worldwide','Indonesia','United Kingdom']
# bad = trends.getAllTrending(countryL)
#
# #form
# if request.method == 'POST':
# if 'methods' in request.POST:
# if form1.is_valid():
# a = request.POST.get('methods')
# changemetod(a)
# request.session['Method']=form1.cleaned_data['methods']
# ad = form1.cleaned_data['methods']
# if form2.is_valid():
# request.session['Lang']=form2.cleaned_data['language']
# m2 = form2.cleaned_data['language']
# if form3.is_valid():
# request.session['Tapdown']=form3.cleaned_data['tapdown']
# m3 = form3.cleaned_data['tapdown']
#
# if 'crawl' in request.POST:
# if form.is_valid():
# data = request.POST.get('content')
# if ad == '0':
# scrape1.API(data,m2)
# if ad == '1':
# scrape1.bac(data,m2,m3)
#
# return render(request, 'WMMS/crawl.html', {
# 'form': browse_id(), 'form1':advOpt(initial={'methods':ad}),'form2':option(initial={'language':m2}),'form3':ScrapeOpt(initial={'tapdown':m3}), 'country':countryL, 'm1':ad, 'test':bad, "f3":upFile
# })
def crawl2(request):
    # variables
twitterdata = TwitterCrawl.objects.all()
facebookdata = FacebookCrawl.objects.all()
analisis = ['None','JST','DLNN']
badword = request.session.get('badword')
cleandata = request.session.get('clean')
checked = ''
if cleandata == 'Yes':
checked= 'checked'
#trend
    #countryL = trends.getCountry()  # uncomment when ready to use
#bad = trends.getAllTrending()
#trend temp
# countryL = ['Worldwide','Indonesia','United Kingdom']
# bad = trends.getAllTrending(countryL)
#form
if request.method == 'POST':
form = fixbrowse(request.POST)
form1 = fixscrape(request.POST)
form2 = upFile(request.POST, request.FILES)
if 'API' in request.POST:
if form.is_valid():
data = request.POST.get('content')
bahasa = request.POST.get('language')
jumlah = request.POST.get('jumlah')
scrape2.API(data,bahasa, jumlah, badword, cleandata)
# an = request.POST.get('analisis')
prepross = request.POST.get('prepross')
if prepross == "Yes":
# if an == "None":
hasil = ''
input1 = gettweet()
return render(request, "hasil_crawl.html", {'inputDB':input1, 'hasil':hasil, 'f':PostForm, 'f2':PostForm2,'database':'twitter'})
# if an != None:
# if an == 'JST':
# return render(request, "WMMS/hasil_crawl2.html",{'database':'twitter'})
#
if 'Scrape' in request.POST:
if form.is_valid():
data = request.POST.get('content')
bahasa = request.POST.get('language')
since = request.POST.get('since')
until = request.POST.get('until')
tapdown = request.POST.get('tapdown')
date = request.POST.get('date')
date = [date,since,until]
scrape2.scrapeTwitter(data,bahasa,int(tapdown),date,badword, cleandata)
prepross = request.POST.get('prepross')
# an = request.POST.get('analisis')
# if an != "None" and prepross== "Yes":
# if an == "JST":
# return render(request, "WMMS/hasil_crawl2.html",{'database':'twitter'})
#
# elif an == 'None' and prepross == "Yes":
if prepross == "YES":
hasil = ''
input1 = gettweet()
return render(request, "hasil_crawl.html", {'inputDB':input1, 'hasil':hasil, 'f':PostForm, 'f2':PostForm2,'database':'twitter'})
else:
print(form.errors)
if 'FScrape' in request.POST:
data = request.POST.get('content')
tapdown = request.POST.get('tapdown')
scrape2.scrapeFacebook(data,int(tapdown))
prepross = request.POST.get('prepross')
# an = request.POST.get('analisis')
# if an != "None" and prepross== "Yes":
# print('as')
# # chAnalisis(an)
# elif an == "None" and prepross == "Yes":
if prepross == "Yes":
hasil = ''
input1 = getPost()
return render(request, "hasil_crawl.html", {'inputDB':input1, 'hasil':hasil, 'f':PostForm, 'f2':PostForm2,'database':'facebook'})
if 'file' in request.FILES:
crawler = request.POST.get('dataCrawl')
print(crawler)
if form2.is_valid():
handle(request.FILES['file'],request.FILES['file'].name)
else:
print(form.errors)
if 'download' in request.POST:
dllist = request.POST.getlist('dlFile')
return downloadfile(dllist, twitterdata)
if 'download1' in request.POST:
dllist = request.POST.getlist('dlFile')
return downloadfile1(dllist, facebookdata)
if 'optioncrawl' in request.POST:
request.session['badword']=request.POST.get('badword')
request.session['clean']=request.POST.get('cleanopt')
if 'PP' in request.POST:
hasil = ''
input1 = ''
crawler = request.POST.get('dataCrawl')
if crawler == "twitter":
input1 = gettweet()
elif crawler == "facebook":
input1 = getPost()
return render(request, "hasil_crawl.html", {'inputDB':input1, 'hasil':hasil, 'f':PostForm, 'f2':PostForm2,'database':crawler})
if 'JST' in request.POST:
crawler = request.POST.get('dataCrawl')
return render(request, "WMMS/hasil_crawl2.html",{'database':crawler})
if 'DLNN' in request.POST:
Topik, FeatX, kelasSentimen = todlnn()
input1 = ''
crawler = request.POST.get('dataCrawl')
if crawler == "twitter":
input1 = gettweet()
elif crawler == "facebook":
input1 = getPost()
return render(request,"WMMS/hasil_crawl3.html",{'database':crawler,'inputDB':input1,'data':FeatX,'sent':predict, 'topik':Topik, 'kelasSentimen':kelasSentimen})
####### handle preprocessing #########
if 'inputA' in request.POST:
del request.session['hasilpr']
hasilpr = []
input1 = ''
data2 = []
hasil = ''
crawler = request.POST.get('dataCrawl')
if crawler == "twitter":
input1 = gettweet()
elif crawler == "facebook":
input1 = getPost()
input2 = ' '.join(input1)
situs1 = request.POST.get('method', '')
if situs1=='EDR':
for a in input1:
b = F_EDR(a)
hasilpr.append(b)
data2.append((a,b))
elif situs1 == 'ED':
for a in input1:
b = F_ED(a)
hasilpr.append(b)
data2.append((a,b))
elif situs1 == 'BG':
for a in input1:
b = F_BG(a)
hasilpr.append(b)
data2.append((a,b))
situs2 = request.POST.get('method1', '')
if situs2 == 'FR':
for a in data2:
a[1] = correction(a[1])
hasilpr.append(a[1])
request.session['hasilpr'] = hasilpr
return render(request, "hasil_prepross.html", {'dicthasil':data2, 'name1':situs1, 'name2':situs2})
if 'downloadPreprocess' in request.POST:
hasilpr = request.session.get('hasilpr')
b = downloadprepros(hasilpr)
return b
if 'toJSTAnalisis' in request.POST:
return render(request, "WMMS/hasil_crawl2.html",{'database':'preprocess'})
####### HANDLE JST #########
if 'inputB' in request.POST:
crawler = request.POST.get('dataCrawl')
if crawler == "twitter":
input1 = gettweet()
elif crawler == "facebook":
input1 = getPost()
elif crawler == "preprocess":
input1= request.session.get('hasilpr')
statusMI = request.POST['statusMI']
stopwords = request.POST['stopwords']
vocabSize = int(request.POST['vocabSize'])
dictData, kata,indexDoc, w, kalimat, statusMI = toJst(statusMI, vocabSize, stopwords, input1)
return render(request, 'JST/previewMI.html', {'dictData': dictData, 'kata': kata, 'jarak': range(0, w),
'kalimat': kalimat, 'lenCorpus' : indexDoc, 'name':"hasilCrawl" ,
'statusMI': statusMI})
######## handle DLNN #######
if 'input' in request.POST:
Topik, FeatX, kelasSentimen = todlnn()
tabledata = ''
prediction = ''
data = ''
IA = ''
topik = ''
tabledata = []
input1 = []
crawler = request.POST.get('dataCrawl')
if crawler == "twitter":
input1 = gettweet()
elif crawler == "facebook":
input1 = getPost()
elif crawler == "preprocess":
input1= request.session.get('hasilpr')
topik = request.POST.get("topik")
FE = request.POST.get("FE")
loadfile = input1
datatemp = []
for i in loadfile:
datatemp.append(i)
for i in datatemp:
                prepros, prediction = predict(i,int(FE),topik)  # call the predict function from listFunction
tabledata.append({
'input': i,
'prepros': prepros,
'prediction': prediction,
'confirm': True
})
            data = json.dumps(tabledata)  # dump to JSON for the results table
return render(request, "index_dlnnFinal.html",{'selected_topic': topik,'data':FeatX,'sent':predict, 'IA':IA, 'hasil':data,'topik':Topik, 'kelasSentimen':kelasSentimen})
return render(request, 'WMMS/social_media_crawling.html', {
'form': fixbrowse(), 'form1':fixscrape(initial={'date':'0','tapdown':'50'}), 'f3':upFile, 'Tweets':twitterdata, 'statuses':facebookdata,'analisis':analisis, 'badword':badword, 'check':checked })
# def fileUpload(request):
# d, e = getFile()
# listda ={}
# c =[]
# data = request.POST.get('data_choice')
# if data != None:
# listda[data] = getKeyJson(data)
# c = listda[data]
#
# if request.method == 'POST':
# form = upFile(request.POST, request.FILES)
# if form.is_valid():
# handle_uploaded_file(request.FILES['file'],request.FILES['file'].name)
# else:
# print(form.errors)
# if 'TOTM' in request.POST:
# print('succes')
# datatwit = getall()
#
# return render(request,'WMMS/test.html',{"f3":upFile, 'data' : c, 'test':d, 'test1':e, 'sta':data})
#
# def analisis1(request):
# Cfeature = ['TF','TF-IDF','BOW','BIGRAM']
# algorthm = ['SVM','Deep learning', 'Naive bayes']
# hasil = "Sentimen Positif"
#
# if request.method == 'POST':
# form = upFile(request.POST, request.FILES)
# if form.is_valid():
# print(uploadtoarray(request.FILES['file']))
# print(request.POST.get('Cfeature'))
# print(request.POST.get('algorthm'))
# else:
# print(form.errors)
#
#
# return render(request,'WMMS/test1.html',{"f3":upFile, 'test':Cfeature, 'test1':algorthm, 'hasil':hasil})
#
# def download(request):
# twitterdata = TwitterCrawl.objects.all()
# f = StringIO()
# writer = csv.writer(f)
#
# for row in twitterdata:
# writer.writerow([row.name,row.tweet,row.date,row.Retweet_user,row.hashtag])
# f.flush()
# f.seek(0)
# response = HttpResponse(FileWrapper(f), content_type='text/csv')
# response['Content-Disposition'] = 'attachment; filename=Data_twitter.csv'
# return response
def visualisasi2(request):
usrfav =visual1()
return render(request, 'WMMS/visualFacebook.html',{'user':usrfav})
def visualisasi(request):
userfav = visual()
res = TwitterCrawl.objects.filter().aggregate(max_id=Max('pk'))
res.get('max_id')
return render(request, 'WMMS/visualTwitter.html',{'user':userfav})
def savetoDB(request):
FBtabel= FacebookCrawl.objects.all()
TWtabel = TwitterCrawl.objects.all()
if request.method=="POST":
dbpick = request.POST.get("dataCrawl")
name = request.POST.get('name')
if dbpick == "twitter":
r = TwitterTopik(topik = name)
r.save()
data = []
for a in TWtabel:
q = TwitterDataset(name = a.name, tweet=a.tweet,date = a.date, Retweet_user=a.Retweet_user,hashtag=a.hashtag,topik=r)
q.save()
data.append(a.tweet)
visualDS(name,data,dbpick)
if dbpick == "facebook":
r = FacebookTopik(topik = name)
r.save()
data = []
for a in FBtabel:
q = FacebookDataset(name = a.name, status=a.status,like = a.like, comment = a.comment, share=a.share, topik=r)
q.save()
data.append(a.status)
visualDS(name,data,dbpick)
return HttpResponseRedirect("../")
def fullDBTW(request):
topik = TwitterTopik.objects.all()
db = ''
twitter =''
if request.method == "POST":
if 'data_choice' in request.POST:
db = request.POST.get('data_choice')
twitter = TwitterDataset.objects.filter(topik=db)
elif 'delete' in request.POST:
check = request.POST.getlist('check')
db_choice = request.POST.get('db_choice')
for a in check:
TwitterDataset.objects.get(id=a).delete()
## renew Wordcloud
twitter = TwitterDataset.objects.filter(topik=db_choice)
db2 = twitter[0].topik.topik
data = [a.tweet for a in twitter ]
visualDS(db2, data, 'twitter')
elif 'deleteDS' in request.POST:
db_choice = request.POST.get('db_choice')
TwitterTopik.objects.get(id=db_choice).delete()
elif 'visual' in request.POST:
db_choice = request.POST.get('db_choice')
twitter = TwitterDataset.objects.filter(topik=db_choice)
visualisation = TwitterTopik.objects.get(id=db_choice).topik
return render(request, 'WMMS/full_db_tw.html',{'choice':db_choice,'topik':topik,'Tweets':twitter, 'visualisation':'GO', 'dbpicked':visualisation})
if 'download' in request.POST:
dllist = request.POST.getlist('dlFile')
db_choice = request.POST.get('db_choice')
twitter = TwitterDataset.objects.filter(topik=db_choice)
return downloadfile(dllist, twitter)
if 'PP' in request.POST:
hasil = ''
input1 = []
db_choice = request.POST.get('db_choice')
input2 = TwitterDataset.objects.filter(topik=db_choice).values('tweet')
for a in input2:
input1.append(a['tweet'])
return render(request, "hasil_crawl.html", {'inputDB':input1, 'hasil':hasil, 'f':PostForm, 'f2':PostForm2,'database':db_choice})
if 'JST' in request.POST:
db_choice = request.POST.get('db_choice')
return render(request, "WMMS/hasil_crawl2.html",{'database':db_choice})
if 'DLNN' in request.POST:
Topik, FeatX, kelasSentimen = todlnn()
input1 = []
db_choice = request.POST.get('db_choice')
input2 = TwitterDataset.objects.filter(topik=db_choice).values('tweet')
for a in input2:
input1.append(a['tweet'])
return render(request,"WMMS/hasil_crawl3.html",{'database':db_choice,'inputDB':input1,'data':FeatX,'sent':predict, 'topik':Topik, 'kelasSentimen':kelasSentimen})
####### handle preprocessing #########
if 'inputA' in request.POST:
del request.session['hasilpr']
hasilpr = []
input1 = []
data2 = []
hasil = ''
crawler = request.POST.get('dataCrawl')
input2 = TwitterDataset.objects.filter(topik=crawler).values('tweet')
for a in input2:
input1.append(a['tweet'])
input2 = ' '.join(input1)
situs1 = request.POST.get('method', '')
if situs1=='EDR':
for a in input1:
b = F_EDR(a)
hasilpr.append(b)
data2.append((a,b))
elif situs1 == 'ED':
for a in input1:
b = F_ED(a)
hasilpr.append(b)
data2.append((a,b))
elif situs1 == 'BG':
for a in input1:
b = F_BG(a)
hasilpr.append(b)
data2.append((a,b))
situs2 = request.POST.get('method1', '')
if situs2 == 'FR':
for a in data2:
a[1] = correction(a[1])
hasilpr.append(a[1])
request.session['hasilpr'] = hasilpr
return render(request, "hasil_prepross.html", {'dicthasil':data2, 'name1':situs1, 'name2':situs2})
if 'downloadPreprocess' in request.POST:
hasilpr = request.session.get('hasilpr')
b = downloadprepros(hasilpr)
return b
if 'toJSTAnalisis' in request.POST:
return render(request, "WMMS/hasil_crawl2.html",{'database':'preprocess'})
####### HANDLE JST #########
if 'inputB' in request.POST:
input1 = []
crawler = request.POST.get('dataCrawl')
if crawler != 'preprocess':
input2 = TwitterDataset.objects.filter(topik=crawler).values('tweet')
for a in input2:
input1.append(a['tweet'])
else:
input1 = request.session.get('hasilpr')
statusMI = request.POST['statusMI']
stopwords = request.POST['stopwords']
vocabSize = int(request.POST['vocabSize'])
dictData, kata,indexDoc, w, kalimat, statusMI = toJst(statusMI, vocabSize, stopwords, input1)
return render(request, 'JST/previewMI.html', {'dictData': dictData, 'kata': kata, 'jarak': range(0, w),
'kalimat': kalimat, 'lenCorpus' : indexDoc, 'name':"hasilCrawl" ,
'statusMI': statusMI})
######## handle DLNN #######
if 'input' in request.POST:
Topik, FeatX, kelasSentimen = todlnn()
tabledata = ''
prediction = ''
data = ''
IA = ''
topik = ''
tabledata = []
input1 = []
crawler = request.POST.get('dataCrawl')
if crawler != 'preprocess':
input2 = TwitterDataset.objects.filter(topik=crawler).values('tweet')
for a in input2:
input1.append(a['tweet'])
else:
input1 = request.session.get('hasilpr')
topik = request.POST.get("topik")
FE = request.POST.get("FE")
loadfile = input1
datatemp = []
for i in loadfile:
datatemp.append(i)
for i in datatemp:
                prepros, prediction = predict(i,int(FE),topik)  # call the predict function from listFunction
tabledata.append({
'input': i,
'prepros': prepros,
'prediction': prediction,
'confirm': True
})
            data = json.dumps(tabledata)  # dump to JSON for the results table
return render(request, "index_dlnnFinal.html",{'selected_topic': topik,'data':FeatX,'sent':predict, 'IA':IA, 'hasil':data,'topik':Topik, 'kelasSentimen':kelasSentimen})
return render(request, 'WMMS/full_db_tw.html',{'choice':db,'topik':topik,'Tweets':twitter})
def fullDBFB(request):
topik = FacebookTopik.objects.all()
statuses = ''
db = ''
if request.method == "POST":
if 'data_choice' in request.POST:
db = request.POST.get('data_choice')
statuses = FacebookDataset.objects.filter(topik=db)
elif 'delete' in request.POST:
check = request.POST.getlist('check')
db_choice = request.POST.get('db_choice')
for a in check:
FacebookDataset.objects.get(id=int(a)).delete()
## renew wordcloud
statuses = FacebookDataset.objects.filter(topik=db_choice)
db2 = statuses[0].topik.topik
data = [a.status for a in statuses]
visualDS(db2,data,'facebook')
elif 'deleteDS' in request.POST:
db_choice = request.POST.get('db_choice')
FacebookTopik.objects.get(id=db_choice).delete()
elif 'visual' in request.POST:
db_choice = request.POST.get('db_choice')
statuses = FacebookDataset.objects.filter(topik=db_choice)
visualisation = FacebookTopik.objects.get(id=db_choice).topik
return render(request, 'WMMS/full_db_fb.html',{'choice':db_choice,'topik':topik,'statuses':statuses, 'visualisation':'GO', 'dbpicked':visualisation})
if 'PP' in request.POST:
hasil = ''
input1 = []
db_choice = request.POST.get('db_choice')
input2 = FacebookDataset.objects.filter(topik=db_choice).values('status')
for a in input2:
input1.append(a['status'])
return render(request, "hasil_crawl.html", {'inputDB':input1, 'hasil':hasil, 'f':PostForm, 'f2':PostForm2,'database':db_choice})
if 'download1' in request.POST:
dllist = request.POST.getlist('dlFile')
db_choice = request.POST.get('db_choice')
statuses = FacebookDataset.objects.filter(topik=db_choice)
return downloadfile1(dllist, statuses)
if 'JST' in request.POST:
db_choice = request.POST.get('db_choice')
return render(request, "WMMS/hasil_crawl2.html",{'database':db_choice})
if 'DLNN' in request.POST:
Topik, FeatX, kelasSentimen = todlnn()
input1 = []
db_choice = request.POST.get('db_choice')
input2 = FacebookDataset.objects.filter(topik=db_choice).values('status')
for a in input2:
input1.append(a['status'])
return render(request,"WMMS/hasil_crawl3.html",{'database':db_choice,'inputDB':input1,'data':FeatX,'sent':predict, 'topik':Topik, 'kelasSentimen':kelasSentimen})
####### handle preprocessing #########
if 'inputA' in request.POST:
del request.session['hasilpr']
hasilpr = []
input1 = []
data2 = []
hasil = ''
crawler = request.POST.get('dataCrawl')
input2 = FacebookDataset.objects.filter(topik=crawler).values('status')
for a in input2:
input1.append(a['status'])
input2 = ' '.join(input1)
situs1 = request.POST.get('method', '')
if situs1=='EDR':
for a in input1:
b = F_EDR(a)
hasilpr.append(b)
data2.append((a,b))
elif situs1 == 'ED':
for a in input1:
b = F_ED(a)
hasilpr.append(b)
data2.append((a,b))
elif situs1 == 'BG':
for a in input1:
b = F_BG(a)
hasilpr.append(b)
data2.append((a,b))
situs2 = request.POST.get('method1', '')
if situs2 == 'FR':
for a in data2:
a[1] = correction(a[1])
hasilpr.append(a[1])
request.session['hasilpr'] = hasilpr
return render(request, "hasil_prepross.html", {'dicthasil':data2, 'name1':situs1, 'name2':situs2})
if 'downloadPreprocess' in request.POST:
hasilpr = request.session.get('hasilpr')
b = downloadprepros(hasilpr)
return b
if 'toJSTAnalisis' in request.POST:
return render(request, "WMMS/hasil_crawl2.html",{'database':'preprocess'})
####### HANDLE JST #########
if 'inputB' in request.POST:
input1 = []
crawler = request.POST.get('dataCrawl')
print(crawler)
if crawler != 'preprocess':
input2 = FacebookDataset.objects.filter(topik=crawler).values('status')
for a in input2:
input1.append(a['status'])
else:
input1 = request.session.get('hasilpr')
statusMI = request.POST['statusMI']
stopwords = request.POST['stopwords']
vocabSize = int(request.POST['vocabSize'])
dictData, kata,indexDoc, w, kalimat, statusMI = toJst(statusMI, vocabSize, stopwords, input1)
return render(request, 'JST/previewMI.html', {'dictData': dictData, 'kata': kata, 'jarak': range(0, w),
'kalimat': kalimat, 'lenCorpus' : indexDoc, 'name':"hasilCrawl" ,
'statusMI': statusMI})
######## handle DLNN #######
if 'input' in request.POST:
Topik, FeatX, kelasSentimen = todlnn()
tabledata = ''
prediction = ''
data = ''
IA = ''
topik = ''
tabledata = []
input1 = []
crawler = request.POST.get('dataCrawl')
if crawler != 'preprocess':
input2 = FacebookDataset.objects.filter(topik=crawler).values('status')
for a in input2:
input1.append(a['status'])
else:
input1 = request.session.get('hasilpr')
topik = request.POST.get("topik")
FE = request.POST.get("FE")
loadfile = input1
datatemp = []
for i in loadfile:
datatemp.append(i)
for i in datatemp:
                prepros, prediction = predict(i, int(FE), topik)  # call the predict function from listFunction
tabledata.append({
'input': i,
'prepros': prepros,
'prediction': prediction,
'confirm': True
})
            data = json.dumps(tabledata)  # serialize results for the output table
return render(request, "index_dlnnFinal.html",{'selected_topic': topik,'data':FeatX,'sent':predict, 'IA':IA, 'hasil':data,'topik':Topik, 'kelasSentimen':kelasSentimen})
return render(request, 'WMMS/full_db_fb.html',{'choice':db,'topik':topik,'statuses':statuses})
|
import difflib
import six
if six.PY3:
from urllib.parse import quote
elif six.PY2:
from urllib import quote
from .jsonutil import JsonTable
from .uriutil import uri_parent
from .schema import datatype_attributes
class EAttrs(object):
""" Accessor class to resource fields.
Help to retrieve the attributes paths relevant to this element::
>>> subject.attrs()
['xnat:subjectData/sharing',
'xnat:subjectData/sharing/share',
'xnat:subjectData/resources',
...
'xnat:subjectData/experiments/experiment'
]
        Not all paths are valid, but they give an indication of what
        is available. To retrieve the paths, the corresponding
        schemas must first be downloaded and parsed through the
        schema management interface::
>>> interface.manage.schemas.add('xnat.xsd')
>>> interface.manage.schemas.add('myschema/myschema.xsd')
"""
def __init__(self, eobj):
"""
Parameters
----------
eobj:
:class:`EObject` Object
"""
self._eobj = eobj
self._intf = eobj._intf
self._datatype = None
self._id = None
def __call__(self):
""" List the attributes paths relevant to this element.
"""
paths = []
for root in self._intf.manage.schemas._trees.values():
paths.extend(datatype_attributes(root, self._get_datatype()))
return paths
def _get_datatype(self):
if self._datatype is None:
self._datatype = self._eobj.datatype()
return self._datatype
def _get_id(self):
return self._eobj.id()
def set(self, path, value, **kwargs):
""" Set an attribute.
Parameters
----------
path: string
The xpath of the attribute relative to the element.
value: string
The attribute's value. Note that the python type is
always a string but the content of the value must
match what is defined in the schema.
e.g. an element defined as a float in the schema
must be given a string containing a number, a
valid date must follow the ISO 8601 which is the
standard representation for dates and times
established by the W3C.
"""
dt = self._get_datatype()
if dt is None:
dt = ''
put_uri = self._eobj._uri + '?xsiType=%s&%s=%s' % (quote(dt),
quote(path),
quote(value))
self._intf._exec(put_uri, 'PUT', **kwargs)
def mset(self, dict_attrs, **kwargs):
""" Set multiple attributes at once.
It is more efficient to use this method instead of
multiple times the `set()` method when setting more than
one attribute because only a single HTTP call is issued to
the server.
Parameters
----------
dict_attrs: dict
The dict of key values to set. It follows the same
principles as the single `set()` method.
"""
t = ['&%s=%s' % (quote(path), quote(val))
for path, val in dict_attrs.items()]
query_str = '?xsiType=%s' % quote(self._get_datatype()) + ''.join(t)
put_uri = self._eobj._uri + query_str
self._intf._exec(put_uri, 'PUT', **kwargs)
def get(self, path):
""" Get an attribute value.
.. note::
The value is always returned in a Python string. It must
be explicitly casted or transformed if needed.
Parameters
----------
path: string
The xpath of the attribute relative to the element.
Returns
-------
A string containing the value.
"""
query_str = '?columns=ID,%s' % path
get_uri = uri_parent(self._eobj._uri) + query_str
jdata = JsonTable(self._intf._get_json(get_uri))
jdata = jdata.where(ID=self._get_id())
# unfortunately the return headers do not always have the
# expected name
header = difflib.get_close_matches(path.split('/')[-1],
jdata.headers())
if header == []:
header = difflib.get_close_matches(path, jdata.headers())[0]
else:
header = header[0]
        replaceSlashS = lambda x: x.replace(r'\s', ' ')
        if isinstance(jdata.get(header), list):
            return map(replaceSlashS, jdata.get(header))
        else:
            return jdata.get(header).replace(r'\s', ' ')
def mget(self, paths):
""" Set multiple attributes at once.
It is more efficient to use this method instead of
multiple times the `get()` method when getting more than
one attribute because only a single HTTP call is issued to
the server.
Parameters
----------
paths: list
List of attributes' paths.
Returns
-------
list: ordered list of values (in the order of the
requested paths)
"""
query_str = '?columns=ID,%s' % ','.join(paths)
get_uri = uri_parent(self._eobj._uri) + query_str
jdata = JsonTable(self._intf._get_json(get_uri)
).where(ID=self._get_id())
results = []
# unfortunately the return headers do not always have the
# expected name
for path in paths:
header = difflib.get_close_matches(path.split('/')[-1],
jdata.headers())
if header == []:
header = difflib.get_close_matches(path, jdata.headers())[0]
else:
header = header[0]
            results.append(jdata.get(header).replace(r'\s', ' '))
return results
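# A minimal usage sketch of EAttrs (hypothetical object and xpath names; it needs a
# live XNAT connection and downloaded schemas, so it is left as comments):
#   subject = interface.select('/project/PROJ/subject/SUBJ01')
#   subject.attrs.set('xnat:subjectData/group', 'control')
#   subject.attrs.mset({'xnat:subjectData/group': 'control',
#                       'xnat:subjectData/src': 'hospital'})
#   group, src = subject.attrs.mget(['xnat:subjectData/group',
#                                    'xnat:subjectData/src'])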
|
def even(x):
    if x % 2 == 0:
        print('even')
        return True
    print('odd')
    return False
x = even(5)
|
"""SMACT benchmarking."""
from .utilities import timeit
from ..structure_prediction.mutation import CationMutator
class MutatorBenchmarker:
"""Benchmarking tests for CationMutator."""
@timeit
def run_tests(self):
"""Initialize Mutator and perform tests."""
self.__cm_setup()
self.__pair_corr()
@timeit
    def __cm_setup(self) -> None:
"""Create a CationMutator."""
self.cm = CationMutator.from_json()
@timeit
def __pair_corr(self):
"""Get pair correlations."""
self.cm.complete_pair_corrs()
@timeit(delim=True, n=100)
def mutator_test_run():
MutatorBenchmarker().run_tests()
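# Presumably the benchmark is invoked by calling the decorated module-level function;
# the exact timing output depends on the timeit utility imported above:
#   mutator_test_run()  # with n=100, this should repeat the test run 100 times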
|
import numpy
import tensorflow as tf
from pysao.metamodels.metamodel import Metamodel
class DNNMetamodel(Metamodel):
def __init__(self, n_neural_nets=1):
Metamodel.__init__(self)
self.n_neural_nets = n_neural_nets
self.estimators = None
def _predict(self, X):
vals = []
for estimator in self.estimators:
predict_input_fn = tf.estimator.inputs.numpy_input_fn(
x={"x": X},
num_epochs=1,
shuffle=False)
val = estimator.predict(input_fn=predict_input_fn)
            val = list(val)
vals.append(numpy.array([e['predictions'] for e in val]).T[0])
vals = numpy.array(vals)
return numpy.median(vals, axis=0), numpy.std(vals, axis=0)
def _fit(self, X, F, data):
self.estimators = []
feature_columns = [tf.feature_column.numeric_column("x", shape=[X.shape[1]])]
input_fn = tf.estimator.inputs.numpy_input_fn({"x": X}, F, batch_size=4, num_epochs=None, shuffle=True)
for _ in range(self.n_neural_nets):
estimator = tf.estimator.DNNRegressor([1024, 512, 256], feature_columns=feature_columns)
estimator = estimator.train(input_fn=input_fn, steps=100000)
self.estimators.append(estimator)
@staticmethod
def get_params():
return [{}]
|
#!/usr/bin/env python3
"""
Collects temperature / gravity readings from Spindel
start standalone with sudo python3 spindel_server.py 85
test with curl -d "{\"temperature\":12,\"angle\":10,\"gravity\":121212}" ip:port
"""
import socketserver
import json
from http.server import BaseHTTPRequestHandler, HTTPServer
import sys
from db import Logger
import configparser
import requests
import os
def get_config(config_file):
config_input = configparser.ConfigParser()
config_input.read(config_file)
config = dict()
config['PUSH_ENDPOINT'] = config_input.get('holdmybeer', 'collection_endpoint')
config['PUSH_API_KEY'] = config_input.get('holdmybeer', 'apikey')
return config
def post_data(url, apikey, body):
headers = {'content-type': 'application/json', 'x-api-key': apikey}
requests.post(url, data=json.dumps(body), headers=headers)
def RequestHandlerFactory(logger, config_file):
class S(BaseHTTPRequestHandler):
def _set_headers(self):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
def do_GET(self):
self._set_headers()
            self.wfile.write(b"<html><body>ACK</body></html>")
def do_HEAD(self):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
def handle_http(self, status_code, path):
self.send_response(status_code)
self.send_header('Content-type', 'text/html')
self.end_headers()
content = 'thanks'
return bytes(content, 'UTF-8')
def respond(self, opts):
response = self.handle_http(opts['status'], self.path)
self.wfile.write(response)
def do_POST(self):
content_length = int(self.headers['Content-Length'])
body = self.rfile.read(content_length)
# this is very fragile, add some error handling: parsing body, and accessing temp, gravity and angle
# works for the Spindel payload though.
print('POST: body: ' + body.decode('utf-8'))
logger.writeRawSpindel(body)
self.respond({'status': 200})
parsed_body = json.loads(body.decode('utf-8'))
print('Parsed: ', parsed_body)
logger.writeInternalTemperature(parsed_body['temperature'])
logger.writeAngle(parsed_body['angle'])
logger.writeGravity(parsed_body['gravity'])
try:
config = get_config(config_file)
target_endpoint = config.get('PUSH_ENDPOINT')
target_apikey = config.get('PUSH_API_KEY')
print('forwarding payload to', target_endpoint)
post_data(target_endpoint, target_apikey, parsed_body)
            except Exception as exc:
                print('failed while posting data to remote server:', exc)
return S
def spindel_server(logger, config_file, port=80):
request_handler = RequestHandlerFactory(logger, config_file)
server_address = ('', port)
httpd = HTTPServer(server_address, request_handler)
print('Starting httpd...')
httpd.serve_forever()
# run the server on its own, useful for debugging
if __name__ == "__main__":
logger = Logger('test')
config_file = os.path.dirname(
os.path.realpath(__file__)) + "/config.ini"
spindel_server(logger, config_file, port=int(sys.argv[1]))
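# For reference, the same test payload as the curl command in the module docstring
# can be sent from Python (host, port and values are placeholders):
#   import requests
#   requests.post('http://localhost:85',
#                 json={'temperature': 12, 'angle': 10, 'gravity': 121212})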
|
from sentry_sdk import capture_exception
from healthcheck.constants import HealthStatus
from healthcheck.models import HealthCheck
def db_check():
"""
    Performs a basic database check by running a simple query against the HealthCheck table.
    :return: HealthStatus.OK if the query succeeds, HealthStatus.FAIL otherwise
"""
try:
h = HealthCheck.objects.first()
return HealthStatus.OK
except Exception as e:
capture_exception(e)
return HealthStatus.FAIL
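# A minimal sketch of how db_check might be exposed as a health endpoint; the view
# name, URL wiring and JsonResponse usage are assumptions, not part of this module:
#   from django.http import JsonResponse
#   def healthz(request):
#       status = db_check()
#       return JsonResponse({'database': status},
#                           status=200 if status == HealthStatus.OK else 503)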
|
# dispute_href is the stored href of the dispute
dispute = balanced.Dispute.fetch(dispute_href) |
from quantum_systems import QuantumSystem, GeneralOrbitalSystem
class SpatialOrbitalSystem(QuantumSystem):
r"""Quantum system containing orbital matrix elements, i.e., we only keep
the spatial orbitals as they are degenerate in each spin direction. We have
.. math:: \psi(x, t) = \psi(\mathbf{r}, t) \sigma(m_s),
where :math:`x = (\mathbf{r}, m_s)` is a generalized coordinate of position
and spin, and :math:`\sigma(m_s)` is either :math:`\alpha(m_s)` or
:math:`\beta(m_s)` as the two-dimensional spin basis states. This means
that we only store :math:`\psi(\mathbf{r}, t)`.
Parameters
----------
n : int
Number of particles. Internally ``SpatialOrbitalSystem`` converts
``n`` to ``n // 2`` such that ``n`` denotes the number of occupied
basis functions (half the number of particles). See example in doctest
below.
basis_set : BasisSet
Spatial orbital basis set without explicit spin-dependence.
See Also
-------
QuantumSystem
Example
-------
>>> n = 4 # Four particles
>>> l = 20 # Twenty basis functions
>>> dim = 2
>>> from quantum_systems import RandomBasisSet
>>> spas = SpatialOrbitalSystem(n, RandomBasisSet(l, dim))
>>> spas.n == n // 2
True
"""
def __init__(self, n, basis_set, **kwargs):
assert (
n % 2 == 0
), "n must be divisable by 2 to be a closed-shell system"
assert not basis_set.includes_spin, (
f"{self.__class__.__name__} only supports basis sets without "
+ "spin-dependence."
)
super().__init__(n // 2, basis_set, **kwargs)
def construct_general_orbital_system(self, anti_symmetrize=True):
r"""Function constructing a ``GeneralOrbitalSystem`` by
duplicating every basis element of current system. That is,
.. math:: \psi(\mathbf{r}, t)
            \to \psi(x, t) = \psi(\mathbf{r}, t) \sigma(m_s),
where :math:`x = (\mathbf{r}, m_s)` is a generalized coordinate of both
position :math:`\mathbf{r}` and spin :math:`m_s`, with
:math:`\sigma(m_s)` one of the two spin-functions.
Note that this function creates a copy of the basis set.
Parameters
----------
anti_symmetrize : bool
Whether or not to create the anti-symmetrized two-body elements.
Default is ``True``.
Returns
-------
GeneralOrbitalSystem
The doubly degenerate general spin-orbital system.
See Also
-------
BasisSet.change_to_general_orbital_basis
"""
gos = GeneralOrbitalSystem(
self.n * 2,
self._basis_set.copy_basis(),
anti_symmetrize=anti_symmetrize,
)
        import copy
        if self._time_evolution_operator is not None:
gos.set_time_evolution_operator(
copy.deepcopy(self._time_evolution_operator)
)
return gos
def compute_reference_energy(self):
r"""Function computing the reference energy in an orbital system.
This is given by
.. math:: E_0 = \langle \Phi_0 \rvert \hat{H} \lvert \Phi_0 \rangle
= 2 h^{i}_{i} + 2 u^{ij}_{ij} - u^{ij}_{ji},
where :math:`\lvert \Phi_0 \rangle` is the reference determinant, and
:math:`i, j` are occupied indices.
Returns
-------
complex
The reference energy.
"""
o, v = self.o, self.v
return (
2 * self.np.trace(self.h[o, o])
+ 2
* self.np.trace(self.np.trace(self.u[o, o, o, o], axis1=1, axis2=3))
- self.np.trace(self.np.trace(self.u[o, o, o, o], axis1=1, axis2=2))
)
def construct_fock_matrix(self, h, u, f=None):
r"""Function setting up the restricted Fock matrix in a closed-shell
orbital basis.
In an orbital basis we compute:
.. math:: f^{p}_{q} = h^{p}_{q} + 2 u^{pi}_{qi} - u^{pi}_{iq},
where :math:`p, q, r, s, ...` run over all indices and :math:`i, j, k,
l, ...` correspond to the occupied indices. The two-body elements are
assumed to not be anti-symmetric.
Parameters
----------
h : np.ndarray
The one-body matrix elements.
u : np.ndarray
The two-body matrix elements.
f : np.ndarray
An empty array of the same shape as `h` to be filled with the Fock
matrix elements. Default is `None` which means that we allocate a
new matrix.
Returns
-------
np.ndarray
The filled Fock matrix.
"""
np = self.np
o, v = (self.o, self.v)
if f is None:
f = np.zeros_like(h)
f.fill(0)
f += h
f += 2 * np.einsum("piqi -> pq", u[:, o, :, o])
f -= np.einsum("piiq -> pq", u[:, o, o, :])
return f
def change_to_hf_basis(self, *args, **kwargs):
# TODO: Change to RHF-basis.
raise NotImplementedError("There is currently no RHF implementation")
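# A minimal usage sketch following the doctest above (RandomBasisSet and the numbers
# are illustrative only):
#   from quantum_systems import RandomBasisSet
#   spas = SpatialOrbitalSystem(4, RandomBasisSet(20, 2))
#   e_ref = spas.compute_reference_energy()        # restricted reference energy
#   gos = spas.construct_general_orbital_system()  # spin-doubled copy of the basis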
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-11-04 14:12
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('events', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='SavedFormDataEntry',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('form_data_headers', models.TextField(blank=True, null=True, verbose_name='Form data headers')),
('saved_data', models.TextField(blank=True, null=True, verbose_name='Plugin data')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Date created')),
('form_entry', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='events.Event', verbose_name='Form')),
('invitee', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='events.EmailApp', verbose_name='Invitee')),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='User')),
],
options={
'verbose_name_plural': 'Saved form data entries',
'verbose_name': 'Saved form data entry',
'db_table': 'db_store_savedformdataentry',
'abstract': False,
},
),
]
|
# -*- coding: utf-8 -*-
# @Time : 2019/2/28 17:20
# @Author : shine
# @File : adjustLight.py
from PIL import ImageEnhance, ImageQt, Image, ImageStat
# enhance(factor) returns an enhanced copy of the image object.
# factor is a float greater than 0; a factor of 1 returns the original image.
# Colour enhancement:
# enhancer = ImageEnhance.Color(img)
# Brightness:
# enhancer = ImageEnhance.Brightness(img)
# Contrast:
# enhancer = ImageEnhance.Contrast(img)
# Sharpness:
# enhancer = ImageEnhance.Sharpness(img)
def adjust_image_light(qimage, factor):
image = ImageQt.fromqimage(qimage)
res_qimg = ImageQt.ImageQt(add_light(image, factor))
return res_qimg
def add_light(image, factor):
enhancer = ImageEnhance.Brightness(image)
return enhancer.enhance(factor)
def get_image_mean_light(dst_src):
im = Image.open(dst_src).convert('L')
stat = ImageStat.Stat(im)
return float("%.2f" % stat.mean[0])
|
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 02 22:51:28 2017
@author: netzer
The script takes an xml filename of a file on an ftp server containing
transaction errors as input and - depending on the type of error (error_text)
selected - it processes the failed items once more in several steps.
1) creating a new working directory
2) searching for the error file on the ftp server
3) zipping and transferring the file to the working directory, and unzipping
4) asking the relevant db for the next processing number
5) creating a new import file and naming it correctly according to the db
6) parsing relevant data from the err file and writing it to the new import file,
including correct file naming, product count, product meta-data and date
7) zipping and transferring the new import file to a folder on the
ftp server, and unzipping it there
8) starting a script on the ftp server for importing the data into the db
9) starting another script on the ftp server for further processing
"""
import sys
import os, time, zipfile, gzip, shutil
from sftp import sftp
from XMLs import tx_solds
from mytoolbox import selectCountryConnection, db_lookup, increase_consecutive_no
from selects import last_tx_sld_imp
# Note: this assumes the cards are in status GELIEFERT (delivered)
#########################INPUT#########INPUT##########################################
tx_sold_file = r'RHWG_tx_sld__20190126__000942.xml' # file name taken from the system report
country = 'DEU'
error_text = 'Kartenstatus ist nicht GELIEFERT'
#error_text = 'Unbekannter Produktcode' # only products whose error message starts with this text
#error_text = 'Kartennummer unbekannt'
#error_text = 'Unbekannter Produktcode (EAN)'
#error_text = 'Transaktion hat falschen Kartenwert'
#error_text = 'keine Provision fur Retail Partner definiert'
#####################################################
#####################################################
corrected_ean = '' #this should be empty if the EAN was correct
corrected_value = '' # e.g. 50.00; this should be empty if the crd_value was correct
work_dir = r'R:\IT\Betriebe\Korrekturen' # can be changed if needed
#########################INPUT#########INPUT##########################################
current_date = time.strftime("%Y%m%d") #today's date YYYYMMDD
work_dir = os.path.join(work_dir, country, current_date + '_' + tx_sold_file[:-3])
# #eg R:\IT\Betriebe\Korrekturen\DEU\20170320_RLBB_tx_sld__20170317__002403
os.mkdir(work_dir)
print('new working folder created: ' + work_dir + '\n')
retailer = tx_sold_file[:4]
print('retailer in filename (technical retailer): ' + retailer + '\n')
import_path = '/operational/r_output/' + country + '/archiv/' + retailer + '/in/'
print('path to tx_sold_file on server = ' + import_path + '\n')
file_listing = sftp.listdir(import_path)
i = len(file_listing)-1
while tx_sold_file[:-3] not in file_listing[i]:
print(file_listing[i])
i = i - 1
else:
print('file found \n')
zipped_tx_sold = file_listing[i] # the zipped file
print('full file name = ' + zipped_tx_sold + '\n')
import_path = import_path + zipped_tx_sold
print('import_path: ' + import_path + '\n')
print('copy file to R \n')
sftp.get(import_path, os.path.join(work_dir, zipped_tx_sold))
print('done copying ' + zipped_tx_sold + '\n')
#sftp.close()
to_be_unzipped = os.path.join(work_dir, zipped_tx_sold)
zfile = zipfile.ZipFile(to_be_unzipped)
zfile.extractall(work_dir)
zfile.close()
print('zip file successfully decompressed \n')
if tx_sold_file[:-3] + 'err' in os.listdir(work_dir):
print(tx_sold_file[:-3] + 'err' + ' exists and will be processed now \n')
####Retrieve consecutive number of last tx_sld for RREG###########################
cursor = selectCountryConnection(country)
resultset1 = db_lookup(cursor, last_tx_sld_imp)
print(resultset1[0][:])
ppr_id = resultset1[0][0]
print('ppr_id =' + str(ppr_id))
numbering = str(resultset1[0][7])
print('Name of last tx_sld Import: ' + str(numbering))
fln = resultset1[0][7][-10:-4]
RREG_filename = 'RREG_tx_sld__' + str(current_date) + '__' + str(increase_consecutive_no(fln, 6)) + '.xml'
print('new filename: ' + str(RREG_filename))
####Retrieve number of last tx_sld for RREG###########################
##### Parse relevant data from err file and write it to new RREG xml ################################
err_data = os.path.join(work_dir, tx_sold_file[:-3] + 'err')
from xml.etree import ElementTree as ET
tree1 = ET.parse(err_data)
record_count_err = tree1.find('header/processed_err').text
tree2 = ET.ElementTree(ET.fromstring(tx_solds))
file_name = tree2.find('header/file_name')
file_name.text = RREG_filename
print('Insert file name ' + str(RREG_filename))
export_date = tree2.find('header/export_date')
export_date.text = time.strftime("%Y-%m-%d")
print('Insert export date = ' + str(export_date.text))
transactions = tree2.find('transactions')
card_numbers = []
record_count = 0
for i in tree1.findall('./error/err_content'):
if (i.find('err_desc').text).startswith(error_text):
tx = ET.SubElement(transactions, 'tx')
crd_no = ET.SubElement(tx, 'crd_no')
crd_no.text = i.find('crd_no').text
card_numbers.append(crd_no.text)
prt = ET.SubElement(tx, 'prt')
prt.text = i.find('prt').text
retailer = i.find('prt').text #retailer is given by real (not technical) retailer abbreviation !!!
type_ = ET.SubElement(tx, 'type')
type_.text = i.find('type').text
date_ = ET.SubElement(tx, 'date')
date_.text = i.find('date').text
pos_ = ET.SubElement(tx, 'pos')
pos_.text = i.find('pos').text
ean_no = ET.SubElement(tx, 'ean_no')
ean_no.text = i.find('ean_no').text
        if corrected_ean != '':
ean_no.text = corrected_ean
crd_value = ET.SubElement(tx, 'crd_value')
crd_value.text = i.find('crd_value').text
        if corrected_value != '':
crd_value.text = corrected_value
record_count += 1
print('Note that number of error products in err file = ' + str(record_count_err))
print('and that number of error products written into RREG file = ' + str(record_count) + '\n' )
print('following card numbers will be further processed: ')
for i in card_numbers:
print(i)
record_count_ = tree2.find('header/record_count')
record_count_.text = str(record_count)
#tree2.write(os.path.join(work_dir, RREG_filename), encoding='utf8', method='xml')
tree2.write(os.path.join(work_dir, RREG_filename), method='xml')
print('done writing RREG file \n')
##### Parse relevant data from err file and write it to new RREG xml ################################
###############zip xml file and send it to sftp-server and unzip########################
RREG_path = os.path.join(work_dir, RREG_filename)
print(os.access(RREG_path, os.F_OK))
with open(os.path.abspath(RREG_path), 'rb') as f_in, gzip.open(os.path.abspath(RREG_path + '.gz'), 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
sftp.cwd('/operational/ftpdings/ftp_homie/' + str(country) + '/RREG/LIVE/in')
sftp.put(RREG_path + '.gz')
print('directory contents: \n')
print(sftp.listdir())
sftp.execute('gzip -d /operational/ftpdings/ftp_homie/' + str(country) + '/RREG/LIVE/in/' + RREG_filename + '.gz')
print(sftp.listdir())
##############zip xml file and send it to sftp-server and unzip########################
########################## start scripts ############################################
print('starting the pearl script: ./pearl RREG SOLD admin_name admin_name_pwd \n')
sftp.execute('cd /operational/jboss/prod_server/' + str(country) + '/r-scheduler/; ./pearl RREG SOLD admin_name admin_name_pwd')
print('starting the pearl script: ./pearl ' + retailer + ' PROCESS2 admin_name admin_name_pwd \n')
command_string = './pearl ' + retailer + ' PROCESS2 admin_name admin_name_pwd'
sftp.execute('cd /operational/jboss/prod_server/' + str(country) + '/r-scheduler/;' + command_string)
########################## start scripts ############################################
sftp.close()
cursor.close()
print('done')
|