from django.apps import AppConfig
class LearningRestFrameworkConfig(AppConfig):
name = 'learning_rest_framework'
|
import PySimpleGUI as sg
import os.path
import const
import sysSettings
sg.theme("SystemDefaultForReal")
sg.set_options(button_color=('#000000','#cecece'), auto_size_buttons=False, button_element_size=(10,1))
# noinspection PySimplifyBooleanCheck
def popCharge(batteryLevel=20, currentLimit=16, file=const.C_DEFAULT_SETTINGS_FILE, pop_location = (200,200) ):
"""
    Reads the settings file and writes it back if the operator changed anything.
    :param batteryLevel: actual battery level in %
    :param currentLimit: maximum allowed charge current in A
    :param file: JSON file containing the manual settings under the key 'manual'
    :param pop_location: screen coordinates at which the popup window is placed
    :return: True if the operator confirmed with 'Charge!', False if cancelled or the window was closed
"""
if os.path.isfile(const.C_DEFAULT_SETTINGS_FILE):
settings = sysSettings.readSettings(const.C_DEFAULT_SETTINGS_FILE)
else: # this is for test only, should never happen
settings = sysSettings.defaultSettings
sysSettings.writeSettings(const.C_DEFAULT_SETTINGS_FILE, settings)
manualSettings = settings['manual']
done = False
chargeLimit = manualSettings['chargeLimit']
currentSet = manualSettings['currentSet']
phaseSet = manualSettings['phaseSet']
phases = manualSettings['3_phases']
layout_popC = [[sg.Text('Battery Level:', pad=0)],
[sg.ProgressBar(100, orientation='h', size=(20, 10), key='-BATT_LEVEL BAR-',
bar_color=('lightgreen', 'grey')),
sg.Text(batteryLevel, key='-BATT_LEVEL DISP-', pad=0), sg.Text('%')],
[sg.Text('Charge Limit:', pad=0)],
[sg.Slider(k='-CHARGE LIMIT-', default_value=chargeLimit, range=(20, 100), orientation='h',
s=(25, 10), tick_interval=20), sg.Text('%')],
[sg.Text('Charge Current:', pad=0)],
[sg.Slider(k='-CURRENT-', default_value=currentSet, range=(6, currentLimit), orientation='h',
s=(25, 10), tick_interval=currentLimit / 8), sg.Text('A')],
[sg.Radio('1 Phase', "RADIO1", k='-1_PHASE-', default=not phases),
sg.Radio('3 Phases', "RADIO1", k='-3_PHASES-', default=phases)],
[sg.HSeparator(pad=(0, 1))],
# [sg.Frame('', [[sg.Button('Cancel'), sg.Button('Charge!', focus=True)]])]]
[sg.Button('Cancel'), sg.Button('Charge!', focus=True)]]
# test global padding popWin = sg.Window('Manual Charge Options', layout_popC, element_padding=0)
popWin = sg.Window('Manual Charge Options', layout_popC, location=pop_location, modal=True, icon=const.C_LOGO)
# if phaseSet == False:
# popWin['-3_PHASES-'].update(visible = False)
# sg.Radio.visible = False
exitPop = False
while not exitPop:
ev2, val2 = popWin.read(100)
if ev2 == sg.WIN_CLOSED or ev2 == 'Cancel':
done = False
exitPop = True
if ev2 == 'Charge!':
manualSettings['currentSet'] = int(val2['-CURRENT-'])
manualSettings['3_phases'] = val2['-3_PHASES-']
manualSettings['chargeLimit'] = val2['-CHARGE LIMIT-']
done = True
exitPop = True
if phaseSet == False:
popWin['-3_PHASES-'].update(visible=False)
popWin['-1_PHASE-'].update(visible=False)
popWin['-BATT_LEVEL BAR-'].UpdateBar(batteryLevel)
popWin['-BATT_LEVEL DISP-'].update(batteryLevel)
settings['manual'] = manualSettings
sysSettings.writeSettings(file, settings)
popWin.close()
return done
if __name__ == "__main__":
result = popCharge()
print(result)
|
import sys
import struct
import time
from i2cdriver import I2CDriver, EDS
if __name__ == '__main__':
i2 = I2CDriver(sys.argv[1])
d = EDS.Magnet(i2)
while 1:
print(d.measurement())
|
# Copyright (c) 2021 Zenqi
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# TODO: This module is used for handling all modules
from typing import Any
from typing import Callable
from typing import Optional
from typing import Type
from typing import List
from ._event import EventHandler
from .endpoint import Route
from .endpoint import Method
from .user import User
import datetime
from .embed import Embed
from . import get_tokens
import asyncio
import signal
class Discord:
"""
    A class used for handling all modules,
    containing both data sending and integration
    with Discord components
"""
def __init__(
self,
*,
loop: Optional[asyncio.AbstractEventLoop] = None
):
self.eventhandler = EventHandler()
self.user = None
self.loop = loop or asyncio.get_event_loop()
self.tokens = get_tokens()
def on(self, type, handler: Callable[..., Any] = None):
"""
A wrapper for `event.Event.on()`
"""
return self.eventhandler.on(type, handler)
def send_message(
self,
channel_id: int,
content: str,
embeds: List[Type['Embed']] = [],
tts: Optional[bool] = False,
timestamp: Optional[datetime.datetime] = None
):
if self.user:
self.user.send_message(
channel_id,
content=content,
embeds=embeds,
tts=tts,
timestamp=timestamp
)
else:
raise UserError('User is missing')
async def __listen_to_event(self, token):
self.user = User(token)
self.eventhandler.emit('ready', self.user)
def run(self):
"""
Wrapper for running coroutine task
"""
loop = self.loop
try:
loop.add_signal_handler(signal.SIGINT, lambda: loop.stop())
loop.add_signal_handler(signal.SIGTERM, lambda: loop.stop())
except Exception:
pass
async def run_loop():
for token in self.tokens:
await self.__listen_to_event(token)
def stop_loop_on_completion(f):
loop.stop()
future = asyncio.ensure_future(run_loop(), loop=loop)
future.add_done_callback(stop_loop_on_completion)
try:
loop.run_forever()
except KeyboardInterrupt:
loop.stop()
finally:
future.remove_done_callback(stop_loop_on_completion)
class DiscordWare:
"""
A main class for `DiscordWare` that contains
all workers like spreader, account delete
"""
def __init__(self):
pass
class UserError(Exception):
pass
|
#!/usr/bin/python3
# -*- coding: UTF-8 -*-
' a test module '  # module docstring: the first string literal in a module is treated as the module's documentation
__author__ = 'yang'  # record the author in the __author__ variable so readers can see who wrote the module once the source is published
# from LYFibonacci import module1, module2
from collections import *  # a simple way to import every public name from a module; this kind of statement should not be overused
# import ly_sum
# from ly_sum import sum123,sum1234
from ly_sum import *  # note that wildcard * imports from a module or package are discouraged in practice, because they make the code hard to read
import LYFibonacci.ly_fibo
import sys
import datetime
print(isinstance('abc', Iterator))
LYFibonacci.ly_fibo.fib(1000)
print(LYFibonacci.ly_fibo.fib2(100))
print(LYFibonacci.ly_fibo.__name__)
# print(ly_sum.sum123(90, 60))
print(sum123(90, 60))
print(sum1234(10, 10, 10))
print('--------------sys----------------')
print('The command line arguments are:')
for i in sys.argv:
    print(i)
print('The Python path is:', sys.path)
print(datetime.date.today())
print(datetime.datetime.today())
print(sys)
print(LYFibonacci.ly_fibo)
|
from REST_WebFramework import core, http
class RequestValidator:
def get_response_or_none (request):
scheme = request['header']['scheme']
method = request['header']['method']
host = request['header']['Host']
port = int(request['header']['Port'])
if (scheme != core.configuration.SCHEME):
return http.HttpResponse(request, status_code=403)
if (method not in core.configuration.METHODS):
return http.HttpResponse(request,
status_code=405, methods=core.configuration.METHODS,
)
if ((host != core.configuration.SERVER_HOST)
and (host not in core.configuration.SETTINGS.ALLOWED_HOSTS)
and ('*' not in core.configuration.SETTINGS.ALLOWED_HOSTS)
and (host != core.configuration.DEFAULT_HOST)
):
return http.HttpResponse(request, status_code=403)
if (port != core.configuration.SERVER_PORT):
return http.HttpResponse(request, status_code=403)
return None
|
# -*- coding:utf-8 -*-
from enum import Enum
def get_enum(enumclass: Enum, value) -> Enum:
for en in enumclass:
if en.value == value:
return en
return None
class ColorTableType(Enum):
GLOBAL = 1
LOCAL = 2
class BlockType(Enum):
EXTENTION = 0x21
IMAGE_DESC = 0x2C
EOF = 0x3B
class ExtensionLabel(Enum):
GRAPHIC_CONTROL = 0xF9
COMMENT = 0xFE
APPLICATION = 0xFF
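# A minimal usage sketch of get_enum with the enums defined above: look up a member
# by its raw value, falling back to None when nothing matches.
if __name__ == '__main__':
    print(get_enum(BlockType, 0x2C))       # BlockType.IMAGE_DESC
    print(get_enum(ExtensionLabel, 0xF9))  # ExtensionLabel.GRAPHIC_CONTROL
    print(get_enum(BlockType, 0x00))       # None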
|
from armulator.armv6.opcodes.abstract_opcodes.stm_user_registers import StmUserRegisters
from armulator.armv6.opcodes.opcode import Opcode
class StmUserRegistersA1(StmUserRegisters, Opcode):
def __init__(self, instruction, increment, word_higher, registers, n):
Opcode.__init__(self, instruction)
StmUserRegisters.__init__(self, increment, word_higher, registers, n)
def is_pc_changing_opcode(self):
return False
@staticmethod
def from_bitarray(instr, processor):
register_list = instr[16:32]
rn = instr[12:16]
increment = instr[8]
word_higher = increment == instr[7]
if rn.uint == 15 or register_list.count(1) < 1:
            print("unpredictable")
else:
return StmUserRegistersA1(instr, **{"increment": increment, "word_higher": word_higher,
"registers": register_list, "n": rn.uint})
|
from checkov.common.models.enums import CheckResult, CheckCategories
from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
class BucketsNotPublic(BaseResourceCheck):
def __init__(self):
name = "Bucket should not be public"
id = "CKV_GCP_997"
supported_resources = ['google_storage_bucket_iam_binding']
categories = [CheckCategories.GENERAL_SECURITY]
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
def scan_resource_conf(self, conf):
#check members does not contain allUsers
if 'members' in conf.keys():
members = conf['members']
if 'allUsers' in members:
return CheckResult.FAILED
else:
return CheckResult.PASSED
else:
return CheckResult.PASSED
check = BucketsNotPublic()
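# A minimal sketch of how the check behaves (hypothetical conf dicts; real checkov
# resource configurations may nest these values differently):
if __name__ == '__main__':
    public_conf = {'members': ['allUsers'], 'role': ['roles/storage.objectViewer']}
    private_conf = {'members': ['user:[email protected]'], 'role': ['roles/storage.admin']}
    print(check.scan_resource_conf(public_conf))    # CheckResult.FAILED
    print(check.scan_resource_conf(private_conf))   # CheckResult.PASSED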
|
import itertools
import pytest
import requests_cache
class DumbConnection(object):
def __init__(self, parent):
self.parent = parent
def close(self):
self.parent.close()
class DumbCachedSession(requests_cache.CachedSession):
"""
Spotipy calls connection.close() after an API request
but regular CachedSession's response object doesn't
have a connection attribute.
Too dumb to figure it out so here's a hack ...
"""
def send(self, request, **kwargs):
response = super(DumbCachedSession, self).send(request, **kwargs)
response.connection = DumbConnection(self)
return response
@pytest.fixture(scope='session')
def spotify():
import os
from playlistcake import sessionenv
from playlistcake import spotifystuff
token = eval(os.getenv('PLAYLISTCAKE_TEST_TOKEN'))
sessionenv.set('spotify_token', token)
sessionenv.set('spotify_kwargs',
{'requests_session': DumbCachedSession()})
return spotifystuff.get_spotify()
@pytest.fixture(scope='session')
def some_artists(spotify):
from playlistcake.sources import find_artist
artists = [
find_artist('Tom Waits'),
find_artist('The Beatles'),
find_artist('Bright Eyes'),
find_artist('Howlin Wolf'),
find_artist('Led Zeppelin'),
find_artist('Pink Floyd'),
find_artist('Eels'),
find_artist('The Who'),
find_artist('Eminem'),
find_artist('Rihanna'),
find_artist('Jimi Hendrix'),
find_artist('Aesop Rock'),
find_artist('The Album Leaf'),
find_artist('Ezra Furman'), ]
return artists
@pytest.fixture(scope='session')
def some_albums(spotify):
from playlistcake.sources import find_album
albums = [
find_album('Tom Waits', 'Swordfishtrombones'), # 1983
find_album('Ezra Furman', 'Day of the dog'), # 2013
find_album('Sonic Youth', 'Goo'), # 1990
find_album('The Beatles', 'Revolver'), # 1966
find_album('Eels', 'Shootenanny'),
find_album('The Album Leaf', 'In a safe place'),
find_album('Eminem', 'Relapse'), ]
return albums
@pytest.fixture(scope='session')
def some_tracks(some_albums):
from random import shuffle
from playlistcake.sources import tracks_from_albums
tracks = list(tracks_from_albums(some_albums))
shuffle(tracks)
return tracks
def test_sort_by_audio_feature(some_tracks):
from playlistcake.sources import tracks_sort_by_audio_feature
sortedt = list(tracks_sort_by_audio_feature(
some_tracks, 'energy', order='asc'))
for i, track in enumerate(sortedt):
energy = track['audio_features']['energy']
print('Track {}: energy:{}'.format(i, energy))
if i == 0:
continue
prevtrack = sortedt[i-1]
assert energy >= prevtrack['audio_features']['energy']
def test_sort_by_multiple(some_tracks):
from playlistcake.sources import items_sorted_by_attributes
sortedt = items_sorted_by_attributes(
some_tracks,
sort_func=lambda x: (x['artists'][0]['name'], x['popularity']))
# sortedt = items_sorted_by_attributes(
# some_tracks,
# sort_func=lambda x: x['popularity'])
sortedt = list(sortedt)
for track in sortedt:
print('{}:{} - {}'.format(
track['popularity'], track['artists'][0]['name'], track['name']))
assert 1==2
def test_filter_album_release_year(some_albums):
import isodate
from playlistcake.sources import albums_filter_release_year
start, end = 1980, 1990
filtered = list(albums_filter_release_year(
some_albums, start=start, end=end))
for album in some_albums:
rdate = isodate.parse_date(album['release_date'])
if start <= rdate.year <= end:
assert album in filtered
else:
assert album not in filtered
def test_filter_tracks_release_year(some_tracks):
import isodate
from playlistcake.sources import (tracks_filter_release_year,
full_album)
start, end = 1980, 1990
filtered = list(tracks_filter_release_year(
some_tracks, start=start, end=end))
for track in some_tracks:
album = full_album(track['album']['id'])
rdate = isodate.parse_date(album['release_date'])
if start <= rdate.year <= end:
assert track in filtered
else:
assert track not in filtered
def test_filter_artist_variety(some_tracks):
from playlistcake.sources import (tracks_filter_artist_variety)
filtered = list(tracks_filter_artist_variety(
some_tracks, 1))
seen_before = set()
for track in filtered:
aid = track['artists'][0]['id']
assert aid not in seen_before
seen_before.add(aid)
filtered = list(tracks_filter_artist_variety(
some_tracks, 3))
seen_count = {}
for track in filtered:
aid = track['artists'][0]['id']
if aid in seen_count:
seen_count[aid] += 1
else:
seen_count[aid] = 1
assert seen_count[aid] <= 3
assert len(seen_count) > 0
def test_filter_unique(some_tracks):
from playlistcake.sources import tracks_filter_unique
t1 = some_tracks.copy()
t2 = some_tracks.copy()
tracks = t1 + t2
filtered = list(tracks_filter_unique(tracks))
assert len(filtered) == len(tracks)/2
def test_filter_audio_features(some_tracks):
raise NotImplementedError
def test_decorators():
from playlistcake import library, sources, filters
from playlistcake.genutils import content_type
artists = library.saved_artists()
albums = sources.artists_albums(artists)
tracks = sources.artists_top_tracks(artists, 1)
assert content_type(artists) == 'artists'
assert content_type(albums) == 'albums'
assert content_type(tracks) == 'tracks'
filtered = filters.filter_release_years(tracks)
assert content_type(filtered) == 'tracks'
filtered = filters.filter_release_years(albums)
assert content_type(filtered) == 'albums'
|
from datetime import datetime
'''
from app import db
class User(db.Model):
__tablename__ = 'users'
id = db.Column(db.String, nullable=False, primary_key=True)
created = db.Column(db.DateTime, default=datetime.utcnow, nullable=False)
updated = db.Column(db.DateTime, default=datetime.utcnow, nullable=False,
onupdate=datetime.utcnow)
name = db.Column(db.String, nullable=False)
profile_url = db.Column(db.String, nullable=False)
access_token = db.Column(db.String, nullable=False)
'''
class User:
def __init__(self,id,name,profile_url,access_token):
self.id = id
self.name = name
self.profile_url = profile_url
self.access_token = access_token
|
import marshmallow
class BaseSchema(marshmallow.Schema):
class Meta:
unknown = marshmallow.EXCLUDE
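# A minimal usage sketch (hypothetical field names): schemas inheriting from BaseSchema
# silently drop unknown keys on load instead of raising a ValidationError.
if __name__ == '__main__':
    class ExampleSchema(BaseSchema):
        name = marshmallow.fields.Str(required=True)
    print(ExampleSchema().load({'name': 'demo', 'unexpected': 42}))  # {'name': 'demo'}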
|
from . import test_audio
from . import test_dataframe
|
from skyfield.api import Topos, load
from skyfield.constants import ERAD
from skyfield.framelib import itrs, true_equator_and_equinox_of_date
from skyfield.positionlib import Geocentric
def test_frame_rotation():
# Does a frame's rotation and twist get applied in the right
# directions? Let's test whether the position and velocity of an
# ITRS vector (ERAD,0,0) are restored to the proper orientation.
top = Topos(latitude_degrees=0, longitude_degrees=0)
ts = load.timescale()
t = ts.utc(2020, 11, 27, 15, 34) # Arbitrary time; LST ~= 20.03.
p = top.at(t)
r = p.frame_xyz(itrs)
assert max(abs(r.m - [ERAD, 0, 0])) < 4e-8 # meters
r, v = p.frame_xyz_and_velocity(itrs)
assert max(abs(r.m - [ERAD, 0, 0])) < 4e-8 # meters
assert max(abs(v.km_per_s)) < 3e-15 # km/s
def test_from_frame_method():
ts = load.timescale()
t = ts.utc(2020, 11, 27, 15, 34)
g1 = Geocentric([1,2,3], [4,5,6], t=t)
r, v = g1.frame_xyz_and_velocity(itrs) # which we trust: see the test above
g2 = Geocentric.from_time_and_frame_vectors(t, itrs, r, v)
assert max(abs(g2.position.au - [1,2,3])) < 2e-14
assert max(abs(g2.velocity.au_per_d - [4,5,6])) < 3e-14
# Make sure original vectors were not harmed (for example, by "+=").
assert list(g1.position.au) == [1,2,3]
assert list(g1.velocity.au_per_d) == [4,5,6]
def test_frame_without_spin():
ts = load.timescale()
t = ts.utc(2020, 11, 27, 15, 34)
g = Geocentric([1,2,3], [4,5,6], t=t)
# Simply test whether "None" spin raises an exception in either direction.
f = true_equator_and_equinox_of_date
r, v = g.frame_xyz_and_velocity(f)
Geocentric.from_time_and_frame_vectors(t, f, r, v)
|
#
# ovirt-engine-setup -- ovirt engine setup
# Copyright (C) 2013-2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Firewall configuration plugin for AIO.
"""
import gettext
from otopi import plugin, util
from ovirt_engine_setup import constants as osetupcons
from ovirt_engine_setup.engine import constants as oenginecons
def _(m):
return gettext.dgettext(message=m, domain='ovirt-engine-setup')
@util.export
class Plugin(plugin.PluginBase):
"""
Firewall configuration plugin for AIO
"""
def __init__(self, context):
super(Plugin, self).__init__(context=context)
@plugin.event(
stage=plugin.Stages.STAGE_CUSTOMIZATION,
after=(
osetupcons.Stages.NET_FIREWALL_MANAGER_AVAILABLE,
oenginecons.Stages.AIO_CONFIG_AVAILABLE,
),
# must be run before firewall_manager plugin
condition=lambda self: self.environment[oenginecons.AIOEnv.CONFIGURE],
# must be always enabled to create examples
)
def _configuration(self):
self.environment[osetupcons.NetEnv.FIREWALLD_SERVICES].append(
{
'name': 'ovirt-aio',
'directory': 'aio'
}
)
# vim: expandtab tabstop=4 shiftwidth=4
|
import dropbox
class transferData:
def __init__(self,access_token):
self.access_token = access_token
def uploadfiles(self,filefrom,fileto):
dbx = dropbox.Dropbox(self.access_token)
f = open (filefrom,"rb")
dbx.files_upload(f.read(),fileto)
def main():
access_token="sl.Aibo5HljdO-1l1P0xZTXk1BJMUZTzoVsglZxGQElHpe_roa4EJX8xVMvAVKAYrhN2e4yFrKJo43DIHpDkX9HFZWSKgZLXQVa0ZwENZXgrYMNunXbVKNDuNXLZNRVH98qZDJ-qGU"
transferdata=transferData(access_token)
filefrom = input("enter the file path to transfer: ")
fileto = input("enter the file to upload into dropbox: ")
transferdata.uploadfiles(filefrom,fileto)
print("file has been moved")
main()
|
from django.contrib.auth.models import AbstractUser
from django.core.urlresolvers import reverse
from django.db import models
from users.validators import validate_facebook_url
class OwnerProfile(AbstractUser):
is_information_confirmed = models.BooleanField(default=False)
facebook = models.URLField(max_length=250, blank=True, null=True,
validators=[validate_facebook_url])
phone = models.CharField('Telefone', max_length=30, blank=True)
def get_absolute_url(self):
return reverse('users:user_profile', args=[self.id])
def __str__(self):
return self.username
|
from discord.ext import commands
from apscheduler.schedulers.asyncio import AsyncIOScheduler
import asyncio
import datetime
class Break(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.scheduler= AsyncIOScheduler({'apscheduler.timezone': 'Europe/Helsinki'})
self.scheduler.start()
self.setup_in_progress = False
@commands.group(name='break', help='Handles reminders for break time')
async def _break(self, ctx):
if ctx.subcommand_passed is None:
await ctx.send_help(ctx.command)
@_break.command(help='Shows the breaks that have been set up')
async def show(self, ctx):
jobs = self.scheduler.get_jobs()
if len(jobs) == 0:
await ctx.send('No job setup. Schedule on with \'!break setup\'.')
else:
jobs_str = [self.job_tostring(j, f'Break #{i}', j.id)
for i, j in enumerate(jobs)]
await ctx.send('\n'.join(jobs_str))
@_break.command(help='Removes a break by id')
async def remove(self, ctx, id):
if self.scheduler.get_job(id) is None:
await ctx.send(f'No break with id \'{id}\' exists.')
else:
self.scheduler.remove_job(id)
await ctx.send(f'Break with id \'{id}\' removed successfully.')
@_break.command(help='Removes all breaks.')
async def clear(self, ctx):
self.scheduler.remove_all_jobs()
await ctx.send('All breaks have been removed successfully.')
@_break.command(help='Sets up the break time interactively, use \'!break abort\' to abort')
async def setup(self, ctx, id=None):
if self.setup_in_progress:
await ctx.send('Another break setup is in progress, please wait for it to finish.')
return
self.setup_in_progress = True
job_id = id if id is not None else f'break_{len(self.scheduler.get_jobs()) + 1}'
def check_context(m):
return m.channel == ctx.channel and m.author == ctx.author
def check_command(m):
# Only allow '!break abort' through
return m.content == '!break abort' or not m.content.startswith(ctx.prefix)
def check_range(m, lower_inc, upper_inc):
try:
num = int(m.content)
return num >= lower_inc and num <= upper_inc
except ValueError:
return False
def check_message(m):
return check_context(m) and check_command(m)
def check_weekday(m):
if not check_context(m):
return False
if check_command(m):
return True
if m.content in ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']:
return True
return check_range(m, 0, 6)
def check_hour(m):
if not check_context(m):
return False
if check_command(m):
return True
return check_range(m, 0, 23)
def check_minute(m):
if not check_context(m):
return False
if check_command(m):
return True
return check_range(m, 0, 59)
timeout_err_msg = 'Took too long to answer, aborting break time setup.'
msg = await self._prompt(ctx, 'Message?', check_message, timeout_err_msg, 60.0)
if msg is None:
return
weekday = await self._prompt(ctx, 'Week day?', check_weekday, timeout_err_msg, 60.0)
if weekday is None:
return
hour = await self._prompt(ctx, 'Hour(s)?', check_hour, timeout_err_msg, 60.0)
if hour is None:
return
minute = await self._prompt(ctx, 'Minute(s)?', check_minute, timeout_err_msg, 60.0)
if minute is None:
return
try:
self.scheduler.add_job(send_message, 'cron', args=[ctx, msg], name=msg,
id=job_id, replace_existing=True,
day_of_week=weekday, hour=hour, minute=minute)
await ctx.send('Break setup successfully.')
except ValueError:
await ctx.send('Invalid argument format(s)! Try again.')
self.setup_in_progress = False
async def _prompt(self, ctx, msg, check, err_msg=None, timeout_sec=60.0):
await ctx.send(msg)
try:
reply = await self.bot.wait_for('message', check=check, timeout=timeout_sec)
if reply.content == '!break abort':
await self._abort_setup(ctx, 'Setup aborted.')
return None
return reply.content
except asyncio.TimeoutError:
await self._abort_setup(ctx, err_msg)
return None
async def _abort_setup(self, ctx, msg=None):
if msg is not None:
await ctx.send(msg)
self.setup_in_progress = False
def job_tostring(self, job, title, id):
t = job.trigger
fields = {f.name: str(f) for f in t.fields}
time = datetime.time(hour=int(fields['hour']),
minute=int(fields['minute']))
return f'''{title} (id: {id})
Message: {job.name}
When: every {fields['day_of_week']} at {time}
'''
async def send_message(ctx, msg):
await ctx.send(msg)
def setup(bot):
bot.add_cog(Break(bot))
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" test_pynative_embeddinglookup """
import pytest
import numpy as np
import mindspore.ops.operations as op
from mindspore import Tensor, context
from mindspore.nn import Cell
def setup_module():
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
class MetaFactory:
def __init__(self):
self.device_target = context.get_context('device_target')
self.rank_size = None
self.device_id = None
self.global_rank_id = None
class OpsFactory(MetaFactory):
def __init__(self, dtype=np.float16):
super().__init__()
self.dtype = dtype
if self.dtype == np.float16:
self.loss = 1e-3
elif self.dtype == np.float32:
self.loss = 1e-4
elif self.dtype == np.float64:
self.loss = 1e-5
else:
self.loss = 0
class EmbeddingLookup(Cell):
def __init__(self, offset):
super().__init__()
self.op = op.EmbeddingLookup()
self.offset = offset
def construct(self, params, indices):
x = self.op(params, indices, self.offset)
return x
class EmbeddingLookupFactory(OpsFactory):
def __init__(self, params_shape, indices_shape, offset=0, low=0, high=2, dtype=np.float32, ids_type=np.int32):
super().__init__(dtype=dtype)
self.input_np = np.random.randn(*params_shape).astype(dtype)
self.indices_np = np.random.randint(low, high, size=indices_shape).astype(ids_type)
self.offset = offset
self.output_grad_np = None
def forward_mindspore_impl(self):
net = EmbeddingLookup(self.offset)
out = net(Tensor(self.input_np), Tensor(self.indices_np))
return out.asnumpy()
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_embeddinglookup_indices_outrange():
fact = EmbeddingLookupFactory(params_shape=(2, 4), indices_shape=(2, 3), low=1, high=3, offset=10, dtype=np.int8)
out = fact.forward_mindspore_impl()
out_expect = np.zeros((2, 3, 4))
    assert np.allclose(out_expect, out)
|
#!/usr/bin/env python
import os
import os.path
import zipfile
configs_zip_name = 'Configs.zip'
script_dir = os.getcwd() #os.path.dirname(os.path.realpath(__file__))
exclude_files = ['idchecker.py', 'genconfigszip.py', 'blockids.py', 'biomeids.py', 'itemids.py', configs_zip_name]
exclude_dirs = ['options_gen']
dircontents = os.listdir(".")
# remove old configs zip
if os.path.exists(configs_zip_name):
os.remove(configs_zip_name)
with zipfile.ZipFile('Configs.zip', 'w', zipfile.ZIP_DEFLATED) as zip:
for dirpath, dirnames, filenames in os.walk(script_dir):
reldirpath = os.path.relpath(dirpath)
if reldirpath not in exclude_dirs:
for filename in filenames:
if filename not in exclude_files:
filepath = os.path.join(reldirpath, filename)
print("Adding file: " + filepath)
zip.write(filepath)
|
from torch.utils.data import Dataset  # data loader
from kogpt2.utils import download, tokenizer, get_tokenizer
from gluonnlp.data import SentencepieceTokenizer
import gluonnlp
import numpy as np
def sentencePieceTokenizer():
tok_path = get_tokenizer()
sentencepieceTokenizer = SentencepieceTokenizer(tok_path)
return sentencepieceTokenizer
def koGPT2Vocab():
cachedir = '~/kogpt2/'
# download vocab
vocab_info = tokenizer
vocab_path = download(vocab_info['url'],
vocab_info['fname'],
vocab_info['chksum'],
cachedir=cachedir)
koGPT2_vocab = gluonnlp.vocab.BERTVocab.from_sentencepiece(vocab_path,
mask_token=None,
sep_token=None,
cls_token=None,
unknown_token='<unk>',
padding_token='<pad>',
bos_token='<s>',
eos_token='</s>')
return koGPT2_vocab
def toString(list):
if not list:
return ''
result = ''
for i in list:
result = result + i
return result
class NovelDataset(Dataset):
"""web novel dataset"""
def __init__(self, file_path,vocab,tokenizer):
self.file_path = file_path
self.data =[]
self.vocab =vocab
self.tokenizer = tokenizer
file = open(self.file_path, 'r', encoding='utf-8')
while True:
line = file.readline()
if not line:
break
            tokenized_line = tokenizer(line[:-1])
            index_of_words = [vocab[vocab.bos_token],] + vocab[tokenized_line] + [vocab[vocab.eos_token]]
self.data.append(index_of_words)
file.close()
def __len__(self):
return len(self.data)
def __getitem__(self,index):
item = self.data[index]
# print(item)
return item
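# A minimal usage sketch (hypothetical corpus file 'novel.txt'): wire the dataset up with
# the koGPT2 vocab and SentencePiece tokenizer helpers defined above.
# vocab = koGPT2Vocab()
# tok = sentencePieceTokenizer()
# dataset = NovelDataset('novel.txt', vocab, tok)
# print(len(dataset), dataset[0][:10])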
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: prediction_service.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import predict_pb2 as predict__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='prediction_service.proto',
package='tensorflow.serving',
syntax='proto3',
serialized_options=_b('\370\001\001'),
serialized_pb=_b('\n\x18prediction_service.proto\x12\x12tensorflow.serving\x1a\rpredict.proto2g\n\x11PredictionService\x12R\n\x07Predict\x12\".tensorflow.serving.PredictRequest\x1a#.tensorflow.serving.PredictResponseB\x03\xf8\x01\x01\x62\x06proto3')
,
dependencies=[predict__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DESCRIPTOR._options = None
_PREDICTIONSERVICE = _descriptor.ServiceDescriptor(
name='PredictionService',
full_name='tensorflow.serving.PredictionService',
file=DESCRIPTOR,
index=0,
serialized_options=None,
serialized_start=63,
serialized_end=166,
methods=[
_descriptor.MethodDescriptor(
name='Predict',
full_name='tensorflow.serving.PredictionService.Predict',
index=0,
containing_service=None,
input_type=predict__pb2._PREDICTREQUEST,
output_type=predict__pb2._PREDICTRESPONSE,
serialized_options=None,
),
])
_sym_db.RegisterServiceDescriptor(_PREDICTIONSERVICE)
DESCRIPTOR.services_by_name['PredictionService'] = _PREDICTIONSERVICE
# @@protoc_insertion_point(module_scope)
|
import numpy as np
def orthoTrans(params, trans, etc):
"""
This function uses principal component analysis to modify parameter values.
Parameters
----------
    params: Array of params to be modified, length is npars for 1D
            If 2D, shape is npars x nsteps
    trans:  Transformation matrix, np.matrix() type, shape is npars x npars
    etc:    Tuple (origin, sigma):
            origin: Array of length npars indicating the reference frame origin
            sigma:  Array of length npars indicating the parameter uncertainties
Returns
-------
This function returns the modified parameter values of shape params
Revisions
---------
2011-07-22 Kevin Stevenson, UCF
[email protected]
Original version
2011-07-27 kevin
Added sigma, 2D params
"""
origin, sigma = etc
foo = ((params.T - origin)/sigma).T
if foo.ndim == 1:
foo = foo[:, np.newaxis]
return np.squeeze(np.asarray(trans*foo))
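# A minimal usage sketch (hypothetical values): with an identity transformation the result
# is simply the standardized parameters (params - origin) / sigma.
if __name__ == '__main__':
    test_params = np.array([1.0, 2.0])
    test_trans = np.matrix(np.eye(2))
    test_origin = np.array([0.0, 0.0])
    test_sigma = np.array([1.0, 2.0])
    print(orthoTrans(test_params, test_trans, (test_origin, test_sigma)))  # [1. 1.]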
|
"""Test simple stdout (and stderr) hookup in spawning a child process."""
import os
import sys
import time
import pprint
import unittest
import process
class RetvalTestCase(unittest.TestCase):
def _assertRetvalIs(self, expected, actual):
if sys.platform.startswith("win"):
self.failUnless(actual == expected)
else:
self.failUnless(os.WIFEXITED(actual))
exitStatus = os.WEXITSTATUS(actual)
# convert from 8-bit unsigned to signed
if exitStatus >= 2**7: exitStatus -= 2**8
self.failUnless(exitStatus == expected)
def test_ProcessProxy_0(self):
p = process.ProcessProxy(['quiet'])
retval = p.wait()
self._assertRetvalIs(0, retval)
def test_ProcessProxy_42(self):
p = process.ProcessProxy(['quiet', '42'])
retval = p.wait()
self._assertRetvalIs(42, retval)
def test_ProcessProxy_minus_42(self):
p = process.ProcessProxy(['quiet', '-42'])
retval = p.wait()
self._assertRetvalIs(-42, retval)
def test_ProcessOpen_0(self):
p = process.ProcessOpen(['quiet'])
retval = p.wait()
self._assertRetvalIs(0, retval)
def test_ProcessOpen_42(self):
p = process.ProcessOpen(['quiet', '42'])
retval = p.wait()
self._assertRetvalIs(42, retval)
def test_ProcessOpen_minus_42(self):
p = process.ProcessOpen(['quiet', '-42'])
retval = p.wait()
self._assertRetvalIs(-42, retval)
def suite():
"""Return a unittest.TestSuite to be used by test.py."""
return unittest.makeSuite(RetvalTestCase)
if __name__ == "__main__":
    import logging
    import testsupport
    logging.basicConfig()
    testsupport.setup()
sys.argv.insert(1, "-v") # always want verbose output
unittest.main()
|
#!/usr/bin/python
#===============================================================================
#
# FILE: sevcleanup.py
#
# USAGE: sevcleanup.py [--ef fermi_level] aims_dir(s) > STDOUT
#
# DESCRIPTION: Takes "Sorted_Eigenvalues.dat" file and sums it up such that each
# energy appears only once and the weights are summed up
#
# CREATED: 02/02/2014 18:23:00 PM IDT
# AUTHOR: Ofer Sinai
#===============================================================================
from __future__ import print_function
import sys
import re
import os
# Main action
def main(args=sys.argv[1:]):
# Basic command line argument parsing code.
# Make a list of command line arguments, omitting the [0] element
# which is the script itself.
USAGE = 'Usage: sevcleanup.py [--ef fermi_level] aims_dir(s) > STDOUT'
# Parse options
Efermi = 0.0
if '--ef' in args:
opt_ind = args.index('--ef')
opt_args = args[opt_ind:]
if len(opt_args) < 2:
print(USAGE, file=sys.stderr)
sys.exit(1)
try:
Efermi = float(opt_args[1])
except ValueError:
print(USAGE, file=sys.stderr)
sys.exit(1)
del args[opt_ind:opt_ind+2]
# At least 1 argument must be given
if len(args) < 1:
print(USAGE, file=sys.stderr)
sys.exit(1)
# Format output
floatformat = ' {0:10.3f} {1:2.9f}'
for path in args:
# Check path and see if it pertains to a file or a directory
if os.path.isfile(path):
filepath = path
elif os.path.isdir(path):
filepath = os.path.join(path, 'Sorted_Eigenvalues.dat')
else:
print('Path ' + path + ' not found', file=sys.stderr)
continue
# Check existence of file
if not os.path.isfile(filepath):
print('File ' + filepath + ' not found', file=sys.stderr)
continue
# Get data from file
f = open(filepath, 'rU')
lines = f.readlines()
f.close()
# Go over lines by order and sum weights. Use dictionary
eigenvals_weights_hash = {}
for line in lines:
# Print comment lines as is
match = re.search(r'^\s*#', line)
if match:
print(line.split('\n')[0])
continue
# Not a comment. Get data
words = line.split()
try:
numbers = [ float(n) for n in words ]
except ValueError:
# Comment out, print as is and continue
print('# ' + line.split('\n')[0])
continue
if numbers[0] not in eigenvals_weights_hash.keys():
eigenvals_weights_hash[numbers[0]] = 0
eigenvals_weights_hash[numbers[0]] += numbers[1]
# Print out all keys and values, shifted back by Ef
for eigenval in sorted(eigenvals_weights_hash.keys()):
weight = eigenvals_weights_hash[eigenval]
print(floatformat.format(eigenval + Efermi, weight))
# Standard boilerplate
if __name__ == "__main__":
main()
|
import pyautogui as py
import time
import random
py.FAILSAFE = True
#indefinite loop MAKE SURE FAILSAFE IS ON /
#end of script is "x += 1"
# Inventory slot grid: four columns by seven rows. Column x-coordinates and per-row
# (y-coordinate, mouse-move duration) pairs are the same values used by the original
# unrolled click sequence.
columns = [1643, 1705, 1770, 1830]
rows = [(615, .44), (667, .44), (720, .43), (777, .41),
        (827, .40), (882, .29), (940, .25)]
x = 1
while True:
    #drop inventory: shift-click every slot, row by row
    for y, row_duration in rows:
        for col in columns:
            # the last slot (row 7, rightmost column) originally used a slightly shorter move
            duration = .21 if (col, y) == (1830, 940) else row_duration
            py.moveTo(col, y, duration=duration)
            py.PAUSE = .2
            py.keyDown('shift')
            py.click(button='left')
            py.keyUp('shift')
x += 1
|
from .pygoko import CoverTree, PyBayesCovertree
__all__ = ["CoverTree", "PyBayesCovertree"]
|
def sol():
N = 3
mal = ["E", "A", "B", "C", "D"]
while N:
N -= 1
yut = list(map(int, input().split()))
num_of_zero = yut.count(0)
print(mal[num_of_zero])
if __name__ == "__main__":
sol()
|
import spacy
from markovconstraints.markov_chain import MarkovProcess, parse_sequences
from markovconstraints.suffix_tree import get_suffix_tree
from datetime import datetime
from random import shuffle
import pickle
nlp = spacy.load("en_core_web_md")
def get_similarity(w1, w2):
return nlp.vocab[w1.lower()].similarity(nlp.vocab[w2.lower()])
def tokenize(line):
return nlp(line.strip().lower())
class Sentence(object):
def __init__(self, words, orders):
self.words = words
self.orders = orders
def __repr__(self):
return ' '.join([w for w in self.words if w not in {'<s>', '</s>'}])
class Corpus:
def __init__(self, source, order=3, language='en'):
self.order = order
self.language = language
t = datetime.now()
with open(source) as f:
self.sentences = [tokenize(line) for line in f if line.strip()]
self._words = set(word.text.lower() for sentence in self.sentences for word in sentence if not word.is_punct)
print('time to tokenize {} sentences {}'.format(len(self.sentences), datetime.now() - t))
t = datetime.now()
to_parse = [[w.text.lower() for w in sentence if not w.is_punct] for sentence in self.sentences]
self.matrices = parse_sequences(to_parse, order)
print('time to estimate the matrices', datetime.now() - t)
t = datetime.now()
self.suffix_tree = get_suffix_tree(self.sentences)
print('time to compute the suffix tree', datetime.now() - t)
@property
def words(self):
return list(self._words)
def generate_sentences(self, constraints, n=10):
mp = MarkovProcess(self.matrices, constraints)
sentences = []
for _ in range(n):
sequence = mp.generate()
orders = self.suffix_tree.get_all_orders(sequence)
sentences.append(Sentence(sequence, orders))
return sentences
def generate_semantic_sentence(self, sense, length, n, number__of_words=10):
words = self.get_similar_words(sense, number__of_words)
print(words)
indices = [i for i in range(length)]
shuffle(indices)
for i in indices:
try:
cts = [None] * i + [words] + [None] * (length - i - 1)
return self.generate_sentences(cts, n)
except RuntimeError:
pass
return []
def get_similar_words(self, sense, n=10):
similarities = []
for w in self.words:
try:
similarities.append((get_similarity(sense, w), w))
except KeyError:
pass
return [k for _, k in sorted(similarities, reverse=True)[:n]]
dylan = Corpus('/Users/gabriele/Workspace/misc/redylan-desktop/redylan/data/dylan')
# with open('dylan.pkl', 'wb') as f:
# pickle.dump(dylan, f)
for s in dylan.generate_semantic_sentence('love', 10, 10):
print(s)
|
# -*- coding=utf-8
# SCSD-PY001
# hi-windom/ColorThemeAnalyse
# https://gitee.com/hi-windom/color-theme-analyse
'''
# ---------------------------------
# Created on 2021-7-20
# Updated on 2021-7-20 02:08:57
# ---------------------------------
# Need help ? => [email protected]
# ---------------------------------
# The author is lazy and hasn't decided what to say here yet
# ---------------------------------
'''
################################################################
import os,sys
from importlib import import_module
import json
root_path = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
if root_path not in sys.path:
sys.path.append(root_path)
# logger = import_module('.logger','lib')
# logger = logger.myLogging("gitee.com/soltus")
from MMCQsc.scp.lib.error_sc import *
from MMCQsc.scp.lib.logger import *
logger = myLogging("gitee.com/soltus")
from MMCQsc.scp.lib.error_sc import *
def fun_version(v1,v2):
# v1 == v2 return 0
# v1 > v2 return 1
# v1 < v2 return -1
l_1 = v1.split('.')
l_2 = v2.split('.')
c = 0
while True:
if c == len(l_1) and c == len(l_2):
return 0
if len(l_1) == c:
l_1.append(0)
if len(l_2) == c:
l_2.append(0)
if int(l_1[c]) > int(l_2[c]):
return 1
elif int(l_1[c]) < int(l_2[c]):
return -1
c += 1
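# A quick sanity sketch of the comparison convention documented above:
#   fun_version('3.9.5', '3.8')   -> 1
#   fun_version('3.8', '3.8.0')   -> 0
#   fun_version('3.6.0', '3.9.5') -> -1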
os.system("cls")
file_path = sys.argv[0]
def check_conda():
if "\\envs\\" in sys.executable:
conda_path = os.path.abspath(os.path.join(os.path.dirname(sys.executable), "../..", "Script", "conda.exe"))
conda_env = sys.executable.split("\\")[-2]
else:
conda_path = os.path.abspath(os.path.join(os.path.dirname(sys.executable), "Script", "conda.exe"))
conda_env = "base"
return conda_path, conda_env
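# Sketch of the convention in check_conda: if sys.executable lives under an "envs"
# directory (e.g. ...\Miniconda3\envs\myenv\python.exe, a hypothetical path), the active
# environment name is taken from that path segment; otherwise it falls back to 'base'.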
from subprocess import Popen
import shlex
# try:
# raise error_sc.EnvError('e97304')
# except error_sc.EnvError as e:
# logger.error("引发异常:" + repr(e))
# tb = sys.exc_info()[2]
# if tb:
# raise error_sc.OA().with_traceback(tb)
def run_in_env(env):
    PY3_VNO = '.'.join(str(i) for i in sys.version_info[:3])
os.system("cls")
logger.info("You are using Conda , Activated conda env : '{}' Python {}".format(env, PY3_VNO))
with os.popen("conda --version") as conda_v:
if "conda" in conda_v.read():
            logger.debug("\n\n\n\t\tContinue with the current Conda environment (y)?\n\t\tOr choose a different environment to run in (n)?\n\t\tYou can also enter any other string as a new environment name, and a new Python 3.9.5 environment will be created for you\n\n\t\tProceed ? [Y/n/*]")
while True:
pick_env = input("main.py:93 >>> ")
if pick_env in ['Y','y']:
                    if sys.version_info.major < 3:
                        logger.error(" Can NOT run in Python 2.x ")
                        raise EnvError('\n\n\tThis script is only for use with Python 3.6 or later\n\n\t https://gitee.com/hi-windom/color-theme-analyse \n\n')
                    elif sys.version_info[:3] < (3,6,0):
                        logger.error(" Can NOT run in Python < 3.6 ")
                        raise EnvError('\n\n\tThis script is only for use with Python 3.6 or later\n\n\t https://gitee.com/hi-windom/color-theme-analyse \n\n')
else:
return env
elif pick_env in ['N','n']:
python = sys.executable.replace(check_conda()[1],pick_env)
print(python)
os.system("cls")
os.system("conda info -e")
                    logger.debug("\n\n\n\t\tEnter the Conda environment you want to activate")
pick_env = input("main.py:109 >>> ")
                    logger.debug(f"Run the command 'conda activate {pick_env}' in your terminal to activate the environment manually")
                    logger.warning("\n\n\t\t[ tip ] : press the Up arrow key to recall previous commands\n\n")
exit()
else:
os.system("conda deactivate")
os.system("deactivate")
os.system("cls")
python = sys.executable.replace(check_conda()[1],pick_env)
change_env = file_path.replace('main','change_env')
try:
args = shlex.split(f"conda create -n {pick_env} python==3.9.5 -y")
result = Popen(args, bufsize=0, executable=None, close_fds=False, shell=True, env=None, startupinfo=None, creationflags=0)
                        logger.debug(f"Started download subprocess, PID: {result.pid}")
                        logger.warning("\n\n\t\t[ tip ] : press Ctrl + C to force quit\n\n")
result.wait()
except BaseException as e:
if isinstance(e, KeyboardInterrupt):
                            logger.warning("The user aborted the download")
                            logger.warning("This window has served its purpose; time to say goodbye to it")
result.kill()
#os.system(f"conda create -n {pick_env} python==3.9.5 -y")
# args = shlex.split(f"conda activate {pick_env}")
# result = Popen(args, bufsize=0, executable=None, close_fds=False, shell=True, env=None, startupinfo=None, creationflags=0)
logger.debug(check_conda()[0])
logger.debug(python)
logger.debug(file_path)
                    logger.debug(f"Environment created: [ {pick_env} ] ; please rerun using this new environment\n\n")
                    logger.warning("\n\n\t\t[ tip ] : press the Up arrow key to recall previous commands\n\n")
exit()
return pick_env
pick_env = check_conda()[1]
while pick_env:
env_tmep = pick_env
pick_env = run_in_env(pick_env)
if pick_env == env_tmep:
break
PY3_VNO = '.'.join(str(i) for i in sys.version_info[:3])
logger.warning("You are using Python {}".format(PY3_VNO))
if fun_version(PY3_VNO,"3.8.0") == -1:
logger.critical("Required version : Python >= 3.8.0")
with os.popen("conda --version") as conda_v:
if "conda" in conda_v.read():
logger.info("You are using Conda , Press key 'y' to upgrade your Python")
logger.info("If you want to upgrade later by yourself , use command: conda install python==3.9.5")
logger.debug("Upgrade your Python to 3.9.5 ? [Y/*]")
isupdate = input("main.py:123 >>> ")
if isupdate not in ['Y','y']:
exit()
os.system("cls")
            logger.info("Starting the download; how long it takes depends on your network")
try:
                args = shlex.split(f"conda install python==3.9.5 -n {pick_env} -y")
result = Popen(args, bufsize=0, executable=r"C:\Windows\System32\WindowsPowerShell\v1.0\powershell.exe", close_fds=False, shell=False, env=None, startupinfo=None, creationflags=0)
                logger.debug(f"Started download subprocess, PID: {result.pid}")
                logger.warning("\n\n\t\t[ tip ] : press Ctrl + C to force quit\n\n")
result.wait()
except BaseException as e:
if isinstance(e, KeyboardInterrupt):
                    logger.warning("The user aborted the download")
                    logger.warning("This window has served its purpose; time to say goodbye to it")
result.kill()
finally:
if result.returncode:
                    logger.error("Download failed; please upgrade Python manually and try again")
else:
args = [sys.executable, file_path]
logger.debug(args)
                    logger.debug(f"Run the command 'conda activate {pick_env}' in your terminal to activate the environment manually")
                    logger.warning("\n\n\t\t[ tip ] : press the Up arrow key to recall previous commands\n\n")
exit()
elif fun_version(PY3_VNO,"3.9.5") == -1:
logger.warning("Recommended version : Python >= 3.9.5 However, it doesn't matter")
|
import pytest
from minus80 import Accession
from minus80 import Cohort
from minus80 import CloudData
from minus80.Tools import *
@pytest.fixture(scope='module')
def simpleCloudData():
return CloudData()
@pytest.fixture(scope='module')
def simpleAccession():
# Create a simple Accession
return Accession('Sample1',files=['file1.txt','file2.txt'],type='sample')
@pytest.fixture(scope='module')
def RNAAccession1():
a = Accession(
'RNAAccession1',
files = [
'./data/Sample1_ATGTCA_L007_R1_001.fastq',
'./data/Sample1_ATGTCA_L007_R2_001.fastq',
'./data/Sample1_ATGTCA_L008_R1_001.fastq',
'./data/Sample1_ATGTCA_L008_R2_001.fastq',
],
type='RNASeq'
)
return a
@pytest.fixture(scope='module')
def RNAAccession2():
a = Accession(
'RNAAccession2',
files = [
'./data/Sample2_ATGTCA_L005_R1_001.fastq',
'./data/Sample2_ATGTCA_L005_R2_001.fastq',
'./data/Sample2_ATGTCA_L006_R1_001.fastq',
'./data/Sample2_ATGTCA_L006_R2_001.fastq',
],
type='RNASeq'
)
return a
@pytest.fixture(scope='module')
def RNACohort(RNAAccession1,RNAAccession2):
delete('Cohort','RNACohort',force=True)
x = Cohort('RNACohort')
x.add_accession(RNAAccession1)
x.add_accession(RNAAccession2)
return x
@pytest.fixture(scope='module')
def simpleCohort():
delete('Cohort','TestCohort',force=True)
# Create the simple cohort
a = Accession('Sample1',files=['file1.txt','file2.txt'],type='WGS')
b = Accession('Sample2',files=['file1.txt','file2.txt'],type='WGS')
c = Accession('Sample3',files=['file1.txt','file2.txt'],type='CHIP')
d = Accession('Sample4',files=['file1.txt','file2.txt'],type='CHIP')
x = Cohort('TestCohort')
for acc in [a,b,c,d]:
x.add_accession(acc)
return x
|
# Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2012 Isaku Yamahata <yamahata at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# datapath type
# At this moment, this information is not used yet and unknown type is
# defined as place-holder.
# switches are categorized by its rolls and openflow controller may want to
# handle switch differently depending on it role.
#
# unknown:
#
UNKNOWN = 'UNKNOWN'
|
import hek
# stop monitor mode
result = hek.net.monitor_stop(name="wlan0mon")
# Output
print(result)
|
#!/usr/bin/env python3
# Copyright (C) 2019-2021 All rights reserved.
# FILENAME: middleware.py
# VERSION: 0.0.1
# CREATED: 2021-08-31 16:10
# AUTHOR: Aekasitt Guruvanich <[email protected]>
# DESCRIPTION:
#
# HISTORY:
#*************************************************************
### Third-Party Packages ###
from fastapi.requests import Request
from starlette.datastructures import MutableHeaders
from starlette.types import ASGIApp, Message, Receive, Scope, Send
### Local Modules ###
from .backends import BackstageSeshBackend
from .backstage_config import BackstageConfig
from .session import Session
class BackstageSeshMiddleware(BackstageConfig):
def __init__(self, app: ASGIApp):
self._app = app
@property
def app(self) -> ASGIApp:
return self._app
@property
def autoload(self) -> bool:
return self._autoload
@property
def backend(self) -> BackstageSeshBackend:
return self._backend
@property
def max_age(self) -> int:
return self._max_age
@property
def security_flags(self) -> str:
return self._security_flags
@property
def session_cookie(self) -> str:
return self._session_cookie
async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
if scope['type'] not in ('http', 'websocket'):
await self.app(scope, receive, send)
return
connection = Request(scope)
session_id = connection.cookies.get(self.session_cookie, None)
scope['session'] = Session(self.backend, session_id)
if self.autoload:
await scope['session'].load()
async def send_wrapper(message: Message) -> None:
if message['type'] == 'http.response.start':
path = scope.get('root_path', '') or '/'
if scope['session'].is_modified:
# We have session data to persist (data was changed, cleared, etc).
nonlocal session_id
session_id = await scope['session'].persist()
headers = MutableHeaders(scope=message)
header_value = '%s=%s; path=%s; Max-Age=%d; %s' % (
self.session_cookie,
session_id,
path,
self.max_age, self.security_flags,
)
headers.append('Set-Cookie', header_value)
elif scope['session'].is_loaded and scope['session'].is_empty:
# no interactions to session were done
headers = MutableHeaders(scope=message)
header_value = '{}={}; {}'.format(
self.session_cookie,
f'null; path={path}; expires=Thu, 01 Jan 1970 00:00:00 GMT;',
self.security_flags,
)
headers.append('Set-Cookie', header_value)
await send(message)
await self.app(scope, receive, send_wrapper)
|
import unittest
import url_decoder
from url_decoder_test import UrlDecoderTestCase
from url_decoder_github import GitHubRepositoryUrlDecoder, GitHubCommitUrlDecoder, GitHubGistUrlDecoder
class _GitHubTestClient:
def __init__(self):
self.get_repository_args = []
self.get_commit_args = []
def _get_repository_json(self):
owner_json = { "login": "mgp" }
return { "owner": owner_json, "name": "repo-name" }
def get_repository(self, *pargs):
self.get_repository_args.append(pargs)
return self._get_repository_json()
def _get_commit_json(self):
return { "sha": "a8b7818", "message": "Initial commit" }
def get_commit(self, *pargs):
self.get_commit_args.append(pargs)
return self._get_commit_json()
class GitHubRepositoryUrlDecoderTest(UrlDecoderTestCase):
def setUp(self):
UrlDecoderTestCase.setUp(self)
self.test_client = _GitHubTestClient()
self.url_decoder = GitHubRepositoryUrlDecoder(self.test_client)
def test_can_decode_url(self):
# Invalid netloc.
self.assertFalse(self._can_decode_url(
self.url_decoder, "https://invalid.github.com/mgp/sharebears"))
# Invalid path.
self.assertFalse(self._can_decode_url(
self.url_decoder, "https://github.com/"))
self.assertFalse(self._can_decode_url(
self.url_decoder, "https://github.com/mgp"))
self.assertFalse(self._can_decode_url(
self.url_decoder, "https://github.com/mgp/sharebears/wiki"))
# Valid URL.
self.assertTrue(self._can_decode_url(
self.url_decoder, "https://github.com/mgp/sharebears"))
def test_decode_url(self):
url = "https://github.com/mgp/sharebears"
parsed_url = self._parse_url(url)
json = self.url_decoder.decode_url(url, parsed_url)
self.assertDictEqual(json, self.test_client._get_repository_json())
self.assertEqual(0, len(self.test_client.get_commit_args))
self.assertEqual(1, len(self.test_client.get_repository_args))
owner, repo = self.test_client.get_repository_args[0]
self.assertEqual("mgp", owner)
self.assertEqual("sharebears", repo)
def test_item_for_rendering(self):
owner_json = {"login": "login-value", "avatar_url": "avatar-url-value", "html_url": "html_url-value"}
decoded_url = {
"name": "name-value",
"description": "description-value",
"html_url": "html_url-value",
"language": "language-value",
"owner": owner_json
}
item = self.url_decoder.item_for_rendering(decoded_url)
self.assertEqual(decoded_url["name"], item.name)
self.assertEqual(decoded_url["description"], item.description)
self.assertEqual(decoded_url["html_url"], item.html_url)
self.assertEqual(decoded_url["language"], item.language)
# Assert that the GitHubRepositoryOwnerItem instance is correct.
owner = item.owner
self.assertEqual(owner_json["login"], owner.login)
self.assertEqual(owner_json["avatar_url"], owner.avatar_url)
self.assertEqual(owner_json["html_url"], owner.html_url)
class GitHubCommitUrlDecoderTest(UrlDecoderTestCase):
def setUp(self):
UrlDecoderTestCase.setUp(self)
self.test_client = _GitHubTestClient()
self.url_decoder = GitHubCommitUrlDecoder(self.test_client)
def test_can_decode_url(self):
# Invalid netloc.
self.assertFalse(self._can_decode_url(
self.url_decoder, "https://invalid.github.com/mgp/sharebears/commit/a8b7818"))
# Invalid path.
self.assertFalse(self._can_decode_url(
self.url_decoder, "https://github.com/"))
self.assertFalse(self._can_decode_url(
self.url_decoder, "https://github.com/mgp/sharebears/commit"))
# Valid URL.
self.assertTrue(self._can_decode_url(
self.url_decoder, "https://github.com/mgp/sharebears/commit/a8b7818"))
def test_decode_url(self):
url = "https://github.com/mgp/sharebears/commit/a8b7818"
parsed_url = self._parse_url(url)
json = self.url_decoder.decode_url(url, parsed_url)
self.assertDictEqual(json, self.test_client._get_commit_json())
self.assertEqual(0, len(self.test_client.get_repository_args))
self.assertEqual(1, len(self.test_client.get_commit_args))
owner, repo, sha = self.test_client.get_commit_args[0]
self.assertEqual("mgp", owner)
self.assertEqual("sharebears", repo)
self.assertEqual("a8b7818", sha)
def _make_user_json(self, name, email, date_string):
return { "name": name, "email": email, "date": date_string }
def _assert_user(self, user_json, user):
self.assertEqual(user_json["name"], user.name)
self.assertEqual(user_json["email"], user.email)
expected_datetime = url_decoder.to_datetime(user_json["date"])
self.assertEqual(expected_datetime, user.date)
def test_item_for_rendering(self):
author_json = self._make_user_json(
"author_name", "author_email", "2010-04-10T14:10:01-07:00")
committer_json = self._make_user_json(
"committer_name", "committer_email", "2011-05-11T15:11:02-08:00")
decoded_url = {
"sha": "sha-value",
"url": "url-value",
"message": "message-value",
"author": author_json,
"committer": committer_json
}
item = self.url_decoder.item_for_rendering(decoded_url)
self.assertEqual(decoded_url["sha"], item.sha)
self.assertEqual(decoded_url["url"], item.url)
self.assertEqual(decoded_url["message"], item.message)
# Assert that the GitHubCommitUserItem instances are correct.
self._assert_user(author_json, item.author)
self._assert_user(committer_json, item.committer)
class GitHubGistUrlDecoderTest(UrlDecoderTestCase):
def setUp(self):
UrlDecoderTestCase.setUp(self)
self.url_decoder = GitHubGistUrlDecoder()
def test_can_decode_url(self):
# Invalid netloc.
self.assertFalse(self._can_decode_url(
self.url_decoder, "https://invalid.gist.github.com/mgp/92b50ae3e1b1b46eadab"))
# Invalid path.
self.assertFalse(self._can_decode_url(
self.url_decoder, "https://gist.github.com/"))
self.assertFalse(self._can_decode_url(
self.url_decoder, "https://gist.github.com/mgp"))
# Valid URL.
self.assertTrue(self._can_decode_url(
self.url_decoder, "https://gist.github.com/mgp/92b50ae3e1b1b46eadab"))
def test_decode_url(self):
url = "https://gist.github.com/mgp/92b50ae3e1b1b46eadab"
parsed_url = self._parse_url(url)
expected_dict = { "url": url }
self.assertDictEqual(expected_dict, self.url_decoder.decode_url(url, parsed_url))
def test_item_for_rendering(self):
url = "https://gist.github.com/mgp/92b50ae3e1b1b46eadab"
decoded_url = { "url": url }
item = self.url_decoder.item_for_rendering(decoded_url)
self.assertEqual(url, item.url)
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(GitHubRepositoryUrlDecoderTest))
suite.addTest(unittest.makeSuite(GitHubCommitUrlDecoderTest))
suite.addTest(unittest.makeSuite(GitHubGistUrlDecoderTest))
return suite
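# Hedged sketch (added for illustration; not part of the original tests): when this
# module is executed directly, the suite defined above can be run with the standard
# unittest text runner.
if __name__ == "__main__":
    unittest.main(defaultTest="suite")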
|
#!/usr/bin/env python
import logging
_LOG = logging.getLogger(__name__)
def error_return(msg):
_LOG.debug(msg)
return False
VERSION = '0.0.4a'
class SeverityCodes(object):
"""An enum of Warning/Error severity."""
ERROR, WARNING = range(2)
facets = ['ERROR', 'WARNING']
numeric_codes_registered = set(range(len(facets)))
class _NEXEL(object):
TOP_LEVEL = 0
NEXML = 1
OTUS = 2
OTU = 3
TREES = 4
TREE = 5
NODE = 6
EDGE = 7
META = 8
INTERNAL_NODE = 9
LEAF_NODE = 10
CODE_TO_STR = {}
CODE_TO_PAR_CODE = {}
CODE_TO_OTHER_ID_KEY = {}
CODE_TO_TOP_ENTITY_NAME = {}
_NEXEL.CODE_TO_STR = {
_NEXEL.TOP_LEVEL: 'top-level',
_NEXEL.NEXML: 'nexml',
_NEXEL.OTUS: 'otus',
_NEXEL.OTU: 'otu',
_NEXEL.TREES: 'trees',
_NEXEL.TREE: 'tree',
_NEXEL.NODE: 'node',
_NEXEL.INTERNAL_NODE: 'node',
_NEXEL.LEAF_NODE: 'node',
_NEXEL.EDGE: 'edge',
_NEXEL.META: 'meta',
None: 'unknown',
}
_NEXEL.CODE_TO_PAR_CODE = {
_NEXEL.TOP_LEVEL: None,
_NEXEL.NEXML: _NEXEL.TOP_LEVEL,
_NEXEL.OTUS: _NEXEL.NEXML,
_NEXEL.OTU: _NEXEL.OTUS,
_NEXEL.TREES: _NEXEL.NEXML,
_NEXEL.TREE: _NEXEL.TREES,
_NEXEL.NODE: _NEXEL.TREE,
_NEXEL.INTERNAL_NODE: _NEXEL.TREE,
_NEXEL.LEAF_NODE: _NEXEL.TREE,
_NEXEL.EDGE: _NEXEL.TREE,
None: None,
}
_NEXEL.CODE_TO_OTHER_ID_KEY = {
_NEXEL.TOP_LEVEL: None,
_NEXEL.NEXML: None,
_NEXEL.OTUS: '@otusID',
_NEXEL.OTU: '@otuID',
_NEXEL.TREES: '@treesID',
_NEXEL.TREE: '@treeID',
_NEXEL.NODE: '@nodeID',
_NEXEL.INTERNAL_NODE: '@nodeID',
_NEXEL.LEAF_NODE: '@nodeID',
_NEXEL.EDGE: '@edgeID',
None: None,
}
_NEXEL.CODE_TO_TOP_ENTITY_NAME = {
_NEXEL.TOP_LEVEL: '',
_NEXEL.NEXML: 'nexml',
_NEXEL.OTUS: 'otus',
_NEXEL.OTU: 'otus',
_NEXEL.TREES: 'trees',
_NEXEL.TREE: 'trees',
_NEXEL.NODE: 'trees',
_NEXEL.INTERNAL_NODE: 'trees',
_NEXEL.LEAF_NODE: 'trees',
_NEXEL.EDGE: 'trees',
None: '',
}
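# Hedged usage sketch (illustrative; not part of the original module): the tables
# above are plain dict lookups keyed by the integer codes defined on _NEXEL, e.g.
#   _NEXEL.CODE_TO_STR[_NEXEL.EDGE]              -> 'edge'
#   _NEXEL.CODE_TO_TOP_ENTITY_NAME[_NEXEL.EDGE]  -> 'trees'
#   _NEXEL.CODE_TO_PAR_CODE[_NEXEL.EDGE]         -> _NEXEL.TREE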
|
class Stacks:
def __init__(self):
self._stack = []
    def is_empty(self) -> bool:
        # The list-backed stack is empty when it holds no elements.
        return len(self._stack) == 0
def pop(self) -> None:
self._stack.pop()
def push(self, g) -> None:
self._stack.append(g)
stack = Stacks()
stack.push("test")
stack.push("test3")
print("Initial stack: {}".format(stack._stack))
stack.pop()
print("Stack after popping: {}".format(stack._stack))
print(stack.is_empty())
|
import numpy as np
import os
from PIL import Image
def read_image(path):
"""Reads an image located at `path` into an array.
Arguments:
path (str): Path to a valid image file in the filesystem.
Returns:
`numpy.ndarray` of size `(height, width, channels)`.
"""
full_path = os.path.expanduser(path)
return np.array(Image.open(full_path).convert('RGB'))
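# Hedged usage sketch (illustrative; the path below is hypothetical):
#   img = read_image('~/pictures/example.jpg')
#   print(img.shape)  # (height, width, 3) because of the RGB conversion above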
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This module contains various experimental environments used for testing
human behavior.
Created on Thu Feb 22 14:50:01 2018
@author: Dimitrije Markovic
"""
import torch
from torch import zeros, ones
from torch.distributions import Categorical, Multinomial, Dirichlet
__all__ = [
'MultiArmedBandit'
]
class MultiArmedBandit(object):
"""Implementation of a multi-armed bandit task. Bandit has K arms, each associated
with specific probability of delivering different outcomes.
"""
def __init__(self, priors, transitions, context, offers, arm_types, states=None, nsub=1, blocks=1, trials=100):
self.priors = priors # prior probabilities
        self.tms = transitions # dictionary containing transition matrices
self.blocks = blocks
self.trials = trials
self.nsub = nsub
if states is not None:
self.states = states
self.fixed_states = 1
else:
self.fixed_states = 0
self.arm_types = arm_types
self.context = context
self.offers = offers
self.initialise()
def initialise(self):
if not self.fixed_states:
blocks = self.blocks
trials = self.trials
nsub = self.nsub
ns, nf = self.priors['probs'].shape
self.states = {'points': zeros(blocks, trials+1, nsub, nf, dtype=torch.long),
'probs': zeros(blocks, trials+1, nsub, ns, nf),
'locations': zeros(blocks, trials+1, nsub, dtype=torch.long) }
return self
def get_offers(self, block, trial):
if trial == 0:
self.update_states(block, trial)
return {'locations': self.states['locations'][block, trial],
'points': self.states['points'][block, trial]}
else:
return {'locations': self.states['locations'][block, trial],
'points': self.states['points'][block, trial]}
def update_states(self, block, trial, responses=None, outcomes=None):
if trial == 0:
self.states['points'][block, trial] = 0
if block == 0:
probs = self.priors['probs']
self.states['probs'][block, trial] = probs
else:
self.states['probs'][block, trial] = self.states['probs'][block-1, -1]
self.states['locations'][block, trial] = Categorical(probs=self.priors['locations']).sample((self.nsub,))
else:
self.states['points'][block, trial] = self.states['points'][block, trial - 1] + outcomes.long()
self.states['probs'][block, trial] = self.states['probs'][block, trial - 1]
loc = self.states['locations'][block, trial - 1]
self.states['locations'][block, trial] = \
Categorical(probs=self.tms['locations'][responses, loc]).sample()
if trial < self.trials:
return zeros(self.nsub)
else:
success = torch.any(self.states['points'][block, trial, :, 1:] > 2*self.trials//3, -1)
return success.long()
def update_environment(self, block, trial, responses):
"""Generate stimuli for the current block and trial and update the state
"""
# offers in the current trial
offers = self.offers[block][trial]
# selected arm types
arm_types = self.arm_types[offers, responses]
# each selected arm is associated with specific set of reward probabilities
probs = self.states['probs'][block, trial, range(self.nsub), arm_types]
out1 = Multinomial(probs=probs).sample()
out = {'locations': responses,
'features': out1.argmax(-1)}
out2 = self.update_states(block, trial+1, responses=responses, outcomes=out1)
return [responses, (out, out2)]
|
# Print every prime below 10000: a number is prime when it has no divisor
# between 2 and i // 2 (1 is not prime, so the search starts at 2).
for i in range(2, 10000):
    for j in range(2, i // 2 + 1):
        if (i % j == 0):
            break
    else:
        print(i)
|
# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the duecredit package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
from nose import SkipTest
class KnownFailure(SkipTest):
pass
|
## API for testing matrix instructions of core - MVM-f/b/d, CRS - with one matrix per core only
## Set config file to following IMA hyperparameters
#xbar_bits = 2
#num_matrix = 1 # each matrix is 8-fw xbars, 8-bw xbars and 16-delta xbars
#xbar_size = 128
#dac_res = 1
#adc_res = 8
#num_adc = 2 * num_matrix
#num_ALU = 1
#dataMem_size = 4 * (2*xbar_size) # 4 for 4 input spaces within matrix (1 for f/b each, 2 for d)
#instrnMem_size = 512 #in entries
import sys
import os
import numpy as np
root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, root_dir)
from src.data_convert import *
import src.ima as ima
from src.instrn_proto import *
import include.configTest as cfg
path = 'coreMvm_test/'
wt_path = path
inst_file = path + 'imem1.npy'
trace_file = path + 'trace.txt'
dump_file = path + 'memsim.txt'
datamem_off = cfg.datamem_off # each matrix has 6 memory spaces (1 for f/b, 2 for d)
phy2log_ratio = cfg.phy2log_ratio # ratio of physical to logical xbar
## Create memory dump function for an ima
def dump (ima, filename = ''):
if (filename == ''):
filename = 'memsim.txt'
def mem_dump (memfile, name): # for conciseness
assert (type(memfile) == list), 'memfile should be list'
fid.write (name + ' contents\n')
for entry in memfile:
if (entry != ''):
temp_val = fixed2float (entry, cfg.int_bits, cfg.frac_bits)
fid.write(str(temp_val) + '\n')
fid = open (filename, 'w')
# dump the datamemory
mem_dump (ima.dataMem.memfile, 'DataMemory')
# traverse the matrices in an ima
mvmu_list = ['f', 'b', 'd']
for i in range(cfg.num_matrix):
# traverse mvmus in a matrix
for mvmu_t in mvmu_list:
# dump the xbar input memory
mem_dump (ima.xb_inMem_list[i][mvmu_t].memfile, 'Xbar Input Memory: matrixId: ' + str(i) + 'mvmu_type: '
+ mvmu_t)
# dump the xbar output memory
mem_dump (ima.xb_outMem_list[i][mvmu_t].memfile, 'Xbar Output Memory: matrixId: ' + str(i) + 'mvmu_type: '
+ mvmu_t)
fid.close()
### Create a test matrix for MVM functionality check - positive matrices only
#xbar_size = cfg.xbar_size
## scaling down weight values to ensure that output of MVM doesn't overflow
#log_xbar = 0.1*np.random.rand(xbar_size, xbar_size)
#phy_xbar = [np.random.rand(xbar_size, xbar_size) for i in range(phy2log_ratio)]
#
## NOTE: weights programmed to xbars are stored in terms of their representative floating values
## for use in np.dot (to store bits representation, use fixed point version of np.dot)
#for i in range (xbar_size):
# for j in range (xbar_size):
# temp_val = float2fixed(log_xbar[i][j], cfg.int_bits, cfg.frac_bits)
# assert (len(temp_val) == 16)
# for k in range (len(phy_xbar)):
# if (k==0):
# val = temp_val[-(k+1)*cfg.xbar_bits:]
# else:
# val = temp_val[-(k+1)*cfg.xbar_bits:-(k+1)*cfg.xbar_bits+2]
# # augment sign extension (used in MSB xbar only)
# if (k == (len(phy_xbar)-1)):
# val = (cfg.num_bits - cfg.xbar_bits)*val[0] + val[0:]
# phy_xbar[k][i][j] = fixed2float(val, cfg.int_bits, cfg.frac_bits)
#
## save log_xbar and phy_xbar to disc
#np.save (wt_path+'log_xbar', log_xbar)
#for i in range (len(phy_xbar)):
# np.save (wt_path+'phy_xbar'+str(i), phy_xbar[i])
## Setup files
phy2log_ratio = cfg.num_bits // cfg.xbar_bits  # integer ratio; used below as a range() bound
inst_refresh = 1
## Create core instruction stream for testing
if (inst_refresh):
num_inst = 0 # track number of instructions generated
# instructions for IMA1
dict_list = []
# Copy data from data memory to xbar_in_memory - Matrix0: f-xbar
i_temp = i_copy (0, datamem_off+0, cfg.xbar_size)
dict_list.append (i_temp.copy())
# Copy data from data memory to xbar_in_memory - Matrix0: b-xbar
i_temp = i_copy (256, datamem_off+0, cfg.xbar_size)
dict_list.append (i_temp.copy())
# MVM instruction to populate xbar_out_memory - runs inner product on f and b xbar
i_temp = i_mvm(['110'])
dict_list.append (i_temp.copy())
# Copy output of f and b xbars to input memory spaces of d-xbar
i_temp = i_copy (512, 128, cfg.xbar_size)
dict_list.append (i_temp.copy())
i_temp = i_copy (640, 384, cfg.xbar_size)
dict_list.append (i_temp.copy())
# MVM instruction to populate d-xbar - runs outer product on d-xbar
i_temp = i_mvm(['001'])
dict_list.append (i_temp.copy())
    # CRS instruction to populate xbar values in f/b-xbar from d-xbar
i_temp = i_crs(['1'])
dict_list.append (i_temp.copy())
# Add a halt instruction
i_temp = i_hlt()
dict_list.append (i_temp.copy())
print (inst_file + ' generated...')
np.save (inst_file, dict_list)
print ('Total no. of instructions: ', len(dict_list))
## Simulate core
ima = ima.ima ()
fid = open(trace_file, "w+")
ima.pipe_init(inst_file, fid)
# program the xbars for matrix0_fw xbar (for functionality check of mvm, using just one matrix is fine)
for i in range (phy2log_ratio):
wt_temp = np.load(wt_path+'phy_xbar'+str(i)+'.npy')
ima.matrix_list[0]['f'][i].program(wt_temp)
ima.matrix_list[0]['b'][i].program(wt_temp)
cycle = 0
while (ima.halt != 1 and cycle < cfg.cycles_max):
ima.pipe_run (cycle, fid) # fid points to tracefile
cycle += 1
print (cycle)
fid.close ()
dump (ima, dump_file)
## Testcases for Functionality Debug of different instructions
## compare golden output to ima output
# 1. MVM instruction - inner-product (fw, bw xbars) -keep upto MVM ('110') instrn
wt_gold = np.load(wt_path+'log_xbar.npy')
out_gold = np.dot (ima.dataMem.memfile_float, wt_gold)
out_expF = ['']*cfg.xbar_size
out_expB = ['']*cfg.xbar_size
for i in range (cfg.xbar_size):
out_expF[i] = fixed2float(ima.xb_outMem_list[0]['f'].memfile[i], cfg.int_bits, cfg.frac_bits)
out_expB[i] = fixed2float(ima.xb_outMem_list[0]['b'].memfile[i], cfg.int_bits, cfg.frac_bits)
out_expF = np.asarray(out_expF)
out_expB = np.asarray(out_expB)
errF = abs(np.tanh(out_gold) - np.tanh(out_expF))
errB = abs(np.tanh(out_gold) - np.tanh(out_expB))
print ("fw xbar error has mean " + str(np.average(errF)) + " and stdev " + \
str(np.std(errF)))
print ("bw xbar error has mean " + str(np.average(errB)) + " and stdev " + \
str(np.std(errB)))
# 2. MVM instruction - inner-product (f,b xbar), followed by outer-product (d-xbar) - keep upto MVM ('001') instrn
out_goldD = cfg.lr * np.outer (out_expF, out_expB)
out_expD = ima.get_matrix (0, 'd')
errD = abs(out_goldD - out_expD)
print ("delta xbar error has mean " + str(np.average(errD)) + " and stdev " + \
str(np.std(errD)))
# 3. CRS instruction - read wt slices delta xbars, compose wt, write slices to f/b xbars - keep upto CRS instrn
out_expF = ima.get_matrix (0, 'f')
out_expB = ima.get_matrix (0, 'b')
out_expD = ima.get_matrix (0, 'd')
errFD = abs (out_expF - out_expD)
errBD = abs (out_expB - out_expD)
print ("f-d matrix error has mean " + str(np.average(errFD)) + " and stdev " + \
str(np.std(errFD)))
print ("b-d matrix error has mean " + str(np.average(errBD)) + " and stdev " + \
str(np.std(errBD)))
|
# -*- coding: utf-8 -*-
"""
Datalog extraction tool using VISA for Agilent 34410A Digital Multimeter
Matthew Sharpe 10-12-18
"""
import sys
import xlsxwriter
import visa
rm = visa.ResourceManager()
inst = rm.list_resources()
picked = 'x'
clear = ''
deviceDict = {}
#build device dictionary
for idx, item in enumerate(inst, start=1):
    deviceDict[idx] = item
#user instructions
print('')
print('This program extracts Datalog values in NVMEM via USB from Agilent 34410A DMM.\n')
print('An Excel file will be created in the same directory as this script.\n')
print('Please complete Datalogging manually before running this program.')
print('')
#printing Instrument List
print('Connected devices: ')
for item in deviceDict:
print(str(item) + ': ' + deviceDict[item])
#pick device loop
while picked not in deviceDict:
#get user input for device
try :
print('')
picked = int(input('Choose an instrument '))
except ValueError:
print("Invalid input! please select Device Number i.e. 1, 2, 3...)\n")
for item in deviceDict:
print(str(item) + ': ' + deviceDict[item])
myDevice = deviceDict[picked]
print('')
print('...reading NVMEM...\n')
#open resource and check for NVMEM data in DMM
try:
meter = rm.open_resource(myDevice)
data = meter.query('DATA:DATA? NVMEM')
points = meter.query('DATA:POINTS? NVMEM')
lp = points.split('\n')
print('')
print(lp[0] + ' points in NVMEM\n')
if data == '\n':
print('No data in NVMEM! Check DMM Datalog.')
sys.exit()
except Exception:  # do not swallow the SystemExit raised above when NVMEM is empty
t = input('Program will exit.')
sys.exit()
print('')
#format data from DMM
l = data.split(',')
f = [float(i) for i in l]
# Create a workbook and add a worksheet.
print('...writing to Excel...\n')
filename = input('Choose a filename: ')
workbook = xlsxwriter.Workbook(filename + '.xlsx')
worksheet = workbook.add_worksheet()
# Start from the first cell. Rows and columns are zero indexed.
row = 0
col = 0
# Iterate over the data and write it out row by row.
for item in (f):
worksheet.write(row, col, item)
row += 1
workbook.close()
print('Excel workbook created!\n')
try:
clear = input('Clear NVMEM data? (y/n)')
if clear == 'y':
meter.write('DATA:DEL NVMEM')
else:
meter.close()
except:
meter.close() |
import platform
import pickle
import os
import numpy as np
def load_pickle(f):
version = platform.python_version_tuple()
if version[0] == '2':
return pickle.load(f)
elif version[0] == '3':
return pickle.load(f, encoding='latin1')
raise ValueError('invalid python version: {}'.format(version))
def load_CIFAR_batch(filename):
""" load single batch of cifar """
with open(filename, 'rb') as f:
datadict = load_pickle(f)
X = datadict['data']
Y = datadict['labels']
X = X.reshape(10000, 3, 32, 32).transpose(0, 2, 3, 1).astype('float')
Y = np.array(Y)
return X, Y
def load_CIFAR10(ROOT):
""" load all of cifar """
xs = []
ys = []
for b in range(1, 6):
f = os.path.join(ROOT, 'data_batch_%d' % (b, ))
X, Y = load_CIFAR_batch(f)
xs.append(X)
ys.append(Y)
Xtr = np.concatenate(xs)
Ytr = np.concatenate(ys)
del X, Y
Xte, Yte = load_CIFAR_batch(os.path.join(ROOT, 'test_batch'))
return Xtr, Ytr, Xte, Yte
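# Hedged usage sketch (illustrative; the directory below is hypothetical and must
# contain the extracted python-version CIFAR-10 batches):
#   Xtr, Ytr, Xte, Yte = load_CIFAR10('datasets/cifar-10-batches-py')
#   print(Xtr.shape)  # (50000, 32, 32, 3) -- five training batches of 10000 images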
def get_CIFAR10_data(ROOT, num_training=49000, num_validation=1000, num_test=1000, subtract_mean=False):
# Load the raw CIFAR-10 data
X_train, y_train, X_test, y_test = load_CIFAR10(ROOT)
# Split data
mask = range(num_training, num_training+num_validation)
X_val = X_train[mask]
y_val = y_train[mask]
mask = range(num_training)
X_train = X_train[mask]
y_train = y_train[mask]
    mask = range(num_test)
X_test = X_test[mask]
y_test = y_test[mask]
# Normalize the data: subtract the mean image
if subtract_mean:
mean_image = np.mean(X_train, 0)
X_train -= mean_image
X_val -= mean_image
X_test -= mean_image
# Transpose so that channels come first
X_train = X_train.transpose(0, 3, 1, 2).copy()
X_val = X_val.transpose(0, 3, 1, 2).copy()
X_test = X_test.transpose(0, 3, 1, 2).copy()
# Package data into a dictionary
return {
'X_train': X_train, 'y_train': y_train,
'X_val': X_val, 'y_val': y_val,
'X_test': X_test, 'y_test': y_test
}
# # Preprocessing: reshape the image data into rows
# X_train = X_train.reshape(X_train.shape[0], -1)
# X_val = X_val.reshape(X_val.shape[0], -1)
# X_test = X_test.reshape(X_test.shape[0], -1)
#
# # Normalize the data: subtract the mean rows
# mean_image = np.mean(X_train, axis=0)
# X_train -= mean_image
# X_val -= mean_image
# X_test -= mean_image
    # return X_train, y_train, X_val, y_val, X_test, y_test  # unreachable leftover; the dict above is returned first
def load_imagenet_val(num=None):
"""
Load a handful of validation images from ImageNet.
Inputs:
- num: Number of images to load (max of 25)
Returns:
- X: numpy array with shape [num, 224, 224, 3]
- y: numpy array of integer image labels, shape [num]
- class_names: dict mapping integer label to class name
"""
imagenet_fn = 'datasets/imagenet_val_25.npz'
if not os.path.isfile(imagenet_fn):
print('file %s not found' % imagenet_fn)
assert False, 'Need to download imagenet_val_25.npz in datasets folder'
f = np.load(imagenet_fn)
X = f['X']
y = f['y']
class_names = f['label_map'].item()
if num is not None:
X = X[:num]
y = y[:num]
return X, y, class_names
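# Hedged usage sketch (illustrative; requires datasets/imagenet_val_25.npz to be present):
#   X, y, class_names = load_imagenet_val(num=5)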
|
#!/usr/bin/env python
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generates matplotlib line and bar charts from the netperf.csv raw data file
#
try:
import matplotlib.pyplot as plt
except Exception, e:
# Translate the traceback to a more friendly error message
print "Exception (%s) while importing matplotlib - install with apt-get install python-matplotlib (or equivalent package manager)" % e
raise
import numpy
from optparse import OptionParser
import os
parser = OptionParser()
parser.add_option("-c", "--csv", dest="csvfile", metavar="FILE",
help="Input CSV file")
parser.add_option("-o", "--outputdir", dest="outputdir", metavar="DIR",
help="Output Directory", default="/plotdata")
parser.add_option("-s", "--suffix", dest="suffix",
help="Generated file suffix")
(options, args) = parser.parse_args()
def getData(filename):
# Returns a list of lines split around the ',' and whitespace stripped
fd = open(filename, "rt")
lines = fd.readlines()
fd.close()
rl = []
for l in lines:
raw_elements = l.split(',')
elements = [ e.strip() for e in raw_elements ]
rl.append(elements)
return rl
colors = [ 'r', 'g', 'b', 'c', 'm', 'k', '#ff6677' ]
def convert_float(l):
rl = []
for e in l:
try:
rl.append(float(e))
except:
pass
return rl
if __name__ == "__main__":
data = getData(options.csvfile)
x_data = convert_float(data[0][2:])
plt.figure(figsize=(16,6))
plt.axis([0, 1500, 0, 45000])
chart = plt.subplot(111)
color_index = 0
for n in range(1, len(data)):
if len(data[n]) <= 4:
continue
y_dataset = convert_float(data[n][2:])
chart.plot(x_data, y_dataset, marker=".", label=data[n][0], color=colors[color_index], linewidth=1.5)
color_index += 1
plt.xlabel("{0} - MSS or Packet Size".format(options.suffix))
plt.ylabel("Mbits/sec")
plt.title(options.suffix)
# Shrink height by 10% on the bottom
box = chart.get_position()
chart.set_position([box.x0, box.y0,
box.width, box.height * 0.95])
plt.legend(loc="upper center", bbox_to_anchor=(0.5, 1.14,), ncol=3, borderaxespad=0.)
for ext in [ "png", "svg" ]:
fname = os.path.join(options.outputdir, "{0}.{1}".format(options.suffix, ext))
plt.savefig(fname, dpi=100)
print "Saved {0}".format(fname)
barlabels = []
barvalues = []
for n in range(1, len(data)):
        l = data[n][0]
splitOn='VM'
l = ('\n%s'%splitOn).join(l.split(splitOn))
barlabels.append(l)
barvalues.append(float(data[n][1]))
plt.clf()
plt.barh(bottom=range(0, len(data)-1),
height=0.5,
width=barvalues,
align='center')
plt.yticks(numpy.arange(len(data)-1),
barlabels)
plt.grid(True)
plt.title('Network Performance - Testcase {0}'.format(options.suffix))
plt.xlabel("Testcase {0} - Mbits/sec".format(options.suffix))
for ext in [ "png", "svg" ]:
fname = os.path.join(options.outputdir, "{0}.bar.{1}".format(options.suffix, ext))
plt.savefig(fname, dpi=100)
print "Saved {0}".format(fname)
|
from mc2p import MC2PClient as MC2PClientPython
__title__ = 'MyChoice2Pay Django'
__version__ = '0.1.3'
__author__ = 'MyChoice2Pay'
__license__ = 'BSD 2-Clause'
__copyright__ = 'Copyright 2017 MyChoice2Pay'
# Version synonym
VERSION = __version__
# Header encoding (see RFC5987)
HTTP_HEADER_ENCODING = 'iso-8859-1'
# Default datetime input and output formats
ISO_8601 = 'iso-8601'
default_app_config = 'django_mc2p.apps.DjangoMC2PConfig'
class MC2PClient(MC2PClientPython):
"""
Wrapper of MC2PClient of Python
"""
def __init__(self):
"""
Initializes a MC2PClient getting key and secret key from DB
"""
from .models import MC2PConfig
try:
mc2p_config = MC2PConfig.objects.get()
key = mc2p_config.key
secret_key = mc2p_config.secret_key
except:
key = ''
secret_key = ''
super(MC2PClient, self).__init__(key, secret_key)
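# Hedged usage sketch (illustrative; not part of the original module): once a
# MC2PConfig row exists in the database, the wrapper can be instantiated without
# arguments and used like the underlying Python client.
#   client = MC2PClient()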
|
"""API Discovery api.
The AXIS API Discovery service makes it possible to retrieve information about APIs supported on their products.
"""
import attr
from .api import APIItem, APIItems, Body
URL = "/axis-cgi/apidiscovery.cgi"
API_DISCOVERY_ID = "api-discovery"
API_VERSION = "1.0"
class ApiDiscovery(APIItems):
"""API Discovery for Axis devices."""
def __init__(self, request: object) -> None:
"""Initialize API discovery manager."""
super().__init__({}, request, URL, Api)
async def update(self) -> None:
"""Refresh data."""
raw = await self.get_api_list()
self.process_raw(raw)
@staticmethod
def pre_process_raw(raw: dict) -> dict:
"""Return a dictionary of discovered APIs."""
api_data = raw.get("data", {}).get("apiList", [])
return {api["id"]: api for api in api_data}
async def get_api_list(self) -> dict:
"""List all APIs registered on API Discovery service."""
return await self._request(
"post",
URL,
json=attr.asdict(
Body("getApiList", API_VERSION),
filter=attr.filters.exclude(attr.fields(Body).params),
),
)
async def get_supported_versions(self) -> dict:
"""Supported versions of API Discovery API."""
return await self._request(
"post",
URL,
json=attr.asdict(
Body("getSupportedVersions", API_VERSION),
filter=attr.filters.include(attr.fields(Body).method),
),
)
class Api(APIItem):
"""API Discovery item."""
@property
def name(self):
"""Name of API."""
return self.raw["name"]
@property
def version(self):
"""Version of API."""
return self.raw["version"]
|
import unittest
import os
import mock
import handler as sut
class TestHomeRequestWithNoPermission(unittest.TestCase):
def setUp(self):
os.environ['SKILL_ID'] = "TEST_SKILL_ID"
self.context = {
}
self.event = {
'session': {
'sessionId': 'unittest',
'application': {
'applicationId': "TEST_SKILL_ID"
}
},
'request': {
'requestId': 'test-homerequest',
'type': 'IntentRequest',
'intent': {
'name': 'HomeRequestIntent'
}
},
'context': {
'System': {
'user': {
'permissions': {
},
},
'device': {
'deviceId': 'TEST_DEVICE_ID'
},
'apiEndpoint': 'https://api.eu.amazonalexa.com'
}
}
}
self.result = sut.lambda_handler(self.event, self.context)
def testOutputSpeech(self):
self.assertEqual(
self.result['response']['outputSpeech'],
{
'text': "I'm sorry, I was not able to lookup your home town. "\
"With your permission, I can provide you with this information. "\
"Please check your companion app for details",
'type': "PlainText"})
def testShouldHaveCard(self):
self.assertEqual(self.result['response']['card'],
{
'type': 'AskForPermissionsConsent',
'permissions': [
'read::alexa:device:all:address'
]
})
def testShouldEndSession(self):
self.assertTrue(self.result['response']['shouldEndSession'])
def testResponse(self):
self.assertEqual(self.result['sessionAttributes'], {})
self.assertEqual(self.result['version'], "1.0")
if __name__ == "__main__":
unittest.main()
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from tap.problemae.solucion import puede_hallar_secuencia
class TestProblemaE:
def test_puede_hallar_secuencia(self):
assert puede_hallar_secuencia(
[(3, 2), (3, 1), (2, 1)]
) == 'S'
assert puede_hallar_secuencia(
[(2, 2), (3, 3), (1, 1)]
) == 'N'
assert puede_hallar_secuencia(
[(2, 3), (2, 1), (3, 1)]
) == 'S'
assert puede_hallar_secuencia(
[(2, 3), (3, 1), (1, 1)]
) == 'S'
assert puede_hallar_secuencia(
[(2, 3), (3, 1), (1, 2)]
) == 'N'
assert puede_hallar_secuencia(
[(2, 3, 1), (3, 1, 2), (2, 1, 3)]
) == 'S'
|
import xarray as xr
import panel as pn
from xrviz.dashboard import Dashboard, find_cmap_limits
import pytest
from . import data
from ..utils import _is_coord
from ..compatibility import has_cartopy, has_crick_tdigest
@pytest.fixture(scope='module')
def dashboard(data):
return Dashboard(data)
@pytest.fixture(scope='module')
def dash_for_Array(data):
return Dashboard(data.temp)
def test_dashboard_with_invalid_datatype():
with pytest.raises(ValueError):
Dashboard(str)
def test_check_is_plottable_1D(dashboard):
# `validtime` is 1D variable
dashboard.plot_button.disabled = False
dashboard.control.displayer.select_variable('validtime')
assert dashboard.plot_button.disabled
def test_check_is_plottable_coord(dashboard):
# `time` is data coordinate
dashboard.plot_button.disabled = False
dashboard.control.displayer.select_variable('time')
assert dashboard.plot_button.disabled
def test_check_is_plottable_other_vars(dashboard):
# `temp` is neither 1D nor coordinate
dashboard.plot_button.disabled = True
dashboard.control.displayer.select_variable('temp')
assert dashboard.plot_button.disabled is False
def test_2d_variable_for_dims(dashboard):
dashboard.control.displayer.select_variable('lat')
fields = dashboard.control.fields
assert fields.x.value == 'nx'
assert fields.y.value == 'ny'
assert [agg_sel.name for agg_sel in fields.agg_selectors] == []
dashboard.plot_button.clicks += 1
graph = dashboard.output[0]
assert isinstance(graph, pn.pane.holoviews.HoloViews)
assert [index_sel for index_sel in dashboard.output[1]] == []
def test_3d_variable_for_dims(dashboard):
dashboard.control.displayer.select_variable('air_v')
fields = dashboard.control.fields
assert fields.x.value == 'nx'
assert fields.y.value == 'ny'
agg_selectors = [agg_sel.name for agg_sel in fields.agg_selectors]
assert agg_selectors == ['time']
dashboard.plot_button.clicks += 1
graph = dashboard.output[0]
assert isinstance(graph, pn.pane.holoviews.HoloViews)
index_selectors = [index_sel.name for index_sel in dashboard.output[1]]
assert index_selectors == ['time']
@pytest.mark.parametrize('dashboard',
['dashboard', 'dash_for_Array'],
indirect=True)
def test_4d_variable_for_dims(dashboard):
dashboard.control.displayer.select_variable('temp')
fields = dashboard.control.fields
assert fields.x.value == 'nx'
assert fields.y.value == 'ny'
agg_selectors = [agg_sel.name for agg_sel in fields.agg_selectors]
assert agg_selectors == ['sigma', 'time']
dashboard.plot_button.clicks += 1
graph = dashboard.output[0]
assert isinstance(graph, pn.pane.holoviews.HoloViews)
index_selectors = [index_sel.name for index_sel in dashboard.output[1]]
assert index_selectors == ['sigma', 'time']
def test_link_aggregation_selectors(dashboard):
dashboard.control.displayer.select_variable('temp')
llim = dashboard.control.style.lower_limit
ulim = dashboard.control.style.upper_limit
llim.value = '10'
ulim.value = '20'
agg_sel = dashboard.control.fields.agg_selectors[0]
agg_sel.value = 'mean'
    assert llim.value == ''
    assert ulim.value == ''
def test_set_coords(dashboard):
new_coords = sorted(['time', 'sigma', 'lat', 'lon'])
dashboard.control.coord_setter.coord_selector.value = new_coords
assert sorted(list(dashboard.data.coords)) == new_coords
def test_animate_widget_for_dims(dashboard):
dashboard.control.coord_setter.coord_selector.value = ['time', 'sigma']
dashboard.control.displayer.select_variable('temp')
fields = dashboard.control.fields
agg_selectors = fields.agg_selectors
agg_selectors[0].value = 'animate'
dashboard.create_graph()
assert isinstance(dashboard.output[0], pn.pane.holoviews.HoloViews)
assert isinstance(dashboard.output[1][0][1], pn.widgets.player.DiscretePlayer)
@pytest.mark.skipif(not has_cartopy, reason='cartopy not present')
def test_with_is_geo_projection(dashboard):
dashboard.control.coord_setter.coord_selector.value = ['lat', 'lon']
dashboard.control.displayer.select_variable('temp')
proj_panel = dashboard.control.projection
proj_panel.is_geo.value = True
proj_panel.projection.value = 'Orthographic'
proj_panel.proj_params.value = "{'central_longitude': -78, 'central_latitude': 43, 'globe': None}"
proj_panel.global_extent.value = True
dashboard.create_graph()
assert isinstance(dashboard.output[0], pn.pane.holoviews.HoloViews)
assert isinstance(dashboard.output[1][0], pn.widgets.select.Select)
@pytest.mark.skipif(not has_cartopy, reason='cartopy not present')
def test_with_is_geo_basemap(dashboard):
dashboard.control.coord_setter.coord_selector.value = ['lat', 'lon']
dashboard.control.displayer.select_variable('temp')
proj_panel = dashboard.control.projection
proj_panel.is_geo.value = True
dashboard.create_graph()
assert isinstance(dashboard.output[0], pn.pane.holoviews.HoloViews)
assert isinstance(dashboard.output[1][0], pn.widgets.select.Select)
def test_with_aggregations_for_dims(dashboard):
dashboard.control.coord_setter.coord_selector.value = ['time', 'sigma']
dashboard.control.displayer.select_variable('temp')
fields = dashboard.control.fields
agg_selectors = fields.agg_selectors
agg_selectors[0].value = 'max'
agg_selectors[1].value = 'count'
dashboard.plot_button.clicks += 1
assert isinstance(dashboard.output[0][0], pn.pane.holoviews.HoloViews)
def test_with_aggregations_for_coords(dashboard):
dashboard.control.displayer.select_variable('temp')
dashboard.control.coord_setter.coord_selector.value = ['lat', 'lon']
agg_selectors = dashboard.control.fields.agg_selectors
agg_selectors[0].value = 'max'
agg_selectors[1].value = 'count'
dashboard.plot_button.clicks += 1
assert isinstance(dashboard.output[0][0], pn.pane.holoviews.HoloViews)
def test_color_scaling_for_dims(dashboard):
dashboard.control.coord_setter.coord_selector.value = ['time', 'sigma']
dashboard.control.displayer.select_variable('temp')
style = dashboard.control.style
style.color_scale.value = 'log'
dashboard.create_graph()
assert isinstance(dashboard.output[0][0], pn.pane.holoviews.HoloViews)
def test_color_scaling_for_coords(dashboard):
dashboard.control.coord_setter.coord_selector.value = ['lat', 'lon']
dashboard.control.displayer.select_variable('temp')
style = dashboard.control.style
style.color_scale.value = 'log'
dashboard.create_graph()
assert isinstance(dashboard.output[0][0], pn.pane.holoviews.HoloViews)
def test_use_all_data_for_dims(dashboard):
dashboard.control.coord_setter.coord_selector.value = ['time', 'sigma']
dashboard.control.displayer.select_variable('temp')
dashboard.control.style.use_all_data.value = True
dashboard.create_graph()
assert isinstance(dashboard.output[0][0], pn.pane.holoviews.HoloViews)
def test_use_all_data_for_coords(dashboard):
dashboard.control.coord_setter.coord_selector.value = ['lat', 'lon']
dashboard.control.displayer.select_variable('temp')
dashboard.control.style.use_all_data.value = True
dashboard.create_graph()
assert isinstance(dashboard.output[0][0], pn.pane.holoviews.HoloViews)
def test_create_taps_and_series_graph_for_dims(dashboard):
dashboard.control.coord_setter.coord_selector.value = ['time', 'sigma']
dashboard.control.displayer.select_variable('temp')
dashboard.control.fields.s_selector.value = 'sigma'
dashboard.create_graph()
dashboard.create_taps_graph(x=35, y=10)
assert isinstance(dashboard.series_graph[0], pn.pane.holoviews.HoloViews)
def test_create_taps_and_series_graph_for_2d_coords(dashboard):
dashboard.control.coord_setter.coord_selector.value = ['lat', 'lon']
dashboard.control.displayer.select_variable('temp')
dashboard.control.fields.s_selector.value = 'sigma'
dashboard.create_graph()
dashboard.create_taps_graph(x=-79.232, y=43.273)
assert isinstance(dashboard.series_graph[0], pn.pane.holoviews.HoloViews)
@pytest.mark.skipif(not has_crick_tdigest, reason='crick.tdigest not present')
def test_find_cmap_limits_with_crick_tdigest():
ds = xr.tutorial.open_dataset('air_temperature',
chunks={'lat': 25, 'lon': 25, 'time': 10})
a, b = find_cmap_limits(ds.air)
assert abs(a - 255.38780056044027) < 0.1 and abs(b - 298.5900340551101) < 0.1
|
from flask import Blueprint, jsonify, request
from project.api.models import User
users_blueprint = Blueprint('users', __name__, template_folder='./templates')
@users_blueprint.route('/users/ping', methods=['GET'])
def ping_pong():
return jsonify({
'status': 'success',
'message': 'pong!'
})
@users_blueprint.route('/users', methods=['GET'])
def get_all_users():
"""Get all users"""
response_object = {
'status': 'success',
'data': {
'users': [user.to_json() for user in User.query.all()]
}
}
return jsonify(response_object), 200
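# Hedged usage sketch (illustrative; not part of the original module): the blueprint
# above is typically registered on the Flask app inside the application factory, e.g.
#   app.register_blueprint(users_blueprint)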
|
import dtcv
import matplotlib.pyplot as plt
import numpy as np
from shapely import wkt
def grid_shape(i, max_x=4):
"""Return a good grid shape, in x,y, for a number if items i"""
from math import sqrt, ceil
x = round(sqrt(i))
if x > max_x:
x = max_x
y = ceil(i / x)
return x, y
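# Hedged usage sketch (illustrative; not part of the original module):
#   grid_shape(10)           -> (3, 4)  # 3 columns, 4 rows
#   grid_shape(10, max_x=2)  -> (2, 5)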
def plot_image_and_poly(r, shape=None, figsize=(20, 20), max_x=4, titlef=None):
"""Given a row in the intersection_regions dataframe, plot the image and
the intersection polygon"""
try:
records = [e[1] for e in r.iterrows()]
ncols, nrows = grid_shape(len(r), max_x=max_x)
except AttributeError:
records = [r]
ncols, nrows = 1, 1
if shape is not None:
ncols, nrows = shape
fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=figsize)
try:
axes = axes.flat
except AttributeError:
axes = [axes]
for ax, rec in zip(axes, records):
img = dtcv.get_image(rec.image_url)
ax.imshow(img)
pts = np.array((wkt.loads(rec.source)).exterior.coords)
ax.plot([e[0] for e in pts], [e[1] for e in pts], marker='s', color='red')
if titlef:
ax.set_title(titlef(rec))
# Generate random colormap
# From https://raw.githubusercontent.com/delestro/rand_cmap/master/rand_cmap.py
def rand_cmap(nlabels, type='bright', first_color_black=True, last_color_black=False):
"""
Creates a random colormap to be used together with matplotlib. Useful for segmentation tasks
:param nlabels: Number of labels (size of colormap)
:param type: 'bright' for strong colors, 'soft' for pastel colors
:param first_color_black: Option to use first color as black, True or False
:param last_color_black: Option to use last color as black, True or False
:return: colormap for matplotlib
"""
from matplotlib.colors import LinearSegmentedColormap
import colorsys
import numpy as np
# Generate color map for bright colors, based on hsv
if type == 'bright':
randHSVcolors = [(np.random.uniform(low=0.0, high=1),
np.random.uniform(low=0.2, high=1),
np.random.uniform(low=0.9, high=1)) for i in range(nlabels)]
# Convert HSV list to RGB
randRGBcolors = []
for HSVcolor in randHSVcolors:
randRGBcolors.append(colorsys.hsv_to_rgb(HSVcolor[0], HSVcolor[1], HSVcolor[2]))
if first_color_black:
randRGBcolors[0] = [0, 0, 0]
if last_color_black:
randRGBcolors[-1] = [0, 0, 0]
random_colormap = LinearSegmentedColormap.from_list('new_map', randRGBcolors, N=nlabels)
# Generate soft pastel colors, by limiting the RGB spectrum
elif type == 'soft':
low = 0.6
high = 0.95
randRGBcolors = [(np.random.uniform(low=low, high=high),
np.random.uniform(low=low, high=high),
np.random.uniform(low=low, high=high)) for i in range(nlabels)]
if first_color_black:
randRGBcolors[0] = [0, 0, 0]
if last_color_black:
randRGBcolors[-1] = [0, 0, 0]
random_colormap = LinearSegmentedColormap.from_list('new_map', randRGBcolors, N=nlabels)
return random_colormap
def show_colormap(cmap):
from matplotlib import colors, colorbar
from matplotlib import pyplot as plt
import numpy as np
fig, ax = plt.subplots(1, 1, figsize=(15, 0.5))
nlabels = cmap.N
bounds = np.linspace(0, nlabels, nlabels + 1)
norm = colors.BoundaryNorm(bounds, nlabels)
cb = colorbar.ColorbarBase(ax, cmap=cmap, norm=norm, spacing='proportional', ticks=None,
boundaries=bounds, format='%1i', orientation=u'horizontal')
def resample_reorder_cmap(cmap, N, seed=1337):
'''Downsamples a colormap, then reorder the colors randomly
>>> resample_reorder_cmap(plt.get_cmap('plasma'),5)
'''
from matplotlib.colors import LinearSegmentedColormap
import random
rcmap = cmap._resample(N)
l = [rcmap(i) for i in range(N)] # Extract all of the values
random.seed(seed) # Always get the same ordering.
random.shuffle(l)
return LinearSegmentedColormap.from_list('rand_' + cmap.name, l, N=N)
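# Hedged usage sketch (illustrative; not part of the original module): build a random
# label colormap with the helpers above and preview it.
#   cmap = rand_cmap(20, type='bright', first_color_black=True)
#   show_colormap(cmap)
#   small = resample_reorder_cmap(plt.get_cmap('plasma'), 5)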
|
import torch
from torchvision import models, datasets, transforms
from torch import nn, optim
import torch.nn.functional as F
from workspace_utils import active_session
import argparse
from load_and_preprocess_data import load_preprocess_data
from PIL import Image
import numpy as np
import json
# Create the parser
my_parser = argparse.ArgumentParser(description='predicts class from an image along with probability.')
# Add the arguments
my_parser.add_argument('image_path',
metavar='image_path',
type=str,
help='the path to a single image')
my_parser.add_argument('checkpoint_dir', nargs='?', default = '/home/workspace/ImageClassifier/checkpoint3.pth', type=str, help='directory path to get checkpoint.')
my_parser.add_argument('--top_k', default = 3, type=int, help='return top k most likely classes')
my_parser.add_argument('--category_names', default = '/home/workspace/ImageClassifier/cat_to_name.json', type=str, help='path to json file with all classes')
my_parser.add_argument('--gpu', action = 'store_true', help='will run the model on gpu if cuda is available')
# Execute parse_args()
args = my_parser.parse_args()
if torch.cuda.is_available():
map_location=lambda storage, loc: storage.cuda()
else:
map_location='cpu'
checkpoint = torch.load(args.checkpoint_dir, map_location = map_location)
model = models.__dict__[checkpoint['arch']](pretrained=True)
for param in model.parameters():
param.requires_grad = False
model.classifier = checkpoint['classifier']
model.load_state_dict(checkpoint['state_dict'])
model.class_to_idx = checkpoint['class_to_idx']
device = torch.device("cuda" if torch.cuda.is_available() and args.gpu else "cpu")
#Move model to cpu/gpu based on user's preference.
model.to(device)
#function to process an image
def process_image(image):
''' Scales, crops, and normalizes a PIL image for a PyTorch model,
returns an Numpy array
'''
# Process a PIL image for use in a PyTorch model
im = Image.open(image)
im = im.resize((256, 256))
im = im.crop((16, 16, 240, 240))
np_image = (np.array(im))/255
means = [0.485, 0.456, 0.406]
sds = [0.229, 0.224, 0.225]
np_image = (np_image - means)/sds
np_image = np_image.transpose(2,0,1)
return np_image
class_to_idx = model.class_to_idx
#function to predict a single image
def predict(image_path, model, topk=3):
''' Predict the class (or classes) of an image using a trained deep learning model.
'''
top_class = []
model.eval()
# Implement the code to predict the class from an image file
image = process_image(image_path)
image = torch.from_numpy(image).float()
image = image.unsqueeze(0)
image = image.to(device)
with torch.no_grad():
log_ps = model.forward(image)
ps = torch.exp(log_ps)
top_p, top_idx = ps.topk(topk, dim = 1)
top_p = top_p.cpu().numpy()[0]
#idx_to_class = dict(map(reversed,class_to_idx.items())) #no need to convert as they are already converted in training part
top_idx = top_idx.cpu().numpy()[0].tolist()
for idx in top_idx:
top_class.append(class_to_idx[idx])
return top_p, top_class
#predict an image
probs, classes = predict(image_path = args.image_path, model = model, topk = args.top_k)
if args.category_names:
#a json file that has all output classes. Each class is associated with a name.
# json file is read and is converted to a dictionary
with open(args.category_names, 'r') as f:
cat_to_name = json.load(f)
class_names= []
for i in classes:
class_names.append(cat_to_name[i])
print(class_names, probs)
else:
print(classes, probs)
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'conversion_dialog.ui'
#
# Created: Wed Oct 3 15:38:47 2012
# by: PyQt4 UI code generator 4.9.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_conversion_dialog(object):
def setupUi(self, conversion_dialog):
conversion_dialog.setObjectName("conversion_dialog")
conversion_dialog.setWindowModality(QtCore.Qt.NonModal)
conversion_dialog.resize(284, 199)
conversion_dialog.setModal(True)
self.formLayout = QtWidgets.QFormLayout(conversion_dialog)
self.formLayout.setFieldGrowthPolicy(QtWidgets.QFormLayout.FieldsStayAtSizeHint)
self.formLayout.setObjectName("formLayout")
self.label = QtWidgets.QLabel(conversion_dialog)
self.label.setObjectName("label")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.SpanningRole, self.label)
self.label_2 = QtWidgets.QLabel(conversion_dialog)
self.label_2.setObjectName("label_2")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label_2)
self.buttonBox = QtWidgets.QDialogButtonBox(conversion_dialog)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.buttonBox.sizePolicy().hasHeightForWidth())
self.buttonBox.setSizePolicy(sizePolicy)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
self.buttonBox.setObjectName("buttonBox")
self.formLayout.setWidget(4, QtWidgets.QFormLayout.SpanningRole, self.buttonBox)
self.stimulus_on_box = QtWidgets.QSpinBox(conversion_dialog)
self.stimulus_on_box.setMaximum(1000000)
self.stimulus_on_box.setProperty("value", 8)
self.stimulus_on_box.setObjectName("stimulus_on_box")
self.formLayout.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.stimulus_on_box)
self.stimulus_end_box = QtWidgets.QSpinBox(conversion_dialog)
self.stimulus_end_box.setMaximum(1000000)
self.stimulus_end_box.setProperty("value", 16)
self.stimulus_end_box.setObjectName("stimulus_end_box")
self.formLayout.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.stimulus_end_box)
self.label_3 = QtWidgets.QLabel(conversion_dialog)
self.label_3.setObjectName("label_3")
self.formLayout.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.label_3)
self.label_4 = QtWidgets.QLabel(conversion_dialog)
self.label_4.setObjectName("label_4")
self.formLayout.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.label_4)
self.framerate_box = QtWidgets.QDoubleSpinBox(conversion_dialog)
self.framerate_box.setMaximum(100000.0)
self.framerate_box.setSingleStep(0.1)
self.framerate_box.setProperty("value", 4.0)
self.framerate_box.setObjectName("framerate_box")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.framerate_box)
self.retranslateUi(conversion_dialog)
self.buttonBox.accepted.connect(conversion_dialog.accept)
self.buttonBox.rejected.connect(conversion_dialog.reject)
QtCore.QMetaObject.connectSlotsByName(conversion_dialog)
def retranslateUi(self, conversion_dialog):
conversion_dialog.setWindowTitle(QtCore.QCoreApplication.translate("conversion_dialog", "Dialog", None))
self.label.setText(QtCore.QCoreApplication.translate("conversion_dialog", "bla", None))
self.label_2.setText(QtCore.QCoreApplication.translate("conversion_dialog", "framerate", None))
self.label_3.setText(QtCore.QCoreApplication.translate("conversion_dialog", "stimulus onset (frame)", None))
self.label_4.setText(QtCore.QCoreApplication.translate("conversion_dialog", "stimulus end (frame)", None))
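# Hedged usage sketch (illustrative; not part of the generated file): the class above
# is normally applied to a QDialog instance, e.g.
#   dialog = QtWidgets.QDialog()
#   ui = Ui_conversion_dialog()
#   ui.setupUi(dialog)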
|
from unittest import TestCase
import responses
from tamr_unify_client import Client
from tamr_unify_client.auth import UsernamePasswordAuth
from tamr_unify_client.models.attribute_configuration.collection import (
AttributeConfigurationCollection,
)
class TestAttributeConfigurationCollection(TestCase):
def setUp(self):
auth = UsernamePasswordAuth("username", "password")
self.unify = Client(auth)
@responses.activate
def test_by_relative_id(self):
ac_url = f"http://localhost:9100/api/versioned/v1/projects/1/attributeConfigurations/1"
alias = "projects/1/attributeConfigurations/"
ac_test = AttributeConfigurationCollection(self.unify, alias)
expected = self.acc_json[0]["relativeId"]
responses.add(responses.GET, ac_url, json=self.acc_json[0])
self.assertEqual(
expected,
ac_test.by_relative_id("projects/1/attributeConfigurations/1").relative_id,
)
@responses.activate
def test_by_resource_id(self):
ac_url = f"http://localhost:9100/api/versioned/v1/projects/1/attributeConfigurations/1"
alias = "projects/1/attributeConfigurations/"
ac_test = AttributeConfigurationCollection(self.unify, alias)
expected = self.acc_json[0]["relativeId"]
responses.add(responses.GET, ac_url, json=self.acc_json[0])
self.assertEqual(expected, ac_test.by_resource_id("1").relative_id)
@responses.activate
def test_create(self):
url = (
f"http://localhost:9100/api/versioned/v1/projects/1/attributeConfigurations"
)
project_url = f"http://localhost:9100/api/versioned/v1/projects/1"
responses.add(responses.GET, project_url, json=self.project_json)
responses.add(responses.GET, url, json={})
responses.add(responses.POST, url, json=self.create_json, status=204)
responses.add(responses.GET, url, json=self.create_json)
attributeconfig = (
self.unify.projects.by_resource_id("1")
.as_mastering()
.attribute_configurations()
)
create = attributeconfig.create(self.create_json)
self.assertEqual(create.relative_id, self.create_json["relativeId"])
@responses.activate
def test_stream(self):
ac_url = f"http://localhost:9100/api/versioned/v1/projects/1/attributeConfigurations/"
alias = "projects/1/attributeConfigurations/"
ac_test = AttributeConfigurationCollection(self.unify, alias)
responses.add(responses.GET, ac_url, json=self.acc_json)
streamer = ac_test.stream()
stream_content = []
for char in streamer:
stream_content.append(char._data)
self.assertEqual(self.acc_json, stream_content)
create_json = {
"id": "unify://unified-data/v1/projects/1/attributeConfigurations/35",
"relativeId": "projects/1/attributeConfigurations/35",
"relativeAttributeId": "datasets/79/attributes/Tester",
"attributeRole": "",
"similarityFunction": "ABSOLUTE_DIFF",
"enabledForMl": False,
"tokenizer": "",
"numericFieldResolution": [],
"attributeName": "Tester",
}
project_json = {
"id": "unify://unified-data/v1/projects/1",
"externalId": "project 1 external ID",
"name": "project 1 name",
"description": "project 1 description",
"type": "DEDUP",
"unifiedDatasetName": "project 1 unified dataset",
"created": {
"username": "admin",
"time": "2018-09-10T16:06:20.636Z",
"version": "project 1 created version",
},
"lastModified": {
"username": "admin",
"time": "2018-09-10T16:06:20.851Z",
"version": "project 1 modified version",
},
"relativeId": "projects/1",
}
acc_json = [
{
"id": "unify://unified-data/v1/projects/1/attributeConfigurations/1",
"relativeId": "projects/1/attributeConfigurations/1",
"relativeAttributeId": "datasets/8/attributes/suburb",
"attributeRole": "",
"similarityFunction": "COSINE",
"enabledForMl": True,
"tokenizer": "DEFAULT",
"numericFieldResolution": [],
"attributeName": "suburb",
},
{
"id": "unify://unified-data/v1/projects/1/attributeConfigurations/2",
"relativeId": "projects/1/attributeConfigurations/2",
"relativeAttributeId": "datasets/8/attributes/sex",
"attributeRole": "",
"similarityFunction": "COSINE",
"enabledForMl": True,
"tokenizer": "DEFAULT",
"numericFieldResolution": [],
"attributeName": "sex",
},
{
"id": "unify://unified-data/v1/projects/1/attributeConfigurations/3",
"relativeId": "projects/1/attributeConfigurations/3",
"relativeAttributeId": "datasets/8/attributes/address_2",
"attributeRole": "",
"similarityFunction": "COSINE",
"enabledForMl": True,
"tokenizer": "DEFAULT",
"numericFieldResolution": [],
"attributeName": "address_2",
},
{
"id": "unify://unified-data/v1/projects/1/attributeConfigurations/4",
"relativeId": "projects/1/attributeConfigurations/4",
"relativeAttributeId": "datasets/8/attributes/age",
"attributeRole": "",
"similarityFunction": "COSINE",
"enabledForMl": True,
"tokenizer": "DEFAULT",
"numericFieldResolution": [],
"attributeName": "age",
},
{
"id": "unify://unified-data/v1/projects/1/attributeConfigurations/5",
"relativeId": "projects/1/attributeConfigurations/5",
"relativeAttributeId": "datasets/8/attributes/culture",
"attributeRole": "",
"similarityFunction": "COSINE",
"enabledForMl": True,
"tokenizer": "DEFAULT",
"numericFieldResolution": [],
"attributeName": "culture",
},
{
"id": "unify://unified-data/v1/projects/1/attributeConfigurations/6",
"relativeId": "projects/1/attributeConfigurations/6",
"relativeAttributeId": "datasets/8/attributes/street_number",
"attributeRole": "",
"similarityFunction": "COSINE",
"enabledForMl": True,
"tokenizer": "DEFAULT",
"numericFieldResolution": [],
"attributeName": "street_number",
},
{
"id": "unify://unified-data/v1/projects/1/attributeConfigurations/7",
"relativeId": "projects/1/attributeConfigurations/7",
"relativeAttributeId": "datasets/8/attributes/postcode",
"attributeRole": "",
"similarityFunction": "COSINE",
"enabledForMl": True,
"tokenizer": "DEFAULT",
"numericFieldResolution": [],
"attributeName": "postcode",
},
{
"id": "unify://unified-data/v1/projects/1/attributeConfigurations/8",
"relativeId": "projects/1/attributeConfigurations/8",
"relativeAttributeId": "datasets/8/attributes/phone_number",
"attributeRole": "",
"similarityFunction": "COSINE",
"enabledForMl": True,
"tokenizer": "DEFAULT",
"numericFieldResolution": [],
"attributeName": "phone_number",
},
{
"id": "unify://unified-data/v1/projects/1/attributeConfigurations/9",
"relativeId": "projects/1/attributeConfigurations/9",
"relativeAttributeId": "datasets/8/attributes/soc_sec_id",
"attributeRole": "",
"similarityFunction": "COSINE",
"enabledForMl": True,
"tokenizer": "DEFAULT",
"numericFieldResolution": [],
"attributeName": "soc_sec_id",
},
{
"id": "unify://unified-data/v1/projects/1/attributeConfigurations/10",
"relativeId": "projects/1/attributeConfigurations/10",
"relativeAttributeId": "datasets/8/attributes/rec2_id",
"attributeRole": "",
"similarityFunction": "COSINE",
"enabledForMl": True,
"tokenizer": "DEFAULT",
"numericFieldResolution": [],
"attributeName": "rec2_id",
},
{
"id": "unify://unified-data/v1/projects/1/attributeConfigurations/11",
"relativeId": "projects/1/attributeConfigurations/11",
"relativeAttributeId": "datasets/8/attributes/date_of_birth",
"attributeRole": "",
"similarityFunction": "COSINE",
"enabledForMl": True,
"tokenizer": "DEFAULT",
"numericFieldResolution": [],
"attributeName": "date_of_birth",
},
{
"id": "unify://unified-data/v1/projects/1/attributeConfigurations/12",
"relativeId": "projects/1/attributeConfigurations/12",
"relativeAttributeId": "datasets/8/attributes/title",
"attributeRole": "",
"similarityFunction": "COSINE",
"enabledForMl": True,
"tokenizer": "DEFAULT",
"numericFieldResolution": [],
"attributeName": "title",
},
{
"id": "unify://unified-data/v1/projects/1/attributeConfigurations/13",
"relativeId": "projects/1/attributeConfigurations/13",
"relativeAttributeId": "datasets/8/attributes/address_1",
"attributeRole": "",
"similarityFunction": "COSINE",
"enabledForMl": True,
"tokenizer": "DEFAULT",
"numericFieldResolution": [],
"attributeName": "address_1",
},
{
"id": "unify://unified-data/v1/projects/1/attributeConfigurations/14",
"relativeId": "projects/1/attributeConfigurations/14",
"relativeAttributeId": "datasets/8/attributes/rec_id",
"attributeRole": "",
"similarityFunction": "COSINE",
"enabledForMl": True,
"tokenizer": "DEFAULT",
"numericFieldResolution": [],
"attributeName": "rec_id",
},
{
"id": "unify://unified-data/v1/projects/1/attributeConfigurations/15",
"relativeId": "projects/1/attributeConfigurations/15",
"relativeAttributeId": "datasets/8/attributes/state",
"attributeRole": "",
"similarityFunction": "COSINE",
"enabledForMl": True,
"tokenizer": "DEFAULT",
"numericFieldResolution": [],
"attributeName": "state",
},
{
"id": "unify://unified-data/v1/projects/1/attributeConfigurations/16",
"relativeId": "projects/1/attributeConfigurations/16",
"relativeAttributeId": "datasets/8/attributes/family_role",
"attributeRole": "",
"similarityFunction": "COSINE",
"enabledForMl": True,
"tokenizer": "DEFAULT",
"numericFieldResolution": [],
"attributeName": "family_role",
},
{
"id": "unify://unified-data/v1/projects/1/attributeConfigurations/17",
"relativeId": "projects/1/attributeConfigurations/17",
"relativeAttributeId": "datasets/8/attributes/blocking_number",
"attributeRole": "",
"similarityFunction": "COSINE",
"enabledForMl": True,
"tokenizer": "DEFAULT",
"numericFieldResolution": [],
"attributeName": "blocking_number",
},
{
"id": "unify://unified-data/v1/projects/1/attributeConfigurations/18",
"relativeId": "projects/1/attributeConfigurations/18",
"relativeAttributeId": "datasets/8/attributes/surname",
"attributeRole": "CLUSTER_NAME_ATTRIBUTE",
"similarityFunction": "COSINE",
"enabledForMl": True,
"tokenizer": "DEFAULT",
"numericFieldResolution": [],
"attributeName": "surname",
},
{
"id": "unify://unified-data/v1/projects/1/attributeConfigurations/19",
"relativeId": "projects/1/attributeConfigurations/19",
"relativeAttributeId": "datasets/8/attributes/given_name",
"attributeRole": "",
"similarityFunction": "COSINE",
"enabledForMl": True,
"tokenizer": "DEFAULT",
"numericFieldResolution": [],
"attributeName": "given_name",
},
{
"id": "unify://unified-data/v1/projects/1/attributeConfigurations/20",
"relativeId": "projects/1/attributeConfigurations/20",
"relativeAttributeId": "datasets/8/attributes/Address1",
"attributeRole": "",
"similarityFunction": "COSINE",
"enabledForMl": False,
"tokenizer": "DEFAULT",
"numericFieldResolution": [],
"attributeName": "Address1",
},
{
"id": "unify://unified-data/v1/projects/1/attributeConfigurations/21",
"relativeId": "projects/1/attributeConfigurations/21",
"relativeAttributeId": "datasets/8/attributes/Address2",
"attributeRole": "",
"similarityFunction": "COSINE",
"enabledForMl": False,
"tokenizer": "DEFAULT",
"numericFieldResolution": [],
"attributeName": "Address2",
},
]
|
import multiprocessing
import threading
import traceback
from daphne.testing import _reinstall_reactor
from pytest_django.plugin import _blocking_manager
class DaphneThread(threading.Thread):
def __init__(self, host, application, kwargs=None, setup=None, teardown=None):
super().__init__()
self.host = host
self.application = application
self.kwargs = kwargs or {}
self.setup = setup or (lambda: None)
self.teardown = teardown or (lambda: None)
self.port = multiprocessing.Value("i")
self.ready = threading.Event()
self.errors = multiprocessing.Queue()
def run(self):
        # This server runs in a background thread of the test process rather than a
        # forked child, but the asyncio-based reactor may already have been installed
        # and used, so uninstall the old reactor and install a fresh one first.
_reinstall_reactor()
from twisted.internet import reactor
from daphne.server import Server
from daphne.endpoints import build_endpoint_description_strings
try:
# Create the server class
endpoints = build_endpoint_description_strings(host=self.host, port=0)
self.server = Server(
application=self.application,
endpoints=endpoints,
signal_handlers=False,
**self.kwargs
)
# Set up a poller to look for the port
reactor.callLater(0.1, self.resolve_port)
# Run with setup/teardown
self.setup()
try:
self.server.run()
finally:
self.teardown()
except Exception as e:
# Put the error on our queue so the parent gets it
self.errors.put((e, traceback.format_exc()))
def resolve_port(self):
from twisted.internet import reactor
if self.server.listening_addresses:
self.port.value = self.server.listening_addresses[0][1]
self.ready.set()
else:
reactor.callLater(0.1, self.resolve_port)
    def terminate(self):
        from twisted.internet import reactor
        # reactor.stop() is not thread-safe, so schedule it on the reactor thread.
        reactor.callFromThread(reactor.stop)
# if hasattr(self, 'httpd'):
# # Stop the WSGI server
# self.httpd.shutdown()
# self.httpd.server_close()
self.join()
_blocking_manager.unblock()
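# Usage sketch (an illustration, not part of the original helper): `application`
# stands for whatever ASGI app the test supplies, and the helper name below is made up.
def _run_daphne_for_test(application, host="127.0.0.1", timeout=5):
    server_thread = DaphneThread(host, application)
    server_thread.start()
    if not server_thread.ready.wait(timeout):
        raise RuntimeError("Daphne did not report a listening port in time")
    port = server_thread.port.value
    # ... exercise http://{host}:{port}/ here, then shut the server down ...
    server_thread.terminate()
    return port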
|
from . import client, aioclient, parse
__all__ = [
'client',
'aioclient',
'parse'
]
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 25 14:03:27 2021
@author: asus
"""
import pandas as pd
import streamlit as st
from sklearn.linear_model import LogisticRegression
from pickle import dump
from pickle import load
st.title('Model Deployment: Logistic Regression')
st.title("authour-prathamesh deore")
st.sidebar.header('User Input Parameters')
def user_input_features():
CLMSEX = st.sidebar.selectbox('Gender',('1','0'))
CLMINSUR = st.sidebar.selectbox('Insurance',('1','0'))
SEATBELT = st.sidebar.selectbox('SeatBelt',('1','0'))
CLMAGE = st.sidebar.number_input("Insert the Age")
LOSS = st.sidebar.number_input("Insert Loss")
data = {'CLMSEX':CLMSEX,
'CLMINSUR':CLMINSUR,
'SEATBELT':SEATBELT,
'CLMAGE':CLMAGE,
'LOSS':LOSS}
features = pd.DataFrame(data,index = [0])
return features
df = user_input_features()
st.subheader('User Input parameters')
st.write(df)
# load the model from disk
loaded_model = load(open('MyLogisticModel.sav', 'rb'))
prediction = loaded_model.predict(df)
prediction_proba = loaded_model.predict_proba(df)
st.subheader('Predicted Result')
st.write('Yes' if prediction_proba[0][1] > 0.5 else 'No')
st.subheader('Prediction Probability')
st.write(prediction_proba)
claimants=pd.read_csv(r'C:\Users\asus\Desktop\my desk\data science\logistic regression\claimants.csv')
st.bar_chart(claimants)
st.line_chart(claimants)
st.color_picker('red')
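# For reference only: the pickled 'MyLogisticModel.sav' used above could have been
# produced with a snippet along these lines (an assumption -- the target column
# 'ATTORNEY' and the training code are illustrative, not the author's confirmed
# pipeline; `dump` is already imported at the top of this script):
#
#     train = claimants.dropna()
#     X = train[['CLMSEX', 'CLMINSUR', 'SEATBELT', 'CLMAGE', 'LOSS']]
#     y = train['ATTORNEY']
#     model = LogisticRegression().fit(X, y)
#     dump(model, open('MyLogisticModel.sav', 'wb'))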
|
import json
import hmac
import hashlib
from quart import url_for
from ecdsa import SECP256k1, SigningKey # type: ignore
from lnurl import encode as lnurl_encode # type: ignore
from typing import List, NamedTuple, Optional, Dict
from sqlite3 import Row
from lnbits.settings import WALLET
class User(NamedTuple):
id: str
email: str
extensions: List[str] = []
wallets: List["Wallet"] = []
password: Optional[str] = None
@property
def wallet_ids(self) -> List[str]:
return [wallet.id for wallet in self.wallets]
def get_wallet(self, wallet_id: str) -> Optional["Wallet"]:
w = [wallet for wallet in self.wallets if wallet.id == wallet_id]
return w[0] if w else None
class Wallet(NamedTuple):
id: str
name: str
user: str
adminkey: str
inkey: str
balance_msat: int
@property
def balance(self) -> int:
return self.balance_msat // 1000
@property
def withdrawable_balance(self) -> int:
from .services import fee_reserve
return self.balance_msat - fee_reserve(self.balance_msat)
@property
def lnurlwithdraw_full(self) -> str:
url = url_for(
"core.lnurl_full_withdraw",
usr=self.user,
wal=self.id,
_external=True,
)
try:
            # Encode the wallet's own withdraw URL as a bech32 LNURL string.
return lnurl_encode(url)
except Exception as e:
print("error making url",e)
return ""
def lnurlauth_key(self, domain: str) -> SigningKey:
hashing_key = hashlib.sha256(self.id.encode("utf-8")).digest()
linking_key = hmac.digest(hashing_key, domain.encode("utf-8"), "sha256")
return SigningKey.from_string(
linking_key,
curve=SECP256k1,
hashfunc=hashlib.sha256,
)
async def get_payment(self, payment_hash: str) -> Optional["Payment"]:
from .crud import get_wallet_payment
return await get_wallet_payment(self.id, payment_hash)
class Payment(NamedTuple):
checking_id: str
pending: bool
amount: int
fee: int
memo: str
time: int
bolt11: str
preimage: str
payment_hash: str
extra: Dict
wallet_id: str
webhook: str
webhook_status: int
@classmethod
def from_row(cls, row: Row):
return cls(
checking_id=row["checking_id"],
payment_hash=row["hash"] or "0" * 64,
bolt11=row["bolt11"] or "",
preimage=row["preimage"] or "0" * 64,
extra=json.loads(row["extra"] or "{}"),
pending=row["pending"],
amount=row["amount"],
fee=row["fee"],
memo=row["memo"],
time=row["time"],
wallet_id=row["wallet"],
webhook=row["webhook"],
webhook_status=row["webhook_status"],
)
@property
def tag(self) -> Optional[str]:
return self.extra.get("tag")
@property
def msat(self) -> int:
return self.amount
@property
def sat(self) -> int:
return self.amount // 1000
@property
def is_in(self) -> bool:
return self.amount > 0
@property
def is_out(self) -> bool:
return self.amount < 0
@property
def is_uncheckable(self) -> bool:
return self.checking_id.startswith("temp_") or self.checking_id.startswith(
"internal_"
)
async def set_pending(self, pending: bool) -> None:
from .crud import update_payment_status
await update_payment_status(self.checking_id, pending)
async def check_pending(self) -> None:
if self.is_uncheckable:
return
if self.is_out:
status = await WALLET.get_payment_status(self.checking_id)
else:
status = await WALLET.get_invoice_status(self.checking_id)
if self.is_out and status.failed:
print(f" - deleting outgoing failed payment {self.checking_id}: {status}")
await self.delete()
elif not status.pending:
print("check_pending models")
print(
f" - marking '{'in' if self.is_in else 'out'}' {self.checking_id} as not pending anymore: {status}"
)
await self.set_pending(status.pending)
async def delete(self) -> None:
from .crud import delete_payment
await delete_payment(self.checking_id)
class BalanceCheck(NamedTuple):
wallet: str
service: str
url: str
@classmethod
def from_row(cls, row: Row):
return cls(wallet=row["wallet"], service=row["service"], url=row["url"])
|
from lib import actions
__all__ = [
'GetNetworkDomainByNameAction',
]
class GetNetworkDomainByNameAction(actions.BaseAction):
def run(self, region, network_domain_name):
driver = self._get_compute_driver(region)
networkdomains = driver.ex_list_network_domains()
networkdomain = list(filter(lambda x: x.name == network_domain_name,
networkdomains))[0]
return self.resultsets.formatter(networkdomain)
|
#!/usr/bin/env python
import argparse
import json
import sys
import os
try:
from osmgeocoder import Geocoder
except ImportError:
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from osmgeocoder import Geocoder
parser = argparse.ArgumentParser(description='OSM Address search')
parser.add_argument(
'--config',
type=str,
nargs=1,
dest='config',
required=True,
help='Config file to use'
)
parser.add_argument(
'--country',
type=str,
nargs=1,
dest='country',
help='Only search in this country'
)
parser.add_argument(
'--center',
type=float,
nargs=2,
dest='center',
help='Center coordinate to filter the results'
)
parser.add_argument(
'address',
type=str,
help='Address to search'
)
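# Example invocation (hypothetical file names and coordinates, shown for illustration only):
#   python forward_geocode.py --config config.json --country de \
#       --center 48.137 11.575 "Marienplatz 1, München"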
args = parser.parse_args()
config = {}
with open(args.config[0], "r") as fp:
config = json.load(fp)
geocoder = Geocoder(**config)
kwargs = {}
if args.center is not None:
kwargs['center'] = (args.center[0], args.center[1])
if args.country is not None:
kwargs['country'] = args.country[0]
results = geocoder.forward(args.address, **kwargs)
print('Resolved "{}" to'.format(args.address))
for addr, lat, lon in results:
addr = ', '.join(addr.split("\n")).strip()
print(" - {} -> {}, {}".format(addr, lat, lon))
|
from __future__ import absolute_import
from __future__ import unicode_literals
from django.conf.urls import url, include
from custom.aaa.views import (
AggregationScriptPage,
LocationFilterAPI,
ProgramOverviewReport,
ProgramOverviewReportAPI,
UnifiedBeneficiaryReport,
UnifiedBeneficiaryReportAPI,
UnifiedBeneficiaryDetailsReport,
UnifiedBeneficiaryDetailsReportAPI,
)
dashboardurls = [
url('^$', ProgramOverviewReport.as_view(), name='program_overview'),
url('^program_overview/', ProgramOverviewReport.as_view(), name='program_overview'),
url('^unified_beneficiary/$', UnifiedBeneficiaryReport.as_view(), name='unified_beneficiary'),
url(
r'^unified_beneficiary/(?P<details_type>[\w-]+)/(?P<beneficiary_id>[\w-]+)/$',
UnifiedBeneficiaryDetailsReport.as_view(),
name='unified_beneficiary_details'
),
]
dataurls = [
url('^program_overview/', ProgramOverviewReportAPI.as_view(), name='program_overview_api'),
url('^unified_beneficiary/', UnifiedBeneficiaryReportAPI.as_view(), name='unified_beneficiary_api'),
url(
'^unified_beneficiary_details/',
UnifiedBeneficiaryDetailsReportAPI.as_view(),
name='unified_beneficiary_details_api'
),
url('^location_api/', LocationFilterAPI.as_view(), name='location_api'),
url(r'^aggregate/', AggregationScriptPage.as_view(), name=AggregationScriptPage.urlname),
]
urlpatterns = [
url(r'^aaa_dashboard/', include(dashboardurls)),
url(r'^aaa_dashboard_data/', include(dataurls)),
]
|
countries[countries['capital'].str.len() > 7] |
from dagster import pipeline, solid, execute_pipeline, LocalFileHandle
from dagster.core.storage.file_manager import local_file_manager
from dagster.utils.temp_file import get_temp_dir, get_temp_file_handle_with_data
def test_basic_file_manager_copy_handle_to_local_temp():
foo_data = 'foo'.encode()
with get_temp_dir() as temp_dir:
with get_temp_file_handle_with_data(foo_data) as foo_handle:
with local_file_manager(temp_dir) as manager:
local_temp = manager.copy_handle_to_local_temp(foo_handle)
assert local_temp != foo_handle.path
with open(local_temp, 'rb') as ff:
assert ff.read() == foo_data
def test_basic_file_manager_execute_in_pipeline():
called = {}
@solid
def file_handle(context):
foo_bytes = 'foo'.encode()
file_handle = context.file_manager.write_data(foo_bytes)
assert isinstance(file_handle, LocalFileHandle)
with open(file_handle.path, 'rb') as handle_obj:
assert foo_bytes == handle_obj.read()
with context.file_manager.read(file_handle) as handle_obj:
assert foo_bytes == handle_obj.read()
called['yup'] = True
@pipeline
def basic_file_manager_test():
file_handle() # pylint: disable=no-value-for-parameter
result = execute_pipeline(basic_file_manager_test)
assert result.success
assert called['yup']
|
"""Tests of classes that represent NEI models"""
|
def fib_list(max):
    # Builds the full list of the first `max` Fibonacci numbers in memory.
    nums = []
    a, b = 0, 1
    while len(nums) < max:
        nums.append(b)
        a, b = b, a + b
    return nums
def fib_gen(max):
    # Generator variant: yields the first `max` Fibonacci numbers one at a time,
    # keeping only the current pair (a, b) in memory.
    count = 0
    a, b = 0, 1
    while count < max:
        yield b
        a, b = b, a + b
        count += 1
for n in fib_list(10000):
print(n)
for n in fib_gen(10000):
print(n) |
# from .derivative import Derivative
from .checkerboard import Checkerboard
# from .structuralFeatures import StructuralFeatures
# from .olda import OLDA |
import pygame
import random
from algorithms import rectangles
from algorithms import bubble_sort
from algorithms import quick_sort
from algorithms import heap_sort
from algorithms import merge_sort
from algorithms import selection_sort
from algorithms import insertion_sort
from algorithms import timsort
from algorithms import introsort
class Draw(rectangles.Rectangles):
def __init__(self,window,number_of_rectangles,delay_in_millisecondes):
super().__init__(window, number_of_rectangles, delay_in_millisecondes)
self.window = window
self.number_of_rectangles = number_of_rectangles
self.delay_in_millisecondes = delay_in_millisecondes
self.bs = bubble_sort.BubbleSort(self.window,self.number_of_rectangles,self.delay_in_millisecondes)
self.qs = quick_sort.QuickSort(self.window,self.number_of_rectangles,self.delay_in_millisecondes)
self.hs = heap_sort.HeapSort(self.window,self.number_of_rectangles,self.delay_in_millisecondes)
self.ms = merge_sort.MergeSort(self.window,self.number_of_rectangles,self.delay_in_millisecondes)
self.ss = selection_sort.SelectionSort(self.window,self.number_of_rectangles,self.delay_in_millisecondes)
self.ins = insertion_sort.InsertionSort(self.window,self.number_of_rectangles,self.delay_in_millisecondes)
self.tim = timsort.TimSort(self.window,self.number_of_rectangles,self.delay_in_millisecondes)
self.intro = introsort.IntroSort(self.window,self.number_of_rectangles,self.delay_in_millisecondes)
self.is_required_for_sorting = False
self.sort_index = -1
self.shuffled = False
self.array_sorted = False
def index_of_algorithm_chosen(self, index):
self.sort_index = index
self.is_required_for_sorting = True
def shuffle_array(self):
self.sort_state = False
self.sort_index = -1
random.shuffle(super().get_array_of_numbers())
self.shuffled = True
self.is_required_for_sorting = False
self.array_sorted = False
self.bs.set_defaults()
self.qs.set_defaults()
self.hs.set_defaults()
self.ms.set_defaults()
self.ss.set_defaults()
self.ins.set_defaults()
self.tim.set_defaults()
self.intro.set_defaults()
def sort_visualization(self):
if self.sort_index == 0 and not self.array_sorted:
self.bs.sort()
super().set_array_of_numbers(self.bs.get_array_of_numbers())
elif self.sort_index == 1 and not self.array_sorted:
self.ss.sort()
super().set_array_of_numbers(self.ss.get_array_of_numbers())
elif self.sort_index == 2 and not self.array_sorted:
self.ins.sort()
super().set_array_of_numbers(self.ins.get_array_of_numbers())
elif self.sort_index == 3 and not self.array_sorted:
self.ms.sort()
super().set_array_of_numbers(self.ms.get_array_of_numbers())
elif self.sort_index == 4 and not self.array_sorted:
self.qs.sort()
super().set_array_of_numbers(self.qs.get_array_of_numbers())
elif self.sort_index == 5 and not self.array_sorted:
self.hs.sort()
super().set_array_of_numbers(self.hs.get_array_of_numbers())
elif self.sort_index == 6 and not self.array_sorted:
self.tim.sort()
super().set_array_of_numbers(self.tim.get_array_of_numbers())
elif self.sort_index == 7 and not self.array_sorted:
self.intro.sort()
super().set_array_of_numbers(self.intro.get_array_of_numbers())
super().draw()
self.is_required_for_sorting = False
self.shuffled = False
self.array_sorted = True
def rectangle_bar_chart(self):
if self.sort_index == 0:
self.bs.information()
elif self.sort_index == 1:
self.ss.information()
elif self.sort_index == 2:
self.ins.information()
elif self.sort_index == 3:
self.ms.information()
elif self.sort_index == 4:
self.qs.information()
elif self.sort_index == 5:
self.hs.information()
elif self.sort_index == 6:
self.tim.information()
elif self.sort_index == 7:
self.intro.information()
super().draw() |
from typing import List, Optional
from .EditorCommon import ComponentSpec
# noinspection PyUnreachableCode
if False:
# noinspection PyUnresolvedReferences
from _stubs import *
class EditorViews:
def __init__(self, ownerComp):
self.ownerComp = ownerComp # type: COMP
self.customHolder = ownerComp.op('customViews') # type: COMP
def LoadCustomViews(self):
return
self.ClearCustomViews()
viewDicts = self.ownerComp.par.Customviews.eval()
if not viewDicts:
return
viewSpecs = [ComponentSpec.fromDict(v) for v in viewDicts]
# for i, viewSpec in enumerate(viewSpecs):
# comp = viewSpec.createComp(self.customHolder)
# comp.nodeY = 600 - (i * 150)
# self.initializeCustomView(comp, viewSpec)
self.updateCustomViewTable(viewSpecs)
def initializeCustomView(self, comp: 'COMP', viewSpec: ComponentSpec):
if not comp.isPanel:
return
try:
comp.par.hmode = 'fill'
comp.par.vmode = 'fill'
comp.par.display.expr = f'parent.editorViews.par.Selectedview == {viewSpec.name!r}'
except Exception as e:
print(self.ownerComp, 'Error initializing custom view', comp, '\n', viewSpec, '\n', e)
def ClearCustomViews(self):
for o in self.customHolder.children:
if not o or not o.valid:
continue
# try:
o.destroy()
# except:
# pass
self.updateCustomViewTable([])
def OnWorkspaceUnload(self):
self.ownerComp.par.Customviews = None
self.ClearCustomViews()
def OnWorkspaceLoad(self):
self.LoadCustomViews()
def updateCustomViewTable(self, viewSpecs: List[ComponentSpec]):
dat = self.ownerComp.op('set_custom_view_table')
dat.clear()
dat.appendRow(['name', 'label'])
if not viewSpecs:
return
for viewSpec in viewSpecs:
dat.appendRow([viewSpec.name, viewSpec.label or viewSpec.name])
|
import os
analysis_process_path = os.path.join(
os.path.dirname(__file__),
"analysis_process/14438140-4f0f-4dd8-b9c4-00212f112a99_2021-05-24T12:00:00.000000Z.json",
)
analysis_protocol_path = os.path.join(
os.path.dirname(__file__),
"analysis_protocol/61223a2e-a775-53f4-8aab-fc3b4ef88722_2021-05-24T12:00:00.000000Z.json",
)
outputs_path = os.path.join(os.path.dirname(__file__), "outputs.json")
# Input UUIDS are the fastq uuids for the intermediate run
# Optimus can have multiple fastq1 and fastq1 per run
LINKS_INPUT = {
"project_id": "9d97f01f-9313-416e-9b07-560f048b2350",
"workspace_version": "2021-05-24T12:00:00.000000Z",
"input_uuids": [
"0af14740-753f-42e4-a025-e02c438b8591",
"0132474c-2452-4c80-9334-d95b999864ad",
"9dd8ad36-5570-4109-ac92-04363aafafe5",
"3f1313f0-fe17-4063-9955-56ead2cb7de0",
],
"output_file_path": outputs_path,
"analysis_process_path": analysis_process_path,
"analysis_protocol_path": analysis_protocol_path,
"file_name_string": "6f911b71-158e-4f50-b8e5-395f386a343b",
"pipeline_type": "Optimus",
"project_level": False,
}
|
# -*- coding:utf-8 -*-
from imutils.perspective import four_point_transform
from imutils import contours
import numpy as np
import imutils
import cv2 as cv
ANSWER_KEY_SCORE = {0: 1, 1: 4, 2: 0, 3: 3, 4: 1}
ANSWER_KEY = {0: "A", 1: "B", 2: "C", 3: "D", 4: "E"}
# Load an image into OpenCV
img = cv.imread('E:\\tmp\\t1.png')
cv.imshow("origin",img)
# Convert to a grayscale image
gray=cv.cvtColor(img,cv.COLOR_BGR2GRAY)
cv.imshow("gray",gray)
gaussian_blur = cv.GaussianBlur(gray, (5, 5), 0) # Gaussian blur
cv.imshow("gaussian",gaussian_blur)
edged=cv.Canny(gaussian_blur,75,200) # Edge detection: gradients below the 2nd argument are discarded, above the 3rd are kept as edges, values in between are decided automatically
cv.imshow("edged",edged)
# Find contours
image, cts, hierarchy = cv.findContours( edged.copy(), cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
# Mark the contours so we can inspect them on the original image; note this must be the
# original (color) image to draw in red, the grayscale image has no color
# cv.drawContours(img, cts, -1, (0,0,255), 3)
# Sort all contours by area, largest first
sorted_cts=sorted(cts,key=cv.contourArea,reverse=True)
print("number of contours found:",len(cts))
cv.imshow("draw_contours",img)
# Number of correctly answered questions
correct_count=0
for c in sorted_cts:
    # Arc length: the 1st argument is the contour, the 2nd indicates whether the curve is closed
    peri=0.01*cv.arcLength(c,True)
    # Get all vertices of the polygon; four vertices means it is a rectangle
    approx=cv.approxPolyDP(c,peri,True)
    # Print the number of vertices
    print("number of vertices:",len(approx))
    if len(approx)==4: # rectangle
        # Perspective transform to extract the content area of the original image
        ox_sheet = four_point_transform(img, approx.reshape(4, 2))
        # Perspective transform to extract the content area of the grayscale image
        tx_sheet = four_point_transform(gray, approx.reshape(4, 2))
        cv.imshow("ox", ox_sheet)
        cv.imshow("tx", tx_sheet)
        # Binarize the grayscale image with Otsu's thresholding
        ret,thresh2 = cv.threshold(tx_sheet, 0, 255,cv.THRESH_BINARY_INV | cv.THRESH_OTSU)
        cv.imshow("ostu", thresh2)
        # Find contours again (OpenCV 3.x findContours returns 3 values)
        r_image, r_cnt, r_hierarchy = cv.findContours(thresh2.copy(), cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
        print("number of contours found:",len(r_cnt))
        # Mark all the contours in red
        # cv.drawContours(ox_sheet,r_cnt,-1,(0,0,255),2)
        # Collect all of the found bubble contours
questionCnts = []
for cxx in r_cnt:
            # Mark each candidate contour with its bounding rectangle
            x, y, w, h = cv.boundingRect(cxx)
            ar = w / float(h)
            if w >= 20 and h >= 20 and ar >= 0.9 and ar <= 1.1:
                # Mark shapes that satisfy the size/aspect-ratio condition in red
                # cv.rectangle(ox_sheet, (x, y), (x + w, y + h), (0, 0, 255), 2)
                # Save every answer bubble
                questionCnts.append(cxx)
        cv.imshow("ox_1", ox_sheet)
        # Sort the bubbles by coordinate, top to bottom
        questionCnts = contours.sort_contours(questionCnts, method="top-to-bottom")[0]
        # Group the bubbles five at a time (one question per row)
        for (q, i) in enumerate(np.arange(0, len(questionCnts), 5)):
            # Get the 5 bubbles of this row, sorted left to right
            cnts = contours.sort_contours(questionCnts[i:i + 5])[0]
            bubble_rows=[]
            # Iterate over every option
            for (j, c) in enumerate(cnts):
                # Create an all-black canvas the same size as the perspective view
                mask = np.zeros(tx_sheet.shape, dtype="uint8")
                # Draw the given contour filled with white onto the canvas; 255 is the
                # brightness value (255 = white, 0 = black)
                cv.drawContours(mask, [c], -1, 255, -1)
                # Bitwise-AND the two images so each option is shown alone on the canvas;
                # the option with the most non-zero pixels is the marked answer
                mask = cv.bitwise_and(thresh2, thresh2, mask=mask)
                # cv.imshow("c" + str(i), mask)
                # Count the non-zero pixels for this option
                total = cv.countNonZero(mask)
                # Store as a tuple of (pixel count, option index)
                # print(total,j)
                bubble_rows.append((total,j))
            bubble_rows=sorted(bubble_rows,key=lambda x: x[0],reverse=True)
            # Index of the chosen answer
            choice_num=bubble_rows[0][1]
            print("answer: {} data: {}".format(ANSWER_KEY.get(choice_num),bubble_rows))
            fill_color=None
            # Add 1 if the answer is correct
            if ANSWER_KEY_SCORE.get(q) == choice_num:
                fill_color = (0, 255, 0) # correct: green
                correct_count = correct_count+1
            else:
                fill_color = (0, 0, 255) # wrong: red
cv.drawContours(ox_sheet, cnts[choice_num], -1, fill_color, 2)
cv.imshow("answer_flagged", ox_sheet)
text1 = "total: " + str(len(ANSWER_KEY)) + ""
text2 = "right: " + str(correct_count)
text3 = "score: " + str(correct_count*1.0/len(ANSWER_KEY)*100)+""
font = cv.FONT_HERSHEY_SIMPLEX
cv.putText(ox_sheet, text1 + " " + text2+" "+text3, (10, 30), font, 0.5, (0, 0, 255), 2)
cv.imshow("score", ox_sheet)
break
cv.waitKey(0)
|
from django.contrib import admin
from .models import Chat, Message
admin.site.register(Chat)
admin.site.register(Message)
|
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from django.db.models import Q, Sum
from .models import *
import json
import numpy.random
import numpy as np
from serpapi import GoogleSearch
import re
from itertools import chain
import nltk
from nltk import pos_tag, word_tokenize
from nltk import tokenize  # used by tokenize.sent_tokenize() in get_transcript_nouns()
from nltk.corpus import wordnet
from nltk.tokenize import WordPunctTokenizer
from urllib.parse import urlparse  # used by video_id()
import wikipedia
from youtube_search import YoutubeSearch
nltk.download('stopwords')
nltk.download('wordnet')
from nltk.stem import PorterStemmer
from nltk.stem import LancasterStemmer
import bs4
from textblob import TextBlob
import requests
from decouple import config, Csv
from rake_nltk import Rake
from collections import Counter
from bs4 import BeautifulSoup
stop_words = stopwords.words('english')
import inflect
stop_words.extend(['The', 'students', 'learn'])
count_vect = CountVectorizer()
porter = PorterStemmer()
lancaster = LancasterStemmer()
wikipedia.set_rate_limiting(True)
engine = inflect.engine()
import spacy
import pyinflect
nlp = spacy.load('en_core_web_sm')
import random
from youtube_transcript_api import YouTubeTranscriptApi
from .get_key_terms import *
from .get_activities import *
from .activity_builder import *
from .tasks import *
stop_words = ['i', "'", "'" '!', '.', ':', ',', '[', ']', '(', ')', '?', "'see", "see", 'x', '...', 'student', 'learn', 'objective', 'students', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you', "you're", "you've", "you'll", "you'd", 'your', 'yours', 'yourself', 'yourselves', 'he', 'him', 'his', 'himself', 'she', "she's", 'her', 'hers', 'herself', 'it', "it's", 'its', 'itself', 'they', 'them', 'their', 'theirs', 'themselves', 'what', 'which', 'who', 'whom', 'this', 'that', "that'll", 'these', 'those', 'am', 'is', 'are', 'was', 'were', 'be', 'been', 'being', 'have', 'has', 'had', 'having', 'do', 'does', 'did', 'doing', 'a', 'an', 'the', 'and', 'but', 'if', 'or', 'because', 'as', 'until', 'while', 'of', 'at', 'by', 'for', 'with', 'about', 'against', 'between', 'into', 'through', 'during', 'before', 'after', 'above', 'below', 'to', 'from', 'up', 'down', 'in', 'out', 'on', 'off', 'over', 'under', 'again', 'further', 'then', 'once', 'here', 'there', 'when', 'where', 'why', 'how', 'all', 'any', 'both', 'each', 'few', 'more', 'most', 'other', 'some', 'such', 'no', 'nor', 'not', 'only', 'own', 'same', 'so', 'than', 'too', 'very', 's', 't', 'can', 'will', 'just', 'don', "don't", 'should', "should've", 'now', 'd', 'll', 'm', 'o', 're', 've', 'y', 'ain', 'aren', "aren't", 'couldn', "couldn't", 'didn', "didn't", 'doesn', "doesn't", 'hadn', "hadn't", 'hasn', "hasn't", 'haven', "haven't", 'isn', "isn't", 'ma', 'mightn', "mightn't", 'mustn', "mustn't", 'needn', "needn't", 'shan', "shan't", 'shouldn', "shouldn't", 'wasn', "wasn't", 'weren', "weren't", 'won', "won't", 'wouldn', "wouldn't"]
TAG_RE = re.compile(r'<[^>]+>')
def get_transcript_nouns(match_textlines):
full_sent_list = []
results = tokenize.sent_tokenize(match_textlines)
for sent in results:
new_list = []
sent = ' '.join(sent.split())
sent_blob = TextBlob(sent)
sent_tagger = sent_blob.pos_tags
for y in sent_tagger:
if 'NNP' in y[1]:
full_sent_list.append(y[0])
elif 'NNPS' in y[1]:
full_sent_list.append(y[0])
elif 'NN' in y[1]:
full_sent_list.append(y[0])
if full_sent_list:
full_sent_list = full_sent_list[0]
return(full_sent_list)
def get_transcript(video_id, video_db_id, lesson_id):
video_match = youtubeSearchResult.objects.get(id=video_db_id)
lesson_match = lessonObjective.objects.get(id=lesson_id)
worksheet_full, created = worksheetFull.objects.get_or_create(lesson_overview=lesson_match, title=video_match.vid_id)
full_trans = YouTubeTranscriptApi.get_transcript(video_id)
all_lines = []
for line in full_trans:
text = line['text']
text = text.strip()
all_lines.append(text)
new_lines = []
line_count = 0
for line in all_lines:
line_count = line_count + 1
line_create = youtubeLine.objects.create(vid_id=video_id, line_num=line_count, transcript_text=line)
video_match.transcript_lines.add(line_create)
if(line_count%4==0):
noun_result = get_transcript_nouns(line)
if noun_result:
if noun_result in line:
new_line = line.replace(noun_result, "_________")
create_question = topicQuestionitem.objects.create(is_video=True, lesson_overview=lesson_match, subject=lesson_match.subject, Question=new_line, Correct=noun_result, item=video_db_id, explanation=line, trans_line_num=line_count)
worksheet_full.questions.add(create_question)
return('Done')
def video_id(url):
"""
Examples:
- http://youtu.be/SA2iWivDJiE
- http://www.youtube.com/watch?v=_oPAwA_Udwc&feature=feedu
- http://www.youtube.com/embed/SA2iWivDJiE
- http://www.youtube.com/v/SA2iWivDJiE?version=3&hl=en_US
"""
o = urlparse(url)
if o.netloc == 'youtu.be':
return o.path[1:]
elif o.netloc in ('www.youtube.com', 'youtube.com'):
if o.path == '/watch':
id_index = o.query.index('v=')
return o.query[id_index+2:id_index+13]
elif o.path[:7] == '/embed/':
return o.path.split('/')[2]
elif o.path[:3] == '/v/':
return o.path.split('/')[2]
return None
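# For illustration (not part of the original module): given the docstring examples,
# video_id('http://youtu.be/SA2iWivDJiE') returns 'SA2iWivDJiE' and
# video_id('http://www.youtube.com/watch?v=_oPAwA_Udwc&feature=feedu') returns '_oPAwA_Udwc'.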
def youtube_results(text, lesson_id):
class_objectives = lessonObjective.objects.get(id=lesson_id)
class_objectives_list = str(class_objectives.teacher_objective)
topics = class_objectives.objectives_topics.all()
topic_list = []
for item in topics:
title = item.item
topic_list.append(title)
class_topics = ' '.join([str(i) for i in topic_list])
combined = class_objectives_list
params = {
"engine": "youtube",
"search_query": combined,
'api_key': config('api_key')
}
search = GoogleSearch(params)
results = search.get_dict()
try:
video_results = results['video_results']
return(video_results)
except:
return(None)
#Show students examples of <<DEMONSTRATION>>. <<GROUPING>> Instruct students to <<VERB>> by <<DEMONSTRATION>> <<WORK_PRODUCT>>
def get_lessons_ajax(lesson_id, user_id):
lesson_full_List = []
user_profile = User.objects.get(id=user_id)
lesson_match = lessonObjective.objects.get(id=lesson_id)
topic_matches = lesson_match.objectives_topics.all()
demo_ks = lesson_match.objectives_demonstration.all()
grouping = 'in groups of two'
act_match = selectedActivity.objects.filter(lesson_overview = lesson_match, is_selected=True)
rec_act_match = selectedActivity.objects.filter(lesson_overview = lesson_match, is_selected=False)
for item in rec_act_match:
lesson_full_List.append(item)
matched_activities = []
temp_ids_list = []
for sent in act_match:
matching = sent.template_id
temp_ids_list.append(matching)
matched_activities.append(sent.lesson_text)
topic_list = topicInformation.objects.filter(id__in=topic_matches)
filtered_activities_topics = selectedActivity.objects.filter(is_admin=True, objectives_topics__in=topic_list)
updated_activities = update_topic_activities(filtered_activities_topics, lesson_id, user_id)
for item in updated_activities:
lesson_full_List.append(item)
activities_full = []
if len(lesson_full_List) <= 5:
multi_activity = get_multiple_types_activity(topic_list)
teacher_objective = lesson_match.teacher_objective
topic_results = []
full_result = []
topic_ids = []
for item in topic_list:
topic_types = item.topic_type.all()
topic_items = []
for tt in topic_types:
masked_info = tt.item
topic_id = tt.id
topic_ids.append(topic_id)
topic_items.append(masked_info)
topic_results.append(masked_info)
replacement = item, topic_items
full_result.append(replacement)
words_list_full = ' '.join([str(i) for i in topic_results])
words_list_full = words_list_full.split()
wordfreq = []
for w in words_list_full:
result = w, words_list_full.count(w)
wordfreq.append(result)
wordfreq.sort(key=lambda x: x[1], reverse=True)
wordfreq = set(wordfreq)
wording_list = []
for item in demo_ks:
wording = item.content
topic_types = []
if item.topic_id:
topic_match = topicInformation.objects.filter(id=item.topic_id).first()
if topic_match:
topic_two = topic_match.topic_type.all()
for topic in topic_two:
topic_types.append(topic)
if wording not in wording_list:
results = wording, topic_types
wording_list.append(results)
single_activities = get_single_types_activity(topic_list)
for line in single_activities:
if line not in wording_list:
wording_list.append(line)
for topic_item in wordfreq:
#this is for multiple or the same topic items
if topic_item[1] > 1:
result_list = []
for item in full_result:
if topic_item[0] in item[1]:
result = item[0]
result_list.append(result)
plural_noun = get_plural_types_activity(result_list)
for item in plural_noun:
if item not in wording_list:
wording_list.append(item)
multi_noun = get_multiple_types_activity(result_list)
for item in multi_noun:
if item not in wording_list:
wording_list.append(item)
else:
#this is for single topic items
result_list = []
for item in full_result:
if topic_item[0] in item[1]:
result_one = item[0]
result_list.append(result_one)
topic_one = result_one.topic_type.all()
topic_list = []
for item in topic_one:
topic_list.append(item)
demo_ks_match = LearningDemonstrationTemplate.objects.filter(topic_type__in=topic_one)
for demo in demo_ks_match:
wording = demo.content
topic_two = demo.topic_type.all()
for item in topic_two:
new_wording = wording.replace(str(item), result_one.item)
result = new_wording, result_one.item, item, 'single', demo.id
if result not in wording_list:
wording_list.append(result)
mi_labels = [' ', 'Verbal', 'Visual', 'Musical', 'Movement', 'Logical']
bl_labels = [' ', 'Remember', 'Understand', 'Apply', 'Analyze', 'Evaluate', 'Create']
colors = [' ', 'primary', 'secondary', 'success', 'danger', 'warning', 'light']
font_a = [' ', 'microphone', 'eye', 'music', 'walking', 'puzzle-piece']
demo_list_sect = []
random.shuffle(wording_list)
for line in wording_list:
sentence = line[0]
topic = line[1]
t_type = line[2]
d_type = line[3]
demo_id = line[4]
wording_split = sentence.split()
first_word = wording_split[0]
tokens = nlp(first_word)
new_verb = tokens[0]._.inflect('VBG')
if new_verb:
new_demo = sentence.replace(first_word, new_verb)
lesson_full = get_new_lesson(new_demo, topic, d_type, t_type, lesson_id, user_profile.id, demo_id)
for item in lesson_full:
lesson_full_List.append(item)
random.shuffle(lesson_full_List)
else:
pass
for item in lesson_full_List:
temp_id = item.template_id
text_s = item.lesson_text
text_demo = item.ks_demo
bl = item.bloom
mi = item.mi
ret = item.ret_rate
matching = item.template_id
print(matching)
if matching is not None:
if matching not in temp_ids_list:
temp_ids_list.append(matching)
activity = {'id': item.id, 'activity': item.lesson_text, 'bl_color': item.bl_color, 'bl_label': item.bl_labels, 'mi_color': item.mi_color, 'mi_label': item.mi_labels, 'mi_icon': item.mi_icon, 'ret':ret}
activities_full.append(activity)
else:
activity = {'id': item.id, 'activity': item.lesson_text, 'bl_color': item.bl_color, 'bl_label': item.bl_labels, 'mi_color': item.mi_color, 'mi_label': item.mi_labels, 'mi_icon': item.mi_icon, 'ret':ret}
activities_full.append(activity)
return(activities_full)
def label_blooms_activities_analytics(lesson_id):
    # Each activity is assigned a Bloom's level; this counts the occurrences of each
    # level and divides by the total number of activities to get a percentage.
class_objectives = lessonObjective.objects.get(id=lesson_id)
matched_activities = selectedActivity.objects.filter(lesson_overview=class_objectives, is_selected=True)
if matched_activities:
bl_list = []
bl_names = ('Remember', 'Understand', 'Analyze', 'Evaluate', 'Create')
for activity in matched_activities:
act_bl = int(activity.bloom)
bl_list.append(act_bl)
bl_list.sort()
bl_count = len(bl_list)
bl_length = len(set(str(bl_count)))
#the blooms number is used as an index to find the color and label in the progress bar
colors = ('bg-primary', 'bg-secondary', 'bg-success', 'bg-danger', 'bg-warning')
bl_names = ('Remember', 'Understand', 'Analyze', 'Evaluate', 'Create')
bl_Count = [1,2,3,4,5]
bl_list_full = []
for item in bl_Count:
item_count = bl_list.count(item)
zero_count = item - 1
name = bl_names[zero_count]
color = colors[zero_count]
avg = item_count/bl_count * 100
results = {'name': name, 'count': avg, 'color': color}
if results not in bl_list_full:
bl_list_full.append(results)
else:
bl_list_full = None
return(bl_list_full)
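# Worked example (illustrative only): if the selected activities have Bloom levels
# [1, 1, 3], then bl_count is 3 and label_blooms_activities_analytics() reports
# Remember at 2/3 * 100 = 66.7%, Analyze at 33.3%, and 0% for the other levels.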
def label_mi_activities_analytics(lesson_id):
    # Each activity is assigned a multiple-intelligence (MI) category; this counts the
    # occurrences of each category and divides by the total number of activities to get a percentage.
class_objectives = lessonObjective.objects.get(id=lesson_id)
matched_activities = selectedActivity.objects.filter(lesson_overview=class_objectives, is_selected=True)
mi_list = []
if matched_activities:
for activity in matched_activities:
act_mi = int(activity.mi)
mi_list.append(act_mi)
mi_list.sort()
mi_count = len(mi_list)
mi_length = len(set(str(mi_count)))
#the mi number is used as an index to find the color and label in the progress bar
colors = ('bg-primary', 'bg-secondary', 'bg-success', 'bg-danger', 'bg-warning')
mi_names = ('Verbal', 'Visual', 'Musical', 'Movement', 'Logical')
mi_Count = [1,2,3,4,5]
mi_list_full = []
for item in mi_Count:
item_count = mi_list.count(item)
zero_count = item - 1
name = mi_names[zero_count]
color = colors[zero_count]
avg = item_count/mi_count * 100
results = {'name': name, 'count': avg, 'color': color}
if results not in mi_list_full:
mi_list_full.append(results)
else:
mi_list_full = None
return(mi_list_full)
def retention_activities_analytics(lesson_id):
class_objectives = lessonObjective.objects.get(id=lesson_id)
matched_activities = selectedActivity.objects.filter(lesson_overview=class_objectives, is_selected=True)
class_standards = class_objectives.objectives_standards.all()
matched_standards = singleStandard.objects.filter(id__in=class_standards)
passive = ('watch', 'listen', 'video', 'song')
middle = ('notes', 'worksheet', 'complete', 'write', 'type')
active = ('present', 'presentation', 'discuss', 'discussion', 'debate', 'group', 'teams', 'pairs', 'explain')
if matched_activities:
active_count = 0
middle_count = 0
passive_count = 0
activity_results = 0
for activity in matched_activities:
activity_text = activity.lesson_text
split_activity = activity_text.split()
if any(word in activity_text for word in active):
activity_results = activity_results + 3
active_count = active_count + 1
elif any(word in activity_text for word in middle):
activity_results = activity_results + 2
middle_count = middle_count + 1
elif any(word in activity_text for word in passive):
activity_results = activity_results + 1
passive_count = passive_count + 1
else:
pass
activity_count = matched_activities.count()
        # Everything that is neither active nor middle is treated as passive,
        # without double counting the activities already matched as passive.
        passive_count = activity_count - active_count - middle_count
total_count = activity_count
retention_avg = (activity_results/(total_count*3)) * 100
retention_avg = round(retention_avg)
passive_per = (passive_count/total_count) * 100
middle_per = (middle_count/total_count) * 100
active_per = (active_count/total_count) * 100
if retention_avg >= 60:
text = 'Your retention rate is high because your lessons include active learning'
elif retention_avg >= 30:
text = 'Your retention rate may be average because students practice and take notes.'
else:
text = 'Try improving your retention rate by including a presentation of knowledge or discussion'
results = {'avg': retention_avg, 'text': text, 'passive': passive_per, 'middle': middle_per, 'active': active_per}
else:
text = 'Add activities to view results'
results = {'avg': 0, 'text': text, 'passive': 25, 'middle': 50, 'active': 25}
return(results)
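# Worked example (illustrative only): with four selected activities -- one matching an
# "active" keyword (+3), one "middle" (+2), one "passive" (+1) and one matching nothing
# (0) -- activity_results is 6, so the retention average is 6 / (4 * 3) * 100 = 50%.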
def build_activity_list(soup, user_profile, class_objectives, lesson_id):
#this function pulls in beautiful soup and pulls out the activities that will be used to create analytics and demonstrations of knowledge
all_selected = selectedActivity.objects.filter(lesson_overview=class_objectives, is_selected=True)
new_list = []
activities_list = soup.find('ul', {"id": "activity-div"})
if activities_list:
activities = [x.get_text() for x in activities_list.findAll('li')]
for activity in activities:
if len(activity) > 4:
try:
l_act = label_activities(activity, lesson_id)
new_activity, created = selectedActivity.objects.get_or_create(created_by=user_profile, lesson_overview=class_objectives, lesson_text=activity)
if created:
new_activity.verb=l_act[2]
new_activity.work_product=l_act[3]
new_activity.bloom=l_act[1]
new_activity.mi=l_act[0]
new_activity.is_selected = True
new_activity.save()
new_list.append(new_activity.id)
find_topics = identify_topic(activity, lesson_id)
if find_topics:
for item in find_topics:
match_topic = topicInformation.objects.filter(id=item).first()
update_activity = new_activity.objectives_topics.add(match_topic)
except:
pass
for acty in all_selected:
old_id = acty.id
if old_id not in new_list:
acty.is_selected = False
acty.save()
return('Complete')
def save_big_questions_list(soup, user_profile, class_objectives, lesson_id):
lesson_match = lessonObjective.objects.get(id=lesson_id)
#this function pulls in beautiful soup and pulls out the activities that will be used to create analytics and demonstrations of knowledge
current_questions = googleRelatedQuestions.objects.filter(lesson_plan=lesson_match, is_selected=True).delete()
questions = []
for row in soup.findAll('li', {"id": "full_question"}):
question = row.find('h6').contents
answer = row.find('p').contents
if len(question[0]) > 5:
try:
match_question = googleRelatedQuestions.objects.create(question=question[0], snippet=answer[0], is_selected=True, lesson_plan=lesson_match)
except:
pass
return('Complete')
def build_key_terms_list(soup, user_profile, class_objectives, lesson_id, matched_grade, standard_set):
#this takes the beautiful soup and pulls out key terms to save for changes and create more connections.
term_sets = []
for row in soup.findAll('table')[0].tbody.findAll('tr'):
term = row.find('th').contents
if term[0]:
description = row.find('td').contents
for item in description:
if item:
try:
result = item.get_text(';; ', strip=True)
result = result.split(";; ")
for item in result:
if item:
result = term[0], item
if result not in term_sets:
term_sets.append(result)
except:
result = item.split(";; ")
for item in result:
if item:
result = term[0], item
if result not in term_sets:
term_sets.append(result)
#build new terms with new descriptions
key_term_list = list(term_sets)
#located at get_key_terms.py
try:
term_pairs = create_terms(key_term_list, lesson_id, matched_grade, user_profile.id, standard_set)
except:
pass
return('Complete')
def get_lesson_sections(text_overview, class_id, lesson_id, user_id):
#this is the main function that reads the tinymce editor information and breaks it into activities and key terms.
user_profile = User.objects.get(id=user_id)
classroom_profile = classroom.objects.get(id=class_id)
standard_set = classroom_profile.standards_set
class_objectives = lessonObjective.objects.get(id=lesson_id)
subject = class_objectives.subject
matched_grade = class_objectives.current_grade_level
topic_matches = class_objectives.objectives_topics.all()
topic_lists_selected = topicInformation.objects.filter(id__in=topic_matches).order_by('item')
create_topic_matches, created = matchedTopics.objects.get_or_create(lesson_overview=class_objectives)
#pulls information from the TinyMCE html field and then parse the information
if text_overview:
        soup = BeautifulSoup(text_overview, 'html.parser')
build_activities = build_activity_list(soup, user_profile, class_objectives, lesson_id)
build_key_terms = build_key_terms_list(soup, user_profile, class_objectives, lesson_id, matched_grade, standard_set)
save_big_questions = save_big_questions_list(soup, user_profile, class_objectives, lesson_id)
return('Done')
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from cms.utils import get_current_site
from cms.utils.page import get_page_from_path
from django.core.urlresolvers import reverse
from filer.models.imagemodels import Image
from rest_framework import serializers
class CMSPagesField(serializers.Field):
"""
A serializer field used to create the many-to-many relations for models inheriting from the
unmanaged :class:`shop.models.related.BaseProductPage`.
Usage in serializers to import/export product model data:
class MyProductSerializer():
...
cms_pages = CMSPagesField()
...
"""
def to_representation(self, value):
urls = {page.get_absolute_url() for page in value.all()}
return list(urls)
def to_internal_value(self, data):
site = get_current_site()
pages_root = reverse('pages-root')
ret = []
for path in data:
if path.startswith(pages_root):
path = path[len(pages_root):]
# strip any final slash
if path.endswith('/'):
path = path[:-1]
page = get_page_from_path(site, path)
if page:
ret.append(page)
return ret
class ImagesField(serializers.Field):
"""
A serializer field used to create the many-to-many relations for models inheriting from the
unmanaged :class:`shop.models.related.BaseProductImage`.
Usage in serializers to import/export product model data:
class MyProductSerializer():
...
images = ImagesField()
...
"""
def to_representation(self, value):
return list(value.values_list('pk', flat=True))
def to_internal_value(self, data):
return list(Image.objects.filter(pk__in=data))
class ValueRelatedField(serializers.RelatedField):
"""
A serializer field used to access a single value from a related model.
Usage:
myfield = ValueRelatedField(model=MyModel)
myfield = ValueRelatedField(model=MyModel, field_name='myfield')
    This serializes objects of type ``MyModel`` so that the return data is a simple scalar.
On deserialization it creates an object of type ``MyModel``, if none could be found with the
given field name.
"""
def __init__(self, *args, **kwargs):
self.model = kwargs.pop('model')
self.related_field_name = kwargs.pop('field_name', 'name')
super(ValueRelatedField, self).__init__(*args, **kwargs)
def get_queryset(self):
return self.model.objects.all()
def to_representation(self, value):
return getattr(value, self.related_field_name)
def to_internal_value(self, value):
data = {self.related_field_name: value}
instance, _ = self.model.objects.get_or_create(**data)
return instance
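# Illustration (hypothetical model, not part of this module): declaring
# brand = ValueRelatedField(model=Brand) serializes a related Brand instance to
# brand.name, and deserializing the string "ACME" looks up or creates a Brand
# with name="ACME" via get_or_create().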
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" ib.ext.cfg.EWrapperMsgGenerator -> config module for EWrapperMsgGenerator.java.
"""
from java2python.config.default import modulePrologueHandlers
modulePrologueHandlers += [
'from ib.ext.AnyWrapperMsgGenerator import AnyWrapperMsgGenerator',
'from ib.ext.EClientSocket import EClientSocket',
'from ib.ext.MarketDataType import MarketDataType',
'from ib.ext.TickType import TickType',
'from ib.ext.Util import Util',
'',
'from ib.lib import Double',
]
|
from tests_utils.abstract_model_test_mixin import AbstractModelTestMixin
from stack_it.contents.abstracts import ModelBaseContentMixin
from stack_it.models import Page
class ModelBaseContentMixinUnitTest(AbstractModelTestMixin):
"""
Testing ModelBaseContentMixin
Model is created by AbstractModelTestMixin.
See tests_utils.abstract_model_test_mixin
Attributes:
mixin (AbstractModel): See tests_utils.abstract_model_test_mixin
"""
mixin = ModelBaseContentMixin
def test_value_returns_relevant_instance(self):
"""
Checking value returns the given instance
"""
page = Page.objects.create(title="Title")
instance = self.model.objects.create(
instance_id=page.pk,
model_name='stack_it.Page'
)
self.assertEqual(page, instance.value)
def test_value_returns_none_when_instance_is_deleted(self):
"""
Checking value returns None when instance is deleted
and instance_id is set to None
"""
page = Page.objects.create(title="Title")
instance = self.model.objects.create(
instance_id=page.pk,
model_name='stack_it.Page'
)
page.delete()
self.assertEqual(instance.value, None)
self.assertEqual(instance.instance_id, None)
def test_value_returns_none_when_model_does_not_exists(self):
"""
        It can happen during the development cycle that a model is deleted.
        This makes sure you still get "None".
"""
page = Page.objects.create(title="Title")
instance = self.model.objects.create(
instance_id=page.pk,
model_name='hello.World'
)
page.delete()
self.assertEqual(instance.value, None)
|
from django.http import HttpResponse
from rest_framework_tracking.mixins import LoggingMixin
from app.views import AnonymousAPIView
from tinkoff.api.serializers import PaymentNotificationSerializer
class TinkoffPaymentNotificationsView(LoggingMixin, AnonymousAPIView):
def post(self, request, *args, **kwargs):
serializer = PaymentNotificationSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save()
return HttpResponse('OK')
|
import numpy as np
from htm_rl.envs.biogwlab.environment import Environment
from htm_rl.envs.biogwlab.generation.food import FoodPositionsGenerator, FoodPositions, FoodPositionsManual
from htm_rl.envs.biogwlab.module import Entity, EntityType
from htm_rl.envs.biogwlab.view_clipper import ViewClip
def add_food(env, types=None, **food):
areas = env.get_module('areas')
obstacles = env.get_module('obstacles')
if types is None:
food_types = {'beans': food}
food = dict()
else:
food_types = types
return Food(
types=food_types, env=env,
areas=areas, obstacles=obstacles,
**food
)
class Food(Entity):
family = 'food'
type = EntityType.Consumable
reward: float
positions_fl: set[int]
weighted_generation: bool
generator: FoodPositions
env: Environment
def __init__(
self, env: Environment, reward: float, n_items: int = 1,
area_weights: list[float] = None, positions=None,
**entity
):
super(Food, self).__init__(**entity)
self.reward = reward
self.manual_positions = positions
self.weighted_generation = False
if positions is not None:
self.generator = FoodPositionsManual(shape=env.shape, positions=positions)
else:
self.generator = FoodPositionsGenerator(
shape=env.shape, n_items=n_items, area_weights=area_weights
)
self.weighted_generation = area_weights is not None
self.env = env
def generate(self, seeds):
# we should not take this entity into account
# in aggregated masks during generation
self.initialized = False
seed = seeds['food']
empty_mask = ~self.env.aggregated_mask[EntityType.Obstacle]
if self.weighted_generation:
areas = self.env.entities[EntityType.Area]
area_masks = []
for area in areas:
mask = np.zeros_like(empty_mask)
area.append_mask(mask)
area_masks.append(mask)
else:
area_masks = None
positions_fl = self.generator.generate(
seed=seed, empty_mask=empty_mask, area_masks=area_masks
)
self.positions_fl = set(positions_fl)
self.initialized = True
def collect(self, position, view_direction):
reward, success = 0, False
position_fl = self._flatten_position(position)
if position_fl in self.positions_fl:
self.positions_fl.remove(position_fl)
success = True
reward = self.reward
return reward, success
def render(self, view_clip: ViewClip = None):
if view_clip is None:
positions_fl = np.array(list(self.positions_fl))
env_size = self.env.shape[0] * self.env.shape[1]
return positions_fl, env_size
indices = []
for abs_ind, view_ind in zip(view_clip.abs_indices, view_clip.view_indices):
if abs_ind in self.positions_fl:
indices.append(view_ind)
view_size = view_clip.shape[0] * view_clip.shape[1]
return np.array(indices), view_size
def append_mask(self, mask: np.ndarray):
if not self.initialized:
return
for position_fl in self.positions_fl:
pos = self._unflatten_position(position_fl)
mask[pos] = 1
def append_position(self, exist: bool, position):
return exist or (
self.initialized
and self._flatten_position(position) in self.positions_fl
)
def _flatten_position(self, position):
return position[0] * self.env.shape[1] + position[1]
def _unflatten_position(self, position_fl):
return divmod(position_fl, self.env.shape[1])
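# Illustration: positions are flattened row-major, so on a 5x8 grid the cell (2, 3)
# maps to 2 * 8 + 3 = 19 and _unflatten_position(19) gives back (2, 3).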
|
import requests
from uuid import UUID
# Version: 0.0.1
class AuthorityCredentialsGetter():
"""
    All calls to your Aerobridge instance require credentials from an OAUTH server, in this case the Flight Passport OAUTH server; this class gets the token and the associated public key.
...
Attributes
----------
client_id : str
OAUTH client id for the client credentials token
client_secret : str
        OAUTH client secret for the client credentials grant, as set in the OAUTH server
audience : str
The audience as set in the OAUTH server represented as a string
base_url : str
The fully qualified base url of the OAUTH server
token_url : str
The REST endpoint of the getting a token by executing a client credentials grant
jwks_url : str
        The endpoint where the OAUTH server provides the public key as represented in JWKS
Methods
-------
get_credentials():
        Executes a client credentials grant request to the auth server to get a JWT token
get_public_key():
Gets the public key in the form of a JWKS JSON object of the OAUTH server
"""
def __init__(self, client_id:str, client_secret:str, audience:str, base_url:str, token_endpoint:str, jwks_endpoint:str):
"""
        Constructs all the necessary attributes for the AuthorityCredentialsGetter object.
        Parameters
        ----------
        client_id : str
            OAUTH client id for the client credentials grant
        client_secret : str
            OAUTH client secret for the client credentials grant
audience : str
The audience as set in the OAUTH server represented as a string
base_url : str
The fully qualified base url of the OAUTH server
token_endpoint : str
The REST endpoint of the getting a token by executing a client credentials grant
jwks_endpoint : str
            The endpoint where the OAUTH server provides the public key as represented in JWKS
"""
self.client_id = client_id
self.client_secret = client_secret
self.audience = audience
self.base_url = base_url
self.token_url = base_url + token_endpoint
self.jwks_url = base_url + jwks_endpoint
def get_credentials(self):
"""
        Executes the Client Credentials Grant.
Returns
-------
        JSON with the access token if the request is successful; if it is unsuccessful, the returned JSON contains an error message
"""
payload = {"grant_type":"client_credentials","client_id": self.client_id,"client_secret": self.client_secret,"audience": self.audience,"scope": 'aerobridge.read aerobridge.write'}
url = self.token_url
token_data = requests.post(url, data = payload)
t_data = token_data.json()
return t_data
def get_public_key(self):
"""
Gets the public key as expressed in JWKS format
Returns
-------
        JSON in the JWKS format
"""
url = self.jwks_url
jwks_data = requests.get(url)
jwks_data = jwks_data.json()
return jwks_data
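# Usage sketch (illustrative only; the endpoint paths and URLs below are placeholders,
# not confirmed values for any real deployment):
#
#     auth = AuthorityCredentialsGetter(
#         client_id='my-client-id', client_secret='my-client-secret',
#         audience='testflight.aerobridge.io', base_url='https://id.example.com',
#         token_endpoint='/oauth/token/', jwks_endpoint='/.well-known/jwks.json')
#     token_data = auth.get_credentials()
#     client = AerobridgeClient(aerobridge_url='https://aerobridge.example.com/',
#                               token=token_data['access_token'])
#     print(client.ping_aerobridge().status_code)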
class AerobridgeClient():
"""
    This is a Python client that makes calls to the Aerobridge API and returns data. It requires the requests package and the json module.
...
Attributes
----------
token : str
A valid JWT Token
aerobridge_url : str
The fully qualified domain name for your Aerobridge Instance
authority_url : str
The base url
"""
def __init__(self, aerobridge_url:str, token:str):
"""
Constructs all the necessary attributes for the aerobridge object.
Parameters
----------
token : str
A valid JWT Token as base64url encoded string
aerobridge_url : str
The fully qualified domain name for your Aerobridge Instance
"""
self.token = token
self.aerobridge_url = aerobridge_url if aerobridge_url else 'https://aerobridgetestflight.herokuapp.com/'
self.session = requests.Session()
def ping_aerobridge(self):
'''
This method checks the server heartbeat
Returns:
            heartbeat_response (str): Pong if the server is running properly https://redocly.github.io/redoc/?url=https://raw.githubusercontent.com/openskies-sh/aerobridge/master/api/aerobridge-1.0.0.resolved.yaml#/paths/~1ping/get
'''
aerobridge_url = self.aerobridge_url+ 'ping/'
headers = {'Authorization': 'Bearer '+ self.token}
r = self.session.get(aerobridge_url, headers=headers)
return r
def download_flight_permission(self, operation_id:UUID):
'''
        This method downloads the flight permission for a given flight operation
Parameters:
operation_id (uuid): The uuid of a flight operation in your Aerobridge instance
Returns:
            permission_details (json): Details of the flight operation permission, see https://redocly.github.io/redoc/?url=https://raw.githubusercontent.com/openskies-sh/aerobridge/master/api/aerobridge-1.0.0.resolved.yaml#operation/retrieveFlightPlan
'''
        securl = self.aerobridge_url + 'gcs/flight-operations/' + str(operation_id) + '/permission'
headers = {'Authorization': 'Bearer '+ self.token}
r = self.session.put(securl, headers= headers)
return r
def upload_flight_log(self, operation_id:UUID, raw_log:str):
'''
This method uploads a flight log associated with a operation ID
Parameters:
operation_id (uuid): The uuid of a flight operation in your Aerobridge instance
raw_log (str): The raw log file as retrieved from the vehicle.
Returns:
log_details (json): Details of the newly created flight log, see https://redocly.github.io/redoc/?url=https://raw.githubusercontent.com/openskies-sh/aerobridge/master/api/aerobridge-1.0.0.resolved.yaml#operation/createFlightLog
'''
securl = self.aerobridge_url + 'gcs/flight-logs'
headers = {'Authorization': 'Bearer '+ self.token}
        payload = {'operation': str(operation_id), 'raw_log': raw_log}
r = self.session.post(securl, headers= headers,json = payload)
return r
def download_flight_plan(self, plan_id):
'''
This method downloads the flight plan given a flight plan_id
Parameters:
plan_id (uuid): The uuid of a flight plan in your Aerobridge instance
Returns:
plan_details (json): Details of a flight plan, see https://redocly.github.io/redoc/?url=https://raw.githubusercontent.com/openskies-sh/aerobridge/master/api/aerobridge-1.0.0.resolved.yaml#operation/retrieveFlightPlan
'''
securl = self.aerobridge_url + 'gcs/flight-plans/' + plan_id
headers = {'Authorization': 'Bearer '+ self.token, 'content-type': 'application/json'}
r = self.session.get(securl, headers= headers)
return r
def get_aircraft_by_flight_controller_id(self, registered_flight_module_id:str):
'''
This method downloads the details of an aircraft given the flight controller ID
Parameters:
registered_flight_module_id (str): The id of the flight controller
Returns:
aircraft_detail (json): Details of an aircraft, see https://redocly.github.io/redoc/?url=https://raw.githubusercontent.com/openskies-sh/aerobridge/master/api/aerobridge-1.0.0.resolved.yaml#operation/Get%20Single%20Aircraft%20(RFM%20ID)
'''
securl = self.aerobridge_url + 'registry/aircraft/rfm/' + registered_flight_module_id
headers = {'Authorization': 'Bearer '+ self.token, 'content-type': 'application/json'}
r = self.session.get(securl, headers= headers)
return r
def get_firmware_by_flight_controller_id(self, registered_flight_module_id:str):
'''
This method downloads the details of an aircraft firmware given the flight controller ID
Parameters:
registered_flight_module_id (str): The id of the flight controller
Returns:
firmware_detail (json): Details of an aircraft firmware, see https://redocly.github.io/redoc/?url=https://raw.githubusercontent.com/openskies-sh/aerobridge/master/api/aerobridge-1.0.0.resolved.yaml#operation/Get%20Aircraft%20firmware%20by%20RFM%20ID
'''
securl = self.aerobridge_url + 'registry/aircraft/firmware/' + registered_flight_module_id
headers = {'Authorization': 'Bearer '+ self.token, 'content-type': 'application/json'}
r = self.session.get(securl, headers= headers)
return r |
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as T
import torchvision.transforms.functional as TF
import clip
class CLIPLoss:
def __init__(self, device, name='ViT-B/16'):
self.device = device
self.name = name
self.clip_model, self.transform_PIL = clip.load(self.name, device=self.device, jit=False)
# disable training
self.clip_model.eval()
for p in self.clip_model.parameters():
p.requires_grad = False
# image augmentation
self.transform = T.Compose([
T.Resize((224, 224)),
T.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
])
# placeholder
self.text_zs = None
self.image_zs = None
def normalize(self, x):
return x / x.norm(dim=-1, keepdim=True)
# image-text (e.g., dreamfields)
def prepare_text(self, texts):
# texts: list of strings.
texts = clip.tokenize(texts).to(self.device)
self.text_zs = self.normalize(self.clip_model.encode_text(texts))
print(f'[INFO] prepared CLIP text feature: {self.text_zs.shape}')
def __call__(self, images, mode='text'):
images = self.transform(images)
image_zs = self.normalize(self.clip_model.encode_image(images))
if mode == 'text':
# if more than one string, randomly choose one.
if self.text_zs.shape[0] > 1:
idx = random.randint(0, self.text_zs.shape[0] - 1)
text_zs = self.text_zs[[idx]]
else:
text_zs = self.text_zs
# broadcast text_zs to all image_zs
loss = - (image_zs * text_zs).sum(-1).mean()
else:
raise NotImplementedError
return loss
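# A hedged usage sketch (assumes `images` is a float tensor in [0, 1] of shape
# (B, 3, H, W) on the same device; values are illustrative):
#
#   clip_loss = CLIPLoss(device='cuda')
#   clip_loss.prepare_text(['a photo of a chair'])
#   loss = clip_loss(images, mode='text')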
# image-image (e.g., diet-nerf)
def prepare_image(self, dataset):
# images: a nerf dataset (we need both poses and images!)
pass |
from django.contrib.auth.decorators import login_required
from django.contrib.admin.views.decorators import staff_member_required
from django.http import Http404, HttpResponse, JsonResponse
from django.shortcuts import render
from .forms import ProductModelForm
from products.models import Product
# # This is the WRONG way of doing it
# # Everyone can create objects in the databases
# def bad_view(request, *args, **kwargs):
# # print(dict(request.GET))
# my_request_data = dict(request.GET)
# new_product = my_request_data.get("new_product")
# print(my_request_data, new_product)
# if new_product[0].lower() == "true":
# print("new product")
# return HttpResponse("Dont do this")
# Create your views here.
def search_view(request):
# return HttpResponse("<h1>Hello World</h1>")
# context = {"name": "Anders"}
query = request.GET.get('q') # q is referred to as the query
qs = Product.objects.filter(title__icontains=query)
print(query, qs) # qs is query set
context = {"name": "abc", "query": query}
return render(request, "home.html", context)
# def product_create_view(request, *args, **kwargs):
# # print(request.POST)
# # print(request.GET)
# if request.method == "POST":
# post_data = request.POST or None
# if post_data != None:
# my_form = ProductForm(request.POST)
# # Data is being validated by a Django Form
# if my_form.is_valid():
# print(my_form.cleaned_data.get("title"))
# title_from_input = my_form.cleaned_data.get("title")
# Product.objects.create(title=title_from_input)
# # print("post_data", post_data)
# return render(request, "forms.html", {})
# from django.contrib.auth.decorators import login_required
# @login_required
@staff_member_required
def product_create_view(request):
form = ProductModelForm(request.POST or None)
if form.is_valid():
obj = form.save(commit=False)
# do some stuff
obj.user = request.user
obj.save()
# print(form.cleaned_data) # Cleaned data is Validated data
# data = form.cleaned_data
# Product.objects.create(**data)
form = ProductModelForm()
# Redirect options
# return HttpResponseRedirect("/success")
# return redirect("/success")
return render(request, "forms.html", {"form": form})
def product_detail_view(request, pk):
try:
obj = Product.objects.get(pk=pk)
except Product.DoesNotExist:
raise Http404 # Renders html page, with HTTP status code 404
# return HttpResponse(f"Product id {obj.pk}")
return render(request, "products/detail.html", {"object": obj})
def product_list_view(request, *args, **kwargs):
qs = Product.objects.all()
context = {"object_list": qs}
return render(request, "products/list.html", context)
def product_api_detail_view(request, pk, *args, **kwargs):
try:
obj = Product.objects.get(pk=pk)
except Product.DoesNotExist:
return JsonResponse({"message": "Not found"}) # return JSON with HTTP status code of 404
return JsonResponse({"id": obj.id})
|
# *****************************************************************
# (C) Copyright IBM Corp. 2020, 2021. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# *****************************************************************
import os
import pathlib
import pytest
from importlib.util import spec_from_loader, module_from_spec
from importlib.machinery import SourceFileLoader
test_dir = pathlib.Path(__file__).parent.absolute()
spec = spec_from_loader("opence", SourceFileLoader("opence", os.path.join(test_dir, '..', 'open_ce', 'open-ce-builder')))
opence = module_from_spec(spec)
spec.loader.exec_module(opence)
import helpers
import open_ce.build_env as build_env
import open_ce.utils as utils
from open_ce.errors import OpenCEError
from build_tree_test import TestBuildTree
import open_ce.test_feedstock as test_feedstock
class PackageBuildTracker(object):
def __init__(self):
self.built_packages = set()
def validate_build_feedstock(self, build_command, package_deps = None, conditions=None):
'''
Used to mock the `build_feedstock` function and ensure that packages are built in a valid order.
'''
if package_deps:
self.built_packages = self.built_packages.union(build_command.packages)
for package in build_command.packages:
for dependency in package_deps[package]:
assert dependency in self.built_packages
if conditions:
for condition in conditions:
assert condition(build_command)
def test_build_env(mocker, capsys):
'''
This is a complete test of `build_env`.
It uses `test-env2.yaml` which has a dependency on `test-env1.yaml`, and specifies a chain of package dependencies.
That chain of package dependencies is used by the mocked build_feedstock to ensure that the order of builds is correct.
'''
dirTracker = helpers.DirTracker()
mocker.patch(
'os.system',
side_effect=(lambda x: helpers.validate_cli(x, possible_expect=["git clone", "git checkout"], retval=0)) #At this point all system calls are git clones. If that changes this should be updated.
)
mocker.patch(
'os.getcwd',
side_effect=dirTracker.mocked_getcwd
)
mocker.patch(
'os.chdir',
side_effect=dirTracker.validate_chdir
)
mocker.patch(
'open_ce.validate_config.validate_build_tree'
)
# Dependency graph used below ("a -> b" means package a depends on package b):
#   package11 -> package15
#   package12 -> package11
#   package13 -> package12, package14
#   package14 -> package15, package16
#   package16 -> package15
#   package21 -> package13
package_deps = {"package11": ["package15"],
"package12": ["package11"],
"package13": ["package12", "package14"],
"package14": ["package15", "package16"],
"package15": [],
"package16": ["package15"],
"package21": ["package13"],
"package22": ["package15"]}
#---The first test specifies a python version that isn't supported in the env file by package21.
mocker.patch(
'conda_build.api.render',
side_effect=(lambda path, *args, **kwargs: helpers.mock_renderer(os.getcwd(), package_deps))
)
mocker.patch(
'conda_build.api.get_output_file_paths',
side_effect=(lambda meta, *args, **kwargs: helpers.mock_get_output_file_paths(meta))
)
mocker.patch(
'open_ce.build_tree.BuildTree._create_remote_deps',
side_effect=(lambda x: x)
)
py_version = "2.0"
buildTracker = PackageBuildTracker()
mocker.patch( # This ensures that 'package21' is not built when the python version is 2.0.
'open_ce.build_feedstock.build_feedstock_from_command',
side_effect=(lambda x, *args, **kwargs: buildTracker.validate_build_feedstock(x, package_deps,
conditions=[(lambda command: command.python == py_version),
(lambda command: command.recipe != "package21-feedstock")]))
)
env_file = os.path.join(test_dir, 'test-env2.yaml')
opence._main(["build", build_env.COMMAND, env_file, "--python_versions", py_version, "--run_tests"])
validate_conda_env_files(py_version)
#---The second test specifies a python version that is supported in the env file by package21.
py_version = "2.1"
channel = "my_channel"
package_deps = {"package11": ["package15"],
"package12": ["package11"],
"package13": ["package12", "package14"],
"package14": ["package15", "package16"],
"package15": [],
"package16": ["package15"],
"package21": ["package13"],
"package22": ["package21"]}
mocker.patch(
'conda_build.api.render',
side_effect=(lambda path, *args, **kwargs: helpers.mock_renderer(os.getcwd(), package_deps))
)
buildTracker = PackageBuildTracker()
mocker.patch(
'open_ce.build_feedstock.build_feedstock_from_command',
side_effect=(lambda x, *args, **kwargs: buildTracker.validate_build_feedstock(x, package_deps,
conditions=[(lambda command: command.python == py_version and channel in command.channels)]))
)
env_file = os.path.join(test_dir, 'test-env2.yaml')
opence._main(["build", build_env.COMMAND, env_file, "--python_versions", py_version, "--channels", channel])
validate_conda_env_files(py_version)
#---The third test verifies that the repository_folder argument is working properly.
buildTracker = PackageBuildTracker()
mocker.patch(
'open_ce.build_feedstock.build_feedstock_from_command',
side_effect=(lambda x, *args, **kwargs: buildTracker.validate_build_feedstock(x, package_deps,
conditions=[(lambda command: command.repository.startswith("repo_folder"))]))
)
py_version = "2.1"
env_file = os.path.join(test_dir, 'test-env2.yaml')
opence._main(["build", build_env.COMMAND, env_file, "--repository_folder", "repo_folder", "--python_versions", py_version])
validate_conda_env_files(py_version)
#---The fourth test verifies that builds are skipped properly if they already exist.
mocker.patch(
'open_ce.build_tree.BuildCommand.all_outputs_exist',
return_value=True)
captured = capsys.readouterr()
opence._main(["build", build_env.COMMAND, env_file])
captured = capsys.readouterr()
assert "Skipping build of" in captured.out
mocker.patch(
'open_ce.build_tree.BuildCommand.all_outputs_exist',
return_value=False)
#---The fifth test specifies a cuda version that isn't supported in the env file by package21.
mocker.patch(
'conda_build.api.render',
side_effect=(lambda path, *args, **kwargs: helpers.mock_renderer(os.getcwd(), package_deps))
)
mocker.patch(
'conda_build.api.get_output_file_paths',
side_effect=(lambda meta, *args, **kwargs: helpers.mock_get_output_file_paths(meta))
)
cuda_version = "9.1"
package_deps = {"package11": ["package15"],
"package12": ["package11"],
"package13": ["package12", "package14"],
"package14": ["package15", "package16"],
"package15": [],
"package16": ["package15"],
"package21": ["package13"],
"package22": ["package15"]}
buildTracker = PackageBuildTracker()
mocker.patch( # This ensures that 'package21' is not built when the cuda version is 9.1
'open_ce.build_feedstock.build_feedstock_from_command',
side_effect=(lambda x, *args, **kwargs: buildTracker.validate_build_feedstock(x, package_deps,
conditions=[(lambda command: command.recipe != "package21-feedstock")]))
)
env_file = os.path.join(test_dir, 'test-env2.yaml')
opence._main(["build", build_env.COMMAND, env_file, "--cuda_versions", cuda_version, "--run_tests"])
validate_conda_env_files(cuda_versions=cuda_version)
#---The sixth test specifies a cuda version that is supported in the env file by package21.
cuda_version = "9.2"
package_deps = {"package11": ["package15"],
"package12": ["package11"],
"package13": ["package12", "package14"],
"package14": ["package15", "package16"],
"package15": [],
"package16": ["package15"],
"package21": ["package13"],
"package22": ["package21"]}
mocker.patch(
'conda_build.api.render',
side_effect=(lambda path, *args, **kwargs: helpers.mock_renderer(os.getcwd(), package_deps))
)
buildTracker = PackageBuildTracker()
mocker.patch(
'open_ce.build_feedstock.build_feedstock_from_command',
side_effect=(lambda x, *args, **kwargs: buildTracker.validate_build_feedstock(x, package_deps,
conditions=[(lambda command: command.cudatoolkit == cuda_version)]))
)
env_file = os.path.join(test_dir, 'test-env2.yaml')
opence._main(["build", build_env.COMMAND, env_file, "--cuda_versions", cuda_version])
validate_conda_env_files(cuda_versions=cuda_version)
#---The seventh test specifies specific packages that should be built (plus their dependencies)
package_deps = {"package11": ["package15"],
"package12": ["package11"],
"package13": ["package12", "package14"],
"package14": ["package15", "package16"],
"package15": [],
"package16": ["package15"],
"package21": ["package13"],
"package22": ["package21"]}
mocker.patch(
'conda_build.api.render',
side_effect=(lambda path, *args, **kwargs: helpers.mock_renderer(os.getcwd(), package_deps))
)
buildTracker = PackageBuildTracker()
mocker.patch(
'open_ce.build_feedstock.build_feedstock_from_command',
side_effect=(lambda x, *args, **kwargs: buildTracker.validate_build_feedstock(x, package_deps,
conditions=[(lambda command: not command.recipe in ["package11-feedstock",
"package12-feedstock",
"package13-feedstock",
"package21-feedstock",
"package22-feedstock"])]))
)
env_file = os.path.join(test_dir, 'test-env2.yaml')
captured = capsys.readouterr()
opence._main(["build", build_env.COMMAND, env_file, "--python_versions", py_version, "--packages", "package14,package35"])
captured = capsys.readouterr()
assert "No recipes were found for package35" in captured.out
#---The eighth test makes sure that relative URL paths work.
package_deps = {"package11": ["package15"],
"package12": ["package11"],
"package13": ["package12", "package14"],
"package14": ["package15", "package16"],
"package15": [],
"package16": ["package15"],
"package21": ["package13"],
"package22": ["package15"]}
mocker.patch(
'conda_build.api.render',
side_effect=(lambda path, *args, **kwargs: helpers.mock_renderer(os.getcwd(), package_deps))
)
buildTracker = PackageBuildTracker()
mocker.patch(
'open_ce.build_feedstock.build_feedstock_from_command',
side_effect=(lambda x, *args, **kwargs: buildTracker.validate_build_feedstock(x, package_deps))
)
mocker.patch(
'urllib.request.urlretrieve',
side_effect=(lambda x, filename=None: (os.path.join(test_dir, os.path.basename(x)), None))
)
env_file = 'https://test.com/test-env2.yaml'
opence._main(["build", build_env.COMMAND, env_file])
def validate_conda_env_files(py_versions=utils.DEFAULT_PYTHON_VERS,
build_types=utils.DEFAULT_BUILD_TYPES,
mpi_types=utils.DEFAULT_MPI_TYPES,
cuda_versions=utils.DEFAULT_CUDA_VERS):
# Check if conda env files are created for given python versions and build variants
variants = utils.make_variants(py_versions, build_types, mpi_types, cuda_versions)
for variant in variants:
cuda_env_file = os.path.join(os.getcwd(), utils.DEFAULT_OUTPUT_FOLDER,
"{}{}.yaml".format(utils.CONDA_ENV_FILENAME_PREFIX,
utils.variant_string(variant['python'], variant['build_type'], variant['mpi_type'], variant['cudatoolkit'])))
assert os.path.exists(cuda_env_file)
# Remove the file once its existence is verified
os.remove(cuda_env_file)
def test_env_validate(mocker):
'''
This is a negative test of `build_env`, which passes an invalid env file.
'''
dirTracker = helpers.DirTracker()
mocker.patch(
'os.mkdir',
return_value=0 #Don't worry about making directories.
)
mocker.patch(
'os.system',
side_effect=(lambda x: helpers.validate_cli(x, expect=["git clone"], retval=0)) #At this point all system calls are git clones. If that changes this should be updated.
)
mocker.patch(
'os.getcwd',
side_effect=dirTracker.mocked_getcwd
)
mocker.patch(
'conda_build.api.render',
side_effect=(lambda path, *args, **kwargs: helpers.mock_renderer(os.getcwd(), []))
)
mocker.patch(
'os.chdir',
side_effect=dirTracker.validate_chdir
)
buildTracker = PackageBuildTracker()
mocker.patch(
'open_ce.build_feedstock.build_feedstock',
side_effect=buildTracker.validate_build_feedstock
)
env_file = os.path.join(test_dir, 'test-env-invalid1.yaml')
with pytest.raises(OpenCEError) as exc:
opence._main(["build", build_env.COMMAND, env_file])
assert "Unexpected key chnnels was found in " in str(exc.value)
def test_build_env_container_build(mocker):
'''
Test that passing the --container_build argument calls container_build.build_with_container_tool
'''
arg_strings = ["build", build_env.COMMAND, "--container_build", "my-env.yaml"]
mocker.patch('open_ce.container_build.build_with_container_tool', return_value=0)
mocker.patch('os.path.exists', return_value=1)
mocker.patch('pkg_resources.get_distribution', return_value=None)
opence._main(arg_strings)
def test_build_env_container_build_multiple_cuda_versions(mocker):
'''
Tests that passing multiple values in --cuda_versions argument with container_build fails.
'''
arg_strings = ["build", build_env.COMMAND, "--container_build",
"--cuda_versions", "10.2,11.0", "my-env.yaml"]
mocker.patch('os.path.exists', return_value=1)
with pytest.raises(OpenCEError) as exc:
opence._main(arg_strings)
assert "Only one cuda version" in str(exc.value)
def test_build_env_container_build_cuda_versions(mocker):
'''
Tests that passing --cuda_versions argument with container_build argument works correctly.
'''
dirTracker = helpers.DirTracker()
mocker.patch(
'os.getcwd',
side_effect=dirTracker.mocked_getcwd
)
mocker.patch('open_ce.container_build.build_with_container_tool', return_value=0)
mocker.patch('os.path.exists', return_value=1)
cuda_version = "10.2"
arg_strings = ["build", build_env.COMMAND, "--container_build",
"--cuda_versions", cuda_version, "my-env.yaml"]
opence._main(arg_strings)
validate_conda_env_files(cuda_versions=cuda_version)
def test_build_env_container_build_with_build_args(mocker):
'''
Tests that passing --container_build_args argument with container_build argument works correctly.
'''
dirTracker = helpers.DirTracker()
mocker.patch(
'os.getcwd',
side_effect=dirTracker.mocked_getcwd
)
mocker.patch('open_ce.container_build.build_with_container_tool', return_value=0)
mocker.patch('os.path.exists', return_value=1)
# with docker_build
arg_strings = ["build", build_env.COMMAND, "--docker_build",
"--container_build_args", "--build-args ENV1=test1 some_setting=1", "my-env.yaml"]
opence._main(arg_strings)
# with container_build
arg_strings = ["build", build_env.COMMAND, "--container_build",
"--container_build_args", "--build-args ENV1=test1 some_setting=1", "my-env.yaml"]
opence._main(arg_strings)
def test_build_env_container_build_with_container_tool(mocker):
'''
Tests that passing --container_tool argument works correctly.
'''
dirTracker = helpers.DirTracker()
mocker.patch(
'os.getcwd',
side_effect=dirTracker.mocked_getcwd
)
mocker.patch('open_ce.container_build.build_with_container_tool', return_value=0)
mocker.patch('os.path.exists', return_value=1)
#with docker_build argument
arg_strings = ["build", build_env.COMMAND, "--docker_build",
"--container_tool", "podman", "my-env.yaml"]
opence._main(arg_strings)
#with container_build argument
arg_strings = ["build", build_env.COMMAND, "--container_build",
"--container_tool", "podman", "my-env.yaml"]
opence._main(arg_strings)
def test_build_env_if_no_conda_build(mocker):
'''
Test that build_env should fail if conda_build isn't present and no --container_build
'''
arg_strings = ["build", build_env.COMMAND, "my-env.yaml"]
mocker.patch('pkg_resources.get_distribution', return_value=None)
with pytest.raises(OpenCEError):
opence._main(arg_strings)
def test_run_tests(mocker):
'''
Test that the _run_tests function works properly.
'''
dirTracker = helpers.DirTracker()
mock_build_tree = TestBuildTree([], "3.6", "cpu,cuda", "openmpi", "10.2")
mock_test_commands = [test_feedstock.TestCommand("Test1",
conda_env="test-conda-env2.yaml",
bash_command="echo Test1"),
test_feedstock.TestCommand("Test2",
conda_env="test-conda-env2.yaml",
bash_command="[ 1 -eq 2 ]")]
mocker.patch("open_ce.test_feedstock.gen_test_commands", return_value=mock_test_commands)
mocker.patch(
'os.chdir',
side_effect=dirTracker.validate_chdir
)
conda_env_files = dict()
mock_build_tree._test_commands = dict()
for variant in mock_build_tree._possible_variants:
conda_env_files[str(variant)] = "tests/test-conda-env2.yaml"
mock_build_tree._test_feedstocks[str(variant)] = ["feedstock1"]
# Note: All of the tests should fail, since there isn't a real conda environment to activate
with pytest.raises(OpenCEError) as exc:
build_env._run_tests(mock_build_tree, [], conda_env_files)
assert "There were 4 test failures" in str(exc.value)
def test_build_env_url(mocker):
'''
This tests that if a URL is passed in for an env file that it is downloaded.
I mock urlretrieve to return the test-env-invalid1.yaml file so that I can check
for the invalid channels identifier, ensuring that the download function was called.
'''
dirTracker = helpers.DirTracker()
mocker.patch(
'os.mkdir',
return_value=0 #Don't worry about making directories.
)
mocker.patch(
'os.system',
side_effect=(lambda x: helpers.validate_cli(x, expect=["git clone"], retval=0)) #At this point all system calls are git clones. If that changes this should be updated.
)
mocker.patch(
'os.getcwd',
side_effect=dirTracker.mocked_getcwd
)
mocker.patch(
'conda_build.api.render',
side_effect=(lambda path, *args, **kwargs: helpers.mock_renderer(os.getcwd(), []))
)
mocker.patch(
'os.chdir',
side_effect=dirTracker.validate_chdir
)
buildTracker = PackageBuildTracker()
mocker.patch(
'open_ce.build_feedstock.build_feedstock',
side_effect=buildTracker.validate_build_feedstock
)
mocker.patch(
'urllib.request.urlretrieve',
side_effect=(lambda x, filename=None: (os.path.join(test_dir, os.path.basename(x)), None))
)
env_file = 'https://test.com/test-env-invalid1.yaml'
with pytest.raises(OpenCEError) as exc:
opence._main(["build", build_env.COMMAND, env_file])
assert "Unexpected key chnnels was found in " in str(exc.value)
|
import argparse
import glob
import itertools
import os
import shutil
import numpy as np
import torch
from trixi.util import Config, GridSearch
def check_attributes(object_, attributes):
missing = []
for attr in attributes:
if not hasattr(object_, attr):
missing.append(attr)
if len(missing) > 0:
return False
else:
return True
def set_seeds(seed, cuda=True):
if not hasattr(seed, "__iter__"):
seed = (seed, seed, seed)
np.random.seed(seed[0])
torch.manual_seed(seed[1])
if cuda: torch.cuda.manual_seed_all(seed[2])
def make_onehot(array, labels=None, axis=1, newaxis=False):
# get labels if necessary
if labels is None:
labels = np.unique(array)
labels = list(map(lambda x: x.item(), labels))
# get target shape
new_shape = list(array.shape)
if newaxis:
new_shape.insert(axis, len(labels))
else:
new_shape[axis] = new_shape[axis] * len(labels)
# make zero array
if type(array) == np.ndarray:
new_array = np.zeros(new_shape, dtype=array.dtype)
elif torch.is_tensor(array):
new_array = torch.zeros(new_shape, dtype=array.dtype, device=array.device)
else:
raise TypeError("Onehot conversion undefined for object of type {}".format(type(array)))
# fill new array
n_seg_channels = 1 if newaxis else array.shape[axis]
for seg_channel in range(n_seg_channels):
for l, label in enumerate(labels):
new_slc = [slice(None), ] * len(new_shape)
slc = [slice(None), ] * len(array.shape)
new_slc[axis] = seg_channel * len(labels) + l
if not newaxis:
slc[axis] = seg_channel
new_array[tuple(new_slc)] = array[tuple(slc)] == label
return new_array
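# A hedged example of the expected behaviour (values illustrative):
#   make_onehot(np.array([[0, 1], [2, 0]]), labels=[0, 1, 2], axis=0, newaxis=True)
#   returns an array of shape (3, 2, 2) whose channel k holds (array == labels[k]).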
def match_to(x, ref, keep_axes=(1,)):
target_shape = list(ref.shape)
for i in keep_axes:
target_shape[i] = x.shape[i]
target_shape = tuple(target_shape)
if x.shape == target_shape:
pass
if x.dim() == 1:
x = x.unsqueeze(0)
if x.dim() == 2:
while x.dim() < len(target_shape):
x = x.unsqueeze(-1)
x = x.expand(*target_shape)
x = x.to(device=ref.device, dtype=ref.dtype)
return x
def make_slices(original_shape, patch_shape):
working_shape = original_shape[-len(patch_shape):]
splits = []
for i in range(len(working_shape)):
splits.append([])
for j in range(working_shape[i] // patch_shape[i]):
splits[i].append(slice(j*patch_shape[i], (j+1)*patch_shape[i]))
rest = working_shape[i] % patch_shape[i]
if rest > 0:
splits[i].append(slice((j+1)*patch_shape[i], (j+1)*patch_shape[i] + rest))
# now we have all slices for the individual dimensions
# we need their combinatorial combinations
slices = list(itertools.product(*splits))
for i in range(len(slices)):
slices[i] = [slice(None), ] * (len(original_shape) - len(patch_shape)) + list(slices[i])
return slices
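# A hedged example: make_slices((1, 1, 4, 4), (2, 2)) returns 4 slice tuples, each
# like [slice(None), slice(None), slice(0, 2), slice(0, 2)], tiling the 4x4 plane
# with 2x2 patches.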
def coordinate_grid_samples(mean, std, factor_std=5, scale_std=1.):
relative = np.linspace(-scale_std*factor_std, scale_std*factor_std, 2*factor_std+1)
positions = np.array([mean + i * std for i in relative]).T
axes = np.meshgrid(*positions)
axes = map(lambda x: list(x.ravel()), axes)
samples = list(zip(*axes))
samples = list(map(np.array, samples))
return samples
def get_default_experiment_parser():
parser = argparse.ArgumentParser()
parser.add_argument("base_dir", type=str, help="Working directory for experiment.")
parser.add_argument("-c", "--config", type=str, default=None, help="Path to a config file.")
parser.add_argument("-v", "--visdomlogger", action="store_true", help="Use visdomlogger.")
parser.add_argument("-tx", "--tensorboardxlogger", type=str, default=None)
parser.add_argument("-tl", "--telegramlogger", action="store_true")
parser.add_argument("-dc", "--default_config", type=str, default="DEFAULTS", help="Select a default Config")
parser.add_argument("-ad", "--automatic_description", action="store_true")
parser.add_argument("-r", "--resume", type=str, default=None, help="Path to resume from")
parser.add_argument("-irc", "--ignore_resume_config", action="store_true", help="Ignore Config in experiment we resume from.")
parser.add_argument("-test", "--test", action="store_true", help="Run test instead of training")
parser.add_argument("-g", "--grid", type=str, help="Path to a config for grid search")
parser.add_argument("-s", "--skip_existing", action="store_true", help="Skip configs for which an experiment exists, only for grid search")
parser.add_argument("-m", "--mods", type=str, nargs="+", default=None, help="Mods are Config stubs to update only relevant parts for a certain setup.")
parser.add_argument("-ct", "--copy_test", action="store_true", help="Copy test files to original experiment.")
return parser
def run_experiment(experiment, configs, args, mods=None, **kwargs):
# set a few defaults
if "explogger_kwargs" not in kwargs:
kwargs["explogger_kwargs"] = dict(folder_format="{experiment_name}_%Y%m%d-%H%M%S")
if "explogger_freq" not in kwargs:
kwargs["explogger_freq"] = 1
if "resume_save_types" not in kwargs:
kwargs["resume_save_types"] = ("model", "simple", "th_vars", "results")
config = Config(file_=args.config) if args.config is not None else Config()
config.update_missing(configs[args.default_config].deepcopy())
if args.mods is not None and mods is not None:
for mod in args.mods:
config.update(mods[mod])
config = Config(config=config, update_from_argv=True)
# GET EXISTING EXPERIMENTS TO BE ABLE TO SKIP CERTAIN CONFIGS
if args.skip_existing:
existing_configs = []
for exp in os.listdir(args.base_dir):
try:
existing_configs.append(Config(file_=os.path.join(args.base_dir, exp, "config", "config.json")))
except Exception as e:
pass
if args.grid is not None:
grid = GridSearch().read(args.grid)
else:
grid = [{}]
for combi in grid:
config.update(combi)
if args.skip_existing:
skip_this = False
for existing_config in existing_configs:
if existing_config.contains(config):
skip_this = True
break
if skip_this:
continue
if "backup_every" in config:
kwargs["save_checkpoint_every_epoch"] = config["backup_every"]
loggers = {}
if args.visdomlogger:
loggers["v"] = ("visdom", {}, 1)
if args.tensorboardxlogger is not None:
if args.tensorboardxlogger == "same":
loggers["tx"] = ("tensorboard", {}, 1)
else:
loggers["tx"] = ("tensorboard", {"target_dir": args.tensorboardxlogger}, 1)
if args.telegramlogger:
kwargs["use_telegram"] = True
if args.automatic_description:
difference_to_default = Config.difference_config_static(config, configs["DEFAULTS"]).flat(keep_lists=True, max_split_size=0, flatten_int=True)
description_str = ""
for key, val in difference_to_default.items():
val = val[0]
description_str = "{} = {}\n{}".format(key, val, description_str)
config.description = description_str
exp = experiment(config=config,
base_dir=args.base_dir,
resume=args.resume,
ignore_resume_config=args.ignore_resume_config,
loggers=loggers,
**kwargs)
trained = False
if args.resume is None or args.test is False:
exp.run()
trained = True
if args.test:
exp.run_test(setup=not trained)
if isinstance(args.resume, str) and exp.elog is not None and args.copy_test:
for f in glob.glob(os.path.join(exp.elog.save_dir, "test*")):
if os.path.isdir(f):
shutil.copytree(f, os.path.join(args.resume, "save", os.path.basename(f)))
else:
shutil.copy(f, os.path.join(args.resume, "save")) |
from transducer._util import UNSET
from transducer.infrastructure import Reduced
# Transducible processes
def transduce(transducer, reducer, iterable, init=UNSET):
r = transducer(reducer)
accumulator = r.initial() if init is UNSET else init
for item in iterable:
accumulator = r.step(accumulator, item)
if isinstance(accumulator, Reduced):
accumulator = accumulator.value
break
return r.complete(accumulator)
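# A minimal, hedged sketch of driving `transduce` directly. The reducer/transducer
# protocol below (initial/step/complete, with a transducer being a callable that
# wraps a reducer) is inferred from the function above; the package ships its own
# transducers and reducers, which are not used here.
if __name__ == '__main__':

    class Appending:
        """Toy reducer that collects items into a list."""
        def initial(self):
            return []

        def step(self, accumulator, item):
            accumulator.append(item)
            return accumulator

        def complete(self, accumulator):
            return accumulator

    def doubling(reducer):
        """Toy transducer: forwards each item doubled to the wrapped reducer."""
        class Doubling:
            def initial(self):
                return reducer.initial()

            def step(self, accumulator, item):
                return reducer.step(accumulator, item * 2)

            def complete(self, accumulator):
                return reducer.complete(accumulator)
        return Doubling()

    print(transduce(doubling, Appending(), [1, 2, 3]))  # -> [2, 4, 6]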
|
# -*- coding: utf-8 -*-
description = 'Setup for the Sapphire Filter in primary beam'
includes = ['monoturm']
group = 'optional'
devices = dict(
saph_mot = device('nicos.devices.vendor.ipc.Motor',
description = 'Motor to move the sapphire filter',
bus = 'bus5',
# addr = 66, #old rack, old connectors
addr = 88,
slope = 412.8,
unit = 'mm',
abslimits = (-133.4, 120),
# negative limit switch was used to reference,
# reference position is -133.4
# almost certainly the positive limit switch engages around 120 mm + \epsilon,
# so one can check whether microsteps + slope are set correctly...
zerosteps = 500000,
confbyte = 8, # read out from card
speed = 80, # read out from card
accel = 20, # read out from card
microstep = 2, # read out from card
min = 444930, # lower refpos. taken from old config
max = 520640, # read out from card
),
saph = device('nicos.devices.generic.Switcher',
description = 'sapphire filter',
moveable = 'saph_mot',
mapping = {'in': -133,
'out': -8},
blockingmove = True,
precision = 1,
),
)
|
from .application import *
from .static import *
from .database import *
from .auth import *
|
import os
from pathlib import Path
# import dbl
from dotenv import load_dotenv
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from apollo import *
from apollo.checks import *
from apollo.commands import *
from apollo.embeds import *
from apollo.events import *
from apollo.input import *
from apollo.services import *
# Load .env file
env_path = Path(".") / ".env"
load_dotenv(dotenv_path=env_path)
env = os.getenv("ENV", "develop")
db_user = os.getenv("DB_USER")
db_pass = os.getenv("DB_PASS")
db_name = os.getenv("DB_NAME")
engine = create_engine(
f"mysql://{db_user}:{db_pass}@localhost/{db_name}?charset=utf8mb4",
pool_recycle=3600,
)
if env == "develop":
engine.echo = True
# Configure session factory
Session = sessionmaker(expire_on_commit=False)
Session.configure(bind=engine)
scoped_session = ScopedSession(Session)
# Setup cache
cache = Cache(Session)
cache.load_prefixes()
# Initialize bot
apollo = Apollo(Session, cache)
# Initialize input services
time_zone_input = TimeZoneInput(apollo)
# Initialize embeds
about_embed = AboutEmbed()
event_embed = EventEmbed()
help_embed = HelpEmbed()
start_time_embed = StartTimeEmbed()
time_zone_embed = TimeZoneEmbed()
# Initialize services
format_date_time = FormatDateTime()
request_local_start_time = RequestLocalStartTime(scoped_session, format_date_time, time_zone_input, time_zone_embed,
start_time_embed)
update_event = UpdateEvent(apollo, event_embed)
update_response = UpdateResponse(apollo)
sync_event_channels = SyncEventChannels(apollo)
list_event = ListEvent(apollo, event_embed)
list_events = ListEvents(apollo, list_event)
handle_event_reaction = HandleEventReaction(
apollo, update_event, update_response, request_local_start_time
)
# Add events
apollo.add_cog(OnCommandError(apollo))
apollo.add_cog(OnGuildChannelDelete(apollo))
apollo.add_cog(OnGuildJoin(apollo))
apollo.add_cog(OnGuildRemove(apollo))
apollo.add_cog(OnRawMessageDelete(apollo))
apollo.add_cog(OnRawReactionAdd(apollo, handle_event_reaction))
apollo.add_cog(OnReady(apollo))
# Add commands
apollo.add_cog(AboutCommand(apollo, about_embed))
apollo.add_cog(ChannelCommand(apollo, list_events))
apollo.add_cog(EventCommand(apollo, list_events, sync_event_channels))
apollo.add_cog(HelpCommand(apollo, help_embed))
apollo.add_cog(PrefixCommand(apollo))
apollo.add_cog(RoleCommand(apollo))
apollo.add_cog(TimeZoneCommand(apollo, time_zone_embed, time_zone_input))
# Add checks
apollo.add_check(NotEventChannel(apollo))
apollo.run(os.getenv("BOT_TOKEN"), reconnect=True)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 15 14:42:15 2019
@author: Zhiyu Ye
Email: [email protected]
In London, the United Kingdom
"""
import os
import json
import cv2
import numpy as np
from PIL import Image
from tools.sub_masks_annotations import create_sub_masks, create_sub_mask_annotation
import time
import random
import matplotlib.pyplot as plt
if __name__ == "__main__":
input_dir = 'path to/YCB_Video_Dataset'
# Generate the categories
class_file = open(input_dir + '/image_sets/classes.txt')
line = class_file.readline()
count = 0
category_id = 0
categories = []
while line:
category_id += 1
category = {'supercategory':line, 'id':category_id, 'name':line}
categories.append(category)
line = class_file.readline()
class_file.close()
# Read the names of the images to generate annotations
image_names_file = open(input_dir + '/image_sets/train.txt')
line = image_names_file.readline()
image_names = []
while line:
image_names.append(line[:-1])
line = image_names_file.readline()
image_names_file.close()
# For shuffling the data
num_of_images = len(image_names)
random.seed(0)
image_id_index = random.sample([i for i in range(0, num_of_images)], num_of_images)
# Generate the images and the annotations
image_dir = input_dir + '/data'
width = 640
height = 480
iscrowd = 0
annotation_id = 0
annotations = []
images = []
image_count = -1
count = 0
for image_name in image_names:
start_time = time.time()
print('Processing:', image_name, '...')
# Write information of each image
file_name = image_name + '-color.png'
image_count += 1
image_id = image_id_index[image_count]
image_item = {'file_name':file_name, 'height':height, 'id':image_id, 'width':width}
images.append(image_item)
# Write information of each mask in the image
mask_name = image_name + '-label.png'
image = Image.open(image_dir + '/' + mask_name)
# Extract each mask of the image
sub_masks = create_sub_masks(image)
count = count + len(sub_masks)
for category_id, sub_mask in sub_masks.items():
category_id = int(category_id[1:category_id.find(',')])
annotation_id += 1
cimg = np.array(sub_mask)
opencvImage = np.stack((cimg, cimg, cimg), axis = 2)
instance = np.uint8(np.where(opencvImage == True, 0, 255))
annotation_item = create_sub_mask_annotation(instance, image_id, category_id, annotation_id, iscrowd)
annotations.append(annotation_item)
print('Done! Time used:', time.time()-start_time)
print('Test if all the instances are detected, the result is', count == annotation_id)
# Combine categories, annotations and images to form a json file
json_data = {'annotations':annotations, 'categories':categories, 'images':images}
annotations_output_dir = input_dir + '/annotations'
if not os.path.exists(annotations_output_dir):
os.makedirs(annotations_output_dir)
with open(annotations_output_dir + '/instances.json', 'w') as f:
json.dump(json_data, f)
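# A hedged sanity check one could run afterwards (paths as assumed above):
#   with open(annotations_output_dir + '/instances.json') as f:
#       coco = json.load(f)
#   print(len(coco['images']), len(coco['annotations']), len(coco['categories']))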
|
#!/usr/bin/env python
import os
import sys
from setuptools import setup
if sys.argv[-1] == "publish":
os.system("python setup.py sdist upload")
sys.exit()
# Hackishly synchronize the version.
version = r"0.1.0"
setup(
name="EchelleJSON",
version=version,
author="Ian Czekala",
author_email="[email protected]",
url="https://github.com/iancze/EchelleJSON",
py_modules=["EchelleJSON"],
description="A simple JSON format for Echelle spectra",
long_description=open("README.rst").read(),
package_data={"": ["LICENSE"]},
include_package_data=True,
classifiers=[
"Development Status :: 5 - Production/Stable",
"License :: OSI Approved :: MIT License",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Operating System :: OS Independent",
"Programming Language :: Python",
],
)
|
"""Tests for neurodocker.interfaces.FreeSurfer"""
from neurodocker.interfaces.tests import utils
class TestFreeSurfer(object):
def test_docker(self):
specs = {
'pkg_manager': 'apt',
'instructions': [
('base', 'ubuntu:16.04'),
('freesurfer', {'version': '6.0.0-min'}),
('user', 'neuro'),
]
}
bash_test_file = "test_freesurfer.sh"
utils.test_docker_container_from_specs(
specs=specs, bash_test_file=bash_test_file)
def test_singularity(self):
specs = {
'pkg_manager': 'apt',
'instructions': [
('base', 'docker://ubuntu:16.04'),
('freesurfer', {'version': '6.0.0-min'}),
('user', 'neuro'),
]
}
bash_test_file = "test_freesurfer.sh"
utils.test_singularity_container_from_specs(
specs=specs, bash_test_file=bash_test_file)
|
from django.contrib import admin
from events.models import Event, Attendance
admin.site.register(Event)
admin.site.register(Attendance) |
r"""
Poisson equation with source term.
Find :math:`u` such that:
.. math::
\int_{\Omega} c \nabla v \cdot \nabla u
= - \int_{\Omega_L} b v = - \int_{\Omega_L} f v p
\;, \quad \forall v \;,
where :math:`b(x) = f(x) p(x)`, :math:`p` is a given FE field and :math:`f` is
a given general function of space.
This example demonstrates use of functions for defining material parameters,
regions, parameter variables or boundary conditions. Notably, it demonstrates
the following:
1. How to define a material parameter by an arbitrary function - see the
function :func:`get_pars()` that evaluates :math:`f(x)` in quadrature
points.
2. How to define a known function that belongs to a given FE space (field) -
this function, :math:`p(x)`, is defined in a FE sense by its nodal values
only - see the function :func:`get_load_variable()`.
In order to define the load :math:`b(x)` directly, the term ``dw_dot``
should be replaced by ``dw_integrate``.
"""
from __future__ import absolute_import
import numpy as nm
from sfepy import data_dir
filename_mesh = data_dir + '/meshes/3d/cylinder.mesh'
options = {
'nls' : 'newton',
'ls' : 'ls',
}
materials = {
'm' : ({'c' : 1.0},),
'load' : 'get_pars',
}
regions = {
'Omega' : 'all',
'Omega_L' : 'vertices by get_middle_ball',
'Gamma_Left' : ('vertices in (x < 0.00001)', 'facet'),
'Gamma_Right' : ('vertices in (x > 0.099999)', 'facet'),
}
fields = {
'temperature' : ('real', 1, 'Omega', 1),
'velocity' : ('real', 'vector', 'Omega', 1),
}
variables = {
'u' : ('unknown field', 'temperature', 0),
'v' : ('test field', 'temperature', 'u'),
'p' : ('parameter field', 'temperature',
{'setter' : 'get_load_variable'}),
'w' : ('parameter field', 'velocity',
{'setter' : 'get_convective_velocity'}),
}
ebcs = {
'u1' : ('Gamma_Left', {'u.0' : 'get_ebc'}),
'u2' : ('Gamma_Right', {'u.0' : -2.0}),
}
integrals = {
'i' : 1,
}
equations = {
'Laplace equation' :
"""dw_laplace.i.Omega( m.c, v, u )
- dw_convect_v_grad_s.i.Omega( v, w, u )
= - dw_dot.i.Omega_L( load.f, v, p )"""
}
solvers = {
'ls' : ('ls.scipy_direct', {}),
'newton' : ('nls.newton', {
'i_max' : 1,
'eps_a' : 1e-10,
}),
}
def get_pars(ts, coors, mode=None, **kwargs):
"""
Evaluate the coefficient `load.f` in quadrature points `coors` using a
function of space.
For scalar parameters, the shape has to be set to `(coors.shape[0], 1, 1)`.
"""
if mode == 'qp':
x = coors[:, 0]
val = 55.0 * (x - 0.05)
val.shape = (coors.shape[0], 1, 1)
return {'f' : val}
def get_middle_ball(coors, domain=None):
"""
Get the :math:`\Omega_L` region as a function of mesh coordinates.
"""
x, y, z = coors[:, 0], coors[:, 1], coors[:, 2]
r1 = nm.sqrt((x - 0.025)**2.0 + y**2.0 + z**2)
r2 = nm.sqrt((x - 0.075)**2.0 + y**2.0 + z**2)
flag = nm.where((r1 < 2.3e-2) | (r2 < 2.3e-2))[0]
return flag
def get_load_variable(ts, coors, region=None):
"""
Define nodal values of 'p' in the nodal coordinates `coors`.
"""
y = coors[:,1]
val = 5e5 * y
return val
def get_convective_velocity(ts, coors, region=None):
"""
Define nodal values of 'w' in the nodal coordinates `coors`.
"""
val = 100.0 * nm.ones_like(coors)
return val
def get_ebc(coors, amplitude):
"""
Define the essential boundary conditions as a function of coordinates
`coors` of region nodes.
"""
z = coors[:, 2]
val = amplitude * nm.sin(z * 2.0 * nm.pi)
return val
functions = {
'get_pars' : (get_pars,),
'get_load_variable' : (get_load_variable,),
'get_convective_velocity' : (get_convective_velocity,),
'get_middle_ball' : (get_middle_ball,),
'get_ebc' : (lambda ts, coor, bc, problem, **kwargs: get_ebc(coor, 5.0),),
}
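# A minimal, hedged self-check of the material parameter function using synthetic
# quadrature-point coordinates (not part of the problem description proper; sfepy
# normally evaluates these functions itself during assembly).
if __name__ == '__main__':
    coors = nm.linspace(0.0, 0.1, 5)[:, None] * nm.ones((1, 3))
    out = get_pars(None, coors, mode='qp')
    print(out['f'].shape)  # expected: (5, 1, 1)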
|
from __future__ import absolute_import
import pytest
from django.test.utils import override_settings
from tests.testapp.models import Thing
pytestmark = pytest.mark.django_db
def test_simple_query(client, caplog):
client.get('/test_orm_create/', HTTP_X_REQUEST_ID='foo')
assert ' -- request_id=foo' in caplog.records[-1].message
def test_query_with_newlines(client, caplog):
client.get('/test_raw_query/', HTTP_X_REQUEST_ID='foo')
assert ' -- request_id=foo' in caplog.records[-1].message
def test_signal_handling(client, caplog):
with override_settings(
MIDDLEWARE_CLASSES=[],
INSTALLED_APPS=['tests.testapp', 'django_db_log_requestid']):
client.get('/test_orm_create/', HTTP_X_REQUEST_ID='foo')
assert ' -- request_id=foo' in caplog.records[-1].message |
import pytest
from pincell import config as pincell_config
def pytest_addoption(parser):
parser.addoption('--exe')
parser.addoption('--build-inputs', action='store_true')
def pytest_configure(config):
opts = ['exe', 'build_inputs']
for opt in opts:
if config.getoption(opt) is not None:
pincell_config[opt] = config.getoption(opt)
@pytest.fixture
def run_in_tmpdir(tmpdir):
orig = tmpdir.chdir()
try:
yield
finally:
orig.chdir()
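# A hedged example of a test using the fixture above (test name hypothetical; such a
# test would live in a test module, not in conftest.py):
def test_runs_inside_tmpdir(run_in_tmpdir, tmpdir):
    import os
    assert os.getcwd() == str(tmpdir)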
|
from classes.map_class import Map
from unittest import TestCase
class TestMap(TestCase):
def setUp(self):
self.map_object = Map(['link1', 'link2', 'link3'])
def tearDown(self):
self.setUp()
|
a = int(input())
b = int(input())
c = int(input())
n = int(input())
d = max((a+1)//2, b, c)
m = 4*d-a-b-c
if n < m:
print(0)
exit(0)
w = (n-m)//4
print(2*d - a + 2*w)
print(d - b + w)
print(d - c + w)
|
import flopy.mt3d as mt
class GcgAdapter:
_data = None
def __init__(self, data):
self._data = data
def validate(self):
# should be implemented
# for key in content:
# do something
# return some hints
pass
def is_valid(self):
# should be implemented
# for key in content:
# do something
# return true or false
return True
def merge(self):
default = self.default()
for key in self._data:
default[key] = self._data[key]
return default
def get_package(self, _mt):
content = self.merge()
return mt.Mt3dGcg(
_mt,
**content
)
@staticmethod
def default():
default = {
"mxiter": 1,
"iter1": 50,
"isolve": 3,
"ncrs": 0,
"accl": 1,
"cclose": 1e-05,
"iprgcg": 0,
"extension": 'gcg',
"unitnumber": None
}
return default
@staticmethod
def read_package(package):
content = {
"mxiter": package.mxiter,
"iter1": package.iter1,
"isolve": package.isolve,
"ncrs": package.ncrs,
"accl": package.accl,
"cclose": package.cclose,
"iprgcg": package.iprgcg,
"extension": package.extension[0],
"unitnumber": package.unit_number[0]
}
return content
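# A hedged usage sketch (values illustrative; a flopy Mt3dms instance would be
# needed for get_package, so only merge() is exercised here):
if __name__ == '__main__':
    adapter = GcgAdapter({'mxiter': 2, 'cclose': 1e-06})
    print(adapter.merge())  # defaults with mxiter and cclose overridden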
|
import numpy as np
from matplotlib import patches
from common.world import World
from pursuit.agents.ad_hoc.adhoc import AdhocAgent, ACTIONS
from pursuit.agents.handcoded.teammate_aware import TeammateAwareAgent
from pursuit.reward import get_reward_function
from pursuit.state import PursuitState
from pursuit.transition import get_transition_function
import matplotlib.pyplot as plt
agent = TeammateAwareAgent(0)
world_size = (10, 10)
adhoc_filename = 'adhoc_dataset/10x10ta_random_200'
adhoc = AdhocAgent.load(adhoc_filename)
positions = [(3, 3), (3, 7), (7, 3)]
prey = (5, 5)
result = np.zeros(world_size)
for x in range(world_size[0]):
for y in range(world_size[1]):
if (x, y) in positions:
continue
initial_state = PursuitState(tuple([(x, y)] + positions), (prey,), world_size)
adhoc.b_model.predict(initial_state)
predicted_action_dist = adhoc.b_model.cache[initial_state][0]
true_action = agent.act(initial_state)
result[x, y] = predicted_action_dist[ACTIONS.index(true_action)]
fig,ax = plt.subplots(1)
im = ax.imshow(result, interpolation='nearest')
fig.colorbar(im)
for x, y in positions:
rect = patches.Rectangle((x-0.5, y-0.5), 0.95, 0.95, linewidth=1, edgecolor='r', facecolor='black')
ax.add_patch(rect)
rect = patches.Rectangle((prey[0]-0.5, prey[1]-0.5), 0.95, 0.95, linewidth=1, edgecolor='r', facecolor='red')
ax.add_patch(rect)
plt.show() |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# author: Yizhong
# created_at: 07/25/2017 上午10:33
import tensorflow as tf
import tensorflow.contrib as tc
def rnn(rnn_type, inputs, length, hidden_size, layer_num=1, dropout_keep_prob=None, concat=True):
if not rnn_type.startswith('bi'):
cell = get_cell(rnn_type, hidden_size, layer_num, dropout_keep_prob)
outputs, _ = tf.nn.dynamic_rnn(cell, inputs, sequence_length=length, dtype=tf.float32)
state = outputs[:, -1, :]
else:
cell_fw = get_cell(rnn_type, hidden_size, layer_num, dropout_keep_prob)
cell_bw = get_cell(rnn_type, hidden_size, layer_num, dropout_keep_prob)
outputs, _ = tf.nn.bidirectional_dynamic_rnn(cell_bw, cell_fw, inputs,
sequence_length=length, dtype=tf.float32)
state_fw = outputs[0][:, -1, :]
state_bw = outputs[1][:, -1, :]
if concat:
outputs = tf.concat(outputs, 2)
state = tf.concat([state_fw, state_bw], 1)
else:
outputs = outputs[0] + outputs[1]
state = state_fw + state_bw
return outputs, state
def get_cell(rnn_type, hidden_size, layer_num=1, dropout_keep_prob=None):
if rnn_type.endswith('lstm'):
cell = tc.rnn.LSTMCell(num_units=hidden_size, state_is_tuple=True)
elif rnn_type.endswith('gru'):
cell = tc.rnn.GRUCell(num_units=hidden_size)
elif rnn_type.endswith('rnn'):
cell = tc.rnn.BasicRNNCell(num_units=hidden_size)
else:
raise NotImplementedError('Unsupported rnn type: {}'.format(rnn_type))
if dropout_keep_prob is not None:
cell = tc.rnn.DropoutWrapper(cell, input_keep_prob=dropout_keep_prob, output_keep_prob=dropout_keep_prob)
if layer_num > 1:
cell = tc.rnn.MultiRNNCell([cell]*layer_num, state_is_tuple=True)
return cell
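# A hedged usage sketch (TensorFlow 1.x graph mode; shapes are illustrative):
if __name__ == '__main__':
    max_len, input_dim, hidden_size = 10, 8, 16
    inputs = tf.placeholder(tf.float32, [None, max_len, input_dim])
    lengths = tf.placeholder(tf.int32, [None])
    outputs, state = rnn('bi-lstm', inputs, lengths, hidden_size, dropout_keep_prob=0.8)
    print(outputs.get_shape(), state.get_shape())  # (?, 10, 32) and (?, 32)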
|
'''
Created on Jan 5, 2020
@author: ballance
'''
from pyucis.source_file import SourceFile
class StatementId():
def __init__(self, file : SourceFile, line : int, item : int):
self.file = file
self.line = line
self.item = item
def getFile(self) -> SourceFile:
return self.file
def getLine(self) -> int:
return self.line
def getItem(self) -> int:
return self.item
|