import module1  # import functions defined in an external module
module1.make_pizze(16, "pepperoni")  # calls to imported functions must be prefixed with the module name
module1.make_pizze(12, "mushrooms", "green pepper", "extra cheese")
from module1 import make_pizze as pizz  # use `as` to give the imported function an alias
pizz(12, "mushrooms", "green pepper", "extra cheese")
import module1 as p  # a module can be given an alias as well
p.make_pizze(12, "mushrooms", "green pepper", "extra cheese")
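# For reference, a module1.py satisfying the calls above might look like this
# (the body is an assumption; only the signature is implied by the calls):
#
# def make_pizze(size, *toppings):
#     print(f"Making a {size}-inch pizza with the following toppings:")
#     for topping in toppings:
#         print(f"- {topping}")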
|
import asyncio
import enum
import random
import logging
from collections import namedtuple, deque, defaultdict
from .utils import id_generator, prefix_for_id
def get_default_logger():
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
log.propagate = False
    if log.hasHandlers():
        return log
handler = logging.StreamHandler()
formatter = logging.Formatter(
fmt="%(asctime)s.%(msecs)03d %(levelname)-8s %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
handler.setFormatter(formatter)
log.addHandler(handler)
return log
ReadOnly = namedtuple(
"ReadOnly", ("node_id", "req_id", "index", "resps", "item", "future")
)
Entry = namedtuple("Entry", ("index", "term", "item"))
Snapshot = namedtuple("Snapshot", ("data", "index", "term"))
class State(enum.IntEnum):
FOLLOWER = 0
CANDIDATE = 1
LEADER = 2
class PeerState(enum.IntEnum):
PROBE = 0
REPLICATE = 1
SNAPSHOT = 2
class Msg(enum.IntEnum):
APPEND_REQ = 0
APPEND_RESP = 1
VOTE_REQ = 2
VOTE_RESP = 3
PROPOSE_REQ = 4
PROPOSE_RESP = 5
READINDEX_REQ = 6
READINDEX_RESP = 7
SNAPSHOT_REQ = 8
SNAPSHOT_RESP = 9
class Log(object):
"""A Raft log"""
def __init__(self):
self.entries = []
self.offset = 0
def append(self, index, term, item):
self.entries.append(Entry(index, term, item))
@property
def last_index(self):
if self.entries:
return self.entries[-1].index
return 0
@property
def last_term(self):
if self.entries:
return self.entries[-1].term
return 0
def lookup(self, index):
i = index - self.offset - 1
if 0 <= i < len(self.entries):
return self.entries[i]
return None
def reset(self, offset=0):
self.entries = []
self.offset = offset
def rollback_to_before(self, index):
assert index > 0
i = index - self.offset - 1
if i >= 0:
del self.entries[i:]
def drop_entries(self, index):
i = index - self.offset
assert i > 0
self.entries = self.entries[i:]
self.offset = index
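# Illustration of the offset arithmetic above (not part of the original
# module): for a log holding indices 1..5, drop_entries(3) leaves the entries
# with indices 4 and 5 and sets offset=3, so lookup(4) resolves to
# self.entries[4 - 3 - 1] == self.entries[0].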
class StateMachine(object):
def apply(self, item):
pass
def apply_read(self, item):
pass
def create_snapshot(self):
pass
def restore_snapshot(self, snap):
pass
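# A minimal concrete StateMachine for illustration (an assumption, not part
# of the original module): a dict-backed key/value store. Note that
# restore_snapshot receives the snapshot's `data` field, per on_snapshot_req.
#
# class KVStateMachine(StateMachine):
#     def __init__(self):
#         self.data = {}
#
#     def apply(self, item):
#         key, value = item
#         self.data[key] = value
#         return value
#
#     def apply_read(self, item):
#         return self.data.get(item)
#
#     def create_snapshot(self):
#         return dict(self.data)
#
#     def restore_snapshot(self, snap):
#         self.data = dict(snap)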
class Peer(object):
"""Information about a peer node"""
def __init__(self, node_id):
self.node_id = node_id
self.state = PeerState.PROBE
self.match_index = 0
self.next_index = 0
self.voted = False
self.done_requests = set()
self.recently_messaged = False
def __repr__(self):
return "Peer<match_index=%d, next_index=%d>" % (
self.match_index,
self.next_index,
)
def reset(self, last_index):
self.state = PeerState.PROBE
self.match_index = 0
self.next_index = last_index + 1
self.voted = False
def become_probe(self):
self.state = PeerState.PROBE
self.next_index = self.match_index + 1
def become_replicate(self):
self.state = PeerState.REPLICATE
self.next_index = self.match_index + 1
def become_snapshot(self):
self.state = PeerState.SNAPSHOT
def maybe_update_index(self, index):
update = self.match_index < index
if update:
self.match_index = index
if self.next_index < index + 1:
self.next_index = index + 1
return update
def optimistic_update_index(self, n):
self.next_index = n + 1
def maybe_decrement_index(self, rejected):
if self.state == PeerState.REPLICATE:
if self.match_index >= rejected:
return False
else:
self.next_index = self.match_index + 1
return True
else:
self.next_index = max(rejected - 1, 1)
return True
def request_already_done(self, req_id):
return req_id in self.done_requests
def add_done_request(self, req_id):
self.done_requests.add(req_id)
def update_done_requests(self, last_req_id):
# TODO: more efficient data structure
self.done_requests = set(i for i in self.done_requests if i > last_req_id)
class RaftNode(object):
def __init__(
self,
node_id,
peer_node_ids,
state_machine,
heartbeat_ticks=1,
election_ticks=None,
max_entries_per_msg=5,
snapshot_entries=1000,
snapshot_overlap_entries=10,
random_state=None,
logger=None,
loop=None,
):
self.node_id = node_id
self.peers = {n: Peer(n) for n in peer_node_ids}
self.pending = {}
self.last_req_id = 0
self.leader_id = None
self.req_id_generator = id_generator(self.node_id)
self.state_machine = state_machine
self.loop = loop or asyncio.get_event_loop()
self.max_entries_per_msg = max_entries_per_msg
self.snapshot_entries = snapshot_entries
self.snapshot_overlap_entries = snapshot_overlap_entries
assert snapshot_overlap_entries < snapshot_entries
# Read-only requests
# - A queue of ReadOnly objects
self.readonly_queue = deque()
# - A map of request_id -> ReadOnly objects
self.readonly_map = {}
# - A map of readindex -> List[ReadOnly]
self.readindex_map = defaultdict(list)
# Random state
if random_state is None:
random_state = random.Random()
self.random = random_state
# Logging
self.logger = logger or get_default_logger()
# Timing ticks
if heartbeat_ticks <= 0:
raise ValueError("heartbeat_ticks must be > 0")
if election_ticks is None:
election_ticks = 10 * heartbeat_ticks
self.heartbeat_timeout = heartbeat_ticks
self.election_ticks = election_ticks
self.elapsed_ticks = 0
self.reset_election_timeout()
self.term = 0
self.log = Log()
self.snapshot = None
self.commit_index = 0
self.applied_index = 0
self.handlers = {
Msg.APPEND_REQ: self.on_append_req,
Msg.APPEND_RESP: self.on_append_resp,
Msg.VOTE_REQ: self.on_vote_req,
Msg.VOTE_RESP: self.on_vote_resp,
Msg.PROPOSE_REQ: self.on_propose_req,
Msg.PROPOSE_RESP: self.on_propose_resp,
Msg.READINDEX_REQ: self.on_readindex_req,
Msg.READINDEX_RESP: self.on_readindex_resp,
Msg.SNAPSHOT_REQ: self.on_snapshot_req,
Msg.SNAPSHOT_RESP: self.on_snapshot_resp,
}
# Initialize as a follower
self.become_follower()
def append_req(
self, term, prev_log_index, prev_log_term, entries, leader_commit, tag
):
return (
Msg.APPEND_REQ,
self.node_id,
term,
prev_log_index,
prev_log_term,
entries,
leader_commit,
tag,
)
def append_resp(self, term, success, index, tag):
return (Msg.APPEND_RESP, self.node_id, term, success, index, tag)
def vote_req(self, term, last_log_index, last_log_term):
return (Msg.VOTE_REQ, self.node_id, term, last_log_index, last_log_term)
def vote_resp(self, term, success):
return (Msg.VOTE_RESP, self.node_id, term, success)
def propose_req(self, req_id, item, last_req_id):
return (Msg.PROPOSE_REQ, self.node_id, req_id, item, last_req_id)
def propose_resp(self, req_id, item, leader_id):
return (Msg.PROPOSE_RESP, self.node_id, req_id, item, leader_id)
def readindex_req(self, req_id):
return (Msg.READINDEX_REQ, self.node_id, req_id)
def readindex_resp(self, req_id, index, leader_id):
return (Msg.READINDEX_RESP, self.node_id, req_id, index, leader_id)
def snapshot_req(self, term, snap_index, snap_term, snap_data):
return (Msg.SNAPSHOT_REQ, self.node_id, term, snap_index, snap_term, snap_data)
def snapshot_resp(self, term, success, index):
return (Msg.SNAPSHOT_RESP, self.node_id, term, success, index)
def create_future(self):
return self.loop.create_future()
def next_request_id(self):
return next(self.req_id_generator)
def on_message(self, msg):
"""Called when a new message is received.
Returns
-------
msgs : list
A list of ``(target, msg)`` pairs, where ``target`` is the node id
to send to, and ``msg`` is the message to send.
"""
return self.handlers[msg[0]](*msg[1:])
def on_tick(self):
"""Called on every time tick.
Returns
-------
msgs : list
A list of ``(target, msg)`` pairs, where ``target`` is the node id
to send to, and ``msg`` is the message to send.
"""
self.elapsed_ticks += 1
msgs = []
if self.state == State.LEADER:
if self.elapsed_ticks >= self.heartbeat_timeout:
self.reset_heartbeat_timeout()
msgs = self.broadcast_append(is_heartbeat=True)
else:
if self.elapsed_ticks >= self.election_timeout:
self.become_candidate()
msgs = [
(
node,
self.vote_req(
self.term, self.log.last_index, self.log.last_term
),
)
for node in self.peers
]
return msgs
def propose(self, item):
req_id = self.next_request_id()
future = self.create_future()
self.pending[req_id] = future
self.logger.debug("Creating proposal [req_id: %d, item: %r]", req_id, item)
if self.leader_id is None:
msgs = self.on_propose_resp(self.node_id, req_id, item, None)
elif self.state == State.LEADER:
msgs = self.on_propose_req(self.node_id, req_id, item, self.last_req_id)
else:
msgs = [(self.leader_id, self.propose_req(req_id, item, self.last_req_id))]
return future, msgs
def read(self, item, local=False):
if local:
future = self.create_future()
res = self.state_machine.apply_read(item)
future.set_result(res)
msgs = []
else:
req_id = self.next_request_id()
future = self.create_future()
self.logger.debug(
"Creating read-only request [req_id: %d, item: %r]", req_id, item
)
if self.leader_id is None:
future.set_exception(ValueError("Leader unknown"))
msgs = []
elif self.state == State.LEADER:
if self.has_committed_entry_this_term():
msgs = self.schedule_readonly(
self.node_id, req_id, item=item, future=future
)
else:
future.set_exception(
ValueError("Leader hasn't committed anything yet this term")
)
msgs = []
else:
ro = ReadOnly(
node_id=self.node_id,
req_id=req_id,
index=None,
resps=None,
item=item,
future=future,
)
self.readonly_map[ro.req_id] = ro
msgs = [(self.leader_id, self.readindex_req(req_id))]
return future, msgs
def reset_heartbeat_timeout(self):
self.elapsed_ticks = 0
def reset_election_timeout(self):
# The election timeout is in [election_ticks, election_ticks * 2]
self.elapsed_ticks = 0
self.election_timeout = self.random.randint(
self.election_ticks, 2 * self.election_ticks
)
def reset_client_requests(self):
exc = ValueError("Leadership changed during request, please retry")
# Reset any pending read requests
        self.readonly_queue.clear()
        for ro in self.readonly_map.values():
            if ro.future is not None and not ro.future.done():
                ro.future.set_exception(exc)
        self.readonly_map.clear()
        # readindex_map values are lists of ReadOnly objects
        for ros in self.readindex_map.values():
            for ro in ros:
                if ro.future is not None and not ro.future.done():
                    ro.future.set_exception(exc)
        self.readindex_map.clear()
# Reset any pending propose requests
        for f in self.pending.values():
            if not f.done():
                f.set_exception(exc)
self.pending.clear()
def reset(self):
# Reset all timers
self.reset_election_timeout()
# No leader known
self.leader_id = None
# Reset previous votes
self.voted_for = None
self.vote_count = 0
# Reset all peers
for peer in self.peers.values():
peer.reset(self.log.last_index)
# Reset client requests
self.reset_client_requests()
def become_follower(self, leader_id=None):
self.reset()
self.state = State.FOLLOWER
self.leader_id = leader_id
self.logger.info(
"Server %s transitioned to follower, term %s", self.node_id, self.term
)
def become_candidate(self):
self.reset()
self.state = State.CANDIDATE
self.leader_id = None
self.term += 1
self.voted_for = self.node_id
self.vote_count = 1
self.logger.info(
"Server %s transitioned to candidate, term %s", self.node_id, self.term
)
def become_leader(self):
self.reset()
self.state = State.LEADER
self.leader_id = self.node_id
self.logger.info(
"Server %s transitioned to leader, term %s", self.node_id, self.term
)
def update_commit_index(self):
updated = False
for N in range(self.commit_index + 1, self.log.last_index + 1):
count = sum(1 for p in self.peers.values() if p.match_index >= N)
entry = self.log.lookup(N)
entry_term = entry.term if entry else 0
if self.is_majority(count) and self.term == entry_term:
self.commit_index = N
updated = True
return updated
def update_last_applied(self):
for index in range(self.applied_index + 1, self.commit_index + 1):
entry = self.log.lookup(index)
self.apply_entry(entry)
self.applied_index += 1
# Apply any pending reads
for ro in self.readindex_map.pop(self.applied_index, ()):
res = self.state_machine.apply_read(ro.item)
ro.future.set_result(res)
# Maybe take a snapshot
if self.applied_index - self.log.offset > self.snapshot_entries:
self.logger.info("Creating snapshot up to index %d", self.applied_index)
self.snapshot = Snapshot(
data=self.state_machine.create_snapshot(),
index=self.log.last_index,
term=self.log.last_term,
)
self.log.drop_entries(
self.applied_index - self.snapshot_overlap_entries
)
def apply_entry(self, entry):
if entry.item is None:
return
req_id, item, last_req_id = entry.item
self.logger.debug(
"Applying entry [req_id: %d, item: %r, last_req_id: %d]",
req_id,
item,
last_req_id,
)
node_id = prefix_for_id(req_id)
if node_id == self.node_id:
if req_id > self.last_req_id:
resp = self.state_machine.apply(item)
fut = self.pending.pop(req_id, None)
if fut and not fut.done():
fut.set_result(resp)
self.last_req_id = req_id
else:
assert req_id not in self.pending
else:
peer = self.peers.get(node_id)
if peer is not None and not peer.request_already_done(req_id):
self.state_machine.apply(item)
peer.add_done_request(req_id)
def schedule_readonly(self, node_id, req_id, item=None, future=None):
ro = ReadOnly(
node_id=node_id,
req_id=req_id,
index=self.commit_index,
resps=set(),
item=item,
future=future,
)
self.readonly_map[ro.req_id] = ro
self.readonly_queue.append(ro)
return self.broadcast_append(send_empty=True)
def process_readonly(self, ro, index=None):
if ro.future.done():
return
if index is None:
index = ro.index
if index <= self.applied_index:
# Read is ready now, apply
res = self.state_machine.apply_read(ro.item)
ro.future.set_result(res)
else:
# Read will be ready later
self.readindex_map[index].append(ro)
def maybe_become_follower(self, term, leader_id=None):
if term > self.term:
self.term = term
self.become_follower(leader_id)
def is_majority(self, n):
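        # `n` counts acknowledgements from peers (self excluded); together
        # with this node that is a majority of the full cluster whenever
        # n >= len(self.peers) / 2.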
return n >= len(self.peers) / 2
def has_committed_entry_this_term(self):
committed = self.log.lookup(self.commit_index)
return committed is not None and committed.term == self.term
def _make_append_reqs(self, peer, is_heartbeat=False, send_empty=False):
if is_heartbeat and peer.recently_messaged:
peer.recently_messaged = False
return []
if peer.state == PeerState.SNAPSHOT:
# When in snapshot state, we hold off on messages until we receive
# a reply
return []
prev_index = peer.next_index - 1
should_snapshot = False
if self.log.offset < peer.next_index:
if self.log.offset < prev_index:
prev_term = self.log.lookup(prev_index).term
else:
assert self.log.offset == prev_index
if self.log.offset == 0:
prev_term = 0
elif self.snapshot is not None:
prev_term = self.snapshot.term
else:
should_snapshot = True
else:
should_snapshot = True
if should_snapshot:
peer.become_snapshot()
req = self.snapshot_req(
self.term, self.snapshot.index, self.snapshot.term, self.snapshot.data
)
return [(peer.node_id, req)]
if peer.state == PeerState.PROBE:
if self.log.last_index >= peer.next_index:
entries = [self.log.lookup(peer.next_index)]
else:
entries = []
else:
entries = [
self.log.lookup(i)
for i in range(
peer.next_index,
                    # send at most max_entries_per_msg entries
                    min(peer.next_index + self.max_entries_per_msg - 1, self.log.last_index)
                    + 1,
)
]
if entries:
peer.optimistic_update_index(entries[-1].index)
if not entries and not is_heartbeat and not send_empty:
return []
peer.recently_messaged = not is_heartbeat
if self.readonly_queue:
tag = self.readonly_queue[-1].req_id
else:
tag = None
req = self.append_req(
self.term, prev_index, prev_term, entries, self.commit_index, tag
)
return [(peer.node_id, req)]
def broadcast_append(self, is_heartbeat=False, send_empty=False):
msgs = []
for peer in self.peers.values():
msgs.extend(
self._make_append_reqs(
peer, is_heartbeat=is_heartbeat, send_empty=send_empty
)
)
return msgs
def on_append_req(
self, node_id, term, prev_log_index, prev_log_term, entries, leader_commit, tag
):
self.logger.debug(
"Received append request: [node_id: %d, term: %d, prev_log_index: %d, "
"prev_log_term: %d, entries: %r, leader_commit: %d]",
node_id,
term,
prev_log_index,
prev_log_term,
entries,
leader_commit,
)
# Reject requests with a previous term
if term < self.term:
reply = self.append_resp(self.term, False, None, tag)
return [(node_id, reply)]
# Requests with a higher term may convert this node to a follower
self.maybe_become_follower(term, node_id)
if self.state == State.FOLLOWER:
self.leader_id = node_id
self.reset_election_timeout()
if prev_log_index > self.log.offset:
existing = self.log.lookup(prev_log_index)
if existing is None or existing.term != prev_log_term:
reply = self.append_resp(self.term, False, prev_log_index + 1, tag)
return [(node_id, reply)]
for e_index, e_term, e_item in entries:
existing = self.log.lookup(e_index)
if existing is None:
self.log.append(e_index, e_term, e_item)
elif existing.term != e_term:
self.log.rollback_to_before(e_index)
self.log.append(e_index, e_term, e_item)
if leader_commit > self.commit_index:
self.commit_index = min(leader_commit, self.log.last_index)
self.update_last_applied()
reply = self.append_resp(self.term, True, self.log.last_index, tag)
return [(node_id, reply)]
else:
return []
def on_append_resp(self, node_id, term, success, index, tag):
self.logger.debug(
"Received append response: [node_id: %d, term: %d, success: %s, index: %s]",
node_id,
term,
success,
index,
)
self.maybe_become_follower(term, node_id)
if self.state != State.LEADER:
return []
msgs = []
peer = self.peers[node_id]
ro = self.readonly_map.get(tag)
if ro is not None:
ro.resps.add(node_id)
            # ro.resps counts acknowledgements from peers; this node is
            # counted implicitly by is_majority
            if self.is_majority(len(ro.resps)):
# We have now heard from a majority of nodes that the index is
# this far, which means all read requests up to `tag` have been
# accounted for.
while self.readonly_queue:
ro = self.readonly_queue.popleft()
del self.readonly_map[ro.req_id]
if ro.future is not None:
self.process_readonly(ro)
else:
# Remote request, return read index
reply = self.readindex_resp(ro.req_id, ro.index, None)
msgs.append((ro.node_id, reply))
if ro.req_id == tag:
break
if success:
if peer.maybe_update_index(index):
if peer.state == PeerState.PROBE:
peer.become_replicate()
if self.update_commit_index():
self.update_last_applied()
msgs.extend(self.broadcast_append())
else:
if peer.maybe_decrement_index(index):
if peer.state == PeerState.REPLICATE:
peer.become_probe()
msgs.extend(self._make_append_reqs(peer))
return msgs
def on_vote_req(self, node_id, term, last_log_index, last_log_term):
self.logger.debug(
"Received vote request: [node_id: %d, term: %d, "
"last_log_index: %d, last_log_term: %d]",
node_id,
term,
last_log_index,
last_log_term,
)
if term < self.term:
reply = self.vote_resp(self.term, False)
return [(node_id, reply)]
self.maybe_become_follower(term, node_id)
if (self.voted_for is None or self.voted_for == node_id) and (
last_log_index >= self.log.last_index
and last_log_term >= self.log.last_term
):
self.reset_election_timeout()
self.voted_for = node_id
return [(node_id, self.vote_resp(self.term, True))]
else:
return [(node_id, self.vote_resp(self.term, False))]
def on_vote_resp(self, node_id, term, success):
self.logger.debug(
"Received vote response: [node_id: %d, term: %d, success: %s]",
node_id,
term,
success,
)
self.maybe_become_follower(term, node_id)
msgs = []
if self.state == State.CANDIDATE:
peer = self.peers[node_id]
if not peer.voted:
peer.voted = True
if success:
self.vote_count += 1
                    # vote_count includes our own vote; is_majority expects
                    # the number of peer votes only
                    if self.is_majority(self.vote_count - 1):
self.become_leader()
# We append an empty entry on leadership transition
index = self.log.last_index + 1
self.log.append(index, self.term, None)
msgs = self.broadcast_append()
return msgs
def on_propose_req(self, node_id, req_id, item, last_req_id):
self.logger.debug(
"Received propose request: [node_id: %d, req_id: %d, "
"item: %r, last_req_id: %d]",
node_id,
req_id,
item,
last_req_id,
)
if self.state == State.LEADER:
# This node thinks it is the leader, apply directly
index = self.log.last_index + 1
self.log.append(index, self.term, (req_id, item, last_req_id))
msgs = self.broadcast_append()
else:
# We're not the leader, respond accordingly
msgs = [(node_id, self.propose_resp(req_id, item, self.leader_id))]
return msgs
def on_propose_resp(self, node_id, req_id, item, leader_id):
self.logger.debug(
"Received propose response: [node_id: %d, req_id: %d, "
"item: %r, leader_id: %d]",
node_id,
req_id,
item,
leader_id,
)
fut = self.pending.get(req_id)
msgs = []
if fut is not None and not fut.done():
if leader_id is not None:
# Retry with new leader
msgs = [(leader_id, self.propose_req(req_id, item, self.last_req_id))]
else:
fut.set_exception(ValueError("Leader unknown"))
del self.pending[req_id]
if req_id > self.last_req_id:
self.last_req_id = req_id
else:
# Future already done, move on
if fut is not None:
del self.pending[req_id]
if req_id > self.last_req_id:
self.last_req_id = req_id
return msgs
def on_readindex_req(self, node_id, req_id):
self.logger.debug(
"Received readindex request: [node_id: %d, req_id: %d]", node_id, req_id
)
if self.state == State.LEADER and self.has_committed_entry_this_term():
msgs = self.schedule_readonly(node_id, req_id)
else:
# We're either not the leader, or we are the leader but aren't
# quite ready to take requests. Respond accordingly.
msgs = [(node_id, self.readindex_resp(req_id, None, self.leader_id))]
return msgs
def on_readindex_resp(self, node_id, req_id, index, leader_id):
self.logger.debug(
"Received readindex response: [node_id: %d, req_id: %d, "
"index: %s, leader_id: %s]",
node_id,
req_id,
            index,
            leader_id,
        )
ro = self.readonly_map.pop(req_id, None)
if ro is not None and not ro.future.done():
if index is None:
if leader_id is None:
# Leader unknown, error
ro.future.set_exception(ValueError("Leader unknown"))
else:
# Retry with new leader
self.readonly_map[ro.req_id] = ro
return [(leader_id, self.readindex_req(req_id))]
else:
self.process_readonly(ro, index=index)
return []
def on_snapshot_req(self, node_id, term, snap_index, snap_term, snap_data):
self.logger.debug(
"Received snapshot request: [node_id: %d, term: %d, "
"snap_index: %d, snap_term: %d]",
node_id,
term,
snap_index,
snap_term,
)
# Reject requests with a previous term
if term < self.term:
reply = self.snapshot_resp(self.term, False, self.commit_index)
return [(node_id, reply)]
self.maybe_become_follower(term, node_id)
if self.state == State.FOLLOWER:
self.reset_election_timeout()
if self.commit_index >= snap_index:
success = False
else:
self.leader_id = node_id
self.state_machine.restore_snapshot(snap_data)
self.log.reset(snap_index)
self.commit_index = snap_index
self.applied_index = snap_index
success = True
else:
success = False
reply = self.snapshot_resp(self.term, success, self.commit_index)
return [(node_id, reply)]
def on_snapshot_resp(self, node_id, term, success, index):
self.logger.debug(
"Received snapshot response: [node_id: %d, term: %d, "
"success: %s, index: %d]",
node_id,
term,
success,
index,
)
peer = self.peers[node_id]
if success:
peer.match_index = index
peer.become_probe()
return self._make_append_reqs(peer)
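# Illustrative wiring (an assumption; a real deployment supplies the
# transport, and `id_generator`/`prefix_for_id` come from this package's
# utils module):
#
#     sm = StateMachine()
#     node = RaftNode(node_id=1, peer_node_ids=[2, 3], state_machine=sm)
#     for target, msg in node.on_tick():          # drive once per tick
#         transport.send(target, msg)
#     for target, msg in node.on_message(incoming):
#         transport.send(target, msg)
#     future, msgs = node.propose(("key", "value"))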
|
# -*- coding: utf-8 -*-
import ctypes
import ctypes.util
import os
import sys
def get_libc_function(fn):
if sys.platform == 'win32':
if sys.version_info.minor >= 5:
libc = ctypes.windll.msvcrt
else:
libc = ctypes.CDLL(ctypes.util.find_msvcrt())
else:
libc = ctypes.CDLL(ctypes.util.find_library('c'))
return getattr(libc, fn)
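# Example (illustrative): fetch libc's strlen and call it directly.
#
#     strlen = get_libc_function('strlen')
#     strlen.restype = ctypes.c_size_t
#     assert strlen(b'hello') == 5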
def load_library(name):
# If an explicit override has been given then use it
lpath = os.environ.get('PYFR_{0}_LIBRARY_PATH'.format(name.upper()))
if lpath:
return ctypes.CDLL(lpath)
# Otherwise synthesise the library name and start searching
lname = platform_libname(name)
# Start with system search path
try:
return ctypes.CDLL(lname)
# ..and if this fails then run our own search
except OSError:
for sd in platform_libdirs():
try:
return ctypes.CDLL(os.path.abspath(os.path.join(sd, lname)))
except OSError:
pass
else:
raise OSError('Unable to load {0}'.format(name))
def platform_libname(name):
if sys.platform == 'darwin':
return 'lib{0}.dylib'.format(name)
elif sys.platform == 'win32':
return '{0}.dll'.format(name)
else:
return 'lib{0}.so'.format(name)
def platform_libdirs():
path = os.environ.get('PYFR_LIBRARY_PATH', '')
dirs = [d for d in path.split(':') if d]
# On Mac OS X append the default path used by MacPorts
if sys.platform == 'darwin':
return dirs + ['/opt/local/lib']
# Otherwise just return
else:
return dirs
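# Example (illustrative): load_library('metis') first honours
# PYFR_METIS_LIBRARY_PATH, then tries the system search path for
# libmetis.so / libmetis.dylib / metis.dll, and finally each directory in
# PYFR_LIBRARY_PATH (plus /opt/local/lib on macOS).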
|
from isce3.ext.isce3.io import *
# Note that the local 'gdal' package import shadows the gdal package that was
# imported from 'isce3.ext.isce3.io' above
from . import gdal
|
from evalml.pipelines.components.transformers import Transformer
from evalml.utils import (
_convert_woodwork_types_wrapper,
_retain_custom_types_and_initalize_woodwork,
infer_feature_types
)
def _extract_year(col, encode_as_categories=False):
return col.dt.year, None
_month_to_int_mapping = {"January": 0, "February": 1, "March": 2, "April": 3, "May": 4, "June": 5,
"July": 6, "August": 7, "September": 8, "October": 9, "November": 10, "December": 11}
def _extract_month(col, encode_as_categories=False):
months = col.dt.month_name()
months_unique = months.unique()
months_encoded = months.map(lambda m: _month_to_int_mapping[m])
if encode_as_categories:
months_encoded = months_encoded.astype("category")
return months_encoded, {m: _month_to_int_mapping[m] for m in months_unique}
_day_to_int_mapping = {"Sunday": 0, "Monday": 1, "Tuesday": 2, "Wednesday": 3, "Thursday": 4, "Friday": 5,
"Saturday": 6}
def _extract_day_of_week(col, encode_as_categories=False):
days = col.dt.day_name()
days_unique = days.unique()
days_encoded = days.map(lambda d: _day_to_int_mapping[d])
if encode_as_categories:
days_encoded = days_encoded.astype("category")
return days_encoded, {d: _day_to_int_mapping[d] for d in days_unique}
def _extract_hour(col, encode_as_categories=False):
return col.dt.hour, None
class DateTimeFeaturizer(Transformer):
"""Transformer that can automatically featurize DateTime columns."""
name = "DateTime Featurization Component"
hyperparameter_ranges = {}
_function_mappings = {"year": _extract_year,
"month": _extract_month,
"day_of_week": _extract_day_of_week,
"hour": _extract_hour}
def __init__(self, features_to_extract=None, encode_as_categories=False, random_seed=0, **kwargs):
"""Extracts features from DateTime columns
Arguments:
features_to_extract (list): List of features to extract. Valid options include "year", "month", "day_of_week", "hour".
encode_as_categories (bool): Whether day-of-week and month features should be encoded as pandas "category" dtype.
This allows OneHotEncoders to encode these features.
random_seed (int): Seed for the random number generator. Defaults to 0.
"""
if features_to_extract is None:
features_to_extract = ["year", "month", "day_of_week", "hour"]
invalid_features = set(features_to_extract) - set(self._function_mappings.keys())
if len(invalid_features) > 0:
raise ValueError("{} are not valid options for features_to_extract".format(", ".join([f"'{feature}'" for feature in invalid_features])))
parameters = {"features_to_extract": features_to_extract,
"encode_as_categories": encode_as_categories}
parameters.update(kwargs)
self._date_time_col_names = None
self._categories = {}
self.encode_as_categories = encode_as_categories
super().__init__(parameters=parameters,
component_obj=None,
random_seed=random_seed)
def fit(self, X, y=None):
X = infer_feature_types(X)
self._date_time_col_names = X.select("datetime").columns
return self
def transform(self, X, y=None):
"""Transforms data X by creating new features using existing DateTime columns, and then dropping those DateTime columns
Arguments:
X (ww.DataTable, pd.DataFrame): Data to transform
y (ww.DataColumn, pd.Series, optional): Ignored.
Returns:
ww.DataTable: Transformed X
"""
X_ww = infer_feature_types(X)
X_t = _convert_woodwork_types_wrapper(X_ww.to_dataframe())
features_to_extract = self.parameters["features_to_extract"]
if len(features_to_extract) == 0:
return infer_feature_types(X_t)
for col_name in self._date_time_col_names:
for feature in features_to_extract:
name = f"{col_name}_{feature}"
features, categories = self._function_mappings[feature](X_t[col_name], self.encode_as_categories)
X_t[name] = features
if categories:
self._categories[name] = categories
X_t = X_t.drop(self._date_time_col_names, axis=1)
return _retain_custom_types_and_initalize_woodwork(X_ww, X_t)
def get_feature_names(self):
"""Gets the categories of each datetime feature.
Returns:
Dictionary, where each key-value pair is a column name and a dictionary
mapping the unique feature values to their integer encoding.
"""
return self._categories
def _get_feature_provenance(self):
provenance = {}
for col_name in self._date_time_col_names:
provenance[col_name] = []
for feature in self.parameters['features_to_extract']:
provenance[col_name].append(f'{col_name}_{feature}')
return provenance
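# Example usage (illustrative; assumes woodwork infers the column below as
# "datetime"):
#
#     import pandas as pd
#     df = pd.DataFrame({"ts": pd.date_range("2021-01-01", periods=3)})
#     dt = DateTimeFeaturizer(features_to_extract=["year", "month"])
#     X_t = dt.fit(df).transform(df)   # columns: ts_year, ts_month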
|
import math
import collections
def solution():
while True:
n = int(input())
if n==0:
break
v = collections.Counter(str(math.factorial(n)))
print('%d! --' %(n))
for it in range(5):
print(' (%d)%5d' %(it, v[str(it)]), end = '')
print('')
for it in range(5,10):
print(' (%d)%5d' %(it, v[str(it)]), end = '')
print('')
if __name__=="__main__":
solution()
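# Sample session (illustrative): entering 8 then 0 prints the digit counts
# of 8! = 40320:
#
#     8! --
#      (0)    2 (1)    0 (2)    1 (3)    1 (4)    1
#      (5)    0 (6)    0 (7)    0 (8)    0 (9)    0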
|
"""
REST calls to Hopsworks Feature Store Service
"""
import json
import sys
from hops import constants, util
from hops.exceptions import RestAPIError
def _get_featurestores():
"""
Sends a REST request to get all featurestores for the project
Returns:
a list of Featurestore JSON DTOs
Raises:
:RestAPIError: if there was an error in the REST call to Hopsworks
"""
method = constants.HTTP_CONFIG.HTTP_GET
connection = util._get_http_connection(https=True)
resource_url = (constants.DELIMITERS.SLASH_DELIMITER +
constants.REST_CONFIG.HOPSWORKS_REST_RESOURCE + constants.DELIMITERS.SLASH_DELIMITER +
constants.REST_CONFIG.HOPSWORKS_PROJECT_RESOURCE + constants.DELIMITERS.SLASH_DELIMITER +
util.project_id() + constants.DELIMITERS.SLASH_DELIMITER +
constants.REST_CONFIG.HOPSWORKS_FEATURESTORES_RESOURCE)
response = util.send_request(connection, method, resource_url)
resp_body = response.read().decode('utf-8')
response_object = json.loads(resp_body)
# for python 3
if sys.version_info > (3, 0):
if response.code != 200:
error_code, error_msg, user_msg = util._parse_rest_error(response_object)
raise RestAPIError("Could not fetch feature stores (url: {}), server response: \n " \
"HTTP code: {}, HTTP reason: {}, error code: {}, error msg: {}, user msg: {}".format(
resource_url, response.code, response.reason, error_code, error_msg, user_msg))
else: # for python 2
if response.status != 200:
error_code, error_msg, user_msg = util._parse_rest_error(response_object)
raise RestAPIError("Could not fetch feature stores (url: {}), server response: \n " \
"HTTP code: {}, HTTP reason: {}, error code: {}, error msg: {}, user msg: {}".format(
                resource_url, response.status, response.reason, error_code, error_msg, user_msg))
return response_object
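# The assembled resource_url has this shape (assuming the usual values of the
# REST_CONFIG constants, which is an assumption here):
#
#     /hopsworks-api/api/project/<project_id>/featurestores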
def _get_featurestore_metadata(featurestore):
"""
Makes a REST call to hopsworks to get all metadata of a featurestore (featuregroups and
training datasets) for the provided featurestore.
Args:
:featurestore: the name of the database, defaults to the project's featurestore
Returns:
JSON response
Raises:
:RestAPIError: if there was an error in the REST call to Hopsworks
"""
method = constants.HTTP_CONFIG.HTTP_GET
connection = util._get_http_connection(https=True)
resource_url = (constants.DELIMITERS.SLASH_DELIMITER +
constants.REST_CONFIG.HOPSWORKS_REST_RESOURCE + constants.DELIMITERS.SLASH_DELIMITER +
constants.REST_CONFIG.HOPSWORKS_PROJECT_RESOURCE + constants.DELIMITERS.SLASH_DELIMITER +
util.project_id() + constants.DELIMITERS.SLASH_DELIMITER +
constants.REST_CONFIG.HOPSWORKS_FEATURESTORES_RESOURCE + constants.DELIMITERS.SLASH_DELIMITER +
featurestore + constants.DELIMITERS.SLASH_DELIMITER +
constants.REST_CONFIG.HOPSWORKS_FEATURESTORE_METADATA_RESOURCE)
response = util.send_request(connection, method, resource_url)
resp_body = response.read().decode('utf-8')
response_object = json.loads(resp_body)
# for python 3
if sys.version_info > (3, 0):
if response.code != 200:
error_code, error_msg, user_msg = util._parse_rest_error(response_object)
raise RestAPIError("Could not fetch featurestore metadata for featurestore: {} (url: {}), "
"server response: \n "
"HTTP code: {}, HTTP reason: {}, error code: {}, "
"error msg: {}, user msg: {}".format(
            featurestore, resource_url, response.code, response.reason, error_code, error_msg, user_msg))
else: # for python 2
if response.status != 200:
error_code, error_msg, user_msg = util._parse_rest_error(response_object)
raise RestAPIError("Could not fetch featurestore metadata for featurestore: {} (url: {}), "
"server response: \n " \
"HTTP code: {}, HTTP reason: {}, error code: {}, "
"error msg: {}, user msg: {}".format(
            featurestore, resource_url, response.status, response.reason, error_code, error_msg, user_msg))
return response_object
def _get_project_info(project_name):
"""
Makes a REST call to hopsworks to get all metadata of a project for the provided project.
Args:
:project_name: the name of the project
Returns:
JSON response
Raises:
:RestAPIError: if there was an error in the REST call to Hopsworks
"""
method = constants.HTTP_CONFIG.HTTP_GET
connection = util._get_http_connection(https=True)
resource_url = (constants.DELIMITERS.SLASH_DELIMITER +
constants.REST_CONFIG.HOPSWORKS_REST_RESOURCE + constants.DELIMITERS.SLASH_DELIMITER +
constants.REST_CONFIG.HOPSWORKS_PROJECT_RESOURCE + constants.DELIMITERS.SLASH_DELIMITER +
constants.REST_CONFIG.HOPSWORKS_PROJECT_INFO_RESOURCE + constants.DELIMITERS.SLASH_DELIMITER +
project_name)
response = util.send_request(connection, method, resource_url)
resp_body = response.read().decode('utf-8')
response_object = json.loads(resp_body)
# for python 3
if sys.version_info > (3, 0):
if response.code != 200:
error_code, error_msg, user_msg = util._parse_rest_error(response_object)
raise RestAPIError("Could not fetch project metadata for project: {} (url: {}), "
"server response: \n "
"HTTP code: {}, HTTP reason: {}, error code: {}, "
"error msg: {}, user msg: {}".format(
project_name, resource_url, response.code, response.reason, error_code, error_msg, user_msg))
else: # for python 2
if response.status != 200:
error_code, error_msg, user_msg = util._parse_rest_error(response_object)
raise RestAPIError("Could not fetch project metadata for project: {} (url: {}), "
"server response: \n " \
"HTTP code: {}, HTTP reason: {}, error code: {}, "
"error msg: {}, user msg: {}".format(
project_name, resource_url, response.status, response.reason, error_code, error_msg, user_msg))
return response_object
def _get_featuregroup_rest(featuregroup_id, featurestore_id):
"""
Makes a REST call to hopsworks for getting the metadata of a particular featuregroup (including the statistics)
Args:
:featuregroup_id: id of the featuregroup
:featurestore_id: id of the featurestore where the featuregroup resides
Returns:
The REST response
Raises:
:RestAPIError: if there was an error in the REST call to Hopsworks
"""
headers = {constants.HTTP_CONFIG.HTTP_CONTENT_TYPE: constants.HTTP_CONFIG.HTTP_APPLICATION_JSON}
method = constants.HTTP_CONFIG.HTTP_GET
connection = util._get_http_connection(https=True)
resource_url = (constants.DELIMITERS.SLASH_DELIMITER +
constants.REST_CONFIG.HOPSWORKS_REST_RESOURCE + constants.DELIMITERS.SLASH_DELIMITER +
constants.REST_CONFIG.HOPSWORKS_PROJECT_RESOURCE + constants.DELIMITERS.SLASH_DELIMITER +
util.project_id() + constants.DELIMITERS.SLASH_DELIMITER +
constants.REST_CONFIG.HOPSWORKS_FEATURESTORES_RESOURCE + constants.DELIMITERS.SLASH_DELIMITER +
str(featurestore_id) +
constants.DELIMITERS.SLASH_DELIMITER +
constants.REST_CONFIG.HOPSWORKS_FEATUREGROUPS_RESOURCE + constants.DELIMITERS.SLASH_DELIMITER
+ str(featuregroup_id))
response = util.send_request(connection, method, resource_url, headers=headers)
resp_body = response.read().decode('utf-8')
response_object = json.loads(resp_body)
    if sys.version_info > (3, 0):  # for python 3
if (response.code != 200):
error_code, error_msg, user_msg = util._parse_rest_error(response_object)
raise RestAPIError("Could not get the metadata of featuregroup (url: {}), server response: \n " \
"HTTP code: {}, HTTP reason: {}, error code: {}, error msg: {}, user msg: {}".format(
resource_url, response.code, response.reason, error_code, error_msg, user_msg))
    else:  # for python 2
if (response.status != 200):
error_code, error_msg, user_msg = util._parse_rest_error(response_object)
raise RestAPIError("Could not get the metadata of featuregroup (url: {}), server response: \n " \
"HTTP code: {}, HTTP reason: {}, error code: {}, error msg: {}, user msg: {}".format(
resource_url, response.status, response.reason, error_code, error_msg, user_msg))
return response_object
def _get_training_dataset_rest(training_dataset_id, featurestore_id):
"""
Makes a REST call to hopsworks for getting the metadata of a particular training dataset (including the statistics)
Args:
:training_dataset_id: id of the training_dataset
:featurestore_id: id of the featurestore where the training dataset resides
Returns:
The REST response
Raises:
:RestAPIError: if there was an error in the REST call to Hopsworks
"""
headers = {constants.HTTP_CONFIG.HTTP_CONTENT_TYPE: constants.HTTP_CONFIG.HTTP_APPLICATION_JSON}
method = constants.HTTP_CONFIG.HTTP_GET
connection = util._get_http_connection(https=True)
resource_url = (constants.DELIMITERS.SLASH_DELIMITER +
constants.REST_CONFIG.HOPSWORKS_REST_RESOURCE + constants.DELIMITERS.SLASH_DELIMITER +
constants.REST_CONFIG.HOPSWORKS_PROJECT_RESOURCE + constants.DELIMITERS.SLASH_DELIMITER +
util.project_id() + constants.DELIMITERS.SLASH_DELIMITER +
constants.REST_CONFIG.HOPSWORKS_FEATURESTORES_RESOURCE + constants.DELIMITERS.SLASH_DELIMITER +
str(featurestore_id) +
constants.DELIMITERS.SLASH_DELIMITER +
constants.REST_CONFIG.HOPSWORKS_TRAININGDATASETS_RESOURCE + constants.DELIMITERS.SLASH_DELIMITER
+ str(training_dataset_id))
response = util.send_request(connection, method, resource_url, headers=headers)
resp_body = response.read().decode('utf-8')
response_object = json.loads(resp_body)
    if sys.version_info > (3, 0):  # for python 3
if (response.code != 200):
error_code, error_msg, user_msg = util._parse_rest_error(response_object)
raise RestAPIError("Could not get the metadata of featuregroup (url: {}), server response: \n " \
"HTTP code: {}, HTTP reason: {}, error code: {}, error msg: {}, user msg: {}".format(
resource_url, response.code, response.reason, error_code, error_msg, user_msg))
    else:  # for python 2
if (response.status != 200):
error_code, error_msg, user_msg = util._parse_rest_error(response_object)
raise RestAPIError("Could not get the metadata of featuregroup (url: {}), server response: \n " \
"HTTP code: {}, HTTP reason: {}, error code: {}, error msg: {}, user msg: {}".format(
resource_url, response.status, response.reason, error_code, error_msg, user_msg))
return response_object
|
from django.contrib import admin
from django.urls import path, include
from mysite_old.cities.views import index
urlpatterns = [
path('admin/', admin.site.urls),
path("", index),
path('cities/', include('mysite_old.cities.urls')),
path('people/', include('mysite_old.people.urls')),
]
|
import math
import os
import paddle.fluid as fluid
import numpy as np
from reader import custom_img_reader
from config import read_params,logger
from network import resnet50
learning_params = {
'learning_rate': 0.0001,
'batch_size': 64,
'step_per_epoch':-1,
'num_epoch': 80,
'epochs':[10,30], # Applies piecewise decay to the initial learning rate.
'lr_decay':[1,0.1,0.01],
'use_GPU':True,
'pretrained':False,
'pretrain_params_path':'',
'save_params_path':''
}
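# With the defaults above and piecewise decay, the effective learning rate is
# 1e-4 for epochs 0-9, 1e-5 for epochs 10-29, and 1e-6 from epoch 30 onward.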
place = fluid.CPUPlace() if not learning_params['use_GPU'] else fluid.CUDAPlace(0)
def get_momentum_optimizer(parameter_list):
'''
piecewise decay to the initial learning rate
'''
batch_size = learning_params['batch_size']
step_per_epoch = int(math.ceil(read_params['input_num'] / batch_size)) #
learning_rate = learning_params['learning_rate']
boundaries = [i * step_per_epoch for i in learning_params['epochs']]
values = [i * learning_rate for i in learning_params['lr_decay']]
learning_rate = fluid.layers.piecewise_decay(boundaries,values)
optimizer = fluid.optimizer.MomentumOptimizer(learning_rate=learning_rate, momentum=0.9,parameter_list=parameter_list)
return optimizer
def get_adam_optimizer(parameter_list):
'''
piecewise decay to the initial learning rate
'''
batch_size = learning_params['batch_size']
step_per_epoch = int(math.ceil(read_params['input_num'] / batch_size)) #
learning_rate = learning_params['learning_rate']
boundaries = [i * step_per_epoch for i in learning_params['epochs']]
values = [i * learning_rate for i in learning_params['lr_decay']]
learning_rate = fluid.layers.piecewise_decay(boundaries,values)
optimizer = fluid.optimizer.AdamOptimizer(learning_rate=learning_rate,parameter_list=parameter_list)
return optimizer
with fluid.dygraph.guard(place):
resnet = resnet50(False,num_classes=5)
optimizer = get_adam_optimizer(resnet.parameters())
if learning_params['pretrained']:
params,_ = fluid.load_dygraph(learning_params['pretrain_params_path'])
resnet.set_dict(params)
with fluid.dygraph.guard(place):
resnet.train()
train_list = os.path.join(read_params['data_dir'],read_params['train_list'])
train_reader = fluid.io.batch(custom_img_reader(train_list,mode='train'),batch_size=learning_params['batch_size'])
train_loader = fluid.io.DataLoader.from_generator(capacity=3,return_list=True,use_multiprocess=False)
train_loader.set_sample_list_generator(train_reader,places=place)
eval_list = os.path.join(read_params['data_dir'],read_params['eval_list'])
eval_reader = fluid.io.batch(custom_img_reader(eval_list,mode='eval'),batch_size=learning_params['batch_size'])
for epoch_id in range(learning_params['num_epoch']):
for batch_id,single_step_data in enumerate((train_loader())):
img = fluid.dygraph.to_variable(single_step_data[0])
label = fluid.dygraph.to_variable(single_step_data[1])
predict = resnet(img)
predict = fluid.layers.softmax(predict)
acc = fluid.layers.accuracy(input=predict,label=label)
loss = fluid.layers.cross_entropy(predict,label)
avg_loss = fluid.layers.mean(loss)
avg_loss.backward()
optimizer.minimize(avg_loss)
resnet.clear_gradients()
if batch_id % 10 == 0:
try:
single_eval_data = next(eval_reader())
img_eval = np.array([x[0] for x in single_eval_data])
label_eval = np.array([x[1] for x in single_eval_data])
img_eval = fluid.dygraph.to_variable(img_eval)
label_eval = fluid.dygraph.to_variable(label_eval)
eval_predict = resnet(img_eval)
eval_predict = fluid.layers.softmax(eval_predict)
eval_acc = fluid.layers.accuracy(input=eval_predict,label=label_eval)
logger.info('Loss epoch {} step {}: {} acc{} eval_acc {}'.format(epoch_id,batch_id, avg_loss.numpy(),acc.numpy(),eval_acc.numpy()))
except Exception:
logger.info('Loss epoch {} step {}: {} acc{} '.format(epoch_id,batch_id, avg_loss.numpy(),acc.numpy()))
logger.info('Final loss:{}'.format(avg_loss.numpy()))
    fluid.save_dygraph(resnet.state_dict(),learning_params['save_params_path'])
|
#!/usr/bin/python
# This script prints the predicted body part for a given image
import os
import numpy as np
from keras import optimizers
from keras.models import load_model
from keras.preprocessing import image
import argparse
import tensorflow as tf
parser = argparse.ArgumentParser(description='Predict on single image')
parser.add_argument("net", help="net to use")
parser.add_argument("image", help="image")
ARGS = parser.parse_args()
# Loading and Compiling Model
MODEL = load_model(ARGS.net)
MODEL.compile(optimizer=optimizers.RMSprop(lr=2e-5),
loss='categorical_crossentropy',
metrics=['acc'])
# Path of image you want to predict
IMG_PATH = ARGS.image
# Convert Img to an appropriate numpy array
IMG = image.load_img(IMG_PATH, target_size=(224, 224))
X = image.img_to_array(IMG)
X = np.expand_dims(X, axis=0)
IMAGES = np.vstack([X])
# The actual prediction
CLASSES = MODEL(IMAGES, training=False)
# Converting result of prediction to readable categories
CATEGORIES = {0: 'anal', 1: 'arms', 2: 'armsAndHands',
3: 'face', 4: 'feet', 5: 'genitalsFemale',
6: 'genitalsMale', 7: 'hands', 8: 'head',
9: 'legs', 10: 'legsAndFeet', 11: 'torso'}
print('Predicted Classes for Images: ')
i=0
other=True
for c in tf.unstack(CLASSES, axis=1):
print(CATEGORIES[i], ': {f:.3f}'.format(f=float(c[0])))
i += 1
if(float(c[0]) > 0.5):
other=False
print('other:', other)
|
##############################################################################
#
# Copyright (c) 2005 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Testing Support
"""
import zope.interface
import zope.security
from zope.container import contained
from zope.publisher.interfaces.browser import IBrowserView
from zope.site.testing import siteSetUp, siteTearDown
from z3c.menu.ready2go import interfaces, item
class TestParticipation(object):
principal = 'foobar'
interaction = None
class ISample(zope.interface.Interface):
"""Sample context interface."""
@zope.interface.implementer(ISample)
class Sample(object):
"""Sample context object."""
def __init__(self, title):
self.title = title
@zope.interface.implementer(IBrowserView)
class LocatableView(contained.Contained):
def __init__(self, context, request):
self.__parent__ = context
self.context = context
self.request = request
class IFirstView(IBrowserView):
"""First sample view interface."""
class ISecondView(IBrowserView):
"""Second sample view interface."""
@zope.interface.implementer(IFirstView)
class FirstView(LocatableView):
"""First view."""
@zope.interface.implementer(ISecondView)
class SecondView(LocatableView):
"""Second view."""
class IFirstMenu(interfaces.IMenuManager):
"""First menu manager."""
class ISecondMenu(interfaces.IMenuManager):
"""Second menu manager."""
class FirstMenuItem(item.ContextMenuItem):
viewName = 'first.html'
weight = 1
class SecondMenuItem(item.ContextMenuItem):
viewName = 'second.html'
weight = 2
def setUp(test):
root = siteSetUp(True)
test.globs['root'] = root
from zope.traversing.testing import setUp
setUp()
from zope.browserpage import metaconfigure
from zope.contentprovider import tales
metaconfigure.registerType('provider', tales.TALESProviderExpression)
zope.security.management.newInteraction()
zope.security.management.getInteraction().add(TestParticipation())
def tearDown(test):
zope.security.management.endInteraction()
siteTearDown()
|
from textwrap import dedent
from hamcrest import assert_that, equal_to, is_
from pluscal.ast import Skip
from pluscal.builders.procedure import ProcedureBuilder, ProceduresBuilder, PVariableBuilder
def test_procedure() -> None:
builder = ProcedureBuilder("foo").args(
"bar",
).vars(
PVariableBuilder("baz", "qox"),
).do(
Skip(),
)
assert_that(
str(builder),
is_(equal_to(dedent("""\
procedure foo(bar)
variable baz = qox;
begin
skip;
end procedure;""")))
)
def test_procedures() -> None:
builder = ProceduresBuilder().define(
ProcedureBuilder("foo").do(
Skip(),
),
)
assert_that(
str(builder),
is_(equal_to(dedent("""\
procedure foo()
begin
skip;
end procedure;""")))
)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
import logging
import math
import os
import sys
from io import open
import torch
from torch import nn
from transformerquant.models.base import PreTrainedModel
from transformerquant.modules.residual_bert import ResidualBERT
from transformerquant.modules.activation.activations import ACT2FN
from transformerquant.configs.bert_config import BertConfig
logger = logging.getLogger(__name__)
BERT_PRETRAINED_MODEL_ARCHIVE_MAP = {
'residual-bert': "",
'residual-bert-large': ""
}
BertLayerNorm = torch.nn.LayerNorm
def load_tf_weights_in_bert(model, config, tf_checkpoint_path):
""" Load tf checkpoints in a pytorch model.
"""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error("Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions.")
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
logger.info("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
name = name.split('/')
# adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
# which are not required for using pretrained model
if any(n in ["adam_v", "adam_m", "global_step"] for n in name):
logger.info("Skipping {}".format("/".join(name)))
continue
pointer = model
for m_name in name:
if re.fullmatch(r'[A-Za-z]+_\d+', m_name):
l = re.split(r'_(\d+)', m_name)
else:
l = [m_name]
if l[0] == 'kernel' or l[0] == 'gamma':
pointer = getattr(pointer, 'weight')
elif l[0] == 'output_bias' or l[0] == 'beta':
pointer = getattr(pointer, 'bias')
elif l[0] == 'output_weights':
pointer = getattr(pointer, 'weight')
elif l[0] == 'squad':
pointer = getattr(pointer, 'classifier')
else:
try:
pointer = getattr(pointer, l[0])
except AttributeError:
logger.info("Skipping {}".format("/".join(name)))
continue
if len(l) >= 2:
num = int(l[1])
pointer = pointer[num]
if m_name[-11:] == '_embeddings':
pointer = getattr(pointer, 'weight')
elif m_name == 'kernel':
array = np.transpose(array)
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
return model
class BertPooler(nn.Module):
def __init__(self, in_features, out_features):
super(BertPooler, self).__init__()
self.dense = nn.Linear(in_features, out_features)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
# sample size [batch_size, sequence_len, dim] -> [batch_size, dim]
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertPredictionHeadTransform(nn.Module):
def __init__(self, in_features, activation_func="relu", layer_norm_eps=1e-6):
super(BertPredictionHeadTransform, self).__init__()
        self.dense = nn.Linear(in_features, in_features)
        self.transform_act_fn = ACT2FN[activation_func]
        self.LayerNorm = BertLayerNorm(in_features, eps=layer_norm_eps)
def forward(self, x):
x = self.dense(x)
x = self.transform_act_fn(x)
x = self.LayerNorm(x)
return x
class BertLMPredictionHead(nn.Module):
def __init__(self,in_features, out_features, activation_func="relu", layer_norm_eps=1e-6):
super(BertLMPredictionHead, self).__init__()
self.transform = BertPredictionHeadTransform(in_features=in_features, activation_func=activation_func, layer_norm_eps=layer_norm_eps)
self.decoder = nn.Linear(in_features, out_features, bias=False)
self.bias = nn.Parameter(torch.zeros(out_features))
def forward(self, x):
x = self.transform(x)
x = self.decoder(x) + self.bias
return x
class BertPreTrainedModel(PreTrainedModel):
""" An abstract class to handle weights initialization and
a simple interface for dowloading and loading pretrained models.
"""
config_class = BertConfig
pretrained_model_archive_map = BERT_PRETRAINED_MODEL_ARCHIVE_MAP
load_tf_weights = load_tf_weights_in_bert
base_model_prefix = "bert"
def _init_weights(self, module):
""" Initialize the weights """
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, BertLayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
# ============================================= #
# bert model(bert encoder) #
# ============================================= #
class ResidualBertModel(BertPreTrainedModel):
def __init__(self, config):
super(ResidualBertModel, self).__init__(config)
self.encoder = ResidualBERT(d_model = config.d_model,
num_layers=config.num_layers,
nhead=config.nhead,
dropout=config.attention_dropout_prob,
activation=config.activation)
self.pooler = BertPooler(in_features=config.d_model, out_features=config.d_model)
self.init_weights()
def forward(self, x, mask=None):
x = self.encoder(x,mask)
x = self.pooler(x)
return x
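# Illustrative shapes (an assumption about ResidualBERT's interface): for x
# of shape [batch, seq_len, d_model], the encoder preserves that shape and
# BertPooler reduces it to [batch, d_model] via the first token.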
# ========================================== #
# bert for pretraining #
# ========================================== #
class BertForPreTraining(BertPreTrainedModel):
def __init__(self, config):
super(BertForPreTraining, self).__init__(config)
self.bert = ResidualBertModel(config)
self.cls = BertLMPredictionHead(in_features=config.d_model, out_features=config.num_labels, activation_func=config.activation, layer_norm_eps=config.layer_norm_eps)
self.init_weights()
def forward(self, x, mask=None):
x = self.bert(x, mask)
x = self.cls(x)
return x
# ========================================== #
# bert for downsteam task #
# ========================================== #
class BertForSequenceClassification(BertPreTrainedModel):
def __init__(self, config):
super(BertForSequenceClassification, self).__init__(config)
self.num_labels = config.num_labels
self.bert = ResidualBERT(d_model = config.d_model,
num_layers=config.num_layers,
nhead=config.nhead,
dropout=config.attention_dropout_prob,
activation=config.activation)
self.pooler = BertPooler(in_features=config.d_model, out_features=config.d_model)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.d_model, config.num_labels)
        self.init_weights()
def forward(self, x, mask=None):
x = self.bert(x, mask)
x = self.pooler(x)
x = self.dropout(x)
logit = self.classifier(x)
        return logit.squeeze(-1)
|
import MySQLdb
import logging
class Results():
def __init__(self, year=None):
self.year = year
self.logger = logging.getLogger(__name__)
self.db = MySQLdb.connect(host='localhost',
user='vagrant',
passwd='',
db='furrypoll2009_2012')
def getResults(self):
self.logger.info('Beginning %s', self.year)
cur = self.db.cursor()
cur.execute('select * from furrypoll%s' % self.year)
numRows = cur.rowcount
self.logger.info('Processing %s results', numRows)
for x in range(0, numRows):
row = cur.fetchone()
yield self.resultFromRow(row)
cur.close()
self.db.close()
self.logger.info('Done with %s', self.year)
def resultFromRow(self, row):
result = {
'year': self.year,
# The following three fields use the same index for each year; beyond
# that, we'll need to pass the whole row.
'birthdate': self.getBirthDate(
row[self.questionIndex['month']],
row[self.questionIndex['year']]),
'biosex': self.getBiosex(
row[self.questionIndex['biosex']]),
'gender': self.getGender(
row[self.questionIndex['gender']]),
'orientation': self.getOrientation(row),
'country': self.getCountry(row),
'state': self.getState(row),
'religion': self.getReligion(row),
'politics_social': row[self.questionIndex['politics_social']] \
if self.questionIndex['politics_social'] else None,
'politics_economic': row[self.questionIndex['politics_economic']] \
if self.questionIndex['politics_economic'] else None,
'occupation': self.getOccupation(row),
'education': self.getEducation(row),
'relationship': self.getRelationship(row),
'partner_is_furry':
row[self.questionIndex['partner_is_furry']] == 'o',
'howfurry': row[self.questionIndex['howfurry']],
'years_known_fandom':
row[self.questionIndex['years_known_fandom']],
'years_as_furry': row[self.questionIndex['years_as_furry']],
'furries_known': row[self.questionIndex['furries_known']],
'furries_known_in_person':
row[self.questionIndex['furries_known_in_person']],
'whoknows_nobody':
row[self.questionIndex['whoknows_nobody']] == 'o',
'whoknows_family':
row[self.questionIndex['whoknows_family']] == 'o' if \
self.questionIndex['whoknows_family'] else None,
'whoknows_SO':
row[self.questionIndex['whoknows_SO']] == 'o',
'whoknows_furryfriends':
row[self.questionIndex['whoknows_furryfriends']] == 'o',
'whoknows_bestfriends':
row[self.questionIndex['whoknows_bestfriends']] == 'o',
'whoknows_closerfriends':
row[self.questionIndex['whoknows_closerfriends']] == 'o',
'whoknows_friends':
row[self.questionIndex['whoknows_friends']] == 'o',
'whoknows_coworkers':
row[self.questionIndex['whoknows_coworkers']] == 'o',
'whoknows_commonknowledge':
row[self.questionIndex['whoknows_commonknowledge']] == 'o',
'nonfurry_response': self.getNonFurryResponse(row),
'nonfurry_response_personal':
self.getNonFurryResponsePersonal(row),
'nonfurry_accuracy': self.getNonFurryAccuracy(row),
'rp_as_different_gender': self.getRPAsDiffGender(row),
'seximportance_overall': row[self.questionIndex['seximportance']],
'seximportance_personal':
row[self.questionIndex['seximportance_personal']],
'seximportance_others':
row[self.questionIndex['seximportance_others']],
'seximportance_public':
row[self.questionIndex['seximportance_public']],
'is_artist': row[self.questionIndex['is_artist']] == 'o',
'is_writer': row[self.questionIndex['is_writer']] == 'o',
'is_musician': row[self.questionIndex['is_musician']] == 'o',
'is_congoer': row[self.questionIndex['is_congoer']] == 'o',
'is_fursuiter': row[self.questionIndex['is_fursuiter']] == 'o',
'is_active_online_communities':
row[self.questionIndex['is_active_online_communities']] == 'o',
'is_fan_rpgs': row[self.questionIndex['is_fan_rpgs']] == 'o',
'is_fan_scifi': row[self.questionIndex['is_fan_scifi']] == 'o',
'is_fan_anime': row[self.questionIndex['is_fan_anime']] == 'o',
'is_plushophile': row[self.questionIndex['is_plushophile']] == 'o',
'is_zoophile': row[self.questionIndex['is_zoophile']] == 'o',
'is_polyglot': row[self.questionIndex['is_polyglot']] == 'o',
'is_animal_rights_advocate':
row[self.questionIndex['is_animal_rights_advocate']] == 'o',
'is_vegetarian': row[self.questionIndex['is_vegetarian']] == 'o',
'is_politically_active':
row[self.questionIndex['is_politically_active']] == 'o',
            'is_otherkin': row[self.questionIndex['is_otherkin']] == 'o' if \
                self.questionIndex['is_otherkin'] is not None else None,
'opinion_artwork': row[self.questionIndex['opinion_artwork']],
'opinion_writing': row[self.questionIndex['opinion_writing']],
'opinion_conventions':
row[self.questionIndex['opinion_conventions']],
'opinion_fursuiting':
row[self.questionIndex['opinion_fursuiting']],
'opinion_plushophilia':
row[self.questionIndex['opinion_plushophilia']],
'opinion_zoophilia':
row[self.questionIndex['opinion_zoophilia']],
'opinion_online_communities':
row[self.questionIndex['opinion_online_communities']],
'importance_artwork':
row[self.questionIndex['importance_artwork']],
'importance_writing':
row[self.questionIndex['importance_writing']],
'importance_online_communities':
row[self.questionIndex['importance_online_communities']],
'importance_muds':
row[self.questionIndex['importance_muds']],
'importance_conventions':
row[self.questionIndex['importance_conventions']],
'importance_fursuiting':
row[self.questionIndex['importance_fursuiting']],
'how_much_human': self.getHowHuman(row),
        }
        # The animal-identification flags all follow the same pattern, so
        # fill them in with a loop. A None index means the question was not
        # asked that year.
        animals = [
            'wolf', 'redfox', 'greyfox', 'arcticfox', 'kitsune', 'otherfox',
            'coyote', 'jackal', 'germanshepherd', 'husky', 'collie',
            'otherdog', 'othercanine', 'tiger', 'lion', 'leopard',
            'snowleopard', 'panther', 'cheetah', 'cougar', 'domesticcat',
            'otherfeline', 'dragon', 'lizard', 'dinosaur', 'otherreptile',
            'raccoon', 'skunk', 'badger', 'riverotter', 'seaotter', 'weasel',
            'othermustelid', 'redpanda', 'othermusteloid', 'horse', 'deer',
            'otherungulate', 'brownbear', 'grizzlybear', 'pandabear',
            'polarbear', 'otherbear', 'mouse', 'rat', 'squirrel', 'other',
            'raven', 'raptor', 'otherbird', 'rabbit', 'kangaroo', 'koala',
            'othermarsupial', 'lemur', 'monkey', 'otherprimate', 'hyaena',
            'bat', 'griffin',
        ]
        for animal in animals:
            index = self.questionIndex['animal_' + animal]
            result['animal_' + animal] = \
                row[index] == 'o' if index is not None else None
# Race involves multiple columns
self.setRace(row, result)
self.setHowOften(row, result)
self.setWebsites(row, result)
self.setPsychographic(row, result)
return result
def getBirthDate(self, month, year):
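        # Returns None when the respondent did not give a usable month/year.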
        try:
            if month == 'nul' or year == '' or int(year) == 0:
                return None
            return float(year) + float(month)
        except ValueError:
            return None
def getBiosex(self, biosex):
try:
return {
'a': 'Male',
'b': 'Female',
'c': 'Other',
}[biosex]
        except (KeyError, IndexError, TypeError, ValueError):
return None
def getGender(self, gender):
try:
return {
'a': 'Male, completely',
'b': 'Male, predominately',
'c': 'Equal parts male and female',
'd': 'Female, predominately',
'e': 'Female, completely',
'f': 'Other',
}[gender]
        except (KeyError, IndexError, TypeError, ValueError):
return None
def getOrientation(self, row):
orientation = row[self.questionIndex['orientation']]
try:
return (
'Completely heterosexual',
'Mostly heterosexual',
'Bisexual, leaning heterosexual',
'Bisexual',
'Bisexual, leaning homosexual',
'Mostly homosexual',
'Completely homosexual',
'Pansexual',
'Asexual',
)[int(orientation)]
        except (KeyError, IndexError, TypeError, ValueError):
return None
def getCountry(self, row):
if len(row[self.questionIndex['country']]) == 2 and \
row[self.questionIndex['country']] != 'zz':
return row[self.questionIndex['country']]
else:
return None
def getState(self, row):
if self.questionIndex['state'] is not None:
if len(row[self.questionIndex['state']]) == 2 and \
row[self.questionIndex['state']] != 'zz':
return row[self.questionIndex['state']]
else:
return None
def setRace(self, row, result):
result['race_white'] = row[self.questionIndex['race_white']] == 'o'
result['race_black'] = row[self.questionIndex['race_black']] == 'o'
result['race_hispanic'] = row[self.questionIndex['race_hispanic']] \
== 'o'
result['race_asian'] = row[self.questionIndex['race_asian']] == 'o'
result['race_native'] = row[self.questionIndex['race_native']] == 'o'
def getReligion(self, row):
try:
return {
'a': 'Christian, Catholic',
'b': 'Christian, Protestant',
'c': 'Christian, Other',
'd': 'Muslim',
'e': 'Hindu',
'f': 'Jewish',
'g': 'Pagan',
'h': 'Buddhist',
'i': 'Shinto',
'j': 'Agnostic',
'k': 'Atheist',
'x': 'Other',
}[row[self.questionIndex['religion']]]
        except (KeyError, IndexError, TypeError, ValueError):
return None
def getOccupation(self, row):
try:
return {
'a': 'Student',
'b': 'Administrative',
'c': 'Government/Armed Service',
'd': 'Sales/Support',
'e': 'Technical/IT',
'f': 'Professional',
'g': 'Service',
'h': 'Creative or other within the furry fandom',
'i': 'Retired',
'j': 'Unemployed' if self.year in [2011, 2012] else 'None',
'k': 'None',
'x': 'Other',
}[row[self.questionIndex['occupation']]]
        except (KeyError, IndexError, TypeError, ValueError):
return None
def getEducation(self, row):
try:
return {
'a': 'Some high/secondary school',
'b': 'High/secondary school (ongoing)',
'c': 'High/secondary school graduate',
'd': 'Some college/university',
'e': 'College/university (ongoing)',
'f': 'College/university graduate',
'g': 'Some postcollege',
'h': 'Postcollege (ongoing)',
                'i': 'Advanced degree',
'x': 'Other',
}[row[self.questionIndex['education']]]
        except (KeyError, IndexError, TypeError, ValueError):
return None
def getRelationship(self, row):
try:
return {
'a': 'Single',
'b': 'Casual relationship',
'c': 'Open relationship',
'd': 'Long-term relationship',
'e': 'Marriage or other permanent commitment',
'x': 'Other',
}[row[self.questionIndex['relationship']]]
        except (KeyError, IndexError, TypeError, ValueError):
return None
def getNonFurryResponse(self, row):
try:
return {
'a': 'They have no knowledge',
'b': 'Extremely positively',
'c': 'Positively',
'd': 'Slightly positively',
'e': 'Ambivalently',
'f': 'Slightly negatively',
'g': 'Negatively',
'h': 'Extremely negatively',
}[row[self.questionIndex['nonfurry_response']]]
        except (KeyError, IndexError, TypeError, ValueError):
return None
def getNonFurryResponsePersonal(self, row):
try:
return {
'a': 'No',
'b': 'Yes, they respond more positively',
'c': 'Yes, they respond more negatively',
'd': 'The reaction is mixed; some respond more positively ' +
'and some more negatively',
'e': 'People do not know that I am a furry',
}[row[self.questionIndex['nonfurry_response_personal']]]
        except (KeyError, IndexError, TypeError, ValueError):
return None
def getNonFurryAccuracy(self, row):
try:
return {
'a': 'Extremely accurate',
'b': 'Accurate',
'c': 'Inaccurate',
'd': 'Extremely inaccurate',
}[row[self.questionIndex['nonfurry_accuracy']]]
        except (KeyError, IndexError, TypeError, ValueError):
return None
def getRPAsDiffGender(self, row):
try:
return {
'a': 'No, and I would not do so',
'b': 'No, but I might do so',
'c': 'Yes, occasionally',
'd': 'Yes, regularly',
'e': 'My primary furry persona fits this description',
}[row[self.questionIndex['rp_as_different_gender']]]
        except (KeyError, IndexError, TypeError, ValueError):
return None
def setHowOften(self, row, result):
fields = ['chat_online', 'roleplay', 'attend_conventions', 'meet_up',
'visit_furry_websites',
'participate_in_furry_online_communities', 'write', 'draw',
'play_nonfurry_online_games', 'play_nonfurry_rpgs',
'attend_nonfurry_conventions',
'participate_in_nonfurry_online_communities']
for field in fields:
try:
result['howoften_' + field] = [
'Never',
'Less than once a year',
'Yearly',
'Several times a year',
'Monthly',
'Several times a month',
'Weekly',
'Daily',
][int(row[self.questionIndex['howoften_' + field]]) - 1]
            except (KeyError, IndexError, TypeError, ValueError):
continue
def setWebsites(self, row, result):
sites = ['artspots', 'deviantart', 'e621', 'flist', 'fchan',
'flayrah', 'furaffinity', 'furcadia', 'furnation',
'furocity', 'furrag', 'furry4life', 'furryteens',
'furspace', 'furtopia', 'inkbunny', 'pounced',
'sofurry', 'vcl', 'wikifur',]
for site in sites:
try:
result['website_' + site] = \
row[self.questionIndex['website_' + site]] == 'o'
            except (KeyError, IndexError, TypeError, ValueError):
result['website_' + site] = None
def getHowHuman(self, row):
try:
return {
'a': 'I consider myself to be completely human, with no ' +
'other attributes',
'b': 'I consider myself to be completely human, though I ' +
'admire some aspects of other animals',
'c': 'I consider myself to be mostly human, with some ' +
'other animal traits mixed in',
'd': 'I consider myself to be roughly equal parts human and ' +
'animal',
'e': 'I consider myself partly human, although animal ' +
'traits are more common',
'f': 'I do not consider myself human',
'g': 'Other',
}[row[self.questionIndex['how_much_human']]]
        except (KeyError, IndexError, TypeError, ValueError):
return None
    def setPsychographic(self, row, result):
        # The 2012 survey added a block of psychographic agree/disagree
        # questions; they all map straight through, so copy them in a loop.
        if self.year == 2012:
            fields = [
                'faith_and_spirituality', 'friends_look_advice',
                'make_rather_than_buy', 'more_talented_than_peers',
                'value_cutting_edge', 'rather_patronize_small_businesses',
                'enjoy_creating_things', 'ahead_of_pop_culture',
                'tendency_to_overthink', 'mass_media_lcd', 'enjoy_leading',
                'focus_on_specific_interests', 'too_reliant_on_tech',
                'filesharing_nbd', 'citizens_politically_active',
                'want_to_be_fashionable', 'exciting_rather_than_predictable',
                'learning_for_learnings_sake', 'routine_is_comforting',
                'advertising_is_useful', 'other_people_think_important',
                'learn_about_universe', 'find_simpler_option',
                'decisions_moral_code', 'people_more_distant',
                'first_to_try_new_things', 'consider_intellectual',
                'buy_on_impulse', 'corporations_soulless', 'enjoy_traveling',
            ]
            for field in fields:
                result[field] = row[self.questionIndex[field]]
|
import cv2
import numpy as np
cap = cv2.VideoCapture(0)
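# Open the default camera (device 0)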
def nothing(x):
pass
# Creating a window for later use
cv2.namedWindow('result')
# Creating track bar
cv2.createTrackbar('h_min', 'result',0,179,nothing)
cv2.createTrackbar('s_min', 'result',0,255,nothing)
cv2.createTrackbar('v_min', 'result',0,255,nothing)
# Creating track bar
cv2.createTrackbar('h_max', 'result',0,179,nothing)
cv2.createTrackbar('s_max', 'result',0,255,nothing)
cv2.createTrackbar('v_max', 'result',0,255,nothing)
while True:
    ret, frame = cap.read()
    if not ret:
        break
    # converting to HSV
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # get info from the track bars and apply it to the result
h_min = cv2.getTrackbarPos('h_min','result')
s_min = cv2.getTrackbarPos('s_min','result')
v_min = cv2.getTrackbarPos('v_min','result')
h_max = cv2.getTrackbarPos('h_max', 'result')
s_max = cv2.getTrackbarPos('s_max', 'result')
v_max = cv2.getTrackbarPos('v_max', 'result')
    # Build the mask from the user-selected HSV range
    lower_bound = np.array([h_min, s_min, v_min])
    print('--------------------')
    print('MIN: hsv({}, {}, {})'.format(h_min, s_min, v_min))
    print('MAX: hsv({}, {}, {})'.format(h_max, s_max, v_max))
    print('--------------------')
    upper_bound = np.array([h_max, s_max, v_max])
    mask = cv2.inRange(hsv, lower_bound, upper_bound)
    result = cv2.bitwise_and(frame, frame, mask=mask)
cv2.putText(result, 'MIN: hsv({}, {}, {})'.format(h_min, s_min, v_min), (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, 255, 2)
cv2.putText(result, 'MAX: hsv({}, {}, {})'.format(h_max, s_max, v_max), (10, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, 255, 2)
cv2.imshow('result',result)
k = cv2.waitKey(5) & 0xFF
if k == 27:
break
cap.release()
cv2.destroyAllWindows() |
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 6 10:29:21 2018
@author: dedekinds
"""
import os
from PIL import Image
import numpy as np
PATH = os.getcwd()
def png2jpg_background_white(PNG,SEG):
os.chdir(PATH+'/READY')
INPUT=np.array(Image.open(PNG))
os.chdir(PATH+'/examples/segmentation')
seg = np.array(Image.open(SEG))
h = INPUT.shape[0]
w = INPUT.shape[1]
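    # Note: PIL's Image.new takes (width, height), so passing (h, w) builds a
    # transposed image; the rot_flop() step later appears to compensate for this.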
new = Image.new("RGB",(h,w))
for i in range(0,h):
for j in range(0,w):
if seg[i][j]:
new.putpixel([i,j],(INPUT[i,j,0],INPUT[i,j,1],INPUT[i,j,2]))
else:
new.putpixel([i,j],(255,255,255))
return new
def rot_flop(filename):
os.system('convert -rotate 90 '+filename+' '+filename)
os.system('convert -flop '+filename+' '+filename)
# create the context segmentation mask and move it to /segmentation
os.chdir(PATH+'/READY')
os.system('convert -alpha Extract -type optimize -strip -quality 60 +dither base_context.png base_context_mask.png')
os.system('mv base_context_mask.png '+PATH+'/examples/segmentation')
# create the style segmentation mask and move it to /segmentation
os.chdir(PATH+'/READY')
os.system('convert -alpha Extract -type optimize -strip -quality 60 +dither base_style.png base_style_mask.png')
os.system('mv base_style_mask.png '+PATH+'/examples/segmentation')
#########################################
#convert type
os.chdir(PATH+'/READY')
os.system('convert base_context.png base_context.jpg')  # convert to JPEG first;
os.system('convert base_style.png base_style.jpg')  # JPEG avoids some issues with PNG type/alpha handling
context = png2jpg_background_white(PNG = 'base_context.jpg',SEG = 'base_context_mask.png' )
context.save('base_context.jpg')
os.system('mv base_context.jpg '+PATH+'/examples/input/')
style = png2jpg_background_white(PNG = 'base_style.jpg',SEG = 'base_style_mask.png' )
style.save('base_style.jpg')
os.system('mv base_style.jpg '+PATH+'/examples/style/')
#rotate and flop
os.chdir(PATH+'/examples/input')
rot_flop('base_context.jpg')
os.chdir(PATH+'/examples/style')
rot_flop('base_style.jpg')
#begin training
os.chdir(PATH)
os.system('python deep_photostyle.py --content_image_path ./examples/input/base_context.jpg --style_image_path ./examples/style/base_style.jpg --content_seg_path ./examples/segmentation/base_context_mask.png --style_seg_path ./examples/segmentation/base_style_mask.png --style_option 2')
#Clipping
os.chdir(PATH)
INPUT=np.array(Image.open('best_temp.png'))
os.chdir(PATH+'/examples/segmentation')
seg = np.array(Image.open('base_context_mask.png'))
h = INPUT.shape[0]
w = INPUT.shape[1]
print(h,w)
new = Image.new("RGB",(h,w))
for i in range(0,h):
for j in range(0,w):
if seg[i][j]:
new.putpixel([i,j],(INPUT[i,j,0],INPUT[i,j,1],INPUT[i,j,2]))
else:
new.putpixel([i,j],(255,255,255))
os.chdir(PATH)
new.save('last_result.jpg')
rot_flop('last_result.jpg')
|
squares = []
for x in range(6):
squares.append(x ** 2)
squares |
import os
from openpyxl import load_workbook, Workbook
from openpyxl.styles import Font
# mapping from index to letter
letters = [
"A", "B", "C", "D", "E", "F", "G", "H", "I",
"J", "K", "L", "M", "N", "O", "P", "Q", "R",
"S", "T", "U", "V", "W", "X", "Y", "Z"
]
# needed information about excel workbook
summary_wb_name = "summary.xlsx"
sheet_name = "Source Data"
column = "C"
start_row = 4
# get all the files from the current folder
dir_content = os.listdir(".")
excel_tl_files = [doc for doc in dir_content if doc.endswith(".xltx")]
# sorts the names by year
excel_tl_files.sort()
processed = 0
# set up the output workbook
summary_wb = Workbook()
summary_ws = summary_wb.active
summary_ws["A1"] = "Customers"
summary_ws["A1"].font = Font(size="16", bold=True)
for index, excel_file in enumerate(excel_tl_files):
print(f"Processing and extracting data from {excel_file}")
wb = load_workbook(filename=excel_file)
sheet = wb[sheet_name]
# set up the counter and get the current cell
curr_row = start_row
cell_num = f"{column}{curr_row}"
cell = sheet[cell_num]
# empty list of entities for this workbook
workbook_entities = []
# get distinct customers from all years
while cell.value is not None:
# get the value of the current cell
entity = cell.value
# if the entity is not yet part of the array, add it
if entity not in workbook_entities:
workbook_entities.append(entity)
# set the current cell to the next row
curr_row += 1
cell_num = f"{column}{curr_row}"
cell = sheet[cell_num]
# write to new excel sheet with unique customers and heading of the given year
    column_letter = get_column_letter(index + 1)  # get_column_letter is 1-based
column_index = f"{column_letter}2"
summary_ws.column_dimensions[column_letter].width = 20
# write the name of the file without filetype
summary_ws[column_index] = os.path.splitext(excel_file)[0]
summary_ws[column_index].font = Font(bold=True)
# for every entity, write it to a row below the header
for i, entity in enumerate(workbook_entities):
cell_index = f"{column_letter}{i + 3}"
summary_ws[cell_index] = entity
processed += 1
# save the workbook
summary_wb.save(summary_wb_name)
print(f"Processed {processed} of {len(excel_tl_files)} excel files.") |
#!/usr/bin/env python3
import unittest
import ctypes
import re
lib = ctypes.CDLL('../c-bindings/libgojsonnet.so')
lib.jsonnet_evaluate_snippet.argtypes = [
ctypes.c_void_p,
ctypes.c_char_p,
ctypes.c_char_p,
ctypes.POINTER(ctypes.c_int),
]
lib.jsonnet_evaluate_snippet.restype = ctypes.POINTER(ctypes.c_char)
lib.jsonnet_make.argtypes = []
lib.jsonnet_make.restype = ctypes.c_void_p
lib.jsonnet_string_output.argtypes = [
ctypes.c_void_p,
ctypes.c_int,
]
lib.jsonnet_string_output.restype = None
t = [
ctypes.c_void_p,
ctypes.c_char_p,
ctypes.c_char_p,
]
lib.jsonnet_ext_var.argtypes = t
lib.jsonnet_ext_code.argtypes = t
lib.jsonnet_tla_var.argtypes = t
lib.jsonnet_tla_code.argtypes = t
lib.jsonnet_jpath_add.argtypes = [
ctypes.c_void_p,
ctypes.c_char_p,
]
lib.jsonnet_jpath_add.restype = None
lib.jsonnet_max_trace.argtypes = [
ctypes.c_void_p,
ctypes.c_int,
]
lib.jsonnet_max_trace.restype = None
lib.jsonnet_evaluate_file.argtypes = [
ctypes.c_void_p,
ctypes.c_char_p,
ctypes.POINTER(ctypes.c_int),
]
lib.jsonnet_evaluate_file.restype = ctypes.POINTER(ctypes.c_char)
lib.jsonnet_destroy.argtypes = [
ctypes.c_void_p
]
lib.jsonnet_destroy.restype = None
lib.jsonnet_realloc.argtypes = [
ctypes.c_void_p,
ctypes.POINTER(ctypes.c_char),
ctypes.c_ulong,
]
lib.jsonnet_realloc.restype = ctypes.POINTER(ctypes.c_char)
lib.jsonnet_version.argtypes = []
lib.jsonnet_version.restype = ctypes.POINTER(ctypes.c_char)
def free_buffer(vm, buf):
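    # jsonnet_realloc with size 0 frees the buffer and returns NULL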
assert not lib.jsonnet_realloc(vm, buf, 0)
def to_bytes(buf):
return ctypes.cast(buf, ctypes.c_char_p).value
class TestJsonnetEvaluateBindings(unittest.TestCase):
def setUp(self):
self.err = ctypes.c_int()
self.err_ref = ctypes.byref(self.err)
self.vm = lib.jsonnet_make()
def test_add_strings(self):
res = lib.jsonnet_evaluate_snippet(self.vm, b"vm1", b"'xxx' + 'yyy'", self.err_ref)
self.assertEqual(b'"xxxyyy"\n', to_bytes(res))
free_buffer(self.vm, res)
def test_string_output(self):
lib.jsonnet_string_output(self.vm, 1)
res = lib.jsonnet_evaluate_snippet(self.vm, b"vm2", b"'xxx' + 'yyy'", self.err_ref)
self.assertEqual(b'xxxyyy\n', to_bytes(res))
free_buffer(self.vm, res)
def test_params(self):
lib.jsonnet_ext_var(self.vm, b"e1", b"a")
lib.jsonnet_ext_code(self.vm, b"e2", b"'b'")
lib.jsonnet_tla_var(self.vm, b"t1", b"c")
lib.jsonnet_tla_code(self.vm, b"t2", b"'d'")
res = lib.jsonnet_evaluate_snippet(self.vm, b"ext_and_tla", b"""function(t1, t2) std.extVar("e1") + std.extVar("e2") + t1 + t2""", self.err_ref)
self.assertEqual(b'"abcd"\n', to_bytes(res))
free_buffer(self.vm, res)
def test_jpath(self):
lib.jsonnet_jpath_add(self.vm, b"jsonnet_import_test/")
res = lib.jsonnet_evaluate_snippet(self.vm, b"jpath", b"""import 'foo.jsonnet'""", self.err_ref)
self.assertEqual(b"42\n", to_bytes(res))
free_buffer(self.vm, res)
def test_max_trace(self):
lib.jsonnet_max_trace(self.vm, 4)
res = lib.jsonnet_evaluate_snippet(self.vm, b"max_trace", b"""local f(x) = if x == 0 then error 'expected' else f(x - 1); f(10)""", self.err_ref)
expectedTrace = b'RUNTIME ERROR: expected\n\tmax_trace:1:29-45\tfunction <f>\n\tmax_trace:1:51-59\tfunction <f>\n\t...\n\tmax_trace:1:61-66\t$\n\tDuring evaluation\t\n'
self.assertEqual(expectedTrace, to_bytes(res))
free_buffer(self.vm, res)
def test_evaluate_file(self):
res = lib.jsonnet_evaluate_file(self.vm, b"jsonnet_import_test/foo.jsonnet", self.err_ref)
self.assertEqual(b"42\n", to_bytes(res))
free_buffer(self.vm, res)
def test_jsonnet_version(self):
res = lib.jsonnet_version()
match = re.match(r'^v[0-9]+[.][0-9]+[.][0-9]+ [(]go-jsonnet[)]$', to_bytes(res).decode('utf-8'))
self.assertIsNotNone(match)
def tearDown(self):
lib.jsonnet_destroy(self.vm)
if __name__ == '__main__':
unittest.main() |
from django.test import TestCase
from ads.forms import AdvertisementForm
from .models import Category
class TestForms(TestCase):
"""This class tests the input fields in the form that is used when creating or editing a new ad."""
def setUp(self):
"""Making a category for testing"""
Category.objects.create(name='TestCategory')
def test_ads_form_valid_data(self):
form = AdvertisementForm(data={
'product_name': 'Car',
'product_description': 'I want to sell my Chevrolet 1980 model',
'price': 1000000,
'category': Category.objects.get(name='TestCategory')
})
self.assertTrue(form.is_valid())
def test_ads_form_negative_price(self):
form = AdvertisementForm(data={
'product_name': 'Car',
'product_description': 'I want to sell my Chevrolet 1980 model',
'price': -1000,
'category': Category.objects.get(name='TestCategory')
})
self.assertFalse(form.is_valid())
def test_ads_form_no_price(self):
form = AdvertisementForm(data={
'product_name': 'Car',
'product_description': 'I want to sell my Chevrolet 1980 model',
'category': Category.objects.get(name='TestCategory')
})
self.assertFalse(form.is_valid())
def test_ads_form_no_product_name(self):
form = AdvertisementForm(data={
'product_description': 'I want to sell my Chevrolet 1980 model',
'price': 10,
'category': Category.objects.get(name='TestCategory')
})
self.assertFalse(form.is_valid())
def test_ads_form_no_product_description(self):
form = AdvertisementForm(data={
'product_name': 'Car',
'price': 10,
'category': Category.objects.get(name='TestCategory')
})
self.assertFalse(form.is_valid())
def test_ads_all_inputs_missing(self):
form = AdvertisementForm(data={
})
self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 4)  # checks that all 4 errors occurred for missing fields
|
# -*- encoding: utf-8 -*-
'''
@Time    :   2020-08-06
@Author  :   EvilRecluse
@Contact :   https://github.com/RecluseXU
@Desc    :   Add a header to every request that counts how many requests have passed through
'''
# here put the import lib
import mitmproxy.http
from mitmproxy import ctx
class AddHeader:
def __init__(self):
self.num = 0
def request(self, flow: mitmproxy.http.HTTPFlow):
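        # Called for every client request; stamp it with a running counter.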
self.num = self.num + 1
flow.request.headers["count"] = str(self.num)
        ctx.log.info('Modified the request headers')
addons = [
AddHeader()
]
if __name__ == "__main__":
# mitmweb -s 2_events.py
pass
|
from flask_restful import Resource
#importing Database
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import Database as db
class Audit(Resource):
def get(self):
logs = db.getMyAppsLog()
data = []
for log in logs:
del log["_id"]
data.append(log)
return {"data": data}, 200, {"content-type": "application/json"}
@staticmethod
def invalidToken():
return {"code":1703,"description":"Session is invalid or expired"}, 401, {'Content-Type':'application/json'}
|
from abc import ABCMeta, abstractmethod
class TraversalAlgorithm(metaclass=ABCMeta):
@abstractmethod
def __init__(self, gridClass, draw):
self.gridClass = gridClass
self.grid = gridClass.get_grid()
self.draw = draw
self.lowestCost = 0
@abstractmethod
def initialize(self):
pass
@abstractmethod
def move(self):
pass
@abstractmethod
def backtrack(self):
pass
    def other_node_adjacent_to_current_node(self, currentNode, otherNode):
        # Adjacent means exactly one step apart along exactly one axis.
        dx = abs(currentNode.get_x() - otherNode.get_x())
        dy = abs(currentNode.get_y() - otherNode.get_y())
        return (dx == 0 and dy == 1) or (dx == 1 and dy == 0)
def get_lowest_cost(self):
return self.lowestCost |
import numpy as np
import os
import shutil
def material_properties():
'''
Cross-sections from Capilla. Applications of the multidimensional
PL equations to complex fuel assembly problems. 2009.
Returns:
--------
constants: [dictionary]
dictionary that contains the materials and their respective
cross-sections.
* main keys:
- uo2 - U - UO2 Fuel
- mox3 - P1 - 4.3% MOX Fuel (outer)
- mox2 - P2 - 7.0% MOX Fuel
- mox1 - P3 - 8.7% MOX Fuel (inner)
- gtub - X - Guide Tube
- reflec - R - Reflector
- fchamb - C - Moveable Fission Chamber
* secondary keys:
            - DIFFCOEF = diffusion coefficients
- REMXS = removal cross-sections
- NSF = production cross-sections
- SP0 = scattering cross-sections
- TOT = total cross-sections
'''
constants = {
'uo2': {
'DIFFCOEF': np.array([1.20, 0.40]),
'REMXS': np.array([0.029656, 0.092659]),
'NSF': np.array([0.00457, 0.11353]),
# from S11, S12, S21, S22 (S11, S22 from selfscatt.py)
'SP0': np.array([0.5417301, 0.02043, 0.00, 1.01730104])
},
'mox': {
'DIFFCOEF': np.array([1.20, 0.40]),
'REMXS': np.array([0.029655, 0.23164]),
'NSF': np.array([0.0068524, 0.34450]),
# from S11, S12, S21, S22 (S11, S22 from selfscatt.py)
'SP0': np.array([0.56844291, 0.015864, 0.00, 0.92557093])
},
'reflec': {
'DIFFCOEF': np.array([1.20, 0.20]),
'REMXS': np.array([0.051, 0.04]),
'NSF': np.array([0.00, 0.00]),
# from S11, S12, S21, S22 (S11, S22 from selfscatt.py)
'SP0': np.array([0.56, 0.05, 0.00, 2.30])
}
}
for mat in constants.keys():
remxs = constants[mat]['REMXS']
G = len(remxs)
scatt = constants[mat]['SP0'].reshape(G, G)
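        # total cross-section = removal + within-group (self-scatter) diagonal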
constants[mat]['TOT'] = remxs + scatt.diagonal()
return constants
def prepare_xs(constants, sp3=True, correct=False):
'''
This function prepares the cross-sections and outputs a dictionary
with all the information required by Cerberus.
Parameters:
----------
constants: [dictionary]
cross-section data
primary keys: name of the material
secondary keys: constants
sp3: [bool]
True if creating SP3 constants
correct: [bool]
True if applying transport correction to SP3 constants
Returns:
--------
constants2: [dictionary]
primary keys: name of the material
secondary keys: constants required by Cerberus/Moltres
'''
constants2 = {}
for mat in constants.keys():
G = len(constants[mat]['TOT'])
totxs = constants[mat]['TOT']
try:
s0xs = constants[mat]['SP0']
except KeyError:
s0xs = np.zeros(G*G)
try:
s1xs = constants[mat]['SP1']
except KeyError:
s1xs = np.zeros(G*G)
try:
s2xs = constants[mat]['SP2']
except KeyError:
s2xs = np.zeros(G*G)
try:
s3xs = constants[mat]['SP3']
except KeyError:
s3xs = np.zeros(G*G)
rem0xs = totxs - s0xs.reshape(G, G).diagonal()
rem1xs = totxs - s1xs.reshape(G, G).diagonal()
rem2xs = totxs - s2xs.reshape(G, G).diagonal()
rem3xs = totxs - s3xs.reshape(G, G).diagonal()
constants2[mat] = {}
constants2[mat]['BETA_EFF'] = np.zeros(8)
constants2[mat]['CHID'] = np.zeros(G)
constants2[mat]['CHIP'] = np.zeros(G)
constants2[mat]['CHIT'] = np.zeros(G)
constants2[mat]['CHIT'][0] = 1.
try:
constants2[mat]['FISS'] = constants[mat]['FISS']
except KeyError:
constants2[mat]['FISS'] = constants[mat]['NSF']/2.4
constants2[mat]['INVV'] = np.zeros(G)
constants2[mat]['KAPPA'] = 200*np.ones(G)
constants2[mat]['LAMBDA'] = np.zeros(8)
try:
constants2[mat]['NSF'] = constants[mat]['NSF']
except KeyError:
constants2[mat]['NSF'] = np.zeros(G)
constants2[mat]['SP0'] = s0xs
if sp3 is True:
if correct is True:
try:
constants2[mat]['DIFFCOEFA'] = 1./3/constants[mat]['TRXS']
except KeyError:
constants2[mat]['DIFFCOEFA'] = constants[mat]['DIFFCOEF']
else:
constants2[mat]['DIFFCOEFA'] = 1./3/rem1xs
constants2[mat]['DIFFCOEFB'] = 9./35/rem3xs
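            # Removal and coupling terms below use the standard SP3 weights
            # (4/5, 2 and 2/5 of the zeroth-moment removal).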
constants2[mat]['REMXSA'] = rem0xs
constants2[mat]['REMXSB'] = rem2xs + 4./5*rem0xs
constants2[mat]['COUPLEXSA'] = 2*rem0xs
constants2[mat]['COUPLEXSB'] = 2./5*rem0xs
else:
constants2[mat]['DIFFCOEF'] = constants[mat]['DIFFCOEF']
constants2[mat]['REMXS'] = rem0xs
return constants2
def output_xs(outdir, temp, materials):
'''
This function outputs the dictionary with the material cross-sections
into the Cerberus and moltres readable text files.
Parameters:
-----------
outdir: [string]
directory that will hold the cross-section files
temp: [float]
temperature at which the cross-sections were obtained
materials: [dictionary]
contains the cross-section informations
primary keys: name of the material
secondary keys: constants
Return:
-------
None
'''
for currentMat in materials.keys():
for data in materials[currentMat].keys():
with open(outdir + '/' + currentMat +
'_' + data + '.txt', 'a') as fh:
strData = materials[currentMat][data]
strData = ' '.join(
[str(dat) for dat in strData]) if isinstance(
strData, np.ndarray) else str(strData)
fh.write(str(temp) + ' ' + strData)
fh.write('\n')
return None
if __name__ == "__main__":
temp = 300
outdir = 'xs2g-homo'
if os.path.exists(outdir):
shutil.rmtree(outdir)
os.mkdir(outdir)
materials = material_properties()
materials2 = prepare_xs(materials, sp3=True, correct=True)
output_xs(outdir, temp, materials2)
|
from time import sleep_ms, ticks_ms, ticks_diff
from machine import Pin, ADC, reset
from micropython import const
from hcsr04 import HCSR04
from umqtt.simple import MQTTClient
_led_light_on_milliseconds = const(30000)
_led_light_current_to_voltage_resistor_value = const(47)
# Wiring:
# D7 UV light switch. High == on.
uv_light = Pin(13, Pin.OUT)
def turn_on_uv_light() -> None:
uv_light.on()
def turn_off_uv_light() -> None:
uv_light.off()
turn_off_uv_light()
# A0 UV light current (47 Ohm resister converts current to voltage) measurement.
uv_light_voltage = ADC(0)  # readings 0-1024 map linearly to 0-3.3 V
def map_adc_reading_to_voltage(value, left_min, left_max, right_min, right_max) -> float:
# Figure out how 'wide' each range is
left_span = left_max - left_min
right_span = right_max - right_min
# Convert the left range into a 0-1 range (float)
value_scaled = float(value - left_min) / float(left_span)
# Convert the 0-1 range into a value in the right range.
return right_min + (value_scaled * right_span)
_ADC_reading_low_range = const(0)
_ADC_reading_high_range = const(1024)
_ADC_voltage_low_range = const(0)
_ADC_voltage_high_range = 3.3
def measure_uv_light_current() -> float:
    current_ma = map_adc_reading_to_voltage(uv_light_voltage.read(),
                                            _ADC_reading_low_range,
                                            _ADC_reading_high_range,
                                            _ADC_voltage_low_range,
                                            _ADC_voltage_high_range) / \
        _led_light_current_to_voltage_resistor_value * 1000
# print('UV light current is {} mA'.format(current_ma))
return current_ma
_last_x_distance_values = []
_track_distance_value_number = const(8)
def compute_average(values_list) -> float:
    assert len(values_list) > 0, "doesn't make sense to compute empty list average"
    # print('values_list length is {}'.format(len(values_list)))
    return sum(values_list) / len(values_list)
# Sometime the ultrasonic sensor returns readings much shorter than true value. Track the last x values and
# use their average to filter out such noise.
def update_distance_average(new_measure) -> float:
    if len(_last_x_distance_values) >= _track_distance_value_number:
_last_x_distance_values.pop(0)
_last_x_distance_values.append(new_measure)
return compute_average(_last_x_distance_values)
# Only call this method called when WiFi is connected.
# See https://docs.micropython.org/en/latest/esp8266/tutorial/network_basics.html#configuration-of-the-wifi
# ESP8266/32 persist WiFi configuration during power-off, and will reconnect automatically.
def publish_message(message) -> None: # message is in binary format
print('Publish message: {0}'.format(message))
try:
c = MQTTClient(client_id="smart_uv_light_umqtt_client", # if username/pwd wrong, this will throw Exception
server="192.168.1.194",
user=b"mosquitto",
password=b"mosquitto",
ssl=False)
if 0 == c.connect(): # 0 is success.
c.publish(b"smart_uv_light_status_topic", message)
c.disconnect()
else:
print('Connect to MQTT server failed. ')
except OSError as exception:
# When machine is just booted, WiFi hasn't been connected yet. Immediately invoke MQTT will throw error.
# Instead of tracking startup grace period, just keep code simple by catching and log the error.
print('publish_message encountered error {}'.format(exception))
last_reset_tick = ticks_ms()
_reset_interval_milliseconds = const(30 * 60 * 1000)  # 30 minutes
def periodically_reset() -> None:
tick_ms_elapsed = ticks_diff(ticks_ms(), last_reset_tick)
# message = 'tick_ms elapsed {}'.format(tick_ms_elapsed)
# publish_message(message)
# print(message)
if tick_ms_elapsed > _reset_interval_milliseconds:
# free is in boot so it's available to REPL, for convenience. Import it to make this dependency explicit?
publish_message('Reset. Current memory usage is {}'.format(free())) # Also works as heartbeat.
sleep_ms(2000)
reset()
# Don't need to record last_reset_tick here. Because after reset, code will do it.
# ESP32 Devkit print Dxx, the xx is pin number used below.
# ESP8266 map Dxx to GPIO number https://randomnerdtutorials.com/esp8266-pinout-reference-gpios/
# D5 Trigger (reversed logic, low -> high because of the MOSFET driver in front of distance sensor.
# HCSR04 library needs to be modified)
# D6 Echo
sensor = HCSR04(trigger_pin=14, echo_pin=12)
start_ticks = 0
_loop_sleep_ms = const(6)
_led_on_distance_cm = const(73) # 29 inch
# For testing average function.
# import urandom
while True:
try:
distance_reading = sensor.distance_cm()
# distance_reading = urandom.getrandbits(8)
# print('The last distance reading is {} cm.'.format(distance_reading))
if distance_reading <= 2:
# It can give negative readings. Hardware bug or library bug?
# print('Drop value below the Ultrasonic lower range.')
continue
average_distance = update_distance_average(distance_reading)
# print('Current average distance is {} cm.'.format(average_distance))
if average_distance < _led_on_distance_cm and uv_light.value() == 0:
start_ticks = ticks_ms()
turn_on_uv_light()
publish_message('Turn light on. Average distance {}. '
'Light current is {} mA. '.format(average_distance, measure_uv_light_current()))
if 1 == uv_light.value() and ticks_diff(ticks_ms(), start_ticks) > _led_light_on_milliseconds:
# publish_message('Before turning light off. Current is {} mA.'.format(measure_uv_light_current()))
turn_off_uv_light()
publish_message('Light is off. Current is {} mA.'.format(measure_uv_light_current()))
measure_uv_light_current()
periodically_reset()
sleep_ms(_loop_sleep_ms)
except OSError as ex:
error_message = 'ERROR: {}'.format(ex)
# If function, invoked from here, raises exception, the loop can terminate.
publish_message(error_message)
print(error_message)
|
# -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS
Community Edition) available.
Copyright (C) 2017-2018 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from iam.collection import FancyDict
def get_filter_obj(filter_data, filter_keys):
filter_obj = FancyDict()
_filter_data = filter_data or {}
for key in filter_keys:
filter_obj[key] = _filter_data.get(key)
return filter_obj
class Page(object):
def __init__(self, limit, offset):
self.limit = limit
self.offset = offset
@property
def slice_from(self):
return self.offset
@property
def slice_to(self):
if self.limit == 0:
return None
return self.offset + self.limit
def get_page_obj(page_data):
return Page(limit=page_data.get("limit", 0), offset=page_data.get("offset", 0))
|
import unittest
from python3_gearman.admin_client import GearmanAdminClient, ECHO_STRING
from python3_gearman.admin_client_handler import GearmanAdminClientCommandHandler
from python3_gearman.errors import InvalidAdminClientState, ProtocolError
from python3_gearman.protocol import GEARMAN_COMMAND_ECHO_RES, GEARMAN_COMMAND_ECHO_REQ, GEARMAN_COMMAND_TEXT_COMMAND, \
GEARMAN_SERVER_COMMAND_STATUS, GEARMAN_SERVER_COMMAND_VERSION, GEARMAN_SERVER_COMMAND_WORKERS, GEARMAN_SERVER_COMMAND_MAXQUEUE, GEARMAN_SERVER_COMMAND_SHUTDOWN
from tests._core_testing import _GearmanAbstractTest, MockGearmanConnectionManager, MockGearmanConnection
class MockGearmanAdminClient(GearmanAdminClient, MockGearmanConnectionManager):
pass
class CommandHandlerStateMachineTest(_GearmanAbstractTest):
"""Test the public interface a GearmanWorker may need to call in order to update state on a GearmanWorkerCommandHandler"""
connection_manager_class = MockGearmanAdminClient
command_handler_class = GearmanAdminClientCommandHandler
def setUp(self):
super(CommandHandlerStateMachineTest, self).setUp()
self.connection_manager.current_connection = self.connection
self.connection_manager.current_handler = self.command_handler
def test_send_illegal_server_commands(self):
self.assertRaises(ProtocolError, self.send_server_command, "This is not a server command")
def test_ping_server(self):
self.command_handler.send_echo_request(ECHO_STRING)
self.assert_sent_command(GEARMAN_COMMAND_ECHO_REQ, data=ECHO_STRING)
self.assertEqual(self.command_handler._sent_commands[0], GEARMAN_COMMAND_ECHO_REQ)
self.command_handler.recv_command(GEARMAN_COMMAND_ECHO_RES, data=ECHO_STRING)
server_response = self.pop_response(GEARMAN_COMMAND_ECHO_REQ)
self.assertEqual(server_response, ECHO_STRING)
def test_state_and_protocol_errors_for_status(self):
self.send_server_command(GEARMAN_SERVER_COMMAND_STATUS)
        # Test popping prematurely; the response is not ready until we see the '.'
self.assertRaises(InvalidAdminClientState, self.pop_response, GEARMAN_SERVER_COMMAND_STATUS)
# Test malformed server status
self.assertRaises(ProtocolError, self.recv_server_response, '\t'.join(['12', 'IP-A', 'CLIENT-A']))
self.recv_server_response('.')
server_response = self.pop_response(GEARMAN_SERVER_COMMAND_STATUS)
self.assertEqual(server_response, tuple())
def test_multiple_status(self):
self.send_server_command(GEARMAN_SERVER_COMMAND_STATUS)
self.recv_server_response('\t'.join(['test_function', '1', '5', '17']))
self.recv_server_response('\t'.join(['another_function', '2', '4', '23']))
self.recv_server_response('.')
server_response = self.pop_response(GEARMAN_SERVER_COMMAND_STATUS)
self.assertEqual(len(server_response), 2)
test_response, another_response = server_response
self.assertEqual(test_response['task'], 'test_function')
self.assertEqual(test_response['queued'], 1)
self.assertEqual(test_response['running'], 5)
self.assertEqual(test_response['workers'], 17)
self.assertEqual(another_response['task'], 'another_function')
self.assertEqual(another_response['queued'], 2)
self.assertEqual(another_response['running'], 4)
self.assertEqual(another_response['workers'], 23)
def test_version(self):
expected_version = '0.12345'
self.send_server_command(GEARMAN_SERVER_COMMAND_VERSION)
self.recv_server_response(expected_version)
server_response = self.pop_response(GEARMAN_SERVER_COMMAND_VERSION)
self.assertEqual(expected_version, server_response)
def test_state_and_protocol_errors_for_workers(self):
self.send_server_command(GEARMAN_SERVER_COMMAND_WORKERS)
        # Test popping prematurely; the response is not ready until we see the '.'
self.assertRaises(InvalidAdminClientState, self.pop_response, GEARMAN_SERVER_COMMAND_WORKERS)
# Test malformed responses
self.assertRaises(ProtocolError, self.recv_server_response, ' '.join(['12', 'IP-A', 'CLIENT-A']))
self.assertRaises(ProtocolError, self.recv_server_response, ' '.join(['12', 'IP-A', 'CLIENT-A', 'NOT:']))
self.recv_server_response('.')
server_response = self.pop_response(GEARMAN_SERVER_COMMAND_WORKERS)
self.assertEqual(server_response, tuple())
def test_multiple_workers(self):
self.send_server_command(GEARMAN_SERVER_COMMAND_WORKERS)
self.recv_server_response(' '.join(['12', 'IP-A', 'CLIENT-A', ':', 'function-A', 'function-B']))
self.recv_server_response(' '.join(['13', 'IP-B', 'CLIENT-B', ':', 'function-C']))
self.recv_server_response('.')
server_response = self.pop_response(GEARMAN_SERVER_COMMAND_WORKERS)
self.assertEqual(len(server_response), 2)
test_response, another_response = server_response
self.assertEqual(test_response['file_descriptor'], '12')
self.assertEqual(test_response['ip'], 'IP-A')
self.assertEqual(test_response['client_id'], 'CLIENT-A')
self.assertEqual(test_response['tasks'], ('function-A', 'function-B'))
self.assertEqual(another_response['file_descriptor'], '13')
self.assertEqual(another_response['ip'], 'IP-B')
self.assertEqual(another_response['client_id'], 'CLIENT-B')
self.assertEqual(another_response['tasks'], ('function-C', ))
def test_maxqueue(self):
self.send_server_command(GEARMAN_SERVER_COMMAND_MAXQUEUE)
self.assertRaises(ProtocolError, self.recv_server_response, 'NOT OK')
# Pop prematurely
self.assertRaises(InvalidAdminClientState, self.pop_response, GEARMAN_SERVER_COMMAND_MAXQUEUE)
self.recv_server_response('OK')
server_response = self.pop_response(GEARMAN_SERVER_COMMAND_MAXQUEUE)
self.assertEqual(server_response, 'OK')
def test_shutdown(self):
self.send_server_command(GEARMAN_SERVER_COMMAND_SHUTDOWN)
# Pop prematurely
self.assertRaises(InvalidAdminClientState, self.pop_response, GEARMAN_SERVER_COMMAND_SHUTDOWN)
self.recv_server_response(None)
server_response = self.pop_response(GEARMAN_SERVER_COMMAND_SHUTDOWN)
self.assertEqual(server_response, None)
def send_server_command(self, expected_command):
self.command_handler.send_text_command(expected_command)
expected_line = "%s\n" % expected_command
self.assert_sent_command(GEARMAN_COMMAND_TEXT_COMMAND, raw_text=expected_line)
self.assertEqual(self.command_handler._sent_commands[0], expected_command)
def recv_server_response(self, response_line):
self.command_handler.recv_command(GEARMAN_COMMAND_TEXT_COMMAND, raw_text=response_line)
def pop_response(self, expected_command):
server_cmd, server_response = self.command_handler.pop_response()
self.assertEqual(expected_command, server_cmd)
return server_response
if __name__ == '__main__':
unittest.main()
|
import random
import numpy as np
from ding.envs import BaseEnv, BaseEnvInfo, BaseEnvTimestep
from ding.envs.common.env_element import EnvElementInfo
from ding.utils import ENV_REGISTRY
@ENV_REGISTRY.register('bitflip')
class BitFlipEnv(BaseEnv):
def __init__(self, cfg: dict) -> None:
self._cfg = cfg
self._n_bits = cfg.n_bits
self._state = np.zeros(self._n_bits)
self._goal = np.zeros(self._n_bits)
self._curr_step = 0
self._maxsize = self._n_bits
self._final_eval_reward = 0
def reset(self) -> np.ndarray:
self._curr_step = 0
self._final_eval_reward = 0
if hasattr(self, '_seed') and hasattr(self, '_dynamic_seed') and self._dynamic_seed:
random_seed = 100 * random.randint(1, 1000)
np.random.seed(self._seed + random_seed)
elif hasattr(self, '_seed'):
np.random.seed(self._seed)
self._state = np.random.randint(0, 2, size=(self._n_bits, )).astype(np.float32)
self._goal = np.random.randint(0, 2, size=(self._n_bits, )).astype(np.float32)
while (self._state == self._goal).all():
self._goal = np.random.randint(0, 2, size=(self._n_bits, )).astype(np.float32)
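        # Observation is the current bit string concatenated with the goal.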
obs = np.concatenate([self._state, self._goal], axis=0)
return obs
def close(self) -> None:
pass
    def check_success(self, state: np.ndarray, goal: np.ndarray) -> bool:
        return (state == goal).all()
def seed(self, seed: int, dynamic_seed: bool = True) -> None:
self._seed = seed
self._dynamic_seed = dynamic_seed
random.seed(self._seed)
def step(self, action: np.ndarray) -> BaseEnvTimestep:
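        # Flip the chosen bit, then check whether the state matches the goal.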
self._state[action] = 1 - self._state[action]
if self.check_success(self._state, self._goal):
rew = np.array([1]).astype(np.float32)
done = True
else:
rew = np.array([0]).astype(np.float32)
done = False
self._final_eval_reward += float(rew)
if self._curr_step >= self._maxsize - 1:
done = True
info = {}
if done:
info['final_eval_reward'] = self._final_eval_reward
self._curr_step += 1
obs = np.concatenate([self._state, self._goal], axis=0)
return BaseEnvTimestep(obs, rew, done, info)
def info(self) -> BaseEnvInfo:
T = EnvElementInfo
return BaseEnvInfo(
agent_num=1,
obs_space=T(
(2 * self._n_bits, ),
{
'min': [0 for _ in range(self._n_bits)],
'max': [1 for _ in range(self._n_bits)],
'dtype': float,
},
),
# [min, max)
act_space=T(
(self._n_bits, ),
{
'min': 0,
'max': self._n_bits,
'dtype': int,
},
),
rew_space=T(
(1, ),
{
'min': 0.0,
'max': 1.0
},
),
use_wrappers=None,
)
def __repr__(self) -> str:
return "DI-engine BitFlip Env({})".format('bitflip')
|
#!/usr/bin/python
import operator, copy, os, sys, time, threading, cStringIO, traceback
import gc
from PySide.QtCore import *
from PySide.QtGui import *
import preferences
pref = preferences.pref
prefDir = preferences.prefDir
from log import *
from tools import unicode_cleanup
import jsitwindow
import TorrentTable
import jsit_manager
import jsit
VERSION="0.5.2" # Adjusted by make_release
# From http://pydev.blogspot.com.br/2014/03/should-python-garbage-collector-be.html
class GarbageCollector(QObject):
'''
Disable automatic garbage collection and instead collect manually
every INTERVAL milliseconds.
This is done to ensure that garbage collection only happens in the GUI
thread, as otherwise Qt can crash.
'''
INTERVAL = 3000
def __init__(self, parent, debug=False):
QObject.__init__(self, parent)
self.debug = debug
self.timer = QTimer(self)
self.timer.timeout.connect(self.check)
self.threshold = gc.get_threshold()
if self.debug:
print ('gc thresholds:', self.threshold)
gc.disable()
self.timer.start(self.INTERVAL)
def check(self):
#return self.debug_cycles() # uncomment to just debug cycles
l0, l1, l2 = gc.get_count()
if self.debug:
print 'gc_check called:', l0, l1, l2
if l0 > self.threshold[0]:
num = gc.collect(0)
if self.debug:
print 'collecting gen 0, found:', num, 'unreachable'
if l1 > self.threshold[1]:
#gc.set_debug(gc.DEBUG_LEAK)
num = gc.collect(1)
if self.debug:
print 'collecting gen 1, found:', num, 'unreachable'
if l2 > self.threshold[2]:
num = gc.collect(2)
if self.debug:
print 'collecting gen 2, found:', num, 'unreachable'
def debug_cycles(self):
gc.set_debug(gc.DEBUG_SAVEALL)
gc.collect()
for obj in gc.garbage:
print (obj, repr(obj), type(obj))
class JSITWindow(QMainWindow):
def __init__(self, mgr, *args):
QMainWindow.__init__(self, *args)
self.mgr = mgr
self.ui = jsitwindow.Ui_JSIT()
self.ui.setupUi(self)
self._visible = True
self.model = TorrentTable.TorrentTableModel(self, mgr)
self.ui.tableView.setDataModel(self.model)
self._clip = QApplication.clipboard()
# Set up routes to my methods
self.ui.addFiles.clicked.connect(self.addTorrentFiles)
self.ui.addURL.clicked.connect(self.addTorrentURL)
self.ui.startB.clicked.connect(self.startAll)
self.ui.stopB.clicked.connect(self.stopAll)
self.ui.downloadB.clicked.connect(self.downloadAll)
self.ui.watchClipboard.stateChanged.connect(self.watchClipboard)
self.ui.watchDirectory.stateChanged.connect(self.watchDirectory)
self.ui.reloadB.clicked.connect(self.reloadList)
self.ui.actionSave_Preferences.activated.connect(self.savePreferences)
self.ui.actionEdit_Preferences.activated.connect(self.NIY)
self.ui.actionAbout.activated.connect(self.about)
# Set up values from preferences
self.ui.watchClipboard.setChecked(bool(pref("jsit_manager", "watchClipboard", False)))
self.ui.watchDirectory.setChecked(bool(pref("jsit_manager", "watchDirectory", False)))
# Set up log catching for errors
## Not threadsafe...
##addLogCallback(self.logError)
def __repr__(self):
return "JSITWindow(0x%x)" % id(self)
def closeStartBox(self):
self._startBox.close()
self._startBox = None
def update(self):
log(DEBUG)
try:
self.model.update(clip = [self._clip.text(QClipboard.Clipboard), self._clip.text(QClipboard.Selection)])
finally:
QTimer.singleShot(pref("yajsig", "updateRate", 1000), self.update)
def addTorrentFiles(self):
log(INFO)
fns,ftype = QFileDialog.getOpenFileNames(self, "Open Torrent File", "", "Torrent Files (*.torrent)")
for fn in fns:
tor = self.mgr.addTorrentFile(fn, basedir=pref("downloads", "basedir", "downloads"),
unquoteNames=pref("downloads", "unquoteNames", True), interpretDirectories=pref("downloads", "interpretDirectories", True))
def addTorrentURL(self):
log(INFO)
dlg = QInputDialog(self)
dlg.setInputMode(QInputDialog.TextInput)
dlg.setLabelText("Enter http/magnet link:")
dlg.setWindowTitle("Add Torrent from URL")
dlg.resize(500,100)
ok = dlg.exec_()
url = dlg.textValue()
if ok:
tor = self.mgr.addTorrentURL(url, basedir=pref("downloads", "basedir", "downloads"),
unquoteNames=pref("downloads", "unquoteNames", True), interpretDirectories=pref("downloads", "interpretDirectories", True))
def startAll(self):
log(INFO)
self.mgr.startAll()
def stopAll(self):
log(INFO)
self.mgr.stopAll()
def downloadAll(self):
log(INFO)
self.mgr.downloadAll()
def reloadList(self):
log(INFO)
self.mgr.reloadList()
def watchClipboard(self, value):
log(INFO)
self.mgr.watchClipboard(bool(value))
preferences.setValue("jsit_manager", "watchClipboard", bool(value))
def watchDirectory(self, value):
log(INFO)
self.mgr.watchDirectory(bool(value))
preferences.setValue("jsit_manager", "watchDirectory", bool(value))
def showEvent(self, event):
log(DEBUG)
self._visible = True
QTimer.singleShot(0, self.update)
def hideEvent(self, event):
log(DEBUG)
self._visible = False
def quit(self):
log(WARNING)
cpref = preferences.changed()
# Somehow the MBox doesn't close. Figure out later.
if False and cpref:
b = QMessageBox(flags = Qt.Dialog);
b.setText("Unsaved preferences")
b.setInformativeText("The preferences %s have changed."% cpref)
b.setStandardButtons(QMessageBox.Save | QMessageBox.Discard | QMessageBox.Cancel);
b.setDefaultButton(QMessageBox.Save);
ret = b.exec_();
log(DEBUG, "ret= %s"% ret)
if ret == QMessageBox.Save:
event.handled()
self.savePreferences()
elif ret == QMessageBox.Cancel:
event.ignore()
            elif ret == QMessageBox.Discard:
event.handled()
else:
log(WARNING, "Messagebox returned %s, not sure what to do." % ret)
else:
self.savePreferences()
log(WARNING, "Quitting.")
self.mgr.release()
        QApplication.instance().quit()
def savePreferences(self):
log(INFO)
preferences.save(os.path.join(basedir, "preferences.json"))
def NIY(self):
log(INFO)
b = QMessageBox();
b.setText("Sorry!")
b.setInformativeText("Not implemented yet...")
b.setStandardButtons(QMessageBox.Ok);
b.setDefaultButton(QMessageBox.Ok);
ret = b.exec_();
def about(self):
log(INFO)
b = QMessageBox();
b.setText("About YAJSIG")
b.setInformativeText("Yet Another Justseed.it GUI\nVersion %s" % VERSION)
b.setStandardButtons(QMessageBox.Ok);
b.setDefaultButton(QMessageBox.Ok);
ret = b.exec_();
def logError(self, fullmsg, threadName, ltime, level, levelName, caller, msg):
if level > ERROR:
return
log(INFO)
ret = QMessageBox.question(None, "Error Caught!", str(msg), QMessageBox.Ok | QMessageBox.Abort)
if ret == QMessageBox.Abort:
os._exit(1) # Brute force exit, avoid thread problems...
# Exception catchall helper
# Based on http://www.riverbankcomputing.com/pipermail/pyqt/2009-May/022961.html (originally from Eric IDE)
exception_active = False
def excepthook(excType, excValue, tracebackobj):
"""
Global function to catch unhandled exceptions.
@param excType exception type
@param excValue exception value
@param tracebackobj traceback object
"""
# Not reentrant, ignore everything after the first
global exception_active
if exception_active:
return
separator = '-' * 80
logFile = "simple.log"
notice = \
"""An unhandled exception occurred. Please report the problem\n"""\
"""at https://github.com/TouchTone/pyjsit/issues .\n"""\
"""Please include the 'yajsig.log' and 'aria.log' log files in your report.\n\nError information:\n"""
timeString = time.strftime("%Y-%m-%d, %H:%M:%S")
tbinfofile = cStringIO.StringIO()
traceback.print_tb(tracebackobj, None, tbinfofile)
tbinfofile.seek(0)
tbinfo = tbinfofile.read()
errmsg = '%s: \n%s' % (str(excType), str(excValue))
sections = [separator, timeString, separator, errmsg, separator, tbinfo]
msg = '\n'.join(sections)
log(ERROR, "Caught unhandled exception %s!" % errmsg)
log(ERROR, tbinfo)
exception_active = True
ret = QMessageBox.question(None, "Unhandled Exception Caught!", str(notice)+str(msg)+str(VERSION), QMessageBox.Ok | QMessageBox.Abort)
if ret == QMessageBox.Abort:
os._exit(1) # Brute force exit, avoid thread problems...
exception_active = False
# Not threadsafe...
##sys.excepthook = excepthook
if __name__ == "__main__":
global basedir
    # argv is either just the script name, or script name plus username and password (see below)
    if len(sys.argv) not in (1, 3):
        print "Call as %s [<username> <password>]" % sys.argv[0]
sys.exit(1)
if getattr(sys, 'frozen', None):
basedir = sys._MEIPASS
else:
basedir = os.path.dirname(__file__)
preferences.setBaseDir(basedir)
if os.path.isfile(os.path.join(basedir, "preferences.json")):
preferences.load(os.path.join(basedir, "preferences.json"))
else:
preferences.load(os.path.join(basedir, "defaults.json"))
setLogLevel(pref("yajsig", "logLevel", INFO))
setFileLog(os.path.join(basedir, "yajsig.log"), pref("yajsig", "fileLogLevel", DEBUG))
global qapp
qapp = QApplication([])
# Make sure GC only runs in this thread to prevent crashes
gcol = GarbageCollector(qapp, debug=False)
if len(sys.argv) == 3:
username = sys.argv[1]
password = sys.argv[2]
log(DEBUG, "Got %s:%s from command line." % (username, password))
else:
username = pref("jsit", "username", None)
password = pref("jsit", "password", None)
log(DEBUG, "Got %s:%s from preferences." % (username, password))
while True:
try:
if username == None or password == None:
log(DEBUG, "Need username and password, trigger input.")
raise jsit.APIError("No user/password")
mgr = jsit_manager.Manager(username = username, password = password, torrentdir = prefDir("jsit", "torrentDirectory", "intorrents"))
break
except jsit.APIError, e:
log(WARNING, "JSIT login failed (%s)!" % e)
username, ok = QInputDialog.getText(None, "JS.it Username", "Enter JS.it username:", QLineEdit.Normal, username)
if not ok:
log(ERROR, "Username aborted!")
sys.exit(1)
password, ok = QInputDialog.getText(None, "JS.it Password", "Enter JS.it password:", QLineEdit.Normal, password)
if not ok:
log(ERROR, "Password aborted!")
sys.exit(1)
log(DEBUG, "jsit_manager started...")
preferences.setValue("jsit", "username", username)
preferences.setValue("jsit", "password", password)
##addIgnoreModule("jsit")
##addIgnoreModule("jsit_manager")
##addOnlyModule("TorrentTable")
win = JSITWindow(mgr)
QObject.connect(qapp, SIGNAL("lastWindowClosed()"), win, SLOT("quit()"))
win.show()
if False:
import stacktracer
stacktracer.trace_start("trace.html",interval=5,auto=True) # Set auto flag to always update file!
qapp.exec_()
|
def pretvori_v_sekunde(niz):
"""
Pretvori niz, ki predstavlja dolžino skladbe v formatu hh:mm:ss v število sekund.
"""
h, m, s = map(int, niz.split(":"))
return s + m*60 + h*3600
def sekunde_v_format(sek):
"""
Pretvori sekunde `sek` v format hh:mm:ss.
"""
if isinstance(sek, str):
return sek
h = sek // 3600
m = (sek % 3600) // 60
s = sek % 60
return "{:0>2d}:{:0>2d}:{:0>2d}".format(h, m, s)
def popravi_datum(niz):
"""
Format yyyy-mm-dd spremeni v dd. mm. yyyy.
"""
return "{2}. {1}. {0}".format(*niz.split("-"))
if __name__ == "__main__":
print(sekunde_v_format(11432))
print(popravi_datum("1975-12-13"))
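    # Round trip (illustrative): 11432 s -> "03:10:32" -> 11432 s
    print(pretvori_v_sekunde(sekunde_v_format(11432)))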
|
import numpy as np
from numba import jit
from ..utils import Const
@jit(nopython=True)
def JB2008(AMJD,YRDAY,SUN,SAT,F10,F10B,S10,S10B,M10,M10B,Y10,Y10B,DSTDTC):
'''
Jacchia-Bowman 2008 Model Atmosphere
This is the CIRA "Integration Form" of a Jacchia Model.
    There are no tabular values of density. Instead, the barometric equation and diffusion equation are integrated numerically using
    the Newton-Cotes method to produce the density profile up to the input position.
INPUT:
AMJD : Date and Time, in modified Julian Days and Fraction (MJD = JD-2400000.5)
SUN[0] : Right Ascension of Sun (radians)
SUN[1] : Declination of Sun (radians)
SAT[0] : Right Ascension of Position (radians)
SAT[1] : Geocentric Latitude of Position (radians)
SAT[2] : Height of Position (km)
F10 : 10.7-cm Solar Flux (1.0E-22*W/(M**2*Hz)) (Tabular time 1.0 day earlier)
F10B : 10.7-cm Solar Flux, ave. 81-day centered on the input time (Tabular time 1.0 day earlier)
S10 : EUV index (26-34 nm) scaled to F10 (Tabular time 1.0 day earlier)
S10B : EUV 81-day ave. centered index (Tabular time 1.0 day earlier)
M10 : MG2 index scaled to F10 (Tabular time 2.0 days earlier)
M10B : MG2 81-day ave. centered index (Tabular time 2.0 days earlier)
Y10 : Solar X-Ray & Lya index scaled to F10 (Tabular time 5.0 days earlier)
Y10B : Solar X-Ray & Lya 81-day ave. centered index (Tabular time 5.0 days earlier)
DSTDTC : Temperature change computed from Dst index
OUTPUT:
        TEMP[0] : Exospheric Temperature above Input Position (K)
        TEMP[1] : Temperature at Input Position (K)
        RHO : Total Mass-Density at Input Position (kg/m**3)
Reference:
        Bowman, Bruce R., et al.: "A New Empirical Thermospheric
Density Model JB2008 Using New Solar and Geomagnetic Indices",
AIAA/AAS 2008, COSPAR CIRA 2008 Model
Note:
        The program is translated from the Fortran source code written by Bruce R Bowman (HQ AFSPC, Space Analysis Division), 2008
'''
# The alpha are the thermal diffusion coefficients in Eq. (6)
TEMP = np.zeros(2)
ALPHA = np.zeros(5)
ALPHA[4] = -0.38
# AL10 is DLOG(10.0)
AL10 = Const.al10
# The AMW are the molecular weights in order: N2, O2, O, Ar, He & H
AMW = np.array([28.0134,31.9988,15.9994,39.9480,4.0026,1.00797])
# AVOGAD is Avogadro's number in mks units (molecules/kmol)
AVOGAD = Const.avogad
PI = Const.pi
TWOPI,FOURPI = Const.twopi,Const.fourpi
PIOV2,PIOV4 = Const.pivo2,Const.pivo4
# The FRAC are the assumed sea-level volume fractions in order: N2, O2, Ar, and He
FRAC = np.array([0.7811,0.20955,9.34e-3,1.289e-5])
# RSTAR is the universal gas-constant in mks units (joules/K/kmol)
RSTAR = Const.rstar
# The R# are values used to establish height step sizes in the regimes 90km to 105km, 105km to 500km and 500km upward.
R1,R2,R3 = 0.01,0.025,0.075
# The WT are weights for the Newton-Cotes Five-Point Quad. formula
WT = np.array([7,32,12,32,7])*2/45
# The CHT are coefficients for high altitude density correction
CHT = np.array([0.22,-0.2e-2,0.115e-2,-0.211e-5])
DEGRAD = Const.degrad
# Equation (14)
FN = (F10B/240)**0.25
if FN > 1: FN = 1
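    # e.g. for F10B = 150, FN = (150/240)**0.25 ~ 0.889, so FSB blends roughly 89% F10B with 11% S10B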
FSB = F10B*FN + S10B*(1 - FN)
TSUBC = 392.4 + 3.227*FSB + 0.298*(F10-F10B) + 2.259*(S10-S10B) + 0.312*(M10-M10B) + 0.178*(Y10-Y10B)
# Equation (15)
ETA = np.abs(SAT[1] - SUN[1])/2
THETA = np.abs(SAT[1] + SUN[1])/2
# Equation (16)
H = SAT[0] - SUN[0]
TAU = H - 0.64577182 + 0.10471976 * np.sin(H + 0.75049158)
GLAT,ZHT = SAT[1],SAT[2]
GLST = H + PI
GLSTHR = (GLST/DEGRAD)*(24/360)
if GLSTHR >= 24: GLSTHR -= 24
if GLSTHR < 0: GLSTHR += 24
# Equation (17)
C = np.cos(ETA)**2.5
S = np.sin(THETA)**2.5
DF = S + (C - S) * np.abs(np.cos(0.5 * TAU))**3
TSUBL = TSUBC * (1 + 0.31 * DF)
# Compute correction to dTc for local solar time and lat correction
DTCLST = DTSUB(F10,GLSTHR,GLAT,ZHT)
# Compute the local exospheric temperature.
# Add geomagnetic storm effect from input dTc value
TEMP[0] = TSUBL + DSTDTC
TINF = TEMP[0] + DTCLST
# Equation (9)
TSUBX = 444.3807 + 0.02385 * TINF - 392.8292 * np.exp(-0.0021357 * TINF)
# Equation (11)
GSUBX = 0.054285714 * (TSUBX - 183)
# The TC array will be an argument in the call to XLOCAL, which evaluates Equation (10) or Equation (13)
TC = np.zeros(4)
TC[0],TC[1] = TSUBX,GSUBX
# A AND GSUBX/A OF Equation (13)
TC[2] = (TINF - TSUBX)/PIOV2
TC[3] = GSUBX/TC[2]
# Equation (5)
Z1 = 90
Z2 = min(SAT[2],105)
AL = np.log(Z2/Z1)
N = int(AL/R1) + 1
ZR = np.exp(AL/N)
AMBAR1 = XAMBAR(Z1)
TLOC1 = XLOCAL(Z1,TC)
ZEND = Z1
SUM2 = 0
AIN = AMBAR1 * XGRAV(Z1)/TLOC1
for I in range(N):
Z = ZEND
ZEND = ZR * Z
DZ = 0.25 * (ZEND-Z)
SUM1 = WT[0]*AIN
for J in range(1,5):
Z += DZ
AMBAR2 = XAMBAR(Z)
TLOC2 = XLOCAL(Z,TC)
GRAVL = XGRAV(Z)
AIN = AMBAR2 * GRAVL/TLOC2
SUM1 += WT[J] * AIN
SUM2 += DZ * SUM1
FACT1 = 1e3/RSTAR
RHO = 3.46e-6 * AMBAR2 * TLOC1 * np.exp(-FACT1*SUM2) /AMBAR1 /TLOC2
# Equation (2)
ANM = AVOGAD * RHO
AN = ANM/AMBAR2
# Equation (3)
FACT2 = ANM/28.96
ALN = np.zeros(6)
ALN[0] = np.log(FRAC[0]*FACT2)
ALN[3] = np.log(FRAC[2]*FACT2)
ALN[4] = np.log(FRAC[3]*FACT2)
# Equation (4)
ALN[1] = np.log(FACT2 * (1 + FRAC[1]) - AN)
ALN[2] = np.log(2 * (AN - FACT2))
if SAT[2] <= 105:
TEMP[1] = TLOC2
# Put in negligible hydrogen for use in DO-LOOP 13
ALN[5] = ALN[4] - 25
else:
# Equation (6)
Z3 = min(SAT[2],500)
AL = np.log(Z3/Z)
N = int(AL/R2) + 1
ZR = np.exp(AL/N)
SUM2 = 0
AIN = GRAVL/TLOC2
for I in range(N):
Z = ZEND
ZEND = ZR * Z
DZ = 0.25 * (ZEND - Z)
SUM1 = WT[0] * AIN
for J in range(1,5):
Z += DZ
TLOC3 = XLOCAL(Z,TC)
GRAVL = XGRAV(Z)
AIN = GRAVL/TLOC3
SUM1 += WT[J] * AIN
SUM2 += DZ * SUM1
Z4 = max(SAT[2],500)
AL = np.log(Z4/Z)
R = R2
if SAT[2] > 500: R = R3
N = int(AL/R) + 1
ZR = np.exp(AL/N)
SUM3 = 0
for I in range(N):
Z = ZEND
ZEND = ZR * Z
DZ = 0.25 * (ZEND - Z)
SUM1 = WT[0] * AIN
for J in range(1,5):
Z += DZ
TLOC4 = XLOCAL(Z,TC)
GRAVL = XGRAV(Z)
AIN = GRAVL/TLOC4
SUM1 += WT[J] * AIN
SUM3 += DZ * SUM1
if SAT[2] <= 500:
T500 = TLOC4
TEMP[1] = TLOC3
ALTR = np.log(TLOC3/TLOC2)
FACT2 = FACT1 * SUM2
HSIGN = 1
else:
T500 = TLOC3
TEMP[1] = TLOC4
ALTR = np.log(TLOC4/TLOC2)
FACT2 = FACT1 * (SUM2 + SUM3)
HSIGN = -1
ALN[:-1] -= (1 + ALPHA) * ALTR + FACT2 * AMW[:-1]
# Equation (7) - Note that in CIRA72, AL10T5 = DLOG10(T500)
AL10T5 = np.log10(TINF)
ALNH5 = (5.5 * AL10T5 - 39.4) * AL10T5 + 73.13
ALN[5] = AL10 * (ALNH5 + 6) + HSIGN * (np.log(TLOC4/TLOC3) + FACT1 * SUM3 * AMW[5])
# Equation (24) - J70 Seasonal-Latitudinal Variation
TRASH = (AMJD - 36204) / 365.2422
CAPPHI = TRASH%1
DLRSL = 0.02 * (SAT[2] - 90) * np.exp(-0.045 * (SAT[2] - 90)) * np.sign(SAT[1]) * np.sin(TWOPI * CAPPHI+ 1.72) * np.sin(SAT[1])**2
# Equation (23) - Computes the semiannual variation
DLRSA = 0
if Z < 2e3:
# Use new semiannual model
FZZ,GTZ,DLRSA = SEMIAN08(YRDAY,ZHT,F10B,S10B,M10B)
if FZZ < 0: DLRSA = 0
# Sum the delta-log-rhos and apply to the number densities.
# In CIRA72 the following equation contains an actual sum, namely DLR = AL10 * (DLRGM + DLRSA + DLRSL)
# However, for Jacchia 70, there is no DLRGM or DLRSA.
DLR = AL10 * (DLRSL + DLRSA)
ALN += DLR
# Compute mass-density and mean-molecular-weight and convert number density logs from natural to common.
AN = np.exp(ALN)
SUMNM = (AN*AMW).sum()
AL10N = ALN/AL10
RHO = SUMNM/AVOGAD
# Compute the high altitude exospheric density correction factor
FEX = 1
if ZHT >= 1e3 and ZHT < 1.5e3:
ZETA = (ZHT - 1e3) * 0.002
ZETA2 = ZETA**2
ZETA3 = ZETA**3
F15C = CHT[0] + CHT[1]*F10B + (CHT[2] + CHT[3]*F10B)*1.5e3
F15C_ZETA = (CHT[2] + CHT[3]*F10B) * 500
FEX2 = 3 * F15C - F15C_ZETA - 3
FEX3 = F15C_ZETA - 2 * F15C + 2
FEX = 1 + FEX2 * ZETA2 + FEX3 * ZETA3
if ZHT >= 1.5e3: FEX = CHT[0] + CHT[1]*F10B + CHT[2]*ZHT + CHT[3]*F10B*ZHT
# Apply the exospheric density correction factor.
RHO *= FEX
return TEMP,RHO
@jit(nopython=True)
def XAMBAR(Z):
'''
Evaluates Equation (1)
'''
C = np.array([28.15204,-8.5586e-2,1.2840e-4,-1.0056e-5,-1.0210e-5,1.5044e-6,9.9826e-8])
DZ = Z - 100
AMB = C[6]
for i in range(5,-1,-1): AMB = DZ * AMB + C[i]
return AMB
@jit(nopython=True)
def XGRAV(Z):
'''
Evaluates Equation (8)
'''
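    # Sea-level gravity 9.80665 m/s**2 scaled by the inverse square of geocentric distance (Earth radius 6356.766 km)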
return 9.80665/(1 + Z/6356.766)**2
@jit(nopython=True)
def XLOCAL(Z,TC):
'''
Evaluates Equation (10) or Equation (13), depending on Z
'''
DZ = Z - 125
if DZ > 0:
XLOCAL = TC[0] + TC[2] * np.arctan(TC[3]*DZ*(1 + 4.5e-6*DZ**2.5))
else:
XLOCAL = ((-9.8204695e-6 * DZ - 7.3039742e-4) * DZ**2 + 1)* DZ * TC[1] + TC[0]
return XLOCAL
@jit(nopython=True)
def DTSUB (F10,XLST,XLAT,ZHT):
'''
COMPUTE dTc correction for Jacchia-Bowman model
Calling Args:
F10 = (I) F10 FLUX
XLST = (I) LOCAL SOLAR TIME (HOURS 0-23.999)
XLAT = (I) XLAT = SAT LAT (RAD)
ZHT = (I) ZHT = HEIGHT (KM)
DTC = (O) dTc correction
'''
B = np.array([-4.57512297, -5.12114909, -69.3003609,\
203.716701, 703.316291, -1943.49234,\
1106.51308, -174.378996, 1885.94601,\
-7093.71517, 9224.54523, -3845.08073,\
-6.45841789, 40.9703319, -482.006560,\
1818.70931, -2373.89204, 996.703815,36.1416936])
C = np.array([-15.5986211, -5.12114909, -69.3003609,\
203.716701, 703.316291, -1943.49234,\
1106.51308, -220.835117, 1432.56989,\
-3184.81844, 3289.81513, -1353.32119,\
19.9956489, -12.7093998, 21.2825156,\
-2.75555432, 11.0234982, 148.881951,\
-751.640284, 637.876542, 12.7093998,\
-21.2825156, 2.75555432])
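    # dTc is assembled piecewise over the altitude bands 120-200, 200-240, 240-300, 300-600 and 600-800 km; outside them DTC stays 0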
DTC = 0
tx = XLST/24
ycs = np.cos(XLAT)
F = (F10 - 100)/100
# calculates dTc
if ZHT >= 120 and ZHT <= 200:
H = (ZHT - 200)/50
DTC200 = C[16] + C[17]*tx*ycs + C[18]*tx**2*ycs + C[19]*tx**3*ycs + C[20]*F*ycs + C[21]*tx*F*ycs + C[22]*tx**2*F*ycs
sum_ = C[0] + B[1]*F + C[2]*tx*F + C[3]*tx**2*F + C[4]*tx**3*F + C[5]*tx**4*F + C[6]*tx**5*F +\
C[7]*tx*ycs + C[8]*tx**2*ycs + C[9]*tx**3*ycs + C[10]*tx**4*ycs + C[11]*tx**5*ycs + C[12]*ycs+\
C[13]*F*ycs + C[14]*tx*F*ycs + C[15]*tx**2*F*ycs
DTC200DZ = sum_
CC = 3*DTC200 - DTC200DZ
DD = DTC200 - CC
ZP = (ZHT-120)/80
DTC = CC*ZP*ZP + DD*ZP*ZP*ZP
if ZHT > 200 and ZHT <= 240:
H = (ZHT - 200)/50
sum_ = C[0]*H + B[1]*F*H + C[2]*tx*F*H + C[3]*tx**2*F*H + C[4]*tx**3*F*H + C[5]*tx**4*F*H + C[6]*tx**5*F*H+\
C[7]*tx*ycs*H + C[8]*tx**2*ycs*H + C[9]*tx**3*ycs*H + C[10]*tx**4*ycs*H + C[11]*tx**5*ycs*H + C[12]*ycs*H+\
C[13]*F*ycs*H + C[14]*tx*F*ycs*H + C[15]*tx**2*F*ycs*H + C[16] + C[17]*tx*ycs + C[18]*tx**2*ycs +\
C[19]*tx**3*ycs + C[20]*F*ycs + C[21]*tx*F*ycs + C[22]*tx**2*F*ycs
DTC = sum_
if ZHT > 240 and ZHT <= 300.0:
H = 0.8
sum_ = C[0]*H + B[1]*F*H + C[2]*tx*F*H + C[3]*tx**2*F*H + C[4]*tx**3*F*H + C[5]*tx**4*F*H + C[6]*tx**5*F*H +\
C[7]*tx*ycs*H + C[8]*tx**2*ycs*H + C[9]*tx**3*ycs*H + C[10]*tx**4*ycs*H + C[11]*tx**5*ycs*H + C[12]*ycs*H+\
C[13]*F*ycs*H + C[14]*tx*F*ycs*H + C[15]*tx**2*F*ycs*H + C[16] + C[17]*tx*ycs + C[18]*tx**2*ycs +\
C[19]*tx**3*ycs + C[20]*F*ycs + C[21]*tx*F*ycs + C[22]*tx**2*F*ycs
AA = sum_
BB = C[0] + B[1]*F + C[2]*tx*F + C[3]*tx**2*F + C[4]*tx**3*F + C[5]*tx**4*F + C[6]*tx**5*F +\
C[7]*tx*ycs + C[8]*tx**2*ycs + C[9]*tx**3*ycs + C[10]*tx**4*ycs + C[11]*tx**5*ycs + C[12]*ycs +\
C[13]*F*ycs + C[14]*tx*F*ycs + C[15]*tx**2*F*ycs
H = 3
sum_ = B[0] + B[1]*F + B[2]*tx*F + B[3]*tx**2*F + B[4]*tx**3*F + B[5]*tx**4*F + B[6]*tx**5*F + \
B[7]*tx*ycs + B[8]*tx**2*ycs + B[9]*tx**3*ycs + B[10]*tx**4*ycs + B[11]*tx**5*ycs + B[12]*H*ycs +\
B[13]*tx*H*ycs + B[14]*tx**2*H*ycs + B[15]*tx**3*H*ycs + B[16]*tx**4*H*ycs + B[17]*tx**5*H*ycs + B[18]*ycs
DTC300 = sum_
sum_ = B[12]*ycs + B[13]*tx*ycs + B[14]*tx**2*ycs + B[15]*tx**3*ycs + B[16]*tx**4*ycs + B[17]*tx**5*ycs
DTC300DZ = sum_
CC = 3*DTC300 - DTC300DZ - 3*AA - 2*BB
DD = DTC300 - AA - BB - CC
ZP = (ZHT-240)/60
DTC = AA + BB*ZP + CC*ZP*ZP + DD*ZP*ZP*ZP
if ZHT > 300 and ZHT <= 600:
H = ZHT/100
sum_ = B[0] + B[1]*F + B[2]*tx*F + B[3]*tx**2*F + B[4]*tx**3*F + B[5]*tx**4*F + B[6]*tx**5*F +\
B[7]*tx*ycs + B[8]*tx**2*ycs + B[9]*tx**3*ycs + B[10]*tx**4*ycs + B[11]*tx**5*ycs + B[12]*H*ycs +\
B[13]*tx*H*ycs + B[14]*tx**2*H*ycs + B[15]*tx**3*H*ycs + B[16]*tx**4*H*ycs + B[17]*tx**5*H*ycs + B[18]*ycs
DTC = sum_
if ZHT > 600 and ZHT <= 800.0:
ZP = (ZHT - 600)/100
HP = 6
AA = B[0] + B[1]*F + B[2]*tx*F + B[3]*tx**2*F + B[4]*tx**3*F + B[5]*tx**4*F + B[6]*tx**5*F +\
B[7]*tx*ycs + B[8]*tx**2*ycs + B[9]*tx**3*ycs + B[10]*tx**4*ycs + B[11]*tx**5*ycs + B[12]*HP*ycs +\
B[13]*tx*HP*ycs + B[14]*tx**2*HP*ycs+ B[15]*tx**3*HP*ycs + B[16]*tx**4*HP*ycs + B[17]*tx**5*HP*ycs + B[18]*ycs
BB = B[12]*ycs + B[13]*tx*ycs + B[14]*tx**2*ycs + B[15]*tx**3*ycs + B[16]*tx**4*ycs + B[17]*tx**5*ycs
CC = -(3*AA+4*BB)/4
DD = (AA+BB)/4
DTC = AA + BB*ZP + CC*ZP*ZP + DD*ZP*ZP*ZP
return DTC
@jit(nopython=True)
def SEMIAN08 (DAY,HT,F10B,S10B,M10B):
'''
COMPUTE SEMIANNUAL VARIATION (DELTA LOG RHO)
    INPUT (example values):  DAY    HT  F10B  S10B  M10B  FSMB
                             025.  650.  150.  148.  147.  151.
OUTPUT FUNCTIONS FZ, GT, AND DEL LOG RHO VALUE
DAY (I) DAY OF YEAR
HT (I) HEIGHT (KM)
F10B (I) AVE 81-DAY CENTERED F10
S10B (I) AVE 81-DAY CENTERED S10
M10B (I) AVE 81-DAY CENTERED M10
FZZ (O) SEMIANNUAL AMPLITUDE
GTZ (O) SEMIANNUAL PHASE FUNCTION
DRLOG (O) DELTA LOG RHO
'''
TWOPI = Const.twopi
# FZ GLOBAL MODEL VALUES
# 1997-2006 FIT:
FZM = np.array([0.2689,-0.01176, 0.02782,-0.02782, 0.3470e-3])
# GT GLOBAL MODEL VALUES
# 1997-2006 FIT:
GTM = np.array([-0.3633, 0.08506, 0.2401,-0.1897, -0.2554,-0.01790, 0.5650e-3,-0.6407e-3,-0.3418e-2,-0.1252e-2])
# COMPUTE NEW 81-DAY CENTERED SOLAR INDEX FOR FZ
FSMB = F10B - 0.7*S10B - 0.04*M10B
HTZ = HT/1e3
FZZ = FZM[0] + FZM[1]*FSMB + FZM[2]*FSMB*HTZ + FZM[3]*FSMB*HTZ**2 + FZM[4]*FSMB**2*HTZ
# COMPUTE DAILY 81-DAY CENTERED SOLAR INDEX FOR GT
FSMB = F10B - 0.75*S10B - 0.37*M10B
TAU = (DAY-1)/365
SIN1P = np.sin(TWOPI*TAU)
COS1P = np.cos(TWOPI*TAU)
SIN2P = np.sin(2*TWOPI*TAU)
COS2P = np.cos(2*TWOPI*TAU)
GTZ = GTM[0] + GTM[1]*SIN1P + GTM[2]*COS1P + GTM[3]*SIN2P + GTM[4]*COS2P + GTM[5]*FSMB + GTM[6]*FSMB*SIN1P + GTM[7]*FSMB*COS1P + GTM[8]*FSMB*SIN2P + GTM[9]*FSMB*COS2P
if FZZ < 1e-6: FZZ = 1e-6
DRLOG = FZZ*GTZ
    return FZZ,GTZ,DRLOG
|
"""
Revision ID: 0280_invited_user_folder_perms
Revises: 0279_remove_fk_to_users
Create Date: 2019-03-11 14:38:28.010082
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
revision = "0280_invited_user_folder_perms"
down_revision = "0279_remove_fk_to_users"
def upgrade():
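    # JSONB with none_as_null=True stores Python None as SQL NULL rather than JSON null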
op.add_column(
"invited_users",
sa.Column(
"folder_permissions",
postgresql.JSONB(none_as_null=True, astext_type=sa.Text()),
nullable=True,
),
)
def downgrade():
op.drop_column("invited_users", "folder_permissions")
|
from rest_framework import serializers
from .models import Role, User, Document
class RoleSerializer(serializers.ModelSerializer):
"""Role Serializer"""
class Meta:
model = Role
fields = ('id', 'name',)
class UserSerializer(serializers.ModelSerializer):
"""User Serializer"""
class Meta:
model = User
fields = ('id', 'username', 'email',
'full_name', 'password', 'about', 'role_id')
extra_kwargs = {
'password': {'write_only': True, 'required': True},
'id': {'read_only': True},
'email': {'required': True}
}
def create(self, validated_data):
user = User(**validated_data)
user.set_password(validated_data['password'])
user.save()
return user
def update(self, instance, validated_data):
password = validated_data.get('password', None)
if password:
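            # Hash the password via set_password rather than letting setattr store the raw value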
validated_data.pop('password')
instance.set_password(password)
for key, value in validated_data.items():
setattr(instance, key, value)
instance.save()
return instance
class DocumentSerializer(serializers.ModelSerializer):
"""Document Serializer"""
user = serializers.ReadOnlyField()
author_id = serializers.ReadOnlyField(source='author_identity')
class Meta:
model = Document
fields = ('id', 'title', 'content', 'access', 'author_id', 'user',
'created_at', 'updated_at')
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
from google.cloud import storage
from google.protobuf import json_format
from google.cloud import vision as vision
from google.cloud.vision import enums
from google.cloud.vision import types
import pathlib
# Resolve the path from this sample code's directory to the directory containing the image data
image_dir = pathlib.Path(__file__).parent.parent.resolve() / 'image_data'
features = [
types.Feature(type=enums.Feature.Type.DOCUMENT_TEXT_DETECTION),]
requests = []
client = vision.ImageAnnotatorClient()
image_files = list(image_dir.glob('test_jpn_*.jpg'))
# image_files = ['test_jpn_01.jpg', 'test_jpn_02.jpg', 'test_jpn_03.jpg']
for filename in image_files:
with open(filename, 'rb') as image_file:
image = types.Image(content = image_file.read())
# image = types.Image(source= '')
request = types.AnnotateImageRequest(image=image, features=features)
requests.append(request)
response = client.batch_annotate_images(requests)
for rp in response.responses:
print(rp.text_annotations[0].description)
|
from ctapipe.visualization import CameraDisplay
__all__ = ['plot_muon_event',
]
def plot_muon_event(ax, geom, image, centroid, ringrad_camcoord,
ringrad_inner, ringrad_outer, event_id):
"""
    Plot a muon ring event over the camera image.
    Parameters
    ----------
    ax: `matplotlib.pyplot.axis`
    geom: CameraGeometry
    image: camera image to display
    centroid: `float` centroid of the muon ring
    ringrad_camcoord: `float` ring radius in camera coordinates
    ringrad_inner: `float` inner ring radius in camera coordinates
    ringrad_outer: `float` outer ring radius in camera coordinates
    event_id: `int` id of the analyzed event
    Returns
    -------
ax: `matplotlib.pyplot.axis`
"""
disp0 = CameraDisplay(geom, ax=ax)
disp0.image = image
disp0.cmap = 'viridis'
disp0.add_colorbar(ax=ax)
disp0.add_ellipse(centroid, ringrad_camcoord.value,
ringrad_camcoord.value, 0., 0., color="red")
disp0.add_ellipse(centroid, ringrad_inner.value,
ringrad_inner.value, 0., 0.,
color="magenta")
disp0.add_ellipse(centroid, ringrad_outer.value,
ringrad_outer.value, 0., 0.,
color="magenta")
ax.set_title(f"Event {event_id}")
return ax
|
# Generated by Django 3.0.10 on 2021-05-17 16:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cms', '0078_auto_20210506_1329'),
]
operations = [
migrations.AlterModelOptions(
name='glossaryterm',
options={'ordering': ['term_raw'], 'verbose_name': 'Glossary term', 'verbose_name_plural': 'Glossary terms'},
),
migrations.AddField(
model_name='glossaryterm',
name='term_raw',
field=models.CharField(blank=True, max_length=256, null=True),
),
]
|
import re
from .get_string_similarity import get_string_similarity
class Word:
def __init__(self, string):
if string is None: string = ''
if isinstance(string, self.__class__):
string = string._string
else:
string = str(string)
self._string = re.sub(r'\W+', '', string)
@property
def string(self):
return self._string
@property
def initial(self):
return self.string[0].upper()
@property
def length(self):
return len(self._string)
def __sub__(self, other):
"""
:type other: Word
:rtype: float
"""
return self.get_similarity(other=other)
def get_similarity(self, other, case_sensitivity=1.0, first_char_weight=0.0, method='jaro_winkler'):
if other is None:
return 0
else:
return get_string_similarity(
s1=self.string, s2=other.string, method=method,
case_sensitivity=case_sensitivity, first_char_weight=first_char_weight
)
def __eq__(self, other):
return self.string == str(other)
def lower(self, inplace=False):
if inplace:
self._string = self.string.lower()
else:
return self.__class__(string=self.string.lower())
def __repr__(self):
return self._string
def __str__(self):
return self._string
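if __name__ == '__main__':
    # Minimal demo (illustrative; run within the package so the relative import above resolves):
    w = Word('Hello, world!')
    print(w)          # Helloworld -- non-word characters are stripped
    print(w.initial)  # H
    print(w.length)   # 10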
|
# Split Array Largest Sum: https://leetcode.com/problems/split-array-largest-sum/
# Given an array nums which consists of non-negative integers and an integer m, you can split the array into m non-empty continuous subarrays.
# Write an algorithm to minimize the largest sum among these m subarrays.
# This solution takes advantage of a few things, the main one being that any contiguous subarray sum can be retrieved from a
# cumulative sum by subtracting a prefix out: for 7 2 5 10 8 the cumulative sums are 7 9 14 24 32, and the sum of everything
# after index 2 is 32 - 14 (sum[2]) = 18
# This means that at any point we can get the subarray pieces; we then loop, increasing the size of the subarrays, to cover
# all variations
# We track this with a dp array in which we build the min-maxed value for 1 to i, where i is the size of the largest possible subarray
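# A minimal, illustrative check of the prefix-sum trick described above: prefix[i] - prefix[k] is the sum of nums[k:i]
_nums = [7, 2, 5, 10, 8]
_prefix = [0]
for _n in _nums:
    _prefix.append(_prefix[-1] + _n)
assert _prefix[5] - _prefix[3] == 10 + 8  # the tail subarray [10, 8] sums to 18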
class Solution:
def splitArray(self, nums, m):
ourSum = [0]
for num in nums:
ourSum.append(ourSum[-1] + num)
        # Since we want to know the minimized max we need to set our dp to infinity except for the starting point
dp = [[float('inf')] * (len(nums) + 1) for _ in range(m+1)]
dp[0][0] = 0
        # Loop through the whole dp array
i = 1
while i <= len(nums):
# For every possible size of j from 1 - m go across
j = 1
while j <= m:
                # Once we have the current max we need to check the previous section's max to update our dp table
                # It equals the minimum of what is in the dp array or the max of the current summation (sum[i] - sum[k])
                # It needs to check every subarray from 0 to the current row
k = 0
while k < i:
# For visibility
print(i, j, k)
currentSolution = dp[i][j]
potentialSum = ourSum[i] - ourSum[k]
# This is the previous potential splitting
lastSeenSolution = dp[k][j-1]
dp[i][j] = min(currentSolution, max(
lastSeenSolution, potentialSum))
if k == 0:
k += 1
else:
k += k
j += j
i += i
return dp[len(nums)][m]
# Unfortunately there is still a bug within this code but I am not sure what it is yet and my time is up
# This isn't even the most optimal solution as it runs in O(n^2 * m) time and O(n * m) space as we compute every possible solution until we find k
# Apparently the optimized solution requires a binary search, which I haven't had the chance to read through this problem enough times to understand
# Also during review of this problem the following adjustments were needed: loop over the splits in the outer loop and walk k
# backwards in order to successfully solve this problem (wrapped in a function here so the module runs):
def splitArrayReviewed(nums, m):
    ourSum = [0]
    for num in nums:
        ourSum.append(ourSum[-1] + num)
    # Since we want to know the minimized max we need to set our dp to infinity except for the starting point
    dp = [[float('inf')] * (len(nums) + 1) for _ in range(m+1)]
    dp[0][0] = 0
    # Loop through the whole dp array
    for i in range(1, m+1):
        # For every possible prefix length j from 1 to len(nums) go across
        for j in range(1, (len(nums)+1)):
            # Once we have the current max we need to check the previous section's max to update our dp table
            # It equals the minimum of what is in the dp array or the max of the current summation (sum[j] - sum[k])
            # It needs to check every subarray from 0 to the current row
            for k in range(j-1, -1, -1):
                # For visibility
                print(i, j, k)
                currentSolution = dp[i][j]
                potentialSum = ourSum[j] - ourSum[k]
                # This is the previous potential splitting
                lastSeenSolution = dp[i-1][k]
                dp[i][j] = min(currentSolution, max(
                    lastSeenSolution, potentialSum))
                if potentialSum > dp[i-1][k]:
                    break
    return dp[-1][-1]
A = Solution()
# A.splitArray above still carries the stride bugs noted; the reviewed version returns the expected answer:
print(splitArrayReviewed([7, 2, 5, 10, 8], 2))  # 18
# Score Card
# Did I need hints? No, although I did look at the optimal solution even though I didn't get to it
# Did you finish within 30 min? N
# Was the solution optimal? See above
# Were there any bugs? I listed bugs in the above code
# 1 1 1 1 = 1
|
from my_lambdata.ds_ulitities import enlarge
print(enlarge(5))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
from subprocess import Popen, PIPE, STDOUT
p = Popen(['python', 'google_search.py'], stdout=PIPE, stdin=PIPE, stderr=STDOUT)
p_stdout, p_stderr = p.communicate(input='Котики в чашке'.encode())
print(p_stdout.decode())
|
from functools import partial
class Base:
async def get_data(self):
raise NotImplementedError("Please fill this out")
async def __anext__(self):
return await self.get_data()
def __aiter__(self):
return self
def __or__(self, other):
return other(self)
@classmethod
def with_args(cls, *args, **kwargs):
return partial(cls, *args, **kwargs)
class SyncBase:
def get_data(self):
raise NotImplementedError("Please fill this out")
def __next__(self):
return self.get_data()
def __iter__(self):
return self
def __or__(self, other):
return other(self)
@classmethod
def with_args(cls, *args, **kwargs):
return partial(cls, *args, **kwargs)
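# Illustrative sketch (not part of the original module): a finite SyncBase source piped into a consumer via the | operator
class _CountTo(SyncBase):
    def __init__(self, limit):
        self.n = 0
        self.limit = limit
    def get_data(self):
        if self.n >= self.limit:
            raise StopIteration  # ends iteration once the limit is reached
        self.n += 1
        return self.n
if __name__ == "__main__":
    print(_CountTo(3) | list)  # list(_CountTo(3)) -> [1, 2, 3]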
|
import csv
import math
import os
import random
import re
import time
from pathlib import Path
import pygame
from PIL import Image, ImageOps
from gamescript import commonscript, readstat, menu, battleui
from gamescript.arcade import longscript
rotationxy = commonscript.rotationxy
load_image = commonscript.load_image
load_images = commonscript.load_images
load_base_button = commonscript.load_base_button
stat_convert = readstat.stat_convert
setrotate = longscript.setrotate
main_dir = os.path.split(os.path.abspath(__file__))[0]
default_sprite_size = (150, 150)
screen_size = (1000, 1000)
screen_scale = (screen_size[0] / 1000, screen_size[1] / 1000)
pygame.init()
pen = pygame.display.set_mode(screen_size)
pygame.display.set_caption("Animation Maker") # set the game name on program border/tab
pygame.mouse.set_visible(True) # set mouse as visible
direction_list = ("front", "side", "back", "sideup", "sidedown")
def setuplist(itemclass, currentrow, showlist, itemgroup, box, uiclass, layer=4):
"""generate list of subsection of the left side of encyclopedia"""
widthadjust = screen_scale[0]
heightadjust = screen_scale[1]
row = 5 * heightadjust
column = 5 * widthadjust
pos = box.rect.topleft
if currentrow > len(showlist) - box.maxshowlist:
currentrow = len(showlist) - box.maxshowlist
    if len(itemgroup) > 0:  # remove previous sprites in the group before generating new ones
for stuff in itemgroup:
stuff.kill()
del stuff
for index, item in enumerate(showlist):
if index >= currentrow:
itemgroup.add(itemclass(None, box, (pos[0] + column, pos[1] + row), item, layer=layer)) # add new subsection sprite to group
row += (30 * heightadjust) # next row
if len(itemgroup) > box.maxshowlist:
break # will not generate more than space allowed
uiclass.add(*itemgroup)
def listscroll(mouse_scrollup, mouse_scrolldown, scroll, listbox, currentrow, namelist, namegroup, uiclass, layer=3):
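    """Scroll the name list one row up or down, regenerate the visible entries, and return the (possibly clamped) current row"""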
if mouse_scrollup:
currentrow -= 1
if currentrow < 0:
currentrow = 0
else:
setuplist(menu.NameList, currentrow, namelist, namegroup, listbox, uiclass, layer=layer)
scroll.changeimage(newrow=currentrow, logsize=len(namelist))
elif mouse_scrolldown:
currentrow += 1
if currentrow + listbox.maxshowlist - 1 < len(namelist):
setuplist(menu.NameList, currentrow, namelist, namegroup, listbox, uiclass, layer=layer)
scroll.changeimage(newrow=currentrow, logsize=len(namelist))
else:
currentrow -= 1
return currentrow
def popuplist_newopen(action, newrect, newlist, uitype):
"""Move popup_listbox and scroll sprite to new location and create new name list baesd on type"""
currentpopuprow = 0
if uitype == "top":
popup_listbox.rect = popup_listbox.image.get_rect(topleft=newrect)
elif uitype == "bottom":
popup_listbox.rect = popup_listbox.image.get_rect(bottomleft=newrect)
popup_listbox.namelist = newlist
popup_listbox.action = action
setuplist(menu.NameList, 0, newlist, popup_namegroup,
popup_listbox, ui, layer=19)
popup_listscroll.pos = popup_listbox.rect.topright # change position variable
    popup_listscroll.rect = popup_listscroll.image.get_rect(topleft=popup_listbox.rect.topright)
popup_listscroll.changeimage(newrow=0, logsize=len(newlist))
ui.add(popup_listbox, *popup_namegroup, popup_listscroll)
popup_listbox.type = uitype
def load_textures(main_dir, subfolder=None):
"""loads all body sprite part image"""
imgs = {}
dirpath = os.path.join(main_dir, "data")
if subfolder is not None:
for folder in subfolder:
dirpath = os.path.join(dirpath, folder)
    loadorderfile = [f for f in os.listdir(dirpath) if f.endswith(".png")]  # read all png files
loadorderfile.sort(key=lambda var: [int(x) if x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)])
for file in loadorderfile:
imgs[file.split(".")[0]] = load_image(main_dir, file, dirpath)
return imgs
race_list = []
race_acro = []
with open(os.path.join(main_dir, "data", "troop", "troop_race.csv"), encoding="utf-8", mode="r") as unitfile:
rd = csv.reader(unitfile, quoting=csv.QUOTE_ALL)
for row in rd:
if "," in row[-2]: # make str with , into list
thisruleset = [int(item) if item.isdigit() else item for item in row[-2].split(",")]
else:
thisruleset = [row[-2]]
for n, i in enumerate(row):
if i.isdigit() or ("." in i and re.search("[a-zA-Z]", i) is None) or i == "inf":
row[n] = float(i)
race_list.append(row[1].lower())
race_acro.append(row[3])
unitfile.close()
race_list = race_list[2:] # remove header and any race
race_acro = race_acro[2:]
race_accept = ["human"] # for now accept only human race
generic_animation_pool = []
for direction in direction_list:
with open(os.path.join(main_dir, "data", "arcade", "animation", "generic", direction, "animation.csv"), encoding="utf-8", mode="r") as unitfile:
rd = csv.reader(unitfile, quoting=csv.QUOTE_ALL)
rd = [row for row in rd]
part_name_header = rd[0]
list_column = ["p1_head", "p1_face", "p1_body", "p1_r_arm_up", "p1_r_arm_low", "p1_r_hand", "p1_l_arm_up",
"p1_l_arm_low", "p1_l_hand", "p1_r_leg", "p1_r_foot", "p1_l_leg", "p1_l_foot",
"p1_main_weapon", "p1_sub_weapon", "p2_head", "p2_face", "p2_body", "p2_r_arm_up", "p2_r_arm_low", "p2_r_hand",
"p2_l_arm_up", "p2_l_arm_low", "p2_l_hand", "p2_r_leg", "p2_r_foot", "p2_l_leg",
"p2_l_foot", "p2_main_weapon", "p2_sub_weapon", "effect_1", "effect_2", "dmg_effect_1", "dmg_effect_2"] # value in list only
list_column = [index for index, item in enumerate(part_name_header) if item in list_column]
part_name_header = part_name_header[1:] # keep only part name for list ref later
animation_pool = {}
for row_index, row in enumerate(rd):
if row_index > 0:
key = row[0].split("/")[0]
for n, i in enumerate(row):
row = stat_convert(row, n, i, list_column=list_column)
row = row[1:]
if key in animation_pool:
animation_pool[key].append({part_name_header[item_index]: item for item_index, item in enumerate(row)})
else:
animation_pool[key] = [{part_name_header[item_index]: item for item_index, item in enumerate(row)}]
generic_animation_pool.append(animation_pool)
part_name_header = [item for item in part_name_header if item not in ("effect", "property")]
unitfile.close()
skel_joint_list = []
for race in race_list:
if race in race_accept:
for direction in direction_list:
with open(os.path.join(main_dir, "data", "arcade", "sprite", "generic", race, direction, "skeleton_link.csv"), encoding="utf-8",
mode="r") as unitfile:
rd = csv.reader(unitfile, quoting=csv.QUOTE_ALL)
rd = [row for row in rd]
header = rd[0]
list_column = ["Position"] # value in list only
list_column = [index for index, item in enumerate(header) if item in list_column]
joint_list = {}
for row_index, row in enumerate(rd):
if row_index > 0:
for n, i in enumerate(row):
row = stat_convert(row, n, i, list_column=list_column)
key = row[0].split("/")[0]
if key in joint_list:
joint_list[key].append({row[1:][0]: pygame.Vector2(row[1:][1])})
else:
joint_list[key] = [{row[1:][0]: pygame.Vector2(row[1:][1])}]
skel_joint_list.append(joint_list)
unitfile.close()
with open(os.path.join(main_dir, "data", "arcade", "sprite", "generic", "skin_colour_rgb.csv"), encoding="utf-8",
mode="r") as unitfile:
rd = csv.reader(unitfile, quoting=csv.QUOTE_ALL)
rd = [row for row in rd]
header = rd[0]
skin_colour_list = {}
int_column = ["red", "green", "blue"] # value in list only
int_column = [index for index, item in enumerate(header) if item in int_column]
for row_index, row in enumerate(rd):
if row_index > 0:
for n, i in enumerate(row):
row = stat_convert(row, n, i, int_column=int_column)
key = row[0].split("/")[0]
skin_colour_list[key] = row[1:]
gen_body_sprite_pool = {}
for race in race_list:
if race in race_accept:
gen_body_sprite_pool[race] = {}
for direction in direction_list:
gen_body_sprite_pool[race][direction] = {}
partfolder = Path(os.path.join(main_dir, "data", "arcade", "sprite", "generic", race, direction))
subdirectories = [str(x).split("data\\")[1].split("\\") for x in partfolder.iterdir() if x.is_dir()]
for folder in subdirectories:
imgs = load_textures(main_dir, folder)
gen_body_sprite_pool[race][direction][folder[-1]] = imgs
gen_weapon_sprite_pool = {}
weapon_list = ["sword"]
for weapon in weapon_list:
for direction in direction_list:
partfolder = Path(os.path.join(main_dir, "data", "arcade", "sprite", "generic", "weapon", direction))
subdirectories = [str(x).split("data\\")[1].split("\\") for x in partfolder.iterdir() if x.is_dir()]
for folder in subdirectories:
if folder[-1] not in gen_weapon_sprite_pool:
gen_weapon_sprite_pool[folder[-1]] = {}
gen_weapon_sprite_pool[folder[-1]][direction] = {}
imgs = load_textures(main_dir, folder)
gen_weapon_sprite_pool[folder[-1]][direction] = imgs
class Showroom(pygame.sprite.Sprite):
def __init__(self, size):
"""White space for showing off sprite and animation"""
self._layer = 10
pygame.sprite.Sprite.__init__(self)
self.size = (int(size[0]), int(size[1]))
self.image = pygame.Surface(self.size)
self.image.fill((255, 255, 255))
self.rect = self.image.get_rect(center=(screen_size[0] / 2, screen_size[1] / 2))
self.grid = True
def update(self, *args):
self.image.fill((255, 255, 255))
if self.grid:
grid_width = self.image.get_width() / 10
grid_height = self.image.get_height() / 10
for loop in range(1, 10):
pygame.draw.line(self.image, (0, 0, 0), (grid_width * loop, 0), (grid_width * loop, self.image.get_height()))
pygame.draw.line(self.image, (0, 0, 0), (0, grid_height * loop), (self.image.get_width(), grid_height * loop))
class Filmstrip(pygame.sprite.Sprite):
"""animation sprite filmstrip, always no more than 10 per animation"""
image_original = None
def __init__(self, pos):
self._layer = 5
pygame.sprite.Sprite.__init__(self, self.containers)
self.pos = pos
self.image = self.image_original.copy() # original no sprite
self.image_original2 = self.image_original.copy() # after add sprite but before adding selected corner
self.rect = self.image.get_rect(topleft=self.pos)
self.image_scale = (self.image.get_width() / 100, self.image.get_height() / 120)
self.blitimage = None
self.strip_rect = None
self.activate = True
def update(self, *args):
self.image = self.image_original2.copy()
def selected(self, select=False):
self.image = self.image_original2.copy()
select_colour = (200, 100, 100)
if self.activate:
select_colour = (150, 200, 100)
if select:
pygame.draw.rect(self.image, select_colour, (0, 0, self.image.get_width(), self.image.get_height()), 15)
def add_strip(self, image=None):
self.image = self.image_original.copy()
if image is not None:
self.blitimage = pygame.transform.scale(image.copy(), (int(100 * self.image_scale[0]), int(100 * self.image_scale[1])))
self.strip_rect = self.blitimage.get_rect(center=(self.image.get_width() / 2, self.image.get_height() / 2))
self.image.blit(self.blitimage, self.strip_rect)
self.image_original2 = self.image.copy()
if self.activate is False:
pygame.draw.rect(self.image_original2, (0, 0, 0), (0, 0, self.image.get_width(), self.image.get_height()), 15)
class Button(pygame.sprite.Sprite):
"""Normal button"""
def __init__(self, text, image, pos, fontsize=20):
self._layer = 5
pygame.sprite.Sprite.__init__(self, self.containers)
self.font = pygame.font.SysFont("helvetica", int(fontsize * screen_scale[1]))
self.image = image.copy()
self.image_original = self.image.copy()
self.text = text
self.pos = pos
textsurface = self.font.render(str(text), 1, (0, 0, 0))
textrect = textsurface.get_rect(center=(int(self.image.get_width() / 2), int(self.image.get_height() / 2)))
self.image.blit(textsurface, textrect)
self.rect = self.image.get_rect(center=self.pos)
def change_text(self, text):
if text != self.text:
self.image = self.image_original.copy()
self.text = text
textsurface = self.font.render(self.text.capitalize(), 1, (0, 0, 0))
textrect = textsurface.get_rect(center=(int(self.image.get_width() / 2), int(self.image.get_height() / 2)))
self.image.blit(textsurface, textrect)
self.rect = self.image.get_rect(center=self.pos)
class SwitchButton(pygame.sprite.Sprite):
"""Button that switch text/option"""
def __init__(self, text_list, image, pos, fontsize=20):
self._layer = 5
pygame.sprite.Sprite.__init__(self, self.containers)
self.font = pygame.font.SysFont("helvetica", int(fontsize * screen_scale[1]))
self.pos = pos
self.current_option = 0
self.image_original = image
self.image = self.image_original.copy()
self.text_list = text_list
self.change_text(self.text_list[self.current_option])
self.rect = self.image.get_rect(center=self.pos)
def change_option(self, option):
if self.current_option != option:
self.current_option = option
self.image = self.image_original.copy()
self.change_text(self.text_list[self.current_option])
def change_text(self, text):
textsurface = self.font.render(str(text), 1, (0, 0, 0))
textrect = textsurface.get_rect(center=(int(self.image.get_width() / 2), int(self.image.get_height() / 2)))
self.image.blit(textsurface, textrect)
class Bodyhelper(pygame.sprite.Sprite):
def __init__(self, size, pos, type, part_images):
self._layer = 6
pygame.sprite.Sprite.__init__(self, self.containers)
self.fontsize = int(14 * screen_scale[1])
self.font = pygame.font.SysFont("helvetica", self.fontsize)
self.boxfont = pygame.font.SysFont("helvetica", int(22 * screen_scale[1]))
self.size = size
self.image = pygame.Surface(self.size, pygame.SRCALPHA)
self.image.fill((255,255,255))
pygame.draw.rect(self.image, (100,150,150), (0, 0, self.image.get_width(), self.image.get_height()), 3)
self.image_original = self.image.copy() # for original before add part and click
self.rect = self.image.get_rect(center=pos)
self.type = type
if self.type in ("p1", "p2"):
self.part_images_original = [image.copy() for image in part_images]
empytybox = self.part_images_original[-1]
self.part_images_original = self.part_images_original[:-1]
for boxpart in ("W1", "W2"):
textsurface = self.boxfont.render(boxpart, 1, (0, 0, 0))
textrect = textsurface.get_rect(center=(empytybox.get_width() / 2, empytybox.get_height() / 2))
newbox = empytybox.copy()
newbox.blit(textsurface, textrect)
self.part_images_original.append(newbox)
self.part_images = [image.copy() for image in self.part_images_original]
self.part_selected = []
self.stat1 = {}
self.stat2 = {}
if self.type == "p1":
self.rect_part_list = {"p1_head": None, "p1_body": None, "p1_r_arm_up": None, "p1_r_arm_low": None, "p1_r_hand": None,
"p1_l_arm_up": None, "p1_l_arm_low": None, "p1_l_hand": None, "p1_r_leg": None, "p1_r_foot": None,
"p1_l_leg": None, "p1_l_foot": None, "p1_main_weapon": None, "p1_sub_weapon": None}
self.part_pos = {"p1_head": (185, 85), "p1_body": (185, 148), "p1_r_arm_up": (155, 126), "p1_r_arm_low": (155, 156),
"p1_r_hand": (155, 187), "p1_l_arm_up": (215, 126), "p1_l_arm_low": (215, 156), "p1_l_hand": (215, 187),
"p1_r_leg": (170, 216), "p1_r_foot": (170, 246), "p1_l_leg": (200, 216), "p1_l_foot": (200, 246),
"p1_main_weapon": (165, 30), "p1_sub_weapon": (205, 30)}
elif self.type == "p2":
self.rect_part_list = {"p2_head": None, "p2_body": None, "p2_r_arm_up": None, "p2_r_arm_low": None,
"p2_r_hand": None, "p2_l_arm_up": None, "p2_l_arm_low": None, "p2_l_hand": None,
"p2_r_leg": None, "p2_r_foot": None, "p2_l_leg": None, "p2_l_foot": None,
"p2_main_weapon": None, "p2_sub_weapon": None}
self.part_pos = {"p2_head": (185, 85), "p2_body": (185, 148), "p2_r_arm_up": (155, 126), "p2_r_arm_low": (155, 156),
"p2_r_hand": (155, 187), "p2_l_arm_up": (215, 126), "p2_l_arm_low": (215, 156), "p2_l_hand": (215, 187),
"p2_r_leg": (170, 216), "p2_r_foot": (170, 246), "p2_l_leg": (200, 216), "p2_l_foot": (200, 246),
"p2_main_weapon": (155, 30), "p2_sub_weapon": (215, 30)}
else:
self.rect_part_list = {"effect1": None, "effect2": None, "dmg_effect1": None, "dmg_effect2": None}
self.part_pos = {"effect1": None, "effect2": None, "dmg_effect1": None, "dmg_effect2": None}
for key, item in self.part_pos.items():
self.part_pos[key] = (item[0] * screen_scale[0], item[1] * screen_scale[1])
self.blit_part()
def blit_part(self):
self.image = self.image_original.copy()
for index, image in enumerate(self.part_images):
thiskey = list(self.part_pos.keys())[index]
pos = self.part_pos[thiskey]
newimage = image.copy()
if thiskey in self.part_selected: # highlight selected part
size = (newimage.get_width(), newimage.get_height())
data = pygame.image.tostring(newimage, "RGBA") # convert image to string data for filtering effect
newimage = Image.frombytes("RGBA", size, data) # use PIL to get image data
alpha = newimage.split()[-1] # save alpha
newimage = newimage.convert("L") # convert to grey scale for colourise
newimage = ImageOps.colorize(newimage, black="black", mid="green", white="green").convert("RGB")
newimage.putalpha(alpha) # put back alpha
newimage = newimage.tobytes()
newimage = pygame.image.fromstring(newimage, size, "RGBA") # convert image back to a pygame surface
rect = newimage.get_rect(center=pos)
self.image.blit(newimage, rect)
self.rect_part_list[thiskey] = rect
def select(self, check_mouse_pos, shift_press, specific_part=None):
if specific_part is not None:
if specific_part is False:
self.part_selected = []
elif specific_part in list(self.part_pos.keys()):
if shift_press:
self.part_selected.append(specific_part)
else:
self.part_selected = [specific_part]
self.blit_part()
else:
click_any = False
if check_mouse_pos is not None:
for index, rect in enumerate(self.rect_part_list):
thisrect = self.rect_part_list[rect]
if thisrect is not None and thisrect.collidepoint(check_mouse_pos):
click_any = True
if shift_press:
self.part_selected.append(list(self.part_pos.keys())[index])
else:
self.part_selected = [list(self.part_pos.keys())[index]]
break
elif check_mouse_pos is None or (click_any is False and shift_press is False):
self.part_selected = []
self.blit_part()
self.add_stat()
def add_stat(self):
for index, part in enumerate(self.rect_part_list.keys()):
if self.stat2 is not None and part in self.stat2 and self.stat1[part] is not None and self.stat2[part] is not None:
stat = self.stat1[part] + self.stat2[part]
if len(stat) > 3:
stat.pop(3)
stat.pop(3)
if stat[0] in race_acro:
stat[0] = race_acro[race_list.index(stat[0])]
newchange = ["S", "F", "B", "CU", "CD"]
for index, change in enumerate(["side", "front", "back", "sideup", "sidedown"]):
if stat[1] == index:
stat[1] = newchange[index]
stat[2] = str(stat[2])
if len(stat) > 3:
stat[3] = str([stat[3][0], stat[3][1]])
for index, change in enumerate(["F", "FH", "FV", "FHV"]):
if stat[5] == index:
stat[5] = change
stat[4] = str(round(stat[4], 1))
stat[6] = "L" + str(int(stat[6]))
stat1 = stat[0:3] # first line with name
# stat1.append(stat[-1])
stat1 = str(stat1).replace("'", "")
stat1 = stat1[1:-1]
stat2 = stat[3:] # second line with stat
stat2 = str(stat2).replace("'", "")
stat2 = stat2[1:-1]
# except TypeError:
# stat1 = ""
# stat2 = ""
textcolour = (0, 0, 0)
if part in self.part_selected: # green text for selected part
textcolour = (20, 90, 20)
textsurface1 = self.font.render(stat1, 1, textcolour)
textsurface2 = self.font.render(stat2, 1, textcolour)
shiftx = 50 * screen_scale[0]
if "body" in part:
headname = "p1_head"
if "p2" in part:
headname = "p2_head"
textrect1 = textsurface1.get_rect(midleft=(self.part_pos[headname][0] + shiftx, self.part_pos[headname][1] - 5))
textrect2 = textsurface2.get_rect(midleft=(self.part_pos[headname][0] + shiftx, self.part_pos[headname][1] - 5 + self.fontsize + 2))
elif "head" in part:
textrect1 = textsurface1.get_rect(midright=(self.part_pos[part][0] - shiftx, self.part_pos[part][1] - 10))
textrect2 = textsurface2.get_rect(midright=(self.part_pos[part][0] - shiftx, self.part_pos[part][1] - 10 + self.fontsize + 2))
else:
shiftx = 14 * screen_scale[0]
if any(ext in part for ext in ["weapon", "effect", "special"]):
shiftx = 26 * screen_scale[0]
if self.part_pos[part][0] > self.image.get_width() / 2:
textrect1 = textsurface1.get_rect(midleft=(self.part_pos[part][0] + shiftx, self.part_pos[part][1] - 15))
textrect2 = textsurface2.get_rect(midleft=(self.part_pos[part][0] + shiftx, self.part_pos[part][1] - 15 + self.fontsize + 2))
else:
textrect1 = textsurface1.get_rect(midright=(self.part_pos[part][0] - shiftx, self.part_pos[part][1] - 15))
textrect2 = textsurface2.get_rect(midright=(self.part_pos[part][0] - shiftx, self.part_pos[part][1] - 15 + self.fontsize + 2))
self.image.blit(textsurface1, textrect1)
self.image.blit(textsurface2, textrect2)
# else:
# textsurface = self.font.render("None", 1, (0, 0, 0))
# textrect = textsurface.get_rect(midleft=self.part_pos[part])
# self.image.blit(textsurface, textrect)
class SideChoose:
def __init__(self):
pass
class NameBox(pygame.sprite.Sprite):
def __init__(self, size, pos):
self._layer = 6
pygame.sprite.Sprite.__init__(self, self.containers)
self.fontsize = int(24 * screen_scale[1])
self.font = pygame.font.SysFont("helvetica", int(self.fontsize * screen_scale[1]))
self.size = size
self.image = pygame.Surface(self.size)
self.image.fill((255, 255, 255))
pygame.draw.rect(self.image, (150, 200, 0), (0, 0, self.image.get_width(), self.image.get_height()), 2)
self.image_original = self.image.copy()
self.pos = pos
self.rect = self.image.get_rect(midtop=self.pos)
self.text = None
def change_name(self, text):
if text != self.text:
self.image = self.image_original.copy()
self.text = text
textsurface = self.font.render(self.text, 1, (0, 0, 0))
textrect = textsurface.get_rect(center=(int(self.image.get_width() / 2), int(self.image.get_height() / 2)))
self.image.blit(textsurface, textrect)
class Skeleton:
def __init__(self):
self.animation_list = []
self.animation_part_list = []
self.side = 1 # 0 = front, 1 = side, 2 = back, 3 = sideup, 4 = sidedown
self.rect_part_list = {"p1_head": None, "p1_body": None, "p1_r_arm_up": None, "p1_r_arm_low": None, "p1_r_hand": None,
"p1_l_arm_up": None, "p1_l_arm_low": None, "p1_l_hand": None, "p1_r_leg": None, "p1_r_foot": None,
"p1_l_leg": None, "p1_l_foot": None, "p1_main_weapon": None, "p1_sub_weapon": None,
"p2_head": None, "p2_body": None, "p2_r_arm_up": None, "p2_r_arm_low": None,
"p2_r_hand": None, "p2_l_arm_up": None, "p2_l_arm_low": None, "p2_l_hand": None,
"p2_r_leg": None, "p2_r_foot": None, "p2_l_leg": None, "p2_l_foot": None,
"p2_main_weapon": None, "p2_sub_weapon": None}
self.part_selected = []
self.not_show = []
self.p1_race = "human"
self.p2_race = "human"
skin = list(skin_colour_list.keys())[random.randint(0, len(skin_colour_list) - 1)]
skin_colour = skin_colour_list[skin]
self.hair_colour = [random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)]
self.eye_colour = [random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)]
self.weapon = {"p1_main_weapon":"sword", "p1_sub_weapon":None, "p2_main_weapon":None, "p2_sub_weapon":None}
self.empty_sprite_part = [0,pygame.Vector2(0, 0), [50, 50],0,0,0]
self.randomface()
self.read_animation(list(animation_pool.keys())[0])
def randomface(self): # todo change when add option to change face
self.p1_eyebrow = list(gen_body_sprite_pool[self.p1_race]["side"]["eyebrow"].keys())[
random.randint(0, len(gen_body_sprite_pool[self.p1_race]["side"]["eyebrow"]) - 1)]
self.p1_eye = list(gen_body_sprite_pool[self.p1_race]["side"]["eye"].keys())[
random.randint(0, len(gen_body_sprite_pool[self.p1_race]["side"]["eye"]) - 1)]
self.p1_mouth = list(gen_body_sprite_pool[self.p1_race]["side"]["mouth"].keys())[
random.randint(0, len(gen_body_sprite_pool[self.p1_race]["side"]["mouth"]) - 1)]
self.p1_beard = list(gen_body_sprite_pool[self.p1_race]["side"]["beard"].keys())[
random.randint(0, len(gen_body_sprite_pool[self.p1_race]["side"]["beard"]) - 1)]
self.p2_eyebrow = list(gen_body_sprite_pool[self.p2_race]["side"]["eyebrow"].keys())[
random.randint(0, len(gen_body_sprite_pool[self.p2_race]["side"]["eyebrow"]) - 1)]
        self.p2_eye = list(gen_body_sprite_pool[self.p2_race]["side"]["eye"].keys())[
            random.randint(0, len(gen_body_sprite_pool[self.p2_race]["side"]["eye"]) - 1)]
self.p2_mouth = list(gen_body_sprite_pool[self.p2_race]["side"]["mouth"].keys())[
random.randint(0, len(gen_body_sprite_pool[self.p2_race]["side"]["mouth"]) - 1)]
self.p2_beard = list(gen_body_sprite_pool[self.p2_race]["side"]["beard"].keys())[
random.randint(0, len(gen_body_sprite_pool[self.p2_race]["side"]["beard"]) - 1)]
def read_animation(self, name):
# sprite animation generation
animation_list = [generic_animation_pool[self.side][name]]
self.animation_part_list = []
self.bodypart_list = [{key: None for key in self.rect_part_list.keys()}] * 10
self.part_name_list = [{key: None for key in self.rect_part_list.keys()}] * 10
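        # note: the "* 10" repeats references to one placeholder dict; safe here because each index is reassigned below, never mutated in place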
for animation in animation_list:
for index, pose in enumerate(animation):
link_list = {key: None for key in self.rect_part_list.keys()}
bodypart_list = {key: None for key in self.rect_part_list.keys()}
bodypart_list.update({"p1_eye": None, "p1_mouth": None, "p2_eye": None, "p2_mouth": None})
for part in pose:
if pose[part] != [0] and part != "property":
if "eye" not in part and "mouth" not in part:
if "weapon" in part:
if pose[part][1] in gen_weapon_sprite_pool[self.weapon[part]][pose[part][0]]:
link_list[part] = [pose[part][2], pose[part][3]]
bodypart_list[part] = [self.weapon[part], pose[part][0], pose[part][1]]
else:
link_list[part] = [pose[part][3], pose[part][4]]
bodypart_list[part] = [pose[part][0], pose[part][1], pose[part][2]]
elif pose[part] != 0:
bodypart_list[part] = pose[part]
else:
bodypart_list[part] = 1.0
self.bodypart_list[index] = bodypart_list
main_joint_pos_list = self.generate_body(bodypart_list)
self.sprite_part = {key: None for key in self.rect_part_list.keys()}
part_name = {key: None for key in self.rect_part_list.keys()}
exceptlist = ["eye", "mouth"]
for part in part_name_header:
if pose[part] != [0] and any(ext in part for ext in exceptlist) is False:
if "weapon" in part:
self.sprite_part[part] = [self.sprite_image[part],
(self.sprite_image[part].get_width() / 2, self.sprite_image[part].get_height() / 2),
link_list[part], pose[part][4], pose[part][5], pose[part][-1]]
part_name[part] = [self.weapon[part], pose[part][0], pose[part][1]]
else:
self.sprite_part[part] = [self.sprite_image[part], main_joint_pos_list[part], link_list[part], pose[part][5],
pose[part][6], pose[part][-1]]
part_name[part] = [pose[part][0], pose[part][1], pose[part][2]]
pose_layer_list = {k: v[-1] for k, v in self.sprite_part.items() if v is not None}
pose_layer_list = dict(sorted(pose_layer_list.items(), key=lambda item: item[1], reverse=True))
self.animation_part_list.append(self.sprite_part)
self.part_name_list[index] = part_name
image = self.create_animation_film(pose_layer_list)
self.animation_list.append(image)
print(self.part_name_list)
self.default_sprite_part = {key: (value[:] if value is not None else value) for key, value in self.animation_part_list[0].items()}
self.default_body_part = {key: value for key, value in self.bodypart_list[0].items()}
self.default_part_name = {key: value for key, value in self.part_name_list[0].items()}
def create_animation_film(self, pose_layer_list, empty=False):
image = pygame.Surface((150, 150), pygame.SRCALPHA) # default size will scale down later
for key, value in self.rect_part_list.items(): # reset rect list
self.rect_part_list[key] = None
if empty is False:
for index, layer in enumerate(pose_layer_list):
if layer not in self.not_show:
part = self.sprite_part[layer]
try:
image = self.part_to_sprite(image, part[0], list(self.sprite_part.keys()).index(layer),
part[1], part[2], part[3], part[4])
except IndexError:
pass
return image
def select_part(self, race, side, part, part_check, part_default):
"""For creating body part like eye or mouth in animation that accept any part (1) so use default instead"""
if part_check == 1:
surface = gen_body_sprite_pool[race][side][part][part_default].copy()
else:
surface = gen_body_sprite_pool[part_check[0]][part_check[1]][part][part_check[2]].copy()
return surface
def generate_body(self, bodypart_list):
p1_head_sprite_surface = None
try:
p1_head_race = bodypart_list["p1_head"][0]
p1_head_side = bodypart_list["p1_head"][1]
p1_head = gen_body_sprite_pool[p1_head_race][p1_head_side]["head"][bodypart_list["p1_head"][2]].copy()
p1_head_sprite_surface = pygame.Surface((p1_head.get_width(), p1_head.get_height()), pygame.SRCALPHA)
head_rect = p1_head.get_rect(midtop=(p1_head_sprite_surface.get_width() / 2, 0))
p1_head_sprite_surface.blit(p1_head, head_rect)
p1_face = [gen_body_sprite_pool[p1_head_race][p1_head_side]["eyebrow"][self.p1_eyebrow].copy(),
self.select_part(p1_head_race, p1_head_side, "eye", bodypart_list["p1_eye"], self.p1_eye),
gen_body_sprite_pool[p1_head_race][p1_head_side]["beard"][self.p1_beard].copy(),
self.select_part(p1_head_race, p1_head_side, "mouth", bodypart_list["p1_mouth"], self.p1_mouth)]
# if skin != "white":
# face[0] = self.apply_colour(face[0], skin_colour)
p1_face[0] = self.apply_colour(p1_face[0], self.hair_colour)
p1_face[2] = self.apply_colour(p1_face[2], self.hair_colour)
p1_face[1] = self.apply_colour(p1_face[1], self.eye_colour)
p1_head_sprite_surface = pygame.Surface((p1_face[2].get_width(), p1_face[2].get_height()), pygame.SRCALPHA)
head_rect = p1_head.get_rect(midtop=(p1_head_sprite_surface.get_width() / 2, 0))
p1_head_sprite_surface.blit(p1_head, head_rect)
for index, item in enumerate(p1_face):
rect = item.get_rect(topleft=(0, 0))
p1_head_sprite_surface.blit(item, rect)
except KeyError: # some head direction show no face
pass
except TypeError: # empty
pass
p2_head_sprite_surface = None
try:
p2_head_race = bodypart_list["p2_head"][0]
p2_head_side = bodypart_list["p2_head"][1]
p2_head = gen_body_sprite_pool[p2_head_race][p2_head_side]["head"][bodypart_list["p2_head"][2]].copy()
p2_head_sprite_surface = pygame.Surface((p2_head.get_width(), p2_head.get_height()), pygame.SRCALPHA)
head_rect = p2_head.get_rect(midtop=(p2_head_sprite_surface.get_width() / 2, 0))
p2_head_sprite_surface.blit(p2_head, head_rect)
p2_face = [gen_body_sprite_pool[p2_head_race][p2_head_side]["eyebrow"][self.p2_eyebrow].copy(),
self.select_part(p2_head_race, p2_head_side, "eye", bodypart_list["p2_eye"], self.p2_eye),
gen_body_sprite_pool[p2_head_race][p2_head_side]["beard"][self.p2_beard].copy(),
self.select_part(p2_head_race, p2_head_side, "mouth", bodypart_list["p2_mouth"], self.p2_mouth)]
# if skin != "white":
# face[0] = self.apply_colour(face[0], skin_colour)
p2_face[0] = self.apply_colour(p2_face[0], self.hair_colour)
p2_face[2] = self.apply_colour(p2_face[2], self.hair_colour)
p2_face[1] = self.apply_colour(p2_face[1], self.eye_colour)
p2_head_sprite_surface = pygame.Surface((p2_face[2].get_width(), p2_face[2].get_height()), pygame.SRCALPHA)
head_rect = p2_head.get_rect(midtop=(p2_head_sprite_surface.get_width() / 2, 0))
p2_head_sprite_surface.blit(p2_head, head_rect)
for index, item in enumerate(p2_face):
rect = item.get_rect(topleft=(0, 0))
p2_head_sprite_surface.blit(item, rect)
except KeyError: # some head direction show no face
pass
except TypeError: # empty
pass
self.sprite_image = {key: None for key in self.rect_part_list.keys()}
exceptlist = ["eye", "mouth", "head"]
for stuff in bodypart_list:
if bodypart_list[stuff] is not None:
if any(ext in stuff for ext in exceptlist) is False:
if "weapon" not in stuff:
partname = stuff[3:] # remove p1_ or p2_ to get part name
if "r_" in partname[0:2] or "l_" in partname[0:2]:
partname = partname[2:] # remove side
self.sprite_image[stuff] = gen_body_sprite_pool[bodypart_list[stuff][0]][bodypart_list[stuff][1]][partname][bodypart_list[stuff][2]].copy()
else:
partname = self.weapon[stuff]
if partname is not None and bodypart_list[stuff][2]:
self.sprite_image[stuff] = gen_weapon_sprite_pool[partname][bodypart_list[stuff][1]][bodypart_list[stuff][2]].copy()
elif "head" in stuff:
if "p1" in stuff:
self.sprite_image[stuff] = p1_head_sprite_surface
else:
self.sprite_image[stuff] = p2_head_sprite_surface
# if skin != "white":
# for part in list(self.sprite_image.keys())[1:]:
# self.sprite_image[part] = self.apply_colour(self.sprite_image[part], skin_colour)
main_joint_pos_list = {}
for part_index, part in enumerate(part_name_header):
for part_link in skel_joint_list[self.side]:
if part_link in part: # match part name, p1_head = head in part link #TODO change this when have p2
main_joint_pos_list[part] = list(skel_joint_list[self.side][part_link][0].values())[0]
break
return main_joint_pos_list
def click_part(self, mouse_pos, shift_press, part=None):
if part is None:
click_part = False
if shift_press is False:
self.part_selected = []
for index, rect in enumerate(self.rect_part_list):
thisrect = self.rect_part_list[rect]
if thisrect is not None and thisrect.collidepoint(mouse_pos):
click_part = True
if shift_press:
self.part_selected.append(index)
break
else:
self.part_selected = [index]
break
if click_part is False:
self.part_selected = []
else:
if shift_press:
self.part_selected.append(list(self.rect_part_list.keys()).index(part))
else:
self.part_selected = [list(self.rect_part_list.keys()).index(part)]
def edit_part(self, mouse_pos, current_frame, edit_type):
keylist = list(self.rect_part_list.keys())
if edit_type == "default": # reset to default
self.animation_part_list[current_frame] = {key: (value[:] if value is not None else value) for key, value in self.default_sprite_part.items()}
self.bodypart_list[current_frame] = {key: value for key, value in self.default_body_part.items()}
self.part_name_list[current_frame] = {key: value for key, value in self.default_part_name.items()}
self.part_selected = []
race_part_button.change_text("")
direction_part_button.change_text("")
part_selector.change_name("")
elif edit_type == "clear": # clear whole strip
self.animation_part_list[current_frame] = {}
self.part_selected = []
elif edit_type == "change": # change strip
self.part_selected = []
if current_frame > len(self.animation_part_list) - 1:
while current_frame > len(self.animation_part_list) - 1:
self.animation_part_list.append({})
self.animation_list.append(None)
surface = self.create_animation_film(None, empty=True)
self.animation_list[-1] = surface
elif "direction" in edit_type:
if self.part_selected != []:
for part in self.part_selected:
try:
part_index = keylist[part]
sidechange = edit_type.split("_")[1]
self.bodypart_list[current_frame][part_index][1] = sidechange
self.part_name_list[current_frame][part_index][1] = sidechange
main_joint_pos_list = self.generate_body(self.bodypart_list[current_frame])
self.animation_part_list[current_frame][part_index][0] = self.sprite_image[part_index]
except IndexError:
pass
except TypeError: # None type
pass
except KeyError: # change side and not found part with same name
self.part_name_list[current_frame][part_index][2] = ""
if part_index not in self.not_show:
self.not_show.append(part_index)
elif "part" in edit_type:
if self.part_selected != []:
part = self.part_selected[-1]
part_index = keylist[part]
partchange = edit_type[5:]
self.bodypart_list[current_frame][part_index][2] = partchange
self.part_name_list[current_frame][part_index][2] = partchange
                # debug: print(self.weapon)
main_joint_pos_list = self.generate_body(self.bodypart_list[current_frame])
if self.animation_part_list[current_frame][part_index] == []:
self.animation_part_list[current_frame][part_index] = self.empty_sprite_part.copy()
if "weapon" in part_index:
self.animation_part_list[current_frame][part_index][1] = "center"
else:
self.animation_part_list[current_frame][part_index][1] = main_joint_pos_list[part_index]
self.animation_part_list[current_frame][part_index][0] = self.sprite_image[part_index]
                # debug: print(self.sprite_image)
                # debug: print(self.animation_part_list[current_frame][part_index])
if part_index in self.not_show:
self.not_show.remove(part_index)
elif "race" in edit_type:
if self.part_selected != []:
part = self.part_selected[-1]
part_index = keylist[part]
if part_index in self.not_show:
self.not_show.remove(part_index)
partchange = edit_type[5:]
if "weapon" in part_index:
self.weapon[part_index] = partchange
if self.bodypart_list[current_frame][part_index] is None:
self.bodypart_list[current_frame][part_index] = [0,0,0]
# self.animation_part_list[current_frame][part_index] = []
self.part_name_list[current_frame][part_index] = ["","",""]
self.animation_part_list[current_frame][part_index] = []
self.bodypart_list[current_frame][part_index][0] = partchange
self.part_name_list[current_frame][part_index][0] = partchange
try:
main_joint_pos_list = self.generate_body(self.bodypart_list[current_frame])
self.animation_part_list[current_frame][part_index][0] = self.sprite_image[part_index]
except IndexError:
pass
except KeyError: # change side and not found part with same name
self.part_name_list[current_frame][part_index][2] = ""
if part_index not in self.not_show:
self.not_show.append(part_index)
elif self.part_selected != []:
for part in self.part_selected:
if part < len(keylist): # can't edit part that not exist
part_index = keylist[part]
if self.animation_part_list[current_frame][part_index] is not None and \
len(self.animation_part_list[current_frame][part_index]) > 3:
if edit_type == "move":
self.animation_part_list[current_frame][part_index][2] = mouse_pos
elif edit_type == "rotate":
base_pos = self.animation_part_list[current_frame][part_index][2]
myradians = math.atan2(mouse_pos[1] - base_pos[1], mouse_pos[0] - base_pos[0])
newangle = math.degrees(myradians)
# """upper left -"""
if -180 <= newangle <= -90:
newangle = -newangle - 90
# """upper right +"""
elif -90 < newangle < 0:
newangle = (-newangle) - 90
# """lower right -"""
elif 0 <= newangle <= 90:
newangle = -(newangle + 90)
# """lower left +"""
elif 90 < newangle <= 180:
newangle = 270 - newangle
self.animation_part_list[current_frame][part_index][3] = newangle
elif "flip" in edit_type:
flip_type = int(edit_type[-1])
current_flip = self.animation_part_list[current_frame][part_index][4]
if current_flip == 0: # current no flip
self.animation_part_list[current_frame][part_index][4] = flip_type
elif current_flip == 1: # current hori flip
if flip_type == 1:
self.animation_part_list[current_frame][part_index][4] = 0
else:
self.animation_part_list[current_frame][part_index][4] = 3
elif current_flip == 2: # current vert flip
if flip_type == 1:
self.animation_part_list[current_frame][part_index][4] = 3
else:
self.animation_part_list[current_frame][part_index][4] = 0
elif current_flip == 3: # current both hori and vert flip
if flip_type == 1:
self.animation_part_list[current_frame][part_index][4] = 2
else:
self.animation_part_list[current_frame][part_index][4] = 1
elif "reset" in edit_type:
self.animation_part_list[current_frame][part_index][3] = 0
self.animation_part_list[current_frame][part_index][4] = 0
elif "delete" in edit_type:
if part_index in self.not_show:
self.not_show.remove(part_index)
else:
self.not_show.append(part_index)
elif "layer" in edit_type:
if "up" in edit_type:
self.animation_part_list[current_frame][part_index][-1] += 1
elif "down" in edit_type:
self.animation_part_list[current_frame][part_index][-1] -= 1
if self.animation_part_list[current_frame][part_index][-1] == 0:
self.animation_part_list[current_frame][part_index][-1] = 1
if self.animation_part_list[current_frame] != {}:
self.sprite_part = self.animation_part_list[current_frame]
pose_layer_list = {k: v[-1] for k, v in self.sprite_part.items() if v is not None and v != []}
pose_layer_list = dict(sorted(pose_layer_list.items(), key=lambda item: item[1], reverse=True))
surface = self.create_animation_film(pose_layer_list)
else: # create new frame
self.sprite_part = None
surface = self.create_animation_film(None, empty=True)
self.animation_list[current_frame] = surface
reload_animation(anim)
def apply_colour(self, surface, colour):
"""Colorise body part sprite"""
size = (surface.get_width(), surface.get_height())
data = pygame.image.tostring(surface, "RGBA") # convert image to string data for filtering effect
surface = Image.frombytes("RGBA", size, data) # use PIL to get image data
alpha = surface.split()[-1] # save alpha
surface = surface.convert("L") # convert to grey scale for colourise
max_colour = 255 # - (colour[0] + colour[1] + colour[2])
mid_colour = [c - ((max_colour - c) / 2) for c in colour]
surface = ImageOps.colorize(surface, black="black", mid=mid_colour, white=colour).convert("RGB")
surface.putalpha(alpha) # put back alpha
surface = surface.tobytes()
surface = pygame.image.fromstring(surface, size, "RGBA") # convert image back to a pygame surface
return surface
def part_to_sprite(self, surface, part, part_index, main_joint_pos, target, angle, flip):
"""Find body part's new center point from main_joint_pos with new angle, then create rotated part and blit to sprite"""
part_rotated = part.copy()
if flip != 0:
if flip == 1:
part_rotated = pygame.transform.flip(part_rotated, True, False)
elif flip == 2:
part_rotated = pygame.transform.flip(part_rotated, False, True)
elif flip == 3:
part_rotated = pygame.transform.flip(part_rotated, True, True)
if angle != 0:
part_rotated = pygame.transform.rotate(part_rotated, angle) # rotate part sprite
center = pygame.Vector2(part.get_width() / 2, part.get_height() / 2)
if main_joint_pos == "center":
main_joint_pos = (part.get_width() / 2, part.get_height() / 2)
pos_different = main_joint_pos - center # find distance between image center and connect point main_joint_pos
new_center = target - pos_different # find new center point
if angle != 0:
radians_angle = math.radians(360 - angle)
new_center = rotationxy(target, new_center, radians_angle) # find new center point with rotation
rect = part_rotated.get_rect(center=new_center)
self.rect_part_list[list(self.rect_part_list.keys())[part_index]] = rect
surface.blit(part_rotated, rect)
return surface
def remake_rectlist(self):
for key, value in self.rect_part_list.items(): # reset rect list
self.rect_part_list[key] = None
for part_index, part in enumerate(self.sprite_part):
rect = part.rect
self.rect_part_list[list(self.rect_part_list.keys())[part_index]] = rect
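# The flip value used by edit_part encodes mirroring as two bits:
# 0 = none, 1 = horizontal, 2 = vertical, 3 = both. The if/elif ladder in
# edit_part's "flip" branch is therefore equivalent to a bitwise XOR; a
# minimal sketch (this helper is hypothetical and not called by the editor):
def compose_flip(current_flip, flip_type):
    """Return the flip state after applying flip_type (1=horizontal, 2=vertical)."""
    return current_flip ^ flip_type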
class Joint(pygame.sprite.Sprite):
def __init__(self, image):
pygame.sprite.Sprite.__init__(self, self.containers)
        self.pos = None  # set when move() is first called
        self.image_original = image.copy()
        self.image = self.image_original.copy()
        self.rect = self.image.get_rect()
def move(self, angle, pos):
self.pos = pos
self.image = self.image_original.copy()
self.image = pygame.transform.rotate(self.image, angle) # rotate part sprite
self.rect = self.image.get_rect(center=pos)
class Animation:
    # created on 22 Dec 2020 by cenk
def __init__(self, spd_ms, loop):
self.frames = None
        self.speed_ms = spd_ms / 1000  # convert milliseconds to seconds
self.start_frame = 0
self.end_frame = 0
self.first_time = time.time()
self.show_frame = 0
self.loop = loop
def reload(self, frames):
self.frames = frames
self.end_frame = len(self.frames) - 1
def play(self, surface, position, noplay_list):
if dt > 0:
if time.time() - self.first_time >= self.speed_ms:
self.show_frame += 1
while self.show_frame < 10 and noplay_list[self.show_frame]:
self.show_frame += 1
self.first_time = time.time()
if self.show_frame > self.end_frame:
self.show_frame = self.start_frame
surface.blit(self.frames[int(self.show_frame)], position)
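# Note: play() advances one frame per speed_ms seconds of wall time, skips
# frames whose slot in noplay_list is True (frames disabled via the Disable
# button), and wraps back to start_frame once it passes the end.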
def reload_animation(animation):
frames = [pygame.transform.smoothscale(thisimage, showroom.size) for thisimage in skeleton.animation_list]
for frame_index in range(0, 10):
try:
filmstrip_list[frame_index].add_strip(frames[frame_index])
except IndexError:
filmstrip_list[frame_index].add_strip()
animation.reload(frames)
for helper in helperlist:
helper.stat1 = skeleton.part_name_list[current_frame]
helper.stat2 = skeleton.sprite_part
if skeleton.part_selected != []:
for part in skeleton.part_selected:
part = list(skeleton.rect_part_list.keys())[part]
helper.select((0, 0), True, part)
else:
helper.select(None, shift_press)
# start animation maker
clock = pygame.time.Clock()
ui = pygame.sprite.LayeredUpdates()
fakegroup = pygame.sprite.LayeredUpdates()  # placeholder container group so these sprites are skipped by the automatic update
skeleton = Skeleton()
image = pygame.transform.scale(load_image(main_dir, "film.png", ["animation_maker_ui"]),
(int(100 * screen_scale[0]), int(100 * screen_scale[1])))
Filmstrip.image_original = image
filmstrips = pygame.sprite.Group()
Button.containers = ui
SwitchButton.containers = ui
Bodyhelper.containers = ui
Joint.containers = ui
Filmstrip.containers = ui, filmstrips
NameBox.containers = ui
menu.MenuButton.containers = fakegroup
menu.NameList.containers = ui
popup_listbox = pygame.sprite.Group()
popup_namegroup = pygame.sprite.Group()
filmstrip_list = [Filmstrip((0, 42 * screen_scale[1])), Filmstrip((image.get_width(), 42 * screen_scale[1])),
Filmstrip((image.get_width() * 2, 42 * screen_scale[1])), Filmstrip((image.get_width() * 3, 42 * screen_scale[1])),
Filmstrip((image.get_width() * 4, 42 * screen_scale[1])), Filmstrip((image.get_width() * 5, 42 * screen_scale[1])),
Filmstrip((image.get_width() * 6, 42 *screen_scale[1])), Filmstrip((image.get_width() * 7, 42 * screen_scale[1])),
Filmstrip((image.get_width() * 8, 42 * screen_scale[1])), Filmstrip((image.get_width() * 9, 42 * screen_scale[1]))]
filmstrips.add(*filmstrip_list)
imgs = load_images(main_dir, ["animation_maker_ui", "helper_parts"])
bodyhelper_size = (370 * screen_scale[0], 270 * screen_scale[1])
p1_body_helper = Bodyhelper(bodyhelper_size, (bodyhelper_size[0] / 2,
screen_size[1] - (bodyhelper_size[1] / 2)), "p1", imgs[0:14])
p2_body_helper = Bodyhelper(bodyhelper_size, (screen_size[0] - (bodyhelper_size[0] / 2),
screen_size[1] - (bodyhelper_size[1] / 2)), "p2", imgs[0:14])
# effect_helper = Bodyhelper(bodyhelper_size, (screen_size[0] / 2, screen_size[1] - (bodyhelper_size[1] / 2)), "effect", imgs[14:])
helperlist = [p1_body_helper, p2_body_helper]
image = load_image(main_dir, "button.png", ["animation_maker_ui"])
image = pygame.transform.scale(image, (int(image.get_width() * screen_scale[1]),
int(image.get_height() * screen_scale[1])))
new_button = Button("New", image, (image.get_width() / 2, image.get_height() / 2))
save_button = Button("Save", image, (image.get_width() * 2, image.get_height() / 2))
direction_button = Button("", image, (image.get_width() * 3.7, image.get_height() / 2))
duplicate_button = Button("Duplicate", image, (image.get_width() * 11, image.get_height() / 2))
delete_button = Button("Delete", image, (image.get_width() * 13, image.get_height() / 2))
play_animation_button = SwitchButton(["Play", "Stop"], image,
(screen_size[1] / 2, filmstrip_list[0].rect.midbottom[1] + (image.get_height() / 1.5)))
joint_button = SwitchButton(["Joint:ON","Joint:OFF"], image, (play_animation_button.pos[0] + play_animation_button.image.get_width() * 5,
filmstrip_list[0].rect.midbottom[1] + (image.get_height() / 1.5)))
grid_button = SwitchButton(["Grid:ON","Grid:OFF"], image, (play_animation_button.pos[0] + play_animation_button.image.get_width() * 6,
filmstrip_list[0].rect.midbottom[1] + (image.get_height() / 1.5)))
copy_button = Button("Copy", image, (play_animation_button.pos[0] - play_animation_button.image.get_width(),
filmstrip_list[0].rect.midbottom[1] + (image.get_height() / 1.5)))
paste_button = Button("Paste", image, (play_animation_button.pos[0] + play_animation_button.image.get_width(),
filmstrip_list[0].rect.midbottom[1] + (image.get_height() / 1.5)))
default_button = Button("Default", image, (play_animation_button.pos[0] + play_animation_button.image.get_width() * 3,
filmstrip_list[0].rect.midbottom[1] + (image.get_height() / 1.5)))
clear_button = Button("Clear", image, (play_animation_button.pos[0] - play_animation_button.image.get_width() * 3,
filmstrip_list[0].rect.midbottom[1] + (image.get_height() / 1.5)))
activate_button = Button("Enable", image, (play_animation_button.pos[0] - play_animation_button.image.get_width() * 4,
filmstrip_list[0].rect.midbottom[1] + (image.get_height() / 1.5)))
deactivate_button = Button("Disable", image, (play_animation_button.pos[0] - play_animation_button.image.get_width() * 5,
filmstrip_list[0].rect.midbottom[1] + (image.get_height() / 1.5)))
reset_button = Button("Reset",image, (screen_size[0] / 1.5, p1_body_helper.rect.midtop[1] - (image.get_height() / 1.5)))
flip_hori_button = Button("Flip H",image, (reset_button.pos[0] - reset_button.image.get_width(),
p1_body_helper.rect.midtop[1] - (image.get_height() / 1.5)))
flip_vert_button = Button("Flip V",image, (reset_button.pos[0] + reset_button.image.get_width(),
p1_body_helper.rect.midtop[1] - (image.get_height() / 1.5)))
race_part_button = Button("", image, (reset_button.image.get_width() / 2,
p1_body_helper.rect.midtop[1] - (image.get_height() / 1.5)))
direction_part_button = Button("", image, (race_part_button.pos[0] + race_part_button.image.get_width(),
p1_body_helper.rect.midtop[1] - (image.get_height() / 1.5)))
# lock_button = SwitchButton(["Lock:OFF","Lock:ON"], image, (reset_button.pos[0] + reset_button.image.get_width() * 2,
# p1_body_helper.rect.midtop[1] - (image.get_height() / 1.5)))
input_ui_img = load_image(main_dir, "inputui.png", "ui\\mainmenu_ui")
inputui = menu.InputUI(None, input_ui_img,
(screen_size[0] / 2, screen_size[1] / 2)) # user text input ui box popup
imagelist = load_base_button(main_dir)
input_ok_button = menu.MenuButton(None, imagelist, pos=(inputui.rect.midleft[0] + imagelist[0].get_width(),
inputui.rect.midleft[1] + imagelist[0].get_height()),
text="Confirm", layer=31)
input_cancel_button = menu.MenuButton(None, imagelist,
pos=(inputui.rect.midright[0] - imagelist[0].get_width(),
inputui.rect.midright[1] + imagelist[0].get_height()),
text="Cancel", layer=31)
input_button = (input_ok_button, input_cancel_button)
input_box = menu.InputBox(None, inputui.rect.center, inputui.image.get_width()) # user text input box
inputui_pop = (inputui, input_box, input_ok_button, input_cancel_button)
confirmui = menu.InputUI(None, input_ui_img,
(screen_size[0] / 2, screen_size[1] / 2)) # user confirm input ui box popup
confirmui_pop = (confirmui, input_ok_button, input_cancel_button)
boximg = load_image(main_dir, "unit_presetbox.png", "ui\\mainmenu_ui")
menu.ListBox.containers = popup_listbox
popup_listbox = menu.ListBox(None, (0, 0), boximg, 15) # popup listbox need to be in higher layer
popup_listscroll = battleui.UIScroller(popup_listbox.rect.topright,
popup_listbox.image.get_height(),
popup_listbox.maxshowlist,
layer=14)
animation_selector = NameBox((400, image.get_height()), (screen_size[0] / 2, 0))
part_selector = NameBox((250, image.get_height()), (reset_button.image.get_width() * 4,
reset_button.rect.midtop[1]))
# loop_button = SwitchButton(["Loop:Yes", "Loop:No"],image, (100,200))
showroom = Showroom((150 * screen_size[0] / 500, 150 * screen_size[1] / 500))
ui.add(showroom)
runtime = 0
mousetimer = 0
play_animation = False
current_frame = 0
copy_frame = None
copy_name = None
copy_stat = None
deactivate_list = [False] * 10
currentpopuprow = 0
keypress_delay = 0
textinputpopup = (None, None)
skeleton.animation_list = []
animation_name = list(animation_pool.keys())[0]
direction = 1
direction_button.change_text(direction_list[direction])
skeleton.read_animation(animation_name)
animation_selector.change_name(animation_name)
anim = Animation(500, True)
shift_press = False
reload_animation(anim)
while True:
dt = clock.get_time() / 1000
uidt = dt
mouse_pos = pygame.mouse.get_pos() # current mouse pos based on screen
mouse_up = False # left click
mouse_leftdown = False # hold left click
mouse_right = False # right click
mouse_rightdown = False # hold right click
double_mouse_right = False # double right click
mouse_scrolldown = False
mouse_scrollup = False
mouse_wheel = False # mouse wheel click
mouse_wheeldown = False # hold mouse wheel click
copy_press = False
paste_press = False
del_press = False
shift_press = False
popup_click = False
input_esc = False
popup_list = []
keypress = pygame.key.get_pressed()
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
elif event.type == pygame.MOUSEBUTTONUP:
if event.button == 1: # left click
mouse_up = True
            if event.button == 2:  # middle click (mouse wheel button)
                mouse_wheel = True
elif event.button == 3: # Right Click
mouse_right = True
                if mousetimer == 0:
                    mousetimer = 0.001  # start timer on the first right click
                elif mousetimer < 0.3:  # second click within 0.3 seconds counts as a double click
                    double_mouse_right = True
                    mousetimer = 0
elif event.button == 4: # Mouse scroll up
mouse_scrollup = True
rowchange = -1
if popup_listscroll.rect.collidepoint(mouse_pos):
currentpopuprow = listscroll(mouse_scrollup, mouse_scrolldown, popup_listscroll, popup_listbox,
currentpopuprow, popup_listbox.namelist, popup_namegroup, ui)
elif event.button == 5: # Mouse scroll down
mouse_scrolldown = True
rowchange = 1
if popup_listscroll.rect.collidepoint(mouse_pos):
currentpopuprow = listscroll(mouse_scrollup, mouse_scrolldown, popup_listscroll, popup_listbox,
currentpopuprow, popup_listbox.namelist, popup_namegroup, ui)
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
input_esc = True
elif textinputpopup[0] == "text_input":
input_box.userinput(event)
if pygame.mouse.get_pressed()[0]: # Hold left click
mouse_leftdown = True
    elif pygame.mouse.get_pressed()[1]:  # hold middle click
        mouse_wheeldown = True
    elif pygame.mouse.get_pressed()[2]:  # hold right click
        mouse_rightdown = True
if inputui not in ui:
if keypress is not None and keypress_delay < 0.1:
if keypress[pygame.K_LCTRL] or keypress[pygame.K_RCTRL]:
if keypress[pygame.K_c]: # copy frame
copy_press = True
elif keypress[pygame.K_v]: # paste frame
paste_press = True
elif keypress[pygame.K_LSHIFT] or keypress[pygame.K_RSHIFT]:
shift_press = True
elif keypress[pygame.K_DELETE]:
keypress_delay = 0.1
if skeleton.part_selected != []:
skeleton.edit_part(mouse_pos, current_frame, "delete")
elif keypress[pygame.K_PAGEUP]:
keypress_delay = 0.1
if skeleton.part_selected != []:
skeleton.edit_part(mouse_pos, current_frame, "layerup")
elif keypress[pygame.K_PAGEDOWN]:
keypress_delay = 0.1
if skeleton.part_selected != []:
skeleton.edit_part(mouse_pos, current_frame, "layerdown")
        if mousetimer != 0:  # a right click happened earlier
            mousetimer += uidt  # count real time since that click
            if mousetimer >= 0.3:  # more than 0.3 seconds passed, no longer a double click
                mousetimer = 0
        if keypress_delay != 0:  # a key was pressed earlier
            keypress_delay += uidt  # count real time since that key press
            if keypress_delay >= 0.3:  # delay expired, accept the key again
                keypress_delay = 0
if mouse_up:
if popup_listbox in ui:
if popup_listbox.rect.collidepoint(mouse_pos):
popup_click = True
for index, name in enumerate(popup_namegroup): # change leader with the new selected one
if name.rect.collidepoint(mouse_pos):
if popup_listbox.action == "part_side":
direction_part_button.change_text(name.name)
if skeleton.part_selected != []:
skeleton.edit_part(mouse_pos, current_frame, "direction_" + name.name)
elif popup_listbox.action == "part_select":
skeleton.edit_part(mouse_pos, current_frame, "part_" + name.name)
elif popup_listbox.action == "race_select":
skeleton.edit_part(mouse_pos, current_frame, "race_" + name.name)
for thisname in popup_namegroup: # remove name list
thisname.kill()
del thisname
ui.remove(popup_listbox, popup_listscroll)
elif popup_listscroll.rect.collidepoint(mouse_pos): # scrolling on list
popup_click = True
currentpopuprow = popup_listscroll.update(mouse_pos) # update the scroller and get new current subsection
else: # click other stuff
for thisname in popup_namegroup: # remove name list
thisname.kill()
del thisname
ui.remove(popup_listbox, popup_listscroll)
if popup_click is False:
if play_animation_button.rect.collidepoint(mouse_pos):
if play_animation_button.current_option == 0: # start playing animation
play_animation_button.change_option(1)
play_animation = True
else: # stop animation
play_animation_button.change_option(0)
play_animation = False
                elif grid_button.rect.collidepoint(mouse_pos):
                    if grid_button.current_option == 0:  # switch grid display off
                        grid_button.change_option(1)
                        showroom.grid = False
                    else:  # switch grid display back on
                        grid_button.change_option(0)
                        showroom.grid = True
                elif joint_button.rect.collidepoint(mouse_pos):
                    if joint_button.current_option == 0:  # switch to Joint:OFF
                        joint_button.change_option(1)
                    else:  # switch back to Joint:ON
                        joint_button.change_option(0)
# change animation
# skeleton.animation_list = []
# skeleton.generate_animation("Default")
# frames = [pygame.transform.smoothscale(image, showroom.size) for image in skeleton.animation_list]
# for frame_index in range(0, 10):
# try:
# filmstrip_list[frame_index].add_strip(frames[frame_index])
# except IndexError:
# filmstrip_list[frame_index].add_strip()
# anim = Animation(frames, 1000, True)
if play_animation:
current_frame = int(anim.show_frame)
else:
dt = 0
if popup_click is False:
if mouse_up:
if clear_button.rect.collidepoint(mouse_pos):
skeleton.edit_part(mouse_pos, current_frame, "clear")
elif default_button.rect.collidepoint(mouse_pos):
skeleton.edit_part(mouse_pos, current_frame, "default")
elif copy_press or (mouse_up and copy_button.rect.collidepoint(mouse_pos)):
copy_frame = {key: (value[:] if value is not None else value) for key, value in
skeleton.animation_part_list[current_frame].items()}
copy_name = {key: (value[:] if value is not None else value) for key, value in
skeleton.part_name_list[current_frame].items()}
elif paste_press or (mouse_up and paste_button.rect.collidepoint(mouse_pos)):
if copy_frame is not None:
skeleton.animation_part_list[current_frame] = {key: (value[:] if value is not None else value) for key, value in
copy_frame.items()}
skeleton.part_name_list[current_frame] = {key: (value[:] if value is not None else value) for key, value in
copy_name.items()}
skeleton.edit_part(mouse_pos, current_frame, "change")
elif activate_button.rect.collidepoint(mouse_pos):
for strip_index, strip in enumerate(filmstrips):
if strip_index == current_frame:
strip.activate = True
deactivate_list[strip_index] = False
break
elif deactivate_button.rect.collidepoint(mouse_pos):
for strip_index, strip in enumerate(filmstrips):
if strip_index == current_frame:
strip.activate = False
deactivate_list[strip_index] = True
break
elif flip_hori_button.rect.collidepoint(mouse_pos):
skeleton.edit_part(mouse_pos, current_frame, "flip1")
elif flip_vert_button.rect.collidepoint(mouse_pos):
skeleton.edit_part(mouse_pos, current_frame, "flip2")
elif reset_button.rect.collidepoint(mouse_pos):
skeleton.edit_part(mouse_pos, current_frame, "reset")
elif direction_button.rect.collidepoint(mouse_pos):
popuplist_newopen("animation_side", direction_button.rect.bottomleft, direction_list, "top")
elif direction_part_button.rect.collidepoint(mouse_pos):
if race_part_button.text != "":
popuplist_newopen("part_side", direction_part_button.rect.topleft, direction_list, "bottom")
elif part_selector.rect.collidepoint(mouse_pos):
if direction_part_button.text != "" and race_part_button.text != "":
currentpart = list(skeleton.animation_part_list[current_frame].keys())[skeleton.part_selected[-1]]
try:
if "p1" in currentpart or "p2" in currentpart:
selectpart = currentpart[3:]
if selectpart[0:2] == "r_" or selectpart[0:2] == "l_":
selectpart = selectpart[2:]
part_list = list(gen_body_sprite_pool[race_part_button.text][direction_part_button.text][selectpart].keys())
except KeyError: # look at weapon next
selectpart = race_part_button.text
part_list = list(gen_weapon_sprite_pool[selectpart][direction_part_button.text].keys())
popuplist_newopen("part_select", part_selector.rect.topleft, part_list, "bottom")
elif race_part_button.rect.collidepoint(mouse_pos):
if skeleton.part_selected != []:
currentpart = list(skeleton.rect_part_list.keys())[skeleton.part_selected[-1]]
if "weapon" not in currentpart:
part_list = list(gen_body_sprite_pool.keys())
elif "weapon" in currentpart:
part_list = list(gen_weapon_sprite_pool)
popuplist_newopen("race_select", race_part_button.rect.topleft, part_list, "bottom")
elif new_button.rect.collidepoint(mouse_pos):
textinputpopup = ("text_input", "new_animation")
inputui.changeinstruction("New Animation Name:")
ui.add(inputui_pop)
else: # click on sprite in list
for strip_index, strip in enumerate(filmstrips):
if strip.rect.collidepoint(mouse_pos) and current_frame != strip_index:
current_frame = strip_index
anim.show_frame = current_frame
skeleton.part_selected = []
skeleton.edit_part(mouse_pos, current_frame, "change")
for index, helper in enumerate(helperlist):
helper.select(None, False)
break
helper_click = False
for index, helper in enumerate(helperlist):
if helper.rect.collidepoint(mouse_pos):
helper_click = helper
break
if helper_click is not False: # to avoid removing selected part when click other stuff
mouse_pos = pygame.Vector2((mouse_pos[0] - helper_click.rect.topleft[0]) / screen_size[0] * 1000,
(mouse_pos[1] - helper_click.rect.topleft[1]) / screen_size[1] * 1000)
helper_click.select(mouse_pos, shift_press)
if shift_press is False: # remove selected part in other helpers
skeleton.part_selected = [] # clear old list first
for index, helper in enumerate(helperlist):
if helper != helper_click:
helper.select(None, shift_press)
if helper_click.part_selected != []:
for part in helper_click.part_selected:
skeleton.click_part(mouse_pos, True, part)
if showroom.rect.collidepoint(mouse_pos):
mouse_pos = pygame.Vector2((mouse_pos[0] - showroom.rect.topleft[0]) / screen_size[0] * 500,
(mouse_pos[1] - showroom.rect.topleft[1]) / screen_size[1] * 500)
if mouse_up:
skeleton.click_part(mouse_pos, shift_press)
for index, helper in enumerate(helperlist):
if shift_press is False: # reset all first if no shift select
helper.select(mouse_pos, shift_press)
if skeleton.part_selected != []:
for part in skeleton.part_selected:
if list(skeleton.rect_part_list.keys())[part] in helper.rect_part_list:
helper.select(mouse_pos, shift_press, list(skeleton.rect_part_list.keys())[part])
else:
helper.select(None, shift_press)
else:
helper.select(None, shift_press)
if mouse_wheel or mouse_wheeldown:
skeleton.edit_part(mouse_pos, current_frame, "rotate")
elif mouse_right or mouse_rightdown:
skeleton.edit_part(mouse_pos, current_frame, "move")
if skeleton.part_selected != []:
part = skeleton.part_selected[-1]
if skeleton.sprite_part is not None and \
list(skeleton.rect_part_list.keys())[part] in list(skeleton.sprite_part.keys()):
nametext = skeleton.part_name_list[current_frame][list(skeleton.rect_part_list.keys())[part]]
if nametext is None:
nametext = ["","",""]
race_part_button.change_text(nametext[0])
direction_part_button.change_text(nametext[1])
part_selector.change_name(nametext[2])
else:
race_part_button.change_text("")
direction_part_button.change_text("")
part_selector.change_name("")
else:
race_part_button.change_text("")
direction_part_button.change_text("")
part_selector.change_name("")
else:
if input_ok_button.event:
input_ok_button.event = False
if textinputpopup[1] == "new_animation":
animation_name = input_box.text
elif textinputpopup[1] == "quit":
pygame.time.wait(1000)
if pygame.mixer:
pygame.mixer.music.stop()
pygame.mixer.music.unload()
pygame.quit()
input_box.textstart("")
textinputpopup = (None, None)
ui.remove(*inputui_pop)
elif input_cancel_button.event or input_esc:
input_cancel_button.event = False
input_box.textstart("")
textinputpopup = (None, None)
ui.remove(*inputui_pop, *confirmui_pop)
ui.update(mouse_pos, mouse_up, mouse_leftdown, "any")
anim.play(showroom.image, (0, 0), deactivate_list)
for strip_index, strip in enumerate(filmstrips):
if strip_index == current_frame:
strip.selected(True)
break
pen.fill((0, 0, 0))
ui.draw(pen)
pygame.display.update()
clock.tick(60)
|
# if-elif-else in Python
number = int(input("Enter a number: "))
if 80 <= number <= 100:
    print("A+")
elif 50 <= number <= 79:
    print("50 to 79 range")
elif 33 <= number <= 49:
    print("33 to 49 range")
elif number > 100:
    print("Over 100")
else:
    print("Fail")
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import time
from random import random
from functools import reduce, partial
import logging
import numpy as np
import multiprocessing
import paddle
import paddle.fluid as F
import paddle.fluid.layers as L
from propeller import log
import propeller.paddle as propeller
from propeller.paddle.data import Dataset
from optimization import optimization
import utils.data
log.setLevel(logging.DEBUG)
class ClassificationBowModel(propeller.train.Model):
"""propeller Model wraper for paddle-ERNIE """
def __init__(self, config, mode, run_config):
self.config = config
self.mode = mode
self.run_config = run_config
self._param_initializer = F.initializer.TruncatedNormal(
scale=config.initializer_range)
self._emb_dtype = "float32"
self._word_emb_name = "word_embedding"
def forward(self, features):
text_ids_a, = features
def bow(ids):
embed = L.embedding(
input=ids,
size=[self.config.vocab_size, self.config.emb_size],
dtype=self._emb_dtype,
param_attr=F.ParamAttr(
name=self._word_emb_name, initializer=self._param_initializer),
is_sparse=False)
zero = L.fill_constant(shape=[1], dtype='int64', value=0)
pad = L.cast(L.logical_not(L.equal(ids, zero)), 'float32')
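            # pad is 1.0 for real tokens and 0.0 for padding (token id 0),
            # so padded positions drop out of the bag-of-words sum below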
sumed = L.reduce_sum(embed * pad, dim=1)
sumed = L.softsign(sumed)
return sumed
sumed = bow(text_ids_a)
fced = L.fc(
input=sumed,
size=self.config.emb_size,
act='tanh',
param_attr=F.ParamAttr(
name="middle_fc.w_0", initializer=self._param_initializer),
bias_attr="middle_fc.b_0")
logits = L.fc(
input=fced,
size=self.config.num_label,
act=None,
param_attr=F.ParamAttr(
name="pooler_fc.w_0", initializer=self._param_initializer),
bias_attr="pooler_fc.b_0")
if self.mode is propeller.RunMode.PREDICT:
probs = L.softmax(logits)
return probs
else:
return logits
def loss(self, predictions, labels):
labels = L.softmax(labels)
loss = L.softmax_with_cross_entropy(predictions, labels, soft_label=True)
loss = L.mean(loss)
return loss
def backward(self, loss):
scheduled_lr, _ = optimization(
loss=loss,
warmup_steps=int(self.run_config.max_steps * self.config.warmup_proportion),
num_train_steps=self.run_config.max_steps,
learning_rate=self.config.learning_rate,
train_program=F.default_main_program(),
startup_prog=F.default_startup_program(),
weight_decay=self.config.weight_decay,
scheduler="linear_warmup_decay",)
propeller.summary.scalar('lr', scheduled_lr)
def metrics(self, predictions, labels):
predictions = L.argmax(predictions, axis=1)
labels = L.argmax(labels, axis=1)
#predictions = L.unsqueeze(predictions, axes=[1])
acc = propeller.metrics.Acc(labels, predictions)
#auc = propeller.metrics.Auc(labels, predictions)
return {'acc': acc}
if __name__ == '__main__':
parser = propeller.ArgumentParser('DAN model with Paddle')
parser.add_argument('--max_seqlen', type=int, default=128)
parser.add_argument('--vocab_file', type=str, required=True)
parser.add_argument('--unsupervise_data_dir', type=str, required=True)
parser.add_argument('--data_dir', type=str)
args = parser.parse_args()
run_config = propeller.parse_runconfig(args)
hparams = propeller.parse_hparam(args)
vocab = {j.strip().split(b'\t')[0].decode('utf8'): i for i, j in enumerate(open(args.vocab_file, 'rb'))}
unk_id = vocab['[UNK]']
char_tokenizer = utils.data.CharTokenizer(vocab.keys())
space_tokenizer = utils.data.SpaceTokenizer(vocab.keys())
supervise_feature_column = propeller.data.FeatureColumns([
propeller.data.TextColumn('text_a', unk_id=unk_id, vocab_dict=vocab, tokenizer=space_tokenizer),
propeller.data.LabelColumn('label'),
])
def before(text_a, label):
sentence_a = text_a[: args.max_seqlen]
return sentence_a, label
def after(sentence_a, label):
batch_size = sentence_a.shape[0]
onehot_label = np.zeros([batch_size, hparams.num_label], dtype=np.float32)
onehot_label[np.arange(batch_size), label] = 9999.
sentence_a, = utils.data.expand_dims(sentence_a)
return sentence_a, onehot_label
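    # Note: the 9999. value above, once passed through labels = L.softmax(labels)
    # in ClassificationBowModel.loss, yields an effectively exact one-hot soft
    # label, so softmax_with_cross_entropy(..., soft_label=True) reduces to
    # ordinary cross entropy on these batches.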
    train_ds = supervise_feature_column.build_dataset('train', data_dir=os.path.join(args.data_dir, 'train'), shuffle=True, repeat=True, use_gz=False) \
        .map(before) \
        .padded_batch(hparams.batch_size, (0, 0)) \
        .map(after)
unsup_train_ds = supervise_feature_column.build_dataset('unsup_train', data_dir=args.unsupervise_data_dir, shuffle=True, repeat=True, use_gz=False) \
.map(before) \
.padded_batch(hparams.batch_size, (0, 0)) \
.map(after)
dev_ds = supervise_feature_column.build_dataset('dev', data_dir=os.path.join(args.data_dir, 'dev'), shuffle=False, repeat=False, use_gz=False) \
.map(before) \
.padded_batch(hparams.batch_size, (0, 0)) \
.map(after)
train_ds = utils.data.interleave(train_ds, unsup_train_ds)
shapes = ([-1, args.max_seqlen, 1], [-1, hparams.num_label])
types = ('int64', 'float32')
train_ds.data_shapes = shapes
train_ds.data_types = types
dev_ds.data_shapes = shapes
dev_ds.data_types = types
'''
from tqdm import tqdm
for slots in tqdm(train_ds):
pass
'''
best_exporter = propeller.train.exporter.BestExporter(os.path.join(run_config.model_dir, 'best'), cmp_fn=lambda old, new: new['dev']['acc'] > old['dev']['acc'])
propeller.train.train_and_eval(
model_class_or_model_fn=ClassificationBowModel,
params=hparams,
run_config=run_config,
train_dataset=train_ds,
eval_dataset={'dev': dev_ds},
exporters=[best_exporter])
print('dev_acc3\t%.5f' % (best_exporter._best['dev']['acc']))
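    # Example invocation (a sketch: the script name and data paths are
    # assumptions, and propeller.parse_runconfig / parse_hparam consume
    # additional flags beyond those registered above):
    # python dan.py --vocab_file ./data/vocab.txt \
    #     --data_dir ./data/supervised \
    #     --unsupervise_data_dir ./data/unsupervised \
    #     --max_seqlen 128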
|
import numpy as np
from pyscf.lib.linalg_helper import eig
#from pyscf.lib.numpy_helper import einsum
from numpy import einsum
from scipy import linalg as la
import matplotlib.pyplot as plt
############################################################################
# General Simple Exclusion Process:
# _p_
# ___ ___ _|_ \/_ ___ ___ ___ ___ ___
# alpha--->| | | | | | | | | |---> beta
# gamma<---|___|___|___|___|___|___|___|___|___|<--- delta
# /\___|
# q
#
#
###########################################################################
############################################
# Inputs
alpha = 0.8
beta = 2./3.
gamma = 0.
delta = 0.
p = 1.
q = 0.
s = 0.
maxBondDim = 20
maxIter = 2
d = 2
tol = 1e-8
plotConv = True
plotConvIn = False
hamType = 'sep'
############################################
############################################
# Determine MPO
# Basic Operators
Sp = np.array([[0,1],[0,0]])
Sm = np.array([[0,0],[1,0]])
n = np.array([[0,0],[0,1]])
v = np.array([[1,0],[0,0]])
I = np.array([[1,0],[0,1]])
z = np.array([[0,0],[0,0]])
# Exponentially weighted hopping rates
exp_alpha = np.exp(-s)*alpha
exp_beta = np.exp(-s)*beta
exp_p = np.exp(-s)*p
exp_q = np.exp(s)*q
exp_delta = np.exp(s)*delta
exp_gamma = np.exp(s)*gamma
# MPO Lists
W = []
Wl = []
W.insert(len(W),np.array([[exp_alpha*Sm-alpha*v+exp_gamma*Sp-gamma*n, Sp, -n, Sm,-v, I]]))
W.insert(len(W),np.array([[I ],
[exp_p*Sm ],
[p*v ],
[exp_q*Sp ],
[q*n ],
[exp_delta*Sm-delta*v+exp_beta*Sp-beta*n]]))
W.insert(len(W),np.array([[I, z, z, z, z, z],
[exp_p*Sm, z, z, z, z, z],
[p*v, z, z, z, z, z],
[exp_q*Sp, z, z, z, z, z],
[q*n, z, z, z, z, z],
[z, Sp, -n, Sm,-v, I]]))
Wl.insert(len(Wl),np.transpose(W[0],(0,1,3,2)).conj())
Wl.insert(len(Wl),np.transpose(W[1],(0,1,3,2)).conj())
Wl.insert(len(Wl),np.transpose(W[2],(0,1,3,2)).conj())
############################################
############################################
# Current & Density Operators
currentOp = [None]*3
currentOp[0] = np.array([[exp_alpha*Sm+exp_gamma*Sp,Sp,Sm,I]])
currentOp[1] = np.array([[I ],
[exp_p*Sm ],
[exp_q*Sp ],
[exp_delta*Sm+exp_beta*Sp]])
currentOp[2] = np.array([[I, z, z, z],
[exp_p*Sm, z, z, z],
[exp_q*Sp, z, z, z],
[z, Sp, Sm, I]])
densityOp = [None]*2
densityOp[0] = np.array([[n]])
densityOp[1] = np.array([[I]])
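# Sanity check (a sketch): every MPO tensor above is rank-4 with index order
# (D_left, D_right, d, d), matching the W[k][:, :, i_occ, j_occ] slices taken
# when the unit-cell Hamiltonian is built below.
assert W[0].shape == (1, 6, 2, 2) and W[1].shape == (6, 1, 2, 2) and W[2].shape == (6, 6, 2, 2)
assert currentOp[0].shape == (1, 4, 2, 2) and currentOp[2].shape == (4, 4, 2, 2)
assert densityOp[0].shape == (1, 1, 2, 2)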
############################################
############################################
# Make Initial Unit Cell
H = np.zeros((2**2,2**2))
currH = np.zeros((2**2,2**2))
rDenH = np.zeros((2**2,2**2))
lDenH = np.zeros((2**2,2**2))
occ = np.zeros((2**2,2),dtype=int)
sum_occ = np.zeros(2**2,dtype=int)
for i in range(2**2):
occ[i,:] = np.asarray(list(map(lambda x: int(x),'0'*(2-len(bin(i)[2:]))+bin(i)[2:])))
#print(occ[i,:])
sum_occ[i] = np.sum(occ[i,:])
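# occ now enumerates all 2**2 two-site occupation states (00, 01, 10, 11),
# i.e. the full many-body basis of the initial unit cell; each <i|H|j>
# element below is obtained by contracting the MPO tensors along the chain.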
# Calculate Hamiltonian
for i in range(2**2):
i_occ = occ[i,:]
for j in range(2**2):
j_occ = occ[j,:]
tmp_mat0 = np.array([[1]])
currMat0 = np.array([[1]])
rDenMat0 = np.array([[1]])
lDenMat0 = np.array([[1]])
for k in range(2):
tmp_mat0 = einsum('ij,jk->ik',tmp_mat0,W[k][:,:,i_occ[k],j_occ[k]])
currMat0 = einsum('ij,jk->ik',currMat0,currentOp[k][:,:,i_occ[k],j_occ[k]])
rDenMat0 = einsum('ij,jk->ik',rDenMat0,densityOp[k][:,:,i_occ[k],j_occ[k]])
lDenMat0 = einsum('ij,jk->ik',lDenMat0,densityOp[k][:,:,i_occ[k],j_occ[k]])
        H[i,j] += tmp_mat0[0,0]
        currH[i,j] += currMat0[0,0]
        rDenH[i,j] += rDenMat0[0,0]
        lDenH[i,j] += lDenMat0[0,0]
# Diagonalize Hamiltonian
u,lpsi,psi = la.eig(H,left=True)
inds = np.argsort(u)
u = u[inds[-1]]
psi = psi[:,inds[-1]]
lpsi = lpsi[:,inds[-1]]
# Ensure Proper Normalization
# <-|R> = 1
# <L|R> = 1
psi = psi/np.sum(psi)
lpsi = lpsi/np.sum(lpsi*psi)
############################################
############################################
# Reshape wavefunction for SVD
psi = np.reshape(psi,(2,2))
lpsi = np.reshape(lpsi,(2,2))
############################################
############################################
# Do SVD of initial unit cell
U,S,V = np.linalg.svd(psi)
a = [1,min(maxBondDim,d)] # Keep Track of bond dimensions
A = np.reshape(U,(a[0],d,a[1]))
A = np.swapaxes(A,0,1)
B = np.reshape(V,(a[1],d,a[0]))
B = np.swapaxes(B,0,1)
# Left
Ul,Sl,Vl = np.linalg.svd(lpsi)
a = [1,min(maxBondDim,d)] # Keep Track of bond dimensions
Al = np.reshape(Ul,(a[0],d,a[1]))
Al = np.swapaxes(Al,0,1)
Bl = np.reshape(Vl,(a[1],d,a[0]))
Bl = np.swapaxes(Bl,0,1)
############################################
############################################
# Set initial left and right containers
LHBlock = np.array([[[1.]]])
RHBlock = np.array([[[1.]]])
LHBlockl= np.array([[[1.]]])
RHBlockl= np.array([[[1.]]])
LBlocklr = np.array([[1.]])
RBlocklr = np.array([[1.]])
LCurrBlock = np.array([[[1.]]])
RCurrBlock = np.array([[[1.]]])
############################################
############################################
# Evaluate Operators
# total current ---------
tmp1 = einsum('ijk , lim, m->jklm',LCurrBlock, Al.conj(), Sl.conj())
tmp2 = einsum('jklm, jnlo ->kmno',tmp1 , currentOp[0] )
tmp3 = einsum('kmno, okp, p->mnp ',tmp2 , A, S )
tmp4 = einsum('mnp , qmr ->npqr',tmp3 , Bl.conj() )
tmp5 = einsum('npqr, nsqt ->prst',tmp4 , currentOp[1] )
tmp6 = einsum('prst, tpu ->rsu ',tmp5 , B )
curr = einsum('rsu , rsu -> ',tmp6 , RCurrBlock )
# Left Density ---------
tmp1 = einsum('ik , lim, m->klm ',LBlocklr, Al.conj(), Sl.conj())
tmp2 = einsum('klm , jnlo ->kmno',tmp1, densityOp[0] )
tmp3 = einsum('kmno, okp, p->mnp ',tmp2, A, S )
tmp4 = einsum('mnp , qmr ->npqr',tmp3, Bl.conj() )
tmp5 = einsum('npqr, nsqt ->prst',tmp4, densityOp[1] )
tmp6 = einsum('prst, tpu ->su ',tmp5, B )
denl = einsum('ru , ru -> ',tmp6, RBlocklr )
# Right Density ---------
tmp1 = einsum('ik , lim, m->klm ',LBlocklr, Al.conj(), Sl.conj())
tmp2 = einsum('klm , jnlo ->kmno',tmp1, densityOp[1] )
tmp3 = einsum('kmno, okp, p->mnp ',tmp2, A, S )
tmp4 = einsum('mnp , qmr ->npqr',tmp3, Bl.conj() )
tmp5 = einsum('npqr, nsqt ->prst',tmp4, densityOp[0] )
tmp6 = einsum('prst, tpu ->su ',tmp5, B )
denr = einsum('ru , ru -> ',tmp6, RBlocklr )
############################################
############################################
# Store left and right environments
LHBlock= einsum('ijk,lim,jnlo,okp->mnp',LHBlock,A.conj(),W[0],A)
RHBlock= einsum('ijk,lmin,nop,kmp->jlo',B.conj(),W[1],B,RHBlock)
# Left
LHBlockl= einsum('ijk,lim,jnlo,okp->mnp',LHBlockl,Al.conj(),Wl[0],Al)
RHBlockl= einsum('ijk,lmin,nop,kmp->jlo',Bl.conj(),Wl[1],Bl,RHBlockl)
# Left Right
LBlocklr = einsum('jl,ijk,ilm->km',LBlocklr,Al.conj(),A)
RBlocklr = einsum('op,nko,nmp->km',RBlocklr,Bl.conj(),B)
LCurrBlock= einsum('ijk,lim,jnlo,okp->mnp',LCurrBlock,Al.conj(),currentOp[0],A)
RCurrBlock= einsum('ijk,lmin,nop,kmp->jlo',Bl.conj(),currentOp[1],B,RCurrBlock)
print(u,curr,denl,denr)
############################################
############################################
converged = False
iterCnt = 0
nBond = 1
E_prev = 0
El_prev = 0
curr_prev = 0
if plotConv:
fig = plt.figure()
ax1 = plt.subplot(121)
ax2 = plt.subplot(122)
Evec = []
Elvec = []
currVec = []
nBondVec = []
while not converged:
# -----------------------------------------------------------------------------
# Some Prerequisites
nBond += 2
iterCnt += 1
a[0] = a[1]
a[1] = min(maxBondDim,a[0]*2)
# -----------------------------------------------------------------------------
# Determine Initial Guess
# Pad A and B
(n1,n2,n3) = A.shape
Aguess = np.pad(einsum('ijk,k->ijk',A,S),((0,0),(0,a[0]-n2),(0,a[1]-n3)),'constant')
Bguess = np.pad(B,((0,0),(0,a[1]-n3),(0,a[0]-n2)),'constant')
initGuess = einsum('ijk,lkm->iljm',Aguess,Bguess)
guessShape = initGuess.shape
initGuess = initGuess.ravel()
# Left
Alguess = np.pad(einsum('ijk,k->ijk',Al,Sl),((0,0),(0,a[0]-n2),(0,a[1]-n3)),'constant')
Blguess = np.pad(Bl,((0,0),(0,a[1]-n3),(0,a[0]-n2)),'constant')
initGuessl = einsum('ijk,lkm->iljm',Alguess,Blguess)
initGuessl = initGuessl.ravel()
# -----------------------------------------------------------------------------
# Determine Hamiltonian Function
def Hx(x):
x_reshape = np.reshape(x,guessShape)
tmp1 = einsum('ijk,nqks->ijnqs',LHBlock,x_reshape) # Could be 'ijk,mpir->jkmpr'
tmp2 = einsum('jlmn,ijnqs->ilmqs',W[2],tmp1)
tmp3 = einsum('lopq,ilmqs->imops',W[2],tmp2)
finalVec = einsum('ros,imops->mpir',RHBlock,tmp3)
return -finalVec.ravel()
def Hlx(x):
x_reshape = np.reshape(x,guessShape)
tmp1 = einsum('ijk,nqks->ijnqs',LHBlockl,x_reshape) # Could be 'ijk,mpir->jkmpr'
tmp2 = einsum('jlmn,ijnqs->ilmqs',Wl[2],tmp1)
tmp3 = einsum('lopq,ilmqs->imops',Wl[2],tmp2)
finalVec = einsum('ros,imops->mpir',RHBlockl,tmp3)
return -finalVec.ravel()
def precond(dx,e,x0):
return dx
# -----------------------------------------------------------------------------
# Solve Eigenproblem
u,v = eig(Hx,initGuess,precond) # PH - Add tolerance here?
E = -u/nBond
# Left
ul,vl = eig(Hlx,initGuessl,precond) # PH - Add tolerance here?
    El = -ul/nBond
# PH - Figure out normalization
v = v/np.sum(v)
vl = vl/(np.dot(vl,v))
# ------------------------------------------------------------------------------
# Reshape result into state
psi = np.reshape(v,(d,d,a[0],a[0])) # s_l s_(l+1) a_(l-1) a_(l+1)
psi = np.transpose(psi,(2,0,1,3)) # a_(l-1) s_l a_(l+1) s_(l+1)
psi = np.reshape(psi,(a[0]*d,a[0]*d))
# Left
lpsi = np.reshape(vl,(d,d,a[0],a[0])) # s_l s_(l+1) a_(l-1) a_(l+1)
lpsi = np.transpose(lpsi,(2,0,1,3)) # a_(l-1) s_l a_(l+1) s_(l+1)
lpsi = np.reshape(lpsi,(a[0]*d,a[0]*d))
# ------------------------------------------------------------------------------
# Canonicalize state
U,S,V = np.linalg.svd(psi)
A = np.reshape(U,(a[0],d,-1))
A = A[:,:,:a[1]]
A = np.swapaxes(A,0,1)
B = np.reshape(V,(-1,d,a[0]))
B = B[:a[1],:,:]
B = np.swapaxes(B,0,1)
S = S[:a[1]]
# Left
Ul,Sl,Vl = np.linalg.svd(lpsi)
Al = np.reshape(Ul,(a[0],d,-1))
Al = Al[:,:,:a[1]]
Al = np.swapaxes(Al,0,1)
Bl = np.reshape(Vl,(-1,d,a[0]))
Bl = Bl[:a[1],:,:]
Bl = np.swapaxes(Bl,0,1)
Sl = Sl[:a[1]]
# -----------------------------------------------------------------------------
# Calculate Current & Density
# Total Current ---------
tmp1 = einsum('ijk , lim, m->jklm',LCurrBlock, Al.conj(), Sl.conj())
tmp2 = einsum('jklm, jnlo ->kmno',tmp1 , currentOp[2] )
tmp3 = einsum('kmno, okp, p->mnp ',tmp2 , A, S )
tmp4 = einsum('mnp , qmr ->npqr',tmp3 , Bl.conj() )
tmp5 = einsum('npqr, nsqt ->prst',tmp4 , currentOp[2] )
tmp6 = einsum('prst, tpu ->rsu ',tmp5 , B )
curr = einsum('rsu , rsu -> ',tmp6 , RCurrBlock )
# Left Density ---------
tmp1 = einsum('ik , lim, m->klm ',LBlocklr, Al.conj(), Sl.conj())
tmp2 = einsum('klm , jnlo ->kmno',tmp1, densityOp[0] )
tmp3 = einsum('kmno, okp, p->mnp ',tmp2, A, S )
tmp4 = einsum('mnp , qmr ->npqr',tmp3, Bl.conj() )
tmp5 = einsum('npqr, nsqt ->prst',tmp4, densityOp[1] )
tmp6 = einsum('prst, tpu ->su ',tmp5, B )
denl = einsum('ru , ru -> ',tmp6, RBlocklr )
# Right Density ---------
tmp1 = einsum('ik , lim, m->klm ',LBlocklr, Al.conj(), Sl.conj())
tmp2 = einsum('klm , jnlo ->kmno',tmp1, densityOp[1] )
tmp3 = einsum('kmno, okp, p->mnp ',tmp2, A, S )
tmp4 = einsum('mnp , qmr ->npqr',tmp3, Bl.conj() )
tmp5 = einsum('npqr, nsqt ->prst',tmp4, densityOp[0] )
tmp6 = einsum('prst, tpu ->su ',tmp5, B )
denr = einsum('ru , ru -> ',tmp6, RBlocklr )
# -----------------------------------------------------------------------------
# Store left and right environments
tmp1 = einsum('ijk,lim->jklm',LHBlock,A.conj())
tmp2 = einsum('jklm,jnlo->kmno',tmp1,W[2])
LHBlock = einsum('kmno,okp->mnp',tmp2,A)
tmp1 = einsum('nop,kmp->kmno',B,RHBlock)
tmp2 = einsum('kmno,lmin->iklo',tmp1,W[2])
RHBlock = einsum('iklo,ijk->jlo',tmp2,B.conj())
# Left
tmp1 = einsum('ijk,lim->jklm',LHBlockl,Al.conj())
tmp2 = einsum('jklm,jnlo->kmno',tmp1,Wl[2])
LHBlockl = einsum('kmno,okp->mnp',tmp2,Al)
tmp1 = einsum('nop,kmp->kmno',Bl,RHBlockl)
tmp2 = einsum('kmno,lmin->iklo',tmp1,Wl[2])
RHBlockl = einsum('iklo,ijk->jlo',tmp2,Bl.conj())
# Left Right Normalization
tmp1 = einsum('jl,ijk->ilk',LBlocklr,Al.conj())
LBlocklr = einsum('ilk,ilm->km',tmp1,A)
tmp1 = einsum('op,nko->nkp',RBlocklr,Bl.conj())
RBlocklr = einsum('nkp,nmp->km',tmp1,B)
tmp1 = einsum('ijk,lim->jklm',LCurrBlock,Al.conj())
tmp2 = einsum('jklm,jnlo->kmno',tmp1,currentOp[2])
LCurrBlock = einsum('kmno,okp->mnp',tmp2,A)
tmp1 = einsum('nop,kmp->kmno',B,RCurrBlock)
tmp2 = einsum('kmno,lmin->iklo',tmp1,currentOp[2])
RCurrBlock = einsum('iklo,ijk->jlo',tmp2,Bl.conj())
# ------------------------------------------------------------------------------
# Determine Normalization Factor
tmp1 = einsum('lm,m->lm',LBlocklr,S)
tmp2 = einsum('lm,l->lm',tmp1,Sl.conj())
normFact = einsum('lm,lm->',tmp2,RBlocklr)
print(normFact)
curr /= nBond*normFact
denl /= normFact
denr /= normFact
print(E,curr,denl,denr)
# ------------------------------------------------------------------------------
# Check for convergence
if (np.abs(E - E_prev) < tol) and (np.abs(El - El_prev) < tol) and (np.abs(curr-curr_prev) < tol):
converged = True
else:
E_prev = E
El_prev = El
curr_prev = curr
if plotConv:
Evec.append(E)
Elvec.append(El)
currVec.append(curr)
nBondVec.append(nBond)
ax1.cla()
ax1.plot(nBondVec,Evec,'r.')
ax1.plot(nBondVec,Elvec,'b.')
ax2.cla()
ax2.semilogy(nBondVec[:-1],np.abs(Evec[:-1]-Evec[-1]),'r.')
        ax2.semilogy(nBondVec[:-1],np.abs(Elvec[:-1]-Elvec[-1]),'b.')
plt.pause(0.01)
|
#!/usr/bin/env python
# ____________________________________________________
# :) My Button Netmaxiot interfacing
# Button Example By NetmaxIOT & Rohitkhosla
# OpenSource MIT licence by Netmax IOT Shield And Rohitkhosla
# :)
#------------------------------------------------------------
import time
import Netmaxiot
# Connect the Netmaxiot Button to digital port D3
# SIG,NC,VCC,GND
button = 3
Netmaxiot.pinMode(button,"INPUT")
while True:
try:
print(Netmaxiot.digitalRead(button))
time.sleep(0.2)
except IOError:
print ("Error")
|
# coding: utf-8
# /*##########################################################################
# Copyright (C) 2016-2017 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ############################################################################*/
"""Tests for retry utilities"""
__authors__ = ["W. de Nolf"]
__license__ = "MIT"
__date__ = "05/02/2020"
import unittest
import os
import sys
import time
import tempfile
from .. import retry
def _cause_segfault():
import ctypes
i = ctypes.c_char(b"a")
j = ctypes.pointer(i)
c = 0
while True:
j[c] = b"a"
c += 1
def _submain(filename, kwcheck=None, ncausefailure=0, faildelay=0):
assert filename
assert kwcheck
sys.stderr = open(os.devnull, "w")
with open(filename, mode="r") as f:
failcounter = int(f.readline().strip())
if failcounter < ncausefailure:
time.sleep(faildelay)
failcounter += 1
with open(filename, mode="w") as f:
f.write(str(failcounter))
if failcounter % 2:
raise retry.RetryError
else:
_cause_segfault()
return True
_wsubmain = retry.retry_in_subprocess()(_submain)
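# _wsubmain runs _submain in a child process: a RetryError or a hard crash
# (the deliberate segfault above) in the child triggers a retry in the parent
# until retry_timeout elapses; the odd/even counter in _submain exercises both
# failure modes in alternation.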
class TestRetry(unittest.TestCase):
def setUp(self):
self.test_dir = tempfile.mkdtemp()
self.ctr_file = os.path.join(self.test_dir, "failcounter.txt")
def tearDown(self):
if os.path.exists(self.ctr_file):
os.unlink(self.ctr_file)
os.rmdir(self.test_dir)
def test_retry(self):
ncausefailure = 3
faildelay = 0.1
sufficient_timeout = ncausefailure * (faildelay + 10)
insufficient_timeout = ncausefailure * faildelay * 0.5
@retry.retry()
def method(check, kwcheck=None):
assert check
assert kwcheck
nonlocal failcounter
if failcounter < ncausefailure:
time.sleep(faildelay)
failcounter += 1
raise retry.RetryError
return True
failcounter = 0
kw = {
"kwcheck": True,
"retry_timeout": sufficient_timeout,
}
self.assertTrue(method(True, **kw))
failcounter = 0
kw = {
"kwcheck": True,
"retry_timeout": insufficient_timeout,
}
with self.assertRaises(retry.RetryTimeoutError):
method(True, **kw)
def test_retry_contextmanager(self):
ncausefailure = 3
faildelay = 0.1
sufficient_timeout = ncausefailure * (faildelay + 10)
insufficient_timeout = ncausefailure * faildelay * 0.5
@retry.retry_contextmanager()
def context(check, kwcheck=None):
assert check
assert kwcheck
nonlocal failcounter
if failcounter < ncausefailure:
time.sleep(faildelay)
failcounter += 1
raise retry.RetryError
yield True
failcounter = 0
kw = {"kwcheck": True, "retry_timeout": sufficient_timeout}
with context(True, **kw) as result:
self.assertTrue(result)
failcounter = 0
kw = {"kwcheck": True, "retry_timeout": insufficient_timeout}
with self.assertRaises(retry.RetryTimeoutError):
with context(True, **kw) as result:
pass
def test_retry_in_subprocess(self):
ncausefailure = 3
faildelay = 0.1
sufficient_timeout = ncausefailure * (faildelay + 10)
insufficient_timeout = ncausefailure * faildelay * 0.5
kw = {
"ncausefailure": ncausefailure,
"faildelay": faildelay,
"kwcheck": True,
"retry_timeout": sufficient_timeout,
}
with open(self.ctr_file, mode="w") as f:
f.write("0")
self.assertTrue(_wsubmain(self.ctr_file, **kw))
kw = {
"ncausefailure": ncausefailure,
"faildelay": faildelay,
"kwcheck": True,
"retry_timeout": insufficient_timeout,
}
with open(self.ctr_file, mode="w") as f:
f.write("0")
with self.assertRaises(retry.RetryTimeoutError):
_wsubmain(self.ctr_file, **kw)
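# A minimal usage sketch of the decorator exercised above (``read_when_ready``
# and the path are hypothetical). ``retry_timeout`` is the same keyword the
# tests pass through, and raising ``retry.RetryError`` asks for another try:
def _example_read_when_ready(path="/tmp/ready.txt"):
    @retry.retry()
    def read_when_ready(filename):
        if not os.path.exists(filename):
            raise retry.RetryError
        with open(filename, mode="r") as f:
            return f.read()
    return read_when_ready(path, retry_timeout=5)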
|
"""
Copyright 2010 Jason Chu, Dusty Phillips, and Phil Schalm
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.conf.urls.defaults import *
urlpatterns = patterns('notifications.views',
url('^$', 'list_notifications', name="list_notifications"),
url('^ajax/$', 'ajax_notification', name="ajax_notification"),
    url(r'^dismiss/(?P<notification_id>\d+)/$', 'dismiss_notification',
        name='dismiss_notification'),
    url(r'^view/(?P<notification_id>\d+)/$', 'view_notification', name="view_notification"),
)
|
from celery.result import AsyncResult
from django.shortcuts import render
from django.contrib import messages
from django.http import JsonResponse
from .forms import SendEmailForm
from .tasks import send_email_task, loop
def index(request):
if request.method == 'POST':
form = SendEmailForm(request.POST)
if form.is_valid():
email = form.cleaned_data['email']
send_email_task.delay(email)
messages.success(request, 'Sending email to {}'.format(email))
return render(request, 'index.html', {'form': form})
form = SendEmailForm()
return render(request, 'index.html', {'form': form})
def home(request):
return render(request, 'home.html')
def run_long_task(request):
    if request.method == 'POST':
        length = request.POST.get('l')
        task = loop.delay(length)
        return JsonResponse({"task_id": task.id}, status=202)
    # Avoid implicitly returning None on non-POST requests
    return JsonResponse({"error": "POST required"}, status=405)
def task_status(request, task_id):
task = AsyncResult(task_id)
if task.state == 'FAILURE' or task.state == 'PENDING':
response = {
'task_id': task_id,
'state': task.state,
'progression': "None",
'info': str(task.info)
}
return JsonResponse(response, status=200)
current = task.info.get('current', 0)
total = task.info.get('total', 1)
progression = (int(current) / int(total)) * 100 # to display a percentage of progress of the task
response = {
'task_id': task_id,
'state': task.state,
'progression': progression,
'info': "None"
}
    return JsonResponse(response, status=200)
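# The progress math above assumes the ``loop`` task reports through
# ``update_state`` with ``current``/``total`` keys in its meta dict. A hedged
# sketch of what such a task might look like (the real tasks.py is not shown
# here; ``shared_task`` comes from celery, ``time`` from the stdlib):
#
#   @shared_task(bind=True)
#   def loop(self, l):
#       n = int(l)
#       for i in range(n):
#           time.sleep(1)
#           self.update_state(state='PROGRESS',
#                             meta={'current': i + 1, 'total': n})
#       return {'current': n, 'total': n}
|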
from .darknet import Darknet
from .detectors_resnet import DetectoRS_ResNet
from .detectors_resnext import DetectoRS_ResNeXt
from .hourglass import HourglassNet
from .hrnet import HRNet
from .regnet import RegNet
from .res2net import Res2Net
from .resnet import ResNet, ResNetV1d
from .resnext import ResNeXt
from .ssd_vgg import SSDVGG
__all__ = [
'RegNet', 'ResNet', 'ResNetV1d', 'ResNeXt', 'SSDVGG', 'HRNet', 'Res2Net',
'HourglassNet', 'DetectoRS_ResNet', 'DetectoRS_ResNeXt', 'Darknet'
]
|
__author__ = 'markcial'
from libs import Slide, Tutor
slides = [
Slide(
"""Types in python
===============
Everything in Python has a type. The philosophy behind Python is that
everything in the codebase is a "first-class citizen": everything can be
overridden, metaprogrammed, extended or patched."""
),
Slide(
"""Working with types
==================
When you check the type of a number, you will get the following output: `<type 'int'>`
>>> type(1)"""
)
]
tutor = Tutor(slides)
tutor.interact() |
"""Module pointing to different implementations of Model class
The implementations contain methods to access the output or gradients of ML models trained based on different frameworks such as Tensorflow or PyTorch.
"""
class Model:
"""An interface class to different ML Model implementations."""
def __init__(self, model=None, model_path='', backend='TF1'):
"""Init method
:param model: trained ML model.
:param model_path: path to trained ML model.
:param backend: "TF1" ("TF2") for TensorFLow 1.0 (2.0) and "PYT" for PyTorch implementations of standard DiCE (https://arxiv.org/pdf/1905.07697.pdf). For all other frameworks and implementations, provide a dictionary with "model" and "explainer" as keys, and include module and class names as values in the form module_name.class_name. For instance, if there is a model interface class "SklearnModel" in module "sklearn_model.py" inside the subpackage dice_ml.model_interfaces, and dice interface class "DiceSklearn" in module "dice_sklearn" inside dice_ml.explainer_interfaces, then backend parameter should be {"model": "sklearn_model.SklearnModel", "explainer": dice_sklearn.DiceSklearn}.
"""
        if model is None and model_path == '':
raise ValueError("should provide either a trained model or the path to a model")
else:
self.decide_implementation_type(model, model_path, backend)
def decide_implementation_type(self, model, model_path, backend):
"""Decides the Model implementation type."""
self.__class__ = decide(backend)
self.__init__(model, model_path, backend)
# To add new implementations of Model, add the class to the model_interfaces
# subpackage and import-and-return it from a new elif branch in the method below.
def decide(backend):
"""Decides the Model implementation type."""
if 'TF' in backend: # Tensorflow 1 or 2 backend
from dice_ml.model_interfaces.keras_tensorflow_model import KerasTensorFlowModel
return KerasTensorFlowModel
elif backend == 'PYT': # PyTorch backend
from dice_ml.model_interfaces.pytorch_model import PyTorchModel
return PyTorchModel
else: # all other implementations and frameworks
backend_model = backend['model']
module_name, class_name = backend_model.split('.')
module = __import__("dice_ml.model_interfaces." + module_name, fromlist=[class_name])
return getattr(module, class_name)
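# A hedged usage sketch of the dictionary backend described in the __init__
# docstring above (the module/class names are illustrative and must actually
# exist under dice_ml.model_interfaces / dice_ml.explainer_interfaces;
# ``trained_clf`` stands for any fitted model):
#
#   backend = {"model": "sklearn_model.SklearnModel",
#              "explainer": "dice_sklearn.DiceSklearn"}
#   m = Model(model=trained_clf, backend=backend)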
|
from enum import Enum
class MethodType(Enum):
COMPUTATION = 'COMPUTATION'
INSTRUMENTATION = 'INSTRUMENTATION'
EXTRACTION = 'EXTRACTION'
PROVENANCE = 'PROVENANCE' |
# metar.py
import logging
import re
# Metadata
NAME = 'metar'
ENABLE = True
USAGE = '''Usage: ![metar|taf] <id> <date>
Given a station ID, produce the METeorological Aerodrome Report or the terminal
aerodrome forecast.
Date is in format yyyymmddhhnn
Examples:
> !metar # Default location
> !taf kphx # Phoenix Sky Harbor
> !taf ksbn 202110271235 # South Bend 2021-10-27 at 12:35
'''
METAR_PATTERN = r'^!metar\s*(?P<ids>[a-zA-Z]+)*\s*(?P<date>[0-9]+)*$'
TAF_PATTERN = r'^!taf\s*(?P<ids>[a-zA-Z]+)*\s*(?P<date>[0-9]+)*$'
# Constants
DEFAULT_ID = 'ksbn'
METAR_URL_BASE = 'https://aviationweather.gov/metar/data?'
METAR_URL_EXT = 'format=raw&hours=0&taf=off&layout=off'
EXTRACT = r'<code>(.*)</code>'
# Functions
async def get_metar_data(bot, ids, date, taf=False):
url = METAR_URL_BASE
if ids:
url = url + f'ids={ids}&'
else:
url = url + f'ids={DEFAULT_ID}&'
url = url + METAR_URL_EXT
if taf:
url = url + '&taf=on'
if date:
url = url + f'&date={date}'
async with bot.http_client.get(url) as response:
return await response.text()
async def metar(bot, message, ids=None, date=None):
data = await get_metar_data(bot, ids, date, False)
metar = re.findall(EXTRACT, data)
if '<strong>No METAR found' in data or not metar:
return message.with_body('No results')
return message.with_body(metar[0])
async def taf(bot, message, ids=None, date=None):
data = await get_metar_data(bot, ids, date, True)
metar = re.findall(EXTRACT, data)
if '<strong>No METAR found' in data or len(metar) < 2:
return message.with_body('No results')
taf = re.sub('<br/> ', '| ', metar[1])
return message.with_body(taf)
# Register
def register(bot):
return (
('command', METAR_PATTERN, metar),
('command', TAF_PATTERN, taf)
)
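# Quick self-check of the command patterns defined above (sample inputs only;
# run this module directly to verify the named groups):
if __name__ == '__main__':
    m = re.match(TAF_PATTERN, '!taf ksbn 202110271235')
    assert m is not None
    assert m.group('ids') == 'ksbn'
    assert m.group('date') == '202110271235'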
# vim: set sts=4 sw=4 ts=8 expandtab ft=python:
|
import cv2
import numpy as np
import torch
def mmdet_normalize(img, mean, std, to_rgb=True):
"""
Args:
img (ndarray): Image to be normalized.
mean (ndarray): The mean to be used for normalize.
std (ndarray): The std to be used for normalize.
to_rgb (bool): Whether to convert to rgb.
Returns:
ndarray: The normalized image.
"""
mean = np.array(mean, 'f4')
stdinv = 1 / np.array(std, 'f4')
if to_rgb:
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = (img - mean) * stdinv
return np.float32(img)
class StableSoftmax(torch.nn.Module):
def __init__(self, dim=None):
super(StableSoftmax, self).__init__()
self.dim = dim
def forward(self, input):
x = input - torch.max(input, self.dim, keepdim=True)[0]
x = torch.exp(x)
return x / torch.sum(x, self.dim, keepdim=True)
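# Quick sanity check (run this file directly; assumes torch is importable).
# StableSoftmax subtracts the per-dim max before exponentiating, so it stays
# finite for large logits and agrees with torch.softmax:
if __name__ == "__main__":
    x = torch.tensor([[1000.0, 1001.0, 1002.0]])
    assert torch.allclose(StableSoftmax(dim=-1)(x), torch.softmax(x, dim=-1))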
|
from django.contrib.auth.models import User
from rest_framework import serializers
from activities.models import Activity
from activities_links.serializer import LinkSerializer
from activities_files.serializer import ActivityFileSerializer
from users.serializer import UserBriefSerializer
from utils.validators import validate_user_id
class ActivitySerializer(serializers.ModelSerializer):
class Meta:
model = Activity
fields = ("id", "title", "datetime", "location", "presenter", "attender", "check_in_open", "link", "file", "photo")
read_only_fields = ("id", "attender", "link", "file", "photo")
title = serializers.CharField(max_length=150)
location = serializers.CharField(max_length=50)
presenter = UserBriefSerializer(read_only=False, many=True)
attender = UserBriefSerializer(read_only=True, many=True)
link = LinkSerializer(read_only=True, many=True)
file = ActivityFileSerializer(read_only=True, many=True)
def validate_presenter(self, presenter_list):
if len(presenter_list) == 0:
raise serializers.ValidationError("活动没有演讲者")
for presenter in presenter_list:
if 'id' not in presenter:
raise serializers.ValidationError("用户不包含 id")
validate_user_id(presenter['id'])
return presenter_list
def create(self, validated_data):
presenter_data = validated_data.pop('presenter')
activity = Activity.objects.create(**validated_data)
for presenter in presenter_data:
u = User.objects.get(id=presenter['id'])
activity.presenter.add(u)
activity.save()
return activity
def update(self, instance: Activity, validated_data):
if 'presenter' in validated_data:
presenter_data = validated_data.pop('presenter')
instance.presenter.clear()
for presenter in presenter_data:
u = User.objects.get(id=presenter['id'])
instance.presenter.add(u)
        instance = super().update(instance, validated_data)  # update title and the other plain fields
instance.save()
return instance
class ActivityAdminSerializer(serializers.ModelSerializer):
class Meta:
model = Activity
fields = ('check_in_code', )
read_only_fields = ('check_in_code', )
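# An illustrative POST payload for ActivitySerializer's presenter handling
# above (field values are made up; presenter entries only need an "id"):
#
#   {
#       "title": "Intro to DRF",
#       "datetime": "2021-10-01T14:00:00Z",
#       "location": "Room 101",
#       "presenter": [{"id": 1}, {"id": 2}]
#   }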
|
from unittest import TestCase
import pyepgdb
from .testutil import get_resource_path
class SingleResult (TestCase):
def setUp (self):
with open(get_resource_path('single-result.epgdb'), 'rb') as f:
self.results = list(pyepgdb.parse(f))
def test_count (self):
self.assertEqual(len(self.results), 1, 'should be 1 result')
def test_episode_int_field (self):
self.assertEqual(self.results[0].episode['id'], 3261217)
def test_episode_string_field (self):
self.assertEqual(self.results[0].episode['uri'], 'crid://bds.tv/E0L97G')
def test_episode_binary_field (self):
self.assertEqual(self.results[0].episode['genre'],
b'\x02\x00\x00\x00\x00\x01\x20')
def test_episode_object_field (self):
self.assertEqual(self.results[0].episode['title'],
{'eng': 'Planet Earth'})
def test_episode_empty_object_field (self):
self.assertEqual(self.results[0].episode['epnum'], {})
def test_broadcast_int_field (self):
self.assertEqual(self.results[0].broadcast['start'], 1565445600)
def test_broadcast_string_field (self):
self.assertEqual(self.results[0].broadcast['channel'],
'1d72d745d4a1e046e3070c75b336a1a5')
class MultipleEpisodes (TestCase):
def setUp (self):
with open(get_resource_path('multiple-episodes.epgdb'), 'rb') as f:
self.results = list(pyepgdb.parse(f))
def test_count (self):
self.assertEqual(len(self.results), 2, 'should be 2 results')
def test_1_episode(self):
self.assertEqual(self.results[0].episode['id'], 3267319,
'should correspond to first broadcast')
def test_1_broadcast(self):
self.assertEqual(self.results[0].broadcast['id'], 3267317,
'should be first broadcast')
def test_2_episode(self):
self.assertEqual(self.results[1].episode['id'], 3261217,
'should correspond to second broadcast')
def test_2_broadcast(self):
self.assertEqual(self.results[1].broadcast['id'], 3261216,
'should be second broadcast')
class MultipleBroadcasts (TestCase):
def setUp (self):
with open(get_resource_path('multiple-broadcasts.epgdb'), 'rb') as f:
self.results = list(pyepgdb.parse(f))
def test_count (self):
self.assertEqual(len(self.results), 2, 'should be 2 results')
def test_1_episode(self):
self.assertEqual(self.results[0].episode['id'], 3261217,
'should be the episode')
def test_1_broadcast(self):
self.assertEqual(self.results[0].broadcast['id'], 3261216,
'should be first broadcast')
def test_2_episode(self):
self.assertEqual(self.results[1].episode['id'], 3261217,
'should be the episode')
def test_2_broadcast(self):
self.assertEqual(self.results[1].broadcast['id'], 3261251,
'should be second broadcast')
class BroadcastsFirst (TestCase):
def setUp (self):
with open(get_resource_path('broadcasts-first.epgdb'), 'rb') as f:
self.results = list(pyepgdb.parse(f))
def test_count (self):
self.assertEqual(len(self.results), 1, 'should be 1 result')
def test_episode(self):
self.assertEqual(self.results[0].episode['id'], 3261217,
'episode ID should match')
def test_broadcast(self):
self.assertEqual(self.results[0].broadcast['id'], 3261216,
'broadcast ID should match')
|
#
# PySNMP MIB module CISCO-VISM-CODEC-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-VISM-CODEC-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 18:02:03 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ConstraintsIntersection, ValueSizeConstraint, ValueRangeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsIntersection", "ValueSizeConstraint", "ValueRangeConstraint", "ConstraintsUnion")
voice, = mibBuilder.importSymbols("BASIS-MIB", "voice")
ciscoWan, = mibBuilder.importSymbols("CISCOWAN-SMI", "ciscoWan")
SnmpAdminString, = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "SnmpAdminString")
ModuleCompliance, NotificationGroup, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup", "ObjectGroup")
Unsigned32, MibIdentifier, Integer32, iso, NotificationType, TimeTicks, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter32, Gauge32, ObjectIdentity, Bits, Counter64, IpAddress, ModuleIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "Unsigned32", "MibIdentifier", "Integer32", "iso", "NotificationType", "TimeTicks", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter32", "Gauge32", "ObjectIdentity", "Bits", "Counter64", "IpAddress", "ModuleIdentity")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
ciscoVismCodecMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 351, 150, 97))
ciscoVismCodecMIB.setRevisions(('2005-05-24 00:00', '2004-01-07 00:00',))
if mibBuilder.loadTexts: ciscoVismCodecMIB.setLastUpdated('200505240000Z')
if mibBuilder.loadTexts: ciscoVismCodecMIB.setOrganization('Cisco Systems, Inc.')
vismCodecTemplateCnfGrp = MibIdentifier((1, 3, 6, 1, 4, 1, 351, 110, 5, 5, 7))
vismCodecCnfGrp = MibIdentifier((1, 3, 6, 1, 4, 1, 351, 110, 5, 5, 18))
vismCodecCnfTable = MibTable((1, 3, 6, 1, 4, 1, 351, 110, 5, 5, 18, 1), )
if mibBuilder.loadTexts: vismCodecCnfTable.setStatus('current')
vismCodecCnfEntry = MibTableRow((1, 3, 6, 1, 4, 1, 351, 110, 5, 5, 18, 1, 1), ).setIndexNames((0, "CISCO-VISM-CODEC-MIB", "vismCodecCnfIndex"))
if mibBuilder.loadTexts: vismCodecCnfEntry.setStatus('current')
vismCodecCnfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 5, 18, 1, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15))).clone(namedValues=NamedValues(("g711u", 1), ("g711a", 2), ("g726r32000", 3), ("g729a", 4), ("g729ab", 5), ("clearChannel", 6), ("g726r16000", 7), ("g726r24000", 8), ("g726r40000", 9), ("g723h", 11), ("g723ah", 12), ("g723l", 13), ("g723al", 14), ("lossless", 15)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: vismCodecCnfIndex.setStatus('current')
vismCodecName = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 5, 18, 1, 1, 2), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(1, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: vismCodecName.setStatus('current')
vismCodecPktPeriod = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 5, 18, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(10, 20, 30, 40, 60))).clone(namedValues=NamedValues(("ten", 10), ("twenty", 20), ("thirty", 30), ("fourty", 40), ("sixty", 60)))).setUnits('milliseconds').setMaxAccess("readwrite")
if mibBuilder.loadTexts: vismCodecPktPeriod.setStatus('current')
vismCodecPreference = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 5, 18, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: vismCodecPreference.setStatus('current')
vismCodecString = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 5, 18, 1, 1, 5), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(1, 64))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: vismCodecString.setStatus('current')
vismCodecIanaType = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 5, 18, 1, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 127))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: vismCodecIanaType.setStatus('current')
vismAltCodecString1 = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 5, 18, 1, 1, 7), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 64))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: vismAltCodecString1.setStatus('current')
vismAltCodecString2 = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 5, 18, 1, 1, 8), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 64))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: vismAltCodecString2.setStatus('current')
vismAltCodecString3 = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 5, 18, 1, 1, 9), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 64))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: vismAltCodecString3.setStatus('current')
vismCodecTemplateCnfGrpTable = MibTable((1, 3, 6, 1, 4, 1, 351, 110, 5, 5, 7, 1), )
if mibBuilder.loadTexts: vismCodecTemplateCnfGrpTable.setStatus('current')
vismCodecTemplateCnfGrpEntry = MibTableRow((1, 3, 6, 1, 4, 1, 351, 110, 5, 5, 7, 1, 1), ).setIndexNames((0, "CISCO-VISM-CODEC-MIB", "vismCodecTemplateNum"))
if mibBuilder.loadTexts: vismCodecTemplateCnfGrpEntry.setStatus('current')
vismCodecTemplateNum = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 5, 7, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 100))).setMaxAccess("readonly")
if mibBuilder.loadTexts: vismCodecTemplateNum.setStatus('current')
vismCodecSupported = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 5, 7, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: vismCodecSupported.setStatus('current')
vismCodecTemplateMaxChanCount = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 5, 7, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: vismCodecTemplateMaxChanCount.setStatus('current')
ciscoVismCodecMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 351, 150, 97, 2))
ciscoVismCodecMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 351, 150, 97, 2, 1))
ciscoVismCodecMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 351, 150, 97, 2, 2))
ciscoVismCodecCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 351, 150, 97, 2, 1, 1)).setObjects(("CISCO-VISM-CODEC-MIB", "ciscoVismCodecCnfGroup"), ("CISCO-VISM-CODEC-MIB", "ciscoVismCodecTemplateGrp"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoVismCodecCompliance = ciscoVismCodecCompliance.setStatus('deprecated')
ciscoVismCodecComplianceRev1 = ModuleCompliance((1, 3, 6, 1, 4, 1, 351, 150, 97, 2, 1, 2)).setObjects(("CISCO-VISM-CODEC-MIB", "ciscoVismCodecCnfGroup"), ("CISCO-VISM-CODEC-MIB", "ciscoVismCodecTemplateGrp"), ("CISCO-VISM-CODEC-MIB", "ciscoAltVismCodecCnfGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoVismCodecComplianceRev1 = ciscoVismCodecComplianceRev1.setStatus('current')
ciscoVismCodecCnfGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 351, 150, 97, 2, 2, 1)).setObjects(("CISCO-VISM-CODEC-MIB", "vismCodecCnfIndex"), ("CISCO-VISM-CODEC-MIB", "vismCodecName"), ("CISCO-VISM-CODEC-MIB", "vismCodecPktPeriod"), ("CISCO-VISM-CODEC-MIB", "vismCodecPreference"), ("CISCO-VISM-CODEC-MIB", "vismCodecString"), ("CISCO-VISM-CODEC-MIB", "vismCodecIanaType"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoVismCodecCnfGroup = ciscoVismCodecCnfGroup.setStatus('current')
ciscoVismCodecTemplateGrp = ObjectGroup((1, 3, 6, 1, 4, 1, 351, 150, 97, 2, 2, 2)).setObjects(("CISCO-VISM-CODEC-MIB", "vismCodecTemplateNum"), ("CISCO-VISM-CODEC-MIB", "vismCodecSupported"), ("CISCO-VISM-CODEC-MIB", "vismCodecTemplateMaxChanCount"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoVismCodecTemplateGrp = ciscoVismCodecTemplateGrp.setStatus('current')
ciscoAltVismCodecCnfGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 351, 150, 97, 2, 2, 3)).setObjects(("CISCO-VISM-CODEC-MIB", "vismAltCodecString1"), ("CISCO-VISM-CODEC-MIB", "vismAltCodecString2"), ("CISCO-VISM-CODEC-MIB", "vismAltCodecString3"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoAltVismCodecCnfGroup = ciscoAltVismCodecCnfGroup.setStatus('current')
mibBuilder.exportSymbols("CISCO-VISM-CODEC-MIB", vismCodecString=vismCodecString, ciscoVismCodecCompliance=ciscoVismCodecCompliance, vismCodecPktPeriod=vismCodecPktPeriod, vismCodecCnfEntry=vismCodecCnfEntry, ciscoVismCodecTemplateGrp=ciscoVismCodecTemplateGrp, ciscoVismCodecMIBCompliances=ciscoVismCodecMIBCompliances, ciscoVismCodecCnfGroup=ciscoVismCodecCnfGroup, vismAltCodecString2=vismAltCodecString2, ciscoAltVismCodecCnfGroup=ciscoAltVismCodecCnfGroup, vismCodecName=vismCodecName, vismCodecTemplateCnfGrpEntry=vismCodecTemplateCnfGrpEntry, vismCodecTemplateNum=vismCodecTemplateNum, vismCodecCnfIndex=vismCodecCnfIndex, ciscoVismCodecMIBConformance=ciscoVismCodecMIBConformance, ciscoVismCodecMIBGroups=ciscoVismCodecMIBGroups, vismCodecCnfTable=vismCodecCnfTable, vismCodecTemplateCnfGrpTable=vismCodecTemplateCnfGrpTable, vismCodecSupported=vismCodecSupported, ciscoVismCodecComplianceRev1=ciscoVismCodecComplianceRev1, vismCodecPreference=vismCodecPreference, vismCodecIanaType=vismCodecIanaType, PYSNMP_MODULE_ID=ciscoVismCodecMIB, vismCodecCnfGrp=vismCodecCnfGrp, ciscoVismCodecMIB=ciscoVismCodecMIB, vismAltCodecString1=vismAltCodecString1, vismCodecTemplateMaxChanCount=vismCodecTemplateMaxChanCount, vismCodecTemplateCnfGrp=vismCodecTemplateCnfGrp, vismAltCodecString3=vismAltCodecString3)
|
""" auto patch things. """
# manual test for monkey patching
import logging
import sys
# project
import ddtrace
# allow logging
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
ddtrace.tracer.debug_logging = True
# Patch nothing
ddtrace.patch()
# Patch all except Redis
ddtrace.patch_all(redis=False)
# Patch Redis
ddtrace.patch(redis=True)
|
from hyperpython import div, a
from boogie import router
urlpatterns = router.Router(
template='testapp/{name}.jinja2',
)
@urlpatterns.route('hello/')
def hello_world(request):
return '<Hello World>'
@urlpatterns.route('hello-simple/')
def hello_world_simple():
return 'Hello World!'
@urlpatterns.route('hello/<name>/')
def hello_name(name):
return f'Hello {name}!'
@urlpatterns.route('links/')
def links():
return div([
a('hello', href='/hello/'),
a('hello-simple', href='/hello-simple/'),
a('hello me', href='/hello/me/'),
])
|
#!/usr/bin/env python3
import pybind_isce3.core as m
def test_constants():
for method in "SINC BILINEAR BICUBIC NEAREST BIQUINTIC".split():
assert hasattr(m.DataInterpMethod, method)
|
# *************************
# |docname| - Core routines
# *************************
#
# Imports
# =======
# These are listed in the order prescribed by `PEP 8
# <http://www.python.org/dev/peps/pep-0008/#imports>`_.
#
# Standard library
# ----------------
# None.
#
# Third-party imports
# -------------------
from sqlalchemy.orm import Query, scoped_session
from sqlalchemy.orm.attributes import InstrumentedAttribute
from sqlalchemy.orm.properties import ColumnProperty, RelationshipProperty
from sqlalchemy.ext.declarative import DeclarativeMeta
from sqlalchemy.orm.base import _generative
from sqlalchemy.sql.elements import ClauseElement
from sqlalchemy.orm.session import Session
from sqlalchemy.orm.util import class_mapper
from sqlalchemy.orm.mapper import Mapper
from sqlalchemy.inspection import inspect
# .. _QueryMaker:
#
# QueryMaker
# ==========
# This class provides a concise, Pythonic syntax for simple queries; as shown in the `Demonstration and unit tests`, ``session(User)['jack'].addresses`` produces a Query_ for the ``Address`` of a ``User`` named ``jack``.
#
# This class provides the following methods:
#
# - Constructor: ``session(User)`` (with help from QueryMakerSession_) creates a query on a User table.
# - Indexing: ``session(User)['jack']`` performs filtering.
# - Attributes: ``session(User)['jack'].addresses`` joins to the Addresses table.
# - Iteration: ``for x in session(User)['jack'].addresses`` iterates over the results of the query.
# - Query access: ``User['jack'].addresses.q`` returns a Query-like object. Any Query_ method can be invoked on it.
#
# See the `demonstration and unit tests` for examples and some less-used methods.
#
# This works by translating class instances into a query/select, indexes into filters, and columns/relationships into joins. The following code shows the Pythonic syntax on the first line, followed by the resulting translation into SQLAlchemy performed by this class on the next line.
#
# .. code-block:: Python
# :linenos:
#
# session(User) ['jack'] .addresses
# session.query().select_from(User).filter(User.name == 'jack').join(Address).add_entity(Address)
#
# Limitations
# -----------
# Note that the `delete <http://docs.sqlalchemy.org/en/latest/orm/query.html#sqlalchemy.orm.query.Query.delete>`_ and `update <http://docs.sqlalchemy.org/en/latest/orm/query.html#sqlalchemy.orm.query.Query.update>`_ methods cannot be invoked on the query produced by this class. Safer (but lower-performance) is:
#
# .. code-block:: python3
# :linenos:
#
# for _ in session(User)['jack']:
# session.delete(_)
#
# Rationale:
#
# - Per the docs on delete_ and update_, these come with a long list of caveats. Making dangerous functions easy to invoke is poor design.
# - For implementation, QueryMaker_ cannot invoke `select_from <http://docs.sqlalchemy.org/en/latest/orm/query.html#sqlalchemy.orm.query.Query.select_from>`_. Doing so raises ``sqlalchemy.exc.InvalidRequestError: Can't call Query.update() or Query.delete() when join(), outerjoin(), select_from(), or from_self() has been called``. So, select_from_ must be deferred -- but to when? ``User['jack'].addresses`` requires a select_from_, while ``User['jack']`` needs just ``add_entity``. We can't know which to invoke until the entire expression is complete.
class QueryMaker(object):
def __init__(self,
# An optional `Declarative class <http://docs.sqlalchemy.org/en/latest/orm/tutorial.html#declare-a-mapping>`_ to query.
declarative_class=None,
# Optionally, begin with an existing query_.
query=None):
if declarative_class:
assert _is_mapped_class(declarative_class)
# If a query is provided, try to infer the declarative_class.
if query is not None:
assert isinstance(query, Query)
self._query = query
try:
self._select = self._get_joinpoint_zero_class()
            except Exception:
# We can't infer it. Use what's provided instead, and add this to the query.
assert declarative_class
self._select = declarative_class
self._query = self._query.select_from(declarative_class)
else:
# If a declarative_class was provided, make sure it's consistent with the inferred class.
if declarative_class:
assert declarative_class is self._select
else:
# The declarative class must be provided if the query wasn't.
assert declarative_class
            # Since a query was not provided, create an empty `query <http://docs.sqlalchemy.org/en/latest/orm/query.html>`_; ``to_query`` will fill in the missing information.
self._query = Query([]).select_from(declarative_class)
# Keep track of the last selectable construct, to generate the select in ``to_query``.
self._select = declarative_class
# Copied verbatim from ``sqlalchemy.orm.query.Query._clone``. This adds the support needed for the _`generative` interface. (Mostly) quoting from query_, "QueryMaker_ features a generative interface whereby successive calls return a new QueryMaker_ object, a copy of the former with additional criteria and options associated with it."
def _clone(self):
cls = self.__class__
q = cls.__new__(cls)
q.__dict__ = self.__dict__.copy()
return q
# Looking up a class's `Column <http://docs.sqlalchemy.org/en/latest/core/metadata.html#sqlalchemy.schema.Column>`_ or `relationship <http://docs.sqlalchemy.org/en/latest/orm/relationship_api.html#sqlalchemy.orm.relationship>`_ generates the matching query.
@_generative()
def __getattr__(self, name):
# Find the Column_ or relationship_ in the join point class we're querying.
attr = getattr(self._get_joinpoint_zero_class(), name)
# If the attribute refers to a column, save this as a possible select statement. Note that a Column_ gets replaced with an `InstrumentedAttribute <http://docs.sqlalchemy.org/en/latest/orm/internals.html?highlight=instrumentedattribute#sqlalchemy.orm.attributes.InstrumentedAttribute>`_; see `QueryableAttribute <http://docs.sqlalchemy.org/en/latest/orm/internals.html?highlight=instrumentedattribute#sqlalchemy.orm.attributes.QueryableAttribute.property>`_.
if isinstance(attr.property, ColumnProperty):
self._select = attr
elif isinstance(attr.property, RelationshipProperty):
# Figure out what class this relationship refers to. See `mapper.params.class_ <http://docs.sqlalchemy.org/en/latest/orm/mapping_api.html?highlight=mapper#sqlalchemy.orm.mapper.params.class_>`_.
declarative_class = attr.property.mapper.class_
# Update the query by performing the implied join.
self._query = self._query.join(declarative_class)
# Save this relationship as a possible select statement.
self._select = declarative_class
else:
# This isn't a Column_ or a relationship_.
assert False
# Indexing the object performs the implied filter. For example, ``session(User)['jack']`` implies ``session.query(User).filter(User.name == 'jack')``.
@_generative()
def __getitem__(self,
# Most often, this is a key which will be filtered by the ``default_query`` method of the currently-active `Declarative class`_. In the example above, the ``User`` class must define a ``default_query`` to operate on strings. However, it may also be a filter criterion, such as ``session(User)[User.name == 'jack']``.
key):
# See if this is a filter criterion; if not, rely in the ``default_query`` defined by the `Declarative class`_ or fall back to the first primary key.
criteria = None
jp0_class = self._get_joinpoint_zero_class()
if isinstance(key, ClauseElement):
criteria = key
elif hasattr(jp0_class, 'default_query'):
criteria = jp0_class.default_query(key)
if criteria is None:
pks = inspect(jp0_class).primary_key
criteria = pks[0] == key
self._query = self._query.filter(criteria)
# Support common syntax: ``for x in query_maker:`` converts this to a query and returns results. The session must already have been set.
def __iter__(self):
return self.to_query().__iter__()
# This property returns a `_QueryWrapper`_, a query-like object which transforms returned Query_ values back into this class while leaving other return values unchanged.
@property
def q(self):
return _QueryWrapper(self)
# Transform this object into a Query_.
def to_query(self,
# Optionally, the `Session <http://docs.sqlalchemy.org/en/latest/orm/session_api.html?highlight=session#sqlalchemy.orm.session.Session>`_ to run this query in.
session=None):
# If a session was specified, use it to produce the query_; otherwise, use the existing query_.
query = self._query.with_session(session) if session else self._query
        # Choose the correct method to select either a column or a class (e.g. an entity). As noted earlier, a Column_ becomes an InstrumentedAttribute_.
if isinstance(self._select, InstrumentedAttribute):
return query.add_columns(self._select)
else:
return query.add_entity(self._select)
# Get the right-most join point in the current query.
def _get_joinpoint_zero_class(self):
jp0 = self._query._joinpoint_zero()
# If the join point was returned as a `Mapper <http://docs.sqlalchemy.org/en/latest/orm/mapping_api.html#sqlalchemy.orm.mapper.Mapper>`_, get the underlying class.
if isinstance(jp0, Mapper):
jp0 = jp0.class_
return jp0
# .. _`_QueryWrapper`:
#
# _QueryWrapper
# -------------
# This class behaves mostly like a Query_. However, if the return value of a method is a Query_, it returns a QueryMaker_ object instead. It's intended for internal use by ``QueryMaker.q``.
class _QueryWrapper(object):
def __init__(self, query_maker):
self._query_maker = query_maker
# Delegate directly to the wrapped Query_. Per `special method lookup <https://docs.python.org/3/reference/datamodel.html#special-lookup>`_, the `special method names <https://docs.python.org/3/reference/datamodel.html#special-method-names>`_ bypass ``__getattr__`` (and even ``__getattribute__``) lookup. Only override what Query_ overrides.
#
# The ``_tq`` (to_query) property shortens the following functions.
@property
def _tq(self):
return self._query_maker.to_query()
def __getitem__(self, key):
return self._tq.__getitem__(key)
def __str__(self):
return self._tq.__str__()
def __repr__(self):
return self._tq.__repr__()
def __iter__(self):
return self._tq.__iter__()
# Allow ``__init__`` to create the ``_query_maker`` variable. Everything else goes to the wrapped Query_. Allow direct assignments, as this mimics what an actual Query_ instance would do.
def __setattr__(self, name, value):
if name != '_query_maker':
return self._query_maker.__setattr__(name, value)
else:
self.__dict__[name] = value
# Run the method on the underlying Query_. If a Query_ is returned, wrap it in a QueryMaker_.
def __getattr__(self, name):
attr = getattr(self._tq, name)
if not callable(attr):
# If this isn't a function, then don't do any wrapping.
return attr
else:
def _wrap_query(*args, **kwargs):
# Invoke the requested Query_ method on the "completed" query returned by ``to_query``.
ret = attr(*args, **kwargs)
if isinstance(ret, Query):
# If the return value was a Query_, make it generative by returning a new QueryMaker_ instance wrapping the query.
query_maker = self._query_maker._clone()
# Re-run getattr on the raw query, since we don't want to add columns or entities to the query yet. Otherwise, they'd be added twice (here and again when ``to_query`` is called).
query_maker._query = getattr(query_maker._query, name)(*args, **kwargs)
# If the query involved a join, then the join point has changed. Update what to select.
query_maker._select = query_maker._get_joinpoint_zero_class()
return query_maker
else:
# Otherwise, just return the result.
return ret
return _wrap_query
# .. _QueryMakerDeclarativeMeta:
#
# QueryMakerDeclarativeMeta
# -------------------------
# Turn indexing of a `Declarative class`_ into a query. For example, ``User['jack']`` is a query. See the `advanced examples` for an example of its use.
class QueryMakerDeclarativeMeta(DeclarativeMeta):
def __getitem__(cls, key):
return QueryMaker(cls)[key]
# .. _QueryMakerQuery:
#
# QueryMakerQuery
# ---------------
# Provide support for changing a Query_ instance into a QueryMaker_ instance. See the `advanced examples` for an example of its use.
#
# TODO: This doesn't allow a user-specified Query_ class. Perhaps provide a factory instead?
class QueryMakerQuery(Query):
def query_maker(self, declarative_class=None):
return QueryMaker(declarative_class, self)
# .. _QueryMakerSession:
#
# QueryMakerSession
# -----------------
# Create a Session_ which returns a QueryMaker_ when called as a function. This enables ``session(User)['jack']``. See the `database setup` for an example of its use.
class QueryMakerSession(Session):
def __call__(self, declarative_class):
return QueryMaker(declarative_class, self.query())
# .. _QueryMakerScopedSession:
#
# QueryMakerScopedSession
# ------------------------
# Provide QueryMakerSession_ extensions for a `scoped session <http://docs.sqlalchemy.org/en/latest/orm/contextual.html>`_.
class QueryMakerScopedSession(scoped_session):
# Note that the superclass' `__call__ <http://docs.sqlalchemy.org/en/latest/orm/contextual.html#sqlalchemy.orm.scoping.scoped_session.__call__>`_ method only accepts keyword arguments. So, only return a QueryMaker_ if only arguments, not keyword arguments, are given.
def __call__(self, *args, **kwargs):
if args and not kwargs:
return QueryMaker(*args, query=self.registry().query())
else:
return super().__call__(*args, **kwargs)
# Support routines
# ----------------
# Copied from https://stackoverflow.com/a/7662943.
def _is_mapped_class(cls):
try:
class_mapper(cls)
return True
    except Exception:
return False
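# A hedged usage sketch tying the pieces together (``User``, ``Address`` and
# ``engine`` are hypothetical; any mapped classes and a configured engine work):
#
#   from sqlalchemy.orm import sessionmaker
#   Session = sessionmaker(bind=engine, class_=QueryMakerSession)
#   session = Session()
#   for address in session(User)['jack'].addresses:
#       print(address)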
|
from django.contrib import admin
# Register your models here.
from .models import Libro, Autor, Prestamo
admin.site.register(Libro)
admin.site.register(Autor)
admin.site.register(Prestamo)
|
import os
import uproot
from glob import glob
from tqdm import tqdm
import numpy as np
from scipy.io import savemat
ntuplePath = "/afs/cern.ch/user/i/ideadr/cernbox/TB2021_H8/rawNtupleSiPM"
def getFiles():
files = glob(ntuplePath + "/*list.root")
files = list(map(os.path.abspath, files))
return files
def getAligned():
files = glob(ntuplePath + "/*.npz")
files = list(map(os.path.abspath, files))
return files
def getFilesToAlign():
allFiles = getFiles()
aligned = getAligned()
allFiles = set([f.rsplit(".", 1)[0] for f in allFiles])
aligned = set([f.rsplit(".", 1)[0] for f in aligned])
toAlign = allFiles - aligned
toAlign = [f + ".root" for f in toAlign]
return list(toAlign)
def runAlignment(fname):
# Load data
with uproot.open(fname) as f:
tid = np.array(f["SiPMData"]["TriggerId"], dtype=np.uint64)
if tid.max() == 0:
tqdm.write(f"Error in file {fname}. Skipping")
return None
hg = np.array(f["SiPMData"]["HighGainADC"], dtype=np.uint16)
lg = np.array(f["SiPMData"]["LowGainADC"], dtype=np.uint16)
bid = np.array(f["SiPMData"]["BoardId"], dtype=np.uint8)
# Sort with respect to tid
sortIdx = np.argsort(tid)
hg = hg[sortIdx]
lg = lg[sortIdx]
bid = bid[sortIdx]
tid = tid[sortIdx]
tiduniq = np.unique(tid)
nEvents = tiduniq.size
hgMatrix = np.zeros((20, 16, nEvents), dtype=np.uint16)
lgMatrix = np.zeros((20, 16, nEvents), dtype=np.uint16)
# Align data
for i, t in enumerate(
tqdm(tiduniq, leave=False, dynamic_ncols=True, position=1, colour="RED", desc=fname.rsplit("/", 1)[-1])
):
firstidx = np.searchsorted(tid, t)
nBoards = np.count_nonzero(tid[firstidx : firstidx + 5] == t)
boards = bid[firstidx : firstidx + nBoards]
for j in range(nBoards):
b = boards[j]
hgMatrix[b * 4 : b * 4 + 4, :, i] = hg[firstidx + j].reshape(4, 16)
lgMatrix[b * 4 : b * 4 + 4, :, i] = lg[firstidx + j].reshape(4, 16)
np.savez_compressed(fname[:-5], hg=hgMatrix, lg=lgMatrix, tid=tiduniq)
savemat(
fname[:-5] + ".mat",
{"matrixHighGainSiPM": hgMatrix, "matrixLowGainSiPM": lgMatrix, "triggerId": tiduniq},
do_compression=True,
oned_as="row",
)
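# Toy illustration of the event-grouping trick used in runAlignment above:
# with trigger ids sorted, np.searchsorted finds the first row of an event,
# and counting matches in the next 5 rows (at most one row per board) gives
# the number of boards that fired. The values below are made up.
def _grouping_demo():
    tid = np.array([7, 7, 9, 9, 9], dtype=np.uint64)  # sorted trigger ids
    firstidx = np.searchsorted(tid, 9)  # -> 2
    nBoards = np.count_nonzero(tid[firstidx:firstidx + 5] == 9)  # -> 3
    return firstidx, nBoards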
def alignAll(fnames):
for f in tqdm(fnames, dynamic_ncols=True, position=0, colour="GREEN"):
        runAlignment(f)
if __name__ == "__main__":
toAlign = getFilesToAlign()
for file in toAlign:
tqdm.write(file)
alignAll(toAlign)
|
# File: 10_game.py
# Author: Raphael Holzer
# Date: 07.01.2019
# from <module> import <object>
from sense_hat import SenseHat
from time import sleep
# create a new SenseHat object
sense = SenseHat()
blue = (0, 0, 255)
red = (255, 0, 0)
white = (255, 255, 255)
bat_y = 4
ball_position = [3, 3]
ball_velocity = [1, 1]
def draw_ball():
ball_position[0] += ball_velocity[0]
if ball_position[0] in [0, 7]:
ball_velocity[0] = -ball_velocity[0]
ball_position[1] += ball_velocity[1]
if ball_position[1] in [0, 7]:
ball_velocity[1] = -ball_velocity[1]
if ball_position[0] == 1 and bat_y-1 <= ball_position[1] <= bat_y+1:
ball_velocity[0] *= -1
if ball_position[0] == 0:
sense.show_message('GAME OVER')
sense.set_pixel(*ball_position, red)
def draw_bat():
sense.set_pixel(0, bat_y, white)
sense.set_pixel(0, bat_y-1, white)
sense.set_pixel(0, bat_y+1, white)
def move_up(event):
global bat_y
if event.action == 'pressed' and bat_y > 1:
bat_y -= 1
def move_down(event):
global bat_y
if event.action == 'pressed' and bat_y < 6:
bat_y += 1
sense.set_pixel(0, 2, blue)
sense.set_pixel(7, 4, red)
# Main program ---------------------------
while True:
sense.clear()
sense.stick.direction_up = move_up
sense.stick.direction_down = move_down
draw_bat()
draw_ball()
sleep(0.25)
|
#################################### IMPORTS ###################################
if __name__ == '__main__':
import sys
import os
pkg_dir = os.path.split(os.path.abspath(__file__))[0]
parent_dir, pkg_name = os.path.split(pkg_dir)
is_pygame_pkg = (pkg_name == 'tests' and
os.path.split(parent_dir)[1] == 'pygame')
if not is_pygame_pkg:
sys.path.insert(0, parent_dir)
else:
is_pygame_pkg = __name__.startswith('pygame.tests.')
if is_pygame_pkg:
from pygame.tests import test_utils
from pygame.tests.test_utils \
import test_not_implemented, unordered_equality, unittest
else:
from test import test_utils
from test.test_utils \
import test_not_implemented, unordered_equality, unittest
import pygame
from pygame import draw
################################################################################
class DrawModuleTest(unittest.TestCase):
def setUp(self):
(self.surf_w, self.surf_h) = self.surf_size = (320, 200)
self.surf = pygame.Surface(self.surf_size, pygame.SRCALPHA)
self.color = (1, 13, 24, 205)
def test_rect__fill(self):
# __doc__ (as of 2008-06-25) for pygame.draw.rect:
# pygame.draw.rect(Surface, color, Rect, width=0): return Rect
# draw a rectangle shape
rect = pygame.Rect(10, 10, 25, 20)
drawn = draw.rect(self.surf, self.color, rect, 0)
self.assert_(drawn == rect)
#Should be colored where it's supposed to be
for pt in test_utils.rect_area_pts(rect):
color_at_pt = self.surf.get_at(pt)
self.assert_(color_at_pt == self.color)
#And not where it shouldn't
for pt in test_utils.rect_outer_bounds(rect):
color_at_pt = self.surf.get_at(pt)
self.assert_(color_at_pt != self.color)
def test_rect__one_pixel_lines(self):
# __doc__ (as of 2008-06-25) for pygame.draw.rect:
# pygame.draw.rect(Surface, color, Rect, width=0): return Rect
# draw a rectangle shape
rect = pygame.Rect(10, 10, 56, 20)
drawn = draw.rect(self.surf, self.color, rect, 1)
self.assert_(drawn == rect)
#Should be colored where it's supposed to be
for pt in test_utils.rect_perimeter_pts(drawn):
color_at_pt = self.surf.get_at(pt)
self.assert_(color_at_pt == self.color)
#And not where it shouldn't
for pt in test_utils.rect_outer_bounds(drawn):
color_at_pt = self.surf.get_at(pt)
self.assert_(color_at_pt != self.color)
def test_line(self):
# __doc__ (as of 2008-06-25) for pygame.draw.line:
# pygame.draw.line(Surface, color, start_pos, end_pos, width=1): return Rect
# draw a straight line segment
drawn = draw.line(self.surf, self.color, (1, 0), (200, 0)) #(l, t), (l, t)
self.assert_(drawn.right == 201,
"end point arg should be (or at least was) inclusive"
)
#Should be colored where it's supposed to be
for pt in test_utils.rect_area_pts(drawn):
self.assert_(self.surf.get_at(pt) == self.color)
#And not where it shouldn't
for pt in test_utils.rect_outer_bounds(drawn):
self.assert_(self.surf.get_at(pt) != self.color)
def todo_test_aaline(self):
# __doc__ (as of 2008-08-02) for pygame.draw.aaline:
# pygame.draw.aaline(Surface, color, startpos, endpos, blend=1): return Rect
# draw fine antialiased lines
#
# Draws an anti-aliased line on a surface. This will respect the
        # clipping rectangle. A bounding box of the affected area is returned
        # as a rectangle. If blend is true, the shades will be
# blended with existing pixel shades instead of overwriting them. This
# function accepts floating point values for the end points.
#
self.fail()
def todo_test_aalines(self):
# __doc__ (as of 2008-08-02) for pygame.draw.aalines:
# pygame.draw.aalines(Surface, color, closed, pointlist, blend=1): return Rect
#
# Draws a sequence on a surface. You must pass at least two points in
# the sequence of points. The closed argument is a simple boolean and
        # if true, a line will be drawn between the first and last points. The
# boolean blend argument set to true will blend the shades with
# existing shades instead of overwriting them. This function accepts
# floating point values for the end points.
#
self.fail()
def todo_test_arc(self):
# __doc__ (as of 2008-08-02) for pygame.draw.arc:
# pygame.draw.arc(Surface, color, Rect, start_angle, stop_angle,
# width=1): return Rect
#
# draw a partial section of an ellipse
#
# Draws an elliptical arc on the Surface. The rect argument is the
# area that the ellipse will fill. The two angle arguments are the
# initial and final angle in radians, with the zero on the right. The
# width argument is the thickness to draw the outer edge.
#
self.fail()
def todo_test_circle(self):
# __doc__ (as of 2008-08-02) for pygame.draw.circle:
# pygame.draw.circle(Surface, color, pos, radius, width=0): return Rect
# draw a circle around a point
#
# Draws a circular shape on the Surface. The pos argument is the
# center of the circle, and radius is the size. The width argument is
# the thickness to draw the outer edge. If width is zero then the
# circle will be filled.
#
self.fail()
def todo_test_ellipse(self):
# __doc__ (as of 2008-08-02) for pygame.draw.ellipse:
# pygame.draw.ellipse(Surface, color, Rect, width=0): return Rect
# draw a round shape inside a rectangle
#
# Draws an elliptical shape on the Surface. The given rectangle is the
# area that the circle will fill. The width argument is the thickness
# to draw the outer edge. If width is zero then the ellipse will be
# filled.
#
self.fail()
def todo_test_lines(self):
# __doc__ (as of 2008-08-02) for pygame.draw.lines:
# pygame.draw.lines(Surface, color, closed, pointlist, width=1): return Rect
# draw multiple contiguous line segments
#
# Draw a sequence of lines on a Surface. The pointlist argument is a
# series of points that are connected by a line. If the closed
# argument is true an additional line segment is drawn between the
# first and last points.
#
# This does not draw any endcaps or miter joints. Lines with sharp
# corners and wide line widths can have improper looking corners.
#
self.fail()
def todo_test_polygon(self):
# __doc__ (as of 2008-08-02) for pygame.draw.polygon:
# pygame.draw.polygon(Surface, color, pointlist, width=0): return Rect
# draw a shape with any number of sides
#
# Draws a polygonal shape on the Surface. The pointlist argument is
# the vertices of the polygon. The width argument is the thickness to
# draw the outer edge. If width is zero then the polygon will be
# filled.
#
# For aapolygon, use aalines with the 'closed' parameter.
self.fail()
################################################################################
if __name__ == '__main__':
unittest.main()
|
class Demo(object):
A = 1
def get_a(self):
return self.A
demo = Demo()
print(demo.get_a())
|
"""Pytorch recursive layers definition in trident"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numbers
import random
import warnings
from typing import Optional, Tuple, overload, Union, cast
import torch
import torch.nn as nn
from torch import Tensor, _VF
from torch._jit_internal import List
from torch.nn import init
from torch.nn.parameter import Parameter
from torch.nn.utils.rnn import PackedSequence
from trident.layers.pytorch_layers import Embedding, Dense, SoftMax
from trident.backend.pytorch_ops import *
from trident.backend.common import *
from trident.backend.pytorch_backend import Layer, get_device
__all__ = ['RNNBase','RNN','LSTM','GRU','LSTMDecoder']
_rnn_impls = {
'RNN_TANH': _VF.rnn_tanh,
'RNN_RELU': _VF.rnn_relu,
}
def apply_permutation(tensor: Tensor, permutation: Tensor, dim: int = 1) -> Tensor:
return tensor.index_select(dim, permutation)
class RNNBase(Layer):
__constants__ = ['mode', 'input_filters', 'hidden_size', 'num_layers', 'use_bias',
'batch_first', 'dropout_rate', 'bidirectional']
mode: str
input_filters: int
hidden_size: int
num_layers: int
use_bias: bool
batch_first: bool
dropout_rate: float
bidirectional: bool
in_sequence: bool
filter_index: int
def __init__(self, mode: str, hidden_size: int,proj_size: int = 0,
num_layers: int = 1,stateful=False, use_bias: bool = True, batch_first: bool = False,
dropout_rate: float = 0., bidirectional: bool = False,keep_output=False,in_sequence=True,filter_index=-1,name=None) -> None:
super(RNNBase, self).__init__(name=name,keep_output=keep_output)
self.in_sequence=in_sequence
self.filter_index=filter_index
self.mode = mode
self.hidden_size = hidden_size
self.proj_size= proj_size
self.num_layers = num_layers
self.use_bias = use_bias
self.stateful=stateful
self.hx=None
self._batch_first = batch_first
if not self._batch_first:
self.batch_index =1
else:
self.batch_index = 0
self.dropout_rate = float(dropout_rate)
self.bidirectional = bidirectional
self.num_directions = 2 if bidirectional else 1
if not isinstance(dropout_rate, numbers.Number) or not 0 <= dropout_rate <= 1 or \
isinstance(dropout_rate, bool):
raise ValueError("dropout should be a number in range [0, 1] "
"representing the probability of an element being "
"zeroed")
if dropout_rate > 0 and num_layers == 1:
warnings.warn("dropout option adds dropout after all but last "
"recurrent layer, so non-zero dropout expects "
"num_layers greater than 1, but got dropout={} and "
"num_layers={}".format(dropout_rate, num_layers))
if mode == 'LSTM':
self.gate_size = 4 * hidden_size
elif mode == 'GRU':
self.gate_size = 3 * hidden_size
elif mode == 'RNN_TANH':
self.gate_size = hidden_size
elif mode == 'RNN_RELU':
self.gate_size = hidden_size
else:
raise ValueError("Unrecognized RNN mode: " + mode)
self._flat_weights_names = []
self._all_weights = []
@property
def batch_first(self):
return self._batch_first
@batch_first.setter
def batch_first(self, value: bool):
if self._batch_first != value:
self._batch_first = value
if not self._batch_first:
self.batch_index = 1
else:
self.batch_index = 0
def clear_state(self):
self.hx = None
def initial_state(self,input) :
max_batch_size = input.size(0) if self.batch_first else input.size(1)
num_directions = 2 if self.bidirectional else 1
h_zeros=torch.zeros(self.num_layers * num_directions,
max_batch_size, self.hidden_size,
dtype=input.dtype, device=input.device).to(get_device())
c_zeros= torch.zeros(self.num_layers * num_directions,
max_batch_size, self.hidden_size,
dtype=input.dtype, device=input.device).to(get_device())
if self.stateful:
self.hx = (h_zeros, c_zeros)
return self.hx
else:
return (h_zeros, c_zeros)
def build(self, input_shape:TensorShape):
if not self._built:
for layer in range(self.num_layers):
for direction in range(self.num_directions):
layer_input_size = input_shape[-1] if layer == 0 else self.hidden_size * self.num_directions
w_ih = Parameter(torch.Tensor(self.gate_size, layer_input_size).to(get_device()))
w_hh = Parameter(torch.Tensor(self.gate_size, self.hidden_size).to(get_device()))
b_ih = Parameter(torch.Tensor(self.gate_size).to(get_device()))
# Second bias vector included for CuDNN compatibility. Only one
# bias vector is needed in standard definition.
b_hh = Parameter(torch.Tensor(self.gate_size).to(get_device()))
layer_params = (w_ih, w_hh, b_ih, b_hh)
suffix = '_reverse' if direction == 1 else ''
param_names = ['weight_ih_l{}{}', 'weight_hh_l{}{}']
if self.use_bias:
param_names += ['bias_ih_l{}{}', 'bias_hh_l{}{}']
param_names = [x.format(layer, suffix) for x in param_names]
for name, param in zip(param_names, layer_params):
if hasattr(self, "_flat_weights_names") and name in self._flat_weights_names:
# keep self._flat_weights up to date if you do self.weight = ...
idx = self._flat_weights_names.index(name)
self._flat_weights[idx] = param
self.register_parameter(name, param)
self._flat_weights_names.extend(param_names)
self._all_weights.append(param_names)
            self._flat_weights = [getattr(self, wn, None) for wn in self._flat_weights_names]
self.flatten_parameters()
stdv = 1.0 / math.sqrt(self.hidden_size)
for weight in self.parameters():
init.uniform_(weight, -stdv, stdv)
# self.reset_parameters()
# def __setattr__(self, attr, value):
# if hasattr(self, "_flat_weights_names") and attr in self._flat_weights_names:
# # keep self._flat_weights up to date if you do self.weight = ...
# self.register_parameter(attr, value)
# idx = self._flat_weights_names.index(attr)
# self._flat_weights[idx] = value
# #super(RNNBase, self).__setattr__(attr, value)
def flatten_parameters(self) -> None:
"""Resets parameter data pointer so that they can use faster code paths.
Right now, this works only if the module is on the GPU and cuDNN is enabled.
Otherwise, it's a no-op.
"""
# Short-circuits if _flat_weights is only partially instantiated
if len(self._flat_weights) != len(self._flat_weights_names):
return
for w in self._flat_weights:
if not isinstance(w, Tensor):
return
# Short-circuits if any tensor in self._flat_weights is not acceptable to cuDNN
# or the tensors in _flat_weights are of different dtypes
first_fw = self._flat_weights[0]
dtype = first_fw.dtype
for fw in self._flat_weights:
if (not isinstance(fw.data, Tensor) or not (fw.data.dtype == dtype) or
not fw.data.is_cuda or
not torch.backends.cudnn.is_acceptable(fw.data)):
return
# If any parameters alias, we fall back to the slower, copying code path. This is
# a sufficient check, because overlapping parameter buffers that don't completely
# alias would break the assumptions of the uniqueness check in
# Module.named_parameters().
unique_data_ptrs = set(p.data_ptr() for p in self._flat_weights)
if len(unique_data_ptrs) != len(self._flat_weights):
return
with torch.cuda.device_of(first_fw):
import torch.backends.cudnn.rnn as rnn
# Note: no_grad() is necessary since _cudnn_rnn_flatten_weight is
# an inplace operation on self._flat_weights
with torch.no_grad():
if torch._use_cudnn_rnn_flatten_weight():
num_weights = 4 if self.use_bias else 2
if self.proj_size > 0:
num_weights += 1
torch._cudnn_rnn_flatten_weight(
self._flat_weights, num_weights,
self.input_filters, rnn.get_cudnn_mode(self.mode), self.hidden_size, self.proj_size,self.num_layers,self.batch_first, bool(self.bidirectional))
def _apply(self, fn):
ret = super(RNNBase, self)._apply(fn)
if self.built:
# Resets _flat_weights
# Note: be v. careful before removing this, as 3rd party device types
# likely rely on this behavior to properly .to() modules like LSTM.
self._flat_weights = [(lambda wn: getattr(self, wn) if hasattr(self, wn) else None)(wn) for wn in self._flat_weights_names]
# Flattens params (on CUDA)
self.flatten_parameters()
return ret
def reset_parameters(self) -> None:
stdv = 1.0 / math.sqrt(self.hidden_size)
for weight in self.parameters():
init.uniform_(weight, -stdv, stdv)
def check_input(self, input: Tensor, batch_sizes: Optional[Tensor]) -> None:
expected_input_dim = 2 if batch_sizes is not None else 3
if input.dim() != expected_input_dim:
raise RuntimeError(
'input must have {} dimensions, got {}'.format(
expected_input_dim, input.dim()))
if self.input_filters != input.size(-1):
raise RuntimeError(
'input.size(-1) must be equal to input_filters. Expected {}, got {}'.format(
self.input_filters, input.size(-1)))
def get_expected_hidden_size(self, input: Tensor, batch_sizes: Optional[Tensor]) -> Tuple[int, int, int]:
if batch_sizes is not None:
mini_batch = int(batch_sizes[0])
else:
mini_batch = input.size(0) if self.batch_first else input.size(1)
num_directions = 2 if self.bidirectional else 1
if self.proj_size > 0:
expected_hidden_size = (self.num_layers * num_directions,
mini_batch, self.proj_size)
else:
expected_hidden_size = (self.num_layers * num_directions,
mini_batch, self.hidden_size)
return expected_hidden_size
def check_hidden_size(self, hx: Tensor, expected_hidden_size: Tuple[int, int, int],
msg: str = 'Expected hidden size {}, got {}') -> None:
if hx.size() != expected_hidden_size:
raise RuntimeError(msg.format(expected_hidden_size, tuple(hx.size())))
def check_forward_args(self, input: Tensor, hidden: Tensor, batch_sizes: Optional[Tensor]):
self.check_input(input, batch_sizes)
expected_hidden_size = self.get_expected_hidden_size(input, batch_sizes)
self.check_hidden_size(hidden, expected_hidden_size)
def permute_hidden(self, hx: Tensor, permutation: Optional[Tensor]):
if permutation is None:
return hx
return apply_permutation(hx, permutation)
def forward(self,
input: Union[Tensor, PackedSequence],
hx: Optional[Tensor] = None) -> Tuple[Union[Tensor, PackedSequence], Tensor]:
is_packed = isinstance(input, PackedSequence)
if is_packed:
input, batch_sizes, sorted_indices, unsorted_indices = input
max_batch_size = int(batch_sizes[0])
else:
input = cast(Tensor, input)
batch_sizes = None
max_batch_size = input.size(0) if self.batch_first else input.size(1)
sorted_indices = None
unsorted_indices = None
if hx is None:
input = cast(Tensor, input)
num_directions = 2 if self.bidirectional else 1
hx = torch.zeros(self.num_layers * num_directions,
max_batch_size, self.hidden_size,
dtype=input.dtype, device=input.device)
else:
# Each batch of the hidden state should match the input sequence that
# the user believes he/she is passing in.
hx = self.permute_hidden(hx, sorted_indices)
assert hx is not None
input = cast(Tensor, input)
self.check_forward_args(input, hx, batch_sizes)
_impl = _rnn_impls[self.mode]
if batch_sizes is None:
result = _impl(input, hx, self._flat_weights, self.bias, self.num_layers,
self.dropout, self.training, self.bidirectional, self.batch_first)
else:
result = _impl(input, batch_sizes, hx, self._flat_weights, self.bias,
self.num_layers, self.dropout, self.training, self.bidirectional)
output: Union[Tensor, PackedSequence]
output = result[0]
hidden = result[1]
if is_packed:
output = PackedSequence(output, batch_sizes, sorted_indices, unsorted_indices)
return output, self.permute_hidden(hidden, unsorted_indices)
def extra_repr(self) -> str:
s = '{input_filters}, {hidden_size}'
if self.num_layers != 1:
s += ', num_layers={num_layers}'
if self.use_bias is not True:
s += ', use_bias={use_bias}'
if self.batch_first is not False:
s += ', batch_first={batch_first}'
if self.dropout_rate != 0:
s += ', dropout_rate={dropout_rate}'
if self.bidirectional is not False:
s += ', bidirectional={bidirectional}'
return s.format(**self.__dict__)
def __setstate__(self, d):
super(RNNBase, self).__setstate__(d)
if 'all_weights' in d:
self._all_weights = d['all_weights']
if isinstance(self._all_weights[0][0], str):
return
num_layers = self.num_layers
num_directions = 2 if self.bidirectional else 1
self._flat_weights_names = []
self._all_weights = []
for layer in range(num_layers):
for direction in range(num_directions):
suffix = '_reverse' if direction == 1 else ''
weights = ['weight_ih_l{}{}', 'weight_hh_l{}{}', 'bias_ih_l{}{}', 'bias_hh_l{}{}']
weights = [x.format(layer, suffix) for x in weights]
if self.use_bias:
self._all_weights += [weights]
self._flat_weights_names.extend(weights)
else:
self._all_weights += [weights[:2]]
self._flat_weights_names.extend(weights[:2])
self._flat_weights = [(lambda wn: getattr(self, wn) if hasattr(self, wn) else None)(wn) for wn in self._flat_weights_names]
@property
def all_weights(self) -> List[Parameter]:
return [[getattr(self, weight) for weight in weights] for weights in self._all_weights]
def _replicate_for_data_parallel(self):
replica = super(RNNBase, self)._replicate_for_data_parallel()
# Need to copy these caches, otherwise the replica will share the same
# flat weights list.
replica._flat_weights = replica._flat_weights[:]
replica._flat_weights_names = replica._flat_weights_names[:]
return replica
class RNN(RNNBase):
r"""Applies a multi-layer Elman RNN with :math:`\tanh` or :math:`\text{ReLU}` non-linearity to an
input sequence.
For each element in the input sequence, each layer computes the following
function:
.. math::
h_t = \tanh(W_{ih} x_t + b_{ih} + W_{hh} h_{(t-1)} + b_{hh})
where :math:`h_t` is the hidden state at time `t`, :math:`x_t` is
the input at time `t`, and :math:`h_{(t-1)}` is the hidden state of the
previous layer at time `t-1` or the initial hidden state at time `0`.
If :attr:`nonlinearity` is ``'relu'``, then :math:`\text{ReLU}` is used instead of :math:`\tanh`.
Args:
input_filters: The number of expected features in the input `x`
hidden_size: The number of features in the hidden state `h`
num_layers: Number of recurrent layers. E.g., setting ``num_layers=2``
would mean stacking two RNNs together to form a `stacked RNN`,
with the second RNN taking in outputs of the first RNN and
computing the final results. Default: 1
nonlinearity: The non-linearity to use. Can be either ``'tanh'`` or ``'relu'``. Default: ``'tanh'``
bias: If ``False``, then the layer does not use bias weights `b_ih` and `b_hh`.
Default: ``True``
batch_first: If ``True``, then the input and output tensors are provided
as `(batch, seq, feature)`. Default: ``False``
dropout: If non-zero, introduces a `Dropout` layer on the outputs of each
RNN layer except the last layer, with dropout probability equal to
:attr:`dropout`. Default: 0
bidirectional: If ``True``, becomes a bidirectional RNN. Default: ``False``
Inputs: input, h_0
- **input** of shape `(seq_len, batch, input_filters)`: tensor containing the features
of the input sequence. The input can also be a packed variable length
sequence. See :func:`torch.nn.utils.rnn.pack_padded_sequence`
or :func:`torch.nn.utils.rnn.pack_sequence`
for details.
- **h_0** of shape `(num_layers * num_directions, batch, hidden_size)`: tensor
containing the initial hidden state for each element in the batch.
Defaults to zero if not provided. If the RNN is bidirectional,
num_directions should be 2, else it should be 1.
Outputs: output, h_n
- **output** of shape `(seq_len, batch, num_directions * hidden_size)`: tensor
containing the output features (`h_t`) from the last layer of the RNN,
for each `t`. If a :class:`torch.nn.utils.rnn.PackedSequence` has
been given as the input, the output will also be a packed sequence.
For the unpacked case, the directions can be separated
using ``output.view(seq_len, batch, num_directions, hidden_size)``,
with forward and backward being direction `0` and `1` respectively.
Similarly, the directions can be separated in the packed case.
- **h_n** of shape `(num_layers * num_directions, batch, hidden_size)`: tensor
containing the hidden state for `t = seq_len`.
Like *output*, the layers can be separated using
``h_n.view(num_layers, num_directions, batch, hidden_size)``.
Shape:
- Input1: :math:`(L, N, H_{in})` tensor containing input features where
:math:`H_{in}=\text{input\_size}` and `L` represents a sequence length.
- Input2: :math:`(S, N, H_{out})` tensor
containing the initial hidden state for each element in the batch.
:math:`H_{out}=\text{hidden\_size}`
Defaults to zero if not provided. where :math:`S=\text{num\_layers} * \text{num\_directions}`
If the RNN is bidirectional, num_directions should be 2, else it should be 1.
- Output1: :math:`(L, N, H_{all})` where :math:`H_{all}=\text{num\_directions} * \text{hidden\_size}`
- Output2: :math:`(S, N, H_{out})` tensor containing the next hidden state
for each element in the batch
Attributes:
weight_ih_l[k]: the learnable input-hidden weights of the k-th layer,
of shape `(hidden_size, input_filters)` for `k = 0`. Otherwise, the shape is
`(hidden_size, num_directions * hidden_size)`
weight_hh_l[k]: the learnable hidden-hidden weights of the k-th layer,
of shape `(hidden_size, hidden_size)`
bias_ih_l[k]: the learnable input-hidden bias of the k-th layer,
of shape `(hidden_size)`
bias_hh_l[k]: the learnable hidden-hidden bias of the k-th layer,
of shape `(hidden_size)`
.. note::
All the weights and biases are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`
where :math:`k = \frac{1}{\text{hidden\_size}}`
.. include:: ../cudnn_persistent_rnn.rst
Examples::
>>> rnn = nn.RNN(10, 20, 2)
>>> input = torch.randn(5, 3, 10)
>>> h0 = torch.randn(2, 3, 20)
>>> output, hn = rnn(input, h0)
"""
def __init__(self, *args, **kwargs):
self.nonlinearity = kwargs.pop('nonlinearity', 'tanh')
if self.nonlinearity == 'tanh':
mode = 'RNN_TANH'
elif self.nonlinearity == 'relu':
mode = 'RNN_RELU'
else:
raise ValueError("Unknown nonlinearity '{}'".format(self.nonlinearity))
super(RNN, self).__init__(mode, *args, **kwargs)
# XXX: LSTM and GRU implementation is different from RNNBase, this is because:
# 1. we want to support nn.LSTM and nn.GRU in TorchScript and TorchScript in
# its current state could not support the python Union Type or Any Type
# 2. TorchScript static typing does not allow a Function or Callable type in
# Dict values, so we have to separately call _VF instead of using _rnn_impls
# 3. This is temporary only and in the transition state that we want to make it
# on time for the release
#
# More discussion details in https://github.com/pytorch/pytorch/pull/23266
#
# TODO: remove the overriding implementations for LSTM and GRU when TorchScript
# support expressing these two modules generally.
class LSTM(RNNBase):
r"""Applies a multi-layer long short-term memory (LSTM) RNN to an input
sequence.
For each element in the input sequence, each layer computes the following
function:
.. math::
\begin{array}{ll} \\
i_t = \sigma(W_{ii} x_t + b_{ii} + W_{hi} h_{t-1} + b_{hi}) \\
f_t = \sigma(W_{if} x_t + b_{if} + W_{hf} h_{t-1} + b_{hf}) \\
g_t = \tanh(W_{ig} x_t + b_{ig} + W_{hg} h_{t-1} + b_{hg}) \\
o_t = \sigma(W_{io} x_t + b_{io} + W_{ho} h_{t-1} + b_{ho}) \\
c_t = f_t \odot c_{t-1} + i_t \odot g_t \\
h_t = o_t \odot \tanh(c_t) \\
\end{array}
where :math:`h_t` is the hidden state at time `t`, :math:`c_t` is the cell
state at time `t`, :math:`x_t` is the input at time `t`, :math:`h_{t-1}`
is the hidden state of the layer at time `t-1` or the initial hidden
state at time `0`, and :math:`i_t`, :math:`f_t`, :math:`g_t`,
:math:`o_t` are the input, forget, cell, and output gates, respectively.
:math:`\sigma` is the sigmoid function, and :math:`\odot` is the Hadamard product.
In a multilayer LSTM, the input :math:`x^{(l)}_t` of the :math:`l` -th layer
(:math:`l >= 2`) is the hidden state :math:`h^{(l-1)}_t` of the previous layer multiplied by
dropout :math:`\delta^{(l-1)}_t` where each :math:`\delta^{(l-1)}_t` is a Bernoulli random
variable which is :math:`0` with probability :attr:`dropout`.
Args:
input_filters: The number of expected features in the input `x`
hidden_size: The number of features in the hidden state `h`
num_layers: Number of recurrent layers. E.g., setting ``num_layers=2``
would mean stacking two LSTMs together to form a `stacked LSTM`,
with the second LSTM taking in outputs of the first LSTM and
computing the final results. Default: 1
bias: If ``False``, then the layer does not use bias weights `b_ih` and `b_hh`.
Default: ``True``
batch_first: If ``True``, then the input and output tensors are provided
as (batch, seq, feature). Default: ``False``
dropout: If non-zero, introduces a `Dropout` layer on the outputs of each
LSTM layer except the last layer, with dropout probability equal to
:attr:`dropout`. Default: 0
bidirectional: If ``True``, becomes a bidirectional LSTM. Default: ``False``
Inputs: input, (h_0, c_0)
- **input** of shape `(seq_len, batch, input_filters)`: tensor containing the features
of the input sequence.
The input can also be a packed variable length sequence.
See :func:`torch.nn.utils.rnn.pack_padded_sequence` or
:func:`torch.nn.utils.rnn.pack_sequence` for details.
- **h_0** of shape `(num_layers * num_directions, batch, hidden_size)`: tensor
containing the initial hidden state for each element in the batch.
If the LSTM is bidirectional, num_directions should be 2, else it should be 1.
- **c_0** of shape `(num_layers * num_directions, batch, hidden_size)`: tensor
containing the initial cell state for each element in the batch.
If `(h_0, c_0)` is not provided, both **h_0** and **c_0** default to zero.
Outputs: output, (h_n, c_n)
- **output** of shape `(seq_len, batch, num_directions * hidden_size)`: tensor
containing the output features `(h_t)` from the last layer of the LSTM,
for each `t`. If a :class:`torch.nn.utils.rnn.PackedSequence` has been
given as the input, the output will also be a packed sequence.
For the unpacked case, the directions can be separated
using ``output.view(seq_len, batch, num_directions, hidden_size)``,
with forward and backward being direction `0` and `1` respectively.
Similarly, the directions can be separated in the packed case.
- **h_n** of shape `(num_layers * num_directions, batch, hidden_size)`: tensor
containing the hidden state for `t = seq_len`.
Like *output*, the layers can be separated using
``h_n.view(num_layers, num_directions, batch, hidden_size)`` and similarly for *c_n*.
- **c_n** of shape `(num_layers * num_directions, batch, hidden_size)`: tensor
containing the cell state for `t = seq_len`.
Attributes:
weight_ih_l[k] : the learnable input-hidden weights of the :math:`\text{k}^{th}` layer
`(W_ii|W_if|W_ig|W_io)`, of shape `(4*hidden_size, input_filters)` for `k = 0`.
Otherwise, the shape is `(4*hidden_size, num_directions * hidden_size)`
weight_hh_l[k] : the learnable hidden-hidden weights of the :math:`\text{k}^{th}` layer
`(W_hi|W_hf|W_hg|W_ho)`, of shape `(4*hidden_size, hidden_size)`
bias_ih_l[k] : the learnable input-hidden bias of the :math:`\text{k}^{th}` layer
`(b_ii|b_if|b_ig|b_io)`, of shape `(4*hidden_size)`
bias_hh_l[k] : the learnable hidden-hidden bias of the :math:`\text{k}^{th}` layer
`(b_hi|b_hf|b_hg|b_ho)`, of shape `(4*hidden_size)`
.. note::
All the weights and biases are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`
where :math:`k = \frac{1}{\text{hidden\_size}}`
.. include:: ../cudnn_persistent_rnn.rst
Examples::
>>> rnn = nn.LSTM(10, 20, 2)
>>> input = torch.randn(5, 3, 10)
>>> h0 = torch.randn(2, 3, 20)
>>> c0 = torch.randn(2, 3, 20)
>>> output, (hn, cn) = rnn(input, (h0, c0))
"""
    def __init__(self, hidden_size, proj_size=0, num_layers: int = 2, stateful=False, use_bias=False,
                 use_attention=False, attention_size=16, batch_first=False, dropout_rate=0,
                 bidirectional=False, keep_output=False, name=None, **kwargs):
        super(LSTM, self).__init__(mode='LSTM', hidden_size=hidden_size, proj_size=proj_size,
                                   num_layers=num_layers, stateful=stateful, use_bias=use_bias,
                                   batch_first=batch_first, dropout_rate=dropout_rate,
                                   bidirectional=bidirectional, keep_output=keep_output,
                                   in_sequence=True, filter_index=-1, name=name)
        self.use_attention = use_attention
        self.attention_size = attention_size
def get_expected_cell_size(self, input: Tensor, batch_sizes: Optional[Tensor]) -> Tuple[int, int, int]:
if batch_sizes is not None:
mini_batch = int(batch_sizes[0])
else:
mini_batch = input.size(0) if self.batch_first else input.size(1)
num_directions = 2 if self.bidirectional else 1
expected_hidden_size = (self.num_layers * num_directions,
mini_batch, self.hidden_size)
return expected_hidden_size
def check_forward_args(self, input: Tensor, hidden: Tuple[Tensor, Tensor], batch_sizes: Optional[Tensor]):
self.check_input(input, batch_sizes)
expected_hidden_size = self.get_expected_hidden_size(input, batch_sizes)
try:
self.check_hidden_size(hidden[0], self.get_expected_hidden_size(input, batch_sizes),
'Expected hidden[0] size {}, got {}')
self.check_hidden_size(hidden[1], self.get_expected_cell_size(input, batch_sizes),
'Expected hidden[1] size {}, got {}')
        except Exception:
            # size mismatch: re-initialize the hidden/cell state from the input
            self.initial_state(input)
def permute_hidden(self, hx: Tuple[Tensor, Tensor], permutation: Optional[Tensor]) -> Tuple[Tensor, Tensor]:
if permutation is None:
return hx
return apply_permutation(hx[0], permutation), apply_permutation(hx[1], permutation)
def attention(self, lstm_output):
batch_size, sequence_length, channels = int_shape(lstm_output)
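        # Shape sketch (an assumption: lstm_output is (B, T, C) and attention_size = A):
        #   w_omega: (C, A), u_omega: (A,)
        #   scores are softmax-normalized over the T axis -> alphas: (B, T, 1)
        # The sequence is reweighted elementwise; no sum over T is taken, so the
        # caller still receives a (B, T, C) sequence.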
if not hasattr(self, 'w_omega') or self.w_omega is None:
self.w_omega = Parameter(torch.zeros(channels, self.attention_size).to(get_device()))
self.u_omega = Parameter(torch.zeros(self.attention_size).to(get_device()))
output_reshape = reshape(lstm_output, (-1, channels))
attn_tanh = torch.tanh(torch.mm(output_reshape, self.w_omega))
attn_hidden_layer = torch.mm(attn_tanh, reshape(self.u_omega, [-1, 1]))
exps = reshape(torch.exp(attn_hidden_layer), [-1, sequence_length])
alphas = exps / reshape(torch.sum(exps, 1), [-1, 1])
alphas_reshape = reshape(alphas, [-1, sequence_length, 1])
return lstm_output * alphas_reshape
@overload
@torch._jit_internal._overload_method # noqa: F811
def forward(self, x: Tensor, hx: Optional[Tuple[Tensor, Tensor]] = None
) -> Tuple[Tensor, Tuple[Tensor, Tensor]]: # noqa: F811
pass
@overload
@torch._jit_internal._overload_method # noqa: F811
def forward(self, x:PackedSequence, hx: Optional[Tuple[Tensor, Tensor]] = None
) -> Tuple[PackedSequence, Tuple[Tensor, Tensor]]: # noqa: F811
pass
def forward(self, x, hx=None):
# helper to inject peephole connection if requested
# def peep(x, c, C):
# return x + C * c if use_peepholes else x
#
orig_input = x
is_packed_sequence=isinstance(orig_input, PackedSequence)
self.flatten_parameters()
# xxx: isinstance check needs to be in conditional for TorchScript to compile
if is_packed_sequence:
x, batch_sizes, sorted_indices, unsorted_indices = x
max_batch_size = batch_sizes[0]
max_batch_size = int(max_batch_size)
else:
            if not self.batch_first:
                x = x.transpose(1, 0)
batch_sizes = None #x.size(0) if self.batch_first else x.size(1)
max_batch_size = x.size(0) if self.batch_first else x.size(1)
sorted_indices = None
unsorted_indices = None
        if not self.stateful:
            # if self.hidden_state is None or self.cell_state is None or max_batch_size != int_shape(self.hidden_state)[1]:
            hx = self.initial_state(x)
        elif self.stateful and self.hx is None and hx is None:
            hx = self.initial_state(x)
        elif self.stateful and self.hx is None and hx is not None:
            self.hx = hx
        elif self.stateful and self.hx is not None:
            if batch_sizes is not None:
                mini_batch = int(batch_sizes[0])
            else:
                mini_batch = x.size(0) if self.batch_first else x.size(1)
            # the mini-batch size changed, so the hidden cell needs re-initialization
            if self.hx[0].size(1) != mini_batch:
                hx = self.initial_state(x)
            else:
                hx = self.hx
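        # summary of the branches above: stateless layers get a fresh state every call;
        # stateful layers reuse self.hx unless the batch size changed or an explicit hx was given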
hx = self.permute_hidden(hx, sorted_indices)
self.check_forward_args(x, hx, batch_sizes)
# if not isinstance(x, PackedSequence):
# result = _VF.lstm(x,hx, self._flat_weights, self.use_bias, self.num_layers,
# self.dropout_rate, self.training, self.bidirectional, self.batch_first)
# else:
# result = _VF.lstm(x, batch_sizes, hx, self._flat_weights, self.use_bias,
# self.num_layers, self.dropout_rate, self.training, self.bidirectional)
#
if batch_sizes is None:
result = _VF.lstm(x, hx, self._flat_weights, self.use_bias, self.num_layers,
self.dropout_rate, self.training, self.bidirectional, self.batch_first)
else:
result = _VF.lstm(x, batch_sizes, hx, self._flat_weights, self.use_bias,
self.num_layers, self.dropout_rate, self.training, self.bidirectional)
        output = result[0] if self.batch_first else result[0].permute(1, 0, 2)
if self.use_attention:
output = self.attention(output)
hidden = result[1:]
        hidden = tuple(item.detach() for item in hidden)
if self.stateful:
self.hx = hidden
# xxx: isinstance check needs to be in conditional for TorchScript to compile
if is_packed_sequence:
output_packed = PackedSequence(output, batch_sizes, sorted_indices, unsorted_indices)
return output_packed, self.permute_hidden(hidden, unsorted_indices)
else:
return output, self.permute_hidden(hidden, unsorted_indices)
class LSTMDecoder(Layer):
    def __init__(self, num_chars, embedding_dim, h_size=512, num_layers=2, sequence_length=128,
                 stateful=True, dropout_rate=0.2, bidirectional=False, use_attention=False,
                 attention_size=16, teacher_forcing_ratio=1):
super().__init__()
        self.teacher_forcing_ratio = teacher_forcing_ratio
        self.num_chars = num_chars
        self.embedding_dim = embedding_dim
        self.h_size = h_size
        self.num_layers = num_layers
        self.sequence_length = sequence_length
        self.embedding = Embedding(embedding_dim=embedding_dim, num_embeddings=num_chars, sparse=False, norm_type=2, add_noise=True, noise_intensity=0.12)
        self.lstm = LSTM(hidden_size=h_size, num_layers=num_layers, stateful=stateful, batch_first=False, dropout_rate=dropout_rate, bidirectional=bidirectional, use_attention=use_attention, attention_size=attention_size)
        self.fc_out = Dense(num_chars, use_bias=False, activation=leaky_relu)
        self.softmax = SoftMax(axis=-1)
def forward(self, *x, **kwargs): # noqa: F811
# input = [batch size]
# hidden = [n layers * n directions, batch size, hid dim]
# cell = [n layers * n directions, batch size, hid dim]
# n directions in the decoder will both always be 1, therefore:
# hidden = [n layers, batch size, hid dim]
# context = [n layers, batch size, hid dim]
        x, (self.hidden_state, self.cell_state) = unpack_singleton(x)
        B, N, C = int_shape(x)
outputs =[]
# input = [batch size,1]
        decoder_input = expand_dims(x[:, -1, :], 1)  # shape: (batch_size, 1, input_size)
decoder_hidden = (self.hidden_state, self.cell_state)
# predict recursively
for t in range(self.sequence_length):
decoder_output, decoder_hidden = self.lstm(decoder_input, decoder_hidden)
            outputs.append(self.softmax(self.fc_out(decoder_output.squeeze(1))))
            decoder_input = decoder_output
        return stack(outputs, 1)
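# A minimal usage sketch for LSTMDecoder (hypothetical shapes; assumes the trident-style
# Embedding/Dense/LSTM layers defined and imported in this module):
#   decoder = LSTMDecoder(num_chars=100, embedding_dim=256, h_size=512,
#                         num_layers=2, sequence_length=128)
#   # x: (batch, seq, features), plus the encoder's (hidden_state, cell_state)
#   out = decoder((x, (h, c)))   # -> (batch, sequence_length, num_chars)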
class GRU(RNNBase):
r"""Applies a multi-layer gated recurrent unit (GRU) RNN to an input sequence.
For each element in the input sequence, each layer computes the following
function:
.. math::
\begin{array}{ll}
r_t = \sigma(W_{ir} x_t + b_{ir} + W_{hr} h_{(t-1)} + b_{hr}) \\
z_t = \sigma(W_{iz} x_t + b_{iz} + W_{hz} h_{(t-1)} + b_{hz}) \\
n_t = \tanh(W_{in} x_t + b_{in} + r_t * (W_{hn} h_{(t-1)}+ b_{hn})) \\
h_t = (1 - z_t) * n_t + z_t * h_{(t-1)}
\end{array}
where :math:`h_t` is the hidden state at time `t`, :math:`x_t` is the input
at time `t`, :math:`h_{(t-1)}` is the hidden state of the layer
at time `t-1` or the initial hidden state at time `0`, and :math:`r_t`,
:math:`z_t`, :math:`n_t` are the reset, update, and new gates, respectively.
:math:`\sigma` is the sigmoid function, and :math:`*` is the Hadamard product.
In a multilayer GRU, the input :math:`x^{(l)}_t` of the :math:`l` -th layer
(:math:`l >= 2`) is the hidden state :math:`h^{(l-1)}_t` of the previous layer multiplied by
dropout :math:`\delta^{(l-1)}_t` where each :math:`\delta^{(l-1)}_t` is a Bernoulli random
variable which is :math:`0` with probability :attr:`dropout`.
Args:
input_filters: The number of expected features in the input `x`
hidden_size: The number of features in the hidden state `h`
num_layers: Number of recurrent layers. E.g., setting ``num_layers=2``
would mean stacking two GRUs together to form a `stacked GRU`,
with the second GRU taking in outputs of the first GRU and
computing the final results. Default: 1
bias: If ``False``, then the layer does not use bias weights `b_ih` and `b_hh`.
Default: ``True``
batch_first: If ``True``, then the input and output tensors are provided
as (batch, seq, feature). Default: ``False``
dropout: If non-zero, introduces a `Dropout` layer on the outputs of each
GRU layer except the last layer, with dropout probability equal to
:attr:`dropout`. Default: 0
bidirectional: If ``True``, becomes a bidirectional GRU. Default: ``False``
Inputs: input, h_0
- **input** of shape `(seq_len, batch, input_filters)`: tensor containing the features
of the input sequence. The input can also be a packed variable length
sequence. See :func:`torch.nn.utils.rnn.pack_padded_sequence`
for details.
- **h_0** of shape `(num_layers * num_directions, batch, hidden_size)`: tensor
containing the initial hidden state for each element in the batch.
Defaults to zero if not provided. If the RNN is bidirectional,
num_directions should be 2, else it should be 1.
Outputs: output, h_n
- **output** of shape `(seq_len, batch, num_directions * hidden_size)`: tensor
containing the output features h_t from the last layer of the GRU,
for each `t`. If a :class:`torch.nn.utils.rnn.PackedSequence` has been
given as the input, the output will also be a packed sequence.
For the unpacked case, the directions can be separated
using ``output.view(seq_len, batch, num_directions, hidden_size)``,
with forward and backward being direction `0` and `1` respectively.
Similarly, the directions can be separated in the packed case.
- **h_n** of shape `(num_layers * num_directions, batch, hidden_size)`: tensor
containing the hidden state for `t = seq_len`
Like *output*, the layers can be separated using
``h_n.view(num_layers, num_directions, batch, hidden_size)``.
Shape:
- Input1: :math:`(L, N, H_{in})` tensor containing input features where
:math:`H_{in}=\text{input\_size}` and `L` represents a sequence length.
- Input2: :math:`(S, N, H_{out})` tensor
containing the initial hidden state for each element in the batch.
:math:`H_{out}=\text{hidden\_size}`
Defaults to zero if not provided. where :math:`S=\text{num\_layers} * \text{num\_directions}`
If the RNN is bidirectional, num_directions should be 2, else it should be 1.
- Output1: :math:`(L, N, H_{all})` where :math:`H_{all}=\text{num\_directions} * \text{hidden\_size}`
- Output2: :math:`(S, N, H_{out})` tensor containing the next hidden state
for each element in the batch
Attributes:
weight_ih_l[k] : the learnable input-hidden weights of the :math:`\text{k}^{th}` layer
(W_ir|W_iz|W_in), of shape `(3*hidden_size, input_filters)` for `k = 0`.
Otherwise, the shape is `(3*hidden_size, num_directions * hidden_size)`
weight_hh_l[k] : the learnable hidden-hidden weights of the :math:`\text{k}^{th}` layer
(W_hr|W_hz|W_hn), of shape `(3*hidden_size, hidden_size)`
bias_ih_l[k] : the learnable input-hidden bias of the :math:`\text{k}^{th}` layer
(b_ir|b_iz|b_in), of shape `(3*hidden_size)`
bias_hh_l[k] : the learnable hidden-hidden bias of the :math:`\text{k}^{th}` layer
(b_hr|b_hz|b_hn), of shape `(3*hidden_size)`
.. note::
All the weights and biases are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`
where :math:`k = \frac{1}{\text{hidden\_size}}`
.. include:: ../cudnn_persistent_rnn.rst
Examples::
>>> rnn = nn.GRU(10, 20, 2)
>>> input = torch.randn(5, 3, 10)
>>> h0 = torch.randn(2, 3, 20)
>>> output, hn = rnn(input, h0)
"""
def __init__(self, *args, **kwargs):
super(GRU, self).__init__('GRU', *args, **kwargs)
self.hidden_state = None
        self.stateful = kwargs.get('stateful', False)
    def initial_state(self, input):
max_batch_size = input.size(0) if self.batch_first else input.size(1)
num_directions = 2 if self.bidirectional else 1
self.hidden_state = torch.zeros(self.num_layers * num_directions,
max_batch_size, self.hidden_size,
dtype=self.weights[0].dtype, device=self.weights[0].device, requires_grad=False)
@overload
@torch._jit_internal._overload_method # noqa: F811
def forward(self, input: Tensor, hx: Optional[Tensor] = None) -> Tuple[Tensor, Tensor]: # noqa: F811
pass
@overload
@torch._jit_internal._overload_method # noqa: F811
def forward(self, input: PackedSequence, hx: Optional[Tensor] = None) -> Tuple[PackedSequence, Tensor]: # noqa: F811
pass
    def forward(self, x):  # noqa: F811
orig_input = x
# xxx: isinstance check needs to be in conditional for TorchScript to compile
if isinstance(orig_input, PackedSequence):
            x, batch_sizes, sorted_indices, unsorted_indices = x
max_batch_size = batch_sizes[0]
max_batch_size = int(max_batch_size)
else:
            if not self.batch_first:
                x = x.permute(1, 0, 2)
batch_sizes = None
max_batch_size = x.size(0) if self.batch_first else x.size(1)
sorted_indices = None
unsorted_indices = None
        if not self.stateful or self.hidden_state is None or max_batch_size != int_shape(self.hidden_state)[1]:
            self.initial_state(x)
        else:
            self.hidden_state = self.permute_hidden(self.hidden_state, sorted_indices)
self.check_forward_args(x, self.hidden_state, batch_sizes)
if batch_sizes is None:
result = _VF.gru(x, self.hidden_state, self._flat_weights, self.use_bias, self.num_layers,
self.dropout_rate, self.training, self.bidirectional, self.batch_first)
else:
result = _VF.gru(x, batch_sizes, self.hidden_state, self._flat_weights, self.use_bias,
self.num_layers, self.dropout_rate, self.training, self.bidirectional)
output = result[0]
self.hidden_state = result[1]
# xxx: isinstance check needs to be in conditional for TorchScript to compile
if isinstance(orig_input, PackedSequence):
output_packed = PackedSequence(output, batch_sizes, sorted_indices, unsorted_indices)
return output_packed, self.permute_hidden(self.hidden_state, unsorted_indices)
else:
            if not self.batch_first:
                x = x.permute(1, 0, 2)
return output, self.permute_hidden(self.hidden_state, unsorted_indices) |
from model import RepositoryBase
from github import Github
import os
import git
from pydriller import GitRepository
from configuration import REPOSITORY
from model import RepositorySnapshot
from .progress import GitCloneProgress
from typing import List
import itertools
import math
class GithubRepository(RepositoryBase):
"""Implementation of the Repository protocol that connects to Github and operates on a git repository
"""
def __init__(self, repository_id: str, index: int):
super().__init__(repository_id, index)
self.full_name = repository_id
# reference to PyDriller Repository, will be set after the repository was cloned successfully
self.__internal_repository = None
def checkout(self, download_folder: str) -> 'Repository':
github_interface = Github(REPOSITORY.GITHUB_TOKEN)
try:
# get repository data from github
repository = github_interface.get_repo(self.full_name)
self.name = repository.name
self.description = repository.description
self.key = str(repository.id)
self.language = repository.language
self.home_page = repository.homepage
self.html_url = repository.html_url
clone_url = repository.clone_url
# check if there is already a directory for the repository
self.path = os.path.join(download_folder, self.name)
if os.path.exists(self.path):
print(f'{self.name} already downloaded, skipping...')
else:
os.makedirs(self.path)
# download repository into folder
progress = GitCloneProgress(self.name, self.index)
git.Repo.clone_from(clone_url, self.path, progress=progress)
except Exception as e:
print(f'error while reading repository "{self.full_name}": {str(e)}')
return self
def get_snapshots(self, sample_count: int) -> List[RepositorySnapshot]:
"""
returns all snapshots for the given repository. The sample_count defines
how many commits should be retrieved. If the repository has more commits than
the total sample count, commits will be taken so that their commit number is
evenly distributed. For every commit, a percentage is calculated expressing the time
of this commit within the total project runtime.
If the repository has less commits, all commits will be returned.
:param sample_count:
:return: list of snapshots
"""
# ensure that we are always on main branch before receiving commits
if self.__internal_repository is None:
self.__internal_repository = GitRepository(self.path)
self.__internal_repository.reset()
# calculate sample rate
commit_count = self.__internal_repository.total_commits()
if commit_count < sample_count:
sample_rate = 1
else:
sample_rate = math.floor(commit_count / sample_count)
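        # e.g. commit_count=1000 and sample_count=100 gives sample_rate=10, so every
        # 10th commit is kept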
# get first and last commit to calculate the complete project duration
first = next(self.__internal_repository.get_list_commits(None, reverse=True))
last = next(self.__internal_repository.get_list_commits(None, reverse=False))
time_range = last.committer_date - first.committer_date
        # ensure that the newest commit is always in the list, so do not reverse the order
        # during slicing; the newest one will be the first item, the oldest sample the last
commits = self.__internal_repository.get_list_commits(None, reverse=False)
snapshots = [RepositorySnapshot(
commit.hash,
commit.committer_date,
commit.msg,
(commit.committer_date - first.committer_date) * 100 / time_range,
            sample_count - index)  # subtract the index from the total so the newest commit gets the highest number
for index, commit in enumerate(list(itertools.islice(commits, 0, None, sample_rate)))][:sample_count]
# snapshots are in reverse order, so newest will be analyzed first
return snapshots
def reset(self):
self.__internal_repository.reset()
def update(self, snapshot: RepositorySnapshot):
self.__internal_repository.checkout(snapshot.key)
return self
def factory(repository_id: str, index: int) -> RepositoryBase:
return GithubRepository(repository_id, index) |
# -*- coding: utf-8 -*-
# Copyright 2017 Carlos Dauden <[email protected]>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import fields, models
class ResPartner(models.Model):
_inherit = 'res.partner'
contract_count = fields.Integer(
string='Contracts',
compute='_compute_contract_count',
)
def _compute_contract_count(self):
Contract = self.env['account.analytic.account']
today = fields.Date.today()
for partner in self:
partner.contract_count = Contract.search_count([
('recurring_invoices', '=', True),
('partner_id', '=', partner.id),
('date_start', '<=', today),
'|',
('date_end', '=', False),
('date_end', '>=', today),
])
def act_show_contract(self):
""" This opens contract view
@return: the contract view
"""
self.ensure_one()
res = self.env['ir.actions.act_window'].for_xml_id(
'contract', 'action_account_analytic_overdue_all')
res.update(
context=dict(
self.env.context,
search_default_recurring_invoices=True,
search_default_not_finished=True,
default_partner_id=self.id,
default_recurring_invoices=True,
),
domain=[('partner_id', '=', self.id)],
)
return res
|
from bs4 import BeautifulSoup
from urllib.request import urlopen
class Worldometers():
url = 'https://countrymeters.info/en/World'
# html = requests.get(url).text
html = urlopen(url)
soup = BeautifulSoup(html, 'html.parser')
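    # NOTE: the page is fetched once, at class-definition time; every method below
    # parses this shared snapshot rather than re-requesting the URL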
# World Population
def current_world_population(self, option='total'):
"""
Parses through the HTML page to gather information about the current world population.
:param option: Choose between three types of return values. 'total', the default value for this parameter, returns the total world population. 'male' and 'female' returns the current population for each gender.
:return: Integer with the current population.
"""
if option.lower() == 'total':
return int(Worldometers.soup.find(id='cp1').getText().replace(',', ''))
elif option.lower() == 'male':
return int(Worldometers.soup.find(id='cp2').getText().replace(',', ''))
elif option.lower() == 'female':
return int(Worldometers.soup.find(id='cp3').getText().replace(',', ''))
def population_growth(self, timescale):
"""
Parses through the HTML page to gather information about the population growth based on the chosen timescale.
:param timescale: Choose between two types — 'day' or 'year' — that will return results accordingly.
:return: Integer with the population growth based on the chosen timescale.
"""
if timescale.lower() == 'day':
return int(Worldometers.soup.find(id='cp13').getText().replace(',', ''))
elif timescale.lower() == 'year':
return int(Worldometers.soup.find(id='cp12').getText().replace(',', ''))
def population_history(self):
"""
Parses through the HTML page to gather information about the population history.
:return: Dictionary with the population history in the format {'year': value, 'population': value, 'growth': value}.
"""
population_history = Worldometers.soup.find_all(class_='years')[0]
year_rows = population_history.find_all('tr')
population_history_dict = {}
for x in range(1, len(year_rows)):
columns = year_rows[x].find_all('td')
population_history_dict[x-1] = {
'year': columns[0].getText().replace(',', ''),
'population': columns[1].getText().replace(',', ''),
'growth': columns[2].getText().replace(' %', '')
}
return population_history_dict
def population_projection(self):
"""
Parses through the HTML page to gather information about the population projection.
:return: Dictionary with the population growth in the format {'year': value, 'population': value, 'growth': value}.
"""
population_projection = Worldometers.soup.find_all(class_='years')[1]
year_rows = population_projection.find_all('tr')
population_projection_dict = {}
for x in range(1, len(year_rows)):
columns = year_rows[x].find_all('td')
population_projection_dict[x-1] = {
'year': columns[0].getText().replace(',', ''),
'population': columns[1].getText().replace(',', ''),
'growth': columns[2].getText().replace(' %', '')
}
return population_projection_dict
# Births
def births(self, timescale):
"""
Parses through the HTML page to gather information about births.
:param timescale: Choose between two types — 'day' or 'year' — that will return results accordingly.
:return: Integer with the number of births.
"""
if timescale.lower() == 'year':
return int(Worldometers.soup.find(id='cp6').getText().replace(',', ''))
if timescale.lower() == 'day':
return int(Worldometers.soup.find(id='cp7').getText().replace(',', ''))
# Deaths
def deaths(self, timescale):
"""
        Parses through the HTML page to gather information about deaths based on the chosen timescale.
:param timescale: Choose between two types — 'day' or 'year' — that will return results accordingly.
:return: Integer with the number of deaths.
"""
if timescale.lower() == 'year':
return int(Worldometers.soup.find(id='cp8').getText().replace(',', ''))
if timescale.lower() == 'day':
return int(Worldometers.soup.find(id='cp9').getText().replace(',', ''))
def top_death_causes(self, timescale):
"""
Parses through the HTML page to gather information about top death causes based on the chosen timescale.
:param timescale: Choose between two types — 'day' or 'year' — that will return results accordingly.
        :return: Dictionary mapping each of the top death causes to its estimated number of deaths.
"""
top_deaths = Worldometers.soup.find('div', class_='death_top')
top_deaths = top_deaths.text.replace(" %", "").splitlines()[4:-4]
top_deaths = list(filter(None, top_deaths))
        while 'Connecting . . .' in top_deaths:
            top_deaths.remove('Connecting . . .')
deaths = self.deaths(timescale=timescale)
deaths_in_timescale = []
for element in top_deaths[1::2]:
deaths_in_timescale.append(int(float(element)*(deaths/100)))
top_deaths_causes_timescale = dict(zip(top_deaths[0::2], deaths_in_timescale))
return top_deaths_causes_timescale
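    # A minimal usage sketch (assumes countrymeters.info still serves the element
    # ids parsed above; network access is required at import time):
    #   w = Worldometers()
    #   w.current_world_population('total')   # -> int
    #   w.births('day')                       # -> int
    #   w.top_death_causes('year')            # -> {cause: estimated deaths}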
|
"""The package metadata."""
__version__ = '0.1.5'
__url__ = 'https://github.com/sublee/teebee'
__license__ = 'MIT'
__author__ = 'Heungsub Lee'
__author_email__ = '[email protected]'
__description__ = '1k steps for 1 epoch in TensorBoard'
|
import pandas as pd
from pandas import Series,DataFrame
iris_df=pd.read_csv('iris.csv')
print('---DataFrame---')
print(iris_df)
print('---info---')
iris_df.info()
print('---Functions---')
print(iris_df[['Species','Sepal.Length']].groupby(['Species'],as_index=True).mean())
print("Max sepal length: ",iris_df['Sepal.Length'].max())
|
'''
This module is about domain features.
Author:www.muzixing.com
Date Work
2015/7/29 new this file
'''
from ryu.openexchange.domain.setting import features
from ryu.openexchange.utils import controller_id
from ryu import cfg
CONF = cfg.CONF
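# NOTE: the class below reuses the name of the imported `features` dict. The default
# arguments are evaluated while the dict is still bound, but once this class body has
# run, the module-level name `features` refers to the class instead of the dict.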
class features(object):
def __init__(self,
domain_id=features['domain_id'],
proto_type=features['proto_type'],
sbp_version=features['sbp_version'],
capabilities=features['capabilities']):
self.domain_id = domain_id
self.proto_type = proto_type
self.sbp_version = sbp_version
self.capabilities = capabilities
def set_domain_id(self, domain_id):
if isinstance(domain_id, str):
self.domain_id = controller_id.str_to_dpid(domain_id)
CONF.oxp_domain_id = controller_id.str_to_dpid(domain_id)
def set_proto_type(self, proto_type):
self.proto_type = proto_type
CONF.sbp_proto_type = proto_type
def set_version(self, version):
self.sbp_version = version
CONF.sbp_proto_version = version
def set_capabilities(self, capabilities):
self.capabilities = capabilities
CONF.oxp_capabilities = capabilities
def set_features(self,
domain_id=features['domain_id'],
proto_type=features['proto_type'],
sbp_version=features['sbp_version'],
capabilities=features['capabilities']):
self.set_domain_id(domain_id)
self.set_version(sbp_version)
self.set_proto_type(proto_type)
self.set_capabilities(capabilities)
|
#!/usr/bin/env python3
import unittest
import os
import numpy as np
from scipy.io import netcdf
from booz_xform import Booz_xform
TEST_DIR = os.path.join(os.path.dirname(__file__), 'test_files')
class RegressionTest(unittest.TestCase):
def test_regression(self):
configurations = ['circular_tokamak',
'up_down_asymmetric_tokamak',
'li383_1.4m',
'LandremanSenguptaPlunk_section5p3']
for configuration in configurations:
wout_filename = 'wout_' + configuration + '.nc'
boozmn_filename = 'boozmn_' + configuration + '.nc'
boozmn_new_filename = 'boozmn_new_' + configuration + '.nc'
f = netcdf.netcdf_file(os.path.join(TEST_DIR, boozmn_filename),
'r', mmap=False)
b = Booz_xform()
b.read_wout(os.path.join(TEST_DIR, wout_filename))
# Transfer parameters from the reference file to the new
# calculation
b.mboz = f.variables['mboz_b'][()]
b.nboz = f.variables['nboz_b'][()]
b.compute_surfs = f.variables['jlist'][()] - 2
b.run()
# Compare 2D arrays
vars = ['bmnc_b', 'rmnc_b', 'zmns_b', 'numns_b', 'gmnc_b']
asym = bool(f.variables['lasym__logical__'][()])
if asym:
vars += ['bmns_b', 'rmns_b', 'zmnc_b', 'numnc_b', 'gmns_b']
rtol = 1e-12
atol = 1e-12
for var in vars:
# gmnc_b is misspelled in the fortran version
var_ref = var
if var == 'gmnc_b':
var_ref = 'gmn_b'
# Handle the issue that we now use the variable nu,
# whereas the boozmn format uses the variable
# p = -nu.
sign = 1
if var[:2] == 'nu':
sign = -1
var_ref = 'p' + var[2:]
# Reference values:
arr1 = f.variables[var_ref][()]
# Newly computed values:
arr2 = getattr(b, var).transpose()
print('abs diff in ' + var + ':', np.max(np.abs(arr1 - sign * arr2)))
np.testing.assert_allclose(arr1, sign * arr2,
rtol=rtol, atol=atol)
# Now compare some values written to the boozmn files.
b.write_boozmn(boozmn_new_filename)
f2 = netcdf.netcdf_file(boozmn_new_filename)
vars = f.variables.keys()
# These variables will not match:
exclude = ['rmax_b', 'rmin_b', 'betaxis_b', 'version', 'pres_b', 'beta_b', 'phip_b']
for var in vars:
if var in exclude:
continue
# Reference values:
arr1 = f.variables[var][()]
# Newly computed values:
arr2 = f2.variables[var][()]
print('abs diff in ' + var + ':', np.max(np.abs(arr1 - arr2)))
np.testing.assert_allclose(arr1, arr2,
rtol=rtol, atol=atol)
f.close()
if __name__ == '__main__':
unittest.main()
|
# coding: utf-8
"""json读写插件
"""
import re
import types
import ujson
import requests
from girlfriend.util.lang import args2fields
from girlfriend.util.resource import HTTP_SCHEMA
from girlfriend.plugin.data import (
AbstractDataReader,
AbstractDataWriter
)
from girlfriend.exception import InvalidArgumentException
class JSONReaderPlugin(object):
"""可以从文件或者Web URL中加载json对象,并进行格式转换
支持常见的json文件格式
"""
name = "read_json"
def execute(self, context, *json_readers):
return [reader(context) for reader in json_readers]
JSON_REGEX = re.compile(r"\{.*?\}")
class JSONR(AbstractDataReader):
@args2fields()
def __init__(self, path, style,
record_handler=None, record_filter=None, result_wrapper=None,
variable=None):
"""
:param context 上下文对象
:param path 加载路径,可以是文件路径,也可以是web url
:param style json数据格式,允许三种格式:
1. line: 文件每行是一个json对象
2. array: 文件内容是一个json数组
3. extract:property 文件是一个json对象,但是只提取某一部分进行处理
4. block: 区块,不在同一行
:param record_handler 行处理器,返回的每行都是字典对象,通过该函数可以进行包装
如果返回None,那么将对该行忽略
:param record_filter 行过滤器
:param result_wrapper 对最终结果进行包装
:param variable 结果写入上下文的变量名,如果为None,那么将返回值交给框架自身来保存
"""
pass
def __call__(self, context):
result = []
        # line-by-line loading from a file
if self._style == "line" and not self._path.startswith(HTTP_SCHEMA):
with open(self._path, "r") as f:
for line in f:
line = line.strip()
if not line or line.startswith(("#", "//", ";")):
continue
record = ujson.loads(line)
self._handle_record(record, result.append)
else:
json_content = None
            # load the JSON content from its source
if self._path.startswith(HTTP_SCHEMA):
json_content = requests.get(self._path).text
else:
with open(self._path, "r") as f:
json_content = f.read()
json_content = json_content.strip()
            # read line by line
if self._style == "line":
for line in json_content.splitlines():
line = line.strip()
if not line:
continue
record = ujson.loads(line)
self._handle_record(record, result.append)
            # read block by block
if self._style == "block":
json_buffer = []
in_block = False
for char in json_content:
if char == "{":
in_block = True
json_buffer.append(char)
elif char == "}" and in_block:
json_buffer.append(char)
try:
record = ujson.loads("".join(json_buffer))
except ValueError:
continue
else:
self._handle_record(record, result.append)
json_buffer = []
in_block = False
elif in_block:
json_buffer.append(char)
            # read as a JSON array
elif self._style == "array":
json_array = ujson.loads(json_content)
for record in json_array:
self._handle_record(record, result.append)
            # use the property extractor
elif self._style.startswith("extract:"):
json_obj = ujson.loads(json_content)
keys = self._style[len("extract:"):].split(".")
for key in keys:
json_obj = json_obj[key]
for record in json_obj:
self._handle_record(record, result.append)
return self._handle_result(context, result)
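# A minimal usage sketch (the path and context variable are hypothetical):
#   reader = JSONR("/tmp/records.json", style="extract:data.items",
#                  record_filter=lambda r: r.get("ok"), variable="items")
#   JSONReaderPlugin().execute(context, reader)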
class JSONWriterPlugin(object):
name = "write_json"
def execute(self, context, *json_writers):
for json_writer in json_writers:
json_writer(context)
class JSONW(AbstractDataWriter):
@args2fields()
def __init__(self, path, style, object,
record_handler=None, record_filter=None,
http_method="post", http_field=None, variable=None):
"""
:param path 写入路径,默认为文件路径,如果是HTTP或者HTTPS开头,那么将会POST到对应的地址
:param style 写入格式,line - 按行写入 array - 作为json数组写入 object - 作为单独对象写入
:param table 要操作的对象,可以是具体的对象,也可以是context中的变量名
:param record_handler 行处理器,可以在此进行格式转换,比如把时间对象转换为字符串
:param record_filter 行过滤器
:param http_method http写入方法,默认为POST,可以指定PUT
:param variable 将json写入上下文变量
"""
pass
def __call__(self, context):
if (self._style == "line" and self._path and
not self._path.startswith(HTTP_SCHEMA)):
with open(self._path, "w") as f:
for row in self._object:
row = self._handle_record(row)
f.write(ujson.dumps(row) + "\n")
return
        # the JSON text
json_text = ""
if isinstance(self._object, types.FunctionType):
self._object = self._object(context)
elif isinstance(self._object, types.StringTypes):
self._object = context[self._object]
if self._style == "object":
json_text = ujson.dumps(self._object)
result = []
for row in self._object:
row = self._handle_record(row, result.append)
        # array layout: dump the collected result directly
if self._style == "array":
json_text = ujson.dumps(result)
        # line layout
if self._style == "line":
string_buffer = []
for row in self._object:
row = self._handle_record(row)
string_buffer.append(ujson.dumps(row))
json_text = "\n".join(string_buffer)
if self._path is None:
if self._variable:
context[self._variable] = json_text
return
else:
                raise InvalidArgumentException(
                    u"When path is None, a valid variable must be specified")
if self._path.startswith(HTTP_SCHEMA):
if self._http_method.lower() == "post":
if self._http_field:
requests.post(
self._path, data={self._http_field: json_text})
elif self._style == "line":
requests.post(self._path, data=json_text)
else:
requests.post(self._path, json=json_text)
elif self._http_method.lower() == "put":
requests.put(self._path, json=json_text)
else:
with open(self._path, "w") as f:
f.write(json_text)
if self._variable:
context[self._variable] = json_text
|
from gingerit.gingerit import GingerIt
text = 'The smelt of fliwers bring back memories.'
parser = GingerIt()
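# parse() returns a dict; the corrected sentence is typically under the 'result'
# key, alongside per-token 'corrections'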
print(parser.parse(text)) |
import random
def Header():
print("-=-"*13)
print("Bem vindo ao jogo de forca")
print("-=-"*13)
def forca():
Header()
    # file handling
    # open the word file
with open("/home/felipe/Desktop/python 3 parte 1/palavras.txt", "r") as arq:
palavras = []
        # fill the list, already normalized
for linha in arq:
palavras.append(linha.strip().lower())
    # pick the secret word
palavra_secreta = random.choice(palavras)
    # state variables (note: despite its name, `vivo` becomes True when the player runs out of lives)
vivo = False
vitoria = False
tentativas = 0
    # list comprehension building the list of blanks
lista_palavra = ["_" for letra in palavra_secreta]
    # print the blanks
print(lista_palavra)
    # gameplay loop
while (not vivo and not vitoria):
        # reset the index
index = 0
        # read the player's guess
tentativa = str(input("Escolha uma letra:\n")).lower().strip()
        # the guess is correct
if (tentativa in palavra_secreta):
for letra in palavra_secreta:
if(tentativa == letra):
lista_palavra[index] = letra
index += 1
        # the guess is wrong
else:
tentativas += 1
print(f"Voce errou! vidas:{6-tentativas}")
        # losing condition
vivo = tentativas == 6
        # winning condition
vitoria = "_" not in lista_palavra
        # print the current state of the blanks
print(lista_palavra)
#Footer
print("Fim do jogo, obrigado por jogar!!")
print("-=-"*13)
#main
if __name__ == "__main__":
forca()
|
from __future__ import print_function
import re
import requests
from orionsdk import SwisClient
def main():
# Connect to SWIS
server = 'localhost'
username = 'admin'
password = ''
swis = SwisClient(server, username, password)
engine_id = 1
node_caption = 'example.com'
node_props = {
'IPAddress': '1.1.1.2',
'EngineID': engine_id,
'Caption': node_caption,
'ObjectSubType': 'SNMP',
'Community': 'public',
'SNMPVersion': 2,
'DNS': '',
'SysName': ''
}
# Add node
swis.create('Orion.Nodes', **node_props)
query_results = swis.query('SELECT NodeID FROM Orion.Nodes WHERE Caption = @caption_par', caption_par=node_caption)
node_id = query_results['results'][0]['NodeID']
print('New node with ID {0} created'.format(node_id))
    # Discover and add interfaces
results = swis.invoke('Orion.NPM.Interfaces', 'DiscoverInterfacesOnNode', node_id)
swis.invoke('Orion.NPM.Interfaces', 'AddInterfacesOnNode', node_id, results['DiscoveredInterfaces'], 'AddDefaultPollers')
query_results = swis.query('SELECT InterfaceID FROM Orion.NPM.Interfaces WHERE NodeID = @node_id_par', node_id_par=node_id)
print('Discovered and added {0} interfaces for node with id {1}'.format(len(query_results['results']), node_id))
# Add CBQoS source for every interface
for row in query_results['results']:
props = {
'NodeID': node_id,
'InterfaceID': row['InterfaceID'],
'EngineID': engine_id,
'Enabled': True
}
swis.create('Orion.Netflow.CBQoSSource', **props)
query_results = swis.query('SELECT CBQoSSourceID FROM Orion.Netflow.CBQoSSource WHERE NodeID = @node_id_par', node_id_par=node_id)
print('Added {0} CBQoS sources for node with id {1}'.format(len(query_results['results']), node_id))
if __name__ == '__main__':
main()
|
__all__ = ["VERSION"]
_MAJOR = "0"
_MINOR = "2"
_PATCH = "0.post1"
VERSION = f"{_MAJOR}.{_MINOR}.{_PATCH}"
|
# https://technovechno.com/creating-graphs-in-python-using-matplotlib-flask-framework-pythonanywhere/
# https://stackoverflow.com/questions/50728328/python-how-to-show-matplotlib-in-flask
from flask import Flask, render_template
from graph import build_graph
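# build_graph is assumed to return something the template can place straight into an
# <img src=...> attribute (commonly a base64-encoded PNG data URI)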
app = Flask(__name__)
@app.route('/') # Change URL
def graphs():
    # These coordinates could be stored in a DB
x1 = [0, 1, 2, 3, 4]
y1 = [10, 30, 40, 5, 50]
x2 = [0, 1, 2, 3, 4]
y2 = [50, 30, 20, 10, 50]
x3 = [0, 1, 2, 3, 4]
y3 = [0, 30, 10, 5, 30]
    graph1_url = build_graph(x1, y1)
    graph2_url = build_graph(x2, y2)
    graph3_url = build_graph(x3, y3)
return render_template('graphs.html',
graph1=graph1_url,
graph2=graph2_url,
graph3=graph3_url)
if __name__ == '__main__':
app.debug = True
app.run() |
#!/usr/bin/env python3.6
# -*- encoding: utf-8; py-indent-offset: 2 -*-
from tkinter import *
def create_textfield():
# constants
background_color = 'grey'
font = "none 14 bold"
window = Tk()
text_input = StringVar()
window.geometry("215x250+100+100")
window.title('A Tkinter Text Field')
window.configure(background=background_color)
# text field
textfield = Entry(window, textvariable=text_input, bg='white', font=font)
textfield.grid(row=0, sticky=S+N)
def get_text():
output = 'Your input was:\n\n{}'.format(text_input.get())
return output
def replace_text_field():
text_output_area = Text(window, bg='white', font=font, height=10)
text_output_area.insert(INSERT, get_text())
text_output_area.grid(row=1, sticky=S + N + W + E)
return str(text_input.get())
# Button
button_print = Button(window, text="PRINT", font="none 14 bold", command=replace_text_field)
button_print.grid(row=2, sticky=S+N)
window.mainloop() |
"""
A robot on an infinite grid starts at point (0, 0) and faces north. The robot can receive one of three possible types of commands:
-2: turn left 90 degrees
-1: turn right 90 degrees
1 <= x <= 9: move forward x units
Some of the grid squares are obstacles.
The i-th obstacle is at grid point (obstacles[i][0], obstacles[i][1])
If the robot would try to move onto them, the robot stays on the previous grid square instead (but still continues following the rest of the route.)
Return the square of the maximum Euclidean distance that the robot will be from the origin.
Example 1:
Input: commands = [4,-1,3], obstacles = []
Output: 25
Explanation: robot will go to (3, 4)
Example 2:
Input: commands = [4,-1,4,-2,4], obstacles = [[2,4]]
Output: 65
Explanation: robot will be stuck at (1, 4) before turning left and going to (1, 8)
"""
class Solution(object):
def robotSim(self, commands, obstacles):
"""
:type commands: List[int]
:type obstacles: List[List[int]]
:rtype: int
"""
"""
Method 1:
"""
x, y, direction = 0, 0, 0
max_dist = 0
dx = [0, 1, 0, -1]
dy = [1, 0, -1, 0]
dict_x = {}
dict_y = {}
for obstacle in obstacles:
if obstacle[0] not in dict_x:
dict_x[obstacle[0]] = [obstacle[1]]
else:
dict_x[obstacle[0]].append(obstacle[1])
if obstacle[1] not in dict_y:
dict_y[obstacle[1]] = [obstacle[0]]
else:
dict_y[obstacle[1]].append(obstacle[0])
# direction 0: north, 1: east, 2: south, 3: west
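        # turning is modular arithmetic: facing north (0), command -1 gives
        # (0 + 1) % 4 = 1 (east) and command -2 gives (0 - 1) % 4 = 3 (west)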
for command in commands:
if command == -1:
direction = (direction + 1) % 4
elif command == -2:
direction = (direction - 1) % 4
else:
if (direction == 0 or direction == 2) and x not in dict_x:
y += dy[direction] * command
elif (direction == 1 or direction == 3) and y not in dict_y:
x += dx[direction] * command
else:
while command:
if (direction == 0 or direction == 2) and y + dy[direction] not in dict_x[x]:
y = y + dy[direction]
elif (direction == 1 or direction == 3) and x + dx[direction] not in dict_y[y]:
x = x + dx[direction]
command -= 1
            max_dist = max(max_dist, x ** 2 + y ** 2)
return max_dist |
import boto3
import os
cw = boto3.client('cloudwatch')
s3 = boto3.client('s3')
def save(filename):
print('Saving ' + filename + ' to s3://'+ os.environ['s3bucket'] + '/' + os.environ['s3key'])
    # upload_file returns None on success and raises on failure, so there is no response to print
    s3.upload_file(filename, os.environ['s3bucket'], os.environ['s3key'] + '/output/' + filename.split('/')[-1])
def log_metric(key,value):
response = cw.put_metric_data(
MetricData = [
{
'MetricName': key,
'Dimensions': [
{
'Name': 'jobid',
'Value': os.environ['s3key']
}
],
'Unit': 'None',
'Value': value
},
],
Namespace='FarOpt'
)
print(response) |
import numpy as np
import pandas as pd
from pandas_profiling import ProfileReport
df = pd.read_excel('teste.xlsx')
profile = ProfileReport(df, title="Pandas Profiling Report")
print(profile) |
import core.models
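# The dotted paths in 'detail_fields_need_fields' below are presumably resolved by
# walking attributes; a consumer might use a hypothetical helper like:
#   def resolve_path(obj, path):
#       for attr in path.split('.'):
#           obj = getattr(obj, attr)
#       return obj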
methods = {
'Actor': {
'model': core.models.view_tables.Actor,
'detail_fields': ['Actor Description', 'Status', 'Organization', 'Person', 'Person Organization',
'Systemtool', 'Systemtool description', 'Systemtool type', 'Systemtool vendor',
'Systemtool model', 'Systemtool serial', 'Systemtool version'],
'detail_fields_need_fields': {
'Actor Description': ['description'],
'Status': ['status.description'],
'Organization': ['organization.full_name'],
'Person': ['person.first_name', 'person.last_name'],
'Person Organization': ['person.organization.full_name'],
'Systemtool': ['systemtool.systemtool_name'],
'Systemtool description': ['systemtool.description'],
'Systemtool type': ['systemtool.systemtool_type.description'],
'Systemtool vendor': ['systemtool.vendor_organization.full_name'],
'Systemtool model': ['systemtool.model'],
'Systemtool serial': ['systemtool.serial'],
'Systemtool version': ['systemtool.ver']
},
},
'Inventory': {
'model': core.models.view_tables.Inventory,
'detail_fields': ['Description', 'Owner', 'Operator', 'Lab', 'Status', 'Actor'],
'detail_fields_need_fields': {
'Description': ['description'],
'Owner': ['owner'],
'Operator': ['operator'],
'Lab': ['lab'],
'Status': ['status.description'],
'Actor': ['actor']
},
},
'Material': {
'model': core.models.view_tables.Material,
        'detail_fields': ['Chemical Name', 'Other Names', 'Type', 'Material Class', 'Create Date', 'Last Modification Date', 'Status'],
'detail_fields_need_fields': {
'Chemical Name': ['description'],
'Other Names': ['identifier'],
'Type': ['material_type'],
'Material Class': ['material_class'],
'Create Date': ['add_date'],
'Last Modification Date': ['mod_date'],
'Status': ['status.description']
},
},
'Systemtool': {
'model': core.models.view_tables.Systemtool,
'detail_fields': ['Systemtool Name', 'Systemtool Description', 'Systemtool Type',
'Systemtool Vendor', 'Systemtool Model', 'Systemtool Serial',
'Systemtool Version'],
'detail_fields_need_fields': {
'Systemtool Name': ['systemtool_name'],
'Systemtool Description': ['description'],
'Systemtool Type': ['systemtool_type.description'],
'Systemtool Vendor': ['vendor_organization.full_name'],
'Systemtool Model': ['model'],
'Systemtool Serial': ['serial'],
'Systemtool Version': ['ver']
},
},
'MaterialType': {
'model': core.models.view_tables.MaterialType,
'detail_fields': ['Description', 'Add Date', 'Last Modification Date'],
'detail_fields_need_fields': {
'Description': ['description'],
'Add Date': ['add_date'],
'Last Modification Date': ['mod_date']
},
},
'Organization': {
'model': core.models.view_tables.Organization,
'detail_fields': ['Full Name', 'Short Name', 'Description', 'Address', 'Website',
'Phone', 'Parent Organization', 'Add Date', 'Last Modification Date'],
'detail_fields_need_fields': {
'Full Name': ['full_name'],
'Short Name': ['short_name'],
'Description': ['description'],
'Address': ['address1', 'address2', 'zip', 'city', 'state_province', 'country'],
'Website': ['website_url'],
'Phone': ['phone'],
'Parent Organization': ['parent.full_name'],
'Add Date': ['add_date'],
'Last Modification Date': ['mod_date']
},
},
'Person': {
'model': core.models.view_tables.Person,
'detail_fields': ['Full Name', 'Address', 'Phone', 'Email', 'Title',
'Suffix', 'Organization', 'Added Organization', 'Add Date', 'Last Modification Date'],
'detail_fields_need_fields': {
'Full Name': ['first_name', 'middle_name', 'last_name'],
'Address': ['address1', 'address2', 'zip', 'city', 'state_province', 'country'],
'Phone': ['phone'],
'Email': ['email'],
'Title': ['title'],
'Suffix': ['suffix'],
'Organization': ['organization.full_name'],
'Added Organization': ['added_organization'],
'Add Date': ['add_date'],
'Last Modification Date': ['mod_date']
},
},
'Status': {
'model': core.models.view_tables.Status,
'detail_fields': ['Description', 'Add Date', 'Last Modification Date'],
'detail_fields_need_fields': {
'Description': ['description'],
'Add Date': ['add_date'],
'Last Modification Date': ['mod_date']
},
},
'SystemtoolType': {
'model': core.models.view_tables.SystemtoolType,
'detail_fields': ['Description', 'Add Date', 'Last Modification Date'],
'detail_fields_need_fields': {
'Description': ['description'],
'Add Date': ['add_date'],
'Last Modification Date': ['mod_date']
},
},
'Tag': {
'model': core.models.view_tables.Tag,
'detail_fields': ['Tag Name', 'Description', 'Add Date', 'Last Modification Date',
'Tag Type'],
'detail_fields_need_fields': {
'Tag Name': ['display_text'],
'Description': ['description'],
'Add Date': ['add_date'],
'Last Modification Date': ['mod_date'],
'Tag Type': ['tag_type.type']
},
},
'TagType': {
'model': core.models.view_tables.TagType,
'detail_fields': ['Type', 'Long Description', 'Add Date',
'Last Modification Date'],
'detail_fields_need_fields': {
'Type': ['type'],
'Long Description': ['description'],
'Add Date': ['add_date'],
'Last Modification Date': ['mod_date']
},
},
'UdfDef': {
'model': core.models.view_tables.UdfDef,
'detail_fields': ['Description', 'Value Type',
'Add Date', 'Last Modification Date'],
'detail_fields_need_fields': {
'Description': ['description'],
'Value Type': ['val_type_description'],
'Add Date': ['add_date'],
'Last Modification Date': ['mod_date']
},
},
    'Edocument': {
        'model': core.models.Edocument,
        'detail_fields': ['Title', 'Description', 'Source', 'Status',  # 'Document Type'
                          'Add Date', 'Last Modification Date'],
        'detail_fields_need_fields': {
            'Title': ['title'],
            'Description': ['description'],
            'Source': ['source'],
# 'Document Type':['doc_type_description'],
'Status': ['status.description'],
'Add Date': ['add_date'],
'Last Modification Date': ['mod_date']
},
},
'InventoryMaterial': {
'model': core.models.view_tables.InventoryMaterial,
'detail_fields': ['Description', 'Inventory', 'Material',
'Part Number', 'On hand amount', 'Expiration Date',
                          'Inventory Location', 'Status'],
'detail_fields_need_fields': {
'Description': ['description'],
'Inventory': ['inventory.description'],
            'Material': ['material'],
            'Part Number': ['part_no'],
            'On hand amount': ['onhand_amt'],
            'Expiration Date': ['expiration_date'],
            'Inventory Location': ['location'],
'Status': ['status.description']
},
},
# 'Experiment': {
# 'model': core.models.view_tables.Experiment,
# 'detail_fields': ['Description', 'Status',],
# 'detail_fields_need_fields': {
# 'Description': ['description'],
# 'Status': ['status.description']
# },
# },
'Vessel': {
'model': core.models.view_tables.Vessel,
'detail_fields': ['Plate Name', 'Well Number', 'Status', 'Date Added','Last Modified'],
'detail_fields_need_fields': {
'Plate Name': ['plate_name'],
'Well Number': ['well_number'],
'Status': ['status.description'],
'Date Added': ['add_date'],
'Last Modified': ['mod_date'],
},
},
}
|
"""
Utility file to select GraphNN model as
selected by the user
"""
from nets.molecules_graph_regression.gated_gcn_net import GatedGCNNet
from nets.molecules_graph_regression.gcn_net import GCNNet
from nets.molecules_graph_regression.gat_net import GATNet
from nets.molecules_graph_regression.graphsage_net import GraphSageNet
from nets.molecules_graph_regression.gin_net import GINNet
from nets.molecules_graph_regression.mo_net import MoNet as MoNet_
from nets.molecules_graph_regression.diffpool_net import DiffPoolNet
from nets.molecules_graph_regression.mlp_net import MLPNet
from nets.molecules_graph_regression.autogcn_net import AUTOGCNNet
from nets.molecules_graph_regression.cheb_net import ChebNet
def GatedGCN(net_params):
return GatedGCNNet(net_params)
def GCN(net_params):
return GCNNet(net_params)
def GAT(net_params):
return GATNet(net_params)
def GraphSage(net_params):
return GraphSageNet(net_params)
def GIN(net_params):
return GINNet(net_params)
def MoNet(net_params):
return MoNet_(net_params)
def DiffPool(net_params):
return DiffPoolNet(net_params)
def MLP(net_params):
return MLPNet(net_params)
def AUTOGCN(net_params):
return AUTOGCNNet(net_params)
def CHEB(net_params):
return ChebNet(net_params)
def gnn_model(MODEL_NAME, net_params):
models = {
'GatedGCN': GatedGCN,
'GCN': GCN,
'GAT': GAT,
'GraphSage': GraphSage,
'GIN': GIN,
'MoNet': MoNet,
'DiffPool': DiffPool,
'MLP': MLP,
'AUTOGCN': AUTOGCN,
'ChebNet': CHEB,
}
return models[MODEL_NAME](net_params)
|
import tensorflow as tf
from .gated_conv import conv2d
def ConvLSTM(inputs, prev_state, hidden_size=128, k_size=3, trainable=True, namescope='convlstm'):
# prev_state:([N, H, W, hidden_size], [N, H, W, hidden_size])
# prev_state = (tf.zeros([N, H, W, hidden_size], tf.float32), tf.zeros([N, H, W, hidden_size], tf.float32)) for
# first frame.
    # NOTE: there is no 'type' parameter.
with tf.variable_scope(namescope):
prev_hidden, prev_cell = prev_state
        # the state tensors are channels-last ([N, H, W, C], per the comment
        # above), so stack inputs and hidden state along the channel axis
        stacked_inputs = tf.concat([inputs, prev_hidden], axis=-1)
gates = conv2d(stacked_inputs, hidden_size * 4, k_size=k_size, trainable=trainable, use_bias=True, namescope='Gates')
        # split across the channel dimension
        in_gate, remember_gate, out_gate, cell_gate = tf.split(gates, 4, axis=-1)
# apply sigmoid non linearity
in_gate = tf.sigmoid(in_gate)
remember_gate = tf.sigmoid(remember_gate)
out_gate = tf.sigmoid(out_gate)
# apply tanh non linearity
cell_gate = tf.tanh(cell_gate)
# compute current cell and hidden state
cell = (remember_gate * prev_cell) + (in_gate * cell_gate)
hidden = out_gate * tf.tanh(cell)
        return hidden, cell
#!/usr/bin/env python
'''
wbutil/func.py
Functional-programming-oriented utilities.
Will Badart <[email protected]>
created: JAN 2018
'''
import re
from functools import partial, reduce, wraps
from inspect import signature
from itertools import chain
from typing import Any, Callable, Generic, Iterable, TypeVar, Union
__all__ = [
'autocurry',
'compose',
'partialright',
'starcompose',
'cmap',
'cfilter',
'creduce',
'lmap',
'lfilter',
'identity',
'pred_negate',
]
_ComposeRetT = TypeVar('_ComposeRetT')
_CurryReturnT = TypeVar('_CurryReturnT')
_NAryFunc = Callable[[Iterable[Any]], Iterable[Any]]
_UnaryFunc = Callable[[Any], Any]
_T = TypeVar('_T')
class autocurry(Generic[_CurryReturnT]):
'''
Wraps a callable which it autocurries. Calling the function with fewer than
the total number of parameters yields a partial application, and fulfilling
the parameter list yields a complete application. Avoid complications by
not applying this to functions of variable parameters.
>>> from operator import add
>>> a = autocurry(add)
>>> a(3)(4)
7
'''
def __init__(
self,
func: Callable[..., _CurryReturnT],
*args,
**kwargs) -> None:
self.func = func
try:
self.nparams = len(signature(func).parameters)
except ValueError:
func = getattr(func, 'func', func) # if func is a partial
            prototype = re.search(r'^[\w_]+(\(.*\))', func.__doc__).groups()[0]
prototype = prototype[1:-1]
self.nparams = len(prototype.split(','))
self.args = args
self.keywords = kwargs
def __call__(self, *args, **kwargs) -> Union['autocurry', _CurryReturnT]:
'''
        Partially apply args and kwargs to the wrapped function (full
        application if all parameters have been filled).
'''
pos_args = chain(self.args, args)
keywords = dict(**self.keywords, **kwargs)
if len(self.args + args) == self.nparams:
return self.func(*pos_args, **keywords)
else:
return type(self)(self.func, *pos_args, **keywords)
class compose(Generic[_ComposeRetT]):
'''
Returns a function which calls the argument functions in order from left
to right (first to last). Undefined behavior for non-unary functions.
>>> from functools import partial; from operator import mul
    >>> times2tostr = compose(partial(mul, 2), str)
>>> times2tostr(10)
'20'
'''
def __init__(self, *funcs: _UnaryFunc) -> None:
self.funcs = funcs
def __call__(self, arg: Any) -> _ComposeRetT:
'''Invoke the composed pipeline with the specified argument.'''
return reduce(lambda acc, f: f(acc), self.funcs, arg)
def __reversed__(self) -> 'compose':
'''
Gives a composition with the calling order reversed. Allows compose to
emulate traditional compositional ordering.
'''
funcs = tuple(reversed(self.funcs))
return type(self)(*funcs)
class starcompose(compose):
'''
Returns a function of *args that applies the argument functions in
order from left to right (first to last). All supplied functions must
return an iterable and accept *args.
>>> switch = lambda a, b: (b, a)
>>> tup2str = lambda a, b: (str(a), str(b))
>>> switchandstr = starcompose(switch, tup2str)
>>> switchandstr(1, 2)
('2', '1')
'''
# Overridden to annotate n-ary function support
def __init__(self, *funcs: _NAryFunc) -> None:
super().__init__(*funcs)
def __call__(self, *args: Any) -> Any:
'''Invoke the composed pipeline with unpacking.'''
return reduce(lambda acc, f: f(*acc), self.funcs, args)
class partialright(partial):
'''
Returns a new function with arguments partially applied from right to left.
>>> data = [1, 2, 3, 4]
>>> process_data = partialright(map, data)
>>> stringified = process_data(str)
    >>> list(stringified)
    ['1', '2', '3', '4']
'''
def __call__(self, *args: Any, **kwargs: Any) -> Any:
'''Call self as a function.'''
pos_args = chain(args, reversed(self.args))
keywords = self.keywords.copy()
keywords.update(kwargs)
return self.func(*pos_args, **keywords)
def cmap(iteratee: Callable[[Any], Any]) -> partial:
'''
Curried map. Sugar for partial(map, iteratee).
>>> from string import digits
>>> transform = cmap(int)
>>> list(transform(digits))
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
'''
return partial(map, iteratee)
def cfilter(predicate: Callable[[Any], bool]) -> partial:
'''
Curried filter. Sugar for partial(filter, predicate).
>>> is1or2 = cfilter({1, 2}.__contains__)
>>> list(is1or2(range(10)))
[1, 2]
'''
return partial(filter, predicate)
def creduce(iteratee: Callable[[Any], Any]) -> partial:
'''
Curried reduce. Allows sequence to be supplied later.
>>> from operator import add
>>> mysum = creduce(add)
>>> mysum(range(10)) == sum(range(10))
True
>>> from collections import defaultdict
>>> def byfirst(obj, e):
... obj[e[0]].append(e)
... return obj
...
>>> groupbyfirst = creduce(byfirst)
    >>> groupbyfirst(['Alice', 'Adam', 'Bob'], defaultdict(list))
defaultdict(<class 'list'>, {'A': ['Alice', 'Adam'], 'B': ['Bob']})
'''
return partial(reduce, iteratee)
def lmap(iteratee: Callable[[Any], Any], iterable: Iterable) -> list:
'''
Performs a mapping of `iteratee' over `iterable' and immediately
serializes result into a list.
>>> lmap(str, range(10))
['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
'''
return list(map(iteratee, iterable))
def lfilter(predicate: Callable[[Any], bool], iterable: Iterable) -> list:
'''
Filters `iterable' on `predicate', immediately serializing results into a
list.
>>> odd = lambda x: x % 2 == 1
>>> lfilter(odd, range(10))
[1, 3, 5, 7, 9]
'''
return list(filter(predicate, iterable))
def identity(e: _T) -> _T:
'''
Returns the argument untouched. Useful default iteratee.
>>> list(map(identity, range(10)))
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
'''
return e
def pred_negate(predicate: Callable[..., bool]) -> Callable[..., bool]:
'''
Gives a function which returns the inverse of the argument predicate.
>>> isodd = lambda x: x % 2 == 1
>>> iseven = pred_negate(isodd)
>>> iseven(10)
True
'''
@wraps(predicate)
def _impl(*args, **kwargs):
return not predicate(*args, **kwargs)
return _impl
|
#! /usr/bin/python3
'''
This file is part of Lightning Network Probing Simulator.
Copyright © 2020-2021 University of Luxembourg
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
SPDX-FileType: SOURCE
SPDX-FileCopyrightText: 2020-2021 University of Luxembourg
SPDX-License-Identifier: MIT
'''
'''
Run experiments as described in the paper.
'''
import statistics
from synthetic import generate_hops, probe_hops_direct
from hop import Hop
from plot import plot
def experiment_1(prober, num_target_hops, num_runs_per_experiment, min_num_channels, max_num_channels):
'''
Measure the information gain and probing speed for direct and remote probing.
Generate or choose target hops with various number of channels.
Probe the target hops in direct and remote mode (if prober is provided), using BS and NBS amount choice methods.
Measure and plot the final achieved information gain and probing speed.
Parameters:
- prober: the Prober object (None to run only direct probing on synthetic hops)
- num_target_hops: how many target hops to choose / generate
    - num_runs_per_experiment: how many experiments to run (gain and speed are averaged)
- min_num_channels: the minimal number of channels in hops to consider
- max_num_channels: the maximal number of channels in hops to consider
    Note: if prober is None, only direct probing on synthetic hops is run;
    otherwise, direct and remote probing are run on snapshot hops.
    Jamming variants are exercised internally (after h and g are fully probed without jamming).
Return: None (saves the resulting plots)
'''
print("\n\n**** Running experiment 1 ****")
BITCOIN = 100*1000*1000
MIN_CAPACITY_SYNTHETIC = 0.01 * BITCOIN
MAX_CAPACITY_SYNTHETIC = 10 * BITCOIN
    NUM_CHANNELS_IN_TARGET_HOPS = list(range(min_num_channels, max_num_channels + 1))
# Hops with 5+ channels are very rare in the snapshot.
def run_one_instance_of_experiment_1(jamming, remote_probing, bs):
'''
Run experiment for all numbers of channels with one parameter set.
Yields two lines on two graphs: gains and speeds.
'''
gains = [0 for _ in range(len(NUM_CHANNELS_IN_TARGET_HOPS))]
speeds = [0 for _ in range(len(NUM_CHANNELS_IN_TARGET_HOPS))]
for i, num_channels in enumerate(NUM_CHANNELS_IN_TARGET_HOPS):
#print("\n\nN = ", num_channels)
gain_list, speed_list = [], []
for num_experiment in range(num_runs_per_experiment):
#print(" experiment", num_experiment)
if prober is not None:
# pick target hops from snapshot, probe them in direct and remote modes
target_hops_node_pairs = prober.choose_target_hops_with_n_channels(num_target_hops, num_channels)
target_hops = [prober.lnhopgraph[u][v]["hop"] for (u,v) in target_hops_node_pairs]
else:
# generate target hops, probe them in direct mode
target_hops = generate_hops(num_target_hops, num_channels, MIN_CAPACITY_SYNTHETIC, MAX_CAPACITY_SYNTHETIC)
#print("Selected" if prober is not None else "Generated", len(target_hops), "target hops with", num_channels, "channels.")
if remote_probing:
assert(prober is not None)
gain, speed = prober.probe_hops(target_hops_node_pairs, bs=bs, jamming=jamming)
else:
gain, speed = probe_hops_direct(target_hops, bs=bs, jamming=jamming)
gain_list.append(gain)
speed_list.append(speed)
gains[i] = gain_list
speeds[i] = speed_list
# prepare data for information gains plot
remote_or_direct = "Remote" if remote_probing else "Direct"
bs_or_nbs = "non-optimized" if bs else "optimized"
colors = ["blue", "purple", "red", "orange"]
color = (colors[3] if bs else colors[2]) if remote_probing else (colors[1] if bs else colors[0])
lines = ["-", "--", "-.", ":"]
line = (lines[3] if bs else lines[2]) if remote_probing else (lines[1] if bs else lines[0])
gains_line = (gains, remote_or_direct + " probing",
"-" if not remote_probing else "-.", "blue" if not remote_probing else "red")
speed_line = (speeds, remote_or_direct + ", " + bs_or_nbs, line, color)
return gains_line, speed_line
def run_and_store_result(gains_all_lines, speed_all_lines, pos, jamming, remote_probing, bs):
gains_line, speed_line = run_one_instance_of_experiment_1(jamming, remote_probing, bs)
if pos % 2 == 0:
gains_all_lines[pos // 2] = gains_line
speed_all_lines[pos] = speed_line
from multiprocessing import Process, Manager
procs = []
manager = Manager()
y_gains_lines_vanilla = manager.list([0 for _ in range(2)])
y_gains_lines_jamming = manager.list([0 for _ in range(2)])
y_speed_lines_vanilla = manager.list([0 for _ in range(4)])
y_speed_lines_jamming = manager.list([0 for _ in range(4)])
for i, jamming in enumerate((False, True)):
for j, remote_probing in enumerate((False, True)):
for k, bs in enumerate((False, True)):
gains_results = y_gains_lines_jamming if jamming else y_gains_lines_vanilla
speed_results = y_speed_lines_jamming if jamming else y_speed_lines_vanilla
pos = 2 * j + k
proc = Process(target=run_and_store_result, args=(gains_results, speed_results, pos, jamming, remote_probing, bs, ))
procs.append(proc)
proc.start()
for proc in procs:
proc.join()
targets_source = "snapshot" if prober is not None else "synthetic"
x_label = "\nNumber of channels in target hops\n"
plot(
x_data = NUM_CHANNELS_IN_TARGET_HOPS,
y_data_lists = [y_gains_lines_vanilla, y_gains_lines_jamming],
x_label = x_label,
y_label = "Information gain (share of initial uncertainty)\n",
title = "",#"Information gain\n",
filename = "gains_" + targets_source)
plot(
x_data = NUM_CHANNELS_IN_TARGET_HOPS,
y_data_lists = [y_speed_lines_vanilla, y_speed_lines_jamming],
x_label = x_label,
y_label = "Probing speed (bits / message)\n",
title = "",#"Probing speed\n",
filename = "speed_" + targets_source)
print("\n\n**** Experiment 1 complete ****")
def experiment_2(num_target_hops, num_runs_per_experiment):
'''
Measure the information gain and probing speed for different configurations of a 2-channel hop.
Parameters:
    - num_target_hops: how many target hops to consider
- num_runs_per_experiment: how many times to run each experiment (results are averaged)
Return: None (print resulting stats)
'''
print("\n\n**** Running experiment 2 ****")
CAPACITY_BIG = 2**20
CAPACITY_SMALL = 2**15
BIG_BIG = [CAPACITY_BIG, CAPACITY_BIG]
BIG_SMALL = [CAPACITY_BIG, CAPACITY_SMALL]
SMALL_BIG = [CAPACITY_SMALL, CAPACITY_BIG]
ENABLED_BOTH = [0,1]
ENABLED_FIRST = [0]
ENABLED_SECOND = [1]
ENABLED_NONE = []
def get_hop_2_2():
return Hop(BIG_BIG, ENABLED_BOTH, ENABLED_BOTH)
def get_hop_2_2_big_small():
return Hop(BIG_SMALL, ENABLED_BOTH, ENABLED_BOTH)
def get_hop_2_2_small_big():
return Hop(SMALL_BIG, ENABLED_BOTH, ENABLED_BOTH)
def get_hop_1_1():
return Hop(BIG_BIG, ENABLED_FIRST, ENABLED_SECOND)
def get_hop_1_1_big_small():
return Hop(BIG_SMALL, ENABLED_FIRST, ENABLED_SECOND)
def get_hop_1_1_small_big():
return Hop(SMALL_BIG, ENABLED_FIRST, ENABLED_SECOND)
def get_hop_2_1():
return Hop(BIG_BIG, ENABLED_BOTH, ENABLED_FIRST)
def get_hop_2_1_big_small():
return Hop(BIG_SMALL, ENABLED_BOTH, ENABLED_FIRST)
def get_hop_2_1_small_big():
return Hop(SMALL_BIG, ENABLED_BOTH, ENABLED_FIRST)
def get_hop_2_0():
return Hop(BIG_BIG, ENABLED_BOTH, ENABLED_NONE)
def get_hop_2_0_big_small():
return Hop(BIG_SMALL, ENABLED_BOTH, ENABLED_NONE)
def get_hop_2_0_small_big():
return Hop(SMALL_BIG, ENABLED_BOTH, ENABLED_NONE)
def compare_methods(target_hops):
gain_bs, speed_bs = probe_hops_direct(target_hops, bs = True, jamming = False)
gain_nbs, speed_nbs = probe_hops_direct(target_hops, bs = False, jamming = False)
assert(abs((gain_bs-gain_nbs) / gain_nbs) < 0.05), (gain_bs, gain_nbs)
return gain_nbs, speed_bs, speed_nbs
all_types = [
"2_2", "2_2_big_small", "2_2_small_big",
"1_1", "1_1_big_small", "1_1_small_big",
"2_1", "2_1_big_small", "2_1_small_big",
"2_0", "2_0_big_small", "2_0_small_big"]
def compare_methods_average(hop_type):
print("\nHops of type", hop_type)
if hop_type == "2_2":
get_hop = get_hop_2_2
elif hop_type == "2_2_big_small":
get_hop = get_hop_2_2_big_small
elif hop_type == "2_2_small_big":
get_hop = get_hop_2_2_small_big
elif hop_type == "1_1":
get_hop = get_hop_1_1
elif hop_type == "1_1_big_small":
get_hop = get_hop_1_1_big_small
elif hop_type == "1_1_small_big":
get_hop = get_hop_1_1_small_big
elif hop_type == "2_1":
get_hop = get_hop_2_1
elif hop_type == "2_1_big_small":
print("Big channel enabled in both directions, small channel enabled in one direction")
get_hop = get_hop_2_1_big_small
elif hop_type == "2_1_small_big":
print("Small channel enabled in both directions, big channel enabled in one direction")
get_hop = get_hop_2_1_small_big
elif hop_type == "2_0":
get_hop = get_hop_2_0
elif hop_type == "2_0_big_small":
print("Big channel enabled in both directions, small channel enabled in one direction")
get_hop = get_hop_2_0_big_small
elif hop_type == "2_0_small_big":
print("Small channel enabled in both directions, big channel enabled in one direction")
get_hop = get_hop_2_0_small_big
else:
print("Incorrect hop type:", hop_type)
return
gain_list, speed_bs_list, speed_nbs_list = [], [], []
for _ in range(num_runs_per_experiment):
gain_nbs, speed_bs, speed_nbs = compare_methods([get_hop() for _ in range(num_target_hops)])
gain_list.append(gain_nbs)
speed_bs_list.append(speed_bs)
speed_nbs_list.append(speed_nbs)
print("Gains (mean): ", round(statistics.mean(gain_list),2))
#print(" stdev:", statistics.stdev(gain_list))
speed_bs_mean = statistics.mean(speed_bs_list)
speed_nbs_mean = statistics.mean(speed_nbs_list)
print("Speed BS (mean): ", round(speed_bs_mean,2))
#print(" stdev:", statistics.stdev(speed_bs_list))
print("Speed NBS (mean): ", round(speed_nbs_mean,2))
#print(" stdev:", statistics.stdev(speed_nbs_list))
print("Advantage: ", round((speed_nbs_mean-speed_bs_mean)/speed_bs_mean,2))
for hop_type in all_types:
compare_methods_average(hop_type)
print("\n\n**** Experiment 2 complete ****")
|
#!/usr/bin/env python
import asyncio
import logging
from aiowsio.server import WSIOServer
logger = logging.getLogger("websockets")
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler())
server = WSIOServer("127.0.0.5", 8001)
@server.on("chat message")
async def on_chat_message(client, data):
# broadcast chat message to all clients
await server.emit("chat message", data)
await client.emit("chat message", "your message was sent")
@server.on("sum")
async def on_sum(client, data):
# assume that data is a list of ints
# return sum
return sum(data)
try:
asyncio.get_event_loop().run_until_complete(server)
asyncio.get_event_loop().run_forever()
except KeyboardInterrupt:
pass
finally:
asyncio.get_event_loop().run_until_complete(server.close())
asyncio.get_event_loop().close()
|
from flask_sketch.templates.api import resources
__all__ = ["resources"]
|
"""
Contains functions for post-processing of covariance matrices/blocks.
"""
import glob
import os.path
import time
import healpy as hp
import numpy as np
import pymaster as nmt
def get_mixmat(mask_path, nside, lmax_mix, lmax_out, save_path):
"""
Produce the EE->EE mixing matrix for a given mask.
Args:
mask_path (str): Path to mask fits file, or set to None for full sky.
nside (int): Healpix map resolution to use - input mask will be up/downgraded to this resolution.
lmax_mix (int): Maximum l to include mixing to/from.
lmax_out (int): Maximum l to support output to.
save_path (str): Path to save mixing matrix as a .npz file.
"""
# Load and rescale mask, and calculate fsky
if mask_path is not None:
print('Loading and rescaling mask')
mask = hp.pixelfunc.ud_grade(hp.read_map(mask_path, dtype=float, verbose=False), nside)
else:
print('Full sky')
mask = np.ones(hp.pixelfunc.nside2npix(nside))
fsky = np.mean(mask)
print(f'fsky = {fsky:.3f}')
# Create NaMaster binning scheme as individual Cls
print('Creating binning scheme')
bins = nmt.NmtBin.from_lmax_linear(lmax_mix, 1)
# Calculate full mixing matrix for spin 2-2
print(f'Calculating mixing matrix at {time.strftime("%c")}')
field_spin2 = nmt.NmtField(mask, None, spin=2, lite=True)
workspace_spin22 = nmt.NmtWorkspace()
workspace_spin22.compute_coupling_matrix(field_spin2, field_spin2, bins)
# Extract the relevant block
# For 2-2 there are 4x4 elements per l, ordered EE, EB, BE, BB.
    # We only need EE->EE, so select every 4th row and every 4th column (the EE element of each 4x4 block)
print('Extracting mixing matrix block')
mixmats_spin22 = workspace_spin22.get_coupling_matrix()
mixmat_ee_to_ee = mixmats_spin22[::4, ::4]
# Trim to give lmax_out output
assert mixmat_ee_to_ee.shape == (lmax_mix + 1, lmax_mix + 1)
mixmat_ee_to_ee = mixmat_ee_to_ee[:(lmax_out + 1), :]
# Check shape and save
header = (f'EE->EE mixing matrix indexed as [l1, l2]. Output from {__file__}.get_mixmat for input parameters '
f'mask_path = {mask_path}, nside = {nside}, lmax_mix = {lmax_mix}, lmax_out = {lmax_out} '
f'at {time.strftime("%c")}')
np.savez_compressed(save_path, mixmat_ee_to_ee=mixmat_ee_to_ee, header=header)
print('Saved ' + save_path)
def mix_blocks(input_dir, input_filemask, mixmat_path, output_dir, lmin, lmax_in, lmax_out):
"""
Apply a mixing matrix to all covariance blocks inside the input directory matching the input filemask.
Args:
input_dir (str): Path to input directory.
input_filemask (str): Glob filemask for input files within input directory (excluding path to directory).
mixmat_path (str): Path to mixing matrix.
output_dir (str): Path to output directory.
lmin (int): Minimum l in input covariance blocks.
lmax_in (int): Maximum l in input covariance blocks.
lmax_out (int): Maximum l in output.
"""
n_ell_in = lmax_in - lmin + 1
n_ell_out = lmax_out - lmin + 1
# Load mixing matrix and trim to lmin
print('Loading mixing matrix')
with np.load(mixmat_path) as data:
mixmat = data['mixmat_ee_to_ee'][lmin:, lmin:]
assert mixmat.shape == (n_ell_out, n_ell_in)
# Loop over files matching input mask
input_files = glob.glob(os.path.join(input_dir, input_filemask))
n_files = len(input_files)
    print(f'{n_files} files found')
for file_no, input_path in enumerate(input_files, 1):
# Load unmixed covariance
print(f'{file_no} / {n_files}: Loading')
with np.load(input_path) as data:
cov_unmixed = data['cov']
prev_headers = [data['header'], data['orig_header']]
assert cov_unmixed.shape == (n_ell_in, n_ell_in)
# Apply mixing matrix
print(f'{file_no} / {n_files}: Mixing')
cov_mixed = mixmat @ cov_unmixed @ mixmat.T
assert cov_mixed.shape == (n_ell_out, n_ell_out)
assert np.all(np.isfinite(cov_mixed))
# Save to disk
print(f'{file_no} / {n_files}: Saving')
output_path = input_path.replace(input_dir, output_dir)
header = (f'Output from {__file__}.mix_blocks for input file {input_path}, mixing matrix path {mixmat_path}, '
f'lmin {lmin}, lmax_in {lmax_in}, lmax_out {lmax_out}, at {time.strftime("%c")}.')
np.savez_compressed(output_path, cov_mixed=cov_mixed, header=header, prev_headers=prev_headers)
print(f'{file_no} / {n_files}: Saved {output_path}')
print('Done')
def bin_blocks(input_dir, input_filemask, input_label, binmat_path, output_dir):
"""
Apply a binning matrix to all covariance blocks inside the input directory matching the input filemask.
Args:
input_dir (str): Path to input directory.
input_filemask (str): Glob filemask for input files within input directory (excluding path to directory).
input_label (str): Label for covariance block within input .npz file. Should be 'cov_block' for Cov_G blocks
output by gaussian_cov.get_cov_blocks, or 'cov_mixed' for the output from mix_blocks.
binmat_path (str): Path to binning matrix.
output_dir (str): Path to output directory.
"""
# Load binning matrix
print('Loading binning matrix')
with np.load(binmat_path) as data:
pbl = data['pbl']
# Loop over files matching input mask
input_files = glob.glob(os.path.join(input_dir, input_filemask))
n_files = len(input_files)
    print(f'{n_files} files found')
for file_no, input_path in enumerate(input_files, 1):
# Load unbinned covariance block
print(f'{file_no} / {n_files}: Loading')
with np.load(input_path) as data:
cov_unbinned = data[input_label]
prev_headers = [data['header']]
if 'prev_headers' in data:
prev_headers.extend(data['prev_headers'])
# Apply binning matrix
print(f'{file_no} / {n_files}: Binning')
cov_binned = pbl @ cov_unbinned @ pbl.T
assert np.all(np.isfinite(cov_binned))
# Save to disk
print(f'{file_no} / {n_files}: Saving')
output_path = input_path.replace(input_dir, output_dir)
header = (f'Output from {__file__}.bin_blocks for input file {input_path}, '
f'binning matrix path {binmat_path}, at {time.strftime("%c")}.')
np.savez_compressed(output_path, cov_binned=cov_binned, header=header, prev_headers=prev_headers)
print(f'{file_no} / {n_files}: Saved {output_path}')
print('Done')
def combine_blocks(input_filemask, input_label, save_path, n_spec, n_bp):
"""
Combine covariance blocks into a full covariance matrix.
Args:
input_filemask (str): Filemask for input blocks, with {spec1} and {spec2} placeholders for the indices of the
two power spectra.
input_label (str): Label for covariance block within input .npz file. Should be 'cov_binned' for output from
bin_blocks.
save_path (str): Path to save output covariance matrix to.
n_spec (int): Total number of power spectra.
n_bp (int): Number of bandpowers.
"""
# Preallocate full matrix
n_data = n_spec * n_bp
cov = np.full((n_data, n_data), np.nan)
# Loop over blocks and insert into matrix
for spec1 in range(n_spec):
for spec2 in range(spec1 + 1):
print(f'Loading spec1 {spec1}, spec2 {spec2}')
with np.load(input_filemask.format(spec1=spec1, spec2=spec2)) as data:
block = data[input_label]
cov[(spec1 * n_bp):((spec1 + 1) * n_bp), (spec2 * n_bp):((spec2 + 1) * n_bp)] = block
# Reflect to fill remaining elements, and check symmetric
cov = np.where(np.isnan(cov), cov.T, cov)
assert np.all(np.isfinite(cov))
assert np.allclose(cov, cov.T, atol=0)
# Save to disk
header = f'Output from {__file__}.combine_blocks for input_filemask {input_filemask} at {time.strftime("%c")}'
np.savez_compressed(save_path, cov=cov, header=header)
print('Saved ' + save_path)
def get_composite_covs(cov_g_path, cov_ss_path, cov_cng_path, output_path):
"""
Form composite covariances from different combinations of G, SS and CNG, and save to disk.
Args:
cov_g_path (str): Path to Gaussian covariance.
cov_ss_path (str): Path to super-sample covariance.
cov_cng_path (str): Path to connected non-Gaussian covariance.
output_path (str): Output path, with {label} placeholder which will be replaced by g, g_ss, g_cng or tot.
"""
if '{label}' not in output_path:
raise ValueError('output_path should contain {label} placeholder')
# Load the three covariance contributions
with np.load(cov_g_path) as data:
cov_g = data['cov']
with np.load(cov_ss_path) as data:
cov_ss = data['cov']
with np.load(cov_cng_path) as data:
cov_cng = data['cov']
# Save the different composite covariances to disk
header_base = (f'Output from {__file__}.get_composite_covs for input cov_g_path = {cov_g_path}, '
f'cov_ss_path = {cov_ss_path}, cov_cng_path = {cov_cng_path}, at {time.strftime("%c")}.')
cov_g_save_path = output_path.format(label='g')
np.savez_compressed(cov_g_save_path, cov=cov_g, header=('Cov_G. ' + header_base))
print('Saved ' + cov_g_save_path)
cov_g_ss_save_path = output_path.format(label='g_ss')
np.savez_compressed(cov_g_ss_save_path, cov=(cov_g + cov_ss), header=('Cov_G + Cov_SS. ' + header_base))
print('Saved ' + cov_g_ss_save_path)
cov_g_cng_save_path = output_path.format(label='g_cng')
np.savez_compressed(cov_g_cng_save_path, cov=(cov_g + cov_cng), header=('Cov_G + Cov_CNG. ' + header_base))
print('Saved ' + cov_g_cng_save_path)
cov_tot_save_path = output_path.format(label='tot')
np.savez_compressed(cov_tot_save_path, cov=(cov_g + cov_ss + cov_cng),
header=('Cov_G + Cov_SS + Cov_CNG. ' + header_base))
print('Saved ' + cov_tot_save_path)
|
from __future__ import division, print_function
import os
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
from tqdm import tqdm
import DataManagerNii as DMNII
from vnet import VNet as Net
from logger import Logger
class Model(object):
''' the network model for training, validation and testing '''
dataManagerTrain = None
dataManagerValidation = None
dataManagerTest = None
    min_loss = float('inf')
min_loss_accuracy = 0
max_accuracy = 0
max_accuracy_loss = 0
best_iteration_acc = 0
best_iteration_loss = 0
def __init__(self, params):
self.params = params
self.logger = Logger(__name__, self.params['ModelParams']['dirLog'])
def getValidationLossAndAccuracy(self, model):
'''get the segmentation loss and accuracy of the validation data '''
numpyImages = self.dataManagerValidation.numpyImages
numpyGTs = self.dataManagerValidation.numpyGTs
loss = 0.0
accuracy = 0.0
for key in numpyImages:
_, temp_loss, temp_acc = self.produceSegmentationResult(model, numpyImages[key], numpyGTs[key])
loss += temp_loss
accuracy += temp_acc
return loss / len(numpyImages), accuracy / len(numpyImages)
def getTestResultImage(self, model, numpyImage, numpyGT):
result, loss, accuracy = self.produceSegmentationResult(model, numpyImage, numpyGT)
self.logger.info("loss: {} acc: {}".format(loss, accuracy))
return result
def getTestResultImages(self, model):
''' return the segmentation results of the testing data'''
numpyImages = self.dataManagerTest.numpyImages
numpyGTs = self.dataManagerTest.numpyGTs
ResultImages = dict()
loss = 0.0
accuracy = 0.0
for key in numpyImages:
temp_result, temp_loss, temp_acc = self.produceSegmentationResult(model, numpyImages[key], numpyGTs[key])
loss += temp_loss
accuracy += temp_acc
ResultImages[key] = temp_result
self.logger.info("loss: {} acc: {}".format(loss / len(numpyImages), accuracy / len(numpyImages)))
return ResultImages
def produceSegmentationResult(self, model, numpyImage, numpyGT, calLoss=True):
''' produce the segmentation result, one time one image'''
# model.eval()
# model.cuda()
ori_shape = numpyImage.shape
tempresult = np.zeros(numpyImage.shape, dtype=np.float32)
tempWeight = np.zeros(numpyImage.shape, dtype=np.float32)
height, width, depth = self.params['DataManagerParams']['VolSize']
stride_height, stride_width, stride_depth = self.params['DataManagerParams']['TestStride']
whole_height, whole_width, whole_depth = numpyImage.shape
all_loss = list()
# crop the image
for ystart in list(range(0, whole_height-height, stride_height)) + [whole_height-height]:
for xstart in list(range(0, whole_width-width, stride_width)) + [whole_width-width]:
for zstart in list(range(0, whole_depth-depth, stride_depth)) + [whole_depth-depth]:
slice_index = (
slice(ystart, ystart + height),
slice(xstart, xstart + width),
slice(zstart, zstart + depth)
)
sliced_img = numpyImage[slice_index]
batchData = sliced_img.reshape(1, 1, *sliced_img.shape)
data = torch.from_numpy(batchData).float()
                    # volatile marked inference inputs that do not need gradients
                    # (the flag propagated through the graph as the network computed);
                    # modern PyTorch uses torch.no_grad() for this instead
data = Variable(data).cuda()
output = model(data)
pred = output.max(2)[1]
tempresult[slice_index] = tempresult[slice_index] + pred.cpu().numpy().reshape(*sliced_img.shape)
tempWeight[slice_index] = tempWeight[slice_index] + 1
if calLoss:
sliced_label = numpyGT[slice_index]
batchLabel = sliced_label.reshape(1, 1, *sliced_label.shape)
target = torch.from_numpy(batchLabel)
target = Variable(target).cuda()
target = target.view(1, -1)
temploss = model.dice_loss(output, target).cpu().item()
all_loss.append(temploss)
result = (tempresult / tempWeight)[:ori_shape[0], :ori_shape[1], :ori_shape[2]]
loss = np.mean(all_loss)
accuracy = np.mean(result == numpyGT)
print(result.sum(), numpyGT.sum())
return result, loss, accuracy
def save_checkpoint(self, state, path, prefix, filename='checkpoint.pth.tar'):
''' save the snapshot'''
prefix_save = os.path.join(path, prefix)
name = prefix_save + str(state['iteration']) + '_' + filename
torch.save(state, name)
def trainThread(self, dataQueue, model):
'''train the network and plot the training curve'''
nr_iter = self.params['ModelParams']['epoch'] * self.dataManagerTrain.num
batchsize = self.params['ModelParams']['batchsize']
batchbasesize = (batchsize, 1) + tuple(self.params['DataManagerParams']['VolSize'])
batchData = np.zeros(batchbasesize, dtype=float)
batchLabel = np.zeros(batchbasesize, dtype=float)
test_interval = self.params['ModelParams']['testInterval']
train_interval = self.params['ModelParams']['trainInterval']
train_loss = np.zeros(nr_iter)
train_accuracy = np.zeros(nr_iter // train_interval)
testloss = np.zeros(nr_iter // test_interval)
testaccuracy = np.zeros(nr_iter // test_interval)
tempaccuracy = 0
temptrain_loss = 0
self.logger.info("Build V-Net")
model.train()
model.cuda()
optimizer = optim.Adam(
model.parameters(),
weight_decay=self.params['ModelParams']['weight_decay'],
lr=self.params['ModelParams']['baseLR']
)
for origin_it in range(nr_iter):
for i in range(batchsize):
batchData[i, 0], batchLabel[i, 0] = dataQueue.get()
data = torch.from_numpy(batchData).float()
data = Variable(data).cuda()
target = torch.from_numpy(batchLabel)
target = Variable(target).cuda()
target = target.view(batchsize, -1)
optimizer.zero_grad()
output = model(data)
pred = output.max(2)[1] # get the index of the max log-probability
loss = model.dice_loss(output, target)
# loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
temptrain_loss += loss.cpu().item()
tempaccuracy += pred.eq(target.long()).float().mean().cpu().item()
it = origin_it + 1
if not it % train_interval:
train_report_it = it // train_interval - 1
train_accuracy[train_report_it] = tempaccuracy / train_interval
train_loss[train_report_it] = temptrain_loss / train_interval
self.logger.info(
"training: iter: {} loss: {} acc: {}".format(
self.params['ModelParams']['snapshot'] + it,
train_loss[train_report_it],
train_accuracy[train_report_it]
))
tempaccuracy = 0.0
temptrain_loss = 0.0
if not it % test_interval:
test_report_it = it // test_interval - 1
save_it = self.params['ModelParams']['snapshot'] + it
testloss[test_report_it], testaccuracy[test_report_it] = self.getValidationLossAndAccuracy(model)
if testaccuracy[test_report_it] > self.max_accuracy:
self.max_accuracy = testaccuracy[test_report_it]
self.max_accuracy_loss = testloss[test_report_it]
self.best_iteration_acc = save_it
self.save_checkpoint({'iteration': save_it,
'state_dict': model.state_dict(),
'best_acc': True},
self.params['ModelParams']['dirSnapshots'],
self.params['ModelParams']['tailSnapshots'])
if testloss[test_report_it] < self.min_loss:
self.min_loss = testloss[test_report_it]
self.min_loss_accuracy = testaccuracy[test_report_it]
self.best_iteration_loss = save_it
self.save_checkpoint({'iteration': save_it,
'state_dict': model.state_dict(),
'best_acc': False},
self.params['ModelParams']['dirSnapshots'],
self.params['ModelParams']['tailSnapshots'])
self.logger.info(
"testing: iteration: {} loss: {} accuracy: {}".format(
save_it, testloss[test_report_it], testaccuracy[test_report_it]
))
self.logger.info(
"testing: best_acc: {} loss: {} accuracy: {}".format(
self.best_iteration_acc, self.max_accuracy_loss, self.max_accuracy
))
self.logger.info(
"testing: best_loss: {} loss: {} accuracy: {}".format(
self.best_iteration_loss, self.min_loss, self.min_loss_accuracy
))
def weights_init(self, m):
''' initialize the model'''
classname = m.__class__.__name__
if classname.find('Conv3d') != -1:
nn.init.kaiming_normal_(m.weight)
m.bias.data.zero_()
def train(self, dataManagerTrain, dataQueue):
''' train model'''
# we define here a data manager object
self.logger.info('Start to train model')
self.dataManagerTrain = dataManagerTrain
self.dataManagerValidation = DMNII.DataManagerNii(
self.params['ModelParams']['dirValidation'],
self.params['ModelParams']['dirResult'],
self.params['DataManagerParams']
)
self.dataManagerValidation.loadData()
image_num = gt_num = self.dataManagerTrain.num
self.logger.info("The dataset has shape: data {}. labels: {}".format(image_num, gt_num))
# create the network
model = Net()
# train from scratch or continue from the snapshot
if self.params['ModelParams']['snapshot'] > 0:
self.logger.info("loading checkpoint " + str(self.params['ModelParams']['snapshot']))
prefix_save = os.path.join(
self.params['ModelParams']['dirSnapshots'],
self.params['ModelParams']['tailSnapshots']
)
name = prefix_save + str(self.params['ModelParams']['snapshot']) + '_' + "checkpoint.pth.tar"
checkpoint = torch.load(name)
model.load_state_dict(checkpoint['state_dict'])
self.logger.info("loaded checkpoint " + str(self.params['ModelParams']['snapshot']))
else:
model.apply(self.weights_init)
#plt.ion()
self.trainThread(dataQueue, model)
def test(self, snapnumber):
self.dataManagerTest = DMNII.DataManagerNii(
self.params['ModelParams']['dirTest'],
self.params['ModelParams']['dirResult'],
self.params['DataManagerParams'],
self.params['TestParams']['ProbabilityMap']
)
self.dataManagerTest.loadData()
model = Net()
prefix_save = os.path.join(
self.params['ModelParams']['dirSnapshots'],
self.params['ModelParams']['tailSnapshots']
)
name = prefix_save + str(snapnumber) + '_' + "checkpoint.pth.tar"
checkpoint = torch.load(name)
model.load_state_dict(checkpoint['state_dict'])
model.cuda()
for name in tqdm(self.dataManagerTest.fileList):
print(name)
img = self.dataManagerTest.numpyImages[name]
label = self.dataManagerTest.numpyGTs[name]
result = self.getTestResultImage(model, img, label)
self.dataManagerTest.writeResultsFromNumpyLabel(result, name)
|
class Remote:
    def isClickRight(self):
        pass
class Move:
    def moveRight(self):
        pass
remote1 = Remote()
move1 = Move()
if remote1.isClickRight():
move1.moveRight()
|
#!/usr/bin/env python
# coding: utf-8
# In[5]:
import pandas as pd
from datetime import datetime
import requests
import numpy as np
import re
from alphacast import Alphacast
from dotenv import dotenv_values
API_KEY = dotenv_values(".env").get("API_KEY")
alphacast = Alphacast(API_KEY)
loop = {
1:{"sheet":'3'},
2:{"sheet":'4'},
3:{"sheet":'5'},
4:{"sheet":'6'},
5:{"sheet":'7'},
6:{"sheet":'8'},
7:{"sheet":'9'},
8:{"sheet":'10'},
}
dfFinal = pd.DataFrame()
for key in loop.keys():
df = pd.read_excel("https://www.ine.cl/docs/default-source/ventas-de-servicios/cuadro-estadisticos/base-promedio-a%C3%B1o-2014-100/series-mensuales-desde-enero-2014-a-la-fecha.xls?sfvrsn=2ba936a_54", skiprows=6, sheet_name=loop[key]["sheet"])
df = df.loc[:, ~(df == '/R').any()]
indiceFinal = df[df['Mes y año'] == '/R: Cifras rectificadas'].index[0]
df1 = df.iloc[(indiceFinal+1):,0]
    df = df.dropna(how='all').dropna(how='all', axis=1)
    df = df.dropna(how='all', subset=df.columns[1:])
    def dateWrangler(x):
        # map Spanish month abbreviations (e.g. 'ene-21') to ISO dates
        months = {'ene': '01', 'feb': '02', 'mar': '03', 'abr': '04',
                  'may': '05', 'jun': '06', 'jul': '07', 'ago': '08',
                  'sep': '09', 'oct': '10', 'nov': '11', 'dic': '12'}
        parts = str(x).split('-')
        return '20' + parts[1] + '-' + months[parts[0]] + '-01'
df['Mes y año'] = df['Mes y año'].apply(lambda x: dateWrangler(x))
df = df.rename({'Mes y año': 'Date'}, axis=1)
df['Date']=pd.to_datetime(df['Date'])
df = df.set_index('Date')
dfFinal = dfFinal.merge(df, how='outer', left_index=True, right_index=True)
dfFinal = dfFinal.rename({4921: 'Transporte urbano y suburbano de pasajeros por vía terrestre (Metro de Santiago y RED)', 49225: 'Transporte de pasajeros en buses interurbanos', 4923: 'Transporte de carga por carretera', 50: 'Transporte por vía acuática' ,
51: 'Transporte por vía aérea', 521: 'Almacenamiento y depósito', 52213: 'Servicios prestados por concesionarios de carreteras', 5222: 'Actividades de servicios vinculadas al transporte acuático (puertos)',
5229: 'Otras actividades de apoyo al transporte', 53: 'Actividades postales y de mensajería', 551: 'Actividades de alojamiento para estancias cortas', 561: 'Actividades de restaurantes y de servicio móvil de comidas',
562: 'Suministro de comidas por encargo y otras actividades de servicio de comidas' , 58: 'Actividades de edición', 60: 'Actividades de programación y transmisión',
61: 'Telecomunicaciones', 62: 'Programación informática, consultoría de informática y actividades conexas', 63: 'Actividades de servicios de información', 681: 'Actividades inmobiliarias realizadas con bienes propios o arrendados',
682: 'Actividades inmobiliarias realizadas a cambio de una retribución o por contrata', 691: 'Actividades jurídicas', 692: 'Actividades de contabilidad, teneduría de libros y auditoría; consultoría fiscal',
70: 'Actividades de oficinas principales; actividades de consultoría de gestión', 711: 'Actividades de arquitectura e ingeniería y actividades conexas de consultoría técnica', 731: 'Publicidad',
771: 'Alquiler y arrendamiento de vehículos automotores', 773: 'Alquiler y arrendamiento de otros tipos de maquinaria, equipo y bienes tangibles', 78: 'Actividades de empleo', 80: 'Actividades de seguridad e investigación',
81: 'Actividades de servicios a edificios y de paisajismo', 822: 'Actividades de call-center', 829: 'Actividades de servicios de apoyo a las empresas', 90: 'Actividades creativas, artísticas y de entretenimiento',
92: 'Actividades de juegos de azar y apuestas', 93: 'Actividades deportivas, de esparcimiento y recreativas', 96: 'Otras actividades de servicios personales'
}, axis=1)
dfFinal['country'] = 'Chile'
alphacast.datasets.dataset(314).upload_data_from_df(dfFinal,
deleteMissingFromDB = True, onConflictUpdateDB = True, uploadIndex=True)
|
"""
Model objects for the Glance mimic.
"""
from __future__ import absolute_import, division, unicode_literals
from json import dumps
from uuid import uuid4
import attr
from mimic.util.helper import json_from_request
from six import text_type
random_image_list = [
{"id": text_type(uuid4()), "name": "OnMetal - CentOS 6", "distro": "linux"},
{"id": text_type(uuid4()), "name": "OnMetal - CentOS 7", "distro": "linux"},
{"id": text_type(uuid4()), "name": "OnMetal - CoreOS (Alpha)", "distro": "linux"},
{"id": text_type(uuid4()), "name": "OnMetal - CoreOS (Beta)", "distro": "linux"},
{"id": text_type(uuid4()), "name": "OnMetal - Debian 7 (Wheezy)", "distro": "linux"},
{"id": text_type(uuid4()), "name": "OnMetal - Debian 8 (Jessie)", "distro": "linux"},
{"id": text_type(uuid4()), "name": "OnMetal - Fedora 21", "distro": "linux"},
{"id": text_type(uuid4()), "name": "OnMetal - Fedora 22", "distro": "linux"},
{"id": text_type(uuid4()), "name": "OnMetal - Ubuntu 14.04 LTS (Trusty Tahr)", "distro": "linux"},
{"id": text_type(uuid4()), "name": "OnMetal - CoreOS (Stable)", "distro": "linux"},
{"id": text_type(uuid4()), "name": "OnMetal - Ubuntu 12.04 LTS (Precise Pangolin)",
"distro": "linux"},
{"id": text_type(uuid4()), "name": "Ubuntu 14.04 LTS (Trusty Tahr)", "distro": "linux"},
{"id": text_type(uuid4()), "name": "Ubuntu 15.04 (Vivid Vervet)", "distro": "linux"},
{"id": text_type(uuid4()), "name": "Windows Server 2012 R2", "distro": "windows"}
]
@attr.s
class Image(object):
"""
A Image object
"""
image_id = attr.ib(validator=attr.validators.instance_of(text_type))
name = attr.ib(validator=attr.validators.instance_of(text_type))
distro = attr.ib(validator=attr.validators.instance_of(text_type))
tenant_id = attr.ib(
validator=attr.validators.optional(
attr.validators.instance_of(text_type)),
default=None
)
status = attr.ib(validator=attr.validators.instance_of(text_type),
default='active')
static_server_image_defaults = {
"minRam": 256,
"minDisk": 00,
}
static_glance_defaults = {
"flavor_classes": "*",
"min_ram": 256,
"min_disk": 00,
"container_format": None,
"owner": "00000",
"size": 10000,
"tags": [],
"visibility": "public",
"checksum": "0000",
"protected": False,
"disk_format": None,
"ssh_user": "mimic",
"schema": "/v2/schemas/image",
"auto_disk_config": "disabled",
"virtual_size": None,
"visibility": "public"
}
static_metadata = {
"com.rackspace__1__build_rackconnect": "1",
"com.rackspace__1__options": "0",
"com.rackspace__1__release_id": "000",
"com.rackspace__1__build_core": "1",
"image_type": "base",
"org.openstack__1__os_version": "0.1",
"com.rackspace__1__platform_target": "MimicCloud",
"com.rackspace__1__build_managed": "1",
"org.openstack__1__architecture": "x64",
"com.rackspace__1__visible_core": "1",
"com.rackspace__1__release_build_date": "1972-01-01_15-59-11",
"com.rackspace__1__visible_rackconnect": "1",
"com.rackspace__1__release_version": "1",
"com.rackspace__1__visible_managed": "1",
"cache_in_nova": "True",
"com.rackspace__1__build_config_options": "mimic",
"auto_disk_config": "True",
"com.rackspace__1__source": "kickstart",
"com.rackspace__1__ui_default_show": "True"
}
def links_json(self, absolutize_url):
"""
Create a JSON-serializable data structure describing the links to this
image.
"""
return [
{
"href": absolutize_url("v2/{0}/images/{1}"
.format(self.tenant_id, self.image_id)),
"rel": "self"
},
{
"href": absolutize_url("{0}/images/{1}"
.format(self.tenant_id, self.image_id)),
"rel": "bookmark"
},
{
"href": absolutize_url("/images/{0}".format(self.image_id)),
"rel": "alternate",
"type": "application/vnd.openstack.image"
}
]
def get_server_image_details_json(self, absolutize_url):
"""
JSON-serializable object representation of this image, as
returned by either a GET on this individual image through the
servers api.
"""
template = self.static_server_image_defaults.copy()
template.update({
"id": self.image_id,
"name": self.name,
"status": self.status.upper(),
"links": self.links_json(absolutize_url),
"progress": 100,
"OS-DCF:diskConfig": "AUTO",
"OS-EXT-IMG-SIZE:size": 100000,
"metadata": self.static_metadata,
"created": "1972-01-01_15-59-11",
"updated": "1972-01-01_15-59-11"
})
if self.distro != "windows":
template["metadata"]["os_distro"] = self.distro
return template
def brief_json(self, absolutize_url):
"""
Brief JSON-serializable version of this image.
"""
return {
"name": self.name,
"id": self.image_id,
"links": self.links_json(absolutize_url)
}
def get_glance_admin_image_json(self):
"""
JSON-serializable object representation of this image, as
returned by either a GET on this individual image or a member in the
list returned by the list-details request.
"""
template = self.static_glance_defaults.copy()
template.update(self.static_metadata)
template.update({
"id": self.image_id,
"name": self.name,
"status": self.status,
"created_at": "1972-01-01_15-59-11",
"updated_at": "1972-01-01_15-59-11",
"file": "/v2/images/{0}/file".format(self.image_id),
"self": "/v2/images/" + self.image_id,
"org.openstack__1__os_distro": "mimic." + self.distro,
"os_type": self.distro,
"vm_mode": "onmetal"
})
if self.distro != "windows":
template.update({
"os_distro": self.distro
})
if "OnMetal" in self.name:
template.update({
"vm_mode": "metal",
"flavor_classes": "onmetal"
})
return template
@attr.s
class GlanceAdminImageStore(object):
"""
A collection of :obj:`Image`.
"""
glance_admin_image_store = attr.ib(default=attr.Factory(list))
def image_by_id(self, image_id):
"""
Retrieve a :obj:`Image` object by its ID.
"""
for image in self.glance_admin_image_store:
if image.image_id == image_id:
return image
def add_to_glance_admin_image_store(self, **attributes):
"""
Create a new Image object and add it to the
:obj: `glance_admin_image_store`
"""
image = Image(**dict((str(k), v) for k, v in attributes.items()))
self.glance_admin_image_store.append(image)
return image
def list_images(self):
"""
List all the images for the Glance Admin API.
"""
if not self.glance_admin_image_store:
for each_image in random_image_list:
self.add_to_glance_admin_image_store(
image_id=each_image['id'],
name=each_image['name'],
distro=each_image['distro'])
return {"images": [image.get_glance_admin_image_json()
for image in self.glance_admin_image_store]}
def get_image(self, http_request, image_id):
"""
get image with image_id for the Glance Admin API.
"""
image = self.image_by_id(image_id)
if image:
return image.get_glance_admin_image_json()
http_request.setResponseCode(404)
        return b''
def create_image(self, http_create_request):
"""
Creates a new image with the given request json and returns the image.
        Note: This is more like a control-plane API, as there does not seem
        to be documentation for adding an image under the Glance Admin API.
"""
try:
content = json_from_request(http_create_request)
image_name = content.get('name')
if image_name is None:
raise KeyError("no name supplied")
image_distro = content.get('distro')
if image_distro is None:
raise KeyError("no distro supplied")
except Exception as e:
http_create_request.setResponseCode(400)
return dumps({"Error": text_type(e)})
image_id = text_type(uuid4())
new_image = self.add_to_glance_admin_image_store(
image_id=image_id,
name=image_name,
distro=image_distro)
http_create_request.setResponseCode(201)
return new_image.get_glance_admin_image_json()
def delete_image(self, http_request, image_id):
"""
Deletes the image and returns 204.
        If the image does not exist, returns 404.
Docs: http://bit.ly/1Obujvd
"""
image = self.image_by_id(image_id)
if image:
self.glance_admin_image_store.remove(image)
http_request.setResponseCode(204)
return b''
http_request.setResponseCode(404)
return b''
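# Hypothetical smoke test for the store above (an illustrative sketch, not
# part of the original module): the stub request only needs setResponseCode,
# which is all the store methods call on the request object they receive.
if __name__ == "__main__":
    class _StubRequest(object):
        def __init__(self):
            self.code = None

        def setResponseCode(self, code):
            self.code = code

    store = GlanceAdminImageStore()
    # the first listing seeds the store from random_image_list
    assert store.list_images()["images"]
    request = _StubRequest()
    store.get_image(request, "no-such-id")
    assert request.code == 404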
|
"""
The count-and-say sequence is the sequence of integers whose first five
terms are as follows:
1. 1
2. 11
3. 21
4. 1211
5. 111221
1 is read off as "one 1" or 11.
11 is read off as "two 1s" or 21.
21 is read off as "one 2, then one 1" or 1211.
Given an integer n where 1 ≤ n ≤ 30, generate the nth term of the
count-and-say sequence.
Note: Each term of the sequence of integers will be represented as a string.
"""
import re
class Solution:
"""
Runtime: 32 ms, faster than 98.43% of Python3
Memory Usage: 13.1 MB, less than 78.72% of Python3.
    Algorithm idea: read off the runs of the previous term to generate the next one.
"""
def countAndSay(self, n: int) -> str:
result = '1'
for _ in range(n - 1):
prev = result
result = ''
i = 0
while i < len(prev):
curr_digit = prev[i]
count = 1
i += 1
while i < len(prev) and prev[i] == curr_digit:
count += 1
i += 1
result += f'{count}{curr_digit}'
return result
class Solution2:
"""
    Regexp pattern matching: find all consecutive groups of equal digits.
Runtime: 48 ms, faster than 20.79% of Python3
Memory Usage: 14.5 MB, less than 6.85% of Python3
    Time complexity: exponential in n (each term's length grows by roughly
    Conway's constant, ~1.304, per step); O(2**n) is a loose upper bound.
    https://en.wikipedia.org/wiki/Time_complexity#Superpolynomial_time
    Space complexity: likewise exponential, O(2**(n-1)) as a loose bound.
"""
def countAndSay(self, n: int) -> str:
curr_seq = '1'
        # match runs: a digit (group 2) followed by zero or more repeats of it;
        # the outer parentheses capture the whole run as group 1
        pattern = r'((.)\2*)'
        for _ in range(n - 1):
next_seq = []
for g1, g2 in re.findall(pattern, curr_seq):
# append <count, digit> pair
next_seq.append(str(len(g1)))
next_seq.append(g2)
# prepare next iteration
curr_seq = ''.join(next_seq)
return curr_seq
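# For reference, a compact sketch of the same run-length expansion using
# itertools.groupby (illustrative only; not one of the original solutions):
from itertools import groupby

def count_and_say_groupby(n: int) -> str:
    seq = '1'
    for _ in range(n - 1):
        # each run of equal digits becomes "<run length><digit>"
        seq = ''.join(f'{len(list(group))}{digit}'
                      for digit, group in groupby(seq))
    return seq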
if __name__ == "__main__":
solutions = [Solution(), Solution2()]
tc = [
(1, '1'),
(2, '11'),
        (3, '21'),  # we saw '1' twice in the previous term (11)
        (4, '1211'),  # we saw one '2', then one '1' in the previous term (21)
        (5, '111221'),  # in the previous term (1211): one '1', one '2', then two '1's
(6, '312211'),
(7, '13112221'),
(8, '1113213211'),
(9, '31131211131221'),
(10, '13211311123113112211'),
]
for s in solutions:
for inp, exp in tc:
assert s.countAndSay(inp) == exp
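    # the illustrative groupby sketch above should reproduce the same table
    for inp, exp in tc:
        assert count_and_say_groupby(inp) == exp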
|
import reshade as rs
import pytest
class Test_ConnectionLayer:
def test_ConnectionLayer(self):
layer = rs.ConnectionLayer(height=3, width=4, depth=2)
assert layer.values == [
[[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]],
[[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]],
]
        with pytest.raises(TypeError) as e:
            rs.ConnectionLayer()
        assert "The depth, height, and width of the layer must be specified."\
            in str(e.value)
connection1 = rs.Connection(0)
connection2 = rs.Connection(1)
connection3 = rs.Connection(0)
connection4 = rs.Connection(1)
connection5 = rs.Connection(0)
connection6 = rs.Connection(1)
spectrum1 = rs.Spectrum([connection1])
spectrum2 = rs.Spectrum([connection2])
spectrum3 = rs.Spectrum([connection3])
spectrum4 = rs.Spectrum([connection4])
spectrum5 = rs.Spectrum([connection5])
spectrum6 = rs.Spectrum([connection6])
image1 = rs.Image([spectrum1, spectrum2, spectrum3])
image2 = rs.Image([spectrum4, spectrum5, spectrum6])
layer = rs.ConnectionLayer(images=[image1, image2])
assert layer.images == [image1, image2]
assert layer.values == [
[[0],
[1],
[0]],
[[1],
[0],
[1]]
]
layer.values = [
[[0.5],
[1],
[1.5]],
[[2],
[2.5],
[3]]
]
assert layer.values == [
[[0.5],
[1],
[1.5]],
[[2],
[2.5],
[3]]
]
assert layer[0][1][0] == connection2
assert layer[1:][0][1][0] == connection5
        with pytest.raises(IndexError) as e:
            layer[2][0][0]
        assert "Layer depth exceeded." in str(e.value)
test_list = []
for image in layer:
for spectrum in image:
for connection in spectrum:
test_list.append(connection)
assert test_list == [
connection1,
connection2,
connection3,
connection4,
connection5,
connection6
]
assert len(layer) == 2
|