{
"source": "josemarti/CarND-Capstone",
"score": 2
}
#### File: tl_detector/light_classification/tl_classifier.py
```python
import rospy
import rospkg
import numpy as np
import os
import sys
import tensorflow as tf
from collections import defaultdict
from utils import label_map_util
from utils import visualization_utils as vis_util
import time
from styx_msgs.msg import TrafficLight
SIM_MODEL_PATH = 'light_classification/model_files/frozen_inference_graph_sim.pb'
SITE_MODEL_PATH = 'light_classification/model_files/frozen_inference_graph_real.pb'
LABELS_PATH = 'light_classification/model_files/labels.pbtxt'
NUM_CLASSES = 4
class TLClassifier(object):
def __init__(self, mode='SIMULATOR'):
self.current_light = TrafficLight.UNKNOWN
CWD_PATH = os.getcwd()
model = os.path.join(CWD_PATH, SIM_MODEL_PATH)
        if mode == 'SITE':
model = os.path.join(CWD_PATH, SITE_MODEL_PATH)
labels_path = os.path.join(CWD_PATH, LABELS_PATH)
label_map = label_map_util.load_labelmap(labels_path)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES,
use_display_name=True)
self.category_index = label_map_util.create_category_index(categories)
self.image_np_deep = None
self.detection_graph = tf.Graph()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
jit_level = tf.OptimizerOptions.ON_1
config.graph_options.optimizer_options.global_jit_level = jit_level
with self.detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(model, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
self.sess = tf.Session(graph=self.detection_graph, config=config)
        # Define input and output tensors for detection_graph
self.image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
self.detection_boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
        # Each score represents the level of confidence for each of the objects.
# Score is shown on the result image, together with the class label.
self.detection_scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
self.detection_classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
self.num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')
print("Loaded frozen model graph for mode = {}".format(mode))
def get_classification(self, image):
"""Determines the color of the traffic light in the image
Args:
image (cv::Mat): image containing the traffic light
Returns:
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
self.current_light = TrafficLight.UNKNOWN
image_expanded = np.expand_dims(image, axis=0)
time1 = time.time()
with self.detection_graph.as_default():
(boxes, scores, classes, num) = self.sess.run([self.detection_boxes,
self.detection_scores,
self.detection_classes,
self.num_detections],
feed_dict={self.image_tensor:image_expanded})
time2 = time.time()
boxes = np.squeeze(boxes)
scores = np.squeeze(scores)
classes = np.squeeze(classes).astype(np.int32)
min_score_threshold = 0.5
for i in range(boxes.shape[0]):
if scores is None or scores[i] > min_score_threshold:
class_name = self.category_index[classes[i]]['name']
rospy.loginfo('Light Color : {}'.format(class_name))
rospy.loginfo('Time for inference : {}ms'.format((time2-time1)*1000))
if class_name == 'Red':
self.current_light = TrafficLight.RED
elif class_name == 'Yellow':
self.current_light = TrafficLight.YELLOW
elif class_name == 'Green':
self.current_light = TrafficLight.GREEN
else:
self.current_light = TrafficLight.UNKNOWN
#self.image_np_deep = image
return self.current_light
```
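A minimal usage sketch for `TLClassifier` (not part of the original repository). It assumes the frozen graphs referenced by `SIM_MODEL_PATH`/`SITE_MODEL_PATH` exist relative to the working directory and that a camera frame is available as a BGR numpy array, e.g. loaded with OpenCV; the file name below is purely illustrative.
```python
# Hypothetical usage sketch: classify one camera frame outside of the ROS node.
import cv2
from light_classification.tl_classifier import TLClassifier
from styx_msgs.msg import TrafficLight

classifier = TLClassifier(mode='SIMULATOR')   # use mode='SITE' on the real car
frame = cv2.imread('camera_frame.jpg')        # illustrative stand-in for a ROS image
state = classifier.get_classification(frame)
if state == TrafficLight.RED:
    print('Red light detected: plan a stop at the next stop line.')
```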
{
"source": "josematiasarevalo/django-khipu",
"score": 2
}
#### File: django-khipu/khipu/exceptions.py
```python
class KhipuError(Exception):
def __init__(self, result):
Exception.__init__(self, result)
```
#### File: khipu/tests/test_api.py
```python
import mock
from django.conf import settings
from django.test import TestCase
from ..api import Khipu
from ..exceptions import KhipuError
class TestAPI(TestCase):
def test_khipu_service(self):
"""
        Tests for the Khipu API.
"""
        # Test for when the service does not exist.
khipu = Khipu()
with self.assertRaises(KhipuError):
khipu.service('testing')
        # Test for when the settings variables are missing.
with self.assertRaises(KhipuError):
settings.KHIPU_RECEIVER_ID = None
khipu.service('GetBanks')
settings.KHIPU_RECEIVER_ID = '148653'
        # Test for when everything is OK with the service.
with mock.patch(
'khipu.services.common.KhipuService.request'),\
mock.patch(
'khipu.services.common.KhipuService.response',
return_value={'banks': 'testingbank'}):
result = khipu.service('GetBanks')
self.assertTrue('banks' in result)
```
#### File: django-khipu/khipu/views.py
```python
import logging
from django.conf import settings
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from .api import Khipu
from .exceptions import KhipuError
from .models import Payment
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
logger.addHandler(ch)
def set_khipu_model(**kwargs):
"""
    Set all the new values we have for the Payment model.
    @Params
    kwargs:
        All the values sent by Khipu.
"""
payment = Payment.objects.get(payment_id=kwargs.get('payment_id'))
if float(payment.amount) == float(kwargs.get('amount')):
if int(settings.KHIPU_RECEIVER_ID) == kwargs.get('receiver_id'):
payment.save(**kwargs)
else:
payment.status = 'receiver_error'
payment.notification_token = kwargs.get('notification_token')
payment.save()
else:
payment.status = 'amount_error'
payment.notification_token = kwargs.get('notification_token')
payment.save()
    # Send the signals so the Django app can process the payment.
try:
payment.send_signals()
    except Exception:
logger.error("Could not send signals")
return payment
@csrf_exempt
@require_POST
def verificacion(request):
# def verificacion(request, *args, **kwargs):
"""
    View that validates the status of a payment.
    Khipu sends a notification token via POST; the payment status is then
    verified against the Khipu GetPayment service.
"""
logger.debug("Informacion que nos envia Khipu {}".format(request.POST))
notification_token = request.POST.get('notification_token')
khipu = Khipu()
try:
result = khipu.service(
'GetPayment', **{'notification_token': notification_token})
logger.debug("Informacion del servicio GetPayment {}".format(
result))
except KhipuError as e:
logger.error("GetPayment Communication error {}".format(e))
return HttpResponse(status=400)
try:
        set_khipu_model(**result)  # Save everything Khipu sends us.
except Payment.DoesNotExist:
logger.error("Payment does not exist. Data {}".format(result))
return HttpResponse(status=400)
return HttpResponse()
```
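The webhook flow above only works if Khipu can reach the `verificacion` view. A hypothetical URL configuration (not shown in the original project; the route name and prefix are assumptions) could look like this:
```python
# Hypothetical urls.py sketch: route Khipu's POST notifications to the
# csrf-exempt `verificacion` view defined above.
from django.urls import path

from khipu import views

urlpatterns = [
    path('khipu/verificacion/', views.verificacion, name='khipu-verificacion'),
]
```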
{
"source": "josemauro/kytos-utils",
"score": 3
}
#### File: commands/bug_report/parser.py
```python
import sys
from docopt import docopt
from kytos.cli.commands.bug_report.api import BugReportAPI
from kytos.utils.exceptions import KytosException
def parse(argv):
"""Parse cli args."""
args = docopt(__doc__, argv=argv)
try:
BugReportAPI.bug_report(args)
except KytosException as exception:
print("Error parsing args: {}".format(exception))
sys.exit(-1)
```
#### File: unit/commands/test_napps_parser.py
```python
import sys
import unittest
from unittest.mock import patch
from kytos.cli.commands.napps.parser import (call, parse, parse_napp,
parse_napps)
from kytos.utils.exceptions import KytosException
class TestNappsParser(unittest.TestCase):
"""Test the NappsAPI parser methods."""
@staticmethod
@patch('kytos.cli.commands.napps.parser.call')
@patch('kytos.cli.commands.napps.parser.docopt', return_value='args')
def test_parse(*args):
"""Test parse method."""
(_, mock_call) = args
with patch.object(sys, 'argv', ['A', 'B', 'C']):
parse('argv')
mock_call.assert_called_with('C', 'args')
@staticmethod
@patch('sys.exit')
@patch('kytos.cli.commands.napps.parser.call')
@patch('kytos.cli.commands.napps.parser.docopt', return_value='args')
def test_parse__error(*args):
"""Test parse method to error case."""
(_, mock_call, mock_exit) = args
mock_call.side_effect = KytosException
with patch.object(sys, 'argv', ['A', 'B', 'C']):
parse('argv')
mock_exit.assert_called()
@staticmethod
@patch('kytos.cli.commands.napps.api.NAppsAPI.install')
@patch('kytos.utils.config.KytosConfig')
def test_call(*args):
"""Test call method."""
(_, mock_napps_api) = args
call_args = {'<napp>': 'all'}
call('install', call_args)
mock_napps_api.assert_called_with(call_args)
def test_parse_napps__all(self):
"""Test parse_napps method to all napps."""
napp_ids = ['all']
napps = parse_napps(napp_ids)
self.assertEqual(napps, 'all')
def test_parse_napps__any(self):
"""Test parse_napps method to any napp."""
napp_ids = ['user/napp:version']
napps = parse_napps(napp_ids)
self.assertEqual(napps, [('user', 'napp', 'version')])
def test_parse_napp__success(self):
"""Test parse_napp method to success case."""
napp = 'user/napp:version'
groups = parse_napp(napp)
self.assertEqual(groups, ('user', 'napp', 'version'))
def test_parse_napp__error(self):
"""Test parse_napp method to error case."""
napp = 'usernappversion'
with self.assertRaises(KytosException):
parse_napp(napp)
```
{
"source": "josemauro/of_core",
"score": 2
}
#### File: tests/integration/test_flow.py
```python
import unittest
from pyof.v0x01.controller2switch.flow_mod import FlowMod as OFFlow01
from pyof.v0x04.controller2switch.flow_mod import FlowMod as OFFlow04
from kytos.core.switch import Switch
from napps.kytos.of_core.v0x01.flow import Flow as Flow01
from napps.kytos.of_core.v0x04.flow import Flow as Flow04
class TestFlow(unittest.TestCase):
"""Test OF flow abstraction."""
SWITCH = Switch('dpid')
EXPECTED = {'id': '1ce5d08a46496fcb856cb603a5bfa00f',
'switch': SWITCH.id,
'table_id': 1,
'match': {
'dl_src': '11:22:33:44:55:66'
},
'priority': 2,
'idle_timeout': 3,
'hard_timeout': 4,
'cookie': 5,
'actions': [
{'action_type': 'set_vlan',
'vlan_id': 6}],
'stats': {}}
def test_flow_mod(self):
"""Convert a dict to flow and vice-versa."""
for flow_class in Flow01, Flow04:
with self.subTest(flow_class=flow_class):
flow = flow_class.from_dict(self.EXPECTED, self.SWITCH)
actual = flow.as_dict()
self.assertDictEqual(self.EXPECTED, actual)
def test_of_flow_mod(self):
"""Test convertion from Flow to OFFlow."""
flow_mod_01 = Flow01.from_dict(self.EXPECTED, self.SWITCH)
flow_mod_04 = Flow04.from_dict(self.EXPECTED, self.SWITCH)
of_flow_mod_01 = flow_mod_01.as_of_add_flow_mod()
of_flow_mod_04 = flow_mod_04.as_of_delete_flow_mod()
self.assertIsInstance(of_flow_mod_01, OFFlow01)
self.assertIsInstance(of_flow_mod_04, OFFlow04)
```
#### File: tests/unit/test_utils.py
```python
from unittest import TestCase
from unittest.mock import MagicMock, patch
from kytos.lib.helpers import get_connection_mock, get_switch_mock
from napps.kytos.of_core.utils import (GenericHello, _emit_message,
_unpack_int, emit_message_in,
emit_message_out, of_slicer)
from tests.helpers import get_controller_mock
class TestUtils(TestCase):
"""Test utils."""
def setUp(self):
"""Execute steps before each tests."""
self.mock_controller = get_controller_mock()
self.mock_switch = get_switch_mock('00:00:00:00:00:00:00:01', 0x04)
self.mock_connection = get_connection_mock(0x04, self.mock_switch)
def test_of_slicer(self):
"""Test of_slicer."""
data = b'\x04\x00\x00\x10\x00\x00\x00\x3e'
data += b'\x00\x01\x00\x08\x00\x00\x00\x10'
response = of_slicer(data)
self.assertEqual(data, response[0][0])
self.assertCountEqual(response[1], [])
def test_unpack_int(self):
"""Test test_unpack_int."""
mock_packet = MagicMock()
response = _unpack_int(mock_packet)
self.assertEqual(int.from_bytes(mock_packet,
byteorder='big'), response)
@patch('napps.kytos.of_core.utils.KytosEvent')
def test_emit_message(self, mock_event):
"""Test emit_message."""
mock_message = MagicMock()
_emit_message(self.mock_controller, self.mock_connection, mock_message,
'in')
mock_event.assert_called()
_emit_message(self.mock_controller, self.mock_connection, mock_message,
'out')
mock_event.assert_called()
@patch('napps.kytos.of_core.utils._emit_message')
def test_emit_message_in_out(self, mock_message_in):
"""Test emit_message in and out."""
emit_message_in(self.mock_controller, self.mock_connection, 'in')
mock_message_in.assert_called()
emit_message_out(self.mock_controller, self.mock_connection, 'in')
mock_message_in.assert_called()
class TestGenericHello(TestCase):
"""Test GenericHello."""
data = b'\x04\x00\x00\x10\x00\x00\x00\x00\x00\x01\x00\x08\x00\x00\x00\x10'
@patch('napps.kytos.of_core.utils.OFPTYPE')
def test_pack(self, mock_ofptype):
"""Test pack."""
mock_ofptype.return_value = True
generic = GenericHello(packet=self.data, versions=b'\x04')
response = generic.pack()
self.assertEqual(self.data, response)
```
#### File: of_core/v0x04/match_fields_base.py
```python
from abc import ABC, abstractmethod
class MatchField(ABC):
"""Base class for match fields. Abstract OXM TLVs of python-openflow.
Just extend this class and you will be forced to define the required
low-level attributes and methods below:
* "name" attribute (field name to be displayed in JSON);
* "oxm_field" attribute (``OxmOfbMatchField`` enum);
* Method to return a pyof OxmTLV;
* Method to create an instance from an OxmTLV.
"""
def __init__(self, value):
"""Define match field value."""
self.value = value
@property
@classmethod
@abstractmethod
def name(cls):
"""Define a name to be displayed in JSON.
        It can be overridden just by a class attribute.
"""
@property
@classmethod
@abstractmethod
def oxm_field(cls):
"""Define this subclass ``OxmOfbMatchField`` value.
        It can be overridden just as a class attribute.
"""
@abstractmethod
def as_of_tlv(self):
"""Return a pyof OXM TLV instance."""
@classmethod
@abstractmethod
def from_of_tlv(cls, tlv):
"""Return an instance from a pyof OXM TLV."""
def __eq__(self, other):
"""Two objects are equal if their values are the same.
The oxm_field equality is checked indirectly when comparing whether
the objects are instances of the same class.
"""
return isinstance(other, self.__class__) and other.value == self.value
class MatchFieldFactory(ABC):
"""Create the correct MatchField subclass instance.
As OF 1.3 has many match fields and there are many ways to (un)pack their
OxmTLV.oxm_value, this class does all the work of finding the correct
MatchField class and instantiating the corresponding object.
"""
__classes = {}
@classmethod
def from_name(cls, name, value):
"""Return the proper object from name and value."""
field_class = cls._get_class(name)
if field_class:
return field_class(value)
return None
@classmethod
def from_of_tlv(cls, tlv):
"""Return the proper object from a pyof OXM TLV."""
field_class = cls._get_class(tlv.oxm_field)
if field_class:
return field_class.from_of_tlv(tlv)
return None
@classmethod
def _get_class(cls, name_or_field):
"""Return the proper object from field name or OxmTLV.oxm_field."""
if not cls.__classes:
cls._index_classes()
return cls.__classes.get(name_or_field)
@classmethod
def _index_classes(cls):
for subclass in MatchField.__subclasses__():
cls.__classes[subclass.name] = subclass
cls.__classes[subclass.oxm_field] = subclass
```
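To illustrate the contract described in the `MatchField` docstring, here is a hedged sketch of a concrete subclass for the 802.1Q VLAN VID. The `OxmTLV`/`OxmOfbMatchField` imports mirror the python-openflow names referenced above; the class itself is illustrative and not taken from the repository.
```python
# Illustrative MatchField subclass (a sketch, not the repository's own field).
from napps.kytos.of_core.v0x04.match_fields_base import (MatchField,
                                                          MatchFieldFactory)
from pyof.v0x04.common.flow_match import OxmOfbMatchField, OxmTLV


class MatchVlanVidExample(MatchField):
    """Example OXM field: 802.1Q VLAN VID stored as a plain integer."""

    name = 'dl_vlan'
    oxm_field = OxmOfbMatchField.OFPXMT_OFB_VLAN_VID

    def as_of_tlv(self):
        """Pack the integer value into a 2-byte OXM TLV."""
        return OxmTLV(oxm_field=self.oxm_field,
                      oxm_value=self.value.to_bytes(2, 'big'))

    @classmethod
    def from_of_tlv(cls, tlv):
        """Rebuild the field from a pyof OXM TLV."""
        return cls(int.from_bytes(tlv.oxm_value, 'big'))


# MatchFieldFactory indexes MatchField.__subclasses__() lazily, so the new
# field is found both by its name and by its oxm_field value:
field = MatchFieldFactory.from_name('dl_vlan', 100)
```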
{
"source": "josemauro/python-openflow",
"score": 3
}
#### File: pyof/foundation/basic_types.py
```python
import struct
from copy import deepcopy
# Local source tree imports
from pyof.foundation import exceptions
from pyof.foundation.base import GenericStruct, GenericType, UBIntBase
__all__ = ('BinaryData', 'Char', 'ConstantTypeList', 'FixedTypeList',
'IPAddress', 'DPID', 'HWAddress', 'Pad', 'UBInt8', 'UBInt16',
'UBInt32', 'UBInt64', 'UBInt128')
class Pad(GenericType):
"""Class for padding attributes."""
_fmt = ''
def __init__(self, length=0):
"""Pad up to ``length``, in bytes.
Args:
length (int): Total length, in bytes.
"""
super().__init__()
self._length = length
def __repr__(self):
return "{}({})".format(type(self).__name__, self._length)
def __str__(self):
return '0' * self._length
def get_size(self, value=None):
"""Return the type size in bytes.
Args:
value (int): In structs, the user can assign other value instead of
this class' instance. Here, in such cases, ``self`` is a class
attribute of the struct.
Returns:
int: Size in bytes.
"""
return self._length
def unpack(self, buff, offset=0):
"""Unpack *buff* into this object.
Do nothing, since the _length is already defined and it is just a Pad.
        Keep buff and offset just for compatibility with other unpack methods.
Args:
buff: Buffer where data is located.
offset (int): Where data stream begins.
"""
def pack(self, value=None):
"""Pack the object.
Args:
value (int): In structs, the user can assign other value instead of
this class' instance. Here, in such cases, ``self`` is a class
attribute of the struct.
Returns:
bytes: the byte 0 (zero) *length* times.
"""
return b'\x00' * self._length
def __deepcopy__(self, memo):
"""Improve deepcopy speed."""
return Pad(length=self._length)
class UBInt8(UBIntBase):
"""Format character for an Unsigned Char.
Class for an 8-bit (1-byte) Unsigned Integer.
"""
_fmt = "!B"
class UBInt16(UBIntBase):
"""Format character for an Unsigned Short.
    Class for a 16-bit (2-byte) Unsigned Integer.
"""
_fmt = "!H"
class UBInt32(UBIntBase):
"""Format character for an Unsigned Int.
    Class for a 32-bit (4-byte) Unsigned Integer.
"""
_fmt = "!I"
class UBInt64(UBIntBase):
"""Format character for an Unsigned Long Long.
    Class for a 64-bit (8-byte) Unsigned Integer.
"""
_fmt = "!Q"
class UBInt128(UBIntBase):
"""Format character for an Unsigned Long Long.
Class for an 128-bit (16-byte) Unsigned Integer.
"""
_fmt = "!8H"
class DPID(GenericType):
"""DataPath ID. Identifies a switch."""
_fmt = "!8B"
def __init__(self, dpid=None):
"""Create an instance and optionally set its dpid value.
Args:
            dpid (str): String with DPID value (e.g. `00:00:00:00:00:00:00:01`).
"""
super().__init__(value=dpid)
def __str__(self):
return self._value
@property
def value(self):
"""Return dpid value.
Returns:
str: DataPath ID stored by DPID class.
"""
return self._value
def pack(self, value=None):
"""Pack the value as a binary representation.
Returns:
bytes: The binary representation.
Raises:
struct.error: If the value does not fit the binary format.
"""
if isinstance(value, type(self)):
return value.pack()
if value is None:
value = self._value
return struct.pack('!8B', *[int(v, 16) for v in value.split(':')])
def unpack(self, buff, offset=0):
"""Unpack a binary message into this object's attributes.
Unpack the binary value *buff* and update this object attributes based
on the results.
Args:
buff (bytes): Binary data package to be unpacked.
offset (int): Where to begin unpacking.
Raises:
Exception: If there is a struct unpacking error.
"""
begin = offset
hexas = []
while begin < offset + 8:
number = struct.unpack("!B", buff[begin:begin+1])[0]
hexas.append("%.2x" % number)
begin += 1
self._value = ':'.join(hexas)
def __deepcopy__(self, memo):
"""Improve deepcopy speed."""
return DPID(dpid=self._value)
class Char(GenericType):
"""Build a double char type according to the length."""
def __init__(self, value=None, length=0):
"""Create a Char with the optional parameters below.
Args:
            value: The character to be built.
length (int): Character size.
"""
super().__init__(value)
self.length = length
self._fmt = '!{}{}'.format(self.length, 's')
def pack(self, value=None):
"""Pack the value as a binary representation.
Returns:
bytes: The binary representation.
Raises:
struct.error: If the value does not fit the binary format.
"""
if isinstance(value, type(self)):
return value.pack()
try:
if value is None:
value = self.value
packed = struct.pack(self._fmt, bytes(value, 'ascii'))
return packed[:-1] + b'\0' # null-terminated
except struct.error as err:
msg = "Char Pack error. "
msg += "Class: {}, struct error: {} ".format(type(value).__name__,
err)
raise exceptions.PackException(msg)
def unpack(self, buff, offset=0):
"""Unpack a binary message into this object's attributes.
Unpack the binary value *buff* and update this object attributes based
on the results.
Args:
buff (bytes): Binary data package to be unpacked.
offset (int): Where to begin unpacking.
Raises:
Exception: If there is a struct unpacking error.
"""
try:
begin = offset
end = begin + self.length
unpacked_data = struct.unpack(self._fmt, buff[begin:end])[0]
except struct.error:
raise Exception("%s: %s" % (offset, buff))
self._value = unpacked_data.decode('ascii').rstrip('\0')
def __deepcopy__(self, memo):
"""Improve deepcopy speed."""
return Char(value=self._value, length=self.length)
class IPAddress(GenericType):
"""Defines a IP address."""
netmask = UBInt32()
max_prefix = UBInt32(32)
def __init__(self, address="0.0.0.0/32", netmask=None):
"""Create an IPAddress with the parameters below.
Args:
address (str): IP Address using ipv4. Defaults to '0.0.0.0/32'
"""
if '/' in address:
address, netmask = address.split('/')
else:
netmask = 32 if netmask is None else netmask
super().__init__(address)
self.netmask = int(netmask)
def pack(self, value=None):
"""Pack the value as a binary representation.
If the value is None the self._value will be used to pack.
Args:
value (str): IP Address with ipv4 format.
Returns:
bytes: The binary representation.
Raises:
struct.error: If the value does not fit the binary format.
"""
if isinstance(value, type(self)):
return value.pack()
if value is None:
value = self._value
if value.find('/') >= 0:
value = value.split('/')[0]
try:
value = value.split('.')
return struct.pack('!4B', *[int(x) for x in value])
except struct.error as err:
msg = "IPAddress error. "
msg += "Class: {}, struct error: {} ".format(type(value).__name__,
err)
raise exceptions.PackException(msg)
def unpack(self, buff, offset=0):
"""Unpack a binary message into this object's attributes.
Unpack the binary value *buff* and update this object attributes based
on the results.
Args:
buff (bytes): Binary data package to be unpacked.
offset (int): Where to begin unpacking.
Raises:
Exception: If there is a struct unpacking error.
"""
try:
unpacked_data = struct.unpack('!4B', buff[offset:offset+4])
self._value = '.'.join([str(x) for x in unpacked_data])
except struct.error as exception:
raise exceptions.UnpackException('%s; %s: %s' % (exception,
offset, buff))
def get_size(self, value=None):
"""Return the ip address size in bytes.
Args:
value: In structs, the user can assign other value instead of
this class' instance. Here, in such cases, ``self`` is a class
attribute of the struct.
Returns:
int: The address size in bytes.
"""
return 4
def __deepcopy__(self, memo):
"""Improve deepcopy speed."""
return IPAddress(address=self._value, netmask=self.netmask)
class IPv6Address(GenericType):
"""Defines a IPv6 address."""
netmask = UBInt128()
def __init__(self, address="0000:0000:0000:0000:0000:0000:0000:0000/128",
netmask=None):
"""Create an IPv6Address with the parameters below.
Args:
address (str): IP Address using IPv6.
Defaults to '0000:0000:0000:0000:0000:0000:0000:0000/128'
"""
if '/' in address:
address, netmask = address.split('/')
else:
netmask = 128 if netmask is None else netmask
if address == '::':
address = '0:0:0:0:0:0:0:0'
elif '::' in address:
temp = address.split(':')
index = temp.index('')
temp = [x for x in temp if x != '']
address = temp[:index] + ['0'] * (8 - len(temp)) + temp[index:]
address = ':'.join(address)
super().__init__(address)
self.netmask = int(netmask)
def pack(self, value=None):
"""Pack the value as a binary representation.
If the value is None the self._value will be used to pack.
Args:
value (str): IP Address with IPv6 format.
Returns:
bytes: The binary representation.
Raises:
struct.error: If the value does not fit the binary format.
"""
if isinstance(value, type(self)):
return value.pack()
if value is None:
value = self._value
if value.find('/') >= 0:
value = value.split('/')[0]
try:
value = value.split(':')
return struct.pack('!8H', *[int(x, 16) for x in value])
except struct.error as err:
msg = "IPv6Address error. "
msg += "Class: {}, struct error: {} ".format(type(value).__name__,
err)
raise exceptions.PackException(msg)
def unpack(self, buff, offset=0):
"""Unpack a binary message into this object's attributes.
Unpack the binary value *buff* and update this object attributes based
on the results.
Args:
buff (bytes): Binary data package to be unpacked.
offset (int): Where to begin unpacking.
Raises:
Exception: If there is a struct unpacking error.
"""
def _int2hex(number):
return "{0:0{1}x}".format(number, 4)
try:
unpacked_data = struct.unpack('!8H', buff[offset:offset+16])
except struct.error as exception:
raise exceptions.UnpackException('%s; %s: %s' % (exception,
offset, buff))
transformed_data = ':'.join([_int2hex(x) for x in unpacked_data])
self._value = transformed_data
def get_size(self, value=None):
"""Return the IPv6 address size in bytes.
Args:
value: In structs, the user can assign other value instead of
this class' instance. Here, in such cases, ``self`` is a class
attribute of the struct.
Returns:
int: The address size in bytes.
"""
return 16
def __deepcopy__(self, memo):
"""Improve deepcopy speed."""
return IPv6Address(address=self._value, netmask=self.netmask)
class HWAddress(GenericType):
"""Defines a hardware address."""
# pylint: disable=useless-super-delegation
def __init__(self, hw_address='00:00:00:00:00:00'):
"""Create a HWAddress with the parameters below.
Args:
hw_address (bytes): Hardware address. Defaults to
'00:00:00:00:00:00'.
"""
super().__init__(hw_address)
def pack(self, value=None):
"""Pack the value as a binary representation.
If the passed value (or the self._value) is zero (int), then the pack
will assume that the value to be packed is '00:00:00:00:00:00'.
        Returns:
bytes: The binary representation.
Raises:
struct.error: If the value does not fit the binary format.
"""
if isinstance(value, type(self)):
return value.pack()
if value is None:
value = self._value
if value == 0:
value = '00:00:00:00:00:00'
value = value.split(':')
try:
return struct.pack('!6B', *[int(x, 16) for x in value])
except struct.error as err:
msg = "HWAddress error. "
msg += "Class: {}, struct error: {} ".format(type(value).__name__,
err)
raise exceptions.PackException(msg)
def unpack(self, buff, offset=0):
"""Unpack a binary message into this object's attributes.
Unpack the binary value *buff* and update this object attributes based
on the results.
Args:
buff (bytes): Binary data package to be unpacked.
offset (int): Where to begin unpacking.
Raises:
Exception: If there is a struct unpacking error.
"""
def _int2hex(number):
return "{0:0{1}x}".format(number, 2)
try:
unpacked_data = struct.unpack('!6B', buff[offset:offset+6])
except struct.error as exception:
raise exceptions.UnpackException('%s; %s: %s' % (exception,
offset, buff))
transformed_data = ':'.join([_int2hex(x) for x in unpacked_data])
self._value = transformed_data
def get_size(self, value=None):
"""Return the address size in bytes.
Args:
value: In structs, the user can assign other value instead of
this class' instance. Here, in such cases, ``self`` is a class
attribute of the struct.
Returns:
int: The address size in bytes.
"""
return 6
def is_broadcast(self):
"""Return true if the value is a broadcast address. False otherwise."""
return self.value == 'ff:ff:ff:ff:ff:ff'
def __deepcopy__(self, memo):
"""Improve deepcopy speed."""
return HWAddress(hw_address=self._value)
class BinaryData(GenericType):
"""Class to create objects that represent binary data.
This is used in the ``data`` attribute from
:class:`~pyof.v0x01.asynchronous.packet_in.PacketIn` and
:class:`~pyof.v0x01.controller2switch.packet_out.PacketOut` messages.
Both the :meth:`pack` and :meth:`unpack` methods will return the
binary data itself. :meth:`get_size` method will
return the size of the instance using Python's :func:`len`.
"""
def __init__(self, value=None): # pylint: disable=useless-super-delegation
"""Initialize with a value (optional).
Args:
value (bytes): The binary data. Defaults to an empty value.
"""
super().__init__(value)
def pack(self, value=None):
"""Pack the value as a binary representation.
Returns:
bytes: The binary representation.
Raises:
ValueError: If value can't be represented with bytes
"""
if value is None:
value = self._value
if hasattr(value, 'pack') and callable(value.pack):
return value.pack()
if isinstance(value, bytes):
return value
if value is None:
return b''
raise ValueError(f"BinaryData can't be {type(value)} = '{value}'")
def unpack(self, buff, offset=0):
"""Unpack a binary message into this object's attributes.
Unpack the binary value *buff* and update this object attributes based
on the results. Since the *buff* is binary data, no conversion is done.
Args:
buff (bytes): Binary data package to be unpacked.
offset (int): Where to begin unpacking.
"""
self._value = buff[offset:]
def get_size(self, value=None):
"""Return the size in bytes.
Args:
value (bytes): In structs, the user can assign other value instead
of this class' instance. Here, in such cases, ``self`` is a
class attribute of the struct.
Returns:
            int: The size in bytes.
"""
if value is None:
value = self._value
if hasattr(value, 'get_size'):
return value.get_size()
return len(self.pack(value))
def __deepcopy__(self, memo):
"""Improve deepcopy speed."""
return BinaryData(value=self._value)
class TypeList(list, GenericStruct):
"""Base class for lists that store objects of one single type."""
def __init__(self, items):
"""Initialize the list with one item or a list of items.
Args:
items (iterable, ``pyof_class``): Items to be stored.
"""
super().__init__()
if isinstance(items, list):
self.extend(items)
elif items:
self.append(items)
def extend(self, items):
"""Extend the list by adding all items of ``items``.
Args:
items (iterable): Items to be added to the list.
Raises:
:exc:`~.exceptions.WrongListItemType`: If an item has an unexpected
type.
"""
for item in items:
self.append(item)
def pack(self, value=None):
"""Pack the value as a binary representation.
Returns:
bytes: The binary representation.
"""
if isinstance(value, type(self)):
return value.pack()
if value is None:
value = self
else:
container = type(self)(items=None)
container.extend(value)
value = container
bin_message = b''
try:
for item in value:
bin_message += item.pack()
return bin_message
except exceptions.PackException as err:
msg = "{} pack error: {}".format(type(self).__name__, err)
raise exceptions.PackException(msg)
# pylint: disable=arguments-differ
def unpack(self, buff, item_class, offset=0):
"""Unpack the elements of the list.
Args:
buff (bytes): The binary data to be unpacked.
item_class (:obj:`type`): Class of the expected items on this list.
offset (int): If we need to shift the beginning of the data.
"""
begin = offset
limit_buff = len(buff)
while begin < limit_buff:
item = item_class()
item.unpack(buff, begin)
self.append(item)
begin += item.get_size()
# pylint: enable=arguments-differ
def get_size(self, value=None):
"""Return the size in bytes.
Args:
value: In structs, the user can assign other value instead of
this class' instance. Here, in such cases, ``self`` is a class
attribute of the struct.
Returns:
int: The size in bytes.
"""
if value is None:
if not self:
                # If this is an empty list, then return zero
return 0
if issubclass(type(self[0]), GenericType):
# If the type of the elements is GenericType, then returns the
# length of the list multiplied by the size of the GenericType.
return len(self) * self[0].get_size()
            # Otherwise, iterate over the list accumulating the sizes.
return sum(item.get_size() for item in self)
return type(self)(value).get_size()
def __str__(self):
"""Human-readable object representantion."""
return "{}".format([str(item) for item in self])
def __deepcopy__(self, memo):
"""Improve deepcopy speed."""
items = [deepcopy(item) for item in self]
return TypeList(items=items)
class FixedTypeList(TypeList):
"""A list that stores instances of one pyof class."""
_pyof_class = None
def __init__(self, pyof_class, items=None):
"""Create a FixedTypeList with the parameters follows.
Args:
pyof_class (:obj:`type`): Class of the items to be stored.
items (iterable, ``pyof_class``): Items to be stored.
"""
self._pyof_class = pyof_class
super().__init__(items)
def append(self, item):
"""Append one item to the list.
Args:
item: Item to be appended. Its type must match the one defined in
the constructor.
Raises:
:exc:`~.exceptions.WrongListItemType`: If the item has a different
type than the one specified in the constructor.
"""
if isinstance(item, list):
self.extend(item)
elif issubclass(item.__class__, self._pyof_class):
list.append(self, item)
else:
raise exceptions.WrongListItemType(item.__class__.__name__,
self._pyof_class.__name__)
def insert(self, index, item):
"""Insert an item at the specified index.
Args:
index (int): Position to insert the item.
item: Item to be inserted. It must have the type specified in the
constructor.
Raises:
:exc:`~.exceptions.WrongListItemType`: If the item has a different
type than the one specified in the constructor.
"""
if issubclass(item.__class__, self._pyof_class):
list.insert(self, index, item)
else:
raise exceptions.WrongListItemType(item.__class__.__name__,
self._pyof_class.__name__)
def unpack(self, buff, offset=0): # pylint: disable=arguments-differ
"""Unpack the elements of the list.
This unpack method considers that all elements have the same size.
To use this class with a pyof_class that accepts elements with
different sizes, you must reimplement the unpack method.
Args:
buff (bytes): The binary data to be unpacked.
offset (int): If we need to shift the beginning of the data.
"""
super().unpack(buff, self._pyof_class, offset)
def __deepcopy__(self, memo):
"""Improve deepcopy speed."""
items = [deepcopy(item) for item in self]
return FixedTypeList(pyof_class=self._pyof_class, items=items)
class ConstantTypeList(TypeList):
"""List that contains only objects of the same type (class).
The types of all items are expected to be the same as the first item's.
Otherwise, :exc:`~.exceptions.WrongListItemType` is raised in many
list operations.
"""
# pylint: disable=useless-super-delegation
def __init__(self, items=None):
"""Create a ConstantTypeList that can contain itens to be stored.
Args:
items (iterable, :class:`object`): Items to be stored.
Raises:
:exc:`~.exceptions.WrongListItemType`: If an item has a different
type than the first item to be stored.
"""
super().__init__(items)
def append(self, item):
"""Append one item to the list.
Args:
item: Item to be appended.
Raises:
:exc:`~.exceptions.WrongListItemType`: If an item has a different
type than the first item to be stored.
"""
if isinstance(item, list):
self.extend(item)
elif not self:
list.append(self, item)
elif item.__class__ == self[0].__class__:
list.append(self, item)
else:
raise exceptions.WrongListItemType(item.__class__.__name__,
self[0].__class__.__name__)
def insert(self, index, item):
"""Insert an item at the specified index.
Args:
index (int): Position to insert the item.
item: Item to be inserted.
Raises:
:exc:`~.exceptions.WrongListItemType`: If an item has a different
type than the first item to be stored.
"""
if not self:
list.append(self, item)
elif item.__class__ == self[0].__class__:
list.insert(self, index, item)
else:
raise exceptions.WrongListItemType(item.__class__.__name__,
self[0].__class__.__name__)
def __deepcopy__(self, memo):
"""Improve deepcopy speed."""
items = [deepcopy(item) for item in self]
return ConstantTypeList(items=items)
```
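A short round-trip with the types defined above (the addresses are example values, not taken from the library's tests):
```python
# Illustrative round-trip sketch using IPAddress and DPID from this module.
from pyof.foundation.basic_types import DPID, IPAddress

addr = IPAddress('192.168.0.1/24')
assert addr.pack() == b'\xc0\xa8\x00\x01'    # four network-order bytes
assert addr.netmask == 24

dpid = DPID('00:00:00:00:00:00:00:01')
packed = dpid.pack()                         # 8 bytes, one per colon-separated pair
restored = DPID()
restored.unpack(packed)
assert restored.value == '00:00:00:00:00:00:00:01'
```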
#### File: v0x01/common/action.py
```python
from pyof.foundation.base import GenericBitMask, GenericStruct
from pyof.foundation.basic_types import (
FixedTypeList, HWAddress, Pad, UBInt8, UBInt16, UBInt32)
from pyof.foundation.constants import UBINT16_MAX_VALUE
# Third-party imports
__all__ = ('ActionType', 'ActionHeader', 'ActionOutput', 'ActionStripVlan',
'ActionEnqueue', 'ActionVlanVid', 'ActionVlanPCP', 'ActionDLAddr',
'ActionNWAddr', 'ActionNWTos', 'ActionTPPort', 'ActionVendorHeader',
'ListOfActions')
# Enums
class ActionType(GenericBitMask):
"""Actions associated with flows and packets."""
#: Output to switch port.
OFPAT_OUTPUT = 0
#: Set the 802.1q VLAN id.
OFPAT_SET_VLAN_VID = 1
#: Set the 802.1q priority.
OFPAT_SET_VLAN_PCP = 2
#: Strip the 802.1q header.
OFPAT_STRIP_VLAN = 3
#: Ethernet source address.
OFPAT_SET_DL_SRC = 4
#: Ethernet destination address.
OFPAT_SET_DL_DST = 5
#: IP source address.
OFPAT_SET_NW_SRC = 6
#: IP destination address.
OFPAT_SET_NW_DST = 7
#: IP ToS (DSCP field, 6 bits).
OFPAT_SET_NW_TOS = 8
#: TCP/UDP source port.
OFPAT_SET_TP_SRC = 9
#: TCP/UDP destination port.
OFPAT_SET_TP_DST = 10
#: Output to queue.
OFPAT_ENQUEUE = 11
#: Vendor specific.
OFPAT_VENDOR = 0xffff
# Classes
class ActionHeader(GenericStruct):
"""Defines the Header that is common to all actions."""
action_type = UBInt16(enum_ref=ActionType)
length = UBInt16()
# Pad for 64-bit alignment.
# This attribute will not be implemented since not all subclasses from
# this class will hold it on the same place and with the same size.
# pad = Pad(4)
_allowed_types = ()
def __init__(self, action_type=None, length=None):
"""Create an ActionHeader with the optional parameters below.
Args:
action_type (~pyof.v0x01.common.action.ActionType):
The type of the action.
length (int): Length of action, including this header.
"""
super().__init__()
self.action_type = action_type
self.length = length
def unpack(self, buff, offset=0):
"""Unpack a binary message into this object's attributes.
Unpack the binary value *buff* and update this object attributes based
on the results.
Args:
buff (bytes): Binary data package to be unpacked.
offset (int): Where to begin unpacking.
Raises:
Exception: If there is a struct unpacking error.
"""
self.action_type = UBInt16(enum_ref=ActionType)
self.action_type.unpack(buff, offset)
for cls in ActionHeader.__subclasses__():
if self.action_type.value in cls.get_allowed_types():
self.__class__ = cls
break
super().unpack(buff, offset)
@classmethod
def get_allowed_types(cls):
"""Return allowed types for the class."""
return cls._allowed_types
class ActionOutput(ActionHeader):
"""Defines the actions output.
Action structure for :attr:`ActionType.OFPAT_OUTPUT`, which sends packets
out :attr:`port`. When the :attr:`port` is the
:attr:`.Port.OFPP_CONTROLLER`, :attr:`max_length` indicates the max number
of bytes to send. A :attr:`max_length` of zero means no bytes of the packet
should be sent.
"""
port = UBInt16()
max_length = UBInt16()
_allowed_types = (ActionType.OFPAT_OUTPUT,)
def __init__(self, port=None, max_length=UBINT16_MAX_VALUE):
"""Create an ActionOutput with the optional parameters below.
Args:
port (:class:`~pyof.v0x01.common.phy_port.Port` or :class:`int`):
Output port.
max_length (int): Max length to send to controller.
"""
super().__init__(action_type=ActionType.OFPAT_OUTPUT, length=8)
self.port = port
self.max_length = max_length
class ActionStripVlan(ActionHeader):
"""Strips VLAN information from packets.
Action defined for switches to remove the 802.1q VLAN information from
packets.
"""
pad = Pad(4)
_allowed_types = (ActionType.OFPAT_STRIP_VLAN,)
def __init__(self):
"""Construct the ActionHeader with the appropriate ActionType.
No parameters need to be specified.
"""
super().__init__(action_type=ActionType.OFPAT_STRIP_VLAN, length=8)
class ActionEnqueue(ActionHeader):
"""Send packets to a queue's port.
A switch may support only queues that are tied to specific PCP/TOS bits.
In that case, we cannot map an arbitrary flow to a specific queue,
therefore the action ENQUEUE is not supported. The user can still use
these queues and map flows to them by setting the relevant fields
(TOS, VLAN PCP).
"""
port = UBInt16()
#: Pad for 64-bit alignment.
pad = Pad(6)
queue_id = UBInt32()
_allowed_types = (ActionType.OFPAT_ENQUEUE,)
def __init__(self, port=None, queue_id=None):
"""Create an ActionEnqueue with the optional parameters below.
Args:
port (physical port or :attr:`.Port.OFPP_IN_PORT`): Queue's port.
queue_id (int): Where to enqueue the packets.
"""
super().__init__(action_type=ActionType.OFPAT_ENQUEUE, length=16)
self.port = port
self.queue_id = queue_id
class ActionVlanVid(ActionHeader):
"""Action structure for :attr:`ActionType.OFPAT_SET_VLAN_VID`.
.. note:: The vlan_vid field is 16 bits long,
when an actual VLAN id is only 12 bits.
The value 0xffff is used to indicate that no VLAN id was set
"""
vlan_id = UBInt16()
#: Pad for bit alignment.
pad2 = Pad(2)
_allowed_types = (ActionType.OFPAT_SET_VLAN_VID,)
def __init__(self, vlan_id=None):
"""Create an ActionVlanVid with the optional parameters below.
Args:
            vlan_id (int): VLAN id to set.
"""
super().__init__(action_type=ActionType.OFPAT_SET_VLAN_VID, length=8)
self.vlan_id = vlan_id
class ActionVlanPCP(ActionHeader):
"""Action structure for :attr:`ActionType.OFPAT_SET_VLAN_PCP`."""
vlan_pcp = UBInt8()
#: Pad for bit alignment.
pad = Pad(3)
_allowed_types = (ActionType.OFPAT_SET_VLAN_PCP,)
def __init__(self, vlan_pcp=None):
"""Create an ActionVlanPCP with the optional parameters below.
Args:
vlan_pcp (int): VLAN Priority.
.. note:: The vlan_pcp field is 8 bits long,
but only the lower 3 bits have meaning.
"""
super().__init__(action_type=ActionType.OFPAT_SET_VLAN_PCP, length=8)
self.vlan_pcp = vlan_pcp
class ActionDLAddr(ActionHeader):
"""Action structure for :attr:`ActionType.OFPAT_SET_DL_SRC` or _DST."""
dl_addr = HWAddress()
#: Pad for bit alignment.
pad = Pad(6)
_allowed_types = (ActionType.OFPAT_SET_DL_SRC, ActionType.OFPAT_SET_DL_DST)
def __init__(self, action_type=None, dl_addr=None):
"""Create an ActionDLAddr with the optional parameters below.
Args:
action_type (:class:`~pyof.v0x01.common.action.ActionType`):
:attr:`~ActionType.OFPAT_SET_DL_SRC` or
:attr:`~ActionType.OFPAT_SET_DL_DST`.
dl_addr (:class:`~.HWAddress`): Ethernet address.
Defaults to None.
"""
super().__init__(action_type, length=16)
self.dl_addr = dl_addr
class ActionNWAddr(ActionHeader):
"""Action structure for :attr:`ActionType.OFPAT_SET_NW_SRC` or _DST."""
nw_addr = UBInt32()
_allowed_types = (ActionType.OFPAT_SET_NW_SRC, ActionType.OFPAT_SET_NW_DST)
def __init__(self, action_type=None, nw_addr=None):
"""Create an ActionNWAddr with the optional parameters below.
Args:
action_type (:class:`~pyof.v0x01.common.action.ActionType`):
:attr:`~ActionType.OFPAT_SET_NW_SRC` or
:attr:`~ActionType.OFPAT_SET_NW_DST`.
nw_addr (int): IP Address.
"""
super().__init__(action_type, length=8)
self.nw_addr = nw_addr
class ActionNWTos(ActionHeader):
"""Action structure for :attr:`ActionType.OFPAT_SET_NW_TOS`.
.. note:: The nw_tos field is the 6 upper bits of the ToS field to set,
in the original bit positions (shifted to the left by 2).
"""
nw_tos = UBInt8()
#: Pad for bit alignment.
pad = Pad(3)
_allowed_types = (ActionType.OFPAT_SET_NW_TOS,)
def __init__(self, action_type=None, nw_tos=None):
"""Create an ActionNWTos with the optional parameters below.
Args:
action_type (:class:`~pyof.v0x01.common.action.ActionType`):
                :attr:`~ActionType.OFPAT_SET_NW_TOS`.
nw_tos (int): IP ToS (DSCP field, 6 bits).
"""
super().__init__(action_type, length=8)
self.nw_tos = nw_tos
class ActionTPPort(ActionHeader):
"""Action structure for :attr:`ActionType.OFPAT_SET_TP_SRC` or _DST."""
tp_port = UBInt16()
#: Pad for bit alignment.
pad = Pad(2)
_allowed_types = (ActionType.OFPAT_SET_TP_SRC, ActionType.OFPAT_SET_TP_DST)
def __init__(self, action_type=None, tp_port=None):
"""Create an ActionTPPort with the optional parameters below.
Args:
action_type (:class:`~pyof.v0x01.common.action.ActionType`):
:attr:`~ActionType.OFPAT_SET_TP_SRC` or
:attr:`~ActionType.OFPAT_SET_TP_DST`.
tp_port (int): TCP/UDP/other port to set.
"""
super().__init__(action_type, length=8)
self.tp_port = tp_port
class ActionVendorHeader(ActionHeader):
"""Action header for :attr:`ActionType.OFPAT_VENDOR`.
The rest of the body is vendor-defined.
"""
vendor = UBInt32()
_allowed_types = (ActionType.OFPAT_VENDOR,)
def __init__(self, length=None, vendor=None):
"""Create an ActionVendorHeader with the optional parameters below.
Args:
length (int): Length is a multiple of 8.
            vendor (int): Vendor ID with the same form as in VendorHeader.
Defaults to None.
"""
super().__init__(action_type=ActionType.OFPAT_VENDOR, length=length)
self.vendor = vendor
class ListOfActions(FixedTypeList):
"""List of actions.
    Represented by instances of ActionHeader and used in messages that carry actions.
"""
def __init__(self, items=None):
"""Create a ListOfActions with the optional parameters below.
Args:
items (:class:`~pyof.v0x01.common.action.ActionHeader`):
Instance or a list of instances.
"""
super().__init__(pyof_class=ActionHeader, items=items)
```
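As a quick, hedged example of the classes above, an OFPAT_OUTPUT action can be built and serialized like this; it assumes pyof's `GenericStruct.pack()` serializes the declared fields in order, with `Port` coming from `pyof.v0x01.common.phy_port`.
```python
# Illustrative sketch: build and serialize a single output action.
from pyof.v0x01.common.action import ActionOutput, ListOfActions
from pyof.v0x01.common.phy_port import Port

actions = ListOfActions(items=ActionOutput(port=Port.OFPP_CONTROLLER,
                                           max_length=256))
wire = actions.pack()   # an OFPAT_OUTPUT action occupies 8 bytes on the wire
assert len(wire) == 8
```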
#### File: v0x01/controller2switch/common.py
```python
from enum import IntEnum
from pyof.foundation.base import GenericMessage, GenericStruct
from pyof.foundation.basic_types import (
BinaryData, Char, Pad, UBInt8, UBInt16, UBInt32, UBInt64)
from pyof.foundation.constants import (
DESC_STR_LEN, OFP_MAX_TABLE_NAME_LEN, SERIAL_NUM_LEN)
# Local source tree imports
from pyof.v0x01.common.action import ListOfActions
from pyof.v0x01.common.flow_match import FlowWildCards, Match
from pyof.v0x01.common.header import Header
from pyof.v0x01.common.phy_port import Port
# Third-party imports
__all__ = ('ConfigFlag', 'StatsType', 'AggregateStatsReply',
'AggregateStatsRequest', 'DescStats', 'FlowStats',
'FlowStatsRequest', 'PortStats', 'PortStatsRequest', 'QueueStats',
'QueueStatsRequest', 'TableStats', 'VendorStats',
'VendorStatsRequest')
# Enums
class ConfigFlag(IntEnum):
"""Configuration Flags. Handling of IP Fragments."""
#: No special handling for fragments
OFPC_FRAG_NORMAL = 0
#: Drop fragments
OFPC_FRAG_DROP = 1
#: Reassemble (only if OFPC_IP_REASM set)
OFPC_FRAG_REASM = 2
OFPC_FRAG_MASK = 3
class StatsType(IntEnum):
"""Type field to be used both in both request and reply.
It specifies the kind of information being passed and determines how the
body field is interpreted.
"""
#: Description of this OpenFlow switch. The request body is empty.
OFPST_DESC = 0
#: Individual flow statistics. The request body is struct
#: ofp_flow_stats_request.
OFPST_FLOW = 1
#: Aggregate flow statistics. The request body is struct
#: ofp_aggregate_stats_request.
OFPST_AGGREGATE = 2
#: Flow table statistics. The request body is empty.
OFPST_TABLE = 3
#: Physical port statistics. The request body is empty.
OFPST_PORT = 4
#: Queue statistics for a port. The request body defines the port
OFPST_QUEUE = 5
#: Vendor extension. The request and reply bodies begin with a 32-bit
#: vendor ID
OFPST_VENDOR = 0xffff
# Classes
class SwitchConfig(GenericMessage):
"""Used as base class for SET_CONFIG and GET_CONFIG_REPLY messages."""
header = Header()
flags = UBInt16(enum_ref=ConfigFlag)
miss_send_len = UBInt16()
def __init__(self, xid=None, flags=None, miss_send_len=None):
"""Create a SwitchConfig with the optional parameters below.
Args:
xid (int): xid to be used on the message header.
flags (ConfigFlag): OFPC_* flags.
miss_send_len (int): UBInt16 max bytes of new flow that the
datapath should send to the controller.
"""
super().__init__(xid)
self.flags = flags
self.miss_send_len = miss_send_len
def __repr__(self):
"""Show a full representation of the object."""
return "%s(xid=%r, flags=%s, miss_send_len=%r)" \
% (self.__class__.__name__, self.header.xid, self.flags,
self.miss_send_len)
class AggregateStatsReply(GenericStruct):
"""Body of reply to OFPST_AGGREGATE request."""
packet_count = UBInt64()
byte_count = UBInt64()
flow_count = UBInt32()
#: Align to 64 bits
pad = Pad(4)
def __init__(self, packet_count=None, byte_count=None, flow_count=None):
"""Create a AggregateStatsReply with the optional parameters below.
Args:
packet_count (int): Number of packets in flows
byte_count (int): Number of bytes in flows
flow_count (int): Number of flows
"""
super().__init__()
self.packet_count = packet_count
self.byte_count = byte_count
self.flow_count = flow_count
class AggregateStatsRequest(GenericStruct):
"""Body for ofp_stats_request of type OFPST_AGGREGATE."""
match = Match()
table_id = UBInt8()
#: Align to 32 bits
pad = Pad(1)
out_port = UBInt16()
    def __init__(self, match=None, table_id=0xff, out_port=Port.OFPP_NONE):
"""Create a AggregateStatsRequest with the optional parameters below.
Args:
match (~pyof.v0x01.common.flow_match.Match): Fields to match.
table_id (int): ID of table to read (from pyof_table_stats) 0xff
for all tables or 0xfe for emergency.
out_port (int): Require matching entries to include this as an
output port. A value of OFPP_NONE indicates no restriction.
"""
super().__init__()
        self.match = Match() if match is None else match
self.table_id = table_id
self.out_port = out_port
class DescStats(GenericStruct):
"""Information available from the OFPST_DESC stats request.
Information about the switch manufacturer, hardware revision, software
revision, serial number and a description field.
"""
mfr_desc = Char(length=DESC_STR_LEN)
hw_desc = Char(length=DESC_STR_LEN)
sw_desc = Char(length=DESC_STR_LEN)
serial_num = Char(length=SERIAL_NUM_LEN)
dp_desc = Char(length=DESC_STR_LEN)
def __init__(self, mfr_desc=None, hw_desc=None, sw_desc=None,
serial_num=None, dp_desc=None):
"""Create a DescStats with the optional parameters below.
Args:
mfr_desc (str): Manufacturer description
hw_desc (str): Hardware description
sw_desc (str): Software description
serial_num (str): Serial number
dp_desc (str): Human readable description of datapath
"""
super().__init__()
self.mfr_desc = mfr_desc
self.hw_desc = hw_desc
self.sw_desc = sw_desc
self.serial_num = serial_num
self.dp_desc = dp_desc
class FlowStats(GenericStruct):
"""Body of reply to OFPST_FLOW request."""
length = UBInt16()
table_id = UBInt8()
#: Align to 32 bits.
pad = Pad(1)
match = Match()
duration_sec = UBInt32()
duration_nsec = UBInt32()
priority = UBInt16()
idle_timeout = UBInt16()
hard_timeout = UBInt16()
#: Align to 64-bits
pad2 = Pad(6)
cookie = UBInt64()
packet_count = UBInt64()
byte_count = UBInt64()
actions = ListOfActions()
def __init__(self, length=None, table_id=None, match=None,
duration_sec=None, duration_nsec=None, priority=None,
idle_timeout=None, hard_timeout=None, cookie=None,
packet_count=None, byte_count=None, actions=None):
"""Create a FlowStats with the optional parameters below.
Args:
length (int): Length of this entry.
table_id (int): ID of table flow came from.
match (~pyof.v0x01.common.flow_match.Match): Description of fields.
duration_sec (int): Time flow has been alive in seconds.
duration_nsec (int): Time flow has been alive in nanoseconds in
addition to duration_sec.
priority (int): Priority of the entry. Only meaningful when this
is not an exact-match entry.
idle_timeout (int): Number of seconds idle before expiration.
hard_timeout (int): Number of seconds before expiration.
cookie (int): Opaque controller-issued identifier.
packet_count (int): Number of packets in flow.
byte_count (int): Number of bytes in flow.
            actions (:class:`~pyof.v0x01.common.action.ListOfActions`):
List of Actions.
"""
super().__init__()
self.length = length
self.table_id = table_id
self.match = match
self.duration_sec = duration_sec
self.duration_nsec = duration_nsec
self.priority = priority
self.idle_timeout = idle_timeout
self.hard_timeout = hard_timeout
self.cookie = cookie
self.packet_count = packet_count
self.byte_count = byte_count
self.actions = [] if actions is None else actions
def unpack(self, buff, offset=0):
"""Unpack *buff* into this object.
        Read this entry's ``length`` field first and limit the buffer to that
        many bytes before delegating to the parent class unpack.
Args:
buff (bytes): Buffer where data is located.
offset (int): Where data stream begins.
"""
self.length = UBInt16()
self.length.unpack(buff, offset)
max_length = offset + self.length.value
super().unpack(buff[:max_length], offset)
class FlowStatsRequest(GenericStruct):
"""Body for ofp_stats_request of type OFPST_FLOW."""
match = Match()
table_id = UBInt8()
#: Align to 32 bits.
pad = Pad(1)
out_port = UBInt16()
def __init__(self, match=None, table_id=0xff, out_port=Port.OFPP_NONE):
"""Create a FlowStatsRequest with the optional parameters below.
Args:
match (:class:`~pyof.v0x01.common.flow_match.Match`):
Fields to match.
table_id (int): ID of table to read (from pyof_table_stats)
0xff for all tables or 0xfe for emergency.
out_port (:class:`int`, :class:`~pyof.v0x01.common.phy_port.Port`):
Require matching entries to include this as an output port.
A value of :attr:`.Port.OFPP_NONE` indicates no restriction.
"""
super().__init__()
self.match = Match() if match is None else match
self.table_id = table_id
self.out_port = out_port
class PortStats(GenericStruct):
"""Body of reply to OFPST_PORT request.
If a counter is unsupported, set the field to all ones.
"""
port_no = UBInt16()
#: Align to 64-bits.
pad = Pad(6)
rx_packets = UBInt64()
tx_packets = UBInt64()
rx_bytes = UBInt64()
tx_bytes = UBInt64()
rx_dropped = UBInt64()
tx_dropped = UBInt64()
rx_errors = UBInt64()
tx_errors = UBInt64()
rx_frame_err = UBInt64()
rx_over_err = UBInt64()
rx_crc_err = UBInt64()
collisions = UBInt64()
def __init__(self, port_no=None, rx_packets=None,
tx_packets=None, rx_bytes=None, tx_bytes=None,
rx_dropped=None, tx_dropped=None, rx_errors=None,
tx_errors=None, rx_frame_err=None, rx_over_err=None,
rx_crc_err=None, collisions=None):
"""Create a PortStats with the optional parameters below.
Args:
port_no (:class:`int`, :class:`~pyof.v0x01.common.phy_port.Port`):
Port number.
rx_packets (int): Number of received packets.
tx_packets (int): Number of transmitted packets.
rx_bytes (int): Number of received bytes.
tx_bytes (int): Number of transmitted bytes.
rx_dropped (int): Number of packets dropped by RX.
tx_dropped (int): Number of packets dropped by TX.
rx_errors (int): Number of receive errors. This is a super-set of
more specific receive errors and should be greater than or
equal to the sum of all rx_*_err values.
tx_errors (int): Number of transmit errors. This is a super-set of
more specific transmit errors and should be greater than or
equal to the sum of all tx_*_err values (none currently
defined).
rx_frame_err (int): Number of frame alignment errors.
rx_over_err (int): Number of packets with RX overrun.
rx_crc_err (int): Number of CRC errors.
collisions (int): Number of collisions.
"""
super().__init__()
self.port_no = port_no
self.rx_packets = rx_packets
self.tx_packets = tx_packets
self.rx_bytes = rx_bytes
self.tx_bytes = tx_bytes
self.rx_dropped = rx_dropped
self.tx_dropped = tx_dropped
self.rx_errors = rx_errors
self.tx_errors = tx_errors
self.rx_frame_err = rx_frame_err
self.rx_over_err = rx_over_err
self.rx_crc_err = rx_crc_err
self.collisions = collisions
class PortStatsRequest(GenericStruct):
"""Body for ofp_stats_request of type OFPST_PORT."""
port_no = UBInt16()
#: Align to 64-bits.
pad = Pad(6)
def __init__(self, port_no=None):
"""Create a PortStatsRequest with the optional parameters below.
Args:
port_no (:class:`int`, :class:`~pyof.v0x01.common.phy_port.Port`):
OFPST_PORT message must request statistics either for a single
port (specified in ``port_no``) or for all ports
(if ``port_no`` == :attr:`.Port.OFPP_NONE`).
"""
super().__init__()
self.port_no = port_no
class QueueStats(GenericStruct):
"""Implements the reply body of a port_no."""
port_no = UBInt16()
#: Align to 32-bits.
pad = Pad(2)
queue_id = UBInt32()
tx_bytes = UBInt64()
tx_packets = UBInt64()
tx_errors = UBInt64()
def __init__(self, port_no=None, queue_id=None, tx_bytes=None,
tx_packets=None, tx_errors=None):
"""Create a QueueStats with the optional parameters below.
Args:
port_no (:class:`int`, :class:`~pyof.v0x01.common.phy_port.Port`):
Port Number.
queue_id (int): Queue ID.
tx_bytes (int): Number of transmitted bytes.
tx_packets (int): Number of transmitted packets.
tx_errors (int): Number of packets dropped due to overrun.
"""
super().__init__()
self.port_no = port_no
self.queue_id = queue_id
self.tx_bytes = tx_bytes
self.tx_packets = tx_packets
self.tx_errors = tx_errors
class QueueStatsRequest(GenericStruct):
"""Implements the request body of a ``port_no``."""
port_no = UBInt16()
#: Align to 32-bits
pad = Pad(2)
queue_id = UBInt32()
def __init__(self, port_no=None, queue_id=None):
"""Create a QueueStatsRequest with the optional parameters below.
Args:
port_no (:class:`int`, :class:`~pyof.v0x01.common.phy_port.Port`):
All ports if :attr:`.Port.OFPP_ALL`.
            queue_id (int): All queues if OFPQ_ALL (``0xffffffff``).
"""
super().__init__()
self.port_no = port_no
self.queue_id = queue_id
class TableStats(GenericStruct):
"""Body of reply to OFPST_TABLE request."""
table_id = UBInt8()
#: Align to 32-bits.
pad = Pad(3)
name = Char(length=OFP_MAX_TABLE_NAME_LEN)
wildcards = UBInt32(enum_ref=FlowWildCards)
max_entries = UBInt32()
active_count = UBInt32()
count_lookup = UBInt64()
count_matched = UBInt64()
def __init__(self, table_id=None, name=None, wildcards=None,
max_entries=None, active_count=None, count_lookup=None,
count_matched=None):
"""Create a TableStats with the optional parameters below.
Args:
table_id (int): Identifier of table. Lower numbered tables are
consulted first.
name (str): Table name.
wildcards (:class:`~pyof.v0x01.common.flow_match.FlowWildCards`):
Bitmap of OFPFW_* wildcards that are supported by the table.
max_entries (int): Max number of entries supported.
active_count (int): Number of active entries.
count_lookup (int): Number of packets looked up in table.
count_matched (int): Number of packets that hit table.
"""
super().__init__()
self.table_id = table_id
self.name = name
self.wildcards = wildcards
self.max_entries = max_entries
self.active_count = active_count
self.count_lookup = count_lookup
self.count_matched = count_matched
class VendorStats(GenericStruct):
"""Vendor extension."""
vendor = UBInt32()
body = BinaryData()
def __init__(self, vendor=None, body=b''):
"""Create instance attributes.
Args:
vendor (int): 32-bit vendor ID.
body (bytes): Vendor-defined body
"""
super().__init__()
self.vendor = vendor
self.body = body
VendorStatsRequest = VendorStats
```
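PortStatsRequest and QueueStatsRequest above are tiny fixed-size bodies. A minimal, hedged usage sketch (the `Port` import path is the one the docstrings reference; the byte counts follow directly from the field definitions):
```python
from pyof.v0x01.common.phy_port import Port

# Ask for statistics on every port (OFPP_NONE means "all ports" here).
port_req = PortStatsRequest(port_no=Port.OFPP_NONE)
print(len(port_req.pack()))   # 8 bytes: UBInt16 port_no + 6 bytes of padding

# Ask for queue 0 on port 1; 0xffffffff (OFPQ_ALL) would mean every queue.
queue_req = QueueStatsRequest(port_no=1, queue_id=0)
print(len(queue_req.pack()))  # 8 bytes: 2 (port_no) + 2 (pad) + 4 (queue_id)
```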
#### File: v0x01/controller2switch/flow_mod.py
```python
from enum import IntEnum
from pyof.foundation.base import GenericBitMask, GenericMessage
from pyof.foundation.basic_types import UBInt16, UBInt32, UBInt64
from pyof.v0x01.common.action import ListOfActions
from pyof.v0x01.common.constants import NO_BUFFER
# Local source tree imports
from pyof.v0x01.common.flow_match import Match
from pyof.v0x01.common.header import Header, Type
from pyof.v0x01.common.phy_port import Port
# Third-party imports
__all__ = ('FlowMod', 'FlowModCommand', 'FlowModFlags')
# Enums
class FlowModCommand(IntEnum):
"""List the possible commands for a flow."""
#: New flow
OFPFC_ADD = 0
#: Modify all flows
OFPFC_MODIFY = 1
#: Modify entry strictly matching wildcards
OFPFC_MODIFY_STRICT = 2
#: Delete all matching flows
OFPFC_DELETE = 3
#: Strictly match wildcards and priority
OFPFC_DELETE_STRICT = 4
class FlowModFlags(GenericBitMask):
"""Types to be used in Flags field."""
#: Send flow removed message when flow expires or is deleted
OFPFF_SEND_FLOW_REM = 1 << 0
#: Check for overlapping entries first
OFPFF_CHECK_OVERLAP = 1 << 1
#: Remark this is for emergency
OFPFF_EMERG = 1 << 2
# Classes
class FlowMod(GenericMessage):
"""Modifies the flow table from the controller."""
header = Header(message_type=Type.OFPT_FLOW_MOD)
match = Match()
cookie = UBInt64()
command = UBInt16(enum_ref=FlowModCommand)
idle_timeout = UBInt16()
hard_timeout = UBInt16()
priority = UBInt16()
buffer_id = UBInt32()
out_port = UBInt16(enum_ref=Port)
flags = UBInt16(enum_ref=FlowModFlags)
actions = ListOfActions()
def __init__(self, xid=None, match=None, cookie=0, command=None,
idle_timeout=0, hard_timeout=0, priority=0,
buffer_id=NO_BUFFER, out_port=Port.OFPP_NONE,
flags=FlowModFlags.OFPFF_SEND_FLOW_REM, actions=None):
"""Create a FlowMod with the optional parameters below.
Args:
xid (int): xid to be used on the message header.
match (~pyof.v0x01.common.flow_match.Match): Fields to match.
cookie (int): Opaque controller-issued identifier.
command (~pyof.v0x01.controller2switch.flow_mod.FlowModCommand):
One of OFPFC_*.
idle_timeout (int): Idle time before discarding (seconds).
hard_timeout (int): Max time before discarding (seconds).
priority (int): Priority level of flow entry.
buffer_id (int): Buffered packet to apply to (or -1).
Not meaningful for OFPFC_DELETE*.
out_port (~pyof.v0x01.common.phy_port.Port):
For OFPFC_DELETE* commands, require matching entries to include
this as an output port. A value of OFPP_NONE indicates no
restriction.
flags (~pyof.v0x01.controller2switch.flow_mod.FlowModFlags):
One of OFPFF_*.
actions (~pyof.v0x01.common.action.ListOfActions):
The action length is inferred from the length field in the
header.
"""
super().__init__(xid)
self.match = match or Match()
self.cookie = cookie
self.command = command
self.idle_timeout = idle_timeout
self.hard_timeout = hard_timeout
self.priority = priority
self.buffer_id = buffer_id
self.out_port = out_port
self.flags = flags
self.actions = actions or []
```
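A hedged sketch of building the message above (import paths follow the docstrings; `update_header_length()` is called explicitly, since it is not guaranteed here that `pack()` refreshes the header length on its own):
```python
from pyof.v0x01.common.flow_match import Match

# Add a flow matching packets that arrive on switch port 1.
flow_mod = FlowMod(xid=0x11, command=FlowModCommand.OFPFC_ADD,
                   match=Match(in_port=1), priority=100, idle_timeout=30)
flow_mod.update_header_length()
data = flow_mod.pack()
print(len(data))  # 72 bytes, the OF 1.0 ofp_flow_mod size with no actions
```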
#### File: v0x04/common/flow_instructions.py
```python
from enum import IntEnum
# Local source tree imports
from pyof.foundation.base import GenericStruct
from pyof.foundation.basic_types import (
FixedTypeList, Pad, UBInt8, UBInt16, UBInt32, UBInt64)
from pyof.foundation.exceptions import PackException
from pyof.v0x04.common.action import ListOfActions
from pyof.v0x04.controller2switch.meter_mod import Meter
# Third-party imports
__all__ = ('InstructionApplyAction', 'InstructionClearAction',
'InstructionGotoTable', 'InstructionMeter', 'InstructionType',
'InstructionWriteAction', 'InstructionWriteMetadata',
'ListOfInstruction')
# Enums
class InstructionType(IntEnum):
"""List of instructions that are currently defined."""
#: Setup the next table in the lookup pipeline
OFPIT_GOTO_TABLE = 1
#: Setup the metadata field for use later in pipeline
OFPIT_WRITE_METADATA = 2
#: Write the action(s) onto the datapath action set
OFPIT_WRITE_ACTIONS = 3
#: Applies the action(s) immediately
OFPIT_APPLY_ACTIONS = 4
#: Clears all actions from the datapath action set
OFPIT_CLEAR_ACTIONS = 5
#: Apply meter (rate limiter)
OFPIT_METER = 6
#: Experimenter instruction
OFPIT_EXPERIMENTER = 0xFFFF
def find_class(self):
"""Return a class related with this type."""
classes = {1: InstructionGotoTable, 2: InstructionWriteMetadata,
3: InstructionWriteAction, 4: InstructionApplyAction,
5: InstructionClearAction, 6: InstructionMeter}
return classes.get(self.value, None)
# Classes
class Instruction(GenericStruct):
"""Generic Instruction class.
This class represents a Generic Instruction that can be instantiated as
'InstructionApplyAction', 'InstructionClearAction', 'InstructionGotoTable',
'InstructionMeter', 'InstructionWriteAction', 'InstructionWriteMetadata'.
"""
instruction_type = UBInt16(enum_ref=InstructionType)
length = UBInt16()
def __init__(self, instruction_type=None):
"""Create a Instruction with the optional parameters below.
Args:
instruction_type(InstructionType): Type of instruction.
"""
super().__init__()
self.instruction_type = instruction_type
def pack(self, value=None):
"""Update the length and pack the massege into binary data.
Returns:
bytes: A binary data that represents the Message.
Raises:
Exception: If there are validation errors.
"""
if value is None:
self.update_length()
return super().pack()
if isinstance(value, type(self)):
return value.pack()
msg = "{} is not an instance of {}".format(value, type(self).__name__)
raise PackException(msg)
def update_length(self):
"""Update length attribute."""
self.length = self.get_size()
def unpack(self, buff=None, offset=0):
"""Unpack *buff* into this object.
This method will convert a binary data into a readable value according
to the attribute format.
Args:
buff (bytes): Binary buffer.
offset (int): Where to begin unpacking.
Raises:
:exc:`~.exceptions.UnpackException`: If unpack fails.
"""
instruction_type = UBInt16(enum_ref=InstructionType)
instruction_type.unpack(buff, offset)
self.__class__ = InstructionType(instruction_type.value).find_class()
length = UBInt16()
length.unpack(buff, offset=offset+2)
super().unpack(buff[:offset+length.value], offset)
class InstructionApplyAction(Instruction):
"""Instruction structure for OFPIT_APPLY_ACTIONS.
The :attr:`~actions` field is treated as a list, and the actions are
applied to the packet in-order.
"""
#: Align to 64-bits
pad = Pad(4)
#: Actions associated with OFPIT_APPLY_ACTIONS
actions = ListOfActions()
def __init__(self, actions=None):
"""Create a InstructionApplyAction with the optional parameters below.
Args:
actions (:class:`~.actions.ListOfActions`):
Actions associated with OFPIT_APPLY_ACTIONS.
"""
super().__init__(InstructionType.OFPIT_APPLY_ACTIONS)
self.actions = actions if actions else []
class InstructionClearAction(Instruction):
"""Instruction structure for OFPIT_CLEAR_ACTIONS.
This structure does not contain any actions.
"""
#: Align to 64-bits
pad = Pad(4)
#: OFPIT_CLEAR_ACTIONS does not have any action on the list of actions.
actions = ListOfActions()
def __init__(self, actions=None):
"""Create a InstructionClearAction with the optional parameters below.
Args:
actions (:class:`~.actions.ListOfActions`):
Actions associated with OFPIT_CLEAR_ACTIONS.
"""
super().__init__(InstructionType.OFPIT_CLEAR_ACTIONS)
self.actions = actions if actions else []
class InstructionGotoTable(Instruction):
"""Instruction structure for OFPIT_GOTO_TABLE."""
#: Set next table in the lookup pipeline.
table_id = UBInt8()
#: Pad to 64 bits.
pad = Pad(3)
def __init__(self, table_id=Meter.OFPM_ALL):
"""Create a InstructionGotoTable with the optional parameters below.
Args:
table_id (int): Set next table in the lookup pipeline.
"""
super().__init__(InstructionType.OFPIT_GOTO_TABLE)
self.table_id = table_id
class InstructionMeter(Instruction):
"""Instruction structure for OFPIT_METER.
meter_id indicates which meter to apply on the packet.
"""
#: Meter instance.
meter_id = UBInt32()
def __init__(self, meter_id=Meter.OFPM_ALL):
"""Create a InstructionMeter with the optional parameters below.
Args:
meter_id (int): Meter instance.
"""
super().__init__(InstructionType.OFPIT_METER)
self.meter_id = meter_id
class InstructionWriteAction(Instruction):
"""Instruction structure for OFPIT_WRITE_ACTIONS.
The actions field must be treated as a SET, so the actions are not
repeated.
"""
#: Align to 64-bits
pad = Pad(4)
#: Actions associated with OFPIT_WRITE_ACTIONS
actions = ListOfActions()
def __init__(self, actions=None):
"""Create a InstructionWriteAction with the optional parameters below.
Args:
actions (:class:`~.actions.ListOfActions`):
Actions associated with OFPIT_WRITE_ACTIONS.
"""
super().__init__(InstructionType.OFPIT_WRITE_ACTIONS)
self.actions = actions if actions else []
class InstructionWriteMetadata(Instruction):
"""Instruction structure for OFPIT_WRITE_METADATA."""
#: Align to 64-bits
pad = Pad(4)
#: Metadata value to write
metadata = UBInt64()
#: Metadata write bitmask
metadata_mask = UBInt64()
def __init__(self, metadata=0, metadata_mask=0):
"""Create InstructionWriteMetadata with the optional parameters below.
Args:
metadata (int): Metadata value to write.
metadata_mask (int): Metadata write bitmask.
"""
super().__init__(InstructionType.OFPIT_WRITE_METADATA)
self.metadata = metadata
self.metadata_mask = metadata_mask
class ListOfInstruction(FixedTypeList):
"""List of Instructions.
Represented by instances of Instruction.
"""
def __init__(self, items=None):
"""Create ListOfInstruction with the optional parameters below.
Args:
items (:class:`~pyof.v0x04.common.flow_instructions.Instruction`):
Instance or a list of instances.
"""
super().__init__(pyof_class=Instruction, items=items)
```
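A minimal sketch of composing the instruction list defined above; the packed sizes follow from the field definitions (2 + 2 + 1 + 3 bytes for goto_table, 2 + 2 + 4 + 8 + 8 for write_metadata):
```python
instructions = ListOfInstruction([
    InstructionGotoTable(table_id=1),
    InstructionWriteMetadata(metadata=0xabc, metadata_mask=0xfff),
])
packed = instructions.pack()  # each item's length field is updated on pack()
print(len(packed))            # 8 + 24 = 32 bytes
```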
#### File: v0x04/controller2switch/role_request.py
```python
from pyof.v0x04.common.header import Type
from pyof.v0x04.controller2switch.common import RoleBaseMessage
__all__ = ('RoleRequest',)
# Classes
class RoleRequest(RoleBaseMessage):
"""RoleRequest Message.
When the controller wants to change its role, it uses the OFPT_ROLE_REQUEST
message.
"""
def __init__(self, xid=None, role=None, generation_id=None):
"""Create a RoleRequest with the optional parameters below.
Args:
xid (int): OpenFlow xid to the header.
role (:class:`~.controller2switch.common.ControllerRole`):
Is the new role that the controller wants to assume.
generation_id (int): Master Election Generation Id.
"""
super().__init__(xid, role, generation_id)
self.header.message_type = Type.OFPT_ROLE_REQUEST
```
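A hedged usage sketch; `ControllerRole` is the enum the docstring points at, and the member name below assumes the usual OFPCR_ROLE_* naming from the OpenFlow 1.3 spec:
```python
from pyof.v0x04.controller2switch.common import ControllerRole

request = RoleRequest(xid=7, role=ControllerRole.OFPCR_ROLE_MASTER,
                      generation_id=0)
request.update_header_length()
print(len(request.pack()))  # 24 bytes: 8 header + 4 role + 4 pad + 8 gen. id
```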
#### File: tests/unit/test_struct.py
```python
import unittest
from pyof.foundation.base import GenericMessage
from tests.unit.raw_dump import RawDump
class TestStruct(unittest.TestCase):
"""Run tests related to struct packing and unpacking.
Test the lib with raw dump files from an OpenFlow switch. We assume the
raw files are valid according to the OF specs to check whether our pack and
unpack implementations are correct.
Also, check the minimum size of the struct by instantiating an object with
no parameters.
To run these tests, just extend this class and call the helper methods in
``setUpClass``, as in the example.
Example:
.. code-block:: python3
class MyTest(TestStruct):
@classmethod
def setUpClass(cls):
super().setUpClass()
super().set_raw_dump_file('v0x01', 'ofpt_barrier_reply')
# Create BarrierReply(xid=5) when needed
super().set_raw_dump_object(BarrierReply, xid=5)
# As in spec: ``OFP_ASSERT(sizeof(struct ...) == ...);``
super().set_minimum_size(8)
To only test the minimum size and skip packing/unpacking:
.. code-block:: python3
class MyTest(TestStruct):
@classmethod
def setUpClass(cls):
super().set_minimum_size(8, BarrierReply)
"""
def __init__(self, *args, **kwargs):
"""Avoid that this class tests are executed.
The tests in this class are executed through the child, so there's no
no need for them to be executed once more through the parent.
"""
super().__init__(*args, **kwargs)
# Override the run method, so it does nothing instead of running the
# tests (again).
if self.__class__ == TestStruct:
self.run = lambda *args, **kwargs: None
_new_raw_dump = None
_new_raw_object = None
_msg_cls = None
_min_size = None
@classmethod
def set_raw_dump_file(cls, version, basename):
"""Set which raw dump the tests will use.
Args:
version (str): OpenFlow protocol version,
e.g. ``v0x01``.
basename (str): The raw filename without extension.
E.g. ``ofpt_echo_reply``.
"""
cls._new_raw_dump = lambda: RawDump(version, basename)
@classmethod
def get_raw_dump(cls):
"""Return a new instance of :class:`.RawDump`.
Use the parameters set in :meth:`set_raw_dump_file`.
Returns:
RawDump: with parameters previously set using
:meth:`set_raw_dump_file`.
"""
if cls._new_raw_dump is None:
raise FileNotFoundError()
return cls._new_raw_dump()
@classmethod
def set_raw_dump_object(cls, msg_cls, *args, **kwargs):
"""Set how to create the object that is dumped in a raw file.
Args:
msg_cls (:obj:`type`): The message class that is packed as a
raw file, followed by its parameters to instantiate an
object.
Example:
``super().set_raw_dump_object(BarrierReply, xid=5)`` will create
``BarrierReply(xid=5)``.
"""
TestStruct._msg_cls = msg_cls
cls._new_raw_object = lambda: msg_cls(*args, **kwargs)
@classmethod
def get_raw_object(cls):
"""Create a new object of the dumped message.
Use the class and parameters set in :meth:`set_raw_dump_object`.
Returns:
A new object using the class and parameters previously set through
:meth:`set_raw_dump_object`.
"""
pyof_obj = cls._new_raw_object()
if isinstance(pyof_obj, GenericMessage):
pyof_obj.update_header_length()
return pyof_obj
@classmethod
def set_minimum_size(cls, size, msg_cls=None):
"""Set the struct minimum size.
The minimum size can be found in OF spec. For example,
:class:`.PhyPort` minimum size is 48 because of
``OFP_ASSERT(sizeof(struct ofp_phy_port) == 48);`` (spec 1.0.0).
Args:
size (int): The minimum size of the struct, in bytes.
msg_cls (class): The class (or function) to have its size checked.
If None, use the same class set in :meth:`set_raw_dump_object`.
"""
cls._min_size = size
if msg_cls is not None:
TestStruct._msg_cls = msg_cls
def _test_pack(self, obj, expected_bytes):
"""Check whether packed objects equals to dump file."""
actual_bytes = obj.pack()
self.assertSequenceEqual(expected_bytes, actual_bytes)
def test_raw_dump_file(self):
"""Object pack should equal file; file unpack should equal object.
The object to be packed is set with :meth:`set_raw_dump_object` and the
file, with :meth:`set_raw_dump_file`.
"""
try:
file_bytes = self.get_raw_dump().read()
except FileNotFoundError:
raise self.skipTest('No raw dump file found.')
pyof_obj = self.get_raw_object()
self._test_pack(pyof_obj, file_bytes)
self._test_unpack(pyof_obj, file_bytes)
def _test_unpack(self, pyof_obj, bytes2unpack=None):
"""Check whether unpacking ``bytes2unpack`` equals ``pyof_obj``.
Args:
pyof_obj (GenericStruct, GenericType): Object supporting (un)pack
operations.
bytes2unpack (bytes): If not supplied, use ``pyof_obj.pack()``.
"""
bytes2unpack = bytes2unpack or pyof_obj.pack()
unpacked = type(pyof_obj)()
# If it's a GenericMessage, unpack the Header first
if isinstance(pyof_obj, GenericMessage):
header_bytes = bytes2unpack[:8]
unpacked.header.unpack(header_bytes)
bytes2unpack = bytes2unpack[8:unpacked.header.length.value]
unpacked.unpack(bytes2unpack)
self.assertEqual(pyof_obj, unpacked)
self.assertEqual(pyof_obj.get_size(), unpacked.get_size())
def test_minimum_size(self):
"""Test struct minimum size."""
obj = TestStruct._msg_cls()
if self._min_size is None:
raise Exception(f'{self.__class__.__name__}._min_size is not set')
self.assertEqual(obj.get_size(), self._min_size)
```
#### File: v0x01/test_asynchronous/test_error_msg.py
```python
from pyof.v0x01.asynchronous.error_msg import (
BadRequestCode, ErrorMsg, ErrorType, FlowModFailedCode)
from tests.unit.test_struct import TestStruct
class TestErrorMessage(TestStruct):
"""Test the Error Message."""
@classmethod
def setUpClass(cls):
"""Setup TestStruct."""
super().setUpClass()
super().set_raw_dump_file('v0x01', 'ofpt_error_msg')
super().set_raw_dump_object(ErrorMsg, xid=12,
error_type=ErrorType.OFPET_BAD_REQUEST,
code=BadRequestCode.OFPBRC_BAD_STAT,
data=b'')
super().set_minimum_size(12)
def test_unpack_error_msg(self):
"""Test Unpack a sample ErrorMsg."""
expected = b'\x01\x01\x00\x1b\x00\x00\x00\x18\x00\x03\x00\x02FLOW'
error_msg = ErrorMsg(xid=24,
error_type=ErrorType.OFPET_FLOW_MOD_FAILED,
code=FlowModFailedCode.OFPFMFC_EPERM,
data=b'FLOW')
actual = ErrorMsg(xid=24)
actual.unpack(expected[8:])
self.assertEqual(actual, error_msg)
```
#### File: v0x01/test_common/test_header.py
```python
import os
import unittest
from unittest.mock import patch
from pyof.v0x01.common.header import Header, Type
class TestHeader(unittest.TestCase):
"""Test the message Header."""
def setUp(self):
"""Setup the TestHeader Class instantiating a HELLO header."""
self.message = Header()
self.message.message_type = Type.OFPT_HELLO
self.message.xid = 1
self.message.length = 0
def test_size(self):
"""[Common/Header] - size 8."""
self.assertEqual(self.message.get_size(), 8)
@unittest.expectedFailure
def test_pack_empty(self):
"""[Common/Header] - packing empty header."""
self.assertRaises(TypeError,
Header().pack())
def test_pack(self):
"""[Common/Header] - packing Hello."""
packed_header = b'\x01\x00\x00\x00\x00\x00\x00\x01'
self.assertEqual(self.message.pack(), packed_header)
def test_unpack(self):
"""[Common/Header] - unpacking Hello."""
filename = os.path.join(os.path.dirname(os.path.realpath('__file__')),
'raw/v0x01/ofpt_hello.dat')
f = open(filename, 'rb')
self.message.unpack(f.read(8))
self.assertEqual(self.message.length, 8)
self.assertEqual(self.message.xid, 1)
self.assertEqual(self.message.message_type, Type.OFPT_HELLO)
self.assertEqual(self.message.version, 1)
f.close()
```
#### File: v0x01/test_controller2switch/test_aggregate_stats_request.py
```python
from pyof.v0x01.common.flow_match import Match
from pyof.v0x01.common.phy_port import Port
from pyof.v0x01.controller2switch.common import (
AggregateStatsRequest, StatsType)
from pyof.v0x01.controller2switch.stats_request import StatsRequest
from tests.unit.test_struct import TestStruct
class TestAggregateStatsRequest(TestStruct):
"""Test class for TestAggregateStatsRequest."""
@classmethod
def setUpClass(cls):
"""[Controller2Switch/AggregateStatsRequest] - size 44."""
request = AggregateStatsRequest(table_id=1, out_port=Port.OFPP_NONE,
match=_get_match())
super().setUpClass()
super().set_raw_dump_file('v0x01', 'ofpt_aggregate_request')
super().set_raw_dump_object(StatsRequest, xid=17,
body_type=StatsType.OFPST_AGGREGATE,
flags=0, body=request)
super().set_minimum_size(12)
def _get_match():
"""Function used to built Match instance used by AggregateStatsRequest."""
return Match(in_port=80, dl_src="01:02:03:04:05:06",
dl_dst="01:02:03:04:05:06", dl_vlan=1,
dl_vlan_pcp=1, dl_type=1,
nw_tos=1, nw_proto=1,
nw_src='192.168.0.1', nw_dst='192.168.0.1',
tp_src=80, tp_dst=80)
```
#### File: v0x04/test_controller2switch/test_aggregate_stats_request.py
```python
from pyof.v0x04.common.flow_match import Match
from pyof.v0x04.controller2switch.common import MultipartType
from pyof.v0x04.controller2switch.multipart_request import (
AggregateStatsRequest, MultipartRequest)
from tests.unit.test_struct import TestStruct
class TestAggregateStatsRequest(TestStruct):
"""Aggregate stats request message."""
@classmethod
def setUpClass(cls):
"""Configure raw file and its object in parent class (TestDump)."""
mp_type = MultipartType.OFPMP_AGGREGATE
super().setUpClass()
super().set_raw_dump_file('v0x04', 'ofpt_aggregate_stats_request')
super().set_raw_dump_object(MultipartRequest, xid=1,
multipart_type=mp_type,
flags=0, body=_get_body())
super().set_minimum_size(16)
def _get_body():
"""Return the body used by MultipartRequest message."""
return AggregateStatsRequest(match=Match())
```
#### File: v0x04/test_controller2switch/test_flow_stats.py
```python
from pyof.v0x04.common.action import ActionOutput, ListOfActions
from pyof.v0x04.common.flow_instructions import (
InstructionApplyAction, ListOfInstruction)
from pyof.v0x04.common.flow_match import (
Match, MatchType, OxmClass, OxmOfbMatchField, OxmTLV)
from pyof.v0x04.common.port import PortNo
from pyof.v0x04.controller2switch.common import MultipartType
from pyof.v0x04.controller2switch.multipart_reply import (
FlowStats, MultipartReply)
from tests.unit.test_struct import TestStruct
class TestFlowStats(TestStruct):
"""Flow stats message."""
@classmethod
def setUpClass(cls):
"""Configure raw file and its object in parent class (TestDump)."""
super().setUpClass()
super().set_raw_dump_file('v0x04', 'ofpt_flow_stats')
super().set_raw_dump_object(MultipartReply, xid=2898845528,
multipart_type=MultipartType.OFPMP_FLOW,
flags=0,
body=_get_body())
super().set_minimum_size(16)
def _get_body():
"""Return the body used by MultipartReply message."""
return FlowStats(length=88, table_id=0, duration_sec=56,
duration_nsec=635000000, priority=1000, idle_timeout=0,
hard_timeout=0, flags=0x00000001,
cookie=0x0000000000000000, packet_count=18,
byte_count=756, match=_new_match(),
instructions=_new_list_of_instructions())
def _new_match():
"""Crate new Match instance."""
oxmtlv1 = OxmTLV(oxm_class=OxmClass.OFPXMC_OPENFLOW_BASIC,
oxm_field=OxmOfbMatchField.OFPXMT_OFB_ETH_TYPE,
oxm_hasmask=False, oxm_value=b'\x88\xcc')
oxmtlv2 = OxmTLV(oxm_class=OxmClass.OFPXMC_OPENFLOW_BASIC,
oxm_field=OxmOfbMatchField.OFPXMT_OFB_VLAN_VID,
oxm_hasmask=False, oxm_value=b'\x1e\xd7')
return Match(match_type=MatchType.OFPMT_OXM,
oxm_match_fields=[oxmtlv1, oxmtlv2])
def _new_list_of_instructions():
"""Crate new ListOfInstruction."""
action_output = ActionOutput(port=PortNo.OFPP_CONTROLLER)
loa = ListOfActions([action_output])
instruction = InstructionApplyAction(loa)
return ListOfInstruction([instruction])
```
#### File: v0x04/test_controller2switch/test_get_async_request.py
```python
from pyof.v0x04.controller2switch.get_async_request import GetAsyncRequest
from tests.unit.test_struct import TestStruct
class TestGetAsyncRequest(TestStruct):
"""Test the GetAsyncRequest message."""
@classmethod
def setUpClass(cls):
"""Configure raw file and its object in parent class (TestDump)."""
super().setUpClass()
super().set_raw_dump_file('v0x04', 'ofpt_get_async_request')
super().set_raw_dump_object(GetAsyncRequest, xid=3)
super().set_minimum_size(8)
```
#### File: v0x04/test_controller2switch/test_meter_mod.py
```python
from unittest import TestCase
from pyof.v0x04.controller2switch.meter_mod import (
MeterBandDrop, MeterBandDscpRemark, MeterBandExperimenter, MeterBandHeader,
MeterMod)
class TestMeterMod(TestCase):
"""MeterMod test."""
def test_min_size(self):
"""Test minimum message size."""
self.assertEqual(16, MeterMod().get_size())
class TestMeterBandHeader(TestCase):
"""MeterBandHeader test."""
def test_min_size(self):
"""Test minimum message size."""
self.assertEqual(12, MeterBandHeader().get_size())
class TestMeterBandDrop(TestCase):
"""MeterBandDrop test."""
def test_min_size(self):
"""Test minimum message size."""
self.assertEqual(16, MeterBandDrop().get_size())
class TestMeterBandDscpRemark(TestCase):
"""MeterBandDscpRemark test."""
def test_min_size(self):
"""Test minimum message size."""
self.assertEqual(16, MeterBandDscpRemark().get_size())
class TestMeterBandExperimenter(TestCase):
"""MeterBandExperimenter test."""
def test_min_size(self):
"""Test minimum message size."""
self.assertEqual(16, MeterBandExperimenter().get_size())
```
#### File: v0x04/test_controller2switch/test_port_stats_request.py
```python
from pyof.v0x04.controller2switch.multipart_request import PortStatsRequest
from tests.unit.test_struct import TestStruct
class TestPortStatsRequest(TestStruct):
"""Config Port Stats Request message tests."""
@classmethod
def setUpClass(cls):
"""Configure raw file and its object in parent class (TestDump)."""
super().setUpClass()
super().set_raw_dump_file('v0x04', 'ofpt_port_stats_request')
super().set_raw_dump_object(PortStatsRequest)
super().set_minimum_size(8)
```
#### File: v0x04/test_controller2switch/test_set_config.py
```python
from pyof.v0x04.common.action import ControllerMaxLen
from pyof.v0x04.controller2switch.common import ConfigFlag
from pyof.v0x04.controller2switch.set_config import SetConfig
from tests.unit.test_struct import TestStruct
class TestSetConfig(TestStruct):
"""Test the Set Config message."""
@classmethod
def setUpClass(cls):
"""Configure raw file and its object in parent class (TestDump)."""
buffer = ControllerMaxLen.OFPCML_NO_BUFFER
super().setUpClass()
super().set_raw_dump_file('v0x04', 'ofpt_set_config')
super().set_raw_dump_object(SetConfig, xid=1201346349,
flags=ConfigFlag.OFPC_FRAG_NORMAL,
miss_send_len=buffer)
super().set_minimum_size(12)
```
#### File: unit/v0x04/test_struct.py
```python
import unittest
from pyof.v0x04.common.header import Header
from pyof.v0x04.common.utils import new_message_from_header
class TestStruct(unittest.TestCase):
"""Run tests related to struct packing and unpacking.
Test the lib with raw dump files from an OpenFlow switch. We assume the
raw files are valid according to the OF specs to check whether our pack and
unpack implementations are correct.
Also, check the minimum size of the struct by instantiating an object with
no parameters.
To run these tests, just extend this class and call the helper methods in
``setUpClass``, as in the example.
Example:
.. code-block:: python3
class MyTest(TestStruct):
@classmethod
def setUpClass(cls):
super().setUpClass()
# Create BarrierReply(xid=5)
super().set_message(BarrierReply, xid=5)
# As in spec: ``OFP_ASSERT(sizeof(struct ...) == ...);``
super().set_minimum_size(8)
To only test the minimum size and skip packing/unpacking:
.. code-block:: python3
class MyTest(TestStruct):
@classmethod
def setUpClass(cls):
super().set_message(BarrierReply)
super().set_minimum_size(8)
"""
def __init__(self, *args, **kwargs):
"""The constructor will avoid that this class tests are executed.
The tests in this class are executed through the child, so there's no
no need for them to be executed once more through the parent.
"""
super().__init__(*args, **kwargs)
# Override the run method, so it does nothing instead of running the
# tests (again).
if self.__class__ == TestStruct:
self.run = lambda *args, **kwargs: None
_msg_cls = None
_msg_params = None
_min_size = None
@classmethod
def set_message(cls, msg_cls, *args, **kwargs):
"""Set how to create the message object.
Args:
msg_cls (:obj:`type`): The message class followed by its
parameters to instantiate an object.
Example:
``super().set_message(BarrierReply, xid=5)`` will create
``BarrierReply(xid=5)``.
"""
TestStruct._msg_cls = msg_cls
cls._msg_params = (args, kwargs)
@classmethod
def set_minimum_size(cls, size):
"""Set the struct minimum size (from spec).
The minimum size can be found in OF spec. For example,
:class:`.PhyPort` minimum size is 48 because of
``OFP_ASSERT(sizeof(struct ofp_phy_port) == 48);`` (spec 1.0.0).
Args:
size (int): The minimum size of the struct, in bytes.
"""
cls._min_size = size
def test_pack_unpack(self):
"""Pack the message, unpack and check whether they are the same."""
if self._msg_cls:
args, kwargs = self._msg_params
self._test_pack_unpack(*args, **kwargs)
def _test_pack_unpack(self, *args, **kwargs):
"""Pack the message, unpack and check whether they are the same.
Call this method multiple times if you want to test more than one
object.
"""
obj = self._msg_cls(*args, **kwargs)
packed = obj.pack()
header = Header()
header_size = header.get_size()
header.unpack(packed[:header_size])
unpacked = new_message_from_header(header)
unpacked.unpack(packed[header_size:])
self.assertEqual(packed, unpacked.pack())
def test_minimum_size(self):
"""Test struct minimum size."""
if self._min_size is None:
raise self.skipTest('minimum size was not set.')
obj = TestStruct._msg_cls()
self.assertEqual(obj.get_size(), self._min_size)
``` |
{
"source": "josemauro/storehouse",
"score": 2
} |
#### File: tests/unit/test_etcd.py
```python
from unittest import TestCase
from unittest.mock import MagicMock, patch
from napps.kytos.storehouse.backends.etcd import (Etcd, join_fullname,
split_fullname)
from napps.kytos.storehouse.main import Box
# pylint: disable=protected-access, unused-argument, no-member
class TestEtcd(TestCase):
"""Tests for the Etcd class."""
# pylint: disable=arguments-differ
@patch('napps.kytos.storehouse.backends.etcd.etcd3.client')
def setUp(self, mock_client):
"""Execute steps before each tests."""
mock_client.return_value = MagicMock()
self.base = Etcd()
# 'metadata' is the name of one of the objects in the tuple
# returned by the etcd get_all method.
self.metadata = MagicMock()
self.metadata.key = b'namespace.123'
self.base.etcd.get_all.return_value = [(b'', self.metadata)]
def test_get_all_keys(self):
"""Test _get_all_keys method."""
all_keys = self.base._get_all_keys()
self.assertEqual(b'namespace.123', next(all_keys))
@patch('pickle.dumps', return_value='raw_data')
def test_create(self, mock_dumps):
"""Test create method."""
box = Box('any', 'namespace', box_id='123')
self.base.create(box)
self.base.etcd.put.assert_called_with('namespace.123', 'raw_data')
@patch('pickle.loads', return_value='data')
def test_retrieve_success_case(self, mock_loads):
"""Test retrieve method to success case."""
self.base.etcd.get.return_value = ('raw_data', '')
box = Box('any', 'namespace', box_id='123')
retrieve = self.base.retrieve(box.namespace, box.box_id)
self.base.etcd.get.assert_called_with('namespace.123')
self.assertEqual(retrieve, 'data')
def test_retrieve_failure_case(self):
"""Test retrieve method to failure case."""
self.base.etcd.get.return_value = (None, '')
box = Box('any', 'namespace', box_id='123')
retrieve = self.base.retrieve(box.namespace, box.box_id)
self.base.etcd.get.assert_called_with('namespace.123')
self.assertIsNone(retrieve)
def test_delete(self):
"""Test delete method."""
box = Box('any', 'namespace', box_id='123')
self.base.delete(box.namespace, box.box_id)
self.base.etcd.delete.assert_called_with('namespace.123')
def test_list(self):
"""Test list method."""
obj = MagicMock()
obj.key = b'namespace.123'
self.base.etcd.get_prefix.return_value = [('', obj)]
list_return = self.base.list('namespace')
self.base.etcd.get_prefix.assert_called_with('namespace',
keys_only=True)
self.assertEqual(next(list_return), b'123')
def test_list_namespaces(self):
"""Test list_namespaces method."""
namespaces = self.base.list_namespaces()
self.assertEqual(namespaces, {b'namespace'})
@patch('pickle.loads')
def test_backup(self, mock_loads):
"""Test backup method."""
next(self.base.backup())
mock_loads.assert_called_with((b'', self.metadata))
def test_split_fullname(self):
"""Test split_fullname method."""
fullname = b'namespace.box_id'
split = split_fullname(fullname)
self.assertEqual(split, [b'namespace', b'box_id'])
def test_join_fullname(self):
"""Test join_fullname method to binary and string parameters."""
fullname_1 = join_fullname(b'namespace', b'box_id')
self.assertEqual(fullname_1, b'namespace.box_id')
fullname_2 = join_fullname('namespace', 'box_id')
self.assertEqual(fullname_2, 'namespace.box_id')
``` |
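The last two tests fully pin down the helpers' behaviour. A minimal sketch consistent with those assertions (not necessarily the NApp's actual implementation):
```python
def split_fullname(fullname):
    """Split b'namespace.box_id' (or the str equivalent) into its parts."""
    separator = b'.' if isinstance(fullname, bytes) else '.'
    return fullname.split(separator)


def join_fullname(namespace, box_id):
    """Join namespace and box_id with a dot, for bytes or str inputs."""
    separator = b'.' if isinstance(namespace, bytes) else '.'
    return separator.join((namespace, box_id))
```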
{
"source": "josemaz/aracne-multicore",
"score": 2
} |
#### File: aracne-multicore/launch/miRNA-gen.py
```python
import pandas as pd
from re import match
import numpy as np
import sys, glob
from termcolor import colored, cprint
from pathlib import Path
logprint = lambda x: cprint(x, 'red', attrs=["bold"])
msgprint = lambda x: cprint(x, 'green', attrs=["bold"])
def procs_mi( fin, fout):
# mat = pd.read_csv("expr-all-ctrl-complete.tsv", sep = "\t")
mat = pd.read_csv(fin, sep = "\t")
mat.index = mat.columns
msgprint("Size of matrix: " + str(mat.shape))
# mat.isnull().sum().sum()
genes = list(filter(lambda v: match('^ENS', v), mat.columns))
ngenes = len(genes) # 16290
msgprint("Genes without miRNAs: " + str(ngenes))
if mat.iloc[:ngenes,:].isnull().sum().sum() != 0:
print("NAs on mirna-gen matrix...")
sys.exit(15)
gen_gen = mat.iloc[:ngenes,:ngenes]
gen_gen.index = gen_gen.columns
gen_gen = gen_gen.where(np.triu(np.ones(gen_gen.shape), 1).astype(bool))
gen_gen = gen_gen.stack().reset_index()
gen_gen.columns = ['Source','Target','MI']
gen_gen = gen_gen.sort_values('MI', ascending=False)
print(gen_gen)
msgprint("Writing: " + fout + '-gengen.tsv')
gen_gen.to_csv(fout + '-gengen.tsv',
index = False, header=True, sep='\t')
# gen-gen interactions: 132673905
gen_mirna = mat.iloc[:ngenes,ngenes:]
gen_mirna = gen_mirna.stack().reset_index()
gen_mirna.columns = ['Source','Target','MI']
gen_mirna = gen_mirna.sort_values('MI', ascending=False)
print(gen_mirna)
msgprint("Writing: " + fout + '-genmirna.tsv')
gen_mirna.to_csv(fout + '-genmirna.tsv',
index = False, header=True, sep='\t')
# gen-miRNA interactions:
alldata = pd.concat([gen_gen, gen_mirna])
alldata = alldata.sort_values('MI', ascending=False)
print(alldata)
msgprint("Writing: " + fout + '-all.tsv')
alldata.to_csv(fout + '-all.tsv',
index = False, header=True, sep='\t')
######################################################################
## MAIN
Path("expr-miRNA").mkdir(parents=True, exist_ok=True)
for file in sorted(glob.glob('*-complete.tsv')):
logprint("Using file: " + file)
prefix = '-'.join(file.split('-')[:-1])
prefix = 'expr-miRNA/' + prefix
procs_mi(file,prefix)
``` |
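The heart of `procs_mi` is the upper-triangle mask plus `stack()` that turns a symmetric MI matrix into a Source/Target/MI edge list without duplicate pairs. The same idiom on a toy 3x3 frame:
```python
import numpy as np
import pandas as pd

mi = pd.DataFrame([[1.0, 0.8, 0.3],
                   [0.8, 1.0, 0.5],
                   [0.3, 0.5, 1.0]],
                  index=list('ABC'), columns=list('ABC'))

# k=1 keeps only cells strictly above the diagonal, so each pair appears once.
upper = mi.where(np.triu(np.ones(mi.shape), 1).astype(bool))
edges = upper.stack().reset_index()
edges.columns = ['Source', 'Target', 'MI']
print(edges.sort_values('MI', ascending=False))  # A-B 0.8, B-C 0.5, A-C 0.3
```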
{
"source": "josemazo/hey_you",
"score": 2
} |
#### File: josemazo/hey_you/hey_you.py
```python
import logging
import time
from urllib.request import urlopen
from environs import Env
from twilio.rest import Client
def read_config():
logging_level = logging.getLevelName('INFO')
logging_format = '[%(asctime)s] (%(process)d) {%(filename)s:%(lineno)d} %(levelname)s - %(message)s'
logging_datefmt = '%Y-%m-%dT%H:%M:%SZ'
logging.basicConfig(format=logging_format, datefmt=logging_datefmt)
logger = logging.getLogger('__hey_you__')
logger.setLevel(logging_level) # Setting the level now will avoid getting logs from other libraries
env = Env()
return {
'logger': logger,
'watch_url': env.str('WATCH_URL', 'https://github.com/josemazo/hey_you'),
'twilio_account_sid': env.str('TWILIO_ACCOUNT_SID', 'abcdefghijklmnopqrstuvwxyz'),
'twilio_auth_token': env.str('TWILIO_AUTH_TOKEN', '<KEY>'),
'twilio_to': env.str('TWILIO_TO', '+34600000000'),
'twilio_from': env.str('TWILIO_FROM', '+34955000000'),
'sleep': env.int('SLEEP', 30)
}
def main(config):
html = None
while True:
html_new = urlopen(config['watch_url']).read().decode('utf-8')
if html is None:
html = html_new
config['logger'].info('First iteration')
elif html != html_new:
html = html_new
config['logger'].info('Gotcha!')
hey_you(config)
else:
config['logger'].info('No luck :(')
time.sleep(config['sleep'])
def hey_you(config):
account_sid = config['twilio_account_sid']
auth_token = config['twilio_auth_token']
to = config['twilio_to']
from_ = config['twilio_from']
client = Client(account_sid, auth_token)
call = client.calls.create(url='http://demo.twilio.com/docs/voice.xml', to=to, from_=from_)
config['logger'].info(f'Calling: {call.sid}')
if __name__ == '__main__':
config = read_config()
main(config)
``` |
{
"source": "josemazo/scrap-me-a-cave",
"score": 3
} |
#### File: josemazo/scrap-me-a-cave/cave_services.py
```python
import abc
import re
import urllib2
from cave import Cave
from lxml import html
class CaveService(object):
_user_agent = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 ' \
'(KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36'
_regex_number = re.compile("[0-9]+")
_regex_namespace = {'re': 'http://exslt.org/regular-expressions'}
def __init__(self, search_url='', search_description=''):
self.search_url = search_url
self.search_description = search_description
headers = {
'User-Agent': self._user_agent
}
self._opener = urllib2.build_opener()
self._opener.addheaders = headers.items()
@abc.abstractmethod
def search_caves(self, old_caves=[]):
return
class IdealistaService(CaveService):
def search_caves(self, old_caves=[]):
new_caves = []
data_html = self._opener.open(self.search_url).read()
dom = html.fromstring(data_html)
caves = dom.xpath('.//li[re:test(@id, "[0-9]+")]',
namespaces=self._regex_namespace)
caves = {cave.attrib['id']: cave for cave in caves}
# A solution with a comprehension will use another for ;)
for cave_id in caves:
if cave_id not in old_caves:
new_cave = caves[cave_id]
price = new_cave.xpath('.//li[@class="col-0"]')[0].text
price = self._regex_number.findall(price)[0]
meters = new_cave.xpath('.//li[@class="col-1"]')[0].text
meters = self._regex_number.findall(meters)[0]
description = new_cave.xpath(
'.//a[@href="/inmueble/{0}/"]'.format(
new_cave.attrib['id']))[1].text.strip()
url = 'http://idealista.com/inmueble/{0}/'.format(
new_cave.attrib['id'])
new_cave_obj = Cave(price, meters, description, url,
self.search_url)
new_caves.append(new_cave_obj)
return new_caves
class SegundaManoService(CaveService):
def search_caves(self, old_caves=[]):
new_caves = []
data_html = self._opener.open(self.search_url).read()
dom = html.fromstring(data_html)
caves = dom.xpath('.//ul[re:test(@id, "[0-9]+")]',
namespaces=self._regex_namespace)
caves = {cave.attrib['id']: cave for cave in caves}
# A solution with a comprehension will use another for ;)
for cave_id in caves:
if cave_id not in old_caves:
new_cave = caves[cave_id]
price = new_cave.xpath('.//a[@class="subjectPrice"]')[0].text
price = self._regex_number.findall(price)[0]
try:
meters = new_cave.xpath(
'.//div[@class="infoBottom"]/text()')[3]
meters = self._regex_number.findall(meters)[0]
except:
meters = 'not available'
description = new_cave.xpath(
'.//a[@class="subjectTitle"]')[0].text.strip()
url = new_cave.xpath(
'.//a[@class="subjectTitle"]')[0].attrib['href']
new_cave_obj = Cave(price, meters, description, url,
self.search_url)
new_caves.append(new_cave_obj)
return new_caves
``` |
{
"source": "josemdv/take-home",
"score": 3
} |
#### File: take-home/google_prv/google_service.py
```python
import logging
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from google_prv.google_auth import generate_credentials
def drive_service():
try:
# We create the Drive V3 service and return it
service = build('drive', 'v3', credentials=generate_credentials())
logging.info('Google Drive service created successfully')
return service
except HttpError as error:
# Just a few examples of the errors we can handle
if error.resp.status == 500:
logging.error("G Suite backend error: {error}")
elif error.resp.status == 503:
logging.error("G Suite service unavailble: {error}")
else:
logging.error("Unexpected error with Google API: {error}")
``` |
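A small usage sketch for the factory above; `files().list()` is the standard Drive v3 listing call exposed by the discovery-based client:
```python
service = drive_service()
if service is not None:  # drive_service() returns None when the build fails
    response = service.files().list(pageSize=10,
                                    fields='files(id, name)').execute()
    for item in response.get('files', []):
        print(item['name'], item['id'])
```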
{
"source": "josemfc/centro-recursos",
"score": 2
} |
#### File: centro-recursos/recursos/forms.py
```python
from django import forms
from django.forms import ModelForm
from django.utils.translation import ugettext as _
from django.utils.html import escape
from recursos.models import *
# Users
class LoginForm (forms.Form):
name = forms.CharField (label = 'Nombre de usuario',
max_length = 30,
required = False,
)
passw = forms.CharField ( label = '<PASSWORD>',
max_length = 30,
widget = forms.PasswordInput(),
required = False,
)
def clean(self):
cleaned_data = super(LoginForm, self).clean()
n = escape(self.cleaned_data.get('name'))
p = escape(self.cleaned_data.get('passw'))
if n == '' or p == '':
raise forms.ValidationError(_('Por favor, introduzca usuario y contraseña para entrar.'))
"""
class NuevoUsuarioForm(forms.Form):
username = forms.CharField(label = 'Nombre de usuario', max_length = 30)
password = forms.CharField (label = '<PASSWORD>', max_length = 30, widget=forms.PasswordInput())
email = forms.EmailField(label='Correo electrónico')
nombre = forms.CharField(label = 'Nombre', max_length = 30, required = False)
apellidos = forms.CharField(label = 'Apellidos', max_length = 30, required = False)
es_gestor = forms.BooleanField(label = 'Gestor', required = False)
def clean(self):
cleaned_data = super(NuevoUsuarioForm, self).clean()
us = escape(self.cleaned_data.get('username'))
if User.objects.all().filter(username=us).exists():
raise forms.ValidationError('El usuario \'%s\' ya existe.' % us)
class ModUsuarioForm(forms.Form):
def __init__(self, usuario, *args, **kwargs):
super(ModUsuarioForm,self).__init__(*args,**kwargs)
self.usuario = usuario
self.min_length = 8
self.fields['nombre'] = forms.CharField(initial = self.usuario.first_name, required=False)
self.fields['apellidos'] = forms.CharField(initial = self.usuario.last_name, required=False)
self.fields['email'] = forms.EmailField(initial = self.usuario.email, required=False)
self.fields['es_gestor'] = forms.BooleanField(initial = usuario.groups.filter(name='Gestores').exists(), required=False)
old_pass = forms.CharField (label = 'Contraseña actual', max_length = 30, widget=forms.PasswordInput(), required=False)
new_pass = forms.CharField (label = 'Nueva contraseña', max_length = 30, widget=forms.PasswordInput(), required=False)
new_pass2 = forms.CharField (label = 'Repita la nueva contraseña', max_length = 30, widget=forms.PasswordInput(), required=False)
def clean (self):
cleaned_data = super(ModUsuarioForm, self).clean()
co = escape(cleaned_data.get("old_pass"))
c1 = escape(cleaned_data.get("new_pass"))
c2 = escape(cleaned_data.get("new_pass2"))
if co != "" and len(c1) < self.min_length:
raise forms.ValidationError(_('La contraseña debe tener al menos ocho caracteres.'))
elif co != "" and not self.usuario.check_password(co):
raise forms.ValidationError(_('Contraseña actual incorrecta.'))
elif co != "" and co == c1:
raise forms.ValidationError(_('La contraseña antigua y la nueva no pueden ser iguales.'))
elif c1 != "" and c1 != c2:
raise forms.ValidationError(_('Los campos de contraseña nueva no coinciden.'))
class RecuperarForm(forms.Form):
username = forms.CharField(label = 'Nombre de usuario', max_length = 30, required = False)
email = forms.EmailField(label = 'Correo electrónico', required = False)
def clean(self):
cleaned_data = super(RecuperarForm, self).clean()
u = escape(self.cleaned_data.get('username'))
e = escape(self.cleaned_data.get('email'))
if u == '' or e == '':
raise forms.ValidationError(_('Por favor, rellene ambos campos para continuar.'))
"""
# Video
class NuevoVideoForm (forms.ModelForm):
def __init__(self, *args, **kwargs):
super(NuevoVideoForm, self).__init__(*args, **kwargs)
self.fields['titulo'].label = "Título"
self.fields['titulo'].widget.attrs['size'] = 50
self.fields['enlace'].widget.attrs['size'] = 50
self.fields['duracion'].label = "Duración ('HH:MM:SS')"
self.fields['duracion'].widget = forms.TimeInput(format='%M:%S')
self.fields['duracion'].required = False
class Meta:
model = Video
exclude = ['fecha_pub', 'visualizaciones']
def clean(self):
cleaned_data = super(NuevoVideoForm, self).clean()
enl = escape(self.cleaned_data.get('enlace'))
if Video.objects.filter(enlace = enl).exists():
raise forms.ValidationError(_("Error: El vídeo introducido ya existe en la base de datos."))
class ModVideoForm(forms.ModelForm):
def __init__(self, vid, *args, **kwargs):
self.vid = vid
super(ModVideoForm, self).__init__(*args, **kwargs)
self.fields['titulo'].initial = vid.titulo
self.fields['descripcion'].initial = vid.descripcion
self.fields['duracion'].initial = vid.duracion
# Category
class Meta:
model = Video
fields = ('titulo', 'descripcion', 'duracion')
def save(self, commit=True):
self.vid.titulo = self.cleaned_data['titulo']
self.vid.descripcion = self.cleaned_data['descripcion']
self.vid.duracion = self.cleaned_data['duracion']
if commit:
self.vid.save()
return self.vid
# Forums
"""class NuevoPost (forms.ModelForm):
class Meta:
model = Post
fields = ['content']"""
``` |
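A hypothetical view-side sketch of how `ModVideoForm` is meant to be driven (the view name and template path are made up for illustration):
```python
from django.shortcuts import get_object_or_404, render

from recursos.forms import ModVideoForm
from recursos.models import Video


def editar_video(request, video_id):
    vid = get_object_or_404(Video, pk=video_id)
    if request.method == 'POST':
        form = ModVideoForm(vid, request.POST)
        if form.is_valid():
            form.save()  # copies titulo/descripcion/duracion back onto vid
    else:
        form = ModVideoForm(vid)
    return render(request, 'recursos/video_edit.html', {'form': form})
```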
{
"source": "josemfc/formacion-crm",
"score": 2
} |
#### File: formacion-crm/faq/models.py
```python
from django.db import models
from formacion.ftp import FTPStorage
from django.conf import settings
fs = FTPStorage()
def upload_path_handler(instance, filename):  # Tells Django where to upload the file
return "{f}".format(f=filename)
class FuentesJSON(models.Model):
nombre = models.CharField(max_length = 20)
descripcion = models.CharField(max_length = 100)
fuente = models.FileField('Fuente de datos', upload_to = upload_path_handler, storage = fs)
def _get_filename_fuente(self):
return settings.MEDIA_ROOT + self.fuente.name
filename_fuente = property(_get_filename_fuente)
def __str__(self):
return self.nombre
```
#### File: formacion-crm/faq/views.py
```python
from django.shortcuts import get_object_or_404, render, redirect
from django.core.urlresolvers import reverse
from django.core.mail import send_mail
from django.conf import settings
from faq.models import FuentesJSON
from faq.forms import *
import urllib.request
import json
def index(request):
if request.method == 'POST':
dudas_form = DudasForm(request.POST)
if dudas_form.is_valid():
nom = escape(dudas_form.cleaned_data['nombre'])
tip = int(dudas_form.cleaned_data['tipo'])
mail = escape(dudas_form.cleaned_data['email'])
det = escape(dudas_form.cleaned_data['detalle'])
# Send the notification email
TIPOS_DUDAS = ( ('-1', '-- Seleccione un tipo --'), ('0', 'Área comercial'), ('1', 'Área académica'), ('2', 'Registro y Control'), ('3', 'Área financiera'), ('4', 'Área de marketing'), ('5', 'Otras cuestiones'))
path = request.build_absolute_uri(reverse('faq:index'))
mensj = "Se ha recibido una nueva duda desde " + path +".\n\nNombre: " + nom + "\nCorreo electrónico: " + mail + "\nDetalle: " + det
email_dest = '<EMAIL>'
send_mail('Formación CRM - Duda de ' + TIPOS_DUDAS[tip+1][1], mensj, settings.DEFAULT_FROM_EMAIL, [email_dest], fail_silently=False)
return render(request, 'faq/redirect.html')
else:
return render(request, 'faq/index.html', { 'dudas_form': dudas_form })
else: # GET
dudas_form = DudasForm()
fuente = get_object_or_404(FuentesJSON, nombre = "faq")
response = urllib.request.urlopen(fuente.filename_fuente)
preguntas = json.loads(response.read().decode('utf-8'))
context = {
'dudas_form': dudas_form,
'preguntas': preguntas
}
return render(request, 'faq/index.html', context)
``` |
{
"source": "josemfc/translation-projects-management",
"score": 2
} |
#### File: translation-projects-management/proyectos/models.py
```python
from django.db import models
from django.contrib.auth.models import User
import os
class Proyecto(models.Model):
creador = models.ForeignKey(User)
nombre = models.CharField(max_length=200)
fecha_pub = models.DateTimeField('Fecha de publicacion')
terminado = models.BooleanField(default=False)
def __str__(self):
return self.nombre
def upload_path_handler(instance, filename):  # Tells Django where to upload the file
return "{id_p}/{id_t}/{f}".format(id_p=instance.proyecto.id, id_t=instance.id, f=filename)
class Tarea(models.Model):
proyecto = models.ForeignKey(Proyecto)
asignada_a = models.ForeignKey(User)
nombre = models.CharField(max_length=50)
comentario = models.CharField(max_length=300, null=True, blank=True)
terminada = models.BooleanField(default=False)
num_horas = models.IntegerField('Num. de horas estimado')
original = models.FileField('Texto original', upload_to=upload_path_handler, null=True, blank=True)
traducido = models.FileField('Texto traducido', upload_to=upload_path_handler, null=True, blank=True)
TIPO_TAREA = (
('T', 'Traducción'),
('R', 'Revisión'),
)
tipo_tarea = models.CharField(max_length=15, choices = TIPO_TAREA, default = 'T')
def _get_filename_o(self):
return os.path.basename(self.original.name)
def _get_filename_t(self):
return os.path.basename(self.traducido.name)
filename_o = property(_get_filename_o)
filename_t = property(_get_filename_t)
def __str__(self):
return self.nombre
``` |
{
"source": "JoseM-G/Python",
"score": 4
} |
#### File: JoseM-G/Python/guess.py
```python
def game():
print("I'm thinking of an integer, you have three guesses.")
import random
rand_integer = random.randint(1, 10)
for i in range(3):
user_integer = eval(input("Guess "+str(i+1)+": Please enter an integer between 1 and 10: "))
if user_integer > rand_integer:
if i == 2:
print("Too bad. The number is:", rand_integer)
break
else:
print("Your guess is too big")
if user_integer < rand_integer:
if i == 2:
print("Too bad. The number is:", rand_integer)
break
else:
print("Your guess is too small")
if user_integer == rand_integer:
print("You got it!")
break
game()
``` |
{
"source": "JoseMiguel92/mrcp-project",
"score": 3
} |
#### File: JoseMiguel92/mrcp-project/solution_greedy_adjacent.py
```python
from solution_greedy import SolutionGreedy
class SolutionGreedyNeighbors(SolutionGreedy):
def __init__(self, graph, name):
super().__init__(graph, name)
def find_better(self, adjacent):
""" Find better candidate (with more neighbors) from adjacent and verify if him form a clique. """
current_neighbors = -1
node_chosen = None
for node in adjacent:
node_neighbors = len(self.graph.get_node(node).neighbors_indices)
if node_neighbors > current_neighbors:
current_neighbors = node_neighbors
node_chosen = node
return node_chosen
```
#### File: JoseMiguel92/mrcp-project/solution_greedy_ratio.py
```python
from solution_greedy import SolutionGreedy
class SolutionGreedyRatio(SolutionGreedy):
def __init__(self, graph, name):
super().__init__(graph, name)
def find_better(self, adjacent):
""" Find better candidate (with better ratio) from adjacent and verify if him form a clique. """
current_ratio = -1
node_chosen = None
for node in adjacent:
node_ratio = self.graph.get_node(node).p_weight / self.graph.get_node(node).q_weight
if node_ratio > current_ratio:
current_ratio = node_ratio
node_chosen = node
return node_chosen
```
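`find_better` is a plain arg-max over the candidates; an equivalent formulation with the built-in `max` (same result when `adjacent` is non-empty, whereas the loop above returns None for an empty input):
```python
def find_better(self, adjacent):
    """Pick the adjacent node with the highest p_weight / q_weight ratio."""
    return max(adjacent,
               key=lambda n: (self.graph.get_node(n).p_weight /
                              self.graph.get_node(n).q_weight))
```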
#### File: mrcp-project/test/solution_grasp_tests.py
```python
import unittest
import random
import bisect
from instance import Instance
from solution_grasp import SolutionGrasp
from graph_utils import GraphUtils
class SolutionGraspTests(unittest.TestCase):
GRAPH_TEST = 'test_files/setsPruebasFinal/set-e/DIMACS2/johnson8-2-4.txt'
GRAPH_SIMPLE_1_TEST_PTH = 'test_files/test-graph-type-1.txt'
def test_grasp_OK(self):
graph = Instance()
file = SolutionGraspTests.GRAPH_SIMPLE_1_TEST_PTH
graph.read_file(file)
solution_type = SolutionGrasp.ADJACENT
fixed_seed = 1
alpha = 0.5
instance_solution = SolutionGrasp()
result = instance_solution.find_grasp_solution(graph, file, solution_type, fixed_seed, alpha)
def test_random_seed(self):
for i in range(13):
random.seed(1)
num = random.sample(range(10), 10)
print("{0} : {1}".format(i, num))
self.assertTrue(True)
def test_bisect(self):
g_c = list()
bisect.insort(g_c, 100)
bisect.insort(g_c, 90)
bisect.insort(g_c, 30)
bisect.insort(g_c, 45)
bisect.insort(g_c, 60)
bisect.insort(g_c, 59)
print(g_c)
num = 59
pos = bisect.bisect_left(g_c, num)
print(pos)
g_c_result = g_c[pos:]
g_c_result.reverse()
print(g_c_result)
def test_random(self):
a = dict()
for i in range(10):
num_r = random.randint(0, 100)
gen = {i:num_r}
a.update(gen)
print("{1} : {0}".format(num_r, i+1))
for k, v in a.items():
print("clave= {0}: valor= {1}".format(k, v))
def test_apply_ls(self):
solution = {16, 18, 19, 20, 21, 23}
graph = Instance()
file = SolutionGraspTests.GRAPH_TEST
graph.read_file(file)
instace_sol = SolutionGrasp()
instace_sol.apply_ls(graph, solution)
def test_verify_clique(self):
SET_D = "test_files/setsPruebasFinal/set-d/wind-2004.txt"
SET_F = "test_files/setsPruebasFinal/set-f/DIMACS10/email.txt"
clique = {94, 66, 278, 14}
graph = Instance()
file = SET_D
graph.read_file(file)
if GraphUtils.is_clique_solution(graph, clique):
print("yes")
else:
print("no")
if __name__ == '__main__':
unittest.main()
```
#### File: mrcp-project/test/solution_greedy_clique_ratio_tests.py
```python
import unittest
import os
from instance import Instance
from solution_greedy_clique_ratio import SolutionGreedyRatio
class SolutionGreedyCliqueRatioTest(unittest.TestCase):
GRAPH_1_TEST = 'test_files/test-graph-greedy-simple-1.txt'
GRAPH_2_TEST = "test_files/test-graph-type-1.txt"
GRAPH_3_TEST = "test_files/test_graph_type_1_worst.txt"
CSV_OUTPUT_FILE = "test_files/output/solution_table.csv"
def test_find_ratio_clique_1_OK(self):
file = SolutionGreedyCliqueRatioTest.GRAPH_2_TEST
graph = Instance()
graph.read_file(file)
solution = SolutionGreedyRatio(graph, os.path.splitext(file)[0])
clique = solution.find_clique_by_ratio()
print(clique)
print(solution.cardinality)
print(solution.sol_value)
def test_find_ratio_clique_2_OK(self):
file = SolutionGreedyCliqueRatioTest.GRAPH_3_TEST
graph = Instance()
graph.read_file(file)
solution = SolutionGreedyRatio(graph, os.path.splitext(file)[0])
clique = solution.find_clique_by_ratio()
print(clique)
print(solution.cardinality)
print(solution.sol_value)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "JoseMiguelGutierrezGuevara/ASR-Preguntas",
"score": 3
} |
#### File: test/Pregunta_1/pregunta1.py
```python
import os
import sys
import time
import shutil
""" %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
The obtenerIP() function reads the list of IPs from the file ips.txt and prints it on
screen with a 1-second delay between lines via the sleep() function
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
def obtenerIP():
archivo = open("ips.txt", "r")
print (" ... OBTENIEDO IPs ... ")
for c in range(0, 25):
cadena = archivo.readline()
print (cadena)
time.sleep(1)
print (" ... IPs obtenidas ... \n")
""" %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
The realizarPING() function reads the list of IP addresses from the file ips.txt and pings
each IP through os.system(). Three iterations are performed (range(1, 4)), i.e. each IP
in the list is pinged three times, each time with 3 echo requests (ping -c 3).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
def realizarPING():
archivo2 = open("adverts.txt", "w")
for i in range(1, 4):
archivo = open("ips.txt", "r")
print ("\n ************** Iteracion %d **************** " % (i))
archivo2.write("************ ITERACION %d ************ \n" % (i))
for c in range(0, 25):
cadena = archivo.readline()
print (cadena)
comando = "ping -c 3 " + cadena
output = os.system(comando)
if output == 0:
print (" ---------> SI RESPONDE <------------- \n ")
else:
print (" !!!!!!!!!!!!! NO REPONDE !!!!!!!!!!! \n")
ip = cadena
archivo2.write(" La ip " + ip + "no respondio \n")
archivo.close()
archivo2.close()
""" %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
Once the pings are done, the IPs that did not respond are recorded in a text file
called adverts.txt. At the end of the program, adverts.txt is read and printed on
screen.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
print (" - - - - - - - - - - - - - PING PULLER - - - - - - - - - - - - - \n")
obtenerIP()
realizarPING()
os.system("clear")
print(" \n ... RESULTADOS OBTENIDOS .... \n")
resultado = open("adverts.txt", "r")
with open ("adverts.txt", "r") as f:
shutil.copyfileobj(f, sys.stdout)
```
#### File: Tabla6/Pregunta2/funcionesP2.py
```python
import telnetlib
from twilio.rest import Client
import time
import smtplib
from email.mime.text import MIMEText
def extraerInformacion(direccion):
user = "humberto"
password = "<PASSWORD>"
show = "show processes CPU"
salir = "exit"
espacio = " "
ruta = r"cpu/"
nombre = direccion
nombre_archivo = "buffer.txt"
archivo = open(ruta + nombre, "w+")
tn = telnetlib.Telnet(direccion)
print(" ¡¡¡ CONEXIÓN TELNET EXITOSA !!! ")
tn.read_until(b"Username: ")
tn.write(user.encode('ascii') + b"\n")
if password:
tn.read_until(b"Password: ")
tn.write(password.encode('ascii') + b"\n")
tn.write(show.encode('ascii') + b"\n")
tn.write(espacio.encode('ascii') + b"")
tn.write(espacio.encode('ascii') + b"")
tn.write(espacio.encode('ascii') + b"")
tn.write(espacio.encode('ascii') + b"")
tn.write(espacio.encode('ascii') + b"")
tn.write(espacio.encode('ascii') + b"")
tn.write(espacio.encode('ascii') + b"")
tn.write(espacio.encode('ascii') + b"")
tn.write(espacio.encode('ascii') + b"")
tn.write(espacio.encode('ascii') + b"")
tn.write(espacio.encode('ascii') + b"")
tn.write(espacio.encode('ascii') + b"\n")
tn.write(salir.encode('ascii') + b"\n")
    archivo.write(tn.read_all().decode('ascii'))
    archivo.seek(0)   # rewind so the read() below returns what was just written
    print(archivo.read())
archivo.close()
print (" ¡¡¡ FIN DE LA CONEXIÓN !!! ")
return nombre_archivo
""" ---------------------------------------------------------------------------------- """
def obtenerNombre(direccion):
ruta = r"cpu/"
archivo = open(ruta + direccion, "r")
bandera = 0
while bandera != 1:
linea = archivo.readline()
if linea.find("#") >= 0:
limite = linea.find("#")
nombre_router = linea[0:limite]
bandera = 1
print(nombre_router)
return nombre_router
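# Worked example (assumed prompt format): if the captured Telnet output contains a line such as
# "Router_Lab#show processes CPU", then linea.find("#") == 10 and obtenerNombre() returns
# linea[0:10] == "Router_Lab", i.e. everything before the '#' prompt character.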
""" ---------------------------------------------------------------------------------- """
""" ---------------------------------------------------------------------------------- """
def extraerProcesamiento(nombre, direccion):
informacion = r"cpu/" + direccion
ruta_salida = r"Estadisticas/" + nombre
archivo = open(informacion, "r")
archivo_salida = open(ruta_salida, "a+")
bandera = 0
while bandera != 1:
linea = archivo.readline()
if linea.find(":") >= 0 and linea.find("/"):
inicio = linea.find(":")
fin = linea.find("/")
porcentaje = linea[inicio + 2: fin - 1]
print(porcentaje)
bandera = 1
estadistica = porcentaje + "\n"
porcentaje = int(porcentaje)
if porcentaje >= 1:
print("ADVERTENCIA, EL ROUTER " + nombre + " HA ALCANZADO EL LIMITE DE PROCESAMIENTO")
archivo_salida.write(estadistica)
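# Worked example (assuming an IOS-style "show processes CPU" header line):
#   "CPU utilization for five seconds: 7%/0%; one minute: 5%; five minutes: 4%"
# Here `inicio` points at the first ':' and `fin` at the first '/', so
# porcentaje == linea[inicio + 2:fin - 1] == "7", which is the value written to the statistics
# file and compared against the processing-limit warning threshold above.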
```
#### File: Tabla6/Pregunta3/funcionesP3.py
```python
import os
""" ------------------ FUNCION PARA VINCULAR CADA INTERFAZ DE RED CON SU RESPECTIVO TRÁFICO DE DATOS --------------- """
def vincularDatos():
ruta = r"../Pregunta1/Tratamiento"
estadisticas = r"Estadisticas/"
archivos = os.listdir(ruta)
mayor_uso = open("mayor_uso.txt", "w+")
for archivo in archivos:
linea = archivo
posicion = linea.find("-")
nombre_archivo = linea[0:posicion]
print(nombre_archivo)
informacion = open(ruta + "/" + archivo, "r")
salida = open(estadisticas + nombre_archivo, "w+")
numero_lineas = len(informacion.readlines())
informacion.seek(0)
interfaz_mayor = " "
uso_mayor = 0
recorrido = 0
while recorrido != numero_lineas:
linea2 = informacion.readline()
if linea2.find("FastEthernet") >= 0 and linea2.find("/") >= 0:
pos_interfaz = linea2.find(" ")
nombre_ethernet = linea2[0:pos_interfaz]
print(nombre_ethernet)
linea3 = informacion.readline()
entradas = obtenerBytes(linea3)
recorrido = recorrido + 1
linea3 = informacion.readline()
salidas = obtenerBytes(linea3)
recorrido = recorrido + 1
total = entradas + salidas
texto = nombre_ethernet + "," + str(total) + "\n"
print(texto)
salida.write(texto)
if total > uso_mayor:
uso_mayor = total
interfaz_mayor = nombre_ethernet
if linea2.find("Serial") >= 0 and linea2.find("/") >= 0:
pos_interfaz = linea2.find(" ")
nombre_serial = linea2[0:pos_interfaz]
print(nombre_serial)
                linea4 = informacion.readline()
                recorrido = recorrido + 1
                linea4 = informacion.readline()
                entradas = obtenerBytes(linea4)   # use the line just read (the original reused a stale linea3 from the FastEthernet branch)
                recorrido = recorrido + 1
                linea4 = informacion.readline()
                salidas = obtenerBytes(linea4)
                recorrido = recorrido + 1
total = entradas + salidas
print("Total: %d " % total)
salida.write(nombre_serial + "," + str(total) + "\n")
if total > uso_mayor:
uso_mayor = total
interfaz_mayor = nombre_serial
recorrido = recorrido + 1
salida.close()
mayor_uso.write(nombre_archivo + "," + interfaz_mayor + "," + str(uso_mayor) + "\n")
print("------------------------------------------------------------------------")
""" ---------------------------------------------------------------------------------------------------------- """
def obtenerBytes(linea):
inicio = linea.find(",")
fin = linea.find("bytes")
recorrido = linea[inicio + 1:fin - 1]
recorrido = int(recorrido)
return recorrido
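# Worked example (assuming "show interfaces"-style counter lines): for a line such as
#   "     1234 packets input, 567890 bytes, 0 no buffer"
# `inicio` is the index of the first ',' and `fin` the index of "bytes", so the extracted slice
# is " 567890", which int() converts to 567890.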
""" ---------------------------------------------------------------------------------------------------------- """
``` |
{
"source": "JoseMiguel/otakume",
"score": 2
} |
#### File: tasks/parser/annreview.py
```python
from bs4 import BeautifulSoup
import numpy as np
import logging
from collections import defaultdict
from tasks.core.base import SiteTask, OrchestrateTask
import urllib2
from porc import Client
class ANNReviewOrchestrate(OrchestrateTask):
@property
def id(self):
return 2
def __init__(self):
logging.basicConfig(filename=self.conf['logs']['crawler'],
format='%(asctime)s - %(levelname)s:%(message)s',
level=logging.DEBUG,
datefmt='%m/%d/%Y %I:%M:%S %p')
def run(self):
task = ANNReviewTask()
self.key = task.key
reviews = self.get()
updateReviews = task.run()
# since there are about 20 reviews per season
for i, updateReview in enumerate(updateReviews['reviews']):
print updateReview
for review in reviews['reviews']:
if review['title'] == updateReview['title']:
updateReviews['reviews'][i]['id'] = review['id']
break
self.json = updateReviews
self.put()
class ANNReviewTask(SiteTask):
def __init__(self):
self.key = self.site['season'] + self.site['year']
@property
def id(self):
return 2
def request_url(self):
return self.site['api_url']
def retrieve(self, leafXML, element):
attrs = {}
if element['property'] != None:
attrs = {element['property'] : element['value']}
return leafXML.findAll(element['tag'], attrs=attrs)
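    # Illustrative element descriptor (an assumption about the structure loaded from the task
    # configuration, based on how retrieve() is used below):
    #   element = {'tag': 'td', 'property': 'class', 'value': 'review-title'}
    # retrieve(soup, element) is then equivalent to soup.findAll('td', attrs={'class': 'review-title'}),
    # while a descriptor whose 'property' is None falls back to a plain tag search.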
def prepare_url(self,url):
try:
xml = urllib2.urlopen(url, timeout=30)
except Exception as e:
            logging.warning('Failed info in %s with key %s, raise: %s', self.source, self.key, e.message)
return None
return xml
def run(self):
url = self.request_url()
xml = self.prepare_url(url)
return self.crawl(xml, self.elements)
def crawl(self, xml, elements):
result = defaultdict(list)
print 'x'
if xml != None:
logging.info('Getting anime info from %s with key %s', self.source, self.key)
soupXML = BeautifulSoup(xml)
soupTable = self.retrieve(soupXML, self.elements['table'])[0]
for animeSoup in self.retrieve(soupTable, self.elements['animes']):
ratings = []
reviewDict = {}
elementReview = self.elements['review_url']
reviewUrlSoup = self.retrieve(animeSoup, elementReview)[0]
name = self.retrieve(animeSoup, self.elements['name'])[0].text
suffix_url = reviewUrlSoup[elementReview['inplace']]
separator = ('/' if suffix_url[0] != '/' else '')
review_url = self.site['base_url'] + separator + suffix_url
xml_review = self.prepare_url(review_url)
# in order to crawl reviews for each anime
# this might change, it's pure adhoc
reviewSiteSoup = BeautifulSoup(xml_review)
reviews = self.retrieve(reviewSiteSoup, self.elements['rating'])
length = self.elements['rating']['length']
for review in reviews:
reviewDescription = review.text[:length]
pos = reviewDescription.rfind(self.elements['rating']['content'])
text = reviewDescription[pos:length]
rating_value = -1
try:
rating_value = float(text.split()[1])
except:
pass
if rating_value > 0:
ratings.append(rating_value)
reviewDict['title'] = str(name.replace('\n', ' '))
reviewDict['mean'] = np.mean(ratings)
reviewDict['st-dev'] = 2 * np.std(ratings)
result['reviews'].append(reviewDict)
result['season'] = self.site['season']
result['year'] = self.site['year']
return result
def main():
# pass
print 'y'
ann = ANNReviewOrchestrate()
ann.run()
if __name__ == '__main__':
main()
``` |
{
"source": "Josemilla/ndirco2",
"score": 2
} |
#### File: Josemilla/ndirco2/ndirco2.py
```python
import time
import serial
import scrollphathd
import math
import ephem
from scrollphathd.fonts import font3x5
from datetime import datetime as dt
from datetime import timedelta
# Comando para leer la concentración de CO2
PETICION = [0xFF, 0x01, 0x86, 0x00, 0x00, 0x00, 0x00, 0x00, 0x79]
# Rango1 de 0 a 2000 ppm
RANGO1 = [0xFF, 0x01, 0x99, 0x00, 0x00, 0x00, 0x07, 0xd0, 0x8F]
# Rango2 de 0 a 5000 ppm
RANGO2 = [0xFF, 0x01, 0x99, 0x00, 0x00, 0x00, 0x13, 0x88, 0xCB]
# Rango3 de 0 a 10000 ppm
RANGO3 = [0xFF, 0x01, 0x99, 0x00, 0x00, 0x00, 0x27, 0x10, 0x2F]
# Calibración
CALIBRAR = [0xFF, 0x01, 0x87, 0x00, 0x00, 0x00, 0x00, 0x00, 0x78]
# Activar auto calibración
ACT_AUTO_CALIBRACION = [0xFF, 0x01, 0x79, 0xA0, 0x00, 0x00, 0x00, 0x00, 0xE6]
# Desactivar auto calibración
DES_AUTO_CALIBRACION = [0xFF, 0x01, 0x79, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86]
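# Sketch (not used by the script itself): all the 9-byte frames above share the same layout
# 0xFF, sensor number, command, five data bytes, checksum. The final byte can be recomputed
# from the middle seven bytes, which is handy when building new commands by hand:
def checksum_trama(trama):
    # checksum = 0xFF - (sum of bytes 1..7 modulo 256) + 1, truncated to one byte
    return (0xFF - (sum(trama[1:8]) & 0xFF) + 1) & 0xFF
# For example checksum_trama(PETICION) == 0x79 and checksum_trama(RANGO1) == 0x8F,
# matching the last byte of those frames.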
MAXIMO_BARRA = 800
MINIMO_BARRA = 400
BRILLO = None
# BRILLO_BAJO = 0.1 # Por la noche
# BRILLO_ALTO = 0.3 # Por el día
# Configure your latitude, longitude and elevation here
LATITUD = '40.285408'
LONGITUD = '-3.788855'
ALTURA = 660
# Configuramos la conexión serie según los datos del fabricante
sensor = serial.Serial(
port = '/dev/serial0',
baudrate = 9600,
parity = serial.PARITY_NONE,
stopbits = serial.STOPBITS_ONE,
bytesize = serial.EIGHTBITS,
timeout = 1)
# En mi configuración actual tengo que invertir la pantalla
scrollphathd.rotate(180)
# Esta función imprime el valor en la pantalla
def imprime_scrollphat(dato):
global BRILLO
global MAXIMO_BARRA
global MINIMO_BARRA
# Alinea la cifra siempre a la derecha, tenga 3 ó 4 cifras.
if dato >= 1000:
x = 1
else:
x = 5
scrollphathd.clear()
# Sólo mostramos la barra por el día, es decir, en función del valor del brillo
if BRILLO == 0.1:
scrollphathd.write_string(str(dato), x = x, y = 1, font = font3x5, brightness = BRILLO)
else:
scrollphathd.write_string(str(dato), x = x, y = 0, font = font3x5, brightness = BRILLO)
# Las siguientes cuatro lineas imprimen un indicador en la parte inferior con 400ppm estará al 0 y con 1000ppm al 100%
scrollphathd.fill(BRILLO - 0.1 if BRILLO > 0.1 else BRILLO, 0, 6, int((dato - 400) / ((MAXIMO_BARRA - MINIMO_BARRA) / 17)), 1)
scrollphathd.fill(BRILLO - 0.1 if BRILLO > 0.1 else BRILLO, 0, 5, 1, 2)
scrollphathd.fill(BRILLO - 0.1 if BRILLO > 0.1 else BRILLO, 8, 5, 1, 2)
scrollphathd.fill(BRILLO - 0.1 if BRILLO > 0.1 else BRILLO, 16, 5, 1, 2)
scrollphathd.show()
# Esta función lee el valor de CO2 y lo devuelve
def obten_co2():
# Enviamos el comando para pedir el valor de CO2
sensor.write(bytearray(PETICION))
respuesta = sensor.read(9)
# if len(respuesta) == 9:
# El valor que buscamos se encuentra en el byte 2 (high byte) y 3 (low byte).
# return (respuesta[2] << 8) | respuesta[3]
return (respuesta[2] << 8) | respuesta[3]
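# Worked example: if the 9-byte reply contains respuesta[2] == 0x02 and respuesta[3] == 0x58,
# the concentration is (0x02 << 8) | 0x58 == 600 ppm. Note that if the serial read times out
# and fewer than 9 bytes arrive, the indexing above fails (the commented-out length check
# hints at that case).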
# Esta funcion usa la librería ephem para calcular si es de día en función de los datos de longitud y latitud y ajusta la variable BRILLO
def ajustar_brillo():
global LONGITUD
global LATITUD
global ALTURA
global BRILLO
# Sólo si el usuario ha configurado los datos de LON, LAT y ALT hacen el cálculo...
if LONGITUD != 0 and LATITUD != 0 and ALTURA != 0:
sol = ephem.Sun()
observador = ephem.Observer()
# ↓ Define your coordinates here ↓
        observador.lat, observador.lon, observador.elevation = LATITUD, LONGITUD, ALTURA
# ↓ Set the time (UTC) here ↓
observador.date = dt.utcnow()
sol.compute(observador)
# altitud_sol = sol.alt
# print(altitud_sol*180/math.pi)
# -16.8798870431°
angulo = (sol.alt * 180 / math.pi)
if angulo > 0: # Es de día
BRILLO = 0.3
else: # Es de noche
BRILLO = 0.1
# ...si no ponemos el brillo a 0.2
else:
BRILLO = 0.2
hora_comprobacion_luz = dt.now()
ajustar_brillo()
# Mostramos la palabra HEAT (no hay palabra en español de cuatro letras) en la pantalla durante los tres minutos de calentamiento
scrollphathd.write_string("HEAT", x = 1, y = 1, font = font3x5, brightness = BRILLO)
scrollphathd.show()
# Configuramos el sensor en el rango de medición de 0 - 2000 ppm. Cuanto más bajo es el rango, mejor es la precisión.
sensor.write(bytearray(RANGO1))
# Por experiencia, el primer valor devuelto por el sensor es una medida errónea. Así que leemos y descartamos el valor.
obten_co2()
# Esperamos tres minutos, tiempo que indica el fabricante para el calentamiento del sensor. El for muestra la cuenta atrás.
print("\nndirCO2.py v1.0 - Josema - 30 de marzo de 2021 - <EMAIL>\n")
print("Esperando al calentamiento del sensor (Control + C para saltar)...")
try:
for segundos in range(180, 0, -1):
print(" " + str(segundos) + " segundos. " if segundos > 1 else " segundo. ", end="\r")
time.sleep(1)
print("Iniciando...")
except KeyboardInterrupt:
pass
# sensor.write(bytearray(RANGO1))
# Vuelvo a pedir el valor de CO2 para intentar evitar el valor 3420 que devuelve al principio
obten_co2()
# Volvemos a hacer un a lectura para mostrar el primer valor en la pantalla
valor_co2_anterior = obten_co2()
imprime_scrollphat(valor_co2_anterior)
# Entramos el bucle y no salimos nunca
try:
while True:
# Paramos un segundo en cada iteración del bucle
time.sleep(1)
valor_co2 = obten_co2()
# Calculamos la dirección de bucle for
if valor_co2 > valor_co2_anterior:
direccion_for = 1
elif valor_co2 < valor_co2_anterior:
direccion_for = -1
else:
imprime_scrollphat(valor_co2)
continue
# Este for muestra la animación del conteo cuando cambia el valor
for digito in range(valor_co2_anterior, valor_co2, direccion_for):
imprime_scrollphat(digito)
# Sólo si el salto entre valores es menor de 15 hacemos una pausa de 300ms. Si no lo fuera no hacemos pausa para que la animación no sea tediosa.
if abs(valor_co2_anterior - valor_co2) <= 15:
time.sleep(0.3)
valor_co2_anterior = valor_co2
# Entramos cada minuto aquí para comprobar si es de día o de noche
if dt.now() >= (hora_comprobacion_luz + timedelta(minutes=1)):
# print("Sólo entro aquí cada minuto")
ajustar_brillo()
hora_comprobacion_luz = dt.now()
except KeyboardInterrupt:
scrollphathd.clear()
``` |
{
"source": "josemiserra/CLEMSite_notebooks",
"score": 3
} |
#### File: CLEMSite_notebooks/2_crossing_Detector_SEM_MatTek/common_analysis.py
```python
import cv2
import numpy as np
import math
from scipy.signal import convolve2d
import matplotlib.pyplot as plt
from collections import deque
from skimage.segmentation import slic
from skimage import morphology
import random
from scipy.ndimage import label,sum
from functools import reduce
# Many functions have been adapted from <NAME> : https://www.peterkovesi.com/matlabfns/
def plotPoints(img,points, color = 'red', size=10):
implot = plt.imshow(img)
# put a blue dot at (10, 20)
points_x = points[:,0]
points_y = points[:,1]
plt.scatter([points_x], [points_y],c=color,s=size)
plt.show()
def plotHist(img):
# hist,bins = np.histogram(img.flatten(),256,[0,256])
plt.hist(img.flatten(),256,[0,256], color = 'r')
plt.xlim([0,256])
plt.legend(('cdf','histogram'), loc = 'upper left')
plt.show()
def normalise(im, reqmean = 0, reqvar = 1):
im = np.array(im,dtype = np.float32)
#im = im - np.mean(im)
#im = im / np.std(im)
# n = reqmean + im * np.sqrt(reqvar);
return im
def canny(i_image,isigma):
image = gaussfilt(i_image,isigma)
Ix,Iy = derivative5(image)
Ix_2 = np.multiply(Ix,Ix)
Iy_2 = np.multiply(Iy,Iy)
gradient = np.sqrt(Ix_2 + Iy_2) # Gradient magnitude.
orientation = np.arctan2(-Iy, Ix) # Angles -pi to + pi.
orientation[orientation<0] = orientation[orientation<0]+np.pi; # Map angles to 0-pi.
orientation = orientation*180/np.pi;
return gradient,orientation
def gaussfilt(img,sigma):
sze = int(math.ceil(6*sigma))
if(sze%2 == 0):
sze = sze+1
h = fspecial_gauss2D((sze,sze),sigma)
# conv2(image, mask) is the same as filter2(rot90(mask,2), image)
image = convolve2d(img,h,'same')
return image
def fspecial_gauss2D(shape=(3,3),sigma=0.5):
"""
2D gaussian mask - should give the same result as MATLAB's
fspecial('gaussian',[shape],[sigma])
"""
m,n = [(ss-1.)/2. for ss in shape]
y,x = np.ogrid[-m:m+1,-n:n+1]
h = np.exp( -(x*x + y*y) / (2.*sigma*sigma) )
h[ h < np.finfo(h.dtype).eps*h.max() ] = 0
sumh = h.sum()
if sumh != 0:
h /= sumh
return h
def derivative5(i_image):
# 5 tap 1st derivative cofficients. These are optimal if you are just
# seeking the 1st derivatives
# Copyright (c) 2010 <NAME>
p = np.array([0.037659,0.249153,0.426375,0.249153,0.037659], dtype = np.float32)
d1 =np.array([0.109604,0.276691,0.000000,-0.276691,-0.109604],dtype = np.float32)
a = p[:,np.newaxis]*d1.transpose()
b = d1[:,np.newaxis]*p.transpose()
Ix = convolve2d(i_image,a,'same')
Iy = convolve2d(i_image,b,'same')
return Ix,Iy
def floodfill(bw, r, c, N):
filled = np.zeros(bw.shape)
theStack = deque(zip(r, c))
m, n = bw.shape
while len(theStack) > 0:
x, y = theStack.pop()
if x < 0:
x = 0
if x >= n:
x = n - 1
if y < 0:
y = 0
if y >= m:
y = m - 1
if filled[x, y] == 1:
continue
if bw[x, y] == 0:
continue
filled[x, y] = 1
theStack.append((x + 1, y)) # right
theStack.append((x - 1, y)) # left
theStack.append((x, y + 1)) # down
theStack.append((x, y - 1)) # up
if (N == 8):
theStack.append((x + 1, y + 1)) # d right
theStack.append((x - 1, y - 1)) # d left
theStack.append((x - 1, y + 1)) # down
theStack.append((x + 1, y - 1)) # up
return filled
class Pixel:
value = 0
i = 0
j = 0
distance = 0
label = 0
def __init__(self,distance,i,j,label):
self.distance = distance
self.i = i
self.j = j
self.label = label
def propagate(img,mask,seeds,ilambda):
labels_out = np.copy(seeds)
dists = np.full(img.shape,np.inf)
dists[seeds>0] = 0
pq = deque([])
total_seeds = seeds.max()+1
for i in range(1,total_seeds):
# Get all pixel coordinates from pixels that are seeds
listpx, listpy = np.where(seeds==i)
for x,y in zip(listpx,listpy):
push_neighs_on_queue(pq,0.0,x,y ,img,ilambda,i,labels_out, mask)
while(len(pq)>0):
p = pq.popleft()
if(dists[p.i,p.j]>p.distance):
dists[p.i,p.j] = p.distance
labels_out[p.i,p.j] = p.label
push_neighs_on_queue(pq, p.distance,p.i,p.j, img, ilambda, labels_out[p.i,p.j], labels_out, mask)
return dists,labels_out
def clamped_fetch(img,i,j):
m,n = img.shape
if i < 0:
i = 0
if i >= n:
i = n-1
if j < 0:
j = 0
if j >= m:
j = m-1
return img[i,j]
def difference(img,i1,j1,i2,j2,ilambda):
pixel_diff = 0
#s1 = integrate(ii,i1-1,j1-1,i1+1,j1+1)
#s2 = integrate(ii,i2-1,j2-1,i2+1,j2+1)
#pixel_diff = np.abs(s1-s2)
dEucl = (i1-i2)*(i1-i2) + (j1-j2)*(j1-j2)
#fdist =np.sqrt((pixel_diff * pixel_diff +dEucl*dEucl*ilambda*ilambda)) # / (1.0 +ilambda ))
return int(dEucl*ilambda)
#return np.sqrt((pixel_diff * pixel_diff +ilambda *dEucl) / (1.0 +ilambda ))
#return (sqrt(pixel_diff * pixel_diff + (fabs((double) i1 - i2) + fabs((double) j1 - j2)) * lambda * lambda ));
def push_neighs_on_queue(pq,distance,i,j,img,ilambda,label, labels_out, mask):
# 4-connected
m,n = img.shape
if (i > 0):
val = labels_out[i-1,j]
if (val==0 and mask[i-1, j]>0):
delta_d = difference(img, i, j, i-1, j, ilambda) # if the neighbor was not labeled, do pushing
pix = Pixel(distance + delta_d, i-1, j, label)
pq.append(pix)
if (j > 0):
val = labels_out[i,j-1]
if val==0 and mask[i, j-1]!=0 :
delta_d = difference(img,i,j,i,j-1,ilambda)
pix = Pixel(distance + delta_d, i, j-1, label)
pq.append(pix)
if i<(n-1):
val = labels_out[i+1,j]
if (val==0 and mask[i+1, j]!=0) :
delta_d = difference(img, i, j, i+1, j , ilambda)
pix = Pixel(distance + delta_d, i+1, j , label)
pq.append(pix)
if (j < (m-1)):
val = labels_out[i,j+1]
if val==0 and (mask[i, j+1]!=0):
delta_d = difference(img, i, j, i, j + 1, ilambda)
pix = Pixel(distance + delta_d, i, j + 1, label)
pq.append(pix)
# 8-connected
if (i > 0) and (j > 0):
val = labels_out[i-1,j-1]
if(val==0 and mask[i-1, j-1]!=0):
delta_d = difference(img, i, j, i-1, j - 1, ilambda)
pix = Pixel(distance + delta_d, i-1, j - 1, label)
pq.append(pix)
if (i < (n-1) and (j > 0)):
val=labels_out[i+1,j-1]
if (val==0 and (mask[i+1, j-1])!=0):
delta_d = difference(img, i, j, i+1, j - 1, ilambda)
pix = Pixel(distance + delta_d, i+1, j - 1, label)
pq.append(pix)
if (i > 0) and j < (m-1):
val =labels_out[i-1,j+1]
if (val==0 and mask[i-1, j+1]!=0 ):
delta_d = difference(img, i, j, i-1, j + 1, ilambda)
pix = Pixel(distance + delta_d, i-1, j + 1, label)
pq.append(pix)
if (i < (n-1) and j < (m-1)):
val=labels_out[i+1,j+1]
if val==0 and (mask[i+1, j+1]!=0):
delta_d = difference(img, i, j, i+1, j + 1, ilambda)
pix = Pixel(distance + delta_d, i+1, j + 1, label)
pq.append(pix)
return
def integral_image(x):
"""Integral image / summed area table.
The integral image contains the sum of all elements above and to the
left of it, i.e.:
.. math::
S[m, n] = \sum_{i \leq m} \sum_{j \leq n} X[i, j]
Parameters
----------
x : ndarray
Input image.
Returns
-------
S : ndarray
Integral image / summed area table.
References
----------
.. [1] <NAME>, "Summed-area tables for texture mapping,"
ACM SIGGRAPH Computer Graphics, vol. 18, 1984, pp. 207-212.
"""
return x.cumsum(1).cumsum(0)
def integrate(ii, r0, c0, r1, c1):
"""Use an integral image to integrate over a given window.
Parameters
----------
ii : ndarray
Integral image.
r0, c0 : int
Top-left corner of block to be summed.
r1, c1 : int
Bottom-right corner of block to be summed.
Returns
-------
S : int
Integral (sum) over the given window.
"""
S = 0
S += clamped_fetch(ii,r1,c1)
if (r0 - 1 >= 0) and (c0 - 1 >= 0):
S += clamped_fetch(ii,r0-1,c0-1)
if (r0 - 1 >= 0):
S -= clamped_fetch(ii,r0-1,c1)
if (c0 - 1 >= 0):
S -= clamped_fetch(ii,r1,c0-1)
return S
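# Worked example of the two helpers above: for x = np.array([[1, 2], [3, 4]]),
# integral_image(x) == [[1, 3], [4, 10]], and integrate(ii, 0, 0, 1, 1) sums the whole 2x2
# block, returning 10 (only the bottom-right entry is added because r0-1 and c0-1 are negative).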
def softmax(y):
s = np.exp(y)
y_prob = s / np.sum(s)
return y_prob
def remove_borders(img,border):
# remove borders
m,n = img.shape
img[:border, :] = 0
img[-border:, :] = 0
img[:, :border] = 0
img[:, -border:] = 0
return img
def ridgeorient(im,gradientsigma,blocksigma,orientsmoothsigma, rel = 0.01):
# Arguments: im - A normalised input image.
# gradientsigma - Sigma of the derivative of Gaussian
# used to compute image gradients.
# blocksigma - Sigma of the Gaussian weighting used to
# sum the gradient moments.
# orientsmoothsigma - Sigma of the Gaussian used to smooth
# the final orientation vector field.
# Optional: if ommitted it defaults to 0
# Returns: orientim - The orientation image in radians.
# Orientation values are +ve clockwise
# and give the direction *along* the
# ridges.
# reliability - Measure of the reliability of the
# orientation measure. This is a value
# between 0 and 1. I think a value above
# about 0.5 can be considered 'reliable'.
# reliability = 1 - Imin./(Imax+.001);
# coherence - A measure of the degree to which the local
# area is oriented.
# coherence = ((Imax-Imin)./(Imax+Imin)).^2;
rows,cols = im.shape
# Calculate image gradients.
sze = int(np.fix(6*gradientsigma))
if(sze%2 == 0):
sze = sze+1
h = fspecial_gauss2D((sze,sze),gradientsigma)
fx,fy = np.gradient(h) # Gradient of Gausian.
Gx = convolve2d(im, fx,'same') # Gradient of the image in x
Gy = convolve2d(im, fy, 'same') # ... and y
# Estimate the local ridge orientation at each point by finding the
# principal axis of variation in the image gradients.
Gxx = np.multiply(Gx,Gx) # Covariance data for the image gradients
Gxy = np.multiply(Gx,Gy)
Gyy = np.multiply(Gy,Gy)
# Now smooth the covariance data to perform a weighted summation of the data.
sze = int(np.fix(6*blocksigma))
if(sze%2 == 0):
sze = sze+1
h = fspecial_gauss2D((sze,sze),blocksigma)
Gxx = convolve2d(Gxx, h,'same');
Gxy = 2*convolve2d(Gxy,h,'same');
Gyy = convolve2d(Gyy,h,'same');
# Analytic solution of principal direction
Gxy_2 = np.multiply(Gxy,Gxy)
Gm = Gxx-Gyy
Gm = np.multiply(Gm,Gm)
denom = np.sqrt(Gxy_2 + Gm) + np.spacing(1)
sin2theta = np.divide(Gxy,denom) # Sine and cosine of doubled angles
cos2theta = np.divide(Gxx-Gyy,denom)
sze = int(np.fix(6*orientsmoothsigma))
if(sze%2 == 0):
sze = sze+1
h = fspecial_gauss2D((sze,sze),orientsmoothsigma)
cos2theta = convolve2d(cos2theta,h,'same')# Smoothed sine and cosine of
sin2theta = convolve2d(sin2theta,h,'same'); # doubled angles
orientim = np.pi/2 + np.arctan2(sin2theta,cos2theta)/2;
# Calculate 'reliability' of orientation data. Here we calculate the
# area moment of inertia about the orientation axis found (this will
# be the minimum inertia) and an axis perpendicular (which will be
# the maximum inertia). The reliability measure is given by
# 1.0-min_inertia/max_inertia. The reasoning being that if the ratio
# of the minimum to maximum inertia is close to one we have little
# orientation information.
Imin = (Gyy+Gxx)/2
Imin = Imin - np.multiply((Gxx-Gyy),cos2theta)/2 - np.multiply(Gxy,sin2theta)/2
Imax = Gyy+Gxx - Imin
reliability = 1 - np.divide(Imin,(Imax+.001))
# aux = Imax+Imin
# aux = np.multiply(aux,aux)
# coherence = np.divide((Imax-Imin),aux)
# Finally mask reliability to exclude regions where the denominator
# in the orientation calculation above was small. Here I have set
# the value to 0.001, adjust this if you feel the need
reliability = np.multiply(reliability,(denom>rel))
return orientim,reliability
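# Typical usage (sketch, mirroring the SOFT pipeline in this repository):
#   gradient, _ = canny(img, 2)
#   orientim, reliability = ridgeorient(gradient, 1, 5, 5)
# `orientim` is in radians along the ridges and `reliability` is in [0, 1], with values above
# roughly 0.5 considered trustworthy.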
def SWT(i_img, edgeImage, orientim, stroke_width=20, angle=np.pi / 6):
orientim = np.radians(orientim)
im = gaussfilt(i_img, 1)
Ix, Iy = derivative5(im)
Ix_2 = np.multiply(Ix, Ix)
Iy_2 = np.multiply(Iy, Iy)
g_mag = np.sqrt(Ix_2 + Iy_2) # Gradient magnitude.
Ix = np.divide(Ix, g_mag)
Iy = np.divide(Iy, g_mag)
cres = 0
prec = 0.4
mSWT = -np.ones(i_img.shape)
count = 1
h_stroke = stroke_width * 0.5
rows, cols = i_img.shape
for i in range(rows):
for j in range(cols):
if (edgeImage[i, j] > 0):
count = 0
points_x = []
points_y = []
points_x.append(j)
points_y.append(i)
count += 1
curX = float(j) + 0.5
curY = float(i) + 0.5
cres = 0
while cres < stroke_width:
curX = curX + Ix[i, j] * prec # find directionality increments x or y
curY = curY + Iy[i, j] * prec
cres = cres + 1
curPixX = int(math.floor(curX))
curPixY = int(math.floor(curY))
if (curPixX < 0 or curPixX > cols - 1 or curPixY < 0 or curPixY > rows - 1):
break
points_x.append(curPixX)
points_y.append(curPixY)
count += 1
if (edgeImage[curPixY, curPixX] > 0 and count < 21):
ang_plus = orientim[i, j] + angle
if (ang_plus > np.pi):
ang_plus = np.pi
ang_minus = orientim[i, j] - angle
if (ang_minus < 0):
ang_minus = 0
if ((orientim[curPixY, curPixX] < ang_plus) and (
orientim[curPixY, curPixX] > ang_minus) and count > h_stroke):
dist = math.sqrt((curPixX - j) * (curPixX - j) + (curPixY - i) * (curPixY - i))
for k in range(count - 1):
if (mSWT[points_y[k], points_x[k]] < 0):
mSWT[points_y[k], points_x[k]] = dist
else:
mSWT[points_y[k], points_x[k]] = np.min([dist, mSWT[points_y[k], points_x[k]]])
if (count > stroke_width):
break
return mSWT
def SWT_Total(i_image, edges, orientation, stroke_width, angle=np.pi / 6):
inv_iim = 255 - i_image # needed for shadowing
swtim = SWT(i_image, edges, orientation, stroke_width, angle) # one image
swtinv_im = SWT(inv_iim, edges, orientation, stroke_width, angle) # the inverse
swtim[np.nonzero(swtim < 0)] = 0
swtinv_im[np.nonzero(swtinv_im < 0)] = 0
swt_end = swtim
indexes = np.nonzero(swtim == 0)
swt_end[indexes] = swtinv_im[indexes]
return swt_end
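# Typical usage (sketch, following the same call pattern as the SOFT class in this repository):
#   swt = SWT_Total(img, edges, orientation, stroke_width=20)
# where `edges` is a binary edge map and `orientation` the ridge-orientation image; the result
# combines the transform of the image and of its inverse so both dark and bright strokes are kept.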
def hysthresh(image,T1,T2):
if T1 < T2 : # T1 and T2 reversed - swap values
tmp = T1
T1 = T2
T2 = tmp
aboveT2 = image > T2; # Edge points above lower threshold.
[aboveT1r,aboveT1c] = np.nonzero(image > T1); # Row and colum coords of points above upper threshold.
# Obtain all connected regions in aboveT2 that include a point that has a
# value above T1
bw = floodfill(aboveT2, aboveT1r, aboveT1c, 8)
return bw
def cleanswt2(swt,edges):
    # label connected components of the non-zero SWT pixels (the original indexed swt with the
    # mask, which produced a 1-D array and made the labelling meaningless)
    mask = swt > 0
    labeled, nr_objects = label(mask)
    w, h = swt.shape
    max_pix = (0.05 * w)
    # drop components smaller than 5% of the image width
    for i in range(1, nr_objects + 1):
        numpix = np.sum(labeled == i)
        if(numpix < max_pix):
            swt[labeled == i] = 0
    swt[edges > 0] = np.max(swt)
    return swt
def autocanny(nm,canthresh):
m,n = nm.shape
im_size = np.array([m,n])
med = float(np.median(nm[nm > 0]))
max_factor = 0.95 * np.max(nm)
factor_a = max_factor
factor_b_p = 0.4*med
bwedge = []
value = 0
msize = m*n
max_pix = int(msize*canthresh)
iter = 0
while (value < max_pix and iter<50):
iter = iter+1
bwedge = hysthresh(nm, factor_a * med, factor_b_p)
value = np.sum(bwedge)
factor_a = factor_a * 0.9
if (factor_a < 1e-15):
break
c1 = 0
alpha_1 = 0.01
alpha_2 = 0.01
inv = True
iter = 0
while (np.abs(value-max_pix)>200 and iter<20):
bwedge = hysthresh(nm, factor_a * med, factor_b_p)
value = np.sum(bwedge)
iter = iter+1
if(value<max_pix):
if(inv):
alpha_1 = 0.01
inv = False
factor_a = factor_a - alpha_1
c1 = c1 + 1
if(c1==2):
alpha_1 = alpha_1 * 2
c1 = 0
else:
if(not inv):
alpha_2 = 0.01
inv = True
c1 = c1 - 1
factor_a = factor_a + alpha_2
if(c1 == -2 ):
alpha_2 = alpha_2 * 2
c1 = 0
return bwedge
def autocanny2(prepro, nm, canthresh, blocksize):
m,n = prepro.shape
im_size = np.array([m,n])
size_pixels = im_size / blocksize
size_pixels = int(size_pixels[0] * size_pixels[1])
# Clustering of image
segments = slic(prepro, n_segments=size_pixels, sigma=1.5, compactness=0.08, start_label=0)
num_labels = np.max(segments) + 1
med = float(np.median(nm[nm > 0]))
max_factor = 0.95 * np.max(nm)
factor_a = max_factor
factor_b_p = 0.4*med
bwedge = []
value = 0
msize = m*n
max_pix = int(msize*canthresh)
while (value < max_pix):
bwedge = hysthresh(nm, factor_a * med, factor_b_p)
value = np.sum(bwedge)
factor_a = factor_a * 0.9
if (factor_a < 1e-15):
break
f = []
f.append(factor_a)
factor_original = factor_a
c1 = 0
alpha_1 = 0.01
alpha_2 = 0.01
inv = True
iter = 0
while (np.abs(value-max_pix)>200 and iter<20):
bwedge = hysthresh(nm, factor_a * med, factor_b_p)
value = np.sum(bwedge)
iter = iter+1
if(value<max_pix):
if(inv):
alpha_1 = 0.01
inv = False
factor_a = factor_a - alpha_1
c1 = c1 + 1
if(c1==2):
alpha_1 = alpha_1 * 2
c1 = 0
else:
if(not inv):
alpha_2 = 0.01
inv = True
c1 = c1 - 1
factor_a = factor_a + alpha_2
if(c1 == -2 ):
alpha_2 = alpha_2 * 2
c1 = 0
f.append(factor_a)
expected_density = (msize * canthresh) / size_pixels # Expected
label_counter = 0
for i in range(num_labels):
label_density = np.sum(bwedge[np.where(segments == i)])
if (label_density < 2 * expected_density):
nm[segments == i]= 0
else:
bwedge[np.where(segments == i)] = 0;
label_counter = label_counter + 1
subsize = label_counter * blocksize * blocksize
max_pix = (subsize/(msize*1.0))*canthresh
factor_a = max_factor
value = 0
bwedge2 = np.zeros((m,n))
while (value < max_pix):
bwedge2 = hysthresh(nm, factor_a * med, factor_b_p);
value = np.sum(bwedge2)/subsize
factor_a = factor_a * 0.9;
if (factor_a < 1e-15):
break
f = []
f.append(factor_a)
factor_original = factor_a
c1 = 0
alpha_1 = 0.01
alpha_2 = 0.01
inv = True
iter = 0
while (np.abs(value-max_pix)>0.001 and iter<20):
bwedge2 = hysthresh(nm, factor_a * med, factor_b_p)
value = np.sum(bwedge2)/subsize
iter = iter+1
if(value<max_pix):
if(inv):
alpha_1 = 0.01
inv = False
factor_a = factor_a - alpha_1
c1 = c1 + 1
if(c1==2):
alpha_1 = alpha_1 * 2
c1 = 0
else:
if(not inv):
alpha_2 = 0.01
inv = True
c1 = c1 - 1
factor_a = factor_a + alpha_2
if(c1 == -2 ):
alpha_2 = alpha_2 * 2
c1 = 0
f.append(factor_a)
bwedge = np.logical_or(bwedge, bwedge2)
return bwedge
def kuwahara_filter(input,winsize):
# Kuwahara filters an image using the Kuwahara filter
"""
filtered = Kuwahara(original, windowSize)
    filters the image with a given windowSize and yields the result in filtered.
    It uses: variance = (mean of squares) - (square of mean).
    filtered = Kuwahara(original, 5);
    Description: The Kuwahara filter works on a window divided into 4 overlapping subwindows.
    In each subwindow the mean and the variance are computed. The output value (located at the center of the window)
    is set to the mean of the subwindow with the smallest variance.
References:
http: // www.ph.tn.tudelft.nl / DIPlib / docs / FIP.pdf
http: // www.incx.nec.co.jp / imap - vision / library / wouter / kuwahara.html
:param input:
:param winsize:
:return:
"""
input = np.array(input,dtype = np.float64)
m,n = input.shape
    if (winsize%4) != 1 :
        return
    half = (winsize - 1) // 2   # integer division so the kernel dimensions are ints
    tmpAvgKerRow = np.concatenate((np.ones((1, half + 1)), np.zeros((1, half))), axis=1)
    tmpPadder = np.zeros((1, winsize))
    tmpavgker = np.tile(tmpAvgKerRow, (half + 1, 1))   # np.tile replaces np.matlib.repmat (numpy.matlib is never imported here)
    tmpavgker = np.concatenate((tmpavgker, np.tile(tmpPadder, (half, 1))))
tmpavgker = tmpavgker / np.sum(tmpavgker)
# tmpavgker is a 'north-west'
t1,t2 = tmpavgker.shape
avgker = np.zeros((t1,t2,4))
avgker[:,:, 0] = tmpavgker # North - west(a)
avgker[:,:, 1] = np.fliplr(tmpavgker) # North - east(b)
avgker[:,:, 3] = np.flipud(tmpavgker) # South - east(c)
avgker[:,:, 2] = np.fliplr(np.flipud(tmpavgker)) # South - west(d)
squaredImg = input**2
avgs = np.zeros((m,n,4))
stddevs = np.zeros((m,n,4))
## Calculation of averages and variances on subwindows
for k in range(0,4):
avgs[:,:, k] = convolve2d(input, avgker[:,:, k], 'same') # mean
stddevs[:,:, k] = convolve2d(squaredImg, avgker[:,:, k], 'same') # mean
stddevs[:,:, k] = stddevs[:,:, k]-avgs[:,:, k]**2 # variance
# minima = np.min(stddevs, axis=2)
indices = np.argmin(stddevs,axis = 2)
filtered = np.zeros(input.shape)
for k in range(m) :
for i in range(n):
filtered[k, i] = avgs[k, i, indices[k, i]]
return filtered
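# Usage example from the docstring above: filtered = kuwahara_filter(img, 5).
# Note that the function silently returns None unless winsize % 4 == 1 (e.g. 5, 9, 13, ...).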
def nonmaxsup_python(gradient,orientation,radius = 1.2):
"""
# Input:
# inimage - Image to be non-maxima suppressed.
# orient - Image containing feature normal orientation angles in degrees
# (0-180), angles positive anti-clockwise.
# radius - Distance in pixel units to be looked at on each side of each
# pixel when determining whether it is a local maxima or not.
# This value cannot be less than 1.
# (Suggested value about 1.2 - 1.5)
# Returns:
# im - Non maximally suppressed image.
#
# Notes:
# The suggested radius value is 1.2 - 1.5 for the following reason. If the
# radius parameter is set to 1 there is a chance that a maxima will not be
# identified on a broad peak where adjacent pixels have the same value. To
# overcome this one typically uses a radius value of 1.2 to 1.5. However
# under these conditions there will be cases where two adjacent pixels will
# both be marked as maxima. Accordingly there is a final morphological
# thinning step to correct this.
# This function is slow. It uses bilinear interpolation to estimate
# intensity values at ideal, real-valued pixel locations on each side of
# pixels to determine if they are local maxima.
# Copyright (c) 1996-2013 <NAME>
"""
im = np.zeros(gradient.shape)
if(radius<1):
return
iradius = int(math.ceil(radius))
# Precalculate x and y offsets relative to centre pixel for each orientation angle
angle = range(0,181,1)
angle = (np.array(angle)*np.pi)/180 # Array of angles in 1 degree increments (but in radians).
xoff = radius*np.cos(angle) # x and y offset of points at specified radius and angle
yoff = radius*np.sin(angle) # from each reference position.
hfrac = xoff - np.floor(xoff) # Fractional offset of xoff relative to integer location
vfrac = yoff - np.floor(yoff) # Fractional offset of yoff relative to integer location
orient = np.fix(orientation) # Orientations start at 0 degrees but arrays start
# with index 1.
orient = np.array(orient,dtype=np.int16)
# Now run through the image interpolating grey values on each side
# of the centre pixel to be used for the non-maximal suppression.
[rows,cols] = gradient.shape
nrow = range(iradius+1,rows - iradius)
ncol = range(iradius+1,cols - iradius)
for elr in nrow:
for elc in ncol:
ori = orient[elr,elc] # Index into precomputed arrays
x = elc + xoff[ori] # x, y location on one side of the point in question
y = elr - yoff[ori]
fx = int(np.floor(x)) # Get integer pixel locations that surround location x,y
cx = int(np.ceil(x))
fy = int(np.floor(y))
cy = int(np.ceil(y))
tl = gradient[fy,fx] # Value at top left integer pixel location.
tr = gradient[fy,cx] # top right
bl = gradient[cy,fx] # bottom left
br = gradient[cy,cx] # bottom right
upperavg = tl + hfrac[ori]*(tr - tl) # Now use bilinear interpolation to
loweravg = bl + hfrac[ori]*(br - bl) # estimate value at x,y
v1 = upperavg + vfrac[ori]*(loweravg - upperavg)
if (gradient[elr, elc] > v1): # We need to check the value on the other side...
x = elc - xoff[ori] # x, y location on the `other side' of the point in question
y = elr + yoff[ori]
fx = int(np.floor(x))
cx = int(np.ceil(x))
fy = int(np.floor(y))
cy = int(np.ceil(y))
tl = gradient[fy,fx] # % Value at top left integer pixel location.
tr = gradient[fy,cx] # % top right
bl = gradient[cy,fx] # % bottom left
br = gradient[cy,cx] # % bottom right
upperavg = tl + hfrac[ori]*(tr - tl)
loweravg = bl + hfrac[ori]*(br - bl)
v2 = upperavg + vfrac[ori]*(loweravg - upperavg)
if (gradient[elr,elc] > v2): # This is a local maximum.
im[elr, elc] = gradient[elr, elc] # Record value in the output
# Finally thin the 'nonmaximally suppressed' image by pointwise
# multiplying itself with a morphological skeletonization of itself.
# I know it is oxymoronic to thin a nonmaximally supressed image but
# fixes the multiple adjacent peaks that can arise from using a radius
# value > 1.
#
# skel = bwmorph(im>0,'skel',Inf);
#
im2 = (im>0).astype(np.int8)
skel= morphology.skeletonize(im2)
im = np.multiply(im,skel)
return im
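# Minimal edge-detection pipeline built from the functions in this module (sketch):
#   gradient, orientation = canny(img, 2)            # orientation in degrees, 0-180
#   nm = nonmaxsup_python(gradient, orientation, 1.5)
#   bwedge = autocanny(nm, 0.05)                     # keep roughly 5% of the pixels as edges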
def floodfill(bw, r, c, N=8):
filled = np.zeros(bw.shape)
theStack = deque(zip(r, c))
while len(theStack) > 0:
x, y = theStack.pop()
if filled[x, y] == 1:
continue
if bw[x, y] == 0:
continue
filled[x, y] = 1
theStack.append((x + 1, y)) # right
theStack.append((x - 1, y)) # left
theStack.append((x, y + 1)) # down
theStack.append((x, y - 1)) # up
if (N == 8):
theStack.append((x + 1, y + 1)) # d right
theStack.append((x - 1, y - 1)) # d left
theStack.append((x - 1, y + 1)) # down
theStack.append((x + 1, y - 1)) # up
return filled
def borderEnhancer(img,filtersize):
# Estimate the local mean of f.
prod_fs = reduce(lambda x, y: x * y, filtersize, 1)
localMean = convolve2d(img,np.ones(filtersize),'same') / prod_fs;
# Estimate of the local variance of f.
img_2 = np.multiply(img,img)
localMean_2 = localMean*localMean
localVar = convolve2d(img_2,np.ones(filtersize),'same') / prod_fs - localMean_2;
localVar = localVar>0
return localVar
```
#### File: CLEMSite_notebooks/2_crossing_Detector_SEM_MatTek/SOFT.py
```python
import sys, os
import time
import cv2
import numpy as np
import math
from scipy.signal import convolve2d
from scipy.ndimage import label,sum
from scipy.misc import imrotate
from matplotlib import pyplot as plt
from skimage import morphology
from skimage.segmentation import slic
from bwmorph import bwmorph_thin
from collections import deque
class SOFT:
# def __init__(self):
# pass
def soft(self,image,sigma=1.5, clahe=True, canny_thresh=0.05, stroke=20):
"""
Find Lines using Orientations and Projections
Note: +CT means, increases computation time.
'K' Number of neighbors to consider in the Orientation Field
Transform.Each neighbor is evaluated against a candidate angle
and then add up. The biggest the lines, the better the
result for a bigger K. (K big,+CT)
Default: 12.
'Delta' Angle increment from 0.1 to 90.
The resulting projection will be more accurate if
the increment is small. (Delta small, +CT)
Default: 1.
'dispim' If true, images are shown. If false,no images are shown.
Default: True
%
'wiener' Two-element vector of positive integers: [M N].
[M N] specifies the number of tile rows and
columns. Both M and N must be at least 2.
The total number of image tiles is equal to M*N.
If the lines are too thin, it is used to dilate them.
Default: [2 2].
Use 0 to not execute the wiener filter.
'strokeWidth' When the Stroke Width Transform is executed, for each
pixel, rays are created from the pixel to the next
change of gradient. If your stroke is big, use a bigger
width.(strokeWidth big, ++CT)
Default: 20.
'canthresh' Automatic canny thresholding is performed using an
iterative loop. If the percentage of white pixels is bigger than a
threshold,then we are assuming the image is getting more
and more clutter.
Default: 0.075, means a 7.5% of the pixels is white.
'Sigma' Preprocessing gaussian filter. Helps with noisy images
of after CLAHE. Values between 0 and 2 are recommended.
Default: 0 (not applied).
'clahe' If true, CLAHE (automatic brightness and contrast
balance) is applied.
Default: False.
##########################################
saved values:
R - map of projections
peaks - peaks detected
prepro - image after preprocessing (clahe and gaussian filter)
bwedge - image after automatic canny filtering
orientim - image with ridge orientations
reliability - probabilistic plot of orientations
:return:
"""
if (sigma < 0):
print 'Invalid value. Sigma cannot be smaller than 0.'
sigma = 0
self.sigma = sigma
self.clahe = clahe
if (canny_thresh > 1 or canny_thresh <= 0):
print 'Invalid threshold. Cannot be bigger than 1 or smaller than 0. Setting default value.'
canny_thresh = 0.05
self.canthresh = canny_thresh
if (stroke < 2 or stroke > 1024):
            print 'Invalid stroke size. Accepted values between 2 and half the size of your image. Setting default value.'
stroke = 20
self.stroke = stroke
print("Preprocessing")
start = time.time()
        prepro = image
        I = prepro   # default: used unchanged when no Gaussian blur is applied (sigma == 0)
        if(self.clahe):
            # NOTE: only the message below is printed; the CLAHE equalisation itself is not applied in this code path
            print('CLAHE true, performed at clipLimit 0.01 and tileGridSize of 32,32')
        if(self.sigma>0):
            sze = int(math.ceil(6*self.sigma))
            if(sze%2 == 0):
                sze = sze+1
            h = self.fspecial_gauss2D((sze,sze),self.sigma)
            I = convolve2d(prepro,h,'same')
            print('Gaussian blur performed with Size ' +str(sze)+ ' and sigma '+ str(self.sigma))
        PREPRO = I
end = time.time()
print "Preprocessing done: "+str(end - start)+" s."
##### Gradient
start = time.time()
gradient,orientation = self.canny(I,2)
end = time.time()
print "Gradient done: "+str(end - start)+" s."
# plt.subplot(121),plt.imshow(orientation,cmap='gray')
# plt.subplot(122),plt.imshow(gradient*10,cmap='gray')
# plt.show()
start = time.time()
# nm = self.nonmaxsup(gradient,orientation,1.5)
nm = self.nonmaxsup_python(gradient,orientation,1.5)
end = time.time()
print "NMS done: "+str(end - start)+" s."
start = time.time()
# nm = nonmaxsup(gradient,orientation,1.5)
BWEDGE = self.autocanny2(prepro,nm,16)
end = time.time()
print "Autocanny done: "+str(end - start)+" s."
m_size = np.array([2,2])
J = self.borderEnhancer(BWEDGE,m_size)
print 'Border Enhancement done'
start = time.time()
ORIENTIM, _reliability = self.ridgeorient(gradient, 1, 5, 5)
segments = slic(prepro, n_segments=2500, sigma=1.5, compactness=0.08)
num_labels = np.max(segments) + 1
orientim_slic = np.copy(ORIENTIM)
for i in range(num_labels):
orientim_slic[np.where(segments == i)] = np.median(ORIENTIM[np.where(segments == i)])
ORIENTIM = orientim_slic
_, RELIABILITY = self.ridgeorient(gradient, 1, 3, 3)
RELIABILITY[RELIABILITY<0.5] = 0
end = time.time()
print "Ridges done: "+str(end - start)+" s."
# plt.imshow(orientim2 ,cmap='jet')
tl = np.multiply(J,RELIABILITY) # Enhance the bw image removing disordered regions
if self.stroke>0:
print "Starting SWT with strokeWidth of "+str(self.stroke)
start = time.time()
iSWT= self.SWT_Total(I,tl,ORIENTIM,self.stroke)
end = time.time()
print "SWT done: "+str(end - start)+" s."
start = time.time()
print('Removing ill components')
FSWT = self.cleanswt2(iSWT,J)
end = time.time()
print "Removing done: " + str(end - start) + " s.\n"
plt.show()
return PREPRO,BWEDGE,ORIENTIM,RELIABILITY,iSWT,FSWT
def autocanny(self,nm):
med = np.median(nm[nm>0])
max_factor = 0.8*np.max(nm)
factor_a = max_factor
factor_b = 0.4
lenm = nm.shape
bwedge = np.zeros(lenm)
value = 0
msize = (lenm[0]*lenm[1])
while(value<self.canthresh):
bwedge = self.hysthresh(nm, factor_a*med,factor_b*med);
value = np.sum(bwedge)/msize
factor_a = factor_a*0.9
# Coarse part or histeresis accomplished
while(value>self.canthresh):
factor_a = factor_a + 0.01
bwedge = self.hysthresh(nm, factor_a*med,factor_b*med);
value = np.sum(bwedge)/msize
print 'Automatic Canny Done'
print 'Lower threshold reached at '+str(factor_b)
print 'Upper threshold reached at '+str(factor_a)
return bwedge
def autocanny2(self, prepro, nm, blocksize):
m,n = prepro.shape
im_size = np.array([m,n])
size_pixels = im_size / blocksize
size_pixels = int(size_pixels[0] * size_pixels[1])
# Clustering of image
segments = slic(prepro, n_segments=size_pixels, sigma=1.5, compactness=0.08)
num_labels = np.max(segments) + 1
med = float(np.median(nm[nm > 0]))
max_factor = 0.95 * np.max(nm)
factor_a = max_factor
factor_b = 0.4
bwedge = []
value = 0
msize = m*n
while (value < self.canthresh):
bwedge = self.hysthresh(nm, factor_a * med, factor_b * med)
value = np.sum(bwedge)/msize
factor_a = factor_a * 0.9
if (factor_a < 1e-15):
break
while (value > self.canthresh):
factor_a = factor_a + 0.01
bwedge = self.hysthresh(nm, factor_a * med, factor_b * med);
value = np.sum(bwedge)/msize
expected_density = (msize * self.canthresh) / size_pixels # Expected
label_counter = 0
for i in range(num_labels):
label_density = np.sum(bwedge[np.where(segments == i)])
if (label_density < 2 * expected_density):
nm[segments == i]= 0
else:
bwedge[np.where(segments == i)] = 0;
label_counter = label_counter + 1
subsize = label_counter * blocksize * blocksize
canthresh = (subsize/(msize*1.0))*self.canthresh
factor_a = max_factor
factor_b = 0.4
value = 0
bwedge2 = np.zeros((m,n))
while (value < canthresh):
bwedge2 = self.hysthresh(nm, factor_a * med, factor_b * med);
value = np.sum(bwedge2) / subsize;
factor_a = factor_a * 0.9;
if (factor_a < 1e-15):
break
while (value > canthresh):
factor_a = factor_a + 0.01;
bwedge2 = self.hysthresh(nm, factor_a * med, factor_b * med);
value = sum(sum(bwedge2)) / msize
bwedge[bwedge2>0] = 1
print 'Automatic Canny Done'
print 'Lower threshold reached at ' + str(factor_b)
print 'Upper threshold reached at ' + str(factor_a)
return bwedge
def gaussfilt(self,img,sigma):
sze = int(math.ceil(6*sigma))
if(sze%2 == 0):
sze = sze+1
h = self.fspecial_gauss2D((sze,sze),sigma)
# conv2(image, mask) is the same as filter2(rot90(mask,2), image)
image = convolve2d(img,h,'same')
return image
def derivative5(self,i_image):
# 5 tap 1st derivative cofficients. These are optimal if you are just
# seeking the 1st derivatives
# Copyright (c) 2010 <NAME>
p = np.array([0.037659,0.249153,0.426375,0.249153,0.037659], dtype = np.float32)
d1 =np.array([0.109604,0.276691,0.000000,-0.276691,-0.109604],dtype = np.float32)
a = p[:,np.newaxis]*d1.transpose()
b = d1[:,np.newaxis]*p.transpose()
Ix = convolve2d(i_image,a,'same')
Iy = convolve2d(i_image,b,'same')
return Ix,Iy
def fspecial_gauss2D(self,shape=(3,3),sigma=0.5):
"""
2D gaussian mask - should give the same result as MATLAB's
fspecial('gaussian',[shape],[sigma])
"""
m,n = [(ss-1.)/2. for ss in shape]
y,x = np.ogrid[-m:m+1,-n:n+1]
h = np.exp( -(x*x + y*y) / (2.*sigma*sigma) )
h[ h < np.finfo(h.dtype).eps*h.max() ] = 0
sumh = h.sum()
if sumh != 0:
h /= sumh
return h
def nonmaxsup_python(self,gradient,orientation,radius = 1.2):
"""
# Input:
# inimage - Image to be non-maxima suppressed.
# orient - Image containing feature normal orientation angles in degrees
# (0-180), angles positive anti-clockwise.
# radius - Distance in pixel units to be looked at on each side of each
# pixel when determining whether it is a local maxima or not.
# This value cannot be less than 1.
# (Suggested value about 1.2 - 1.5)
# Returns:
# im - Non maximally suppressed image.
#
# Notes:
# The suggested radius value is 1.2 - 1.5 for the following reason. If the
# radius parameter is set to 1 there is a chance that a maxima will not be
# identified on a broad peak where adjacent pixels have the same value. To
# overcome this one typically uses a radius value of 1.2 to 1.5. However
# under these conditions there will be cases where two adjacent pixels will
# both be marked as maxima. Accordingly there is a final morphological
# thinning step to correct this.
# This function is slow. It uses bilinear interpolation to estimate
# intensity values at ideal, real-valued pixel locations on each side of
# pixels to determine if they are local maxima.
# Copyright (c) 1996-2013 <NAME>
"""
im = np.zeros(gradient.shape)
if(radius<1):
print 'ERROR: radius should be bigger than 1'
return
iradius = int(math.ceil(radius))
# Precalculate x and y offsets relative to centre pixel for each orientation angle
angle = range(0,181,1)
angle = (np.array(angle)*np.pi)/180 # Array of angles in 1 degree increments (but in radians).
xoff = radius*np.cos(angle) # x and y offset of points at specified radius and angle
yoff = radius*np.sin(angle) # from each reference position.
hfrac = xoff - np.floor(xoff) # Fractional offset of xoff relative to integer location
vfrac = yoff - np.floor(yoff) # Fractional offset of yoff relative to integer location
orient = np.fix(orientation) # Orientations start at 0 degrees but arrays start
# with index 1.
orient = np.array(orient,dtype=np.int16)
# Now run through the image interpolating grey values on each side
# of the centre pixel to be used for the non-maximal suppression.
[rows,cols] = gradient.shape
nrow = range(iradius+1,rows - iradius)
ncol = range(iradius+1,cols - iradius)
for elr in nrow:
for elc in ncol:
ori = orient[elr,elc] # Index into precomputed arrays
x = elc + xoff[ori] # x, y location on one side of the point in question
y = elr - yoff[ori]
fx = int(np.floor(x)) # Get integer pixel locations that surround location x,y
cx = int(np.ceil(x))
fy = int(np.floor(y))
cy = int(np.ceil(y))
tl = gradient[fy,fx] # Value at top left integer pixel location.
tr = gradient[fy,cx] # top right
bl = gradient[cy,fx] # bottom left
br = gradient[cy,cx] # bottom right
upperavg = tl + hfrac[ori]*(tr - tl) # Now use bilinear interpolation to
loweravg = bl + hfrac[ori]*(br - bl) # estimate value at x,y
v1 = upperavg + vfrac[ori]*(loweravg - upperavg)
if (gradient[elr, elc] > v1): # We need to check the value on the other side...
x = elc - xoff[ori] # x, y location on the `other side' of the point in question
y = elr + yoff[ori]
fx = int(np.floor(x))
cx = int(np.ceil(x))
fy = int(np.floor(y))
cy = int(np.ceil(y))
tl = gradient[fy,fx] # % Value at top left integer pixel location.
tr = gradient[fy,cx] # % top right
bl = gradient[cy,fx] # % bottom left
br = gradient[cy,cx] # % bottom right
upperavg = tl + hfrac[ori]*(tr - tl)
loweravg = bl + hfrac[ori]*(br - bl)
v2 = upperavg + vfrac[ori]*(loweravg - upperavg)
if (gradient[elr,elc] > v2): # This is a local maximum.
im[elr, elc] = gradient[elr, elc] # Record value in the output
# Finally thin the 'nonmaximally suppressed' image by pointwise
# multiplying itself with a morphological skeletonization of itself.
# I know it is oxymoronic to thin a nonmaximally supressed image but
# fixes the multiple adjacent peaks that can arise from using a radius
# value > 1.
#
# skel = bwmorph(im>0,'skel',Inf);
#
im2 = (im>0).astype(np.int8)
skel= morphology.skeletonize(im2)
im = np.multiply(im,skel)
return im
def hysthresh(self,image,T1,T2):
if T1 < T2 : # T1 and T2 reversed - swap values
tmp = T1
T1 = T2
T2 = tmp
aboveT2 = image > T2; # Edge points above lower threshold.
[aboveT1r,aboveT1c] = np.nonzero(image > T1); # Row and colum coords of points above upper threshold.
# Obtain all connected regions in aboveT2 that include a point that has a
# value above T1
bw = self.floodfill(aboveT2, aboveT1r, aboveT1c, 8)
return bw
def floodfill(self,bw, r, c, N=8):
filled = np.zeros(bw.shape)
theStack = deque(zip(r,c))
while len(theStack) > 0:
x, y = theStack.pop()
if filled[x, y] == 1:
continue
if bw[x, y] == 0:
continue
filled[x, y] = 1
theStack.append((x + 1, y)) # right
theStack.append((x - 1, y)) # left
theStack.append((x, y + 1)) # down
theStack.append((x, y - 1)) # up
if (N == 8):
theStack.append((x + 1, y + 1)) # d right
theStack.append((x - 1, y - 1)) # d left
theStack.append((x - 1, y + 1)) # down
theStack.append((x + 1, y - 1)) # up
return filled
def borderEnhancer(self,img,filtersize):
# Estimate the local mean of f.
prod_fs = reduce(lambda x, y: x * y, filtersize, 1)
localMean = convolve2d(img,np.ones(filtersize),'same') / prod_fs;
# Estimate of the local variance of f.
img_2 = np.multiply(img,img)
localMean_2 = localMean*localMean
localVar = convolve2d(img_2,np.ones(filtersize),'same') / prod_fs - localMean_2;
localVar = localVar>0
return localVar
def ridgeorient(self,im,gradientsigma,blocksigma,orientsmoothsigma):
# Arguments: im - A normalised input image.
# gradientsigma - Sigma of the derivative of Gaussian
# used to compute image gradients.
# blocksigma - Sigma of the Gaussian weighting used to
# sum the gradient moments.
# orientsmoothsigma - Sigma of the Gaussian used to smooth
# the final orientation vector field.
# Optional: if ommitted it defaults to 0
# Returns: orientim - The orientation image in radians.
# Orientation values are +ve clockwise
# and give the direction *along* the
# ridges.
# reliability - Measure of the reliability of the
# orientation measure. This is a value
# between 0 and 1. I think a value above
# about 0.5 can be considered 'reliable'.
# reliability = 1 - Imin./(Imax+.001);
# coherence - A measure of the degree to which the local
# area is oriented.
# coherence = ((Imax-Imin)./(Imax+Imin)).^2;
rows,cols = im.shape
# Calculate image gradients.
sze = int(np.fix(6*gradientsigma))
if(sze%2 == 0):
sze = sze+1
h = self.fspecial_gauss2D((sze,sze),gradientsigma)
fx,fy = np.gradient(h) # Gradient of Gausian.
Gx = convolve2d(im, fx,'same') # Gradient of the image in x
Gy = convolve2d(im, fy, 'same') # ... and y
# Estimate the local ridge orientation at each point by finding the
# principal axis of variation in the image gradients.
Gxx = np.multiply(Gx,Gx) # Covariance data for the image gradients
Gxy = np.multiply(Gx,Gy)
Gyy = np.multiply(Gy,Gy)
# Now smooth the covariance data to perform a weighted summation of the data.
sze = int(np.fix(6*blocksigma))
if(sze%2 == 0):
sze = sze+1
h = self.fspecial_gauss2D((sze,sze),blocksigma)
Gxx = convolve2d(Gxx, h,'same');
Gxy = 2*convolve2d(Gxy,h,'same');
Gyy = convolve2d(Gyy,h,'same');
# Analytic solution of principal direction
Gxy_2 = np.multiply(Gxy,Gxy)
Gm = Gxx-Gyy
Gm = np.multiply(Gm,Gm)
denom = np.sqrt(Gxy_2 + Gm) + np.spacing(1)
sin2theta = np.divide(Gxy,denom) # Sine and cosine of doubled angles
cos2theta = np.divide(Gxx-Gyy,denom)
sze = int(np.fix(6*orientsmoothsigma))
if(sze%2 == 0):
sze = sze+1
h = self.fspecial_gauss2D((sze,sze),orientsmoothsigma)
cos2theta = convolve2d(cos2theta,h,'same')# Smoothed sine and cosine of
sin2theta = convolve2d(sin2theta,h,'same'); # doubled angles
orientim = np.pi/2 + np.arctan2(sin2theta,cos2theta)/2;
# Calculate 'reliability' of orientation data. Here we calculate the
# area moment of inertia about the orientation axis found (this will
# be the minimum inertia) and an axis perpendicular (which will be
# the maximum inertia). The reliability measure is given by
# 1.0-min_inertia/max_inertia. The reasoning being that if the ratio
# of the minimum to maximum inertia is close to one we have little
# orientation information.
Imin = (Gyy+Gxx)/2
Imin = Imin - np.multiply((Gxx-Gyy),cos2theta)/2 - np.multiply(Gxy,sin2theta)/2
Imax = Gyy+Gxx - Imin
reliability = 1 - np.divide(Imin,(Imax+.001))
# aux = Imax+Imin
# aux = np.multiply(aux,aux)
# coherence = np.divide((Imax-Imin),aux)
# Finally mask reliability to exclude regions where the denominator
# in the orientation calculation above was small. Here I have set
# the value to 0.001, adjust this if you feel the need
reliability = np.multiply(reliability,(denom>.001))
return orientim,reliability
def SWT(self,i_img,edgeImage,orientim,stroke_width=20,angle=np.pi/6):
im = self.gaussfilt(i_img,1)
Ix,Iy = self.derivative5(im)
Ix_2 = np.multiply(Ix,Ix)
Iy_2 = np.multiply(Iy,Iy)
g_mag = np.sqrt(Ix_2 + Iy_2) # Gradient magnitude.
Ix = np.divide(Ix,g_mag)
Iy = np.divide(Iy,g_mag)
cres = 0
prec = 0.4
mSWT = -np.ones(i_img.shape)
count = 1
h_stroke = stroke_width*0.5
rows,cols = i_img.shape
for i in range(rows):
for j in range(cols):
if(edgeImage[i,j]>0):
count = 0
points_x = []
points_y = []
points_x.append(j)
points_y.append(i)
count += 1
curX = float(j)+0.5
curY = float(i)+0.5
cres = 0
while cres<stroke_width :
curX = curX + Ix[i,j]*prec # find directionality increments x or y
curY = curY + Iy[i,j]*prec
cres = cres +1
curPixX = int(math.floor(curX))
curPixY = int(math.floor(curY))
if(curPixX<0 or curPixX > cols-1 or curPixY<0 or curPixY>rows-1):
break
points_x.append(curPixX)
points_y.append(curPixY)
count +=1
if(edgeImage[curPixY,curPixX]>0 and count<21):
ang_plus = orientim[i,j]+angle
if(ang_plus>np.pi):
ang_plus = np.pi
ang_minus = orientim[i,j]- angle
if(ang_minus<0):
ang_minus = 0
if((orientim[curPixY,curPixX]<ang_plus) and (orientim[curPixY,curPixX]>ang_minus) and count> h_stroke ):
dist= math.sqrt((curPixX - j)*(curPixX - j) + (curPixY-i)*(curPixY-i))
for k in range(count-1):
if(mSWT[points_y[k],points_x[k]]<0):
mSWT[points_y[k],points_x[k]]=dist
else:
mSWT[points_y[k],points_x[k]]= np.min([dist,mSWT[points_y[k],points_x[k]]])
if(count>stroke_width):
break
return mSWT
def SWT_Total(self,i_image,edges,orientation,stroke_width, angle = np.pi/6):
inv_iim = 255 - i_image # needed for shadowing
swtim = self.SWT(i_image,edges,orientation,stroke_width,angle) # one image
swtinv_im = self.SWT(inv_iim,edges,orientation,stroke_width,angle) # the inverse
swtim[np.nonzero(swtim<0)]=0
swtinv_im[np.nonzero(swtinv_im<0)]=0
swt_end = swtim
indexes = np.nonzero(swtim==0)
swt_end[indexes] = swtinv_im[indexes]
return swt_end
def cleanswt(self,image,edges):
# find connected components
labeled, nr_objects = label(image > 0)
print("Number of objects is " + str(nr_objects))
# image = binary_opening(image>0, structure=np.ones((3,3))).astype(np.int)
mask = image > image.mean()
sizes = sum(mask, labeled, range(nr_objects + 1)) # `sum` is assumed to be scipy.ndimage.sum
mask_size = sizes < 0.05*image.shape[0]
remove_pixel = mask_size[labeled]
image[remove_pixel]=0
edges[edges>0] = np.max(image)
return image+edges
def cleanswt2(self,swt,edges):
# Label the connected components of the stroke-width map and drop the small ones
mask = swt > 0
labeled, nr_objects = label(mask)
w, h = swt.shape
max_pix = (0.05 * w)
for i in range(1, nr_objects + 1): # label 0 is the background
numpix = np.sum(labeled == i)
if(numpix < max_pix):
swt[labeled == i] = 0
swt[edges > 0] = np.max(swt)
return swt
def projections(self,iswt, iorient, K, inc, aspace = False, arange = None):
if (K < 4 or K > 1024):
print('Invalid average value. Accepted values between 4 and half the size of your image. Setting default value.')
K = 12
if (inc > 90 or inc < 0):
print('Invalid Delta, must be positive and less than 90')
inc = 1
print("Starting projections")
# pad the image with zeros so we don't lose anything when we rotate.
iLength, iWidth = iswt.shape
iDiag = math.sqrt(iLength**2 + iWidth**2)
LengthPad = math.ceil(iDiag - iLength) + 1
WidthPad = math.ceil(iDiag - iWidth) + 1
padIMG = np.zeros((iLength+LengthPad, iWidth+WidthPad))
pad1 = int(math.ceil(LengthPad/2))
pad2 = int(math.ceil(LengthPad/2)+iLength)
pad3 = int(math.ceil(WidthPad/2))
pad4 = int(math.ceil(WidthPad/2)+iWidth)
padIMG[pad1:pad2, pad3:pad4] = iswt
padIMGOR = np.zeros((iLength+LengthPad, iWidth+WidthPad))
padIMGOR[pad1:pad2,pad3:pad4]= iorient
#
# loop over the number of angles, rotate 90-theta (because we can easily sum
# if we look at stuff from the top), and then add up. Don't perform any
# interpolation on the rotating.
#
# -90 and 90 are the same, we must remove 90
THETA = np.arange(-90, 90, inc) # projection angles in degrees, as an array so it can be indexed with ranges below
th = np.zeros(len(THETA))+np.inf
if(arange):
for ang in aspace:
k = ang+90
kplus = k+arange # `arange` is taken to be the angular half-width around each angle in aspace
kminus = k-arange
if(kplus>180):
kplus = 180
if(kminus<0):
kminus = 1
th[range(k,kplus)] = THETA[range(k,kplus)]
th[range(kminus,k)] = THETA[range(kminus,k)]
else:
th = THETA
th = np.array(th,dtype =np.float32)*np.pi*(1/180.0)
n = len(THETA)
PR = np.zeros((padIMG.shape[1], n))
M = padIMG > 0
iPL,iPW = padIMG.shape
center = (iPW / 2, iPL / 2) # cv2 expects the rotation center as (x, y) = (col, row)
for i in range(n):
if(th[i]!=np.inf):
final = self.oft(M,K, padIMGOR,th[i])
Mt = cv2.getRotationMatrix2D(center, -THETA[i], 1.0)
rotated = cv2.warpAffine(final, Mt, (iPW, iPL)) # dsize is (width, height)
PR[:,i] = (np.sum(rotated,axis=0))
else:
PR[:,i]=0
PR[np.nonzero(PR<0)]=0.0
PR = PR/iDiag
PR = PR*10
PR = np.multiply(PR,PR)
PR = PR*0.1
PR = PR/np.max(PR)
return PR
def oft(self,M,K,L,ang):
kernel = np.zeros((K,K))
v_cos = math.cos(ang)
v_sin = math.sin(ang)
Mval = np.cos(2*(L-ang))
count = 0
for k in range(-K//2-1,K//2+2):
ni = round(K/2+k*v_cos)
nj = round(K/2+k*v_sin)
if((ni>-1 and ni<K) and (nj>-1 and nj<K)):
kernel[ni,nj]=1
count +=1
kernel = kernel/count
cO = convolve2d(Mval, kernel, 'same')
Or = np.zeros(M.shape)
Or[np.nonzero(M)] = cO[np.nonzero(M)]
return Or
def canny(self,i_image, isigma):
image = self.gaussfilt(i_image, isigma)
Ix, Iy = self.derivative5(image)
Ix_2 = np.multiply(Ix, Ix)
Iy_2 = np.multiply(Iy, Iy)
gradient = np.sqrt(Ix_2 + Iy_2) # Gradient magnitude.
orientation = np.arctan2(-Iy, Ix) # Angles -pi to + pi.
orientation[orientation < 0] = orientation[orientation < 0] + np.pi # Map angles to 0-pi.
orientation = orientation * 180 / np.pi
return gradient, orientation
```
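The methods above form a small text-band detection pipeline: an edge map plus an orientation image feed the stroke width transform (`SWT`/`SWT_Total`), the result is cleaned of small components (`cleanswt2`), and `projections` turns the cleaned map into angular projection profiles. The sketch below shows one plausible way to chain them. It is only an illustration: the name `detector` stands for an instance of the (here unnamed) class defining these methods, the thresholds and the synthetic image are arbitrary, skimage's Canny is used for the binary edge map, and the gradient orientation from `canny` is used in place of the smoothed ridge orientation computed by the method whose tail appears at the top of this file.

```python
import numpy as np
from skimage.feature import canny as sk_canny

# `detector` is assumed to be an instance of the class defined in the file above.
# Synthetic test image: a bright bar on a dark background.
img = np.zeros((256, 256), dtype=float)
img[100:150, 60:200] = 1.0

# Binary edge map (any edge detector would do; skimage's Canny is used for illustration).
edges = sk_canny(img, sigma=1.0).astype(np.uint8)

# Gradient magnitude/orientation; orientation comes back in degrees (0-180), SWT expects radians.
gradient, orientation_deg = detector.canny(img, 1.0)
orientim = np.deg2rad(orientation_deg)

# Stroke width transform on the image and on its inverse, merged into a single map.
swt = detector.SWT_Total(img, edges, orientim, stroke_width=20)

# Remove small connected components and overlay the edges.
swt_clean = detector.cleanswt2(swt, edges)

# Angular projection profiles of the cleaned map (K = averaging window, inc = angular step in degrees).
PR = detector.projections(swt_clean, orientim, K=12, inc=1)
```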
#### File: CLEMSite_notebooks/4_correlation_strategy_LM-SEM_global/virtualGridMap.py
```python
import random
from abc import ABCMeta, abstractmethod
import numpy as np
import pandas as pd
from scipy.linalg import orth
from occupancy_map import Map,ZMap
from ptp import LocalArea,PointToPoint,matrixrank, anglebetween
from math import degrees
import json
import threading
from multiprocessing.pool import ThreadPool
from contextlib import closing
import scipy.spatial as spt
class PointType:
calibrated = "CALIBRATED" # Points that have both map coordinates
non_calibrated = "NON_CALIBRATED" # Points with map1 coordinates but not with map2.
target = "TARGET" # Points with map1 but that only can be predicted to map2.
acquired = "ACQUIRED" # Points with only map2 but with no information about map1
unknown = "NA"
class State:
"""
State is independent of PointType: the PointType is a static property that gives the
point its identity, whereas the state is something temporary that can be altered.
"""
protected = "PROTECTED" # Point has been manually overwritten and cannot be modified
blocked = "BLOCKED"
zeroed = "" # No special state
class virtualGridMap(object):
"""
A virtual map is a class that gathers all the information of the grid and tries
to predict unknown positions.
It considers two homologous maps and establishes correspondences between them.
E.g.:
- Given an LM coordinate, returns the corresponding estimate on the SEM side (not available in the LM map alone)
- Given a letter, returns the coordinates of its estimated center
- Given a coordinate, estimates the letter where we are going to land
Representation of the points
We distinguish 4 different kinds of points:
- Non calibrated NC: points coming from LM without an assigned correspondence, used for calibration
- Calibrated C: points coming from LM with the corresponding SEM coordinates, used for calibration
- Targets T: points coming from LM used for targeting
- Acquisition Acq: points acquired on the fly
Instead of saving the points in 4 different lists, all of them are stored in one array together
with the indices of each category (Ind).
That allows a point to belong to more than one category and makes it easy to introduce
new categories of points.
The map can be 2D or 3D. An illustrative example of a map_df row is sketched just below.
"""
__metaclass__ = ABCMeta
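# Illustrative example (not part of the original code) of how one row of map_df could look,
# assuming a calibrated landmark named '5A' that is known in both maps:
#   LANDMARK  TYPE        STATE  UPDATE_ORIGIN  UPDATE_DESTINY  UPDATE_TAG  COORDS_ORIGIN_X ... COORDS_DESTINY_Z
#   '5A'      CALIBRATED  ''     False          False           False       120.0           ...  0.0
# cor_df / cde_df keep the individual measurements (each with a BELIEF weight) that
# updateLandmark() averages into the single row of map_df.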
warning_transformation =""
map_lock = threading.Lock()
def __init__(self,logger, force2D =False, parent = None):
self.logger = logger
self.current_pos = "" # Landmark reference
self.last_point_added = ""
# LANDMARK
# A DataFrame is used instead of a class because porting it to a file is immediate
# and managing lists of arrays is simpler.
# In design terms, having a Landmark class would be much better, but in practice it
# slows things down. The following is a mixture between class and database, linked by the landmark ID.
self.columns = [ 'LANDMARK','TYPE', 'STATE',
'UPDATE_ORIGIN','UPDATE_DESTINY','UPDATE_TAG',
'COORDS_ORIGIN_X', 'COORDS_ORIGIN_Y', 'COORDS_ORIGIN_Z',
'COORDS_DESTINY_X', 'COORDS_DESTINY_Y', 'COORDS_DESTINY_Z']
#
self.rms_avg = []
self.rms_sd = []
self.columns_corigin = ['LANDMARK','BELIEF','COORDS_ORIGIN_X', 'COORDS_ORIGIN_Y', 'COORDS_ORIGIN_Z']
self.columns_cdestiny =['LANDMARK','BELIEF','COORDS_DESTINY_X', 'COORDS_DESTINY_Y', 'COORDS_DESTINY_Z']
if(force2D):
self.col_dim_coords_origin = ['COORDS_ORIGIN_X','COORDS_ORIGIN_Y']
self.col_dim_coords_destiny = ['COORDS_DESTINY_X','COORDS_DESTINY_Y']
else:
self.col_dim_coords_origin = ['COORDS_ORIGIN_X', 'COORDS_ORIGIN_Y','COORDS_ORIGIN_Z']
self.col_dim_coords_destiny = ['COORDS_DESTINY_X', 'COORDS_DESTINY_Y','COORDS_DESTINY_Z']
self.col_reset = ['RMS_AVG','RMS_SD']
self.map_df = pd.DataFrame(columns=self.columns)
self.cor_df = pd.DataFrame(columns=self.columns_corigin)
self.cde_df = pd.DataFrame(columns=self.columns_cdestiny)
self.list_local_area = {} # every point can have a radius of action
# List of error associated to each point
self.list_errorOrigin = {}
self.list_errorDestiny = {}
self.map_exists = False
self.map_id = "map1_map2"
self.CalibratedPtp = PointToPoint()
self.GlobalPtp = PointToPoint()
# Occupancy map
self.grid_map = Map(1)
self.orientation = 0
@staticmethod
def dist_microns(x, y):
return np.sqrt(np.sum((x - y) ** 2)) * 1000.0 ## Error in um
@staticmethod
def dist(x, y):
if (x[0] == np.inf or x[1] == np.inf or y[0] == np.inf or y[1] == np.inf):
return np.inf
else:
return np.sqrt(np.sum((x - y) ** 2))
def checkValidSystem(self, calculateOrientation = False):
# Get all calibration points
coordsOrigin, coordsDestiny, pids = self.getLandmarksByType(PointType.calibrated)
coordsDestiny = coordsDestiny[:,0:2]
if(matrixrank(coordsDestiny,1)>=2):
# TODO : calculate orientation based on data
# A = orth(coordsDestiny)
# angle = anglebetween(A[0],[1,0])
#if(calculateOrientation):
# self.orientation = np.rad2deg(angle) # this angle has to b
return True
return False # not enough independent calibration points
@staticmethod
def unit_vector(vector):
""" Returns the unit vector of the vector. """
eps = np.finfo(np.float32).eps
if (np.sum(np.linalg.norm(vector)) < eps):
return vector
return vector / np.linalg.norm(vector)
@staticmethod
def collinear(p0, p1, p2):
x1, y1 = p1[0] - p0[0], p1[1] - p0[1]
x2, y2 = p2[0] - p0[0], p2[1] - p0[1]
val = x1 * y2 - x2 * y1
return abs(val) < 1e-2
def loadMap(self,dict_map):
# Split in 3 dictionaries
stmap = dict_map['MAP']
stcor = dict_map['COR']
stcde = dict_map['CDE']
self.map_df = pd.read_json(stmap)
self.cor_df = pd.read_json(stcor)
self.cde_df = pd.read_json(stcde)
for index, row in self.map_df.iterrows():
p_id = str(row['LANDMARK'])
self.list_local_area[p_id] = LocalArea()
def isEmpty(self,arr):
arr = np.array(arr)
if not np.any(arr.shape):
return True
if(arr.size == 0):
return True
if np.any(np.isinf(arr.astype(float))):
return True
return False
def getTotalLandmarks(self):
return len(self.map_df)
def getLandmarkIds(self):
"""
Return available ids
"""
return list(self.map_df.LANDMARK);
def getCoordsFromLandmarks(self,ilids,map_value):
list_coords = []
for el in ilids:
coords = self.getLandmark(el, map_value)
if(not np.any(np.isinf(coords))):
list_coords.append(coords)
return np.array(list_coords)
def getLandmarksByType(self, type):
"""
ACK
"""
df2 = self.map_df.loc[self.map_df['TYPE'] == type]
point_ids = list(df2['LANDMARK'])
coordsOrigin = self.getCoordsFromLandmarks(point_ids,1)
coordsDestiny = self.getCoordsFromLandmarks(point_ids, 2)
return coordsOrigin,coordsDestiny,point_ids
def getLandmarkIDsByType(self, type):
"""
ACK
"""
df2 = self.map_df.loc[self.map_df['TYPE'] == type]
point_ids = list(df2['LANDMARK'])
return point_ids
def checkState(self,point_id,state):
df2 = self.map_df.loc[self.map_df['STATE'] == state] # Get all points in state
return np.any(df2['LANDMARK'].isin([point_id])); # Return true if any of the points is in the list
def isin(self,point_id):
return np.any(self.map_df['LANDMARK'].isin([point_id]));
def checkType(self,point_id,type):
df2 = self.map_df.loc[self.map_df['TYPE'] == type] # Get all points by type
return(np.any(df2['LANDMARK'].isin([point_id]))); # Return true if any of the points is in the list
def getLandmarkType(self,point_id):
df2 = self.map_df.loc[self.map_df['LANDMARK']==point_id]
flist = list(df2['TYPE'])
return flist[0]
def getLandmarkState(self,point_id):
df2 = self.map_df.loc[self.map_df['LANDMARK']==point_id]
flist = list(df2['STATE'])
return flist[0]
def setLandmarkId(self,old_id,new_id):
"""
ACK
"""
if(self.isin(old_id)):
self.map_df.loc[self.map_df['LANDMARK']==old_id,'LANDMARK'] = new_id
self.cor_df.loc[self.cor_df['LANDMARK']==old_id,'LANDMARK'] = new_id
self.cde_df.loc[self.cde_df['LANDMARK']==old_id,'LANDMARK'] = new_id
self.list_local_area[new_id] = self.list_local_area[old_id]
del self.list_local_area[old_id]
self.list_errorDestiny[new_id] = self.list_errorDestiny[old_id]
del self.list_errorDestiny[old_id]
self.list_errorOrigin[new_id] = self.list_errorOrigin[old_id]
del self.list_errorOrigin[old_id]
return "OK"
else:
return "ERROR: id not in list"
def getLandmark(self,point_id,map_value):
"""
Map value returns the coordinates : 1 for origin, 2 for destiny
"""
if(not self.isin(point_id)):
return np.array([-np.inf])
if (map_value == 1):
coords = self.map_df.loc[self.map_df['LANDMARK'] == point_id,self.col_dim_coords_origin]
coords = np.squeeze(coords.values)
return np.array(coords,dtype = np.float32)
elif (map_value == 2):
coords = self.map_df.loc[self.map_df['LANDMARK'] == point_id, self.col_dim_coords_destiny]
coords = np.squeeze(coords.values)
return np.array(coords,dtype = np.float32)
else:
self.logger.error("ERROR: In getLandmark for :" + str(point_id) + ". From " + str(self.map_id) + " Use map_value 1 to origin, 2 to destiny.")
return np.array([-np.inf])
def updateLandmarks(self):
"""
Update inner set of landmarks
:return:
"""
point_ids = self.getLandmarkIds()
for el in point_ids:
self.updateLandmark(el)
def updateLandmark(self,point_id):
"""
Map value returns the coordinates : 1 for origin, 2 for destiny
"""
if not self.cor_df['LANDMARK'].empty:
df_pid = self.cor_df.loc[self.cor_df['LANDMARK'] == point_id]
if not df_pid.empty :
if len(df_pid) == 1:
# UPDATE GENERAL LANDMARK MAP
coords = np.array(df_pid[self.col_dim_coords_origin],dtype=np.float32)[0]
self.map_df.loc[self.map_df['LANDMARK'] == point_id, self.col_dim_coords_origin] = coords[range(0, len(self.col_dim_coords_origin))]
else:
coords = self.averageLandmarkPosition(np.array(df_pid[self.col_dim_coords_origin],dtype=np.float32), np.array(df_pid['BELIEF']))
self.map_df.loc[self.map_df['LANDMARK'] == point_id,self.col_dim_coords_origin] = coords[range(0,len(self.col_dim_coords_origin))]
if not self.cde_df['LANDMARK'].empty:
df_pid = self.cde_df.loc[self.cde_df['LANDMARK'] == point_id]
if not df_pid.empty:
# UPDATE GENERAL LANDMARK MAP
if len(df_pid) == 1:
coords = np.array(df_pid[self.col_dim_coords_destiny],dtype=np.float32)[0]
self.map_df.loc[self.map_df['LANDMARK'] == point_id, self.col_dim_coords_destiny] = coords[range(0, len(self.col_dim_coords_destiny))]
else:
coords = self.averageLandmarkPosition(np.array(df_pid[self.col_dim_coords_destiny],dtype=np.float32), np.array(df_pid['BELIEF']))
self.map_df.loc[self.map_df['LANDMARK'] == point_id, self.col_dim_coords_destiny] = coords[range(0,len(self.col_dim_coords_destiny))]
def resetCoordinates(self, point_id, map_id):
"""
Set coordinates to 0
Map value returns the coordinates : 1 for origin, 2 for destiny
"""
if (not self.isin(point_id)):
return -1
if map_id == 1:
self.cor_df = self.cor_df[self.cor_df.LANDMARK != point_id]
self.addCoordsOrigin(point_id, np.zeros(len(self.col_dim_coords_origin)), 0.0)
self.list_errorOrigin[point_id] = []
self.list_local_area[point_id] = LocalArea()
if map_id == 2:
self.cde_df = self.cde_df[self.cde_df.LANDMARK != point_id]
self.addCoordsDestiny(point_id, np.zeros(len(self.col_dim_coords_destiny)), 0.0)
self.list_errorDestiny[point_id] = []
self.list_local_area[point_id] = LocalArea()
def averageLandmarkPosition(self, coords, belief, method = 'average'):
"""
Simple estimate of the landmark position: a belief-weighted average of all measured points.
:param coords: array of measured coordinates
:param belief: weight associated with each measurement
:param method: 'average' or 'max_belief'
:return: averaged coordinates
"""
if(method=='average'):
n_arr = (coords.transpose() * belief).transpose() # Multiply by weights
total_belief = np.sum(belief)
if(total_belief>0):
avg_coords = np.sum(n_arr, axis=0) / np.sum(belief)
else:
avg_coords = np.mean(coords,axis=0)
return avg_coords
elif(method =='max_belief'):
ind = np.argmax(belief) # index of the measurement with the highest belief
return coords[ind]
def getAllLandmarkCoordinates(self):
point_ids = list(self.map_df['LANDMARK'])
coords_origin = self.getCoordsFromLandmarks(point_ids, 1)
coords_destiny = self.getCoordsFromLandmarks(point_ids, 2)
return coords_origin,coords_destiny, point_ids
def getTypeIndices(self):
return list(self.map_df["TYPE"]),list(self.map_df["LANDMARK"])
def getStateIndices(self):
return list(self.map_df["STATE"]),list(self.map_df["LANDMARK"])
def getTotalCalibration(self):
"""---------------------------------------
Returns the number of calibrated points.
"""
return len(self.map_df.loc[self.map_df['TYPE'] == PointType.calibrated])
def deleteCalibrations(self):
coordsOrigin, coordsDestiny, point_ids = self.getLandmarksByType(PointType.calibrated)
are_protected = []
are_blocked = []
for el in point_ids:
if self.is_protected(el):
are_protected.append(el)
elif self.is_blocked(el):
are_blocked.append(el)
self.deleteLandmark(el,False,False)
self.CalibratedPtp.reset()
self.GlobalPtp.reset()
self.addSetPoints(coordsOrigin,[],point_ids,PointType.non_calibrated, are_protected, are_blocked)
self.updateMap()
return
def getTotalLandmarksByType(self,type):
return len(self.map_df.loc[self.map_df['TYPE'] == type])
####################################################################
def blockPoint(self,point_id):
self.changeState(point_id,State.blocked)
# self.cde_df.loc[self.cde_df['LANDMARK'] == point_id,'BELIEF'] = 0
# self.cor_df.loc[self.cor_df['LANDMARK'] == point_id, 'BELIEF'] = 0
def unblockPoint(self,point_id):
self.changeState(point_id, State.zeroed)
def changeState(self, point_id, state):
self.map_df.loc[self.map_df['LANDMARK'] == point_id, 'STATE'] = state
def changeType(self,point_id,type, updateModel = False):
self.map_df.loc[self.map_df['LANDMARK'] == point_id,'TYPE'] = type
if type == PointType.calibrated :
self.map_df.loc[self.map_df['LANDMARK'] == point_id, 'UPDATE_ORIGIN'] = False
self.map_df.loc[self.map_df['LANDMARK'] == point_id, 'UPDATE_DESTINY'] = False
self.map_df.loc[self.map_df['LANDMARK'] == point_id, 'UPDATE_TAG'] = False
elif type == PointType.target or type == PointType.non_calibrated :
self.map_df.loc[self.map_df['LANDMARK'] == point_id, 'UPDATE_ORIGIN'] = False
self.map_df.loc[self.map_df['LANDMARK'] == point_id, 'UPDATE_DESTINY'] = True
self.map_df.loc[self.map_df['LANDMARK'] == point_id, 'UPDATE_TAG'] = False
elif type == PointType.acquired:
self.map_df.loc[self.map_df['LANDMARK'] == point_id, 'UPDATE_ORIGIN'] = True
self.map_df.loc[self.map_df['LANDMARK'] == point_id, 'UPDATE_DESTINY'] = False
self.map_df.loc[self.map_df['LANDMARK'] == point_id, 'UPDATE_TAG'] = False
if updateModel:
self.updateMap(point_id)
def is_blocked(self,point_id):
return self.checkState(point_id,State.blocked)
def is_protected(self, point_id):
return self.checkState(point_id,State.protected)
def are_protected(self, point_id_list):
prot_list = []
for el in point_id_list:
if self.is_protected(el):
prot_list.append(el)
return prot_list
##################################################################################################################
#### PROCEDURES
#################################################################################################################
@abstractmethod
def ready(self):
pass
def addCoordsOrigin(self,point_id, coords, belief):
if not (self.isEmpty(coords)):
df2 = self.cor_df['LANDMARK'] == point_id
dindex = df2.index[df2 == True].tolist()
if dindex:
my_ind = dindex[0]
self.cor_df.loc[my_ind, self.col_dim_coords_origin] = coords[range(0, len(self.col_dim_coords_origin))]
else:
self.cor_df.loc[len(self.cor_df), self.col_dim_coords_origin] = coords[range(0,len(self.col_dim_coords_origin))]
my_ind = self.cor_df.index[-1]
self.cor_df.loc[my_ind, 'LANDMARK'] = point_id
if (belief > 0):
self.cor_df.loc[my_ind, 'BELIEF'] = belief
else:
if (np.isnan(self.cor_df.loc[my_ind, 'BELIEF'])):
self.cor_df.loc[my_ind, 'BELIEF'] = 0
# otherwise, leave it
def addCoordsDestiny(self,point_id, coords, belief):
if not (self.isEmpty(coords)):
df2 = self.cde_df['LANDMARK'] == point_id
dindex = df2.index[df2 == True].tolist()
if dindex:
my_ind = dindex[0]
self.cde_df.loc[my_ind, self.col_dim_coords_destiny] = coords[range(0, len(self.col_dim_coords_destiny))]
else:
self.cde_df.loc[len(self.cde_df), self.col_dim_coords_destiny] = coords[range(0,len(self.col_dim_coords_destiny))]
my_ind = self.cde_df.index[-1]
self.cde_df.loc[my_ind, 'LANDMARK'] = point_id
if (belief > 0):
self.cde_df.loc[my_ind, 'BELIEF'] = belief
else:
if (np.isnan(self.cde_df.loc[my_ind, 'BELIEF'])):
self.cde_df.loc[my_ind, 'BELIEF'] = 0
# otherwise, leave it
def addPoint(self,coords_origin,coords_destiny,point_type,point_id, belief = [1.0,1.0], updateModel=True):
"""
ACK
Adds a point to the map.
We supply:
coordinates of origin, coordinates of destiny, name
coordinates of origin, coordinates of destiny, None -> autotag created (or temp_tag waiting to be updated)
corigin, cdestiny, name calibrated
corigin, [ temp], name non-calibrated, target
[],cdestiny,name acquired
[],[], name Not accepted
"""
coords_origin = np.array(coords_origin)
coords_destiny = np.array(coords_destiny)
if(coords_destiny.size == 0 and coords_origin.size == 0):
self.logger.info("No data")
return -1
if(not belief):
belief = [1.0,1.0]
## IMPORTANT : THIS SEQUENCE HAS CONDITIONAL DEPENDENCIES
## PHASE 1 : CONSISTENCY
## ACQUIRED POINTS
## DEFINITION : landmarks acquired on the destiny coordinates
## normally they don't have origin coordinates
## name is usually generated
if(point_type == PointType.acquired):
# If I do not have destiny coordinates... ERROR
if (self.isEmpty(coords_destiny)):
self.logger.info("From "+str(self.map_id)+": Trying to add ACQUIRED point without coordinates of destiny!!")
return -1
# I don't have origin coordinates, then I have to generate them
if(self.isEmpty(coords_origin)):
coords_origin = self.point_to_Origin(coords_destiny) # Generate origin coordinates
# If I have no ID, I will have to generate one
if(not point_id):
point_id = self.getAutoTag(coords_origin)
## NON-CALIBRATED OR TARGET
elif (point_type == PointType.non_calibrated or point_type == PointType.target):
# If I do not have origin coordinates... ERROR
if (self.isEmpty(coords_origin)):
self.logger.info("From " + str(self.map_id) + ": Trying to add NON_CAL or TARGET point without coordinates of origin!!")
return -1
# I don't have destiny coordinates, then I have to generate them
if (self.isEmpty(coords_destiny)):
coords_destiny = self.point_to_Destiny(coords_origin) # Generate origin coordinates
# If I have no ID, I will have to generate one
if (not point_id):
point_id = self.getAutoTag(coords_destiny)
# CALIBRATED
elif(point_type == PointType.calibrated):
if (self.isEmpty(coords_destiny)):
self.logger.info("From " + str(self.map_id) + ": Trying to add CALIBRATION point without coordinates of destiny!!")
return -1
if (not point_id):
if(self.isEmpty(coords_origin)):
return -1
point_id = self.getAutoTag(coords_destiny)
else:
if(self.isEmpty(coords_origin)):
coords_origin = self.getLandmark(point_id,1)
########## Now we check if the landmark is here already #######################
## Are we repeating the landmark ?
if(self.isin(point_id)):
# Is this a type change?
### SAME TYPE
if(self.checkType(point_id,point_type)): # IF has the same type, we have to add it (it is a REAL MEASURE of the point)
self.addCoordsOrigin(point_id,coords_origin,belief[0])
self.addCoordsDestiny(point_id,coords_destiny,belief[1])
if(updateModel):
self.updateMap(point_id)
self.last_point_added = point_id
return point_id
else:
## PHASE 2 : We have to check types and upgrade them accordingly
### non_calibrated -> calibrated
### This code is heinous. It has to be refactored.
###
old_type = self.map_df.loc[self.map_df['LANDMARK'] == point_id, 'TYPE']
old_type = old_type.iloc[0]
self._changeTypeAndAdd(old_type,point_type,point_id,coords_origin,coords_destiny, belief, updateModel)
else: # NORMAL new acquisition
my_ind = len(self.map_df)
self.map_df.loc[my_ind, :] = 0
self.map_df.loc[my_ind, "LANDMARK"] = point_id
self.addCoordsOrigin(point_id, coords_origin, belief[0])
self.addCoordsDestiny(point_id, coords_destiny, belief[1])
if (point_type == PointType.calibrated):
self.map_df.loc[my_ind, "TYPE"] = PointType.calibrated
self.map_df.loc[my_ind, "UPDATE_ORIGIN"] = False
self.map_df.loc[my_ind, "UPDATE_DESTINY"] = False
self.map_df.loc[my_ind, "UPDATE_TAG"] = False
elif(point_type==PointType.non_calibrated):
self.map_df.loc[my_ind, "TYPE"] = PointType.non_calibrated
self.map_df.loc[my_ind, "UPDATE_ORIGIN"] = False
self.map_df.loc[my_ind, "UPDATE_DESTINY"] = True
if ("NO_ID" in point_id):
self.map_df.loc[my_ind, "UPDATE_TAG"] = True
else:
self.map_df.loc[my_ind, "UPDATE_TAG"] = False
elif(point_type==PointType.target):
self.map_df.loc[my_ind, "TYPE"] = PointType.target
self.map_df.loc[my_ind, "UPDATE_ORIGIN"] = False
self.map_df.loc[my_ind, "UPDATE_DESTINY"] = True
if ("NO_ID" in point_id):
self.map_df.loc[my_ind, "UPDATE_TAG"] = True
else:
self.map_df.loc[my_ind, "UPDATE_TAG"] = False
elif (point_type == PointType.acquired):
self.map_df.loc[my_ind, "TYPE"] = PointType.acquired
self.map_df.loc[my_ind, "UPDATE_ORIGIN"] = True
self.map_df.loc[my_ind, "UPDATE_DESTINY"] = False
if ("NO_ID" in point_id):
self.map_df.loc[my_ind, "UPDATE_TAG"] = True
else:
self.map_df.loc[my_ind, "UPDATE_TAG"] = False
else:
self.logger.error("From " + str(self.map_id) + ":ERROR, type of point not found.")
return
if (self.isEmpty(coords_origin)): # Not enough info to generate them
self.map_df.loc[my_ind, "UPDATE_ORIGIN"] = True
if (self.isEmpty(coords_destiny)): # Not enough info to generate them
self.map_df.loc[my_ind, "UPDATE_DESTINY"] = True
self.list_errorOrigin[point_id] = []
self.list_errorDestiny[point_id] = []
self.map_df.loc[my_ind, "RMS_AVG"] = 0
self.map_df.loc[my_ind, "RMS_SD"] = 0
self.list_local_area[point_id] = LocalArea()
self.updateLandmark(point_id)
self.last_point_added = point_id
return point_id
def _changeTypeAndAdd(self,old_type, point_type, point_id, coords_origin, coords_destiny, belief, updateModel = False):
"""
First refactoring step.
:param old_type:
:param point_type:
:param point_id:
:param coords_origin:
:param coords_destiny:
:param belief:
:param updateModel:
:return:
"""
if (old_type == PointType.calibrated and point_type == PointType.non_calibrated): # Update coordinates origin, that's all
self.addCoordsOrigin(point_id, coords_origin, belief[0])
else:
if (old_type == PointType.non_calibrated and point_type == PointType.calibrated):
self.addCoordsOrigin(point_id, coords_origin, belief[0])
self.addCoordsDestiny(point_id, coords_destiny, belief[1])
self.changeType(point_id, PointType.calibrated, updateModel)
elif (old_type == PointType.calibrated) and (point_type == PointType.non_calibrated):
self.addCoordsOrigin(point_id, coords_origin, belief[0])
self.addCoordsDestiny(point_id, coords_destiny, belief[1])
self.changeType(point_id, PointType.non_calibrated)
elif (old_type == PointType.acquired) and (point_type == PointType.non_calibrated):
self.addCoordsOrigin(point_id, coords_origin, belief[0])
self.changeType(point_id, PointType.calibrated, updateModel)
elif (old_type == PointType.non_calibrated) and (point_type == PointType.acquired):
self.addCoordsDestiny(point_id, coords_destiny, belief[1])
self.changeType(point_id, PointType.calibrated, updateModel)
elif (old_type == PointType.target) and (point_type == PointType.non_calibrated):
self.addCoordsOrigin(point_id, coords_origin, belief[0])
self.changeType(point_id, PointType.non_calibrated)
elif (old_type == PointType.non_calibrated) and (point_type == PointType.target):
self.addCoordsOrigin(point_id, coords_origin, belief[0])
self.changeType(point_id, PointType.target)
elif (old_type == PointType.calibrated) and (point_type == PointType.target):
self.addCoordsOrigin(point_id, coords_origin, belief[0])
self.changeType(point_id, PointType.target)
elif (old_type == PointType.acquired) and (point_type == PointType.target):
self.addCoordsOrigin(point_id, coords_origin, belief[0])
self.changeType(point_id, PointType.target)
elif (old_type == PointType.target) and (point_type == PointType.acquired):
self.addCoordsDestiny(point_id, coords_destiny, belief[1])
self.changeType(point_id, PointType.target)
elif (old_type == PointType.calibrated) and (point_type == PointType.acquired):
self.addCoordsDestiny(point_id, coords_destiny, belief[1])
self.changeType(point_id, PointType.calibrated, updateModel)
elif (old_type == PointType.target) and (point_type == PointType.calibrated):
self.addCoordsOrigin(point_id, coords_origin, belief[0])
self.changeType(point_id, PointType.target)
else:
self.logger.info(" Old type :" + str(old_type) + " New type :" + str(point_type))
self.logger.info("From " + str(self.map_id) + ": Change of type not supported for " + point_id + ".")
return -1
return
def addSetPoints(self, pointsOrigin, pointsDestiny, names, point_type, protected_list = None, blocked_list = None, update_origin=False, update_destiny=True, updateModel=True):
pointsDestiny = np.array(pointsDestiny, dtype=np.float32)
pointsOrigin = np.array(pointsOrigin, dtype=np.float32)
if (not bool(pointsDestiny.shape) or pointsDestiny.size == 0):
s = (pointsOrigin.shape)
pointsDestiny = np.empty(s)
pointsDestiny.fill(np.inf)
if (not bool(pointsOrigin.shape) or pointsOrigin.size == 0):
s = (pointsDestiny.shape)
pointsOrigin = np.empty(s)
pointsOrigin.fill(np.inf)
if len(pointsOrigin.shape)<1: # Failsafe, should be changed by exception
return
s,d = pointsOrigin.shape
for i in range(s):
coords_lm = pointsOrigin[i]
coords_sem = pointsDestiny[i]
nid = names[i]
self.addPoint(coords_lm, coords_sem, point_type, nid, [1.,1.], False)
if protected_list and nid in protected_list:
self.changeState(nid,State.protected)
if blocked_list and nid in blocked_list:
self.changeState(nid,State.blocked)
self.updateOrigin(nid,update_origin)
self.updateDestiny(nid,update_destiny)
if(updateModel):
self.updateMap()
def updateOrigin(self,point_id, bool_up):
self.map_df.loc[self.map_df["LANDMARK"]==point_id, "UPDATE_ORIGIN"] = bool_up
def updateDestiny(self, point_id, bool_up):
self.map_df.loc[self.map_df["LANDMARK"] == point_id, "UPDATE_DESTINY"] = bool_up
def replaceLandmark(self, coords_origin, coords_destiny, belief, point_id):
"""
Keeps the identity of the point (type), but updates values, erasing EVERYTHING
:param coords_origin:
:param coords_destiny:
:param point_id:
:return:
"""
if (not self.isin(point_id)):
return -1
if (not self.isEmpty(coords_origin)):
self.cor_df = self.cor_df[self.cor_df.LANDMARK != point_id]
self.addCoordsOrigin(point_id,coords_origin,belief[0])
self.list_errorOrigin[point_id] = []
if (not self.isEmpty(coords_destiny)):
self.cde_df = self.cde_df[self.cde_df.LANDMARK != point_id]
self.addCoordsDestiny(point_id, coords_destiny,belief[1])
self.list_errorDestiny[point_id] = []
#self.map_df.loc[self.map_df['LANDMARK'] == point_id, self.col_reset] = 0
self.list_local_area[point_id] = LocalArea()
def updateLastLandmark(self, coords_origin, coords_destiny, point_id, protect = False, which_up = 0):
if (not self.isin(point_id)):
return False
if which_up == 0 :
up_or = True
up_dest = True
elif which_up == 1:
up_or = True
up_dest = False
elif which_up == 2 :
up_or = False
up_dest = True
else:
return False
df2 = self.cde_df['LANDMARK'] == point_id
dindex = df2.index[df2 == True].tolist()
if not dindex:
self.logger.info("Coordinates not found for "+point_id+". Give coordinates of reference first before securing.")
return False
else:
dindex = dindex[0]
if np.any(np.array(coords_origin).shape) and up_or :
distance = virtualGridMap.dist(np.array([self.cor_df.COORDS_ORIGIN_X[dindex], self.cor_df.COORDS_ORIGIN_Y[dindex]]),
np.array([coords_origin[0], coords_origin[1]]))
self.logger.info( "From " + str(self.map_id) + ":Point " + point_id + " corrected. Difference is:" + str(distance))
self.addCoordsOrigin(point_id, coords_origin, 1.0)
if np.any(np.array(coords_destiny).shape) and up_dest:
distance = virtualGridMap.dist(
np.array([self.cde_df.COORDS_DESTINY_X[dindex], self.cde_df.COORDS_DESTINY_Y[dindex]]),
np.array([coords_destiny[0], coords_destiny[1]]))
self.logger.info(
"From " + str(self.map_id) + ":Point " + point_id + " corrected. Difference is:" + str(distance))
self.addCoordsDestiny(point_id, coords_destiny, 1.0)
self.list_local_area[point_id] = LocalArea()
if protect :
self.changeState(point_id,State.protected)
self.map_df.loc[dindex, "UPDATE_ORIGIN"] = False
self.map_df.loc[dindex, "UPDATE_DESTINY"] = False
self.map_df.loc[dindex,"UPDATE_TAG"] = False
self.updateLandmark(point_id)
return True
def getMeasuresPoint(self,point_id,map_value, removeInfs = True):
if(not self.isin(point_id)):
return []
if (map_value == 1):
data_df = self.cor_df.loc[self.cor_df['LANDMARK'] == point_id]
if(removeInfs):
data_df = data_df.replace([np.inf, -np.inf], np.nan).dropna(subset=self.col_dim_coords_origin, how="all")
return np.array(data_df[self.col_dim_coords_origin])
elif (map_value == 2):
data_df = self.cde_df.loc[self.cde_df['LANDMARK'] == point_id]
if (removeInfs):
data_df = data_df.replace([np.inf, -np.inf], np.nan).dropna(subset=self.col_dim_coords_destiny, how="all")
return np.array(data_df[self.col_dim_coords_destiny])
else:
self.logger.error("From " + str(self.map_id) + ":ERROR: Use map_value 1 to origin, 2 to destiny.")
return []
def setMeasuresPoint(self,point_id,map_value,measures, beliefs =[] ):
if (not self.isin(point_id)):
self.logger.error("From " + str(self.map_id) + ":ERROR: you need to provide a name for the point. Not valid:" + str(point_id))
return
if (map_value == 1):
self.cor_df.loc[self.cor_df['LANDMARK'] == point_id, self.col_dim_coords_origin] = measures[range(0,len(self.col_dim_coords_origin))]
if (not beliefs):
self.cor_df.loc[self.cor_df['LANDMARK'] == point_id, "BELIEF"] = 1.0
else:
self.cor_df.loc[self.cor_df['LANDMARK'] == point_id, "BELIEF"] = beliefs
elif (map_value == 2):
self.cde_df.loc[self.cde_df['LANDMARK'] == point_id,self.col_dim_coords_destiny] = measures[range(0,len(self.col_dim_coords_destiny))]
if(not beliefs):
self.cde_df.loc[self.cde_df['LANDMARK'] == point_id, "BELIEF"] = 1.0
else:
self.cde_df.loc[self.cde_df['LANDMARK'] == point_id, "BELIEF"] = beliefs
else:
self.logger.error("From " + str(self.map_id) + ":ERROR: Use map_value 1 to origin, 2 to destiny.")
return
def deleteLandmark(self,point_id, updateMap = True, verbose = True):
"""
Deletes a point from the map. If updateMap is False, remember to call
vmap.updateMap() afterwards.
"""
# Do we have a name for the point?
if(not self.isin(point_id)):
self.logger.error("From "+str(self.map_id)+":ERROR: you need to provide a name for the point. Not valid:"+str(point_id))
return
self.map_df = self.map_df[self.map_df.LANDMARK != point_id]
if not self.cor_df.LANDMARK.empty:
self.cor_df = self.cor_df[self.cor_df.LANDMARK != point_id]
if not self.cde_df.LANDMARK.empty:
self.cde_df = self.cde_df[self.cde_df.LANDMARK != point_id]
self.list_errorOrigin.pop(point_id,0)
self.list_errorDestiny.pop(point_id,0)
self.list_local_area.pop(point_id,0)
##
if(self.last_point_added == point_id and len(self.map_df)>0):
self.last_point_added = self.map_df.LANDMARK[0]
self.map_df = self.map_df.reset_index(drop=True)
self.cor_df = self.cor_df.reset_index(drop=True)
self.cde_df = self.cde_df.reset_index(drop=True)
if updateMap:
self.updateMap(point_id)
if verbose:
self.logger.info("From "+str(self.map_id)+":Deleted point :"+point_id)
return point_id
def updateTransformPoint(self, pointid):
ptype = self.getLandmarkType(pointid)
# I have to update closest neighbors
coords_ref = self.getLandmark(pointid,1)
neightags = self.GlobalPtp.getNeighs(coords_ref, k=20) # Update closest 20 neighbors
for neigh in neightags :
if ptype == PointType.acquired:
coords_p = self.getLandmark(pointid, 2)
elif ptype in [PointType.non_calibrated, PointType.target, PointType.unknown]:
coords_p = self.getLandmark(pointid, 1)
else:
continue
self.CalibratedPtp.updateLocalArea(neigh, coords_p)
return
def updateTransform(self):
"""
ACK
The transformation between canvas points and its inverse must be stored in the canvas.
It also calculates H_inv, given a coordinate from the destiny (like a user click in the canvas).
With 1 point, nothing is known and the map relies on prior information.
With 3 or more points, least squares can be used; we have to avoid the risk of the points being collinear.
With 6 or more, a homography with RANSAC works better.
With 10 or more, a local homography is used, taking the 6 closest points or more within a specified radius.
This uses the ptp (point to point) protocol, taking the points closest to the selected region and
computing the transform locally in that region. An illustrative sketch of this selection appears in the comments a few lines below.
"""
coordCalibrationsOrigin, coordCalibrationsDestiny, namesCalibration = self.getLandmarksByType(PointType.calibrated)
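# Illustrative sketch only (an assumption about how such a selection could look; the actual
# estimation is delegated to PointToPoint.updateGlobal below):
#   n = len(origin)
#   if n < 3:   keep the prior / previous transform
#   elif n < 6: H, _ = cv2.estimateAffine2D(origin, destiny)              # least squares fit
#   else:       H, _ = cv2.findHomography(origin, destiny, cv2.RANSAC)    # robust homography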
total_calibration_points = len(coordCalibrationsOrigin)
if (total_calibration_points < 2):
self.logger.info("From " + str(self.map_id) + ":Not enough points to update.")
return
self.logger.info("From "+str(self.map_id)+":Updating transform with " + str(total_calibration_points) + " reference points")
origin = np.zeros((total_calibration_points, 2), dtype=np.float32)
destiny = np.zeros((total_calibration_points, 2), dtype=np.float32)
for i in range(total_calibration_points):
origin[i, 0] = coordCalibrationsOrigin[i][0]
origin[i, 1] = coordCalibrationsOrigin[i][1]
destiny[i, 0] = coordCalibrationsDestiny[i][0]
destiny[i, 1] = coordCalibrationsDestiny[i][1]
self.CalibratedPtp.updateGlobal(origin,destiny,namesCalibration)
coordT, _, namesTarget = self.getLandmarksByType(PointType.target)
self.processLocalArea(coordT,namesTarget)
coordNC, _, namesNonCal = self.getLandmarksByType(PointType.non_calibrated)
self.processLocalArea(coordNC, namesNonCal)
_, coordACQ, namesAcq = self.getLandmarksByType(PointType.acquired)
self.processLocalArea(coordACQ, namesAcq)
namesAll = self.getLandmarkIds()
originAll = self.getCoordsFromLandmarks(namesAll,1)
destinyAll = self.getCoordsFromLandmarks(namesAll,2)
self.GlobalPtp.updateGlobal(originAll, destinyAll, namesAll)
def processLocalArea(self, coords_p, namePoints):
if (len(namePoints) > 0): # if there are targets, we go target centric
local_areas = [(self.list_local_area[nameT], coords_p[i]) for i, nameT in enumerate(namePoints)]
with closing(ThreadPool(8)) as pool:
pool.starmap(self.CalibratedPtp.updateLocalArea, local_areas)
def getRMSError(self):
if(self.rms_avg):
return (self.rms_avg[-1],self.rms_sd[-1])
else:
return ("NA","NA")
def getErrorList(self,point_id):
error_retro = [np.inf]
if point_id in self.list_local_area.keys():
error_retro = self.list_local_area[point_id].getErrorRetro()
return error_retro
@abstractmethod
def point_to_Origin(self,coord_destiny, point_id=""):
pass
@abstractmethod
def point_to_Destiny(self,coord_origin,point_id=""):
pass
@abstractmethod
def getAutoTag(self, coord_):
pass
def getRadius(self,p_id,map_value):
if(map_value == 1):
return self.list_local_area[p_id].radius_origin
elif(map_value == 2):
return self.list_local_area[p_id].radius_destiny
else:
return
def setRadius(self,p_id,radius,map_value):
list_ids = self.getLandmarkIds()
if p_id not in list_ids:
return 0
#######################################
if(map_value == 1):
self.list_local_area[p_id].radius_origin = radius
self.list_local_area[p_id].radius_destiny = self.conversion_ratio*radius
elif(map_value == 2):
self.list_local_area[p_id].radius_origin = radius*(1/self.conversion_ratio)
self.list_local_area[p_id].radius_destiny = radius
else:
return 0
self.CalibratedPtp.updateLocalArea(self.list_local_area[p_id])
return self.conversion_ratio*radius
def getRadii(self,map_value):
list_radius = []
if(map_value == 1):
for el in self.list_local_area.values():
list_radius.append(el.radius_origin)
return np.array(list_radius)
else:
for el in self.list_local_area.values():
list_radius.append(el.radius_destiny)
return np.array(list_radius)
def applyTransform(self,M, map_value):
coordOrigin, coordDestiny, pointids = self.getAllLandmarkCoordinates()
if map_value == 1 :
homo = (np.asmatrix(coordOrigin) + np.array([0., 0., 1.]))
res = np.dot(M, homo.T)
aux_df = pd.DataFrame(columns=self.columns_corigin)
aux_df['LANDMARK'] = pointids
aux_df.loc[:, 'COORDS_ORIGIN_X'] = np.squeeze(np.asarray(res[0]))
aux_df.loc[:, 'COORDS_ORIGIN_Y'] = np.squeeze(np.asarray(res[1]))
aux_df.loc[:, 'COORDS_ORIGIN_Z'] = 0.0
aux_df.loc[:, 'BELIEF'] = 0.0
self.cor_df = aux_df
elif map_value == 2 :
homo = (np.asmatrix(coordDestiny) + np.array([0., 0., 1.]))
res = np.dot(M, homo.T)
aux_df = pd.DataFrame(columns=self.columns_cdestiny)
aux_df['LANDMARK'] = pointids
aux_df.loc[:, 'COORDS_DESTINY_X'] = np.squeeze(np.asarray(res[0]))
aux_df.loc[:, 'COORDS_DESTINY_Y'] = np.squeeze(np.asarray(res[1]))
aux_df.loc[:, 'COORDS_DESTINY_Z'] = 0.0
aux_df.loc[:, 'BELIEF'] = 0.0
self.cde_df = aux_df
else:
raise ValueError
self.updateMap()
@abstractmethod
def applyTransformToPoint(self,M, coords):
"""
Maps a point from one coordinates to others using M
"""
pass
def checkCoordinateLimits(self,coord):
"""
:param coord:
:return:
"""
raise NotImplementedError()
def getAutoTag(self, coords):
if (coords[0] == -1 or np.isinf(coords[0])):
tag = random.randint(1, 100000) # Random integer from 1 to 100000, endpoints included
point_id = "NO_ID_" + str(tag)
list_ids = self.getLandmarkIds()
if point_id in list_ids:
rval = random.randint(2, 10) * 100000
tag = random.randint(100000, rval)
point_id = "NO_ID_" + str(tag)
return point_id
cx = round(coords[0])
cy = round(coords[1])
point_id = self.grid_map.getTag(cx, cy)
if(self.isin(point_id)):
# Has numbers?
if ("_n_" in point_id):
indx = point_id.find("_n_")
seq = point_id[indx + 3:]
point_id = point_id[:indx + 3] + str(int(seq) + 1)
else:
point_id = point_id + "_n_1"
return point_id
def updateMap(self, pointid = None):
if pointid:
self.updateLandmark(pointid)
self.updateTransformPoint(pointid)
else:
self.updateLandmarks()
self.updateTransform()
update = False
# for each point that coordinates must be updated
if(np.any(self.CalibratedPtp.Hg_inv)):
df_to_up = self.map_df.loc[self.map_df["UPDATE_DESTINY"]]
listids = df_to_up["LANDMARK"]
for el in listids:
coord_origin = df_to_up.loc[df_to_up["LANDMARK"]==el,self.col_dim_coords_origin]
n_coord = self.point_to_Destiny(np.squeeze(coord_origin.values),el)
self.addCoordsDestiny(el, n_coord, 0)
if(np.any(df_to_up.loc[df_to_up["LANDMARK"] == el, "UPDATE_TAG"])):
self.map_df.loc[self.map_df["LANDMARK"] == el, "UPDATE_TAG"] = False
tag = self.getAutoTag(n_coord)
self.map_df.loc[self.map_df["LANDMARK"] == el,'LANDMARK'] = tag
self.cor_df.loc[self.cor_df["LANDMARK"] == el,'LANDMARK'] = tag
self.cde_df.loc[self.cde_df["LANDMARK"] == el,'LANDMARK'] = tag
update = True
if(np.any(self.CalibratedPtp.Hg)):
df_to_up = self.map_df.loc[self.map_df["UPDATE_ORIGIN"]]
listids = df_to_up["LANDMARK"]
for el in listids:
coord_destiny = df_to_up.loc[df_to_up["LANDMARK"] == el, self.col_dim_coords_destiny]
n_coord = self.point_to_Origin(np.squeeze(coord_destiny.values),el)
self.addCoordsOrigin(el,n_coord, 0)
if(np.any(df_to_up.loc[df_to_up["LANDMARK"] == el, "UPDATE_TAG"])):
self.map_df.loc[self.map_df["LANDMARK"] == el, "UPDATE_TAG"] = False
tag = self.getAutoTag(n_coord)
self.map_df.loc[self.map_df["LANDMARK"] == el,'LANDMARK'] = tag
self.cor_df.loc[self.cor_df["LANDMARK"] == el,'LANDMARK'] = tag
self.cde_df.loc[self.cde_df["LANDMARK"] == el,'LANDMARK'] = tag
update = True
if(update):
self.updateLandmarks()
def to_dict(self):
self.updateMap()
map_dict = self.map_df.to_json()
cor_dict = self.cor_df.to_json()
cde_dict = self.cde_df.to_json()
f_dict = {}
f_dict["MAP"] = map_dict
f_dict["COR"] = cor_dict
f_dict["CDE"] = cde_dict
f_dict["MAP_ID"] = self.map_id
return f_dict
def getLocalArea(self,pid):
return self.list_local_area[pid]
def getNN(self,coords,map_id,k, types = None):
if(not types):
return self.CalibratedPtp.getNeighs(coords,0,k, map_id)
else:
# We have to get all data based on types
all_data = []
all_ids = []
for el in types:
coordOrigin, coordDestiny, ids = self.getLandmarksByType(el)
if(map_id==1):
m_coords = coordOrigin[:,0:len(coords)]
else:
m_coords = coordDestiny[:,0:len(coords)]
if(np.any(np.isinf(m_coords)) or np.any(np.isnan(m_coords))):
continue
else:
all_data.append(m_coords)
all_ids = all_ids + ids
all_data = np.vstack(all_data)
if(not np.any(all_data)):
return [-np.inf,-np.inf]
tree = spt.KDTree(all_data)
dists,inds = tree.query(coords, k)
# If there is no more points it returns inf, and this has to be removed
to_del = np.where(np.isinf(dists))
dists = np.delete(dists,to_del)
inds = np.delete(inds,to_del)
all_ids = np.array(all_ids)
return all_data[inds],list(all_ids[inds]),dists
############################################################################################
#
# LM
#
#############################################################################################
class virtualGridMapLM(virtualGridMap):
"""
In LM, ORIGIN will be the Canvas
DESTINY will be the LM
i.e. map Canvas_LM
LM and Canvas maps adjust to the specific grid pattern we provide, in our case
a Mattek grid dish.
It is a 2D Map
"""
def __init__(self,logger):
super(virtualGridMapLM, self).__init__(logger,force2D=True)
def ready(self):
return (len(self.getLandmarkIDsByType(PointType.calibrated))>2)
def applyTransformToPoint(self, M, coords_origin):
"""
Maps a point from the origin to the destiny origin of coordinates
The transformation matrix M is provided and only works for 2D points
"""
x = coords_origin[0]
y = coords_origin[1]
if M is not None:
tmp = np.float32([x, y, 1.0])
trh = np.dot(M, tmp.transpose()) # trh = self.H.Transform(tmp.transpose())
# a = np.array([point],dtype='float32')
# a = np.array([a])
# pointOut= cv2.perspectiveTransform(a,ret)
trh /= trh[2]
# ERROR checking
if (trh[0] < 0 or trh[1] < 0):
self.logger.warning("[%s, %s]", trh[0], trh[1])
self.logger.warning("From "+str(self.map_id)+":ERROR: negative coordinates")
if (trh[0] < 0):
trh[0] = 0.0
if (trh[1] < 0):
trh[1] = 0.0
return (trh[0:2])
else:
return (np.array([-1, -1]))
def point_to_Destiny(self, coords_origin,point_id =""):
"""
Maps a point from the origin to the destiny origin of coordinates
"""
x = coords_origin[0]
y = coords_origin[1]
if self.CalibratedPtp.Hg is not None:
tmp = np.float32([x,y,1.0])
neighs,_ = self.CalibratedPtp._getNeighs([x,y],map_id=1)
Hl,_ = self.CalibratedPtp.getLocalTransform(neighs)
trh = np.dot(Hl, tmp.transpose()) # trh = self.H.Transform(tmp.transpose())
# a = np.array([point],dtype='float32')
# a = np.array([a])
# pointOut= cv2.perspectiveTransform(a,ret)
trh /= trh[2]
return(trh[0:2])
else:
return(np.array([-np.inf,-np.inf]))
def point_to_Origin(self,coords_destiny,point_id =""):
"""
Maps a point from the destiny to the origin of coordinates
"""
x = coords_destiny[0]
y = coords_destiny[1]
if self.CalibratedPtp.Hg_inv is not None:
tmp = np.float32([x,y,1.0])
neighs,_ = self.CalibratedPtp._getNeighs([x,y],map_id = 2)
Hlinv,_ = self.CalibratedPtp.getLocalTransform(neighs,inverse=True)
trh = np.dot(Hlinv, tmp.transpose()) # trh = self.H.Transform(tmp.transpose())
# a = np.array([point],dtype='float32')
# a = np.array([a])
# pointOut= cv2.perspectiveTransform(a,ret)
trh /= trh[2]
return(trh[0:2])
else:
return(np.array([-1,-1]))
def getAutoTag(self,coords_destiny):
if(coords_destiny[0]==-1):
tag = random.randint(1, 100000) # Random integer from 1 to 100000, endpoints included
point_id = "NO_ID_"+str(tag)
if(self.isin(point_id)):
rval = random.randint(2,10)*100000
tag = random.randint(100000,rval)
point_id = "NO_ID_"+str(tag)
return point_id
cx = round(coords_destiny[0])
cy = round(coords_destiny[1])
point_id = self.grid_map.getTag(cx,cy)
if(self.isin(point_id)):
# Has numbers?
if("_n_" in point_id):
indx = point_id.find("_n_")
seq = point_id[indx+3:]
point_id = point_id[:indx+3] + "%08d" % (int(seq) + 1)
else:
point_id = point_id+"_n_1"
return point_id
###############################################################################
######################### MATTEK DISH specific #################################
def find_closest_letter(self, coords, map_value):
"""
Given a set of 2D coordinates in the plane, it gives you back the closest landmark
0 Canvas 1 LM
Assuming Canvas_LM Map
"""
return self.grid_map.find_closest_letter(coords,map_value)
def find_square_letter(self, coords, map_value, do_round=False):
"""
Given a set of 2D coordinates in the plane, it gives you back the closest landmark
0 Canvas 1 LM
Assuming Canvas_LM Map
"""
ncoords = []
letter = ''
if (map_value == 1):
ncoords = self.point_to_Origin(coords)
if(do_round == True):
ncoords = np.round(ncoords)
else:
ncoords = coords
return self.grid_map.find_square_letter(ncoords)
# def createVirtualGraph(self,tags,datalm,datamap,points_list,orientation):
# self.grid_map.populateGraph(tags,datalm,datamap,points_list,orientation)
# self.addSetPoints(self.grid_map.map_coordinates_origin.values(),self.grid_map.map_coordinates_destiny.values(),self.grid_map.map_coordinates_origin.keys(),'CALIBRATED')
def getLetterCoordinates(self, letter, map):
coords = []
try:
if(map == 0):
return self.grid_map.map_coordinates_origin[letter]
else:
return self.grid_map.map_coordinates_destiny[letter]
except KeyError:
print('Letter ' + str(letter) + ' does not exist.')
return ''
########################################################################################
#
# SEM
#
#####################################################################################
class virtualGridMapSEM(virtualGridMap):
def __init__(self,logger):
super(virtualGridMapSEM, self).__init__(logger)
self.map = Map(scale=1e-3)
self.zmap = ZMap()
def ready(self):
return (len(self.getLandmarkIDsByType(PointType.calibrated)) > 2)
def applyTransformToPoint(self, M, coords_origin):
"""
Maps a point from the origin to the destiny origin of coordinates
"""
x = coords_origin[0]
y = coords_origin[1]
if M is not None:
tmp = np.float32([x, y, 1.0])
trh = np.dot(M, tmp.transpose()) # trh = self.H.Transform(tmp.transpose())
# a = np.array([point],dtype='float32')
# a = np.array([a])
# pointOut= cv2.perspectiveTransform(a,ret)
if(M.shape[0]==3): # Perspective Transform, but not affine...
trh /= trh[2]
# ERROR checking
if (trh[0] < 0 or trh[1] < 0):
self.logger.info("[%s, %s]", trh[0], trh[1])
self.logger.info("From "+str(self.map_id)+":ERROR: negative coordinates")
if (trh[0] < 0):
trh[0] = 0.0
if (trh[1] < 0):
trh[1] = 0.0
return np.array([trh[0], trh[1], 0.0])
else:
return (np.array([-1, -1, -1]))
def point_to_Destiny(self, coords_origin, point_id=""):
"""
Maps a point from the origin to the destiny origin of coordinates
"""
x = coords_origin[0]
y = coords_origin[1]
if self.CalibratedPtp.Hg is not None:
tmp = np.float32([x, y, 1.0])
if (self.isin(point_id)):
H = self.list_local_area[point_id].getTransform()
if H is not None:
trh = np.dot(H, tmp.transpose())
else:
trh = np.dot(self.CalibratedPtp.Hg, tmp.transpose())
else:
trh = np.dot(self.CalibratedPtp.Hg, tmp.transpose()) # trh = self.H.Transform(tmp.transpose())
# a = np.array([point],dtype='float32')
# a = np.array([a])
# pointOut= cv2.perspectiveTransform(a,ret)
trh /= trh[2]
return np.array([trh[0],trh[1],0.0])
else:
return (np.array([-np.inf, -np.inf, -np.inf]))
def point_to_Origin(self, coords_destiny, point_id=""):
"""
Maps a point from the destiny to the origin of coordinates
"""
x = coords_destiny[0]
y = coords_destiny[1]
if self.CalibratedPtp.Hg_inv is not None:
tmp = np.float32([x, y, 1.0])
if (self.isin(point_id)):
Hinv = self.list_local_area[point_id].getTransform(inverse=True)
if Hinv is not None:
trh = np.dot(Hinv, tmp.transpose())
else:
trh = np.dot(self.CalibratedPtp.Hg_inv, tmp.transpose())
else:
trh = np.dot(self.CalibratedPtp.Hg_inv, tmp.transpose()) # trh = self.H.Transform(tmp.transpose())
# a = np.array([point],dtype='float32')
# a = np.array([a])
# pointOut= cv2.perspectiveTransform(a,ret)
trh /= trh[2]
return np.array([trh[0],trh[1],0.0])
else:
return (np.array([-np.inf, -np.inf, -np.inf]))
###############################################################################
############ Mattek specific
def find_closest_letter(self, coords, map_value):
"""
Given a set of 2D coordinates in the plane, it gives you back the closest landmark
Assuming Canvas_LM Map
"""
ncoords = []
letter = ''
if (map_value == 1):
ncoords = self.point_to_Origin(coords)
else:
ncoords = coords
return self.grid_map.find_closest_letter(ncoords);
def getLetterCoordinates(self, letter_id, map_value):
letter = ''
coords = self.grid_map.map_labels[letter_id]
if (map_value == 1):
return coords
else:
if(self.isin(letter_id)):
return self.getLandmark(letter_id,2)
else:
return self.point_to_Destiny(coords)
def getCornerNeighs(self, letter):
ctx = self.grid_map.xlabels.index(letter[0])
cty = self.grid_map.ylabels.index(letter[1])
n1 = self.grid_map.xlabels[ctx + 1] + self.grid_map.ylabels[cty]
n2 = self.grid_map.xlabels[ctx + 1] + self.grid_map.ylabels[cty + 1]
n3 = self.grid_map.xlabels[ctx] + self.grid_map.ylabels[cty + 1]
return [letter, n1, n2, n3]
def addToZMap(self,coords_stage,iz_value):
self.zmap.position.append(coords_stage)
self.zmap.z_value.append(iz_value)
```
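For orientation, here is a minimal, hypothetical usage sketch of the LM map class above. It assumes the module and its dependencies (`occupancy_map`, `ptp`) are importable and that a standard `logging.Logger` is acceptable as the logger argument; the landmark names and coordinates are invented for illustration and are not taken from the original project.

```python
import logging
from virtualGridMap import virtualGridMapLM, PointType

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("clemsite.map")

vmap = virtualGridMapLM(logger)

# Three calibrated landmarks with canvas (origin) and LM (destiny) coordinates (made-up values).
calibrated = {
    "4A": ([0.0, 0.0], [100.0, 200.0]),
    "4B": ([10.0, 0.0], [210.0, 205.0]),
    "5A": ([0.0, 10.0], [105.0, 310.0]),
}
for name, (origin, destiny) in calibrated.items():
    vmap.addPoint(origin, destiny, PointType.calibrated, name, [1.0, 1.0], updateModel=False)
vmap.updateMap()  # fit the calibrated transform once all points are in

# A target known only in canvas coordinates; its LM position is predicted by the map.
vmap.addPoint([5.0, 5.0], [], PointType.target, "target_1")
print(vmap.getLandmark("target_1", 2))  # predicted destiny (LM) coordinates
```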
#### File: CLEMSite_notebooks/4_correlation_strategy_LM-SEM_global/virtualMapManager.py
```python
from virtualGridMap import *
from abc import ABCMeta, abstractmethod
from scipy.spatial import distance
import ast
from skimage import transform as tf
from scipy.spatial import KDTree,distance
from collections import OrderedDict
import random
import sys
class NonValidMap(Exception):
pass
class VirtualMapManager(object):
"""
It manages the map, and serves as an explicit interface between microscope and map:
- The map only takes care of the mathematical transformations and of carrying the data
between homologous maps.
- The manager uses that information, takes the microscope information and
works with it.
Attributes:
vMap: A virtualGridMap representing the maps of the sample.
msc: must come from the main application and it is the microscope control.
NOTE : INITIALIZE A MAP DOES NOT CREATE A MAP
"""
msc_server = None
def __init__(self, logger, server_msc):
"""
Stores microscope server
"""
self.msc_server = server_msc
self.logger = logger
self.vMaps = dict()
@abstractmethod
def addMap(self,map_id):
""" Creates a new empty instance of a map"""
pass
@abstractmethod
def removeMap(self,map_id):
""" Deletes an instance of a map """
pass
def getMap(self,map_id):
return self.vMaps[map_id]
def loadMap(self, d_map):
if(isinstance(d_map,str)):
d_map = ast.literal_eval(d_map)
map_id = d_map['MAP_ID']
vMap = self.getMap(map_id)
d_map.pop('MAP_ID',map_id)
vMap.loadMap(d_map)
vMap.updateMap()
class VirtualMapManagerLM(VirtualMapManager):
"""
The VMapManager manages at the level of map.
It has also the algorithms for error management.
"""
def __init__(self,logger, server_msc):
self.samples_list = dict()
self.samples_list_center = dict()
self.per_sample_eavg = dict()
self.per_sample_estd = dict()
self.global_error_avg = []
self.global_error_std = []
super(VirtualMapManagerLM, self).__init__(logger, server_msc)
def addMap(self,map_id):
self.vMaps[map_id] = virtualGridMapLM(self.logger)
self.vMaps[map_id].map_id = map_id
return self.vMaps[map_id]
def removeMap(self,map_id):
del self.vMaps[map_id]
return
def addSetPoints(self, pointsOrigin, pointsDestiny, names, map_id, point_type, updateOrigin=False,
updateDestiny=False, updateModel=True, updateOccupancy=False):
vMap = self.vMaps[map_id]
pointsDestiny = np.array(pointsDestiny, dtype=np.float32)
pointsOrigin = np.array(pointsOrigin, dtype=np.float32)
if (not bool(pointsDestiny.shape)):
s = (pointsOrigin.shape)
pointsDestiny = np.empty(s)
pointsDestiny.fill(np.inf)
if (not bool(pointsOrigin.shape)):
s = (pointsDestiny.shape)
pointsOrigin = np.empty(s)
pointsOrigin.fill(np.inf)
for i in range(len(pointsOrigin)):
coords_map = pointsOrigin[i]
coords_lm = pointsDestiny[i]
nid = names[i]
vMap.addPoint(coords_map, coords_lm, point_type, nid, [1., 1.], updateModel=False)
vMap.updateOrigin(nid, updateOrigin)
vMap.updateDestiny(nid, updateDestiny)
if(updateOccupancy):
vMap.grid_map.update_grid(pointsDestiny, names)
if (updateModel):
vMap.updateMap()
def generateGridMap(self,map_id,data,datamap,tags,orientation):
vMap = self.vMaps[map_id]
vMap.grid_map.generateGridMapCoordinates(data,tags,orientation)
# There is a condition in which only 1 or 2 points are passed
# If that is the case, we have to add at least 3 points to
# generate a model. We pass 5.
data=np.array(data)
ds = data.shape
if(ds[0]<3):
tags,origincs,destinycs = vMap.grid_map.get4neighbors(tags[0])
self.addSetPoints(origincs,destinycs, tags, map_id, PointType.calibrated, updateOccupancy=False)
else:
self.addSetPoints(datamap, data, tags, map_id, PointType.calibrated, updateOccupancy=False)
def addSample(self,sample_name,centerPoint,datamap,datalm,tags,map_id, top_dist = 100):
vMap = self.vMaps[map_id]
# Project the new tags
# Get canvas coords from grid_map
bad_p = []
for i,el in enumerate(tags):
origincs = vMap.grid_map.map_coordinates_origin[el]
point_dest = vMap.point_to_Destiny(origincs)
m_dist = distance.euclidean(datalm[i],np.array((point_dest[0],point_dest[1],0.0),dtype=np.float32))
# Calculate difference per point basis
# if error>100 um per point OUT
if(m_dist>top_dist):
self.logger.info("Point "+el+" discarded:"+str(m_dist)+" um away")
bad_p.append(i)
# Remove bad points
datalm = np.delete(datalm, bad_p,axis=0)
datamap = np.delete(datamap, bad_p,axis=0)
tags_aux = list(tags)
for el in bad_p:
tags_aux.remove(tags[el])
tags = tags_aux
if(datalm.shape[0]==0):
self.logger.info("Sample wrongly assigned. NOT ADDING TO MAP.")
return False
# addSetPoints
self.samples_list[sample_name] = tags
self.samples_list_center[sample_name] = centerPoint
self.addSetPoints(datamap, datalm, tags, map_id, PointType.calibrated, updateOccupancy=True)
return True
###
# crossvalidate and get error per point (add to vmap)
# per sample error and total average and sd error
def updateErrorByCV(self,map_id, num_neighs = 20):
vMap = self.vMaps[map_id]
per_sample_error_avg = []
per_sample_error_std = []
sample_name_list= list(self.samples_list_center.keys())
problematic_points = True
counter = 0
self.global_error_avg = [np.inf]
while(problematic_points):
self.logger.info("ITERATION #"+str(counter))
counter = counter+1
for ind,centerpoint in enumerate(self.samples_list_center.values()):
centerpoint = [centerpoint[1], centerpoint[0]]
sample_name = sample_name_list[ind]
# Get center point associated to sample
# Get all closest 20 neighbors
neighs,_ = vMap.CalibratedPtp._getNeighs(centerpoint, k =num_neighs, map_id = 2)
# Get all tags we have associated to the sample
m_tags = self.samples_list[sample_name]
# remove them from the neighbors
coords_map = vMap.CalibratedPtp.coordOrigin[neighs]
bad_t = []
for tag in m_tags:
tcoords = vMap.grid_map.map_coordinates_origin[tag]
for iind,ecm in enumerate(coords_map):
if(np.equal(tcoords[0],ecm[0]) and np.equal(tcoords[1],ecm[1])):
bad_t.append(iind)
neighs = np.delete(neighs,bad_t)
if(neighs.shape[0]<4):
continue
# Calculate local transform using neighbors
Hl,_ = vMap.CalibratedPtp.getLocalTransform(neighs)
# Get predictions for all the points of my sample
per_sample = []
for tag in m_tags:
tcoords = vMap.grid_map.map_coordinates_origin[tag]
tmp = np.float32([tcoords[0], tcoords[1], 1.0])
predicted = np.dot(Hl, tmp.transpose())
predicted /= predicted[2]
predicted = np.float32([predicted[0], predicted[1], 0.0])
# Get current actual coordinates
actual = vMap.getLandmark(tag,2)
actual = np.float32([actual[0], actual[1], 0.0])
# difference between each point and predictions
m_dist = distance.euclidean(predicted,actual)
#save error for each individual point in the map
vMap.list_errorDestiny[tag].append(m_dist)
per_sample.append(m_dist)
avg_er = np.mean(np.ma.masked_array(per_sample))
sd_er = np.std(np.ma.masked_array(per_sample))
# calculate average error and std and save it for the sample
bad_points = self.findBadPredictions(per_sample)
                if bad_points:  # plain truthiness check so that index 0 also counts as a bad point
problematic_points = True
for bp in bad_points:
bp_id = m_tags[bp]
m_point = vMap.getMeasuresPoint(bp_id,2)
mp_list = []
for mp in m_point:
# calculate the error of predicted again
tcoords = vMap.grid_map.map_coordinates_origin[bp_id]
tmp = np.float32([tcoords[0], tcoords[1], 1.0])
predicted = np.dot(Hl, tmp.transpose())
predicted /= predicted[2]
predicted = np.float32([predicted[0], predicted[1], 0.0])
# Get current actual coordinates
actual = np.array([mp[0], mp[1], 0.0])
# difference between each point and predictions
m_dist = distance.euclidean(predicted, actual)
mp_list.append(m_dist)
# take the minimum
ind_min = np.argmin(mp_list)
# remove all the others
vMap.setMeasuresPoint(bp_id,2,m_point[ind_min])
self.logger.info("Average error sample "+str(sample_name)+": "+ str(avg_er) + " +/-" + str(sd_er)+" um")
per_sample_error_avg.append(avg_er)
per_sample_error_std.append(sd_er)
if sample_name in self.per_sample_eavg.keys():
self.per_sample_eavg[sample_name].append(avg_er)
self.per_sample_estd[sample_name].append(sd_er)
else:
self.per_sample_estd[sample_name] = []
self.per_sample_eavg[sample_name] = []
self.per_sample_eavg[sample_name].append(avg_er)
self.per_sample_estd[sample_name].append(sd_er)
# Final error is the mean of all per sample errors
            if per_sample_error_avg:  # truthiness check: zero-valued errors must still update the running averages
self.global_error_avg.append(np.mean(per_sample_error_avg))
self.global_error_std.append(np.std(per_sample_error_std))
self.logger.info("Global error mean:"+str(self.global_error_avg[-1])+"+/-"+str(self.global_error_std[-1])+" um")
problematic_points = self.global_error_avg[-1] != self.global_error_avg[-2]
return
def findBadPredictions(self,elist, tolerance = 10):
bad_apples = True
bp_list = []
while(bad_apples):
val_mean_bad = np.mean(elist)
val_max = np.max(elist)
ind_max = np.argmax(elist)
good_list = np.delete(elist,ind_max)
val_mean_good = np.mean(good_list)
if (val_max-val_mean_good)>tolerance:
# bad_point add_to_list
bp_list.append(ind_max)
elist = good_list
else:
bad_apples = False
return bp_list
class VirtualMapManagerSEM(VirtualMapManager):
"""
"""
scan_found = dict()
scan_prepared = False
def __init__(self,logger, server_msc):
super(VirtualMapManagerSEM,self).__init__(logger,server_msc)
##########################################################################
# MAP Management
##########################################################################
def cleanAll(self):
self.vMaps = dict()
def addMap(self,map_id):
self.vMaps[map_id] = virtualGridMapSEM(self.logger)
self.vMaps[map_id].map_id = map_id
return self.vMaps[map_id]
def addMapLM(self,map_id):
self.vMaps[map_id] = virtualGridMapLM(self.logger)
return self.vMaps[map_id]
def removeMap(self,map_id):
del self.vMaps[map_id]
return
def isempty(self,map_id):
return self.vMaps[map_id].getTotalLandmarks()==0
##########################################################################
# Adding sets of points
##########################################################################
def updateMapFromJSON(self, json_map, update = False):
d1 = json_map["LM_SEM"]
d2 = json_map["Canvas_SEM"]
stmap = d2['MAP']
map_df = pd.read_json(stmap)
if np.any(map_df.TYPE == 'TARGET'):
# Load new maps and replace them
self.loadMap(d1)
# Now we have to add all the landmarks from LM_SEM
self.loadMap(d2)
return
elif np.any(map_df.TYPE == 'CALIBRATED') and update:
v1 = self.getMap('LM_SEM')
v2 = self.getMap('Canvas_SEM')
# Now we have to see if there are only calibration coordinates or also targets
v1.deleteCalibrations()
v2.deleteCalibrations()
map_df = map_df[map_df.TYPE == 'CALIBRATED']
sem_coords = np.array(list(zip(map_df.COORDS_DESTINY_X, map_df.COORDS_DESTINY_Y, map_df.COORDS_DESTINY_Z)), dtype=np.float32)
map_coords = np.array(list(zip(map_df.COORDS_ORIGIN_X, map_df.COORDS_ORIGIN_Y, map_df.COORDS_ORIGIN_Z)), dtype=np.float32)
self.addCalibratedPointsToMap(sem_coords, map_coords, list(map_df.LANDMARK))
else:
raise NonValidMap('Non valid map for updating')
def addCalibratedPointsToMap(self, datasem, datamap, tags):
self.addSetPointsFromMicroscope(tags, datasem, "LM_SEM", updateModel=False)
self.addSetPoints(datamap, datasem, tags, "Canvas_SEM", PointType.calibrated, updateModel=False,
updateOccupancy=True)
v1 = self.getMap('LM_SEM')
v2 = self.getMap('Canvas_SEM')
v1.updateMap()
v2.updateMap()
return
def applyTransform(self,M,map_id,map_to_update):
self.vMaps[map_id].applyTransform(M,map_to_update)
def addSetPoints(self,pointsOrigin,pointsDestiny,names,map_id,point_type,updateModel=True,updateOccupancy = False):
"""
Given a sets of points in Origin, Destiny, their common landmark names and the map_id "Canvas_LM" or "LM_SEM"
point_type
"""
vMap = self.vMaps[map_id]
pointsDestiny = np.array(pointsDestiny,dtype=np.float32)
pointsOrigin = np.array(pointsOrigin,dtype = np.float32)
pD = pointsDestiny.shape
pO = pointsOrigin.shape
if(not bool(pD) or (pD[0] == 0)):
pointsDestiny = np.empty(pO)
pointsDestiny.fill(np.inf)
if(not bool(pO) or (pO[0]==0) ):
pointsOrigin = np.empty(pD)
pointsOrigin.fill(np.inf)
for i in range(len(pointsOrigin)):
coords_lm = pointsOrigin[i]
coords_sem = pointsDestiny[i]
nid = names[i]
vMap.addPoint(coords_lm, coords_sem, point_type, nid, [1., 1.], updateModel=False)
if(updateModel):
vMap.updateMap()
##########################################################################
# GRABBING FRAMES and Mapper to front END
##########################################################################
def prepare_scans(self, map_id, percent = 1, recalibration = False):
currentmap = self.getMap(map_id)
self.scanning_map = currentmap
letters_to_find = set()
self.scan_prepared = False
list_pointId = currentmap.getLandmarkIds()
if len(list_pointId) == 0:
return
if len(list_pointId)<5:
# Then select some random elements, starting from existent positions
# Get existent letter
pntid = list_pointId[0]
all_labels = currentmap.grid_map.getLabels()
all_labels = [ label for label in all_labels if '*' not in label ] # Avoid corners
all_labels = [ label for label in all_labels if '+' not in label ] # Avoid corners
letters_to_find = self.getRandomLetters(all_labels, percent+0.1)
distpid = currentmap.grid_map.getCoordinatesGrid([pntid])
to_find_map_coords = currentmap.grid_map.getCoordinatesGrid(letters_to_find)
cal_tree = KDTree(to_find_map_coords)
distances, indexes = cal_tree.query(distpid, k = 40, distance_upper_bound = np.inf)
fletters_to_find = [letters_to_find[i] for i in indexes[0]]
pOrigin = currentmap.grid_map.getCoordinatesGrid(fletters_to_find)
self.addSetPoints(pOrigin,[],fletters_to_find,"Canvas_SEM",PointType.non_calibrated,False)
self.scan_found = OrderedDict(zip(fletters_to_find, len(fletters_to_find) * [False]))
return
else:
#####
for pntid in list_pointId:
if(not currentmap.is_blocked(pntid)):
if(not currentmap.checkType(pntid,PointType.acquired) and not currentmap.checkType(pntid,PointType.target) ):
if(recalibration==False and not currentmap.checkType(pntid,PointType.calibrated)):
letters_to_find.add(pntid[0:2])
elif(recalibration == True):
letters_to_find.add(pntid[0:2])
# grid map
if percent < 1 or len(list(letters_to_find)) == 0:
letters_to_find = self.getRandomLetters(letters_to_find,percent)
else:
letters_to_find = list(letters_to_find)
# Sort by proximity to calibration points.
# We took all the calibration points map coordinates, and all the letter map coordinates
to_find_map_coords = currentmap.grid_map.getCoordinatesGrid(letters_to_find)
calibrated_ids = currentmap.getLandmarkIDsByType(PointType.calibrated)
calibrated_map_coords = currentmap.grid_map.getCoordinatesGrid(calibrated_ids)
# Now, we find from all letters to find which one is the closest
if np.any(calibrated_map_coords):
cal_tree = KDTree(calibrated_map_coords)
else:
return
dict_close = {}
for ind, el in enumerate(to_find_map_coords):
dict_close[ind] = cal_tree.query(el)
all_dist = dict_close.values()
all_dist = sorted(all_dist, key=lambda x: x[0])
closest_ind = all_dist[0][1]
closest_val = calibrated_map_coords[closest_ind]
find_tree = KDTree(to_find_map_coords)
distances,indexes = find_tree.query(closest_val,k = len(letters_to_find), distance_upper_bound = np.inf )
letters_to_find = [ letters_to_find[i] for i in indexes ]
self.scan_found = OrderedDict(zip(letters_to_find, len(letters_to_find) * [False])) # Create a map
return
def getListToScan(self):
"""
Sort scan keys by proximity to calibration points
:return:
"""
return self.scan_found.keys()
def getRandomLetters(self,letters_to_find,percent):
N = len(letters_to_find)
total = list(range(0, N))
np.random.shuffle(total)
if(N>20):
N = np.max([20.0,percent*N])
N = int(N)
total = total[0:N]
total = np.sort(total)
keys_selection = []
for ind,el in enumerate(letters_to_find):
if(ind in total):
keys_selection.append(el)
return keys_selection
def getCoordCenter(self,letter_id,map_id):
currentmap = self.getMap(map_id)
coords = currentmap.getLetterCoordinates(letter_id, 1)
ecoords = np.zeros(coords.shape)
ecoords[0] = coords[0]+currentmap.grid_map.spacing*0.5
ecoords[1] = coords[1]+currentmap.grid_map.spacing*0.5
ncoords = currentmap.point_to_Destiny(ecoords)
return ncoords
# letter_neighs = currentmap.getCornerNeighs(letter_id)
# neighs = []
# for sec_lett in letter_neighs:
# coords_stage = currentmap.getLetterCoordinates(sec_lett,2)
# neighs.append(coords_stage)
# neighs = np.array(neighs)
# cornerTop = neighs[0]
# xmin = np.min(neighs[:,0])
# xmax = np.max(neighs[:,0])
# ymin = np.min(neighs[:,1])
# ymax = np.max(neighs[:,1])
# x_c = (xmax+xmin)*0.5
# y_c = (ymax+ymin)*0.5
# coord_center = (x_c,y_c,0.0)
# return coord_center
def getCoordsFile(self,dir_coord,re_filename):
directories = glob.glob(dir_coord+'\*')
xd1 = self.filterPick(directories,re_filename)
        if xd1:
            c_file = directories[xd1[0]]
            self.helper.readLM(c_file)
            tags, indices = self.helper.unique_elements(self.helper.letters)
            datalm = np.array(self.helper.coord_lm, np.float32)
            datalm = datalm[indices]
            datamap = np.array(self.helper.coord_map, np.float32)
            datamap = datamap[indices]
            return (datalm, datamap, tags)
        else:
            print("Error, coordinate files not detected")
            self.logger.info("Compute Line detection First.")
            return
def transformToNewData(self,itags,icoords,coords_id,map_id):
vMap = self.vMaps[map_id]
# Search coords from map
ecoords = vMap.getCoordsFromLandmarks(itags,coords_id) # 1 origin 2 destiny
ecoords = np.squeeze(ecoords)
if (len(ecoords)<3):
return False
ncor2 = (icoords[0][0:2], icoords[1][0:2], icoords[2][0:2])
ncor = (ecoords[0][0:2], ecoords[1][0:2], ecoords[2][0:2])
# calculate transform
tform = tf.estimate_transform('affine', np.array(ncor), np.array(ncor2)) # 'affine'
#tform = tf.AffineTransform(matrix=None, scale=None, rotation=tform.rotation, shear=None, translation=tform.translation)
self.applyTransform(tform.params, map_id, coords_id)
return True
def addSetPointsFromMicroscope(self,itags,icoords,map_id,updateModel = True):
"""
Given points from a map, it transfers them as calibrated to the LM-SEM map
or as acquired.
Do not confuse this method with addSetPoints.
itags - letters or symbols associated to each coordinate
icoords = coordinates from one of the maps
map_id
updateModel - since it is a costly operation to update all local regions of the map, we can decide to update the model
later
"""
vMap = self.vMaps[map_id]
list_calibrated_names =[]
list_calibrated = []
list_acquired_names = []
list_acquired= []
# find if any of the tags matches with the tags we already have
# That is, the point is calibrated, non-calibrated or target
# We match it. Otherwise, we just add it as a landmark
for ind,tag in enumerate(itags):
if vMap.isin(tag) and not vMap.checkType(tag,PointType.acquired):
list_calibrated_names.append(tag)
list_calibrated.append(icoords[ind])
else:
list_acquired_names.append(tag)
list_acquired.append(icoords[ind])
if(list_calibrated_names):
self.addSetPoints([],list_calibrated,list_calibrated_names,"LM_SEM",PointType.calibrated,updateOccupancy=True,updateModel=updateModel)
if(list_acquired_names):
self.addSetPoints([], list_acquired, list_acquired_names, "LM_SEM",PointType.acquired,updateModel=False,updateOccupancy=True)
if(updateModel):
vMap.updateMap()
def blockPoint(self,point_id):
# For each map managed, blocks position using the occupation map
for el_map, m_map in self.vMaps.items():
m_map.blockPoint(point_id)
def unblockPoint(self, point_id):
# For each map managed, unblocks position using the occupation map
for el_map, m_map in self.vMaps.items():
m_map.unblockPoint(point_id)
def changeType(self, point_id, type):
for el_map, m_map in self.vMaps.items():
m_map.changeType(point_id, type)
def updateErrorByCV(self, map_id, num_neighs = 20, tolerance = 50):
vMap = self.vMaps[map_id]
## Get calibration points
d_first, d_second, tags = vMap.getLandmarksByType("CALIBRATED")
problematic_points = True
error_list = []
blocked_list = []
blocked_error_list = []
good_list = []
self.logger.info("Finding bad apples in predictions!")
for ind, tag in enumerate(tags):
if ind%5 == 0:
self.logger.info("# Iteration %s"% ind)
# Get all closest 20 neighbors for tag
neighs, ntags, distances = vMap.CalibratedPtp.getNeighs(d_second[ind,0:2], k = num_neighs, map_id=2)
if len(neighs)<4:
self.logger.warning("Not enough points to check quality of linear model.")
return
# Calculate local transform using neighbors (ONLY NEIGHBORS, YOU ARE NOT INCLUDED)
d1 = vMap.getCoordsFromLandmarks(ntags[1:],1)
d2 = neighs[1:]
H1, bi, ba = self.ransac_model(d1,d2)
            # How far does the local model deviate from linear? correlations below xxx have to be removed
if H1 is None:
self.logger.warning("Cannot validate positions by cross_validation. It is not recommended to continue.")
return
# Get coordinates origin to predict
tcoords = vMap.getCoordsFromLandmarks([tag],1)
tmp = np.float32([tcoords[0,0], tcoords[0,1], 1.0])
# Get predicted coordinates
predicted = np.dot(H1, tmp)
predicted /= predicted[2]
predicted = np.float32([predicted[0], predicted[1], 0.0])
# Get current current coordinates
actual = vMap.getLandmark(tag, 2)
actual = np.float32([actual[0], actual[1], 0.0])
# difference between each point and predictions
m_dist = distance.euclidean(predicted, actual)
# save error for each individual point in the map
if m_dist > tolerance : # should be in micrometers!
self.logger.info("#!!# BLOCKED: Position :"+tag+ " with values "+str(actual)+" has exceedeed minimum error. Error found to be: "+str(m_dist))
blocked_list.append(tag)
blocked_error_list.append(m_dist)
else:
# remove them from the neighbors
self.logger.info("Position :" + tag + " with values " + str(actual) + ". Error found to be: " + str(m_dist))
error_list.append(m_dist)
good_list.append(tag)
avg_er = np.mean(error_list)
sd_er = np.std(error_list)
self.logger.info("Average error sample " + str(avg_er) + " +/-" + str(sd_er) + " um")
return blocked_list, blocked_error_list, good_list, error_list
def ransac_model(self, data_origin, data_destiny, min_error = 5, tolerance = 20, max_iterations = 200, stop_at_goal=True):
best_ac = tolerance
best_ic = 0
best_model = None
seed = random.randrange(sys.maxsize)
random.seed(seed)
data_or = list(data_origin)
n = len(data_or)
goal_reached = False
for sample_size in range(n, 4, -1):
old_set = set()
for i in range(max_iterations):
s = random.sample(range(n), int(sample_size))
ns = old_set - set(s)
if len(ns) == 0 and i>0 :
break
else :
old_set = set(s)
m = tf.estimate_transform('affine', data_origin[s,0:2], data_destiny[s,0:2]).params
ic = 0
ac_error = 0
for j in range(n):
data_origin[j,2] = 1
error = self.get_error(m, data_origin[j],data_destiny[j,0:2])
if error < tolerance:
ic += 1
if ic == 1 :
ac_error = error
else:
ac_error = (ac_error+error)*0.5
if ac_error < best_ac:
best_ic = ic
best_model = m
best_ac = ac_error
if best_ac < min_error:
goal_reached = True
break
if goal_reached:
break
return best_model, best_ic, best_ac
def get_error(self,M, ipoint, opoint):
predicted = np.dot(M, ipoint)
predicted /= predicted[2]
predicted = np.float32([predicted[0], predicted[1]])
m_dist = distance.euclidean(predicted, opoint)
return m_dist
``` |
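The cross-validation block above repeatedly discards the worst-fitting point until the remaining residuals agree within a tolerance. A minimal standalone sketch of that trimming idea (helper name and example values are mine, not from the repository):
```python
# Standalone sketch of the residual-trimming loop used by findBadPredictions:
# drop the largest residual while it exceeds the mean of the others by `tolerance`.
import numpy as np

def trim_outliers(errors, tolerance=10.0):
    errors = np.asarray(errors, dtype=float)
    keep = np.ones(errors.size, dtype=bool)
    bad = []
    while keep.sum() > 1:
        idx_max = int(np.argmax(np.where(keep, errors, -np.inf)))  # worst remaining residual
        rest_mean = (errors[keep].sum() - errors[idx_max]) / (keep.sum() - 1)
        if errors[idx_max] - rest_mean > tolerance:
            bad.append(idx_max)
            keep[idx_max] = False
        else:
            break
    return bad

print(trim_outliers([1.2, 0.8, 50.0, 1.5]))  # -> [2]: only the 50 um residual is discarded
```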
{
"source": "josemiserra/navigation_drlnd",
"score": 2
} |
#### File: josemiserra/navigation_drlnd/execute_train.py
```python
from unityagents import UnityEnvironment
import numpy as np
from collections import deque
import torch
from dqn_agent import Agent
def trainFunction(n_episodes=2000, max_t=1000, eps_start=1.0, eps_end=0.01, eps_decay=0.995):
agent = Agent(state_size=37, action_size=4, seed=0, priority=True)
epsilons = []
scores = [] # list containing scores from each episode
scores_window = deque(maxlen=100) # last 100 scores
eps = eps_start # initialize epsilon
for i_episode in range(1, n_episodes + 1):
env_info = env.reset(train_mode=True)[brain_name] # reset the environment
state = env_info.vector_observations[0]
score = 0
for t in range(max_t):
action = agent.act(state, eps)
env_info = env.step(action.astype(np.int32))[brain_name]
next_state = env_info.vector_observations[0] # get the next state
reward = env_info.rewards[0] # get the reward
done = env_info.local_done[0] # see if episode has finished
agent.step(state, action, reward, next_state, done)
state = next_state
score += reward
if done:
break
scores_window.append(score) # save most recent score
scores.append(score) # save most recent score
eps = max(eps_end, eps_decay * eps) # decrease epsilon
epsilons.append(eps)
print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)), end="")
if i_episode % 100 == 0:
print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)), end="")
# if np.mean(scores_window)>=13.0:
print('\nEnvironment finished in {:d} episodes!\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)))
torch.save(agent.qnetwork_local.state_dict(), 'checkpoint.pth')
return scores, epsilons
if __name__ == "__main__":
env = UnityEnvironment(file_name="Banana.exe")
# get the default brain
brain_name = env.brain_names[0]
brain = env.brains[brain_name]
# reset the environment
env_info = env.reset(train_mode=True)[brain_name]
# number of agents in the environment
print('Number of agents:', len(env_info.agents))
# number of actions
action_size = brain.vector_action_space_size
print('Number of actions:', action_size)
# examine the state space
state = env_info.vector_observations[0]
print('States look like:', state)
state_size = len(state)
print('States have length:', state_size)
eps_start = 1.0
eps_end = 0.01
eps_decay = 0.992
n_episodes = 500
max_t = 300
scores, epsi = trainFunction(n_episodes, max_t, eps_start, eps_end, eps_decay)
``` |
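The loop above anneals exploration with a multiplicative epsilon schedule; a small, environment-free sketch using the same constants as the script shows how quickly it reaches the floor (the closed-form episode count is my own back-of-the-envelope check):
```python
import math

eps_start, eps_end, eps_decay = 1.0, 0.01, 0.992

eps = eps_start
for episode in range(1, 501):
    eps = max(eps_end, eps_decay * eps)
print(round(eps, 4))  # epsilon left after the 500 training episodes (~0.018)

# Episodes needed before the multiplicative decay alone would hit eps_end (~574).
print(math.ceil(math.log(eps_end / eps_start) / math.log(eps_decay)))
```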
{
"source": "josemiserra/tennis_maddpg",
"score": 3
} |
#### File: josemiserra/tennis_maddpg/maddpg.py
```python
import random
import copy
from collections import namedtuple, deque
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
from model import Critic, Actor
LR_ACTOR = 1e-4
LR_CRITIC = 1e-3
TAU = 1e-3
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class MADDPG():
"""Multi agent designed for training. It contains the critics."""
def __init__(self, num_agents = 2, state_size = 24, action_size=2,
buffer_size=100000,
batch_size=512,
gamma=0.99,
update_every=2,
noise_start=1.0,
noise_decay=0.99999,
stop_noise=50000, seed = 31):
"""
Params
======
state_size(int): dimension of each observation state
action_size (int): dimension of each action
        num_agents (int): number of distinct agents
buffer_size (int): replay buffer size
batch_size (int)
gamma (float): discount factor
noise_start (float): initial noise weighting factor
noise_decay (float): noise decay rate
update_every (int): how often to update the target network
stop_noise (int): max number of timesteps with noise applied in training
"""
self.buffer_size = buffer_size
self.batch_size = batch_size
self.update_every = update_every
self.gamma = gamma
self.num_agents = num_agents
self.state_size = state_size
self.noise_factor = noise_start
self.noise_decay = noise_decay
self.t_step = 0
self.stop_noise = stop_noise
# create two agents, each with their own actor and critic
self.critic_local = [Critic(num_agents, state_size, action_size, seed).to(device) for _ in range(num_agents)]
self.critic_target = [Critic(num_agents, state_size, action_size, seed).to(device) for _ in range(num_agents)]
self.critic_optimizer = [ optim.Adam(self.critic_local[i].parameters(), lr=LR_CRITIC) for i in range(num_agents) ]
self.agents = [Agent(i) for i in range(num_agents)]
for i in range(self.num_agents):
Agent.hard_copy_weights(self.critic_target[i], self.critic_local[i])
# create shared replay buffer
self.memory = ReplayBuffer(action_size, self.buffer_size, self.batch_size)
def step(self, n_states, n_actions, n_rewards, n_next_states, n_dones):
n_states = n_states.reshape(1, -1) # reshape into 1x48 for easier network input
n_next_states = n_next_states.reshape(1, -1)
self.memory.add(n_states, n_actions, n_rewards, n_next_states, n_dones)
# if stop_noise time steps are achieved turn off noise
if self.t_step > self.stop_noise:
self.noise_decay = 1.0
self.noise_factor = 1.0
self.t_step = self.t_step + 1
# Learn every update_every time steps.
if self.t_step % self.update_every == 0:
# If enough samples are available in memory, get random subset and learn
if len(self.memory) > self.batch_size:
# sample from the replay buffer for each agent
experiences = [self.memory.sample() for _ in range(self.num_agents)]
self.learn(experiences, self.gamma)
def act(self, n_states, add_noise=True):
# calculate each action
joint_actions = []
for agent, state in zip(self.agents, n_states):
action = agent.act(state, noise_weight=self.noise_factor, add_noise=add_noise)
if add_noise:
self.noise_factor *= self.noise_decay
joint_actions.append(action)
return np.array(joint_actions).reshape(1, -1)
def learn(self, experiences, gamma):
# each agent uses its own actor to calculate next_actions
joint_next_actions = []
joint_actions = []
for i, agent in enumerate(self.agents):
states, _, _, next_states, _ = experiences[i]
agent_id = torch.tensor([i]).to(device)
state = states.reshape(-1, self.num_agents, self.state_size).index_select(1, agent_id).squeeze(1)
action = agent.actor_local(state)
joint_actions.append(action)
next_state = next_states.reshape(-1, self.num_agents, self.state_size).index_select(1, agent_id).squeeze(1)
next_action = agent.actor_target(next_state)
joint_next_actions.append(next_action)
# each agent learns from its experience sample
for i, agent in enumerate(self.agents):
states, actions, rewards, next_states, dones = experiences[i]
# ---------------------------- update critic ---------------------------- #
# get predicted next-state actions and Q values from target models
self.critic_optimizer[i].zero_grad()
agent_id = torch.tensor([i]).to(device)
actions_next = torch.cat(joint_next_actions, dim=1).to(device)
with torch.no_grad():
next_state = next_states.reshape(-1, self.num_agents, self.state_size).index_select(1, agent_id).squeeze(1).to(device)
q_targets_next = self.critic_target[i](next_state, actions_next)
# compute Q targets for current states (y_i)
state = states.reshape(-1, self.num_agents, self.state_size).index_select(1, agent_id).squeeze(1)
q_expected = self.critic_local[i](state, actions)
# q_targets = reward of this timestep + discount * Q(st+1,at+1) from target network
q_targets = rewards.index_select(1, agent_id) + (
gamma * q_targets_next * (1 - dones.index_select(1, agent_id)))
# compute critic loss
critic_loss = F.mse_loss(q_expected, q_targets.detach())
# minimize loss
critic_loss.backward()
self.critic_optimizer[i].step()
# ---------------------------- update actor ---------------------------- #
# compute actor loss
agent.actor_optimizer.zero_grad()
actions_pred = [actions if i == j else actions.detach() for j, actions in enumerate(joint_actions)]
actions_pred = torch.cat(actions_pred, dim=1).to(device)
state = states.reshape(-1, self.num_agents, self.state_size).index_select(1, agent_id).squeeze(1)
actor_loss = -self.critic_local[i](state, actions_pred).mean()
# minimize loss
actor_loss.backward()
agent.actor_optimizer.step()
# ----------------------- update target networks ----------------------- #
Agent.soft_update(self.critic_local[i], self.critic_target[i], TAU * 10)
Agent.soft_update(agent.actor_local, agent.actor_target, TAU)
def save(self):
# save models for local actor and critic of each agent
for i, agent in enumerate(self.agents):
torch.save(agent.actor_local.state_dict(), f"checkpoint_actor_agent_{i}.pth")
torch.save(self.critic_local[i].state_dict(), f"checkpoint_critic_agent_{i}.pth")
class Agent():
"""DDPG agent with just an actor."""
def __init__(self, agent_id, state_size = 24, action_size=2, seed=0):
"""
Params
======
        agent_id (int): index identifying this agent
        state_size (int): dimension of each observation state
        action_size (int): dimension of each action
        seed (int): random seed (tau and the learning rates are module-level constants)
"""
random.seed(seed)
self.id = agent_id
self.action_size = action_size
self.state_size = state_size
# Actor Network
self.actor_local = Actor(state_size, action_size, seed).to(device)
self.actor_target = Actor(state_size, action_size, seed).to(device)
self.actor_optimizer = optim.Adam(self.actor_local.parameters(), lr=LR_ACTOR)
Agent.hard_copy_weights(self.actor_target, self.actor_local)
# Noise process for the act moment
self.noise = OUNoise(action_size, seed)
@staticmethod
def hard_copy_weights(target, source):
""" copy weights from source to target network (part of initialization)"""
for target_param, param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(param.data)
def act(self, state, noise_weight=1.0, add_noise=True):
"""Returns actions for given state as per current policy."""
state = torch.from_numpy(state).float().to(device)
# calculate action values
self.actor_local.eval()
with torch.no_grad():
action = self.actor_local(state.unsqueeze(0)).cpu().data.numpy()
self.actor_local.train()
if add_noise:
self.noise_val = self.noise.sample() * noise_weight
action += self.noise_val
return np.clip(action, -1, 1)
def reset(self):
self.noise.reset()
@staticmethod
def soft_update(local_model, target_model, tau):
"""Soft update model parameters.
θ_target = τ*θ_local + (1 - τ)*θ_target
Params
======
local_model (PyTorch model): weights will be copied from
target_model (PyTorch model): weights will be copied to
tau (float): interpolation parameter
"""
for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)
class OUNoise:
"""Ornstein-Uhlenbeck process."""
def __init__(self, size, seed, mu=0., theta=0.15, sigma=0.1):
"""Initialize parameters and noise process."""
random.seed(seed)
np.random.seed(seed)
self.size = size
self.mu = mu * np.ones(size)
self.theta = theta
self.sigma = sigma
self.reset()
def reset(self):
"""Reset the internal state (= noise) to mean (mu)."""
self.state = copy.copy(self.mu)
def sample(self):
"""Update internal state and return it as a noise sample."""
x = self.state
dx = self.theta * (self.mu - x) + self.sigma * np.random.standard_normal(self.size)
self.state = x + dx
return self.state
class ReplayBuffer():
"""Fixed-size buffer to store experience tuples."""
def __init__(self, action_size, buffer_size, batch_size, seed = 47):
"""Initialize a ReplayBuffer object.
Params
======
action_size (int): dimension of each action
buffer_size (int): maximum size of buffer
batch_size (int): size of each training batch
seed (int): Random seed
"""
random.seed(seed)
np.random.seed(seed)
self.action_size = action_size
self.memory = deque(maxlen=buffer_size)
self.batch_size = batch_size
self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"])
def add(self, state, action, reward, next_state, done):
"""Add a new experience to memory."""
e = self.experience(state, action, reward, next_state, done)
self.memory.append(e)
def sample(self):
"""Randomly sample a batch of experiences from memory."""
experiences = random.sample(self.memory, k=self.batch_size)
states = torch.from_numpy(np.vstack([e.state for e in experiences if e is not None])).float().to(device)
actions = torch.from_numpy(np.vstack([e.action for e in experiences if e is not None])).float().to(device)
rewards = torch.from_numpy(np.vstack([e.reward for e in experiences if e is not None])).float().to(device)
next_states = torch.from_numpy(np.vstack([e.next_state for e in experiences if e is not None])).float().to(device)
dones = torch.from_numpy(np.vstack([e.done for e in experiences if e is not None]).astype(np.uint8)).float().to(device)
return (states, actions, rewards, next_states, dones)
def __len__(self):
"""Return the current size of internal memory."""
return len(self.memory)
``` |
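The Polyak averaging documented in `Agent.soft_update` (θ_target = τ*θ_local + (1 - τ)*θ_target) is easiest to see on a toy pair of layers; the sketch below uses made-up layer sizes rather than the project's actor/critic networks:
```python
import torch
import torch.nn as nn

tau = 1e-3
local, target = nn.Linear(4, 2), nn.Linear(4, 2)

with torch.no_grad():
    for t_param, l_param in zip(target.parameters(), local.parameters()):
        # theta_target <- tau * theta_local + (1 - tau) * theta_target
        t_param.copy_(tau * l_param + (1.0 - tau) * t_param)
```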
{
"source": "josemlp91/cocineame",
"score": 2
} |
#### File: cocineame/cocineame/views.py
```python
from django.shortcuts import render
from . import settings
def index(request, *args, **kwargs):
print(settings.STATIC_ROOT)
return render(request, 'dist/index.html')
``` |
{
"source": "josemlp91/django_rest_mockup",
"score": 2
} |
#### File: django_rest_mockup/mockup/models.py
```python
from django.db import models
from jsonfield import JSONField
class MockupConfig(models.Model):
HTTP_METHOD = (
('GET', 'GET'),
('POST', 'POST'),
('PUT', 'PUT'),
('PATH', 'PATH'),
        ('DELETE', 'DELETE'),
)
name = models.CharField(max_length=1024, blank=False, null=False, verbose_name=u"nombre")
url = models.CharField(max_length=1024, verbose_name=u"url", blank=False, null=False)
method = models.CharField(max_length=1024, choices=HTTP_METHOD, null=False, blank=False, verbose_name=u"metodo http")
url_param = JSONField(null=True, blank=True, verbose_name=u"parámetros por url")
get_param = JSONField(null=True, blank=True, verbose_name=u"parámetros get")
post_body_param = JSONField(null=True, blank=True, verbose_name=u"parámetros post")
content = JSONField(null=True, blank=True, verbose_name=u"contenido")
def __str__(self):
return self.name
``` |
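For illustration, creating one mock endpoint definition through the Django ORM might look like this (field values are invented; they are not part of the repository):
```python
from mockup.models import MockupConfig

mock = MockupConfig.objects.create(
    name="listado de usuarios",
    url="/api/usuarios/",
    method="GET",
    get_param={"page": 1},                              # stored in a JSONField
    content={"results": [{"id": 1, "nombre": "Ana"}]},  # canned response body
)
print(mock)  # __str__ returns the configured name
```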
{
"source": "josemlp91/djng",
"score": 2
} |
#### File: server/core/views.py
```python
import json
from django.shortcuts import render
def home(request):
context = {
'tasks': json.dumps({
'tasks': [
'Create new django app',
'Expose the data over the REST API',
'Create new ng-controller',
'...'
]
})
}
return render(request, 'index.html', context)
``` |
{
"source": "josemmercado96/hotel-bookings",
"score": 2
} |
#### File: hotel-bookings/bookings/views.py
```python
from django.http import HttpResponse, JsonResponse
from django.views.decorators.csrf import csrf_exempt
from rest_framework.parsers import JSONParser
from bookings.models import Booking
from bookings.serializers import BookingSerializer
# Create your views here.
@csrf_exempt
def list_booking(request):
if request.method == 'GET':
bookings = Booking.objects.all()
serializer = BookingSerializer(bookings, many=True)
return JsonResponse(serializer.data, safe=False)
elif request.method == 'POST':
data = JSONParser().parse(request)
serializer = BookingSerializer(data=data)
if serializer.is_valid():
serializer.save()
return JsonResponse(serializer.data, status=201)
return JsonResponse(serializer.errors, status=400)
@csrf_exempt
def pay_booking(request, pk):
if request.method == 'PUT':
try:
booking = Booking.objects.get(pk=pk)
if booking.state != Booking.PENDING:
error = {
"error": "El estado de la reserva debe ser Pendiente"
}
return JsonResponse(error, status=400)
else:
booking.state = Booking.PAID_OUT
booking.save()
return HttpResponse(status=200)
except Booking.DoesNotExist:
return HttpResponse(status=404)
else:
return HttpResponse(status=404)
@csrf_exempt
def cancel_booking(request, pk):
if request.method == 'PUT':
try:
booking = Booking.objects.get(pk=pk)
if booking.state != Booking.PENDING:
error = {
"error": "El estado de la reserva debe ser Pendiente"
}
return JsonResponse(error, status=400)
else:
booking.state = Booking.DELETED
booking.save()
return HttpResponse(status=200)
except Booking.DoesNotExist:
return HttpResponse(status=404)
else:
return HttpResponse(status=404)
```
#### File: hotel-bookings/clients/views.py
```python
from django.http import HttpResponse, JsonResponse
from django.views.decorators.csrf import csrf_exempt
from rest_framework.parsers import JSONParser
from clients.models import Client
from clients.serializers import ClientSerializer
# Create your views here.
@csrf_exempt
def client_list(request):
if request.method == 'GET':
clients = Client.objects.all()
serializer = ClientSerializer(clients, many=True)
return JsonResponse(serializer.data, safe=False)
elif request.method == 'POST':
data = JSONParser().parse(request)
serializer = ClientSerializer(data=data)
if serializer.is_valid():
serializer.save()
return JsonResponse(serializer.data, status=201)
return JsonResponse(serializer.errors, status=400)
```
#### File: hotel-bookings/room/models.py
```python
from django.db import models
# Create your models here.
class Room(models.Model):
code_room = models.CharField(max_length=10, unique=True, null=False)
beds = models.PositiveIntegerField(default=1)
is_suite = models.BooleanField(default=False, null=False)
def __str__(self):
return "{}".format(self.code_room)
``` |
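A hedged usage sketch for the views above, driven through Django's test client; the `/bookings/` path is an assumption, since the project's `urls.py` is not included here:
```python
import json
from django.test import Client

client = Client()

# GET branch: returns the serialized list of bookings.
resp = client.get("/bookings/")
print(resp.status_code, json.loads(resp.content))

# POST branch: the JSON body goes through BookingSerializer validation.
resp = client.post("/bookings/", data=json.dumps({}), content_type="application/json")
print(resp.status_code)  # 201 on valid data, 400 on validation errors
```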
{
"source": "josemolinagarcia/maya-math-nodes",
"score": 3
} |
#### File: maya-math-nodes/tests/test_clamp.py
```python
from node_test_case import NodeTestCase, cmds
class TestClamp(NodeTestCase):
def test_clamp(self):
node = self.create_node('Clamp', {'input': 5.0, 'inputMin': 0.0, 'inputMax': 2.0}, 2.0)
cmds.setAttr('{0}.{1}'.format(node, 'input'), -1.0)
self.assertAlmostEqual(cmds.getAttr('{0}.output'.format(node)), 0.0)
def test_clamp_int(self):
node = self.create_node('ClampInt', {'input': 5, 'inputMin': 0, 'inputMax': 2}, 2)
cmds.setAttr('{0}.{1}'.format(node, 'input'), -1)
self.assertAlmostEqual(cmds.getAttr('{0}.output'.format(node)), 0)
def test_clamp_angle(self):
node = self.create_node('ClampAngle', {'input': 5.0, 'inputMin': 0.0, 'inputMax': 2.0}, 2.0)
cmds.setAttr('{0}.{1}'.format(node, 'input'), -1.0)
self.assertAlmostEqual(cmds.getAttr('{0}.output'.format(node)), 0.0)
def test_remap(self):
self.create_node('Remap', {'input': 0.5, 'low1': 0.0, 'high1': 1.0, 'low2': 0.0, 'high2': 10.0}, 5.0)
def test_remap_angle(self):
self.create_node('RemapAngle', {'input': 0.5, 'low1': 0.0, 'high1': 1.0, 'low2': 0.0, 'high2': 10.0}, 5.0)
def test_remap_int(self):
self.create_node('RemapInt', {'input': 5, 'low1': 0, 'high1': 10, 'low2': 0, 'high2': 100}, 50)
def test_smoothstep(self):
self.create_node('Smoothstep', {'input': 0.3}, 0.216)
```
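For reference, the expected values in these tests follow from the usual clamp/remap/smoothstep formulas; a plain-Python restatement (not the Maya node implementation itself):
```python
def clamp(x, lo, hi):
    return max(lo, min(x, hi))

def remap(x, low1, high1, low2, high2):
    return low2 + (x - low1) * (high2 - low2) / (high1 - low1)

def smoothstep(t):
    return t * t * (3.0 - 2.0 * t)  # 3t^2 - 2t^3

assert clamp(5.0, 0.0, 2.0) == 2.0
assert remap(0.5, 0.0, 1.0, 0.0, 10.0) == 5.0
assert abs(smoothstep(0.3) - 0.216) < 1e-9
```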
#### File: maya-math-nodes/tests/test_divide.py
```python
from node_test_case import NodeTestCase
class TestDivide(NodeTestCase):
def test_divide(self):
self.create_node('Divide', {'input1': 10.0, 'input2': 2.0}, 5.0)
def test_divide_by_int(self):
self.create_node('DivideByInt', {'input1': 10.0, 'input2': 2}, 5.0)
def test_divide_angle(self):
self.create_node('DivideAngle', {'input1': 10.0, 'input2': 2.0}, 5.0)
def test_divide_angle_by_int(self):
self.create_node('DivideAngleByInt', {'input1': 10.0, 'input2': 2}, 5.0)
def test_modulus_int(self):
self.create_node('ModulusInt', {'input1': 5, 'input2': 4}, 1)
```
#### File: maya-math-nodes/tests/test_interpolate.py
```python
from node_test_case import NodeTestCase, cmds
class TestInterpolate(NodeTestCase):
def test_lerp(self):
self.create_node('Lerp', {'input1': 0.0, 'input2': 10.0, 'alpha': 0.5}, 5.0)
def test_lerp_angle(self):
self.create_node('LerpAngle', {'input1': 0.0, 'input2': 10.0, 'alpha': 0.5}, 5.0)
def test_lerp_vector(self):
self.create_node('LerpVector',
{'input1': [0.0, 0.0, 0.0], 'input2': [10.0, 10.0, 10.0], 'alpha': 0.5},
[5.0, 5.0, 5.0])
def test_slerp_quaternion(self):
node = self.create_node('SlerpQuaternion',
{'input1': [0.0, 0.0, 0.0, 1.0], 'input2': [1.0, 0.0, 0.0, 0.0], 'alpha': 0.5},
[0.7071, 0.0, 0.0, 0.7071])
cmds.setAttr('{0}.{1}'.format(node, 'interpolationType'), 1)
self.assertItemsAlmostEqual(cmds.getAttr('{0}.output'.format(node))[0], [-0.7071, 0.0, 0.0, 0.7071], 4)
def test_lerp_matrix(self):
matrix1 = [1.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 1.0, 0.0,
10.0, -10.0, 10.0, 1.0]
matrix2 = [-1.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, -1.0, 0.0,
5.0, -5.0, 5.0, 1.0]
out_matrix = [0.0, 0.0, -1.0, 0.0,
0.0, 1.0, 0.0, 0.0,
1.0, 0.0, 0.0, 0.0,
7.5, -7.5, 7.5, 1.0]
self.create_node('LerpMatrix', {'input1': matrix1, 'input2': matrix2, 'alpha': 0.5}, out_matrix)
```
#### File: maya-math-nodes/tests/test_inverse.py
```python
from node_test_case import NodeTestCase
class TestInverse(NodeTestCase):
def test_inverse_matrix(self):
matrix = [0.0, 0.0, -1.0, 0.0,
0.0, 1.0, 0.0, 0.0,
1.0, 0.0, 0.0, 0.0,
5.0, -5.0, 5.0, 1.0]
inverse_matrix = [0.0, 0.0, 1.0, 0.0,
0.0, 1.0, 0.0, 0.0,
-1.0, 0.0, 0.0, 0.0,
5.0, 5.0, -5.0, 1.0]
self.create_node('InverseMatrix', {'input': matrix}, inverse_matrix)
def test_inverse_quaternion(self):
self.create_node('InverseQuaternion', {'input': [1.0, 0.0, 0.0, 0.0]}, [-1.0, 0.0, 0.0, 0.0])
def test_inverse_rotation(self):
self.create_node('InverseRotation', {'input': [45.0, 0.0, -15.0]}, [-45.0, 0.0, 15.0])
```
#### File: maya-math-nodes/tests/test_negate.py
```python
from node_test_case import NodeTestCase
class TestNegate(NodeTestCase):
def test_negate(self):
self.create_node('Negate', {'input': 2.0}, -2.0)
def test_negate_int(self):
self.create_node('NegateInt', {'input': 2}, -2)
def test_negate_angle(self):
self.create_node('NegateAngle', {'input': 2.0}, -2.0)
def test_negate_vector(self):
self.create_node('NegateVector', {'input': [1.0, -1.0, 0.0]}, [-1.0, 1.0, 0.0])
def test_not_bool(self):
self.create_node('NotBool', {'input': True}, False)
```
#### File: maya-math-nodes/tests/test_subtract.py
```python
from node_test_case import NodeTestCase
class TestSubtract(NodeTestCase):
def test_subtract(self):
self.create_node('Subtract', {'input1': 10.0, 'input2': 5.0}, 5.0)
def test_subtract_int(self):
self.create_node('SubtractInt', {'input1': 10, 'input2': 5}, 5)
def test_subtract_angle(self):
self.create_node('SubtractAngle', {'input1': 10.0, 'input2': 5.0}, 5.0)
def test_subtract_vector(self):
self.create_node('SubtractVector', {'input1': [5.0, 5.0, 5.0], 'input2': [2.0, 2.0, 2.0]}, [3.0, 3.0, 3.0])
``` |
{
"source": "josemonsalve2/PIMC",
"score": 2
} |
#### File: python/API_operaciones/pimcAPI.py
```python
from API_operaciones.insertar import insertarNuevoElemento
from API_operaciones.consulta import consultarElemento
from API_operaciones.consulta import consultarTodosFiltro
from API_operaciones.consulta import consultarTodosFiltroAvanzado
from API_operaciones.eliminar import eliminarElemento
from API_operaciones.modificar import modificarElemento
from API_operaciones.files import\
cargarArchivos,\
archivosElementoRelacional,\
descargarAchivoElementoRelacional,\
eliminarArchivoElementoRelacional,\
renombrarArchivoElementoRelacional
class pimc:
    def insertarNuevoElemento(self, elementoRelacional, parametrosJSON):
        return insertarNuevoElemento(elementoRelacional, parametrosJSON)
    def consultarElemento(self, elementoRelacional, parametrosJSON):
        return consultarElemento(elementoRelacional, parametrosJSON)
    def consultarTodosFiltro(self, elementoRelacional, parametrosJSON):
        return consultarTodosFiltro(elementoRelacional, parametrosJSON)
    def consultarTodosFiltroAvanzado(self, elementoRelacional, parametrosJSON):
        return consultarTodosFiltroAvanzado(elementoRelacional, parametrosJSON)
    def eliminarElemento(self, elementoRelacional, parametrosJSON):
        return eliminarElemento(elementoRelacional, parametrosJSON)
    def modificarElemento(self, elementoRelacional, parametrosJSON):
        return modificarElemento(elementoRelacional, parametrosJSON)
    # Definiciones para archivos
    def cargarArchivos(self, elementoRelacional, parametrosPOST):
        return cargarArchivos(elementoRelacional, parametrosPOST)
    def archivosElementoRelacional(self, elementoRelacional, parametrosJSON):
        return archivosElementoRelacional(elementoRelacional, parametrosJSON)
    def descargarAchivoElementoRelacional(self, elementoRelacional, parametrosJSON):
        return descargarAchivoElementoRelacional(elementoRelacional, parametrosJSON)
    def eliminarArchivoElementoRelacional(self, elementoRelacional, parametrosJSON):
        return eliminarArchivoElementoRelacional(elementoRelacional, parametrosJSON)
    def renombrarArchivoElementoRelacional(self, elementoRelacional, parametrosJSON):
        return renombrarArchivoElementoRelacional(elementoRelacional, parametrosJSON)
def autocompletarConsulta(self):
return
pimcAPI = pimc()
```
#### File: python/auth/correos.py
```python
from flask_mail import Message
from flask import render_template
from API_operaciones import mysql_connection
mail = mysql_connection.mail
def enviarCorreoConfirmacion(correoElectronico, nombreReal, nombreUsuario):
msg = Message("[PIMCD] Usuario creado")
msg.sender = "Fundacion Pro<NAME> <<EMAIL>>"
msg.recipients = [correoElectronico]
msg.bcc = ["Fundacion Proyecto Navio <<EMAIL>>"]
msg.html= render_template("confirmacionRegistro.html", nombreUsuario=nombreUsuario, nombreReal=nombreReal)
mail.send(msg)
def enviarCorreoActivacion(correoElectronico, nombreReal, nombreUsuario):
msg = Message("[PIMCD] Usuario activado")
msg.sender = "Fundacion Proyecto Navio <<EMAIL>>"
msg.recipients = [correoElectronico]
msg.bcc = ["Fundacion Proyecto Navio <<EMAIL>>"]
msg.html= render_template("confirmacionActivacion.html", nombreUsuario=nombreUsuario, nombreReal=nombreReal)
mail.send(msg)
def enviarCorreoActualizacion(correoElectronico, nombreReal, nombreUsuario, viejoNivel, nuevoNivel):
msg = Message("[PIMCD] Usuario actualizado")
msg.sender = "Fund<NAME> <<EMAIL>>"
msg.recipients = [correoElectronico]
msg.bcc = ["Fundacion Proyecto Navio <<EMAIL>>"]
msg.html = render_template("confirmacionActualizacion.html", nombreUsuario=nombreUsuario, nombreReal=nombreReal, nivelAnterior=viejoNivel, nivelActual=nuevoNivel)
mail.send(msg)
```
#### File: PIMC/python/pimcAPI_v02.py
```python
import traceback
from flask import Flask, request, jsonify, send_from_directory
from flask_jwt import JWT, jwt_required, current_identity
from API_operaciones import mysql_connection
from API_operaciones import pimcAPI
from auth import authentication
from tools.invalidUsage import InvalidUsage
pimc = pimcAPI.pimcAPI
jwt = authentication.jwt
app = mysql_connection.app
@app.route("/PIMC0.2/Insertar/<elemento_relacional>", methods=['POST'])
def insertarElementoRelacionalPIMC0_2(elemento_relacional):
if request.method == 'POST':
data = request.get_json()
if data:
try:
return jsonify(pimcAPI.insertarNuevoElemento(elemento_relacional, data))
except ValueError as e:
raise InvalidUsage("ERROR: " + str(e), status_code = 400)
except Exception as e:
raise InvalidUsage("ERROR: " + traceback.format_exc(), status_code = 400)
else:
raise InvalidUsage('No se enviaron argumentos', status_code = 400)
else:
return ""
@app.route("/PIMC0.2/Consulta/<elemento_relacional>", methods=['GET'])
def consultarElementoRelacionalPIMC0_2(elemento_relacional):
if request.method == 'GET':
data = request.args.to_dict()
if data:
try:
return jsonify(pimcAPI.consultarElemento(elemento_relacional, data))
except ValueError as e:
raise InvalidUsage("ERROR: " + str(e), status_code = 400)
except Exception as e:
raise InvalidUsage("ERROR: " + traceback.format_exc(), status_code = 400)
else:
raise InvalidUsage('No se enviaron argumentos', status_code = 400)
else:
return ""
@app.route("/PIMC0.2/Eliminar/<elemento_relacional>", methods=['DELETE'])
def eliminarElementoRelacionalPIMC0_2(elemento_relacional):
if request.method == 'DELETE':
data = request.args.to_dict()
if data:
try:
return jsonify(pimcAPI.eliminarElemento(elemento_relacional, data))
except ValueError as e:
raise InvalidUsage("ERROR: " + str(e), status_code = 400)
except Exception as e:
raise InvalidUsage("ERROR: " + traceback.format_exc(), status_code = 400)
else:
raise InvalidUsage('No se enviaron argumentos', status_code = 400)
else:
return ""
@app.route("/PIMC0.2/Modificar/<elemento_relacional>", methods=['POST'])
def modificarElementoRelacionalPIMC0_2(elemento_relacional):
if request.method == 'POST':
data = request.get_json()
if data:
try:
return jsonify(pimcAPI.modificarElemento(elemento_relacional, data))
except ValueError as e:
raise InvalidUsage("ERROR: " + str(e), status_code = 400)
except Exception as e:
raise InvalidUsage("ERROR: " + traceback.format_exc(), status_code = 400)
else:
raise InvalidUsage('No se enviaron argumentos', status_code = 400)
else:
return ""
@app.route("/PIMC0.2/ConsultarTodos/<elemento_relacional>", methods=['GET'])
def consultarTodosFiltroPIMC0_2(elemento_relacional):
if request.method == 'GET':
data = request.args.to_dict()
if not data:
data = {}
try:
return jsonify(pimcAPI.consultarTodosFiltro(elemento_relacional, data))
except ValueError as e:
raise InvalidUsage("ERROR: " + str(e), status_code = 400)
except Exception as e:
raise InvalidUsage("ERROR: " + traceback.format_exc(), status_code = 400)
else:
return ""
@app.route("/PIMC0.2/ConsultarTodosAvanzado/<elemento_relacional>", methods=['POST'])
def consultarTodosFiltroAvanzaodPIMC0_2(elemento_relacional):
if request.method == 'POST':
data = request.get_json()
if not data:
data = {}
try:
return jsonify(pimcAPI.consultarTodosFiltroAvanzado(elemento_relacional, data))
except ValueError as e:
raise InvalidUsage("ERROR: " + str(e), status_code = 400)
except Exception as e:
raise InvalidUsage("ERROR: " + traceback.format_exc(), status_code = 400)
else:
return ""
@app.route("/PIMC0.2/cargarArchivos/<elemento_relacional>", methods=['POST'])
def cargarArchivosPIMC0_2(elemento_relacional):
if request.method == 'POST':
data = request
try:
if not data:
raise ValueError("No se envio ningun archivo")
return jsonify(pimcAPI.cargarArchivos(elemento_relacional, data))
except ValueError as e:
raise InvalidUsage("ERROR: " + str(e), status_code = 400)
except Exception as e:
raise InvalidUsage("ERROR: " + traceback.format_exc(), status_code = 400)
else:
return ""
@app.route("/PIMC0.2/listaArchivos/<elemento_relacional>", methods=['GET'])
def listaArchivosPIMC0_2(elemento_relacional):
if request.method == 'GET':
data = request.args.to_dict()
if not data:
data = {}
try:
return jsonify(pimcAPI.archivosElementoRelacional(elemento_relacional, data))
except ValueError as e:
raise InvalidUsage("ERROR: " + str(e), status_code = 400)
except Exception as e:
raise InvalidUsage("ERROR: " + traceback.format_exc(), status_code = 400)
else:
return ""
@app.route("/PIMC0.2/descargarArchivo/<elemento_relacional>", methods=['GET'])
def descargarArchivosPIMC0_2(elemento_relacional):
if request.method == 'GET':
data = request.args.to_dict()
if not data:
data = {}
try:
archivoAEnviar = pimcAPI.descargarAchivoElementoRelacional(elemento_relacional, data)
return send_from_directory(archivoAEnviar['directorio'], archivoAEnviar['nombreArchivo'], as_attachment=True)
except ValueError as e:
raise InvalidUsage("ERROR: " + str(e), status_code = 400)
except Exception as e:
raise InvalidUsage("ERROR: " + traceback.format_exc(), status_code = 400)
else:
return ""
@app.route("/PIMC0.2/eliminarArchivo/<elemento_relacional>", methods=['GET'])
def eliminarArchivosPIMC0_2(elemento_relacional):
if request.method == 'GET':
data = request.args.to_dict()
if not data:
data = {}
try:
return jsonify(pimcAPI.eliminarArchivoElementoRelacional(elemento_relacional, data))
except ValueError as e:
raise InvalidUsage("ERROR: " + str(e), status_code = 400)
except Exception as e:
raise InvalidUsage("ERROR: " + traceback.format_exc(), status_code = 400)
else:
return ""
@app.route("/PIMC0.2/renombrarArchivo/<elemento_relacional>", methods=['GET'])
def renombrarArchivosPIMC0_2(elemento_relacional):
if request.method == 'GET':
data = request.args.to_dict()
if not data:
data = {}
try:
return jsonify(pimcAPI.renombrarArchivoElementoRelacional(elemento_relacional, data))
except ValueError as e:
raise InvalidUsage("ERROR: " + str(e), status_code = 400)
except Exception as e:
raise InvalidUsage("ERROR: " + traceback.format_exc(), status_code = 400)
else:
return ""
``` |
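Every route above raises `InvalidUsage` on failure; turning that into a JSON response requires an error handler that is not shown in this file. A minimal sketch, assuming `tools/invalidUsage.py` follows Flask's documented "Implementing API Exceptions" recipe (message and status_code attributes):
```python
from flask import jsonify

@app.errorhandler(InvalidUsage)
def handle_invalid_usage(error):
    # Attribute names are assumptions based on how InvalidUsage is constructed above.
    payload = {"error": getattr(error, "message", str(error))}
    response = jsonify(payload)
    response.status_code = getattr(error, "status_code", 400)
    return response
```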
{
"source": "josemonsalve2/projectsCV",
"score": 2
} |
#### File: www/python/ParserServer.py
```python
from bottle import route, run, template, hook, response
import ParserExito as PE
import simplejson as json
@hook('after_request')
def enable_cors():
response.headers['Access-Control-Allow-Origin'] = '*'
@route('/hello/<name>')
def index(name):
return template('<b>Hello {{name}}</b>!', name=name)
@route('/exito/fp')
def exito():
url_exito_fp="https://www.exito.com"
#PE.parse_exito_fp(url_exito_fp)
exito_parsed=PE.parse_exito_fp(url_exito_fp)
if (exito_parsed is None):
print("ERROR!! jsonExito is NULL\n")
jsonExito = json.dumps(exito_parsed,indent=4)
return jsonExito
@route('/exito/matress')
def exito_matress():
url_exito_matress="http://www.exito.com/Hogar_y_decoracion-Dormitorio-Colchones/_/N-2cnf"
#PE.parse_exito_fp(url_exito_fp)
exito_parsed=PE.parse_exito_matress(url_exito_matress)
if (exito_parsed is None):
print("ERROR!! jsonExito is NULL\n")
jsonExito = json.dumps(exito_parsed,indent=4)
return jsonExito
run(host='192.168.10.101', port=12345)
``` |
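A client-side sketch of hitting the bottle server above (host and port taken from the `run()` call; it assumes the server is reachable and that ParserExito returned data):
```python
import requests

resp = requests.get("http://192.168.10.101:12345/exito/fp", timeout=10)
resp.raise_for_status()
productos = resp.json()  # the JSON produced by the /exito/fp route
print(type(productos))
```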
{
"source": "jose-moralez/window_ops",
"score": 3
} |
#### File: window_ops/window_ops/utils.py
```python
__all__ = ['first_not_na']
# Internal Cell
from math import sqrt
from typing import Optional, Tuple
import numpy as np
from numba import njit # type: ignore
# Internal Cell
@njit
def _validate_rolling_sizes(window_size: int,
min_samples: Optional[int] = None) -> Tuple[int,int]:
# have to split the following if because of numba
if min_samples is None:
min_samples = window_size
if min_samples > window_size:
min_samples = window_size
return window_size, min_samples
@njit
def _gt(x: float, y: float) -> float:
return x - y
@njit
def _lt(x: float, y: float) -> float:
return -_gt(x, y)
# Cell
@njit
def first_not_na(input_array: np.ndarray) -> int:
"""Returns the index of the first non-na value in the array."""
for index, element in enumerate(input_array):
if not np.isnan(element):
return index
return input_array.size
``` |
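A short usage check for `first_not_na`, assuming the package is installed so that `window_ops.utils` resolves to the module above:
```python
import numpy as np
from window_ops.utils import first_not_na

print(first_not_na(np.array([np.nan, np.nan, 3.0, 4.0])))  # 2 -> index of the first valid value
print(first_not_na(np.array([np.nan, np.nan])))            # 2 -> equals the size when all values are NaN
```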
{
"source": "josemorenoo/tracker",
"score": 2
} |
#### File: tracker/scripts/runner.py
```python
import boto3
from datetime import datetime, timedelta
import random
import time
import yaml
import sys
import traceback
from scripts.paths import RUNTIME_PATHS
import scripts.reporter.periodic_report as periodic_report
import scripts.twitter.post_to_twitter as post
import scripts.twitter.twitter_graphs as graphs
YESTERDAY = datetime.today() - timedelta(hours=24)
def make_report(report_date, mode="DAILY", make_raw_report=True, make_summary_report=True):
"""runs the daily report for today"""
periodic_report.run(report_date, mode, make_raw_report=make_raw_report, make_summary_report=make_summary_report)
def post_loc_chart(post_to_twitter=True, mode="DAILY", day=YESTERDAY):
"""
creates and posts graph
"""
graphs.create_top_by_loc_graph(day, mode=mode)
if post_to_twitter:
post.loc_chart(day, mode=mode)
def post_authors_chart(post_to_twitter=True, mode="DAILY", day=YESTERDAY):
graphs.create_top_by_num_authors_graph(day, mode=mode)
if post_to_twitter:
post.authors_chart(day, mode=mode)
def post_commits_chart(post_to_twitter=True, mode="DAILY", day=YESTERDAY):
graphs.create_top_commits_daily_graph(day, mode=mode)
if post_to_twitter:
post.top_commits_chart(day, mode=mode)
def randomize_and_post(funcs, delay_secs, post_to_twitter=True, mode="DAILY", day=YESTERDAY):
random_order_funcs = random.sample(funcs, len(funcs))
for f in random_order_funcs:
f(post_to_twitter, mode=mode, day=day)
time.sleep(delay_secs)
def make_report_and_post_all_charts(run_report=True,
post_to_twitter=True,
mode="DAILY",
delay_secs=30,
day=YESTERDAY,
make_raw_report=True,
make_summary_report=True):
"""
Creates daily report and posts all the graphs
"""
assert mode in ["WEEKLY", "DAILY"]
if mode=="WEEKLY":
day = day - timedelta(days=7)
if run_report:
make_report(
report_date=day,
mode=mode,
make_raw_report=make_raw_report,
make_summary_report=make_summary_report
)
randomize_and_post(funcs=[
post_loc_chart,
post_authors_chart,
post_commits_chart
],
delay_secs = delay_secs,
post_to_twitter=post_to_twitter,
mode=mode,
day=day)
def send_text_alert_to_admin(job_failed: bool, error: str):
try:
from scripts.keys import KEYS
session = boto3.Session(region_name='us-west-1', aws_secret_access_key=KEYS['secret'], aws_access_key_id=KEYS['key'])
ses = session.client('ses')
response = ses.send_email(
Source='<EMAIL>',
Destination={
'ToAddresses': [
'<EMAIL>',
]
},
Message={
'Subject': {
'Data': f"coincommit twitter job {'failed, check ec2' if job_failed else 'succeeded, check twitter'}",
'Charset': 'UTF-8'
},
'Body': {
'Text': {
'Data': 'This is text mail',
'Charset': 'UTF-8'
},
'Html': {
'Data': f'<h1>{error}</h1>',
'Charset': 'UTF-8'
}
}
}
)
print("emailing burner account")
except ImportError:
print('no AWS keys, no text sent')
if __name__ == "__main__":
args = sys.argv
if len(args) > 1:
runtime = args[1]
assert runtime in RUNTIME_PATHS
else:
runtime = 'DAILY_RUNTIME'
# get config values for this runtime (daily, weekly, etc)
with open(RUNTIME_PATHS[runtime], 'r') as file:
config = yaml.safe_load(file)
run_report: bool = config['run_report']
post_to_twitter: bool = config['post_to_twitter']
mode: str = config['mode']
delay_secs: int =config['delay_secs']
make_raw_report: bool =config['make_raw_report']
make_summary_report: bool =config['make_summary_report']
print(*config.items(), sep="\n")
# run everything
try:
make_report_and_post_all_charts(
run_report=run_report,
post_to_twitter=post_to_twitter,
mode=mode,
delay_secs=delay_secs,
make_raw_report=make_raw_report,
make_summary_report=make_summary_report)
print("\n\nDONE, SUCCESS\n\n")
send_text_alert_to_admin(job_failed=False, error='you good my man')
except Exception as e:
print(traceback.format_exc())
send_text_alert_to_admin(job_failed=True, error=traceback.format_exc())
```
#### File: scripts/sandbox/test_pydriller.py
```python
from pathlib import Path
from pydriller import Repository, Git
from datetime import datetime, time
import tempfile
from webapp.commit_handler import CommitHandler
def commit_test():
for commit in Repository('https://github.com/Loopring/loopring-explorer', since=datetime(2022, 2, 11)).traverse_commits():
print(f"got one: {commit.hash}, {commit.msg}")
ch = CommitHandler()
tmp_dir = tempfile.TemporaryDirectory()
tmp_dir_path = Path(tmp_dir.name)
tmp_dir_path.mkdir(parents=True, exist_ok=True)
tmp_dir_str = str(tmp_dir_path.resolve())
ch.create_commit(commit, tmp_dir_str)
tmp_dir.cleanup()
if __name__ == "__main__":
authors = []
for commit in Repository('https://github.com/Loopring/loopring-explorer', since=datetime(2021, 3, 1)).traverse_commits():
authors.append(commit.author.name)
print(set(authors))
```
#### File: tracker/scripts/scheduler.py
```python
from datetime import datetime, timedelta
from apscheduler.schedulers.background import BackgroundScheduler
import random
import time
import yaml
from scripts.paths import RUNTIME_PATHS
import scripts.reporter.periodic_report as periodic_report
import scripts.twitter.post_to_twitter as post
import scripts.twitter.twitter_graphs as graphs
YESTERDAY = datetime.today() - timedelta(hours=24)
def make_report(report_date, mode="DAILY", make_raw_report=True, make_summary_report=True):
"""runs the daily report for today"""
periodic_report.run(report_date, mode, make_raw_report=make_raw_report, make_summary_report=make_summary_report)
def post_loc_chart(post_to_twitter=True, mode="DAILY", day=YESTERDAY):
"""
    Creates the top-by-LOC graph and optionally posts it to Twitter.
"""
graphs.create_top_by_loc_graph(day, mode=mode)
if post_to_twitter:
post.loc_chart(day, mode=mode)
def post_authors_chart(post_to_twitter=True, mode="DAILY", day=YESTERDAY):
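    """Creates the distinct-authors graph and optionally posts it to Twitter."""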
graphs.create_top_by_num_authors_graph(day, mode=mode)
if post_to_twitter:
post.authors_chart(day, mode=mode)
def post_commits_chart(post_to_twitter=True, mode="DAILY", day=YESTERDAY):
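    """Creates the top-commits graph and optionally posts it to Twitter."""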
graphs.create_top_commits_daily_graph(day, mode=mode)
if post_to_twitter:
post.top_commits_chart(day, mode=mode)
def randomize_and_post(funcs, delay_secs, post_to_twitter=True, mode="DAILY", day=YESTERDAY):
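    """Calls the given chart-posting functions in random order, sleeping delay_secs after each one."""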
random_order_funcs = random.sample(funcs, len(funcs))
for f in random_order_funcs:
f(post_to_twitter, mode=mode, day=day)
time.sleep(delay_secs)
def make_report_and_post_all_charts(run_report=True,
post_to_twitter=True,
mode="DAILY",
delay_secs=30,
day=YESTERDAY,
make_raw_report=True,
make_summary_report=True):
"""
Creates daily report and posts all the graphs
"""
assert mode in ["WEEKLY", "DAILY"]
if mode=="WEEKLY":
day = day - timedelta(days=7)
if run_report:
make_report(
report_date=day,
mode=mode,
make_raw_report=make_raw_report,
make_summary_report=make_summary_report
)
randomize_and_post(funcs=[
post_loc_chart,
post_authors_chart,
post_commits_chart
],
delay_secs = delay_secs,
post_to_twitter=post_to_twitter,
mode=mode,
day=day)
def show_jobs(sched):
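    """Prints the name, trigger, next run time and handler of every job registered on the scheduler."""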
#print(f"\n\njobs: {len(sched.get_jobs())}\n")
for job in sched.get_jobs():
print("\nname: %s, trigger: %s, next run: %s, handler: %s" % (
job.name, job.trigger, job.next_run_time, job.func))
if __name__ == "__main__":
# get config values for this runtime (daily, weekly, etc)
with open(RUNTIME_PATHS['DAILY_RUNTIME'], 'r') as file:
config = yaml.safe_load(file)
run_report: bool = config['run_report']
post_to_twitter: bool = config['post_to_twitter']
mode: str = config['mode']
    delay_secs: int = config['delay_secs']
    make_raw_report: bool = config['make_raw_report']
    make_summary_report: bool = config['make_summary_report']
# run everything
make_report_and_post_all_charts(
run_report=run_report,
post_to_twitter=post_to_twitter,
mode=mode,
delay_secs=delay_secs,
make_raw_report=make_raw_report,
make_summary_report=make_summary_report)
```
#### File: scripts/twitter/file_extension_supplement.py
```python
from PIL import Image
from typing import List
from assets.file_extension_imgs.file_extensions import FILE_EXTENSIONS
import scripts.reporter.report_util as report_util
def add_ext_imgs_to_graph(bar_graph_img, bar_percentages: List[float], tokens_represented_in_graph: List[str], report_date, mode, top_n = 5):
"""
    extensions is a list of lists of file extensions; each sublist represents one bar,
    ordered from top to bottom. Only the top_n (default 5) extensions are drawn on each bar.
"""
get_ext_for_token = lambda token: [ext['extension'] for ext in report_util.get_file_extension_breakdown_from_summary_report(token, report_date, mode, verbose=False)]
extensions: List[List[str]] = [get_ext_for_token(token) for token in tokens_represented_in_graph][::-1]
for bar_idx in range(len(extensions)):
num_ext_for_this_bar = len(extensions[bar_idx])
if num_ext_for_this_bar < top_n:
num_ext_to_add = num_ext_for_this_bar
else:
num_ext_to_add = top_n
logo_idx = 0
skipped = 0
while logo_idx < num_ext_to_add:
ext_name = extensions[bar_idx][logo_idx]
if ext_name not in FILE_EXTENSIONS:
print(f"Warning: file extension missing from images: {ext_name}, skipping")
skipped += 1
logo_idx += 1
continue
else:
ext_img_path = FILE_EXTENSIONS[ext_name]['path']
bar_graph_img = combine_graph_and_ext_img(
bar_graph_img,
ext_img = Image.open(ext_img_path),
bar_from_top = bar_idx,
logo_idx = logo_idx - skipped,
top_n=top_n,
bar_percentage=bar_percentages[bar_idx])
logo_idx += 1
return bar_graph_img
def combine_graph_and_ext_img(bar_graph_img, ext_img, bar_from_top: int, logo_idx: int, top_n: int, bar_percentage: float):
"""
Combine a bar graph and a SINGLE file extension img.
This gets called in a loop multiple times.
    bar_from_top is the zero-indexed position of the bar, counting from the top.
    logo_idx is the zero-indexed position of the logo within a single bar.
"""
FILE_EXT_WIDTH_PAD = 20
BAR_VERTICAL_PAD = 32
# initialize canvas using bar graph
canvas = Image.new('RGB', (bar_graph_img.width, bar_graph_img.height))
canvas.paste(bar_graph_img, (0, 0))
    # resize ext image and paste it onto the canvas
ext_img = ext_img.resize((17, 17), resample=Image.BICUBIC)
# these are the coordinates for the first bar, first position on the left
if bar_percentage < 0.25:
SCALING_FACTOR = 25 # how far right to move the ext images so they don't overlap with bar
left_offset = 134 + int(bar_percentage * SCALING_FACTOR * FILE_EXT_WIDTH_PAD)
else:
left_offset = 134
top_offset = 108
left_offset += logo_idx * FILE_EXT_WIDTH_PAD
top_offset += bar_from_top * BAR_VERTICAL_PAD
canvas.paste(ext_img, (left_offset, top_offset))
return canvas
if __name__ == '__main__':
pass
```
#### File: scripts/twitter/post_to_twitter.py
```python
import tweepy
from config.lockbox import TWITTER_KEYS
import scripts.reporter.report_util as util
from scripts.twitter.graph_names import GRAPH_NAMES
DAILY_REPORTS_PATH = "reports/daily"
def setup_api():
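    """Returns a tweepy API client authenticated with the keys from config.lockbox."""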
consumer_key = TWITTER_KEYS['consumer_key']
consumer_secret = TWITTER_KEYS['consumer_secret']
access_key = TWITTER_KEYS['access_key']
access_secret = TWITTER_KEYS['access_secret']
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_key, access_secret)
api = tweepy.API(auth)
return api
def generate_tweet_text(report_date, metric, mode="DAILY"):
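    """Builds the tweet text for a metric: a headline plus one hashtag per token in the summary report.
    Assumes mode is DAILY or WEEKLY and metric is one of the three supported keys."""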
summary_report_dict = util.get_summary_report(report_date, mode)
token_hashtags = " ".join([f"#{each['token']}" for each in summary_report_dict[metric]])
if mode == "DAILY":
when = 'today'
if mode == "WEEKLY":
when = 'this week'
if metric == "top_by_num_commits":
status = f"Most active #crypto projects by #github commits {when} 👨💻\n\n"
if metric == "top_by_num_distinct_authors":
status = f"#crypto projects with most active developers {when} 👩💻\n\n"
if metric == "top_by_new_lines":
status = f"Most active #crypto project by new lines of code {when} 📈\n\n"
return status + token_hashtags
def post_chart_tweet(api, img_path, tweet_text):
"""this ones actually posts the tweet"""
print(f"posting img: {img_path}")
print(tweet_text)
api.update_status_with_media(status=tweet_text, filename=img_path)
def loc_chart(report_date, mode="DAILY"):
api = setup_api()
top_loc_img_path = f'reports/{mode.lower()}/{report_date.strftime("%Y-%m-%d")}/{GRAPH_NAMES["LOC_AND_EXT"]}'
loc_tweet_text = generate_tweet_text(report_date, "top_by_new_lines", mode=mode)
post_chart_tweet(api, top_loc_img_path, loc_tweet_text)
def authors_chart(report_date, mode="DAILY"):
api = setup_api()
top_distinct_authors_img_path = f'reports/{mode.lower()}/{report_date.strftime("%Y-%m-%d")}/{GRAPH_NAMES["AUTHORS_AND_EXT"]}'
authors_tweet_text = generate_tweet_text(report_date, "top_by_num_distinct_authors", mode=mode)
post_chart_tweet(api, top_distinct_authors_img_path, authors_tweet_text)
def top_commits_chart(report_date, mode="DAILY"):
api = setup_api()
top_commits_img_path = f'reports/{mode.lower()}/{report_date.strftime("%Y-%m-%d")}/{GRAPH_NAMES["COMMITS_AND_EXT"]}'
commits_tweet_text = generate_tweet_text(report_date, "top_by_num_commits", mode=mode)
post_chart_tweet(api, top_commits_img_path, commits_tweet_text)
if __name__ == "__main__":
pass
``` |
{
"source": "josemrsantos/zoopla_datamart",
"score": 2
} |
#### File: datamart/tests/test_Dimension.py
```python
from ..datamart import *
def test_create_dimension():
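    ''' a newly created dimension is not degenerate
    '''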
dimension = Dimension("test_dimension")
assert dimension.is_degenerate == False
def test_create_dimension_insert_2_identical_lines():
''' with 2 identical lines, only one gets stored
'''
dimension = Dimension("test_dimension")
dimension.addDimensionLine('test')
dimension.addDimensionLine('test')
assert dimension.id_value == 1
assert len(list(dimension.values)) == 1
def test_create_dimension_insert_2_identical_lines_and_1_different():
''' with 2 identical lines and one different, only 2 get stored
'''
dimension = Dimension("test_dimension")
dimension.addDimensionLine('test')
dimension.addDimensionLine('test2')
dimension.addDimensionLine('test')
assert dimension.id_value == 2
assert len(list(dimension.values)) == 2
```
#### File: josemrsantos/zoopla_datamart/extract.py
```python
import logging
import urllib.request
import json
import sys
import pickledb
import os.path
import time
import psycopg2
import datamart
from datamart.datamart import *
import settings
def set_price(str_value):
    ''' Bins the price into a labelled range:
    e.g. "100" stands for the [100, 199] range.
    If the value is not a valid integer, returns None.
'''
try:
value=int(str_value)
except:
return None
if value<200:
return "100"
elif value<300:
return "200"
elif value<400:
return "300"
elif value<500:
return "400"
elif value<600:
return "500"
elif value<700:
return "600"
elif value<800:
return "700"
elif value<900:
return "800"
elif value<1000:
return "900"
return "1000"
def main(argv):
    ''' Get all house data from zoopla and put it on the DB (DM star schema)
'''
#### EXTRACT
print "start EXTRACT"
page_number = "1"
page_size = "100"
max_page = 3
key = settings.key
area = "London"
listing_status = "rent"
property_type = "houses"
minimum_price = "10"
data = []
for page_number in range(1,max_page):
url="http://api.zoopla.co.uk/api/v1/property_listings.json?area="+area+"&api_key="+key+"&page_size="+page_size+"&page_number="+str(page_number)+"&listing_status="+listing_status+"&order_by=age&property_type="+property_type+"&minimum_price="+minimum_price
        data_url = urllib.request.urlopen(url)
data_raw = data_url.read()
json_data = json.loads(data_raw)
        if 'listing' in json_data:
data += json_data['listing']
### TRANSFORM
print "start TRANSFORM"
dm_rent = DataMart(settings.connect_str)
dim_listing_id = DegenerateDimension("listing_id")
dim_num_floors = Dimension("dim_num_floors")
dim_price_range = Dimension("dim_price_range")
dim_property_type = Dimension("dim_property_type")
dim_agent_name = Dimension("dim_agent_name")
dim_num_bedrooms = Dimension("dim_num_bedrooms")
dim_num_bathrooms = Dimension("dim_num_bathrooms")
fact_rent_house = Fact("fact_rent_house")
fact_rent_house.addDimension(dim_listing_id)
fact_rent_house.addDimension(dim_num_floors)
fact_rent_house.addDimension(dim_num_bathrooms)
fact_rent_house.addDimension(dim_price_range)
fact_rent_house.addDimension(dim_property_type)
fact_rent_house.addDimension(dim_agent_name)
fact_rent_house.addDimension(dim_num_bedrooms)
dm_rent.addFact(fact_rent_house)
for l in data:
fact_rent_house.addFactLine((l['listing_id'], l['num_floors'], l['num_bathrooms'], set_price(l['price']), l['property_type'], l['agent_name'], l['num_bedrooms']))
### LOAD
print "start LOAD"
dm_rent.LoadDB()
# Call main if not imported
if __name__ == "__main__":
main(sys.argv)
``` |
{
"source": "Jose-MRS/findysport",
"score": 2
} |
#### File: findysport/findysport_app/views.py
```python
from django.shortcuts import render
from django.urls import reverse_lazy
from django.views.generic.edit import CreateView, DeleteView, UpdateView
from django.views.generic import ListView, DetailView
from findysport_app.models import Usuario, Actividad, Local, Encargado, Apuntado, Grupo
from django.contrib.auth.mixins import LoginRequiredMixin
def index(request):
return render(request, 'index.html')
class UsuarioListView(ListView):
model = Usuario
class ActividadListView(ListView):
model = Actividad
class LocalListView(ListView):
model = Local
class EncargadoListView(ListView):
model = Encargado
class ApuntadoListView(ListView):
model = Apuntado
class GrupoListView(ListView):
model = Grupo
class UsuarioDetailView(DetailView):
context_object_name = 'usuario'
queryset = Usuario.objects.all()
class ActividadDetailView(DetailView):
context_object_name = 'actividad'
queryset = Actividad.objects.all()
class LocalDetailView(DetailView):
context_object_name = 'local'
queryset = Local.objects.all()
class EncargadoDetailView(DetailView):
context_object_name = 'encargado'
queryset = Encargado.objects.all()
class ApuntadoDetailView(DetailView):
context_object_name = 'apuntado'
queryset = Apuntado.objects.all()
class GrupoDetailView(DetailView):
context_object_name = 'grupo'
queryset = Grupo.objects.all()
#----------- CREATE / UPDATE / DELETE VIEWS -----------------
class ActividadCreateView(CreateView):
model = Actividad
fields = ['nombre', 'local', 'tipo', 'descripcion', 'creador']
success_url = reverse_lazy('actividad-list')
class ActividadUpdateView(UpdateView):
model = Actividad
fields = ['nombre', 'local', 'tipo', 'descripcion', 'creador']
success_url = reverse_lazy('actividad-list')
class ActividadDeleteView(DeleteView):
model = Actividad
success_url = reverse_lazy('actividad-list')
#---------------------------------------------------------------------------------
class ApuntadoCreateView(CreateView):
model = Apuntado
fields = ['actividad', 'grupo']
success_url = reverse_lazy('apuntado-list')
class ApuntadoUpdateView(UpdateView):
model = Apuntado
fields = ['actividad', 'grupo']
success_url = reverse_lazy('apuntado-list')
class ApuntadoDeleteView(DeleteView):
model = Apuntado
success_url = reverse_lazy('apuntado-list')
#----------------------------------------------------------------------------------
class EncargadoCreateView(CreateView):
model = Encargado
fields = ['nombre_en', 'actividad']
success_url = reverse_lazy('encargado-list')
class EncargadoUpdateView(UpdateView):
model = Encargado
fields = ['nombre_en', 'actividad']
success_url = reverse_lazy('encargado-list')
class EncargadoDeleteView(DeleteView):
model = Encargado
success_url = reverse_lazy('encargado-list')
#--------------------------------------------------------------------------------------
class GrupoCreateView(CreateView):
model = Grupo
fields = ['nombre', 'actividad', 'horas', 'encargado', 'espacio_max']
success_url = reverse_lazy('grupo-list')
class GrupoUpdateView(UpdateView):
model = Grupo
fields = ['nombre', 'actividad', 'horas', 'encargado', 'espacio_max']
success_url = reverse_lazy('grupo-list')
class GrupoDeleteView(DeleteView):
model = Grupo
success_url = reverse_lazy('grupo-list')
#--------------------------------------------------------------------------------------
class LocalCreateView(CreateView):
model = Local
fields = ['nombre_local', 'direccion_local', 'ciudad']
success_url = reverse_lazy('local-list')
class LocalUpdateView(UpdateView):
model = Local
fields = ['nombre_local', 'direccion_local', 'ciudad']
success_url = reverse_lazy('local-list')
class LocalDeleteView(DeleteView):
model = Local
success_url = reverse_lazy('local-list')
#--------------------------------------------------------------------------------------
class UsuarioCreateView(CreateView):
model = Usuario
fields = ['nombre_local', 'direccion_local', 'ciudad']
success_url = reverse_lazy('usuario-list')
class UsuarioUpdateView(UpdateView):
model = Usuario
fields = ['nombre_local', 'direccion_local', 'ciudad']
success_url = reverse_lazy('usuario-list')
class UsuarioDeleteView(DeleteView):
model = Usuario
success_url = reverse_lazy('usuario-list')
#--------------------------------------------------------------------------------------
'''class AuthorCreateView(LoginRequiredMixin, CreateView):
model = Author
fields = ['name']
def form_valid(self, form):
form.instance.created_by = self.request.user
return super().form_valid(form)'''
``` |
{
"source": "josemusso/DEKR",
"score": 3
} |
#### File: lib/modules/angles_naming.py
```python
import pandas as pd
import math
import numpy as np
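# Helpers that expand the per-frame 'Angle' list column into one named float column per angle
# (plus a 'time' column); they differ only in how many angle columns they handle.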
def one_angle(data,names):
df=data[['Second','Angle']]
df['time']=df['Second']
df[names]=pd.DataFrame(df["Angle"].to_list(), columns=names)
df[names[0]]=df[names[0]].astype(float)
df.drop(columns=['Angle', 'Second'], inplace=True)
return df
def two_angles(data,names):
df=data[['Second','Angle']]
df['time']=df['Second']
df[names]=pd.DataFrame(df["Angle"].to_list(), columns=names)
df[names[0]]=df[names[0]].astype(float)
df[names[1]]=df[names[1]].astype(float)
df.drop(columns=['Angle', 'Second'], inplace=True)
return df
def three_angles(data,names):
df=data[['Second','Angle']]
df['time']=df['Second']
df[names]=pd.DataFrame(df["Angle"].to_list(), columns=names)
df[names[0]]=df[names[0]].astype(float)
df[names[1]]=df[names[1]].astype(float)
df[names[2]]=df[names[2]].astype(float)
df.drop(columns=['Angle', 'Second'], inplace=True)
return df
def four_angles(data,names):
df=data[['Second','Angle']]
df['time']=df['Second']
df[names]=pd.DataFrame(df["Angle"].to_list(), columns=names)
df[names[0]]=df[names[0]].astype(float)
df[names[1]]=df[names[1]].astype(float)
df[names[2]]=df[names[2]].astype(float)
df[names[3]]=df[names[3]].astype(float)
df.drop(columns=['Angle', 'Second'], inplace=True)
return df
angle_names = {
'M_CL' : ['Inclinacion'],
'M_CP' : ['Inclinacion'],
'M_CHI' : ['Brazo'],
'M_CHD' : ['Brazo'],
'M_SB' : ['Brazo izquierdo', 'Brazo derecho'],
'M_CCO' : ['Brazo izquierdo', 'Brazo derecho'],
'M_RC' : ['Rotacion'],
'M_LC' : ['Brazo izquierdo','Cadera izquierda','Brazo derecho','Cadera derecha'],
'M_CCA' : ['Inclinacion'],
'M_CR' : ['Inclinacion'],
'E_CLI' : ['Inclinacion'],
'E_CLD' : ['Inclinacion'],
'E_CP' : ['Inclinacion'],
'E_P' : ['Brazos','Columna'],
'E_ICD' : ['Brazo','Cadera'],
'E_ICI' : ['Brazo','Cadera'],
'E_FC' : ['Cadera'],
'E_GD' : ['Rodilla'],
'E_GI' : ['Rodilla'],
'E_GED' : ['Rodilla','Cadera','Inclinacion'],
'E_GEI' : ['Rodilla','Cadera','Inclinacion'],
'D_ME' : ['Rodilla','Cadera'],
'D_TG' : ['Rodilla'],
'D_SL' : ['Rodillas'],
'D_BJ' : ['Brazos','Piernas'],
'D_AA' : ['Desplazamiento'],
'D_B' : ['Hombro'],
'D_SC' : ['Rodilla','Codo'],
'D_RC' : ['CD-RI','CI-RD'],
'A_RC' : ['Inclinacion'],
'A_T' : ['Codo'],
'A_A' : ['Desplazamiento brazos'],
'A_FC' : ['Cadera'],
'A_IF' : ['Desplazamiento'],
'A_BP' : ['Brazos','Piernas'],
'A_FE' : ['Brazos','Caderas'],
'A_PS' : ['Cadera','Columna'],
'A_MT' : ['MD_PI','MI_PD'],
'F_SF' : ['Rodillas','Talones'],
'F_SL' : ['Rodilla','Cadera'],
'F_AUI' : ['Rodilla','Cadera'],
'F_AUD' : ['Rodilla','Cadera']
}
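# Whether each exercise tag is treated as an upper-body movement.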
upper_exercises = {
'M_CL' : True,
'M_CP' : True,
'M_CHI' : True,
'M_CHD' : True,
'M_SB' : True,
'M_CCO' : True,
'M_RC' : True,
'M_LC' : False,
'M_CCA' : False,
'M_CR' : False,
'E_CLI' : True,
'E_CLD' : True,
'E_CP' : True,
'E_P' : False,
'E_ICD' : False,
'E_ICI' : False,
'E_FC' : False,
'E_GD' : False,
'E_GI' : False,
'E_GED' : False,
'E_GEI' : False,
'D_ME' : False,
'D_TG' : False,
'D_SL' : False,
'D_BJ' : False,
'D_AA' : False,
'D_B' : True,
'D_SC' : False,
'D_RC' : False,
'A_RC' : True,
'A_T' : True,
'A_A' : True,
'A_FC' : False,
'A_IF' : False,
'A_BP' : False,
'A_FE' : False,
'A_PS' : False,
'A_MT' : False,
'F_SF' : False,
'F_SL' : False,
'F_AUI' : False,
'F_AUD' : False,
'L_FCODO': False
}
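# Dispatch table: number of angles -> helper that splits them into named columns.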
separate_angles = {
1: one_angle,
2: two_angles,
3: three_angles,
4: four_angles
}
def save_json(data,tag,path):
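    """Splits the angle column for the given exercise tag into named columns, writes the result to
    a JSON file at path and returns the DataFrame; returns None if the tag is unknown or the split fails."""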
try:
angles = angle_names[tag]
n_angles = len(angles)
df = separate_angles[n_angles](data,angles)
print(df)
df.to_json(path)
    except Exception:
return None
return df
```
#### File: lib/modules/draw.py
```python
import math
import cv2
import numpy as np
import csv
from numpy.lib import stride_tricks
import modules.dataScienceMediapipe as dataScience
previous_position = []
theta, phi = math.pi / 4, -math.pi / 6
should_rotate = False
scale_dx = 800
scale_dy = 800
class Plotter3d:
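    """Projects a 3D skeleton and a ground grid onto a 2D canvas; the view can be rotated with the mouse."""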
SKELETON_EDGES = np.array([[11, 10], [10, 9], [9, 0], [0, 3], [3, 4], [4, 5], [0, 6], [6, 7], [7, 8], [0, 12],
[12, 13], [13, 14], [0, 1], [1, 15], [15, 16], [1, 17], [17, 18]])
def __init__(self, canvas_size, origin=(0.5, 0.5), scale=1):
self.origin = np.array(
[origin[1] * canvas_size[1], origin[0] * canvas_size[0]], dtype=np.float32) # x, y
self.scale = np.float32(scale)
self.theta = 0
self.phi = 0
axis_length = 200
axes = [
np.array([[-axis_length/2, -axis_length/2, 0],
[axis_length/2, -axis_length/2, 0]], dtype=np.float32),
np.array([[-axis_length/2, -axis_length/2, 0],
[-axis_length/2, axis_length/2, 0]], dtype=np.float32),
np.array([[-axis_length/2, -axis_length/2, 0], [-axis_length/2, -axis_length/2, axis_length]], dtype=np.float32)]
step = 20
for step_id in range(axis_length // step + 1): # add grid
axes.append(np.array([[-axis_length / 2, -axis_length / 2 + step_id * step, 0],
[axis_length / 2, -axis_length / 2 + step_id * step, 0]], dtype=np.float32))
axes.append(np.array([[-axis_length / 2 + step_id * step, -axis_length / 2, 0],
[-axis_length / 2 + step_id * step, axis_length / 2, 0]], dtype=np.float32))
self.axes = np.array(axes)
def plot(self, img, vertices, edges):
global theta, phi
img.fill(0)
R = self._get_rotation(theta, phi)
self._draw_axes(img, R)
if len(edges) != 0:
self._plot_edges(img, vertices, edges, R)
def _draw_axes(self, img, R):
axes_2d = np.dot(self.axes, R)
axes_2d = axes_2d * self.scale + self.origin
for axe in axes_2d:
axe = axe.astype(int)
cv2.line(img, tuple(axe[0]), tuple(axe[1]),
(128, 128, 128), 1, cv2.LINE_AA)
def _plot_edges(self, img, vertices, edges, R):
vertices_2d = np.dot(vertices, R)
edges_vertices = vertices_2d.reshape(
(-1, 2))[edges] * self.scale + self.origin
for edge_vertices in edges_vertices:
edge_vertices = edge_vertices.astype(int)
cv2.line(img, tuple(edge_vertices[0]), tuple(
edge_vertices[1]), (255, 255, 255), 1, cv2.LINE_AA)
def _get_rotation(self, theta, phi):
sin, cos = math.sin, math.cos
return np.array([
[cos(theta), sin(theta) * sin(phi)],
[-sin(theta), cos(theta) * sin(phi)],
[0, -cos(phi)]
], dtype=np.float32) # transposed
@staticmethod
def mouse_callback(event, x, y, flags, params):
global previous_position, theta, phi, should_rotate, scale_dx, scale_dy
if event == cv2.EVENT_LBUTTONDOWN:
previous_position = [x, y]
should_rotate = True
if event == cv2.EVENT_MOUSEMOVE and should_rotate:
theta += (x - previous_position[0]) / scale_dx * 2 * math.pi
phi -= (y - previous_position[1]) / scale_dy * 2 * math.pi * 2
phi = max(min(math.pi / 2, phi), -math.pi / 2)
previous_position = [x, y]
if event == cv2.EVENT_LBUTTONUP:
should_rotate = False
body_edges = np.array(
[[0, 1], # neck - nose
[1, 16], [16, 18], # nose - l_eye - l_ear
[1, 15], [15, 17], # nose - r_eye - r_ear
[0, 3], [3, 4], [4, 5], # neck - l_shoulder - l_elbow - l_wrist
[0, 9], [9, 10], [10, 11], # neck - r_shoulder - r_elbow - r_wrist
[0, 6], [6, 7], [7, 8], # neck - l_hip - l_knee - l_ankle
[0, 12], [12, 13], [13, 14]]) # neck - r_hip - r_knee - r_ankle
# body_edges = np.array(
# [[0,1]]) # neck - r_hip - r_knee - r_ankle
def detect_side(poses):
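    """Guesses which side of the body faces the camera by comparing the summed keypoint scores of
    right-side vs left-side joints; returns True for right, False for left."""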
right=0
left=0
for pose in poses:
pose = np.array(pose[0:-1]).reshape((-1, 3)).transpose()
right_points=(
pose[2][17]+
pose[2][9]+
pose[2][10]+
pose[2][11]+
pose[2][12]+
pose[2][13]+
pose[2][14]
)
left_points=(
pose[2][18]+
pose[2][3]+
pose[2][4]+
pose[2][5]+
pose[2][6]+
pose[2][7]+
pose[2][8]
)
right+=right_points
left+=left_points
if(left>right):
side=False
else:
side=True
return side
def draw_angle(img, teta, width, height, index, message):
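    """Draws a small angle gauge (two arrows, an arc and the value in degrees) near the bottom of
    the frame; index shifts each gauge to the right and message labels it."""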
try:
chart_position = [20+index*180, height-20]
arrow = 80
teta = teta * np.pi/180
xf = int(np.cos(teta)*arrow)+chart_position[0]
yf = chart_position[1] - int(np.sin(teta)*arrow)
origin_rect = chart_position[0]-10
if xf < chart_position[0]:
chart_position[0] = chart_position[0] - xf + 20 + index*180
xf = int(np.cos(teta)*arrow)+chart_position[0]
origin_rect = xf - 10
cv2.rectangle(img, (origin_rect, chart_position[1]+10),
(chart_position[0]+arrow+20, chart_position[1]-arrow-10), (255, 255, 255), -1)
cv2.arrowedLine(img, (chart_position[0], chart_position[1]), (
chart_position[0]+arrow, chart_position[1]), (255, 0, 0), 2)
cv2.arrowedLine(
img, (chart_position[0], chart_position[1]), (xf, yf), (255, 0, 0), 2)
cv2.ellipse(img, (chart_position[0], chart_position[1]), (int(
arrow/2), int(arrow/2)), 0, -teta*180/np.pi-10, 10, (0, 232, 255), 2)
cv2.putText(img, str(round(teta*180/np.pi, 2)), (chart_position[0]+int(arrow/2), chart_position[1]-int(
arrow/2)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 1, cv2.LINE_AA)
cv2.putText(img, message, (chart_position[0]+5, chart_position[1]-70), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 1, cv2.LINE_AA)
except Exception as e:
print(e)
def draw_poses(img, poses_2d, width, height, n_frame, file_name, n_original_fps,tag):
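    """Draws every detected 2D pose on the frame, computes the exercise angles for the given tag,
    appends the per-frame angles and keypoints to a CSV and draws the angle gauges."""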
side = detect_side(poses_2d)
for pose in poses_2d:
pose = np.array(pose[0:-1]).reshape((-1, 3)).transpose()
was_found = pose[2] > 0
for edge in body_edges:
if was_found[edge[0]] and was_found[edge[1]]:
cv2.line(img, tuple(pose[0:2, edge[0]].astype(np.int32)), tuple(pose[0:2, edge[1]].astype(np.int32)),
(255, 255, 0), 4, cv2.LINE_AA)
exercise = dataScience.Exercises(tag,pose,side)
angles = exercise.calculate()
teta = ''
if angles != None:
teta = []
for i in range(0, len(angles)):
teta.append(round(angles[i]['value'], 2))
print(angles)
with open('/home/kenny/media/data/'+file_name+'.csv', 'a') as csvfile:
fieldnames = ['second', 'angle','kpt_0','kpt_1','kpt_2','kpt_3','kpt_4','kpt_5','kpt_6','kpt_7','kpt_8','kpt_9','kpt_10','kpt_11','kpt_12','kpt_13','kpt_14','kpt_15','kpt_16','kpt_17','kpt_18']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writerow({'second': str(n_frame/n_original_fps), 'angle': teta,'kpt_0':tuple(pose[0:2, 0].astype(np.int32)),'kpt_1':tuple(pose[0:2, 1].astype(np.int32)),'kpt_2':tuple(pose[0:2, 2].astype(np.int32)),'kpt_3':tuple(pose[0:2, 3].astype(np.int32)),'kpt_4':tuple(pose[0:2, 4].astype(np.int32)),'kpt_5':tuple(pose[0:2, 5].astype(np.int32)),'kpt_6':tuple(pose[0:2, 6].astype(np.int32)),'kpt_7':tuple(pose[0:2, 7].astype(np.int32)),'kpt_8':tuple(pose[0:2, 8].astype(np.int32)),'kpt_9':tuple(pose[0:2, 9].astype(np.int32)),'kpt_10':tuple(pose[0:2, 10].astype(np.int32)),'kpt_11':tuple(pose[0:2, 11].astype(np.int32)),'kpt_12':tuple(pose[0:2, 12].astype(np.int32)),'kpt_13':tuple(pose[0:2, 13].astype(np.int32)),'kpt_14':tuple(pose[0:2, 14].astype(np.int32)),'kpt_15':tuple(pose[0:2, 15].astype(np.int32)),'kpt_16':tuple(pose[0:2, 16].astype(np.int32)),'kpt_17':tuple(pose[0:2, 17].astype(np.int32)),'kpt_18':tuple(pose[0:2, 18].astype(np.int32))})
for kpt_id in range(pose.shape[1]):
if pose[2, kpt_id] != -1:
if kpt_id == 100:
cv2.circle(img, tuple(pose[0:2, kpt_id].astype(
np.int32)), 7, (255, 0, 0), -1, cv2.LINE_AA)
else:
cv2.circle(img, tuple(pose[0:2, kpt_id].astype(
np.int32)), 5, (0, 255, 255), -1, cv2.LINE_AA)
cv2.putText(img, str(kpt_id), tuple(pose[0:2, kpt_id].astype(np.int32)), cv2.FONT_HERSHEY_SIMPLEX,0.5, (255, 0, 0), 1, cv2.LINE_AA)
# cv2.putText(img, 'teta-2D: '+str(teta), (50,20), cv2.FONT_HERSHEY_SIMPLEX,0.5, (255, 0, 0), 1, cv2.LINE_AA)
if angles != None:
for i in range(0,len(angles)):
draw_angle(img, round(angles[i]['value'],2), width, height, i, angles[i]['title'])
```
#### File: DEKR/tools/eval_exercise.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import csv
import os
import re
import shutil
import time
import sys
sys.path.append("../lib")
import boto3
import cv2
import numpy as np
from PIL import Image
import ffmpeg
import matplotlib.pyplot as plt
import torch
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision
import _init_paths
import models
from config import cfg
from config import update_config
from core.inference import get_multi_stage_outputs
from core.inference import aggregate_results
from core.nms import pose_nms
from core.match import match_pose_to_heatmap
from utils.transforms import resize_align_multi_scale
from utils.transforms import get_final_preds
from utils.transforms import get_multi_scale_size
from utils.transforms import up_interpolate
# angles and exercise eval imports
import modules.dataScienceMediapipe as dataScience
import modules.score as score
import pandas as pd
pd.options.mode.chained_assignment = None
import json
from collections import deque
from statistics import mean, stdev
import matplotlib.pyplot as plt
if torch.cuda.is_available():
print('Using GPU: ' + torch.cuda.get_device_name(0))
CTX = torch.device('cuda')
else:
print('Using CPU')
torch.device('cpu')
COCO_KEYPOINT_INDEXES = {
0: 'nose',
1: 'left_eye',
2: 'right_eye',
3: 'left_ear',
4: 'right_ear',
5: 'left_shoulder',
6: 'right_shoulder',
7: 'left_elbow',
8: 'right_elbow',
9: 'left_wrist',
10: 'right_wrist',
11: 'left_hip',
12: 'right_hip',
13: 'left_knee',
14: 'right_knee',
15: 'left_ankle',
16: 'right_ankle'
}
CROWDPOSE_KEYPOINT_INDEXES = {
0: 'left_shoulder',
1: 'right_shoulder',
2: 'left_elbow',
3: 'right_elbow',
4: 'left_wrist',
5: 'right_wrist',
6: 'left_hip',
7: 'right_hip',
8: 'left_knee',
9: 'right_knee',
10: 'left_ankle',
11: 'right_ankle',
12: 'head',
13: 'neck'
}
CROWDPOSE_KEYPOINT_SEGMENTS = [
{'name':'left_humerus', 'segment':[0,2]},
{'name':'right_humerus', 'segment':[1,3]},
{'name':'left_radius', 'segment':[2,4]},
{'name':'right_radius', 'segment':[3,5]},
{'name':'left_femur', 'segment':[6,8]},
{'name':'right_femur', 'segment':[7,9]},
{'name':'left_tibia', 'segment':[8,10]},
{'name':'right_tibia', 'segment':[9,11]},
{'name':'left_core', 'segment':[0,6]},
{'name':'right_core', 'segment':[1,7]}]
def draw_skeleton(image, points, config_dataset):
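    """Draws skeleton edges between keypoints using the COCO or CrowdPose connectivity,
    depending on the configured test dataset."""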
skeleton_coco = [
# # [16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12], [7, 13], [6, 7], [6, 8],
# # [7, 9], [8, 10], [9, 11], [2, 3], [1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7]
# [15, 13], [13, 11], [16, 14], [14, 12], [11, 12], [5, 11], [6, 12], [5, 6], [5, 7],
# [6, 8], [7, 9], [8, 10], [1, 2], [0, 1], [0, 2], [1, 3], [2, 4], [3, 5], [4, 6]
[15, 13], [13, 11], [16, 14], [14, 12], [11, 12], [5, 11], [6, 12], [5, 6], [5, 7],
[6, 8], [7, 9], [8, 10], [1, 2], [0, 1], [0, 2], [1, 3], [2, 4], # [3, 5], [4, 6]
[0, 5], [0, 6]
]
skeleton_crowdpose = [
[10, 8], [8, 6], [11, 9], [9, 7], [6, 7], [0, 6], [1, 7], [0, 1], [0, 2],
[1, 3], [2, 4], [3, 5], [1, 13], [0, 13], [13, 12]
]
# select skeleton to draw
if cfg.DATASET.DATASET_TEST == 'coco':
skeleton = skeleton_coco
color = (0,255,255)
else:
skeleton = skeleton_crowdpose
color = (255,0,255)
for i, joint in enumerate(skeleton):
pt1, pt2 = points[joint]
image = cv2.line(
image, (int(pt1[0]), int(pt1[1])), (int(pt2[0]), int(pt2[1])),
color, 2)
return image
def draw_skeleton_ept(image, points, config_dataset, angles, angles_buffer, count_ext):
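    """Draws the skeleton on the frame and on a black canvas, colouring each tracked segment from
    green to red according to the recent variation of its angle and sizing each joint circle with
    the magnitude of its angle (EPT ordering). Returns (annotated frame, skeleton-only image)."""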
skeleton_coco = [
# # [16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12], [7, 13], [6, 7], [6, 8],
# # [7, 9], [8, 10], [9, 11], [2, 3], [1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7]
# [15, 13], [13, 11], [16, 14], [14, 12], [11, 12], [5, 11], [6, 12], [5, 6], [5, 7],
# [6, 8], [7, 9], [8, 10], [1, 2], [0, 1], [0, 2], [1, 3], [2, 4], [3, 5], [4, 6]
[15, 13], [13, 11], [16, 14], [14, 12], [11, 12], [5, 11], [6, 12], [5, 6], [5, 7],
[6, 8], [7, 9], [8, 10], [1, 2], [0, 1], [0, 2], [1, 3], [2, 4], # [3, 5], [4, 6]
[0, 5], [0, 6]
]
skeleton_crowdpose = [
[10, 8], [8, 6], [11, 9], [9, 7], [6, 7], [0, 6], [1, 7], [0, 1], [0, 2],
[1, 3], [2, 4], [3, 5], [1, 13], [0, 13], [13, 12]
]
# select skeleton to draw
if cfg.DATASET.DATASET_TEST == 'coco':
skeleton = skeleton_coco
color = (0,255,255)
else:
skeleton = skeleton_crowdpose
color = (0,255,0) # change to green
# draw skeleton to new image with skeleton+black background
height, width, channels = image.shape
# print(image.shape)
skeleton_only = np.zeros((height,width,3), np.float32)
# print(skeleton_only.shape)
# draw all lines
for i, joint in enumerate(skeleton):
# print(type(points))
pt1, pt2 = points[joint]
image = cv2.line(image, (int(pt1[0]), int(pt1[1])), (int(pt2[0]), int(pt2[1])),
color, 2)
# draw skeleton on dark image
skeleton_only = cv2.line(skeleton_only, (int(pt1[0]), int(pt1[1])), (int(pt2[0]), int(pt2[1])),
color, 2)
# draw skeleton lines based on corresponding angle variation
for angle in angles:
# get only the last 15 values from buffer for visualization
angle_unwrapped = np.rad2deg(np.unwrap(np.deg2rad(angles_buffer[angle['title']])))
buffer = list(angle_unwrapped)[-15:]
if len(buffer)<3:
continue # cannot get stdev with too few values
pt1, pt2 = points[angle['segment']]
delta_angle = stdev(buffer)
max_var = 5
delta_angle_norm = (min(delta_angle, max_var))/max_var # define max stddev to be red segment
# print(angle['title'] +' '+ str(delta_angle_norm))
color = (0,int(255*(1-delta_angle_norm)),int(255*(delta_angle_norm))) #BGR
# print(color)
image = cv2.line(image, (int(pt1[0]), int(pt1[1])), (int(pt2[0]), int(pt2[1])),
color, 2)
# draw on the black image
skeleton_only = cv2.line(skeleton_only, (int(pt1[0]), int(pt1[1])), (int(pt2[0]), int(pt2[1])),
color, 2)
# draw joint circle radious proportional to angle described
# ONLY FOR EPT TAG
angle = [angles[2].get('value'),angles[3].get('value'),
angles[0].get('value'),angles[1].get('value'),
0,0,
angles[4].get('value'),angles[5].get('value'),
angles[6].get('value'),angles[7].get('value'),
0,0,
0,0]
count = 0
for point in points:
# same order as defined
x_coord, y_coord = int(point[0]), int(point[1])
cv2.circle(image, (x_coord, y_coord), int(4+(((abs(angle[count])/360))*50)), (255, 0, 0), 3)
# draw joints on black image
cv2.circle(skeleton_only, (x_coord, y_coord), int(4+(((abs(angle[count])/360))*50)), (255, 0, 0), 3)
count +=1
# write black image + skeleton
# print(skeleton_only.shape)
# cv2.imshow('skeleton_only',skeleton_only)
return image, skeleton_only
# function to blend two images at certain time and with certain transition
# length, to create demo video.
def blend_2_images_to_video(img1, img2, start_at, transition_length, count):
increment = 1/transition_length
rel_count = count-start_at
# before transition
if count < start_at:
alpha=0
# during transition
elif (count >= start_at) and (count<=start_at+transition_length):
alpha = rel_count*increment
# after completed transition
elif count>start_at+transition_length:
alpha=1
print(count)
print('alpha: ', str(alpha))
img_blend = cv2.addWeighted(img1,1-alpha, img2,alpha,0)
return img_blend
def plot_angles(frame ,points, angles, angles_buffer, count):
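    """Plots the buffered history of every tracked angle as stacked matplotlib subplots and
    returns the rendered figure as an RGB numpy image."""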
# ONLY FOR EPT TAG
# draw skeleton lines based on corresponding angle variation
my_dpi=200
fig= plt.figure(figsize=(3200/my_dpi, 2000/my_dpi))
n = 1
for angle in angles:
# angle = angles[0]
x = np.arange(len(angles_buffer[angle['title']]))
y = np.rad2deg(np.unwrap(np.deg2rad(angles_buffer[angle['title']])))
plt.subplot(len(angles),1,n)
plt.plot(x,y,'k-', lw=2, label=angle['title'])
# plt.ylim([0, 360])
plt.legend(loc='upper right', bbox_to_anchor=(0.1, 0.1))
# set margins only when phase detected, denoise
min_val = min(y)
max_val = max(y)
plt.ylim([min_val-30, max_val+30])
range = max(y) - min(y)
# if range > 30:
# plt.margins(y=0.25)
# else:
# plt.ylim()
plt.axis('off')
plt.xticks([], [])
n+=1
# convert plt object to numpy rgb matrix
fig.canvas.draw()
    graph_image = np.frombuffer(fig.canvas.tostring_rgb(), dtype='uint8')
graph_image = graph_image.reshape(fig.canvas.get_width_height()[::-1] + (3,))
# cv2.imwrite('graph_image_{:08d}.jpg'.format(count),graph_image)
plt.close(fig)
return graph_image
def joints_ghosting(image, coords_buffer, count, selected_joints):
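    """Draws motion trails for the selected joints by connecting their positions across the buffered frames."""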
# skip first 3 frames
if count in [1,2,3]:
return image
# for every frame stored in coords_buffer, draw a line
# print(coords_buffer)
# pick every frame in the deque
for i in range(len(coords_buffer)-1):
points = coords_buffer[-i-1][0]
prev_points = coords_buffer[-i-2][0]
# pick every joint in the frame coords
j = -1
for point, prev_point in zip(points, prev_points):
# draw lines only for selected joints
j+=1
if not j in selected_joints:
continue
# print(point)
# print(prev_point)
# same order as defined
prev_x_coord, prev_y_coord = int(prev_point[0]), int(prev_point[1])
x_coord, y_coord = int(point[0]), int(point[1])
image = cv2.line(image, (prev_x_coord, prev_y_coord), (x_coord, y_coord), (178,178,178), 4)
return image
def get_pose_estimation_prediction(cfg, model, image, vis_thre, selected_keypoint, transforms):
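    """Runs multi-scale DEKR inference on one frame and returns the poses above the visibility
    threshold together with the summed heatmap slice of the selected keypoint (empty lists if none)."""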
# size at scale 1.0
base_size, center, scale = get_multi_scale_size(
image, cfg.DATASET.INPUT_SIZE, 1.0, 1.0
)
with torch.no_grad():
heatmap_sum = 0
poses = []
for scale in sorted(cfg.TEST.SCALE_FACTOR, reverse=True):
image_resized, center, scale_resized = resize_align_multi_scale(
image, cfg.DATASET.INPUT_SIZE, scale, 1.0
)
image_resized = transforms(image_resized)
image_resized = image_resized.unsqueeze(0).cuda()
heatmap, posemap = get_multi_stage_outputs(
cfg, model, image_resized, cfg.TEST.FLIP_TEST
)
heatmap_sum, poses = aggregate_results(
cfg, heatmap_sum, poses, heatmap, posemap, scale
)
heatmap_slice = []
heatmap_slice = heatmap_sum.cpu().numpy()[0,selected_keypoint]
heatmap_avg = heatmap_sum/len(cfg.TEST.SCALE_FACTOR)
poses, scores = pose_nms(cfg, heatmap_avg, poses)
if len(scores) == 0:
            return [], []
else:
if cfg.TEST.MATCH_HMP:
poses = match_pose_to_heatmap(cfg, poses, heatmap_avg)
final_poses = get_final_preds(
poses, center, scale_resized, base_size
)
final_results = []
for i in range(len(scores)):
if scores[i] > vis_thre:
final_results.append(final_poses[i])
if len(final_results) == 0:
return [],[]
return final_results, heatmap_slice
def prepare_output_dirs(prefix='/output/'):
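    """Recreates an empty <prefix>/pose directory for the per-frame output images."""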
pose_dir = os.path.join(prefix, "pose")
if os.path.exists(pose_dir) and os.path.isdir(pose_dir):
shutil.rmtree(pose_dir)
os.makedirs(pose_dir, exist_ok=True)
return pose_dir
def parse_args():
parser = argparse.ArgumentParser(description='Train keypoints network')
# general
parser.add_argument('--cfg', type=str, required=True)
parser.add_argument('--videoFile', type=str, required=True)
parser.add_argument('--outputDir', type=str, default='/output/')
parser.add_argument('--inferenceFps', type=int, default=1)
parser.add_argument('--visthre', type=float, default=0)
parser.add_argument('opts',
help='Modify config options using the command-line',
default=None,
nargs=argparse.REMAINDER)
args = parser.parse_args()
# args expected by supporting codebase
args.modelDir = ''
args.logDir = ''
args.dataDir = ''
args.prevModelDir = ''
return args
def main():
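    """Runs DEKR pose estimation over the input video, writes annotated/skeleton/graph videos and
    per-frame CSVs, scores the exercise and uploads the outputs to S3."""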
# transformation
pose_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
# cudnn related setting
cudnn.benchmark = cfg.CUDNN.BENCHMARK
torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED
args = parse_args()
update_config(cfg, args)
pose_dir = prepare_output_dirs(args.outputDir)
csv_output_rows = []
# import model architecture
pose_model = eval('models.'+cfg.MODEL.NAME+'.get_pose_net')(
cfg, is_train=False
)
# import weights
if cfg.TEST.MODEL_FILE:
print('=> loading model from {}'.format(cfg.TEST.MODEL_FILE))
pose_model.load_state_dict(torch.load(
cfg.TEST.MODEL_FILE), strict=False)
else:
raise ValueError('expected model defined in config at TEST.MODEL_FILE')
pose_model.to(CTX)
pose_model.eval()
# Loading an video
vidcap = cv2.VideoCapture(args.videoFile)
fps = vidcap.get(cv2.CAP_PROP_FPS)
length = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
if fps < args.inferenceFps:
        raise ValueError('Video could not be read or its FPS is lower than the requested inference FPS')
skip_frame_cnt = round(fps / args.inferenceFps)
frame_width = int(vidcap.get(cv2.CAP_PROP_FRAME_WIDTH))
frame_height = int(vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT))
###### PARAMS
# select keypoint joint to display in heatmap, view COCO INDEXES to choose
selected_keypoint = 0
    # tag and side are hard-coded examples for now
    # tag:
    tag = 'EPT' # EPT for arm-and-leg usage demos
    # side: True = right, False = left
side = True
# adjust dimensions if rotation is needed
rotate = False
if rotate:
frame_height = int(vidcap.get(cv2.CAP_PROP_FRAME_WIDTH))
frame_width = int(vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT))
# define writers to save videos
video_dets_name = '{}/{}_basico.mp4'.format(args.outputDir, os.path.splitext(os.path.basename(args.videoFile))[0])
video_heatmaps_name = '{}/{}_pose_heatmap.mp4'.format(args.outputDir, os.path.splitext(os.path.basename(args.videoFile))[0])
video_ept_name = '{}/{}_medio.mp4'.format(args.outputDir, os.path.splitext(os.path.basename(args.videoFile))[0])
outcap = cv2.VideoWriter(video_dets_name,
cv2.VideoWriter_fourcc(*'MP4V'), int(skip_frame_cnt), (frame_width, frame_height))
# outcap_heatmap = cv2.VideoWriter(video_heatmaps_name,
# cv2.VideoWriter_fourcc(*'MP4V'), int(skip_frame_cnt), (frame_width, frame_height))
outcap_ept = cv2.VideoWriter(video_ept_name,
cv2.VideoWriter_fourcc(*'MP4V'), int(skip_frame_cnt), (frame_width, frame_height))
video_graph_name = '{}/{}_avanzado.mp4'.format(args.outputDir, os.path.splitext(os.path.basename(args.videoFile))[0])
outcap_graph = cv2.VideoWriter(video_graph_name,
cv2.VideoWriter_fourcc(*'MP4V'), int(skip_frame_cnt), (frame_width+(2*frame_height), frame_height))
count = 0
now_full= time.time()
data = []
# deque: store angle values over frames
buffer_maxlen = 600
angles_buffer={
'Left Elbow':deque([], maxlen=buffer_maxlen),
'Right Elbow':deque([], maxlen=buffer_maxlen),
'Left Shoulder':deque([], maxlen=buffer_maxlen),
'Right Shoulder':deque([], maxlen=buffer_maxlen),
'Left Hip':deque([], maxlen=buffer_maxlen),
'Right Hip':deque([], maxlen=buffer_maxlen),
'Left Knee':deque([], maxlen=buffer_maxlen),
'Right Knee':deque([], maxlen=buffer_maxlen)
}
coords_buffer = deque([],maxlen=30)
while vidcap.isOpened():
total_now = time.time()
ret, image_bgr = vidcap.read()
count += 1
if rotate:
image_bgr = cv2.rotate(image_bgr, cv2.cv2.ROTATE_90_CLOCKWISE)
# image_bgr = cv2.rotate(image_bgr, cv2.cv2.ROTATE_90_COUNTERCLOCKWISE)
# image_bgr = cv2.flip(image_bgr, 0)
# image_bgr = cv2.flip(image_bgr, 1)
if not ret:
break
# if count % skip_frame_cnt != 0:
# continue
print('Processing frame {} out of {}'.format(str(count),str(length)))
image_rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)
image_pose = image_rgb.copy()
# Clone 1 image for debugging purpose
image_debug = image_bgr.copy()
now = time.time()
# added return heatmap_slice
pose_preds, heatmap_slice = get_pose_estimation_prediction(cfg,
pose_model,
image_pose,
args.visthre,
selected_keypoint,
transforms=pose_transform)
## OPTIONAL: keep only the most confident detection
if pose_preds:
pose_preds = [pose_preds[0]]
then = time.time()
# save heatmap_slice as image over original image
# print(heatmap_slice.shape)
# print(np.max(heatmap_slice))
# print(np.min(heatmap_slice))
# plt.imshow(heatmap_slice, cmap='hot', interpolation='nearest')
# plt.show()
# plt.savefig(os.path.join(pose_dir, 'heatmap_{:08d}.jpg'.format(count)))
# generate 3 chann Gray image
image_gray = np.asarray(cv2.cvtColor(image_debug, cv2.COLOR_BGR2GRAY), np.float32)
image_gray_3chan=cv2.cvtColor(image_gray, cv2.COLOR_GRAY2BGR)
# case where person is detected
if pose_preds:
heatmap_slice_image = (heatmap_slice/np.max(heatmap_slice))*255.0
heatmap_slice_image = cv2.resize(heatmap_slice_image,(frame_width,frame_height))
heatmap_slice_image_3chan=np.zeros((frame_height,frame_width,3), np.float32)
heatmap_slice_image_3chan[:, :, 2] = heatmap_slice_image
image_w_heatmap = cv2.addWeighted(image_gray_3chan,0.5,heatmap_slice_image_3chan,0.5,0)
# write heatmap image
cv2.imwrite(os.path.join(pose_dir, 'heatmap_{:08d}.jpg'.format(count)), image_w_heatmap)
print("Found person pose at {:03.2f} fps".format(1/(then - now)))
# stop processing if too slow (stuck)
if 1/(then - now) < 0.5:
break
new_csv_row = []
for coords in pose_preds:
# Draw each point on image
for coord in coords:
x_coord, y_coord = int(coord[0]), int(coord[1])
cv2.circle(image_debug, (x_coord, y_coord), 4, (255, 0, 0), 2)
new_csv_row.extend([x_coord, y_coord])
# draw skeleton
draw_skeleton(image_debug, coords, cfg.DATASET.DATASET_TEST)
csv_output_rows.append(new_csv_row)
#################
# format detections as Aictive server mediapipe_test for ex. eval.
#################
# pose_pred[persona][punto][x:0 o y:1]
# ver si estan normalizados
# config depends on train used: COCO or CROWDPOSE
if cfg.DATASET.DATASET_TEST == 'coco':
array_x = [
abs((pose_preds[0][6][0]+pose_preds[0][5][0])/2), # chest mid (artificial)
pose_preds[0][0][0], # nose
0, #
pose_preds[0][5][0], # left_shoulder
pose_preds[0][7][0], # left_elbow
pose_preds[0][9][0], # left_wrist
pose_preds[0][11][0], # left_hip
pose_preds[0][13][0], # left_knee
pose_preds[0][15][0], # left_ankle
pose_preds[0][6][0], # right_shoulder
pose_preds[0][8][0], # right_elbow
pose_preds[0][10][0], # right_wrist
pose_preds[0][12][0], # right_hip
pose_preds[0][14][0], # right_knee
pose_preds[0][16][0], # right_ankle
pose_preds[0][2][0], # right_eye
pose_preds[0][1][0], # left_eye
pose_preds[0][4][0], # right_ear
pose_preds[0][3][0], # left_ear
0 #
# pose_preds[0][][] # right_heel # only in mp
# pose_preds[0][][] # right_foot_index # only in mp
# pose_preds[0][][] # left_heel # only in mp
# pose_preds[0][][] # left_foot_index # only in mp
]
array_y = [
abs((pose_preds[0][6][1]+pose_preds[0][5][1])/2), # chest mid (artificial)
pose_preds[0][0][1], # nose
0, #
pose_preds[0][5][1], # left_shoulder
pose_preds[0][7][1], # left_elbow
pose_preds[0][9][1], # left_wrist
pose_preds[0][11][1], # left_hip
pose_preds[0][13][1], # left_knee
pose_preds[0][15][1], # left_ankle
pose_preds[0][6][1], # right_shoulder
pose_preds[0][8][1], # right_elbow
pose_preds[0][10][1], # right_wrist
pose_preds[0][12][1], # right_hip
pose_preds[0][14][1], # right_knee
pose_preds[0][16][1], # right_ankle
pose_preds[0][2][1], # right_eye
pose_preds[0][1][1], # left_eye
pose_preds[0][4][1], # right_ear
pose_preds[0][3][1], # left_ear
0 #
# pose_preds[0][][] # right_heel # only in mp
# pose_preds[0][][] # right_foot_index # only in mp
# pose_preds[0][][] # left_heel # only in mp
# pose_preds[0][][] # left_foot_index # only in mp
]
# CROWDPOSE CASE
else:
array_x = [
pose_preds[0][13][1], # chest mid (neck) 0
pose_preds[0][12][0], # nose 1
0, # 2
pose_preds[0][0][0], # left_shoulder 3
pose_preds[0][2][0], # left_elbow 4
pose_preds[0][4][0], # left_wrist 5
pose_preds[0][6][0], # left_hip 6
pose_preds[0][8][0], # left_knee 7
pose_preds[0][10][0], # left_ankle 8
pose_preds[0][1][0], # right_shoulder 9
pose_preds[0][3][0], # right_elbow 10
pose_preds[0][5][0], # right_wrist 11
pose_preds[0][7][0], # right_hip 12
pose_preds[0][9][0], # right_knee 13
pose_preds[0][11][0], # right_ankle 14
0, # right_eye
0, # left_eye
0, # right_ear
0, # left_ear
0 #
# pose_preds[0][][] # right_heel # only in mp
# pose_preds[0][][] # right_foot_index # only in mp
# pose_preds[0][][] # left_heel # only in mp
# pose_preds[0][][] # left_foot_index # only in mp
]
array_y = [
pose_preds[0][13][1], # chest mid (neck)
pose_preds[0][12][1], # nose
0, #
pose_preds[0][0][1], # left_shoulder
pose_preds[0][2][1], # left_elbow
pose_preds[0][4][1], # left_wrist
pose_preds[0][6][1], # left_hip
pose_preds[0][8][1], # left_knee
pose_preds[0][10][1], # left_ankle
pose_preds[0][1][1], # right_shoulder
pose_preds[0][3][1], # right_elbow
pose_preds[0][5][1], # right_wrist
pose_preds[0][7][1], # right_hip
pose_preds[0][9][1], # right_knee
pose_preds[0][11][1], # right_ankle
0, # right_eye
0, # left_eye
0, # right_ear
0, # left_ear
0 #
# pose_preds[0][][] # right_heel # only in mp
# pose_preds[0][][] # right_foot_index # only in mp
# pose_preds[0][][] # left_heel # only in mp
# pose_preds[0][][] # left_foot_index # only in mp
]
# visibility, NOT AVAILABLE BUT CAN BE INFERRED WITH NOSE AND EARS KPs
array_v = [
0, # chest mid (artificial)
0, # nose
0, #
0, # left_shoulder
0, # left_elbow
0, # left_wrist
0, # left_hip
0, # left_knee
0, # left_ankle
0, # right_shoulder
0, # right_elbow
0, # right_wrist
0, # right_hip
0, # right_knee
0, # right_ankle
0, # right_eye
0, # left_eye
0, # right_ear
0, # left_ear
0 #
# pose_preds[0][][] # right_heel # only in mp
# pose_preds[0][][] # right_foot_index # only in mp
# pose_preds[0][][] # left_heel # only in mp
# pose_preds[0][][] # left_foot_index # only in mp
]
# case no person detected in frame
else:
image_w_heatmap = image_gray_3chan
cv2.imwrite(os.path.join(pose_dir, 'heatmap_{:08d}.jpg'.format(count)), image_w_heatmap)
print("No person pose found at {:03.2f} fps".format(1/(then - now)))
# append empty row on csv
new_csv_row = []
csv_output_rows.append(new_csv_row)
# define detections as empty for ex eval.
array_x=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
array_y=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
array_v=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
# write fps in image
total_then = time.time()
text = "{:03.2f} fps".format(1/(total_then - total_now))
cv2.putText(image_debug, text, (100, 50), cv2.FONT_HERSHEY_SIMPLEX,
1, (0, 0, 255), 2, cv2.LINE_AA)
# write detections image
img_file = os.path.join(pose_dir, 'pose_{:08d}.jpg'.format(count))
cv2.imwrite(img_file, image_debug)
# write detections and heatmap video
outcap.write(np.uint8(image_debug))
# outcap_heatmap.write(np.uint8(image_w_heatmap))
# after writing both dets and heatmaps videos, calculate angles
poseEstimate = [array_x, array_y, array_v]
poseEstimate = np.array(poseEstimate)
exercise = dataScience.Exercises(tag, poseEstimate, side)
angles = exercise.calculate()
# print(angles)
# case angles are detected
if angles != None:
teta = []
for i in range(0, len(angles)):
teta.append(round(angles[i]['value'], 2))
# time corresponding to the frame [in secs] (-1 to start at 0:00)
frame_time = round((count-1)/fps,3)
# frame data contains [time, angles, positions]
frame_data = [
str(frame_time),
teta,
tuple(poseEstimate[0:2, 0]),
tuple(poseEstimate[0:2, 1]),
tuple(poseEstimate[0:2, 2]),
tuple(poseEstimate[0:2, 3]),
tuple(poseEstimate[0:2, 4]),
tuple(poseEstimate[0:2, 5]),
tuple(poseEstimate[0:2, 6]),
tuple(poseEstimate[0:2, 7]),
tuple(poseEstimate[0:2, 8]),
tuple(poseEstimate[0:2, 9]),
tuple(poseEstimate[0:2, 10]),
tuple(poseEstimate[0:2, 11]),
tuple(poseEstimate[0:2, 12]),
tuple(poseEstimate[0:2, 13]),
tuple(poseEstimate[0:2, 14]),
tuple(poseEstimate[0:2, 15]),
tuple(poseEstimate[0:2, 16]),
tuple(poseEstimate[0:2, 17]),
tuple(poseEstimate[0:2, 18])
]
data.append(frame_data)
# draw skeleton based on angle values
# iteration over different person detections
for coords in pose_preds:
# Draw each point on image
# for coord in coords:
# x_coord, y_coord = int(coord[0]), int(coord[1])
# cv2.circle(image_debug, (x_coord, y_coord), 4, (255, 0, 0), 2)
# new_csv_row.extend([x_coord, y_coord])
# store angle values over 30 frames
for angle in angles:
# append angle value to buffer
angles_buffer[angle['title']].append(angle['value'])
# analyze angle variation on the last 30 frames
## SKELETON ONLY IMAGE ##
image_colors, skeleton_only = draw_skeleton_ept(image_bgr, coords, cfg.DATASET.DATASET_TEST,angles,angles_buffer, count)
if not pose_preds:
continue
# write detections image
img_file = os.path.join(pose_dir, 'ept_pose_{:08d}.jpg'.format(count))
cv2.imwrite(img_file, image_colors)
# write skeleton img
skeleton_img_file = os.path.join(pose_dir, 'skeleton_{:08d}.jpg'.format(count))
cv2.imwrite(skeleton_img_file, skeleton_only)
## TRANSITION TO DARK IMAGE ##
# generate blend video w transition to only skeleton
start_transition_at = 2*30
transition_length = 2*30 # 90 frames = 3 sec
# img_blend = blend_2_images_to_video(image_colors.astype(np.float32),
# skeleton_only,
# start_transition_at,
# transition_length,
# count)
img_blend=image_colors.astype(np.float32)
# outcap_ept.write(np.uint8(img_blend))
## GENERATE GRAPH ##
# generate image with angles overlay
graph_image = plot_angles(np.uint8(img_blend), coords, angles, angles_buffer, count)
# inverse to create white lines graph
graph_image = cv2.bitwise_not(graph_image)
# add graph to video
graph_image = cv2.resize(graph_image,(frame_height*2, frame_height))
# roi to insert graph
rows,cols,channels = graph_image.shape
# two options: paste image or vstack image
# img_blend[-rows:, 0:cols] = graph_image
## JOINTS GHOSTING ##
# select joints to highlight:
selected_joints = [
# 0, # 'left_shoulder'
# 1, # 'right_shoulder',
# 2, # 'left_elbow',
# 3, # 'right_elbow',
# 4, # 'left_wrist',
# 5, # 'right_wrist',
# 6, # 'left_hip',
# 7, # 'right_hip',
# 8, # 'left_knee',
# 9, # 'right_knee',
# 10, # 'left_ankle',
# 11, # 'right_ankle',
# 12, # 'head',
# 13 # 'neck'
]
coords_buffer.append([coords])
ghosting_blend_image = joints_ghosting(img_blend, coords_buffer, count, selected_joints)
outcap_ept.write(np.uint8(ghosting_blend_image))
## STACK IMAGE AND GRAPH ##
img_blend_and_graph = np.hstack((ghosting_blend_image, graph_image))
# print(img_blend_and_graph.shape)
outcap_graph.write(np.uint8(img_blend_and_graph))
if count == 15*25:
break
# print(angles_buffer)
# create df with whole video info to pass to score.Exercises
fieldnames = ['Second', 'Angle', 'kpt_0', 'kpt_1', 'kpt_2', 'kpt_3', 'kpt_4', 'kpt_5',
'kpt_6', 'kpt_7', 'kpt_8', 'kpt_9', 'kpt_10', 'kpt_11', 'kpt_12', 'kpt_13',
'kpt_14', 'kpt_15', 'kpt_16', 'kpt_17', 'kpt_18']
df = pd.DataFrame(data, columns=fieldnames)
df['Second'] = df['Second'].astype(float)
# save df for further use
df.to_csv('output/{}_dataframe.csv'.format(tag))
# evaluate exercise, get scores
exercise_sc = score.Exercises(tag, df)
print(df[['Second', 'Angle']].describe())
score_result = exercise_sc.calculate()
# convert to json and fill
print(json.dumps(score_result, indent=4))
json_path = 'output/json/'+tag+'.json'
with open(json_path, 'w') as f:
json.dump(score_result, f, indent=4)
then_full= time.time()
print("Processing complete at average {:03.2f} fps".format(count/(then_full - now_full)))
print('Total processing time: {} secs'.format(then_full - now_full))
# write csv
csv_headers = ['frame']
if cfg.DATASET.DATASET_TEST == 'coco':
for keypoint in COCO_KEYPOINT_INDEXES.values():
csv_headers.extend([keypoint+'_x', keypoint+'_y'])
elif cfg.DATASET.DATASET_TEST == 'crowd_pose':
        for keypoint in CROWDPOSE_KEYPOINT_INDEXES.values():
csv_headers.extend([keypoint+'_x', keypoint+'_y'])
else:
raise ValueError('Please implement keypoint_index for new dataset: %s.' % cfg.DATASET.DATASET_TEST)
csv_output_filename = os.path.join(args.outputDir, 'pose-data.csv')
with open(csv_output_filename, 'w', newline='') as csvfile:
csvwriter = csv.writer(csvfile)
csvwriter.writerow(csv_headers)
csvwriter.writerows(csv_output_rows)
vidcap.release()
outcap.release()
# outcap_heatmap.release()
outcap_ept.release()
outcap_graph.release()
cv2.destroyAllWindows()
########## send output files to S3 bucket research-test-s3-bucket
s3_client = boto3.client('s3')
# upload dets video
s3_client.upload_file(video_dets_name, 'research-test-s3-bucket', video_dets_name)
# upload heatmaps video
s3_client.upload_file(video_heatmaps_name, 'research-test-s3-bucket', video_heatmaps_name)
# upload csv
s3_client.upload_file(csv_output_filename, 'research-test-s3-bucket', csv_output_filename)
# upload dataframe csv
csv_dataframe_filename = os.path.join(args.outputDir, '{}_dataframe.csv'.format(tag))
s3_client.upload_file(csv_dataframe_filename, 'research-test-s3-bucket', csv_dataframe_filename)
# upload json
json_filename = os.path.join(args.outputDir, 'json/'+tag+'.json')
s3_client.upload_file(json_filename, 'research-test-s3-bucket', json_filename)
# get download links
download_link_dets = s3_client.generate_presigned_url('get_object',
Params={'Bucket': 'research-test-s3-bucket',
'Key': video_dets_name},
ExpiresIn=300)
download_link_heatmaps = s3_client.generate_presigned_url('get_object',
Params={'Bucket': 'research-test-s3-bucket',
'Key': video_heatmaps_name},
ExpiresIn=300)
download_link_dataframe = s3_client.generate_presigned_url('get_object',
Params={'Bucket': 'research-test-s3-bucket',
'Key': csv_dataframe_filename},
ExpiresIn=300)
download_link_json = s3_client.generate_presigned_url('get_object',
Params={'Bucket': 'research-test-s3-bucket',
'Key': json_filename},
ExpiresIn=300)
print('Files uploaded to S3 bucket.')
print('Download DETECTIONS video:\n {}'.format(download_link_dets))
print('Download HEATMAPS video:\n {}'.format(download_link_heatmaps))
print('Download DATAFRAME CSV:\n {}'.format(download_link_dataframe))
print('Download JSON:\n {}'.format(download_link_json))
print('Download links expire in 5 min.')
if __name__ == '__main__':
main()
``` |
{
"source": "josenaldo/ipb",
"score": 2
} |
#### File: ipb/namelist/signals.py
```python
import os
import tweepy as tweepy
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.contrib.sites.models import Site
import logging
from .models import DevilName
logger = logging.getLogger(__name__)
@receiver(post_save, sender=DevilName)
def tweet_devilname(sender, instance, created, **kwargs):
devilname = instance
twitter_on = os.environ.get('TWITTER_ON', False)
logger.info('Tuitando: ' + devilname.name)
if twitter_on and created:
twitter_auth_keys = {
"consumer_key": os.environ.get('TWITTER_API_KEY', False),
"consumer_secret": os.environ.get('TWITTER_API_SECRET_KEY', False),
"access_token": os.environ.get('TWITTER_ACCESS_TOKEN', False),
"access_token_secret": os.environ.get('TWITTER_ACCESS_TOKEN_SECRET', False),
}
auth = tweepy.OAuthHandler(
twitter_auth_keys['consumer_key'],
twitter_auth_keys['consumer_secret']
)
auth.set_access_token(
twitter_auth_keys['access_token'],
twitter_auth_keys['access_token_secret']
)
api = tweepy.API(auth)
domain = Site.objects.get_current().domain
url = DevilName.get_absolute_url(devilname)
full_url= f'https://{domain}{url}'
        char_count = 179
        # Truncate long names so the tweet stays within the length limit
        name = (devilname.name[:char_count] + '...') if len(devilname.name) > char_count else devilname.name
        # the fixed prefix below is 21 characters
        tweet = f'Novo Nome do Capeta: \'{name}\'.\n\n{full_url}'
try:
api.update_status(tweet)
logger.info('Twitter enviado: ' + tweet)
except tweepy.TweepError as error:
if error.api_code == 187:
logger.error('duplicate message')
else:
logger.error(error.reason)
else:
logger.info('Tweet nao precisa ser enviado: ' + devilname.name)
``` |
{
"source": "josenava/meal-calendar",
"score": 3
} |
#### File: app/auth/endpoints.py
```python
from app.database import get_db
from fastapi import APIRouter, Depends, HTTPException, status
from fastapi.security import OAuth2PasswordRequestForm
from sqlalchemy.orm import Session
from app.users.models import User
from .exceptions import WrongCredentials
from .factories import build_auth_user_service
from .oauth2 import oauth2_scheme
router = APIRouter()
def get_current_user(
db: Session = Depends(get_db),
token: str = Depends(oauth2_scheme)
) -> User:
auth_service = build_auth_user_service(db)
try:
user = auth_service.get_user_from_token(token)
if user is None:
raise WrongCredentials
except WrongCredentials:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Could not validate credentials",
headers={"WWW-Authenticate": "Bearer"},
)
return user
@router.post("/token", status_code=status.HTTP_201_CREATED)
def login(form_data: OAuth2PasswordRequestForm = Depends(), db: Session = Depends(get_db)):
service = build_auth_user_service(db)
try:
token = service.authenticate(form_data.username, form_data.password)
except WrongCredentials:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Incorrect username or password",
headers={"WWW-Authenticate": "Bearer"},
)
return {"access_token": token, "token_type": "bearer"}
@router.get("/logout")
def logout(current_user: User = Depends(get_current_user), db: Session = Depends(get_db)):
service = build_auth_user_service(db)
service.unauthenticate(current_user)
return {}
```
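The two auth routes above implement the standard OAuth2 password flow, so they can be exercised with any HTTP client. The sketch below is illustrative only: the base URL, the mount prefix of the router, and the credentials are assumptions, not values taken from this repository.
```python
import requests

BASE_URL = "http://localhost:8000"  # assumption: local dev server with the router mounted at the root

# OAuth2PasswordRequestForm expects form-encoded username/password fields
resp = requests.post(BASE_URL + "/token", data={"username": "user@example.com", "password": "secret"})
resp.raise_for_status()
token = resp.json()["access_token"]

# Protected routes read the bearer token from the Authorization header
headers = {"Authorization": "Bearer " + token}
print(requests.get(BASE_URL + "/logout", headers=headers).json())
```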
#### File: app/meals/endpoints.py
```python
from datetime import date
from typing import List
from uuid import UUID
from app.auth.endpoints import get_current_user
from app.database import get_db
from app.users.models import User
from fastapi import APIRouter, Depends, HTTPException, status
from sqlalchemy.orm import Session
from .exceptions import ActionNotAllowed, MealAlreadyExists, MealNotFound
from .factories import (build_create_meal_service, build_delete_meal_service,
build_get_meals_service, build_update_meal_service)
from .requests import CreateMealRequest, UpdateMealRequest
from .responses import Meal
router = APIRouter()
@router.get("", status_code=status.HTTP_200_OK, response_model=List[Meal])
def get_meals(
start_date: date,
end_date: date,
current_user: User = Depends(get_current_user),
db: Session = Depends(get_db),
):
if start_date > end_date:
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST,
detail=f"Start date is greater than end_date {start_date=}, {end_date=}")
service = build_get_meals_service(db)
meals = service.execute(current_user.id, start_date, end_date)
return meals
@router.post("", status_code=status.HTTP_201_CREATED, response_model=Meal)
def create_meal(
meal_request: CreateMealRequest,
current_user: User = Depends(get_current_user),
db: Session = Depends(get_db),
):
service = build_create_meal_service(db)
try:
meal = service.execute(meal_request, current_user)
except MealAlreadyExists:
raise HTTPException(status_code=status.HTTP_422_UNPROCESSABLE_ENTITY)
return meal
@router.put("/{meal_id}", status_code=status.HTTP_200_OK)
def update_meal(
meal_id: UUID,
meal_request: UpdateMealRequest,
current_user: User = Depends(get_current_user),
db: Session = Depends(get_db),
):
service = build_update_meal_service(db)
try:
service.execute(meal_request, current_user.id)
except MealNotFound:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND)
except ActionNotAllowed:
raise HTTPException(status_code=status.HTTP_403_FORBIDDEN)
return {'id': meal_id}
@router.delete("/{meal_id}", status_code=status.HTTP_204_NO_CONTENT)
def delete_meal(
meal_id: UUID,
current_user: User = Depends(get_current_user),
db: Session = Depends(get_db),
):
service = build_delete_meal_service(db)
try:
service.execute(meal_id, current_user.id)
except MealNotFound:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND)
except ActionNotAllowed:
raise HTTPException(status_code=status.HTTP_403_FORBIDDEN)
return {}
```
#### File: app/meals/factories.py
```python
from sqlalchemy.orm import Session
from .repositories import MealRepository
from .services import (CreateMealService, DeleteMealService, GetMealsService,
UpdateMealService)
def build_create_meal_service(db: Session) -> CreateMealService:
return CreateMealService(MealRepository(db))
def build_update_meal_service(db: Session) -> UpdateMealService:
return UpdateMealService(MealRepository(db))
def build_delete_meal_service(db: Session) -> DeleteMealService:
return DeleteMealService(MealRepository(db))
def build_get_meals_service(db: Session) -> GetMealsService:
return GetMealsService(MealRepository(db))
```
#### File: tests/users/test_services.py
```python
from unittest.mock import Mock
import pytest
from app.users.exceptions import UserAlreadyExists
from app.users.services import UserSignupService
@pytest.mark.unit
class TestUserSignupService:
def test_existing_user_raises_user_already_exists(self):
signup_data = Mock(email="<EMAIL>")
user_repository = Mock()
user_repository.get_by_email.return_value = Mock(email="<EMAIL>")
user_service = UserSignupService(user_repository)
with pytest.raises(UserAlreadyExists):
user_service.execute(signup_data)
```
#### File: app/users/endpoints.py
```python
from app.database import get_db
from fastapi import APIRouter, Depends, HTTPException, status
from sqlalchemy.orm import Session
from app.auth.endpoints import get_current_user
from .exceptions import UserAlreadyExists
from .factories import build_user_signup_service
from .models import User
from .requests import SignupForm
router = APIRouter()
@router.post("/signup", status_code=status.HTTP_201_CREATED)
def create_user(signup_form: SignupForm, db: Session = Depends(get_db)):
service = build_user_signup_service(db)
try:
service.execute(signup_form)
except UserAlreadyExists:
raise HTTPException(
status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
detail="User already exists"
)
return {}
@router.get("/me")
def me(current_user: User = Depends(get_current_user)):
return {}
```
#### File: app/users/models.py
```python
from sqlalchemy import Boolean, Column, DateTime, Integer, Sequence, String
from sqlalchemy.orm import relationship
from sqlalchemy.sql import func
from passlib.context import CryptContext
from app.database import Base
class User(Base):
__tablename__ = "users"
user_id_seq = Sequence('user_id_seq', metadata=Base.metadata)
id = Column(Integer, user_id_seq, server_default=user_id_seq.next_value(), primary_key=True, index=True)
email = Column(String, unique=True, index=True, nullable=False)
hashed_password = Column(String, nullable=False)
is_active = Column(Boolean, default=False)
created_at = Column(DateTime, default=func.now())
auth = relationship("AuthUser", uselist=False, back_populates="user")
meals = relationship("Meal", cascade="all, delete")
@classmethod
def create(cls, id: int, email: str, plain_password: str) -> "User":
        hashed_password = cls.hash_password(plain_password)
        return cls(id=id, email=email, hashed_password=hashed_password, is_active=True)
@staticmethod
def hash_password(password: str) -> str:
pwd_context = CryptContext(schemes=["bcrypt"])
return pwd_context.hash(password)
def verify_password(self, plain_password: str) -> bool:
pwd_context = CryptContext(schemes=["bcrypt"])
return pwd_context.verify(plain_password, self.hashed_password)
``` |
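The password handling in `User.create` and `verify_password` reduces to passlib's bcrypt context. A minimal standalone sketch of that pattern (assumes `passlib` with the `bcrypt` backend is installed):
```python
from passlib.context import CryptContext

pwd_context = CryptContext(schemes=["bcrypt"])
# Hash once at signup; the salt is embedded in the resulting string
hashed = pwd_context.hash("correct horse battery staple")
# Verify at login against the stored hash
print(pwd_context.verify("correct horse battery staple", hashed))  # True
print(pwd_context.verify("wrong password", hashed))                # False
```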
{
"source": "josenavarro-famoco/flask-pg",
"score": 2
} |
#### File: flask-pg/pogo/ext_api.py
```python
import logging
from flask import Flask, jsonify, render_template, request
from custom_exceptions import GeneralPogoException
from api import PokeAuthSession
from location import Location
import time
import sys
from pokedex import pokedex
from inventory import items
app = Flask(__name__)
BASE_PATH = ''
API_PATH = '/api/1'
sessions = {}
users = []
def setupLogger():
logger = logging.getLogger()
logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('Line %(lineno)d,%(filename)s - %(asctime)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
def encounterAndCatch(session, pokemon, thresholdP=0.5, limit=5, delay=2):
# Start encounter
encounter = session.encounterPokemon(pokemon)
# Grab needed data from proto
chances = encounter.capture_probability.capture_probability
balls = encounter.capture_probability.pokeball_type
bag = session.checkInventory().bag
# Have we used a razz berry yet?
berried = False
    # Make sure we aren't over the limit
count = 0
# Attempt catch
while True:
bestBall = 2
RAZZ_BERRY = 701
# Check for balls and see if we pass
# wanted threshold
for i in range(len(balls)):
if balls[i] in bag and bag[balls[i]] > 0:
if chances[i] > thresholdP:
bestBall = balls[i]
break
if not berried and RAZZ_BERRY in bag and bag[RAZZ_BERRY]:
logging.info("Using a RAZZ_BERRY")
session.useItemCapture(RAZZ_BERRY, pokemon)
berried = True
time.sleep(delay)
continue
# Try to catch it!!
logging.info("Using a %s" % items[bestBall])
attempt = session.catchPokemon(pokemon, bestBall)
time.sleep(delay)
# Success or run away
if attempt.status == 1:
return attempt
# CATCH_FLEE is bad news
if attempt.status == 3:
logging.info("Possible soft ban.")
return attempt
# Only try up to x attempts
count += 1
if count >= limit:
logging.info("Over catch limit")
return None
# Catch a pokemon at a given point
def walkAndCatch(session, pokemon):
if pokemon:
logging.info("Catching %s:" % pokedex[pokemon.pokemon_data.pokemon_id])
session.walkTo(pokemon.latitude, pokemon.longitude, step=2.8)
enc = encounterAndCatch(session, pokemon)
logging.info(enc)
def parseResponseResult(result, operation=''):
"""
result: SUCCESS candy_awarded: 1
"""
body = {}
body['result'] = getattr(result, "result", None)
if body['result'] == 'SUCCESS':
if operation == 'FREE_POKEMON':
body['candy_awarded'] = getattr(result, "candy_awarded", None)
return body
def parseWildPokemon(pokemon):
#logging.info(str(pokemon))
pok = {}
pokemonId = getattr(pokemon, "pokemon_id", None)
if not pokemonId:
pokemonId = pokemon.pokemon_data.pokemon_id
pok['pokemon_id'] = pokemonId
pok['rarity'] = pokedex.getRarityById(pokemonId)
pok['name'] = pokedex[pokemonId]
pok['encounter_id'] = getattr(pokemon, "encounter_id", None)
pok['last_modified_timestamp_ms'] = getattr(pokemon, "last_modified_timestamp_ms", None)
pok['latitude'] = getattr(pokemon, "latitude", None)
pok['longitude'] = getattr(pokemon, "longitude", None)
pok['spawn_point_id'] = getattr(pokemon, "spawn_point_id", None)
pok['time_till_hidden_ms'] = getattr(pokemon, "time_till_hidden_ms", None)
return pok
def parsePartyPokemon(pokemon, detail=False):
"""
id: 17633600020994617271
pokemon_id: TENTACOOL
cp: 224
stamina: 35
stamina_max: 35
move_1: BUBBLE_FAST
move_2: WRAP
height_m: 0.742571890354
weight_kg: 33.6002044678
individual_attack: 11
individual_defense: 2
individual_stamina: 4
cp_multiplier: 0.422500014305
pokeball: ITEM_GREAT_BALL
captured_cell_id: 5171193400942133248
creation_time_ms: 1469649774858
"""
short = ['id','stamina_max','cp','cp_multiplier','individual_attack','individual_defense','individual_stamina']
full = ['stamina','move_1','move_2','height_m','weight_kg',
'pokeball','captured_cell_id','creation_time_ms']
props = []
#logging.info(str(pokemon))
pok = {}
pokemonId = getattr(pokemon, "pokemon_id", None)
pok['pokemon_id'] = pokemonId
pok['rarity'] = pokedex.getRarityById(pokemonId)
pok['name'] = pokedex[pokemonId]
if detail:
props = short + full
else:
props = short
for value in props:
pok[value] = getattr(pokemon, value, None)
return pok
def parseEggs(egg):
"""
egg free:
id: 4555645718830338274
is_egg: true
egg_km_walked_target: 5.0
captured_cell_id: 5171192829494427648
creation_time_ms: 1469698248933
egg ocup:
id: 4555645718830338274
is_egg: true
egg_km_walked_target: 5.0
captured_cell_id: 5171192829494427648
egg_incubator_id: "EggIncubatorProto4824214944684084552"
creation_time_ms: 1469698248933
"""
parsed_egg = {}
parsed_egg['id'] = getattr(egg, "id", None)
parsed_egg['egg_km_walked_target'] = getattr(egg, "egg_km_walked_target", None)
parsed_egg['captured_cell_id'] = getattr(egg, "captured_cell_id", None)
parsed_egg['is_egg'] = getattr(egg, "is_egg", None)
parsed_egg['egg_incubator_id'] = getattr(egg, "egg_incubator_id", None)
parsed_egg['creation_time_ms'] = getattr(egg, "creation_time_ms", None)
return parsed_egg
def parseProfile(profile):
"""
success: true
player_data {
creation_timestamp_ms: 1467992781323
username: "jm8nav"
team: BLUE
tutorial_state: LEGAL_SCREEN
tutorial_state: AVATAR_SELECTION
tutorial_state: POKEMON_CAPTURE
tutorial_state: NAME_SELECTION
tutorial_state: FIRST_TIME_EXPERIENCE_COMPLETE
avatar {
skin: 1
hair: 3
shirt: 2
pants: 1
hat: 2
shoes: 2
eyes: 3
backpack: 2
}
max_pokemon_storage: 250
max_item_storage: 350
daily_bonus {
next_defender_bonus_collect_timestamp_ms: 1469541558462
}
equipped_badge { }
contact_settings {
send_marketing_emails: true
}
currencies {
name: "POKECOIN"
amount: 20
}
currencies {
name: "STARDUST"
amount: 29966
}
}
"""
body = {}
fields = ['creation_timestamp_ms','username','team','max_pokemon_storage','max_item_storage']
logging.info(getattr(profile, "success", None))
if getattr(profile, "success", False) == True:
player_data = getattr(profile, 'player_data', None)
for field in fields:
body[field] = getattr(player_data, field, None)
#avatar_data = getattr(player_data, 'avatar', None)
#avatar = {}
#for prop in avatar_data:
# avatar[prop] = avatar_data[prop]
#body['avatar'] = avatar
currencies = getattr(player_data, 'currencies', None)
for currency in currencies:
body[currency.name.lower()] = currency.amount
return body
@app.route(BASE_PATH + "/")
def home():
"""Render website's home page."""
return render_template('home.html')
@app.route(API_PATH + "/")
def index():
"""Render website's home page."""
return str(users)
@app.route(BASE_PATH + "/login", methods=['POST'])
def login_data():
if request.json:
mydata = request.json
username = mydata.get("username")
password = mydata.get("password")
auth = mydata.get("auth")
location = mydata.get("location")
        if username is None or password is None or auth is None or location is None:
return jsonify(error="missing value"), 400
poko_session = PokeAuthSession(
username,
password,
auth,
geo_key=None
)
session = poko_session.authenticate(locationLookup=location)
if session:
global sessions
global users
sessions[username] = session
users.append(username)
logging.info(users)
return jsonify(data=str(session))
else:
return jsonify(error=str(session)), 400
else:
return jsonify(error="no values receives"), 400
@app.route(BASE_PATH + "/login/<auth_type>/<user>/<password>/<location>")
def login(auth_type, user, password, location):
"""
Access Token: <KEY>
Endpoint: https://pgorelease.nianticlabs.com/plfe/528/rpc
Location:
Coordinates:
50.8503396 4.3517103 0.0
"""
poko_session = PokeAuthSession(
user,
password,
auth_type,
geo_key=None
)
session = poko_session.authenticate(locationLookup=location)
if session:
global sessions
global users
sessions[user] = session
users.append(user)
logging.info(users)
#access_token = getattr(session, "access_token", None)
#endpoint = getattr(session, "Endpoint", None)
#location = getattr(session, "Location", None)
#if access_token != None and endpoint != None and location != None:
# return jsonify(access_token=access_token, endpoint=endpoint, location=location)
#else:
# return jsonify(session), 400
return str(session)
@app.route(BASE_PATH + "/<user>/profile")
def profile(user):
profile = parseProfile(sessions[user].getProfile())
return render_template('profile.html', profile=profile)
@app.route(API_PATH + "/<user>/profile")
def api_profile(user):
return jsonify(data=parseProfile(sessions[user].getProfile()))
@app.route(API_PATH + "/<user>/items")
def api_items(user):
    # renamed from `items` so it does not shadow the `items` dict imported from inventory
    return jsonify(data=sessions[user].getInventory())
@app.route(API_PATH + "/<user>/items/candy")
def items_candy(user):
return jsonify(candies=sessions[user].getInventory().candies)
@app.route(BASE_PATH + "/<user>/items/eggs")
def api_eggs(user):
eggs = sessions[user].getInventory().eggs
list_eggs = []
for egg in eggs:
list_eggs.append(parseEggs(egg))
return render_template('items_eggs.html', eggs=list_eggs)
@app.route(API_PATH + "/<user>/items/eggs")
def eggs(user):
eggs = sessions[user].getInventory().eggs
list_eggs = []
for egg in eggs:
list_eggs.append(parseEggs(egg))
return jsonify(eggs=list_eggs)
@app.route(BASE_PATH + "/<user>/pokemons/nearby")
def pokemons_nearby(user):
cells = sessions[user].getMapObjects()
latitude, longitude, _ = sessions[user].getCoordinates()
logging.info("Current pos: %f, %f" % (latitude, longitude))
list_pokemons = []
for cell in cells.map_cells:
pokemons = [p for p in cell.wild_pokemons] + [p for p in cell.catchable_pokemons]
for pokemon in pokemons:
list_pokemons.append(parseWildPokemon(pokemon))
return render_template('pokemons_nearby.html', user=user, pokemons=list_pokemons)
@app.route(API_PATH + "/<user>/pokemons/nearby")
def api_pokemons_nearby(user):
cells = sessions[user].getMapObjects()
latitude, longitude, _ = sessions[user].getCoordinates()
logging.info("Current pos: %f, %f" % (latitude, longitude))
list_pokemons = []
for cell in cells.map_cells:
pokemons = [p for p in cell.wild_pokemons]
for pokemon in pokemons:
list_pokemons.append(parseWildPokemon(pokemon))
return jsonify(data=list_pokemons, count=len(list_pokemons))
@app.route(API_PATH + "/<user>/pokemons/nearby/<index_pokemon>")
def pokemons_nearby_detail(user, index_pokemon):
"""
encounter_id: 7755420385361159741
last_modified_timestamp_ms: 1469694984766
latitude: 50.8503661336
longitude: 4.35151228998
spawn_point_id: "47c3c387213"
pokemon_data {
pokemon_id: ZUBAT
}
time_till_hidden_ms: 148718
"""
index = int(index_pokemon) - 1
cells = sessions[user].getMapObjects()
latitude, longitude, _ = sessions[user].getCoordinates()
logging.info("Current pos: %f, %f" % (latitude, longitude))
list_pokemons = []
for cell in cells.map_cells:
pokemons = [p for p in cell.wild_pokemons]
for pokemon in pokemons:
list_pokemons.append(pokemon)
return jsonify(data=parseWildPokemon(list_pokemons[index]))
@app.route(API_PATH + "/<user>/pokemons/nearby/<index_pokemon>/capture")
def pokemons_nearby_detail_capture(user, index_pokemon):
"""
encounter_id: 7755420385361159741
last_modified_timestamp_ms: 1469694984766
latitude: 50.8503661336
longitude: 4.35151228998
spawn_point_id: "47c3c387213"
pokemon_data {
pokemon_id: ZUBAT
}
time_till_hidden_ms: 148718
"""
index = int(index_pokemon) - 1
cells = sessions[user].getMapObjects()
latitude, longitude, _ = sessions[user].getCoordinates()
logging.info("Current pos: %f, %f" % (latitude, longitude))
list_pokemons = []
for cell in cells.map_cells:
pokemons = [p for p in cell.wild_pokemons]
for pokemon in pokemons:
list_pokemons.append(pokemon)
result_capture = walkAndCatch(sessions[user], list_pokemons[index])
return jsonify(result=str(result_capture))
@app.route(BASE_PATH + "/<user>/pokemons/party")
def pokemon_party(user):
inventory = sessions[user].checkInventory()
list_pokemons = []
for pokemon in inventory.party:
list_pokemons.append(parsePartyPokemon(pokemon))
sorted_list = sorted(list_pokemons, key=lambda pokemon: pokemon['pokemon_id'])
return render_template('pokemons_party.html', user=user, pokemons=sorted_list)
@app.route(API_PATH + "/<user>/pokemons/party")
def api_pokemon_party(user):
cp = int(request.args.get('cp', 0))
inventory = sessions[user].checkInventory()
#app.logger.info('CP ' + str(cp))
list_pokemons = []
for pokemon in inventory.party:
parsed_pokemon = parsePartyPokemon(pokemon)
if cp > 0:
#app.logger.info('Pokemon CP ' + str(parsed_pokemon['cp']) + str(parsed_pokemon['cp'] < cp))
if parsed_pokemon['cp'] < cp:
list_pokemons.append(parsed_pokemon)
else:
list_pokemons.append(parsed_pokemon)
sorted_list = sorted(list_pokemons, key=lambda pokemon: pokemon['pokemon_id'])
return jsonify({ 'data': sorted_list, 'count': len(sorted_list) })
@app.route(API_PATH + "/<user>/pokemons/party/<pokemon_id>")
def pokemon_party_detail(user, pokemon_id):
inventory = sessions[user].checkInventory()
pokemon_id = int(pokemon_id)
for pokemon in inventory.party:
if pokemon_id == getattr(pokemon, 'id', None):
return jsonify({ 'data': parsePartyPokemon(pokemon, detail=True) })
return jsonify({ 'data': {} })
@app.route(API_PATH + "/<user>/pokemons/party/<pokemon_id>/free")
def pokemon_party_free(user, pokemon_id):
inventory = sessions[user].checkInventory()
pokemon_id = int(pokemon_id)
for pokemon in inventory.party:
if pokemon_id == getattr(pokemon, 'id', None):
result = sessions[user].releasePokemon(pokemon)
return jsonify({ 'data': parseResponseResult(result)})
return jsonify({ 'data': {} })
@app.errorhandler(404)
def page_not_found(error):
"""Custom 404 page."""
return render_template('error404.html'), 404
if __name__ == '__main__':
"""
poko_session = PokeAuthSession(
username,
password,
auth_type,
geo_key=None
)
session = poko_session.authenticate(locationLookup='Brussels')
if session:
sessions[username] = session
app.run()
else:
sys.exit(-1)
"""
setupLogger()
logging.debug('Logger set up')
app.run()
```
#### File: flask-pg/pogo/util.py
```python
import struct
import time
def f2i(value):
    return struct.unpack('<Q', struct.pack('<d', value))[0]
def f2h(value):
    return hex(struct.unpack('<Q', struct.pack('<d', value))[0])
def h2f(hex_str):
    return struct.unpack('<d', struct.pack('<Q', int(hex_str, 16)))[0]
def encodeLocation(loc):
return (f2i(loc.latitude), f2i(loc.longitude), f2i(loc.altitude))
def getMs():
return int(round(time.time() * 1000))
``` |
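The `f2i`/`f2h`/`h2f` helpers reinterpret the IEEE-754 bytes of a double as an unsigned 64-bit integer (and back), which is how `encodeLocation` packs coordinates. A self-contained round-trip check of the same `struct` trick:
```python
import struct

lat = 50.8503396  # example latitude
# float -> uint64: pack as a little-endian double, unpack the same bytes as an unsigned 64-bit int
as_int = struct.unpack('<Q', struct.pack('<d', lat))[0]
# uint64 -> float: reverse the reinterpretation
back = struct.unpack('<d', struct.pack('<Q', as_int))[0]
assert back == lat
print(hex(as_int), back)
```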
{
"source": "josenavas/glowing-dangerzone",
"score": 2
} |
#### File: glowing-dangerzone/gd/config.py
```python
from os import environ
from os.path import dirname, abspath, join
from future import standard_library
with standard_library.hooks():
from configparser import ConfigParser
class GDConfig(object):
"""Holds the glowing-dangerzone configuration
Attributes
----------
user : str
The postgres user to connect to the postgres server
password : str
The password for the previous user
database : str
The database to connect to
host : str
The host where the postgres server lives
port : str
The port to use to connect to the postgres server
admin_user : str
The administrator user to connect to the postgres server
admin_password : str
The password for the administrator user
"""
def __init__(self):
# If GD_CONFIG_FP is not set, default to the example in the repo
try:
conf_fp = environ['GD_CONFIG_FP']
except KeyError:
conf_fp = join(dirname(abspath(__file__)),
'support_files', 'config.txt')
# parse the config bits
config = ConfigParser()
with open(conf_fp) as f:
config.readfp(f)
self.user = config.get('postgres', 'USER')
self.password = config.get('postgres', 'PASSWORD') or None
self.database = config.get('postgres', 'DATABASE')
self.host = config.get('postgres', 'HOST')
self.port = config.getint('postgres', 'PORT')
self.admin_user = config.get('postgres', 'ADMIN_USER') or None
self.admin_password = config.get('postgres', 'ADMIN_PASSWORD') or None
gd_config = GDConfig()
```
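`GDConfig` expects an INI file with a single `[postgres]` section containing the keys read above. A hedged sketch that generates such a template with `ConfigParser` (the values are placeholders; only the section and key names come from the code above):
```python
from configparser import ConfigParser

config = ConfigParser()
config['postgres'] = {
    'USER': 'gd_user',
    'PASSWORD': '',           # an empty value is mapped to None by GDConfig
    'DATABASE': 'sql_handler_test',
    'HOST': 'localhost',
    'PORT': '5432',
    'ADMIN_USER': 'postgres',
    'ADMIN_PASSWORD': '',
}
with open('config.txt', 'w') as f:
    config.write(f)
```
Pointing the `GD_CONFIG_FP` environment variable at the generated file makes `gd_config` pick it up instead of the bundled example.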
#### File: gd/test/test_sql_connection.py
```python
from unittest import TestCase, main
from psycopg2._psycopg import connection, cursor
from psycopg2 import connect, ProgrammingError
from psycopg2.extensions import (ISOLATION_LEVEL_AUTOCOMMIT,
ISOLATION_LEVEL_READ_COMMITTED)
from gd import gd_config
from gd.sql_connection import SQLConnectionHandler
from gd.exceptions import GDExecutionError, GDConnectionError
DB_LAYOUT = """CREATE TABLE test_table (
str_column varchar DEFAULT 'foo' NOT NULL,
bool_column bool DEFAULT True NOT NULL,
int_column bigint NOT NULL
);"""
class TestConnHandler(TestCase):
def setUp(self):
# First check that we are connected to the test database, so we are
# sure that we are not destroying anything
if gd_config.database != "sql_handler_test":
raise RuntimeError(
"Not running the tests since the system is not connected to "
"the test database 'sql_handler_test'")
# Destroy the test database and create it again, so the tests are
# independent and the test database is always available
with connect(user=gd_config.admin_user,
                     password=gd_config.admin_password, host=gd_config.host,
port=gd_config.port) as con:
# Set the isolation level to autocommit so we can drop the database
con.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
with con.cursor() as cur:
try:
cur.execute("DROP DATABASE sql_handler_test")
except ProgrammingError:
# Means that the sql_handler_test database does not exist
# This will happen on test_init_connection_error
pass
# Create the database again
cur.execute("CREATE DATABASE sql_handler_test")
with connect(user=gd_config.user, password=gd_config.password,
host=gd_config.host, port=gd_config.port,
database=gd_config.database) as con:
with con.cursor() as cur:
cur.execute(DB_LAYOUT)
# Instantiate a conn_handler for the tests
self.conn_handler = SQLConnectionHandler()
def tearDown(self):
# We need to delete the conn_handler, so the connection is closed
del self.conn_handler
def _populate_test_table(self):
sql = ("INSERT INTO test_table (str_column, bool_column, int_column) "
"VALUES (%s, %s, %s)")
sql_args = [('test1', True, 1), ('test2', True, 2),
('test3', False, 3), ('test4', False, 4)]
        con = connect(user=gd_config.user, password=gd_config.password,
host=gd_config.host, port=gd_config.port,
database=gd_config.database)
with con.cursor() as cur:
cur.executemany(sql, sql_args)
con.commit()
con.close()
def _assert_sql_equal(self, exp):
        con = connect(user=gd_config.user, password=gd_config.password,
host=gd_config.host, port=gd_config.port,
database=gd_config.database)
with con.cursor() as cur:
cur.execute("SELECT * FROM test_table")
obs = cur.fetchall()
con.commit()
con.close()
self.assertEqual(obs, exp)
def test_init(self):
"""init successfully initializes the handler"""
obs = SQLConnectionHandler()
self.assertEqual(obs.admin, 'no_admin')
self.assertEqual(obs.queues, {})
self.assertTrue(isinstance(obs._connection, connection))
def test_init_admin_error(self):
"""Init raises an error if admin is an unrecognized value"""
with self.assertRaises(GDConnectionError):
SQLConnectionHandler(admin='not a valid value')
def test_init_admin_with_database(self):
"""Init works with admin_with_database"""
obs = SQLConnectionHandler(admin='admin_with_database')
self.assertEqual(obs.admin, 'admin_with_database')
self.assertEqual(obs.queues, {})
self.assertTrue(isinstance(obs._connection, connection))
def test_init_admin_without_database(self):
"""Init works with admin_with_database"""
obs = SQLConnectionHandler(admin='admin_without_database')
self.assertEqual(obs.admin, 'admin_without_database')
self.assertEqual(obs.queues, {})
self.assertTrue(isinstance(obs._connection, connection))
def test_init_connection_error(self):
"""init raises an error if cannot connect to the database"""
        # We first need to close all the connections
self.conn_handler._connection.close()
# In order to force a connection failure, remove the test database
with connect(user=gd_config.admin_user,
                     password=gd_config.admin_password, host=gd_config.host,
port=gd_config.port) as con:
# Set the isolation level to autocommit so we can drop the database
con.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
with con.cursor() as cur:
cur.execute("DROP DATABASE sql_handler_test")
with self.assertRaises(GDConnectionError):
SQLConnectionHandler()
def test_autocommit(self):
"""correctly retrieves if the autocommit is activated or not"""
self.assertFalse(self.conn_handler.autocommit)
self.conn_handler._connection.set_isolation_level(
ISOLATION_LEVEL_AUTOCOMMIT)
self.assertTrue(self.conn_handler.autocommit)
def test_autocommit_setter(self):
"""correctly activates/deactivates the autocommit"""
self.assertEqual(self.conn_handler._connection.isolation_level,
ISOLATION_LEVEL_READ_COMMITTED)
self.conn_handler.autocommit = True
self.assertEqual(self.conn_handler._connection.isolation_level,
ISOLATION_LEVEL_AUTOCOMMIT)
self.conn_handler.autocommit = False
self.assertEqual(self.conn_handler._connection.isolation_level,
ISOLATION_LEVEL_READ_COMMITTED)
def test_autocommit_setter_error(self):
"""autocommit raises an error if the parameter is not a boolean
"""
with self.assertRaises(TypeError):
self.conn_handler.autocommit = 'not a valid value'
def test_check_sql_args(self):
"""check_sql_args returns the execution to the caller if type is ok"""
self.conn_handler._check_sql_args(['a', 'list'])
self.conn_handler._check_sql_args(('a', 'tuple'))
self.conn_handler._check_sql_args({'a': 'dict'})
self.conn_handler._check_sql_args(None)
def test_check_sql_args_error(self):
"""check_sql_args raises an error with unsupported types"""
with self.assertRaises(TypeError):
self.conn_handler._check_sql_args("a string")
with self.assertRaises(TypeError):
self.conn_handler._check_sql_args(1)
with self.assertRaises(TypeError):
self.conn_handler._check_sql_args(1.2)
def test_sql_executor_no_sql_args(self):
"""sql_executor works with no sql arguments"""
sql = "INSERT INTO test_table (int_column) VALUES (1)"
with self.conn_handler._sql_executor(sql) as cur:
self.assertTrue(cur, cursor)
self._assert_sql_equal([('foo', True, 1)])
def test_sql_executor_with_sql_args(self):
"""sql_executor works with sql arguments"""
sql = "INSERT INTO test_table (int_column) VALUES (%s)"
with self.conn_handler._sql_executor(sql, sql_args=(1,)) as cur:
self.assertTrue(cur, cursor)
self._assert_sql_equal([('foo', True, 1)])
def test_sql_executor_many(self):
"""sql_executor works with many"""
sql = "INSERT INTO test_table (int_column) VALUES (%s)"
sql_args = [(1,), (2,)]
with self.conn_handler._sql_executor(sql, sql_args=sql_args,
many=True) as cur:
self.assertTrue(cur, cursor)
self._assert_sql_equal([('foo', True, 1), ('foo', True, 2)])
def test_execute_no_sql_args(self):
"""execute works with no arguments"""
sql = "INSERT INTO test_table (int_column) VALUES (1)"
self.conn_handler.execute(sql)
self._assert_sql_equal([('foo', True, 1)])
def test_execute_with_sql_args(self):
"""execute works with arguments"""
sql = "INSERT INTO test_table (int_column) VALUES (%s)"
self.conn_handler.execute(sql, (1,))
self._assert_sql_equal([('foo', True, 1)])
def test_executemany(self):
"""executemany works as expected"""
sql = "INSERT INTO test_table (int_column) VALUES (%s)"
self.conn_handler.executemany(sql, [(1,), (2,)])
self._assert_sql_equal([('foo', True, 1), ('foo', True, 2)])
def test_execute_fetchone_no_sql_args(self):
"""execute_fetchone works with no arguments"""
self._populate_test_table()
sql = "SELECT str_column FROM test_table WHERE int_column = 1"
obs = self.conn_handler.execute_fetchone(sql)
self.assertEqual(obs, ['test1'])
def test_execute_fetchone_with_sql_args(self):
"""execute_fetchone works with arguments"""
self._populate_test_table()
sql = "SELECT str_column FROM test_table WHERE int_column = %s"
obs = self.conn_handler.execute_fetchone(sql, (2,))
self.assertEqual(obs, ['test2'])
def test_execute_fetchall_no_sql_args(self):
"""execute_fetchall works with no arguments"""
self._populate_test_table()
sql = "SELECT * FROM test_table WHERE bool_column = False"
obs = self.conn_handler.execute_fetchall(sql)
self.assertEqual(obs, [['test3', False, 3], ['test4', False, 4]])
def test_execute_fetchall_with_sql_args(self):
"""execute_fetchall works with arguments"""
self._populate_test_table()
sql = "SELECT * FROM test_table WHERE bool_column = %s"
obs = self.conn_handler.execute_fetchall(sql, (True, ))
self.assertEqual(obs, [['test1', True, 1], ['test2', True, 2]])
def test_create_queue(self):
"""create_queue initializes a new queue"""
self.assertEqual(self.conn_handler.queues, {})
self.conn_handler.create_queue("test_queue")
self.assertEqual(self.conn_handler.queues, {'test_queue': []})
def test_create_queue_error(self):
"""create_queue raises an error if the queue already exists"""
self.conn_handler.create_queue("test_queue")
with self.assertRaises(KeyError):
self.conn_handler.create_queue("test_queue")
def test_list_queues(self):
"""test_list_queues works correctly"""
self.assertEqual(self.conn_handler.list_queues(), [])
self.conn_handler.create_queue("test_queue")
self.assertEqual(self.conn_handler.list_queues(), ["test_queue"])
def test_add_to_queue(self):
"""add_to_queue works correctly"""
self.conn_handler.create_queue("test_queue")
self.assertEqual(self.conn_handler.queues, {"test_queue": []})
sql1 = "INSERT INTO test_table (int_column) VALUES (%s)"
sql_args1 = (1,)
self.conn_handler.add_to_queue("test_queue", sql1, sql_args1)
self.assertEqual(self.conn_handler.queues,
{"test_queue": [(sql1, sql_args1)]})
sql2 = "INSERT INTO test_table (int_column) VALUES (2)"
self.conn_handler.add_to_queue("test_queue", sql2)
self.assertEqual(self.conn_handler.queues,
{"test_queue": [(sql1, sql_args1), (sql2, None)]})
def test_add_to_queue_many(self):
"""add_to_queue works with many"""
self.conn_handler.create_queue("test_queue")
self.assertEqual(self.conn_handler.queues, {"test_queue": []})
sql = "INSERT INTO test_table (int_column) VALUES (%s)"
sql_args = [(1,), (2,), (3,)]
self.conn_handler.add_to_queue("test_queue", sql, sql_args, many=True)
self.assertEqual(self.conn_handler.queues,
{"test_queue": [(sql, (1,)), (sql, (2,)),
(sql, (3,))]})
def test_execute_queue(self):
self.conn_handler.create_queue("test_queue")
self.conn_handler.add_to_queue(
"test_queue",
"INSERT INTO test_table (str_column, int_column) VALUES (%s, %s)",
['test_insert', '2'])
self.conn_handler.add_to_queue(
"test_queue",
"UPDATE test_table SET int_column = 20, bool_column = FALSE "
"WHERE str_column = %s",
['test_insert'])
obs = self.conn_handler.execute_queue("test_queue")
self.assertEqual(obs, [])
self._assert_sql_equal([('test_insert', False, 20)])
def test_execute_queue_many(self):
sql = "INSERT INTO test_table (str_column, int_column) VALUES (%s, %s)"
sql_args = [('insert1', 1), ('insert2', 2), ('insert3', 3)]
self.conn_handler.create_queue("test_queue")
self.conn_handler.add_to_queue("test_queue", sql, sql_args, many=True)
self.conn_handler.add_to_queue(
"test_queue",
"UPDATE test_table SET int_column = 20, bool_column = FALSE "
"WHERE str_column = %s",
['insert2'])
obs = self.conn_handler.execute_queue('test_queue')
self.assertEqual(obs, [])
self._assert_sql_equal([('insert1', True, 1), ('insert3', True, 3),
('insert2', False, 20)])
def test_execute_queue_last_return(self):
self.conn_handler.create_queue("test_queue")
self.conn_handler.add_to_queue(
"test_queue",
"INSERT INTO test_table (str_column, int_column) VALUES (%s, %s)",
['test_insert', '2'])
self.conn_handler.add_to_queue(
"test_queue",
"UPDATE test_table SET bool_column = FALSE WHERE str_column = %s "
"RETURNING int_column",
['test_insert'])
obs = self.conn_handler.execute_queue("test_queue")
self.assertEqual(obs, [2])
def test_execute_queue_placeholders(self):
self.conn_handler.create_queue("test_queue")
self.conn_handler.add_to_queue(
"test_queue",
"INSERT INTO test_table (int_column) VALUES (%s) "
"RETURNING str_column", (2,))
self.conn_handler.add_to_queue(
"test_queue",
"UPDATE test_table SET bool_column = FALSE WHERE str_column = %s",
('{0}',))
obs = self.conn_handler.execute_queue("test_queue")
self.assertEqual(obs, [])
self._assert_sql_equal([('foo', False, 2)])
def test_queue_fail(self):
"""Fail if no results data exists for substitution"""
self.conn_handler.create_queue("test_queue")
self.conn_handler.add_to_queue(
"test_queue",
"INSERT INTO test_table (int_column) VALUES (%s)", (2,))
self.conn_handler.add_to_queue(
"test_queue",
"UPDATE test_table SET bool_column = FALSE WHERE str_column = %s",
('{0}',))
with self.assertRaises(GDExecutionError):
self.conn_handler.execute_queue("test_queue")
# make sure rollback correctly
self._assert_sql_equal([])
def test_huge_queue(self):
self.conn_handler.create_queue("test_queue")
# add tons of inserts to queue
for x in range(1000):
self.conn_handler.add_to_queue(
"test_queue",
"INSERT INTO test_table (int_column) VALUES (%s)", (x,))
# add failing insert as final item in queue
self.conn_handler.add_to_queue(
"test_queue",
"INSERT INTO NO_TABLE (some_column) VALUES (1)")
with self.assertRaises(GDExecutionError):
self.conn_handler.execute_queue("test_queue")
# make sure rollback correctly
self._assert_sql_equal([])
if __name__ == "__main__":
main()
``` |
{
"source": "josenavas/mustached-octo-ironman",
"score": 2
} |
#### File: mustached-octo-ironman/moi/job.py
```python
import sys
import traceback
import json
from functools import partial
from datetime import datetime
from subprocess import Popen, PIPE
from moi import r_client, ctxs, ctx_default, REDIS_KEY_TIMEOUT
from moi.group import create_info
from moi.context import Context
def system_call(cmd, **kwargs):
"""Call cmd and return (stdout, stderr, return_value).
Parameters
----------
cmd: str
Can be either a string containing the command to be run, or a sequence
of strings that are the tokens of the command.
kwargs : dict, optional
Ignored. Available so that this function is compatible with
_redis_wrap.
Notes
-----
This function is ported from QIIME (http://www.qiime.org), previously
named qiime_system_call. QIIME is a GPL project, but we obtained permission
from the authors of this function to port it to pyqi (and keep it under
pyqi's BSD license).
"""
proc = Popen(cmd,
universal_newlines=True,
shell=True,
stdout=PIPE,
stderr=PIPE)
# communicate pulls all stdout/stderr from the PIPEs to
# avoid blocking -- don't remove this line!
stdout, stderr = proc.communicate()
return_value = proc.returncode
if return_value != 0:
raise ValueError("Failed to execute: %s\nstdout: %s\nstderr: %s" %
(cmd, stdout, stderr))
return stdout, stderr, return_value
def _status_change(id, new_status):
"""Update the status of a job
The status associated with the id is updated, an update command is
    issued to the job's pubsub, and the old status is returned.
Parameters
----------
id : str
The job ID
new_status : str
The status change
Returns
-------
str
The old status
"""
job_info = json.loads(r_client.get(id))
old_status = job_info['status']
job_info['status'] = new_status
_deposit_payload(job_info)
return old_status
def _deposit_payload(to_deposit):
"""Store job info, and publish an update
Parameters
----------
to_deposit : dict
The job info
"""
pubsub = to_deposit['pubsub']
id = to_deposit['id']
with r_client.pipeline() as pipe:
pipe.set(id, json.dumps(to_deposit), ex=REDIS_KEY_TIMEOUT)
pipe.publish(pubsub, json.dumps({"update": [id]}))
pipe.execute()
def _redis_wrap(job_info, func, *args, **kwargs):
"""Wrap something to compute
The function that will have available, via kwargs['moi_update_status'], a
method to modify the job status. This method can be used within the
executing function by:
old_status = kwargs['moi_update_status']('my new status')
Parameters
----------
job_info : dict
Redis job details
func : function
A function to execute. This function must accept ``**kwargs``, and will
have ``moi_update_status``, ``moi_context`` and ``moi_parent_id``
available.
Raises
------
Exception
If the function called raises, that exception is propagated.
Returns
-------
Anything the function executed returns.
"""
status_changer = partial(_status_change, job_info['id'])
kwargs['moi_update_status'] = status_changer
kwargs['moi_context'] = job_info['context']
kwargs['moi_parent_id'] = job_info['parent']
job_info['status'] = 'Running'
job_info['date_start'] = str(datetime.now())
_deposit_payload(job_info)
caught = None
try:
result = func(*args, **kwargs)
job_info['status'] = 'Success'
except Exception as e:
result = traceback.format_exception(*sys.exc_info())
job_info['status'] = 'Failed'
caught = e
finally:
job_info['result'] = result
job_info['date_end'] = str(datetime.now())
_deposit_payload(job_info)
if caught is None:
return result
else:
raise caught
def submit(ctx_name, parent_id, name, url, func, *args, **kwargs):
"""Submit through a context
Parameters
----------
ctx_name : str
The name of the context to submit through
parent_id : str
The ID of the group that the job is a part of.
name : str
The name of the job
url : str
The handler that can take the results (e.g., /beta_diversity/)
func : function
The function to execute. Any returns from this function will be
serialized and deposited into Redis using the uuid for a key. This
function should raise if the method fails.
args : tuple or None
Any args for ``func``
kwargs : dict or None
Any kwargs for ``func``
Returns
-------
tuple, (str, str, AsyncResult)
The job ID, parent ID and the IPython's AsyncResult object of the job
"""
if isinstance(ctx_name, Context):
ctx = ctx_name
else:
ctx = ctxs.get(ctx_name, ctxs[ctx_default])
return _submit(ctx, parent_id, name, url, func, *args, **kwargs)
def _submit(ctx, parent_id, name, url, func, *args, **kwargs):
"""Submit a function to a cluster
Parameters
----------
parent_id : str
The ID of the group that the job is a part of.
name : str
The name of the job
url : str
The handler that can take the results (e.g., /beta_diversity/)
func : function
The function to execute. Any returns from this function will be
serialized and deposited into Redis using the uuid for a key. This
function should raise if the method fails.
args : tuple or None
Any args for ``func``
kwargs : dict or None
Any kwargs for ``func``
Returns
-------
tuple, (str, str, AsyncResult)
The job ID, parent ID and the IPython's AsyncResult object of the job
"""
parent_info = r_client.get(parent_id)
if parent_info is None:
parent_info = create_info('unnamed', 'group', id=parent_id)
parent_id = parent_info['id']
r_client.set(parent_id, json.dumps(parent_info))
parent_pubsub_key = parent_id + ':pubsub'
job_info = create_info(name, 'job', url=url, parent=parent_id,
context=ctx.name, store=True)
job_info['status'] = 'Queued'
job_id = job_info['id']
with r_client.pipeline() as pipe:
pipe.set(job_id, json.dumps(job_info))
pipe.publish(parent_pubsub_key, json.dumps({'add': [job_id]}))
pipe.execute()
ar = ctx.bv.apply_async(_redis_wrap, job_info, func, *args, **kwargs)
return job_id, parent_id, ar
def submit_nouser(func, *args, **kwargs):
"""Submit a function to a cluster without an associated user
Parameters
----------
func : function
The function to execute. Any returns from this function will be
serialized and deposited into Redis using the uuid for a key.
args : tuple or None
Any args for ``f``
kwargs : dict or None
Any kwargs for ``f``
Returns
-------
tuple, (str, str)
The job ID, parent ID and the IPython's AsyncResult object of the job
"""
return submit(ctx_default, "no-user", "unnamed", None, func, *args,
**kwargs)
```
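Per the docstrings above, a submitted function must accept `**kwargs` and receives `moi_update_status`, `moi_context` and `moi_parent_id` at call time. A hedged sketch of a submission (assumes a configured moi deployment with Redis and an IPython cluster behind the default context; the `add` function is hypothetical):
```python
from moi.job import submit_nouser

def add(a, b, **kwargs):
    # moi injects a status changer; calling it returns the previous status
    kwargs['moi_update_status']('Adding numbers')
    return a + b

# Returns the job ID, the parent group ID and IPython's AsyncResult
job_id, parent_id, async_result = submit_nouser(add, 2, 3)
print(job_id, parent_id)
```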
#### File: moi/test/test_group.py
```python
from json import dumps
from unittest import TestCase, main
from moi import r_client
from moi.group import Group
class GroupTests(TestCase):
def setUp(self):
r_client.hset('user-id-map', 'testing', 'testing')
r_client.sadd('testing:children', 'a')
r_client.sadd('testing:children', 'b')
r_client.sadd('testing:children', 'c')
r_client.set('a', '{"type": "job", "id": "a", "name": "a"}')
r_client.set('b', '{"type": "job", "id": "b", "name": "b"}')
r_client.set('c', '{"type": "job", "id": "c", "name": "c"}')
r_client.set('d', '{"type": "job", "id": "d", "name": "other job"}')
r_client.set('e', '{"type": "job", "id": "e", "name": "other job e"}')
self.obj = Group('testing')
self.to_delete = ['testing', 'testing:jobs', 'testing:children',
'user-id-map', 'a', 'b', 'c', 'd', 'e']
def tearDown(self):
for key in self.to_delete:
r_client.delete(key)
def test_init(self):
self.assertEqual(self.obj.group_children, 'testing:children')
self.assertEqual(self.obj.group_pubsub, 'testing:pubsub')
self.assertEqual(self.obj.forwarder('foo'), None)
def test_traverse_simple(self):
exp = {'a', 'b', 'c'}
obs = {obj['id'] for obj in self.obj.traverse('testing')}
self.assertEqual(obs, exp)
def test_traverse_removed_child(self):
r_client.delete('b')
exp = {'a', 'c'}
obs = {obj['id'] for obj in self.obj.traverse('testing')}
self.assertEqual(obs, exp)
self.assertEqual(r_client.smembers('testing:children'), exp)
def test_traverse_complex(self):
r_client.sadd('testing:children', 'd')
r_client.sadd('d:children', 'd_a', 'd_b')
r_client.set('d', '{"type": "group", "id": "d", "name": "d"}')
r_client.set('d_a', '{"type": "job", "id": "d_a", "name": "d_a"}')
r_client.set('d_b', '{"type": "job", "id": "d_b", "name": "d_b"}')
self.to_delete.append('d:children')
self.to_delete.append('d_a')
self.to_delete.append('d_b')
exp = {'a', 'b', 'c', 'd', 'd_a', 'd_b'}
obs = {obj['id'] for obj in self.obj.traverse('testing')}
self.assertEqual(obs, exp)
def test_del(self):
pass # unsure how to test
def test_close(self):
pass # unsure how to test
def test_decode(self):
obs = self.obj._decode(dumps({'foo': ['bar']}))
self.assertEqual(obs, {'foo': ['bar']})
def test_listen_for_updates(self):
pass # nothing to test...
def test_listen_to_node(self):
self.assertItemsEqual(self.obj._listening_to.items(),
[('a:pubsub', 'a'),
('b:pubsub', 'b'),
('c:pubsub', 'c')])
def test_unlisten_to_node(self):
self.assertEqual(self.obj.unlisten_to_node('b'), 'b')
self.assertItemsEqual(self.obj._listening_to.items(),
[('a:pubsub', 'a'),
('c:pubsub', 'c')])
self.assertEqual(self.obj.unlisten_to_node('foo'), None)
def test_callback(self):
class forwarder(object):
def __init__(self):
self.result = None
def __call__(self, data):
self.result = list(data)
fwd = forwarder()
self.obj.forwarder = fwd
self.obj.callback(('message', 'testing:pubsub', dumps({'get': ['b']})))
self.assertEqual(fwd.result,
[{'get': {u'id': u'b',
u'name': u'b',
u'type': u'job'}}])
self.obj.callback(('message', 'a:pubsub', dumps({'update': ['a']})))
self.assertEqual(fwd.result, [{'update': {u'id': u'a',
u'name': u'a',
u'type': u'job'}}])
with self.assertRaises(ValueError):
self.obj.callback(('message', 'testing:pubsub',
dumps({'foo': ['bar']})))
self.assertEqual(self.obj.callback(('a', 'b', 'c')), None)
def test_action(self):
class forwarder(object):
def __init__(self):
self.result = None
def __call__(self, data):
self.result = list(data)
fwd = forwarder()
self.obj.forwarder = fwd
self.obj.action('add', ['d', 'e'])
self.assertItemsEqual(fwd.result, [
{'add': {u'id': u'd', u'name': u'other job', u'type': u'job'}},
{'add': {u'id': u'e', u'name': u'other job e', u'type': u'job'}}])
self.obj.action('remove', ['e', 'd'])
self.assertItemsEqual(fwd.result, [
{'remove':
{u'id': u'e', u'name': u'other job e', u'type': u'job'}},
{'remove':
{u'id': u'd', u'name': u'other job', u'type': u'job'}}])
self.obj.action('remove', ['d'])
self.assertEqual(fwd.result, [])
with self.assertRaises(TypeError):
self.obj.action('add', 'foo')
with self.assertRaises(ValueError):
self.obj.action('foo', ['d'])
def test_job_action(self):
class forwarder(object):
def __init__(self):
self.result = None
def __call__(self, data):
self.result = list(data)
fwd = forwarder()
self.obj.forwarder = fwd
self.obj.job_action('update', ['a', 'b'])
self.assertEqual(fwd.result, [{'update': {u'id': u'a',
u'name': u'a',
u'type': u'job'}},
{'update': {u'id': u'b',
u'name': u'b',
u'type': u'job'}}])
with self.assertRaises(TypeError):
self.obj.job_action('add', 'foo')
with self.assertRaises(ValueError):
self.obj.job_action('foo', ['d'])
def test_action_add(self):
resp = self.obj._action_add(['d', 'f', 'e'])
self.assertItemsEqual(resp, [
{u'id': u'd', u'name': u'other job', u'type': u'job'},
{u'id': u'e', u'name': u'other job e', u'type': u'job'}])
self.assertIn('d:pubsub', self.obj._listening_to)
self.assertIn('e:pubsub', self.obj._listening_to)
self.assertNotIn('f:pubsub', self.obj._listening_to)
def test_action_remove(self):
self.obj._action_add(['d', 'f', 'e'])
resp = self.obj._action_remove(['a', 'd', 'f', 'c', 'e'])
self.assertItemsEqual(resp, [
{u'id': u'a', u'name': u'a', u'type': u'job'},
{u'id': u'd', u'name': u'other job', u'type': u'job'},
{u'id': u'c', u'name': u'c', u'type': u'job'},
{u'id': u'e', u'name': u'other job e', u'type': u'job'}])
self.assertNotIn('a:pubsub', self.obj._listening_to)
self.assertNotIn('c:pubsub', self.obj._listening_to)
self.assertNotIn('d:pubsub', self.obj._listening_to)
self.assertNotIn('e:pubsub', self.obj._listening_to)
self.assertNotIn('f:pubsub', self.obj._listening_to)
self.assertEqual(r_client.smembers('testing:children'), {'b'})
def test_action_get(self):
resp = self.obj._action_get(['d', 'f', 'e', None])
self.assertItemsEqual(resp, [
{u'id': u'd', u'name': u'other job', u'type': u'job'},
{u'id': u'e', u'name': u'other job e', u'type': u'job'}])
if __name__ == '__main__':
main()
``` |
{
"source": "jose-neta/python-aleixo50",
"score": 3
} |
#### File: python-aleixo50/aleixo50/dish.py
```python
class Dish(object):
    def __init__(self, name, ingredients=None, instructions=None):
        # avoid mutable default arguments, which are shared across all calls
        self.name = name
        self.ingredients = ingredients if ingredients is not None else []
        self.instructions = instructions if instructions is not None else []
def __repr__(self):
return 'Dish({0.name!r}, {0.ingredients!r}, {0.instructions!r})'.format(self)
def __str__(self):
return 'Dish({0.name})'.format(self)
```
#### File: python-aleixo50/aleixo50/rand.py
```python
import random
from .dishes import dishes
def rand():
return random.choice(dishes)
``` |
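A hedged usage sketch of the two modules above (assumes the `aleixo50` package is importable and that `dishes` in `dishes.py` is a populated list of `Dish` objects):
```python
from aleixo50.dish import Dish
from aleixo50.rand import rand

bacalhau = Dish('Bacalhau à Brás',
                ingredients=['salt cod', 'potatoes', 'eggs', 'onion'],
                instructions=['Shred the cod.', 'Fry the potatoes.', 'Mix everything with the eggs.'])
print(bacalhau)      # Dish(Bacalhau à Brás)
print(repr(rand()))  # a random Dish drawn from the built-in list
```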
{
"source": "JosenJin/hqchartPy2",
"score": 2
} |
#### File: HQChartPy2/HQChartPy2/hqchartpy2_tushare_test.py
```python
from hqchartpy2_fast import FastHQChart,PERIOD_ID
from hqchartpy2_pandas import HQChartPy2Helper
from hqchartpy2_tushare import TushareHQChartData, TushareKLocalHQChartData, HQResultTest
from hqchartpy2_tushare_config import TushareConfig
import json
import time
import numpy as np
import pandas as pd
import datetime
import uuid
class HQSelectDataFrameResult():
def __init__(self):
        self.dfResult={} # Store every execution result: key = symbol, value = DataFrame
        self.Error=[]
    # Callback on successful execution
    def RunSuccess(self, symbol, jsData, jobID):
        log="[HQSelectDataFrameResult::RunSuccess] {0} success".format(symbol)
        print (log)
        data=HQChartPy2Helper.JsonDataToPandas(jsData, symbol) # Convert the indicator data to a DataFrame
        self.dfResult[symbol]=data
    # Callback on failed execution
    def RunFailed(self, code, symbol, error,jobID) :
        log="[HQSelectDataFrameResult::RunFailed] {0}\n{1} failed\n{2}".format(code, symbol,error)
        self.Error.append(error)
print(log)
class TushareKLocalHQChartData_Txt(TushareKLocalHQChartData):
def LoadKLineData2(self, symbol, period, right, startDate, endDate):
        # To read the csv directly, call the parent class implementation:
# return super().LoadKLineData(symbol, period, right, startDate, endDate)
filePath='{0}/day/{1}.txt'.format(self.CachePath,symbol)
klineData=pd.read_csv(filePath,sep = " ", header=None)
klineData.columns = ['date','open',"high","low", "close", "vol", "amount"]
klineData["yclose"]=klineData["close"] # !!!!!少前收盘数据,
print(klineData)
return klineData
# Stock indicator execution test
def RunIndexTest(runConfig):
    jsConfig = json.dumps(runConfig) # Run configuration
    # hqData=TushareKLocalHQChartData_Txt(TushareConfig.TUSHARE_AUTHORIZATION_KEY,startDate=20200421, endDate=20201231, cachePath="test_data\\") # Instantiate the data class
    hqData=TushareHQChartData(TushareConfig.TUSHARE_AUTHORIZATION_KEY,startDate=20200421, endDate=20210325) # Instantiate the data class
    result=HQSelectDataFrameResult() # Instantiate the result receiver class
start = time.process_time()
res=FastHQChart.Run(jsConfig,hqData,proSuccess=result.RunSuccess, procFailed=result.RunFailed)
elapsed = (time.process_time() - start)
    log='''RunIndexTest()
---------------------------------------------------------------
    Elapsed time: {0}s,
    Number of symbols: {1},
    Script:
    {2}
    Succeeded: {3}
---------------------------------------------------------------'''.format(elapsed,len(runConfig['Symbol']), runConfig["Script"], res)
print(log)
if (res==True):
for item in result.dfResult.items() :
symbol= item[0]
            print('{0} data:'.format(symbol))
print(item[1])
# item[1].to_csv("test_result/{0}.csv".format(symbol))
if __name__ == '__main__':
if (TushareConfig.HQCHART_AUTHORIZATION_KEY==None) :
        # Request a trial account; replace the mac address with your own machine's mac address
        TushareConfig.HQCHART_AUTHORIZATION_KEY=FastHQChart.GetTrialAuthorize(mac="D<KEY>")
    FastHQChart.Initialization(TushareConfig.HQCHART_AUTHORIZATION_KEY) # Initialize the HQChartPy plugin
FastHQChart.SetLog(1)
runConfig={
        # Indicator script
"Script":'''
K:KDJ.K;
D:KDJ.D;
J:KDJ.J;
K_W:KDJ.K#WEEK;
D_W:KDJ.D#WEEK;
J_W:KDJ.J#WEEK;
''',
        # Script arguments
        "Args": [ { "Name":"M1", "Value":15 }, { "Name":"M2", "Value":20 }, { "Name":"M3", "Value":30} ],
        # Period and price adjustment
        "Period":0, # Period: 0=daily 1=weekly 2=monthly 3=yearly 9=quarterly
        "Right":0, # Adjustment: 0=none 1=forward-adjusted 2=backward-adjusted
        "Symbol":["600000.sh","000001.sz"], # Compute the indicator for several symbols
        "OutCount":-1, # Number of most recent records to output (-1 = all)
        # jobID (optional)
"JobID":str(uuid.uuid1())
}
    # Test the stock indicator calculation
RunIndexTest(runConfig)
``` |
{
"source": "JoseNL27/SimplePoblationSim",
"score": 4
} |
#### File: JoseNL27/SimplePoblationSim/main.py
```python
from scipy.stats import norm
import random
#Opening and clearing the file where the code will write how many people are alive each day.
file = open("poblation.txt","r+")
file.truncate(0)
file.close()
peopleDictionary = []
x= 0
y = 0
startingPob = 10
#Setting up the class.
class Person():
def __init__(self):
self.age = int((norm.rvs(size=1,loc=0.5,scale=0.15)[0]*10).round(0)) #Using a Gaussian distribution to randomize with accuracy the starter age for eacch gen0 member.
self.death = False #Obviously each member will start alive.
self.hunger = 1 #Defining the starter hunger for each member.
def start(): #Function who adds the gen0 individuals to the dictionary.
for x in range(0,startingPob):
person = Person()
peopleDictionary.append(person)
def day(): #Function for each day rutine.
if len([person for person in peopleDictionary if person.death == False]) > 500: #It sets the food limit.
food = 400
else: #If the food limit isn´t reached there´ll be food for the 75% of the poblation.
food = int(len([person for person in peopleDictionary if person.death == False])*0.75)
for person in [person for person in peopleDictionary if person.death == False]: #Starts each member functions.
#print("#",peopleDictionary.index(person))
if person.hunger >= 2 and food > 0:
person.hunger = person.hunger - 2
food = food - 1
if person.hunger <= 1 and len([person for person in peopleDictionary if person.death == False]) > 1 and person.age in range (2,8):
bornRate = random.randint(0,100)
if bornRate < 56:
newBorn()
person.age += 1
person.hunger += 1
if person.age > 10:
person.death = True
peopleDictionary.remove(person)
if person.hunger > 5:
person.death = True
peopleDictionary.remove(person)
def newBorn():
person = Person()
peopleDictionary.append(person)
person.age = 0
start()
for y in range(0,300):
day()
print("DAY", y)
print("|||",len([person for person in peopleDictionary if person.death == False]))
saveFile1 = open("poblation.txt", "a")
write1 = str(len([person for person in peopleDictionary if person.death == False])) + "\n"
saveFile1.write(write1)
saveFile1.close()
y + 1
``` |
{
"source": "JoseNoriegaa/pydottie",
"score": 4
} |
#### File: src/pydottie/exists.py
```python
from typing import Dict
# Utilities
from .get import get
def exists(obj: Dict, path: str) -> bool:
"""
Check if a value exists in a dictionary.
Caution: If the value is None this function will return False even if the key exists.
Time complexity: O(n)
Args:
obj: The dictionary to check.
path: The path to the value.
Returns:
bool: True if the value exists, False otherwise.
"""
return get(obj, path) is not None
```
#### File: src/pydottie/flatten.py
```python
from typing import Dict
from typing import Optional
def flatten(obj: Dict, seperator: Optional[str] = None) -> dict:
"""Flatten a dictionary.
Time complexity: O(n * m) where n is the number of keys in the dictionary and m is the number of nested keys.
Args:
obj (Dict): The dictionary to flatten.
seperator (Union[str, None]): The seperator to use.
Returns:
Dict: The flattened dictionary.
"""
if not isinstance(seperator, str):
seperator = '.'
flattened = {}
current = None
nested = None
for key in obj:
current = obj[key]
if isinstance(current, dict):
nested = flatten(current, seperator)
for _key in nested:
flattened[key + seperator + _key] = nested[_key]
else:
flattened[key] = current
return flattened
```
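A short usage sketch for `flatten` (the input data is made up; `flatten` as defined above is assumed to be in scope):
```python
# Hypothetical nested dictionary.
nested = {"a": 1, "b": {"c": 2, "d": {"e": 3}}}
print(flatten(nested))        # {'a': 1, 'b.c': 2, 'b.d.e': 3}
print(flatten(nested, "/"))   # {'a': 1, 'b/c': 2, 'b/d/e': 3}
```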
#### File: pydottie/tests/test_paths.py
```python
from unittest import TestCase
# Utilities
from src.pydottie import paths
class PathsTestCase(TestCase):
"""
Test cases for the paths function.
"""
def test_raise_exception_for_non_objects(self):
"""
Test that the function raises an exception for non-objects.
"""
self.assertRaises(TypeError, paths, 'no object')
def test_return_keys_of_flat_object(self):
"""
Test that the function returns the keys of a flat object.
"""
self.assertEqual(paths({ 'a': 1, 'b': 2 }), ['a', 'b'])
def test_return_paths_of_deeply_nested_object(self):
"""
Test that the function returns the paths of a deeply nested object.
"""
obj = {
'a': 1,
'b': {
'c': 2,
'd': { 'e': 3 }
}
}
self.assertEqual(paths(obj), ['a', 'b.c', 'b.d.e'])
def test_include_keys_of_null_objects(self):
"""
Test that the function includes keys of null objects.
"""
obj = {
'nonNullKey': 1,
'nullKey': None
}
self.assertEqual(paths(obj), ['nonNullKey', 'nullKey'])
```
#### File: pydottie/tests/test_set_value.py
```python
from unittest import TestCase
# Utilities
from src.pydottie import set_value
class SetValueTestCase(TestCase):
"""
Test cases for the set value function.
"""
def setUp(self) -> None:
self.data = {
'foo': {
'bar': 'baa'
}
}
def test_set_nested_values_on_existing_structure(self):
"""
Should set nested values on existing structure.
"""
set_value(self.data, 'foo.bar', 'baz')
self.assertEqual(self.data['foo']['bar'], 'baz')
def test_create_nested_structure_if_not_existing(self):
"""
Should create nested structure if not existing.
"""
set_value(self.data, 'level1.level2', 'foo')
self.assertEqual(self.data['level1']['level2'], 'foo')
self.assertIsInstance(self.data['level1'], dict)
def test_handle_setting_a_nested_value_on_an_undefined_value(self):
"""
Should handle setting a nested value on an undefined value (should convert undefined to object).
"""
data = {
'values': None
}
set_value(data, 'values.level1', 'foo')
self.assertEqual(data['values']['level1'], 'foo')
def test_set_with_a_list_path(self):
"""
Should be able to set with a list path.
"""
data = {
'some.dot.containing': {
'value': 'foo'
}
}
set_value(data, ['some.dot.containing', 'value'], 'razzamataz')
self.assertEqual(data['some.dot.containing']['value'], 'razzamataz')
def test_raise_exception_when_setting_a_nested_value_on_an_existing_key_with_a_non_object_value(self):
"""
Should raise an exception when setting a nested value on an existing key with a non-object value.
"""
self.assertRaises(ValueError, set_value, self.data, 'foo.bar.baz', 'someValue')
def test_overwrite_a_nested_non_object_value_on_force(self):
"""
Should overwrite a nested non-object value on force: true.
"""
set_value(self.data, 'foo.bar.baz', 'someValue', force=True)
self.assertEqual(self.data['foo']['bar']['baz'], 'someValue')
``` |
{
"source": "josenunesr93/sample.voice.gateway",
"score": 3
} |
#### File: python/callerProfileAPI/callerProfileAPI.py
```python
import os, requests, json, string, datetime, logging, time, csv
from os.path import join, dirname
# __file__ refers to this module's file; APP_PROFILE_API is the directory that contains it
APP_PROFILE_API = os.path.dirname(os.path.abspath(__file__))
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logging_comp_name = "Caller Profile API Stub"
file_name = 'callerProfile.csv'
json_file_name = 'callerProfile.json'
def readProfiles():
with open(os.path.join(APP_PROFILE_API, file_name), 'rb') as csvfile:
csvprofiles = csv.DictReader(csvfile)
profiles = []
for row in csvprofiles:
#print(row)
profiles.append(row)
        return profiles
def readCustomers():
with open(os.path.join(APP_PROFILE_API, json_file_name), 'rb') as json_data:
data = json.load(json_data)
return data
def writeCustomers(customers):
with open(os.path.join(APP_PROFILE_API, json_file_name), 'w') as outfile:
json.dump(customers, outfile)
def getCustomerByName(name):
customers = readCustomers()
customer = findProfileByFirstName(customers, name)
return customer
def updateCustomer(customer):
customers = readCustomers()
for ix, row in enumerate(customers):
if 'customer' in row:
if 'id' in row['customer'] and row['customer']['id'] == customer['customer']['id']:
customers[ix] = customer
writeCustomers(customers)
return True
return False
def getCustomerByID(id):
customers = readCustomers()
customer = findProfileByID(customers, id)
return customer
def findProfileByFirstName(customers, firstname):
for row in customers:
if 'customer' in row:
if 'Firstname' in row['customer']:
if row['customer']['Firstname'] == firstname:
return row
return None
def findProfileByID(customers, id):
for row in customers:
if 'customer' in row and 'id' in row['customer']:
if row['customer']['id'] == id:
return row
return None
def getAccountBalanceByName(customer, accountName):
# looking for account by Name
if 'accounts' in customer:
for account in customer['accounts']:
if 'name' in account and account['name'] == accountName:
return account['balance']
    return 0
def getAccountBalanceByID(customer, accountID):
# looking for account by ID
if 'accounts' in customer:
for account in customer['accounts']:
if 'id' in account and account['id'] == accountID:
return account['balance']
return 0
def getAccountByID(customer, accountID):
# looking for account by ID
if 'accounts' in customer:
for account in customer['accounts']:
if 'id' in account and account['id'] == accountID:
return account
return None
def setAccount(customer, inaccount):
if 'accounts' in customer:
for ix, act in enumerate(customer['accounts']):
if 'id' in act and act['id'] == inaccount['id']:
customer['accounts'][ix] = inaccount
return customer
def getLoanBalanceByName(customer, loanName):
if 'loans' in customer:
for loan in customer['loans']:
if 'name' in loan and loan['name'] == loanName:
return loan['balance']
return 0
def getLoanBalanceByID(customer, loanID):
if 'loans' in customer:
for loan in customer['loans']:
if 'id' in loan and loan['id'] == loanID:
return loan['balance']
return 0
def hasLoanTypeByName(customer, loanType):
if 'loans' in customer:
for loan in customer['loans']:
if 'name' in loan and loan['name'] == loanType:
return True
return False
def hasLoanTypeByID(customer, loanID):
if 'loans' in customer:
for loan in customer['loans']:
if 'id' in loan and loan['id'] == loanID:
return True
return False
def getLoanByID(customer, loanID):
if 'loans' in customer:
for loan in customer['loans']:
if 'id' in loan and loan['id'] == loanID:
return loan
return None
def setLoan(customer, inloan):
if 'loans' in customer:
for ix, loan in enumerate(customer['loans']):
if 'id' in loan and loan['id'] == inloan['id']:
customer['loans'][ix] = inloan
return customer
def hasCardByID(customer, cardID):
if 'creditcards' in customer:
for card in customer['creditcards']:
if 'id' in card and card['id'] == cardID:
return True
return False
def hasCardByNum(customer, num):
if 'creditcards' in customer:
for card in customer['creditcards']:
if 'cardnum' in card and card['cardnum'] == num:
return True
return False
def getCardByID(customer, cardID):
if 'creditcards' in customer:
for card in customer['creditcards']:
if 'id' in card and card['id'] == cardID:
return card
return None
def getCardByNum(customer, cardNum):
if 'creditcards' in customer:
for card in customer['creditcards']:
if 'cardnum' in card and card['cardnum'] == cardNum:
return card
return None
def getCardByName(customer, name):
if 'creditcards' in customer:
for card in customer['creditcards']:
if 'name' in card and card['name'] == name:
return card
return None
def getCardBalanceByNum(customer, num):
card = getCardByNum(customer,num)
if card and 'balance' in card:
return card['balance']
return 0
def setCreditCard(customer, incard, num):
if 'creditcards' in customer:
for card in customer['creditcards']:
if 'name' in card:
if card['name'] == incard:
card['cardnum'] = num
return customer
def getCardBalanceByID(customer, cardID):
if 'creditcards' in customer:
for card in customer['creditcards']:
if 'id' in card and card['id'] == cardID:
return card['balance']
return 0
def makeLoanPayment(profile, loan, account, amount):
if hasLoanTypeByID(profile,loan['id']) and getLoanBalanceByID(profile,loan['id']) > amount and getAccountBalanceByID(profile,account['id']) > amount:
loan = decrementLoan(profile,loan['id'],amount)
setLoan(profile,loan)
account = decrementAccount(profile,account['id'],amount)
setAccount(profile,account)
return True
else:
return False
def makeCardPayment(profile, card, account, amount):
    if hasCardByID(profile,card['id']) and getCardBalanceByID(profile,card['id']) > amount and getAccountBalanceByID(profile,account['id']) > amount:
        # decrementCard mutates the card dict held in the profile, so no separate "set" call is needed
        card = decrementCard(profile,card['id'],amount)
account = decrementAccount(profile,account['id'],amount)
setAccount(profile,account)
return True
else:
return False
def decrementAccount(customer,id,amount):
account = getAccountByID(customer, id)
account = decrementAccountBalance(account, amount)
return account
def decrementAccountBalance(account, amount):
if 'balance' in account:
account['balance'] = account['balance'] - amount
return account
def decrementLoan(customer,id,amount):
loan = getLoanByID(customer, id)
loan = decrementLoanBalance(loan, amount)
return loan
def decrementLoanBalance(loan, amount):
if 'balance' in loan:
loan['balance'] = loan['balance'] - amount
return loan
def decrementCard(customer,id,amount):
account = getCardByID(customer, id)
account = decrementCardBalance(account, amount)
return account
def decrementCardBalance(card, amount):
if 'balance' in card:
card['balance'] = card['balance'] - amount
return card
def runTest():
print("starting " + logging_comp_name)
name = 'Brian'
profile = getCustomerByName(name)
print(profile)
balance = getAccountBalanceByName(profile,'checking')
print("Checking Balance is : " + str(balance))
balance = getAccountBalanceByID(profile,30)
print("Money Market balance is :" + str(balance))
if hasLoanTypeByName(profile,"studentloan"):
print("Has studentloan")
if not hasLoanTypeByName(profile,"mortgage"):
print("doen't have mortgage")
balance = getLoanBalanceByID(profile,40)
print("Student Loand Balance is :" + str(balance))
balance = getLoanBalanceByName(profile,"autoloan")
print("Auto Loan balance is : " + str(balance))
loan = getLoanByID(profile,40)
account = getAccountByID(profile,30)
print(loan)
print(account)
amount = 250
print(hasLoanTypeByID(profile,loan['id']))
print(getLoanBalanceByID(profile,loan['id']))
print(getAccountBalanceByID(profile,account['id']))
if makeLoanPayment(profile,loan,account,amount):
print("Payment Made")
if updateCustomer(profile):
print(profile)
print("Saved Payment Data")
print("Loan Balance is: " + str(getLoanBalanceByID(profile,40)))
print("Account Balance is: " + str(getAccountBalanceByID(profile,30)))
else:
print("Payment Failed")
print(hasCardByNum(profile,'1357'))
print(getCardByID(profile,'1357'))
print(getCardBalanceByID(profile,'1357'))
card = getCardByID(profile,'1357')
if makeCardPayment(profile,card,account,amount):
print("Payment Made")
if updateCustomer(profile):
print(profile)
print("Saved Payment Data")
print("Card Balance is: " + str(getCardBalanceByID(profile,'1357')))
print("Account Balance is: " + str(getAccountBalanceByID(profile,30)))
else:
print("Payment Failed")
#runTest()
```
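A small illustrative sketch of how the helpers above compose (the profile data is made up; the functions above are assumed to be in scope). Because `getLoanByID`/`getAccountByID` return references into the profile, the decrement helpers update the stored balances in place:
```python
# Hypothetical customer profile in the shape used above.
customer = {
    "customer": {"id": 1, "Firstname": "Brian"},
    "accounts": [{"id": 30, "name": "checking", "balance": 1000}],
    "loans": [{"id": 40, "name": "autoloan", "balance": 5000}],
}
loan = getLoanByID(customer, 40)
account = getAccountByID(customer, 30)
if makeLoanPayment(customer, loan, account, 250):
    print(getLoanBalanceByID(customer, 40))     # 4750
    print(getAccountBalanceByID(customer, 30))  # 750
```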
#### File: soe/python/callSystemOfRecordAfterConversation.py
```python
import os, requests, json, string, datetime, logging, time
from os.path import join, dirname
from weblogger import addLogEntry
import voiceProxySettings
from voiceProxyUtilities import check_wcsActionSignal, replaceOutputTagValue
from callerProfileAPI.callerProfileAPI import getCustomerByName, makeLoanPayment, updateCustomer, getLoanByID, getAccountByID, getCardByID, getCustomerByID
from callConversation import callConversationService
from checkConversationSignal import wcsSignals
logging_comp_name = "callSystemOfRecordAfterConversation"
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
balanceEntityTypeList = ['Accounts','Loans','Credit-Card']
balanceEntityValueList = ['Money Market','Checking','Savings','Credit Card','Auto Loan','Mortgage','Student Loan','Discover Card']
#------- Check SOR After Conversation Methods -----------------
def callSORAfterConv(message):
message = callSystemOfRecordAfterConversation(message)
return message
def callSystemOfRecordAfterConversation(message):
if check_wcsActionSignal(message,'getProfile'):
logging.info("Grabing the user profile")
message = doGetCustomer(message)
if check_wcsActionSignal(message,'lookupAccountBalanceTag'):
message = doGetCustomer(message)
tag = '<accountBalance>'
id = message['context']['profile']['customer']['id']
customer = getCustomerByID(id)
acctID =message['context']['paymentAccount']
account = getID(customer,acctID,'accounts')
message = replaceOutputTagValue(message,tag,account['balance'])
return message
if check_wcsActionSignal(message,'makePayment'):
logging.info("Calling API to make a payment")
if doMakePayment(message):
message = doGetCustomer(message)
if 'payment' in message['context']:
del message['context']['payment']
if check_wcsActionSignal(message,'checkBalanceAskAgain'):
logging.info("Checking balance and Calling Conversation again based on signal")
message['input']['text'] = message['context']['origInput']
del message['output']
del message['intents']
message = doGetCustomer(message)
message = populateBalances(message)
message = callConversationService(message)
message = wcsSignals(message)
message = callSORAfterConv(message)
return message
return message
#------ End Check SOR After Conversation Methods ---------------
def doMakePayment(message):
id = message['context']['profile']['Firstname']
customer = getCustomerByName(id)
loan = message['context']['payment']['type']
account =message['context']['payment']['account']
amount = message['context']['payment']['amount']
loanID = getID(customer,loan,'loans')
acctID = getID(customer,account,'accounts')
if makeLoanPayment(customer,loanID,acctID,amount):
updateCustomer(customer)
return True
else:
return False
def doGetCustomer(message):
name = message['context']['callerProfile']['firstname']
profile = getCustomerByName(name.strip())
logging.info(profile['customer'])
message['context']['profile'] = profile['customer']
return message
def getLoanID(customer, loan):
return getID(customer,loan,'loans')
def getAccountID(customer, acct):
return getID(customer,acct,'accounts')
def getCardID(customer, acct):
return getID(customer,acct,'creditcards')
def getID(customer, type, list):
if list in customer and len(customer[list])>0:
for x in customer[list]:
if x['name'] == type:
return x
return None
def populateBalances(message):
# need to get the name and the balances and add them to the callerProfile in the balance array
# this is because the Watson Dialog service doesn't allow for looping through JSON arrays.
entityBalance = loopBalanceEntities(message)
message['context']['callerProfile']['balanceAmount'] = entityBalance[0]['balance']
message['context']['callerProfile']['balanceName'] = entityBalance[0]['name']
return message
def loopBalanceEntities(message):
entityBalance = []
if 'entities' in message and len(message['entities'])>0:
for entity in message['entities']:
if isValidBalanceEntity(entity):
entityBalance.append(getBalanceForEntity(message,entity))
return entityBalance
return None
def isValidBalanceEntity(entity):
if entity and entity['entity'] in balanceEntityTypeList:
return True
else:
return False
def getBalanceForEntity(message,entity):
# Need to return a list with the Entity Value and the Balance
if entity['value'] in balanceEntityValueList:
entityBalance = {}
entityBalance['name'] = entity['value']
entityBalance['balance'] = getBalanceAmount(message,entity['value'])
return entityBalance
return None
def getBalanceAmount(message,value):
logging.info(value)
if value == 'Money Market':
return getBalanceFromList(message,'accounts','moneymarket')
if value == 'Checking':
return getBalanceFromList(message,'accounts','checking')
if value == 'Savings':
return getBalanceFromList(message,'accounts','savings')
if value == 'Student Loan':
return getBalanceFromList(message,'loans','studentloan')
if value == 'Auto Loan':
return getBalanceFromList(message,'loans','autoloan')
if value == 'Mortgage':
return getBalanceFromList(message,'loans','mortgage')
if value == 'Discover Card':
return getBalanceFromList(message,'creditcards','Discover')
return 0
def getBalanceFromList(message,listname,type):
for account in message['context']['profile'][listname]:
if account['name'] == type:
return account['balance']
return None
``` |
{
"source": "Jose-Nyamos/DTS",
"score": 2
} |
#### File: Jose-Nyamos/DTS/tasks.py
```python
from django_cron import CronJobBase, Schedule
import smtplib
from django.contrib.auth.models import User
import datetime
from notifications.signals import notify
from celery.task import periodic_task
from celery.schedules import crontab
import json
import timetable_create as tc
from django.db.models import Q
from . models import TimeTable, UserProfile
@periodic_task(run_every=crontab(minute=5))
def email_notification_function():
detail_dict = {}
user_details = {}
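    # NOTE: these branches require datetime.now() to equal HH:50:00.000000 exactly
    # (microseconds included), so they only match if the task fires at that precise instant.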
if datetime.datetime.now() == datetime.datetime.now().replace(hour=8, minute=50, second=0, microsecond=0):
detail_dict.clear()
user_details.clear()
query_rows = list(
TimeTable.objects.raw("Select * from timetable_timetable WHERE Period1!='Free' AND DateToday=%s",
[datetime.date.today()]))
for row in query_rows:
detail_dict["Class"] = row.Period1
user_details[str(row.user)] = detail_dict
detail_dict = {}
send_email(user_details)
send_notification(user_details, "1st period")
elif datetime.datetime.now() == datetime.datetime.now().replace(hour=9, minute=50, second=0, microsecond=0):
detail_dict.clear()
user_details.clear()
query_rows = list(
TimeTable.objects.raw("Select * from timetable_timetable WHERE Period2!='Free' AND DateToday=%s",
[datetime.date.today()]))
for row in query_rows:
detail_dict["Class"] = row.Period2
user_details[str(row.user)] = detail_dict
detail_dict = {}
send_email(user_details)
send_notification(user_details, "2nd period")
elif datetime.datetime.now() == datetime.datetime.now().replace(hour=10, minute=50, second=0, microsecond=0):
detail_dict.clear()
user_details.clear()
query_rows = list(
TimeTable.objects.raw("Select * from timetable_timetable WHERE Period3!='Free' AND DateToday=%s",
[datetime.date.today()]))
for row in query_rows:
detail_dict["Class"] = row.Period3
user_details[str(row.user)] = detail_dict
detail_dict = {}
send_email(user_details)
send_notification(user_details, "3rd period")
elif datetime.datetime.now() == datetime.datetime.now().replace(hour=12, minute=50, second=0, microsecond=0):
detail_dict.clear()
user_details.clear()
query_rows = list(
TimeTable.objects.raw("Select * from timetable_timetable WHERE Period4!='Free' AND DateToday=%s",
[datetime.date.today()]))
for row in query_rows:
detail_dict["Class"] = row.Period4
user_details[str(row.user)] = detail_dict
detail_dict = {}
send_email(user_details)
send_notification(user_details, "4th period")
elif datetime.datetime.now() == datetime.datetime.now().replace(hour=13, minute=50, second=0, microsecond=0):
detail_dict.clear()
user_details.clear()
query_rows = list(
TimeTable.objects.raw("Select * from timetable_timetable WHERE Period5!='Free' AND DateToday=%s",
[datetime.date.today()]))
for row in query_rows:
detail_dict["Class"] = row.Period5
user_details[str(row.user)] = detail_dict
detail_dict = {}
send_email(user_details)
send_notification(user_details, "5th period")
elif datetime.datetime.now() == datetime.datetime.now().replace(hour=14, minute=50, second=0, microsecond=0):
detail_dict.clear()
user_details.clear()
query_rows = list(
TimeTable.objects.raw("Select * from timetable_timetable WHERE Period6!='Free' AND DateToday=%s",
[datetime.date.today()]))
for row in query_rows:
detail_dict["Class"] = row.Period6
user_details[str(row.user)] = detail_dict
detail_dict = {}
send_email(user_details)
send_notification(user_details, "6th period")
elif datetime.datetime.now() == datetime.datetime.now().replace(hour=15, minute=50, second=0, microsecond=0):
detail_dict.clear()
user_details.clear()
query_rows = list(
TimeTable.objects.raw("Select * from timetable_timetable WHERE Period7!='Free' AND DateToday=%s",
[datetime.date.today()]))
for row in query_rows:
detail_dict["Class"] = row.Period7
user_details[str(row.user)] = detail_dict
detail_dict = {}
send_email(user_details)
send_notification(user_details, "7th period")
def send_email(user_details):
for user, value in user_details.iteritems():
user_details[user]["Email"] = User.objects.get(username=user).email
        timenow = datetime.datetime.now() + datetime.timedelta(minutes=10)
FROM = '<EMAIL>'
TO = user_details[user]["Email"]
SUBJECT = "Next Class Details"
TEXT = "Hello %s! \nYour next class is at the %s standard at %s, and it starts in 10 mins! " \
"\nPlease be on time. \nRegards \nManagement"\
% (user, user_details[user]["Class"], timenow.strftime("%X"))
# Prepare actual message
message = """From: %s\nTo: %s\nSubject: %s\n\n%s
""" % (FROM, ", ".join(TO), SUBJECT, TEXT)
server = smtplib.SMTP("smtp.gmail.com", 587)
server.ehlo()
server.starttls()
server.login('<EMAIL>', '<PASSWORD>')
server.sendmail(FROM, TO, message)
server.close()
def send_notification(user_details,period_info):
username_list = user_details.keys()
for user in username_list:
message = "Your "+period_info+" is in "+user_details[user]["Class"]+" standard."
notify.send(user, recipient=User.objects.get(username=user), verb=message)
@periodic_task(run_every=crontab(minute=0, hour='0'))
def update_profile():
user_list = []
for user in User.objects.all():
user_list.append(User.objects.get(username= str(user)))
result = []
for user_instance in user_list:
result.append(json.loads(str(UserProfile.objects.get(user=user_instance))))
result_dict = {}
for d in result:
for k, v in d.iteritems():
result_dict.setdefault(k, []).append(v)
for user in user_list:
if len(TimeTable.objects.filter(Q(DateToday__gte=datetime.date.today()) &
Q(DateToday__lte=datetime.date.today() + datetime.timedelta(days=3))
).filter(user=user)) == 0:
for day in range(0,4):
for k, v in result_dict.iteritems():
result_dict[k][0]["Class Taken"] = []
result_dict[k][0]["Class Timings Today"] = []
newRoaster = tc.createTimeTableJson(result_dict, 7, 5)
TimeTable.objects.create(user=user,
DateToday=str(datetime.date.today()+ datetime.timedelta(days=day)),
Period1=newRoaster[user.username]["Class Taken"][0],
Period2=newRoaster[user.username]["Class Taken"][1],
Period3=newRoaster[user.username]["Class Taken"][2],
Period4=newRoaster[user.username]["Class Taken"][3],
Period5=newRoaster[user.username]["Class Taken"][4],
Period6=newRoaster[user.username]["Class Taken"][5],
Period7=newRoaster[user.username]["Class Taken"][6])
``` |
{
"source": "jose-oc/sequoia-python-client-sdk",
"score": 2
} |
#### File: sequoia-python-client-sdk/sequoia/client.py
```python
import json
import logging
import re
import uuid
from string import Template
from urllib.parse import parse_qs, quote, urlencode, urlparse
from sequoia import env, error, http, registry
from sequoia.auth import AuthFactory, AuthType
from sequoia.http import HttpResponse
DIRECT_MODEL = 'direct'
class Client(object):
"""OAuth2 Compliant Client SDK for interacting with Sequoia services.
"""
# pylint: disable-msg=too-many-arguments
def __init__(self, registry_url, proxies=None, user_agent=None, backoff_strategy=None, adapters=None,
request_timeout=None, model_resolution=None, correlation_id=None, user_id=None, application_id=None,
content_type=None, **auth_kwargs):
logging.debug('Client initialising with registry_url=%s ', registry_url)
self._registry_url = registry_url
self._request_timeout = request_timeout or env.DEFAULT_REQUEST_TIMEOUT_SECONDS
self._proxies = proxies
self._user_agent = user_agent
self._correlation_id = correlation_id.strip() if correlation_id else None
self.user_id = user_id.strip() if user_id else None
self.application_id = application_id.strip() if application_id else None
self._model_resolution = model_resolution
self._registry = self._initialize_registry(adapters, backoff_strategy, content_type, **auth_kwargs)
self._auth = AuthFactory.create(token_url=self._get_token_url(auth_kwargs.get("auth_type", None)),
request_timeout=self._request_timeout,
**auth_kwargs)
self._auth.register_adapters(adapters)
self._auth.init_session()
self._http = http.HttpExecutor(self._auth,
proxies=self._proxies,
user_agent=self._user_agent,
session=self._auth.session,
request_timeout=self._request_timeout,
correlation_id=self._correlation_id,
user_id=self.user_id,
application_id=self.application_id,
backoff_strategy=backoff_strategy,
content_type=content_type)
def _initialize_registry(self, adapters, backoff_strategy, content_type, **auth_kwargs):
auth = AuthFactory.create(**auth_kwargs) if auth_kwargs.get("auth_type",
None) == AuthType.MUTUAL else AuthFactory.create(
auth_type=AuthType.NO_AUTH)
auth.register_adapters(adapters)
http_executor = http.HttpExecutor(auth,
proxies=self._proxies,
user_agent=self._user_agent,
session=auth.session,
request_timeout=self._request_timeout,
correlation_id=self._correlation_id,
user_id=self.user_id,
application_id=self.application_id,
backoff_strategy=backoff_strategy,
content_type=content_type)
return registry.Registry(self._registry_url, http_executor)
def _get_token_url(self, auth_type):
if auth_type == AuthType.MUTUAL:
return None
identity = self._registry['identity'].location
return identity + '/oauth/token'
def __getattr__(self, item):
return self._create_service_proxy(item)
def __getitem__(self, item):
return self._create_service_proxy(item)
def _create_service_proxy(self, item):
if not item.startswith('_'):
return ServiceProxy(self._http, self._registry[item], self._model_resolution)
return self.__dict__.get(item)
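# Hypothetical usage sketch (registry URL, service name, resource name and owner are all
# made up; only the auth_type keyword is taken from the code above):
#   client = Client("https://registry.example.com/services/root", auth_type=AuthType.NO_AUTH)
#   for page in client["metadata"].contents.browse("demo-owner"):
#       ...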
class ServiceProxy(object):
_service_models = dict()
def __init__(self, http, service, model_resolution=None):
self._service = service
self._http = http
if model_resolution:
try:
self._descriptor = ServiceProxy._service_models.get(service)
if not self._descriptor:
self._descriptor = self._http.get(service.location + '/descriptor/raw?_pretty=true').json
ServiceProxy._service_models[service] = self._descriptor
except Exception:
self._descriptor = None
                logging.exception('Service `%s` model could not be fetched', service.location)
def __getattr__(self, resource):
return self._create_endpoint_proxy(resource)
def _create_endpoint_proxy(self, resource):
if not resource.startswith('_') and not resource == 'business':
return ResourceEndpointProxy(self._http, self._service, resource, descriptor=self._descriptor)
return self.__dict__.get(resource)
def __getitem__(self, resource):
if resource != 'business':
return self._create_endpoint_proxy(resource)
return self.business
def business(self, path_template):
return BusinessEndpointProxy(self._http, self._service, path_template=path_template)
class GenericEndpointProxy(object):
def __init__(self):
self.http = None
def _add_correlation_id(self):
self.http.common_headers['X-Correlation-ID'] = self.http.correlation_id \
if self.http.correlation_id else \
ResourceEndpointProxy._build_correlation_id(
self.http.user_id,
self.http.application_id)
@staticmethod
def _build_correlation_id(user_id=None, application_id=None):
if user_id is not None and application_id is not None:
return "/".join((user_id, application_id, str(uuid.uuid4())))
return None
class ResourceEndpointProxy(GenericEndpointProxy):
"""Proxy endpoint providing read/store/browse operations over Sequoia API endpoint.
"""
def __init__(self, http, service, resource, descriptor=None):
super().__init__()
self.http = http
self.service = service
self.resource = resource
self.url = service.location + '/data/' + resource
self.descriptor = descriptor
def read(self, owner, ref, retry_when_empty_result=None):
self._add_correlation_id()
return self.http.get(self.url + '/' + ref, self._create_owner_param(owner), resource_name=self.resource,
retry_when_empty_result=retry_when_empty_result)
def store(self, owner, json_object):
self._add_correlation_id()
return self.http.post(self.url + '/', json_object, self._create_owner_param(owner), resource_name=self.resource)
def browse(self, owner, criteria=None, fields=None, query_string=None, prefetch_pages=1,
retry_when_empty_result=None):
self._add_correlation_id()
params = criteria.get_criteria_params() if criteria else {}
params.update(self._create_owner_param(owner))
params.update(self._create_fields_params(fields))
return PageBrowser(endpoint=self, resource_name=self.resource, criteria=criteria,
query_string=query_string, params=params, prefetch_pages=prefetch_pages,
retry_when_empty_result=retry_when_empty_result)
def _create_fields_params(self, fields):
if fields:
return {'fields': ','.join(sorted(map(str, fields)))}
return {}
def delete(self, owner, ref):
self._add_correlation_id()
if isinstance(ref, list):
refs = ",".join(ref)
else:
refs = ref
params = dict()
params.update(ResourceEndpointProxy._create_owner_param(owner))
return self.http.delete(self.url + "/" + refs, params=params, resource_name=self.resource)
def update(self, owner, json_string, ref, version):
# Fixme Version header is no longer supported by resourceful API
self._add_correlation_id()
json_object = json.loads(json_string)
ResourceEndpointProxy.validate_reference_to_update_with_json_reference(json_object[0], ref)
params = dict()
params.update(ResourceEndpointProxy._create_owner_param(owner))
headers = ResourceEndpointProxy._create_version_header(version)
try:
return self.http.put(self.url + '/' + ref, json_string, params, headers=headers,
resource_name=self.resource)
except error.HttpError as e:
if self._is_not_matching_version_exception(e):
raise error.NotMatchingVersion('Document cannot be updated. Version does not match.', cause=e)
raise e
@staticmethod
def _create_owner_param(owner):
return {'owner': owner}
@staticmethod
def validate_reference_to_update_with_json_reference(json, ref):
if 'ref' not in json or 'owner' not in json or 'name' not in json:
raise error.ReferencesMismatchException(
'Reference to update %s does not match with the resource reference. '
'Resource does not contain ref, owner or name' % ref)
if json['ref'] != ref:
raise error.ReferencesMismatchException(
'Reference to update %s does not match with the resource reference %s.' % (ref, json['ref']))
resource_reference = "%s:%s" % (json['owner'], json['name'])
if resource_reference != ref:
raise error.ReferencesMismatchException(
'Reference to update %s does not match with the resource reference %s.' % (ref, resource_reference))
@staticmethod
def _create_version_header(version):
return {'If-Match': '"' + version + '"'}
@staticmethod
def _is_not_matching_version_exception(e):
return e.status_code == 412 and e.message['error'] == 'Precondition Failed' \
and e.message['message'] == 'document cannot be changed - versions do not match'
class LinkedResourcesPageBrowser(object):
def __init__(self, endpoint, main_page_browser, resource, owner):
self._endpoint = endpoint
self._owner = owner
self._main_page_browser = main_page_browser
self._resource = resource
@property
def resources(self):
if all([self._main_page_browser.full_json, 'linked' in self._main_page_browser.full_json,
self._resource in self._main_page_browser.full_json['linked']]):
return self._main_page_browser.full_json['linked'][self._resource]
return None
def __iter__(self):
for main_page in self._main_page_browser:
next_items = self._next_urls_in_linked_resources()
if main_page.full_json['linked'][self._resource]:
yield main_page.full_json['linked'][self._resource]
if next_items:
for next_item in next_items:
next_items_page_browser = PageBrowser(endpoint=self._endpoint, resource_name=self._resource,
query_string=urlparse(next_item).query,
params={'owner': self._owner})
for next_item_page in next_items_page_browser:
yield next_item_page.resources
def _next_urls_in_linked_resources(self):
return self._get_unique_continue_links(self._linked_links())
def _linked_links(self):
if self._main_page_browser.full_json and all([
'linked' in self._main_page_browser.full_json['meta'],
self._resource in self._main_page_browser.full_json['meta']['linked']]):
return self._main_page_browser.full_json['meta']['linked'][self._resource]
return []
def _get_unique_continue_links(self, meta_section):
"""
Given the meta section of a resource from a Sequoia service response with a number of `continue` and `request`
        links, this function returns the `continue` link that is unique, that is, the one that does not appear in any of the
`request` links.
It's a way to identify the link to the next page.
:param meta_section: list of dicts with fields `request` and `continue`.
:return: the link to the next page.
"""
continue_params = self._get_unique_continue_param(meta_section)
unique_continue_link = self._get_continue_links_matching_continue_param(meta_section, continue_params)
return unique_continue_link
def _get_continue_param(self, link):
return parse_qs(urlparse(link).query).get('continue', [None])[0]
def _get_unique_continue_param(self, meta_section):
request_links = set(self._get_continue_param(link['request']) for link in meta_section if 'request' in link)
continue_links = set(self._get_continue_param(link['continue']) for link in meta_section if 'continue' in link)
unique_continue_param = continue_links.difference(request_links)
return unique_continue_param
def _get_continue_links_matching_continue_param(self, meta_section, continue_params):
if not continue_params:
return set()
return {link['continue'] for continue_param in continue_params
for link in meta_section if 'continue' in link and quote(continue_param) in link['continue']}
class PageBrowser(object):
"""
Sequoia resource service pagination browser. This browser will fetch the content of `prefetch_pages` first pages
    and then lazily loads the remaining pages until it finds a page with no next link.
"""
def __init__(self, endpoint=None, resource_name=None, criteria=None, query_string=None, params=None,
prefetch_pages=1, retry_when_empty_result=None):
self._response_cache = []
self._resource_name = resource_name
self._endpoint = endpoint
self.params = params
self._criteria = criteria
self._retry_when_empty_result = retry_when_empty_result
self.response_builder = ResponseBuilder(descriptor=endpoint.descriptor, criteria=self._criteria)
self.query_string = query_string
self.url = self._build_url()
self.next_url = None
if prefetch_pages > 0:
self._prefetch(prefetch_pages)
def _prefetch(self, pages):
i = pages
while i:
self.next_url, response = self._fetch()
if response:
self._response_cache.append(response)
if not self.next_url:
break
i -= 1
def _fetch(self):
url = self.next_url or self.url
params = self._get_params_for_request()
self._remove_owner_if_needed(self.params, url)
response = self._endpoint.http.get(url, params=params, resource_name=self._resource_name,
retry_when_empty_result=self._retry_when_empty_result)
response_wrapper = self._get_response(self._endpoint, response)
return self._get_next_url(response), response_wrapper
def _get_params_for_request(self):
return None if self.next_url else self.params
def _get_next_url(self, response):
if self._next_page(response):
return '%s%s' % (self._endpoint.service.location, self._next_page(response))
if self._continue_param(response):
return self._build_url_from_continue_param(response)
return None
def _get_response(self, endpoint, response):
return HttpResponse(response.raw, resource_name=endpoint.resource,
model_builder=self.response_builder.build) if endpoint.descriptor else response
def _build_url(self):
url_without_params = '%s/data/%s' % (self._endpoint.service.location, self._resource_name)
return '%s?%s' % (url_without_params, self.query_string) if self.query_string else url_without_params
def _continue_param(self, response):
return response.data['meta']['continue'] if 'continue' in response.data['meta'] else ''
def _build_url_from_continue_param(self, response):
return self._endpoint.service.location + self._continue_param(response)
def linked(self, resource):
return LinkedResourcesPageBrowser(self._endpoint, self, resource, self.params.get('owner'))
def __getattr__(self, name):
if self._response_cache:
return getattr(self._response_cache[0], name)
return None
def __iter__(self):
for cache_item in self._response_cache:
yield cache_item
while self.next_url:
self.next_url, response = self._fetch()
self._response_cache.append(response)
yield response
def _next_page(self, response):
return response.full_json['meta'].get('next', None)
def _remove_owner_if_needed(self, params, url):
if self._query_string_contains_owner(url):
params.pop('owner', None)
return params
return params
def _query_string_contains_owner(self, url):
result = urlparse(url)
return 'owner' in parse_qs(result.query)
class BusinessEndpointProxy(GenericEndpointProxy):
"""Proxy endpoint providing read/store/browse operations over Sequoia API Business Endpoints with NOAUTH.
"""
def __init__(self, http, service, path_template):
super().__init__()
self.http = http
self.service = service
self.url = service.location
self.path_template = path_template
def store(self, service, owner, content, ref, params=None):
self._add_correlation_id()
url_template = Template(self.path_template)
params_formatted = None
if params:
params_formatted = '?' + urlencode(params)
url = self.url + url_template.safe_substitute(service=service, owner=owner, ref=ref,
params=params_formatted if params else '')
response = self.http.post(url, content, None, None, resource_name=None)
return HttpResponse(response.raw, resource_name=None, model_builder=None)
def browse(self, service, **kwargs):
self._add_correlation_id()
url_template = Template(self.path_template)
url = self.url + url_template.safe_substitute(service=service, **kwargs)
return self.http.get(url, resource_name=None)
@staticmethod
def _create_owner_param(owner):
return {'owner': owner}
class ResponseBuilder(object):
def __init__(self, descriptor=None, criteria=None):
# TODO Discover model in installed libraries
self._descriptor = descriptor
self._criteria = criteria
def build(self, response_json, resource_name):
if response_json.get(resource_name):
return self._build_with_criteria_and_descriptor(response_json, resource_name)
logging.warning('Resource `%s` not found in response.', resource_name)
return None
def _build_with_criteria_and_descriptor(self, response_json, resource_name):
if self._criteria and self._descriptor:
return [self._create_model_instance(resource_name, resource, response_json.get('linked')) for
resource in response_json.get(resource_name)]
return response_json.get(resource_name)
def _get_class_name(self, main_resource_name):
return self._descriptor['resourcefuls'][main_resource_name]['singularName']
def _get_relationship_key(self, main_resource_name, related_resoure_name):
try:
return self._descriptor['resourcefuls'][main_resource_name]['relationships'][related_resoure_name][
'fieldNamePath']
except KeyError:
logging.warning('Included resource `%s` not listed as relationship in `%s` service metadata',
related_resoure_name, main_resource_name)
return None
def _create_model_instance(self, main_resource_name, main_resource, linked=None):
return self._resolve_direct_inclusions(main_resource_name, main_resource, linked)
def _resolve_direct_inclusions(self, main_resource_name, main_resource, linked=None):
if linked:
for inclusion in self._criteria.inclusion_entries:
if inclusion.resource_name in linked:
main_resource[inclusion.resource_name] = self._resolve_direct_inclusion(inclusion.resource_name,
linked, main_resource_name,
main_resource)
else:
logging.info('Resources `%s` not included in response', inclusion.resource_name)
return main_resource
def _resolve_direct_inclusion(self, resource_name, linked, parent_resource_name, parent_resource):
linked_inclusions = linked[resource_name]
relation_field = self._get_relationship_key(parent_resource_name, resource_name)
if not relation_field:
logging.info('Child resource `%s` could not be linked to `%s` parent resources', resource_name,
parent_resource_name)
return None
if relation_field in parent_resource:
if linked_inclusions and 'ref' not in linked_inclusions[0]:
logging.info('Linked resources with no `ref` field, linked resources skipped')
return None
return [self._create_model_instance(resource_name, entry, None)
for entry in linked_inclusions if entry['ref'] in parent_resource[relation_field]]
logging.info('Parent resource `%s` with no linked `%s` resources', parent_resource_name,
resource_name)
return None
def _dash_to_camelcase(self, value):
return re.sub(r'(?!^)-([a-zA-Z])', lambda m: m.group(1).upper(), value).title()
``` |
{
"source": "JoseOr1j/PyGame",
"score": 3
} |
#### File: JoseOr1j/PyGame/main.py
```python
import pygame
import random
WIDTH = 1200
HEIGHT = 1000
BACKGROUND = (0,0,0)
class Ball:
def __init__(self):
self.image = pygame.image.load("ball.png")
self.speed = [random.randrange(-4,4), 3]
self.rect = self.image.get_rect()
self.alive = True
def update(self):
if self.rect.top < 0:
self.speed[1] = -self.speed[1]
self.speed[0] = random.randrange(-2, 2)
elif self.rect.left < 0 or self.rect.right > WIDTH:
self.speed[0] = -self.speed[0]
elif self.rect.bottom > HEIGHT:
self.alive = False
self.move()
def move(self):
self.rect = self.rect.move(self.speed)
def main():
clock = pygame.time.Clock()
pygame.init()
screen = pygame.display.set_mode((WIDTH, HEIGHT))
ball1 = Ball()
ball2 = Ball()
ball3 = Ball()
balls = [ball1, ball2, ball3]
num_successful_throws = 0
while True:
for event in pygame.event.get():
if event.type == pygame.MOUSEBUTTONDOWN:
for ball in balls:
if ball.rect.collidepoint(pygame.mouse.get_pos()):
ball.speed[0] = random.randrange(-4, 4)
ball.speed[1] = -2
num_successful_throws += 1
break
if num_successful_throws > 3:
ball = Ball()
balls.append(ball)
num_successful_throws = 0
screen.fill(BACKGROUND)
        # Iterate over a copy so removing a dead ball doesn't skip the next one.
        for ball in balls[:]:
            if ball.alive:
                screen.blit(ball.image, ball.rect)
                ball.update()
            if not ball.alive:
                balls.remove(ball)
pygame.display.flip()
clock.tick(60)
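    # NOTE: the loop above never exits, so the loop below is never reached.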
while True:
for event in pygame.event.get():
if event.type == pygame.MOUSEBUTTONDOWN:
for ball in balls:
if ball.rect.collidepoint(pygame.mouse.get_pos()):
ball.speed[0] = random.randrange(-4, 4)
ball.speed[1] = -2
num_successful_throws += 1
break
if __name__ == "__main__":
main()
'''
class Enemy:
def __init__(self):
self.image = pygame.image.load("xyz.png")
self.speed = [random.randrange(-4,4),3]
self.rect = self.image.get_rect()
self.alive = True
def update(self):
if self.rect.top < 0:
self.speed[2] = -self.speed[1]
self.speed[1] = random.randrange(-2,2)
self.speed[0] = random.randint(-1,1,)
elif self.rect.left < 0 or self.rect.right > WIDTH:
self.speed[0] = -self.speed[0]
elif self.rect.bottom > HEIGHT:
self.alive = False
self.moves()
def moves(self):
self.rect = self.rect.moves(self.speed)
'''
``` |
{
"source": "joseotaviobiscaia/aulaS_CURSO",
"score": 3
} |
#### File: Notebooks/arquivos_auxiliares/codigo_1.py
```python
def funcao1():
print('funcao1')
def funcao2():
print('funcao2')
if __name__=='__main__':
funcao1()
funcao2()
    print('This code is being executed as a script')
else:
    print('This code is being imported as a module')
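# Illustration (comments only): if this file is saved as codigo_1.py,
#   $ python codigo_1.py   -> runs funcao1() and funcao2(), then prints the "script" message
#   >>> import codigo_1    -> prints only the "module" message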
``` |
{
"source": "Jose-Oton/airflow_project",
"score": 2
} |
#### File: Jose-Oton/airflow_project/5th_pipeline.py
```python
from airflow import DAG
from datetime import timedelta, datetime
from airflow.utils.dates import days_ago
from airflow.models import Variable
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import BranchPythonOperator
from airflow.providers.google.cloud.operators.dataproc import DataprocCreateClusterOperator
from airflow.providers.google.cloud.operators.dataproc import DataprocDeleteClusterOperator
from airflow.providers.google.cloud.operators.dataproc import DataprocSubmitPySparkJobOperator
from airflow.providers.google.cloud.operators.dataproc import DataprocSubmitJobOperator
from airflow.utils import trigger_rule
# DataprocSubmitPySparkJobOperator(
# task_id="store_stock",
# main="gs://your_bucket/datapipelines/pyspark/pyspark_transformation_joseOton.py",
# cluster_name="spark-cluster-{{ ds_nodash }}",
# dataproc_jars=["gs://spark-lib/bigquery/spark-bigquery-latest.jar"], # JAR so that Spark can read from BigQuery
# region='us-central1',
# gcp_conn_id='google_cloud_default'
# ).generate_job()
#2. Use Variables
PROJECT_ID = Variable.get("project")
STORAGE_BUCKET = Variable.get("storage_bucket")
default_dag_args = {
"start_date": days_ago(1),
"owner": "<NAME>"
}
def is_weekend(execution_date=None):
date = datetime.strptime(execution_date, "%Y-%m-%d")
if date.isoweekday() < 6:
return "store_stock"
return "weekend"
# DEFINE THE DAG
with DAG(
dag_id='5th_exercise',
description='Running a PySpark Job on GCP',
schedule_interval='@daily',
default_args=default_dag_args,
max_active_runs=1,
user_defined_macros={"project": PROJECT_ID},#5. Macros en Airflow
) as dag:
    dag.doc_md = __doc__  # To document a DAG
create_dataproc = DataprocCreateClusterOperator(
task_id="create_dataproc",
project_id='{{ project }}',
cluster_name="spark-cluster-{{ ds_nodash }}",
num_workers=2,
storage_bucket=STORAGE_BUCKET,
region="us-central1"
)
    create_dataproc.doc_md = """## Create a Dataproc cluster
    Creates a Dataproc cluster in the GCP project.
"""
    # 3. Add branching logic to decide which pipeline to run
do_analytics = BranchPythonOperator(
task_id="do_analytics",
python_callable=is_weekend,
op_kwargs={"execution_date": "{{ ds }}"}, # 4. Jinja Templating
)
    do_analytics.doc_md = """## Evaluates which day of the week it is
    Branches to the Spark job on weekdays, or to the weekend task otherwise.
"""
store_stock = DataprocSubmitJobOperator(
task_id="store_stock",
project_id=PROJECT_ID,
location='us-central1',
job={
'reference': {'project_id': '{{ project }}',
                          'job_id': '{{task.task_id}}_{{ds_nodash}}_2446afcc_joseOton'},  ## in case it needs to change
'placement': {'cluster_name': 'spark-cluster-{{ ds_nodash }}'},
'labels': {'airflow-version': 'v2-1-0'},
'pyspark_job': {
'jar_file_uris': ['gs://spark-lib/bigquery/spark-bigquery-latest_2.12.jar'],
'main_python_file_uri': 'gs://your_bucket/datapipelines/pyspark/pyspark_transformation_joseOton.py'
}
},
gcp_conn_id='google_cloud_default'
)
store_stock.doc_md = """## Spark Transformation
    Runs the transformations with Spark.
"""
weekend = BashOperator(
task_id="weekend",
        bash_command='echo "\'$TODAY\' is a weekend, so the pipeline has not been executed."',
env={'TODAY': '2021-06-20'},
)
    weekend.doc_md = """## Prints the day of the week
    Runs when the execution date falls on a weekend.
"""
delete_cluster = DataprocDeleteClusterOperator(
task_id="delete_cluster",
project_id=PROJECT_ID,
cluster_name="spark-cluster-{{ ds_nodash }}",
trigger_rule="all_done",
region='us-central1'
#zone='us-central1-a'
)
    delete_cluster.doc_md = """## Delete the Dataproc cluster
    Deletes the Dataproc cluster.
"""
    # SET THE DAG DEPENDENCIES
(create_dataproc >>
do_analytics >> [
store_stock,
weekend,
] >> delete_cluster)
``` |
{
"source": "josep110/trading-scraper",
"score": 3
} |
#### File: josep110/trading-scraper/scraper.py
```python
import logging, time
from urllib.parse import urljoin
import requests
from bs4 import BeautifulSoup
logging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s',level=logging.INFO)
class Crawler: # Generic web crawling object primarily using BeautifulSoup for site traversal.
def __init__(self, urls=[]):
self.visited = []
self.unvisited = urls
def download_url(self, url):
return requests.get(url).text
def get_linked_urls(self, url, html):
soup = BeautifulSoup(html, 'html.parser') # uses standard Python html parser.
for link in soup.find_all('a'):
path = link.get('href')
if path and path.startswith('/'):
path = urljoin(url, path)
yield path
def add_url_to_visit(self, url):
if url not in self.visited and url not in self.unvisited:
self.unvisited.append(url)
def crawl(self,url):
html = self.download_url(url)
for url in self.get_linked_urls(url, html):
self.add_url_to_visit(url)
def run(self):
while self.unvisited:
url = self.unvisited.pop(0)
logging.info(f'Crawling: {url}')
try:
self.crawl(url)
except Exception:
logging.exception(f'Failed to crawl: {url}')
finally:
self.visited.append(url)
def navigate(self, url):
        self.run()
def go(url):
Crawler(urls=[url]).run()
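# Usage sketch (the URL is a placeholder):
if __name__ == "__main__":
    go("https://example.com")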
``` |
{
"source": "josepablocam/ams",
"score": 3
} |
#### File: ams/analysis/combined_wins_plot.py
```python
from argparse import ArgumentParser
import matplotlib
matplotlib.rcParams['text.usetex'] = True
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
def plot(combined_df):
systems = [
"Weak Spec.", "Weak Spec. + Search", "Expert + Search", "AMS + Search"
]
combined_df["order"] = combined_df["name"].map(lambda x: systems.index(x))
combined_df = combined_df.sort_values("order")
fig, ax = plt.subplots(1)
ax = sns.barplot(
data=combined_df,
x="search",
y="wins",
hue="name",
ci=None,
ax=ax,
)
ax.set_xlabel("Search")
ax.set_ylabel("Number of Wins")
plt.legend(
loc='upper center',
bbox_to_anchor=(0.5, 1.3),
title="Approach",
ncol=2
)
plt.tight_layout()
return ax
def combine_dfs(input_paths, search_names):
acc = []
for path, search in zip(input_paths, search_names):
df = pd.read_csv(path)
df["search"] = search
acc.append(df)
combined_df = pd.concat(acc, axis=0)
combined_df = combined_df[combined_df["Specification"] == "Total"]
combined_df = pd.melt(
combined_df,
id_vars=["Specification", "search"],
var_name="name",
value_name="wins",
)
return combined_df
def get_args():
parser = ArgumentParser(description="Combine wins plots")
parser.add_argument("--input", type=str, nargs="+", help="Wins.csv files")
parser.add_argument(
"--search",
type=str,
nargs="+",
help="Search names aligned with input files",
)
parser.add_argument("--output", type=str, help="Output path")
return parser.parse_args()
def main():
args = get_args()
df = combine_dfs(args.input, args.search)
ax = plot(df)
ax.get_figure().savefig(args.output)
if __name__ == "__main__":
try:
main()
except Exception as err:
import pdb
pdb.post_mortem()
```
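For reference, a hedged sketch of the melted frame that `plot` consumes (the search name and win counts are invented; `plot` as defined above is assumed to be in scope):
```python
import pandas as pd
# Made-up data in the shape produced by combine_dfs() above.
demo = pd.DataFrame({
    "Specification": ["Total"] * 4,
    "search": ["tpot"] * 4,
    "name": ["Weak Spec.", "Weak Spec. + Search", "Expert + Search", "AMS + Search"],
    "wins": [1, 3, 5, 7],
})
ax = plot(demo)
ax.get_figure().savefig("demo_wins.pdf")
```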
#### File: ams/analysis/distribution_hyperparameters.py
```python
from argparse import ArgumentParser
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
import pickle
import matplotlib
matplotlib.use('Agg')
matplotlib.rcParams['text.usetex'] = True
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pmlb
import seaborn as sns
import sklearn
import sklearn.base
import sklearn.model_selection
import tqdm
from core.extract_sklearn_api import (
APICollection,
APIClass,
APIClassParameter,
)
from core.utils import get_component_constructor
from core.extract_parameters import UNKValue
from core.generate_search_space import generate_parameters
from analysis.utils import emp_cdf, plot_emp_cdf
from datasets.utils import fetch_data
def count_params_per_class(api_collection, exclude):
results = {}
for c in api_collection.classes:
params = [p for p in c.children if p.param_name not in exclude]
results[c.path] = len(params)
return results
def distribution_ratio_params_tuned(df, cts_per_class, exclude,
plot_steps=200):
df = df.copy()
df["ignore"] = df["key"].isin(exclude)
df_ct = df.groupby(
["id",
"func"])["key"].agg(lambda x: len([e for e in x if e is not None]))
df_ct = df_ct.to_frame(name="ct").reset_index()
df_ignore = df.groupby(
["id",
"func"])["ignore"].sum().to_frame(name="ct_ignore").reset_index()
df_ct = pd.merge(df_ct, df_ignore, how="left", on=["id", "func"])
df_ct["ct_ignore"] = df_ct["ct_ignore"].fillna(0.0)
df_ct["final_ct"] = df_ct["ct"] - df_ct["ct_ignore"]
df_ct["reference_ct"] = df_ct["func"].map(cts_per_class)
df_ct["ratio"] = df_ct["final_ct"] / df_ct["reference_ct"]
# drop entries with zero reference
df_ct = df_ct[df_ct["reference_ct"] > 0].reset_index()
plot = plot_emp_cdf(df_ct["ratio"], np.linspace(0, 1.0, plot_steps))
plot.set_xlabel("Ratio of hyperparameters tuned to available")
plot.set_ylabel("Empirical CDF (Calls)")
return df_ct, plot
# people tend to not tune all hyper-parameters
def jaccard_similarity(s1, s2):
if len(s1) == 0 and len(s2) == 0:
return 1.0
i = s1.intersection(s2)
u = s1.union(s2)
return len(i) / float(len(u))
def distribution_distance_params_tuned(df,
exclude,
n_samples=100,
plot_steps=200):
df = df.copy()
df = df[~df["key"].isin(exclude)]
df = df.groupby([
"id", "func"
])["key"].apply(lambda x: frozenset([e for e in x if e is not None]))
df = df.to_frame(name="params").reset_index()
distances = []
for func in tqdm.tqdm(df["func"].unique()):
df_func = df[df["func"] == func].reset_index(drop=True)
n = df_func.shape[0]
if n > n_samples:
df_func = df_func.sample(n=n_samples).reset_index(drop=True)
n = n_samples
for i in tqdm.tqdm(range(0, n)):
for j in range(i + 1, n):
row1 = df_func.iloc[i]["params"]
row2 = df_func.iloc[j]["params"]
jacc_dist = 1.0 - jaccard_similarity(row1, row2)
distances.append((func, jacc_dist))
df_dist = pd.DataFrame(distances, columns=["func", "dist"])
df_dist = df_dist.groupby("func")["dist"].mean().to_frame(name="dist")
df_dist = df_dist.reset_index()
plot = plot_emp_cdf(df_dist["dist"].values,
np.linspace(0.0, 1.0, plot_steps))
plot.set_xlabel("Mean Jaccard Distance of Parameters Tuned Across Calls")
plot.set_ylabel("Empirical CDF (Components)")
return df_dist, plot
def distribution_param_values(df, exclude):
df = df.copy()
df = df[~df["key"].isin(exclude)]
df_cts = df.groupby(["func", "key"])["value"].agg(lambda x: len(set(x)))
df_cts = df_cts.to_frame(name="ct_unique_values").reset_index()
min_val = 1
max_val = df_cts.ct_unique_values.max()
plot = plot_emp_cdf(df_cts.ct_unique_values.values,
np.arange(min_val, max_val + 1, 1.0))
plot.set_xlabel("Unique values in calls")
plot.set_ylabel("Empirical CDF (Component Hyperparameters)")
return df_cts, plot
def get_top_n_classifiers(df, n):
funcs = df["func"].unique()
clfs = [
f for f in funcs
if sklearn.base.is_classifier(get_component_constructor(f))
]
df_clfs = df[df["func"].isin(clfs)]
# single entry per call
df_clfs = df_clfs.groupby(["func", "id"]).head(1)
df_cts = df_clfs.groupby("func").size()
df_top = df_cts.sort_values(ascending=False).head(n)
return df_top.index.values
def evaluate_component_possible_improvement(X,
y,
component,
param_summary,
num_params,
num_values,
exclude,
scoring="f1_macro",
cv=5,
random_state=None):
params = generate_parameters(
component,
param_summary,
num_params,
num_values,
exclude_params=exclude,
add_default=True,
)
obj = get_component_constructor(component)
param_grid = sklearn.model_selection.ParameterGrid(params)
acc_scores = []
for ps in tqdm.tqdm(param_grid):
if random_state is not None:
np.random.seed(random_state)
try:
score = np.mean(
sklearn.model_selection.cross_val_score(
obj(**ps),
X,
y,
scoring=scoring,
cv=cv,
n_jobs=-1,
))
acc_scores.append(score)
except:
pass
# Note: this is the max of the cross-validation score
# we only use this to show the room for *possible* improvement
# if we had started with the best configuration
# This is **not** equivalent to the test score
# since a true test score would perform CV only on the
# training set (we instead are choosing the config that performed
# best on the test CV splits...so this is only indicative of the
# *possible* improvement, not actually what we would observe in
# practice)
assert len(acc_scores) > 0, "All params failed"
best_score = np.max(acc_scores)
if random_state is not None:
np.random.seed(random_state)
default_score = np.mean(
sklearn.model_selection.cross_val_score(
obj(),
X,
y,
scoring=scoring,
cv=cv,
n_jobs=-1,
))
return default_score, best_score
def performance_of_tuned_vs_default(
datasets,
df_params_raw,
df_params_summary,
exclude,
num_clfs=5,
num_params=3,
num_values=3,
scoring="f1_macro",
cv=5,
random_state=42,
):
clfs = get_top_n_classifiers(df_params_raw, n=num_clfs)
# SVMs can take a looongg time to fit
clfs = [c for c in clfs if "svm" not in c]
results = []
for c in tqdm.tqdm(clfs):
for d in datasets:
X, y = fetch_data(d)
default_score, best_score = evaluate_component_possible_improvement(
X,
y,
c,
df_params_summary,
num_params,
num_values,
exclude,
scoring=scoring,
cv=cv,
random_state=random_state,
)
results.append((c, d, default_score, best_score))
df_results = pd.DataFrame(
results,
columns=["classifier", "dataset", "default_score", "best_score"],
)
df_results[
"ratio"] = df_results["best_score"] / df_results["default_score"]
return df_results
def summarize_performance(df_perf):
df_perf = df_perf.copy()
df_perf["pct"] = (df_perf["ratio"] - 1.0) * 100.0
df_perf["classifier_basename"] = df_perf["classifier"].map(
lambda x: x.split(".")[-1])
fig, ax = plt.subplots(1)
plot = sns.scatterplot(
data=df_perf,
x="classifier_basename",
y="pct",
hue="dataset",
ax=ax,
)
plt.xticks(rotation=45)
plot.set_xlabel("Classifier")
plot.set_ylabel("Possible improvement over defaults (%)")
plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left", ncol=1)
plt.tight_layout()
return plot
def get_args():
parser = ArgumentParser(
description="Analysis of Kaggle hyperparameter data")
parser.add_argument(
"--params_raw", type=str, help="Path to pickled -raw parameters")
parser.add_argument(
"--params_summary",
type=str,
help="Path to pickled -summary parameters")
parser.add_argument(
"--num_params",
type=int,
help="Hyperparams per component",
default=3,
)
parser.add_argument(
"--num_values",
type=int,
help="Values per hyperparam",
default=3,
)
parser.add_argument(
"--cv",
type=int,
help="CV iterations",
default=5,
)
parser.add_argument(
"--scoring",
type=str,
help="Scoring function",
default="f1_macro",
)
parser.add_argument(
"--num_clfs",
type=int,
help="Top n classifiers to eval",
default="n_clfs",
)
parser.add_argument(
"--datasets",
type=str,
nargs="+",
help="Datasets",
)
parser.add_argument(
"--api", type=str, help="Path to pickled API collection")
parser.add_argument(
"--random_state",
type=int,
help="RNG seed",
default=42,
)
parser.add_argument("--output", type=str, help="Output directory")
return parser.parse_args()
def main():
args = get_args()
df_raw = pd.read_pickle(args.params_raw)
df_summary = pd.read_pickle(args.params_summary)
with open(args.api, "rb") as fin:
api_collection = pickle.load(fin)
if not os.path.exists(args.output):
os.makedirs(args.output)
np.random.seed(args.random_state)
# nuisance hyperparameters (i.e. just implementation details)
exclude = [
"verbose",
"random_state",
"cv",
"n_jobs",
"prefit",
"refit",
]
reference_counts = count_params_per_class(api_collection, exclude)
df_num_tuned, plot_num_tuned = distribution_ratio_params_tuned(
df_raw,
reference_counts,
exclude,
)
df_num_tuned.to_csv(
os.path.join(args.output, "num_params_tuned.csv"),
index=False,
)
plot_num_tuned.get_figure().savefig(
os.path.join(args.output, "num_params_tuned.pdf"))
df_dist_tuned, plot_dist_tuned = distribution_distance_params_tuned(
df_raw,
exclude,
)
df_dist_tuned.to_csv(
os.path.join(args.output, "distance_params_tuned.csv"),
index=False,
)
plot_dist_tuned.get_figure().savefig(
os.path.join(args.output, "distance_params_tuned.pdf"))
df_dist_values, plot_dist_values = distribution_param_values(
df_raw, exclude)
df_dist_values.to_csv(
os.path.join(args.output, "num_param_values.csv"),
index=False,
)
plot_dist_values.get_figure().savefig(
os.path.join(args.output, "num_param_values.pdf"))
df_perf = performance_of_tuned_vs_default(
args.datasets,
df_raw,
df_summary,
exclude,
num_clfs=args.num_clfs,
num_params=args.num_params,
num_values=args.num_values,
scoring=args.scoring,
cv=args.cv,
random_state=args.random_state,
)
df_perf.to_csv(os.path.join(args.output, "perf.csv"), index=False)
perf_plot = summarize_performance(df_perf)
perf_plot.get_figure().savefig(os.path.join(args.output, "perf.pdf"))
if __name__ == "__main__":
try:
main()
except Exception as err:
import pdb
pdb.post_mortem()
```
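The script above estimates room for improvement by sweeping a ParameterGrid with cross_val_score and comparing the best configuration against the estimator's defaults. Below is a minimal, self-contained sketch of that pattern on sklearn's iris data; the grid values are illustrative and not taken from the mined Kaggle summaries.
```python
# Minimal sketch of the default-vs-best comparison used above
# (toy grid on iris; not the mined hyperparameter values).
import numpy as np
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import ParameterGrid, cross_val_score

X, y = load_iris(return_X_y=True)
grid = ParameterGrid({"C": [0.1, 1.0, 10.0]})

scores = [
    np.mean(cross_val_score(
        LogisticRegression(max_iter=1000, **ps), X, y,
        scoring="f1_macro", cv=5))
    for ps in grid
]
default_score = np.mean(cross_val_score(
    LogisticRegression(max_iter=1000), X, y, scoring="f1_macro", cv=5))
# max(scores) is only indicative of *possible* improvement, as noted above
print(default_score, max(scores))
```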
#### File: ams/analysis/frequency_operators.py
```python
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from argparse import ArgumentParser
from collections import Counter, defaultdict
import glob
import json
import matplotlib
# Note that for this one we can't use 'text.usetex'
# that seems to error out when using multipage pdf...
# the alternative with pdf/ps.fonttype works
# matplotlib.rcParams['text.usetex'] = True
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import matplotlib.backends.backend_pdf
import numpy as np
import pandas as pd
import tqdm
from analysis import pipeline_to_tree
from analysis import utils
def extract_operators(pipeline):
stack = [pipeline_to_tree.to_tree(pipeline)]
ops = []
while len(stack) > 0:
curr = stack.pop()
if curr.label == "root":
stack.extend(curr.children)
elif curr.label.startswith(("sklearn", "xgboost", "tpot")):
if not curr.label.startswith("sklearn.pipeline.Pipeline"):
ops.append(curr.label)
stack.extend(curr.children)
else:
continue
return ops
def operator_counters(df):
counters = defaultdict(lambda: Counter())
for _, row in tqdm.tqdm(df.iterrows()):
ops = extract_operators(row.fitted_pipeline)
counters[row.folder].update(ops)
counters = {
folder: [(op, ct) for op, ct in counter.most_common(len(counter))]
for folder, counter in counters.items()
}
return counters
def get_short_op(op):
return op.split(".")[-1]
def get_spec_text(spec):
ops = [get_short_op(op) for op in spec]
ops_str = ", ".join(ops)
return ops_str
def load_specs(root_folder):
files = glob.glob(os.path.join(root_folder, "*", "simple_config.json"))
specs = {}
for p in files:
p_parts = p.split("/")
if not p_parts[-2].startswith("q"):
continue
with open(p, "r") as fin:
s = json.load(fin)
folder = p_parts[-2]
specs[folder] = s
return specs
def plot_operator_distribution(
counters,
specs,
k,
output,
combine=False,
title=None,
):
axes = []
for folder, counts in counters.items():
counts = [(get_short_op(op), ct) for op, ct in counts]
# sort in descending order
counts = sorted(counts, key=lambda x: x[1], reverse=True)
xs, ys = zip(*counts)
ys = np.array(ys)
# normalize
ys_norm = ys / np.sum(ys)
# only keep k for plotting
xs = xs[:k]
ys_norm = ys_norm[:k]
fig, ax = plt.subplots(1)
axes.append(ax)
plot_df = pd.DataFrame(zip(xs, ys_norm), columns=["x", "y"])
plot_df.plot(kind="bar", x="x", y="y", ax=ax)
ax.set_xlabel("Components")
ax.set_ylabel("% of components")
spec_text = None
if specs is not None:
spec_text = get_spec_text(specs[folder])
if title is not None:
extra = folder if spec_text is None else spec_text
ax_title = title.format(extra)
else:
ax_title = "Distribution"
ax.set_title(ax_title, fontsize=8)
ax.get_legend().remove()
plt.xticks(rotation=90)
plt.tight_layout()
if not combine:
plot_path = os.path.join(output, folder + ".pdf")
fig.savefig(plot_path)
plt.close()
if combine:
pdf_path = os.path.join(output, "combined.pdf")
pdf = matplotlib.backends.backend_pdf.PdfPages(pdf_path)
for ax in axes:
pdf.savefig(ax.get_figure())
pdf.close()
plt.close()
def get_args():
parser = ArgumentParser(
description=
"Bar plots for frequency (%) of top k operators in optimized pipelines"
)
parser.add_argument("--input", type=str, help="Input folder with results")
parser.add_argument("--specs", type=str, help="Input folder with specs")
parser.add_argument("--name", type=str, help="System name", default="sys")
parser.add_argument("--k", type=int, help="Top K operators", default=10)
parser.add_argument("--output", type=str, help="Output directory")
parser.add_argument(
"--combine",
action="store_true",
help="Put all plots in one pdf",
)
parser.add_argument("--title", type=str, help="Title format")
return parser.parse_args()
def main():
args = get_args()
if not os.path.exists(args.output):
os.makedirs(args.output)
specs = None
if args.specs is not None:
specs = load_specs(args.specs)
df = utils.collect_results(args.input)
print("Operator counts for", args.name)
df = df[df["name"] == args.name].reset_index(drop=True)
counters = operator_counters(df)
plot_operator_distribution(
counters,
specs,
args.k,
args.output,
combine=args.combine,
title=args.title,
)
if __name__ == "__main__":
try:
main()
except Exception as err:
import pdb
pdb.post_mortem()
```
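extract_operators above walks the fitted pipeline iteratively with an explicit stack, keeping sklearn/xgboost/tpot component labels while skipping the sklearn.pipeline.Pipeline wrapper itself. The sketch below reproduces that traversal with a stand-in Node class; the real tree type comes from analysis.pipeline_to_tree and is not shown here, so the Node class is hypothetical.
```python
# Hypothetical Node stand-in to illustrate the stack-based traversal
# performed by extract_operators.
class Node:
    def __init__(self, label, children=None):
        self.label = label
        self.children = children or []

def extract_labels(root):
    stack, ops = [root], []
    while stack:
        curr = stack.pop()
        if curr.label == "root":
            stack.extend(curr.children)
        elif curr.label.startswith(("sklearn", "xgboost", "tpot")):
            if not curr.label.startswith("sklearn.pipeline.Pipeline"):
                ops.append(curr.label)
            stack.extend(curr.children)
    return ops

tree = Node("root", [
    Node("sklearn.pipeline.Pipeline", [
        Node("sklearn.preprocessing.StandardScaler"),
        Node("sklearn.linear_model.LogisticRegression"),
    ])
])
# LIFO stack order: the LogisticRegression label comes out first
print(extract_labels(tree))
```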
#### File: ams/analysis/relevance_markings.py
```python
from argparse import ArgumentParser
import os
import matplotlib
matplotlib.use("Agg")
matplotlib.rcParams['text.usetex'] = True
import pandas as pd
import seaborn as sns
def summarize_manual_markings(df):
ks = [1, 5, 10]
df = df.copy()
df = df.sort_values(["sampled_id", "result_position"], ascending=True)
    # sometimes .astype can be funky, so just be explicit about 1.0/0.0
df["relevant"] = df["relevant"].map(lambda x: 1.0 if x else 0.0)
result_df = df[["sampled_id"]].drop_duplicates()
for k in ks:
top_k_df = df.groupby("sampled_id").head(k).groupby("sampled_id")[[
"relevant"
]].mean()
top_k_df = top_k_df.reset_index()
top_k_df = top_k_df.rename(columns={"relevant": "top_{}".format(k)})
result_df = pd.merge(result_df, top_k_df, how="left", on="sampled_id")
return result_df
def plot_markings(combined_df):
combined_df_flat = pd.melt(combined_df, id_vars=["sampled_id", "name"])
combined_df_flat["variable"] = combined_df_flat["variable"].map(
lambda x: " ".join(x.capitalize().split("_"))
)
ax = sns.barplot(
data=combined_df_flat, x="variable", y="value", hue="name"
)
ax.set_xlabel("Cutoff")
ax.set_ylabel("Fraction Functionally Related")
ax.legend(title="Approach")
return ax
def get_args():
parser = ArgumentParser(
description=
"Summarize analysis of functionally related components",
)
parser.add_argument(
"--inputs",
nargs="+",
type=str,
help="Manually rated CSV with results to summarize",
)
parser.add_argument(
"--names",
nargs="+",
type=str,
help="Names for results",
)
parser.add_argument(
"--output",
type=str,
help="Output",
)
return parser.parse_args()
def main():
args = get_args()
acc = []
if args.names is None:
args.names = ["df_{}".format(i) for i, _ in enumerate(args.inputs)]
for name, path in zip(args.names, args.inputs):
df = pd.read_csv(path)
summary_df = summarize_manual_markings(df)
summary_df["name"] = name
acc.append(summary_df)
combined_df = pd.concat(acc, axis=0)
ax = plot_markings(combined_df)
if args.output:
dir_path = os.path.dirname(args.output)
if len(dir_path) > 0 and not os.path.exists(dir_path):
os.makedirs(dir_path)
ax.get_figure().savefig(args.output)
if __name__ == "__main__":
try:
main()
except Exception as err:
import pdb
pdb.post_mortem()
```
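summarize_manual_markings above computes, per sampled query, the fraction of relevant results within the top k by sorting on result_position and taking groupby(...).head(k). A tiny worked example of that step with made-up markings (not real annotations):
```python
# Toy illustration of the top-k relevance fraction computed above.
import pandas as pd

df = pd.DataFrame({
    "sampled_id": [1, 1, 1, 2, 2, 2],
    "result_position": [0, 1, 2, 0, 1, 2],
    "relevant": [1.0, 0.0, 1.0, 0.0, 0.0, 1.0],
})
df = df.sort_values(["sampled_id", "result_position"])
top_1 = df.groupby("sampled_id").head(1).groupby("sampled_id")["relevant"].mean()
top_3 = df.groupby("sampled_id").head(3).groupby("sampled_id")["relevant"].mean()
print(top_1.to_dict())  # {1: 1.0, 2: 0.0}
print(top_3.to_dict())  # {1: 0.666..., 2: 0.333...}
```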
#### File: ams/core/extract_sklearn_api.py
```python
from argparse import ArgumentParser
import inspect
import importlib
import pickle
import re
import string
import numpy as np
import sklearn
import sklearn.base
import sklearn.compose
import sklearn.datasets
import sklearn.exceptions
import sklearn.dummy
import sklearn.inspection
import sklearn.metrics
import sklearn.model_selection
import sklearn.multioutput
import sklearn.pipeline
import sklearn.utils
import sklearn.tree
import sklearn.semi_supervised
from core import nlp
EXCLUDED = [
sklearn.compose,
sklearn.config_context,
sklearn.datasets,
sklearn.dummy,
sklearn.exceptions,
sklearn.inspection,
sklearn.metrics,
sklearn.model_selection,
sklearn.multioutput,
sklearn.pipeline,
sklearn.tree.export_graphviz,
sklearn.show_versions,
sklearn.utils,
sklearn.semi_supervised,
]
def get_short_class_desc(docstr):
short_desc_lines = []
accumulating = False
for line in docstr.split("\n"):
line = line.strip()
if len(line) == 0 and not accumulating:
continue
elif (len(line) == 0 and accumulating) or line == "Parameters":
return "\n".join(short_desc_lines)
else:
accumulating = True
short_desc_lines.append(line)
# just in case...
return "\n".join(short_desc_lines)
def get_signature_params(class_):
try:
return inspect.signature(class_).parameters
except ValueError:
# need to fall back to parse signature differently
print("Failed on", class_)
return None
def get_expected_params(class_, class_elem):
parameters = get_signature_params(class_)
if parameters is None:
return []
parsed_params = []
param_names = parameters.keys()
parsed_param_docs = parse_param_docs(class_elem.docstring, param_names)
for param_name in param_names:
if param_name in ["args", "kwargs"]:
continue
param = parameters[param_name]
param_elem = APIClassParameter(
param_name,
param,
class_elem,
doc_dict=parsed_param_docs,
)
parsed_params.append(param_elem)
return parsed_params
def parse_param_docs(class_docs, param_names):
in_param_section = False
parse_this_param = False
param_names = list(param_names)
curr_param = None
curr_param_docs = []
param_docs = {}
for line in class_docs.split("\n"):
line = line.strip()
if line == "Parameters":
in_param_section = True
elif line == "Attributes":
# done parsing parameters
break
elif in_param_section:
is_new_param_line = re.match("([a-zA-Z0-9_])+ : ", line)
if is_new_param_line is not None:
new_param = is_new_param_line.group(0)[:-3]
if len(curr_param_docs) > 0:
param_docs[curr_param] = "\n".join(curr_param_docs)
curr_param_docs = []
if new_param in param_names:
param_names.remove(new_param)
curr_param = new_param
parse_this_param = True
else:
curr_param = None
parse_this_param = False
if parse_this_param:
if len(line) > 0:
curr_param_docs.append(line)
else:
# hit first empty line after short description
# done parsing this param
parse_this_param = False
else:
continue
if curr_param is not None and len(curr_param_docs) > 0:
param_docs[curr_param] = "\n".join(curr_param_docs)
return param_docs
def build_class_elem(elem, module, collected_info):
path = module.__name__ + "." + elem.__name__
class_elem = APIClass(elem, path)
collected_info[path] = class_elem
def traverse_module_(stack, collected_info, expanded, root_name, exclude):
while len(stack) > 0:
parent = stack.pop()
if not inspect.ismodule(parent):
raise ValueError("Stack should only contain module types")
parent_name = parent.__name__
expanded.add(parent)
if not parent_name.startswith(root_name):
continue
possible_children_names = set([])
try:
possible_children_names.update(parent.__all__)
except AttributeError:
continue
for child_name in possible_children_names:
imp_path = parent_name + "." + child_name
try:
print("Trying to import", imp_path)
child = importlib.import_module(imp_path)
except ModuleNotFoundError:
print("Failed to import", imp_path)
pass
try:
child = getattr(parent, child_name)
except AttributeError:
continue
try:
hash(child)
except TypeError:
# can't hash, so don't care about it
# can't compare to things already expanded or excluded
continue
if child in exclude or child in expanded:
# don't want it or already collected it
continue
if inspect.isclass(child):
try:
child()
except NotImplementedError:
# abstract classes, don't want these
continue
except TypeError:
# have required args, that's fine
pass
build_class_elem(child, parent, collected_info)
expanded.add(child)
elif inspect.ismodule(child):
stack.append(child)
else:
continue
def traverse_module(module):
stack = [module]
collected_info = {}
expanded = set([])
traverse_module_(
stack,
collected_info,
expanded,
module.__name__,
EXCLUDED,
)
return collected_info
def split_camel_case(token):
matches = re.findall(r'[A-Z](?:[a-z]+|[A-Z]*(?=[A-Z]|$))', token)
if len(matches) == 0:
return [token]
else:
return matches
def split_snake_case(token):
if "_" in token:
return token.split("_")
else:
return [token]
def split_api_name_subtokens(basename):
result_tokens = set()
subtokens = split_snake_case(basename)
result_tokens.update(subtokens)
for tk in subtokens:
result_tokens.update(split_camel_case(tk))
return set([t.lower() for t in result_tokens])
def extend_class_description(path):
extra_tokens = []
for token in path.split("."):
extra_tokens.extend(split_api_name_subtokens(token))
extra_tokens.append(token)
extra_tokens = set(extra_tokens)
return " ".join([t.lower() for t in extra_tokens])
class APIClass(object):
def __init__(self, class_, path):
self.docstring = class_.__doc__
self.description = get_short_class_desc(self.docstring)
self.path = path
self.children = get_expected_params(class_, self)
self.arg_names = [c.param_name for c in self.children]
ext_tokens = extend_class_description(path)
self.embedded_text = ext_tokens + " " + self.description.lower()
self.vector = nlp.vectorize(
self.embedded_text,
remove_stop_words=True,
)
self.filter_tokens = self.get_filter_tokens()
self.is_regression_op = sklearn.base.is_regressor(class_)
self.is_classification_op = sklearn.base.is_classifier(class_)
def get_filter_tokens(self):
basename = self.path.split(".")[-1]
subtokens = split_api_name_subtokens(basename)
subtokens.add(basename.lower())
# lemmatize subtokens
lemmas = [nlp.lemmatize(t) for t in subtokens]
subtokens.update(lemmas)
return subtokens
def __str__(self):
return "APIClass({})".format(self.path)
def __repr__(self):
return str(self)
class APIClassParameter(object):
def __init__(self, param_name, param, class_elem, doc_dict=None):
if doc_dict is None:
            doc_dict = parse_param_docs(class_elem.docstring, [param_name])
self.param_name = param_name
self.description = doc_dict.get(param_name, "")
self.path = class_elem.path + ":" + param_name
self.parent = class_elem
self.default_value = param.default
self.vector = nlp.vectorize(
            self.description.lower() + " " + param_name,
remove_stop_words=True,
)
self.filter_tokens = self.get_filter_tokens()
def get_filter_tokens(self):
param_name = self.path.split(":")[-1]
subtokens = split_api_name_subtokens(param_name)
subtokens.add(param_name)
lemmas = [nlp.lemmatize(t) for t in subtokens]
subtokens.update(lemmas)
return subtokens
def __str__(self):
return "APIClassParameter({})".format(self.path)
def __repr__(self):
return str(self)
class APICollection(object):
def __init__(self, root_module):
self.module_info = traverse_module(root_module)
self.classes = list(self.module_info.values())
        # remove Base (abstract) classes
self.classes = [
c for c in self.classes
if not c.path.split(".")[-1].startswith("Base")
]
self.params = [p for c in self.classes for p in c.children]
self.all_elems = self.classes + self.params
self.paths = [e.path for e in self.all_elems]
self.matrix = None
self.filter_tokens = self.get_filter_tokens()
self.basenames = [c.path.split(".")[-1] for c in self.classes]
def get_filter_tokens(self):
subtokens = set([])
for e in self.classes:
            # drop single-character digit tokens
            # (note: `t not in string.digits` only filters single digits)
            tokens = [t for t in e.filter_tokens if t not in string.digits]
subtokens.update(tokens)
return subtokens
def build_matrix(self):
num_rows = len(self.classes)
num_cols = self.classes[0].vector.shape[0]
matrix = np.zeros((num_rows, num_cols), dtype=np.float32)
for ix, elem in enumerate(self.classes):
matrix[ix, :] = elem.vector
self.matrix = matrix
def get_matrix(self):
if self.matrix is None:
self.build_matrix()
return self.matrix
def get_args():
parser = ArgumentParser(description="Extract sklearn API")
parser.add_argument(
"--output",
type=str,
help="Path to dump pickled results",
)
return parser.parse_args()
def main():
args = get_args()
api_collection = APICollection(sklearn)
with open(args.output, "wb") as fout:
pickle.dump(api_collection, fout)
if __name__ == "__main__":
try:
main()
except Exception as err:
import pdb
pdb.post_mortem()
```
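The splitting helpers above turn API names such as RandomForestClassifier or min_samples_leaf into lowercase subtokens that feed the embedded search text. A small standalone check of the camel-case regex (the same pattern as in split_camel_case, reproduced here purely for illustration):
```python
# Standalone check of the camel-case splitting regex used by
# split_camel_case; acronyms like "PCA" stay as a single token.
import re

PATTERN = r'[A-Z](?:[a-z]+|[A-Z]*(?=[A-Z]|$))'

assert re.findall(PATTERN, "RandomForestClassifier") == ["Random", "Forest", "Classifier"]
assert re.findall(PATTERN, "PCA") == ["PCA"]
assert "min_samples_leaf".split("_") == ["min", "samples", "leaf"]
print("token splitting behaves as expected")
```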
#### File: ams/core/mp_utils.py
```python
import multiprocessing as mp
from multiprocessing.context import TimeoutError
import sys
MP_INITIALIZED = False
def init_mp():
# change start method to avoid issues with crashes/freezes
# discussed in
# http://scikit-learn.org/stable/faq.html#why-do-i-sometime-get-a-crash-freeze-with-n-jobs-1-under-osx-or-linux
global MP_INITIALIZED
if MP_INITIALIZED:
return
try:
print("Setting mp start method to forkserver")
mp.set_start_method('forkserver')
MP_INITIALIZED = True
except RuntimeError:
# already set
pass
def run(seconds, fun, *args, **kwargs):
if seconds > 0:
pool = mp.Pool(processes=1)
try:
proc = pool.apply_async(fun, args, kwargs)
result = proc.get(seconds)
return result
finally:
pool.terminate()
pool.close()
else:
# if no timeout, then no point
# in incurring cost of running as separate process
# so call locally
return fun(*args, **kwargs)
```
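run() above only pays for a worker process when a positive timeout is requested; otherwise it calls the function inline. The following self-contained sketch reproduces the same apply_async/get(timeout) pattern with a toy slow_task function and illustrative timeout values (it does not import the module above).
```python
# Self-contained illustration of the timeout pattern used in run():
# dispatch to a single-worker pool and bound the wait with get(seconds).
import multiprocessing as mp
import time

def slow_task(x):
    time.sleep(2)
    return x * 2

def run_with_timeout(seconds, fun, *args):
    pool = mp.Pool(processes=1)
    try:
        return pool.apply_async(fun, args).get(seconds)
    finally:
        pool.terminate()

if __name__ == "__main__":
    print(run_with_timeout(5, slow_task, 21))  # finishes within the limit -> 42
    try:
        run_with_timeout(1, slow_task, 21)     # exceeds the 1s budget
    except mp.TimeoutError:
        print("timed out")
```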
#### File: ams/datasets/utils.py
```python
import os
import pandas as pd
ROOT_DATA_FOLDER = os.environ.get(
"DATA", os.path.join(os.path.dirname(__file__), "../data/")
)
assert ROOT_DATA_FOLDER is not None
DATASETS_FOLDER = os.path.abspath(
os.path.join(ROOT_DATA_FOLDER, "benchmarks-datasets")
)
if not os.path.exists(DATASETS_FOLDER):
raise Exception("Missing benchmarks-datasets folder:", DATASETS_FOLDER)
def fetch_data(name):
dataset_path = os.path.join(DATASETS_FOLDER, name + ".tsv")
dataset = pd.read_csv(dataset_path, sep='\t')
X = dataset.drop('target', axis=1).values
y = dataset['target'].values
return X, y
``` |
{
"source": "josepablocam/changestructor",
"score": 3
} |
#### File: chg/db/git_log_to_db.py
```python
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from chg.platform import git
from chg.db.database import get_store
import tqdm
def log_to_db(store):
print("Git log to database")
log_entries = git.log()
# from oldest to newest
log_entries = list(reversed(log_entries))
n = len(log_entries)
for ix in tqdm.tqdm(list(range(1, n))):
prev_commit = log_entries[ix - 1]
curr_commit = log_entries[ix]
old_hash = prev_commit["abbreviated_commit"]
new_hash = curr_commit["abbreviated_commit"]
chunk = git.diff_from_to(old_hash, new_hash)
question = "Commit: "
answer = curr_commit["subject"] + curr_commit["body"]
answered = [(question, answer)]
# TODO: include
# code and dialogue embeddings
chunk_id = store.record_chunk((old_hash, chunk, new_hash))
store.record_dialogue((chunk_id, answered))
def get_args():
parser = ArgumentParser(
description="Record all git commits to chgstructor database",
formatter_class=ArgumentDefaultsHelpFormatter
)
return parser.parse_args()
def main():
_ = get_args()
store = get_store()
log_to_db(store)
if __name__ == "__main__":
try:
main()
except Exception as err:
import pdb
pdb.post_mortem()
```
#### File: chg/embed/basic.py
```python
from argparse import ArgumentParser
import os
import subprocess
import numpy as np
from transformers import RobertaTokenizer, RobertaModel
import torch
import tqdm
from chg.db.database import get_store
# fix odd fault...
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
def remove_color_ascii(msg):
proc = subprocess.Popen(
"sed 's/\x1b\[[0-9;]*m//g'",
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
shell=True,
)
output, _ = proc.communicate(msg.encode())
return output.decode().strip()
def normalize_vectors(mat):
# make vectors unit norm
norm = np.sqrt(np.sum(mat**2, axis=1))
# set to 1.0 to avoid nan
norm[norm == 0] = 1.0
norm_mat = mat / norm.reshape(-1, 1)
return norm_mat
class BasicEmbedder(object):
def __init__(self):
self.tokenizer = RobertaTokenizer.from_pretrained(
"microsoft/codebert-base"
)
self.model = RobertaModel.from_pretrained("microsoft/codebert-base")
self.model = self.model.to("cpu")
# self.model = self.model.eval()
self.max_len = self.model.config.max_position_embeddings
def embed_(self, txt):
        # tokenize once; CLS/SEP special tokens are added per chunk below
        tokens = self.tokenizer.tokenize(txt)
# split up chunks according to max_len
embeddings = []
chunk_len = self.max_len - 4
for i in tqdm.tqdm(list(range(0, len(tokens), chunk_len))):
chunk = [self.tokenizer.cls_token]
chunk.extend(tokens[i:(i + chunk_len)])
chunk.append(self.tokenizer.sep_token)
chunk_token_ids = self.tokenizer.convert_tokens_to_ids(chunk)
with torch.no_grad():
chunk_embedding = self.model(
torch.tensor(chunk_token_ids)[None, :]
)[0]
# average over tokens
chunk_embedding = chunk_embedding.mean(dim=1)
embeddings.append(chunk_embedding)
embeddings = torch.stack(embeddings)
# average over chunks
txt_embedding = embeddings.mean(dim=0)
txt_embedding = txt_embedding.numpy()
# unit norm
txt_embedding = normalize_vectors(txt_embedding)
txt_embedding = txt_embedding.flatten()
return txt_embedding
def embed_code(self, code):
return self.embed_(remove_color_ascii(code))
def embed_nl(self, nl):
return self.embed_(nl)
def embed_dialogue(self, question_and_answers):
# empty history
if len(question_and_answers) == 0:
question_and_answers = [("", "")]
merged_dialogue = "\n".join(
"{}:{}".format(q, a) for q, a in question_and_answers
)
return self.embed_nl(merged_dialogue)
def get_args():
parser = ArgumentParser(description="Embed chg database")
return parser.parse_args()
def main():
_ = get_args()
embedder = BasicEmbedder()
store = get_store()
# need to embed every chunk
chunk_ids = store.run_query(
"SELECT id FROM Chunks WHERE chunk IS NOT NULL"
)
chunk_ids = [row[0] for row in chunk_ids]
print("Embedding code and dialogue for {} chunks".format(len(chunk_ids)))
for chunk_id in tqdm.tqdm(chunk_ids):
chunk = store.run_query(
"SELECT chunk FROM Chunks WHERE id={}".format(chunk_id)
)
assert len(chunk) == 1, "Chunks should be uniquely identified"
chunk = chunk[0]
code_embedding = embedder.embed_code(chunk[0])
# embed dialogue associated with this chunk
dialogue = store.run_query(
"SELECT question, answer FROM Dialogue WHERE chunk_id={} ORDER BY id"
.format(chunk_id)
)
assert len(dialogue) >= 1, "Should have at least one commit message"
nl_embedding = embedder.embed_dialogue(dialogue)
store.record_embeddings((chunk_id, code_embedding, nl_embedding))
if __name__ == "__main__":
try:
main()
except Exception as err:
import pdb
pdb.post_mortem()
```
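normalize_vectors above rescales each row to unit L2 norm and leaves all-zero rows untouched by substituting 1.0 for their norm before dividing. A quick numeric check of that behavior on a toy matrix (reproducing the same computation, not importing the module):
```python
# Quick check of the row-wise unit-norm behavior of normalize_vectors:
# an all-zero row stays all-zero instead of producing NaNs.
import numpy as np

mat = np.array([[3.0, 4.0], [0.0, 0.0]])
norm = np.sqrt(np.sum(mat ** 2, axis=1))
norm[norm == 0] = 1.0
normed = mat / norm.reshape(-1, 1)
print(normed)                          # [[0.6, 0.8], [0.0, 0.0]]
print(np.linalg.norm(normed, axis=1))  # [1.0, 0.0]
```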
#### File: chg/ui/simple_cli_ui.py
```python
class SimpleCLIUI(object):
def __init__(self, prompt_marker=">", dev=False):
self.dev = dev
self.prompt_marker = prompt_marker
def display_chunk(self, chunk):
print(chunk)
def display_question(self, question):
print("Question: {}".format(question))
def display_search_result(self, result):
print(result)
def prompt(self, msg, options=None):
formatted_msg = "{} {} ".format(self.prompt_marker, msg)
if options is not None:
formatted_msg += "[Options={}] ".format(options)
res = input(formatted_msg)
res = res.strip()
if options is None or res in options:
return res
        else:
            # prompt again until a valid option is entered
            return self.prompt(msg, options=options)
def annotate_helper(self, chunker, store, annotator, platform):
for chunk in chunker.get_chunks():
# show chunk to user initially
self.display_chunk(chunk)
answer = self.prompt("Stage?", ["Y", "n"])
if answer == "n":
# skip
continue
if not self.dev:
chunker.stage(chunk)
# annotater gets access to chunk
# so can produce relevant questions
annotator.consume_chunk(chunk)
answered = []
while not annotator.done():
question = annotator.ask()
self.display_question(question)
answer = self.prompt("")
# annotator can update its internal state
# based on answer (e.g. new question based on previous answer)
annotator.consume_answer(answer)
answered.append((question, answer))
# changes induced by the chunk (i.e. this diff)
# are committed directly by `chg` (i.e. the user
# no longer needs to interact with `git commit`)
old_hash = platform.hash()
# some annotators may want to generate the commit message
# directly from the user's dialogue
# rather than prompt user for explicit commit message
if annotator.has_commit_message():
# but user can always override
generate_msg = self.prompt("Generate commit msg?", ["Y", "n"])
if generate_msg == "Y":
msg = annotator.get_commit_message()
else:
msg = self.prompt("Commit message: ")
else:
msg = self.prompt("Commit message: ")
# if user writes commit message, we should take that
# as more info for db
answered.append(("Commit message", msg))
# just for dev
if not self.dev:
chunker.commit(msg, chunk)
new_hash = platform.hash()
# info is only stored in the database after the commit
# has taken place
# TODO: if the user exits or crashes before this
# the file system will reflect git changes, but not
# any info in chg database, we should fix this...
chunk_id = store.record_chunk((old_hash, chunk, new_hash))
store.record_dialogue((chunk_id, answered))
def annotate(self, chunker, store, annotator, platform):
try:
self.annotate_helper(chunker, store, annotator, platform)
except (EOFError, KeyboardInterrupt):
return
def ask(self, searcher, k=5):
try:
while True:
user_question = self.prompt("Question: ")
results = searcher.search(user_question, k=k)
for r in results:
self.display_search_result(r)
except (EOFError, KeyboardInterrupt):
return
``` |
{
"source": "josepablocam/common-code-extraction",
"score": 2
} |
#### File: common-code-extraction/analysis/pipeline_time_analysis.py
```python
from argparse import ArgumentParser
import os
import sys
import matplotlib.pyplot as plt
import matplotlib
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
matplotlib.rcParams.update({'font.size': 14})
import pandas as pd
import seaborn as sns
def get_times(path):
with open(path, "r") as fin:
lines = [entry.split(":") for entry in fin.readlines()]
data = [(e[0], float(e[1])) for e in lines]
df = pd.DataFrame(data, columns=["script", "time"])
df["dataset"] = path.split("/")[1]
# to match timeout naming convention
df["script"] = df["script"].map(lambda x: x.replace("/", "_"))
return df
def get_times_df(paths, rename_map=None):
dfs = []
for p in paths:
dfs.append(get_times(p))
df = pd.concat(dfs)
if rename_map is not None:
df["dataset"] = df["dataset"].map(lambda x: rename_map.get(x, x))
return df
def compare_times(
df_plain,
df_full,
max_plot=None,
timedout=None,
timeout=None,
):
if timedout is not None:
print(
"Setting time to {} for {} timedout scripts".format(
timeout, len(timedout)
)
)
assert timeout > 0
assert all(s not in df_full["script"].values for s in timedout)
df_timedout = pd.DataFrame(
[(s, timeout) for s in timedout],
columns=["script", "time"],
)
df_full = pd.concat([df_full, df_timedout])
df_full = pd.merge(
df_full,
df_plain,
how="inner",
on=["dataset", "script"],
suffixes=("_full", "_plain")
)
# valid timing numbers only
valid = (df_full["time_full"] > 0) & (df_full["time_plain"] > 0)
print("Valid: {} / {}".format(valid.sum(), valid.shape[0]))
df_full = df_full[valid]
df_full["ratio"] = df_full["time_full"] / df_full["time_plain"]
    # ratios below 1.0 should not really occur (instrumentation only adds
    # overhead), so treat them as measurement noise and clamp to 1.0
df_full["ratio"] = df_full["ratio"].map(lambda x: 1.0 if x < 1.0 else x)
summary_df = df_full.groupby("dataset")["ratio"].describe()
print("Summary")
print(summary_df)
fig, ax = plt.subplots(1)
# ax.set_aspect(1)
unique_datasets = df_full["dataset"].unique().tolist()
assert len(unique_datasets) == 3
palette = dict(zip(unique_datasets, sns.color_palette()))
sns.ecdfplot(data=df_full, x="ratio", hue="dataset")
sns.scatterplot(
data=df_full,
x="ratio",
y=[0] * df_full.shape[0],
hue="dataset",
palette=palette,
)
# mark one per
for d in unique_datasets:
median = df_full[df_full["dataset"] == d]["ratio"].median()
median_label = "Median {}={:.2f}".format(d, median)
ax.axvline(
x=median,
ymin=0.0,
ymax=1.0,
label=median_label,
linestyle="dashed",
color=palette[d],
)
ax.set_ylim(-0.01, 1.0)
if max_plot is not None and df_full["ratio"].max() > max_plot:
print("Clamping x-axis to {}".format(max_plot))
over_max = df_full["ratio"][df_full["ratio"] > max_plot]
print("Removes: {}".format(over_max.values.tolist()))
ax.set_xlim(1.0, max_plot)
ax.set_xlabel("Execution Time Ratio")
ax.set_ylabel("Empirical Cumulative Distribution")
plt.legend(loc="best", prop={"size": 12})
plt.tight_layout()
return summary_df, ax
def get_args():
parser = ArgumentParser(description="Compute time ratios")
parser.add_argument(
"--plain",
type=str,
nargs="+",
help="List of time files for plain executions"
)
parser.add_argument(
"--full",
type=str,
nargs="+",
help="List of time files for full (pipeline) executions"
)
parser.add_argument(
"--max_plot",
type=float,
        help="If not None, only plot points below this max (larger values are still reported)",
)
parser.add_argument(
"--timedout",
type=str,
nargs="+",
help="Scripts that time out when instrumented (set time to max)",
)
parser.add_argument(
"--timeout",
type=int,
help="Seconds to use as time for timedout",
)
parser.add_argument(
"--rename", type=str, nargs="+", help="List of renamings (orig:new)"
)
parser.add_argument("--output_dir", type=str, help="Output directory")
return parser.parse_args()
def main():
args = get_args()
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
rename_map = None
if args.rename is not None:
rename_map = dict([r.split(":") for r in args.rename])
df_plain = get_times_df(args.plain, rename_map)
df_full = get_times_df(args.full, rename_map)
summary, ax = compare_times(
df_plain,
df_full,
max_plot=args.max_plot,
timedout=args.timedout,
timeout=args.timeout,
)
summary_path = os.path.join(args.output_dir, "time_summary.csv")
summary.to_csv(summary_path)
plot_path = os.path.join(args.output_dir, "time_ecdf.pdf")
ax.get_figure().savefig(plot_path)
if __name__ == "__main__":
try:
main()
except Exception as err:
import pdb
pdb.post_mortem()
```
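compare_times above reports the slowdown of the instrumented ("full") runs relative to plain runs as a ratio and treats ratios below 1.0 as timing noise. A toy version of that computation with made-up times (not real measurements):
```python
# Toy version of the ratio/clamping step in compare_times;
# ratios below 1.0 are clamped since instrumentation only adds overhead.
import pandas as pd

df = pd.DataFrame({
    "script": ["a.py", "b.py", "c.py"],
    "time_plain": [10.0, 4.0, 2.0],
    "time_full": [12.5, 3.9, 6.0],
})
df["ratio"] = (df["time_full"] / df["time_plain"]).clip(lower=1.0)
print(df[["script", "ratio"]])
# a.py -> 1.25, b.py -> 1.00 (clamped up from 0.975), c.py -> 3.00
```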
#### File: executability-results/loan_data/source_code.py
```python
def cleaning_func_0(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv', low_memory=False)
data.earliest_cr_line = pd.to_datetime(data.earliest_cr_line)
return data
#=============
# Function 1
def cleaning_func_1(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv', low_memory=False)
data['term'] = data['term'].apply((lambda x: x.lstrip()))
return data
#=============
# Function 2
def cleaning_func_2(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv', low_memory=False)
data.earliest_cr_line = pd.to_datetime(data.earliest_cr_line)
data['earliest_cr_line_year'] = data['earliest_cr_line'].dt.year
return data
#=============
# Function 3
def cleaning_func_3(data):
# additional context code from user definitions
def impute_missing_algo(df, target, cats, cols, algo):
y = pd.DataFrame(df[target])
X = df[cols].copy()
X.drop(cats, axis=1, inplace=True)
cats = pd.get_dummies(df[cats])
X = pd.concat([X, cats], axis=1)
y['null'] = y[target].isnull()
y['null'] = y.loc[:, target].isnull()
X['null'] = y[target].isnull()
y_missing = y[(y['null'] == True)]
y_notmissing = y[(y['null'] == False)]
X_missing = X[(X['null'] == True)]
X_notmissing = X[(X['null'] == False)]
y_missing.loc[:, target] = ''
dfs = [y_missing, y_notmissing, X_missing, X_notmissing]
for df in dfs:
df.drop('null', inplace=True, axis=1)
y_missing = y_missing.values.ravel(order='C')
y_notmissing = y_notmissing.values.ravel(order='C')
X_missing = X_missing.as_matrix()
X_notmissing = X_notmissing.as_matrix()
algo.fit(X_notmissing, y_notmissing)
y_missing = algo.predict(X_missing)
y.loc[((y['null'] == True), target)] = y_missing
y.loc[((y['null'] == False), target)] = y_notmissing
return y[target]
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv', low_memory=False)
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(max_depth=5, n_estimators=100, max_features=1)
catiables = ['term', 'purpose', 'grade']
columns = ['loan_amnt', 'funded_amnt', 'funded_amnt_inv', 'int_rate', 'grade', 'purpose', 'term']
data['earliest_cr_line_year'] = impute_missing_algo(data, 'earliest_cr_line_year', catiables, columns, rf)
return data
#=============
# Function 4
def cleaning_func_4(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv', low_memory=False)
data['emp_length'] = data['emp_length'].astype(int)
return data
#=============
# Function 5
def cleaning_func_5(data):
# additional context code from user definitions
def impute_missing_algo(df, target, cats, cols, algo):
y = pd.DataFrame(df[target])
X = df[cols].copy()
X.drop(cats, axis=1, inplace=True)
cats = pd.get_dummies(df[cats])
X = pd.concat([X, cats], axis=1)
y['null'] = y[target].isnull()
y['null'] = y.loc[:, target].isnull()
X['null'] = y[target].isnull()
y_missing = y[(y['null'] == True)]
y_notmissing = y[(y['null'] == False)]
X_missing = X[(X['null'] == True)]
X_notmissing = X[(X['null'] == False)]
y_missing.loc[:, target] = ''
dfs = [y_missing, y_notmissing, X_missing, X_notmissing]
for df in dfs:
df.drop('null', inplace=True, axis=1)
y_missing = y_missing.values.ravel(order='C')
y_notmissing = y_notmissing.values.ravel(order='C')
X_missing = X_missing.as_matrix()
X_notmissing = X_notmissing.as_matrix()
algo.fit(X_notmissing, y_notmissing)
y_missing = algo.predict(X_missing)
y.loc[((y['null'] == True), target)] = y_missing
y.loc[((y['null'] == False), target)] = y_notmissing
return y[target]
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv', low_memory=False)
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(max_depth=5, n_estimators=100, max_features=1)
catiables = ['term', 'purpose', 'grade']
columns = ['loan_amnt', 'funded_amnt', 'funded_amnt_inv', 'int_rate', 'grade', 'purpose', 'term']
data['emp_length'] = impute_missing_algo(data, 'emp_length', catiables, columns, rf)
return data
#=============
# Function 6
def cleaning_func_6(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv', low_memory=False)
data.issue_d = pd.Series(data.issue_d).str.replace('-2015', '')
return data
#=============
# Function 7
def cleaning_func_8(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv', low_memory=False)
data.earliest_cr_line = pd.to_datetime(data.earliest_cr_line)
s = pd.value_counts(data['earliest_cr_line']).to_frame().reset_index()
s.columns = ['date', 'count']
return s
#=============
# Function 8
def cleaning_func_9(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv', low_memory=False)
data.earliest_cr_line = pd.to_datetime(data.earliest_cr_line)
s = pd.value_counts(data['earliest_cr_line']).to_frame().reset_index()
s['year'] = s['date'].dt.year
return s
#=============
# Function 9
def cleaning_func_11(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv', low_memory=False)
data['emp_length'] = data['emp_length'].astype(int)
s = pd.value_counts(data['emp_length']).to_frame().reset_index()
s.columns = ['type', 'count']
return s
#=============
# Function 10
def cleaning_func_12(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv', low_memory=False)
data.earliest_cr_line = pd.to_datetime(data.earliest_cr_line)
s = pd.value_counts(data['earliest_cr_line']).to_frame().reset_index()
s['month'] = s['date'].dt.month
return s
#=============
# Function 11
def cleaning_func_0(dataset):
# core cleaning code
import pandas as pd
# dataset = pd.read_csv('../input/loan.csv', low_memory=False)
dataset = dataset.fillna(0)
dataset['term'] = dataset['term'].astype('category').cat.codes
return dataset
#=============
# Function 12
def cleaning_func_1(dataset):
# core cleaning code
import pandas as pd
# dataset = pd.read_csv('../input/loan.csv', low_memory=False)
dataset = dataset.fillna(0)
dataset['verification_status'] = dataset['verification_status'].astype('category').cat.codes
return dataset
#=============
# Function 13
def cleaning_func_2(dataset):
# core cleaning code
import pandas as pd
# dataset = pd.read_csv('../input/loan.csv', low_memory=False)
dataset = dataset.fillna(0)
dataset['purpose'] = dataset['purpose'].astype('category').cat.codes
return dataset
#=============
# Function 14
def cleaning_func_3(dataset):
# core cleaning code
import pandas as pd
# dataset = pd.read_csv('../input/loan.csv', low_memory=False)
dataset = dataset.fillna(0)
dataset['application_type'] = dataset['application_type'].astype('category').cat.codes
return dataset
#=============
# Function 15
def cleaning_func_4(dataset):
# core cleaning code
import pandas as pd
# dataset = pd.read_csv('../input/loan.csv', low_memory=False)
dataset = dataset.fillna(0)
dataset['addr_state'] = dataset['addr_state'].astype('category').cat.codes
return dataset
#=============
# Function 16
def cleaning_func_5(dataset):
# core cleaning code
import pandas as pd
# dataset = pd.read_csv('../input/loan.csv', low_memory=False)
dataset = dataset.fillna(0)
dataset['sub_grade'] = dataset['sub_grade'].astype('category').cat.codes
return dataset
#=============
# Function 17
def cleaning_func_6(dataset):
# core cleaning code
import pandas as pd
# dataset = pd.read_csv('../input/loan.csv', low_memory=False)
dataset = dataset.fillna(0)
dataset['loan_status'] = dataset['loan_status'].astype('category').cat.codes
return dataset
#=============
# Function 18
def cleaning_func_7(dataset):
# core cleaning code
import pandas as pd
# dataset = pd.read_csv('../input/loan.csv', low_memory=False)
dataset = dataset.fillna(0)
dataset['initial_list_status'] = dataset['initial_list_status'].astype('category').cat.codes
return dataset
#=============
# Function 19
def cleaning_func_8(dataset):
# core cleaning code
import pandas as pd
# dataset = pd.read_csv('../input/loan.csv', low_memory=False)
dataset = dataset.fillna(0)
dataset['emp_length'] = dataset['emp_length'].astype('category').cat.codes
return dataset
#=============
# Function 20
def cleaning_func_9(dataset):
# core cleaning code
import pandas as pd
# dataset = pd.read_csv('../input/loan.csv', low_memory=False)
dataset = dataset.fillna(0)
dataset['verification_status_joint'] = dataset['verification_status_joint'].astype('category').cat.codes
return dataset
#=============
# Function 21
def cleaning_func_10(dataset):
# core cleaning code
import pandas as pd
# dataset = pd.read_csv('../input/loan.csv', low_memory=False)
dataset = dataset.fillna(0)
dataset['home_ownership'] = dataset['home_ownership'].astype('category').cat.codes
return dataset
#=============
# Function 22
def cleaning_func_11(dataset):
# core cleaning code
import pandas as pd
# dataset = pd.read_csv('../input/loan.csv', low_memory=False)
dataset = dataset.fillna(0)
dataset['pymnt_plan'] = dataset['pymnt_plan'].astype('category').cat.codes
return dataset
#=============
# Function 23
def cleaning_func_12(dataset):
# core cleaning code
import pandas as pd
# dataset = pd.read_csv('../input/loan.csv', low_memory=False)
dataset = dataset.fillna(0)
dataset['grade'] = dataset['grade'].astype('category').cat.codes
return dataset
#=============
# Function 24
def cleaning_func_13(dataset):
# core cleaning code
import numpy as np
import pandas as pd
# dataset = pd.read_csv('../input/loan.csv', low_memory=False)
dataset = dataset.fillna(0)
dataset['earliest_cr_line'] = pd.to_datetime(dataset['earliest_cr_line'])
dataset['earliest_cr_line'] = ((dataset['earliest_cr_line'] - dataset['earliest_cr_line'].min()) / np.timedelta64(1, 'D'))
return dataset
#=============
# Function 25
def cleaning_func_14(dataset):
# core cleaning code
import numpy as np
import pandas as pd
# dataset = pd.read_csv('../input/loan.csv', low_memory=False)
dataset = dataset.fillna(0)
dataset['last_pymnt_d'] = pd.to_datetime(dataset['last_pymnt_d'])
dataset['last_pymnt_d'] = ((dataset['last_pymnt_d'] - dataset['last_pymnt_d'].min()) / np.timedelta64(1, 'D'))
return dataset
#=============
# Function 26
def cleaning_func_15(dataset):
# core cleaning code
import numpy as np
import pandas as pd
# dataset = pd.read_csv('../input/loan.csv', low_memory=False)
dataset = dataset.fillna(0)
dataset['last_credit_pull_d'] = pd.to_datetime(dataset['last_credit_pull_d'])
dataset['last_credit_pull_d'] = ((dataset['last_credit_pull_d'] - dataset['last_credit_pull_d'].min()) / np.timedelta64(1, 'D'))
return dataset
#=============
# Function 27
def cleaning_func_16(dataset):
# core cleaning code
import numpy as np
import pandas as pd
# dataset = pd.read_csv('../input/loan.csv', low_memory=False)
dataset = dataset.fillna(0)
dataset['issue_d'] = pd.to_datetime(dataset['issue_d'])
dataset['issue_d'] = ((dataset['issue_d'] - dataset['issue_d'].min()) / np.timedelta64(1, 'D'))
return dataset
#=============
# Function 28
def cleaning_func_17(dataset):
# core cleaning code
import numpy as np
import pandas as pd
# dataset = pd.read_csv('../input/loan.csv', low_memory=False)
dataset = dataset.fillna(0)
dataset['next_pymnt_d'] = pd.to_datetime(dataset['next_pymnt_d'])
dataset['next_pymnt_d'] = ((dataset['next_pymnt_d'] - dataset['next_pymnt_d'].min()) / np.timedelta64(1, 'D'))
return dataset
#=============
# Function 29
def cleaning_func_18(dataset):
# additional context code from user definitions
def LoanResult(status):
if ((status == 5) or (status == 1) or (status == 7)):
return 1
else:
return 0
# core cleaning code
import numpy as np
import pandas as pd
# dataset = pd.read_csv('../input/loan.csv', low_memory=False)
dataset = dataset.fillna(0)
dataset['loan_status'] = dataset['loan_status'].astype('category').cat.codes
non_numerics = [x for x in dataset.columns if (not ((dataset[x].dtype == np.float64) or (dataset[x].dtype == np.int8) or (dataset[x].dtype == np.int64)))]
df = dataset
df = df.drop(non_numerics, 1)
df['loan_status'] = df['loan_status'].apply(LoanResult)
return df
#=============
# Function 30
def cleaning_func_19(dataset):
# core cleaning code
import numpy as np
import pandas as pd
# dataset = pd.read_csv('../input/loan.csv', low_memory=False)
dataset = dataset.fillna(0)
dataset['loan_status'] = dataset['loan_status'].astype('category').cat.codes
non_numerics = [x for x in dataset.columns if (not ((dataset[x].dtype == np.float64) or (dataset[x].dtype == np.int8) or (dataset[x].dtype == np.int64)))]
df = dataset
return df
#=============
# Function 31
def cleaning_func_0(df):
# additional context code from user definitions
def status_class(text):
if (text == 'Fully Paid'):
return 'Fully Paid'
elif ((text == 'Charged Off') or (text == 'Default')):
return 'Default'
elif ((text == 'Current') or (text == 'Issued')):
return 'Current'
else:
return 'Late'
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv')
df['status_class'] = df['loan_status'].apply(status_class)
return df
#=============
# Function 32
def cleaning_func_1(df):
# additional context code from user definitions
def emp_length_class(text):
if ((text == '< 1 year') or (text == '1 year') or (text == '2 years') or (text == '3 years')):
return '<=3 years'
elif ((text == '4 years') or (text == '5 years') or (text == '6 years')):
return '4-6 years'
elif ((text == '7 years') or (text == '8 years') or (text == '9 years')):
return '7-9 years'
elif (text == '10+ years'):
return '>=10 years'
else:
return None
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv')
df['emp_length_class'] = df['emp_length'].apply(emp_length_class)
return df
#=============
# Function 33
def cleaning_func_2(df):
# additional context code from user definitions
def inc_class(num):
if (num <= 50000):
return '<=50000'
elif (num <= 75000):
return '50000-75000'
elif (num <= 100000):
return '75000-100000'
elif (num <= 125000):
return '100000-125000'
elif (num <= 150000):
return '125000-150000'
else:
return '>150000'
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv')
df['inc_class'] = df['annual_inc'].apply(inc_class)
return df
#=============
# Function 34
def cleaning_func_3(df):
# additional context code from user definitions
def loan_class(num):
if (num <= 10000):
return '<=10000'
elif (num <= 20000):
return '10000-20000'
elif (num <= 30000):
return '20000-30000'
else:
return '>30000'
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv')
df['loan_class'] = df['loan_amnt'].apply(loan_class)
return df
#=============
# Function 35
def cleaning_func_4(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv')
df = df.drop(['id', 'member_id', 'funded_amnt', 'funded_amnt_inv', 'sub_grade', 'emp_title', 'issue_d', 'zip_code', 'out_prncp', 'out_prncp_inv', 'total_pymnt', 'total_pymnt_inv', 'total_rec_prncp', 'total_rec_int', 'total_rec_late_fee', 'recoveries', 'collection_recovery_fee', 'last_pymnt_d', 'last_pymnt_amnt', 'desc', 'url', 'title', 'initial_list_status', 'pymnt_plan', 'policy_code', 'application_type', 'earliest_cr_line', 'last_credit_pull_d', 'next_pymnt_d', 'addr_state'], axis=1)
df = df.dropna(thresh=(len(df) / 2), axis=1)
return df
#=============
# Function 36
def cleaning_func_5(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv')
df = df.drop(['id', 'member_id', 'funded_amnt', 'funded_amnt_inv', 'sub_grade', 'emp_title', 'issue_d', 'zip_code', 'out_prncp', 'out_prncp_inv', 'total_pymnt', 'total_pymnt_inv', 'total_rec_prncp', 'total_rec_int', 'total_rec_late_fee', 'recoveries', 'collection_recovery_fee', 'last_pymnt_d', 'last_pymnt_amnt', 'desc', 'url', 'title', 'initial_list_status', 'pymnt_plan', 'policy_code', 'application_type', 'earliest_cr_line', 'last_credit_pull_d', 'next_pymnt_d', 'addr_state'], axis=1)
return df
#=============
# Function 37
def cleaning_func_6(df):
# additional context code from user definitions
def status_binary(text):
if (text == 'Fully Paid'):
return 0
elif ((text == 'Current') or (text == 'Issued')):
return (- 1)
else:
return 1
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv')
df = df.drop(['id', 'member_id', 'funded_amnt', 'funded_amnt_inv', 'sub_grade', 'emp_title', 'issue_d', 'zip_code', 'out_prncp', 'out_prncp_inv', 'total_pymnt', 'total_pymnt_inv', 'total_rec_prncp', 'total_rec_int', 'total_rec_late_fee', 'recoveries', 'collection_recovery_fee', 'last_pymnt_d', 'last_pymnt_amnt', 'desc', 'url', 'title', 'initial_list_status', 'pymnt_plan', 'policy_code', 'application_type', 'earliest_cr_line', 'last_credit_pull_d', 'next_pymnt_d', 'addr_state'], axis=1)
df = df.dropna(thresh=(len(df) / 2), axis=1)
df = df.dropna()
df['loan_status'] = df['loan_status'].apply(status_binary)
return df
#=============
# Function 38
def cleaning_func_7(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv')
new_df = df[(df['addr_state'] == x)]
new_df['weighted'] = ((new_df['int_rate'] / 100) * new_df['funded_amnt'])
return new_df
#=============
# Function 39
def cleaning_func_9(df):
# additional context code from user definitions
def purpose_class(text):
if ((text == 'debt_consolidation') or (text == 'credit_card')):
return 'refinance'
elif ((text == 'house') or (text == 'home_improvement') or (text == 'renewable_energy') or (text == 'moving')):
return 'home'
elif ((text == 'car') or (text == 'major_purchase')):
return 'major_purchase'
else:
return 'other'
def status_binary(text):
if (text == 'Fully Paid'):
return 0
elif ((text == 'Current') or (text == 'Issued')):
return (- 1)
else:
return 1
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv')
df = df.drop(['id', 'member_id', 'funded_amnt', 'funded_amnt_inv', 'sub_grade', 'emp_title', 'issue_d', 'zip_code', 'out_prncp', 'out_prncp_inv', 'total_pymnt', 'total_pymnt_inv', 'total_rec_prncp', 'total_rec_int', 'total_rec_late_fee', 'recoveries', 'collection_recovery_fee', 'last_pymnt_d', 'last_pymnt_amnt', 'desc', 'url', 'title', 'initial_list_status', 'pymnt_plan', 'policy_code', 'application_type', 'earliest_cr_line', 'last_credit_pull_d', 'next_pymnt_d', 'addr_state'], axis=1)
df = df.dropna(thresh=(len(df) / 2), axis=1)
df = df.dropna()
df['loan_status'] = df['loan_status'].apply(status_binary)
df = df[(df['loan_status'] != (- 1))]
df['purpose'] = df['purpose'].apply(purpose_class)
return df
#=============
# Function 40
def cleaning_func_12(df):
# core cleaning code
import pandas as pd
import numpy as np
# df = pd.read_csv('../input/loan.csv')
State_List = []
Loan_Amount = []
Average_Balance = []
Default_Rate = []
Weighted_Rate = []
Average_Income = []
Average_Employment_Length = []
Average_Inq_12 = []
Average_Inq_6 = []
from collections import OrderedDict
combine_data = OrderedDict([('Loan_Funding', Loan_Amount), ('Average_Balance', Average_Balance), ('Default_Rate', Default_Rate), ('Weighted_Rate', Weighted_Rate), ('Average_Income', Average_Income), ('Average_Employment_Length', Average_Employment_Length), ('Average_DTI', DTI_Average), ('12m_Inquiries', Average_Inq_12), ('6m_Inquiries', Average_Inq_6), ('code', State_List)])
df_plot = pd.DataFrame.from_dict(combine_data)
df_plot = df_plot.round(decimals=2)
df_plot[col] = df_plot[col].astype(str)
df_plot[col] = df_plot[col]
df_plot[col].astype = df_plot[col].astype
df_plot[col] = df_plot[col].astype(str)
df_plot[col] = df_plot[col].astype(str)
return df_plot
#=============
# Function 41
def cleaning_func_13(df):
# core cleaning code
import pandas as pd
import numpy as np
# df = pd.read_csv('../input/loan.csv')
State_List = []
Loan_Amount = []
Average_Balance = []
Default_Rate = []
Weighted_Rate = []
Average_Income = []
Average_Employment_Length = []
Average_Inq_12 = []
Average_Inq_6 = []
from collections import OrderedDict
combine_data = OrderedDict([('Loan_Funding', Loan_Amount), ('Average_Balance', Average_Balance), ('Default_Rate', Default_Rate), ('Weighted_Rate', Weighted_Rate), ('Average_Income', Average_Income), ('Average_Employment_Length', Average_Employment_Length), ('Average_DTI', DTI_Average), ('12m_Inquiries', Average_Inq_12), ('6m_Inquiries', Average_Inq_6), ('code', State_List)])
df_plot = pd.DataFrame.from_dict(combine_data)
return df_plot
#=============
# Function 42
def cleaning_func_14(df):
# core cleaning code
import pandas as pd
import numpy as np
# df = pd.read_csv('../input/loan.csv')
State_List = []
Loan_Amount = []
Average_Balance = []
Default_Rate = []
Weighted_Rate = []
Average_Income = []
Average_Employment_Length = []
Average_Inq_12 = []
Average_Inq_6 = []
from collections import OrderedDict
combine_data = OrderedDict([('Loan_Funding', Loan_Amount), ('Average_Balance', Average_Balance), ('Default_Rate', Default_Rate), ('Weighted_Rate', Weighted_Rate), ('Average_Income', Average_Income), ('Average_Employment_Length', Average_Employment_Length), ('Average_DTI', DTI_Average), ('12m_Inquiries', Average_Inq_12), ('6m_Inquiries', Average_Inq_6), ('code', State_List)])
df_plot = pd.DataFrame.from_dict(combine_data)
df_plot = df_plot.round(decimals=2)
df_plot[col] = df_plot[col].astype(str)
df_plot[col] = df_plot[col]
df_plot[col].astype = df_plot[col].astype
df_plot[col] = df_plot[col].astype(str)
df_plot[col] = df_plot[col].astype(str)
df_plot[col] = df_plot[col]
df_plot[col].astype = df_plot[col].astype
state_average_int_rate = df.groupby('addr_state').agg({'int_rate': np.average, 'id': np.count_nonzero, 'annual_inc': np.average})
state_average_int_rate['interest'] = state_average_int_rate['int_rate']
return state_average_int_rate
#=============
# Function 43
def cleaning_func_15(df):
# core cleaning code
import pandas as pd
import numpy as np
# df = pd.read_csv('../input/loan.csv')
State_List = []
Loan_Amount = []
Average_Balance = []
Default_Rate = []
Weighted_Rate = []
Average_Income = []
Average_Employment_Length = []
Average_Inq_12 = []
Average_Inq_6 = []
from collections import OrderedDict
combine_data = OrderedDict([('Loan_Funding', Loan_Amount), ('Average_Balance', Average_Balance), ('Default_Rate', Default_Rate), ('Weighted_Rate', Weighted_Rate), ('Average_Income', Average_Income), ('Average_Employment_Length', Average_Employment_Length), ('Average_DTI', DTI_Average), ('12m_Inquiries', Average_Inq_12), ('6m_Inquiries', Average_Inq_6), ('code', State_List)])
df_plot = pd.DataFrame.from_dict(combine_data)
df_plot = df_plot.round(decimals=2)
df_plot[col] = df_plot[col].astype(str)
df_plot[col] = df_plot[col]
df_plot[col].astype = df_plot[col].astype
df_plot[col] = df_plot[col].astype(str)
df_plot[col] = df_plot[col].astype(str)
df_plot[col] = df_plot[col]
df_plot[col].astype = df_plot[col].astype
state_average_int_rate = df.groupby('addr_state').agg({'int_rate': np.average, 'id': np.count_nonzero, 'annual_inc': np.average})
state_average_int_rate['id'] = state_average_int_rate['id'].astype(str)
return state_average_int_rate
#=============
# Function 44
def cleaning_func_16(df):
# core cleaning code
import pandas as pd
import numpy as np
# df = pd.read_csv('../input/loan.csv')
State_List = []
Loan_Amount = []
Average_Balance = []
Default_Rate = []
Weighted_Rate = []
Average_Income = []
Average_Employment_Length = []
Average_Inq_12 = []
Average_Inq_6 = []
from collections import OrderedDict
combine_data = OrderedDict([('Loan_Funding', Loan_Amount), ('Average_Balance', Average_Balance), ('Default_Rate', Default_Rate), ('Weighted_Rate', Weighted_Rate), ('Average_Income', Average_Income), ('Average_Employment_Length', Average_Employment_Length), ('Average_DTI', DTI_Average), ('12m_Inquiries', Average_Inq_12), ('6m_Inquiries', Average_Inq_6), ('code', State_List)])
df_plot = pd.DataFrame.from_dict(combine_data)
df_plot = df_plot.round(decimals=2)
df_plot[col] = df_plot[col].astype(str)
df_plot[col] = df_plot[col]
df_plot[col].astype = df_plot[col].astype
df_plot[col] = df_plot[col].astype(str)
df_plot[col] = df_plot[col].astype(str)
df_plot[col] = df_plot[col]
df_plot[col].astype = df_plot[col].astype
state_average_int_rate = df.groupby('addr_state').agg({'int_rate': np.average, 'id': np.count_nonzero, 'annual_inc': np.average})
state_average_int_rate['int_rate'] = (('Average Interest Rate: ' + state_average_int_rate['int_rate'].apply((lambda x: str(round(x, 2))))) + '%')
return state_average_int_rate
#=============
# Function 45
def cleaning_func_17(df):
# core cleaning code
import pandas as pd
import numpy as np
# df = pd.read_csv('../input/loan.csv')
State_List = []
Loan_Amount = []
Average_Balance = []
Default_Rate = []
Weighted_Rate = []
Average_Income = []
Average_Employment_Length = []
Average_Inq_12 = []
    Average_Inq_6 = []
    DTI_Average = []
    from collections import OrderedDict
    combine_data = OrderedDict([('Loan_Funding', Loan_Amount), ('Average_Balance', Average_Balance), ('Default_Rate', Default_Rate), ('Weighted_Rate', Weighted_Rate), ('Average_Income', Average_Income), ('Average_Employment_Length', Average_Employment_Length), ('Average_DTI', DTI_Average), ('12m_Inquiries', Average_Inq_12), ('6m_Inquiries', Average_Inq_6), ('code', State_List)])
    df_plot = pd.DataFrame.from_dict(combine_data)
    df_plot = df_plot.round(decimals=2)
    # cast every summary column to string (assumed intent of the column-wise casts)
    for col in df_plot.columns:
        df_plot[col] = df_plot[col].astype(str)
state_average_int_rate = df.groupby('addr_state').agg({'int_rate': np.average, 'id': np.count_nonzero, 'annual_inc': np.average})
state_average_int_rate['annual_inc'] = (state_average_int_rate['annual_inc'] / 1000.0)
state_average_int_rate['annual_inc'] = state_average_int_rate['annual_inc'].apply((lambda x: str(round(x, 2))))
return state_average_int_rate
#=============
# Function 46
def cleaning_func_19(df):
# core cleaning code
import pandas as pd
import numpy as np
# df = pd.read_csv('../input/loan.csv')
State_List = []
Loan_Amount = []
Average_Balance = []
Default_Rate = []
Weighted_Rate = []
Average_Income = []
Average_Employment_Length = []
Average_Inq_12 = []
    Average_Inq_6 = []
    DTI_Average = []
    from collections import OrderedDict
    combine_data = OrderedDict([('Loan_Funding', Loan_Amount), ('Average_Balance', Average_Balance), ('Default_Rate', Default_Rate), ('Weighted_Rate', Weighted_Rate), ('Average_Income', Average_Income), ('Average_Employment_Length', Average_Employment_Length), ('Average_DTI', DTI_Average), ('12m_Inquiries', Average_Inq_12), ('6m_Inquiries', Average_Inq_6), ('code', State_List)])
    df_plot = pd.DataFrame.from_dict(combine_data)
    df_plot = df_plot.round(decimals=2)
    # cast every summary column to string (assumed intent of the column-wise casts)
    for col in df_plot.columns:
        df_plot[col] = df_plot[col].astype(str)
state_average_int_rate = df.groupby('addr_state').agg({'int_rate': np.average, 'id': np.count_nonzero, 'annual_inc': np.average})
state_average_int_rate['id'] = state_average_int_rate['id'].astype(str)
state_average_int_rate['annual_inc'] = (state_average_int_rate['annual_inc'] / 1000.0)
state_average_int_rate['annual_inc'] = state_average_int_rate['annual_inc'].apply((lambda x: str(round(x, 2))))
state_average_int_rate['text'] = ((((('Number of Applicants: ' + state_average_int_rate['id']) + '<br>') + 'Average Annual Inc: $') + state_average_int_rate['annual_inc']) + 'k')
return state_average_int_rate
#=============
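# Example (a minimal sketch, assuming the same '../input/loan.csv' layout as above): the
# per-state pieces built separately by the cleaning_func_15..19 variants can be combined
# into one summary frame with hover text. example_state_summary is an illustrative helper,
# not part of the extracted corpus.
def example_state_summary(df):
    import numpy as np
    s = df.groupby('addr_state').agg({'int_rate': np.average, 'id': np.count_nonzero, 'annual_inc': np.average})
    s['annual_inc'] = (s['annual_inc'] / 1000.0).round(2)
    s['text'] = ((('Number of Applicants: ' + s['id'].astype(str)) + '<br>Average Annual Inc: $') + s['annual_inc'].astype(str)) + 'k'
    s['int_rate'] = ('Average Interest Rate: ' + s['int_rate'].round(2).astype(str)) + '%'
    return s
#=============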
# Function 47
def cleaning_func_20(df):
# additional context code from user definitions
def status_binary(text):
if (text == 'Fully Paid'):
return 0
elif ((text == 'Current') or (text == 'Issued')):
return (- 1)
else:
return 1
# core cleaning code
import pandas as pd
import pandas as pd
# df = pd.read_csv('../input/loan.csv')
df = df.drop(['id', 'member_id', 'funded_amnt', 'funded_amnt_inv', 'sub_grade', 'emp_title', 'issue_d', 'zip_code', 'out_prncp', 'out_prncp_inv', 'total_pymnt', 'total_pymnt_inv', 'total_rec_prncp', 'total_rec_int', 'total_rec_late_fee', 'recoveries', 'collection_recovery_fee', 'last_pymnt_d', 'last_pymnt_amnt', 'desc', 'url', 'title', 'initial_list_status', 'pymnt_plan', 'policy_code', 'application_type', 'earliest_cr_line', 'last_credit_pull_d', 'next_pymnt_d', 'addr_state'], axis=1)
df = df.dropna(thresh=(len(df) / 2), axis=1)
df = df.dropna()
df['loan_status'] = df['loan_status'].apply(status_binary)
df = df[(df['loan_status'] != (- 1))]
dummy_df = pd.get_dummies(df[['home_ownership', 'verification_status', 'purpose', 'term']])
df = df.drop(['home_ownership', 'verification_status', 'purpose', 'term'], axis=1)
df = pd.concat([df, dummy_df], axis=1)
mapping_dict = {'emp_length': {'10+ years': 10, '9 years': 9, '8 years': 8, '7 years': 7, '6 years': 6, '5 years': 5, '4 years': 4, '3 years': 3, '2 years': 2, '1 year': 1, '< 1 year': 0, 'n/a': 0}, 'grade': {'A': 1, 'B': 2, 'C': 3, 'D': 4, 'E': 5, 'F': 6, 'G': 7}}
df = df.replace(mapping_dict)
cols = list(df)
    df = df.loc[:, cols]
from sklearn.model_selection import train_test_split
(train, test) = train_test_split(df, test_size=0.3)
x_train = train.iloc[(slice(0, None, None), slice(1, 34, None))]
y_train = train[['loan_status']]
return y_train
#=============
# Function 48
def cleaning_func_22(df):
# additional context code from user definitions
def status_binary(text):
if (text == 'Fully Paid'):
return 0
elif ((text == 'Current') or (text == 'Issued')):
return (- 1)
else:
return 1
# core cleaning code
import pandas as pd
import pandas as pd
# df = pd.read_csv('../input/loan.csv')
df = df.drop(['id', 'member_id', 'funded_amnt', 'funded_amnt_inv', 'sub_grade', 'emp_title', 'issue_d', 'zip_code', 'out_prncp', 'out_prncp_inv', 'total_pymnt', 'total_pymnt_inv', 'total_rec_prncp', 'total_rec_int', 'total_rec_late_fee', 'recoveries', 'collection_recovery_fee', 'last_pymnt_d', 'last_pymnt_amnt', 'desc', 'url', 'title', 'initial_list_status', 'pymnt_plan', 'policy_code', 'application_type', 'earliest_cr_line', 'last_credit_pull_d', 'next_pymnt_d', 'addr_state'], axis=1)
df = df.dropna(thresh=(len(df) / 2), axis=1)
df = df.dropna()
df['loan_status'] = df['loan_status'].apply(status_binary)
df = df[(df['loan_status'] != (- 1))]
dummy_df = pd.get_dummies(df[['home_ownership', 'verification_status', 'purpose', 'term']])
df = df.drop(['home_ownership', 'verification_status', 'purpose', 'term'], axis=1)
df = pd.concat([df, dummy_df], axis=1)
mapping_dict = {'emp_length': {'10+ years': 10, '9 years': 9, '8 years': 8, '7 years': 7, '6 years': 6, '5 years': 5, '4 years': 4, '3 years': 3, '2 years': 2, '1 year': 1, '< 1 year': 0, 'n/a': 0}, 'grade': {'A': 1, 'B': 2, 'C': 3, 'D': 4, 'E': 5, 'F': 6, 'G': 7}}
df = df.replace(mapping_dict)
cols = list(df)
    df = df.loc[:, cols]
return df
#=============
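# Example (sketch): after status_binary and the -1 filter above, the balance of the binary
# target in the frame returned by cleaning_func_22 can be checked with
#   cleaning_func_22(df)['loan_status'].value_counts(normalize=True)
#=============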
# Function 49
def cleaning_func_23(df):
# additional context code from user definitions
def status_binary(text):
if (text == 'Fully Paid'):
return 0
elif ((text == 'Current') or (text == 'Issued')):
return (- 1)
else:
return 1
# core cleaning code
import pandas as pd
import pandas as pd
# df = pd.read_csv('../input/loan.csv')
df = df.drop(['id', 'member_id', 'funded_amnt', 'funded_amnt_inv', 'sub_grade', 'emp_title', 'issue_d', 'zip_code', 'out_prncp', 'out_prncp_inv', 'total_pymnt', 'total_pymnt_inv', 'total_rec_prncp', 'total_rec_int', 'total_rec_late_fee', 'recoveries', 'collection_recovery_fee', 'last_pymnt_d', 'last_pymnt_amnt', 'desc', 'url', 'title', 'initial_list_status', 'pymnt_plan', 'policy_code', 'application_type', 'earliest_cr_line', 'last_credit_pull_d', 'next_pymnt_d', 'addr_state'], axis=1)
df = df.dropna(thresh=(len(df) / 2), axis=1)
df = df.dropna()
df['loan_status'] = df['loan_status'].apply(status_binary)
df = df[(df['loan_status'] != (- 1))]
dummy_df = pd.get_dummies(df[['home_ownership', 'verification_status', 'purpose', 'term']])
df = df.drop(['home_ownership', 'verification_status', 'purpose', 'term'], axis=1)
df = pd.concat([df, dummy_df], axis=1)
mapping_dict = {'emp_length': {'10+ years': 10, '9 years': 9, '8 years': 8, '7 years': 7, '6 years': 6, '5 years': 5, '4 years': 4, '3 years': 3, '2 years': 2, '1 year': 1, '< 1 year': 0, 'n/a': 0}, 'grade': {'A': 1, 'B': 2, 'C': 3, 'D': 4, 'E': 5, 'F': 6, 'G': 7}}
df = df.replace(mapping_dict)
cols = list(df)
    df = df.loc[:, cols]
from sklearn.model_selection import train_test_split
(train, test) = train_test_split(df, test_size=0.3)
x_train = train.iloc[(slice(0, None, None), slice(1, 34, None))]
return x_train
#=============
# Function 50
def cleaning_func_25(df):
# additional context code from user definitions
def status_binary(text):
if (text == 'Fully Paid'):
return 0
elif ((text == 'Current') or (text == 'Issued')):
return (- 1)
else:
return 1
# core cleaning code
import pandas as pd
import pandas as pd
# df = pd.read_csv('../input/loan.csv')
df = df.drop(['id', 'member_id', 'funded_amnt', 'funded_amnt_inv', 'sub_grade', 'emp_title', 'issue_d', 'zip_code', 'out_prncp', 'out_prncp_inv', 'total_pymnt', 'total_pymnt_inv', 'total_rec_prncp', 'total_rec_int', 'total_rec_late_fee', 'recoveries', 'collection_recovery_fee', 'last_pymnt_d', 'last_pymnt_amnt', 'desc', 'url', 'title', 'initial_list_status', 'pymnt_plan', 'policy_code', 'application_type', 'earliest_cr_line', 'last_credit_pull_d', 'next_pymnt_d', 'addr_state'], axis=1)
df = df.dropna(thresh=(len(df) / 2), axis=1)
df = df.dropna()
df['loan_status'] = df['loan_status'].apply(status_binary)
df = df[(df['loan_status'] != (- 1))]
dummy_df = pd.get_dummies(df[['home_ownership', 'verification_status', 'purpose', 'term']])
df = df.drop(['home_ownership', 'verification_status', 'purpose', 'term'], axis=1)
df = pd.concat([df, dummy_df], axis=1)
mapping_dict = {'emp_length': {'10+ years': 10, '9 years': 9, '8 years': 8, '7 years': 7, '6 years': 6, '5 years': 5, '4 years': 4, '3 years': 3, '2 years': 2, '1 year': 1, '< 1 year': 0, 'n/a': 0}, 'grade': {'A': 1, 'B': 2, 'C': 3, 'D': 4, 'E': 5, 'F': 6, 'G': 7}}
df = df.replace(mapping_dict)
cols = list(df)
    df = df.loc[:, cols]
from sklearn.model_selection import train_test_split
(train, test) = train_test_split(df, test_size=0.3)
x_train = train.iloc[(slice(0, None, None), slice(1, 34, None))]
y_train = train[['loan_status']]
x_test = test.iloc[(slice(0, None, None), slice(1, 34, None))]
y_test = test[['loan_status']]
return y_test
#=============
# Function 51
def cleaning_func_26(df):
# additional context code from user definitions
def status_binary(text):
if (text == 'Fully Paid'):
return 0
elif ((text == 'Current') or (text == 'Issued')):
return (- 1)
else:
return 1
# core cleaning code
import pandas as pd
import pandas as pd
# df = pd.read_csv('../input/loan.csv')
df = df.drop(['id', 'member_id', 'funded_amnt', 'funded_amnt_inv', 'sub_grade', 'emp_title', 'issue_d', 'zip_code', 'out_prncp', 'out_prncp_inv', 'total_pymnt', 'total_pymnt_inv', 'total_rec_prncp', 'total_rec_int', 'total_rec_late_fee', 'recoveries', 'collection_recovery_fee', 'last_pymnt_d', 'last_pymnt_amnt', 'desc', 'url', 'title', 'initial_list_status', 'pymnt_plan', 'policy_code', 'application_type', 'earliest_cr_line', 'last_credit_pull_d', 'next_pymnt_d', 'addr_state'], axis=1)
df = df.dropna(thresh=(len(df) / 2), axis=1)
df = df.dropna()
df['loan_status'] = df['loan_status'].apply(status_binary)
df = df[(df['loan_status'] != (- 1))]
dummy_df = pd.get_dummies(df[['home_ownership', 'verification_status', 'purpose', 'term']])
df = df.drop(['home_ownership', 'verification_status', 'purpose', 'term'], axis=1)
df = pd.concat([df, dummy_df], axis=1)
mapping_dict = {'emp_length': {'10+ years': 10, '9 years': 9, '8 years': 8, '7 years': 7, '6 years': 6, '5 years': 5, '4 years': 4, '3 years': 3, '2 years': 2, '1 year': 1, '< 1 year': 0, 'n/a': 0}, 'grade': {'A': 1, 'B': 2, 'C': 3, 'D': 4, 'E': 5, 'F': 6, 'G': 7}}
df = df.replace(mapping_dict)
cols = list(df)
    df = df.loc[:, cols]
from sklearn.model_selection import train_test_split
(train, test) = train_test_split(df, test_size=0.3)
x_train = train.iloc[(slice(0, None, None), slice(1, 34, None))]
y_train = train[['loan_status']]
x_test = test.iloc[(slice(0, None, None), slice(1, 34, None))]
y_test = test[['loan_status']]
method = ['Decision Tree', 'Random Forests', 'Logistic Regression']
false_paid = pd.DataFrame([[0, 0, 0], [0, 0, 0]], columns=method, index=['train', 'test'])
default_identified = pd.DataFrame([[0, 0, 0], [0, 0, 0]], columns=method, index=['train', 'test'])
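    # false_paid tracks the share of loans predicted as fully paid that actually defaulted;
    # default_identified tracks the share of true defaults the model flags, per method and split.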
from sklearn.tree import DecisionTreeClassifier
from sklearn import tree
model = tree.DecisionTreeClassifier(max_depth=5, criterion='entropy', class_weight={0: 0.15, 1: 0.85})
from sklearn.metrics import confusion_matrix
import numpy as np
    # fit on the training split before generating predictions
    model.fit(x_train, y_train.values.ravel())
    p_train = model.predict(x_train)
p_test = model.predict(x_test)
(fully_paid, default) = confusion_matrix(p_train, np.array(y_train))
false_paid.loc[('train', 'Decision Tree')] = ((100 * fully_paid[1]) / (fully_paid[0] + fully_paid[1]))
default_identified.loc[('train', 'Decision Tree')] = ((100 * default[1]) / (default[1] + fully_paid[1]))
(fully_paid, default) = confusion_matrix(p_test, np.array(y_test))
false_paid.loc[('test', 'Decision Tree')] = ((100 * fully_paid[1]) / (fully_paid[0] + fully_paid[1]))
return false_paid
#=============
# Function 52
def cleaning_func_27(df):
# additional context code from user definitions
def status_binary(text):
if (text == 'Fully Paid'):
return 0
elif ((text == 'Current') or (text == 'Issued')):
return (- 1)
else:
return 1
# core cleaning code
import pandas as pd
import pandas as pd
# df = pd.read_csv('../input/loan.csv')
df = df.drop(['id', 'member_id', 'funded_amnt', 'funded_amnt_inv', 'sub_grade', 'emp_title', 'issue_d', 'zip_code', 'out_prncp', 'out_prncp_inv', 'total_pymnt', 'total_pymnt_inv', 'total_rec_prncp', 'total_rec_int', 'total_rec_late_fee', 'recoveries', 'collection_recovery_fee', 'last_pymnt_d', 'last_pymnt_amnt', 'desc', 'url', 'title', 'initial_list_status', 'pymnt_plan', 'policy_code', 'application_type', 'earliest_cr_line', 'last_credit_pull_d', 'next_pymnt_d', 'addr_state'], axis=1)
df = df.dropna(thresh=(len(df) / 2), axis=1)
df = df.dropna()
df['loan_status'] = df['loan_status'].apply(status_binary)
df = df[(df['loan_status'] != (- 1))]
dummy_df = pd.get_dummies(df[['home_ownership', 'verification_status', 'purpose', 'term']])
df = df.drop(['home_ownership', 'verification_status', 'purpose', 'term'], axis=1)
df = pd.concat([df, dummy_df], axis=1)
mapping_dict = {'emp_length': {'10+ years': 10, '9 years': 9, '8 years': 8, '7 years': 7, '6 years': 6, '5 years': 5, '4 years': 4, '3 years': 3, '2 years': 2, '1 year': 1, '< 1 year': 0, 'n/a': 0}, 'grade': {'A': 1, 'B': 2, 'C': 3, 'D': 4, 'E': 5, 'F': 6, 'G': 7}}
df = df.replace(mapping_dict)
cols = list(df)
    df = df.loc[:, cols]
from sklearn.model_selection import train_test_split
(train, test) = train_test_split(df, test_size=0.3)
return train
#=============
# Function 53
def cleaning_func_28(df):
# additional context code from user definitions
def status_binary(text):
if (text == 'Fully Paid'):
return 0
elif ((text == 'Current') or (text == 'Issued')):
return (- 1)
else:
return 1
# core cleaning code
import pandas as pd
import pandas as pd
# df = pd.read_csv('../input/loan.csv')
df = df.drop(['id', 'member_id', 'funded_amnt', 'funded_amnt_inv', 'sub_grade', 'emp_title', 'issue_d', 'zip_code', 'out_prncp', 'out_prncp_inv', 'total_pymnt', 'total_pymnt_inv', 'total_rec_prncp', 'total_rec_int', 'total_rec_late_fee', 'recoveries', 'collection_recovery_fee', 'last_pymnt_d', 'last_pymnt_amnt', 'desc', 'url', 'title', 'initial_list_status', 'pymnt_plan', 'policy_code', 'application_type', 'earliest_cr_line', 'last_credit_pull_d', 'next_pymnt_d', 'addr_state'], axis=1)
df = df.dropna(thresh=(len(df) / 2), axis=1)
df = df.dropna()
df['loan_status'] = df['loan_status'].apply(status_binary)
df = df[(df['loan_status'] != (- 1))]
dummy_df = pd.get_dummies(df[['home_ownership', 'verification_status', 'purpose', 'term']])
df = df.drop(['home_ownership', 'verification_status', 'purpose', 'term'], axis=1)
df = pd.concat([df, dummy_df], axis=1)
mapping_dict = {'emp_length': {'10+ years': 10, '9 years': 9, '8 years': 8, '7 years': 7, '6 years': 6, '5 years': 5, '4 years': 4, '3 years': 3, '2 years': 2, '1 year': 1, '< 1 year': 0, 'n/a': 0}, 'grade': {'A': 1, 'B': 2, 'C': 3, 'D': 4, 'E': 5, 'F': 6, 'G': 7}}
df = df.replace(mapping_dict)
return df
#=============
# Function 54
def cleaning_func_30(df):
# additional context code from user definitions
def status_binary(text):
if (text == 'Fully Paid'):
return 0
elif ((text == 'Current') or (text == 'Issued')):
return (- 1)
else:
return 1
# core cleaning code
import pandas as pd
import pandas as pd
# df = pd.read_csv('../input/loan.csv')
df = df.drop(['id', 'member_id', 'funded_amnt', 'funded_amnt_inv', 'sub_grade', 'emp_title', 'issue_d', 'zip_code', 'out_prncp', 'out_prncp_inv', 'total_pymnt', 'total_pymnt_inv', 'total_rec_prncp', 'total_rec_int', 'total_rec_late_fee', 'recoveries', 'collection_recovery_fee', 'last_pymnt_d', 'last_pymnt_amnt', 'desc', 'url', 'title', 'initial_list_status', 'pymnt_plan', 'policy_code', 'application_type', 'earliest_cr_line', 'last_credit_pull_d', 'next_pymnt_d', 'addr_state'], axis=1)
df = df.dropna(thresh=(len(df) / 2), axis=1)
df = df.dropna()
df['loan_status'] = df['loan_status'].apply(status_binary)
df = df[(df['loan_status'] != (- 1))]
dummy_df = pd.get_dummies(df[['home_ownership', 'verification_status', 'purpose', 'term']])
return dummy_df
#=============
# Function 55
def cleaning_func_35(df):
# additional context code from user definitions
def status_binary(text):
if (text == 'Fully Paid'):
return 0
elif ((text == 'Current') or (text == 'Issued')):
return (- 1)
else:
return 1
# core cleaning code
import pandas as pd
import pandas as pd
# df = pd.read_csv('../input/loan.csv')
df = df.drop(['id', 'member_id', 'funded_amnt', 'funded_amnt_inv', 'sub_grade', 'emp_title', 'issue_d', 'zip_code', 'out_prncp', 'out_prncp_inv', 'total_pymnt', 'total_pymnt_inv', 'total_rec_prncp', 'total_rec_int', 'total_rec_late_fee', 'recoveries', 'collection_recovery_fee', 'last_pymnt_d', 'last_pymnt_amnt', 'desc', 'url', 'title', 'initial_list_status', 'pymnt_plan', 'policy_code', 'application_type', 'earliest_cr_line', 'last_credit_pull_d', 'next_pymnt_d', 'addr_state'], axis=1)
df = df.dropna(thresh=(len(df) / 2), axis=1)
df = df.dropna()
df['loan_status'] = df['loan_status'].apply(status_binary)
df = df[(df['loan_status'] != (- 1))]
dummy_df = pd.get_dummies(df[['home_ownership', 'verification_status', 'purpose', 'term']])
df = df.drop(['home_ownership', 'verification_status', 'purpose', 'term'], axis=1)
df = pd.concat([df, dummy_df], axis=1)
return df
#=============
# Function 56
def cleaning_func_36(df):
# additional context code from user definitions
def status_binary(text):
if (text == 'Fully Paid'):
return 0
elif ((text == 'Current') or (text == 'Issued')):
return (- 1)
else:
return 1
# core cleaning code
import pandas as pd
import pandas as pd
# df = pd.read_csv('../input/loan.csv')
df = df.drop(['id', 'member_id', 'funded_amnt', 'funded_amnt_inv', 'sub_grade', 'emp_title', 'issue_d', 'zip_code', 'out_prncp', 'out_prncp_inv', 'total_pymnt', 'total_pymnt_inv', 'total_rec_prncp', 'total_rec_int', 'total_rec_late_fee', 'recoveries', 'collection_recovery_fee', 'last_pymnt_d', 'last_pymnt_amnt', 'desc', 'url', 'title', 'initial_list_status', 'pymnt_plan', 'policy_code', 'application_type', 'earliest_cr_line', 'last_credit_pull_d', 'next_pymnt_d', 'addr_state'], axis=1)
df = df.dropna(thresh=(len(df) / 2), axis=1)
df = df.dropna()
df['loan_status'] = df['loan_status'].apply(status_binary)
df = df[(df['loan_status'] != (- 1))]
dummy_df = pd.get_dummies(df[['home_ownership', 'verification_status', 'purpose', 'term']])
df = df.drop(['home_ownership', 'verification_status', 'purpose', 'term'], axis=1)
df = pd.concat([df, dummy_df], axis=1)
mapping_dict = {'emp_length': {'10+ years': 10, '9 years': 9, '8 years': 8, '7 years': 7, '6 years': 6, '5 years': 5, '4 years': 4, '3 years': 3, '2 years': 2, '1 year': 1, '< 1 year': 0, 'n/a': 0}, 'grade': {'A': 1, 'B': 2, 'C': 3, 'D': 4, 'E': 5, 'F': 6, 'G': 7}}
df = df.replace(mapping_dict)
cols = list(df)
    df = df.loc[:, cols]
from sklearn.model_selection import train_test_split
(train, test) = train_test_split(df, test_size=0.3)
x_train = train.iloc[(slice(0, None, None), slice(1, 34, None))]
y_train = train[['loan_status']]
x_test = test.iloc[(slice(0, None, None), slice(1, 34, None))]
return x_test
#=============
# Function 57
def cleaning_func_37(df):
# additional context code from user definitions
def status_binary(text):
if (text == 'Fully Paid'):
return 0
elif ((text == 'Current') or (text == 'Issued')):
return (- 1)
else:
return 1
# core cleaning code
import pandas as pd
import pandas as pd
# df = pd.read_csv('../input/loan.csv')
df = df.drop(['id', 'member_id', 'funded_amnt', 'funded_amnt_inv', 'sub_grade', 'emp_title', 'issue_d', 'zip_code', 'out_prncp', 'out_prncp_inv', 'total_pymnt', 'total_pymnt_inv', 'total_rec_prncp', 'total_rec_int', 'total_rec_late_fee', 'recoveries', 'collection_recovery_fee', 'last_pymnt_d', 'last_pymnt_amnt', 'desc', 'url', 'title', 'initial_list_status', 'pymnt_plan', 'policy_code', 'application_type', 'earliest_cr_line', 'last_credit_pull_d', 'next_pymnt_d', 'addr_state'], axis=1)
df = df.dropna(thresh=(len(df) / 2), axis=1)
df = df.dropna()
df['loan_status'] = df['loan_status'].apply(status_binary)
df = df[(df['loan_status'] != (- 1))]
dummy_df = pd.get_dummies(df[['home_ownership', 'verification_status', 'purpose', 'term']])
df = df.drop(['home_ownership', 'verification_status', 'purpose', 'term'], axis=1)
df = pd.concat([df, dummy_df], axis=1)
mapping_dict = {'emp_length': {'10+ years': 10, '9 years': 9, '8 years': 8, '7 years': 7, '6 years': 6, '5 years': 5, '4 years': 4, '3 years': 3, '2 years': 2, '1 year': 1, '< 1 year': 0, 'n/a': 0}, 'grade': {'A': 1, 'B': 2, 'C': 3, 'D': 4, 'E': 5, 'F': 6, 'G': 7}}
df = df.replace(mapping_dict)
cols = list(df)
    df = df.loc[:, cols]
from sklearn.model_selection import train_test_split
(train, test) = train_test_split(df, test_size=0.3)
x_train = train.iloc[(slice(0, None, None), slice(1, 34, None))]
y_train = train[['loan_status']]
x_test = test.iloc[(slice(0, None, None), slice(1, 34, None))]
y_test = test[['loan_status']]
method = ['Decision Tree', 'Random Forests', 'Logistic Regression']
false_paid = pd.DataFrame([[0, 0, 0], [0, 0, 0]], columns=method, index=['train', 'test'])
default_identified = pd.DataFrame([[0, 0, 0], [0, 0, 0]], columns=method, index=['train', 'test'])
from sklearn.tree import DecisionTreeClassifier
from sklearn import tree
model = tree.DecisionTreeClassifier(max_depth=5, criterion='entropy', class_weight={0: 0.15, 1: 0.85})
from sklearn.metrics import confusion_matrix
import numpy as np
    # fit on the training split before generating predictions
    model.fit(x_train, y_train.values.ravel())
    p_train = model.predict(x_train)
p_test = model.predict(x_test)
(fully_paid, default) = confusion_matrix(p_train, np.array(y_train))
false_paid.loc[('train', 'Decision Tree')] = ((100 * fully_paid[1]) / (fully_paid[0] + fully_paid[1]))
default_identified.loc[('train', 'Decision Tree')] = ((100 * default[1]) / (default[1] + fully_paid[1]))
(fully_paid, default) = confusion_matrix(p_test, np.array(y_test))
false_paid.loc[('test', 'Decision Tree')] = ((100 * fully_paid[1]) / (fully_paid[0] + fully_paid[1]))
default_identified.loc[('test', 'Decision Tree')] = ((100 * default[1]) / (default[1] + fully_paid[1]))
from sklearn.ensemble import RandomForestClassifier
model = RandomForestClassifier(max_depth=6, n_estimators=10, class_weight={0: 0.15, 1: 0.85})
from sklearn.metrics import confusion_matrix
    # fit on the training split before generating predictions
    model.fit(x_train, y_train.values.ravel())
    p_train = model.predict(x_train)
p_test = model.predict(x_test)
(fully_paid, default) = confusion_matrix(p_train, np.array(y_train))
false_paid.loc[('train', 'Random Forests')] = ((100 * fully_paid[1]) / (fully_paid[0] + fully_paid[1]))
default_identified.loc[('train', 'Random Forests')] = ((100 * default[1]) / (default[1] + fully_paid[1]))
(fully_paid, default) = confusion_matrix(p_test, np.array(y_test))
false_paid.loc[('test', 'Random Forests')] = ((100 * fully_paid[1]) / (fully_paid[0] + fully_paid[1]))
return false_paid
#=============
# Function 58
def cleaning_func_39(df):
# additional context code from user definitions
def status_binary(text):
if (text == 'Fully Paid'):
return 0
elif ((text == 'Current') or (text == 'Issued')):
return (- 1)
else:
return 1
# core cleaning code
import pandas as pd
import pandas as pd
# df = pd.read_csv('../input/loan.csv')
df = df.drop(['id', 'member_id', 'funded_amnt', 'funded_amnt_inv', 'sub_grade', 'emp_title', 'issue_d', 'zip_code', 'out_prncp', 'out_prncp_inv', 'total_pymnt', 'total_pymnt_inv', 'total_rec_prncp', 'total_rec_int', 'total_rec_late_fee', 'recoveries', 'collection_recovery_fee', 'last_pymnt_d', 'last_pymnt_amnt', 'desc', 'url', 'title', 'initial_list_status', 'pymnt_plan', 'policy_code', 'application_type', 'earliest_cr_line', 'last_credit_pull_d', 'next_pymnt_d', 'addr_state'], axis=1)
df = df.dropna(thresh=(len(df) / 2), axis=1)
df = df.dropna()
df['loan_status'] = df['loan_status'].apply(status_binary)
df = df[(df['loan_status'] != (- 1))]
dummy_df = pd.get_dummies(df[['home_ownership', 'verification_status', 'purpose', 'term']])
df = df.drop(['home_ownership', 'verification_status', 'purpose', 'term'], axis=1)
df = pd.concat([df, dummy_df], axis=1)
mapping_dict = {'emp_length': {'10+ years': 10, '9 years': 9, '8 years': 8, '7 years': 7, '6 years': 6, '5 years': 5, '4 years': 4, '3 years': 3, '2 years': 2, '1 year': 1, '< 1 year': 0, 'n/a': 0}, 'grade': {'A': 1, 'B': 2, 'C': 3, 'D': 4, 'E': 5, 'F': 6, 'G': 7}}
df = df.replace(mapping_dict)
cols = list(df)
    df = df.loc[:, cols]
from sklearn.model_selection import train_test_split
(train, test) = train_test_split(df, test_size=0.3)
x_train = train.iloc[(slice(0, None, None), slice(1, 34, None))]
y_train = train[['loan_status']]
x_test = test.iloc[(slice(0, None, None), slice(1, 34, None))]
y_test = test[['loan_status']]
method = ['Decision Tree', 'Random Forests', 'Logistic Regression']
false_paid = pd.DataFrame([[0, 0, 0], [0, 0, 0]], columns=method, index=['train', 'test'])
default_identified = pd.DataFrame([[0, 0, 0], [0, 0, 0]], columns=method, index=['train', 'test'])
from sklearn.tree import DecisionTreeClassifier
from sklearn import tree
model = tree.DecisionTreeClassifier(max_depth=5, criterion='entropy', class_weight={0: 0.15, 1: 0.85})
from sklearn.metrics import confusion_matrix
import numpy as np
    # fit on the training split before generating predictions
    model.fit(x_train, y_train.values.ravel())
    p_train = model.predict(x_train)
p_test = model.predict(x_test)
(fully_paid, default) = confusion_matrix(p_train, np.array(y_train))
false_paid.loc[('train', 'Decision Tree')] = ((100 * fully_paid[1]) / (fully_paid[0] + fully_paid[1]))
default_identified.loc[('train', 'Decision Tree')] = ((100 * default[1]) / (default[1] + fully_paid[1]))
(fully_paid, default) = confusion_matrix(p_test, np.array(y_test))
false_paid.loc[('test', 'Decision Tree')] = ((100 * fully_paid[1]) / (fully_paid[0] + fully_paid[1]))
default_identified.loc[('test', 'Decision Tree')] = ((100 * default[1]) / (default[1] + fully_paid[1]))
from sklearn.ensemble import RandomForestClassifier
model = RandomForestClassifier(max_depth=6, n_estimators=10, class_weight={0: 0.15, 1: 0.85})
from sklearn.metrics import confusion_matrix
    # fit on the training split before generating predictions
    model.fit(x_train, y_train.values.ravel())
    p_train = model.predict(x_train)
p_test = model.predict(x_test)
(fully_paid, default) = confusion_matrix(p_train, np.array(y_train))
false_paid.loc[('train', 'Random Forests')] = ((100 * fully_paid[1]) / (fully_paid[0] + fully_paid[1]))
default_identified.loc[('train', 'Random Forests')] = ((100 * default[1]) / (default[1] + fully_paid[1]))
(fully_paid, default) = confusion_matrix(p_test, np.array(y_test))
false_paid.loc[('test', 'Random Forests')] = ((100 * fully_paid[1]) / (fully_paid[0] + fully_paid[1]))
default_identified.loc[('test', 'Random Forests')] = ((100 * default[1]) / (default[1] + fully_paid[1]))
from sklearn.linear_model import LogisticRegression
import numpy as np
model = LogisticRegression(class_weight={0: 0.15, 1: 0.85})
from sklearn.metrics import confusion_matrix
    # fit on the training split before generating predictions
    model.fit(x_train, y_train.values.ravel())
    p_train = model.predict(x_train)
(fully_paid, default) = confusion_matrix(p_train, np.array(y_train))
false_paid.loc[('train', 'Logistic Regression')] = ((100 * fully_paid[1]) / (fully_paid[0] + fully_paid[1]))
return false_paid
#=============
# Function 59
def cleaning_func_0(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['90day_worse_rating'] = np.where(loan['mths_since_last_major_derog'].isnull(), 0, 1)
return loan
#=============
# Function 60
def cleaning_func_1(loan):
# core cleaning code
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['tot_coll_amt'] = loan['tot_coll_amt'].fillna(loan['tot_coll_amt'].median())
return loan
#=============
# Function 61
def cleaning_func_2(loan):
# core cleaning code
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['revol_util'] = loan['revol_util'].fillna(loan['revol_util'].median())
return loan
#=============
# Function 62
def cleaning_func_3(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['total_acc'] = np.where(loan['total_acc'].isnull(), 0, loan['total_acc'])
return loan
#=============
# Function 63
def cleaning_func_4(loan):
# core cleaning code
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['tot_cur_bal'] = loan['tot_cur_bal'].fillna(loan['tot_cur_bal'].median())
return loan
#=============
# Function 64
def cleaning_func_5(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['open_acc'] = np.where(loan['open_acc'].isnull(), 0, loan['open_acc'])
return loan
#=============
# Function 65
def cleaning_func_6(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['collections_12_mths_ex_med'] = np.where(loan['collections_12_mths_ex_med'].isnull(), 0, loan['collections_12_mths_ex_med'])
return loan
#=============
# Function 66
def cleaning_func_7(loan):
# core cleaning code
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['total_rev_hi_lim'] = loan['total_rev_hi_lim'].fillna(loan['total_rev_hi_lim'].median())
return loan
#=============
# Function 67
def cleaning_func_8(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['title'] = np.where(loan['title'].isnull(), 0, loan['title'])
return loan
#=============
# Function 68
def cleaning_func_9(loan):
# core cleaning code
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['annual_inc'] = loan['annual_inc'].fillna(loan['annual_inc'].median())
return loan
#=============
# Function 69
def cleaning_func_10(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['delinq_2yrs'] = np.where(loan['delinq_2yrs'].isnull(), 0, loan['delinq_2yrs'])
return loan
#=============
# Function 70
def cleaning_func_11(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['acc_now_delinq'] = np.where(loan['acc_now_delinq'].isnull(), 0, loan['acc_now_delinq'])
return loan
#=============
# Function 71
def cleaning_func_12(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['inq_last_6mths'] = np.where(loan['inq_last_6mths'].isnull(), 0, loan['inq_last_6mths'])
return loan
#=============
# Function 72
def cleaning_func_13(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['pub_rec'] = np.where(loan['pub_rec'].isnull(), 0, loan['pub_rec'])
return loan
#=============
# Function 73
def cleaning_func_14(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['emp_title'] = np.where(loan['emp_title'].isnull(), 'Job title not given', loan['emp_title'])
return loan
#=============
# Function 74
def cleaning_func_15(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['mths_since_last_delinq'] = np.where(loan['mths_since_last_delinq'].isnull(), 188, loan['mths_since_last_delinq'])
return loan
#=============
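# A minimal sketch (assuming the same loan.csv columns as above) that chains the single-column
# imputations from Functions 59-74 into one pass over the frame. example_impute_loan is an
# illustrative helper, not part of the extracted corpus.
def example_impute_loan(loan):
    import numpy as np
    for col in ['tot_coll_amt', 'tot_cur_bal', 'revol_util', 'total_rev_hi_lim', 'annual_inc']:
        loan[col] = loan[col].fillna(loan[col].median())
    for col in ['delinq_2yrs', 'inq_last_6mths', 'open_acc', 'pub_rec', 'total_acc', 'acc_now_delinq', 'collections_12_mths_ex_med', 'title']:
        loan[col] = np.where(loan[col].isnull(), 0, loan[col])
    loan['emp_title'] = np.where(loan['emp_title'].isnull(), 'Job title not given', loan['emp_title'])
    loan['mths_since_last_delinq'] = np.where(loan['mths_since_last_delinq'].isnull(), 188, loan['mths_since_last_delinq'])
    loan['90day_worse_rating'] = np.where(loan['mths_since_last_major_derog'].isnull(), 0, 1)
    return loan
#=============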
# Function 75
def cleaning_func_0(ld):
# core cleaning code
import pandas as pd
# ld = pd.read_csv('../input/loan.csv', low_memory=False, parse_dates=True)
pct_full = (ld.count() / len(ld))
names = list(pct_full[(pct_full > 0.75)].index)
loan = ld[names]
loan['pct_paid'] = (loan.out_prncp / loan.loan_amnt)
return loan
#=============
# Function 76
def cleaning_func_1(ld):
# core cleaning code
import pandas as pd
# ld = pd.read_csv('../input/loan.csv', low_memory=False, parse_dates=True)
pct_full = (ld.count() / len(ld))
names = list(pct_full[(pct_full > 0.75)].index)
loan = ld[names]
loan['issue_mo'] = loan.issue_d.str[slice(0, 3, None)]
return loan
#=============
# Function 77
def cleaning_func_2(ld):
# core cleaning code
import pandas as pd
# ld = pd.read_csv('../input/loan.csv', low_memory=False, parse_dates=True)
pct_full = (ld.count() / len(ld))
names = list(pct_full[(pct_full > 0.75)].index)
loan = ld[names]
loan['issue_year'] = loan.issue_d.str[slice(4, None, None)]
return loan
#=============
# Function 78
def cleaning_func_0(df_loan):
# core cleaning code
import pandas as pd
# df_loan = pd.read_csv('../input/loan.csv', low_memory=False)
df_loan.loc[((df_loan.loan_status == 'Does not meet the credit policy. Status:Fully Paid'), 'loan_status')] = 'NMCP Fully Paid'
df_loan.loc[((df_loan.loan_status == 'Does not meet the credit policy. Status:Charged Off'), 'loan_status')] = 'NMCP Charged Off'
return df_loan
#=============
# Function 79
def cleaning_func_1(df_loan):
# core cleaning code
import pandas as pd
# df_loan = pd.read_csv('../input/loan.csv', low_memory=False)
(df_loan['issue_month'], df_loan['issue_year']) = df_loan['issue_d'].str.split('-', 1).str
return df_loan
#=============
# Function 80
def cleaning_func_2(df_loan):
# core cleaning code
import pandas as pd
# df_loan = pd.read_csv('../input/loan.csv', low_memory=False)
df_loan['int_round'] = df_loan['int_rate'].round(0).astype(int)
return df_loan
#=============
# Function 81
def cleaning_func_3(df_loan):
# core cleaning code
import pandas as pd
# df_loan = pd.read_csv('../input/loan.csv', low_memory=False)
(df_loan['issue_month'], df_loan['issue_year']) = df_loan['issue_d'].str.split('-', 1).str
months_order = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
df_loan['issue_month'] = pd.Categorical(df_loan['issue_month'], categories=months_order, ordered=True)
return df_loan
#=============
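# Note: because 'issue_month' above is an ordered categorical, downstream summaries such as
#   df_loan.groupby('issue_month')['loan_amnt'].sum()
# come back in calendar order rather than alphabetical order.
#=============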
# Function 82
def cleaning_func_0(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', low_memory=False)
df.mths_since_last_delinq = df.mths_since_last_delinq.fillna(df.mths_since_last_delinq.median())
return df
#=============
# Function 83
def cleaning_func_1(df):
# core cleaning code
import numpy as np
import pandas as pd
# df = pd.read_csv('../input/loan.csv', low_memory=False)
df['good_loan'] = np.where((((df.loan_status == 'Fully Paid') | (df.loan_status == 'Current')) | (df.loan_status == 'Does not meet the credit policy. Status:Fully Paid')), 1, 0)
return df
#=============
# Function 84
def cleaning_func_0(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data['bad_loan'] = 0
return data
#=============
# Function 85
def cleaning_func_1(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data['issue_dt'] = pd.to_datetime(data.issue_d)
return data
#=============
# Function 86
def cleaning_func_2(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
    bad_indicators = ['Charged Off', 'Default', 'Does not meet the credit policy. Status:Charged Off', 'In Grace Period', 'Default Receiver', 'Late (16-30 days)', 'Late (31-120 days)']
data.loc[(data.loan_status.isin(bad_indicators), 'bad_loan')] = 1
return data
#=============
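# Example (sketch): with the bad-loan flag set above, the overall bad-loan rate on the
# returned frame is simply data['bad_loan'].mean()
#=============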
# Function 87
def cleaning_func_3(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data['issue_dt'] = pd.to_datetime(data.issue_d)
data['month'] = data['issue_dt'].dt.month
return data
#=============
# Function 88
def cleaning_func_4(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data['issue_dt'] = pd.to_datetime(data.issue_d)
data['year'] = data['issue_dt'].dt.year
return data
#=============
# Function 89
def cleaning_func_0(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', low_memory=False)
df = df.loc[(df['loan_status'] != 'Current')]
return df
#=============
# Function 90
def cleaning_func_1(df):
# additional context code from user definitions
def duplicate_columns(df, return_dataframe=False, verbose=True):
'\n a function to detect and possibly remove duplicated columns for a pandas dataframe\n '
        try:
            from pandas.core.dtypes.missing import array_equivalent
        except ImportError:
            # older pandas exposed this helper from pandas.core.common
            from pandas.core.common import array_equivalent
groups = df.columns.to_series().groupby(df.dtypes).groups
duplicated_columns = []
for (dtype, col_names) in groups.items():
column_values = df[col_names]
num_columns = len(col_names)
for i in range(num_columns):
column_i = column_values.iloc[:, i].values
for j in range((i + 1), num_columns):
column_j = column_values.iloc[:, j].values
if array_equivalent(column_i, column_j):
if verbose:
print('column {} is a duplicate of column {}'.format(col_names[i], col_names[j]))
duplicated_columns.append(col_names[i])
break
if (not return_dataframe):
return duplicated_columns
else:
return df.drop(labels=duplicated_columns, axis=1)
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', low_memory=False)
df = df.loc[(df['loan_status'] != 'Current')]
df = duplicate_columns(df, return_dataframe=True)
df['loan_Default'] = int(0)
return df
#=============
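# Example (sketch): duplicate_columns above can also be called on its own to list the
# exact-duplicate columns before deciding what to drop, e.g.
#   dupes = duplicate_columns(df, return_dataframe=False, verbose=False)
#=============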
# Function 91
def cleaning_func_0(loans):
# core cleaning code
import pandas as pd
date = ['issue_d', 'last_pymnt_d']
cols = ['issue_d', 'term', 'int_rate', 'loan_amnt', 'total_pymnt', 'last_pymnt_d', 'sub_grade', 'grade', 'loan_status']
# loans = pd.read_csv('../input/loan.csv', low_memory=False, parse_dates=date, usecols=cols, infer_datetime_format=True)
latest = loans['issue_d'].max()
finished_bool = (((loans['issue_d'] < (latest - pd.DateOffset(years=3))) & (loans['term'] == ' 36 months')) | ((loans['issue_d'] < (latest - pd.DateOffset(years=5))) & (loans['term'] == ' 60 months')))
finished_loans = loans.loc[finished_bool]
finished_loans['roi'] = (((finished_loans.total_pymnt / finished_loans.loan_amnt) - 1) * 100)
return finished_loans
#=============
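# Example (sketch): the 'roi' column added by Function 91 lends itself to per-grade summaries
# on the returned frame, e.g. finished_loans.groupby('grade')['roi'].mean()
#=============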
# Function 92
def cleaning_func_0(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv')
df = df[((df.loan_status == 'Fully Paid') | (df.loan_status == 'Charged Off'))]
df = df[(df['pymnt_plan'] == 'n')]
return df
#=============
# Function 93
def cleaning_func_1(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv')
df = df[((df.loan_status == 'Fully Paid') | (df.loan_status == 'Charged Off'))]
return df
#=============
# Function 94
def cleaning_func_2(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv')
df = df[((df.loan_status == 'Fully Paid') | (df.loan_status == 'Charged Off'))]
df = df[(df['pymnt_plan'] == 'n')]
df = df[(df['application_type'] == 'INDIVIDUAL')]
df1 = df.drop(columns=['policy_code', 'next_pymnt_d', 'out_prncp', 'out_prncp_inv', 'pymnt_plan', 'initial_list_status', 'member_id', 'id', 'url', 'application_type', 'grade', 'annual_inc_joint', 'dti_joint'])
    # assumed intent: convert the term strings (e.g. ' 36 months') to integer month counts
    terms = [int(term.strip().split()[0]) for term in df1.term]
    df1.term = terms
return df1
#=============
# Function 95
def cleaning_func_3(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv')
df = df[((df.loan_status == 'Fully Paid') | (df.loan_status == 'Charged Off'))]
df = df[(df['pymnt_plan'] == 'n')]
df = df[(df['application_type'] == 'INDIVIDUAL')]
return df
#=============
# Function 96
def cleaning_func_4(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv')
df = df[((df.loan_status == 'Fully Paid') | (df.loan_status == 'Charged Off'))]
df = df[(df['pymnt_plan'] == 'n')]
df = df[(df['application_type'] == 'INDIVIDUAL')]
df1 = df.drop(columns=['policy_code', 'next_pymnt_d', 'out_prncp', 'out_prncp_inv', 'pymnt_plan', 'initial_list_status', 'member_id', 'id', 'url', 'application_type', 'grade', 'annual_inc_joint', 'dti_joint'])
    # assumed intent: map employment-length strings ('10+ years', '< 1 year', 'n/a', ...) to numbers
    emp_lengths = df1.emp_length.str.extract(r'(\d+)', expand=False).astype(float)
    df1.emp_length = emp_lengths
return df1
#=============
# Function 97
def cleaning_func_5(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv')
df = df[((df.loan_status == 'Fully Paid') | (df.loan_status == 'Charged Off'))]
df = df[(df['pymnt_plan'] == 'n')]
df = df[(df['application_type'] == 'INDIVIDUAL')]
df1 = df.drop(columns=['policy_code', 'next_pymnt_d', 'out_prncp', 'out_prncp_inv', 'pymnt_plan', 'initial_list_status', 'member_id', 'id', 'url', 'application_type', 'grade', 'annual_inc_joint', 'dti_joint'])
df1 = df1.drop(columns=['verification_status_joint', 'open_acc_6m', 'open_il_6m', 'open_il_12m', 'open_il_24m', 'mths_since_rcnt_il', 'total_bal_il', 'il_util', 'open_rv_12m', 'open_rv_24m', 'max_bal_bc', 'all_util', 'inq_fi', 'total_cu_tl', 'inq_last_12m'])
return df1
#=============
# Function 98
def cleaning_func_6(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv')
df = df[((df.loan_status == 'Fully Paid') | (df.loan_status == 'Charged Off'))]
df = df[(df['pymnt_plan'] == 'n')]
df = df[(df['application_type'] == 'INDIVIDUAL')]
df1 = df.drop(columns=['policy_code', 'next_pymnt_d', 'out_prncp', 'out_prncp_inv', 'pymnt_plan', 'initial_list_status', 'member_id', 'id', 'url', 'application_type', 'grade', 'annual_inc_joint', 'dti_joint'])
df1 = df1.drop(columns=['verification_status_joint', 'open_acc_6m', 'open_il_6m', 'open_il_12m', 'open_il_24m', 'mths_since_rcnt_il', 'total_bal_il', 'il_util', 'open_rv_12m', 'open_rv_24m', 'max_bal_bc', 'all_util', 'inq_fi', 'total_cu_tl', 'inq_last_12m'])
df1 = df1.drop(columns=['mths_since_last_major_derog'])
df1['revol_util_nan'] = (pd.isnull(df1.revol_util) * 1)
return df1
#=============
# Function 99
def cleaning_func_7(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv')
df = df[((df.loan_status == 'Fully Paid') | (df.loan_status == 'Charged Off'))]
df = df[(df['pymnt_plan'] == 'n')]
df = df[(df['application_type'] == 'INDIVIDUAL')]
df1 = df.drop(columns=['policy_code', 'next_pymnt_d', 'out_prncp', 'out_prncp_inv', 'pymnt_plan', 'initial_list_status', 'member_id', 'id', 'url', 'application_type', 'grade', 'annual_inc_joint', 'dti_joint'])
return df1
#=============
# Function 100
def cleaning_func_10(df):
# core cleaning code
import pandas as pd
import numpy as np
# df = pd.read_csv('../input/loan.csv')
df = df[((df.loan_status == 'Fully Paid') | (df.loan_status == 'Charged Off'))]
df = df[(df['pymnt_plan'] == 'n')]
df = df[(df['application_type'] == 'INDIVIDUAL')]
df1 = df.drop(columns=['policy_code', 'next_pymnt_d', 'out_prncp', 'out_prncp_inv', 'pymnt_plan', 'initial_list_status', 'member_id', 'id', 'url', 'application_type', 'grade', 'annual_inc_joint', 'dti_joint'])
df1 = df1.drop(columns=['verification_status_joint', 'open_acc_6m', 'open_il_6m', 'open_il_12m', 'open_il_24m', 'mths_since_rcnt_il', 'total_bal_il', 'il_util', 'open_rv_12m', 'open_rv_24m', 'max_bal_bc', 'all_util', 'inq_fi', 'total_cu_tl', 'inq_last_12m'])
df1 = df1.drop(columns=['mths_since_last_major_derog'])
df1['mths_since_last_delinq_nan'] = (np.isnan(df1.mths_since_last_delinq) * 1)
return df1
#=============
# Function 101
def cleaning_func_14(df):
# core cleaning code
import pandas as pd
import numpy as np
# df = pd.read_csv('../input/loan.csv')
df = df[((df.loan_status == 'Fully Paid') | (df.loan_status == 'Charged Off'))]
df = df[(df['pymnt_plan'] == 'n')]
df = df[(df['application_type'] == 'INDIVIDUAL')]
df1 = df.drop(columns=['policy_code', 'next_pymnt_d', 'out_prncp', 'out_prncp_inv', 'pymnt_plan', 'initial_list_status', 'member_id', 'id', 'url', 'application_type', 'grade', 'annual_inc_joint', 'dti_joint'])
df1 = df1.drop(columns=['verification_status_joint', 'open_acc_6m', 'open_il_6m', 'open_il_12m', 'open_il_24m', 'mths_since_rcnt_il', 'total_bal_il', 'il_util', 'open_rv_12m', 'open_rv_24m', 'max_bal_bc', 'all_util', 'inq_fi', 'total_cu_tl', 'inq_last_12m'])
df1 = df1.drop(columns=['mths_since_last_major_derog'])
df1.tot_coll_amt = df1.tot_coll_amt.replace(np.nan, 0)
return df1
#=============
# Function 102
def cleaning_func_15(df):
# core cleaning code
import pandas as pd
import numpy as np
# df = pd.read_csv('../input/loan.csv')
df = df[((df.loan_status == 'Fully Paid') | (df.loan_status == 'Charged Off'))]
df = df[(df['pymnt_plan'] == 'n')]
df = df[(df['application_type'] == 'INDIVIDUAL')]
df1 = df.drop(columns=['policy_code', 'next_pymnt_d', 'out_prncp', 'out_prncp_inv', 'pymnt_plan', 'initial_list_status', 'member_id', 'id', 'url', 'application_type', 'grade', 'annual_inc_joint', 'dti_joint'])
df1 = df1.drop(columns=['verification_status_joint', 'open_acc_6m', 'open_il_6m', 'open_il_12m', 'open_il_24m', 'mths_since_rcnt_il', 'total_bal_il', 'il_util', 'open_rv_12m', 'open_rv_24m', 'max_bal_bc', 'all_util', 'inq_fi', 'total_cu_tl', 'inq_last_12m'])
df1 = df1.drop(columns=['mths_since_last_major_derog'])
df1['mths_since_last_record_nan'] = (np.isnan(df1.mths_since_last_record) * 1)
return df1
#=============
# Function 103
def cleaning_func_16(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv')
df = df[((df.loan_status == 'Fully Paid') | (df.loan_status == 'Charged Off'))]
df = df[(df['pymnt_plan'] == 'n')]
df = df[(df['application_type'] == 'INDIVIDUAL')]
df1 = df.drop(columns=['policy_code', 'next_pymnt_d', 'out_prncp', 'out_prncp_inv', 'pymnt_plan', 'initial_list_status', 'member_id', 'id', 'url', 'application_type', 'grade', 'annual_inc_joint', 'dti_joint'])
    # assumed intent: map employment-length strings ('10+ years', '< 1 year', 'n/a', ...) to numbers
    emp_lengths = df1.emp_length.str.extract(r'(\d+)', expand=False).astype(float)
    df1.emp_length = emp_lengths
df1 = df1.drop(columns=['verification_status_joint', 'open_acc_6m', 'open_il_6m', 'open_il_12m', 'open_il_24m', 'mths_since_rcnt_il', 'total_bal_il', 'il_util', 'open_rv_12m', 'open_rv_24m', 'max_bal_bc', 'all_util', 'inq_fi', 'total_cu_tl', 'inq_last_12m'])
return df1
#=============
# Function 104
def cleaning_func_17(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv')
df = df[((df.loan_status == 'Fully Paid') | (df.loan_status == 'Charged Off'))]
df = df[(df['pymnt_plan'] == 'n')]
df = df[(df['application_type'] == 'INDIVIDUAL')]
df1 = df.drop(columns=['policy_code', 'next_pymnt_d', 'out_prncp', 'out_prncp_inv', 'pymnt_plan', 'initial_list_status', 'member_id', 'id', 'url', 'application_type', 'grade', 'annual_inc_joint', 'dti_joint'])
    # assumed intent: map employment-length strings ('10+ years', '< 1 year', 'n/a', ...) to numbers
    emp_lengths = df1.emp_length.str.extract(r'(\d+)', expand=False).astype(float)
    df1.emp_length = emp_lengths
df1 = df1.drop(columns=['verification_status_joint', 'open_acc_6m', 'open_il_6m', 'open_il_12m', 'open_il_24m', 'mths_since_rcnt_il', 'total_bal_il', 'il_util', 'open_rv_12m', 'open_rv_24m', 'max_bal_bc', 'all_util', 'inq_fi', 'total_cu_tl', 'inq_last_12m'])
df1 = df1.drop(columns=['mths_since_last_major_derog'])
df1['emp_length_nan'] = (pd.isnull(df1.emp_length) * 1)
return df1
#=============
# Function 105
def cleaning_func_18(df):
# core cleaning code
import pandas as pd
import numpy as np
# df = pd.read_csv('../input/loan.csv')
df = df[((df.loan_status == 'Fully Paid') | (df.loan_status == 'Charged Off'))]
df = df[(df['pymnt_plan'] == 'n')]
df = df[(df['application_type'] == 'INDIVIDUAL')]
df1 = df.drop(columns=['policy_code', 'next_pymnt_d', 'out_prncp', 'out_prncp_inv', 'pymnt_plan', 'initial_list_status', 'member_id', 'id', 'url', 'application_type', 'grade', 'annual_inc_joint', 'dti_joint'])
df1 = df1.drop(columns=['verification_status_joint', 'open_acc_6m', 'open_il_6m', 'open_il_12m', 'open_il_24m', 'mths_since_rcnt_il', 'total_bal_il', 'il_util', 'open_rv_12m', 'open_rv_24m', 'max_bal_bc', 'all_util', 'inq_fi', 'total_cu_tl', 'inq_last_12m'])
df1 = df1.drop(columns=['mths_since_last_major_derog'])
df1.tot_cur_bal = df1.tot_cur_bal.replace(np.nan, 0)
return df1
#=============
# Function 106
def cleaning_func_21(df):
# core cleaning code
import pandas as pd
from sklearn.preprocessing import Imputer
# df = pd.read_csv('../input/loan.csv')
df = df[((df.loan_status == 'Fully Paid') | (df.loan_status == 'Charged Off'))]
df = df[(df['pymnt_plan'] == 'n')]
df = df[(df['application_type'] == 'INDIVIDUAL')]
df1 = df.drop(columns=['policy_code', 'next_pymnt_d', 'out_prncp', 'out_prncp_inv', 'pymnt_plan', 'initial_list_status', 'member_id', 'id', 'url', 'application_type', 'grade', 'annual_inc_joint', 'dti_joint'])
df1 = df1.drop(columns=['verification_status_joint', 'open_acc_6m', 'open_il_6m', 'open_il_12m', 'open_il_24m', 'mths_since_rcnt_il', 'total_bal_il', 'il_util', 'open_rv_12m', 'open_rv_24m', 'max_bal_bc', 'all_util', 'inq_fi', 'total_cu_tl', 'inq_last_12m'])
df1 = df1.drop(columns=['mths_since_last_major_derog'])
imp = Imputer(strategy='median')
    df1.total_rev_hi_lim = imp.fit_transform(df1.total_rev_hi_lim.values.reshape((- 1), 1))
return df1
#=============
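# Note: on scikit-learn >= 0.22 the Imputer class used in this and the following imputation
# variants has been removed; the drop-in replacement is
#   from sklearn.impute import SimpleImputer
#   imp = SimpleImputer(strategy='median')
#=============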
# Function 107
def cleaning_func_25(df):
# core cleaning code
import pandas as pd
from sklearn.preprocessing import Imputer
# df = pd.read_csv('../input/loan.csv')
df = df[((df.loan_status == 'Fully Paid') | (df.loan_status == 'Charged Off'))]
df = df[(df['pymnt_plan'] == 'n')]
df = df[(df['application_type'] == 'INDIVIDUAL')]
df1 = df.drop(columns=['policy_code', 'next_pymnt_d', 'out_prncp', 'out_prncp_inv', 'pymnt_plan', 'initial_list_status', 'member_id', 'id', 'url', 'application_type', 'grade', 'annual_inc_joint', 'dti_joint'])
df1 = df1.drop(columns=['verification_status_joint', 'open_acc_6m', 'open_il_6m', 'open_il_12m', 'open_il_24m', 'mths_since_rcnt_il', 'total_bal_il', 'il_util', 'open_rv_12m', 'open_rv_24m', 'max_bal_bc', 'all_util', 'inq_fi', 'total_cu_tl', 'inq_last_12m'])
df1 = df1.drop(columns=['mths_since_last_major_derog'])
imp = Imputer(strategy='most_frequent')
    df1.collections_12_mths_ex_med = imp.fit_transform(df1.collections_12_mths_ex_med.values.reshape((- 1), 1))
return df1
#=============
# Function 108
def cleaning_func_26(df):
# core cleaning code
import pandas as pd
from sklearn.preprocessing import Imputer
# df = pd.read_csv('../input/loan.csv')
df = df[((df.loan_status == 'Fully Paid') | (df.loan_status == 'Charged Off'))]
df = df[(df['pymnt_plan'] == 'n')]
df = df[(df['application_type'] == 'INDIVIDUAL')]
df1 = df.drop(columns=['policy_code', 'next_pymnt_d', 'out_prncp', 'out_prncp_inv', 'pymnt_plan', 'initial_list_status', 'member_id', 'id', 'url', 'application_type', 'grade', 'annual_inc_joint', 'dti_joint'])
df1 = df1.drop(columns=['verification_status_joint', 'open_acc_6m', 'open_il_6m', 'open_il_12m', 'open_il_24m', 'mths_since_rcnt_il', 'total_bal_il', 'il_util', 'open_rv_12m', 'open_rv_24m', 'max_bal_bc', 'all_util', 'inq_fi', 'total_cu_tl', 'inq_last_12m'])
df1 = df1.drop(columns=['mths_since_last_major_derog'])
imp = Imputer(strategy='mean')
df1.revol_util = imp.fit_transform(df1.revol_util.values.reshape((- 1), 1))
return df1
#=============
# Function 109
def cleaning_func_27(df):
# core cleaning code
import pandas as pd
from sklearn.preprocessing import Imputer
# df = pd.read_csv('../input/loan.csv')
df = df[((df.loan_status == 'Fully Paid') | (df.loan_status == 'Charged Off'))]
df = df[(df['pymnt_plan'] == 'n')]
df = df[(df['application_type'] == 'INDIVIDUAL')]
df1 = df.drop(columns=['policy_code', 'next_pymnt_d', 'out_prncp', 'out_prncp_inv', 'pymnt_plan', 'initial_list_status', 'member_id', 'id', 'url', 'application_type', 'grade', 'annual_inc_joint', 'dti_joint'])
df1 = df1.drop(columns=['verification_status_joint', 'open_acc_6m', 'open_il_6m', 'open_il_12m', 'open_il_24m', 'mths_since_rcnt_il', 'total_bal_il', 'il_util', 'open_rv_12m', 'open_rv_24m', 'max_bal_bc', 'all_util', 'inq_fi', 'total_cu_tl', 'inq_last_12m'])
df1 = df1.drop(columns=['mths_since_last_major_derog'])
imp = Imputer(strategy='most_frequent')
msld = imp.fit_transform(df1.mths_since_last_delinq.values.reshape((- 1), 1))
df1.mths_since_last_delinq = msld
return df1
#=============
# Function 110
def cleaning_func_30(df):
# core cleaning code
import pandas as pd
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
# df = pd.read_csv('../input/loan.csv')
df = df[((df.loan_status == 'Fully Paid') | (df.loan_status == 'Charged Off'))]
df = df[(df['pymnt_plan'] == 'n')]
df = df[(df['application_type'] == 'INDIVIDUAL')]
df1 = df.drop(columns=['policy_code', 'next_pymnt_d', 'out_prncp', 'out_prncp_inv', 'pymnt_plan', 'initial_list_status', 'member_id', 'id', 'url', 'application_type', 'grade', 'annual_inc_joint', 'dti_joint'])
df1 = df1.drop(columns=['verification_status_joint', 'open_acc_6m', 'open_il_6m', 'open_il_12m', 'open_il_24m', 'mths_since_rcnt_il', 'total_bal_il', 'il_util', 'open_rv_12m', 'open_rv_24m', 'max_bal_bc', 'all_util', 'inq_fi', 'total_cu_tl', 'inq_last_12m'])
df1 = df1.drop(columns=['mths_since_last_major_derog'])
    lbl_enc = LabelEncoder()
    # assumed reconstruction: label-encode each remaining object-dtype column (keeping a
    # '<col>_old' copy); the raw date strings are left unencoded so pd.to_datetime can still parse them
    for x in df1.select_dtypes(include=['object']).columns.difference(['issue_d', 'earliest_cr_line']):
        df1[(x + '_old')] = df[x]
        df1[x] = lbl_enc.fit_transform(df1[x].astype(str))
df1.issue_d = pd.to_datetime(df1.issue_d, format='%b-%Y')
return df1
#=============
# Function 111
def cleaning_func_34(df):
# core cleaning code
import pandas as pd
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
# df = pd.read_csv('../input/loan.csv')
df = df[((df.loan_status == 'Fully Paid') | (df.loan_status == 'Charged Off'))]
df = df[(df['pymnt_plan'] == 'n')]
df = df[(df['application_type'] == 'INDIVIDUAL')]
df1 = df.drop(columns=['policy_code', 'next_pymnt_d', 'out_prncp', 'out_prncp_inv', 'pymnt_plan', 'initial_list_status', 'member_id', 'id', 'url', 'application_type', 'grade', 'annual_inc_joint', 'dti_joint'])
df1 = df1.drop(columns=['verification_status_joint', 'open_acc_6m', 'open_il_6m', 'open_il_12m', 'open_il_24m', 'mths_since_rcnt_il', 'total_bal_il', 'il_util', 'open_rv_12m', 'open_rv_24m', 'max_bal_bc', 'all_util', 'inq_fi', 'total_cu_tl', 'inq_last_12m'])
df1 = df1.drop(columns=['mths_since_last_major_derog'])
    lbl_enc = LabelEncoder()
    # assumed reconstruction: label-encode each remaining object-dtype column (keeping a
    # '<col>_old' copy); the raw date strings are left unencoded so pd.to_datetime can still parse them
    for x in df1.select_dtypes(include=['object']).columns.difference(['issue_d', 'earliest_cr_line']):
        df1[(x + '_old')] = df[x]
        df1[x] = lbl_enc.fit_transform(df1[x].astype(str))
df1.earliest_cr_line = pd.to_datetime(df1.earliest_cr_line, format='%b-%Y')
return df1
#=============
# Function 112
def cleaning_func_35(df):
# core cleaning code
import pandas as pd
from sklearn.preprocessing import Imputer
# df = pd.read_csv('../input/loan.csv')
df = df[((df.loan_status == 'Fully Paid') | (df.loan_status == 'Charged Off'))]
df = df[(df['pymnt_plan'] == 'n')]
df = df[(df['application_type'] == 'INDIVIDUAL')]
df1 = df.drop(columns=['policy_code', 'next_pymnt_d', 'out_prncp', 'out_prncp_inv', 'pymnt_plan', 'initial_list_status', 'member_id', 'id', 'url', 'application_type', 'grade', 'annual_inc_joint', 'dti_joint'])
df1 = df1.drop(columns=['verification_status_joint', 'open_acc_6m', 'open_il_6m', 'open_il_12m', 'open_il_24m', 'mths_since_rcnt_il', 'total_bal_il', 'il_util', 'open_rv_12m', 'open_rv_24m', 'max_bal_bc', 'all_util', 'inq_fi', 'total_cu_tl', 'inq_last_12m'])
df1 = df1.drop(columns=['mths_since_last_major_derog'])
imp = Imputer(strategy='median')
mslr = imp.fit_transform(df1.mths_since_last_record.values.reshape((- 1), 1))
df1.mths_since_last_record = mslr
return df1
#=============
# Function 113
def cleaning_func_37(df):
# core cleaning code
import pandas as pd
from sklearn.preprocessing import Imputer
# df = pd.read_csv('../input/loan.csv')
df = df[((df.loan_status == 'Fully Paid') | (df.loan_status == 'Charged Off'))]
df = df[(df['pymnt_plan'] == 'n')]
df = df[(df['application_type'] == 'INDIVIDUAL')]
df1 = df.drop(columns=['policy_code', 'next_pymnt_d', 'out_prncp', 'out_prncp_inv', 'pymnt_plan', 'initial_list_status', 'member_id', 'id', 'url', 'application_type', 'grade', 'annual_inc_joint', 'dti_joint'])
    # assumed intent: map employment-length strings ('10+ years', '< 1 year', 'n/a', ...) to numbers
    emp_lengths = df1.emp_length.str.extract(r'(\d+)', expand=False).astype(float)
    df1.emp_length = emp_lengths
df1 = df1.drop(columns=['verification_status_joint', 'open_acc_6m', 'open_il_6m', 'open_il_12m', 'open_il_24m', 'mths_since_rcnt_il', 'total_bal_il', 'il_util', 'open_rv_12m', 'open_rv_24m', 'max_bal_bc', 'all_util', 'inq_fi', 'total_cu_tl', 'inq_last_12m'])
df1 = df1.drop(columns=['mths_since_last_major_derog'])
imp = Imputer(strategy='median')
df1.emp_length = imp.fit_transform(df1.emp_length.values.reshape((- 1), 1))
return df1
#=============
# Function 114
def cleaning_func_39(df):
# core cleaning code
import pandas as pd
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
# df = pd.read_csv('../input/loan.csv')
df = df[((df.loan_status == 'Fully Paid') | (df.loan_status == 'Charged Off'))]
df = df[(df['pymnt_plan'] == 'n')]
df = df[(df['application_type'] == 'INDIVIDUAL')]
df1 = df.drop(columns=['policy_code', 'next_pymnt_d', 'out_prncp', 'out_prncp_inv', 'pymnt_plan', 'initial_list_status', 'member_id', 'id', 'url', 'application_type', 'grade', 'annual_inc_joint', 'dti_joint'])
df1 = df1.drop(columns=['verification_status_joint', 'open_acc_6m', 'open_il_6m', 'open_il_12m', 'open_il_24m', 'mths_since_rcnt_il', 'total_bal_il', 'il_util', 'open_rv_12m', 'open_rv_24m', 'max_bal_bc', 'all_util', 'inq_fi', 'total_cu_tl', 'inq_last_12m'])
df1 = df1.drop(columns=['mths_since_last_major_derog'])
lbl_enc = LabelEncoder()
# Reconstructed column loop (the per-column loop was flattened during extraction): label-encode
# the object-typed columns, keep raw copies in '<col>_old', and skip columns that later steps
# parse as dates or use as raw strings (the exact column list is an assumption).
for x in df1.select_dtypes(include='object').columns.difference(['issue_d', 'earliest_cr_line', 'last_pymnt_d', 'last_credit_pull_d', 'loan_status', 'term', 'emp_title', 'title', 'desc']):
    df1[x + '_old'] = df[x]
    df1[x] = lbl_enc.fit_transform(df1[x].astype(str))
df1.issue_d = pd.to_datetime(df1.issue_d, format='%b-%Y')
df1['issue_d_year'] = df1.issue_d.dt.year
return df1
#=============
# Function 115
def cleaning_func_40(df):
# core cleaning code
import pandas as pd
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
# df = pd.read_csv('../input/loan.csv')
df = df[((df.loan_status == 'Fully Paid') | (df.loan_status == 'Charged Off'))]
df = df[(df['pymnt_plan'] == 'n')]
df = df[(df['application_type'] == 'INDIVIDUAL')]
df1 = df.drop(columns=['policy_code', 'next_pymnt_d', 'out_prncp', 'out_prncp_inv', 'pymnt_plan', 'initial_list_status', 'member_id', 'id', 'url', 'application_type', 'grade', 'annual_inc_joint', 'dti_joint'])
df1 = df1.drop(columns=['verification_status_joint', 'open_acc_6m', 'open_il_6m', 'open_il_12m', 'open_il_24m', 'mths_since_rcnt_il', 'total_bal_il', 'il_util', 'open_rv_12m', 'open_rv_24m', 'max_bal_bc', 'all_util', 'inq_fi', 'total_cu_tl', 'inq_last_12m'])
df1 = df1.drop(columns=['mths_since_last_major_derog'])
lbl_enc = LabelEncoder()
# Reconstructed column loop (the per-column loop was flattened during extraction): label-encode
# the object-typed columns, keep raw copies in '<col>_old', and skip columns that later steps
# parse as dates or use as raw strings (the exact column list is an assumption).
for x in df1.select_dtypes(include='object').columns.difference(['issue_d', 'earliest_cr_line', 'last_pymnt_d', 'last_credit_pull_d', 'loan_status', 'term', 'emp_title', 'title', 'desc']):
    df1[x + '_old'] = df[x]
    df1[x] = lbl_enc.fit_transform(df1[x].astype(str))
df1['int_rate'] = df1.int_rate.astype(str).astype(float)
return df1
#=============
# Function 116
def cleaning_func_41(df):
# core cleaning code
import pandas as pd
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
# df = pd.read_csv('../input/loan.csv')
df = df[((df.loan_status == 'Fully Paid') | (df.loan_status == 'Charged Off'))]
df = df[(df['pymnt_plan'] == 'n')]
df = df[(df['application_type'] == 'INDIVIDUAL')]
df1 = df.drop(columns=['policy_code', 'next_pymnt_d', 'out_prncp', 'out_prncp_inv', 'pymnt_plan', 'initial_list_status', 'member_id', 'id', 'url', 'application_type', 'grade', 'annual_inc_joint', 'dti_joint'])
df1 = df1.drop(columns=['verification_status_joint', 'open_acc_6m', 'open_il_6m', 'open_il_12m', 'open_il_24m', 'mths_since_rcnt_il', 'total_bal_il', 'il_util', 'open_rv_12m', 'open_rv_24m', 'max_bal_bc', 'all_util', 'inq_fi', 'total_cu_tl', 'inq_last_12m'])
df1 = df1.drop(columns=['mths_since_last_major_derog'])
lbl_enc = LabelEncoder()
# Reconstructed column loop (the per-column loop was flattened during extraction): label-encode
# the object-typed columns, keep raw copies in '<col>_old', and skip columns that later steps
# parse as dates or use as raw strings (the exact column list is an assumption).
for x in df1.select_dtypes(include='object').columns.difference(['issue_d', 'earliest_cr_line', 'last_pymnt_d', 'last_credit_pull_d', 'loan_status', 'term', 'emp_title', 'title', 'desc']):
    df1[x + '_old'] = df[x]
    df1[x] = lbl_enc.fit_transform(df1[x].astype(str))
df1.earliest_cr_line = pd.to_datetime(df1.earliest_cr_line, format='%b-%Y')
df1['earliest_cr_line_year'] = df1.earliest_cr_line.dt.year
return df1
#=============
# Function 117
def cleaning_func_42(df):
# core cleaning code
import pandas as pd
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
# df = pd.read_csv('../input/loan.csv')
df = df[((df.loan_status == 'Fully Paid') | (df.loan_status == 'Charged Off'))]
df = df[(df['pymnt_plan'] == 'n')]
df = df[(df['application_type'] == 'INDIVIDUAL')]
df1 = df.drop(columns=['policy_code', 'next_pymnt_d', 'out_prncp', 'out_prncp_inv', 'pymnt_plan', 'initial_list_status', 'member_id', 'id', 'url', 'application_type', 'grade', 'annual_inc_joint', 'dti_joint'])
df1 = df1.drop(columns=['verification_status_joint', 'open_acc_6m', 'open_il_6m', 'open_il_12m', 'open_il_24m', 'mths_since_rcnt_il', 'total_bal_il', 'il_util', 'open_rv_12m', 'open_rv_24m', 'max_bal_bc', 'all_util', 'inq_fi', 'total_cu_tl', 'inq_last_12m'])
df1 = df1.drop(columns=['mths_since_last_major_derog'])
lbl_enc = LabelEncoder()
# Reconstructed column loop (the per-column loop was flattened during extraction): label-encode
# the object-typed columns, keep raw copies in '<col>_old', and skip columns that later steps
# parse as dates or use as raw strings (the exact column list is an assumption).
for x in df1.select_dtypes(include='object').columns.difference(['issue_d', 'earliest_cr_line', 'last_pymnt_d', 'last_credit_pull_d', 'loan_status', 'term', 'emp_title', 'title', 'desc']):
    df1[x + '_old'] = df[x]
    df1[x] = lbl_enc.fit_transform(df1[x].astype(str))
df1.issue_d = pd.to_datetime(df1.issue_d, format='%b-%Y')
df1['issue_d_month'] = df1.issue_d.dt.month
return df1
#=============
# Function 118
def cleaning_func_43(df):
# core cleaning code
import pandas as pd
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
# df = pd.read_csv('../input/loan.csv')
df = df[((df.loan_status == 'Fully Paid') | (df.loan_status == 'Charged Off'))]
df = df[(df['pymnt_plan'] == 'n')]
df = df[(df['application_type'] == 'INDIVIDUAL')]
df1 = df.drop(columns=['policy_code', 'next_pymnt_d', 'out_prncp', 'out_prncp_inv', 'pymnt_plan', 'initial_list_status', 'member_id', 'id', 'url', 'application_type', 'grade', 'annual_inc_joint', 'dti_joint'])
df1 = df1.drop(columns=['verification_status_joint', 'open_acc_6m', 'open_il_6m', 'open_il_12m', 'open_il_24m', 'mths_since_rcnt_il', 'total_bal_il', 'il_util', 'open_rv_12m', 'open_rv_24m', 'max_bal_bc', 'all_util', 'inq_fi', 'total_cu_tl', 'inq_last_12m'])
df1 = df1.drop(columns=['mths_since_last_major_derog'])
lbl_enc = LabelEncoder()
# Reconstructed column loop (the per-column loop was flattened during extraction): label-encode
# the object-typed columns, keep raw copies in '<col>_old', and skip columns that later steps
# parse as dates or use as raw strings (the exact column list is an assumption).
for x in df1.select_dtypes(include='object').columns.difference(['issue_d', 'earliest_cr_line', 'last_pymnt_d', 'last_credit_pull_d', 'loan_status', 'term', 'emp_title', 'title', 'desc']):
    df1[x + '_old'] = df[x]
    df1[x] = lbl_enc.fit_transform(df1[x].astype(str))
df1.earliest_cr_line = pd.to_datetime(df1.earliest_cr_line, format='%b-%Y')
df1['earliest_cr_line_month'] = df1.earliest_cr_line.dt.month
return df1
#=============
# Function 119
def cleaning_func_44(df):
# core cleaning code
import pandas as pd
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
# df = pd.read_csv('../input/loan.csv')
df = df[((df.loan_status == 'Fully Paid') | (df.loan_status == 'Charged Off'))]
df = df[(df['pymnt_plan'] == 'n')]
df = df[(df['application_type'] == 'INDIVIDUAL')]
df1 = df.drop(columns=['policy_code', 'next_pymnt_d', 'out_prncp', 'out_prncp_inv', 'pymnt_plan', 'initial_list_status', 'member_id', 'id', 'url', 'application_type', 'grade', 'annual_inc_joint', 'dti_joint'])
# Reconstructed from a flattened loop: keep only the numeric part of the term field
# (e.g. ' 36 months' -> '36') (assumed parsing rule).
df1.term = df1.term.str.extract(r'(\d+)', expand=False)
df1 = df1.drop(columns=['verification_status_joint', 'open_acc_6m', 'open_il_6m', 'open_il_12m', 'open_il_24m', 'mths_since_rcnt_il', 'total_bal_il', 'il_util', 'open_rv_12m', 'open_rv_24m', 'max_bal_bc', 'all_util', 'inq_fi', 'total_cu_tl', 'inq_last_12m'])
return df1
#=============
# Function 120
def cleaning_func_45(df):
# core cleaning code
import pandas as pd
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
# df = pd.read_csv('../input/loan.csv')
df = df[((df.loan_status == 'Fully Paid') | (df.loan_status == 'Charged Off'))]
df = df[(df['pymnt_plan'] == 'n')]
df = df[(df['application_type'] == 'INDIVIDUAL')]
df1 = df.drop(columns=['policy_code', 'next_pymnt_d', 'out_prncp', 'out_prncp_inv', 'pymnt_plan', 'initial_list_status', 'member_id', 'id', 'url', 'application_type', 'grade', 'annual_inc_joint', 'dti_joint'])
# Reconstructed from a flattened loop: keep only the numeric part of the term field
# (e.g. ' 36 months' -> '36') (assumed parsing rule).
df1.term = df1.term.str.extract(r'(\d+)', expand=False)
df1 = df1.drop(columns=['verification_status_joint', 'open_acc_6m', 'open_il_6m', 'open_il_12m', 'open_il_24m', 'mths_since_rcnt_il', 'total_bal_il', 'il_util', 'open_rv_12m', 'open_rv_24m', 'max_bal_bc', 'all_util', 'inq_fi', 'total_cu_tl', 'inq_last_12m'])
df1 = df1.drop(columns=['mths_since_last_major_derog'])
lbl_enc = LabelEncoder()
# Reconstructed column loop (the per-column loop was flattened during extraction): label-encode
# the object-typed columns, keep raw copies in '<col>_old', and skip columns that later steps
# parse as dates or use as raw strings (the exact column list is an assumption).
for x in df1.select_dtypes(include='object').columns.difference(['issue_d', 'earliest_cr_line', 'last_pymnt_d', 'last_credit_pull_d', 'loan_status', 'term', 'emp_title', 'title', 'desc']):
    df1[x + '_old'] = df[x]
    df1[x] = lbl_enc.fit_transform(df1[x].astype(str))
df1['term'] = df1.term.astype(str).astype(int)
return df1
#=============
# Function 121
def cleaning_func_47(df):
# core cleaning code
import pandas as pd
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
# df = pd.read_csv('../input/loan.csv')
df = df[((df.loan_status == 'Fully Paid') | (df.loan_status == 'Charged Off'))]
df = df[(df['pymnt_plan'] == 'n')]
df = df[(df['application_type'] == 'INDIVIDUAL')]
df1 = df.drop(columns=['policy_code', 'next_pymnt_d', 'out_prncp', 'out_prncp_inv', 'pymnt_plan', 'initial_list_status', 'member_id', 'id', 'url', 'application_type', 'grade', 'annual_inc_joint', 'dti_joint'])
df1 = df1.drop(columns=['verification_status_joint', 'open_acc_6m', 'open_il_6m', 'open_il_12m', 'open_il_24m', 'mths_since_rcnt_il', 'total_bal_il', 'il_util', 'open_rv_12m', 'open_rv_24m', 'max_bal_bc', 'all_util', 'inq_fi', 'total_cu_tl', 'inq_last_12m'])
df1 = df1.drop(columns=['mths_since_last_major_derog'])
lbl_enc = LabelEncoder()
# Reconstructed column loop (the per-column loop was flattened during extraction): label-encode
# the object-typed columns, keep raw copies in '<col>_old', and skip columns that later steps
# parse as dates or use as raw strings (the exact column list is an assumption).
for x in df1.select_dtypes(include='object').columns.difference(['issue_d', 'earliest_cr_line', 'last_pymnt_d', 'last_credit_pull_d', 'loan_status', 'term', 'emp_title', 'title', 'desc']):
    df1[x + '_old'] = df[x]
    df1[x] = lbl_enc.fit_transform(df1[x].astype(str))
return df1
#=============
# Function 122
def cleaning_func_48(df):
# core cleaning code
import pandas as pd
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
# df = pd.read_csv('../input/loan.csv')
df = df[((df.loan_status == 'Fully Paid') | (df.loan_status == 'Charged Off'))]
df = df[(df['pymnt_plan'] == 'n')]
df = df[(df['application_type'] == 'INDIVIDUAL')]
df1 = df.drop(columns=['policy_code', 'next_pymnt_d', 'out_prncp', 'out_prncp_inv', 'pymnt_plan', 'initial_list_status', 'member_id', 'id', 'url', 'application_type', 'grade', 'annual_inc_joint', 'dti_joint'])
df1 = df1.drop(columns=['verification_status_joint', 'open_acc_6m', 'open_il_6m', 'open_il_12m', 'open_il_24m', 'mths_since_rcnt_il', 'total_bal_il', 'il_util', 'open_rv_12m', 'open_rv_24m', 'max_bal_bc', 'all_util', 'inq_fi', 'total_cu_tl', 'inq_last_12m'])
df1 = df1.drop(columns=['mths_since_last_major_derog'])
lbl_enc = LabelEncoder()
# Reconstructed column loop (the per-column loop was flattened during extraction): label-encode
# the object-typed columns, keep raw copies in '<col>_old', and skip columns that later steps
# parse as dates or use as raw strings (the exact column list is an assumption).
for x in df1.select_dtypes(include='object').columns.difference(['issue_d', 'earliest_cr_line', 'last_pymnt_d', 'last_credit_pull_d', 'loan_status', 'term', 'emp_title', 'title', 'desc']):
    df1[x + '_old'] = df[x]
    df1[x] = lbl_enc.fit_transform(df1[x].astype(str))
df2 = df1[((df1['loan_status'] == 'Fully Paid') | (df1['loan_status'] == 'Charged Off'))]
# Reconstructed from a flattened loop: binary target, 1 for 'Charged Off' and 0 for 'Fully Paid' (assumption).
df2['target'] = (df2['loan_status'] == 'Charged Off').astype(int)
return df2
#=============
# Function 123
def cleaning_func_49(df):
# core cleaning code
import pandas as pd
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
# df = pd.read_csv('../input/loan.csv')
df = df[((df.loan_status == 'Fully Paid') | (df.loan_status == 'Charged Off'))]
df = df[(df['pymnt_plan'] == 'n')]
df = df[(df['application_type'] == 'INDIVIDUAL')]
df1 = df.drop(columns=['policy_code', 'next_pymnt_d', 'out_prncp', 'out_prncp_inv', 'pymnt_plan', 'initial_list_status', 'member_id', 'id', 'url', 'application_type', 'grade', 'annual_inc_joint', 'dti_joint'])
df1 = df1.drop(columns=['verification_status_joint', 'open_acc_6m', 'open_il_6m', 'open_il_12m', 'open_il_24m', 'mths_since_rcnt_il', 'total_bal_il', 'il_util', 'open_rv_12m', 'open_rv_24m', 'max_bal_bc', 'all_util', 'inq_fi', 'total_cu_tl', 'inq_last_12m'])
df1 = df1.drop(columns=['mths_since_last_major_derog'])
lbl_enc = LabelEncoder()
# Reconstructed column loop (the per-column loop was flattened during extraction): label-encode
# the object-typed columns, keep raw copies in '<col>_old', and skip columns that later steps
# parse as dates or use as raw strings (the exact column list is an assumption).
for x in df1.select_dtypes(include='object').columns.difference(['issue_d', 'earliest_cr_line', 'last_pymnt_d', 'last_credit_pull_d', 'loan_status', 'term', 'emp_title', 'title', 'desc']):
    df1[x + '_old'] = df[x]
    df1[x] = lbl_enc.fit_transform(df1[x].astype(str))
df1['text'] = ((((df1.emp_title + ' ') + df1.title) + ' ') + df1.desc)
df1['text'] = df1['text'].fillna('nan')
return df1
#=============
# Function 124
def cleaning_func_0(df):
# core cleaning code
import pandas as pd
badLoan = ['Charged Off', 'Default', 'Late (31-120 days)', 'Late (16-30 days)', 'In Grace Period', 'Does not meet the credit policy. Status:Charged Off']
# df = pd.read_csv('../input/loan.csv', usecols=['loan_status', 'addr_state'])
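# Flag each loan as bad (isBad = 1) when its status appears in the badLoan list above, otherwise 0.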
df['isBad'] = [(1 if (x in badLoan) else 0) for x in df.loan_status]
return df
#=============
# Function 125
def cleaning_func_4(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_amnt', 'addr_state'])
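# Number of loans per state, sorted in descending order.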
perStatedf = df.groupby('addr_state', as_index=False).count().sort_values(by='loan_amnt', ascending=False)
perStatedf.columns = ['State', 'Num_Loans']
return perStatedf
#=============
# Function 126
def cleaning_func_5(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_amnt', 'addr_state'])
return df.groupby('addr_state', as_index=False).count()
#=============
# Function 127
def cleaning_func_7(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_amnt', 'addr_state'])
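# Total loan amount issued per state, sorted in descending order.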
perStatedf = df.groupby('addr_state', as_index=False).sum().sort_values(by='loan_amnt', ascending=False)
perStatedf.columns = ['State', 'loan_amt']
return perStatedf
#=============
# Function 128
def cleaning_func_9(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_status', 'addr_state'])
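# Count of bad loans per state: sum the isBad flag (added by an earlier cleaning step) within each state.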
perStatedf = df.groupby('addr_state', as_index=False).sum().sort_values(by='isBad', ascending=False)
perStatedf.columns = ['State', 'badLoans']
return perStatedf
#=============
# Function 129
def cleaning_func_11(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_status', 'addr_state'])
perStatedf = df.groupby('addr_state', as_index=False).count().sort_values(by='loan_status', ascending=False)
perStatedf.columns = ['State', 'totalLoans']
return perStatedf
#=============
# Function 130
def cleaning_func_14(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_amnt', 'addr_state'])
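# Loans per capita: merge per-state loan counts with state population figures.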
perStatedf = df.groupby('addr_state', as_index=False).count().sort_values(by='loan_amnt', ascending=False)
# Rename columns so the per-capita merge below can join on 'State'.
perStatedf.columns = ['State', 'Num_Loans']
statePop = {'CA': 39144818, 'TX': 27469144, 'FL': 20271878, 'NY': 19795791, 'IL': 12859995, 'PA': 12802503, 'OH': 11613423, 'GA': 10214860, 'NC': 10042802, 'MI': 9922576, 'NJ': 8958013, 'VA': 8382993, 'WA': 7170351, 'AZ': 6828065, 'MA': 6794422, 'IN': 6619680, 'TN': 6600299, 'MO': 6083672, 'MD': 6006401, 'WI': 5771337, 'MN': 5489594, 'CO': 5456574, 'SC': 4896146, 'AL': 4858979, 'LA': 4670724, 'KY': 4425092, 'OR': 4028977, 'OK': 3911338, 'CT': 3890886, 'IA': 3123899, 'UT': 2995919, 'MS': 2992333, 'AR': 2978204, 'KS': 2911641, 'NV': 2890845, 'NM': 2085109, 'NE': 1896190, 'WV': 1844128, 'ID': 1654930, 'HI': 1431603, 'NH': 1330608, 'ME': 1329328, 'RI': 1053298, 'MT': 1032949, 'DE': 945934, 'SD': 858469, 'ND': 756927, 'AK': 738432, 'DC': 672228, 'VT': 626042, 'WY': 586107}
statePopdf = pd.DataFrame.from_dict(statePop, orient='index').reset_index()
statePopdf.columns = ['State', 'Pop']
perStatedf = pd.merge(perStatedf, statePopdf, on=['State'], how='inner')
perStatedf['PerCaptia'] = (perStatedf.Num_Loans / perStatedf.Pop)
return perStatedf
#=============
# Function 131
def cleaning_func_15(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_amnt', 'addr_state'])
perStatedf = df.groupby('addr_state', as_index=False).count().sort_values(by='loan_amnt', ascending=False)
statePop = {'CA': 39144818, 'TX': 27469144, 'FL': 20271878, 'NY': 19795791, 'IL': 12859995, 'PA': 12802503, 'OH': 11613423, 'GA': 10214860, 'NC': 10042802, 'MI': 9922576, 'NJ': 8958013, 'VA': 8382993, 'WA': 7170351, 'AZ': 6828065, 'MA': 6794422, 'IN': 6619680, 'TN': 6600299, 'MO': 6083672, 'MD': 6006401, 'WI': 5771337, 'MN': 5489594, 'CO': 5456574, 'SC': 4896146, 'AL': 4858979, 'LA': 4670724, 'KY': 4425092, 'OR': 4028977, 'OK': 3911338, 'CT': 3890886, 'IA': 3123899, 'UT': 2995919, 'MS': 2992333, 'AR': 2978204, 'KS': 2911641, 'NV': 2890845, 'NM': 2085109, 'NE': 1896190, 'WV': 1844128, 'ID': 1654930, 'HI': 1431603, 'NH': 1330608, 'ME': 1329328, 'RI': 1053298, 'MT': 1032949, 'DE': 945934, 'SD': 858469, 'ND': 756927, 'AK': 738432, 'DC': 672228, 'VT': 626042, 'WY': 586107}
return pd.DataFrame.from_dict(statePop, orient='index')
#=============
# Function 132
def cleaning_func_16(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_amnt', 'addr_state'])
perStatedf = df.groupby('addr_state', as_index=False).count().sort_values(by='loan_amnt', ascending=False)
statePop = {'CA': 39144818, 'TX': 27469144, 'FL': 20271878, 'NY': 19795791, 'IL': 12859995, 'PA': 12802503, 'OH': 11613423, 'GA': 10214860, 'NC': 10042802, 'MI': 9922576, 'NJ': 8958013, 'VA': 8382993, 'WA': 7170351, 'AZ': 6828065, 'MA': 6794422, 'IN': 6619680, 'TN': 6600299, 'MO': 6083672, 'MD': 6006401, 'WI': 5771337, 'MN': 5489594, 'CO': 5456574, 'SC': 4896146, 'AL': 4858979, 'LA': 4670724, 'KY': 4425092, 'OR': 4028977, 'OK': 3911338, 'CT': 3890886, 'IA': 3123899, 'UT': 2995919, 'MS': 2992333, 'AR': 2978204, 'KS': 2911641, 'NV': 2890845, 'NM': 2085109, 'NE': 1896190, 'WV': 1844128, 'ID': 1654930, 'HI': 1431603, 'NH': 1330608, 'ME': 1329328, 'RI': 1053298, 'MT': 1032949, 'DE': 945934, 'SD': 858469, 'ND': 756927, 'AK': 738432, 'DC': 672228, 'VT': 626042, 'WY': 586107}
statePopdf = pd.DataFrame.from_dict(statePop, orient='index').reset_index()
return statePopdf
#=============
# Function 133
def cleaning_func_17(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_amnt', 'addr_state'])
perStatedf = df.groupby('addr_state', as_index=False).count().sort_values(by='loan_amnt', ascending=False)
return perStatedf
#=============
# Function 134
def cleaning_func_18(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_amnt', 'addr_state'])
statePop = {'CA': 39144818, 'TX': 27469144, 'FL': 20271878, 'NY': 19795791, 'IL': 12859995, 'PA': 12802503, 'OH': 11613423, 'GA': 10214860, 'NC': 10042802, 'MI': 9922576, 'NJ': 8958013, 'VA': 8382993, 'WA': 7170351, 'AZ': 6828065, 'MA': 6794422, 'IN': 6619680, 'TN': 6600299, 'MO': 6083672, 'MD': 6006401, 'WI': 5771337, 'MN': 5489594, 'CO': 5456574, 'SC': 4896146, 'AL': 4858979, 'LA': 4670724, 'KY': 4425092, 'OR': 4028977, 'OK': 3911338, 'CT': 3890886, 'IA': 3123899, 'UT': 2995919, 'MS': 2992333, 'AR': 2978204, 'KS': 2911641, 'NV': 2890845, 'NM': 2085109, 'NE': 1896190, 'WV': 1844128, 'ID': 1654930, 'HI': 1431603, 'NH': 1330608, 'ME': 1329328, 'RI': 1053298, 'MT': 1032949, 'DE': 945934, 'SD': 858469, 'ND': 756927, 'AK': 738432, 'DC': 672228, 'VT': 626042, 'WY': 586107}
statePopdf = pd.DataFrame.from_dict(statePop, orient='index').reset_index()
perStatedf = df.groupby('addr_state', as_index=False).sum().sort_values(by='loan_amnt', ascending=False)
return perStatedf
#=============
# Function 135
def cleaning_func_19(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_amnt', 'addr_state'])
statePop = {'CA': 39144818, 'TX': 27469144, 'FL': 20271878, 'NY': 19795791, 'IL': 12859995, 'PA': 12802503, 'OH': 11613423, 'GA': 10214860, 'NC': 10042802, 'MI': 9922576, 'NJ': 8958013, 'VA': 8382993, 'WA': 7170351, 'AZ': 6828065, 'MA': 6794422, 'IN': 6619680, 'TN': 6600299, 'MO': 6083672, 'MD': 6006401, 'WI': 5771337, 'MN': 5489594, 'CO': 5456574, 'SC': 4896146, 'AL': 4858979, 'LA': 4670724, 'KY': 4425092, 'OR': 4028977, 'OK': 3911338, 'CT': 3890886, 'IA': 3123899, 'UT': 2995919, 'MS': 2992333, 'AR': 2978204, 'KS': 2911641, 'NV': 2890845, 'NM': 2085109, 'NE': 1896190, 'WV': 1844128, 'ID': 1654930, 'HI': 1431603, 'NH': 1330608, 'ME': 1329328, 'RI': 1053298, 'MT': 1032949, 'DE': 945934, 'SD': 858469, 'ND': 756927, 'AK': 738432, 'DC': 672228, 'VT': 626042, 'WY': 586107}
statePopdf = pd.DataFrame.from_dict(statePop, orient='index').reset_index()
statePopdf.columns = ['State', 'Pop']
perStatedf = df.groupby('addr_state', as_index=False).sum().sort_values(by='loan_amnt', ascending=False)
# Rename columns so the per-capita merge below can join on 'State'.
perStatedf.columns = ['State', 'loan_amt']
perStatedf = pd.merge(perStatedf, statePopdf, on=['State'], how='inner')
perStatedf['PerCaptia'] = (perStatedf.loan_amt / perStatedf.Pop)
return perStatedf
#=============
# Function 136
def cleaning_func_20(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_amnt', 'addr_state'])
statePop = {'CA': 39144818, 'TX': 27469144, 'FL': 20271878, 'NY': 19795791, 'IL': 12859995, 'PA': 12802503, 'OH': 11613423, 'GA': 10214860, 'NC': 10042802, 'MI': 9922576, 'NJ': 8958013, 'VA': 8382993, 'WA': 7170351, 'AZ': 6828065, 'MA': 6794422, 'IN': 6619680, 'TN': 6600299, 'MO': 6083672, 'MD': 6006401, 'WI': 5771337, 'MN': 5489594, 'CO': 5456574, 'SC': 4896146, 'AL': 4858979, 'LA': 4670724, 'KY': 4425092, 'OR': 4028977, 'OK': 3911338, 'CT': 3890886, 'IA': 3123899, 'UT': 2995919, 'MS': 2992333, 'AR': 2978204, 'KS': 2911641, 'NV': 2890845, 'NM': 2085109, 'NE': 1896190, 'WV': 1844128, 'ID': 1654930, 'HI': 1431603, 'NH': 1330608, 'ME': 1329328, 'RI': 1053298, 'MT': 1032949, 'DE': 945934, 'SD': 858469, 'ND': 756927, 'AK': 738432, 'DC': 672228, 'VT': 626042, 'WY': 586107}
return pd.DataFrame.from_dict(statePop, orient='index')
#=============
# Function 137
def cleaning_func_21(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_amnt', 'addr_state'])
statePop = {'CA': 39144818, 'TX': 27469144, 'FL': 20271878, 'NY': 19795791, 'IL': 12859995, 'PA': 12802503, 'OH': 11613423, 'GA': 10214860, 'NC': 10042802, 'MI': 9922576, 'NJ': 8958013, 'VA': 8382993, 'WA': 7170351, 'AZ': 6828065, 'MA': 6794422, 'IN': 6619680, 'TN': 6600299, 'MO': 6083672, 'MD': 6006401, 'WI': 5771337, 'MN': 5489594, 'CO': 5456574, 'SC': 4896146, 'AL': 4858979, 'LA': 4670724, 'KY': 4425092, 'OR': 4028977, 'OK': 3911338, 'CT': 3890886, 'IA': 3123899, 'UT': 2995919, 'MS': 2992333, 'AR': 2978204, 'KS': 2911641, 'NV': 2890845, 'NM': 2085109, 'NE': 1896190, 'WV': 1844128, 'ID': 1654930, 'HI': 1431603, 'NH': 1330608, 'ME': 1329328, 'RI': 1053298, 'MT': 1032949, 'DE': 945934, 'SD': 858469, 'ND': 756927, 'AK': 738432, 'DC': 672228, 'VT': 626042, 'WY': 586107}
statePopdf = pd.DataFrame.from_dict(statePop, orient='index').reset_index()
return statePopdf
#=============
# Function 138
def cleaning_func_23(df):
# core cleaning code
import pandas as pd
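# Bad loans per capita: combine per-state bad-loan counts with state population figures.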
statePop = {'CA': 39144818, 'TX': 27469144, 'FL': 20271878, 'NY': 19795791, 'IL': 12859995, 'PA': 12802503, 'OH': 11613423, 'GA': 10214860, 'NC': 10042802, 'MI': 9922576, 'NJ': 8958013, 'VA': 8382993, 'WA': 7170351, 'AZ': 6828065, 'MA': 6794422, 'IN': 6619680, 'TN': 6600299, 'MO': 6083672, 'MD': 6006401, 'WI': 5771337, 'MN': 5489594, 'CO': 5456574, 'SC': 4896146, 'AL': 4858979, 'LA': 4670724, 'KY': 4425092, 'OR': 4028977, 'OK': 3911338, 'CT': 3890886, 'IA': 3123899, 'UT': 2995919, 'MS': 2992333, 'AR': 2978204, 'KS': 2911641, 'NV': 2890845, 'NM': 2085109, 'NE': 1896190, 'WV': 1844128, 'ID': 1654930, 'HI': 1431603, 'NH': 1330608, 'ME': 1329328, 'RI': 1053298, 'MT': 1032949, 'DE': 945934, 'SD': 858469, 'ND': 756927, 'AK': 738432, 'DC': 672228, 'VT': 626042, 'WY': 586107}
statePopdf = pd.DataFrame.from_dict(statePop, orient='index').reset_index()
statePopdf.columns = ['State', 'Pop']
# df = pd.read_csv('../input/loan.csv', usecols=['loan_status', 'addr_state'])
perStatedf = df.groupby('addr_state', as_index=False).sum().sort_values(by='isBad', ascending=False)
# Rename columns so the per-capita merge below can join on 'State'.
perStatedf.columns = ['State', 'badLoans']
perStatedf = pd.merge(perStatedf, statePopdf, on=['State'], how='inner')
perStatedf['PerCaptia'] = (perStatedf.badLoans / perStatedf.Pop)
return perStatedf
#=============
# Function 139
def cleaning_func_26(df):
# core cleaning code
import pandas as pd
statePop = {'CA': 39144818, 'TX': 27469144, 'FL': 20271878, 'NY': 19795791, 'IL': 12859995, 'PA': 12802503, 'OH': 11613423, 'GA': 10214860, 'NC': 10042802, 'MI': 9922576, 'NJ': 8958013, 'VA': 8382993, 'WA': 7170351, 'AZ': 6828065, 'MA': 6794422, 'IN': 6619680, 'TN': 6600299, 'MO': 6083672, 'MD': 6006401, 'WI': 5771337, 'MN': 5489594, 'CO': 5456574, 'SC': 4896146, 'AL': 4858979, 'LA': 4670724, 'KY': 4425092, 'OR': 4028977, 'OK': 3911338, 'CT': 3890886, 'IA': 3123899, 'UT': 2995919, 'MS': 2992333, 'AR': 2978204, 'KS': 2911641, 'NV': 2890845, 'NM': 2085109, 'NE': 1896190, 'WV': 1844128, 'ID': 1654930, 'HI': 1431603, 'NH': 1330608, 'ME': 1329328, 'RI': 1053298, 'MT': 1032949, 'DE': 945934, 'SD': 858469, 'ND': 756927, 'AK': 738432, 'DC': 672228, 'VT': 626042, 'WY': 586107}
statePopdf = pd.DataFrame.from_dict(statePop, orient='index').reset_index()
# df = pd.read_csv('../input/loan.csv', usecols=['loan_status', 'addr_state'])
perStatedf = df.groupby('addr_state', as_index=False).sum().sort_values(by='isBad', ascending=False)
return perStatedf
#=============
# Function 140
def cleaning_func_27(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_status', 'addr_state'])
perStatedf = df.groupby('addr_state', as_index=False).count().sort_values(by='loan_status', ascending=False)
return df.groupby('addr_state', as_index=False).sum()
#=============
# Function 141
def cleaning_func_28(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_status', 'addr_state'])
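# Share of bad loans per state: merge total loan counts with bad-loan counts and compute a percentage.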
perStatedf = df.groupby('addr_state', as_index=False).count().sort_values(by='loan_status', ascending=False)
# Rename columns so the two per-state tables can be joined on 'State'.
perStatedf = perStatedf.rename(columns={'addr_state': 'State', 'loan_status': 'totalLoans'})
badLoansdf = df.groupby('addr_state', as_index=False).sum().sort_values(by='isBad', ascending=False)
badLoansdf = badLoansdf.rename(columns={'addr_state': 'State', 'isBad': 'badLoans'})
perStatedf = pd.merge(perStatedf, badLoansdf, on=['State'], how='inner')
perStatedf['percentBadLoans'] = ((perStatedf.badLoans / perStatedf.totalLoans) * 100)
return perStatedf
#=============
# Function 142
def cleaning_func_29(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_status', 'addr_state'])
perStatedf = df.groupby('addr_state', as_index=False).count().sort_values(by='loan_status', ascending=False)
badLoansdf = df.groupby('addr_state', as_index=False).sum().sort_values(by='isBad', ascending=False)
return badLoansdf
#=============
# Function 143
def cleaning_func_0(loan):
# core cleaning code
import pandas as pd
from collections import Counter
# loan = pd.read_csv('../input/loan.csv')
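# Drop loans that are still current and map the remaining loan statuses to a numeric target column.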
loan = loan[(loan.loan_status != 'Current')]
c = Counter(list(loan.loan_status))
mmp = {x[0]: 1 for x in c.most_common(20)}
loan['target'] = loan['loan_status'].map(mmp)
return loan
#=============
# Function 144
def cleaning_func_2(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
return category_two_data
#=============
# Function 145
def cleaning_func_3(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
return category_one_data
#=============
# Function 146
def cleaning_func_4(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[(slice(None, None, None), slice(2, (- 30), None))]
new_data_df = pd.DataFrame(new_data)
emp_title = new_data_df[8]
emp_title = pd.DataFrame(emp_title)
emp_title.columns = ['Employee Title']
return emp_title
#=============
# Function 147
def cleaning_func_5(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[(slice(None, None, None), slice(2, (- 30), None))]
new_data_df = pd.DataFrame(new_data)
return new_data_df
#=============
# Function 148
def cleaning_func_6(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[(slice(None, None, None), slice(2, (- 30), None))]
new_data_df = pd.DataFrame(new_data)
title = new_data_df[19]
title = pd.DataFrame(title)
title.columns = ['Title']
return title
#=============
# Function 149
def cleaning_func_7(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[(slice(None, None, None), slice(2, (- 30), None))]
new_data_df = pd.DataFrame(new_data)
status_new = new_data_df[14]
factored_status = np.array(pd.factorize(status_new))
status_labels = pd.DataFrame(factored_status[0])
return status_labels
#=============
# Function 150
def cleaning_func_8(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[(slice(None, None, None), slice(2, (- 30), None))]
new_data_df = pd.DataFrame(new_data)
status_new = new_data_df[14]
factored_status = np.array(pd.factorize(status_new))
status_labels = pd.DataFrame(factored_status[0])
home_status = new_data_df[10]
home_status = pd.DataFrame(home_status)
status_home_status = pd.DataFrame(np.hstack((home_status, status_labels)))
status_home_status.columns = ['Home Status', 'status_labels']
return status_home_status
#=============
# Function 151
def cleaning_func_9(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[(slice(None, None, None), slice(2, (- 30), None))]
new_data_df = pd.DataFrame(new_data)
status_new = new_data_df[14]
factored_status = np.array(pd.factorize(status_new))
status_labels = pd.DataFrame(factored_status[0])
home_status = new_data_df[10]
home_status = pd.DataFrame(home_status)
return home_status
#=============
# Function 152
def cleaning_func_11(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[(slice(None, None, None), slice(2, (- 30), None))]
new_data_df = pd.DataFrame(new_data)
status_new = new_data_df[14]
factored_status = np.array(pd.factorize(status_new))
status_labels = pd.DataFrame(factored_status[0])
state = new_data_df[21]
state = pd.DataFrame(state)
status_state = pd.DataFrame(np.hstack((state, status_labels)))
status_state.columns = ['State', 'status_labels']
return status_state
#=============
# Function 153
def cleaning_func_13(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[(slice(None, None, None), slice(2, (- 30), None))]
new_data_df = pd.DataFrame(new_data)
status_new = new_data_df[14]
factored_status = np.array(pd.factorize(status_new))
status_labels = pd.DataFrame(factored_status[0])
ver_stat = new_data_df[12]
ver_stat = pd.DataFrame(ver_stat)
status_ver_stat = pd.DataFrame(np.hstack((ver_stat, status_labels)))
status_ver_stat.columns = ['Verification Status', 'status_labels']
return status_ver_stat
#=============
# Function 154
def cleaning_func_14(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[(slice(None, None, None), slice(2, (- 30), None))]
new_data_df = pd.DataFrame(new_data)
status_new = new_data_df[14]
factored_status = np.array(pd.factorize(status_new))
status_labels = pd.DataFrame(factored_status[0])
installment_grade = new_data[(slice(None, None, None), 6)]
installment_grade = pd.DataFrame(installment_grade)
status_installment_grade = pd.DataFrame(np.hstack((installment_grade, status_labels)))
status_installment_grade.columns = ['Installment_grade', 'status_labels']
return status_installment_grade
#=============
# Function 155
def cleaning_func_16(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[(slice(None, None, None), slice(2, (- 30), None))]
new_data_df = pd.DataFrame(new_data)
status_new = new_data_df[14]
factored_status = np.array(pd.factorize(status_new))
status_labels = pd.DataFrame(factored_status[0])
annual_income = new_data[(slice(None, None, None), 11)]
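# Bin annual income with the fixed thresholds below (np.digitize yields five income groups)
# and pair each bin with its loan-status label.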
bins_2 = np.array([40000, 70000, 100000, 150000])
annual_income = annual_income.astype(float).reshape(annual_income.size, 1)
binned_annual_income = pd.DataFrame(np.digitize(annual_income, bins_2))
status_annual_groups = pd.DataFrame(np.hstack((binned_annual_income, status_labels)))
status_annual_groups.columns = ['Annual_income_grp', 'status_labels']
return status_annual_groups
#=============
# Function 156
def cleaning_func_17(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[(slice(None, None, None), slice(2, (- 30), None))]
new_data_df = pd.DataFrame(new_data)
status_new = new_data_df[14]
factored_status = np.array(pd.factorize(status_new))
status_labels = pd.DataFrame(factored_status[0])
annual_income = new_data[(slice(None, None, None), 11)]
bins_2 = np.array([40000, 70000, 100000, 150000])
annual_income = annual_income.astype(float).reshape(annual_income.size, 1)
binned_annual_income = pd.DataFrame(np.digitize(annual_income, bins_2))
return binned_annual_income
#=============
# Function 157
def cleaning_func_18(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[(slice(None, None, None), slice(2, (- 30), None))]
new_data_df = pd.DataFrame(new_data)
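# Bin installment amounts into equal-width groups and factorize loan status
# (Fully Paid -> 0, Charged Off -> 1, given the stacking order above).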
installment_amt = new_data[(slice(None, None, None), 5)]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
status_new = new_data_df[14]
factored_status = np.array(pd.factorize(status_new))
status_labels = pd.DataFrame(factored_status[0])
return status_labels
#=============
# Function 158
def cleaning_func_19(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[(slice(None, None, None), slice(2, (- 30), None))]
new_data_df = pd.DataFrame(new_data)
installment_amt = new_data[(slice(None, None, None), 5)]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
status_new = new_data_df[14]
factored_status = np.array(pd.factorize(status_new))
status_labels = pd.DataFrame(factored_status[0])
status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt, status_labels)))
status_installment_groups.columns = ['Installment_amt_grp', 'status_labels']
return status_installment_groups
#=============
# Function 159
def cleaning_func_20(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[(slice(None, None, None), slice(2, (- 30), None))]
new_data_df = pd.DataFrame(new_data)
installment_amt = new_data[(slice(None, None, None), 5)]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
return binned_installment_amt
#=============
# Function 160
def cleaning_func_21(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data_copy = np.vstack((category_one_data, category_two_data))
new_data_copy = pd.DataFrame(new_data_copy)
data_2 = new_data_copy
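# NOTE: col_nos is empty at this point, so the drop below removes no columns and the
# subsequent renumbering expects exactly 49 remaining columns.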
col_nos = []
i = 0
i = (i + 1)
data_2 = data_2.drop(data_2.columns[col_nos], axis=1)
rename_1 = range(0, 49)
data_2.columns = rename_1
cols_remove = [0, 10, 11, 17, 18, 19, 20, 21]
data_2 = data_2.drop(data_2.columns[cols_remove], axis=1)
cat_cols = [4, 7, 8, 9, 11, 14, 16, 18, 19, 20, 24, 25, 32, 33, 37, 38, 39]
cat_df = data_2.iloc[(slice(None, None, None), cat_cols)].values
cat_df = pd.DataFrame(cat_df)
return cat_df
#=============
# Function 161
def cleaning_func_22(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data_copy = np.vstack((category_one_data, category_two_data))
new_data_copy = pd.DataFrame(new_data_copy)
data_2 = new_data_copy
col_nos = []
i = 0
i = (i + 1)
data_2 = data_2.drop(data_2.columns[col_nos], axis=1)
rename_1 = range(0, 49)
data_2.columns = rename_1
return data_2
#=============
# Function 162
def cleaning_func_23(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data_copy = np.vstack((category_one_data, category_two_data))
new_data_copy = pd.DataFrame(new_data_copy)
data_2 = new_data_copy
col_nos = []
i = 0
i = (i + 1)
data_2 = data_2.drop(data_2.columns[col_nos], axis=1)
rename_1 = range(0, 49)
data_2.columns = rename_1
cols_remove = [0, 10, 11, 17, 18, 19, 20, 21]
data_2 = data_2.drop(data_2.columns[cols_remove], axis=1)
cat_cols = [4, 7, 8, 9, 11, 14, 16, 18, 19, 20, 24, 25, 32, 33, 37, 38, 39]
return data_2.iloc[(slice(None, None, None), cat_cols)]
#=============
# Function 163
def cleaning_func_25(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data_copy = np.vstack((category_one_data, category_two_data))
new_data_copy = pd.DataFrame(new_data_copy)
return new_data_copy
#=============
# Function 164
def cleaning_func_26(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data_copy = np.vstack((category_one_data, category_two_data))
new_data_copy = pd.DataFrame(new_data_copy)
data_2 = new_data_copy
col_nos = []
i = 0
i = (i + 1)
data_2 = data_2.drop(data_2.columns[col_nos], axis=1)
rename_1 = range(0, 49)
data_2.columns = rename_1
cols_remove = [0, 10, 11, 17, 18, 19, 20, 21]
data_2 = data_2.drop(data_2.columns[cols_remove], axis=1)
return data_2
#=============
# Function 165
def cleaning_func_27(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data_copy = np.vstack((category_one_data, category_two_data))
new_data_copy = pd.DataFrame(new_data_copy)
data_2 = new_data_copy
col_nos = []
i = 0
i = (i + 1)
data_2 = data_2.drop(data_2.columns[col_nos], axis=1)
rename_1 = range(0, 49)
data_2.columns = rename_1
cols_remove = [0, 10, 11, 17, 18, 19, 20, 21]
data_2 = data_2.drop(data_2.columns[cols_remove], axis=1)
cat_cols = [4, 7, 8, 9, 11, 14, 16, 18, 19, 20, 24, 25, 32, 33, 37, 38, 39]
cat_df = data_2.iloc[(slice(None, None, None), cat_cols)].values
cat_df = pd.DataFrame(cat_df)
c = [11, 12, 13, 15]
cat_df = cat_df.drop(cat_df.columns[c], axis=1)
r = range(0, 13)
cat_df.columns = r
return cat_df
#=============
# Function 166
def cleaning_func_29(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data_copy = np.vstack((category_one_data, category_two_data))
new_data_copy = pd.DataFrame(new_data_copy)
data_2 = new_data_copy
col_nos = []
i = 0
i = (i + 1)
data_2 = data_2.drop(data_2.columns[col_nos], axis=1)
rename_1 = range(0, 49)
data_2.columns = rename_1
cols_remove = [0, 10, 11, 17, 18, 19, 20, 21]
data_2 = data_2.drop(data_2.columns[cols_remove], axis=1)
rename_2 = range(0, 41)
data_2.columns = rename_2
cat_plus_time_cols = [4, 7, 8, 9, 11, 12, 14, 16, 17, 18, 19, 20, 24, 25, 32, 33, 34, 36, 37, 38, 39]
data_2_copy = data_2
non_cat = data_2_copy.drop(data_2_copy.columns[cat_plus_time_cols], axis=1)
rename = range(0, 20)
non_cat.columns = rename
return non_cat
#=============
# Function 167
def cleaning_func_30(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data_copy = np.vstack((category_one_data, category_two_data))
new_data_copy = pd.DataFrame(new_data_copy)
data_2 = new_data_copy
col_nos = []
i = 0
i = (i + 1)
data_2 = data_2.drop(data_2.columns[col_nos], axis=1)
rename_1 = range(0, 49)
data_2.columns = rename_1
cols_remove = [0, 10, 11, 17, 18, 19, 20, 21]
data_2 = data_2.drop(data_2.columns[cols_remove], axis=1)
rename_2 = range(0, 41)
data_2.columns = rename_2
cat_plus_time_cols = [4, 7, 8, 9, 11, 12, 14, 16, 17, 18, 19, 20, 24, 25, 32, 33, 34, 36, 37, 38, 39]
data_2_copy = data_2
non_cat = data_2_copy.drop(data_2_copy.columns[cat_plus_time_cols], axis=1)
rename = range(0, 20)
non_cat.columns = rename
non_cat = non_cat.drop(non_cat.columns[7], axis=1)
renaming_df = range(0, 19)
non_cat.columns = renaming_df
return non_cat
#=============
# Function 168
def cleaning_func_32(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data_copy = np.vstack((category_one_data, category_two_data))
new_data_copy = pd.DataFrame(new_data_copy)
data_2 = new_data_copy
col_nos = []
i = 0
i = (i + 1)
data_2 = data_2.drop(data_2.columns[col_nos], axis=1)
rename_1 = range(0, 49)
data_2.columns = rename_1
cols_remove = [0, 10, 11, 17, 18, 19, 20, 21]
data_2 = data_2.drop(data_2.columns[cols_remove], axis=1)
rename_2 = range(0, 41)
data_2.columns = rename_2
cat_plus_time_cols = [4, 7, 8, 9, 11, 12, 14, 16, 17, 18, 19, 20, 24, 25, 32, 33, 34, 36, 37, 38, 39]
data_2_copy = data_2
return data_2_copy
#=============
# Function 169
def cleaning_func_33(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[(slice(None, None, None), slice(2, (- 30), None))]
new_data_df = pd.DataFrame(new_data)
installment_amt = new_data[(slice(None, None, None), 5)]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
status_new = new_data_df[14]
factored_status = np.array(pd.factorize(status_new))
status_labels = pd.DataFrame(factored_status[0])
status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt, status_labels)))
status_installment_groups.columns = ['Installment_amt_grp', 'status_labels']
Charged_off = status_installment_groups[(status_installment_groups.status_labels == 1)]
temp_1 = Charged_off.iloc[(slice(None, None, None), 0)].values
plot_1 = np.array(np.unique(temp_1, return_counts=True))
plot_1 = plot_1[(slice(None, None, None), slice(None, (- 1), None))]
plot_11 = plot_1.T
Fully_paid = status_installment_groups[(status_installment_groups.status_labels == 0)]
temp_2 = Fully_paid.iloc[(slice(None, None, None), 0)].values
plot_2 = np.array(np.unique(temp_2, return_counts=True))
plot_22 = plot_2.T
plot_stack = np.hstack((plot_11, plot_22))
plot_stack = pd.DataFrame(plot_stack)
return plot_stack
#=============
# Function 170
def cleaning_func_34(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[(slice(None, None, None), slice(2, (- 30), None))]
new_data_df = pd.DataFrame(new_data)
installment_amt = new_data[(slice(None, None, None), 5)]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
status_new = new_data_df[14]
factored_status = np.array(pd.factorize(status_new))
status_labels = pd.DataFrame(factored_status[0])
status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt, status_labels)))
status_installment_groups.columns = ['Installment_amt_grp', 'status_labels']
Charged_off = status_installment_groups[(status_installment_groups.status_labels == 1)]
temp_1 = Charged_off.iloc[(slice(None, None, None), 0)].values
plot_1 = np.array(np.unique(temp_1, return_counts=True))
plot_1 = plot_1[(slice(None, None, None), slice(None, (- 1), None))]
plot_11 = plot_1.T
Fully_paid = status_installment_groups[(status_installment_groups.status_labels == 0)]
temp_2 = Fully_paid.iloc[(slice(None, None, None), 0)].values
plot_2 = np.array(np.unique(temp_2, return_counts=True))
plot_22 = plot_2.T
plot_stack = np.hstack((plot_11, plot_22))
plot_stack = pd.DataFrame(plot_stack)
plot_stack = plot_stack.drop(plot_stack.columns[2], axis=1)
plot_stack.columns = ['Installment_amt_grp', 'Charged Off', 'Fully Paid']
return plot_stack
#=============
# Function 171
def cleaning_func_35(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[(slice(None, None, None), slice(2, (- 30), None))]
new_data_df = pd.DataFrame(new_data)
installment_amt = new_data[(slice(None, None, None), 5)]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
status_new = new_data_df[14]
factored_status = np.array(pd.factorize(status_new))
status_labels = pd.DataFrame(factored_status[0])
status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt, status_labels)))
return status_installment_groups
#=============
# Function 172
def cleaning_func_36(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[(slice(None, None, None), slice(2, (- 30), None))]
new_data_df = pd.DataFrame(new_data)
installment_amt = new_data[(slice(None, None, None), 5)]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
status_new = new_data_df[14]
factored_status = np.array(pd.factorize(status_new))
status_labels = pd.DataFrame(factored_status[0])
status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt, status_labels)))
status_installment_groups.columns = ['Installment_amt_grp', 'status_labels']
Charged_off = status_installment_groups[(status_installment_groups.status_labels == 1)]
temp_1 = Charged_off.iloc[(slice(None, None, None), 0)].values
plot_1 = np.array(np.unique(temp_1, return_counts=True))
plot_1 = plot_1[(slice(None, None, None), slice(None, (- 1), None))]
plot_11 = plot_1.T
Fully_paid = status_installment_groups[(status_installment_groups.status_labels == 0)]
return Fully_paid
#=============
# Function 173
def cleaning_func_37(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[(slice(None, None, None), slice(2, (- 30), None))]
new_data_df = pd.DataFrame(new_data)
installment_amt = new_data[(slice(None, None, None), 5)]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
status_new = new_data_df[14]
factored_status = np.array(pd.factorize(status_new))
status_labels = pd.DataFrame(factored_status[0])
status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt, status_labels)))
status_installment_groups.columns = ['Installment_amt_grp', 'status_labels']
Charged_off = status_installment_groups[(status_installment_groups.status_labels == 1)]
return Charged_off
#=============
# Function 174
def cleaning_func_38(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[(slice(None, None, None), slice(2, (- 30), None))]
new_data_df = pd.DataFrame(new_data)
installment_amt = new_data[(slice(None, None, None), 5)]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
status_new = new_data_df[14]
factored_status = np.array(pd.factorize(status_new))
status_labels = pd.DataFrame(factored_status[0])
status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt, status_labels)))
status_installment_groups.columns = ['Installment_amt_grp', 'status_labels']
Charged_off = status_installment_groups[(status_installment_groups.status_labels == 1)]
temp_1 = Charged_off.iloc[(slice(None, None, None), 0)].values
plot_1 = np.array(np.unique(temp_1, return_counts=True))
plot_1 = plot_1[(slice(None, None, None), slice(None, (- 1), None))]
plot_11 = plot_1.T
Fully_paid = status_installment_groups[(status_installment_groups.status_labels == 0)]
temp_2 = Fully_paid.iloc[(slice(None, None, None), 0)].values
plot_2 = np.array(np.unique(temp_2, return_counts=True))
plot_22 = plot_2.T
plot_stack = np.hstack((plot_11, plot_22))
plot_stack = pd.DataFrame(plot_stack)
installment_grade = new_data[(slice(None, None, None), 6)]
installment_grade = pd.DataFrame(installment_grade)
status_installment_grade = pd.DataFrame(np.hstack((installment_grade, status_labels)))
return status_installment_grade
#=============
# Function 175
def cleaning_func_39(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[(slice(None, None, None), slice(2, (- 30), None))]
new_data_df = pd.DataFrame(new_data)
installment_amt = new_data[(slice(None, None, None), 5)]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
status_new = new_data_df[14]
factored_status = np.array(pd.factorize(status_new))
status_labels = pd.DataFrame(factored_status[0])
status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt, status_labels)))
status_installment_groups.columns = ['Installment_amt_grp', 'status_labels']
Charged_off = status_installment_groups[(status_installment_groups.status_labels == 1)]
temp_1 = Charged_off.iloc[(slice(None, None, None), 0)].values
plot_1 = np.array(np.unique(temp_1, return_counts=True))
plot_1 = plot_1[(slice(None, None, None), slice(None, (- 1), None))]
plot_11 = plot_1.T
Fully_paid = status_installment_groups[(status_installment_groups.status_labels == 0)]
temp_2 = Fully_paid.iloc[(slice(None, None, None), 0)].values
plot_2 = np.array(np.unique(temp_2, return_counts=True))
plot_22 = plot_2.T
plot_stack = np.hstack((plot_11, plot_22))
plot_stack = pd.DataFrame(plot_stack)
installment_grade = new_data[(slice(None, None, None), 6)]
installment_grade = pd.DataFrame(installment_grade)
status_installment_grade = pd.DataFrame(np.hstack((installment_grade, status_labels)))
status_installment_grade.columns = ['Installment_grade', 'status_labels']
Charged_off_grade = status_installment_grade[(status_installment_grade.status_labels == 1)]
return Charged_off_grade
#=============
# Function 176
def cleaning_func_40(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[(slice(None, None, None), slice(2, (- 30), None))]
new_data_df = pd.DataFrame(new_data)
installment_amt = new_data[(slice(None, None, None), 5)]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
status_new = new_data_df[14]
factored_status = np.array(pd.factorize(status_new))
status_labels = pd.DataFrame(factored_status[0])
status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt, status_labels)))
status_installment_groups.columns = ['Installment_amt_grp', 'status_labels']
Charged_off = status_installment_groups[(status_installment_groups.status_labels == 1)]
temp_1 = Charged_off.iloc[(slice(None, None, None), 0)].values
plot_1 = np.array(np.unique(temp_1, return_counts=True))
plot_1 = plot_1[(slice(None, None, None), slice(None, (- 1), None))]
plot_11 = plot_1.T
Fully_paid = status_installment_groups[(status_installment_groups.status_labels == 0)]
temp_2 = Fully_paid.iloc[(slice(None, None, None), 0)].values
plot_2 = np.array(np.unique(temp_2, return_counts=True))
plot_22 = plot_2.T
plot_stack = np.hstack((plot_11, plot_22))
plot_stack = pd.DataFrame(plot_stack)
installment_grade = new_data[(slice(None, None, None), 6)]
installment_grade = pd.DataFrame(installment_grade)
status_installment_grade = pd.DataFrame(np.hstack((installment_grade, status_labels)))
status_installment_grade.columns = ['Installment_grade', 'status_labels']
Charged_off_grade = status_installment_grade[(status_installment_grade.status_labels == 1)]
temp_11 = Charged_off_grade.iloc[(slice(None, None, None), 0)].values
plot_grade = np.array(np.unique(temp_11, return_counts=True))
plot_grade_11 = plot_grade.T
Fully_Paid_grade = status_installment_grade[(status_installment_grade.status_labels == 0)]
return Fully_Paid_grade
#=============
# Function 177
def cleaning_func_41(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[(slice(None, None, None), slice(2, (- 30), None))]
new_data_df = pd.DataFrame(new_data)
installment_amt = new_data[(slice(None, None, None), 5)]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
status_new = new_data_df[14]
factored_status = pd.factorize(status_new)
status_labels = pd.DataFrame(factored_status[0])
status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt, status_labels)), columns=['installment_bin', 'status_labels'])
Charged_off = status_installment_groups[(status_installment_groups.status_labels == 1)]
temp_1 = Charged_off.iloc[(slice(None, None, None), 0)].values
plot_1 = np.array(np.unique(temp_1, return_counts=True))
plot_1 = plot_1[(slice(None, None, None), slice(None, (- 1), None))]
plot_11 = plot_1.T
Fully_paid = status_installment_groups[(status_installment_groups.status_labels == 0)]
temp_2 = Fully_paid.iloc[(slice(None, None, None), 0)].values
plot_2 = np.array(np.unique(temp_2, return_counts=True))
plot_22 = plot_2.T
plot_stack = np.hstack((plot_11, plot_22))
plot_stack = pd.DataFrame(plot_stack)
installment_grade = new_data[(slice(None, None, None), 6)]
installment_grade = pd.DataFrame(installment_grade)
status_installment_grade = pd.DataFrame(np.hstack((installment_grade, status_labels)), columns=['installment_grade', 'status_labels'])
Charged_off_grade = status_installment_grade[(status_installment_grade.status_labels == 1)]
temp_11 = Charged_off_grade.iloc[(slice(None, None, None), 0)].values
plot_grade = np.array(np.unique(temp_11, return_counts=True))
plot_grade_11 = plot_grade.T
Fully_Paid_grade = status_installment_grade[(status_installment_grade.status_labels == 0)]
temp_22 = Fully_Paid_grade.iloc[(slice(None, None, None), 0)].values
plot_grade_2 = np.array(np.unique(temp_22, return_counts=True))
plot_grade_22 = plot_grade_2.T
plot_stack_1 = np.hstack((plot_grade_11, plot_grade_22))
plot_stack_1 = pd.DataFrame(plot_stack_1)
plot_stack_1 = plot_stack_1.drop(plot_stack_1.columns[2], axis=1)
plot_stack_1.columns = ['Installment_grade_grp', 'Charged Off', 'Fully Paid']
return plot_stack_1
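# --- Usage sketch (not part of the extracted kernel) ---
# A minimal example of how cleaning_func_41 might be driven end to end. The
# CSV path and the use of matplotlib are assumptions for illustration only.
def _demo_grade_breakdown(csv_path='../input/loan.csv'):
    import pandas as pd
    import matplotlib.pyplot as plt
    data = pd.read_csv(csv_path, low_memory=False)
    summary = cleaning_func_41(data)
    # the count columns come back as objects (they were routed through an
    # object ndarray), so cast them before plotting
    summary[['Charged Off', 'Fully Paid']] = summary[['Charged Off', 'Fully Paid']].astype(int)
    summary.plot(x='Installment_grade_grp', y=['Charged Off', 'Fully Paid'], kind='bar')
    plt.ylabel('Loan count')
    plt.tight_layout()
    plt.show()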
#=============
# Function 178
def cleaning_func_42(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[(slice(None, None, None), slice(2, (- 30), None))]
new_data_df = pd.DataFrame(new_data)
installment_amt = new_data[(slice(None, None, None), 5)]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
status_new = new_data_df[14]
factored_status = pd.factorize(status_new)
status_labels = pd.DataFrame(factored_status[0])
status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt, status_labels)), columns=['installment_bin', 'status_labels'])
Charged_off = status_installment_groups[(status_installment_groups.status_labels == 1)]
temp_1 = Charged_off.iloc[(slice(None, None, None), 0)].values
plot_1 = np.array(np.unique(temp_1, return_counts=True))
plot_1 = plot_1[(slice(None, None, None), slice(None, (- 1), None))]
plot_11 = plot_1.T
Fully_paid = status_installment_groups[(status_installment_groups.status_labels == 0)]
temp_2 = Fully_paid.iloc[(slice(None, None, None), 0)].values
plot_2 = np.array(np.unique(temp_2, return_counts=True))
plot_22 = plot_2.T
plot_stack = np.hstack((plot_11, plot_22))
plot_stack = pd.DataFrame(plot_stack)
installment_grade = new_data[(slice(None, None, None), 6)]
installment_grade = pd.DataFrame(installment_grade)
return installment_grade
#=============
# Function 179
def cleaning_func_43(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[(slice(None, None, None), slice(2, (- 30), None))]
new_data_df = pd.DataFrame(new_data)
installment_amt = new_data[(slice(None, None, None), 5)]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
status_new = new_data_df[14]
factored_status = pd.factorize(status_new)
status_labels = pd.DataFrame(factored_status[0])
status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt, status_labels)), columns=['installment_bin', 'status_labels'])
Charged_off = status_installment_groups[(status_installment_groups.status_labels == 1)]
temp_1 = Charged_off.iloc[(slice(None, None, None), 0)].values
plot_1 = np.array(np.unique(temp_1, return_counts=True))
plot_1 = plot_1[(slice(None, None, None), slice(None, (- 1), None))]
plot_11 = plot_1.T
Fully_paid = status_installment_groups[(status_installment_groups.status_labels == 0)]
temp_2 = Fully_paid.iloc[(slice(None, None, None), 0)].values
plot_2 = np.array(np.unique(temp_2, return_counts=True))
plot_22 = plot_2.T
plot_stack = np.hstack((plot_11, plot_22))
plot_stack = pd.DataFrame(plot_stack)
installment_grade = new_data[(slice(None, None, None), 6)]
installment_grade = pd.DataFrame(installment_grade)
status_installment_grade = pd.DataFrame(np.hstack((installment_grade, status_labels)), columns=['installment_grade', 'status_labels'])
Charged_off_grade = status_installment_grade[(status_installment_grade.status_labels == 1)]
temp_11 = Charged_off_grade.iloc[(slice(None, None, None), 0)].values
plot_grade = np.array(np.unique(temp_11, return_counts=True))
plot_grade_11 = plot_grade.T
Fully_Paid_grade = status_installment_grade[(status_installment_grade.status_labels == 0)]
temp_22 = Fully_Paid_grade.iloc[(slice(None, None, None), 0)].values
plot_grade_2 = np.array(np.unique(temp_22, return_counts=True))
plot_grade_22 = plot_grade_2.T
plot_stack_1 = np.hstack((plot_grade_11, plot_grade_22))
plot_stack_1 = pd.DataFrame(plot_stack_1)
return plot_stack_1
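# Note: this is the same grade breakdown table that cleaning_func_41 above
# returns, except that cleaning_func_41 also drops the duplicated grade column
# and assigns readable column names.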
#=============
# Function 180
def cleaning_func_44(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[(slice(None, None, None), slice(2, (- 30), None))]
new_data_df = pd.DataFrame(new_data)
installment_amt = new_data[(slice(None, None, None), 5)]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
status_new = new_data_df[14]
factored_status = pd.factorize(status_new)
status_labels = pd.DataFrame(factored_status[0])
status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt, status_labels)), columns=['installment_bin', 'status_labels'])
Charged_off = status_installment_groups[(status_installment_groups.status_labels == 1)]
temp_1 = Charged_off.iloc[(slice(None, None, None), 0)].values
plot_1 = np.array(np.unique(temp_1, return_counts=True))
plot_1 = plot_1[(slice(None, None, None), slice(None, (- 1), None))]
plot_11 = plot_1.T
Fully_paid = status_installment_groups[(status_installment_groups.status_labels == 0)]
temp_2 = Fully_paid.iloc[(slice(None, None, None), 0)].values
plot_2 = np.array(np.unique(temp_2, return_counts=True))
plot_22 = plot_2.T
plot_stack = np.hstack((plot_11, plot_22))
plot_stack = pd.DataFrame(plot_stack)
installment_grade = new_data[(slice(None, None, None), 6)]
installment_grade = pd.DataFrame(installment_grade)
status_installment_grade = pd.DataFrame(np.hstack((installment_grade, status_labels)), columns=['installment_grade', 'status_labels'])
Charged_off_grade = status_installment_grade[(status_installment_grade.status_labels == 1)]
temp_11 = Charged_off_grade.iloc[(slice(None, None, None), 0)].values
plot_grade = np.array(np.unique(temp_11, return_counts=True))
plot_grade_11 = plot_grade.T
Fully_Paid_grade = status_installment_grade[(status_installment_grade.status_labels == 0)]
temp_22 = Fully_Paid_grade.iloc[(slice(None, None, None), 0)].values
plot_grade_2 = np.array(np.unique(temp_22, return_counts=True))
plot_grade_22 = plot_grade_2.T
plot_stack_1 = np.hstack((plot_grade_11, plot_grade_22))
plot_stack_1 = pd.DataFrame(plot_stack_1)
home_status = new_data_df[10]
home_status = pd.DataFrame(home_status)
status_home_status = pd.DataFrame(np.hstack((home_status, status_labels)), columns=['home_status', 'status_labels'])
Charged_off_home_status = status_home_status[(status_home_status.status_labels == 1)]
temp_41 = Charged_off_home_status.iloc[(slice(None, None, None), 0)].values
plot_home_status = np.array(np.unique(temp_41, return_counts=True))
plot_home_status_44 = pd.DataFrame(plot_home_status.T)
Fully_Paid_home_status = status_home_status[(status_home_status.status_labels == 0)]
temp_42 = Fully_Paid_home_status.iloc[(slice(None, None, None), 0)].values
plot_home_status_2 = np.array(np.unique(temp_42, return_counts=True))
plot_home_status_55 = pd.DataFrame(plot_home_status_2.T)
plot_home_status_55 = plot_home_status_55.drop(0)
plot_stack_3 = np.hstack((plot_home_status_44, plot_home_status_55))
plot_stack_3 = pd.DataFrame(plot_stack_3)
return plot_stack_3
#=============
# Function 181
def cleaning_func_45(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[(slice(None, None, None), slice(2, (- 30), None))]
new_data_df = pd.DataFrame(new_data)
installment_amt = new_data[(slice(None, None, None), 5)]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
status_new = new_data_df[14]
factored_status = pd.factorize(status_new)
status_labels = pd.DataFrame(factored_status[0])
status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt, status_labels)), columns=['installment_bin', 'status_labels'])
Charged_off = status_installment_groups[(status_installment_groups.status_labels == 1)]
temp_1 = Charged_off.iloc[(slice(None, None, None), 0)].values
plot_1 = np.array(np.unique(temp_1, return_counts=True))
plot_1 = plot_1[(slice(None, None, None), slice(None, (- 1), None))]
plot_11 = plot_1.T
Fully_paid = status_installment_groups[(status_installment_groups.status_labels == 0)]
temp_2 = Fully_paid.iloc[(slice(None, None, None), 0)].values
plot_2 = np.array(np.unique(temp_2, return_counts=True))
plot_22 = plot_2.T
plot_stack = np.hstack((plot_11, plot_22))
plot_stack = pd.DataFrame(plot_stack)
installment_grade = new_data[(slice(None, None, None), 6)]
installment_grade = pd.DataFrame(installment_grade)
status_installment_grade = pd.DataFrame(np.hstack((installment_grade, status_labels)), columns=['installment_grade', 'status_labels'])
Charged_off_grade = status_installment_grade[(status_installment_grade.status_labels == 1)]
temp_11 = Charged_off_grade.iloc[(slice(None, None, None), 0)].values
plot_grade = np.array(np.unique(temp_11, return_counts=True))
plot_grade_11 = plot_grade.T
Fully_Paid_grade = status_installment_grade[(status_installment_grade.status_labels == 0)]
temp_22 = Fully_Paid_grade.iloc[(slice(None, None, None), 0)].values
plot_grade_2 = np.array(np.unique(temp_22, return_counts=True))
plot_grade_22 = plot_grade_2.T
plot_stack_1 = np.hstack((plot_grade_11, plot_grade_22))
plot_stack_1 = pd.DataFrame(plot_stack_1)
home_status = new_data_df[10]
home_status = pd.DataFrame(home_status)
status_home_status = pd.DataFrame(np.hstack((home_status, status_labels)), columns=['home_status', 'status_labels'])
Charged_off_home_status = status_home_status[(status_home_status.status_labels == 1)]
temp_41 = Charged_off_home_status.iloc[(slice(None, None, None), 0)].values
plot_home_status = np.array(np.unique(temp_41, return_counts=True))
plot_home_status_44 = pd.DataFrame(plot_home_status.T)
Fully_Paid_home_status = status_home_status[(status_home_status.status_labels == 0)]
return Fully_Paid_home_status
#=============
# Function 182
def cleaning_func_46(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[(slice(None, None, None), slice(2, (- 30), None))]
new_data_df = pd.DataFrame(new_data)
installment_amt = new_data[(slice(None, None, None), 5)]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
status_new = new_data_df[14]
factored_status = pd.factorize(status_new)
status_labels = pd.DataFrame(factored_status[0])
status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt, status_labels)), columns=['installment_bin', 'status_labels'])
Charged_off = status_installment_groups[(status_installment_groups.status_labels == 1)]
temp_1 = Charged_off.iloc[(slice(None, None, None), 0)].values
plot_1 = np.array(np.unique(temp_1, return_counts=True))
plot_1 = plot_1[(slice(None, None, None), slice(None, (- 1), None))]
plot_11 = plot_1.T
Fully_paid = status_installment_groups[(status_installment_groups.status_labels == 0)]
temp_2 = Fully_paid.iloc[(slice(None, None, None), 0)].values
plot_2 = np.array(np.unique(temp_2, return_counts=True))
plot_22 = plot_2.T
plot_stack = np.hstack((plot_11, plot_22))
plot_stack = pd.DataFrame(plot_stack)
installment_grade = new_data[(slice(None, None, None), 6)]
installment_grade = pd.DataFrame(installment_grade)
status_installment_grade = pd.DataFrame(np.hstack((installment_grade, status_labels)), columns=['installment_grade', 'status_labels'])
Charged_off_grade = status_installment_grade[(status_installment_grade.status_labels == 1)]
temp_11 = Charged_off_grade.iloc[(slice(None, None, None), 0)].values
plot_grade = np.array(np.unique(temp_11, return_counts=True))
plot_grade_11 = plot_grade.T
Fully_Paid_grade = status_installment_grade[(status_installment_grade.status_labels == 0)]
temp_22 = Fully_Paid_grade.iloc[(slice(None, None, None), 0)].values
plot_grade_2 = np.array(np.unique(temp_22, return_counts=True))
plot_grade_22 = plot_grade_2.T
plot_stack_1 = np.hstack((plot_grade_11, plot_grade_22))
plot_stack_1 = pd.DataFrame(plot_stack_1)
home_status = new_data_df[10]
home_status = pd.DataFrame(home_status)
status_home_status = pd.DataFrame(np.hstack((home_status, status_labels)), columns=['home_status', 'status_labels'])
Charged_off_home_status = status_home_status[(status_home_status.status_labels == 1)]
temp_41 = Charged_off_home_status.iloc[(slice(None, None, None), 0)].values
plot_home_status = np.array(np.unique(temp_41, return_counts=True))
plot_home_status_44 = pd.DataFrame(plot_home_status.T)
Fully_Paid_home_status = status_home_status[(status_home_status.status_labels == 0)]
temp_42 = Fully_Paid_home_status.iloc[(slice(None, None, None), 0)].values
plot_home_status_2 = np.array(np.unique(temp_42, return_counts=True))
plot_home_status_55 = pd.DataFrame(plot_home_status_2.T)
plot_home_status_55 = plot_home_status_55.drop(0)
plot_stack_3 = np.hstack((plot_home_status_44, plot_home_status_55))
plot_stack_3 = pd.DataFrame(plot_stack_3)
plot_stack_3 = plot_stack_3.drop(plot_stack_3.columns[2], axis=1)
plot_stack_3.columns = ['Home Status', 'Charged Off', 'Fully Paid']
return plot_stack_3
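# --- Usage sketch (illustrative only) ---
# The table returned by cleaning_func_46 can be reduced to a charge-off rate
# per home-ownership status; the rate column added here is an assumption for
# presentation, not part of the original kernel.
def _demo_home_status_rate(data):
    summary = cleaning_func_46(data)
    counts = summary[['Charged Off', 'Fully Paid']].astype(float)
    summary['Charge-off rate'] = counts['Charged Off'] / (counts['Charged Off'] + counts['Fully Paid'])
    return summary.sort_values('Charge-off rate', ascending=False)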
#=============
# Function 183
def cleaning_func_47(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[(slice(None, None, None), slice(2, (- 30), None))]
new_data_df = pd.DataFrame(new_data)
installment_amt = new_data[(slice(None, None, None), 5)]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
status_new = new_data_df[14]
factored_status = pd.factorize(status_new)
status_labels = pd.DataFrame(factored_status[0])
status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt, status_labels)), columns=['installment_bin', 'status_labels'])
Charged_off = status_installment_groups[(status_installment_groups.status_labels == 1)]
temp_1 = Charged_off.iloc[(slice(None, None, None), 0)].values
plot_1 = np.array(np.unique(temp_1, return_counts=True))
plot_1 = plot_1[(slice(None, None, None), slice(None, (- 1), None))]
plot_11 = plot_1.T
Fully_paid = status_installment_groups[(status_installment_groups.status_labels == 0)]
temp_2 = Fully_paid.iloc[(slice(None, None, None), 0)].values
plot_2 = np.array(np.unique(temp_2, return_counts=True))
plot_22 = plot_2.T
plot_stack = np.hstack((plot_11, plot_22))
plot_stack = pd.DataFrame(plot_stack)
installment_grade = new_data[(slice(None, None, None), 6)]
installment_grade = pd.DataFrame(installment_grade)
status_installment_grade = pd.DataFrame(np.hstack((installment_grade, status_labels)), columns=['installment_grade', 'status_labels'])
Charged_off_grade = status_installment_grade[(status_installment_grade.status_labels == 1)]
temp_11 = Charged_off_grade.iloc[(slice(None, None, None), 0)].values
plot_grade = np.array(np.unique(temp_11, return_counts=True))
plot_grade_11 = plot_grade.T
Fully_Paid_grade = status_installment_grade[(status_installment_grade.status_labels == 0)]
temp_22 = Fully_Paid_grade.iloc[(slice(None, None, None), 0)].values
plot_grade_2 = np.array(np.unique(temp_22, return_counts=True))
plot_grade_22 = plot_grade_2.T
plot_stack_1 = np.hstack((plot_grade_11, plot_grade_22))
plot_stack_1 = pd.DataFrame(plot_stack_1)
home_status = new_data_df[10]
home_status = pd.DataFrame(home_status)
status_home_status = pd.DataFrame(np.hstack((home_status, status_labels)), columns=['home_status', 'status_labels'])
Charged_off_home_status = status_home_status[(status_home_status.status_labels == 1)]
return Charged_off_home_status
#=============
# Function 184
def cleaning_func_48(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[(slice(None, None, None), slice(2, (- 30), None))]
new_data_df = pd.DataFrame(new_data)
installment_amt = new_data[(slice(None, None, None), 5)]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
status_new = new_data_df[14]
factored_status = pd.factorize(status_new)
status_labels = pd.DataFrame(factored_status[0])
status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt, status_labels)), columns=['installment_bin', 'status_labels'])
Charged_off = status_installment_groups[(status_installment_groups.status_labels == 1)]
temp_1 = Charged_off.iloc[(slice(None, None, None), 0)].values
plot_1 = np.array(np.unique(temp_1, return_counts=True))
plot_1 = plot_1[(slice(None, None, None), slice(None, (- 1), None))]
plot_11 = plot_1.T
Fully_paid = status_installment_groups[(status_installment_groups.status_labels == 0)]
temp_2 = Fully_paid.iloc[(slice(None, None, None), 0)].values
plot_2 = np.array(np.unique(temp_2, return_counts=True))
plot_22 = plot_2.T
plot_stack = np.hstack((plot_11, plot_22))
plot_stack = pd.DataFrame(plot_stack)
installment_grade = new_data[(slice(None, None, None), 6)]
installment_grade = pd.DataFrame(installment_grade)
status_installment_grade = pd.DataFrame(np.hstack((installment_grade, status_labels)), columns=['installment_grade', 'status_labels'])
Charged_off_grade = status_installment_grade[(status_installment_grade.status_labels == 1)]
temp_11 = Charged_off_grade.iloc[(slice(None, None, None), 0)].values
plot_grade = np.array(np.unique(temp_11, return_counts=True))
plot_grade_11 = plot_grade.T
Fully_Paid_grade = status_installment_grade[(status_installment_grade.status_labels == 0)]
temp_22 = Fully_Paid_grade.iloc[(slice(None, None, None), 0)].values
plot_grade_2 = np.array(np.unique(temp_22, return_counts=True))
plot_grade_22 = plot_grade_2.T
plot_stack_1 = np.hstack((plot_grade_11, plot_grade_22))
plot_stack_1 = pd.DataFrame(plot_stack_1)
home_status = new_data_df[10]
home_status = pd.DataFrame(home_status)
status_home_status = pd.DataFrame(np.hstack((home_status, status_labels)), columns=['home_status', 'status_labels'])
Charged_off_home_status = status_home_status[(status_home_status.status_labels == 1)]
temp_41 = Charged_off_home_status.iloc[(slice(None, None, None), 0)].values
plot_home_status = np.array(np.unique(temp_41, return_counts=True))
plot_home_status_44 = pd.DataFrame(plot_home_status.T)
return plot_home_status_44
#=============
# Function 185
def cleaning_func_49(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[(slice(None, None, None), slice(2, (- 30), None))]
new_data_df = pd.DataFrame(new_data)
installment_amt = new_data[(slice(None, None, None), 5)]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
status_new = new_data_df[14]
factored_status = pd.factorize(status_new)
status_labels = pd.DataFrame(factored_status[0])
status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt, status_labels)), columns=['installment_bin', 'status_labels'])
Charged_off = status_installment_groups[(status_installment_groups.status_labels == 1)]
temp_1 = Charged_off.iloc[(slice(None, None, None), 0)].values
plot_1 = np.array(np.unique(temp_1, return_counts=True))
plot_1 = plot_1[(slice(None, None, None), slice(None, (- 1), None))]
plot_11 = plot_1.T
Fully_paid = status_installment_groups[(status_installment_groups.status_labels == 0)]
temp_2 = Fully_paid.iloc[(slice(None, None, None), 0)].values
plot_2 = np.array(np.unique(temp_2, return_counts=True))
plot_22 = plot_2.T
plot_stack = np.hstack((plot_11, plot_22))
plot_stack = pd.DataFrame(plot_stack)
installment_grade = new_data[(slice(None, None, None), 6)]
installment_grade = pd.DataFrame(installment_grade)
status_installment_grade = pd.DataFrame(np.hstack((installment_grade, status_labels)), columns=['installment_grade', 'status_labels'])
Charged_off_grade = status_installment_grade[(status_installment_grade.status_labels == 1)]
temp_11 = Charged_off_grade.iloc[(slice(None, None, None), 0)].values
plot_grade = np.array(np.unique(temp_11, return_counts=True))
plot_grade_11 = plot_grade.T
Fully_Paid_grade = status_installment_grade[(status_installment_grade.status_labels == 0)]
temp_22 = Fully_Paid_grade.iloc[(slice(None, None, None), 0)].values
plot_grade_2 = np.array(np.unique(temp_22, return_counts=True))
plot_grade_22 = plot_grade_2.T
plot_stack_1 = np.hstack((plot_grade_11, plot_grade_22))
plot_stack_1 = pd.DataFrame(plot_stack_1)
home_status = new_data_df[10]
home_status = pd.DataFrame(home_status)
return home_status
#=============
# Function 186
def cleaning_func_50(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[(slice(None, None, None), slice(2, (- 30), None))]
new_data_df = pd.DataFrame(new_data)
installment_amt = new_data[(slice(None, None, None), 5)]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
status_new = new_data_df[14]
factored_status = pd.factorize(status_new)
status_labels = pd.DataFrame(factored_status[0])
status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt, status_labels)), columns=['installment_bin', 'status_labels'])
Charged_off = status_installment_groups[(status_installment_groups.status_labels == 1)]
temp_1 = Charged_off.iloc[(slice(None, None, None), 0)].values
plot_1 = np.array(np.unique(temp_1, return_counts=True))
plot_1 = plot_1[(slice(None, None, None), slice(None, (- 1), None))]
plot_11 = plot_1.T
Fully_paid = status_installment_groups[(status_installment_groups.status_labels == 0)]
temp_2 = Fully_paid.iloc[(slice(None, None, None), 0)].values
plot_2 = np.array(np.unique(temp_2, return_counts=True))
plot_22 = plot_2.T
plot_stack = np.hstack((plot_11, plot_22))
plot_stack = pd.DataFrame(plot_stack)
installment_grade = new_data[(slice(None, None, None), 6)]
installment_grade = pd.DataFrame(installment_grade)
status_installment_grade = pd.DataFrame(np.hstack((installment_grade, status_labels)), columns=['installment_grade', 'status_labels'])
Charged_off_grade = status_installment_grade[(status_installment_grade.status_labels == 1)]
temp_11 = Charged_off_grade.iloc[(slice(None, None, None), 0)].values
plot_grade = np.array(np.unique(temp_11, return_counts=True))
plot_grade_11 = plot_grade.T
Fully_Paid_grade = status_installment_grade[(status_installment_grade.status_labels == 0)]
temp_22 = Fully_Paid_grade.iloc[(slice(None, None, None), 0)].values
plot_grade_2 = np.array(np.unique(temp_22, return_counts=True))
plot_grade_22 = plot_grade_2.T
plot_stack_1 = np.hstack((plot_grade_11, plot_grade_22))
plot_stack_1 = pd.DataFrame(plot_stack_1)
home_status = new_data_df[10]
home_status = pd.DataFrame(home_status)
status_home_status = pd.DataFrame(np.hstack((home_status, status_labels)), columns=['home_status', 'status_labels'])
Charged_off_home_status = status_home_status[(status_home_status.status_labels == 1)]
temp_41 = Charged_off_home_status.iloc[(slice(None, None, None), 0)].values
plot_home_status = np.array(np.unique(temp_41, return_counts=True))
plot_home_status_44 = pd.DataFrame(plot_home_status.T)
Fully_Paid_home_status = status_home_status[(status_home_status.status_labels == 0)]
temp_42 = Fully_Paid_home_status.iloc[(slice(None, None, None), 0)].values
plot_home_status_2 = np.array(np.unique(temp_42, return_counts=True))
plot_home_status_55 = pd.DataFrame(plot_home_status_2.T)
return plot_home_status_55
#=============
# Function 187
def cleaning_func_51(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[(slice(None, None, None), slice(2, (- 30), None))]
new_data_df = pd.DataFrame(new_data)
installment_amt = new_data[(slice(None, None, None), 5)]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
status_new = new_data_df[14]
factored_status = pd.factorize(status_new)
status_labels = pd.DataFrame(factored_status[0])
status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt, status_labels)), columns=['installment_bin', 'status_labels'])
Charged_off = status_installment_groups[(status_installment_groups.status_labels == 1)]
temp_1 = Charged_off.iloc[(slice(None, None, None), 0)].values
plot_1 = np.array(np.unique(temp_1, return_counts=True))
plot_1 = plot_1[(slice(None, None, None), slice(None, (- 1), None))]
plot_11 = plot_1.T
Fully_paid = status_installment_groups[(status_installment_groups.status_labels == 0)]
temp_2 = Fully_paid.iloc[(slice(None, None, None), 0)].values
plot_2 = np.array(np.unique(temp_2, return_counts=True))
plot_22 = plot_2.T
plot_stack = np.hstack((plot_11, plot_22))
plot_stack = pd.DataFrame(plot_stack)
installment_grade = new_data[(slice(None, None, None), 6)]
installment_grade = pd.DataFrame(installment_grade)
status_installment_grade = pd.DataFrame(np.hstack((installment_grade, status_labels)), columns=['installment_grade', 'status_labels'])
Charged_off_grade = status_installment_grade[(status_installment_grade.status_labels == 1)]
temp_11 = Charged_off_grade.iloc[(slice(None, None, None), 0)].values
plot_grade = np.array(np.unique(temp_11, return_counts=True))
plot_grade_11 = plot_grade.T
Fully_Paid_grade = status_installment_grade[(status_installment_grade.status_labels == 0)]
temp_22 = Fully_Paid_grade.iloc[(slice(None, None, None), 0)].values
plot_grade_2 = np.array(np.unique(temp_22, return_counts=True))
plot_grade_22 = plot_grade_2.T
plot_stack_1 = np.hstack((plot_grade_11, plot_grade_22))
plot_stack_1 = pd.DataFrame(plot_stack_1)
home_status = new_data_df[10]
home_status = pd.DataFrame(home_status)
status_home_status = pd.DataFrame(np.hstack((home_status, status_labels)), columns=['home_status', 'status_labels'])
Charged_off_home_status = status_home_status[(status_home_status.status_labels == 1)]
temp_41 = Charged_off_home_status.iloc[(slice(None, None, None), 0)].values
plot_home_status = np.array(np.unique(temp_41, return_counts=True))
plot_home_status_44 = pd.DataFrame(plot_home_status.T)
Fully_Paid_home_status = status_home_status[(status_home_status.status_labels == 0)]
temp_42 = Fully_Paid_home_status.iloc[(slice(None, None, None), 0)].values
plot_home_status_2 = np.array(np.unique(temp_42, return_counts=True))
plot_home_status_55 = pd.DataFrame(plot_home_status_2.T)
plot_home_status_55 = plot_home_status_55.drop(0)
return plot_home_status_55
#=============
# Function 188
def cleaning_func_52(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[(slice(None, None, None), slice(2, (- 30), None))]
new_data_df = pd.DataFrame(new_data)
installment_amt = new_data[(slice(None, None, None), 5)]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
status_new = new_data_df[14]
factored_status = pd.factorize(status_new)
status_labels = pd.DataFrame(factored_status[0])
status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt, status_labels)), columns=['installment_bin', 'status_labels'])
Charged_off = status_installment_groups[(status_installment_groups.status_labels == 1)]
temp_1 = Charged_off.iloc[(slice(None, None, None), 0)].values
plot_1 = np.array(np.unique(temp_1, return_counts=True))
plot_1 = plot_1[(slice(None, None, None), slice(None, (- 1), None))]
plot_11 = plot_1.T
Fully_paid = status_installment_groups[(status_installment_groups.status_labels == 0)]
temp_2 = Fully_paid.iloc[(slice(None, None, None), 0)].values
plot_2 = np.array(np.unique(temp_2, return_counts=True))
plot_22 = plot_2.T
plot_stack = np.hstack((plot_11, plot_22))
plot_stack = pd.DataFrame(plot_stack)
installment_grade = new_data[(slice(None, None, None), 6)]
installment_grade = pd.DataFrame(installment_grade)
status_installment_grade = pd.DataFrame(np.hstack((installment_grade, status_labels)), columns=['installment_grade', 'status_labels'])
Charged_off_grade = status_installment_grade[(status_installment_grade.status_labels == 1)]
temp_11 = Charged_off_grade.iloc[(slice(None, None, None), 0)].values
plot_grade = np.array(np.unique(temp_11, return_counts=True))
plot_grade_11 = plot_grade.T
Fully_Paid_grade = status_installment_grade[(status_installment_grade.status_labels == 0)]
temp_22 = Fully_Paid_grade.iloc[(slice(None, None, None), 0)].values
plot_grade_2 = np.array(np.unique(temp_22, return_counts=True))
plot_grade_22 = plot_grade_2.T
plot_stack_1 = np.hstack((plot_grade_11, plot_grade_22))
plot_stack_1 = pd.DataFrame(plot_stack_1)
home_status = new_data_df[10]
home_status = pd.DataFrame(home_status)
status_home_status = pd.DataFrame(np.hstack((home_status, status_labels)))
return status_home_status
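# Note: the frame returned above keeps the default integer column labels:
# column 0 holds the home-ownership value and column 1 the factorized status
# code (0 = Fully Paid, 1 = Charged Off).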
#=============
# Function 189
def cleaning_func_53(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[(slice(None, None, None), slice(2, (- 30), None))]
new_data_df = pd.DataFrame(new_data)
installment_amt = new_data[(slice(None, None, None), 5)]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
status_new = new_data_df[14]
factored_status = pd.factorize(status_new)
status_labels = pd.DataFrame(factored_status[0])
status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt, status_labels)), columns=['installment_bin', 'status_labels'])
Charged_off = status_installment_groups[(status_installment_groups.status_labels == 1)]
temp_1 = Charged_off.iloc[(slice(None, None, None), 0)].values
plot_1 = np.array(np.unique(temp_1, return_counts=True))
plot_1 = plot_1[(slice(None, None, None), slice(None, (- 1), None))]
plot_11 = plot_1.T
Fully_paid = status_installment_groups[(status_installment_groups.status_labels == 0)]
temp_2 = Fully_paid.iloc[(slice(None, None, None), 0)].values
plot_2 = np.array(np.unique(temp_2, return_counts=True))
plot_22 = plot_2.T
plot_stack = np.hstack((plot_11, plot_22))
plot_stack = pd.DataFrame(plot_stack)
installment_grade = new_data[(slice(None, None, None), 6)]
installment_grade = pd.DataFrame(installment_grade)
status_installment_grade = pd.DataFrame(np.hstack((installment_grade, status_labels)), columns=['installment_grade', 'status_labels'])
Charged_off_grade = status_installment_grade[(status_installment_grade.status_labels == 1)]
temp_11 = Charged_off_grade.iloc[(slice(None, None, None), 0)].values
plot_grade = np.array(np.unique(temp_11, return_counts=True))
plot_grade_11 = plot_grade.T
Fully_Paid_grade = status_installment_grade[(status_installment_grade.status_labels == 0)]
temp_22 = Fully_Paid_grade.iloc[(slice(None, None, None), 0)].values
plot_grade_2 = np.array(np.unique(temp_22, return_counts=True))
plot_grade_22 = plot_grade_2.T
plot_stack_1 = np.hstack((plot_grade_11, plot_grade_22))
plot_stack_1 = pd.DataFrame(plot_stack_1)
home_status = new_data_df[10]
home_status = pd.DataFrame(home_status)
status_home_status = pd.DataFrame(np.hstack((home_status, status_labels)), columns=['home_status', 'status_labels'])
Charged_off_home_status = status_home_status[(status_home_status.status_labels == 1)]
temp_41 = Charged_off_home_status.iloc[(slice(None, None, None), 0)].values
plot_home_status = np.array(np.unique(temp_41, return_counts=True))
plot_home_status_44 = pd.DataFrame(plot_home_status.T)
Fully_Paid_home_status = status_home_status[(status_home_status.status_labels == 0)]
temp_42 = Fully_Paid_home_status.iloc[(slice(None, None, None), 0)].values
plot_home_status_2 = np.array(np.unique(temp_42, return_counts=True))
plot_home_status_55 = pd.DataFrame(plot_home_status_2.T)
plot_home_status_55 = plot_home_status_55.drop(0)
plot_stack_3 = np.hstack((plot_home_status_44, plot_home_status_55))
plot_stack_3 = pd.DataFrame(plot_stack_3)
annual_income = new_data[(slice(None, None, None), 11)]
bins_2 = np.array([40000, 70000, 100000, 150000])
annual_income = annual_income.astype(float).reshape(annual_income.size, 1)
binned_annual_income = pd.DataFrame(np.digitize(annual_income, bins_2))
status_annual_groups = pd.DataFrame(np.hstack((binned_annual_income, status_labels)), columns=['annual_income_bin', 'status_labels'])
Charged_off_annual_income = status_annual_groups[(status_annual_groups.status_labels == 1)]
temp_51 = Charged_off_annual_income.iloc[(slice(None, None, None), 0)].values
plot_annual_income = np.array(np.unique(temp_51, return_counts=True))
plot_annual_income_66 = pd.DataFrame(plot_annual_income.T)
Fully_Paid_annual_income = status_annual_groups[(status_annual_groups.status_labels == 0)]
temp_52 = Fully_Paid_annual_income.iloc[(slice(None, None, None), 0)].values
plot_annual_income_2 = np.array(np.unique(temp_52, return_counts=True))
plot_annual_income_77 = pd.DataFrame(plot_annual_income_2.T)
return plot_annual_income_77
#=============
# Function 190
def cleaning_func_54(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[(slice(None, None, None), slice(2, (- 30), None))]
new_data_df = pd.DataFrame(new_data)
installment_amt = new_data[(slice(None, None, None), 5)]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
status_new = new_data_df[14]
factored_status = pd.factorize(status_new)
status_labels = pd.DataFrame(factored_status[0])
status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt, status_labels)), columns=['installment_bin', 'status_labels'])
Charged_off = status_installment_groups[(status_installment_groups.status_labels == 1)]
temp_1 = Charged_off.iloc[(slice(None, None, None), 0)].values
plot_1 = np.array(np.unique(temp_1, return_counts=True))
plot_1 = plot_1[(slice(None, None, None), slice(None, (- 1), None))]
plot_11 = plot_1.T
Fully_paid = status_installment_groups[(status_installment_groups.status_labels == 0)]
temp_2 = Fully_paid.iloc[(slice(None, None, None), 0)].values
plot_2 = np.array(np.unique(temp_2, return_counts=True))
plot_22 = plot_2.T
plot_stack = np.hstack((plot_11, plot_22))
plot_stack = pd.DataFrame(plot_stack)
installment_grade = new_data[(slice(None, None, None), 6)]
installment_grade = pd.DataFrame(installment_grade)
status_installment_grade = pd.DataFrame(np.hstack((installment_grade, status_labels)), columns=['installment_grade', 'status_labels'])
Charged_off_grade = status_installment_grade[(status_installment_grade.status_labels == 1)]
temp_11 = Charged_off_grade.iloc[(slice(None, None, None), 0)].values
plot_grade = np.array(np.unique(temp_11, return_counts=True))
plot_grade_11 = plot_grade.T
Fully_Paid_grade = status_installment_grade[(status_installment_grade.status_labels == 0)]
temp_22 = Fully_Paid_grade.iloc[(slice(None, None, None), 0)].values
plot_grade_2 = np.array(np.unique(temp_22, return_counts=True))
plot_grade_22 = plot_grade_2.T
plot_stack_1 = np.hstack((plot_grade_11, plot_grade_22))
plot_stack_1 = pd.DataFrame(plot_stack_1)
home_status = new_data_df[10]
home_status = pd.DataFrame(home_status)
status_home_status = pd.DataFrame(np.hstack((home_status, status_labels)), columns=['home_status', 'status_labels'])
Charged_off_home_status = status_home_status[(status_home_status.status_labels == 1)]
temp_41 = Charged_off_home_status.iloc[(slice(None, None, None), 0)].values
plot_home_status = np.array(np.unique(temp_41, return_counts=True))
plot_home_status_44 = pd.DataFrame(plot_home_status.T)
Fully_Paid_home_status = status_home_status[(status_home_status.status_labels == 0)]
temp_42 = Fully_Paid_home_status.iloc[(slice(None, None, None), 0)].values
plot_home_status_2 = np.array(np.unique(temp_42, return_counts=True))
plot_home_status_55 = pd.DataFrame(plot_home_status_2.T)
plot_home_status_55 = plot_home_status_55.drop(0)
plot_stack_3 = np.hstack((plot_home_status_44, plot_home_status_55))
plot_stack_3 = pd.DataFrame(plot_stack_3)
annual_income = new_data[(slice(None, None, None), 11)]
bins_2 = np.array([40000, 70000, 100000, 150000])
annual_income = annual_income.astype(float).reshape(annual_income.size, 1)
binned_annual_income = pd.DataFrame(np.digitize(annual_income, bins_2))
status_annual_groups = pd.DataFrame(np.hstack((binned_annual_income, status_labels)))
return status_annual_groups
#=============
# Function 191
def cleaning_func_55(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[(slice(None, None, None), slice(2, (- 30), None))]
new_data_df = pd.DataFrame(new_data)
installment_amt = new_data[(slice(None, None, None), 5)]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
status_new = new_data_df[14]
factored_status = pd.factorize(status_new)
status_labels = pd.DataFrame(factored_status[0])
status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt, status_labels)), columns=['installment_bin', 'status_labels'])
Charged_off = status_installment_groups[(status_installment_groups.status_labels == 1)]
temp_1 = Charged_off.iloc[(slice(None, None, None), 0)].values
plot_1 = np.array(np.unique(temp_1, return_counts=True))
plot_1 = plot_1[(slice(None, None, None), slice(None, (- 1), None))]
plot_11 = plot_1.T
Fully_paid = status_installment_groups[(status_installment_groups.status_labels == 0)]
temp_2 = Fully_paid.iloc[(slice(None, None, None), 0)].values
plot_2 = np.array(np.unique(temp_2, return_counts=True))
plot_22 = plot_2.T
plot_stack = np.hstack((plot_11, plot_22))
plot_stack = pd.DataFrame(plot_stack)
installment_grade = new_data[(slice(None, None, None), 6)]
installment_grade = pd.DataFrame(installment_grade)
status_installment_grade = pd.DataFrame(np.hstack((installment_grade, status_labels)), columns=['installment_grade', 'status_labels'])
Charged_off_grade = status_installment_grade[(status_installment_grade.status_labels == 1)]
temp_11 = Charged_off_grade.iloc[(slice(None, None, None), 0)].values
plot_grade = np.array(np.unique(temp_11, return_counts=True))
plot_grade_11 = plot_grade.T
Fully_Paid_grade = status_installment_grade[(status_installment_grade.status_labels == 0)]
temp_22 = Fully_Paid_grade.iloc[(slice(None, None, None), 0)].values
plot_grade_2 = np.array(np.unique(temp_22, return_counts=True))
plot_grade_22 = plot_grade_2.T
plot_stack_1 = np.hstack((plot_grade_11, plot_grade_22))
plot_stack_1 = pd.DataFrame(plot_stack_1)
home_status = new_data_df[10]
home_status = pd.DataFrame(home_status)
status_home_status = pd.DataFrame(np.hstack((home_status, status_labels)), columns=['home_status', 'status_labels'])
Charged_off_home_status = status_home_status[(status_home_status.status_labels == 1)]
temp_41 = Charged_off_home_status.iloc[(slice(None, None, None), 0)].values
plot_home_status = np.array(np.unique(temp_41, return_counts=True))
plot_home_status_44 = pd.DataFrame(plot_home_status.T)
Fully_Paid_home_status = status_home_status[(status_home_status.status_labels == 0)]
temp_42 = Fully_Paid_home_status.iloc[(slice(None, None, None), 0)].values
plot_home_status_2 = np.array(np.unique(temp_42, return_counts=True))
plot_home_status_55 = pd.DataFrame(plot_home_status_2.T)
plot_home_status_55 = plot_home_status_55.drop(0)
plot_stack_3 = np.hstack((plot_home_status_44, plot_home_status_55))
plot_stack_3 = pd.DataFrame(plot_stack_3)
annual_income = new_data[(slice(None, None, None), 11)]
bins_2 = np.array([40000, 70000, 100000, 150000])
annual_income = annual_income.astype(float).reshape(annual_income.size, 1)
binned_annual_income = pd.DataFrame(np.digitize(annual_income, bins_2))
status_annual_groups = pd.DataFrame(np.hstack((binned_annual_income, status_labels)), columns=['annual_income_bin', 'status_labels'])
Charged_off_annual_income = status_annual_groups[(status_annual_groups.status_labels == 1)]
temp_51 = Charged_off_annual_income.iloc[(slice(None, None, None), 0)].values
plot_annual_income = np.array(np.unique(temp_51, return_counts=True))
plot_annual_income_66 = pd.DataFrame(plot_annual_income.T)
return plot_annual_income_66
#=============
# Function 192
def cleaning_func_56(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[(slice(None, None, None), slice(2, (- 30), None))]
new_data_df = pd.DataFrame(new_data)
installment_amt = new_data[(slice(None, None, None), 5)]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
status_new = new_data_df[14]
factored_status = pd.factorize(status_new)
status_labels = pd.DataFrame(factored_status[0])
status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt, status_labels)), columns=['installment_bin', 'status_labels'])
Charged_off = status_installment_groups[(status_installment_groups.status_labels == 1)]
temp_1 = Charged_off.iloc[(slice(None, None, None), 0)].values
plot_1 = np.array(np.unique(temp_1, return_counts=True))
plot_1 = plot_1[(slice(None, None, None), slice(None, (- 1), None))]
plot_11 = plot_1.T
Fully_paid = status_installment_groups[(status_installment_groups.status_labels == 0)]
temp_2 = Fully_paid.iloc[(slice(None, None, None), 0)].values
plot_2 = np.array(np.unique(temp_2, return_counts=True))
plot_22 = plot_2.T
plot_stack = np.hstack((plot_11, plot_22))
plot_stack = pd.DataFrame(plot_stack)
installment_grade = new_data[(slice(None, None, None), 6)]
installment_grade = pd.DataFrame(installment_grade)
status_installment_grade = pd.DataFrame(np.hstack((installment_grade, status_labels)), columns=['installment_grade', 'status_labels'])
Charged_off_grade = status_installment_grade[(status_installment_grade.status_labels == 1)]
temp_11 = Charged_off_grade.iloc[(slice(None, None, None), 0)].values
plot_grade = np.array(np.unique(temp_11, return_counts=True))
plot_grade_11 = plot_grade.T
Fully_Paid_grade = status_installment_grade[(status_installment_grade.status_labels == 0)]
temp_22 = Fully_Paid_grade.iloc[(slice(None, None, None), 0)].values
plot_grade_2 = np.array(np.unique(temp_22, return_counts=True))
plot_grade_22 = plot_grade_2.T
plot_stack_1 = np.hstack((plot_grade_11, plot_grade_22))
plot_stack_1 = pd.DataFrame(plot_stack_1)
home_status = new_data_df[10]
home_status = pd.DataFrame(home_status)
status_home_status = pd.DataFrame(np.hstack((home_status, status_labels)), columns=['home_status', 'status_labels'])
Charged_off_home_status = status_home_status[(status_home_status.status_labels == 1)]
temp_41 = Charged_off_home_status.iloc[(slice(None, None, None), 0)].values
plot_home_status = np.array(np.unique(temp_41, return_counts=True))
plot_home_status_44 = pd.DataFrame(plot_home_status.T)
Fully_Paid_home_status = status_home_status[(status_home_status.status_labels == 0)]
temp_42 = Fully_Paid_home_status.iloc[(slice(None, None, None), 0)].values
plot_home_status_2 = np.array(np.unique(temp_42, return_counts=True))
plot_home_status_55 = pd.DataFrame(plot_home_status_2.T)
plot_home_status_55 = plot_home_status_55.drop(0)
plot_stack_3 = np.hstack((plot_home_status_44, plot_home_status_55))
plot_stack_3 = pd.DataFrame(plot_stack_3)
annual_income = new_data[(slice(None, None, None), 11)]
bins_2 = np.array([40000, 70000, 100000, 150000])
annual_income = annual_income.astype(float).reshape(annual_income.size, 1)
binned_annual_income = pd.DataFrame(np.digitize(annual_income, bins_2))
status_annual_groups = pd.DataFrame(np.hstack((binned_annual_income, status_labels)), columns=['annual_income_bin', 'status_labels'])
Charged_off_annual_income = status_annual_groups[(status_annual_groups.status_labels == 1)]
temp_51 = Charged_off_annual_income.iloc[(slice(None, None, None), 0)].values
plot_annual_income = np.array(np.unique(temp_51, return_counts=True))
plot_annual_income_66 = pd.DataFrame(plot_annual_income.T)
Fully_Paid_annual_income = status_annual_groups[(status_annual_groups.status_labels == 0)]
temp_52 = Fully_Paid_annual_income.iloc[(slice(None, None, None), 0)].values
plot_annual_income_2 = np.array(np.unique(temp_52, return_counts=True))
plot_annual_income_77 = pd.DataFrame(plot_annual_income_2.T)
plot_stack_4 = np.hstack((plot_annual_income_66, plot_annual_income_77))
plot_stack_4 = pd.DataFrame(plot_stack_4)
return plot_stack_4
#=============
# Function 193
def cleaning_func_57(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[(slice(None, None, None), slice(2, (- 30), None))]
new_data_df = pd.DataFrame(new_data)
installment_amt = new_data[(slice(None, None, None), 5)]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
status_new = new_data_df[14]
factored_status = pd.factorize(status_new)
status_labels = pd.DataFrame(factored_status[0])
status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt, status_labels)), columns=['installment_bin', 'status_labels'])
Charged_off = status_installment_groups[(status_installment_groups.status_labels == 1)]
temp_1 = Charged_off.iloc[(slice(None, None, None), 0)].values
plot_1 = np.array(np.unique(temp_1, return_counts=True))
plot_1 = plot_1[(slice(None, None, None), slice(None, (- 1), None))]
plot_11 = plot_1.T
Fully_paid = status_installment_groups[(status_installment_groups.status_labels == 0)]
temp_2 = Fully_paid.iloc[(slice(None, None, None), 0)].values
plot_2 = np.array(np.unique(temp_2, return_counts=True))
plot_22 = plot_2.T
plot_stack = np.hstack((plot_11, plot_22))
plot_stack = pd.DataFrame(plot_stack)
installment_grade = new_data[(slice(None, None, None), 6)]
installment_grade = pd.DataFrame(installment_grade)
status_installment_grade = pd.DataFrame(np.hstack((installment_grade, status_labels)), columns=['installment_grade', 'status_labels'])
Charged_off_grade = status_installment_grade[(status_installment_grade.status_labels == 1)]
temp_11 = Charged_off_grade.iloc[(slice(None, None, None), 0)].values
plot_grade = np.array(np.unique(temp_11, return_counts=True))
plot_grade_11 = plot_grade.T
Fully_Paid_grade = status_installment_grade[(status_installment_grade.status_labels == 0)]
temp_22 = Fully_Paid_grade.iloc[(slice(None, None, None), 0)].values
plot_grade_2 = np.array(np.unique(temp_22, return_counts=True))
plot_grade_22 = plot_grade_2.T
plot_stack_1 = np.hstack((plot_grade_11, plot_grade_22))
plot_stack_1 = pd.DataFrame(plot_stack_1)
home_status = new_data_df[10]
home_status = pd.DataFrame(home_status)
status_home_status = pd.DataFrame(np.hstack((home_status, status_labels)), columns=['home_status', 'status_labels'])
Charged_off_home_status = status_home_status[(status_home_status.status_labels == 1)]
temp_41 = Charged_off_home_status.iloc[(slice(None, None, None), 0)].values
plot_home_status = np.array(np.unique(temp_41, return_counts=True))
plot_home_status_44 = pd.DataFrame(plot_home_status.T)
Fully_Paid_home_status = status_home_status[(status_home_status.status_labels == 0)]
temp_42 = Fully_Paid_home_status.iloc[(slice(None, None, None), 0)].values
plot_home_status_2 = np.array(np.unique(temp_42, return_counts=True))
plot_home_status_55 = pd.DataFrame(plot_home_status_2.T)
plot_home_status_55 = plot_home_status_55.drop(0)
plot_stack_3 = np.hstack((plot_home_status_44, plot_home_status_55))
plot_stack_3 = pd.DataFrame(plot_stack_3)
annual_income = new_data[(slice(None, None, None), 11)]
bins_2 = np.array([40000, 70000, 100000, 150000])
annual_income = annual_income.astype(float).reshape(annual_income.size, 1)
binned_annual_income = pd.DataFrame(np.digitize(annual_income, bins_2))
return binned_annual_income
#=============
# Function 194
def cleaning_func_58(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[(slice(None, None, None), slice(2, (- 30), None))]
new_data_df = pd.DataFrame(new_data)
installment_amt = new_data[(slice(None, None, None), 5)]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
status_new = new_data_df[14]
factored_status = pd.factorize(status_new)
status_labels = pd.DataFrame(factored_status[0])
status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt, status_labels)), columns=['installment_bin', 'status_labels'])
Charged_off = status_installment_groups[(status_installment_groups.status_labels == 1)]
temp_1 = Charged_off.iloc[(slice(None, None, None), 0)].values
plot_1 = np.array(np.unique(temp_1, return_counts=True))
plot_1 = plot_1[(slice(None, None, None), slice(None, (- 1), None))]
plot_11 = plot_1.T
Fully_paid = status_installment_groups[(status_installment_groups.status_labels == 0)]
temp_2 = Fully_paid.iloc[(slice(None, None, None), 0)].values
plot_2 = np.array(np.unique(temp_2, return_counts=True))
plot_22 = plot_2.T
plot_stack = np.hstack((plot_11, plot_22))
plot_stack = pd.DataFrame(plot_stack)
installment_grade = new_data[(slice(None, None, None), 6)]
installment_grade = pd.DataFrame(installment_grade)
status_installment_grade = pd.DataFrame(np.hstack((installment_grade, status_labels)), columns=['installment_grade', 'status_labels'])
Charged_off_grade = status_installment_grade[(status_installment_grade.status_labels == 1)]
temp_11 = Charged_off_grade.iloc[(slice(None, None, None), 0)].values
plot_grade = np.array(np.unique(temp_11, return_counts=True))
plot_grade_11 = plot_grade.T
Fully_Paid_grade = status_installment_grade[(status_installment_grade.status_labels == 0)]
temp_22 = Fully_Paid_grade.iloc[(slice(None, None, None), 0)].values
plot_grade_2 = np.array(np.unique(temp_22, return_counts=True))
plot_grade_22 = plot_grade_2.T
plot_stack_1 = np.hstack((plot_grade_11, plot_grade_22))
plot_stack_1 = pd.DataFrame(plot_stack_1)
home_status = new_data_df[10]
home_status = pd.DataFrame(home_status)
status_home_status = pd.DataFrame(np.hstack((home_status, status_labels)), columns=['home_status', 'status_labels'])
Charged_off_home_status = status_home_status[(status_home_status.status_labels == 1)]
temp_41 = Charged_off_home_status.iloc[(slice(None, None, None), 0)].values
plot_home_status = np.array(np.unique(temp_41, return_counts=True))
plot_home_status_44 = pd.DataFrame(plot_home_status.T)
Fully_Paid_home_status = status_home_status[(status_home_status.status_labels == 0)]
temp_42 = Fully_Paid_home_status.iloc[(slice(None, None, None), 0)].values
plot_home_status_2 = np.array(np.unique(temp_42, return_counts=True))
plot_home_status_55 = pd.DataFrame(plot_home_status_2.T)
plot_home_status_55 = plot_home_status_55.drop(0)
plot_stack_3 = np.hstack((plot_home_status_44, plot_home_status_55))
plot_stack_3 = pd.DataFrame(plot_stack_3)
annual_income = new_data[(slice(None, None, None), 11)]
bins_2 = np.array([40000, 70000, 100000, 150000])
annual_income = annual_income.astype(float).reshape(annual_income.size, 1)
binned_annual_income = pd.DataFrame(np.digitize(annual_income, bins_2))
status_annual_groups = pd.DataFrame(np.hstack((binned_annual_income, status_labels)), columns=['annual_income_bin', 'status_labels'])
Charged_off_annual_income = status_annual_groups[(status_annual_groups.status_labels == 1)]
temp_51 = Charged_off_annual_income.iloc[:, 0].values
plot_annual_income = np.array(np.unique(temp_51, return_counts=True))
plot_annual_income_66 = pd.DataFrame(plot_annual_income.T)
Fully_Paid_annual_income = status_annual_groups[(status_annual_groups.status_labels == 0)]
temp_52 = Fully_Paid_annual_income.iloc[:, 0].values
plot_annual_income_2 = np.array(np.unique(temp_52, return_counts=True))
plot_annual_income_77 = pd.DataFrame(plot_annual_income_2.T)
plot_stack_4 = np.hstack((plot_annual_income_66, plot_annual_income_77))
plot_stack_4 = pd.DataFrame(plot_stack_4)
plot_stack_4 = plot_stack_4.drop(plot_stack_4.columns[2], axis=1)
plot_stack_4.columns = ['Annual Income Group', 'Charged Off', 'Fully Paid']
return plot_stack_4
#=============
# Function 195
def cleaning_func_59(data):
# core cleaning code
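# Stacks 'Fully Paid' and 'Charged Off' rows, pairs binned annual income with the
# status label, and returns the rows labelled Fully Paid (status_labels == 0).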
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[:, 2:-30]
new_data_df = pd.DataFrame(new_data)
installment_amt = new_data[:, 5]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
status_new = new_data_df[14]
factored_status = np.array(pd.factorize(status_new))
status_labels = pd.DataFrame(factored_status[0])
status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt, status_labels))).rename(columns={1: 'status_labels'})
Charged_off = status_installment_groups[(status_installment_groups.status_labels == 1)]
temp_1 = Charged_off.iloc[:, 0].values
plot_1 = np.array(np.unique(temp_1, return_counts=True))
plot_1 = plot_1[:, :-1]
plot_11 = plot_1.T
Fully_paid = status_installment_groups[(status_installment_groups.status_labels == 0)]
temp_2 = Fully_paid.iloc[:, 0].values
plot_2 = np.array(np.unique(temp_2, return_counts=True))
plot_22 = plot_2.T
plot_stack = np.hstack((plot_11, plot_22))
plot_stack = pd.DataFrame(plot_stack)
installment_grade = new_data[:, 6]
installment_grade = pd.DataFrame(installment_grade)
status_installment_grade = pd.DataFrame(np.hstack((installment_grade, status_labels))).rename(columns={1: 'status_labels'})
Charged_off_grade = status_installment_grade[(status_installment_grade.status_labels == 1)]
temp_11 = Charged_off_grade.iloc[:, 0].values
plot_grade = np.array(np.unique(temp_11, return_counts=True))
plot_grade_11 = plot_grade.T
Fully_Paid_grade = status_installment_grade[(status_installment_grade.status_labels == 0)]
temp_22 = Fully_Paid_grade.iloc[:, 0].values
plot_grade_2 = np.array(np.unique(temp_22, return_counts=True))
plot_grade_22 = plot_grade_2.T
plot_stack_1 = np.hstack((plot_grade_11, plot_grade_22))
plot_stack_1 = pd.DataFrame(plot_stack_1)
home_status = new_data_df[10]
home_status = pd.DataFrame(home_status)
status_home_status = pd.DataFrame(np.hstack((home_status, status_labels))).rename(columns={1: 'status_labels'})
Charged_off_home_status = status_home_status[(status_home_status.status_labels == 1)]
temp_41 = Charged_off_home_status.iloc[:, 0].values
plot_home_status = np.array(np.unique(temp_41, return_counts=True))
plot_home_status_44 = pd.DataFrame(plot_home_status.T)
Fully_Paid_home_status = status_home_status[(status_home_status.status_labels == 0)]
temp_42 = Fully_Paid_home_status.iloc[:, 0].values
plot_home_status_2 = np.array(np.unique(temp_42, return_counts=True))
plot_home_status_55 = pd.DataFrame(plot_home_status_2.T)
plot_home_status_55 = plot_home_status_55.drop(0)
plot_stack_3 = np.hstack((plot_home_status_44, plot_home_status_55))
plot_stack_3 = pd.DataFrame(plot_stack_3)
annual_income = new_data[:, 11]
bins_2 = np.array([40000, 70000, 100000, 150000])
annual_income = annual_income.astype(float).reshape(annual_income.size, 1)
binned_annual_income = pd.DataFrame(np.digitize(annual_income, bins_2))
status_annual_groups = pd.DataFrame(np.hstack((binned_annual_income, status_labels))).rename(columns={1: 'status_labels'})
Charged_off_annual_income = status_annual_groups[(status_annual_groups.status_labels == 1)]
temp_51 = Charged_off_annual_income.iloc[:, 0].values
plot_annual_income = np.array(np.unique(temp_51, return_counts=True))
plot_annual_income_66 = pd.DataFrame(plot_annual_income.T)
Fully_Paid_annual_income = status_annual_groups[(status_annual_groups.status_labels == 0)]
return Fully_Paid_annual_income
#=============
# Function 196
def cleaning_func_60(data):
# core cleaning code
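# Stacks 'Fully Paid' and 'Charged Off' rows, pairs binned annual income with the
# status label, and returns the rows labelled Charged Off (status_labels == 1).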
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[:, 2:-30]
new_data_df = pd.DataFrame(new_data)
installment_amt = new_data[:, 5]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
status_new = new_data_df[14]
factored_status = np.array(pd.factorize(status_new))
status_labels = pd.DataFrame(factored_status[0])
status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt, status_labels))).rename(columns={1: 'status_labels'})
Charged_off = status_installment_groups[(status_installment_groups.status_labels == 1)]
temp_1 = Charged_off.iloc[:, 0].values
plot_1 = np.array(np.unique(temp_1, return_counts=True))
plot_1 = plot_1[:, :-1]
plot_11 = plot_1.T
Fully_paid = status_installment_groups[(status_installment_groups.status_labels == 0)]
temp_2 = Fully_paid.iloc[:, 0].values
plot_2 = np.array(np.unique(temp_2, return_counts=True))
plot_22 = plot_2.T
plot_stack = np.hstack((plot_11, plot_22))
plot_stack = pd.DataFrame(plot_stack)
installment_grade = new_data[:, 6]
installment_grade = pd.DataFrame(installment_grade)
status_installment_grade = pd.DataFrame(np.hstack((installment_grade, status_labels))).rename(columns={1: 'status_labels'})
Charged_off_grade = status_installment_grade[(status_installment_grade.status_labels == 1)]
temp_11 = Charged_off_grade.iloc[:, 0].values
plot_grade = np.array(np.unique(temp_11, return_counts=True))
plot_grade_11 = plot_grade.T
Fully_Paid_grade = status_installment_grade[(status_installment_grade.status_labels == 0)]
temp_22 = Fully_Paid_grade.iloc[:, 0].values
plot_grade_2 = np.array(np.unique(temp_22, return_counts=True))
plot_grade_22 = plot_grade_2.T
plot_stack_1 = np.hstack((plot_grade_11, plot_grade_22))
plot_stack_1 = pd.DataFrame(plot_stack_1)
home_status = new_data_df[10]
home_status = pd.DataFrame(home_status)
status_home_status = pd.DataFrame(np.hstack((home_status, status_labels))).rename(columns={1: 'status_labels'})
Charged_off_home_status = status_home_status[(status_home_status.status_labels == 1)]
temp_41 = Charged_off_home_status.iloc[:, 0].values
plot_home_status = np.array(np.unique(temp_41, return_counts=True))
plot_home_status_44 = pd.DataFrame(plot_home_status.T)
Fully_Paid_home_status = status_home_status[(status_home_status.status_labels == 0)]
temp_42 = Fully_Paid_home_status.iloc[:, 0].values
plot_home_status_2 = np.array(np.unique(temp_42, return_counts=True))
plot_home_status_55 = pd.DataFrame(plot_home_status_2.T)
plot_home_status_55 = plot_home_status_55.drop(0)
plot_stack_3 = np.hstack((plot_home_status_44, plot_home_status_55))
plot_stack_3 = pd.DataFrame(plot_stack_3)
annual_income = new_data[:, 11]
bins_2 = np.array([40000, 70000, 100000, 150000])
annual_income = annual_income.astype(float).reshape(annual_income.size, 1)
binned_annual_income = pd.DataFrame(np.digitize(annual_income, bins_2))
status_annual_groups = pd.DataFrame(np.hstack((binned_annual_income, status_labels))).rename(columns={1: 'status_labels'})
Charged_off_annual_income = status_annual_groups[(status_annual_groups.status_labels == 1)]
return Charged_off_annual_income
#=============
# Function 197
def cleaning_func_61(data):
# core cleaning code
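# Counts charged-off loans per state and returns the resulting table (plot_state_88).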
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[:, 2:-30]
new_data_df = pd.DataFrame(new_data)
installment_amt = new_data[:, 5]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
status_new = new_data_df[14]
factored_status = np.array(pd.factorize(status_new))
status_labels = pd.DataFrame(factored_status[0])
status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt, status_labels))).rename(columns={1: 'status_labels'})
Charged_off = status_installment_groups[(status_installment_groups.status_labels == 1)]
temp_1 = Charged_off.iloc[:, 0].values
plot_1 = np.array(np.unique(temp_1, return_counts=True))
plot_1 = plot_1[:, :-1]
plot_11 = plot_1.T
Fully_paid = status_installment_groups[(status_installment_groups.status_labels == 0)]
temp_2 = Fully_paid.iloc[:, 0].values
plot_2 = np.array(np.unique(temp_2, return_counts=True))
plot_22 = plot_2.T
plot_stack = np.hstack((plot_11, plot_22))
plot_stack = pd.DataFrame(plot_stack)
installment_grade = new_data[:, 6]
installment_grade = pd.DataFrame(installment_grade)
status_installment_grade = pd.DataFrame(np.hstack((installment_grade, status_labels))).rename(columns={1: 'status_labels'})
Charged_off_grade = status_installment_grade[(status_installment_grade.status_labels == 1)]
temp_11 = Charged_off_grade.iloc[:, 0].values
plot_grade = np.array(np.unique(temp_11, return_counts=True))
plot_grade_11 = plot_grade.T
Fully_Paid_grade = status_installment_grade[(status_installment_grade.status_labels == 0)]
temp_22 = Fully_Paid_grade.iloc[:, 0].values
plot_grade_2 = np.array(np.unique(temp_22, return_counts=True))
plot_grade_22 = plot_grade_2.T
plot_stack_1 = np.hstack((plot_grade_11, plot_grade_22))
plot_stack_1 = pd.DataFrame(plot_stack_1)
home_status = new_data_df[10]
home_status = pd.DataFrame(home_status)
status_home_status = pd.DataFrame(np.hstack((home_status, status_labels))).rename(columns={1: 'status_labels'})
Charged_off_home_status = status_home_status[(status_home_status.status_labels == 1)]
temp_41 = Charged_off_home_status.iloc[:, 0].values
plot_home_status = np.array(np.unique(temp_41, return_counts=True))
plot_home_status_44 = pd.DataFrame(plot_home_status.T)
Fully_Paid_home_status = status_home_status[(status_home_status.status_labels == 0)]
temp_42 = Fully_Paid_home_status.iloc[:, 0].values
plot_home_status_2 = np.array(np.unique(temp_42, return_counts=True))
plot_home_status_55 = pd.DataFrame(plot_home_status_2.T)
plot_home_status_55 = plot_home_status_55.drop(0)
plot_stack_3 = np.hstack((plot_home_status_44, plot_home_status_55))
plot_stack_3 = pd.DataFrame(plot_stack_3)
annual_income = new_data[:, 11]
bins_2 = np.array([40000, 70000, 100000, 150000])
annual_income = annual_income.astype(float).reshape(annual_income.size, 1)
binned_annual_income = pd.DataFrame(np.digitize(annual_income, bins_2))
status_annual_groups = pd.DataFrame(np.hstack((binned_annual_income, status_labels))).rename(columns={1: 'status_labels'})
Charged_off_annual_income = status_annual_groups[(status_annual_groups.status_labels == 1)]
temp_51 = Charged_off_annual_income.iloc[:, 0].values
plot_annual_income = np.array(np.unique(temp_51, return_counts=True))
plot_annual_income_66 = pd.DataFrame(plot_annual_income.T)
Fully_Paid_annual_income = status_annual_groups[(status_annual_groups.status_labels == 0)]
temp_52 = Fully_Paid_annual_income.iloc[:, 0].values
plot_annual_income_2 = np.array(np.unique(temp_52, return_counts=True))
plot_annual_income_77 = pd.DataFrame(plot_annual_income_2.T)
plot_stack_4 = np.hstack((plot_annual_income_66, plot_annual_income_77))
plot_stack_4 = pd.DataFrame(plot_stack_4)
state = new_data_df[21]
state = pd.DataFrame(state)
status_state = pd.DataFrame(np.hstack((state, status_labels))).rename(columns={1: 'status_labels'})
Charged_off_state = status_state[(status_state.status_labels == 1)]
temp_61 = Charged_off_state.iloc[:, 0].values
plot_state = np.array(np.unique(temp_61, return_counts=True))
plot_state_88 = pd.DataFrame(plot_state.T)
return plot_state_88
#=============
# Function 198
def cleaning_func_62(data):
# core cleaning code
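# Builds per-state charged-off and fully-paid counts and returns them stacked side by side (plot_stack_5).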
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[:, 2:-30]
new_data_df = pd.DataFrame(new_data)
installment_amt = new_data[:, 5]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
status_new = new_data_df[14]
factored_status = np.array(pd.factorize(status_new))
status_labels = pd.DataFrame(factored_status[0])
status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt, status_labels))).rename(columns={1: 'status_labels'})
Charged_off = status_installment_groups[(status_installment_groups.status_labels == 1)]
temp_1 = Charged_off.iloc[:, 0].values
plot_1 = np.array(np.unique(temp_1, return_counts=True))
plot_1 = plot_1[:, :-1]
plot_11 = plot_1.T
Fully_paid = status_installment_groups[(status_installment_groups.status_labels == 0)]
temp_2 = Fully_paid.iloc[:, 0].values
plot_2 = np.array(np.unique(temp_2, return_counts=True))
plot_22 = plot_2.T
plot_stack = np.hstack((plot_11, plot_22))
plot_stack = pd.DataFrame(plot_stack)
installment_grade = new_data[:, 6]
installment_grade = pd.DataFrame(installment_grade)
status_installment_grade = pd.DataFrame(np.hstack((installment_grade, status_labels))).rename(columns={1: 'status_labels'})
Charged_off_grade = status_installment_grade[(status_installment_grade.status_labels == 1)]
temp_11 = Charged_off_grade.iloc[:, 0].values
plot_grade = np.array(np.unique(temp_11, return_counts=True))
plot_grade_11 = plot_grade.T
Fully_Paid_grade = status_installment_grade[(status_installment_grade.status_labels == 0)]
temp_22 = Fully_Paid_grade.iloc[:, 0].values
plot_grade_2 = np.array(np.unique(temp_22, return_counts=True))
plot_grade_22 = plot_grade_2.T
plot_stack_1 = np.hstack((plot_grade_11, plot_grade_22))
plot_stack_1 = pd.DataFrame(plot_stack_1)
home_status = new_data_df[10]
home_status = pd.DataFrame(home_status)
status_home_status = pd.DataFrame(np.hstack((home_status, status_labels))).rename(columns={1: 'status_labels'})
Charged_off_home_status = status_home_status[(status_home_status.status_labels == 1)]
temp_41 = Charged_off_home_status.iloc[:, 0].values
plot_home_status = np.array(np.unique(temp_41, return_counts=True))
plot_home_status_44 = pd.DataFrame(plot_home_status.T)
Fully_Paid_home_status = status_home_status[(status_home_status.status_labels == 0)]
temp_42 = Fully_Paid_home_status.iloc[:, 0].values
plot_home_status_2 = np.array(np.unique(temp_42, return_counts=True))
plot_home_status_55 = pd.DataFrame(plot_home_status_2.T)
plot_home_status_55 = plot_home_status_55.drop(0)
plot_stack_3 = np.hstack((plot_home_status_44, plot_home_status_55))
plot_stack_3 = pd.DataFrame(plot_stack_3)
annual_income = new_data[:, 11]
bins_2 = np.array([40000, 70000, 100000, 150000])
annual_income = annual_income.astype(float).reshape(annual_income.size, 1)
binned_annual_income = pd.DataFrame(np.digitize(annual_income, bins_2))
status_annual_groups = pd.DataFrame(np.hstack((binned_annual_income, status_labels))).rename(columns={1: 'status_labels'})
Charged_off_annual_income = status_annual_groups[(status_annual_groups.status_labels == 1)]
temp_51 = Charged_off_annual_income.iloc[:, 0].values
plot_annual_income = np.array(np.unique(temp_51, return_counts=True))
plot_annual_income_66 = pd.DataFrame(plot_annual_income.T)
Fully_Paid_annual_income = status_annual_groups[(status_annual_groups.status_labels == 0)]
temp_52 = Fully_Paid_annual_income.iloc[:, 0].values
plot_annual_income_2 = np.array(np.unique(temp_52, return_counts=True))
plot_annual_income_77 = pd.DataFrame(plot_annual_income_2.T)
plot_stack_4 = np.hstack((plot_annual_income_66, plot_annual_income_77))
plot_stack_4 = pd.DataFrame(plot_stack_4)
state = new_data_df[21]
state = pd.DataFrame(state)
status_state = pd.DataFrame(np.hstack((state, status_labels))).rename(columns={1: 'status_labels'})
Charged_off_state = status_state[(status_state.status_labels == 1)]
temp_61 = Charged_off_state.iloc[:, 0].values
plot_state = np.array(np.unique(temp_61, return_counts=True))
plot_state_88 = pd.DataFrame(plot_state.T)
Fully_Paid_state = status_state[(status_state.status_labels == 0)]
temp_62 = Fully_Paid_state.iloc[:, 0].values
plot_state_2 = np.array(np.unique(temp_62, return_counts=True))
plot_state_99 = pd.DataFrame(plot_state_2.T)
plot_state_88 = plot_state_88.drop(7)
plot_state_99 = plot_state_99.drop([7, 21, 28])
plot_stack_5 = np.hstack((plot_state_88, plot_state_99))
plot_stack_5 = pd.DataFrame(plot_stack_5)
return plot_stack_5
#=============
# Function 199
def cleaning_func_63(data):
# core cleaning code
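# Pairs the state column with the status label and returns the charged-off rows (Charged_off_state).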
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[:, 2:-30]
new_data_df = pd.DataFrame(new_data)
installment_amt = new_data[:, 5]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
status_new = new_data_df[14]
factored_status = np.array(pd.factorize(status_new))
status_labels = pd.DataFrame(factored_status[0])
status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt, status_labels))).rename(columns={1: 'status_labels'})
Charged_off = status_installment_groups[(status_installment_groups.status_labels == 1)]
temp_1 = Charged_off.iloc[:, 0].values
plot_1 = np.array(np.unique(temp_1, return_counts=True))
plot_1 = plot_1[:, :-1]
plot_11 = plot_1.T
Fully_paid = status_installment_groups[(status_installment_groups.status_labels == 0)]
temp_2 = Fully_paid.iloc[:, 0].values
plot_2 = np.array(np.unique(temp_2, return_counts=True))
plot_22 = plot_2.T
plot_stack = np.hstack((plot_11, plot_22))
plot_stack = pd.DataFrame(plot_stack)
installment_grade = new_data[:, 6]
installment_grade = pd.DataFrame(installment_grade)
status_installment_grade = pd.DataFrame(np.hstack((installment_grade, status_labels))).rename(columns={1: 'status_labels'})
Charged_off_grade = status_installment_grade[(status_installment_grade.status_labels == 1)]
temp_11 = Charged_off_grade.iloc[:, 0].values
plot_grade = np.array(np.unique(temp_11, return_counts=True))
plot_grade_11 = plot_grade.T
Fully_Paid_grade = status_installment_grade[(status_installment_grade.status_labels == 0)]
temp_22 = Fully_Paid_grade.iloc[:, 0].values
plot_grade_2 = np.array(np.unique(temp_22, return_counts=True))
plot_grade_22 = plot_grade_2.T
plot_stack_1 = np.hstack((plot_grade_11, plot_grade_22))
plot_stack_1 = pd.DataFrame(plot_stack_1)
home_status = new_data_df[10]
home_status = pd.DataFrame(home_status)
status_home_status = pd.DataFrame(np.hstack((home_status, status_labels))).rename(columns={1: 'status_labels'})
Charged_off_home_status = status_home_status[(status_home_status.status_labels == 1)]
temp_41 = Charged_off_home_status.iloc[:, 0].values
plot_home_status = np.array(np.unique(temp_41, return_counts=True))
plot_home_status_44 = pd.DataFrame(plot_home_status.T)
Fully_Paid_home_status = status_home_status[(status_home_status.status_labels == 0)]
temp_42 = Fully_Paid_home_status.iloc[:, 0].values
plot_home_status_2 = np.array(np.unique(temp_42, return_counts=True))
plot_home_status_55 = pd.DataFrame(plot_home_status_2.T)
plot_home_status_55 = plot_home_status_55.drop(0)
plot_stack_3 = np.hstack((plot_home_status_44, plot_home_status_55))
plot_stack_3 = pd.DataFrame(plot_stack_3)
annual_income = new_data[:, 11]
bins_2 = np.array([40000, 70000, 100000, 150000])
annual_income = annual_income.astype(float).reshape(annual_income.size, 1)
binned_annual_income = pd.DataFrame(np.digitize(annual_income, bins_2))
status_annual_groups = pd.DataFrame(np.hstack((binned_annual_income, status_labels))).rename(columns={1: 'status_labels'})
Charged_off_annual_income = status_annual_groups[(status_annual_groups.status_labels == 1)]
temp_51 = Charged_off_annual_income.iloc[:, 0].values
plot_annual_income = np.array(np.unique(temp_51, return_counts=True))
plot_annual_income_66 = pd.DataFrame(plot_annual_income.T)
Fully_Paid_annual_income = status_annual_groups[(status_annual_groups.status_labels == 0)]
temp_52 = Fully_Paid_annual_income.iloc[:, 0].values
plot_annual_income_2 = np.array(np.unique(temp_52, return_counts=True))
plot_annual_income_77 = pd.DataFrame(plot_annual_income_2.T)
plot_stack_4 = np.hstack((plot_annual_income_66, plot_annual_income_77))
plot_stack_4 = pd.DataFrame(plot_stack_4)
state = new_data_df[21]
state = pd.DataFrame(state)
status_state = pd.DataFrame(np.hstack((state, status_labels))).rename(columns={1: 'status_labels'})
Charged_off_state = status_state[(status_state.status_labels == 1)]
return Charged_off_state
#=============
# Function 200
def cleaning_func_64(data):
# core cleaning code
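# Counts charged-off loans per state and returns the table with row 7 dropped (plot_state_88).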
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[:, 2:-30]
new_data_df = pd.DataFrame(new_data)
installment_amt = new_data[:, 5]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
status_new = new_data_df[14]
factored_status = np.array(pd.factorize(status_new))
status_labels = pd.DataFrame(factored_status[0])
status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt, status_labels))).rename(columns={1: 'status_labels'})
Charged_off = status_installment_groups[(status_installment_groups.status_labels == 1)]
temp_1 = Charged_off.iloc[:, 0].values
plot_1 = np.array(np.unique(temp_1, return_counts=True))
plot_1 = plot_1[:, :-1]
plot_11 = plot_1.T
Fully_paid = status_installment_groups[(status_installment_groups.status_labels == 0)]
temp_2 = Fully_paid.iloc[:, 0].values
plot_2 = np.array(np.unique(temp_2, return_counts=True))
plot_22 = plot_2.T
plot_stack = np.hstack((plot_11, plot_22))
plot_stack = pd.DataFrame(plot_stack)
installment_grade = new_data[:, 6]
installment_grade = pd.DataFrame(installment_grade)
status_installment_grade = pd.DataFrame(np.hstack((installment_grade, status_labels))).rename(columns={1: 'status_labels'})
Charged_off_grade = status_installment_grade[(status_installment_grade.status_labels == 1)]
temp_11 = Charged_off_grade.iloc[:, 0].values
plot_grade = np.array(np.unique(temp_11, return_counts=True))
plot_grade_11 = plot_grade.T
Fully_Paid_grade = status_installment_grade[(status_installment_grade.status_labels == 0)]
temp_22 = Fully_Paid_grade.iloc[:, 0].values
plot_grade_2 = np.array(np.unique(temp_22, return_counts=True))
plot_grade_22 = plot_grade_2.T
plot_stack_1 = np.hstack((plot_grade_11, plot_grade_22))
plot_stack_1 = pd.DataFrame(plot_stack_1)
home_status = new_data_df[10]
home_status = pd.DataFrame(home_status)
status_home_status = pd.DataFrame(np.hstack((home_status, status_labels))).rename(columns={1: 'status_labels'})
Charged_off_home_status = status_home_status[(status_home_status.status_labels == 1)]
temp_41 = Charged_off_home_status.iloc[:, 0].values
plot_home_status = np.array(np.unique(temp_41, return_counts=True))
plot_home_status_44 = pd.DataFrame(plot_home_status.T)
Fully_Paid_home_status = status_home_status[(status_home_status.status_labels == 0)]
temp_42 = Fully_Paid_home_status.iloc[:, 0].values
plot_home_status_2 = np.array(np.unique(temp_42, return_counts=True))
plot_home_status_55 = pd.DataFrame(plot_home_status_2.T)
plot_home_status_55 = plot_home_status_55.drop(0)
plot_stack_3 = np.hstack((plot_home_status_44, plot_home_status_55))
plot_stack_3 = pd.DataFrame(plot_stack_3)
annual_income = new_data[:, 11]
bins_2 = np.array([40000, 70000, 100000, 150000])
annual_income = annual_income.astype(float).reshape(annual_income.size, 1)
binned_annual_income = pd.DataFrame(np.digitize(annual_income, bins_2))
status_annual_groups = pd.DataFrame(np.hstack((binned_annual_income, status_labels))).rename(columns={1: 'status_labels'})
Charged_off_annual_income = status_annual_groups[(status_annual_groups.status_labels == 1)]
temp_51 = Charged_off_annual_income.iloc[:, 0].values
plot_annual_income = np.array(np.unique(temp_51, return_counts=True))
plot_annual_income_66 = pd.DataFrame(plot_annual_income.T)
Fully_Paid_annual_income = status_annual_groups[(status_annual_groups.status_labels == 0)]
temp_52 = Fully_Paid_annual_income.iloc[:, 0].values
plot_annual_income_2 = np.array(np.unique(temp_52, return_counts=True))
plot_annual_income_77 = pd.DataFrame(plot_annual_income_2.T)
plot_stack_4 = np.hstack((plot_annual_income_66, plot_annual_income_77))
plot_stack_4 = pd.DataFrame(plot_stack_4)
state = new_data_df[21]
state = pd.DataFrame(state)
status_state = pd.DataFrame(np.hstack((state, status_labels))).rename(columns={1: 'status_labels'})
Charged_off_state = status_state[(status_state.status_labels == 1)]
temp_61 = Charged_off_state.iloc[:, 0].values
plot_state = np.array(np.unique(temp_61, return_counts=True))
plot_state_88 = pd.DataFrame(plot_state.T)
Fully_Paid_state = status_state[(status_state.status_labels == 0)]
temp_62 = Fully_Paid_state.iloc[:, 0].values
plot_state_2 = np.array(np.unique(temp_62, return_counts=True))
plot_state_99 = pd.DataFrame(plot_state_2.T)
plot_state_88 = plot_state_88.drop(7)
return plot_state_88
#=============
# Function 201
def cleaning_func_65(data):
# core cleaning code
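# Builds per-state charged-off and fully-paid counts and returns them with
# columns ['state', 'Charged Off', 'Fully Paid'] (plot_stack_5).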
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[:, 2:-30]
new_data_df = pd.DataFrame(new_data)
installment_amt = new_data[:, 5]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
status_new = new_data_df[14]
factored_status = np.array(pd.factorize(status_new))
status_labels = pd.DataFrame(factored_status[0])
status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt, status_labels))).rename(columns={1: 'status_labels'})
Charged_off = status_installment_groups[(status_installment_groups.status_labels == 1)]
temp_1 = Charged_off.iloc[:, 0].values
plot_1 = np.array(np.unique(temp_1, return_counts=True))
plot_1 = plot_1[:, :-1]
plot_11 = plot_1.T
Fully_paid = status_installment_groups[(status_installment_groups.status_labels == 0)]
temp_2 = Fully_paid.iloc[:, 0].values
plot_2 = np.array(np.unique(temp_2, return_counts=True))
plot_22 = plot_2.T
plot_stack = np.hstack((plot_11, plot_22))
plot_stack = pd.DataFrame(plot_stack)
installment_grade = new_data[:, 6]
installment_grade = pd.DataFrame(installment_grade)
status_installment_grade = pd.DataFrame(np.hstack((installment_grade, status_labels))).rename(columns={1: 'status_labels'})
Charged_off_grade = status_installment_grade[(status_installment_grade.status_labels == 1)]
temp_11 = Charged_off_grade.iloc[:, 0].values
plot_grade = np.array(np.unique(temp_11, return_counts=True))
plot_grade_11 = plot_grade.T
Fully_Paid_grade = status_installment_grade[(status_installment_grade.status_labels == 0)]
temp_22 = Fully_Paid_grade.iloc[:, 0].values
plot_grade_2 = np.array(np.unique(temp_22, return_counts=True))
plot_grade_22 = plot_grade_2.T
plot_stack_1 = np.hstack((plot_grade_11, plot_grade_22))
plot_stack_1 = pd.DataFrame(plot_stack_1)
home_status = new_data_df[10]
home_status = pd.DataFrame(home_status)
status_home_status = pd.DataFrame(np.hstack((home_status, status_labels))).rename(columns={1: 'status_labels'})
Charged_off_home_status = status_home_status[(status_home_status.status_labels == 1)]
temp_41 = Charged_off_home_status.iloc[:, 0].values
plot_home_status = np.array(np.unique(temp_41, return_counts=True))
plot_home_status_44 = pd.DataFrame(plot_home_status.T)
Fully_Paid_home_status = status_home_status[(status_home_status.status_labels == 0)]
temp_42 = Fully_Paid_home_status.iloc[:, 0].values
plot_home_status_2 = np.array(np.unique(temp_42, return_counts=True))
plot_home_status_55 = pd.DataFrame(plot_home_status_2.T)
plot_home_status_55 = plot_home_status_55.drop(0)
plot_stack_3 = np.hstack((plot_home_status_44, plot_home_status_55))
plot_stack_3 = pd.DataFrame(plot_stack_3)
annual_income = new_data[:, 11]
bins_2 = np.array([40000, 70000, 100000, 150000])
annual_income = annual_income.astype(float).reshape(annual_income.size, 1)
binned_annual_income = pd.DataFrame(np.digitize(annual_income, bins_2))
status_annual_groups = pd.DataFrame(np.hstack((binned_annual_income, status_labels))).rename(columns={1: 'status_labels'})
Charged_off_annual_income = status_annual_groups[(status_annual_groups.status_labels == 1)]
temp_51 = Charged_off_annual_income.iloc[:, 0].values
plot_annual_income = np.array(np.unique(temp_51, return_counts=True))
plot_annual_income_66 = pd.DataFrame(plot_annual_income.T)
Fully_Paid_annual_income = status_annual_groups[(status_annual_groups.status_labels == 0)]
temp_52 = Fully_Paid_annual_income.iloc[:, 0].values
plot_annual_income_2 = np.array(np.unique(temp_52, return_counts=True))
plot_annual_income_77 = pd.DataFrame(plot_annual_income_2.T)
plot_stack_4 = np.hstack((plot_annual_income_66, plot_annual_income_77))
plot_stack_4 = pd.DataFrame(plot_stack_4)
state = new_data_df[21]
state = pd.DataFrame(state)
status_state = pd.DataFrame(np.hstack((state, status_labels))).rename(columns={1: 'status_labels'})
Charged_off_state = status_state[(status_state.status_labels == 1)]
temp_61 = Charged_off_state.iloc[:, 0].values
plot_state = np.array(np.unique(temp_61, return_counts=True))
plot_state_88 = pd.DataFrame(plot_state.T)
Fully_Paid_state = status_state[(status_state.status_labels == 0)]
temp_62 = Fully_Paid_state.iloc[:, 0].values
plot_state_2 = np.array(np.unique(temp_62, return_counts=True))
plot_state_99 = pd.DataFrame(plot_state_2.T)
plot_state_88 = plot_state_88.drop(7)
plot_state_99 = plot_state_99.drop([7, 21, 28])
plot_stack_5 = np.hstack((plot_state_88, plot_state_99))
plot_stack_5 = pd.DataFrame(plot_stack_5)
plot_stack_5 = plot_stack_5.drop(plot_stack_5.columns[2], axis=1)
plot_stack_5.columns = ['state', 'Charged Off', 'Fully Paid']
return plot_stack_5
#=============
# Function 202
def cleaning_func_68(data):
# core cleaning code
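# Pairs the state column with the status label and returns the fully-paid rows (Fully_Paid_state).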
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[:, 2:-30]
new_data_df = pd.DataFrame(new_data)
installment_amt = new_data[:, 5]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
status_new = new_data_df[14]
factored_status = np.array(pd.factorize(status_new))
status_labels = pd.DataFrame(factored_status[0])
status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt, status_labels))).rename(columns={1: 'status_labels'})
Charged_off = status_installment_groups[(status_installment_groups.status_labels == 1)]
temp_1 = Charged_off.iloc[:, 0].values
plot_1 = np.array(np.unique(temp_1, return_counts=True))
plot_1 = plot_1[:, :-1]
plot_11 = plot_1.T
Fully_paid = status_installment_groups[(status_installment_groups.status_labels == 0)]
temp_2 = Fully_paid.iloc[:, 0].values
plot_2 = np.array(np.unique(temp_2, return_counts=True))
plot_22 = plot_2.T
plot_stack = np.hstack((plot_11, plot_22))
plot_stack = pd.DataFrame(plot_stack)
installment_grade = new_data[:, 6]
installment_grade = pd.DataFrame(installment_grade)
status_installment_grade = pd.DataFrame(np.hstack((installment_grade, status_labels))).rename(columns={1: 'status_labels'})
Charged_off_grade = status_installment_grade[(status_installment_grade.status_labels == 1)]
temp_11 = Charged_off_grade.iloc[:, 0].values
plot_grade = np.array(np.unique(temp_11, return_counts=True))
plot_grade_11 = plot_grade.T
Fully_Paid_grade = status_installment_grade[(status_installment_grade.status_labels == 0)]
temp_22 = Fully_Paid_grade.iloc[:, 0].values
plot_grade_2 = np.array(np.unique(temp_22, return_counts=True))
plot_grade_22 = plot_grade_2.T
plot_stack_1 = np.hstack((plot_grade_11, plot_grade_22))
plot_stack_1 = pd.DataFrame(plot_stack_1)
home_status = new_data_df[10]
home_status = pd.DataFrame(home_status)
status_home_status = pd.DataFrame(np.hstack((home_status, status_labels))).rename(columns={1: 'status_labels'})
Charged_off_home_status = status_home_status[(status_home_status.status_labels == 1)]
temp_41 = Charged_off_home_status.iloc[:, 0].values
plot_home_status = np.array(np.unique(temp_41, return_counts=True))
plot_home_status_44 = pd.DataFrame(plot_home_status.T)
Fully_Paid_home_status = status_home_status[(status_home_status.status_labels == 0)]
temp_42 = Fully_Paid_home_status.iloc[:, 0].values
plot_home_status_2 = np.array(np.unique(temp_42, return_counts=True))
plot_home_status_55 = pd.DataFrame(plot_home_status_2.T)
plot_home_status_55 = plot_home_status_55.drop(0)
plot_stack_3 = np.hstack((plot_home_status_44, plot_home_status_55))
plot_stack_3 = pd.DataFrame(plot_stack_3)
annual_income = new_data[:, 11]
bins_2 = np.array([40000, 70000, 100000, 150000])
annual_income = annual_income.astype(float).reshape(annual_income.size, 1)
binned_annual_income = pd.DataFrame(np.digitize(annual_income, bins_2))
status_annual_groups = pd.DataFrame(np.hstack((binned_annual_income, status_labels))).rename(columns={1: 'status_labels'})
Charged_off_annual_income = status_annual_groups[(status_annual_groups.status_labels == 1)]
temp_51 = Charged_off_annual_income.iloc[:, 0].values
plot_annual_income = np.array(np.unique(temp_51, return_counts=True))
plot_annual_income_66 = pd.DataFrame(plot_annual_income.T)
Fully_Paid_annual_income = status_annual_groups[(status_annual_groups.status_labels == 0)]
temp_52 = Fully_Paid_annual_income.iloc[:, 0].values
plot_annual_income_2 = np.array(np.unique(temp_52, return_counts=True))
plot_annual_income_77 = pd.DataFrame(plot_annual_income_2.T)
plot_stack_4 = np.hstack((plot_annual_income_66, plot_annual_income_77))
plot_stack_4 = pd.DataFrame(plot_stack_4)
state = new_data_df[21]
state = pd.DataFrame(state)
status_state = pd.DataFrame(np.hstack((state, status_labels))).rename(columns={1: 'status_labels'})
Charged_off_state = status_state[(status_state.status_labels == 1)]
temp_61 = Charged_off_state.iloc[:, 0].values
plot_state = np.array(np.unique(temp_61, return_counts=True))
plot_state_88 = pd.DataFrame(plot_state.T)
Fully_Paid_state = status_state[(status_state.status_labels == 0)]
return Fully_Paid_state
#=============
# Function 203
def cleaning_func_69(data):
# core cleaning code
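# Returns the state column of the stacked Fully Paid / Charged Off data as a DataFrame.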
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[:, 2:-30]
new_data_df = pd.DataFrame(new_data)
installment_amt = new_data[:, 5]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
status_new = new_data_df[14]
factored_status = np.array(pd.factorize(status_new))
status_labels = pd.DataFrame(factored_status[0])
status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt, status_labels))).rename(columns={1: 'status_labels'})
Charged_off = status_installment_groups[(status_installment_groups.status_labels == 1)]
temp_1 = Charged_off.iloc[:, 0].values
plot_1 = np.array(np.unique(temp_1, return_counts=True))
plot_1 = plot_1[:, :-1]
plot_11 = plot_1.T
Fully_paid = status_installment_groups[(status_installment_groups.status_labels == 0)]
temp_2 = Fully_paid.iloc[:, 0].values
plot_2 = np.array(np.unique(temp_2, return_counts=True))
plot_22 = plot_2.T
plot_stack = np.hstack((plot_11, plot_22))
plot_stack = pd.DataFrame(plot_stack)
installment_grade = new_data[:, 6]
installment_grade = pd.DataFrame(installment_grade)
status_installment_grade = pd.DataFrame(np.hstack((installment_grade, status_labels))).rename(columns={1: 'status_labels'})
Charged_off_grade = status_installment_grade[(status_installment_grade.status_labels == 1)]
temp_11 = Charged_off_grade.iloc[:, 0].values
plot_grade = np.array(np.unique(temp_11, return_counts=True))
plot_grade_11 = plot_grade.T
Fully_Paid_grade = status_installment_grade[(status_installment_grade.status_labels == 0)]
temp_22 = Fully_Paid_grade.iloc[:, 0].values
plot_grade_2 = np.array(np.unique(temp_22, return_counts=True))
plot_grade_22 = plot_grade_2.T
plot_stack_1 = np.hstack((plot_grade_11, plot_grade_22))
plot_stack_1 = pd.DataFrame(plot_stack_1)
home_status = new_data_df[10]
home_status = pd.DataFrame(home_status)
status_home_status = pd.DataFrame(np.hstack((home_status, status_labels))).rename(columns={1: 'status_labels'})
Charged_off_home_status = status_home_status[(status_home_status.status_labels == 1)]
temp_41 = Charged_off_home_status.iloc[:, 0].values
plot_home_status = np.array(np.unique(temp_41, return_counts=True))
plot_home_status_44 = pd.DataFrame(plot_home_status.T)
Fully_Paid_home_status = status_home_status[(status_home_status.status_labels == 0)]
temp_42 = Fully_Paid_home_status.iloc[:, 0].values
plot_home_status_2 = np.array(np.unique(temp_42, return_counts=True))
plot_home_status_55 = pd.DataFrame(plot_home_status_2.T)
plot_home_status_55 = plot_home_status_55.drop(0)
plot_stack_3 = np.hstack((plot_home_status_44, plot_home_status_55))
plot_stack_3 = pd.DataFrame(plot_stack_3)
annual_income = new_data[:, 11]
bins_2 = np.array([40000, 70000, 100000, 150000])
annual_income = annual_income.astype(float).reshape(annual_income.size, 1)
binned_annual_income = pd.DataFrame(np.digitize(annual_income, bins_2))
status_annual_groups = pd.DataFrame(np.hstack((binned_annual_income, status_labels))).rename(columns={1: 'status_labels'})
Charged_off_annual_income = status_annual_groups[(status_annual_groups.status_labels == 1)]
temp_51 = Charged_off_annual_income.iloc[:, 0].values
plot_annual_income = np.array(np.unique(temp_51, return_counts=True))
plot_annual_income_66 = pd.DataFrame(plot_annual_income.T)
Fully_Paid_annual_income = status_annual_groups[(status_annual_groups.status_labels == 0)]
temp_52 = Fully_Paid_annual_income.iloc[:, 0].values
plot_annual_income_2 = np.array(np.unique(temp_52, return_counts=True))
plot_annual_income_77 = pd.DataFrame(plot_annual_income_2.T)
plot_stack_4 = np.hstack((plot_annual_income_66, plot_annual_income_77))
plot_stack_4 = pd.DataFrame(plot_stack_4)
state = new_data_df[21]
state = pd.DataFrame(state)
return state
#=============
# Function 204
def cleaning_func_70(data):
# core cleaning code
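# Pairs the state column with the status label and returns the combined frame (status_state).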
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[:, 2:-30]
new_data_df = pd.DataFrame(new_data)
installment_amt = new_data[:, 5]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
status_new = new_data_df[14]
factored_status = np.array(pd.factorize(status_new))
status_labels = pd.DataFrame(factored_status[0])
status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt, status_labels))).rename(columns={1: 'status_labels'})
Charged_off = status_installment_groups[(status_installment_groups.status_labels == 1)]
temp_1 = Charged_off.iloc[:, 0].values
plot_1 = np.array(np.unique(temp_1, return_counts=True))
plot_1 = plot_1[:, :-1]
plot_11 = plot_1.T
Fully_paid = status_installment_groups[(status_installment_groups.status_labels == 0)]
temp_2 = Fully_paid.iloc[:, 0].values
plot_2 = np.array(np.unique(temp_2, return_counts=True))
plot_22 = plot_2.T
plot_stack = np.hstack((plot_11, plot_22))
plot_stack = pd.DataFrame(plot_stack)
installment_grade = new_data[:, 6]
installment_grade = pd.DataFrame(installment_grade)
status_installment_grade = pd.DataFrame(np.hstack((installment_grade, status_labels))).rename(columns={1: 'status_labels'})
Charged_off_grade = status_installment_grade[(status_installment_grade.status_labels == 1)]
temp_11 = Charged_off_grade.iloc[:, 0].values
plot_grade = np.array(np.unique(temp_11, return_counts=True))
plot_grade_11 = plot_grade.T
Fully_Paid_grade = status_installment_grade[(status_installment_grade.status_labels == 0)]
temp_22 = Fully_Paid_grade.iloc[:, 0].values
plot_grade_2 = np.array(np.unique(temp_22, return_counts=True))
plot_grade_22 = plot_grade_2.T
plot_stack_1 = np.hstack((plot_grade_11, plot_grade_22))
plot_stack_1 = pd.DataFrame(plot_stack_1)
home_status = new_data_df[10]
home_status = pd.DataFrame(home_status)
status_home_status = pd.DataFrame(np.hstack((home_status, status_labels))).rename(columns={1: 'status_labels'})
Charged_off_home_status = status_home_status[(status_home_status.status_labels == 1)]
temp_41 = Charged_off_home_status.iloc[:, 0].values
plot_home_status = np.array(np.unique(temp_41, return_counts=True))
plot_home_status_44 = pd.DataFrame(plot_home_status.T)
Fully_Paid_home_status = status_home_status[(status_home_status.status_labels == 0)]
temp_42 = Fully_Paid_home_status.iloc[:, 0].values
plot_home_status_2 = np.array(np.unique(temp_42, return_counts=True))
plot_home_status_55 = pd.DataFrame(plot_home_status_2.T)
plot_home_status_55 = plot_home_status_55.drop(0)
plot_stack_3 = np.hstack((plot_home_status_44, plot_home_status_55))
plot_stack_3 = pd.DataFrame(plot_stack_3)
annual_income = new_data[:, 11]
bins_2 = np.array([40000, 70000, 100000, 150000])
annual_income = annual_income.astype(float).reshape(annual_income.size, 1)
binned_annual_income = pd.DataFrame(np.digitize(annual_income, bins_2))
status_annual_groups = pd.DataFrame(np.hstack((binned_annual_income, status_labels))).rename(columns={1: 'status_labels'})
Charged_off_annual_income = status_annual_groups[(status_annual_groups.status_labels == 1)]
temp_51 = Charged_off_annual_income.iloc[:, 0].values
plot_annual_income = np.array(np.unique(temp_51, return_counts=True))
plot_annual_income_66 = pd.DataFrame(plot_annual_income.T)
Fully_Paid_annual_income = status_annual_groups[(status_annual_groups.status_labels == 0)]
temp_52 = Fully_Paid_annual_income.iloc[:, 0].values
plot_annual_income_2 = np.array(np.unique(temp_52, return_counts=True))
plot_annual_income_77 = pd.DataFrame(plot_annual_income_2.T)
plot_stack_4 = np.hstack((plot_annual_income_66, plot_annual_income_77))
plot_stack_4 = pd.DataFrame(plot_stack_4)
state = new_data_df[21]
state = pd.DataFrame(state)
status_state = pd.DataFrame(np.hstack((state, status_labels))).rename(columns={1: 'status_labels'})
return status_state
#=============
# Function 205
def cleaning_func_71(data):
# core cleaning code
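# Builds per-state charged-off and fully-paid counts and returns them with the
# duplicate state column dropped (plot_stack_5).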
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[:, 2:-30]
new_data_df = pd.DataFrame(new_data)
installment_amt = new_data[:, 5]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
status_new = new_data_df[14]
factored_status = np.array(pd.factorize(status_new))
status_labels = pd.DataFrame(factored_status[0])
status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt, status_labels))).rename(columns={1: 'status_labels'})
Charged_off = status_installment_groups[(status_installment_groups.status_labels == 1)]
temp_1 = Charged_off.iloc[:, 0].values
plot_1 = np.array(np.unique(temp_1, return_counts=True))
plot_1 = plot_1[:, :-1]
plot_11 = plot_1.T
Fully_paid = status_installment_groups[(status_installment_groups.status_labels == 0)]
temp_2 = Fully_paid.iloc[:, 0].values
plot_2 = np.array(np.unique(temp_2, return_counts=True))
plot_22 = plot_2.T
plot_stack = np.hstack((plot_11, plot_22))
plot_stack = pd.DataFrame(plot_stack)
installment_grade = new_data[:, 6]
installment_grade = pd.DataFrame(installment_grade)
status_installment_grade = pd.DataFrame(np.hstack((installment_grade, status_labels))).rename(columns={1: 'status_labels'})
Charged_off_grade = status_installment_grade[(status_installment_grade.status_labels == 1)]
temp_11 = Charged_off_grade.iloc[:, 0].values
plot_grade = np.array(np.unique(temp_11, return_counts=True))
plot_grade_11 = plot_grade.T
Fully_Paid_grade = status_installment_grade[(status_installment_grade.status_labels == 0)]
temp_22 = Fully_Paid_grade.iloc[:, 0].values
plot_grade_2 = np.array(np.unique(temp_22, return_counts=True))
plot_grade_22 = plot_grade_2.T
plot_stack_1 = np.hstack((plot_grade_11, plot_grade_22))
plot_stack_1 = pd.DataFrame(plot_stack_1)
home_status = new_data_df[10]
home_status = pd.DataFrame(home_status)
status_home_status = pd.DataFrame(np.hstack((home_status, status_labels))).rename(columns={1: 'status_labels'})
Charged_off_home_status = status_home_status[(status_home_status.status_labels == 1)]
temp_41 = Charged_off_home_status.iloc[:, 0].values
plot_home_status = np.array(np.unique(temp_41, return_counts=True))
plot_home_status_44 = pd.DataFrame(plot_home_status.T)
Fully_Paid_home_status = status_home_status[(status_home_status.status_labels == 0)]
temp_42 = Fully_Paid_home_status.iloc[:, 0].values
plot_home_status_2 = np.array(np.unique(temp_42, return_counts=True))
plot_home_status_55 = pd.DataFrame(plot_home_status_2.T)
plot_home_status_55 = plot_home_status_55.drop(0)
plot_stack_3 = np.hstack((plot_home_status_44, plot_home_status_55))
plot_stack_3 = pd.DataFrame(plot_stack_3)
annual_income = new_data[:, 11]
bins_2 = np.array([40000, 70000, 100000, 150000])
annual_income = annual_income.astype(float).reshape(annual_income.size, 1)
binned_annual_income = pd.DataFrame(np.digitize(annual_income, bins_2))
status_annual_groups = pd.DataFrame(np.hstack((binned_annual_income, status_labels))).rename(columns={1: 'status_labels'})
Charged_off_annual_income = status_annual_groups[(status_annual_groups.status_labels == 1)]
temp_51 = Charged_off_annual_income.iloc[:, 0].values
plot_annual_income = np.array(np.unique(temp_51, return_counts=True))
plot_annual_income_66 = pd.DataFrame(plot_annual_income.T)
Fully_Paid_annual_income = status_annual_groups[(status_annual_groups.status_labels == 0)]
temp_52 = Fully_Paid_annual_income.iloc[:, 0].values
plot_annual_income_2 = np.array(np.unique(temp_52, return_counts=True))
plot_annual_income_77 = pd.DataFrame(plot_annual_income_2.T)
plot_stack_4 = np.hstack((plot_annual_income_66, plot_annual_income_77))
plot_stack_4 = pd.DataFrame(plot_stack_4)
state = new_data_df[21]
state = pd.DataFrame(state)
status_state = pd.DataFrame(np.hstack((state, status_labels))).rename(columns={1: 'status_labels'})
Charged_off_state = status_state[(status_state.status_labels == 1)]
temp_61 = Charged_off_state.iloc[:, 0].values
plot_state = np.array(np.unique(temp_61, return_counts=True))
plot_state_88 = pd.DataFrame(plot_state.T)
Fully_Paid_state = status_state[(status_state.status_labels == 0)]
temp_62 = Fully_Paid_state.iloc[:, 0].values
plot_state_2 = np.array(np.unique(temp_62, return_counts=True))
plot_state_99 = pd.DataFrame(plot_state_2.T)
plot_state_88 = plot_state_88.drop(7)
plot_state_99 = plot_state_99.drop([7, 21, 28])
plot_stack_5 = np.hstack((plot_state_88, plot_state_99))
plot_stack_5 = pd.DataFrame(plot_stack_5)
plot_stack_5 = plot_stack_5.drop(plot_stack_5.columns[2], axis=1)
return plot_stack_5
#=============
# Function 206
def cleaning_func_72(data):
# core cleaning code
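# Computes, for each state, the percentage of loans that were charged off and returns it (C_Off).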
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[:, 2:-30]
new_data_df = pd.DataFrame(new_data)
installment_amt = new_data[:, 5]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
status_new = new_data_df[14]
factored_status = np.array(pd.factorize(status_new))
status_labels = pd.DataFrame(factored_status[0])
status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt, status_labels))).rename(columns={1: 'status_labels'})
Charged_off = status_installment_groups[(status_installment_groups.status_labels == 1)]
temp_1 = Charged_off.iloc[:, 0].values
plot_1 = np.array(np.unique(temp_1, return_counts=True))
plot_1 = plot_1[:, :-1]
plot_11 = plot_1.T
Fully_paid = status_installment_groups[(status_installment_groups.status_labels == 0)]
temp_2 = Fully_paid.iloc[:, 0].values
plot_2 = np.array(np.unique(temp_2, return_counts=True))
plot_22 = plot_2.T
plot_stack = np.hstack((plot_11, plot_22))
plot_stack = pd.DataFrame(plot_stack)
installment_grade = new_data[:, 6]
installment_grade = pd.DataFrame(installment_grade)
status_installment_grade = pd.DataFrame(np.hstack((installment_grade, status_labels))).rename(columns={1: 'status_labels'})
Charged_off_grade = status_installment_grade[(status_installment_grade.status_labels == 1)]
temp_11 = Charged_off_grade.iloc[:, 0].values
plot_grade = np.array(np.unique(temp_11, return_counts=True))
plot_grade_11 = plot_grade.T
Fully_Paid_grade = status_installment_grade[(status_installment_grade.status_labels == 0)]
temp_22 = Fully_Paid_grade.iloc[:, 0].values
plot_grade_2 = np.array(np.unique(temp_22, return_counts=True))
plot_grade_22 = plot_grade_2.T
plot_stack_1 = np.hstack((plot_grade_11, plot_grade_22))
plot_stack_1 = pd.DataFrame(plot_stack_1)
home_status = new_data_df[10]
home_status = pd.DataFrame(home_status)
status_home_status = pd.DataFrame(np.hstack((home_status, status_labels))).rename(columns={1: 'status_labels'})
Charged_off_home_status = status_home_status[(status_home_status.status_labels == 1)]
temp_41 = Charged_off_home_status.iloc[:, 0].values
plot_home_status = np.array(np.unique(temp_41, return_counts=True))
plot_home_status_44 = pd.DataFrame(plot_home_status.T)
Fully_Paid_home_status = status_home_status[(status_home_status.status_labels == 0)]
temp_42 = Fully_Paid_home_status.iloc[:, 0].values
plot_home_status_2 = np.array(np.unique(temp_42, return_counts=True))
plot_home_status_55 = pd.DataFrame(plot_home_status_2.T)
plot_home_status_55 = plot_home_status_55.drop(0)
plot_stack_3 = np.hstack((plot_home_status_44, plot_home_status_55))
plot_stack_3 = pd.DataFrame(plot_stack_3)
annual_income = new_data[:, 11]
bins_2 = np.array([40000, 70000, 100000, 150000])
annual_income = annual_income.astype(float).reshape(annual_income.size, 1)
binned_annual_income = pd.DataFrame(np.digitize(annual_income, bins_2))
status_annual_groups = pd.DataFrame(np.hstack((binned_annual_income, status_labels))).rename(columns={1: 'status_labels'})
Charged_off_annual_income = status_annual_groups[(status_annual_groups.status_labels == 1)]
temp_51 = Charged_off_annual_income.iloc[:, 0].values
plot_annual_income = np.array(np.unique(temp_51, return_counts=True))
plot_annual_income_66 = pd.DataFrame(plot_annual_income.T)
Fully_Paid_annual_income = status_annual_groups[(status_annual_groups.status_labels == 0)]
temp_52 = Fully_Paid_annual_income.iloc[:, 0].values
plot_annual_income_2 = np.array(np.unique(temp_52, return_counts=True))
plot_annual_income_77 = pd.DataFrame(plot_annual_income_2.T)
plot_stack_4 = np.hstack((plot_annual_income_66, plot_annual_income_77))
plot_stack_4 = pd.DataFrame(plot_stack_4)
state = new_data_df[21]
state = pd.DataFrame(state)
status_state = pd.DataFrame(np.hstack((state, status_labels))).rename(columns={1: 'status_labels'})
Charged_off_state = status_state[(status_state.status_labels == 1)]
temp_61 = Charged_off_state.iloc[:, 0].values
plot_state = np.array(np.unique(temp_61, return_counts=True))
plot_state_88 = pd.DataFrame(plot_state.T)
Fully_Paid_state = status_state[(status_state.status_labels == 0)]
temp_62 = Fully_Paid_state.iloc[:, 0].values
plot_state_2 = np.array(np.unique(temp_62, return_counts=True))
plot_state_99 = pd.DataFrame(plot_state_2.T)
plot_state_88 = plot_state_88.drop(7)
plot_state_99 = plot_state_99.drop([7, 21, 28])
plot_stack_5 = np.hstack((plot_state_88, plot_state_99))
plot_stack_5 = pd.DataFrame(plot_stack_5)
    plot_stack_5 = plot_stack_5.drop(plot_stack_5.columns[2], axis=1)
    plot_stack_5.columns = ['state', 'Charged Off', 'Fully Paid']
    totals = [(i + j) for (i, j) in zip(plot_stack_5['Charged Off'], plot_stack_5['Fully Paid'])]
C_Off = [((i / j) * 100) for (i, j) in zip(plot_stack_5['Charged Off'], totals)]
C_Off = pd.DataFrame(C_Off)
return C_Off
#=============
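# Functions 207-209 below build the same per-state Charged Off / Fully Paid summary from the
# 'Fully Paid' / 'Charged Off' subset of the loan data; they differ only in how the final table
# is named and sorted.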
# Function 207
def cleaning_func_73(data):
    # Build a per-state summary of Charged Off vs. Fully Paid loans; intermediate
    # installment, grade, home-ownership and income tables are computed along the way,
    # but only the state table is returned.
    # core cleaning code
    import numpy as np
    import pandas as pd
    # data = pd.read_csv('../input/loan.csv')
    data_1 = pd.DataFrame(data)
    # Keep only the two outcomes of interest and stack them (Fully Paid rows first).
    category_one_data = data_1[data_1.loan_status == 'Fully Paid']
    category_two_data = data_1[data_1.loan_status == 'Charged Off']
    new_data = np.vstack((category_one_data, category_two_data))
    new_data = new_data[:, 2:-30]
    new_data_df = pd.DataFrame(new_data)
    # Bin the installment amount (column 5) into ten equal-width groups.
    installment_amt = new_data[:, 5]
    bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
    installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
    binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
    # Factorise the loan status (column 14): 0 = Fully Paid, 1 = Charged Off.
    status_new = new_data_df[14]
    factored_status = pd.factorize(status_new)
    status_labels = pd.DataFrame(factored_status[0])
    # Installment group vs. status.
    status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt, status_labels)),
                                             columns=['installment_group', 'status_labels'])
    Charged_off = status_installment_groups[status_installment_groups.status_labels == 1]
    temp_1 = Charged_off.iloc[:, 0].values
    plot_1 = np.array(np.unique(temp_1, return_counts=True))
    plot_1 = plot_1[:, :-1]  # drop the last group so both sides have the same number of rows
    plot_11 = plot_1.T
    Fully_paid = status_installment_groups[status_installment_groups.status_labels == 0]
    temp_2 = Fully_paid.iloc[:, 0].values
    plot_2 = np.array(np.unique(temp_2, return_counts=True))
    plot_22 = plot_2.T
    plot_stack = pd.DataFrame(np.hstack((plot_11, plot_22)))
    # Loan grade (column 6) vs. status.
    installment_grade = pd.DataFrame(new_data[:, 6])
    status_installment_grade = pd.DataFrame(np.hstack((installment_grade, status_labels)),
                                            columns=['grade', 'status_labels'])
    Charged_off_grade = status_installment_grade[status_installment_grade.status_labels == 1]
    temp_11 = Charged_off_grade.iloc[:, 0].values
    plot_grade_11 = np.array(np.unique(temp_11, return_counts=True)).T
    Fully_Paid_grade = status_installment_grade[status_installment_grade.status_labels == 0]
    temp_22 = Fully_Paid_grade.iloc[:, 0].values
    plot_grade_22 = np.array(np.unique(temp_22, return_counts=True)).T
    plot_stack_1 = pd.DataFrame(np.hstack((plot_grade_11, plot_grade_22)))
    # Home-ownership status (column 10) vs. status.
    home_status = pd.DataFrame(new_data_df[10])
    status_home_status = pd.DataFrame(np.hstack((home_status, status_labels)),
                                      columns=['home_status', 'status_labels'])
    Charged_off_home_status = status_home_status[status_home_status.status_labels == 1]
    temp_41 = Charged_off_home_status.iloc[:, 0].values
    plot_home_status_44 = pd.DataFrame(np.array(np.unique(temp_41, return_counts=True)).T)
    Fully_Paid_home_status = status_home_status[status_home_status.status_labels == 0]
    temp_42 = Fully_Paid_home_status.iloc[:, 0].values
    plot_home_status_55 = pd.DataFrame(np.array(np.unique(temp_42, return_counts=True)).T)
    plot_home_status_55 = plot_home_status_55.drop(0)  # drop the extra row so the two sides align
    plot_stack_3 = pd.DataFrame(np.hstack((plot_home_status_44, plot_home_status_55)))
    # Annual income (column 11), binned into four brackets, vs. status.
    annual_income = new_data[:, 11]
    bins_2 = np.array([40000, 70000, 100000, 150000])
    annual_income = annual_income.astype(float).reshape(annual_income.size, 1)
    binned_annual_income = pd.DataFrame(np.digitize(annual_income, bins_2))
    status_annual_groups = pd.DataFrame(np.hstack((binned_annual_income, status_labels)),
                                        columns=['income_group', 'status_labels'])
    Charged_off_annual_income = status_annual_groups[status_annual_groups.status_labels == 1]
    temp_51 = Charged_off_annual_income.iloc[:, 0].values
    plot_annual_income_66 = pd.DataFrame(np.array(np.unique(temp_51, return_counts=True)).T)
    Fully_Paid_annual_income = status_annual_groups[status_annual_groups.status_labels == 0]
    temp_52 = Fully_Paid_annual_income.iloc[:, 0].values
    plot_annual_income_77 = pd.DataFrame(np.array(np.unique(temp_52, return_counts=True)).T)
    plot_stack_4 = pd.DataFrame(np.hstack((plot_annual_income_66, plot_annual_income_77)))
    # State (column 21) vs. status.
    state = pd.DataFrame(new_data_df[21])
    status_state = pd.DataFrame(np.hstack((state, status_labels)),
                                columns=['state', 'status_labels'])
    Charged_off_state = status_state[status_state.status_labels == 1]
    temp_61 = Charged_off_state.iloc[:, 0].values
    plot_state_88 = pd.DataFrame(np.array(np.unique(temp_61, return_counts=True)).T)
    Fully_Paid_state = status_state[status_state.status_labels == 0]
    temp_62 = Fully_Paid_state.iloc[:, 0].values
    plot_state_99 = pd.DataFrame(np.array(np.unique(temp_62, return_counts=True)).T)
    # Drop the rows that appear on only one side so the two count tables line up row for row.
    plot_state_88 = plot_state_88.drop(7)
    plot_state_99 = plot_state_99.drop([7, 21, 28])
    plot_stack_5 = pd.DataFrame(np.hstack((plot_state_88, plot_state_99)))
    plot_stack_5 = plot_stack_5.drop(plot_stack_5.columns[2], axis=1)  # drop the repeated state column
    plot_stack_5.columns = ['state', 'Charged Off', 'Fully Paid']
    totals = [(i + j) for (i, j) in zip(plot_stack_5['Charged Off'], plot_stack_5['Fully Paid'])]
    C_Off = [((i / j) * 100) for (i, j) in zip(plot_stack_5['Charged Off'], totals)]
    C_Off = pd.DataFrame(C_Off)
    temp_plot = pd.DataFrame(np.hstack((plot_stack_5, C_Off)))
    temp_plot.columns = ['state', 'Charged Off', 'Fully Paid', '% Charged Off']
    return temp_plot
#=============
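# Usage sketch for cleaning_func_73 (assumes the Lending Club CSV referenced in the commented-out
# read_csv call is available locally):
#
#   import pandas as pd
#   loans = pd.read_csv('../input/loan.csv', low_memory=False)
#   state_summary = cleaning_func_73(loans)
#   print(state_summary.sort_values('% Charged Off', ascending=False).head())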
# Function 208
def cleaning_func_74(data):
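    # Same per-state Charged Off / Fully Paid summary as cleaning_func_73, but the returned
    # table is sorted by '% Charged Off' in descending order.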
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[(slice(None, None, None), slice(2, (- 30), None))]
new_data_df = pd.DataFrame(new_data)
installment_amt = new_data[(slice(None, None, None), 5)]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
status_new = new_data_df[14]
factored_status = np.array(pd.factorize(status_new))
status_labels = pd.DataFrame(factored_status[0])
status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt, status_labels)))
Charged_off = status_installment_groups[(status_installment_groups.status_labels == 1)]
temp_1 = Charged_off.iloc[(slice(None, None, None), 0)].values
plot_1 = np.array(np.unique(temp_1, return_counts=True))
plot_1 = plot_1[(slice(None, None, None), slice(None, (- 1), None))]
plot_11 = plot_1.T
Fully_paid = status_installment_groups[(status_installment_groups.status_labels == 0)]
temp_2 = Fully_paid.iloc[(slice(None, None, None), 0)].values
plot_2 = np.array(np.unique(temp_2, return_counts=True))
plot_22 = plot_2.T
plot_stack = np.hstack((plot_11, plot_22))
plot_stack = pd.DataFrame(plot_stack)
installment_grade = new_data[(slice(None, None, None), 6)]
installment_grade = pd.DataFrame(installment_grade)
status_installment_grade = pd.DataFrame(np.hstack((installment_grade, status_labels)))
Charged_off_grade = status_installment_grade[(status_installment_grade.status_labels == 1)]
temp_11 = Charged_off_grade.iloc[(slice(None, None, None), 0)].values
plot_grade = np.array(np.unique(temp_11, return_counts=True))
plot_grade_11 = plot_grade.T
Fully_Paid_grade = status_installment_grade[(status_installment_grade.status_labels == 0)]
temp_22 = Fully_Paid_grade.iloc[(slice(None, None, None), 0)].values
plot_grade_2 = np.array(np.unique(temp_22, return_counts=True))
plot_grade_22 = plot_grade_2.T
plot_stack_1 = np.hstack((plot_grade_11, plot_grade_22))
plot_stack_1 = pd.DataFrame(plot_stack_1)
home_status = new_data_df[10]
home_status = pd.DataFrame(home_status)
status_home_status = pd.DataFrame(np.hstack((home_status, status_labels)))
Charged_off_home_status = status_home_status[(status_home_status.status_labels == 1)]
temp_41 = Charged_off_home_status.iloc[(slice(None, None, None), 0)].values
plot_home_status = np.array(np.unique(temp_41, return_counts=True))
plot_home_status_44 = pd.DataFrame(plot_home_status.T)
Fully_Paid_home_status = status_home_status[(status_home_status.status_labels == 0)]
temp_42 = Fully_Paid_home_status.iloc[(slice(None, None, None), 0)].values
plot_home_status_2 = np.array(np.unique(temp_42, return_counts=True))
plot_home_status_55 = pd.DataFrame(plot_home_status_2.T)
plot_home_status_55 = plot_home_status_55.drop(0)
plot_stack_3 = np.hstack((plot_home_status_44, plot_home_status_55))
plot_stack_3 = pd.DataFrame(plot_stack_3)
annual_income = new_data[(slice(None, None, None), 11)]
bins_2 = np.array([40000, 70000, 100000, 150000])
annual_income = annual_income.astype(float).reshape(annual_income.size, 1)
binned_annual_income = pd.DataFrame(np.digitize(annual_income, bins_2))
status_annual_groups = pd.DataFrame(np.hstack((binned_annual_income, status_labels)))
Charged_off_annual_income = status_annual_groups[(status_annual_groups.status_labels == 1)]
temp_51 = Charged_off_annual_income.iloc[(slice(None, None, None), 0)].values
plot_annual_income = np.array(np.unique(temp_51, return_counts=True))
plot_annual_income_66 = pd.DataFrame(plot_annual_income.T)
Fully_Paid_annual_income = status_annual_groups[(status_annual_groups.status_labels == 0)]
temp_52 = Fully_Paid_annual_income.iloc[(slice(None, None, None), 0)].values
plot_annual_income_2 = np.array(np.unique(temp_52, return_counts=True))
plot_annual_income_77 = pd.DataFrame(plot_annual_income_2.T)
plot_stack_4 = np.hstack((plot_annual_income_66, plot_annual_income_77))
plot_stack_4 = pd.DataFrame(plot_stack_4)
state = new_data_df[21]
state = pd.DataFrame(state)
status_state = pd.DataFrame(np.hstack((state, status_labels)))
Charged_off_state = status_state[(status_state.status_labels == 1)]
temp_61 = Charged_off_state.iloc[(slice(None, None, None), 0)].values
plot_state = np.array(np.unique(temp_61, return_counts=True))
plot_state_88 = pd.DataFrame(plot_state.T)
Fully_Paid_state = status_state[(status_state.status_labels == 0)]
temp_62 = Fully_Paid_state.iloc[(slice(None, None, None), 0)].values
plot_state_2 = np.array(np.unique(temp_62, return_counts=True))
plot_state_99 = pd.DataFrame(plot_state_2.T)
plot_state_88 = plot_state_88.drop(7)
plot_state_99 = plot_state_99.drop([7, 21, 28])
plot_stack_5 = np.hstack((plot_state_88, plot_state_99))
plot_stack_5 = pd.DataFrame(plot_stack_5)
    plot_stack_5 = plot_stack_5.drop(plot_stack_5.columns[2], axis=1)
    plot_stack_5.columns = ['state', 'Charged Off', 'Fully Paid']
    totals = [(i + j) for (i, j) in zip(plot_stack_5['Charged Off'], plot_stack_5['Fully Paid'])]
C_Off = [((i / j) * 100) for (i, j) in zip(plot_stack_5['Charged Off'], totals)]
C_Off = pd.DataFrame(C_Off)
temp_plot = np.hstack((plot_stack_5, C_Off))
temp_plot = pd.DataFrame(temp_plot)
    temp_plot.columns = ['state', 'Charged Off', 'Fully Paid', '% Charged Off']
    temp_plot = temp_plot.sort_values(by='% Charged Off', ascending=False)
    temp_plot = temp_plot.reset_index(drop=True)
return temp_plot
#=============
# Function 209
def cleaning_func_75(data):
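    # Same per-state summary again, but the table is returned unsorted and without named columns.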
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[(slice(None, None, None), slice(2, (- 30), None))]
new_data_df = pd.DataFrame(new_data)
installment_amt = new_data[(slice(None, None, None), 5)]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
status_new = new_data_df[14]
factored_status = np.array(pd.factorize(status_new))
status_labels = pd.DataFrame(factored_status[0])
status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt, status_labels)))
Charged_off = status_installment_groups[(status_installment_groups.status_labels == 1)]
temp_1 = Charged_off.iloc[(slice(None, None, None), 0)].values
plot_1 = np.array(np.unique(temp_1, return_counts=True))
plot_1 = plot_1[(slice(None, None, None), slice(None, (- 1), None))]
plot_11 = plot_1.T
Fully_paid = status_installment_groups[(status_installment_groups.status_labels == 0)]
temp_2 = Fully_paid.iloc[(slice(None, None, None), 0)].values
plot_2 = np.array(np.unique(temp_2, return_counts=True))
plot_22 = plot_2.T
plot_stack = np.hstack((plot_11, plot_22))
plot_stack = pd.DataFrame(plot_stack)
installment_grade = new_data[(slice(None, None, None), 6)]
installment_grade = pd.DataFrame(installment_grade)
status_installment_grade = pd.DataFrame(np.hstack((installment_grade, status_labels)))
Charged_off_grade = status_installment_grade[(status_installment_grade.status_labels == 1)]
temp_11 = Charged_off_grade.iloc[(slice(None, None, None), 0)].values
plot_grade = np.array(np.unique(temp_11, return_counts=True))
plot_grade_11 = plot_grade.T
Fully_Paid_grade = status_installment_grade[(status_installment_grade.status_labels == 0)]
temp_22 = Fully_Paid_grade.iloc[(slice(None, None, None), 0)].values
plot_grade_2 = np.array(np.unique(temp_22, return_counts=True))
plot_grade_22 = plot_grade_2.T
plot_stack_1 = np.hstack((plot_grade_11, plot_grade_22))
plot_stack_1 = pd.DataFrame(plot_stack_1)
home_status = new_data_df[10]
home_status = pd.DataFrame(home_status)
status_home_status = pd.DataFrame(np.hstack((home_status, status_labels)))
Charged_off_home_status = status_home_status[(status_home_status.status_labels == 1)]
temp_41 = Charged_off_home_status.iloc[(slice(None, None, None), 0)].values
plot_home_status = np.array(np.unique(temp_41, return_counts=True))
plot_home_status_44 = pd.DataFrame(plot_home_status.T)
Fully_Paid_home_status = status_home_status[(status_home_status.status_labels == 0)]
temp_42 = Fully_Paid_home_status.iloc[(slice(None, None, None), 0)].values
plot_home_status_2 = np.array(np.unique(temp_42, return_counts=True))
plot_home_status_55 = pd.DataFrame(plot_home_status_2.T)
plot_home_status_55 = plot_home_status_55.drop(0)
plot_stack_3 = np.hstack((plot_home_status_44, plot_home_status_55))
plot_stack_3 = pd.DataFrame(plot_stack_3)
annual_income = new_data[(slice(None, None, None), 11)]
bins_2 = np.array([40000, 70000, 100000, 150000])
annual_income = annual_income.astype(float).reshape(annual_income.size, 1)
binned_annual_income = pd.DataFrame(np.digitize(annual_income, bins_2))
status_annual_groups = pd.DataFrame(np.hstack((binned_annual_income, status_labels)))
Charged_off_annual_income = status_annual_groups[(status_annual_groups.status_labels == 1)]
temp_51 = Charged_off_annual_income.iloc[(slice(None, None, None), 0)].values
plot_annual_income = np.array(np.unique(temp_51, return_counts=True))
plot_annual_income_66 = pd.DataFrame(plot_annual_income.T)
Fully_Paid_annual_income = status_annual_groups[(status_annual_groups.status_labels == 0)]
temp_52 = Fully_Paid_annual_income.iloc[(slice(None, None, None), 0)].values
plot_annual_income_2 = np.array(np.unique(temp_52, return_counts=True))
plot_annual_income_77 = pd.DataFrame(plot_annual_income_2.T)
plot_stack_4 = np.hstack((plot_annual_income_66, plot_annual_income_77))
plot_stack_4 = pd.DataFrame(plot_stack_4)
state = new_data_df[21]
state = pd.DataFrame(state)
status_state = pd.DataFrame(np.hstack((state, status_labels)))
Charged_off_state = status_state[(status_state.status_labels == 1)]
temp_61 = Charged_off_state.iloc[(slice(None, None, None), 0)].values
plot_state = np.array(np.unique(temp_61, return_counts=True))
plot_state_88 = pd.DataFrame(plot_state.T)
Fully_Paid_state = status_state[(status_state.status_labels == 0)]
temp_62 = Fully_Paid_state.iloc[(slice(None, None, None), 0)].values
plot_state_2 = np.array(np.unique(temp_62, return_counts=True))
plot_state_99 = pd.DataFrame(plot_state_2.T)
plot_state_88 = plot_state_88.drop(7)
plot_state_99 = plot_state_99.drop([7, 21, 28])
plot_stack_5 = np.hstack((plot_state_88, plot_state_99))
plot_stack_5 = pd.DataFrame(plot_stack_5)
    plot_stack_5 = plot_stack_5.drop(plot_stack_5.columns[2], axis=1)
    plot_stack_5.columns = ['state', 'Charged Off', 'Fully Paid']
    totals = [(i + j) for (i, j) in zip(plot_stack_5['Charged Off'], plot_stack_5['Fully Paid'])]
C_Off = [((i / j) * 100) for (i, j) in zip(plot_stack_5['Charged Off'], totals)]
C_Off = pd.DataFrame(C_Off)
temp_plot = np.hstack((plot_stack_5, C_Off))
temp_plot = pd.DataFrame(temp_plot)
return temp_plot
#=============
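# Functions 210-216 repeat the same preprocessing and differ only in which piece of the
# verification-status summary they return (charged-off rows, fully-paid rows, per-status counts,
# or the combined table).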
# Function 210
def cleaning_func_77(data):
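    # Pairs each loan's verification status with its factorised loan status and returns only
    # the charged-off rows.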
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[(slice(None, None, None), slice(2, (- 30), None))]
new_data_df = pd.DataFrame(new_data)
installment_amt = new_data[(slice(None, None, None), 5)]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
status_new = new_data_df[14]
factored_status = np.array(pd.factorize(status_new))
status_labels = pd.DataFrame(factored_status[0])
status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt, status_labels)))
Charged_off = status_installment_groups[(status_installment_groups.status_labels == 1)]
temp_1 = Charged_off.iloc[(slice(None, None, None), 0)].values
plot_1 = np.array(np.unique(temp_1, return_counts=True))
plot_1 = plot_1[(slice(None, None, None), slice(None, (- 1), None))]
plot_11 = plot_1.T
Fully_paid = status_installment_groups[(status_installment_groups.status_labels == 0)]
temp_2 = Fully_paid.iloc[(slice(None, None, None), 0)].values
plot_2 = np.array(np.unique(temp_2, return_counts=True))
plot_22 = plot_2.T
plot_stack = np.hstack((plot_11, plot_22))
plot_stack = pd.DataFrame(plot_stack)
installment_grade = new_data[(slice(None, None, None), 6)]
installment_grade = pd.DataFrame(installment_grade)
status_installment_grade = pd.DataFrame(np.hstack((installment_grade, status_labels)))
Charged_off_grade = status_installment_grade[(status_installment_grade.status_labels == 1)]
temp_11 = Charged_off_grade.iloc[(slice(None, None, None), 0)].values
plot_grade = np.array(np.unique(temp_11, return_counts=True))
plot_grade_11 = plot_grade.T
Fully_Paid_grade = status_installment_grade[(status_installment_grade.status_labels == 0)]
temp_22 = Fully_Paid_grade.iloc[(slice(None, None, None), 0)].values
plot_grade_2 = np.array(np.unique(temp_22, return_counts=True))
plot_grade_22 = plot_grade_2.T
plot_stack_1 = np.hstack((plot_grade_11, plot_grade_22))
plot_stack_1 = pd.DataFrame(plot_stack_1)
home_status = new_data_df[10]
home_status = pd.DataFrame(home_status)
status_home_status = pd.DataFrame(np.hstack((home_status, status_labels)))
Charged_off_home_status = status_home_status[(status_home_status.status_labels == 1)]
temp_41 = Charged_off_home_status.iloc[(slice(None, None, None), 0)].values
plot_home_status = np.array(np.unique(temp_41, return_counts=True))
plot_home_status_44 = pd.DataFrame(plot_home_status.T)
Fully_Paid_home_status = status_home_status[(status_home_status.status_labels == 0)]
temp_42 = Fully_Paid_home_status.iloc[(slice(None, None, None), 0)].values
plot_home_status_2 = np.array(np.unique(temp_42, return_counts=True))
plot_home_status_55 = pd.DataFrame(plot_home_status_2.T)
plot_home_status_55 = plot_home_status_55.drop(0)
plot_stack_3 = np.hstack((plot_home_status_44, plot_home_status_55))
plot_stack_3 = pd.DataFrame(plot_stack_3)
annual_income = new_data[(slice(None, None, None), 11)]
bins_2 = np.array([40000, 70000, 100000, 150000])
annual_income = annual_income.astype(float).reshape(annual_income.size, 1)
binned_annual_income = pd.DataFrame(np.digitize(annual_income, bins_2))
status_annual_groups = pd.DataFrame(np.hstack((binned_annual_income, status_labels)))
Charged_off_annual_income = status_annual_groups[(status_annual_groups.status_labels == 1)]
temp_51 = Charged_off_annual_income.iloc[(slice(None, None, None), 0)].values
plot_annual_income = np.array(np.unique(temp_51, return_counts=True))
plot_annual_income_66 = pd.DataFrame(plot_annual_income.T)
Fully_Paid_annual_income = status_annual_groups[(status_annual_groups.status_labels == 0)]
temp_52 = Fully_Paid_annual_income.iloc[(slice(None, None, None), 0)].values
plot_annual_income_2 = np.array(np.unique(temp_52, return_counts=True))
plot_annual_income_77 = pd.DataFrame(plot_annual_income_2.T)
plot_stack_4 = np.hstack((plot_annual_income_66, plot_annual_income_77))
plot_stack_4 = pd.DataFrame(plot_stack_4)
state = new_data_df[21]
state = pd.DataFrame(state)
status_state = pd.DataFrame(np.hstack((state, status_labels)))
Charged_off_state = status_state[(status_state.status_labels == 1)]
temp_61 = Charged_off_state.iloc[(slice(None, None, None), 0)].values
plot_state = np.array(np.unique(temp_61, return_counts=True))
plot_state_88 = pd.DataFrame(plot_state.T)
Fully_Paid_state = status_state[(status_state.status_labels == 0)]
temp_62 = Fully_Paid_state.iloc[(slice(None, None, None), 0)].values
plot_state_2 = np.array(np.unique(temp_62, return_counts=True))
plot_state_99 = pd.DataFrame(plot_state_2.T)
plot_state_88 = plot_state_88.drop(7)
plot_state_99 = plot_state_99.drop([7, 21, 28])
plot_stack_5 = np.hstack((plot_state_88, plot_state_99))
plot_stack_5 = pd.DataFrame(plot_stack_5)
ver_stat = new_data_df[12]
ver_stat = pd.DataFrame(ver_stat)
status_ver_stat = pd.DataFrame(np.hstack((ver_stat, status_labels)))
Charged_off_ver_stat = status_ver_stat[(status_ver_stat.status_labels == 1)]
return Charged_off_ver_stat
#=============
# Function 211
def cleaning_func_78(data):
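    # As above, but returns the fully-paid rows of the (verification status, status label) pairing.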
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[(slice(None, None, None), slice(2, (- 30), None))]
new_data_df = pd.DataFrame(new_data)
installment_amt = new_data[(slice(None, None, None), 5)]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
status_new = new_data_df[14]
factored_status = np.array(pd.factorize(status_new))
status_labels = pd.DataFrame(factored_status[0])
status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt, status_labels)))
Charged_off = status_installment_groups[(status_installment_groups.status_labels == 1)]
temp_1 = Charged_off.iloc[(slice(None, None, None), 0)].values
plot_1 = np.array(np.unique(temp_1, return_counts=True))
plot_1 = plot_1[(slice(None, None, None), slice(None, (- 1), None))]
plot_11 = plot_1.T
Fully_paid = status_installment_groups[(status_installment_groups.status_labels == 0)]
temp_2 = Fully_paid.iloc[(slice(None, None, None), 0)].values
plot_2 = np.array(np.unique(temp_2, return_counts=True))
plot_22 = plot_2.T
plot_stack = np.hstack((plot_11, plot_22))
plot_stack = pd.DataFrame(plot_stack)
installment_grade = new_data[(slice(None, None, None), 6)]
installment_grade = pd.DataFrame(installment_grade)
status_installment_grade = pd.DataFrame(np.hstack((installment_grade, status_labels)))
Charged_off_grade = status_installment_grade[(status_installment_grade.status_labels == 1)]
temp_11 = Charged_off_grade.iloc[(slice(None, None, None), 0)].values
plot_grade = np.array(np.unique(temp_11, return_counts=True))
plot_grade_11 = plot_grade.T
Fully_Paid_grade = status_installment_grade[(status_installment_grade.status_labels == 0)]
temp_22 = Fully_Paid_grade.iloc[(slice(None, None, None), 0)].values
plot_grade_2 = np.array(np.unique(temp_22, return_counts=True))
plot_grade_22 = plot_grade_2.T
plot_stack_1 = np.hstack((plot_grade_11, plot_grade_22))
plot_stack_1 = pd.DataFrame(plot_stack_1)
home_status = new_data_df[10]
home_status = pd.DataFrame(home_status)
status_home_status = pd.DataFrame(np.hstack((home_status, status_labels)))
Charged_off_home_status = status_home_status[(status_home_status.status_labels == 1)]
temp_41 = Charged_off_home_status.iloc[(slice(None, None, None), 0)].values
plot_home_status = np.array(np.unique(temp_41, return_counts=True))
plot_home_status_44 = pd.DataFrame(plot_home_status.T)
Fully_Paid_home_status = status_home_status[(status_home_status.status_labels == 0)]
temp_42 = Fully_Paid_home_status.iloc[(slice(None, None, None), 0)].values
plot_home_status_2 = np.array(np.unique(temp_42, return_counts=True))
plot_home_status_55 = pd.DataFrame(plot_home_status_2.T)
plot_home_status_55 = plot_home_status_55.drop(0)
plot_stack_3 = np.hstack((plot_home_status_44, plot_home_status_55))
plot_stack_3 = pd.DataFrame(plot_stack_3)
annual_income = new_data[(slice(None, None, None), 11)]
bins_2 = np.array([40000, 70000, 100000, 150000])
annual_income = annual_income.astype(float).reshape(annual_income.size, 1)
binned_annual_income = pd.DataFrame(np.digitize(annual_income, bins_2))
status_annual_groups = pd.DataFrame(np.hstack((binned_annual_income, status_labels)))
Charged_off_annual_income = status_annual_groups[(status_annual_groups.status_labels == 1)]
temp_51 = Charged_off_annual_income.iloc[(slice(None, None, None), 0)].values
plot_annual_income = np.array(np.unique(temp_51, return_counts=True))
plot_annual_income_66 = pd.DataFrame(plot_annual_income.T)
Fully_Paid_annual_income = status_annual_groups[(status_annual_groups.status_labels == 0)]
temp_52 = Fully_Paid_annual_income.iloc[(slice(None, None, None), 0)].values
plot_annual_income_2 = np.array(np.unique(temp_52, return_counts=True))
plot_annual_income_77 = pd.DataFrame(plot_annual_income_2.T)
plot_stack_4 = np.hstack((plot_annual_income_66, plot_annual_income_77))
plot_stack_4 = pd.DataFrame(plot_stack_4)
state = new_data_df[21]
state = pd.DataFrame(state)
status_state = pd.DataFrame(np.hstack((state, status_labels)))
Charged_off_state = status_state[(status_state.status_labels == 1)]
temp_61 = Charged_off_state.iloc[(slice(None, None, None), 0)].values
plot_state = np.array(np.unique(temp_61, return_counts=True))
plot_state_88 = pd.DataFrame(plot_state.T)
Fully_Paid_state = status_state[(status_state.status_labels == 0)]
temp_62 = Fully_Paid_state.iloc[(slice(None, None, None), 0)].values
plot_state_2 = np.array(np.unique(temp_62, return_counts=True))
plot_state_99 = pd.DataFrame(plot_state_2.T)
plot_state_88 = plot_state_88.drop(7)
plot_state_99 = plot_state_99.drop([7, 21, 28])
plot_stack_5 = np.hstack((plot_state_88, plot_state_99))
plot_stack_5 = pd.DataFrame(plot_stack_5)
ver_stat = new_data_df[12]
ver_stat = pd.DataFrame(ver_stat)
status_ver_stat = pd.DataFrame(np.hstack((ver_stat, status_labels)))
Charged_off_ver_stat = status_ver_stat[(status_ver_stat.status_labels == 1)]
temp_71 = Charged_off_ver_stat.iloc[(slice(None, None, None), 0)].values
plot_ver_stat = np.array(np.unique(temp_71, return_counts=True))
plot_ver_stat_101 = pd.DataFrame(plot_ver_stat.T)
Fully_Paid_ver_stat = status_ver_stat[(status_ver_stat.status_labels == 0)]
return Fully_Paid_ver_stat
#=============
# Function 212
def cleaning_func_79(data):
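    # Returns the count of charged-off loans for each verification status.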
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[(slice(None, None, None), slice(2, (- 30), None))]
new_data_df = pd.DataFrame(new_data)
installment_amt = new_data[(slice(None, None, None), 5)]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
status_new = new_data_df[14]
factored_status = np.array(pd.factorize(status_new))
status_labels = pd.DataFrame(factored_status[0])
status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt, status_labels)))
Charged_off = status_installment_groups[(status_installment_groups.status_labels == 1)]
temp_1 = Charged_off.iloc[(slice(None, None, None), 0)].values
plot_1 = np.array(np.unique(temp_1, return_counts=True))
plot_1 = plot_1[(slice(None, None, None), slice(None, (- 1), None))]
plot_11 = plot_1.T
Fully_paid = status_installment_groups[(status_installment_groups.status_labels == 0)]
temp_2 = Fully_paid.iloc[(slice(None, None, None), 0)].values
plot_2 = np.array(np.unique(temp_2, return_counts=True))
plot_22 = plot_2.T
plot_stack = np.hstack((plot_11, plot_22))
plot_stack = pd.DataFrame(plot_stack)
installment_grade = new_data[(slice(None, None, None), 6)]
installment_grade = pd.DataFrame(installment_grade)
status_installment_grade = pd.DataFrame(np.hstack((installment_grade, status_labels)))
Charged_off_grade = status_installment_grade[(status_installment_grade.status_labels == 1)]
temp_11 = Charged_off_grade.iloc[(slice(None, None, None), 0)].values
plot_grade = np.array(np.unique(temp_11, return_counts=True))
plot_grade_11 = plot_grade.T
Fully_Paid_grade = status_installment_grade[(status_installment_grade.status_labels == 0)]
temp_22 = Fully_Paid_grade.iloc[(slice(None, None, None), 0)].values
plot_grade_2 = np.array(np.unique(temp_22, return_counts=True))
plot_grade_22 = plot_grade_2.T
plot_stack_1 = np.hstack((plot_grade_11, plot_grade_22))
plot_stack_1 = pd.DataFrame(plot_stack_1)
home_status = new_data_df[10]
home_status = pd.DataFrame(home_status)
status_home_status = pd.DataFrame(np.hstack((home_status, status_labels)))
Charged_off_home_status = status_home_status[(status_home_status.status_labels == 1)]
temp_41 = Charged_off_home_status.iloc[(slice(None, None, None), 0)].values
plot_home_status = np.array(np.unique(temp_41, return_counts=True))
plot_home_status_44 = pd.DataFrame(plot_home_status.T)
Fully_Paid_home_status = status_home_status[(status_home_status.status_labels == 0)]
temp_42 = Fully_Paid_home_status.iloc[(slice(None, None, None), 0)].values
plot_home_status_2 = np.array(np.unique(temp_42, return_counts=True))
plot_home_status_55 = pd.DataFrame(plot_home_status_2.T)
plot_home_status_55 = plot_home_status_55.drop(0)
plot_stack_3 = np.hstack((plot_home_status_44, plot_home_status_55))
plot_stack_3 = pd.DataFrame(plot_stack_3)
annual_income = new_data[(slice(None, None, None), 11)]
bins_2 = np.array([40000, 70000, 100000, 150000])
annual_income = annual_income.astype(float).reshape(annual_income.size, 1)
binned_annual_income = pd.DataFrame(np.digitize(annual_income, bins_2))
status_annual_groups = pd.DataFrame(np.hstack((binned_annual_income, status_labels)))
Charged_off_annual_income = status_annual_groups[(status_annual_groups.status_labels == 1)]
temp_51 = Charged_off_annual_income.iloc[(slice(None, None, None), 0)].values
plot_annual_income = np.array(np.unique(temp_51, return_counts=True))
plot_annual_income_66 = pd.DataFrame(plot_annual_income.T)
Fully_Paid_annual_income = status_annual_groups[(status_annual_groups.status_labels == 0)]
temp_52 = Fully_Paid_annual_income.iloc[(slice(None, None, None), 0)].values
plot_annual_income_2 = np.array(np.unique(temp_52, return_counts=True))
plot_annual_income_77 = pd.DataFrame(plot_annual_income_2.T)
plot_stack_4 = np.hstack((plot_annual_income_66, plot_annual_income_77))
plot_stack_4 = pd.DataFrame(plot_stack_4)
state = new_data_df[21]
state = pd.DataFrame(state)
status_state = pd.DataFrame(np.hstack((state, status_labels)))
Charged_off_state = status_state[(status_state.status_labels == 1)]
temp_61 = Charged_off_state.iloc[(slice(None, None, None), 0)].values
plot_state = np.array(np.unique(temp_61, return_counts=True))
plot_state_88 = pd.DataFrame(plot_state.T)
Fully_Paid_state = status_state[(status_state.status_labels == 0)]
temp_62 = Fully_Paid_state.iloc[(slice(None, None, None), 0)].values
plot_state_2 = np.array(np.unique(temp_62, return_counts=True))
plot_state_99 = pd.DataFrame(plot_state_2.T)
plot_state_88 = plot_state_88.drop(7)
plot_state_99 = plot_state_99.drop([7, 21, 28])
plot_stack_5 = np.hstack((plot_state_88, plot_state_99))
plot_stack_5 = pd.DataFrame(plot_stack_5)
ver_stat = new_data_df[12]
ver_stat = pd.DataFrame(ver_stat)
status_ver_stat = pd.DataFrame(np.hstack((ver_stat, status_labels)))
Charged_off_ver_stat = status_ver_stat[(status_ver_stat.status_labels == 1)]
temp_71 = Charged_off_ver_stat.iloc[(slice(None, None, None), 0)].values
plot_ver_stat = np.array(np.unique(temp_71, return_counts=True))
plot_ver_stat_101 = pd.DataFrame(plot_ver_stat.T)
return plot_ver_stat_101
#=============
# Function 213
def cleaning_func_80(data):
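    # Returns the verification-status column (column 12 of the trimmed data) as a one-column DataFrame.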
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[(slice(None, None, None), slice(2, (- 30), None))]
new_data_df = pd.DataFrame(new_data)
installment_amt = new_data[(slice(None, None, None), 5)]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
status_new = new_data_df[14]
factored_status = np.array(pd.factorize(status_new))
status_labels = pd.DataFrame(factored_status[0])
status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt, status_labels)))
Charged_off = status_installment_groups[(status_installment_groups.status_labels == 1)]
temp_1 = Charged_off.iloc[(slice(None, None, None), 0)].values
plot_1 = np.array(np.unique(temp_1, return_counts=True))
plot_1 = plot_1[(slice(None, None, None), slice(None, (- 1), None))]
plot_11 = plot_1.T
Fully_paid = status_installment_groups[(status_installment_groups.status_labels == 0)]
temp_2 = Fully_paid.iloc[(slice(None, None, None), 0)].values
plot_2 = np.array(np.unique(temp_2, return_counts=True))
plot_22 = plot_2.T
plot_stack = np.hstack((plot_11, plot_22))
plot_stack = pd.DataFrame(plot_stack)
installment_grade = new_data[(slice(None, None, None), 6)]
installment_grade = pd.DataFrame(installment_grade)
status_installment_grade = pd.DataFrame(np.hstack((installment_grade, status_labels)))
Charged_off_grade = status_installment_grade[(status_installment_grade.status_labels == 1)]
temp_11 = Charged_off_grade.iloc[(slice(None, None, None), 0)].values
plot_grade = np.array(np.unique(temp_11, return_counts=True))
plot_grade_11 = plot_grade.T
Fully_Paid_grade = status_installment_grade[(status_installment_grade.status_labels == 0)]
temp_22 = Fully_Paid_grade.iloc[(slice(None, None, None), 0)].values
plot_grade_2 = np.array(np.unique(temp_22, return_counts=True))
plot_grade_22 = plot_grade_2.T
plot_stack_1 = np.hstack((plot_grade_11, plot_grade_22))
plot_stack_1 = pd.DataFrame(plot_stack_1)
home_status = new_data_df[10]
home_status = pd.DataFrame(home_status)
status_home_status = pd.DataFrame(np.hstack((home_status, status_labels)))
Charged_off_home_status = status_home_status[(status_home_status.status_labels == 1)]
temp_41 = Charged_off_home_status.iloc[(slice(None, None, None), 0)].values
plot_home_status = np.array(np.unique(temp_41, return_counts=True))
plot_home_status_44 = pd.DataFrame(plot_home_status.T)
Fully_Paid_home_status = status_home_status[(status_home_status.status_labels == 0)]
temp_42 = Fully_Paid_home_status.iloc[(slice(None, None, None), 0)].values
plot_home_status_2 = np.array(np.unique(temp_42, return_counts=True))
plot_home_status_55 = pd.DataFrame(plot_home_status_2.T)
plot_home_status_55 = plot_home_status_55.drop(0)
plot_stack_3 = np.hstack((plot_home_status_44, plot_home_status_55))
plot_stack_3 = pd.DataFrame(plot_stack_3)
annual_income = new_data[(slice(None, None, None), 11)]
bins_2 = np.array([40000, 70000, 100000, 150000])
annual_income = annual_income.astype(float).reshape(annual_income.size, 1)
binned_annual_income = pd.DataFrame(np.digitize(annual_income, bins_2))
status_annual_groups = pd.DataFrame(np.hstack((binned_annual_income, status_labels)))
Charged_off_annual_income = status_annual_groups[(status_annual_groups.status_labels == 1)]
temp_51 = Charged_off_annual_income.iloc[(slice(None, None, None), 0)].values
plot_annual_income = np.array(np.unique(temp_51, return_counts=True))
plot_annual_income_66 = pd.DataFrame(plot_annual_income.T)
Fully_Paid_annual_income = status_annual_groups[(status_annual_groups.status_labels == 0)]
temp_52 = Fully_Paid_annual_income.iloc[(slice(None, None, None), 0)].values
plot_annual_income_2 = np.array(np.unique(temp_52, return_counts=True))
plot_annual_income_77 = pd.DataFrame(plot_annual_income_2.T)
plot_stack_4 = np.hstack((plot_annual_income_66, plot_annual_income_77))
plot_stack_4 = pd.DataFrame(plot_stack_4)
state = new_data_df[21]
state = pd.DataFrame(state)
status_state = pd.DataFrame(np.hstack((state, status_labels)))
Charged_off_state = status_state[(status_state.status_labels == 1)]
temp_61 = Charged_off_state.iloc[(slice(None, None, None), 0)].values
plot_state = np.array(np.unique(temp_61, return_counts=True))
plot_state_88 = pd.DataFrame(plot_state.T)
Fully_Paid_state = status_state[(status_state.status_labels == 0)]
temp_62 = Fully_Paid_state.iloc[(slice(None, None, None), 0)].values
plot_state_2 = np.array(np.unique(temp_62, return_counts=True))
plot_state_99 = pd.DataFrame(plot_state_2.T)
plot_state_88 = plot_state_88.drop(7)
plot_state_99 = plot_state_99.drop([7, 21, 28])
plot_stack_5 = np.hstack((plot_state_88, plot_state_99))
plot_stack_5 = pd.DataFrame(plot_stack_5)
ver_stat = new_data_df[12]
ver_stat = pd.DataFrame(ver_stat)
return ver_stat
#=============
# Function 214
def cleaning_func_81(data):
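    # Returns charged-off and fully-paid loan counts per verification status, with columns
    # ['Verification Status', 'Charged Off', 'Fully Paid'].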
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[(slice(None, None, None), slice(2, (- 30), None))]
new_data_df = pd.DataFrame(new_data)
installment_amt = new_data[(slice(None, None, None), 5)]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
status_new = new_data_df[14]
factored_status = np.array(pd.factorize(status_new))
status_labels = pd.DataFrame(factored_status[0])
status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt, status_labels)))
Charged_off = status_installment_groups[(status_installment_groups.status_labels == 1)]
temp_1 = Charged_off.iloc[(slice(None, None, None), 0)].values
plot_1 = np.array(np.unique(temp_1, return_counts=True))
plot_1 = plot_1[(slice(None, None, None), slice(None, (- 1), None))]
plot_11 = plot_1.T
Fully_paid = status_installment_groups[(status_installment_groups.status_labels == 0)]
temp_2 = Fully_paid.iloc[(slice(None, None, None), 0)].values
plot_2 = np.array(np.unique(temp_2, return_counts=True))
plot_22 = plot_2.T
plot_stack = np.hstack((plot_11, plot_22))
plot_stack = pd.DataFrame(plot_stack)
installment_grade = new_data[(slice(None, None, None), 6)]
installment_grade = pd.DataFrame(installment_grade)
status_installment_grade = pd.DataFrame(np.hstack((installment_grade, status_labels)))
Charged_off_grade = status_installment_grade[(status_installment_grade.status_labels == 1)]
temp_11 = Charged_off_grade.iloc[(slice(None, None, None), 0)].values
plot_grade = np.array(np.unique(temp_11, return_counts=True))
plot_grade_11 = plot_grade.T
Fully_Paid_grade = status_installment_grade[(status_installment_grade.status_labels == 0)]
temp_22 = Fully_Paid_grade.iloc[(slice(None, None, None), 0)].values
plot_grade_2 = np.array(np.unique(temp_22, return_counts=True))
plot_grade_22 = plot_grade_2.T
plot_stack_1 = np.hstack((plot_grade_11, plot_grade_22))
plot_stack_1 = pd.DataFrame(plot_stack_1)
home_status = new_data_df[10]
home_status = pd.DataFrame(home_status)
status_home_status = pd.DataFrame(np.hstack((home_status, status_labels)))
Charged_off_home_status = status_home_status[(status_home_status.status_labels == 1)]
temp_41 = Charged_off_home_status.iloc[(slice(None, None, None), 0)].values
plot_home_status = np.array(np.unique(temp_41, return_counts=True))
plot_home_status_44 = pd.DataFrame(plot_home_status.T)
Fully_Paid_home_status = status_home_status[(status_home_status.status_labels == 0)]
temp_42 = Fully_Paid_home_status.iloc[(slice(None, None, None), 0)].values
plot_home_status_2 = np.array(np.unique(temp_42, return_counts=True))
plot_home_status_55 = pd.DataFrame(plot_home_status_2.T)
plot_home_status_55 = plot_home_status_55.drop(0)
plot_stack_3 = np.hstack((plot_home_status_44, plot_home_status_55))
plot_stack_3 = pd.DataFrame(plot_stack_3)
annual_income = new_data[(slice(None, None, None), 11)]
bins_2 = np.array([40000, 70000, 100000, 150000])
annual_income = annual_income.astype(float).reshape(annual_income.size, 1)
binned_annual_income = pd.DataFrame(np.digitize(annual_income, bins_2))
status_annual_groups = pd.DataFrame(np.hstack((binned_annual_income, status_labels)))
Charged_off_annual_income = status_annual_groups[(status_annual_groups.status_labels == 1)]
temp_51 = Charged_off_annual_income.iloc[(slice(None, None, None), 0)].values
plot_annual_income = np.array(np.unique(temp_51, return_counts=True))
plot_annual_income_66 = pd.DataFrame(plot_annual_income.T)
Fully_Paid_annual_income = status_annual_groups[(status_annual_groups.status_labels == 0)]
temp_52 = Fully_Paid_annual_income.iloc[(slice(None, None, None), 0)].values
plot_annual_income_2 = np.array(np.unique(temp_52, return_counts=True))
plot_annual_income_77 = pd.DataFrame(plot_annual_income_2.T)
plot_stack_4 = np.hstack((plot_annual_income_66, plot_annual_income_77))
plot_stack_4 = pd.DataFrame(plot_stack_4)
state = new_data_df[21]
state = pd.DataFrame(state)
status_state = pd.DataFrame(np.hstack((state, status_labels)))
Charged_off_state = status_state[(status_state.status_labels == 1)]
temp_61 = Charged_off_state.iloc[(slice(None, None, None), 0)].values
plot_state = np.array(np.unique(temp_61, return_counts=True))
plot_state_88 = pd.DataFrame(plot_state.T)
Fully_Paid_state = status_state[(status_state.status_labels == 0)]
temp_62 = Fully_Paid_state.iloc[(slice(None, None, None), 0)].values
plot_state_2 = np.array(np.unique(temp_62, return_counts=True))
plot_state_99 = pd.DataFrame(plot_state_2.T)
plot_state_88 = plot_state_88.drop(7)
plot_state_99 = plot_state_99.drop([7, 21, 28])
plot_stack_5 = np.hstack((plot_state_88, plot_state_99))
plot_stack_5 = pd.DataFrame(plot_stack_5)
ver_stat = new_data_df[12]
ver_stat = pd.DataFrame(ver_stat)
status_ver_stat = pd.DataFrame(np.hstack((ver_stat, status_labels)))
Charged_off_ver_stat = status_ver_stat[(status_ver_stat.status_labels == 1)]
temp_71 = Charged_off_ver_stat.iloc[(slice(None, None, None), 0)].values
plot_ver_stat = np.array(np.unique(temp_71, return_counts=True))
plot_ver_stat_101 = pd.DataFrame(plot_ver_stat.T)
Fully_Paid_ver_stat = status_ver_stat[(status_ver_stat.status_labels == 0)]
temp_72 = Fully_Paid_ver_stat.iloc[(slice(None, None, None), 0)].values
plot_ver_stat_2 = np.array(np.unique(temp_72, return_counts=True))
plot_ver_stat_111 = pd.DataFrame(plot_ver_stat_2.T)
plot_stack_6 = np.hstack((plot_ver_stat_101, plot_ver_stat_111))
plot_stack_6 = pd.DataFrame(plot_stack_6)
plot_stack_6 = plot_stack_6.drop(plot_stack_6.columns[2], axis=1)
plot_stack_6.columns = ['Verification Status', 'Charged Off', 'Fully Paid']
return plot_stack_6
#=============
# Function 215
def cleaning_func_82(data):
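    # Returns the raw side-by-side verification-status count tables (duplicate status column and
    # default integer column names left in place).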
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[(slice(None, None, None), slice(2, (- 30), None))]
new_data_df = pd.DataFrame(new_data)
installment_amt = new_data[(slice(None, None, None), 5)]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
status_new = new_data_df[14]
factored_status = np.array(pd.factorize(status_new))
status_labels = pd.DataFrame(factored_status[0])
status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt, status_labels)))
Charged_off = status_installment_groups[(status_installment_groups.status_labels == 1)]
temp_1 = Charged_off.iloc[(slice(None, None, None), 0)].values
plot_1 = np.array(np.unique(temp_1, return_counts=True))
plot_1 = plot_1[(slice(None, None, None), slice(None, (- 1), None))]
plot_11 = plot_1.T
Fully_paid = status_installment_groups[(status_installment_groups.status_labels == 0)]
temp_2 = Fully_paid.iloc[(slice(None, None, None), 0)].values
plot_2 = np.array(np.unique(temp_2, return_counts=True))
plot_22 = plot_2.T
plot_stack = np.hstack((plot_11, plot_22))
plot_stack = pd.DataFrame(plot_stack)
installment_grade = new_data[(slice(None, None, None), 6)]
installment_grade = pd.DataFrame(installment_grade)
status_installment_grade = pd.DataFrame(np.hstack((installment_grade, status_labels)))
Charged_off_grade = status_installment_grade[(status_installment_grade.status_labels == 1)]
temp_11 = Charged_off_grade.iloc[(slice(None, None, None), 0)].values
plot_grade = np.array(np.unique(temp_11, return_counts=True))
plot_grade_11 = plot_grade.T
Fully_Paid_grade = status_installment_grade[(status_installment_grade.status_labels == 0)]
temp_22 = Fully_Paid_grade.iloc[(slice(None, None, None), 0)].values
plot_grade_2 = np.array(np.unique(temp_22, return_counts=True))
plot_grade_22 = plot_grade_2.T
plot_stack_1 = np.hstack((plot_grade_11, plot_grade_22))
plot_stack_1 = pd.DataFrame(plot_stack_1)
home_status = new_data_df[10]
home_status = pd.DataFrame(home_status)
status_home_status = pd.DataFrame(np.hstack((home_status, status_labels)))
Charged_off_home_status = status_home_status[(status_home_status.status_labels == 1)]
temp_41 = Charged_off_home_status.iloc[(slice(None, None, None), 0)].values
plot_home_status = np.array(np.unique(temp_41, return_counts=True))
plot_home_status_44 = pd.DataFrame(plot_home_status.T)
Fully_Paid_home_status = status_home_status[(status_home_status.status_labels == 0)]
temp_42 = Fully_Paid_home_status.iloc[(slice(None, None, None), 0)].values
plot_home_status_2 = np.array(np.unique(temp_42, return_counts=True))
plot_home_status_55 = pd.DataFrame(plot_home_status_2.T)
plot_home_status_55 = plot_home_status_55.drop(0)
plot_stack_3 = np.hstack((plot_home_status_44, plot_home_status_55))
plot_stack_3 = pd.DataFrame(plot_stack_3)
annual_income = new_data[(slice(None, None, None), 11)]
bins_2 = np.array([40000, 70000, 100000, 150000])
annual_income = annual_income.astype(float).reshape(annual_income.size, 1)
binned_annual_income = pd.DataFrame(np.digitize(annual_income, bins_2))
status_annual_groups = pd.DataFrame(np.hstack((binned_annual_income, status_labels)))
Charged_off_annual_income = status_annual_groups[(status_annual_groups.status_labels == 1)]
temp_51 = Charged_off_annual_income.iloc[(slice(None, None, None), 0)].values
plot_annual_income = np.array(np.unique(temp_51, return_counts=True))
plot_annual_income_66 = pd.DataFrame(plot_annual_income.T)
Fully_Paid_annual_income = status_annual_groups[(status_annual_groups.status_labels == 0)]
temp_52 = Fully_Paid_annual_income.iloc[(slice(None, None, None), 0)].values
plot_annual_income_2 = np.array(np.unique(temp_52, return_counts=True))
plot_annual_income_77 = pd.DataFrame(plot_annual_income_2.T)
plot_stack_4 = np.hstack((plot_annual_income_66, plot_annual_income_77))
plot_stack_4 = pd.DataFrame(plot_stack_4)
state = new_data_df[21]
state = pd.DataFrame(state)
status_state = pd.DataFrame(np.hstack((state, status_labels)))
Charged_off_state = status_state[(status_state.status_labels == 1)]
temp_61 = Charged_off_state.iloc[(slice(None, None, None), 0)].values
plot_state = np.array(np.unique(temp_61, return_counts=True))
plot_state_88 = pd.DataFrame(plot_state.T)
Fully_Paid_state = status_state[(status_state.status_labels == 0)]
temp_62 = Fully_Paid_state.iloc[(slice(None, None, None), 0)].values
plot_state_2 = np.array(np.unique(temp_62, return_counts=True))
plot_state_99 = pd.DataFrame(plot_state_2.T)
plot_state_88 = plot_state_88.drop(7)
plot_state_99 = plot_state_99.drop([7, 21, 28])
plot_stack_5 = np.hstack((plot_state_88, plot_state_99))
plot_stack_5 = pd.DataFrame(plot_stack_5)
ver_stat = new_data_df[12]
ver_stat = pd.DataFrame(ver_stat)
status_ver_stat = pd.DataFrame(np.hstack((ver_stat, status_labels)))
Charged_off_ver_stat = status_ver_stat[(status_ver_stat.status_labels == 1)]
temp_71 = Charged_off_ver_stat.iloc[(slice(None, None, None), 0)].values
plot_ver_stat = np.array(np.unique(temp_71, return_counts=True))
plot_ver_stat_101 = pd.DataFrame(plot_ver_stat.T)
Fully_Paid_ver_stat = status_ver_stat[(status_ver_stat.status_labels == 0)]
temp_72 = Fully_Paid_ver_stat.iloc[(slice(None, None, None), 0)].values
plot_ver_stat_2 = np.array(np.unique(temp_72, return_counts=True))
plot_ver_stat_111 = pd.DataFrame(plot_ver_stat_2.T)
plot_stack_6 = np.hstack((plot_ver_stat_101, plot_ver_stat_111))
plot_stack_6 = pd.DataFrame(plot_stack_6)
return plot_stack_6
#=============
# Function 216
def cleaning_func_83(data):
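    # Returns every loan's verification status paired with its factorised status label
    # (0 = Fully Paid, 1 = Charged Off).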
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[(slice(None, None, None), slice(2, (- 30), None))]
new_data_df = pd.DataFrame(new_data)
installment_amt = new_data[(slice(None, None, None), 5)]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
status_new = new_data_df[14]
factored_status = np.array(pd.factorize(status_new))
status_labels = pd.DataFrame(factored_status[0])
status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt, status_labels)))
Charged_off = status_installment_groups[(status_installment_groups.status_labels == 1)]
temp_1 = Charged_off.iloc[(slice(None, None, None), 0)].values
plot_1 = np.array(np.unique(temp_1, return_counts=True))
plot_1 = plot_1[(slice(None, None, None), slice(None, (- 1), None))]
plot_11 = plot_1.T
Fully_paid = status_installment_groups[(status_installment_groups.status_labels == 0)]
temp_2 = Fully_paid.iloc[(slice(None, None, None), 0)].values
plot_2 = np.array(np.unique(temp_2, return_counts=True))
plot_22 = plot_2.T
plot_stack = np.hstack((plot_11, plot_22))
plot_stack = pd.DataFrame(plot_stack)
installment_grade = new_data[(slice(None, None, None), 6)]
installment_grade = pd.DataFrame(installment_grade)
status_installment_grade = pd.DataFrame(np.hstack((installment_grade, status_labels)))
Charged_off_grade = status_installment_grade[(status_installment_grade.status_labels == 1)]
temp_11 = Charged_off_grade.iloc[(slice(None, None, None), 0)].values
plot_grade = np.array(np.unique(temp_11, return_counts=True))
plot_grade_11 = plot_grade.T
Fully_Paid_grade = status_installment_grade[(status_installment_grade.status_labels == 0)]
temp_22 = Fully_Paid_grade.iloc[(slice(None, None, None), 0)].values
plot_grade_2 = np.array(np.unique(temp_22, return_counts=True))
plot_grade_22 = plot_grade_2.T
plot_stack_1 = np.hstack((plot_grade_11, plot_grade_22))
plot_stack_1 = pd.DataFrame(plot_stack_1)
home_status = new_data_df[10]
home_status = pd.DataFrame(home_status)
status_home_status = pd.DataFrame(np.hstack((home_status, status_labels)))
Charged_off_home_status = status_home_status[(status_home_status.status_labels == 1)]
temp_41 = Charged_off_home_status.iloc[(slice(None, None, None), 0)].values
plot_home_status = np.array(np.unique(temp_41, return_counts=True))
plot_home_status_44 = pd.DataFrame(plot_home_status.T)
Fully_Paid_home_status = status_home_status[(status_home_status.status_labels == 0)]
temp_42 = Fully_Paid_home_status.iloc[(slice(None, None, None), 0)].values
plot_home_status_2 = np.array(np.unique(temp_42, return_counts=True))
plot_home_status_55 = pd.DataFrame(plot_home_status_2.T)
plot_home_status_55 = plot_home_status_55.drop(0)
plot_stack_3 = np.hstack((plot_home_status_44, plot_home_status_55))
plot_stack_3 = pd.DataFrame(plot_stack_3)
annual_income = new_data[(slice(None, None, None), 11)]
bins_2 = np.array([40000, 70000, 100000, 150000])
annual_income = annual_income.astype(float).reshape(annual_income.size, 1)
binned_annual_income = pd.DataFrame(np.digitize(annual_income, bins_2))
status_annual_groups = pd.DataFrame(np.hstack((binned_annual_income, status_labels)))
Charged_off_annual_income = status_annual_groups[(status_annual_groups.status_labels == 1)]
temp_51 = Charged_off_annual_income.iloc[(slice(None, None, None), 0)].values
plot_annual_income = np.array(np.unique(temp_51, return_counts=True))
plot_annual_income_66 = pd.DataFrame(plot_annual_income.T)
Fully_Paid_annual_income = status_annual_groups[(status_annual_groups.status_labels == 0)]
temp_52 = Fully_Paid_annual_income.iloc[(slice(None, None, None), 0)].values
plot_annual_income_2 = np.array(np.unique(temp_52, return_counts=True))
plot_annual_income_77 = pd.DataFrame(plot_annual_income_2.T)
plot_stack_4 = np.hstack((plot_annual_income_66, plot_annual_income_77))
plot_stack_4 = pd.DataFrame(plot_stack_4)
state = new_data_df[21]
state = pd.DataFrame(state)
status_state = pd.DataFrame(np.hstack((state, status_labels)))
Charged_off_state = status_state[(status_state.status_labels == 1)]
temp_61 = Charged_off_state.iloc[(slice(None, None, None), 0)].values
plot_state = np.array(np.unique(temp_61, return_counts=True))
plot_state_88 = pd.DataFrame(plot_state.T)
Fully_Paid_state = status_state[(status_state.status_labels == 0)]
temp_62 = Fully_Paid_state.iloc[(slice(None, None, None), 0)].values
plot_state_2 = np.array(np.unique(temp_62, return_counts=True))
plot_state_99 = pd.DataFrame(plot_state_2.T)
plot_state_88 = plot_state_88.drop(7)
plot_state_99 = plot_state_99.drop([7, 21, 28])
plot_stack_5 = np.hstack((plot_state_88, plot_state_99))
plot_stack_5 = pd.DataFrame(plot_stack_5)
ver_stat = new_data_df[12]
ver_stat = pd.DataFrame(ver_stat)
status_ver_stat = pd.DataFrame(np.hstack((ver_stat, status_labels)))
return status_ver_stat
#=============
# Function 217
def cleaning_func_84(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data_1 = pd.DataFrame(data)
category_one_data = data_1[(data_1.loan_status == 'Fully Paid')]
category_two_data = data_1[(data_1.loan_status == 'Charged Off')]
new_data = np.vstack((category_one_data, category_two_data))
new_data = new_data[(slice(None, None, None), slice(2, (- 30), None))]
new_data_df = pd.DataFrame(new_data)
installment_amt = new_data[(slice(None, None, None), 5)]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size, 1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
status_new = new_data_df[14]
    factored_status = pd.factorize(status_new)
    status_labels = pd.DataFrame(factored_status[0])
    # name the stacked columns so the status_labels filters below resolve
    status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt, status_labels)), columns=['installment_bin', 'status_labels'])
    Charged_off = status_installment_groups[(status_installment_groups.status_labels == 1)]
    temp_1 = Charged_off.iloc[:, 0].values
plot_1 = np.array(np.unique(temp_1, return_counts=True))
    plot_1 = plot_1[:, :-1]
plot_11 = plot_1.T
Fully_paid = status_installment_groups[(status_installment_groups.status_labels == 0)]
    temp_2 = Fully_paid.iloc[:, 0].values
plot_2 = np.array(np.unique(temp_2, return_counts=True))
plot_22 = plot_2.T
plot_stack = np.hstack((plot_11, plot_22))
plot_stack = pd.DataFrame(plot_stack)
    installment_grade = new_data[:, 6]
    installment_grade = pd.DataFrame(installment_grade)
    status_installment_grade = pd.DataFrame(np.hstack((installment_grade, status_labels)), columns=['grade', 'status_labels'])
    Charged_off_grade = status_installment_grade[(status_installment_grade.status_labels == 1)]
    temp_11 = Charged_off_grade.iloc[:, 0].values
plot_grade = np.array(np.unique(temp_11, return_counts=True))
plot_grade_11 = plot_grade.T
Fully_Paid_grade = status_installment_grade[(status_installment_grade.status_labels == 0)]
    temp_22 = Fully_Paid_grade.iloc[:, 0].values
plot_grade_2 = np.array(np.unique(temp_22, return_counts=True))
plot_grade_22 = plot_grade_2.T
plot_stack_1 = np.hstack((plot_grade_11, plot_grade_22))
plot_stack_1 = pd.DataFrame(plot_stack_1)
home_status = new_data_df[10]
home_status = pd.DataFrame(home_status)
    status_home_status = pd.DataFrame(np.hstack((home_status, status_labels)), columns=['home_status', 'status_labels'])
    Charged_off_home_status = status_home_status[(status_home_status.status_labels == 1)]
    temp_41 = Charged_off_home_status.iloc[:, 0].values
plot_home_status = np.array(np.unique(temp_41, return_counts=True))
plot_home_status_44 = pd.DataFrame(plot_home_status.T)
Fully_Paid_home_status = status_home_status[(status_home_status.status_labels == 0)]
    temp_42 = Fully_Paid_home_status.iloc[:, 0].values
plot_home_status_2 = np.array(np.unique(temp_42, return_counts=True))
plot_home_status_55 = pd.DataFrame(plot_home_status_2.T)
plot_home_status_55 = plot_home_status_55.drop(0)
plot_stack_3 = np.hstack((plot_home_status_44, plot_home_status_55))
plot_stack_3 = pd.DataFrame(plot_stack_3)
    annual_income = new_data[:, 11]
bins_2 = np.array([40000, 70000, 100000, 150000])
annual_income = annual_income.astype(float).reshape(annual_income.size, 1)
binned_annual_income = pd.DataFrame(np.digitize(annual_income, bins_2))
    status_annual_groups = pd.DataFrame(np.hstack((binned_annual_income, status_labels)), columns=['annual_income_bin', 'status_labels'])
    Charged_off_annual_income = status_annual_groups[(status_annual_groups.status_labels == 1)]
    temp_51 = Charged_off_annual_income.iloc[:, 0].values
plot_annual_income = np.array(np.unique(temp_51, return_counts=True))
plot_annual_income_66 = pd.DataFrame(plot_annual_income.T)
Fully_Paid_annual_income = status_annual_groups[(status_annual_groups.status_labels == 0)]
    temp_52 = Fully_Paid_annual_income.iloc[:, 0].values
plot_annual_income_2 = np.array(np.unique(temp_52, return_counts=True))
plot_annual_income_77 = pd.DataFrame(plot_annual_income_2.T)
plot_stack_4 = np.hstack((plot_annual_income_66, plot_annual_income_77))
plot_stack_4 = pd.DataFrame(plot_stack_4)
state = new_data_df[21]
state = pd.DataFrame(state)
    status_state = pd.DataFrame(np.hstack((state, status_labels)), columns=['state', 'status_labels'])
    Charged_off_state = status_state[(status_state.status_labels == 1)]
    temp_61 = Charged_off_state.iloc[:, 0].values
plot_state = np.array(np.unique(temp_61, return_counts=True))
plot_state_88 = pd.DataFrame(plot_state.T)
Fully_Paid_state = status_state[(status_state.status_labels == 0)]
    temp_62 = Fully_Paid_state.iloc[:, 0].values
plot_state_2 = np.array(np.unique(temp_62, return_counts=True))
plot_state_99 = pd.DataFrame(plot_state_2.T)
plot_state_88 = plot_state_88.drop(7)
plot_state_99 = plot_state_99.drop([7, 21, 28])
plot_stack_5 = np.hstack((plot_state_88, plot_state_99))
plot_stack_5 = pd.DataFrame(plot_stack_5)
ver_stat = new_data_df[12]
ver_stat = pd.DataFrame(ver_stat)
    status_ver_stat = pd.DataFrame(np.hstack((ver_stat, status_labels)), columns=['ver_stat', 'status_labels'])
    Charged_off_ver_stat = status_ver_stat[(status_ver_stat.status_labels == 1)]
    temp_71 = Charged_off_ver_stat.iloc[:, 0].values
plot_ver_stat = np.array(np.unique(temp_71, return_counts=True))
plot_ver_stat_101 = pd.DataFrame(plot_ver_stat.T)
Fully_Paid_ver_stat = status_ver_stat[(status_ver_stat.status_labels == 0)]
    temp_72 = Fully_Paid_ver_stat.iloc[:, 0].values
plot_ver_stat_2 = np.array(np.unique(temp_72, return_counts=True))
plot_ver_stat_111 = pd.DataFrame(plot_ver_stat_2.T)
return plot_ver_stat_111
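#=============
# Usage sketch: cleaning_func_84 works on the raw frame and returns the verification-status
# counts within the fully paid group (label 0 after factorizing loan_status). The
# '../input/loan.csv' path is an assumption carried over from the commented-out reads above.
if __name__ == '__main__':
    import pandas as pd
    loans = pd.read_csv('../input/loan.csv', low_memory=False)
    print(cleaning_func_84(loans).head())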
#=============
# Function 218
def cleaning_func_0(df):
# core cleaning code
import pandas as pd
import numpy as np
# df = pd.read_csv('../input/loan.csv', low_memory=False)
df = df.rename(columns={'loan_amnt': 'loan_amount', 'funded_amnt': 'funded_amount', 'funded_amnt_inv': 'investor_funds', 'int_rate': 'interest_rate', 'annual_inc': 'annual_income'})
df['emp_length_int'] = np.nan
return df
#=============
# Function 219
def cleaning_func_1(df):
# core cleaning code
import pandas as pd
import numpy as np
# df = pd.read_csv('../input/loan.csv', low_memory=False)
df = df.rename(columns={'loan_amnt': 'loan_amount', 'funded_amnt': 'funded_amount', 'funded_amnt_inv': 'investor_funds', 'int_rate': 'interest_rate', 'annual_inc': 'annual_income'})
df['income_category'] = np.nan
return df
#=============
# Function 220
def cleaning_func_2(df):
# core cleaning code
import pandas as pd
import numpy as np
# df = pd.read_csv('../input/loan.csv', low_memory=False)
df = df.rename(columns={'loan_amnt': 'loan_amount', 'funded_amnt': 'funded_amount', 'funded_amnt_inv': 'investor_funds', 'int_rate': 'interest_rate', 'annual_inc': 'annual_income'})
df['loan_condition'] = np.nan
return df
#=============
# Function 221
def cleaning_func_3(df):
# core cleaning code
import pandas as pd
import numpy as np
# df = pd.read_csv('../input/loan.csv', low_memory=False)
df = df.rename(columns={'loan_amnt': 'loan_amount', 'funded_amnt': 'funded_amount', 'funded_amnt_inv': 'investor_funds', 'int_rate': 'interest_rate', 'annual_inc': 'annual_income'})
df['loan_condition_int'] = np.nan
return df
#=============
# Function 222
def cleaning_func_4(df):
# core cleaning code
import pandas as pd
import numpy as np
# df = pd.read_csv('../input/loan.csv', low_memory=False)
df = df.rename(columns={'loan_amnt': 'loan_amount', 'funded_amnt': 'funded_amount', 'funded_amnt_inv': 'investor_funds', 'int_rate': 'interest_rate', 'annual_inc': 'annual_income'})
df['interest_payments'] = np.nan
return df
#=============
# Function 223
def cleaning_func_5(df):
# core cleaning code
import pandas as pd
import numpy as np
# df = pd.read_csv('../input/loan.csv', low_memory=False)
df = df.rename(columns={'loan_amnt': 'loan_amount', 'funded_amnt': 'funded_amount', 'funded_amnt_inv': 'investor_funds', 'int_rate': 'interest_rate', 'annual_inc': 'annual_income'})
df['region'] = np.nan
return df
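#=============
# Sketch: cleaning_func_0 through cleaning_func_5 all apply the same column rename and each
# initialise one placeholder column; chained, they yield a frame with every placeholder
# present. The csv path is an assumption taken from the commented-out reads.
if __name__ == '__main__':
    import pandas as pd
    loans = pd.read_csv('../input/loan.csv', low_memory=False)
    for step in (cleaning_func_0, cleaning_func_1, cleaning_func_2, cleaning_func_3, cleaning_func_4, cleaning_func_5):
        loans = step(loans)
    print(loans[['loan_amount', 'emp_length_int', 'income_category', 'region']].head())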
#=============
# Function 224
def cleaning_func_6(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', low_memory=False)
df = df.rename(columns={'loan_amnt': 'loan_amount', 'funded_amnt': 'funded_amount', 'funded_amnt_inv': 'investor_funds', 'int_rate': 'interest_rate', 'annual_inc': 'annual_income'})
df['complete_date'] = pd.to_datetime(df['issue_d'])
return df
#=============
# Function 225
def cleaning_func_7(df):
# additional context code from user definitions
def loan_condition(status):
if (status in bad_loan):
return 'Bad Loan'
else:
return 'Good Loan'
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', low_memory=False)
df = df.rename(columns={'loan_amnt': 'loan_amount', 'funded_amnt': 'funded_amount', 'funded_amnt_inv': 'investor_funds', 'int_rate': 'interest_rate', 'annual_inc': 'annual_income'})
bad_loan = ['Charged Off', 'Default', 'Does not meet the credit policy. Status:Charged Off', 'In Grace Period', 'Late (16-30 days)', 'Late (31-120 days)']
df['loan_condition'] = df['loan_status'].apply(loan_condition)
return df
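#=============
# Sketch: cleaning_func_7 labels each loan 'Good Loan' or 'Bad Loan' from its loan_status;
# a quick look at the resulting split (csv path assumed as above):
if __name__ == '__main__':
    import pandas as pd
    loans = cleaning_func_7(pd.read_csv('../input/loan.csv', low_memory=False))
    print(loans['loan_condition'].value_counts(normalize=True))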
#=============
# Function 226
def cleaning_func_8(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', low_memory=False)
df = df.rename(columns={'loan_amnt': 'loan_amount', 'funded_amnt': 'funded_amount', 'funded_amnt_inv': 'investor_funds', 'int_rate': 'interest_rate', 'annual_inc': 'annual_income'})
dt_series = pd.to_datetime(df['issue_d'])
df['year'] = dt_series.dt.year
return df
#=============
# Function 227
def cleaning_func_9(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', low_memory=False)
df = df.rename(columns={'loan_amnt': 'loan_amount', 'funded_amnt': 'funded_amount', 'funded_amnt_inv': 'investor_funds', 'int_rate': 'interest_rate', 'annual_inc': 'annual_income'})
df['complete_date'] = pd.to_datetime(df['issue_d'])
group_dates = df.groupby(['complete_date', 'region'], as_index=False).sum()
group_dates['issue_d'] = [month.to_period('M') for month in group_dates['complete_date']]
return group_dates
#=============
# Function 228
def cleaning_func_10(df):
# additional context code from user definitions
def finding_regions(state):
if (state in west):
return 'West'
elif (state in south_west):
return 'SouthWest'
elif (state in south_east):
return 'SouthEast'
elif (state in mid_west):
return 'MidWest'
elif (state in north_east):
return 'NorthEast'
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', low_memory=False)
df = df.rename(columns={'loan_amnt': 'loan_amount', 'funded_amnt': 'funded_amount', 'funded_amnt_inv': 'investor_funds', 'int_rate': 'interest_rate', 'annual_inc': 'annual_income'})
west = ['CA', 'OR', 'UT', 'WA', 'CO', 'NV', 'AK', 'MT', 'HI', 'WY', 'ID']
south_west = ['AZ', 'TX', 'NM', 'OK']
south_east = ['GA', 'NC', 'VA', 'FL', 'KY', 'SC', 'LA', 'AL', 'WV', 'DC', 'AR', 'DE', 'MS', 'TN']
mid_west = ['IL', 'MO', 'MN', 'OH', 'WI', 'KS', 'MI', 'SD', 'IA', 'NE', 'IN', 'ND']
north_east = ['CT', 'NY', 'PA', 'NJ', 'RI', 'MA', 'MD', 'VT', 'NH', 'ME']
df['region'] = df['addr_state'].apply(finding_regions)
return df
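#=============
# Sketch: cleaning_func_10 maps addr_state to one of five regions (any state missing from
# the five lists falls through to None); distribution check, csv path assumed:
if __name__ == '__main__':
    import pandas as pd
    loans = cleaning_func_10(pd.read_csv('../input/loan.csv', low_memory=False))
    print(loans['region'].value_counts(dropna=False))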
#=============
# Function 229
def cleaning_func_11(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', low_memory=False)
df = df.rename(columns={'loan_amnt': 'loan_amount', 'funded_amnt': 'funded_amount', 'funded_amnt_inv': 'investor_funds', 'int_rate': 'interest_rate', 'annual_inc': 'annual_income'})
complete_df = df.copy()
    # (the source kernel also zero-filled several columns here; the column list is not recoverable from this slice)
    # fill missing last_credit_pull_d with the per-region mode
    complete_df['last_credit_pull_d'] = complete_df.groupby('region')['last_credit_pull_d'].transform((lambda x: x.fillna(x.mode()[0]) if (not x.mode().empty) else x))
return complete_df
#=============
# Function 230
def cleaning_func_12(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', low_memory=False)
df = df.rename(columns={'loan_amnt': 'loan_amount', 'funded_amnt': 'funded_amount', 'funded_amnt_inv': 'investor_funds', 'int_rate': 'interest_rate', 'annual_inc': 'annual_income'})
return df
#=============
# Function 231
def cleaning_func_13(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', low_memory=False)
df = df.rename(columns={'loan_amnt': 'loan_amount', 'funded_amnt': 'funded_amount', 'funded_amnt_inv': 'investor_funds', 'int_rate': 'interest_rate', 'annual_inc': 'annual_income'})
complete_df = df.copy()
    # (the source kernel also zero-filled several columns here; the column list is not recoverable from this slice)
complete_df['total_acc'] = complete_df.groupby('region')['total_acc'].transform((lambda x: x.fillna(x.median())))
return complete_df
#=============
# Function 232
def cleaning_func_14(col,df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', low_memory=False)
df = df.rename(columns={'loan_amnt': 'loan_amount', 'funded_amnt': 'funded_amount', 'funded_amnt_inv': 'investor_funds', 'int_rate': 'interest_rate', 'annual_inc': 'annual_income'})
col.loc[((col['emp_length'] == '10+ years'), 'emp_length_int')] = 10
col.loc[((col['emp_length'] == '9 years'), 'emp_length_int')] = 9
col.loc[((col['emp_length'] == '8 years'), 'emp_length_int')] = 8
col.loc[((col['emp_length'] == '7 years'), 'emp_length_int')] = 7
col.loc[((col['emp_length'] == '6 years'), 'emp_length_int')] = 6
col.loc[((col['emp_length'] == '5 years'), 'emp_length_int')] = 5
col.loc[((col['emp_length'] == '4 years'), 'emp_length_int')] = 4
col.loc[((col['emp_length'] == '3 years'), 'emp_length_int')] = 3
col.loc[((col['emp_length'] == '2 years'), 'emp_length_int')] = 2
col.loc[((col['emp_length'] == '1 year'), 'emp_length_int')] = 1
col.loc[((col['emp_length'] == '< 1 year'), 'emp_length_int')] = 0.5
col.loc[((col['emp_length'] == 'n/a'), 'emp_length_int')] = 0
return col
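#=============
# Sketch: cleaning_func_14 writes the numeric emp_length_int mapping onto the frame passed
# as `col` (the `df` argument is only renamed locally), so one plausible call passes the
# same frame twice; csv path assumed:
if __name__ == '__main__':
    import pandas as pd
    loans = pd.read_csv('../input/loan.csv', low_memory=False)
    loans = cleaning_func_14(loans, loans)
    print(loans[['emp_length', 'emp_length_int']].drop_duplicates().sort_values('emp_length_int'))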
#=============
# Function 233
def cleaning_func_15(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', low_memory=False)
df = df.rename(columns={'loan_amnt': 'loan_amount', 'funded_amnt': 'funded_amount', 'funded_amnt_inv': 'investor_funds', 'int_rate': 'interest_rate', 'annual_inc': 'annual_income'})
complete_df = df.copy()
    # (the source kernel also zero-filled several columns here; the column list is not recoverable from this slice)
complete_df['delinq_2yrs'] = complete_df.groupby('region')['delinq_2yrs'].transform((lambda x: x.fillna(x.mean())))
return complete_df
#=============
# Function 234
def cleaning_func_16(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', low_memory=False)
df = df.rename(columns={'loan_amnt': 'loan_amount', 'funded_amnt': 'funded_amount', 'funded_amnt_inv': 'investor_funds', 'int_rate': 'interest_rate', 'annual_inc': 'annual_income'})
complete_df = df.copy()
    # (the source kernel also zero-filled several columns here; the column list is not recoverable from this slice)
    # fill missing last_pymnt_d with the per-region mode
    complete_df['last_pymnt_d'] = complete_df.groupby('region')['last_pymnt_d'].transform((lambda x: x.fillna(x.mode()[0]) if (not x.mode().empty) else x))
return complete_df
#=============
# Function 235
def cleaning_func_17(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', low_memory=False)
df = df.rename(columns={'loan_amnt': 'loan_amount', 'funded_amnt': 'funded_amount', 'funded_amnt_inv': 'investor_funds', 'int_rate': 'interest_rate', 'annual_inc': 'annual_income'})
complete_df = df.copy()
    # (the source kernel also zero-filled several columns here; the column list is not recoverable from this slice)
    # fill missing earliest_cr_line with the per-region mode
    complete_df['earliest_cr_line'] = complete_df.groupby('region')['earliest_cr_line'].transform((lambda x: x.fillna(x.mode()[0]) if (not x.mode().empty) else x))
return complete_df
#=============
# Function 236
def cleaning_func_18(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', low_memory=False)
df = df.rename(columns={'loan_amnt': 'loan_amount', 'funded_amnt': 'funded_amount', 'funded_amnt_inv': 'investor_funds', 'int_rate': 'interest_rate', 'annual_inc': 'annual_income'})
complete_df = df.copy()
    # (the source kernel also zero-filled several columns here; the column list is not recoverable from this slice)
complete_df['pub_rec'] = complete_df.groupby('region')['pub_rec'].transform((lambda x: x.fillna(x.median())))
return complete_df
#=============
# Function 237
def cleaning_func_19(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', low_memory=False)
df = df.rename(columns={'loan_amnt': 'loan_amount', 'funded_amnt': 'funded_amount', 'funded_amnt_inv': 'investor_funds', 'int_rate': 'interest_rate', 'annual_inc': 'annual_income'})
complete_df = df.copy()
    # (the source kernel also zero-filled several columns here; the column list is not recoverable from this slice)
    # fill missing next_pymnt_d with the per-region mode
    complete_df['next_pymnt_d'] = complete_df.groupby('region')['next_pymnt_d'].transform((lambda x: x.fillna(x.mode()[0]) if (not x.mode().empty) else x))
return complete_df
#=============
# Function 238
def cleaning_func_20(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', low_memory=False)
df = df.rename(columns={'loan_amnt': 'loan_amount', 'funded_amnt': 'funded_amount', 'funded_amnt_inv': 'investor_funds', 'int_rate': 'interest_rate', 'annual_inc': 'annual_income'})
group_dates = df.groupby(['complete_date', 'region'], as_index=False).sum()
group_dates = group_dates.groupby(['issue_d', 'region'], as_index=False).sum()
return group_dates
#=============
# Function 239
def cleaning_func_22(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', low_memory=False)
df = df.rename(columns={'loan_amnt': 'loan_amount', 'funded_amnt': 'funded_amount', 'funded_amnt_inv': 'investor_funds', 'int_rate': 'interest_rate', 'annual_inc': 'annual_income'})
group_dates = df.groupby(['complete_date', 'region'], as_index=False).sum()
return group_dates
#=============
# Function 240
def cleaning_func_23(col,df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', low_memory=False)
df = df.rename(columns={'loan_amnt': 'loan_amount', 'funded_amnt': 'funded_amount', 'funded_amnt_inv': 'investor_funds', 'int_rate': 'interest_rate', 'annual_inc': 'annual_income'})
col.loc[((col['emp_length'] == '10+ years'), 'emp_length_int')] = 10
col.loc[((col['emp_length'] == '9 years'), 'emp_length_int')] = 9
col.loc[((col['emp_length'] == '8 years'), 'emp_length_int')] = 8
col.loc[((col['emp_length'] == '7 years'), 'emp_length_int')] = 7
col.loc[((col['emp_length'] == '6 years'), 'emp_length_int')] = 6
col.loc[((col['emp_length'] == '5 years'), 'emp_length_int')] = 5
col.loc[((col['emp_length'] == '4 years'), 'emp_length_int')] = 4
col.loc[((col['emp_length'] == '3 years'), 'emp_length_int')] = 3
col.loc[((col['emp_length'] == '2 years'), 'emp_length_int')] = 2
col.loc[((col['emp_length'] == '1 year'), 'emp_length_int')] = 1
col.loc[((col['emp_length'] == '< 1 year'), 'emp_length_int')] = 0.5
col.loc[((col['emp_length'] == 'n/a'), 'emp_length_int')] = 0
col.loc[((col['annual_income'] <= 100000), 'income_category')] = 'Low'
col.loc[(((col['annual_income'] > 100000) & (col['annual_income'] <= 200000)), 'income_category')] = 'Medium'
col.loc[((col['annual_income'] > 200000), 'income_category')] = 'High'
return col
#=============
# Function 241
def cleaning_func_24(col,df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', low_memory=False)
df = df.rename(columns={'loan_amnt': 'loan_amount', 'funded_amnt': 'funded_amount', 'funded_amnt_inv': 'investor_funds', 'int_rate': 'interest_rate', 'annual_inc': 'annual_income'})
col.loc[((col['emp_length'] == '10+ years'), 'emp_length_int')] = 10
col.loc[((col['emp_length'] == '9 years'), 'emp_length_int')] = 9
col.loc[((col['emp_length'] == '8 years'), 'emp_length_int')] = 8
col.loc[((col['emp_length'] == '7 years'), 'emp_length_int')] = 7
col.loc[((col['emp_length'] == '6 years'), 'emp_length_int')] = 6
col.loc[((col['emp_length'] == '5 years'), 'emp_length_int')] = 5
col.loc[((col['emp_length'] == '4 years'), 'emp_length_int')] = 4
col.loc[((col['emp_length'] == '3 years'), 'emp_length_int')] = 3
col.loc[((col['emp_length'] == '2 years'), 'emp_length_int')] = 2
col.loc[((col['emp_length'] == '1 year'), 'emp_length_int')] = 1
col.loc[((col['emp_length'] == '< 1 year'), 'emp_length_int')] = 0.5
col.loc[((col['emp_length'] == 'n/a'), 'emp_length_int')] = 0
col.loc[((col['annual_income'] <= 100000), 'income_category')] = 'Low'
col.loc[(((col['annual_income'] > 100000) & (col['annual_income'] <= 200000)), 'income_category')] = 'Medium'
col.loc[((col['annual_income'] > 200000), 'income_category')] = 'High'
col.loc[((df['loan_condition'] == 'Bad Loan'), 'loan_condition_int')] = 0
col.loc[((df['loan_condition'] == 'Good Loan'), 'loan_condition_int')] = 1
return col
#=============
# Function 242
def cleaning_func_25(col,df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', low_memory=False)
df = df.rename(columns={'loan_amnt': 'loan_amount', 'funded_amnt': 'funded_amount', 'funded_amnt_inv': 'investor_funds', 'int_rate': 'interest_rate', 'annual_inc': 'annual_income'})
col.loc[((col['emp_length'] == '10+ years'), 'emp_length_int')] = 10
col.loc[((col['emp_length'] == '9 years'), 'emp_length_int')] = 9
col.loc[((col['emp_length'] == '8 years'), 'emp_length_int')] = 8
col.loc[((col['emp_length'] == '7 years'), 'emp_length_int')] = 7
col.loc[((col['emp_length'] == '6 years'), 'emp_length_int')] = 6
col.loc[((col['emp_length'] == '5 years'), 'emp_length_int')] = 5
col.loc[((col['emp_length'] == '4 years'), 'emp_length_int')] = 4
col.loc[((col['emp_length'] == '3 years'), 'emp_length_int')] = 3
col.loc[((col['emp_length'] == '2 years'), 'emp_length_int')] = 2
col.loc[((col['emp_length'] == '1 year'), 'emp_length_int')] = 1
col.loc[((col['emp_length'] == '< 1 year'), 'emp_length_int')] = 0.5
col.loc[((col['emp_length'] == 'n/a'), 'emp_length_int')] = 0
col.loc[((col['annual_income'] <= 100000), 'income_category')] = 'Low'
col.loc[(((col['annual_income'] > 100000) & (col['annual_income'] <= 200000)), 'income_category')] = 'Medium'
col.loc[((col['annual_income'] > 200000), 'income_category')] = 'High'
col.loc[((df['loan_condition'] == 'Bad Loan'), 'loan_condition_int')] = 0
col.loc[((df['loan_condition'] == 'Good Loan'), 'loan_condition_int')] = 1
col.loc[((col['interest_rate'] <= 13.23), 'interest_payments')] = 'Low'
col.loc[((col['interest_rate'] > 13.23), 'interest_payments')] = 'High'
return col
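#=============
# Sketch: cleaning_func_25 buckets interest_rate at 13.23% into 'Low'/'High' interest_payments
# on top of the employment-length and income categories; it expects the renamed columns and a
# 'loan_condition' column, so cleaning_func_0 and cleaning_func_7 are applied first (calling
# convention and csv path assumed):
if __name__ == '__main__':
    import pandas as pd
    loans = cleaning_func_7(cleaning_func_0(pd.read_csv('../input/loan.csv', low_memory=False)))
    loans = cleaning_func_25(loans, loans)
    print(loans['interest_payments'].value_counts(normalize=True))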
#=============
# Function 243
def cleaning_func_26(col,df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', low_memory=False)
df = df.rename(columns={'loan_amnt': 'loan_amount', 'funded_amnt': 'funded_amount', 'funded_amnt_inv': 'investor_funds', 'int_rate': 'interest_rate', 'annual_inc': 'annual_income'})
col.loc[((col['emp_length'] == '10+ years'), 'emp_length_int')] = 10
col.loc[((col['emp_length'] == '9 years'), 'emp_length_int')] = 9
col.loc[((col['emp_length'] == '8 years'), 'emp_length_int')] = 8
col.loc[((col['emp_length'] == '7 years'), 'emp_length_int')] = 7
col.loc[((col['emp_length'] == '6 years'), 'emp_length_int')] = 6
col.loc[((col['emp_length'] == '5 years'), 'emp_length_int')] = 5
col.loc[((col['emp_length'] == '4 years'), 'emp_length_int')] = 4
col.loc[((col['emp_length'] == '3 years'), 'emp_length_int')] = 3
col.loc[((col['emp_length'] == '2 years'), 'emp_length_int')] = 2
col.loc[((col['emp_length'] == '1 year'), 'emp_length_int')] = 1
col.loc[((col['emp_length'] == '< 1 year'), 'emp_length_int')] = 0.5
col.loc[((col['emp_length'] == 'n/a'), 'emp_length_int')] = 0
col.loc[((col['annual_income'] <= 100000), 'income_category')] = 'Low'
col.loc[(((col['annual_income'] > 100000) & (col['annual_income'] <= 200000)), 'income_category')] = 'Medium'
col.loc[((col['annual_income'] > 200000), 'income_category')] = 'High'
complete_df = df.copy()
    # (the source kernel also zero-filled several columns here; the column list is not recoverable from this slice)
complete_df['annual_income'] = complete_df.groupby('region')['annual_income'].transform((lambda x: x.fillna(x.mean())))
return complete_df
#=============
# Function 244
def cleaning_func_27(col,df):
# additional context code from user definitions
def finding_regions(state):
if (state in west):
return 'West'
elif (state in south_west):
return 'SouthWest'
elif (state in south_east):
return 'SouthEast'
elif (state in mid_west):
return 'MidWest'
elif (state in north_east):
return 'NorthEast'
def loan_condition(status):
if (status in bad_loan):
return 'Bad Loan'
else:
return 'Good Loan'
# core cleaning code
import pandas as pd
import numpy as np
# df = pd.read_csv('../input/loan.csv', low_memory=False)
df = df.rename(columns={'loan_amnt': 'loan_amount', 'funded_amnt': 'funded_amount', 'funded_amnt_inv': 'investor_funds', 'int_rate': 'interest_rate', 'annual_inc': 'annual_income'})
bad_loan = ['Charged Off', 'Default', 'Does not meet the credit policy. Status:Charged Off', 'In Grace Period', 'Late (16-30 days)', 'Late (31-120 days)']
df['loan_condition'] = np.nan
df['loan_condition'] = df['loan_status'].apply(loan_condition)
west = ['CA', 'OR', 'UT', 'WA', 'CO', 'NV', 'AK', 'MT', 'HI', 'WY', 'ID']
south_west = ['AZ', 'TX', 'NM', 'OK']
south_east = ['GA', 'NC', 'VA', 'FL', 'KY', 'SC', 'LA', 'AL', 'WV', 'DC', 'AR', 'DE', 'MS', 'TN']
mid_west = ['IL', 'MO', 'MN', 'OH', 'WI', 'KS', 'MI', 'SD', 'IA', 'NE', 'IN', 'ND']
north_east = ['CT', 'NY', 'PA', 'NJ', 'RI', 'MA', 'MD', 'VT', 'NH', 'ME']
df['region'] = np.nan
df['region'] = df['addr_state'].apply(finding_regions)
col.loc[((col['emp_length'] == '10+ years'), 'emp_length_int')] = 10
col.loc[((col['emp_length'] == '9 years'), 'emp_length_int')] = 9
col.loc[((col['emp_length'] == '8 years'), 'emp_length_int')] = 8
col.loc[((col['emp_length'] == '7 years'), 'emp_length_int')] = 7
col.loc[((col['emp_length'] == '6 years'), 'emp_length_int')] = 6
col.loc[((col['emp_length'] == '5 years'), 'emp_length_int')] = 5
col.loc[((col['emp_length'] == '4 years'), 'emp_length_int')] = 4
col.loc[((col['emp_length'] == '3 years'), 'emp_length_int')] = 3
col.loc[((col['emp_length'] == '2 years'), 'emp_length_int')] = 2
col.loc[((col['emp_length'] == '1 year'), 'emp_length_int')] = 1
col.loc[((col['emp_length'] == '< 1 year'), 'emp_length_int')] = 0.5
col.loc[((col['emp_length'] == 'n/a'), 'emp_length_int')] = 0
badloans_df = df.loc[(df['loan_condition'] == 'Bad Loan')]
number_of_loanstatus = pd.crosstab(badloans_df['region'], badloans_df['loan_status'])
number_of_loanstatus['Total'] = number_of_loanstatus.sum(axis=1)
return number_of_loanstatus
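#=============
# Sketch: cleaning_func_27 cross-tabulates the individual bad-loan statuses by region and
# appends a 'Total' column; passing the same frame as both arguments mirrors the other
# slices in this file (assumption), csv path assumed:
if __name__ == '__main__':
    import pandas as pd
    loans = pd.read_csv('../input/loan.csv', low_memory=False)
    print(cleaning_func_27(loans, loans))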
#=============
# Function 245
def cleaning_func_28(col,df):
# additional context code from user definitions
def finding_regions(state):
if (state in west):
return 'West'
elif (state in south_west):
return 'SouthWest'
elif (state in south_east):
return 'SouthEast'
elif (state in mid_west):
return 'MidWest'
elif (state in north_east):
return 'NorthEast'
def loan_condition(status):
if (status in bad_loan):
return 'Bad Loan'
else:
return 'Good Loan'
# core cleaning code
import pandas as pd
import numpy as np
# df = pd.read_csv('../input/loan.csv', low_memory=False)
df = df.rename(columns={'loan_amnt': 'loan_amount', 'funded_amnt': 'funded_amount', 'funded_amnt_inv': 'investor_funds', 'int_rate': 'interest_rate', 'annual_inc': 'annual_income'})
bad_loan = ['Charged Off', 'Default', 'Does not meet the credit policy. Status:Charged Off', 'In Grace Period', 'Late (16-30 days)', 'Late (31-120 days)']
df['loan_condition'] = np.nan
df['loan_condition'] = df['loan_status'].apply(loan_condition)
west = ['CA', 'OR', 'UT', 'WA', 'CO', 'NV', 'AK', 'MT', 'HI', 'WY', 'ID']
south_west = ['AZ', 'TX', 'NM', 'OK']
south_east = ['GA', 'NC', 'VA', 'FL', 'KY', 'SC', 'LA', 'AL', 'WV', 'DC', 'AR', 'DE', 'MS', 'TN']
mid_west = ['IL', 'MO', 'MN', 'OH', 'WI', 'KS', 'MI', 'SD', 'IA', 'NE', 'IN', 'ND']
north_east = ['CT', 'NY', 'PA', 'NJ', 'RI', 'MA', 'MD', 'VT', 'NH', 'ME']
df['region'] = np.nan
df['region'] = df['addr_state'].apply(finding_regions)
col.loc[((col['emp_length'] == '10+ years'), 'emp_length_int')] = 10
col.loc[((col['emp_length'] == '9 years'), 'emp_length_int')] = 9
col.loc[((col['emp_length'] == '8 years'), 'emp_length_int')] = 8
col.loc[((col['emp_length'] == '7 years'), 'emp_length_int')] = 7
col.loc[((col['emp_length'] == '6 years'), 'emp_length_int')] = 6
col.loc[((col['emp_length'] == '5 years'), 'emp_length_int')] = 5
col.loc[((col['emp_length'] == '4 years'), 'emp_length_int')] = 4
col.loc[((col['emp_length'] == '3 years'), 'emp_length_int')] = 3
col.loc[((col['emp_length'] == '2 years'), 'emp_length_int')] = 2
col.loc[((col['emp_length'] == '1 year'), 'emp_length_int')] = 1
col.loc[((col['emp_length'] == '< 1 year'), 'emp_length_int')] = 0.5
col.loc[((col['emp_length'] == 'n/a'), 'emp_length_int')] = 0
return col
#=============
# Function 246
def cleaning_func_29(col,df):
# additional context code from user definitions
def finding_regions(state):
if (state in west):
return 'West'
elif (state in south_west):
return 'SouthWest'
elif (state in south_east):
return 'SouthEast'
elif (state in mid_west):
return 'MidWest'
elif (state in north_east):
return 'NorthEast'
def loan_condition(status):
if (status in bad_loan):
return 'Bad Loan'
else:
return 'Good Loan'
# core cleaning code
import pandas as pd
import numpy as np
# df = pd.read_csv('../input/loan.csv', low_memory=False)
df = df.rename(columns={'loan_amnt': 'loan_amount', 'funded_amnt': 'funded_amount', 'funded_amnt_inv': 'investor_funds', 'int_rate': 'interest_rate', 'annual_inc': 'annual_income'})
bad_loan = ['Charged Off', 'Default', 'Does not meet the credit policy. Status:Charged Off', 'In Grace Period', 'Late (16-30 days)', 'Late (31-120 days)']
df['loan_condition'] = np.nan
df['loan_condition'] = df['loan_status'].apply(loan_condition)
west = ['CA', 'OR', 'UT', 'WA', 'CO', 'NV', 'AK', 'MT', 'HI', 'WY', 'ID']
south_west = ['AZ', 'TX', 'NM', 'OK']
south_east = ['GA', 'NC', 'VA', 'FL', 'KY', 'SC', 'LA', 'AL', 'WV', 'DC', 'AR', 'DE', 'MS', 'TN']
mid_west = ['IL', 'MO', 'MN', 'OH', 'WI', 'KS', 'MI', 'SD', 'IA', 'NE', 'IN', 'ND']
north_east = ['CT', 'NY', 'PA', 'NJ', 'RI', 'MA', 'MD', 'VT', 'NH', 'ME']
df['region'] = np.nan
df['region'] = df['addr_state'].apply(finding_regions)
return df
#=============
# Function 247
def cleaning_func_30(col,df):
# additional context code from user definitions
def finding_regions(state):
if (state in west):
return 'West'
elif (state in south_west):
return 'SouthWest'
elif (state in south_east):
return 'SouthEast'
elif (state in mid_west):
return 'MidWest'
elif (state in north_east):
return 'NorthEast'
def loan_condition(status):
if (status in bad_loan):
return 'Bad Loan'
else:
return 'Good Loan'
# core cleaning code
import pandas as pd
import numpy as np
# df = pd.read_csv('../input/loan.csv', low_memory=False)
df = df.rename(columns={'loan_amnt': 'loan_amount', 'funded_amnt': 'funded_amount', 'funded_amnt_inv': 'investor_funds', 'int_rate': 'interest_rate', 'annual_inc': 'annual_income'})
bad_loan = ['Charged Off', 'Default', 'Does not meet the credit policy. Status:Charged Off', 'In Grace Period', 'Late (16-30 days)', 'Late (31-120 days)']
df['loan_condition'] = np.nan
df['loan_condition'] = df['loan_status'].apply(loan_condition)
west = ['CA', 'OR', 'UT', 'WA', 'CO', 'NV', 'AK', 'MT', 'HI', 'WY', 'ID']
south_west = ['AZ', 'TX', 'NM', 'OK']
south_east = ['GA', 'NC', 'VA', 'FL', 'KY', 'SC', 'LA', 'AL', 'WV', 'DC', 'AR', 'DE', 'MS', 'TN']
mid_west = ['IL', 'MO', 'MN', 'OH', 'WI', 'KS', 'MI', 'SD', 'IA', 'NE', 'IN', 'ND']
north_east = ['CT', 'NY', 'PA', 'NJ', 'RI', 'MA', 'MD', 'VT', 'NH', 'ME']
df['region'] = np.nan
df['region'] = df['addr_state'].apply(finding_regions)
col.loc[((col['emp_length'] == '10+ years'), 'emp_length_int')] = 10
col.loc[((col['emp_length'] == '9 years'), 'emp_length_int')] = 9
col.loc[((col['emp_length'] == '8 years'), 'emp_length_int')] = 8
col.loc[((col['emp_length'] == '7 years'), 'emp_length_int')] = 7
col.loc[((col['emp_length'] == '6 years'), 'emp_length_int')] = 6
col.loc[((col['emp_length'] == '5 years'), 'emp_length_int')] = 5
col.loc[((col['emp_length'] == '4 years'), 'emp_length_int')] = 4
col.loc[((col['emp_length'] == '3 years'), 'emp_length_int')] = 3
col.loc[((col['emp_length'] == '2 years'), 'emp_length_int')] = 2
col.loc[((col['emp_length'] == '1 year'), 'emp_length_int')] = 1
col.loc[((col['emp_length'] == '< 1 year'), 'emp_length_int')] = 0.5
col.loc[((col['emp_length'] == 'n/a'), 'emp_length_int')] = 0
badloans_df = df.loc[(df['loan_condition'] == 'Bad Loan')]
return badloans_df
#=============
# Function 248
def cleaning_func_31(col,df):
# additional context code from user definitions
def finding_regions(state):
if (state in west):
return 'West'
elif (state in south_west):
return 'SouthWest'
elif (state in south_east):
return 'SouthEast'
elif (state in mid_west):
return 'MidWest'
elif (state in north_east):
return 'NorthEast'
def loan_condition(status):
if (status in bad_loan):
return 'Bad Loan'
else:
return 'Good Loan'
# core cleaning code
import pandas as pd
import numpy as np
# df = pd.read_csv('../input/loan.csv', low_memory=False)
df = df.rename(columns={'loan_amnt': 'loan_amount', 'funded_amnt': 'funded_amount', 'funded_amnt_inv': 'investor_funds', 'int_rate': 'interest_rate', 'annual_inc': 'annual_income'})
bad_loan = ['Charged Off', 'Default', 'Does not meet the credit policy. Status:Charged Off', 'In Grace Period', 'Late (16-30 days)', 'Late (31-120 days)']
df['loan_condition'] = np.nan
df['loan_condition'] = df['loan_status'].apply(loan_condition)
west = ['CA', 'OR', 'UT', 'WA', 'CO', 'NV', 'AK', 'MT', 'HI', 'WY', 'ID']
south_west = ['AZ', 'TX', 'NM', 'OK']
south_east = ['GA', 'NC', 'VA', 'FL', 'KY', 'SC', 'LA', 'AL', 'WV', 'DC', 'AR', 'DE', 'MS', 'TN']
mid_west = ['IL', 'MO', 'MN', 'OH', 'WI', 'KS', 'MI', 'SD', 'IA', 'NE', 'IN', 'ND']
north_east = ['CT', 'NY', 'PA', 'NJ', 'RI', 'MA', 'MD', 'VT', 'NH', 'ME']
df['region'] = np.nan
df['region'] = df['addr_state'].apply(finding_regions)
col.loc[((col['emp_length'] == '10+ years'), 'emp_length_int')] = 10
col.loc[((col['emp_length'] == '9 years'), 'emp_length_int')] = 9
col.loc[((col['emp_length'] == '8 years'), 'emp_length_int')] = 8
col.loc[((col['emp_length'] == '7 years'), 'emp_length_int')] = 7
col.loc[((col['emp_length'] == '6 years'), 'emp_length_int')] = 6
col.loc[((col['emp_length'] == '5 years'), 'emp_length_int')] = 5
col.loc[((col['emp_length'] == '4 years'), 'emp_length_int')] = 4
col.loc[((col['emp_length'] == '3 years'), 'emp_length_int')] = 3
col.loc[((col['emp_length'] == '2 years'), 'emp_length_int')] = 2
col.loc[((col['emp_length'] == '1 year'), 'emp_length_int')] = 1
col.loc[((col['emp_length'] == '< 1 year'), 'emp_length_int')] = 0.5
col.loc[((col['emp_length'] == 'n/a'), 'emp_length_int')] = 0
badloans_df = df.loc[(df['loan_condition'] == 'Bad Loan')]
loan_status_cross = pd.crosstab(badloans_df['region'], badloans_df['loan_status']).apply((lambda x: ((x / x.sum()) * 100)))
loan_status_cross['In Grace Period'] = loan_status_cross['In Grace Period'].apply((lambda x: round(x, 2)))
return loan_status_cross
#=============
# Function 249
def cleaning_func_38(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', low_memory=False)
df = df.rename(columns={'loan_amnt': 'loan_amount', 'funded_amnt': 'funded_amount', 'funded_amnt_inv': 'investor_funds', 'int_rate': 'interest_rate', 'annual_inc': 'annual_income'})
group_dates = df.groupby(['complete_date', 'region'], as_index=False).sum()
group_dates = group_dates.groupby(['issue_d', 'region'], as_index=False).sum()
group_dates = group_dates.groupby(['issue_d', 'region'], as_index=False).sum()
group_dates['loan_amount'] = (group_dates['loan_amount'] / 1000)
by_loan_amount = df.groupby(['region', 'addr_state'], as_index=False).loan_amount.sum()
by_interest_rate = df.groupby(['region', 'addr_state'], as_index=False).interest_rate.mean()
return by_interest_rate
#=============
# Function 250
def cleaning_func_39(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', low_memory=False)
df = df.rename(columns={'loan_amnt': 'loan_amount', 'funded_amnt': 'funded_amount', 'funded_amnt_inv': 'investor_funds', 'int_rate': 'interest_rate', 'annual_inc': 'annual_income'})
group_dates = df.groupby(['complete_date', 'region'], as_index=False).sum()
group_dates = group_dates.groupby(['issue_d', 'region'], as_index=False).sum()
group_dates = group_dates.groupby(['issue_d', 'region'], as_index=False).sum()
group_dates['loan_amount'] = (group_dates['loan_amount'] / 1000)
by_loan_amount = df.groupby(['region', 'addr_state'], as_index=False).loan_amount.sum()
by_interest_rate = df.groupby(['region', 'addr_state'], as_index=False).interest_rate.mean()
by_income = df.groupby(['region', 'addr_state'], as_index=False).annual_income.mean()
return by_income
#=============
# Function 251
def cleaning_func_40(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', low_memory=False)
df = df.rename(columns={'loan_amnt': 'loan_amount', 'funded_amnt': 'funded_amount', 'funded_amnt_inv': 'investor_funds', 'int_rate': 'interest_rate', 'annual_inc': 'annual_income'})
group_dates = df.groupby(['complete_date', 'region'], as_index=False).sum()
group_dates = group_dates.groupby(['issue_d', 'region'], as_index=False).sum()
group_dates = group_dates.groupby(['issue_d', 'region'], as_index=False).sum()
group_dates['loan_amount'] = (group_dates['loan_amount'] / 1000)
by_loan_amount = df.groupby(['region', 'addr_state'], as_index=False).loan_amount.sum()
by_interest_rate = df.groupby(['region', 'addr_state'], as_index=False).interest_rate.mean()
by_income = df.groupby(['region', 'addr_state'], as_index=False).annual_income.mean()
states = by_loan_amount['addr_state'].values.tolist()
average_loan_amounts = by_loan_amount['loan_amount'].values.tolist()
average_interest_rates = by_interest_rate['interest_rate'].values.tolist()
average_annual_income = by_income['annual_income'].values.tolist()
from collections import OrderedDict
metrics_data = OrderedDict([('state_codes', states), ('issued_loans', average_loan_amounts), ('interest_rate', average_interest_rates), ('annual_income', average_annual_income)])
metrics_df = pd.DataFrame.from_dict(metrics_data)
return metrics_df
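#=============
# Sketch: cleaning_func_40 expects 'complete_date' and 'region' to already exist on the frame
# (cleaning_func_6 and cleaning_func_10 above provide them) and returns per-state averages of
# issued loan amount, interest rate and annual income. Written against the older pandas
# defaults of the source kernels; csv path assumed:
if __name__ == '__main__':
    import pandas as pd
    loans = cleaning_func_10(cleaning_func_6(pd.read_csv('../input/loan.csv', low_memory=False)))
    print(cleaning_func_40(loans).head())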
#=============
# Function 252
def cleaning_func_41(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', low_memory=False)
df = df.rename(columns={'loan_amnt': 'loan_amount', 'funded_amnt': 'funded_amount', 'funded_amnt_inv': 'investor_funds', 'int_rate': 'interest_rate', 'annual_inc': 'annual_income'})
group_dates = df.groupby(['complete_date', 'region'], as_index=False).sum()
group_dates = group_dates.groupby(['issue_d', 'region'], as_index=False).sum()
group_dates = group_dates.groupby(['issue_d', 'region'], as_index=False).sum()
group_dates['loan_amount'] = (group_dates['loan_amount'] / 1000)
by_loan_amount = df.groupby(['region', 'addr_state'], as_index=False).loan_amount.sum()
return by_loan_amount
#=============
# Function 253
def cleaning_func_42(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', low_memory=False)
df = df.rename(columns={'loan_amnt': 'loan_amount', 'funded_amnt': 'funded_amount', 'funded_amnt_inv': 'investor_funds', 'int_rate': 'interest_rate', 'annual_inc': 'annual_income'})
group_dates = df.groupby(['complete_date', 'region'], as_index=False).sum()
group_dates = group_dates.groupby(['issue_d', 'region'], as_index=False).sum()
group_dates = group_dates.groupby(['issue_d', 'region'], as_index=False).sum()
group_dates['loan_amount'] = (group_dates['loan_amount'] / 1000)
by_loan_amount = df.groupby(['region', 'addr_state'], as_index=False).loan_amount.sum()
by_interest_rate = df.groupby(['region', 'addr_state'], as_index=False).interest_rate.mean()
by_income = df.groupby(['region', 'addr_state'], as_index=False).annual_income.mean()
states = by_loan_amount['addr_state'].values.tolist()
average_loan_amounts = by_loan_amount['loan_amount'].values.tolist()
average_interest_rates = by_interest_rate['interest_rate'].values.tolist()
average_annual_income = by_income['annual_income'].values.tolist()
from collections import OrderedDict
metrics_data = OrderedDict([('state_codes', states), ('issued_loans', average_loan_amounts), ('interest_rate', average_interest_rates), ('annual_income', average_annual_income)])
metrics_df = pd.DataFrame.from_dict(metrics_data)
metrics_df = metrics_df.round(decimals=2)
    # cast the metrics used in the hover text to strings before concatenation
    for text_col in ('interest_rate', 'annual_income'):
        metrics_df[text_col] = metrics_df[text_col].astype(str)
metrics_df['text'] = ((((((metrics_df['state_codes'] + '<br>') + 'Average loan interest rate: ') + metrics_df['interest_rate']) + '<br>') + 'Average annual income: ') + metrics_df['annual_income'])
return metrics_df
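#=============
# Sketch: the '<br>'-joined 'text' column prepared by cleaning_func_42 reads like hover text
# for a USA choropleth; a minimal plotly figure under that assumption (the extracted slice
# only prepares the data; plotly usage, calling order and csv path are assumptions):
if __name__ == '__main__':
    import pandas as pd
    import plotly.graph_objs as go
    loans = cleaning_func_10(cleaning_func_6(pd.read_csv('../input/loan.csv', low_memory=False)))
    metrics_df = cleaning_func_42(loans)
    fig = go.Figure(data=[go.Choropleth(locations=metrics_df['state_codes'], locationmode='USA-states',
                                        z=metrics_df['issued_loans'], text=metrics_df['text'])])
    fig.update_layout(geo=dict(scope='usa'))
    fig.write_html('issued_loans_by_state.html')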
#=============
# Function 254
def cleaning_func_43(col,df):
# additional context code from user definitions
def loan_condition(status):
if (status in bad_loan):
return 'Bad Loan'
else:
return 'Good Loan'
# core cleaning code
import pandas as pd
import numpy as np
# df = pd.read_csv('../input/loan.csv', low_memory=False)
df = df.rename(columns={'loan_amnt': 'loan_amount', 'funded_amnt': 'funded_amount', 'funded_amnt_inv': 'investor_funds', 'int_rate': 'interest_rate', 'annual_inc': 'annual_income'})
bad_loan = ['Charged Off', 'Default', 'Does not meet the credit policy. Status:Charged Off', 'In Grace Period', 'Late (16-30 days)', 'Late (31-120 days)']
df['loan_condition'] = np.nan
df['loan_condition'] = df['loan_status'].apply(loan_condition)
df['emp_length_int'] = np.nan
col.loc[((col['emp_length'] == '10+ years'), 'emp_length_int')] = 10
col.loc[((col['emp_length'] == '9 years'), 'emp_length_int')] = 9
col.loc[((col['emp_length'] == '8 years'), 'emp_length_int')] = 8
col.loc[((col['emp_length'] == '7 years'), 'emp_length_int')] = 7
col.loc[((col['emp_length'] == '6 years'), 'emp_length_int')] = 6
col.loc[((col['emp_length'] == '5 years'), 'emp_length_int')] = 5
col.loc[((col['emp_length'] == '4 years'), 'emp_length_int')] = 4
col.loc[((col['emp_length'] == '3 years'), 'emp_length_int')] = 3
col.loc[((col['emp_length'] == '2 years'), 'emp_length_int')] = 2
col.loc[((col['emp_length'] == '1 year'), 'emp_length_int')] = 1
col.loc[((col['emp_length'] == '< 1 year'), 'emp_length_int')] = 0.5
col.loc[((col['emp_length'] == 'n/a'), 'emp_length_int')] = 0
by_loan_amount = df.groupby(['region', 'addr_state'], as_index=False).loan_amount.sum()
states = by_loan_amount['addr_state'].values.tolist()
from collections import OrderedDict
col.loc[((col['annual_income'] <= 100000), 'income_category')] = 'Low'
col.loc[(((col['annual_income'] > 100000) & (col['annual_income'] <= 200000)), 'income_category')] = 'Medium'
col.loc[((col['annual_income'] > 200000), 'income_category')] = 'High'
col.loc[((df['loan_condition'] == 'Bad Loan'), 'loan_condition_int')] = 0
col.loc[((df['loan_condition'] == 'Good Loan'), 'loan_condition_int')] = 1
by_emp_length = df.groupby(['region', 'addr_state'], as_index=False).emp_length_int.mean().sort_values(by='addr_state')
loan_condition_bystate = pd.crosstab(df['addr_state'], df['loan_condition'])
return loan_condition_bystate
#=============
# Function 255
def cleaning_func_44(col,df):
# additional context code from user definitions
def loan_condition(status):
if (status in bad_loan):
return 'Bad Loan'
else:
return 'Good Loan'
# core cleaning code
import pandas as pd
import numpy as np
# df = pd.read_csv('../input/loan.csv', low_memory=False)
df = df.rename(columns={'loan_amnt': 'loan_amount', 'funded_amnt': 'funded_amount', 'funded_amnt_inv': 'investor_funds', 'int_rate': 'interest_rate', 'annual_inc': 'annual_income'})
bad_loan = ['Charged Off', 'Default', 'Does not meet the credit policy. Status:Charged Off', 'In Grace Period', 'Late (16-30 days)', 'Late (31-120 days)']
df['loan_condition'] = np.nan
df['loan_condition'] = df['loan_status'].apply(loan_condition)
df['emp_length_int'] = np.nan
col.loc[((col['emp_length'] == '10+ years'), 'emp_length_int')] = 10
col.loc[((col['emp_length'] == '9 years'), 'emp_length_int')] = 9
col.loc[((col['emp_length'] == '8 years'), 'emp_length_int')] = 8
col.loc[((col['emp_length'] == '7 years'), 'emp_length_int')] = 7
col.loc[((col['emp_length'] == '6 years'), 'emp_length_int')] = 6
col.loc[((col['emp_length'] == '5 years'), 'emp_length_int')] = 5
col.loc[((col['emp_length'] == '4 years'), 'emp_length_int')] = 4
col.loc[((col['emp_length'] == '3 years'), 'emp_length_int')] = 3
col.loc[((col['emp_length'] == '2 years'), 'emp_length_int')] = 2
col.loc[((col['emp_length'] == '1 year'), 'emp_length_int')] = 1
col.loc[((col['emp_length'] == '< 1 year'), 'emp_length_int')] = 0.5
col.loc[((col['emp_length'] == 'n/a'), 'emp_length_int')] = 0
by_loan_amount = df.groupby(['region', 'addr_state'], as_index=False).loan_amount.sum()
states = by_loan_amount['addr_state'].values.tolist()
from collections import OrderedDict
col.loc[((col['annual_income'] <= 100000), 'income_category')] = 'Low'
col.loc[(((col['annual_income'] > 100000) & (col['annual_income'] <= 200000)), 'income_category')] = 'Medium'
col.loc[((col['annual_income'] > 200000), 'income_category')] = 'High'
col.loc[((df['loan_condition'] == 'Bad Loan'), 'loan_condition_int')] = 0
col.loc[((df['loan_condition'] == 'Good Loan'), 'loan_condition_int')] = 1
by_emp_length = df.groupby(['region', 'addr_state'], as_index=False).emp_length_int.mean().sort_values(by='addr_state')
return by_emp_length
#=============
# Function 256
def cleaning_func_45(col,df):
# additional context code from user definitions
def loan_condition(status):
if (status in bad_loan):
return 'Bad Loan'
else:
return 'Good Loan'
# core cleaning code
import pandas as pd
import numpy as np
# df = pd.read_csv('../input/loan.csv', low_memory=False)
df = df.rename(columns={'loan_amnt': 'loan_amount', 'funded_amnt': 'funded_amount', 'funded_amnt_inv': 'investor_funds', 'int_rate': 'interest_rate', 'annual_inc': 'annual_income'})
bad_loan = ['Charged Off', 'Default', 'Does not meet the credit policy. Status:Charged Off', 'In Grace Period', 'Late (16-30 days)', 'Late (31-120 days)']
df['loan_condition'] = np.nan
df['loan_condition'] = df['loan_status'].apply(loan_condition)
df['emp_length_int'] = np.nan
col.loc[((col['emp_length'] == '10+ years'), 'emp_length_int')] = 10
col.loc[((col['emp_length'] == '9 years'), 'emp_length_int')] = 9
col.loc[((col['emp_length'] == '8 years'), 'emp_length_int')] = 8
col.loc[((col['emp_length'] == '7 years'), 'emp_length_int')] = 7
col.loc[((col['emp_length'] == '6 years'), 'emp_length_int')] = 6
col.loc[((col['emp_length'] == '5 years'), 'emp_length_int')] = 5
col.loc[((col['emp_length'] == '4 years'), 'emp_length_int')] = 4
col.loc[((col['emp_length'] == '3 years'), 'emp_length_int')] = 3
col.loc[((col['emp_length'] == '2 years'), 'emp_length_int')] = 2
col.loc[((col['emp_length'] == '1 year'), 'emp_length_int')] = 1
col.loc[((col['emp_length'] == '< 1 year'), 'emp_length_int')] = 0.5
col.loc[((col['emp_length'] == 'n/a'), 'emp_length_int')] = 0
by_loan_amount = df.groupby(['region', 'addr_state'], as_index=False).loan_amount.sum()
states = by_loan_amount['addr_state'].values.tolist()
from collections import OrderedDict
col.loc[((col['annual_income'] <= 100000), 'income_category')] = 'Low'
col.loc[(((col['annual_income'] > 100000) & (col['annual_income'] <= 200000)), 'income_category')] = 'Medium'
col.loc[((col['annual_income'] > 200000), 'income_category')] = 'High'
col.loc[((df['loan_condition'] == 'Bad Loan'), 'loan_condition_int')] = 0
col.loc[((df['loan_condition'] == 'Good Loan'), 'loan_condition_int')] = 1
by_emp_length = df.groupby(['region', 'addr_state'], as_index=False).emp_length_int.mean().sort_values(by='addr_state')
loan_condition_bystate = pd.crosstab(df['addr_state'], df['loan_condition'])
cross_condition = pd.crosstab(df['addr_state'], df['loan_condition'])
return cross_condition
#=============
# Function 257
def cleaning_func_46(col,df):
# additional context code from user definitions
def loan_condition(status):
if (status in bad_loan):
return 'Bad Loan'
else:
return 'Good Loan'
# core cleaning code
import pandas as pd
import numpy as np
# df = pd.read_csv('../input/loan.csv', low_memory=False)
df = df.rename(columns={'loan_amnt': 'loan_amount', 'funded_amnt': 'funded_amount', 'funded_amnt_inv': 'investor_funds', 'int_rate': 'interest_rate', 'annual_inc': 'annual_income'})
bad_loan = ['Charged Off', 'Default', 'Does not meet the credit policy. Status:Charged Off', 'In Grace Period', 'Late (16-30 days)', 'Late (31-120 days)']
df['loan_condition'] = np.nan
df['loan_condition'] = df['loan_status'].apply(loan_condition)
df['emp_length_int'] = np.nan
col.loc[((col['emp_length'] == '10+ years'), 'emp_length_int')] = 10
col.loc[((col['emp_length'] == '9 years'), 'emp_length_int')] = 9
col.loc[((col['emp_length'] == '8 years'), 'emp_length_int')] = 8
col.loc[((col['emp_length'] == '7 years'), 'emp_length_int')] = 7
col.loc[((col['emp_length'] == '6 years'), 'emp_length_int')] = 6
col.loc[((col['emp_length'] == '5 years'), 'emp_length_int')] = 5
col.loc[((col['emp_length'] == '4 years'), 'emp_length_int')] = 4
col.loc[((col['emp_length'] == '3 years'), 'emp_length_int')] = 3
col.loc[((col['emp_length'] == '2 years'), 'emp_length_int')] = 2
col.loc[((col['emp_length'] == '1 year'), 'emp_length_int')] = 1
col.loc[((col['emp_length'] == '< 1 year'), 'emp_length_int')] = 0.5
col.loc[((col['emp_length'] == 'n/a'), 'emp_length_int')] = 0
by_loan_amount = df.groupby(['region', 'addr_state'], as_index=False).loan_amount.sum()
states = by_loan_amount['addr_state'].values.tolist()
from collections import OrderedDict
col.loc[((col['annual_income'] <= 100000), 'income_category')] = 'Low'
col.loc[(((col['annual_income'] > 100000) & (col['annual_income'] <= 200000)), 'income_category')] = 'Medium'
col.loc[((col['annual_income'] > 200000), 'income_category')] = 'High'
col.loc[((df['loan_condition'] == 'Bad Loan'), 'loan_condition_int')] = 0
col.loc[((df['loan_condition'] == 'Good Loan'), 'loan_condition_int')] = 1
return col
#=============
# Function 258
def cleaning_func_47(col,df):
# additional context code from user definitions
def loan_condition(status):
if (status in bad_loan):
return 'Bad Loan'
else:
return 'Good Loan'
# core cleaning code
import pandas as pd
import numpy as np
# df = pd.read_csv('../input/loan.csv', low_memory=False)
df = df.rename(columns={'loan_amnt': 'loan_amount', 'funded_amnt': 'funded_amount', 'funded_amnt_inv': 'investor_funds', 'int_rate': 'interest_rate', 'annual_inc': 'annual_income'})
bad_loan = ['Charged Off', 'Default', 'Does not meet the credit policy. Status:Charged Off', 'In Grace Period', 'Late (16-30 days)', 'Late (31-120 days)']
df['loan_condition'] = np.nan
df['loan_condition'] = df['loan_status'].apply(loan_condition)
df['emp_length_int'] = np.nan
col.loc[((col['emp_length'] == '10+ years'), 'emp_length_int')] = 10
col.loc[((col['emp_length'] == '9 years'), 'emp_length_int')] = 9
col.loc[((col['emp_length'] == '8 years'), 'emp_length_int')] = 8
col.loc[((col['emp_length'] == '7 years'), 'emp_length_int')] = 7
col.loc[((col['emp_length'] == '6 years'), 'emp_length_int')] = 6
col.loc[((col['emp_length'] == '5 years'), 'emp_length_int')] = 5
col.loc[((col['emp_length'] == '4 years'), 'emp_length_int')] = 4
col.loc[((col['emp_length'] == '3 years'), 'emp_length_int')] = 3
col.loc[((col['emp_length'] == '2 years'), 'emp_length_int')] = 2
col.loc[((col['emp_length'] == '1 year'), 'emp_length_int')] = 1
col.loc[((col['emp_length'] == '< 1 year'), 'emp_length_int')] = 0.5
col.loc[((col['emp_length'] == 'n/a'), 'emp_length_int')] = 0
by_loan_amount = df.groupby(['region', 'addr_state'], as_index=False).loan_amount.sum()
states = by_loan_amount['addr_state'].values.tolist()
from collections import OrderedDict
col.loc[((col['annual_income'] <= 100000), 'income_category')] = 'Low'
col.loc[(((col['annual_income'] > 100000) & (col['annual_income'] <= 200000)), 'income_category')] = 'Medium'
col.loc[((col['annual_income'] > 200000), 'income_category')] = 'High'
col.loc[((df['loan_condition'] == 'Bad Loan'), 'loan_condition_int')] = 0
col.loc[((df['loan_condition'] == 'Good Loan'), 'loan_condition_int')] = 1
by_emp_length = df.groupby(['region', 'addr_state'], as_index=False).emp_length_int.mean().sort_values(by='addr_state')
loan_condition_bystate = pd.crosstab(df['addr_state'], df['loan_condition'])
cross_condition = pd.crosstab(df['addr_state'], df['loan_condition'])
percentage_loan_contributor = pd.crosstab(df['addr_state'], df['loan_condition']).apply((lambda x: ((x / x.sum()) * 100)))
condition_ratio = (cross_condition['Bad Loan'] / cross_condition['Good Loan'])
by_dti = df.groupby(['region', 'addr_state'], as_index=False).dti.mean()
return by_dti
#=============
# Function 259
def cleaning_func_48(col,df):
# additional context code from user definitions
def loan_condition(status):
if (status in bad_loan):
return 'Bad Loan'
else:
return 'Good Loan'
# core cleaning code
import pandas as pd
import numpy as np
# df = pd.read_csv('../input/loan.csv', low_memory=False)
df = df.rename(columns={'loan_amnt': 'loan_amount', 'funded_amnt': 'funded_amount', 'funded_amnt_inv': 'investor_funds', 'int_rate': 'interest_rate', 'annual_inc': 'annual_income'})
bad_loan = ['Charged Off', 'Default', 'Does not meet the credit policy. Status:Charged Off', 'In Grace Period', 'Late (16-30 days)', 'Late (31-120 days)']
df['loan_condition'] = np.nan
df['loan_condition'] = df['loan_status'].apply(loan_condition)
df['emp_length_int'] = np.nan
col.loc[((col['emp_length'] == '10+ years'), 'emp_length_int')] = 10
col.loc[((col['emp_length'] == '9 years'), 'emp_length_int')] = 9
col.loc[((col['emp_length'] == '8 years'), 'emp_length_int')] = 8
col.loc[((col['emp_length'] == '7 years'), 'emp_length_int')] = 7
col.loc[((col['emp_length'] == '6 years'), 'emp_length_int')] = 6
col.loc[((col['emp_length'] == '5 years'), 'emp_length_int')] = 5
col.loc[((col['emp_length'] == '4 years'), 'emp_length_int')] = 4
col.loc[((col['emp_length'] == '3 years'), 'emp_length_int')] = 3
col.loc[((col['emp_length'] == '2 years'), 'emp_length_int')] = 2
col.loc[((col['emp_length'] == '1 year'), 'emp_length_int')] = 1
col.loc[((col['emp_length'] == '< 1 year'), 'emp_length_int')] = 0.5
col.loc[((col['emp_length'] == 'n/a'), 'emp_length_int')] = 0
by_loan_amount = df.groupby(['region', 'addr_state'], as_index=False).loan_amount.sum()
states = by_loan_amount['addr_state'].values.tolist()
from collections import OrderedDict
col.loc[((col['annual_income'] <= 100000), 'income_category')] = 'Low'
col.loc[(((col['annual_income'] > 100000) & (col['annual_income'] <= 200000)), 'income_category')] = 'Medium'
col.loc[((col['annual_income'] > 200000), 'income_category')] = 'High'
col.loc[((df['loan_condition'] == 'Bad Loan'), 'loan_condition_int')] = 0
col.loc[((df['loan_condition'] == 'Good Loan'), 'loan_condition_int')] = 1
by_emp_length = df.groupby(['region', 'addr_state'], as_index=False).emp_length_int.mean().sort_values(by='addr_state')
loan_condition_bystate = pd.crosstab(df['addr_state'], df['loan_condition'])
cross_condition = pd.crosstab(df['addr_state'], df['loan_condition'])
percentage_loan_contributor = pd.crosstab(df['addr_state'], df['loan_condition']).apply((lambda x: ((x / x.sum()) * 100)))
condition_ratio = (cross_condition['Bad Loan'] / cross_condition['Good Loan'])
by_dti = df.groupby(['region', 'addr_state'], as_index=False).dti.mean()
state_codes = sorted(states)
default_ratio = condition_ratio.values.tolist()
average_dti = by_dti['dti'].values.tolist()
average_emp_length = by_emp_length['emp_length_int'].values.tolist()
number_of_badloans = loan_condition_bystate['Bad Loan'].values.tolist()
percentage_ofall_badloans = percentage_loan_contributor['Bad Loan'].values.tolist()
risk_data = OrderedDict([('state_codes', state_codes), ('default_ratio', default_ratio), ('badloans_amount', number_of_badloans), ('percentage_of_badloans', percentage_ofall_badloans), ('average_dti', average_dti), ('average_emp_length', average_emp_length)])
risk_df = pd.DataFrame.from_dict(risk_data)
return risk_df
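#=============
# Sketch: cleaning_func_48 condenses the state-level risk indicators into one frame; sorting
# by default_ratio surfaces the states with the worst bad/good loan ratio. The frame must
# already carry 'region' (cleaning_func_10 provides it); dual-frame calling convention and
# csv path assumed:
if __name__ == '__main__':
    import pandas as pd
    loans = cleaning_func_10(pd.read_csv('../input/loan.csv', low_memory=False))
    risk_df = cleaning_func_48(loans, loans)
    print(risk_df.sort_values('default_ratio', ascending=False).head(10))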
#=============
# Function 260
def cleaning_func_50(col,df):
# additional context code from user definitions
def loan_condition(status):
if (status in bad_loan):
return 'Bad Loan'
else:
return 'Good Loan'
# core cleaning code
import pandas as pd
import numpy as np
# df = pd.read_csv('../input/loan.csv', low_memory=False)
df = df.rename(columns={'loan_amnt': 'loan_amount', 'funded_amnt': 'funded_amount', 'funded_amnt_inv': 'investor_funds', 'int_rate': 'interest_rate', 'annual_inc': 'annual_income'})
bad_loan = ['Charged Off', 'Default', 'Does not meet the credit policy. Status:Charged Off', 'In Grace Period', 'Late (16-30 days)', 'Late (31-120 days)']
df['loan_condition'] = np.nan
df['loan_condition'] = df['loan_status'].apply(loan_condition)
df['emp_length_int'] = np.nan
col.loc[((col['emp_length'] == '10+ years'), 'emp_length_int')] = 10
col.loc[((col['emp_length'] == '9 years'), 'emp_length_int')] = 9
col.loc[((col['emp_length'] == '8 years'), 'emp_length_int')] = 8
col.loc[((col['emp_length'] == '7 years'), 'emp_length_int')] = 7
col.loc[((col['emp_length'] == '6 years'), 'emp_length_int')] = 6
col.loc[((col['emp_length'] == '5 years'), 'emp_length_int')] = 5
col.loc[((col['emp_length'] == '4 years'), 'emp_length_int')] = 4
col.loc[((col['emp_length'] == '3 years'), 'emp_length_int')] = 3
col.loc[((col['emp_length'] == '2 years'), 'emp_length_int')] = 2
col.loc[((col['emp_length'] == '1 year'), 'emp_length_int')] = 1
col.loc[((col['emp_length'] == '< 1 year'), 'emp_length_int')] = 0.5
col.loc[((col['emp_length'] == 'n/a'), 'emp_length_int')] = 0
by_loan_amount = df.groupby(['region', 'addr_state'], as_index=False).loan_amount.sum()
states = by_loan_amount['addr_state'].values.tolist()
from collections import OrderedDict
col.loc[((col['annual_income'] <= 100000), 'income_category')] = 'Low'
col.loc[(((col['annual_income'] > 100000) & (col['annual_income'] <= 200000)), 'income_category')] = 'Medium'
col.loc[((col['annual_income'] > 200000), 'income_category')] = 'High'
col.loc[((df['loan_condition'] == 'Bad Loan'), 'loan_condition_int')] = 0
col.loc[((df['loan_condition'] == 'Good Loan'), 'loan_condition_int')] = 1
by_emp_length = df.groupby(['region', 'addr_state'], as_index=False).emp_length_int.mean().sort_values(by='addr_state')
loan_condition_bystate = pd.crosstab(df['addr_state'], df['loan_condition'])
cross_condition = pd.crosstab(df['addr_state'], df['loan_condition'])
percentage_loan_contributor = pd.crosstab(df['addr_state'], df['loan_condition']).apply((lambda x: ((x / x.sum()) * 100)))
condition_ratio = (cross_condition['Bad Loan'] / cross_condition['Good Loan'])
by_dti = df.groupby(['region', 'addr_state'], as_index=False).dti.mean()
state_codes = sorted(states)
default_ratio = condition_ratio.values.tolist()
average_dti = by_dti['dti'].values.tolist()
average_emp_length = by_emp_length['emp_length_int'].values.tolist()
number_of_badloans = loan_condition_bystate['Bad Loan'].values.tolist()
percentage_ofall_badloans = percentage_loan_contributor['Bad Loan'].values.tolist()
risk_data = OrderedDict([('state_codes', state_codes), ('default_ratio', default_ratio), ('badloans_amount', number_of_badloans), ('percentage_of_badloans', percentage_ofall_badloans), ('average_dti', average_dti), ('average_emp_length', average_emp_length)])
risk_df = pd.DataFrame.from_dict(risk_data)
risk_df = risk_df.round(decimals=3)
# 'col' is expected to be a column label in the calling scope; cast it to str so it can be concatenated into the hover text below
risk_df[col] = risk_df[col].astype(str)
risk_df['text'] = (((((((((((((risk_df['state_codes'] + '<br>') + 'Number of Bad Loans: ') + risk_df['badloans_amount']) + '<br>') + 'Percentage of all Bad Loans: ') + risk_df['percentage_of_badloans']) + '%') + '<br>') + 'Average Debt-to-Income Ratio: ') + risk_df['average_dti']) + '<br>') + 'Average Length of Employment: ') + risk_df['average_emp_length'])
return risk_df
#=============
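# --- Illustrative sketch (not part of the extracted kernels) ---
# Assumption: the repeated "risk_df[col] = risk_df[col].astype(str)" line in the function
# above most likely came from a loop over the numeric summary columns, cast to str so
# they can be concatenated into the 'text' hover field. Column names are taken from the
# risk_data dictionary built above.
def stringify_risk_columns(risk_df, cols=('badloans_amount', 'percentage_of_badloans',
                                          'average_dti', 'average_emp_length')):
    # cast each summary column to string in place and return the frame for chaining
    for c in cols:
        risk_df[c] = risk_df[c].astype(str)
    return risk_df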
# Function 261
def cleaning_func_51(col,df):
# additional context code from user definitions
def loan_condition(status):
if (status in bad_loan):
return 'Bad Loan'
else:
return 'Good Loan'
# core cleaning code
import pandas as pd
import numpy as np
# df = pd.read_csv('../input/loan.csv', low_memory=False)
df = df.rename(columns={'loan_amnt': 'loan_amount', 'funded_amnt': 'funded_amount', 'funded_amnt_inv': 'investor_funds', 'int_rate': 'interest_rate', 'annual_inc': 'annual_income'})
bad_loan = ['Charged Off', 'Default', 'Does not meet the credit policy. Status:Charged Off', 'In Grace Period', 'Late (16-30 days)', 'Late (31-120 days)']
df['loan_condition'] = np.nan
df['loan_condition'] = df['loan_status'].apply(loan_condition)
df['emp_length_int'] = np.nan
col.loc[((col['emp_length'] == '10+ years'), 'emp_length_int')] = 10
col.loc[((col['emp_length'] == '9 years'), 'emp_length_int')] = 9
col.loc[((col['emp_length'] == '8 years'), 'emp_length_int')] = 8
col.loc[((col['emp_length'] == '7 years'), 'emp_length_int')] = 7
col.loc[((col['emp_length'] == '6 years'), 'emp_length_int')] = 6
col.loc[((col['emp_length'] == '5 years'), 'emp_length_int')] = 5
col.loc[((col['emp_length'] == '4 years'), 'emp_length_int')] = 4
col.loc[((col['emp_length'] == '3 years'), 'emp_length_int')] = 3
col.loc[((col['emp_length'] == '2 years'), 'emp_length_int')] = 2
col.loc[((col['emp_length'] == '1 year'), 'emp_length_int')] = 1
col.loc[((col['emp_length'] == '< 1 year'), 'emp_length_int')] = 0.5
col.loc[((col['emp_length'] == 'n/a'), 'emp_length_int')] = 0
by_loan_amount = df.groupby(['region', 'addr_state'], as_index=False).loan_amount.sum()
return by_loan_amount
#=============
# Function 262
def cleaning_func_53(col,df):
# additional context code from user definitions
def loan_condition(status):
if (status in bad_loan):
return 'Bad Loan'
else:
return 'Good Loan'
# core cleaning code
import pandas as pd
import numpy as np
# df = pd.read_csv('../input/loan.csv', low_memory=False)
df = df.rename(columns={'loan_amnt': 'loan_amount', 'funded_amnt': 'funded_amount', 'funded_amnt_inv': 'investor_funds', 'int_rate': 'interest_rate', 'annual_inc': 'annual_income'})
bad_loan = ['Charged Off', 'Default', 'Does not meet the credit policy. Status:Charged Off', 'In Grace Period', 'Late (16-30 days)', 'Late (31-120 days)']
df['loan_condition'] = np.nan
df['loan_condition'] = df['loan_status'].apply(loan_condition)
df['emp_length_int'] = np.nan
col.loc[((col['emp_length'] == '10+ years'), 'emp_length_int')] = 10
col.loc[((col['emp_length'] == '9 years'), 'emp_length_int')] = 9
col.loc[((col['emp_length'] == '8 years'), 'emp_length_int')] = 8
col.loc[((col['emp_length'] == '7 years'), 'emp_length_int')] = 7
col.loc[((col['emp_length'] == '6 years'), 'emp_length_int')] = 6
col.loc[((col['emp_length'] == '5 years'), 'emp_length_int')] = 5
col.loc[((col['emp_length'] == '4 years'), 'emp_length_int')] = 4
col.loc[((col['emp_length'] == '3 years'), 'emp_length_int')] = 3
col.loc[((col['emp_length'] == '2 years'), 'emp_length_int')] = 2
col.loc[((col['emp_length'] == '1 year'), 'emp_length_int')] = 1
col.loc[((col['emp_length'] == '< 1 year'), 'emp_length_int')] = 0.5
col.loc[((col['emp_length'] == 'n/a'), 'emp_length_int')] = 0
by_loan_amount = df.groupby(['region', 'addr_state'], as_index=False).loan_amount.sum()
states = by_loan_amount['addr_state'].values.tolist()
from collections import OrderedDict
col.loc[((col['annual_income'] <= 100000), 'income_category')] = 'Low'
col.loc[(((col['annual_income'] > 100000) & (col['annual_income'] <= 200000)), 'income_category')] = 'Medium'
col.loc[((col['annual_income'] > 200000), 'income_category')] = 'High'
col.loc[((df['loan_condition'] == 'Bad Loan'), 'loan_condition_int')] = 0
col.loc[((df['loan_condition'] == 'Good Loan'), 'loan_condition_int')] = 1
by_emp_length = df.groupby(['region', 'addr_state'], as_index=False).emp_length_int.mean().sort_values(by='addr_state')
loan_condition_bystate = pd.crosstab(df['addr_state'], df['loan_condition'])
cross_condition = pd.crosstab(df['addr_state'], df['loan_condition'])
percentage_loan_contributor = pd.crosstab(df['addr_state'], df['loan_condition']).apply((lambda x: ((x / x.sum()) * 100)))
return percentage_loan_contributor
#=============
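# --- Illustrative sketch (not part of the extracted kernels) ---
# The percentage crosstab above divides each loan_condition column by its own total,
# i.e. each state's share of all Good/Bad loans. The same result can be expressed with
# pandas' built-in normalizer.
def pct_of_loans_by_state(df):
    import pandas as pd
    # normalize='columns' divides every column by its column sum, matching the
    # apply(lambda x: x / x.sum()) pattern used above
    return pd.crosstab(df['addr_state'], df['loan_condition'], normalize='columns') * 100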
# Function 263
def cleaning_func_0(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv', parse_dates=True)
data = data[(data.loan_status != 'Fully Paid')]
return data
#=============
# Function 264
def cleaning_func_1(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv', parse_dates=True)
data = data[(data.loan_status != 'Fully Paid')]
data = data[(data.loan_status != 'Does not meet the credit policy. Status:Fully Paid')]
data.next_pymnt_d = pd.to_datetime(data.next_pymnt_d)
return data
#=============
# Function 265
def cleaning_func_2(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv', parse_dates=True)
data = data[(data.loan_status != 'Fully Paid')]
data = data[(data.loan_status != 'Does not meet the credit policy. Status:Fully Paid')]
data.issue_d = pd.to_datetime(data.issue_d)
return data
#=============
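# --- Illustrative sketch (not part of the extracted kernels) ---
# Assumption: the LendingClub date columns are strings like 'Dec-2015', so an explicit
# format makes pd.to_datetime stricter and considerably faster than format inference.
def parse_issue_dates(data, date_format='%b-%Y'):
    import pandas as pd
    data = data.copy()
    # errors='coerce' turns unparseable values into NaT instead of raising
    data['issue_d'] = pd.to_datetime(data['issue_d'], format=date_format, errors='coerce')
    return data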
# Function 266
def cleaning_func_3(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv', parse_dates=True)
data = data[(data.loan_status != 'Fully Paid')]
data = data[(data.loan_status != 'Does not meet the credit policy. Status:Fully Paid')]
data.last_pymnt_d = pd.to_datetime(data.last_pymnt_d)
return data
#=============
# Function 267
def cleaning_func_4(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv', parse_dates=True)
data = data[(data.loan_status != 'Fully Paid')]
data = data[(data.loan_status != 'Does not meet the credit policy. Status:Fully Paid')]
data.last_credit_pull_d = pd.to_datetime(data.last_credit_pull_d)
return data
#=============
# Function 268
def cleaning_func_5(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv', parse_dates=True)
data = data[(data.loan_status != 'Fully Paid')]
data = data[(data.loan_status != 'Does not meet the credit policy. Status:Fully Paid')]
earliest_cr_line = pd.to_datetime(data.earliest_cr_line)
data.earliest_cr_line = earliest_cr_line.dt.year
return data
#=============
# Function 269
def cleaning_func_7(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv', parse_dates=True)
data = data[(data.loan_status != 'Fully Paid')]
data = data[(data.loan_status != 'Does not meet the credit policy. Status:Fully Paid')]
data['rating'] = np.where((data.loan_status != 'Current'), 1, 0)
return data
#=============
# Function 270
def cleaning_func_8(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv', parse_dates=True)
data = data[(data.loan_status != 'Fully Paid')]
data = data[(data.loan_status != 'Does not meet the credit policy. Status:Fully Paid')]
data['recovery'] = np.where((data.recoveries != 0.0), 1, 0)
return data
#=============
# Function 271
def cleaning_func_9(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv', parse_dates=True)
data = data[(data.loan_status != 'Fully Paid')]
data = data[(data.loan_status != 'Does not meet the credit policy. Status:Fully Paid')]
data.emp_length = data.emp_length.replace(np.nan, 0)
return data
#=============
# Function 272
def cleaning_func_10(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv', parse_dates=True)
data = data[(data.loan_status != 'Fully Paid')]
data = data[(data.loan_status != 'Does not meet the credit policy. Status:Fully Paid')]
data.dti_joint = data.dti_joint.replace(np.nan, 0)
return data
#=============
# Function 273
def cleaning_func_11(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv', parse_dates=True)
data = data[(data.loan_status != 'Fully Paid')]
data = data[(data.loan_status != 'Does not meet the credit policy. Status:Fully Paid')]
data.annual_inc_joint = data.annual_inc_joint.replace(np.nan, 0)
return data
#=============
# Function 274
def cleaning_func_12(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv', parse_dates=True)
data = data[(data.loan_status != 'Fully Paid')]
data = data[(data.loan_status != 'Does not meet the credit policy. Status:Fully Paid')]
data.verification_status_joint = data.verification_status_joint.replace(np.nan, 'None')
return data
#=============
# Function 275
def cleaning_func_13(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv', parse_dates=True)
data = data[(data.loan_status != 'Fully Paid')]
data = data[(data.loan_status != 'Does not meet the credit policy. Status:Fully Paid')]
# 'e' is expected to be a column name defined in the calling scope; fill its missing values with 0
data[e] = data[e].replace(np.nan, 0)
data.loc[(data.mths_since_last_delinq.notnull(), 'delinq')] = 1
data.loc[(data.mths_since_last_delinq.isnull(), 'delinq')] = 0
return data
#=============
# Function 276
def cleaning_func_14(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv', parse_dates=True)
data = data[(data.loan_status != 'Fully Paid')]
data = data[(data.loan_status != 'Does not meet the credit policy. Status:Fully Paid')]
# 'e' is expected to be a column name defined in the calling scope; fill its missing values with 0
data[e] = data[e].replace(np.nan, 0)
data.loc[(data.mths_since_last_delinq.notnull(), 'delinq')] = 1
data.loc[(data.mths_since_last_delinq.isnull(), 'delinq')] = 0
data.loc[(data.mths_since_last_major_derog.notnull(), 'derog')] = 1
data.loc[(data.mths_since_last_major_derog.isnull(), 'derog')] = 0
return data
#=============
# Function 277
def cleaning_func_15(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv', parse_dates=True)
data = data[(data.loan_status != 'Fully Paid')]
data = data[(data.loan_status != 'Does not meet the credit policy. Status:Fully Paid')]
# 'e' is expected to be a column name defined in the calling scope; fill its missing values with 0
data[e] = data[e].replace(np.nan, 0)
data.loc[(data.mths_since_last_delinq.notnull(), 'delinq')] = 1
data.loc[(data.mths_since_last_delinq.isnull(), 'delinq')] = 0
data.loc[(data.mths_since_last_major_derog.notnull(), 'derog')] = 1
data.loc[(data.mths_since_last_major_derog.isnull(), 'derog')] = 0
data.loc[(data.mths_since_last_record.notnull(), 'public_record')] = 1
data.loc[(data.mths_since_last_record.isnull(), 'public_record')] = 0
return data
#=============
# Function 278
def cleaning_func_16(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv', parse_dates=True)
data = data[(data.loan_status != 'Fully Paid')]
data = data[(data.loan_status != 'Does not meet the credit policy. Status:Fully Paid')]
# 'e' is expected to be a column name defined in the calling scope; fill its missing values with 0
data[e] = data[e].replace(np.nan, 0)
data.loc[(data.mths_since_last_delinq.notnull(), 'delinq')] = 1
data.loc[(data.mths_since_last_delinq.isnull(), 'delinq')] = 0
data.revol_util = data.revol_util.replace(np.nan, 0)
return data
#=============
# Function 279
def cleaning_func_17(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv', parse_dates=True)
data = data[(data.loan_status != 'Fully Paid')]
data = data[(data.loan_status != 'Does not meet the credit policy. Status:Fully Paid')]
data.last_pymnt_d = pd.to_datetime(data.last_pymnt_d)
# 'e' is expected to be a column name defined in the calling scope; fill its missing values with 0
data[e] = data[e].replace(np.nan, 0)
data.loc[(data.mths_since_last_delinq.notnull(), 'delinq')] = 1
data.loc[(data.mths_since_last_delinq.isnull(), 'delinq')] = 0
data.loc[(data.mths_since_last_major_derog.notnull(), 'derog')] = 1
data.loc[(data.mths_since_last_major_derog.isnull(), 'derog')] = 0
data.loc[(data.mths_since_last_record.notnull(), 'public_record')] = 1
data.loc[(data.mths_since_last_record.isnull(), 'public_record')] = 0
data.loc[(data.last_pymnt_d.notnull(), 'pymnt_received')] = 1
data.loc[(data.last_pymnt_d.isnull(), 'pymnt_received')] = 0
return data
#=============
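# --- Illustrative sketch (not part of the extracted kernels) ---
# A compact equivalent of the 0/1 indicator assignments above: notnull() already yields
# booleans, so casting to int gives the same delinq/derog/public_record flags.
def add_missingness_flags(data):
    data = data.copy()
    data['delinq'] = data['mths_since_last_delinq'].notnull().astype(int)
    data['derog'] = data['mths_since_last_major_derog'].notnull().astype(int)
    data['public_record'] = data['mths_since_last_record'].notnull().astype(int)
    return data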
# Function 280
def cleaning_func_0(loan):
# core cleaning code
import pandas as pd
# loan = pd.read_csv('../input/loan.csv')
del_cols = ['id', 'member_id', 'policy_code', 'url', 'zip_code', 'addr_state', 'pymnt_plan', 'emp_title', 'application_type', 'acc_now_delinq', 'title', 'collections_12_mths_ex_med', 'collection_recovery_fee']
loan = loan.drop(del_cols, axis=1)
loan = loan[(loan['loan_status'] != 'Current')]
loan['empl_exp'] = 'experienced'
return loan
#=============
# Function 281
def cleaning_func_1(loan):
# core cleaning code
import pandas as pd
# loan = pd.read_csv('../input/loan.csv')
del_cols = ['id', 'member_id', 'policy_code', 'url', 'zip_code', 'addr_state', 'pymnt_plan', 'emp_title', 'application_type', 'acc_now_delinq', 'title', 'collections_12_mths_ex_med', 'collection_recovery_fee']
loan = loan.drop(del_cols, axis=1)
return loan
#=============
# Function 282
def cleaning_func_2(loan):
# core cleaning code
import pandas as pd
# loan = pd.read_csv('../input/loan.csv')
del_cols = ['id', 'member_id', 'policy_code', 'url', 'zip_code', 'addr_state', 'pymnt_plan', 'emp_title', 'application_type', 'acc_now_delinq', 'title', 'collections_12_mths_ex_med', 'collection_recovery_fee']
loan = loan.drop(del_cols, axis=1)
loan = loan[(loan['loan_status'] != 'Current')]
return loan
#=============
# Function 283
def cleaning_func_3(loan):
# core cleaning code
import pandas as pd
# loan = pd.read_csv('../input/loan.csv')
del_cols = ['id', 'member_id', 'policy_code', 'url', 'zip_code', 'addr_state', 'pymnt_plan', 'emp_title', 'application_type', 'acc_now_delinq', 'title', 'collections_12_mths_ex_med', 'collection_recovery_fee']
loan = loan.drop(del_cols, axis=1)
loan = loan[(loan['loan_status'] != 'Current')]
loan = loan.drop('emp_length', axis=1)
loan['target'] = 0
return loan
#=============
# Function 284
def cleaning_func_4(loan):
# core cleaning code
import pandas as pd
# loan = pd.read_csv('../input/loan.csv')
del_cols = ['id', 'member_id', 'policy_code', 'url', 'zip_code', 'addr_state', 'pymnt_plan', 'emp_title', 'application_type', 'acc_now_delinq', 'title', 'collections_12_mths_ex_med', 'collection_recovery_fee']
loan = loan.drop(del_cols, axis=1)
loan = loan[(loan['loan_status'] != 'Current')]
loan = loan.drop('emp_length', axis=1)
mask = (loan.loan_status == 'Charged Off')
loan.loc[(mask, 'target')] = 1
return loan
#=============
# Function 285
def cleaning_func_5(loan):
# core cleaning code
import pandas as pd
# loan = pd.read_csv('../input/loan.csv')
del_cols = ['id', 'member_id', 'policy_code', 'url', 'zip_code', 'addr_state', 'pymnt_plan', 'emp_title', 'application_type', 'acc_now_delinq', 'title', 'collections_12_mths_ex_med', 'collection_recovery_fee']
loan = loan.drop(del_cols, axis=1)
loan = loan[(loan['loan_status'] != 'Current')]
loan.loc[((loan['emp_length'] == '< 1 year'), 'empl_exp')] = 'inexp'
loan.loc[((loan['emp_length'] == '1 year'), 'empl_exp')] = 'new'
loan.loc[((loan['emp_length'] == '2 years'), 'empl_exp')] = 'new'
loan.loc[((loan['emp_length'] == '3 years'), 'empl_exp')] = 'new'
loan.loc[((loan['emp_length'] == '4 years'), 'empl_exp')] = 'intermed'
loan.loc[((loan['emp_length'] == '5 years'), 'empl_exp')] = 'intermed'
loan.loc[((loan['emp_length'] == '6 years'), 'empl_exp')] = 'intermed'
loan.loc[((loan['emp_length'] == '7 years'), 'empl_exp')] = 'seasoned'
loan.loc[((loan['emp_length'] == '8 years'), 'empl_exp')] = 'seasoned'
loan.loc[((loan['emp_length'] == '9 years'), 'empl_exp')] = 'seasoned'
loan.loc[((loan['emp_length'] == 'n/a'), 'empl_exp')] = 'unknown'
return loan
#=============
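# --- Illustrative sketch (not part of the extracted kernels) ---
# The chained .loc assignments above implement a lookup table; the same buckets can be
# expressed as a single mapping ('10+ years' keeps the default 'experienced').
EMP_EXP_MAP = {
    '< 1 year': 'inexp', '1 year': 'new', '2 years': 'new', '3 years': 'new',
    '4 years': 'intermed', '5 years': 'intermed', '6 years': 'intermed',
    '7 years': 'seasoned', '8 years': 'seasoned', '9 years': 'seasoned',
    'n/a': 'unknown',
}

def bucket_employment_length(loan):
    loan = loan.copy()
    loan['empl_exp'] = loan['emp_length'].map(EMP_EXP_MAP).fillna('experienced')
    return loan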
# Function 286
def cleaning_func_0(df_loan):
# core cleaning code
import pandas as pd
# df_loan = pd.read_csv('../input/loan.csv', low_memory=False)
df_loan['issue_date'] = df_loan['issue_d']
return df_loan
#=============
# Function 287
def cleaning_func_1(df_loan):
# core cleaning code
import pandas as pd
# df_loan = pd.read_csv('../input/loan.csv', low_memory=False)
df_loan['issue_d'] = pd.to_datetime(df_loan['issue_d'])
return df_loan
#=============
# Function 288
def cleaning_func_3(df_loan):
# core cleaning code
import pandas as pd
# df_loan = pd.read_csv('../input/loan.csv', low_memory=False)
df_loan['issue_d'] = pd.to_datetime(df_loan['issue_d'])
df_loan.index = df_loan['issue_d']
return df_loan
#=============
# Function 289
def cleaning_func_4(df_loan):
# core cleaning code
import pandas as pd
# df_loan = pd.read_csv('../input/loan.csv', low_memory=False)
df_loan['issue_d'] = pd.to_datetime(df_loan['issue_d'])
df_loan.index = df_loan['issue_d']  # date-based row slicing requires a DatetimeIndex on issue_d
df_loan_dt = df_loan[slice('2010-01-01', '2015-12-01', None)]
df_loan_dt['emp_title'] = df_loan_dt['emp_title'].replace({'RN': 'Registered Nurse'})
df_loan_dt['emp_title'] = df_loan_dt['emp_title'].replace({'manager': 'Manager'})
df_loan_dt['emp_title'] = df_loan_dt['emp_title'].replace({'driver': 'Driver'})
df_loan_dt['emp_title'] = df_loan_dt['emp_title'].replace({'supervisor': 'Supervisor'})
df_loan_dt['emp_title'] = df_loan_dt['emp_title'].replace({'owner': 'Owner'})
return df_loan_dt
#=============
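# --- Illustrative sketch (not part of the extracted kernels) ---
# The five chained .replace calls above can be collapsed into one dictionary replace.
def normalize_emp_titles(df_loan_dt):
    title_map = {'RN': 'Registered Nurse', 'manager': 'Manager', 'driver': 'Driver',
                 'supervisor': 'Supervisor', 'owner': 'Owner'}
    df_loan_dt = df_loan_dt.copy()
    df_loan_dt['emp_title'] = df_loan_dt['emp_title'].replace(title_map)
    return df_loan_dt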
```
#### File: executability-results/loan_data_v2/source_code.py
```python
def cleaning_func_0(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['90day_worse_rating'] = np.where(loan['mths_since_last_major_derog'].isnull(), 0, 1)
return loan
#=============
# Function 1
def cleaning_func_1(loan):
# core cleaning code
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['revol_util'] = loan['revol_util'].fillna(loan['revol_util'].median())
return loan
#=============
# Function 2
def cleaning_func_2(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['emp_title'] = np.where(loan['emp_title'].isnull(), 'Job title not given', loan['emp_title'])
return loan
#=============
# Function 3
def cleaning_func_3(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['acc_now_delinq'] = np.where(loan['acc_now_delinq'].isnull(), 0, loan['acc_now_delinq'])
return loan
#=============
# Function 4
def cleaning_func_4(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['delinq_2yrs'] = np.where(loan['delinq_2yrs'].isnull(), 0, loan['delinq_2yrs'])
return loan
#=============
# Function 5
def cleaning_func_5(loan):
# core cleaning code
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['tot_coll_amt'] = loan['tot_coll_amt'].fillna(loan['tot_coll_amt'].median())
return loan
#=============
# Function 6
def cleaning_func_6(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['title'] = np.where(loan['title'].isnull(), 0, loan['title'])
return loan
#=============
# Function 7
def cleaning_func_7(loan):
# core cleaning code
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['total_rev_hi_lim'] = loan['total_rev_hi_lim'].fillna(loan['total_rev_hi_lim'].median())
return loan
#=============
# Function 8
def cleaning_func_8(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['inq_last_6mths'] = np.where(loan['inq_last_6mths'].isnull(), 0, loan['inq_last_6mths'])
return loan
#=============
# Function 9
def cleaning_func_9(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['total_acc'] = np.where(loan['total_acc'].isnull(), 0, loan['total_acc'])
return loan
#=============
# Function 10
def cleaning_func_10(loan):
# core cleaning code
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['annual_inc'] = loan['annual_inc'].fillna(loan['annual_inc'].median())
return loan
#=============
# Function 11
def cleaning_func_11(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['open_acc'] = np.where(loan['open_acc'].isnull(), 0, loan['open_acc'])
return loan
#=============
# Function 12
def cleaning_func_12(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['collections_12_mths_ex_med'] = np.where(loan['collections_12_mths_ex_med'].isnull(), 0, loan['collections_12_mths_ex_med'])
return loan
#=============
# Function 13
def cleaning_func_13(loan):
# core cleaning code
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['tot_cur_bal'] = loan['tot_cur_bal'].fillna(loan['tot_cur_bal'].median())
return loan
#=============
# Function 14
def cleaning_func_14(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['pub_rec'] = np.where(loan['pub_rec'].isnull(), 0, loan['pub_rec'])
return loan
#=============
# Function 15
def cleaning_func_15(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['mths_since_last_delinq'] = np.where(loan['mths_since_last_delinq'].isnull(), 188, loan['mths_since_last_delinq'])
return loan
#=============
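# --- Illustrative sketch (not part of the extracted kernels) ---
# The median-imputation helpers above all follow the same pattern; a single loop covers
# the five columns they touch.
def fill_with_median(loan, cols=('revol_util', 'tot_coll_amt', 'total_rev_hi_lim',
                                 'annual_inc', 'tot_cur_bal')):
    loan = loan.copy()
    for c in cols:
        loan[c] = loan[c].fillna(loan[c].median())
    return loan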
# Function 16
def cleaning_func_0(ld):
# core cleaning code
import pandas as pd
# ld = pd.read_csv('../input/loan.csv', low_memory=False, parse_dates=True)
pct_full = (ld.count() / len(ld))
names = list(pct_full[(pct_full > 0.75)].index)
loan = ld[names]
loan['pct_paid'] = (loan.out_prncp / loan.loan_amnt)
return loan
#=============
# Function 17
def cleaning_func_1(ld):
# core cleaning code
import pandas as pd
# ld = pd.read_csv('../input/loan.csv', low_memory=False, parse_dates=True)
pct_full = (ld.count() / len(ld))
names = list(pct_full[(pct_full > 0.75)].index)
loan = ld[names]
loan['issue_mo'] = loan.issue_d.str[slice(0, 3, None)]
return loan
#=============
# Function 18
def cleaning_func_2(ld):
# core cleaning code
import pandas as pd
# ld = pd.read_csv('../input/loan.csv', low_memory=False, parse_dates=True)
pct_full = (ld.count() / len(ld))
names = list(pct_full[(pct_full > 0.75)].index)
loan = ld[names]
loan['issue_year'] = loan.issue_d.str[slice(4, None, None)]
return loan
#=============
# Function 19
def cleaning_func_0(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data['bad_loan'] = 0
return data
#=============
# Function 20
def cleaning_func_1(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
bad_indicators = ['Charged Off', 'Default', 'Does not meet the credit policy. Status:Charged Off', 'In Grace Period', 'Default Receiver', 'Late (16-30 days)', 'Late (31-120 days)']
data.loc[(data.loan_status.isin(bad_indicators), 'bad_loan')] = 1
return data
#=============
# Function 21
def cleaning_func_2(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data['issue_dt'] = pd.to_datetime(data.issue_d)
return data
#=============
# Function 22
def cleaning_func_3(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data['issue_dt'] = pd.to_datetime(data.issue_d)
data['month'] = data['issue_dt'].dt.month
return data
#=============
# Function 23
def cleaning_func_4(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data['issue_dt'] = pd.to_datetime(data.issue_d)
data['year'] = data['issue_dt'].dt.year
return data
#=============
# Function 24
def cleaning_func_0(loans):
# core cleaning code
import pandas as pd
date = ['issue_d', 'last_pymnt_d']
cols = ['issue_d', 'term', 'int_rate', 'loan_amnt', 'total_pymnt', 'last_pymnt_d', 'sub_grade', 'grade', 'loan_status']
# loans = pd.read_csv('../input/loan.csv', low_memory=False, parse_dates=date, usecols=cols, infer_datetime_format=True)
latest = loans['issue_d'].max()
finished_bool = (((loans['issue_d'] < (latest - pd.DateOffset(years=3))) & (loans['term'] == ' 36 months')) | ((loans['issue_d'] < (latest - pd.DateOffset(years=5))) & (loans['term'] == ' 60 months')))
finished_loans = loans.loc[finished_bool]
finished_loans['roi'] = (((finished_loans.total_pymnt / finished_loans.loan_amnt) - 1) * 100)
return finished_loans
#=============
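# --- Illustrative usage sketch (not part of the extracted kernels) ---
# The name cleaning_func_0 is reused by several kernels in this dump, so capture a
# reference to the finished-loan ROI helper defined directly above before it is rebound.
finished_loan_roi = cleaning_func_0

def example_roi_by_grade(path='../input/loan.csv'):
    # hypothetical example; assumes the LendingClub CSV exists at `path`
    import pandas as pd
    cols = ['issue_d', 'term', 'int_rate', 'loan_amnt', 'total_pymnt',
            'last_pymnt_d', 'sub_grade', 'grade', 'loan_status']
    loans = pd.read_csv(path, low_memory=False,
                        parse_dates=['issue_d', 'last_pymnt_d'], usecols=cols)
    finished = finished_loan_roi(loans)
    return finished.groupby('grade')['roi'].mean()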
# Function 25
def cleaning_func_0(df):
# core cleaning code
import pandas as pd
badLoan = ['Charged Off', 'Default', 'Late (31-120 days)', 'Late (16-30 days)', 'In Grace Period', 'Does not meet the credit policy. Status:Charged Off']
# df = pd.read_csv('../input/loan.csv', usecols=['loan_status', 'addr_state'])
df['isBad'] = [(1 if (x in badLoan) else 0) for x in df.loan_status]
return df
#=============
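# --- Illustrative sketch (not part of the extracted kernels) ---
# Vectorized equivalent of the list comprehension above, using the same status list.
def flag_bad_loans(df):
    bad = ['Charged Off', 'Default', 'Late (31-120 days)', 'Late (16-30 days)',
           'In Grace Period', 'Does not meet the credit policy. Status:Charged Off']
    df = df.copy()
    df['isBad'] = df['loan_status'].isin(bad).astype(int)
    return df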
# Function 26
def cleaning_func_4(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_amnt', 'addr_state'])
perStatedf = df.groupby('addr_state', as_index=False).count().sort_values(by='loan_amnt', ascending=False)
perStatedf.columns = ['State', 'Num_Loans']
return perStatedf
#=============
# Function 27
def cleaning_func_5(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_amnt', 'addr_state'])
return df.groupby('addr_state', as_index=False).count()
#=============
# Function 28
def cleaning_func_6(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_amnt', 'addr_state'])
perStatedf = df.groupby('addr_state', as_index=False).sum().sort_values(by='loan_amnt', ascending=False)
perStatedf.columns = ['State', 'loan_amt']
return perStatedf
#=============
# Function 29
def cleaning_func_8(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_status', 'addr_state'])
perStatedf = df.groupby('addr_state', as_index=False).sum().sort_values(by='isBad', ascending=False)
perStatedf.columns = ['State', 'badLoans']
return perStatedf
#=============
# Function 30
def cleaning_func_10(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_status', 'addr_state'])
perStatedf = df.groupby('addr_state', as_index=False).count().sort_values(by='loan_status', ascending=False)
perStatedf.columns = ['State', 'totalLoans']
return perStatedf
#=============
# Function 31
def cleaning_func_14(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_amnt', 'addr_state'])
perStatedf = df.groupby('addr_state', as_index=False).count().sort_values(by='loan_amnt', ascending=False)
return perStatedf
#=============
# Function 32
def cleaning_func_15(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_amnt', 'addr_state'])
perStatedf = df.groupby('addr_state', as_index=False).count().sort_values(by='loan_amnt', ascending=False)
perStatedf.columns = ['State', 'Num_Loans']
statePop = {'CA': 39144818, 'TX': 27469144, 'FL': 20271878, 'NY': 19795791, 'IL': 12859995, 'PA': 12802503, 'OH': 11613423, 'GA': 10214860, 'NC': 10042802, 'MI': 9922576, 'NJ': 8958013, 'VA': 8382993, 'WA': 7170351, 'AZ': 6828065, 'MA': 6794422, 'IN': 6619680, 'TN': 6600299, 'MO': 6083672, 'MD': 6006401, 'WI': 5771337, 'MN': 5489594, 'CO': 5456574, 'SC': 4896146, 'AL': 4858979, 'LA': 4670724, 'KY': 4425092, 'OR': 4028977, 'OK': 3911338, 'CT': 3890886, 'IA': 3123899, 'UT': 2995919, 'MS': 2992333, 'AR': 2978204, 'KS': 2911641, 'NV': 2890845, 'NM': 2085109, 'NE': 1896190, 'WV': 1844128, 'ID': 1654930, 'HI': 1431603, 'NH': 1330608, 'ME': 1329328, 'RI': 1053298, 'MT': 1032949, 'DE': 945934, 'SD': 858469, 'ND': 756927, 'AK': 738432, 'DC': 672228, 'VT': 626042, 'WY': 586107}
statePopdf = pd.DataFrame.from_dict(statePop, orient='index').reset_index()
statePopdf.columns = ['State', 'Pop']
perStatedf = pd.merge(perStatedf, statePopdf, on=['State'], how='inner')
perStatedf['PerCaptia'] = (perStatedf.Num_Loans / perStatedf.Pop)
return perStatedf
#=============
# Function 33
def cleaning_func_16(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_amnt', 'addr_state'])
perStatedf = df.groupby('addr_state', as_index=False).count().sort_values(by='loan_amnt', ascending=False)
statePop = {'CA': 39144818, 'TX': 27469144, 'FL': 20271878, 'NY': 19795791, 'IL': 12859995, 'PA': 12802503, 'OH': 11613423, 'GA': 10214860, 'NC': 10042802, 'MI': 9922576, 'NJ': 8958013, 'VA': 8382993, 'WA': 7170351, 'AZ': 6828065, 'MA': 6794422, 'IN': 6619680, 'TN': 6600299, 'MO': 6083672, 'MD': 6006401, 'WI': 5771337, 'MN': 5489594, 'CO': 5456574, 'SC': 4896146, 'AL': 4858979, 'LA': 4670724, 'KY': 4425092, 'OR': 4028977, 'OK': 3911338, 'CT': 3890886, 'IA': 3123899, 'UT': 2995919, 'MS': 2992333, 'AR': 2978204, 'KS': 2911641, 'NV': 2890845, 'NM': 2085109, 'NE': 1896190, 'WV': 1844128, 'ID': 1654930, 'HI': 1431603, 'NH': 1330608, 'ME': 1329328, 'RI': 1053298, 'MT': 1032949, 'DE': 945934, 'SD': 858469, 'ND': 756927, 'AK': 738432, 'DC': 672228, 'VT': 626042, 'WY': 586107}
return pd.DataFrame.from_dict(statePop, orient='index')
#=============
# Function 34
def cleaning_func_17(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_amnt', 'addr_state'])
perStatedf = df.groupby('addr_state', as_index=False).count().sort_values(by='loan_amnt', ascending=False)
statePop = {'CA': 39144818, 'TX': 27469144, 'FL': 20271878, 'NY': 19795791, 'IL': 12859995, 'PA': 12802503, 'OH': 11613423, 'GA': 10214860, 'NC': 10042802, 'MI': 9922576, 'NJ': 8958013, 'VA': 8382993, 'WA': 7170351, 'AZ': 6828065, 'MA': 6794422, 'IN': 6619680, 'TN': 6600299, 'MO': 6083672, 'MD': 6006401, 'WI': 5771337, 'MN': 5489594, 'CO': 5456574, 'SC': 4896146, 'AL': 4858979, 'LA': 4670724, 'KY': 4425092, 'OR': 4028977, 'OK': 3911338, 'CT': 3890886, 'IA': 3123899, 'UT': 2995919, 'MS': 2992333, 'AR': 2978204, 'KS': 2911641, 'NV': 2890845, 'NM': 2085109, 'NE': 1896190, 'WV': 1844128, 'ID': 1654930, 'HI': 1431603, 'NH': 1330608, 'ME': 1329328, 'RI': 1053298, 'MT': 1032949, 'DE': 945934, 'SD': 858469, 'ND': 756927, 'AK': 738432, 'DC': 672228, 'VT': 626042, 'WY': 586107}
statePopdf = pd.DataFrame.from_dict(statePop, orient='index').reset_index()
return statePopdf
#=============
# Function 35
def cleaning_func_18(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_amnt', 'addr_state'])
statePop = {'CA': 39144818, 'TX': 27469144, 'FL': 20271878, 'NY': 19795791, 'IL': 12859995, 'PA': 12802503, 'OH': 11613423, 'GA': 10214860, 'NC': 10042802, 'MI': 9922576, 'NJ': 8958013, 'VA': 8382993, 'WA': 7170351, 'AZ': 6828065, 'MA': 6794422, 'IN': 6619680, 'TN': 6600299, 'MO': 6083672, 'MD': 6006401, 'WI': 5771337, 'MN': 5489594, 'CO': 5456574, 'SC': 4896146, 'AL': 4858979, 'LA': 4670724, 'KY': 4425092, 'OR': 4028977, 'OK': 3911338, 'CT': 3890886, 'IA': 3123899, 'UT': 2995919, 'MS': 2992333, 'AR': 2978204, 'KS': 2911641, 'NV': 2890845, 'NM': 2085109, 'NE': 1896190, 'WV': 1844128, 'ID': 1654930, 'HI': 1431603, 'NH': 1330608, 'ME': 1329328, 'RI': 1053298, 'MT': 1032949, 'DE': 945934, 'SD': 858469, 'ND': 756927, 'AK': 738432, 'DC': 672228, 'VT': 626042, 'WY': 586107}
statePopdf = pd.DataFrame.from_dict(statePop, orient='index').reset_index()
perStatedf = df.groupby('addr_state', as_index=False).sum().sort_values(by='loan_amnt', ascending=False)
return perStatedf
#=============
# Function 36
def cleaning_func_19(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_amnt', 'addr_state'])
statePop = {'CA': 39144818, 'TX': 27469144, 'FL': 20271878, 'NY': 19795791, 'IL': 12859995, 'PA': 12802503, 'OH': 11613423, 'GA': 10214860, 'NC': 10042802, 'MI': 9922576, 'NJ': 8958013, 'VA': 8382993, 'WA': 7170351, 'AZ': 6828065, 'MA': 6794422, 'IN': 6619680, 'TN': 6600299, 'MO': 6083672, 'MD': 6006401, 'WI': 5771337, 'MN': 5489594, 'CO': 5456574, 'SC': 4896146, 'AL': 4858979, 'LA': 4670724, 'KY': 4425092, 'OR': 4028977, 'OK': 3911338, 'CT': 3890886, 'IA': 3123899, 'UT': 2995919, 'MS': 2992333, 'AR': 2978204, 'KS': 2911641, 'NV': 2890845, 'NM': 2085109, 'NE': 1896190, 'WV': 1844128, 'ID': 1654930, 'HI': 1431603, 'NH': 1330608, 'ME': 1329328, 'RI': 1053298, 'MT': 1032949, 'DE': 945934, 'SD': 858469, 'ND': 756927, 'AK': 738432, 'DC': 672228, 'VT': 626042, 'WY': 586107}
statePopdf = pd.DataFrame.from_dict(statePop, orient='index').reset_index()
statePopdf.columns = ['State', 'Pop']
perStatedf = df.groupby('addr_state', as_index=False).sum().sort_values(by='loan_amnt', ascending=False)
perStatedf.columns = ['State', 'loan_amt']
perStatedf = pd.merge(perStatedf, statePopdf, on=['State'], how='inner')
perStatedf['PerCaptia'] = (perStatedf.loan_amt / perStatedf.Pop)
return perStatedf
#=============
# Function 37
def cleaning_func_20(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_amnt', 'addr_state'])
statePop = {'CA': 39144818, 'TX': 27469144, 'FL': 20271878, 'NY': 19795791, 'IL': 12859995, 'PA': 12802503, 'OH': 11613423, 'GA': 10214860, 'NC': 10042802, 'MI': 9922576, 'NJ': 8958013, 'VA': 8382993, 'WA': 7170351, 'AZ': 6828065, 'MA': 6794422, 'IN': 6619680, 'TN': 6600299, 'MO': 6083672, 'MD': 6006401, 'WI': 5771337, 'MN': 5489594, 'CO': 5456574, 'SC': 4896146, 'AL': 4858979, 'LA': 4670724, 'KY': 4425092, 'OR': 4028977, 'OK': 3911338, 'CT': 3890886, 'IA': 3123899, 'UT': 2995919, 'MS': 2992333, 'AR': 2978204, 'KS': 2911641, 'NV': 2890845, 'NM': 2085109, 'NE': 1896190, 'WV': 1844128, 'ID': 1654930, 'HI': 1431603, 'NH': 1330608, 'ME': 1329328, 'RI': 1053298, 'MT': 1032949, 'DE': 945934, 'SD': 858469, 'ND': 756927, 'AK': 738432, 'DC': 672228, 'VT': 626042, 'WY': 586107}
statePopdf = pd.DataFrame.from_dict(statePop, orient='index').reset_index()
return df.groupby('addr_state', as_index=False).sum()
#=============
# Function 38
def cleaning_func_21(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_amnt', 'addr_state'])
statePop = {'CA': 39144818, 'TX': 27469144, 'FL': 20271878, 'NY': 19795791, 'IL': 12859995, 'PA': 12802503, 'OH': 11613423, 'GA': 10214860, 'NC': 10042802, 'MI': 9922576, 'NJ': 8958013, 'VA': 8382993, 'WA': 7170351, 'AZ': 6828065, 'MA': 6794422, 'IN': 6619680, 'TN': 6600299, 'MO': 6083672, 'MD': 6006401, 'WI': 5771337, 'MN': 5489594, 'CO': 5456574, 'SC': 4896146, 'AL': 4858979, 'LA': 4670724, 'KY': 4425092, 'OR': 4028977, 'OK': 3911338, 'CT': 3890886, 'IA': 3123899, 'UT': 2995919, 'MS': 2992333, 'AR': 2978204, 'KS': 2911641, 'NV': 2890845, 'NM': 2085109, 'NE': 1896190, 'WV': 1844128, 'ID': 1654930, 'HI': 1431603, 'NH': 1330608, 'ME': 1329328, 'RI': 1053298, 'MT': 1032949, 'DE': 945934, 'SD': 858469, 'ND': 756927, 'AK': 738432, 'DC': 672228, 'VT': 626042, 'WY': 586107}
return pd.DataFrame.from_dict(statePop, orient='index')
#=============
# Function 39
def cleaning_func_23(df):
# core cleaning code
import pandas as pd
statePop = {'CA': 39144818, 'TX': 27469144, 'FL': 20271878, 'NY': 19795791, 'IL': 12859995, 'PA': 12802503, 'OH': 11613423, 'GA': 10214860, 'NC': 10042802, 'MI': 9922576, 'NJ': 8958013, 'VA': 8382993, 'WA': 7170351, 'AZ': 6828065, 'MA': 6794422, 'IN': 6619680, 'TN': 6600299, 'MO': 6083672, 'MD': 6006401, 'WI': 5771337, 'MN': 5489594, 'CO': 5456574, 'SC': 4896146, 'AL': 4858979, 'LA': 4670724, 'KY': 4425092, 'OR': 4028977, 'OK': 3911338, 'CT': 3890886, 'IA': 3123899, 'UT': 2995919, 'MS': 2992333, 'AR': 2978204, 'KS': 2911641, 'NV': 2890845, 'NM': 2085109, 'NE': 1896190, 'WV': 1844128, 'ID': 1654930, 'HI': 1431603, 'NH': 1330608, 'ME': 1329328, 'RI': 1053298, 'MT': 1032949, 'DE': 945934, 'SD': 858469, 'ND': 756927, 'AK': 738432, 'DC': 672228, 'VT': 626042, 'WY': 586107}
statePopdf = pd.DataFrame.from_dict(statePop, orient='index').reset_index()
# df = pd.read_csv('../input/loan.csv', usecols=['loan_status', 'addr_state'])
perStatedf = df.groupby('addr_state', as_index=False).sum().sort_values(by='isBad', ascending=False)
return perStatedf
#=============
# Function 40
def cleaning_func_24(df):
# core cleaning code
import pandas as pd
statePop = {'CA': 39144818, 'TX': 27469144, 'FL': 20271878, 'NY': 19795791, 'IL': 12859995, 'PA': 12802503, 'OH': 11613423, 'GA': 10214860, 'NC': 10042802, 'MI': 9922576, 'NJ': 8958013, 'VA': 8382993, 'WA': 7170351, 'AZ': 6828065, 'MA': 6794422, 'IN': 6619680, 'TN': 6600299, 'MO': 6083672, 'MD': 6006401, 'WI': 5771337, 'MN': 5489594, 'CO': 5456574, 'SC': 4896146, 'AL': 4858979, 'LA': 4670724, 'KY': 4425092, 'OR': 4028977, 'OK': 3911338, 'CT': 3890886, 'IA': 3123899, 'UT': 2995919, 'MS': 2992333, 'AR': 2978204, 'KS': 2911641, 'NV': 2890845, 'NM': 2085109, 'NE': 1896190, 'WV': 1844128, 'ID': 1654930, 'HI': 1431603, 'NH': 1330608, 'ME': 1329328, 'RI': 1053298, 'MT': 1032949, 'DE': 945934, 'SD': 858469, 'ND': 756927, 'AK': 738432, 'DC': 672228, 'VT': 626042, 'WY': 586107}
statePopdf = pd.DataFrame.from_dict(statePop, orient='index').reset_index()
statePopdf.columns = ['State', 'Pop']
# df = pd.read_csv('../input/loan.csv', usecols=['loan_status', 'addr_state'])
perStatedf = df.groupby('addr_state', as_index=False).sum().sort_values(by='isBad', ascending=False)
perStatedf.columns = ['State', 'badLoans']
perStatedf = pd.merge(perStatedf, statePopdf, on=['State'], how='inner')
perStatedf['PerCaptia'] = (perStatedf.badLoans / perStatedf.Pop)
return perStatedf
#=============
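# --- Illustrative sketch (not part of the extracted kernels) ---
# Consolidates the per-state / per-capita pattern used above: the caller passes a
# two-column population frame (State, Pop) such as the one built from statePop.
def per_capita(df, pop_df, value_col='loan_amnt', agg='sum'):
    import pandas as pd
    per_state = df.groupby('addr_state', as_index=False)[value_col].agg(agg)
    per_state.columns = ['State', value_col]
    merged = pd.merge(per_state, pop_df, on='State', how='inner')
    merged['PerCaptia'] = merged[value_col] / merged['Pop']  # column name kept from the kernels above
    return merged.sort_values('PerCaptia', ascending=False)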
# Function 41
def cleaning_func_27(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_status', 'addr_state'])
perStatedf = df.groupby('addr_state', as_index=False).count().sort_values(by='loan_status', ascending=False)
return perStatedf
#=============
# Function 42
def cleaning_func_28(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_status', 'addr_state'])
perStatedf = df.groupby('addr_state', as_index=False)['loan_status'].count().sort_values(by='loan_status', ascending=False)
perStatedf.columns = ['State', 'totalLoans']
badLoansdf = df.groupby('addr_state', as_index=False)['isBad'].sum().sort_values(by='isBad', ascending=False)
badLoansdf.columns = ['State', 'badLoans']
perStatedf = pd.merge(perStatedf, badLoansdf, on=['State'], how='inner')
perStatedf['percentBadLoans'] = ((perStatedf.badLoans / perStatedf.totalLoans) * 100)
return perStatedf
#=============
# Function 43
def cleaning_func_29(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_status', 'addr_state'])
perStatedf = df.groupby('addr_state', as_index=False).count().sort_values(by='loan_status', ascending=False)
badLoansdf = df.groupby('addr_state', as_index=False).sum().sort_values(by='isBad', ascending=False)
return badLoansdf
#=============
# Function 44
def cleaning_func_0(loan):
# core cleaning code
import pandas as pd
from collections import Counter
# loan = pd.read_csv('../input/loan.csv')
loan = loan[(loan.loan_status != 'Current')]
c = Counter(list(loan.loan_status))
mmp = {x[0]: 1 for x in c.most_common(20)}
loan['target'] = loan['loan_status'].map(mmp)
return loan
#=============
# Function 45
def cleaning_func_0(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv', parse_dates=True)
data = data[(data.loan_status != 'Fully Paid')]
data = data[(data.loan_status != 'Does not meet the credit policy. Status:Fully Paid')]
data.last_credit_pull_d = pd.to_datetime(data.last_credit_pull_d)
return data
#=============
# Function 46
def cleaning_func_1(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv', parse_dates=True)
data = data[(data.loan_status != 'Fully Paid')]
return data
#=============
# Function 47
def cleaning_func_2(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv', parse_dates=True)
data = data[(data.loan_status != 'Fully Paid')]
data = data[(data.loan_status != 'Does not meet the credit policy. Status:Fully Paid')]
data.next_pymnt_d = pd.to_datetime(data.next_pymnt_d)
return data
#=============
# Function 48
def cleaning_func_3(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv', parse_dates=True)
data = data[(data.loan_status != 'Fully Paid')]
data = data[(data.loan_status != 'Does not meet the credit policy. Status:Fully Paid')]
data.issue_d = pd.to_datetime(data.issue_d)
return data
#=============
# Function 49
def cleaning_func_4(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv', parse_dates=True)
data = data[(data.loan_status != 'Fully Paid')]
data = data[(data.loan_status != 'Does not meet the credit policy. Status:Fully Paid')]
data.last_pymnt_d = pd.to_datetime(data.last_pymnt_d)
return data
#=============
# Function 50
def cleaning_func_5(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv', parse_dates=True)
data = data[(data.loan_status != 'Fully Paid')]
data = data[(data.loan_status != 'Does not meet the credit policy. Status:Fully Paid')]
earliest_cr_line = pd.to_datetime(data.earliest_cr_line)
data.earliest_cr_line = earliest_cr_line.dt.year
return data
#=============
# Function 51
def cleaning_func_6(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv', parse_dates=True)
data = data[(data.loan_status != 'Fully Paid')]
data = data[(data.loan_status != 'Does not meet the credit policy. Status:Fully Paid')]
data['rating'] = np.where((data.loan_status != 'Current'), 1, 0)
return data
#=============
# Function 52
def cleaning_func_8(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv', parse_dates=True)
data = data[(data.loan_status != 'Fully Paid')]
data = data[(data.loan_status != 'Does not meet the credit policy. Status:Fully Paid')]
data.emp_length = data.emp_length.replace(np.nan, 0)
return data
#=============
# Function 53
def cleaning_func_9(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv', parse_dates=True)
data = data[(data.loan_status != 'Fully Paid')]
data = data[(data.loan_status != 'Does not meet the credit policy. Status:Fully Paid')]
data['recovery'] = np.where((data.recoveries != 0.0), 1, 0)
return data
#=============
# Function 54
def cleaning_func_10(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv', parse_dates=True)
data = data[(data.loan_status != 'Fully Paid')]
data = data[(data.loan_status != 'Does not meet the credit policy. Status:Fully Paid')]
data.dti_joint = data.dti_joint.replace(np.nan, 0)
return data
#=============
# Function 55
def cleaning_func_11(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv', parse_dates=True)
data = data[(data.loan_status != 'Fully Paid')]
data = data[(data.loan_status != 'Does not meet the credit policy. Status:Fully Paid')]
data.annual_inc_joint = data.annual_inc_joint.replace(np.nan, 0)
return data
#=============
# Function 56
def cleaning_func_12(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv', parse_dates=True)
data = data[(data.loan_status != 'Fully Paid')]
data = data[(data.loan_status != 'Does not meet the credit policy. Status:Fully Paid')]
data.verification_status_joint = data.verification_status_joint.replace(np.nan, 'None')
return data
#=============
# Function 57
def cleaning_func_13(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv', parse_dates=True)
data = data[(data.loan_status != 'Fully Paid')]
data = data[(data.loan_status != 'Does not meet the credit policy. Status:Fully Paid')]
# 'e' is expected to be a column name defined in the calling scope; fill its missing values with 0
data[e] = data[e].replace(np.nan, 0)
data.loc[(data.mths_since_last_major_derog.notnull(), 'derog')] = 1
data.loc[(data.mths_since_last_major_derog.isnull(), 'derog')] = 0
return data
#=============
# Function 58
def cleaning_func_14(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv', parse_dates=True)
data = data[(data.loan_status != 'Fully Paid')]
data = data[(data.loan_status != 'Does not meet the credit policy. Status:Fully Paid')]
# 'e' is expected to be a column name defined in the calling scope; fill its missing values with 0
data[e] = data[e].replace(np.nan, 0)
data.loc[(data.mths_since_last_delinq.notnull(), 'delinq')] = 1
data.loc[(data.mths_since_last_delinq.isnull(), 'delinq')] = 0
return data
#=============
# Function 59
def cleaning_func_15(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv', parse_dates=True)
data = data[(data.loan_status != 'Fully Paid')]
data = data[(data.loan_status != 'Does not meet the credit policy. Status:Fully Paid')]
# 'e' is expected to be a column name defined in the calling scope; fill its missing values with 0
data[e] = data[e].replace(np.nan, 0)
data.loc[(data.mths_since_last_delinq.notnull(), 'delinq')] = 1
data.loc[(data.mths_since_last_delinq.isnull(), 'delinq')] = 0
data.loc[(data.mths_since_last_major_derog.notnull(), 'derog')] = 1
data.loc[(data.mths_since_last_major_derog.isnull(), 'derog')] = 0
data.loc[(data.mths_since_last_record.notnull(), 'public_record')] = 1
data.loc[(data.mths_since_last_record.isnull(), 'public_record')] = 0
return data
#=============
# Function 60
def cleaning_func_16(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv', parse_dates=True)
data = data[(data.loan_status != 'Fully Paid')]
data = data[(data.loan_status != 'Does not meet the credit policy. Status:Fully Paid')]
# 'e' is expected to be a column name defined in the calling scope; fill its missing values with 0
data[e] = data[e].replace(np.nan, 0)
data.loc[(data.mths_since_last_delinq.notnull(), 'delinq')] = 1
data.loc[(data.mths_since_last_delinq.isnull(), 'delinq')] = 0
data.loc[(data.mths_since_last_major_derog.notnull(), 'derog')] = 1
data.loc[(data.mths_since_last_major_derog.isnull(), 'derog')] = 0
data.loc[(data.mths_since_last_record.notnull(), 'public_record')] = 1
data.revol_util = data.revol_util.replace(np.nan, 0)
return data
#=============
# Function 61
def cleaning_func_17(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv', parse_dates=True)
data = data[(data.loan_status != 'Fully Paid')]
data = data[(data.loan_status != 'Does not meet the credit policy. Status:Fully Paid')]
data.last_pymnt_d = pd.to_datetime(data.last_pymnt_d)
# 'e' is expected to be a column name defined in the calling scope; fill its missing values with 0
data[e] = data[e].replace(np.nan, 0)
data.loc[(data.mths_since_last_delinq.notnull(), 'delinq')] = 1
data.loc[(data.mths_since_last_delinq.isnull(), 'delinq')] = 0
data.loc[(data.mths_since_last_major_derog.notnull(), 'derog')] = 1
data.loc[(data.mths_since_last_major_derog.isnull(), 'derog')] = 0
data.loc[(data.mths_since_last_record.notnull(), 'public_record')] = 1
data.loc[(data.last_pymnt_d.notnull(), 'pymnt_received')] = 1
data.loc[(data.last_pymnt_d.isnull(), 'pymnt_received')] = 0
return data
#=============
# Function 62
def cleaning_func_0(loan):
# core cleaning code
import pandas as pd
# loan = pd.read_csv('../input/loan.csv')
del_cols = ['id', 'member_id', 'policy_code', 'url', 'zip_code', 'addr_state', 'pymnt_plan', 'emp_title', 'application_type', 'acc_now_delinq', 'title', 'collections_12_mths_ex_med', 'collection_recovery_fee']
loan = loan.drop(del_cols, axis=1)
return loan
#=============
# Function 63
def cleaning_func_1(loan):
# core cleaning code
import pandas as pd
# loan = pd.read_csv('../input/loan.csv')
del_cols = ['id', 'member_id', 'policy_code', 'url', 'zip_code', 'addr_state', 'pymnt_plan', 'emp_title', 'application_type', 'acc_now_delinq', 'title', 'collections_12_mths_ex_med', 'collection_recovery_fee']
loan = loan.drop(del_cols, axis=1)
loan = loan[(loan['loan_status'] != 'Current')]
loan['empl_exp'] = 'experienced'
return loan
#=============
# Function 64
def cleaning_func_2(loan):
# core cleaning code
import pandas as pd
# loan = pd.read_csv('../input/loan.csv')
del_cols = ['id', 'member_id', 'policy_code', 'url', 'zip_code', 'addr_state', 'pymnt_plan', 'emp_title', 'application_type', 'acc_now_delinq', 'title', 'collections_12_mths_ex_med', 'collection_recovery_fee']
loan = loan.drop(del_cols, axis=1)
loan = loan[(loan['loan_status'] != 'Current')]
loan = loan.drop('emp_length', axis=1)
loan['target'] = 0
return loan
#=============
# Function 65
def cleaning_func_3(loan):
# core cleaning code
import pandas as pd
# loan = pd.read_csv('../input/loan.csv')
del_cols = ['id', 'member_id', 'policy_code', 'url', 'zip_code', 'addr_state', 'pymnt_plan', 'emp_title', 'application_type', 'acc_now_delinq', 'title', 'collections_12_mths_ex_med', 'collection_recovery_fee']
loan = loan.drop(del_cols, axis=1)
loan = loan[(loan['loan_status'] != 'Current')]
return loan
#=============
# Function 66
def cleaning_func_4(loan):
# core cleaning code
import pandas as pd
# loan = pd.read_csv('../input/loan.csv')
del_cols = ['id', 'member_id', 'policy_code', 'url', 'zip_code', 'addr_state', 'pymnt_plan', 'emp_title', 'application_type', 'acc_now_delinq', 'title', 'collections_12_mths_ex_med', 'collection_recovery_fee']
loan = loan.drop(del_cols, axis=1)
loan = loan[(loan['loan_status'] != 'Current')]
loan = loan.drop('emp_length', axis=1)
mask = (loan.loan_status == 'Charged Off')
loan.loc[(mask, 'target')] = 1
return loan
#=============
# Function 67
def cleaning_func_5(loan):
# core cleaning code
import pandas as pd
# loan = pd.read_csv('../input/loan.csv')
del_cols = ['id', 'member_id', 'policy_code', 'url', 'zip_code', 'addr_state', 'pymnt_plan', 'emp_title', 'application_type', 'acc_now_delinq', 'title', 'collections_12_mths_ex_med', 'collection_recovery_fee']
loan = loan.drop(del_cols, axis=1)
loan = loan[(loan['loan_status'] != 'Current')]
loan.loc[((loan['emp_length'] == '< 1 year'), 'empl_exp')] = 'inexp'
loan.loc[((loan['emp_length'] == '1 year'), 'empl_exp')] = 'new'
loan.loc[((loan['emp_length'] == '2 years'), 'empl_exp')] = 'new'
loan.loc[((loan['emp_length'] == '3 years'), 'empl_exp')] = 'new'
loan.loc[((loan['emp_length'] == '4 years'), 'empl_exp')] = 'intermed'
loan.loc[((loan['emp_length'] == '5 years'), 'empl_exp')] = 'intermed'
loan.loc[((loan['emp_length'] == '6 years'), 'empl_exp')] = 'intermed'
loan.loc[((loan['emp_length'] == '7 years'), 'empl_exp')] = 'seasoned'
loan.loc[((loan['emp_length'] == '8 years'), 'empl_exp')] = 'seasoned'
loan.loc[((loan['emp_length'] == '9 years'), 'empl_exp')] = 'seasoned'
loan.loc[((loan['emp_length'] == 'n/a'), 'empl_exp')] = 'unknown'
return loan
#=============
```
#### File: house_sales/converted_notebooks/kernel_121.py
```python
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
plt.figure(figsize=(20, 5))
from sklearn.model_selection import train_test_split
from sklearn import tree
from sklearn import linear_model
from sklearn.preprocessing import MinMaxScaler
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import r2_score
import seaborn as sns
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.feature_selection import RFE
def performance_metric(y_true, y_predict, normalize=True):
score = r2_score(y_true, y_predict)
return score
data = pd.read_csv("../input/kc_house_data.csv", encoding = "ISO-8859-1")
Y = data["price"]
X = data[["bedrooms", "bathrooms", "sqft_living", "sqft_lot", "floors", "waterfront", "view", "grade", "sqft_above", "sqft_basement", "yr_built", "yr_renovated", "zipcode", "lat", "long"]]
colnames = X.columns
#ranking columns
ranks = {}
def ranking(ranks, names, order=1):
minmax = MinMaxScaler()
ranks = minmax.fit_transform(order*np.array([ranks]).T).T[0]
ranks = map(lambda x: round(x,2), ranks)
return dict(zip(names, ranks))
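# Quick illustration of ranking(): it min-max scales the raw scores to [0, 1] and pairs
# them with the feature names (the values below are made up):
#   ranking([0.2, 0.5, 0.1], ['bedrooms', 'bathrooms', 'floors'])
#   -> {'bedrooms': 0.25, 'bathrooms': 1.0, 'floors': 0.0}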
for i, col in enumerate(X.columns):
# one subplot per feature: a 1 x 15 grid for the 15 feature columns
plt.subplot(1, 15, i+1)
x = X[col]
y = Y
plt.plot(x, y, 'o')
# Create regression line
plt.plot(np.unique(x), np.poly1d(np.polyfit(x, y, 1))(np.unique(x)))
plt.title(col)
plt.xlabel(col)
plt.ylabel('prices')
#Splitting the datasets
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=10)
#Models
#Decision Tree Regressor
DTR = tree.DecisionTreeRegressor()
DTR = DTR.fit(X_train,y_train)
ranks["DTR"] = ranking(np.abs(DTR.feature_importances_), colnames)
Y_target_DTR = DTR.predict(X_test)
#Decision Tree Classifier
DTC = DecisionTreeClassifier(max_depth=None, min_samples_split=2, random_state=0)
DTC = DTC.fit(X_train, y_train)
ranks["DTC"] = ranking(np.abs(DTC.feature_importances_), colnames)
Y_target_DTC = DTC.predict(X_test)
#LARS Lasso
LARS_L = linear_model.LassoLars(alpha=.4)
LARS_L = LARS_L.fit(X_train, y_train)
ranks["LARS_L"] = ranking(np.abs(LARS_L.coef_), colnames)
Y_target_lars_l = LARS_L.predict(X_test)
#Bayesian Ridge
BR = linear_model.BayesianRidge()
BR = BR.fit(X_train, y_train)
ranks["BR"] = ranking(np.abs(BR.coef_), colnames)
Y_target_BR = BR.predict(X_test)
#Random Forest Regressor
RFR = RandomForestRegressor(n_jobs=-1, n_estimators=50, verbose=0)
RFR = RFR.fit(X_train,y_train)
ranks["RFR"] = ranking(RFR.feature_importances_, colnames);
#print(ranks["RFR"])
Y_target_RFR = RFR.predict(X_test)
#Recursive Feature Elimination on Random Forest Regressor
RFE_RFR = RFE(RFR, n_features_to_select=10, step = 1)
RFE_RFR.fit(X_train,y_train)
Y_target_RFE_RFR = RFE_RFR.predict(X_test)
#Extra Trees Classifier
ETC = ExtraTreesClassifier(n_estimators=10, max_depth=None, min_samples_split=2, random_state=0)
ETC = ETC.fit(X_train, y_train)
ranks["ETC"] = ranking(np.abs(ETC.feature_importances_), colnames)
Y_target_ETC = ETC.predict(X_test)
#Recursive Feature Elimination on Decision Tree Regressor
RFE_DTR = RFE(DTR, n_features_to_select=10, step=1)  # renamed to avoid shadowing the RFE class
RFE_DTR.fit(X_train, y_train)
Y_target_RFE = RFE_DTR.predict(X_test)
#Ranking inputs
r = {}
for name in colnames:
r[name] = round(np.mean([ranks[method][name]
for method in ranks.keys()]), 2)
methods = sorted(ranks.keys())
ranks["Mean"] = r
methods.append("Mean")
print("\t%s" % "\t".join(methods))
for name in colnames:
print("%s\t%s" % (name, "\t".join(map(str,
[ranks[method][name] for method in methods]))))
#seaborn plot
#create dataframe
meanplot = pd.DataFrame(list(r.items()), columns= ['Feature','Mean Ranking'])
meanplot = meanplot.sort_values('Mean Ranking', ascending=False)
#plot proper
sns.factorplot(x="Mean Ranking", y="Feature", data = meanplot, kind="bar",
size=14, aspect=1.9, palette='coolwarm')
#R2 metrics for each model
print("\nR2 score, Decision Tree Regressor:")
print(performance_metric(y_test, Y_target_DTR))
print("\nR2 score, Decision Tree Classifier:")
print(performance_metric(y_test, Y_target_DTC))
print("\nR2 score, LARS Lasso:")
print(performance_metric(y_test, Y_target_lars_l))
print("\nR2 score, Bayesian Ridge:")
print(performance_metric(y_test, Y_target_BR))
print("\nR2 score, Random Forest Regressor:")
print(performance_metric(y_test, Y_target_RFR))
print("\nR2 score, Recursive Feature Eliminition on Random Forest Regressor:")
print(performance_metric(y_test, Y_target_RFE_RFR))
print("\nR2 score, Extra Trees Classifier:")
print(performance_metric(y_test, Y_target_ETC))
print("\nR2 score, Recursive Feature Eliminition on Decision Tree Regressor:")
print(performance_metric(y_test, Y_target_RFE))
# In[ ]:
```
#### File: house_sales/converted_notebooks/kernel_16.py
```python
def fc_vif(dfxxx):
from sklearn.linear_model import LinearRegression
df_vif = dfxxx.drop(["price"],axis=1)
for cname in df_vif.columns:
y=df_vif[cname]
X=df_vif.drop(cname, axis=1)
regr = LinearRegression(fit_intercept=True)
regr.fit(X, y)
rsquared = regr.score(X,y)
#print(cname,":" ,1/(1-np.power(rsquared,2)))
if rsquared == 1:
print(cname,X.columns[(regr.coef_> 0.5) | (regr.coef_ < -0.5)])
# In[2]:
###############################
### Variable selection: MAE / AIC
###############################
def fc_var(X, y):
from sklearn import linear_model
from sklearn.metrics import mean_squared_error, mean_absolute_error
from sklearn.feature_selection import SelectKBest,f_regression
N = len(X)
for k in range(1,len(X.columns)):
skb = SelectKBest(f_regression,k=k).fit(X,y)
sup = skb.get_support()
X_selected = X.transpose()[sup].transpose()
regr = linear_model.LinearRegression()
model = regr.fit(X_selected,y)
met = mean_absolute_error(model.predict(X_selected),y)
aic = N*np.log((met**2).sum()/N) + 2*k
print('k:',k,'MAE:',met,'AIC:',aic,X.columns[k])
# # 2. Loading the data
# In[2]:
# Load modules
get_ipython().run_line_magic('matplotlib', 'inline')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
pd.options.display.max_rows = 10 # always display only 10 rows
# In[4]:
# Load the data
#df000 = pd.read_csv("kc_house_data.csv")
df000 = pd.read_csv("../input/kc_house_data.csv")
display(df000.head())
# # 3. Data preprocessing
# In[5]:
df600 = df000.drop(['date'],axis=1) # drop the date column
# Show the correlation matrix
df600.corr().style.background_gradient().format("{:.2f}") # color-coded for readability
# In[6]:
# Detect multicollinearity: compute VIF
rc = fc_vif(df600)
# #### Drop sqft_basement
# Reason: sqft_basement + sqft_above = sqft_living, so it is strongly correlated with the other two,
# and because sqft_basement contains "0" values
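# A quick check of that identity on this dataset (sketch added for illustration; uses df600 defined above):
print('max |sqft_above + sqft_basement - sqft_living| :',
      (df600['sqft_above'] + df600['sqft_basement'] - df600['sqft_living']).abs().max())
print('rows where sqft_basement == 0 :', (df600['sqft_basement'] == 0).sum())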
# In[7]:
df700 = df600.drop(['sqft_basement','yr_renovated','zipcode','id'],axis=1)
for c in df700.columns: # loop over every column
    if (c != "price") & (c != "date"): # except price itself and the date
        df000[[c,"price"]].plot(kind="scatter",x=c,y="price") # scatter plot against price
# In[8]:
# Detect multicollinearity: compute VIF again
rc = fc_vif(df700)
# Multicollinearity is no longer detected
# In[9]:
df800 = df700
X = df800.drop(['price'],axis=1)
y = df800['price']
# Variable selection
rc = fc_var(X, y)
# In[10]:
from sklearn.linear_model import LinearRegression
regr = LinearRegression(fit_intercept=True).fit(X,y)
pd.Series(regr.coef_,index=X.columns).sort_values() .plot(kind='barh',figsize=(6,8))
# # 4. Lasso Regression
# Lasso automatically keeps only the features that look necessary and drops the rest
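# A minimal sketch of that idea (added for illustration; alpha is an arbitrary value, X / y are defined above):
from sklearn.linear_model import Lasso
from sklearn.preprocessing import StandardScaler
_lasso_demo = Lasso(alpha=1000.0).fit(StandardScaler().fit_transform(X), y)
print('features dropped by Lasso:', [c for c, w in zip(X.columns, _lasso_demo.coef_) if w == 0])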
# In[18]:
# Reset the data
df800 = df700
X = df800.drop(['price'],axis=1)
y = df800['price']
# In[19]:
# Grid search
from sklearn.model_selection import train_test_split # for splitting the data
from sklearn.model_selection import KFold # for cross-validation
from sklearn.model_selection import GridSearchCV # for grid search
from sklearn.metrics import confusion_matrix
# Split into training and test data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
# Split for cross-validation
kf = KFold(n_splits=5, random_state=1234, shuffle=True)
df_result = pd.DataFrame()
model001 = []
from sklearn.linear_model import Lasso # for Lasso regression
# The model whose hyperparameters we want to tune
model001 = Lasso()
# Hyperparameters to try
parms1 = [
    {"alpha":np.logspace(-3,1,100)},
]
grid_search = GridSearchCV(model001, # the model
                           param_grid = parms1, # the parameters to try
                           cv=10, # number of folds used to check generalization
                           )
grid_search.fit(X,y) # let the grid search find the best hyperparameters
print(grid_search.best_score_) # best score
print(grid_search.best_params_) # parameter combination that achieved it
print(grid_search.best_estimator_.get_params())
# In[20]:
from sklearn.linear_model import Lasso # for Lasso regression
from sklearn.metrics import mean_squared_error, mean_absolute_error # for MSE / MAE
from sklearn.model_selection import KFold # for cross-validation
from sklearn.model_selection import train_test_split # for splitting the data
#--------------------------------------------
# Data preparation: standardize each dimension of the explanatory variables X
#--------------------------------------------
from sklearn import preprocessing # for standardization
sc = preprocessing.StandardScaler()
sc.fit(X)
X = sc.transform(X)
#--------------------------------------------
# Split into training and test data
X_train,X_test,y_train,y_test = train_test_split(np.array(X),np.array(y),test_size=0.2,random_state=42)
kf = KFold(n_splits=5, random_state=1234, shuffle=True)
df_result = pd.DataFrame()
models = []
for i,(train_index, val_index) in enumerate(kf.split(X_train, y_train)):
X_train_train, X_train_val = X_train[train_index], X_train[val_index]
y_train_train, y_train_val = y_train[train_index], y_train[val_index]
    regr = Lasso(alpha=10.0, max_iter=1000, copy_X=True) # fit Lasso with the chosen hyperparameter
regr.fit(X_train_train, y_train_train)
models.append(regr)
y_pred = regr.predict(X_train_val)
df999 = pd.DataFrame({"y_val":y_train_val, "y_pred":y_pred})
df_result = pd.concat([df_result, df999], axis=0)
# Compute evaluation metrics on the validation data
y_val = df_result["y_val"]
y_pred = df_result["y_pred"]
mse = mean_squared_error(y_val, y_pred)
mae = mean_absolute_error(y_val, y_pred)
print("**** Training set score( {} ): MSE={:.3f} RMSE={:.3f} MAE={:.3f} Score={:.3f} ****".format(i,round(mse,3),round(np.sqrt(mse), 3),round(mae,3),regr.score(X_train, y_train)))
# In[21]:
#--------------------------------------------
# Cross-validation: evaluate on the test set
#--------------------------------------------
z = 1 # index of the model that did best during training
y_pred = models[z].predict(X_test)
mse = mean_squared_error(y_test, y_pred)
mae = mean_absolute_error(y_test, y_pred)
print("**** Test set score( {} ): MSE={:.3f} RMSE={:.3f} MAE={:.3f} Score={:.3f} ****".format(z,round(mse,3),round(np.sqrt(mse), 3),round(mae,3),regr.score(X_test, y_test)))
print("**** Number of features used: {} ****".format(np.sum(regr.coef_ != 0)))
# # 5. RandomForest
#
# No hyperparameters specified (defaults)
# In[22]:
# Reset the data
df800 = df700
X = df800.drop(['price'],axis=1)
y = df800['price']
base_model = []
# In[23]:
from sklearn.ensemble import RandomForestRegressor # RandomForest library
from sklearn.metrics import mean_squared_error, mean_absolute_error # for MSE / MAE
from sklearn.model_selection import KFold # for cross-validation
from sklearn.model_selection import train_test_split # for splitting the data
#--------------------------------------------
# Data preparation: standardize each dimension of the explanatory variables X
#--------------------------------------------
from sklearn import preprocessing # for standardization
sc = preprocessing.StandardScaler()
sc.fit(X)
X = sc.transform(X)
#--------------------------------------------
# Split into training and test data
X_train,X_test,y_train,y_test = train_test_split(np.array(X),np.array(y),test_size=0.2,random_state=42)
kf = KFold(n_splits=5, random_state=1234, shuffle=True)
df_result = pd.DataFrame()
base_model = []
for i,(train_index, val_index) in enumerate(kf.split(X_train, y_train)):
X_train_train, X_train_val = X_train[train_index], X_train[val_index]
y_train_train, y_train_val = y_train[train_index], y_train[val_index]
regr = RandomForestRegressor()
regr.fit(X_train_train, y_train_train)
base_model.append(regr)
y_pred = regr.predict(X_train_val)
df999 = pd.DataFrame({"y_val": y_train_val, "y_pred": y_pred})
df_result = pd.concat([df_result, df999], axis=0)
# Compute evaluation metrics on the validation data
y_val = df_result["y_val"]
y_pred = df_result["y_pred"]
mse = mean_squared_error(y_val, y_pred)
mae = mean_absolute_error(y_val, y_pred)
print("**** Training set score( {} ): MSE={:.3f} RMSE={:.3f} MAE={:.3f} Score={:.3f} ****".format(i,round(mse,3),round(np.sqrt(mse), 3),round(mae,3),regr.score(X_train, y_train)))
# In[25]:
#--------------------------------------------
# Cross-validation: evaluate on the test set
#--------------------------------------------
z = 0 # index of the model that did best during training
y_pred = base_model[z].predict(X_test)
mse = mean_squared_error(y_test, y_pred)
mae = mean_absolute_error(y_test, y_pred)
print("**** Test set score( {} ): MSE={:.3f} RMSE={:.3f} MAE={:.3f} Score={:.3f} ****".format(z,round(mse,3),round(np.sqrt(mse), 3),round(mae,3),regr.score(X_test, y_test)))
print('Parameters currently in use:')
from pprint import pprint
pprint(regr.get_params())
# Looks slightly overfit, so let's try tuning the hyperparameters
# # 6. RandomForest (hyperparameter tuning)
#
# * n_estimators = number of trees in the forest
# * max_features = maximum number of features considered when splitting a node
# * max_depth = maximum number of levels in each decision tree
# * min_samples_split = minimum number of data points placed in a node before the node is split
# * min_samples_leaf = minimum number of data points allowed in a leaf node
# * bootstrap = how data points are sampled (with or without replacement)
# In[26]:
# Reset the data
df800 = df700
X = df800.drop(['price'],axis=1)
y = df800['price']
base_model = []
# In[35]:
# Grid search
from sklearn.model_selection import train_test_split # for splitting the data
from sklearn.model_selection import KFold # for cross-validation
from sklearn.model_selection import GridSearchCV # for grid search
from sklearn.metrics import mean_squared_error, mean_absolute_error # for MSE / MAE
from sklearn.metrics import confusion_matrix
# Split into training and test data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
# Split for cross-validation
kf = KFold(n_splits=5, random_state=1234, shuffle=True)
df_result = pd.DataFrame()
# The model we want to tune
from sklearn.ensemble import RandomForestRegressor # RandomForest library
base_model = RandomForestRegressor()
# Hyperparameters to try
random_grid = {
'n_estimators':[10, 100, 200, 400],
'max_depth':[1, 9, 15],
'min_samples_leaf':[3, 5, 9],
'min_samples_split':[3, 5, 9],
'bootstrap':[True, False],
'n_jobs': [-1],
}
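# A cheaper alternative sketch (added here as an assumption-labelled option, not part of the original flow):
# RandomizedSearchCV samples only n_iter combinations from random_grid instead of trying all of them.
from sklearn.model_selection import RandomizedSearchCV
random_search = RandomizedSearchCV(base_model, random_grid, n_iter=10, cv=3, random_state=0)
# random_search.fit(X, y)   # left commented out so the notebook does not run two full searches
# print(random_search.best_params_)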
#pprint(random_grid)
print("-- GridSearch --")
# -------- Hyperparameter tuning
grid_search = GridSearchCV(base_model, # the model
                           random_grid, # the parameters to try
                           cv=3 # number of folds used to check generalization
                           )
grid_search.fit(X,y) # let the grid search find the best hyperparameters
print(grid_search.best_score_) # best score
print(grid_search.best_params_) # parameter combination that achieved it
pprint(grid_search.best_estimator_.get_params())
# In[49]:
# Cross-validation
from sklearn.ensemble import RandomForestRegressor # RandomForest library
from sklearn.metrics import mean_squared_error, mean_absolute_error # for MSE / MAE
from sklearn.model_selection import KFold # for cross-validation
from sklearn.model_selection import train_test_split # for splitting the data
# Split into training and test data
X_train,X_test,y_train,y_test = train_test_split(np.array(X),np.array(y),test_size=0.2,random_state=42)
kf = KFold(n_splits=5, random_state=1234, shuffle=True)
df_result = pd.DataFrame()
base_model = []
for i,(train_index, val_index) in enumerate(kf.split(X_train, y_train)):
X_train_train, X_train_val = X_train[train_index], X_train[val_index]
y_train_train, y_train_val = y_train[train_index], y_train[val_index]
regr = RandomForestRegressor(
bootstrap = True,
criterion = 'mse',
        max_depth = 7, # reduced from 15
max_features = 'auto',
max_leaf_nodes = None,
min_impurity_decrease = 0.0,
min_impurity_split = None,
min_samples_leaf = 3,
min_samples_split = 5,
min_weight_fraction_leaf = 0.0,
n_estimators = 400,
n_jobs = -1,
oob_score = False,
random_state = None,
verbose = 0,
warm_start = False,
)
regr.fit(X_train_train, y_train_train)
base_model.append(regr)
y_pred = regr.predict(X_train_val)
df999 = pd.DataFrame({"y_val": y_train_val, "y_pred": y_pred})
df_result = pd.concat([df_result, df999], axis=0)
# Compute evaluation metrics on the validation data
y_val = df_result["y_val"]
y_pred = df_result["y_pred"]
mse = mean_squared_error(y_val, y_pred)
mae = mean_absolute_error(y_val, y_pred)
print("**** Training set score( {} ): MSE={:.3f} RMSE={:.3f} MAE={:.3f} Score={:.3f} ****".format(i,round(mse,3),round(np.sqrt(mse), 3),round(mae,3),regr.score(X_train, y_train)))
# In[50]:
#--------------------------------------------
# Cross-validation: evaluate on the test set
#--------------------------------------------
z = 0 # index of the model that did best during training
y_pred = base_model[z].predict(X_test)
mse = mean_squared_error(y_test, y_pred)
mae = mean_absolute_error(y_test, y_pred)
print("**** Test set score( {} ): MSE={:.3f} RMSE={:.3f} MAE={:.3f} Score={:.3f} ****".format(z,round(mse,3),round(np.sqrt(mse), 3),round(mae,3),regr.score(X_test, y_test)))
print('Parameters currently in use:')
from pprint import pprint
pprint(regr.get_params())
# RandomForest can reach a high score, but it probably needs tuning to avoid overfitting
# In[ ]:
```
#### File: house_sales/converted_notebooks/kernel_195.py
```python
from pandas import read_csv
data = read_csv("../input/kc_house_data.csv")
# In[ ]:
data.head()
# In[ ]:
data.shape
# In[ ]:
data.isnull().any()
# In[ ]:
target = "price"
features = data.drop(target,1).columns
# In[ ]:
features_by_dtype = {}
for f in features:
dtype = str(data[f].dtype)
if dtype not in features_by_dtype.keys():
features_by_dtype[dtype] = [f]
else:
features_by_dtype[dtype] += [f]
for k in features_by_dtype.keys():
string = "%s: %s" % (k , len(features_by_dtype[k]))
print(string)
# In[ ]:
keys = iter(features_by_dtype.keys())
# In[ ]:
k = next(keys)
dtype_list = features_by_dtype[k]
for d in dtype_list:
string = "%s: %s" % (d,len(data[d].unique()))
print(string)
# In[ ]:
count_features = ["bedrooms"]
# In[ ]:
categorical_features = ["waterfront"]
# In[ ]:
count_features += ["view", "condition", "grade"]
# In[ ]:
categorical_features += ["zipcode"]
# In[ ]:
temporal_features = ["yr_renovated", "yr_built"]
# In[ ]:
numerical_features = [f for f in dtype_list if not f in categorical_features + temporal_features + ["id"]]
# In[ ]:
k = next(keys)
dtype_list = features_by_dtype[k]
for d in dtype_list:
string = "%s: %s" % (d,len(data[d].unique()))
print(string)
# In[ ]:
temporal_features += dtype_list
# In[ ]:
k = next(keys)
dtype_list = features_by_dtype[k]
for d in dtype_list:
string = "%s: %s" % (d,len(data[d].unique()))
print(string)
# In[ ]:
count_features += ["floors","bathrooms"]
# In[ ]:
numerical_features += dtype_list
# In[ ]:
numerical_features
# In[ ]:
count_features
# In[ ]:
categorical_features
# In[ ]:
temporal_features
# ---
# In[ ]:
from seaborn import countplot, axes_style
from matplotlib.pyplot import show,figure
from pandas import DataFrame
from IPython.display import display
with axes_style("whitegrid"):
for feature in categorical_features + count_features:
figure(figsize=(12.5,7))
ax = countplot(data[feature], color="dimgrey")
ax.set_title(feature)
ax.set_xlabel("", visible=False)
if data[feature].unique().size > 5: ax.set_xticklabels(ax.get_xticklabels(), rotation=90)
show()
display(DataFrame(data[feature].value_counts().apply(lambda x: x / len(data) * 100).round(2)).T)
# ---
# In[ ]:
from seaborn import distplot, boxplot, despine
from matplotlib.pyplot import subplot
from IPython.display import display
from pandas import DataFrame
def numeric_analysis(series):
no_nulls = series.dropna()
with axes_style({"axes.grid": False}):
cell_1 = subplot(211)
dp = distplot(no_nulls, kde=False)
dp.set_xlabel("",visible=False)
dp.set_yticklabels(dp.get_yticklabels(),visible=False)
despine(left = True)
cell_2 = subplot(212, sharex=cell_1)
boxplot(no_nulls)
despine(left=True)
show()
display(DataFrame(series.describe().round(2)).T)
# In[ ]:
for n in numerical_features:
numeric_analysis(data[data[n].notnull()][n])
# ---
# In[ ]:
from seaborn import lmplot
from matplotlib.pyplot import figure
for c in count_features:
lmplot(data=data, x="long", y="lat", fit_reg=False, hue=c, size=10)
show()
```
#### File: house_sales/converted_notebooks/kernel_30.py
```python
import pandas as pd
import numpy as np
from datetime import datetime
from dateutil.relativedelta import relativedelta
#import geohash
from catboost import CatBoostRegressor
import catboost
# # Feature engineering helpers
# In[ ]:
def returnYear(row):
if row['yr_renovated']!=0:
return datetime.strptime(str(row['yr_renovated']),'%Y')
else:
return row['yr_built']
def deltaInYearsAge(row):
difference = relativedelta(row['date'], row['yr_built'])
years = difference.years
return years
def deltaInYearsRenovated(row):
difference = relativedelta(row['yr_renovated'], row['yr_built'])
years = difference.years
return years
# # Since kaggle does not support geohash libraries, using one from git
# Original source https://github.com/vinsci/geohash/blob/master/Geohash/geohash.py
# The libraries gave a much better result of 0.96
# In[ ]:
from math import log10
__base32 = '0123456789bcdefghjkmnpqrstuvwxyz'
def geohashEncode(latitude, longitude, precision=12):
"""
Encode a position given in float arguments latitude, longitude to
a geohash which will have the character count precision.
"""
lat_interval, lon_interval = (-90.0, 90.0), (-180.0, 180.0)
geohash = []
bits = [ 16, 8, 4, 2, 1 ]
bit = 0
ch = 0
even = True
while len(geohash) < precision:
if even:
mid = (lon_interval[0] + lon_interval[1]) / 2
if longitude > mid:
ch |= bits[bit]
lon_interval = (mid, lon_interval[1])
else:
lon_interval = (lon_interval[0], mid)
else:
mid = (lat_interval[0] + lat_interval[1]) / 2
if latitude > mid:
ch |= bits[bit]
lat_interval = (mid, lat_interval[1])
else:
lat_interval = (lat_interval[0], mid)
even = not even
if bit < 4:
bit += 1
else:
geohash += __base32[ch]
bit = 0
ch = 0
return ''.join(geohash)
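# Quick usage sketch (added for illustration; the coordinates below are arbitrary, not from the dataset):
# a smaller precision gives a coarser cell, so nearby houses share the same hash prefix.
print(geohashEncode(47.6062, -122.3321, precision=4))
print(geohashEncode(47.6062, -122.3321, precision=6))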
# # Load Data and define the target variable
# In[ ]:
house = pd.read_csv('../input/housesalesprediction/kc_house_data.csv')
print (house.shape)
house.drop_duplicates('id',inplace=True)
print(house.shape)
targetVariableColumnName = 'price'
# In[ ]:
house.columns
# # creating features based on location. Geohash with different accuracies is handy for clustering/grouping
# In[ ]:
house['date'] = pd.to_datetime(house['date'])
house.sort_values('date',inplace=True)
house['yr_built'] = house.yr_built.apply(lambda x:datetime.strptime(str(x),'%Y') )
house['yr_renovated'] = house.apply(returnYear,axis=1)
house['age']=house.apply(deltaInYearsAge,axis=1)
house['renovatedAge']=house.apply(deltaInYearsRenovated,axis=1)
house['geohash']=house.apply(lambda points: geohashEncode(points.lat, points.long,precision=4),axis = 1)
house['pricepersqft']=house['price']/house['sqft_living']
# In[ ]:
house.shape[0]*0.8
# In[ ]:
train = house.head(17148)
# # Groupby functions on getting bias over neighborhood
# In[ ]:
train=train.join(train.groupby(['geohash'])['pricepersqft'].mean(),on='geohash',rsuffix='priceaverage600m')
train=train.join(train.groupby(['geohash'])['pricepersqft'].min(),on='geohash',rsuffix='pricemin600m')
train=train.join(train.groupby(['geohash'])['pricepersqft'].max(),on='geohash',rsuffix='pricemax600m')
# In[ ]:
pd.set_option('display.float_format', lambda x: '%.0f' % x)
print (train.shape)
train.drop_duplicates('id',inplace=True)
print (train.shape)
train.describe().T
# In[ ]:
test = house.tail(4465)
test.to_csv('original_test.csv')
currentIds=set(test['id'].values)
print (test.shape)
test=pd.merge(test, train[['geohash','pricepersqftpriceaverage600m','pricepersqftpricemin600m', 'pricepersqftpricemax600m']], on="geohash")
test.drop_duplicates('id',inplace=True)
test.to_csv('merged_test.csv')
currentIds1=set(test['id'].values)
print (currentIds.difference(currentIds1))
print (test.shape)
# # now drop the items already covered in added features
# zip code, lat, lon are covered in addl features with respect to location
# year renovated and built are added as age and renovated age
# other columns logprice, geohash ...
# In[ ]:
columns=list(train.columns.values)
columns.remove(targetVariableColumnName)
columns=[item for item in columns if item not in ['zipcode', 'lat','long','id','yr_renovated','yr_built','date','geohash','geohash_70m','Log_price']]
print (columns)
# # Feature ranking
#
# In[ ]:
# First extract the target variable which is our House prices
Y = train.price.values
# Drop price from the house dataframe and create a matrix out of the house data
X = train[columns].values  # .values instead of the deprecated .as_matrix()
# Store the column/feature names into a list "colnames"
colnames = columns
# In[ ]:
ranks = {}
# Create our function which stores the feature rankings to the ranks dictionary
def ranking(ranks, names, order=1):
minmax = MinMaxScaler()
ranks = minmax.fit_transform(order*np.array([ranks]).T).T[0]
ranks = map(lambda x: round(x,2), ranks)
return dict(zip(names, ranks))
# In[ ]:
from sklearn.feature_selection import RFE, f_regression
from sklearn.linear_model import (LinearRegression, Ridge, Lasso, RandomizedLasso)
from sklearn.preprocessing import MinMaxScaler
from sklearn.ensemble import RandomForestRegressor
# In[ ]:
rf = RandomForestRegressor(n_jobs=-1, n_estimators=50, verbose=3)
rf.fit(X,Y)
ranks["RF"] = ranking(rf.feature_importances_, colnames);
# In[ ]:
# Finally let's run our Selection Stability method with Randomized Lasso
rlasso = RandomizedLasso(alpha=0.04)
rlasso.fit(X, Y)
ranks["rlasso/Stability"] = ranking(np.abs(rlasso.scores_), colnames)
print('finished')
# In[ ]:
# Construct our Linear Regression model
lr = LinearRegression(normalize=True)
lr.fit(X,Y)
#stop the search when only the last feature is left
rfe = RFE(lr, n_features_to_select=1, verbose =3 )
rfe.fit(X,Y)
ranks["RFE"] = ranking(list(map(float, rfe.ranking_)), colnames, order=-1)
# In[ ]:
# Using Linear Regression
lr = LinearRegression(normalize=True)
lr.fit(X,Y)
ranks["LinReg"] = ranking(np.abs(lr.coef_), colnames)
# Using Ridge
ridge = Ridge(alpha = 7)
ridge.fit(X,Y)
ranks['Ridge'] = ranking(np.abs(ridge.coef_), colnames)
# Using Lasso
lasso = Lasso(alpha=.05)
lasso.fit(X, Y)
ranks["Lasso"] = ranking(np.abs(lasso.coef_), colnames)
# In[ ]:
# Create empty dictionary to store the mean value calculated from all the scores
r = {}
for name in colnames:
r[name] = round(np.mean([ranks[method][name]
for method in ranks.keys()]), 2)
methods = sorted(ranks.keys())
ranks["Mean"] = r
methods.append("Mean")
# In[ ]:
# Put the mean scores into a Pandas dataframe
meanplot = pd.DataFrame(list(r.items()), columns= ['Feature','Mean Ranking'])
# Sort the dataframe
meanplot = meanplot.sort_values('Mean Ranking', ascending=False)
# In[ ]:
import seaborn as sns
# Let's plot the ranking of the features
sns.factorplot(x="Mean Ranking", y="Feature", data = meanplot, kind="bar",
size=11)
# # Let's Predict with CatBoost Library
# In[ ]:
cbc = CatBoostRegressor(random_seed=0).fit(train[columns].values,train[targetVariableColumnName].values)
# In[ ]:
test['predictionsCatBoost'] = cbc.predict(test[columns])
# In[ ]:
from sklearn.metrics import explained_variance_score,median_absolute_error
print (explained_variance_score(test['price'], test['predictionsCatBoost']),median_absolute_error(test['price'], test['predictionsCatBoost']))
# In[ ]:
test['predictionsCatBoost']=test['predictionsCatBoost'].apply(lambda x: int(round(x)))
test[['price','predictionsCatBoost','age','id']].head()
# In[ ]:
get_ipython().run_line_magic('matplotlib', 'inline')
# In[ ]:
import matplotlib
matplotlib.pyplot.scatter(test['predictionsCatBoost'],test[targetVariableColumnName])
# In[ ]:
```
#### File: downloaded_kernels/house_sales/kernel_81.py
```python
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error
import seaborn
def show_data_info(df):
print(df.info())
# print(df.head())
print(df.describe())
print(df.columns)
def load_dataset():
dateparse = lambda x: pd.datetime.strptime(x, '%Y%m%dT%H%M%S')
#current_path = os.path.dirname(os.path.realpath(__file__))
#csv_path = os.path.join(current_path, "input/kc_house_data.csv")
return pd.read_csv('../input/kc_house_data.csv', index_col=0, parse_dates=['date'], date_parser=dateparse)
def show_histogram(df):
for col in df.columns:
plt.figure(figsize=(9, 9))
df[col].hist(bins=50, histtype='bar', align='mid', label=col)
plt.title(col)
plt.legend()
plt.grid(False)
#print('viz/'+col+'_hist.png...saving')
plt.savefig(col+'_hist.png', format='png', bbox_inches='tight', dpi=200)
# plt.show()
def show_scatter(df, y_list, x_col):
for y in y_list:
plt.figure(figsize=(9, 9))
plt.scatter(df[x_col], df[y])
plt.title(y)
plt.legend()
plt.ylabel(y)
plt.xlabel(x_col)
plt.grid(False)
#print('viz/'+y+'_scatter.png...saving')
plt.savefig(y+'_scatter.png', format='png', bbox_inches='tight', dpi=200)
# plt.show()
def get_correlations(df):
print("Correlations with price:")
    corr_matrix = df.corr()
print(corr_matrix['price'].sort_values(ascending=False))
def get_zero_cols(df):
for cols in df.columns:
print(cols, (df[cols] == 0.0).sum())
def remove_irrelevant_colums(df_train, df_test):
# df_train.pop('id')
df_train.pop('date')
# df_test.pop('id')
df_test.pop('date')
housing_df = load_dataset()
# show_data_info(housing_df)
# show_histogram(housing_df)
get_correlations(housing_df)
# show_scatter(housing_df, ['sqft_living', 'grade', 'sqft_above', 'bathrooms', 'sqft_basement', 'bedrooms'], 'price')
# get_zero_cols(housing_df)
housing_df_train, housing_df_test = train_test_split(housing_df, test_size=0.2, random_state=42)
print('*',len(housing_df_train), ' -> ', len(housing_df_test))
remove_irrelevant_colums(housing_df_train, housing_df_test)
housing_df_train_label = housing_df_train.pop('price')
housing_df_test_label = housing_df_test.pop('price')
lin_req = LinearRegression()
lin_req.fit(housing_df_train, housing_df_train_label)
housing_df_prediction = lin_req.predict(housing_df_test)
accuracy = lin_req.score(housing_df_test, housing_df_test_label)
print("Accuracy: {}%".format(int(round(accuracy * 100))))
print(housing_df_prediction)
lin_mae = mean_absolute_error(housing_df_test_label, housing_df_prediction)
print("MAE:",lin_mae)
result_df = pd.DataFrame({"Actual": housing_df_test_label, "Predicted": housing_df_prediction})
# show_data_info(result_df)
result_df = result_df.round({'Predicted': 1})
# print(result_df)
# result_df['Predicted'] = result_df['Predicted'].astype(int)
result_df.to_csv('results.csv', header=True)
```
#### File: house_sales/parsed_kernels/kernel_147.py
```python
import numpy as np # linear algebra
import pandas as pd # data processing
import matplotlib.pyplot as plt # to plot graph
import matplotlib.animation as animation # for animation
#Load the dataset
data = pd.read_csv("../input/kc_house_data.csv")
data.head()
data.shape
# grab the data and predictor variable, and add a column of 1's for gradient descent
x = data['sqft_living']
y = data['price']
x = (x - x.mean()) / x.std()
x
x = np.c_[np.ones(x.shape[0]), x]
x
#GRADIENT DESCENT
alpha = 0.01 #Step size
iterations = 1000 #No. of iterations
m = y.size #No. of data points
np.random.seed(123) #Set the seed
theta = np.random.rand(2) #Pick some random values to start with
theta
#GRADIENT DESCENT
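# The loop below implements plain batch gradient descent for linear least squares
# (comments added for clarity; they describe exactly what the code computes):
#   cost:    J(theta) = 1/(2m) * (X.theta - y)^T (X.theta - y)
#   update:  theta   <-  theta - (alpha/m) * X^T (X.theta - y)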
def gradient_descent(x, y, theta, iterations, alpha):
past_costs = []
past_thetas = [theta]
for i in range(iterations):
prediction = np.dot(x, theta)
error = prediction - y
cost = 1/(2*m) * np.dot(error.T, error)
past_costs.append(cost)
theta = theta - (alpha * (1/m) * np.dot(x.T, error))
past_thetas.append(theta)
return past_thetas,past_costs
#Pass the relevant variables to the function and get the new values back...
past_thetas, past_costs = gradient_descent(x, y, theta, iterations, alpha)
theta = past_thetas[-1]
#Print the results...
print(("Gradient Descent: {:.2f}, {:.2f}".format(theta[0], theta[1])))
#Plot the cost function...
plt.title('Cost Function J')
plt.xlabel('No. of iterations')
plt.ylabel('Cost')
plt.plot(past_costs)
plt.show()
```
#### File: house_sales/parsed_kernels/kernel_152.py
```python
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
from subprocess import check_output
print((check_output(["ls", "../input"]).decode("utf8")))
# Any results you write to the current directory are saved as output.
#read the data from the kc_house_data and drop the columns which are not needed
dataFrame = pd.read_csv('../input/kc_house_data.csv',nrows=1000)#read the CSV file only 1000 dataset
Cols = ['price','sqft_living'] #these are the the columns which are needed
dataFrame = dataFrame[Cols] #consider only those columns which are required and drop the other columns
dataFrame[['price']] = dataFrame[['price']]/1000
print((dataFrame.head()))#print the data
print(('no of dataset:',len(dataFrame)))#no of dataset
#data_points = dataFrame.as_matrix() #conver the data to the matrix form
#simply plotting of data in 2d form
plt.scatter(dataFrame['sqft_living'],dataFrame['price'])
plt.title(' sqft_living vs price ')
plt.xlabel('sqft_living area')
plt.ylabel('price k $')
plt.show()
# b, m are the constants of the linear regression equation y = m*x + b
init_consts = np.array([0,0]) # initial parameters of the best fit, set to b=0, m=0
criteria = 8000
epsi = 1e-5 #epsilon
N = len(dataFrame.index)#length of dataset
total_living = sum(dataFrame['sqft_living'])#sum of all sqft_living
sq_total_living = sum(np.power(dataFrame['sqft_living'],2))# sum of sqft_living^2
#Initialize hessian matrix
H = [[-N,-total_living],[-total_living,-sq_total_living]]
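# Where H and J come from (comments added for clarity): for the least-squares fit of
#   price ~ b + m * sqft_living
# the loop below uses the gradient-like components
#   J1 = sum(y - b - m*x)        J2 = sum(x * (y - b - m*x))
# (proportional to the negative gradient of the squared error), whose Jacobian with
# respect to (b, m) is the constant matrix H = [[-N, -sum(x)], [-sum(x), -sum(x^2)]],
# so each Newton step is (b, m) <- (b, m) - H^-1 J, as implemented by newton_method_update below.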
#update newton method to give new points
def newton_method_update(old_consts, H, J):
new_consts = np.array(np.subtract(old_consts, np.dot(np.linalg.pinv(H),J)))
return new_consts
price = np.array(dataFrame['price']) # convert to array
living_sqft = np.array(dataFrame['sqft_living']) # convert to array
new_consts = init_consts # initialize new parameters
#this condition for looping
while criteria > epsi:
old_consts = new_consts
J_position1 = np.nansum(price) - N * old_consts[0] - total_living * old_consts[1]
J_position2 = np.nansum(price * living_sqft) - total_living * old_consts[0] - sq_total_living * old_consts[1]
J = np.array([J_position1,J_position2])
new_consts = newton_method_update(old_consts, H, J)
criteria = np.linalg.norm(new_consts - old_consts)#criteria check every time for looping
# these are the points of best fit obtained,
# where m = new_consts[1] and b = new_consts[0]
#
print(new_consts)
#plot the line of best fit
plt.plot(price, new_consts[1] * price + new_consts[0],'red')
#data with respect to sqft_living vs price
plt.scatter(dataFrame['sqft_living'],dataFrame['price'],)
plt.title(' sqft_living vs price ')
plt.xlabel('sqft_living area')
plt.ylabel('price k $')
plt.show()
# In[ ]:
```
#### File: house_sales/parsed_kernels/kernel_40.py
```python
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.feature_selection import RFE
from sklearn.preprocessing import MinMaxScaler
from sklearn.ensemble import RandomForestRegressor
from subprocess import check_output
print((check_output(["ls", "../input"]).decode("utf8")))
# In[ ]:
house = pd.read_csv("../input/kc_house_data.csv")
del house["id"]
del house["date"]
# In[ ]:
X = house[house.columns[1:19]]
Y = house["price"]
colnames = X.columns
# In[ ]:
ranks = {}
# Create our function which stores the feature rankings to the ranks dictionary
def ranking(ranks, names, order=1):
minmax = MinMaxScaler()
ranks = minmax.fit_transform(order*np.array([ranks]).T).T[0]
ranks = [round(x,2) for x in ranks]
return dict(list(zip(names, ranks)))
# In[ ]:
lr = LinearRegression(normalize=True)
lr.fit(X,Y)
rfe = RFE(lr, n_features_to_select=1, verbose =3 )
rfe.fit(X,Y)
ranks["RFE"] = ranking(list(map(float, rfe.ranking_)), colnames)
# In[ ]:
rf = RandomForestRegressor(n_jobs=-1, n_estimators=50, verbose=3)
rf.fit(X,Y)
ranks["RF"] = ranking(rf.feature_importances_, colnames)
# In[ ]:
ranks
```
#### File: house_sales/parsed_kernels/kernel_57.py
```python
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import jarque_bera
data = pd.read_csv('../input/kc_house_data.csv')
data = data.iloc[0:1000,:]
data.drop_duplicates('id', inplace=True)
print(('Take a look at the data: \n', data.head(), '\n'))
print(('Examine data types of each predictor: \n', data.info(), '\n'))
print(('Check out summary statistics: \n', data.describe(), '\n'))
print(('Missing values?', data.isnull().any().any(), '\n'))
print(('Columns names:', data.columns.values.tolist()))
# In[ ]:
data = data.drop('zipcode', axis=1)
data = data.drop('date', axis=1)
nums = ['id', 'price', 'bedrooms', 'bathrooms', 'sqft_living', 'sqft_lot', 'floors', 'sqft_above', 'sqft_basement',
'yr_built', 'sqft_living15', 'sqft_lot15']
numsData = data[nums]
numsData.hist(bins=50, figsize=(20,15))
plt.show()
# price, sqft_above, sqft_living, sqft_living15, sqft_lot, sqft_lot15 seem to be right-skewed and are transformed.
# In this case the inverse hyperbolic sine transform is used, because, unlike log, it can handle zeros.
# Normally, one would re-transform the produced predictions of the target and the target itself before the loss-function is applied, however, in this case the scale of the target is not of interest.
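# For reference (comment added for clarity): arcsinh(x) = ln(x + sqrt(x^2 + 1)), so arcsinh(0) = 0
# and arcsinh(x) ~ ln(2x) for large x, which is why it works here as a log-like transform that tolerates zeros.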
# In[ ]:
def arcsinh(data, colList):
for item in colList:
data.loc[:,item] = np.arcsinh(data.loc[:,item].values)
return data
jbCols = ['price', 'sqft_above', 'sqft_living', 'sqft_living15', 'sqft_lot', 'sqft_lot15']
numsData = arcsinh(numsData, jbCols)
numsData.hist(bins=50, figsize=(20,15))
data.loc[:,nums] = numsData
# Splitting data set and obtaining the $inSampleError$.
# In[ ]:
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
data.drop('price', axis=1), data['price'], test_size=0.25, random_state=42)
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
lr = LinearRegression()
lr.fit(X_train, y_train)
inSamplePreds = lr.predict(X_train)
inSampleErr = mean_squared_error(inSamplePreds, y_train)
print(('In-sample-error:', inSampleErr))
# Now, the Leave-One-Out Bootstrap function is implemented.
# It needs 4 arguments to be passed in.
# 1. The data as a numpy array WITH an id-column, which uniquely identifies each observation, as the first column and
# NO target column.
# 2. The target column as a numpy array.
# 3. The number of bootstrap samples to be created, and
# 4. keyworded arguments of the model to be used.
#
# While coding this function, it came to my mind that it is better to create $B$ bootstraped id-columns instead of $B$ complete data sets that all have to be stored in memory the whole time the function is running.
# This way, only the id-columns are stored all the time and each corresponding bootstrap data set is created through a JOIN-command as needed and then deleted when not in use anymore.
# However, because I could not get the numpy-JOIN to work as I wanted it to, the function unfortunately switches to pandas to execute the join command and then switches back to numpy.
# These cumbersome operations definitely do not improve the function's execution speed.
# In[ ]:
kwargs = {'fit_intercept': True, 'normalize': False, 'copy_X': True, 'n_jobs': 1}
# or kwargs = {}
def LOOB(data, targetCol, B_samples, **kwargs):
avgLossVec = np.zeros((data.shape[0], 1))
bootMat = np.zeros((data.shape[0], B_samples))
idCol = np.zeros((data.shape[0], 1))
idCol = data[:, 0]
targetCol = np.stack((idCol, targetCol))
targetCol = targetCol.T
for column in range(bootMat.shape[1]):
bootMat[:,column] = np.random.choice(idCol, idCol.shape[0],replace=True)
for i in np.nditer(idCol):
bootLossVec = np.zeros((1, 1))
target = targetCol[targetCol[:,0]==i,1]
targetData = data[data[:,0]==i, 1:]
for column in range(bootMat.shape[1]):
if i not in bootMat[:,column]:
tempVec = pd.DataFrame(bootMat[:,column])
tempVec.rename(columns={0:'id'}, inplace=True)
tempData = pd.DataFrame(data)
tempTarget = pd.DataFrame(targetCol)
tempData.rename(columns={0:'id'}, inplace=True)
tempTarget.rename(columns={0:'id'}, inplace=True)
bootMat2 = tempVec.merge(tempData.drop_duplicates(subset=['id']), how='left', on='id')
bootTarget = tempVec.merge(tempTarget.drop_duplicates(subset=['id']), how='left', on='id')
del(tempVec)
del(tempData)
del(tempTarget)
bootMat2 = bootMat2.iloc[:,1:].values
bootTarget = bootTarget.iloc[:,1].values
                model = LinearRegression(**kwargs)
model.fit(bootMat2, bootTarget)
prediction = model.predict(targetData)
if column != 0:
bootLossVec = np.append(bootLossVec, mean_squared_error(target, prediction))
elif column == 0:
bootLossVec[column] = mean_squared_error(target, prediction)
avgLossVec[np.where(idCol == i)[0]] = np.mean(bootLossVec)
bootErr = np.mean(avgLossVec)
return bootErr
bootErr = LOOB(X_train.values, y_train.values, 80, **kwargs)
bootError = bootErr*0.632 + inSampleErr*0.368
print(('Bootstrap prediction error:', bootError))
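# Note (comment added for clarity): the 0.632 / 0.368 weighting above is Efron's ".632 estimator".
# Each bootstrap sample contains about 1 - 1/e ~ 63.2% of the distinct observations, so the
# leave-one-out bootstrap error is blended with the optimistic in-sample error as
#   Err_.632 = 0.632 * Err_boot + 0.368 * err_in_sample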
# 5-Fold cross validation
# In[ ]:
from sklearn.model_selection import cross_val_score
from sklearn.metrics import make_scorer
mseee = make_scorer(mean_squared_error, greater_is_better=False)
cvScores = -cross_val_score(lr, X_train, y_train,cv=5 , scoring = mseee)
cvOutErr = cvScores.mean()
print(('5-Fold error estimate:', cvOutErr))
# Out-of-Sample Error
# In[ ]:
testPreds = lr.predict(X_test)
trueError = mean_squared_error(testPreds, y_test)
print(('True test error:', trueError))
# In[ ]:
bars = {'Bootstrap': bootError, '5-Fold-CV': cvOutErr, 'in Sample Error': inSampleErr,
'true test error': trueError}
fig = plt.figure()
plt.bar(list(range(len(bars))), list(bars.values()), align='center')
plt.xticks(list(range(len(bars))), list(bars.keys()))
plt.show()
print(bars)
# As one can see above the bootstrap estimator is definitely an alternative, but an implementation in a quicker language would make it more applicable.
```
#### File: house_sales/parsed_kernels/kernel_64.py
```python
import numpy as np
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasRegressor
from sklearn.preprocessing import StandardScaler
from matplotlib import pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
from sklearn.model_selection import train_test_split
# In[ ]:
# Importing the dataset
df = pd.read_csv('../input/kc_house_data.csv')
df.head()
# In[ ]:
# Getting the dependent variables and independent variables
df['date'] = df['date'].apply(lambda x: int(x[:8]))
X = df[['date', 'bedrooms', 'bathrooms', 'sqft_living',
'sqft_lot', 'floors', 'waterfront', 'view', 'condition', 'grade',
'sqft_above', 'sqft_basement', 'yr_built', 'yr_renovated', 'zipcode',
'lat', 'long', 'sqft_living15', 'sqft_lot15']].values
y = df['price'].values
# In[ ]:
# Splitting the training set and test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.1, random_state = 0)
# Feature Scaling
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# In[ ]:
# define base model
def baseline_model():
# create model
model = Sequential()
model.add(Dense(19, input_dim=19, kernel_initializer='normal', activation='relu'))
    model.add(Dense(19, kernel_initializer='uniform', activation='relu'))
    model.add(Dense(19, kernel_initializer='uniform', activation='relu'))
model.add(Dense(1, kernel_initializer='normal'))
# Compile model
model.compile(loss='mean_squared_error', optimizer='adam')
return model
# Fitting to the training set
estimator = KerasRegressor(build_fn=baseline_model, epochs=150, batch_size=10, verbose=False)
estimator.fit(X_train, y_train)
# Predicting the results
prediction = estimator.predict(X_test)
# In[ ]:
# Visualization the results and evaluation
n = 12
length = len(prediction)
sns.set_style('darkgrid', {'axis.facecolor':'black'})
f, axes = plt.subplots(n, 1, figsize=(25,30))
times = 0
for i in range(n):
if i == 0:
plt.sca(axes[0])
plt.plot(y_test[:round(length/n)], color = 'red', label = 'Real Price')
plt.plot(prediction[:round(length/n)], color = 'blue', label = 'Predicted Price')
        plt.title('House Price Prediction in King County', fontsize=30)
plt.ylabel('Price', fontsize=20)
plt.legend(loc=1, prop={'size': 10})
else:
if i == n-1:
plt.sca(axes[n-1])
plt.plot(y_test[round(length/n*(n-1)):], color = 'red', label = 'Real Price')
plt.plot(prediction[round(length/n*(n-1)):], color = 'blue', label = 'Predicted Price')
plt.ylabel('Price', fontsize=20)
plt.legend(loc=1, prop={'size': 10})
else:
plt.sca(axes[i])
plt.plot(y_test[round(length/n*i):round(length/n*(i+1))], color = 'red', label = 'Real Price')
plt.plot(prediction[round(length/n*i):round(length/n*(i+1))], color = 'blue', label = 'Predicted Price')
plt.ylabel('Price', fontsize=20)
plt.legend(loc=1, prop={'size': 10})
plt.show()
```
#### File: loan_data/parsed_kernels/kernel_107.py
```python
import os, sys, re
#import cPickle as pickle
from keras.models import Sequential
from keras.layers import Dense
import time
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
print (time.time())
dataset = pd.read_csv('../input/loan.csv', low_memory=False)
# Replace all the missing entries with zeros
# In[ ]:
dataset = dataset.fillna(0) ## filling missing values with zeros
# **Data Modification**
#
# Convert all kind of categorical data into integral values accordingly and 'Date Column' into real values'
# In[ ]:
dataset['application_type'] = dataset['application_type'].astype('category').cat.codes
dataset['addr_state'] = dataset['addr_state'].astype('category').cat.codes
dataset['earliest_cr_line'] = pd.to_datetime(dataset['earliest_cr_line'])
dataset['earliest_cr_line'] = (dataset['earliest_cr_line']-dataset['earliest_cr_line'].min())/np.timedelta64(1,'D')
dataset['emp_length'] = dataset['emp_length'].astype('category').cat.codes
dataset['grade'] = dataset['grade'].astype('category').cat.codes
dataset['home_ownership'] = dataset['home_ownership'].astype('category').cat.codes
dataset['initial_list_status'] = dataset['initial_list_status'].astype('category').cat.codes
dataset['issue_d'] = pd.to_datetime(dataset['issue_d'])
dataset['issue_d'] = (dataset['issue_d']-dataset['issue_d'].min())/np.timedelta64(1,'D')
dataset['last_credit_pull_d'] = pd.to_datetime(dataset['last_credit_pull_d'])
dataset['last_credit_pull_d'] = (dataset['last_credit_pull_d']-dataset['last_credit_pull_d'].min())/np.timedelta64(1,'D')
dataset['last_pymnt_d'] = pd.to_datetime(dataset['last_pymnt_d'])
dataset['last_pymnt_d'] = (dataset['last_pymnt_d']-dataset['last_pymnt_d'].min())/np.timedelta64(1,'D')
dataset['loan_status'] = dataset['loan_status'].astype('category').cat.codes
dataset['next_pymnt_d'] = pd.to_datetime(dataset['next_pymnt_d'])
dataset['next_pymnt_d'] = (dataset['next_pymnt_d']-dataset['next_pymnt_d'].min())/np.timedelta64(1,'D')
dataset['purpose'] = dataset['purpose'].astype('category').cat.codes
dataset['pymnt_plan'] = dataset['pymnt_plan'].astype('category').cat.codes
dataset['sub_grade'] = dataset['sub_grade'].astype('category').cat.codes
dataset['term'] = dataset['term'].astype('category').cat.codes
dataset['verification_status'] = dataset['verification_status'].astype('category').cat.codes
dataset['verification_status_joint'] = dataset['verification_status_joint'].astype('category').cat.codes
# Storing non numeric or non real columns name in non_numerics array
# In[ ]:
non_numerics = [x for x in dataset.columns if not (dataset[x].dtype == np.float64 or dataset[x].dtype == np.int8 or dataset[x].dtype == np.int64)]
# Droping non_numerics column for easy modeling
# In[ ]:
df = dataset
df = df.drop(non_numerics,1)
# Converting 'loan result status' into two categories 0 and 1. 0 means loan failed or that type of person should not be given loan in future and 1 means loan passed i.e. they are good for extending the loan.
# In[ ]:
def LoanResult(status):
if (status == 5) or (status == 1) or (status == 7):
return 1
else:
return 0
df['loan_status'] = df['loan_status'].apply(LoanResult)
# Splitting data into train data and test data with the help of scikit library in the ratio of 3:1
# In[ ]:
train, test = train_test_split(df, test_size = 0.25)
##running complete data set will take a lot of time, hence reduced the data set
X_train = train.drop('loan_status',1).values[0:50000, :]
Y_train = train['loan_status'].values[0:50000]
X_test = test.drop('loan_status',1).values[0:1000, :]
Y_test = test['loan_status'].values[0:1000]
X_pred = test.drop('loan_status',1).values[1001:2000, :]
# Setting the seed for pseudo random numbers generation
# In[ ]:
seed = 8
np.random.seed(seed)
# Now we will define a three-layer neural network model. We create a Sequential model and add layers one at a time until we are happy with the network topology. After that we set the activation function and the number of neurons in each layer. These choices are heuristic and come from training the model several times.
# In[ ]:
# Create the model
model = Sequential()
# Define the three layered model
model.add(Dense(110, input_dim = 68, kernel_initializer = "uniform", activation = "relu"))
model.add(Dense(110, kernel_initializer = "uniform", activation = "relu"))
model.add(Dense(1, kernel_initializer = "uniform", activation = "sigmoid"))
# Now we will compile the model. This takes three parameters: a loss function, an optimizer and an evaluation metric. These choices are again heuristic. Here we use "binary_crossentropy" as the loss, "adam" as the optimizer and "accuracy" as the evaluation metric.
# In[ ]:
#
# Compile the model
model.compile(loss="binary_crossentropy", optimizer= "adam", metrics=['accuracy'])
#
# Now we have to fit the data into our model.
# We can train or fit our model on our loaded data by calling the fit() function on the model.
#
# The training process will run for a fixed number of iterations through the dataset called epochs, that we must specify using the **epochs** argument. We can also set the number of instances that are evaluated before a weight update in the network is performed, called the batch size and set using the **batch_size** argument.
# In[ ]:
# Fit the model
model.fit(X_train, Y_train, epochs= 50, batch_size=200)
# **Evaluate Model**
#
# We have trained our neural network on the entire dataset and we can evaluate the performance of the network on the test dataset.
# In[ ]:
performance = model.evaluate(X_test, Y_test)
print("%s: %.2f%%" % (model.metrics_names[1], performance[1]*100))
#
# **Final Prediction**
#
# Predicting using the trained model
# In[ ]:
# Predict using the trained model
prediction = model.predict(X_pred)
rounded_predictions = [np.round(x) for x in prediction]
print(rounded_predictions)
```
#### File: loan_data/parsed_kernels/kernel_152.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pandas as pd
import itertools
from sklearn import preprocessing
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.python.framework import ops
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.util import compat
tf.logging.set_verbosity(tf.logging.FATAL)
df = pd.read_csv("../input/loan.csv", low_memory=False)
# <h3> Creating the Target Label </h3>
#
# <p> From a prior notebook, I examined the 'loan_status' column. The cell below creates a column with binary value 0 for loans not in default, and binary value 1 for loans in default.
# In[ ]:
df['Default_Binary'] = int(0)
for index, value in df.loan_status.iteritems():
if value == 'Default':
df.set_value(index,'Default_Binary',int(1))
if value == 'Charged Off':
df.set_value(index, 'Default_Binary',int(1))
if value == 'Late (31-120 days)':
df.set_value(index, 'Default_Binary',int(1))
if value == 'Late (16-30 days)':
df.set_value(index, 'Default_Binary',int(1))
if value == 'Does not meet the credit policy. Status:Charged Off':
df.set_value(index, 'Default_Binary',int(1))
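# An equivalent vectorized sketch of the loop above (added for illustration; same status list, no set_value calls):
default_statuses = ['Default', 'Charged Off', 'Late (31-120 days)', 'Late (16-30 days)',
                    'Does not meet the credit policy. Status:Charged Off']
df['Default_Binary'] = df['loan_status'].isin(default_statuses).astype(int)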
# <h3> Creating a category feature for "Loan Purpose" </h3>
#
# <p> Below I create a new column for loan purpose, and assign each type of loan purpose an integer value. </p>
# In[ ]:
df['Purpose_Cat'] = int(0)
for index, value in df.purpose.iteritems():
if value == 'debt_consolidation':
df.set_value(index,'Purpose_Cat',int(1))
if value == 'credit_card':
df.set_value(index, 'Purpose_Cat',int(2))
if value == 'home_improvement':
df.set_value(index, 'Purpose_Cat',int(3))
if value == 'other':
df.set_value(index, 'Purpose_Cat',int(4))
if value == 'major_purchase':
df.set_value(index,'Purpose_Cat',int(5))
if value == 'small_business':
df.set_value(index, 'Purpose_Cat',int(6))
if value == 'car':
df.set_value(index, 'Purpose_Cat',int(7))
if value == 'medical':
df.set_value(index, 'Purpose_Cat',int(8))
if value == 'moving':
df.set_value(index, 'Purpose_Cat',int(9))
if value == 'vacation':
df.set_value(index,'Purpose_Cat',int(10))
if value == 'house':
df.set_value(index, 'Purpose_Cat',int(11))
if value == 'wedding':
df.set_value(index, 'Purpose_Cat',int(12))
if value == 'renewable_energy':
df.set_value(index, 'Purpose_Cat',int(13))
if value == 'educational':
df.set_value(index, 'Purpose_Cat',int(14))
# <h3> Scaling Interest Rates </h3>
#
# <p> Below I scale the interest rate for each loan to a value between 0 and 1 </p>
# In[ ]:
x = np.array(df.int_rate.values).reshape(-1,1)
min_max_scaler = preprocessing.MinMaxScaler()
x_scaled = min_max_scaler.fit_transform(x)
df['int_rate_scaled'] = pd.DataFrame(x_scaled)
print (df.int_rate_scaled[0:5])
# <h3> Scaling Loan Amount </h3>
#
# <p> Below I scale the amount funded for each loan to a value between 0 and 1 </p>
# In[ ]:
x = np.array(df.funded_amnt.values).reshape(-1,1)
min_max_scaler = preprocessing.MinMaxScaler()
x_scaled = min_max_scaler.fit_transform(x)
df['funded_amnt_scaled'] = pd.DataFrame(x_scaled)
print (df.funded_amnt_scaled[0:5])
# <h3> Setting up the Neural Network </h3>
#
# <p> Below I split the data into a training, testing, and prediction set </p>
# <p> After that, I assign the feature and target columns, and create the function that will be used to pass the data into the model </p>
# In[ ]:
training_set = df[0:500000] # Train on first 500k rows
testing_set = df[500001:849999] # Test on next 350k rows
prediction_set = df[850000:] # Predict on final 37k rows
COLUMNS = ['Purpose_Cat','funded_amnt_scaled','int_rate_scaled','Default_Binary']
FEATURES = ['Purpose_Cat','funded_amnt_scaled','int_rate_scaled']
LABEL = 'Default_Binary'
def input_fn(data_set):
feature_cols = {k: tf.constant(data_set[k].values) for k in FEATURES}
labels = tf.constant(data_set[LABEL].values)
return feature_cols, labels
# <h3> Fitting The Model </h3>
# In[ ]:
feature_cols = [tf.contrib.layers.real_valued_column(k)
for k in FEATURES]
#config = tf.contrib.learn.RunConfig(keep_checkpoint_max=1) ######## DO NOT DELETE
regressor = tf.contrib.learn.DNNRegressor(
feature_columns=feature_cols, hidden_units=[10, 20, 10], )
regressor.fit(input_fn=lambda: input_fn(training_set), steps=251)
# <h3> Evaluating the Model </h3>
# In[ ]:
# Score accuracy
ev = regressor.evaluate(input_fn=lambda: input_fn(testing_set), steps=10)
loss_score = ev["loss"]
print("Loss: {0:f}".format(loss_score))
# <h3> Predicting on new data </h3>
# In[ ]:
y = regressor.predict(input_fn=lambda: input_fn(prediction_set))
predictions = list(itertools.islice(y, 37379))
# <h3> Visualize Predictions Relative To Interest Rates </h3>
# In[ ]:
plt.plot(prediction_set.int_rate_scaled, predictions, 'ro')
plt.ylabel("Model Prediction Value")
plt.xlabel("Interest Rate of Loan (Scaled between 0-1)")
plt.show()
# <h3> Visualize Predictions Relative to Loan Size </h3>
# In[ ]:
plt.plot(prediction_set.funded_amnt_scaled, predictions, 'ro')
plt.ylabel("Model Prediction Value")
plt.xlabel("Funded Amount of Loan (Scaled between 0-1)")
plt.show()
# <h3> Visualize Predictions Relative to Loan Purpose </h3>
# In[ ]:
plt.plot(prediction_set.Purpose_Cat, predictions, 'ro')
plt.ylabel("Default Prediction Value")
plt.xlabel("Loan Purpose")
plt.title("DNN Regressor Predicting Default By Loan Purpose")
fig_size = plt.rcParams["figure.figsize"]
fig_size[0] = 8
fig_size[1] = 8
plt.rcParams["figure.figsize"] = fig_size
labels = ['Debt Consolidation', 'Credit Card', 'Home Improvement', 'Other',
          'Major Purchase', 'Small Business', 'Car', 'Medical',
          'Moving', 'Vacation', 'House', 'Wedding',
          'Renewable Energy', 'Educational']
plt.xticks([1,2,3,4,5,6,7,8,9,10,11,12,13,14], labels, rotation='vertical')
plt.show()
```
#### File: loan_data/parsed_kernels/kernel_25.py
```python
import numpy as np
print('numpy version\t:',np.__version__)
import pandas as pd
print('pandas version\t:',pd.__version__)
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
from scipy import stats
# Regular expressions
import re
# seaborn : advanced visualization
import seaborn as sns
print('seaborn version\t:',sns.__version__)
pd.options.mode.chained_assignment = None #set it to None to remove SettingWithCopyWarning
pd.options.display.float_format = '{:.4f}'.format # convert scientific notation such as 4.225108e+11 to 422510842796.00
pd.set_option('display.max_columns', 100) # to display all the columns
np.set_printoptions(suppress=True,formatter={'float_kind':'{:f}'.format})
import os
import warnings
warnings.filterwarnings('ignore') # if there are any warning due to version mismatch, it will be ignored
# <a id='sourcing'></a>
# ## III. Data Sourcing
# In[2]:
print(os.listdir("../input"))
# In[3]:
loan = pd.read_csv('../input/loan.csv',dtype='object')
print(loan.shape)
# #### Get some insights
# In[4]:
loan.head(2)
# __List of Columns & NA counts where NA values are more than 30%__
# In[5]:
NA_col = loan.isnull().sum()
NA_col = NA_col[NA_col.values >(0.3*len(loan))]
plt.figure(figsize=(20,4))
NA_col.plot(kind='bar')
plt.title('List of Columns & NA counts where NA values are more than 30%')
plt.show()
# <font color='blue'><b>Insights</b>: So we can see from the above plot that there are 20+ columns in the dataset where all the values are NA.</font>
#
# As we can see, there are <b>887379</b> rows & <b>74</b> columns in the dataset, so it will be very difficult to look at each column one by one & find the NA or missing values.
# So let's find all columns where missing values exceed a certain percentage, let's say <b>30%</b>. We will remove those columns as it is not feasible to impute missing values for them.
#
# We will create a UDF for this.
# <a id='cleaning'></a>
# ## IV. Data Cleaning and Manipulation
# __<font color='green'>UDF :: removeNulls(dataframe, axis, percent)</font>__
#
# ###### removeNulls(dataframe, axis, percent) will drop the columns/rows from the dataset based on the parameter values.
# In[6]:
def removeNulls(dataframe, axis =1, percent=0.3):
'''
* removeNull function will remove the rows and columns based on parameters provided.
* dataframe : Name of the dataframe
* axis : axis = 0 defines drop rows, axis =1(default) defines drop columns
* percent : percent of data where column/rows values are null,default is 0.3(30%)
'''
df = dataframe.copy()
ishape = df.shape
if axis == 0:
rownames = df.transpose().isnull().sum()
rownames = list(rownames[rownames.values > percent*len(df)].index)
df.drop(df.index[rownames],inplace=True)
print("\nNumber of Rows dropped\t: ",len(rownames))
else:
colnames = (df.isnull().sum()/len(df))
colnames = list(colnames[colnames.values>=percent].index)
df.drop(labels = colnames,axis =1,inplace=True)
print("Number of Columns dropped\t: ",len(colnames))
print("\nOld dataset rows,columns",ishape,"\nNew dataset rows,columns",df.shape)
return df
# #### 1. Remove columns where NA values are more than or equal to 30%
# In[7]:
loan = removeNulls(loan, axis =1,percent = 0.3)
# #### 2. Remove rows where NA values are more than or equal to 30%
# (Note: NA columns have already been removed in step 1, so we may not find any <b>rows</b> where 30% of the values are NA)
# In[8]:
loan = removeNulls(loan, axis =0,percent = 0.3)
# There are no rows where NA values are more than or equal to 30%
# #### 3. Remove columns where the number of unique values is only 1.
#
# Let's look at the number of unique values for each column. We will remove all columns with only one unique value because they add nothing to the analysis.
# In[9]:
unique = loan.nunique()
unique = unique[unique.values == 1]
# In[10]:
loan.drop(labels = list(unique.index), axis =1, inplace=True)
print("So now we are left with",loan.shape ,"rows & columns.")
# #### 4. Employment Term : Replace 'n/a' values with 'Self-Employed'.
#
# There are some values in emp_length which are <b>'n/a'</b>; we assume these applicants are <b>'Self-Employed'</b>, because for self-employed applicants the employment length is not applicable.
# In[11]:
print(loan.emp_length.unique())
loan.emp_length.fillna('0',inplace=True)
loan.emp_length.replace(['n/a'],'Self-Employed',inplace=True)
print(loan.emp_length.unique())
# #### 5. Remove irrelevant columns.
#
# So far we have removed columns based on counts & statistics.
# Now let's look at each column from a business perspective to decide whether it is required for our analysis; unique IDs and URLs, for example, are not.
# As the last 2 digits of the zip code are masked with 'xx', we can remove that column as well.
# In[12]:
not_required_columns = ["id","member_id","url","zip_code"]
loan.drop(labels = not_required_columns, axis =1, inplace=True)
print("So now we are left with",loan.shape ,"rows & columns.")
# #### 6. Cast all continuous variables to numeric
# Cast all continuous variables to numeric so that we can compute correlations between them
# In[13]:
numeric_columns = ['loan_amnt','funded_amnt','funded_amnt_inv','installment','int_rate','annual_inc','dti']
loan[numeric_columns] = loan[numeric_columns].apply(pd.to_numeric)
# In[14]:
loan.tail(3)
# #### 7. Purpose of loan : Drop records from categories that cover less than 0.75% of the data
# We will analyse only those categories which contain more than 0.75% of the records.
# Also, since we do not know what falls under 'other', we will remove that category as well.
# In[15]:
(loan.purpose.value_counts()*100)/len(loan)
# In[16]:
del_loan_purpose = (loan.purpose.value_counts()*100)/len(loan)
del_loan_purpose = del_loan_purpose[(del_loan_purpose < 0.75) | (del_loan_purpose.index == 'other')]
loan.drop(labels = loan[loan.purpose.isin(del_loan_purpose.index)].index, inplace=True)
print("So now we are left with",loan.shape ,"rows & columns.")
print(loan.purpose.unique())
# #### 8. Loan Status : Drop records from statuses that cover less than 1.5% of the data
# As we can see, loan_status values other than ['Current', 'Fully Paid' & 'Charged Off'] are not relevant for our analysis.
# In[17]:
(loan.loan_status.value_counts()*100)/len(loan)
# In[18]:
del_loan_status = (loan.loan_status.value_counts()*100)/len(loan)
del_loan_status = del_loan_status[(del_loan_status < 1.5)]
loan.drop(labels = loan[loan.loan_status.isin(del_loan_status.index)].index, inplace=True)
print("So now we are left with",loan.shape ,"rows & columns.")
print(loan.loan_status.unique())
# <a id='derived'></a>
# ## V. Derived Metrics
# ** We will now derive some new columns based on our business understanding that will be helpful in our analysis. **
# #### 1. Loan amount to Annual Income ratio
# In[19]:
loan['loan_income_ratio']= loan['loan_amnt']/loan['annual_inc']
# #### 2. Extract Year & Month from Issue date
# In[20]:
loan['issue_month'],loan['issue_year'] = loan['issue_d'].str.split('-', 1).str
loan[['issue_d','issue_month','issue_year']].head()
# #### 3. Change the order of months to Jan-Dec; currently they are in alphabetical order (A-Z)
# In[21]:
months_order = ["Jan", "Feb", "Mar", "Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"]
loan['issue_month'] = pd.Categorical(loan['issue_month'],categories=months_order, ordered=True)
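# Quick check (a sketch added for clarity, not part of the original kernel): with
# the ordered categorical above, month-wise aggregations now follow calendar order
# Jan..Dec instead of alphabetical order.
print(loan['issue_month'].value_counts(sort=False))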
# #### 4. Create Bins for range of Loan Amount
# In[22]:
bins = [0, 5000, 10000, 15000, 20000, 25000,40000]
slot = ['0-5000', '5000-10000', '10000-15000', '15000-20000', '20000-25000','25000 and above']
loan['loan_amnt_range'] = pd.cut(loan['loan_amnt'], bins, labels=slot)
# #### 5. Create Bins for range of Annual Income
# In[23]:
bins = [0, 25000, 50000, 75000, 100000,1000000]
slot = ['0-25000', '25000-50000', '50000-75000', '75000-100000', '100000 and above']
loan['annual_inc_range'] = pd.cut(loan['annual_inc'], bins, labels=slot)
# #### 6. Create Bins for range of Interest rates
# In[24]:
bins = [0, 7.5, 10, 12.5, 15,20]
slot = ['0-7.5', '7.5-10', '10-12.5', '12.5-15', '15 and above']
loan['int_rate_range'] = pd.cut(loan['int_rate'], bins, labels=slot)
# <a id='univariate'></a>
# ## VI. Univariate Analysis
# ### Continuous Variables
# In the case of continuous variables, we need to understand the central tendency and spread of the variable. These are measured using various statistical metrics and visualization methods such as box plots, histograms/distribution plots, violin plots, etc.
# ### Categorical Variables
# For categorical variables, we'll use a frequency table to understand the distribution of each category. It can be measured using two metrics, Count and Count% against each category. A countplot or bar chart can be used for visualization.
# __<font color='green'>UDF :: univariate(df,col,vartype,hue)</font>__
#
# ###### Univariate function will plot the graphs based on the parameter values.
# In[25]:
def univariate(df,col,vartype,hue =None):
'''
Univariate function will plot the graphs based on the parameters.
df : dataframe name
col : Column name
vartype : variable type : continuous or categorical
Continuous(0) : Distribution, Violin & Boxplot will be plotted.
Categorical(1) : Countplot will be plotted.
hue : It's only applicable for categorical analysis.
'''
sns.set(style="darkgrid")
if vartype == 0:
fig, ax=plt.subplots(nrows =1,ncols=3,figsize=(20,8))
ax[0].set_title("Distribution Plot")
sns.distplot(df[col],ax=ax[0])
ax[1].set_title("Violin Plot")
sns.violinplot(data =df, x=col,ax=ax[1], inner="quartile")
ax[2].set_title("Box Plot")
sns.boxplot(data =df, x=col,ax=ax[2],orient='v')
if vartype == 1:
temp = pd.Series(data = hue)
fig, ax = plt.subplots()
width = len(df[col].unique()) + 6 + 4*len(temp.unique())
fig.set_size_inches(width , 7)
ax = sns.countplot(data = df, x= col, order=df[col].value_counts().index,hue = hue)
if len(temp.unique()) > 0:
for p in ax.patches:
ax.annotate('{:1.1f}%'.format((p.get_height()*100)/float(len(loan))), (p.get_x()+0.05, p.get_height()+20))
else:
for p in ax.patches:
ax.annotate(p.get_height(), (p.get_x()+0.32, p.get_height()+20))
del temp
else:
exit
plt.show()
# ### Continuous Variables
# #### 1. Loan Amount
# In[26]:
univariate(df=loan,col='loan_amnt',vartype=0)
# <font color='blue'><b>Insights</b>: Most of the loan amounts fall between 8,000 and 20,000 USD.</font>
# #### 2. Interest Rate
# In[27]:
univariate(df=loan,col='int_rate',vartype=0)
# <font color='blue'><b>Insights</b>: Most loan interest rates fall between 10% and 16%.</font>
# #### 3. Annual Income
# In[28]:
loan["annual_inc"].describe()
# ###### The max value is 9,500,000, which is far larger than the mean, so we will remove the outliers from Annual Income.
#
# Remove outliers (values above the 99.5th percentile)
# In[29]:
q = loan["annual_inc"].quantile(0.995)
loan = loan[loan["annual_inc"] < q]
loan["annual_inc"].describe()
# In[30]:
univariate(df=loan,col='annual_inc',vartype=0)
# <font color='blue'><b>Insights</b>: Most applicants earn between 40,000 and 90,000 USD annually.</font>
# ### Categorical Variables
# #### 4. Loan Status
# In[31]:
univariate(df=loan,col='loan_status',vartype=1)
# <font color='blue'><b>Insights</b>: About 5% of the applicants charged off.</font>
# #### 5. Purpose of loan
# In[32]:
univariate(df=loan,col='purpose',vartype=1,hue='loan_status')
# <font color='blue'><b>Insights</b>: Approx 60% of the applicants applied for a loan to pay off their other loans (Debt Consolidation).</font>
# #### 6. Home Ownership wise Loan
# In[33]:
loan.home_ownership.unique()
# In[34]:
# Remove rows where home_ownership'=='OTHER', 'NONE', 'ANY'
rem = ['OTHER', 'NONE', 'ANY']
loan.drop(loan[loan['home_ownership'].isin(rem)].index,inplace=True)
loan.home_ownership.unique()
# In[35]:
univariate(df=loan,col='home_ownership',vartype=1,hue='loan_status')
# <font color='blue'><b>Insights</b>: 40% of applicants live in a rented home, whereas 52% of applicants have mortgaged their home.</font>
# #### 7. Year wise Loan
# In[36]:
year_wise =loan.groupby(by= [loan.issue_year])[['loan_status']].count()
year_wise.rename(columns={"loan_status": "count"},inplace=True)
ax =year_wise.plot(figsize=(20,8))
year_wise.plot(kind='bar',figsize=(20,8),ax = ax)
plt.show()
# <font color='blue'><b>Insights</b>: The number of loan applicants is increasing year on year; approx 47% of loan applicants received their loans in 2011.</font>
# #### 8. Loan Term
# In[37]:
univariate(df=loan,col='term',vartype=1,hue='loan_status')
# <font color='blue'><b>Insights</b>: 70% of applicants applied for a loan with a 36-month term.</font>
# In[38]:
loan.head(3)
# <a id='bivariate'></a>
# ## VII. Bivariate/Multivariate Analysis
# Bivariate/multivariate analysis finds the relationship between two or more variables. We can perform bivariate/multivariate analysis for any combination of categorical and continuous variables: Categorical & Categorical, Categorical & Continuous, and Continuous & Continuous.
# #### 1. Purpose of Loan vs Loan Amount for each Loan Status
# In[39]:
plt.figure(figsize=(16,12))
sns.boxplot(data =loan, x='purpose', y='loan_amnt', hue ='loan_status')
plt.title('Purpose of Loan vs Loan Amount')
plt.show()
# #### 2. Correlation Matrix : All Continuous (Numeric) Variables
# In[40]:
loan_correlation = loan.corr()
loan_correlation
# #### 3. HeatMap: All continuous variables
# In[41]:
f, ax = plt.subplots(figsize=(14, 9))
sns.heatmap(loan_correlation,
xticklabels=loan_correlation.columns.values,
yticklabels=loan_correlation.columns.values,annot= True)
plt.show()
# <font color='blue'><b>Insights</b>: It is clear from the heatmap that <b>'loan_amnt', 'funded_amnt' & 'funded_amnt_inv'</b> are closely <b>interrelated</b>, so we can keep just one of these columns for our analysis.</font>
# #### 4. Employment Length vs Loan Amount for different purpose of Loan
# In[42]:
loanstatus=loan.pivot_table(index=['loan_status','purpose','emp_length'],values='loan_amnt',aggfunc=('count')).reset_index()
loanstatus=loan.loc[loan['loan_status']=='Charged Off']
# In[43]:
ax = plt.figure(figsize=(30, 18))
ax = sns.boxplot(x='emp_length',y='loan_amnt',hue='purpose',data=loanstatus)
ax.set_title('Employment Length vs Loan Amount for different purpose of Loan',fontsize=22,weight="bold")
ax.set_xlabel('Employment Length',fontsize=16)
ax.set_ylabel('Loan Amount',color = 'b',fontsize=16)
plt.show()
# <a id='bivariate_prob'></a>
# ## VIII. Bivariate/Multivariate Analysis with Probability of Charge off
# ### Categorical Variables vs Probability of Charged Off
#
# The main motive of this use case is to find which parameters have the greatest impact on Loan Status, i.e. whether an applicant will successfully complete the loan term or will charge off.
#
# So we will now use a new term, <b>Probability of Charged Off</b>, defined as:
#
# \begin{equation*}
# {Probability\:of\:Charged\:Off =\:}\frac{Number\:of\:Applicants\:who\:charged\:off}{Total\:No.\:of\:Applicants}
# \end{equation*}
#
#
# We will calculate this probability w.r.t each column in bivariate analysis & will see how the Probability of Charged Off changes with these columns.
#
# We will create a user defined function for this.
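# Baseline for reference (a sketch added for clarity, not part of the original
# kernel): the overall charge-off probability across all remaining records, which
# the per-column crosstabs below break down further.
print(round((loan['loan_status'] == 'Charged Off').mean(), 3))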
# __<font color='green'><u><b>UDF: </b>crosstab</u></font>__
# ##### 'crosstab' function will be used to get the summarized table for each column(passed as a variable) along with probability of charge off w.r.t that column.
# In[44]:
def crosstab(df,col):
'''
df : Dataframe
col: Column Name
'''
crosstab = pd.crosstab(df[col], df['loan_status'],margins=True)
crosstab['Probability_Charged Off'] = round((crosstab['Charged Off']/crosstab['All']),3)
crosstab = crosstab[0:-1]
return crosstab
# __<font color='green'><u><b>UDF: </b>bivariate_prob</u></font>__
# ##### 'bivariate_prob' function will be used to plot count of values for each column(passed as a variable) stacked across 'loan_status' along with probability of charged off as a line chart.
# In[45]:
# Probability of charge off
def bivariate_prob(df,col,stacked= True):
'''
df : Dataframe
col : Column Name
stacked : True(default) for Stacked Bar
'''
# get dataframe from crosstab function
plotCrosstab = crosstab(df,col)
linePlot = plotCrosstab[['Probability_Charged Off']]
barPlot = plotCrosstab.iloc[:,0:2]
ax = linePlot.plot(figsize=(20,8), marker='o',color = 'b')
ax2 = barPlot.plot(kind='bar',ax = ax,rot=1,secondary_y=True,stacked=stacked)
ax.set_title(df[col].name.title()+' vs Probability Charge Off',fontsize=20,weight="bold")
ax.set_xlabel(df[col].name.title(),fontsize=14)
ax.set_ylabel('Probability of Charged off',color = 'b',fontsize=14)
ax2.set_ylabel('Number of Applicants',color = 'g',fontsize=14)
plt.show()
# #### 1. Location vs Probability Charge Off
# In[46]:
filter_states = loan.addr_state.value_counts()
filter_states = filter_states[(filter_states < 10)]
loan_filter_states = loan.drop(labels = loan[loan.addr_state.isin(filter_states.index)].index)
# In[47]:
states = crosstab(loan_filter_states,'addr_state')
display(states.tail(20))
bivariate_prob(df =loan_filter_states,col ='addr_state')
# <font color='blue'><b>Insights</b>: There are multiple states/provinces with a high probability of charge off, the highest being 'NV' at 7%.</font>
# #### 2. Purpose of Loan vs Probability Charge Off
# In[48]:
purpose = crosstab(loan,'purpose')
display(purpose)
bivariate_prob(df =loan,col ='purpose',stacked=False)
# <font color='blue'><b>Insights</b>: Applicants who have taken a loan for 'small business' have the highest probability of charge off, at 14%. So the bank should take extra caution, e.g. require some asset or guarantee, while approving loans for the purpose of 'small business'.</font>
# #### 3. Grade/Subgrade vs Probability Charge Off
# In[49]:
grade = crosstab(loan,'grade')
display(grade)
bivariate_prob(df =loan,col ='grade',stacked=False)
bivariate_prob(df =loan,col ='sub_grade')
# <font color='blue'><b>Insights</b>: As we move from grade A to G, the probability that a person will charge off increases.</font>
# #### 4. Annual Income Range vs Probability Charge Off
# In[50]:
annual_inc_range = crosstab(loan,'annual_inc_range')
display(annual_inc_range)
bivariate_prob(df =loan,col ='annual_inc_range')
# <font color='blue'><b>Insights</b>: As annual income decreases, the probability that a person will default increases, reaching a high of 7% in the 0-25000 salary bracket.</font>
# #### 5. Interest rate Range vs Probability Charge Off
# In[51]:
int_rate_range = crosstab(loan,'int_rate_range')
display(int_rate_range)
bivariate_prob(df =loan,col ='int_rate_range')
# <font color='blue'><b>Insights</b>: As the interest rate increases, the probability that a person will default increases, reaching a high of 9% in the 15%-and-above bracket.</font>
# #### 6. Employment Length vs Probability Charge Off
# In[52]:
emp_length = crosstab(loan,'emp_length')
display(emp_length)
bivariate_prob(df =loan,col ='emp_length')
# <font color='blue'><b>Insights</b>: Applicants who are self-employed or have less than 1 year of experience are more likely to charge off.</font>
# <a id='Conclusion'></a>
# ***
# ## <font color='blue'><b>IX. Conclusion</b></font>
# ### Target Variable
# * <font color='blue'><b>Loan Status</b></font>
#
# ### Top-5 Major variables to consider for loan prediction:
# 1. <font color='blue'><b>Purpose of Loan</b></font>
# 2. <font color='blue'><b>Employment Length</b></font>
# 3. <font color='blue'><b>Grade</b></font>
# 4. <font color='blue'><b>Interest Rate</b></font>
# 5. <font color='blue'><b>Term</b></font>
```
#### File: loan_data/parsed_kernels/kernel_34.py
```python
from __future__ import division
import itertools
import matplotlib as mpl
from matplotlib.collections import LineCollection
import matplotlib.pyplot as plt
from matplotlib import gridspec
import matplotlib.patheffects as patheffects
import numpy as np
import pandas as pd
from scipy.cluster import hierarchy
import seaborn as sns
from seaborn import cm
from seaborn.axisgrid import Grid
from seaborn.utils import (despine, axis_ticklabels_overlap, relative_luminance, to_utf8)
from seaborn.external.six import string_types
def _index_to_label(index):
"""Convert a pandas index or multiindex to an axis label."""
if isinstance(index, pd.MultiIndex):
return "-".join(map(to_utf8, index.names))
else:
return index.name
def _index_to_ticklabels(index):
"""Convert a pandas index or multiindex into ticklabels."""
if isinstance(index, pd.MultiIndex):
return ["-".join(map(to_utf8, i)) for i in index.values]
else:
return index.values
def _matrix_mask(data, mask):
"""Ensure that data and mask are compatabile and add missing values.
Values will be plotted for cells where ``mask`` is ``False``.
``data`` is expected to be a DataFrame; ``mask`` can be an array or
a DataFrame.
"""
if mask is None:
mask = np.zeros(data.shape, np.bool)
if isinstance(mask, np.ndarray):
# For array masks, ensure that shape matches data then convert
if mask.shape != data.shape:
raise ValueError("Mask must have the same shape as data.")
mask = pd.DataFrame(mask,
index=data.index,
columns=data.columns,
dtype=np.bool)
elif isinstance(mask, pd.DataFrame):
# For DataFrame masks, ensure that semantic labels match data
if not mask.index.equals(data.index) and mask.columns.equals(data.columns):
err = "Mask must have the same index and columns as data."
raise ValueError(err)
# Add any cells with missing data to the mask
# This works around an issue where `plt.pcolormesh` doesn't represent
# missing data properly
mask = mask | pd.isnull(data)
return mask
class _HeatMapper2(object):
"""Draw a heatmap plot of a matrix with nice labels and colormaps."""
def __init__(self, data, vmin, vmax, cmap, center, robust, annot, fmt,
annot_kws, cellsize, cellsize_vmax,
cbar, cbar_kws,
xticklabels=True, yticklabels=True, mask=None, ax_kws=None, rect_kws=None):
"""Initialize the plotting object."""
# We always want to have a DataFrame with semantic information
# and an ndarray to pass to matplotlib
if isinstance(data, pd.DataFrame):
plot_data = data.values
else:
plot_data = np.asarray(data)
data = pd.DataFrame(plot_data)
# Validate the mask and convert to DataFrame
mask = _matrix_mask(data, mask)
plot_data = np.ma.masked_where(np.asarray(mask), plot_data)
# Get good names for the rows and columns
xtickevery = 1
if isinstance(xticklabels, int):
xtickevery = xticklabels
xticklabels = _index_to_ticklabels(data.columns)
elif xticklabels is True:
xticklabels = _index_to_ticklabels(data.columns)
elif xticklabels is False:
xticklabels = []
ytickevery = 1
if isinstance(yticklabels, int):
ytickevery = yticklabels
yticklabels = _index_to_ticklabels(data.index)
elif yticklabels is True:
yticklabels = _index_to_ticklabels(data.index)
elif yticklabels is False:
yticklabels = []
# Get the positions and used label for the ticks
nx, ny = data.T.shape
if not len(xticklabels):
self.xticks = []
self.xticklabels = []
elif isinstance(xticklabels, string_types) and xticklabels == "auto":
self.xticks = "auto"
self.xticklabels = _index_to_ticklabels(data.columns)
else:
self.xticks, self.xticklabels = self._skip_ticks(xticklabels,
xtickevery)
if not len(yticklabels):
self.yticks = []
self.yticklabels = []
elif isinstance(yticklabels, string_types) and yticklabels == "auto":
self.yticks = "auto"
self.yticklabels = _index_to_ticklabels(data.index)
else:
self.yticks, self.yticklabels = self._skip_ticks(yticklabels,
ytickevery)
# Get good names for the axis labels
xlabel = _index_to_label(data.columns)
ylabel = _index_to_label(data.index)
self.xlabel = xlabel if xlabel is not None else ""
self.ylabel = ylabel if ylabel is not None else ""
# Determine good default values for the colormapping
self._determine_cmap_params(plot_data, vmin, vmax,
cmap, center, robust)
# Determine good default values for cell size
self._determine_cellsize_params(plot_data, cellsize, cellsize_vmax)
# Sort out the annotations
if annot is None:
annot = False
annot_data = None
elif isinstance(annot, bool):
if annot:
annot_data = plot_data
else:
annot_data = None
else:
try:
annot_data = annot.values
except AttributeError:
annot_data = annot
if annot.shape != plot_data.shape:
raise ValueError('Data supplied to "annot" must be the same '
'shape as the data to plot.')
annot = True
# Save other attributes to the object
self.data = data
self.plot_data = plot_data
self.annot = annot
self.annot_data = annot_data
self.fmt = fmt
self.annot_kws = {} if annot_kws is None else annot_kws
self.annot_kws.setdefault('color', "black")
self.annot_kws.setdefault('ha', "center")
self.annot_kws.setdefault('va', "center")
self.cbar = cbar
self.cbar_kws = {} if cbar_kws is None else cbar_kws
self.cbar_kws.setdefault('ticks', mpl.ticker.MaxNLocator(6))
self.ax_kws = {} if ax_kws is None else ax_kws
self.rect_kws = {} if rect_kws is None else rect_kws
# self.rect_kws.setdefault('edgecolor', "black")
def _determine_cmap_params(self, plot_data, vmin, vmax,
cmap, center, robust):
"""Use some heuristics to set good defaults for colorbar and range."""
calc_data = plot_data.data[~np.isnan(plot_data.data)]
if vmin is None:
vmin = np.percentile(calc_data, 2) if robust else calc_data.min()
if vmax is None:
vmax = np.percentile(calc_data, 98) if robust else calc_data.max()
self.vmin, self.vmax = vmin, vmax
# Choose default colormaps if not provided
if cmap is None:
if center is None:
self.cmap = cm.rocket
else:
self.cmap = cm.icefire
elif isinstance(cmap, string_types):
self.cmap = mpl.cm.get_cmap(cmap)
elif isinstance(cmap, list):
self.cmap = mpl.colors.ListedColormap(cmap)
else:
self.cmap = cmap
# Recenter a divergent colormap
if center is not None:
vrange = max(vmax - center, center - vmin)
normlize = mpl.colors.Normalize(center - vrange, center + vrange)
cmin, cmax = normlize([vmin, vmax])
cc = np.linspace(cmin, cmax, 256)
self.cmap = mpl.colors.ListedColormap(self.cmap(cc))
def _determine_cellsize_params(self, plot_data, cellsize, cellsize_vmax):
if cellsize is None:
self.cellsize = np.ones(plot_data.shape)
self.cellsize_vmax = 1.0
else:
if isinstance(cellsize, pd.DataFrame):
cellsize = cellsize.values
self.cellsize = cellsize
if cellsize_vmax is None:
cellsize_vmax = cellsize.max()
self.cellsize_vmax = cellsize_vmax
def _skip_ticks(self, labels, tickevery):
"""Return ticks and labels at evenly spaced intervals."""
n = len(labels)
if tickevery == 0:
ticks, labels = [], []
elif tickevery == 1:
ticks, labels = np.arange(n) + .5, labels
else:
start, end, step = 0, n, tickevery
ticks = np.arange(start, end, step) + .5
labels = labels[start:end:step]
return ticks, labels
def _auto_ticks(self, ax, labels, axis):
"""Determine ticks and ticklabels that minimize overlap."""
transform = ax.figure.dpi_scale_trans.inverted()
bbox = ax.get_window_extent().transformed(transform)
size = [bbox.width, bbox.height][axis]
axis = [ax.xaxis, ax.yaxis][axis]
tick, = axis.set_ticks([0])
fontsize = tick.label.get_size()
max_ticks = int(size // (fontsize / 72))
if max_ticks < 1:
return [], []
tick_every = len(labels) // max_ticks + 1
tick_every = 1 if tick_every == 0 else tick_every
ticks, labels = self._skip_ticks(labels, tick_every)
return ticks, labels
def plot(self, ax, cax):
"""Draw the heatmap on the provided Axes."""
# Remove all the Axes spines
despine(ax=ax, left=True, bottom=True)
# Draw the heatmap and annotate
height, width = self.plot_data.shape
xpos, ypos = np.meshgrid(np.arange(width) + .5, np.arange(height) + .5)
data = self.plot_data.data
cellsize = self.cellsize
mask = self.plot_data.mask
if not isinstance(mask, np.ndarray) and not mask:
mask = np.zeros(self.plot_data.shape, np.bool)
annot_data = self.annot_data
if not self.annot:
annot_data = np.zeros(self.plot_data.shape)
# Draw rectangles instead of using pcolormesh
# Might be slower than original heatmap
for x, y, m, val, s, an_val in zip(xpos.flat, ypos.flat, mask.flat, data.flat, cellsize.flat, annot_data.flat):
if not m:
vv = (val - self.vmin) / (self.vmax - self.vmin)
size = np.clip(s / self.cellsize_vmax, 0.1, 1.0)
color = self.cmap(vv)
rect = plt.Rectangle([x - size / 2, y - size / 2], size, size, facecolor=color, **self.rect_kws)
ax.add_patch(rect)
if self.annot:
annotation = ("{:" + self.fmt + "}").format(an_val)
text = ax.text(x, y, annotation, **self.annot_kws)
# add edge to text
text_luminance = relative_luminance(text.get_color())
text_edge_color = ".15" if text_luminance > .408 else "w"
text.set_path_effects([mpl.patheffects.withStroke(linewidth=1, foreground=text_edge_color)])
# Set the axis limits
ax.set(xlim=(0, self.data.shape[1]), ylim=(0, self.data.shape[0]))
# Set other attributes
ax.set(**self.ax_kws)
if self.cbar:
norm = mpl.colors.Normalize(vmin=self.vmin, vmax=self.vmax)
scalar_mappable = mpl.cm.ScalarMappable(cmap=self.cmap, norm=norm)
scalar_mappable.set_array(self.plot_data.data)
cb = ax.figure.colorbar(scalar_mappable, cax, ax, **self.cbar_kws)
cb.outline.set_linewidth(0)
# if kws.get('rasterized', False):
# cb.solids.set_rasterized(True)
# Add row and column labels
if isinstance(self.xticks, string_types) and self.xticks == "auto":
xticks, xticklabels = self._auto_ticks(ax, self.xticklabels, 0)
else:
xticks, xticklabels = self.xticks, self.xticklabels
if isinstance(self.yticks, string_types) and self.yticks == "auto":
yticks, yticklabels = self._auto_ticks(ax, self.yticklabels, 1)
else:
yticks, yticklabels = self.yticks, self.yticklabels
ax.set(xticks=xticks, yticks=yticks)
xtl = ax.set_xticklabels(xticklabels)
ytl = ax.set_yticklabels(yticklabels, rotation="vertical")
# Possibly rotate them if they overlap
ax.figure.draw(ax.figure.canvas.get_renderer())
if axis_ticklabels_overlap(xtl):
plt.setp(xtl, rotation="vertical")
if axis_ticklabels_overlap(ytl):
plt.setp(ytl, rotation="horizontal")
# Add the axis labels
ax.set(xlabel=self.xlabel, ylabel=self.ylabel)
# Invert the y axis to show the plot in matrix form
ax.invert_yaxis()
def heatmap2(data, vmin=None, vmax=None, cmap=None, center=None, robust=False,
annot=None, fmt=".2g", annot_kws=None,
cellsize=None, cellsize_vmax=None,
cbar=True, cbar_kws=None, cbar_ax=None,
square=False, xticklabels="auto", yticklabels="auto",
mask=None, ax=None, ax_kws=None, rect_kws=None):
# Initialize the plotter object
plotter = _HeatMapper2(data, vmin, vmax, cmap, center, robust,
annot, fmt, annot_kws,
cellsize, cellsize_vmax,
cbar, cbar_kws, xticklabels,
yticklabels, mask, ax_kws, rect_kws)
# Draw the plot and return the Axes
if ax is None:
ax = plt.gca()
if square:
ax.set_aspect("equal")
# delete grid
ax.grid(False)
plotter.plot(ax, cbar_ax)
return ax
# ### Example
#
# I'll demonstrate by plotting the lending club dataset.
# Before plotting, I preprocessed the data using insights from https://www.kaggle.com/erykwalczak/initial-loan-book-analysis.
# In this example, I only use 5 features: grade, loan_amnt (binned), purpose, annual_inc (binned) and loan_status (bad status or not).
# In[ ]:
np.random.seed(0)
sns.set()
# input
df = pd.read_csv("../input/loan.csv")
columns_used = ["annual_inc", "grade", "loan_amnt", "loan_status", "purpose"]
df = df[columns_used].copy()
# binning
def make_desc(bin):
ret = []
for i in range(len(bin)+1):
d1 = "" if i == 0 else "{}k".format(round(bin[i-1] / 1000.0))
d2 = "" if i == len(bin) else "{}k".format(round(bin[i] / 1000.0))
ret.append("[{}] {}-{}".format(i, d1, d2))
return np.array(ret)
annual_inc_bin = [20000.0, 40000.0, 60000.0, 80000.0, 100000.0, 150000.0, 200000.0]
annual_inc_bin_desc = make_desc(annual_inc_bin)
df["annual_inc_bin"] = annual_inc_bin_desc[np.digitize(df["annual_inc"], annual_inc_bin)]
loan_amnt_bin = [5000, 10000, 20000, 30000, 35000]
loan_amnt_bin_desc = make_desc(loan_amnt_bin)
df["loan_amnt_bin"] = loan_amnt_bin_desc[np.digitize(df["loan_amnt"], loan_amnt_bin)]
# define bad_status
bad_status = ["Charged Off ", "Default",
"Does not meet the credit policy. Status:Charged Off",
"In Grace Period",
"Default Receiver",
"Late (16-30 days)", "Late (31-120 days)"]
df["bad_status"] = np.where(df["loan_status"].isin(bad_status), 1, 0)
for c in columns_used:
print("--- {} ---".format(c))
print(df[c].value_counts())
print()
# Now let's plot the data.
# This time, I would like to look at the probability of bad status in a matrix, together with the number of samples in each cell.
# First, create a function for plotting.
# In[ ]:
def plot(index, columns):
values = "bad_status"
vmax = 0.10
cellsize_vmax = 10000
g_ratio = df.pivot_table(index=index, columns=columns, values=values, aggfunc="mean")
g_size = df.pivot_table(index=index, columns=columns, values=values, aggfunc="size")
annot = np.vectorize(lambda x: "" if np.isnan(x) else "{:.1f}%".format(x * 100))(g_ratio)
# adjust visual balance
figsize = (g_ratio.shape[1] * 0.8, g_ratio.shape[0] * 0.8)
cbar_width = 0.05 * 6.0 / figsize[0]
f, ax = plt.subplots(1, 1, figsize=figsize)
cbar_ax = f.add_axes([.91, 0.1, cbar_width, 0.8])
heatmap2(g_ratio, ax=ax, cbar_ax=cbar_ax,
vmax=vmax, cmap="PuRd", annot=annot, fmt="s", annot_kws={"fontsize":"small"},
cellsize=g_size, cellsize_vmax=cellsize_vmax,
square=True, ax_kws={"title": "{} x {}".format(index, columns)})
plt.show()
# Let's take a look at each combination of features.
# In[ ]:
plot("grade", "loan_amnt_bin")
# * Grade is a strong feature.
# * Grades F and G have relatively few samples.
# * Loan amount is not a strong feature.
# In[ ]:
plot("grade", "purpose")
# * Grade is a strong feature in almost all purposes.
# * Purposes like credit_card and debt_consolidation have many samples.
# * The educational purpose has a high probability of bad status, but such loans are few.
# In[ ]:
plot("grade", "annual_inc_bin")
# * Grade is also a strong feature here.
# * Annual income is not a strong feature, but it is still informative.
# In[ ]:
plot("loan_amnt_bin", "purpose")
# * Purpose is a strong feature.
# * Loan amount is not a strong feature, but it is informative depending on the purpose.
# In[ ]:
plot("loan_amnt_bin", "annual_inc_bin")
# I think this is the most interesting plot in this example.
# * Loan amount and annual income are correlated.
# * The balance between loan amount and annual income is a strong feature. These cells have enough samples, so the estimates should be statistically reliable.
# In[ ]:
plot("purpose", "annual_inc_bin")
# * Annual income is a strong feature. The higher the income, the lower the probability. However, for small business it goes the opposite way.
# ### Conclusion
# As you can see, it is sometimes beneficial to vary the cell sizes in a heatmap and display one more piece of information.
# I hope this is helpful for analyzing data in some competitions.
```
#### File: loan_data/parsed_kernels/kernel_87.py
```python
import matplotlib.pyplot as plt
import numpy as np
import sqlite3
from sklearn import tree
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
#from subprocess import check_output
#print(check_output(["ls", "../input"]).decode("utf8"))
def sql_query(s):
"""Return results for a SQL query.
Arguments:
s (str) -- SQL query string
Returns:
(list) -- SQL query results
"""
conn = sqlite3.connect("../input/database.sqlite")
c = conn.cursor()
c.execute(s)
result = c.fetchall()
conn.close()
return result
def print_details():
"""Print database details including table names and the number of rows.
"""
table_names = sql_query("SELECT name FROM sqlite_master " +
"WHERE type='table' " +
"ORDER BY name;")[0][0]
print("Names of tables in SQLite database: {0}".format(table_names))
num_rows = sql_query("SELECT COUNT(*) FROM loan;")[0][0]
print("Number of records in table: {0}".format(num_rows))
def print_column_names():
"""Print the column names in the 'loan' table.
Note that the "index" column name is specific to Python and is not part of
the original SQLite database.
"""
conn = sqlite3.connect("../input/database.sqlite")
conn.row_factory = sqlite3.Row
c = conn.cursor()
c.execute("SELECT * FROM loan LIMIT 2;")
r = c.fetchone()
i = 1
print("Column names:")
for k in r.keys():
print("{0:d}\t{1}".format(i, k))
i += 1
conn.close()
print_details()
print_column_names()
# # Data exploration
# Explore loan data records that contain specific strings in the **title** field. The search strings investigated are:
#
# * "credit card"
# * "medical"
# * "debt"
# In[ ]:
emp_length_dict = {'n/a':0,
'< 1 year':0,
'1 year':1,
'2 years':2,
'3 years':3,
'4 years':4,
'5 years':5,
'6 years':6,
'7 years':7,
'8 years':8,
'9 years':9,
'10+ years':10}
home_ownership_dict = {'MORTGAGE':0,
'OWN':1,
'RENT':2,
'OTHER':3,
'NONE':4,
'ANY':5}
features_dict = {'loan_amnt':0,
'int_rate':1,
'annual_inc':2,
'delinq_2yrs':3,
'open_acc':4,
'dti':5,
'emp_length':6,
'funded_amnt':7,
'tot_cur_bal':8,
'home_ownership':9}
def get_data(s):
"""Return features and targets for a specific search term.
Arguments:
s (str) -- string to search for in loan "title" field
Returns:
(list of lists) -- [list of feature tuples, list of targets]
(features) -- [(sample1 features), (sample2 features),...]
(target) -- [sample1 target, sample2 target,...]
"""
data = sql_query("SELECT " +
"loan_amnt,int_rate,annual_inc," +
"loan_status,title,delinq_2yrs," +
"open_acc,dti,emp_length," +
"funded_amnt,tot_cur_bal,home_ownership " +
"FROM loan " +
"WHERE application_type='INDIVIDUAL';")
features_list = []
target_list = []
n = 0 # counter, number of total samples
n0 = 0 # counter, number of samples with target=0
n1 = 0 # counter, number of samples with target=1
for d in data:
# d[0] (loan_amnt) -- must have type 'float'
# d[1] (int_rate) -- must have type 'str'
# d[2] (annual_inc) -- must have type 'float'
# d[3] (loan_status) -- must have type 'str'
# d[4] (title) -- must have type 'str'
# d[5] (delinq_2yrs) -- must have type 'float'
# d[6] (open_acc) -- must have type 'float'
# d[7] (dti) -- must have type 'float'
# d[8] (emp_length) -- must have type 'str'
# d[9] (funded_amnt) -- must have type 'float'
# d[10] (tot_cur_bal) -- must have type 'float'
# d[11] (home_ownership) -- must have type 'str'
test0 = isinstance(d[0], float)
test1 = isinstance(d[1], str)
test2 = isinstance(d[2], float)
test3 = isinstance(d[3], str)
test4 = isinstance(d[4], str)
test5 = isinstance(d[5], float)
test6 = isinstance(d[6], float)
test7 = isinstance(d[7], float)
test8 = isinstance(d[8], str)
test9 = isinstance(d[9], float)
test10 = isinstance(d[10], float)
if (test0 and test1 and test2 and test3 and test4 and test5 and
test6 and test7 and test8 and test9 and test10):
# Ensure that "int_rate" string value can be converted to float
try:
d1_float = float(d[1].replace("%", ""))
except:
continue
# Ensure that "emp_length" string value is in dict
try:
e = emp_length_dict[d[8]]
except:
print("Error e")
continue
# Ensure that "home_ownership" string value is in dict
try:
h = home_ownership_dict[d[11]]
except:
print("Error h")
continue
# Set "title" string to lowercase for search purposes
if s.lower() in d[4].lower():
if d[3] == 'Fully Paid' or d[3] == 'Current':
target = 0 # Define target value as 0
n += 1
n0 += 1
elif 'Late' in d[3] or d[3] == 'Charged Off':
target = 1 # Define target value as 1
n += 1
n1 += 1
else:
continue
# Define features tuple:
# (loan_amnt, int_rate, annual_inc)
features = (d[0],
float(d[1].replace("%", "")),
d[2],
d[5],
d[6],
d[7],
emp_length_dict[d[8]],
d[9],
d[10],
home_ownership_dict[d[11]])
features_list.append(features)
target_list.append(target)
else:
pass
print("----------------------------------------")
print(s)
print("----------------------------------------")
print("Total number of samples: {0}".format(n))
print("% of all samples with target=0: {0:3.4f}%".format(100*n0/(n0+n1)))
print("% of all samples with target=1: {0:3.4f}%".format(100*n1/(n0+n1)))
print("")
result = [features_list, target_list]
return result
def create_scatter_plot(x0_data, y0_data,
x1_data, y1_data,
pt, pa,
x_label, y_label,
axis_type):
plt.figure(num=2, figsize=(8, 8))
ax = plt.gca()
ax.set_axis_bgcolor("#BBBBBB")
ax.set_axisbelow(True)
plt.subplots_adjust(bottom=0.1, left=0.15, right=0.95, top=0.95)
plt.title(pt, fontsize=16)
plt.axis(pa)
plt.xlabel(x_label, fontsize=16)
plt.ylabel(y_label, fontsize=16)
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
if axis_type == 'semilogx':
plt.semilogx(x0_data, y0_data, label='0: "Fully Paid" or "Current"',
linestyle='None', marker='.', markersize=8,
alpha=0.5, color='b')
plt.semilogx(x1_data, y1_data, label='1: "Late" or "Charged Off"',
linestyle='None', marker='.', markersize=8,
alpha=0.5, color='r')
elif axis_type == 'semilogy':
plt.semilogy(x0_data, y0_data, label='0: "Fully Paid" or "Current"',
linestyle='None', marker='.', markersize=8,
alpha=0.5, color='b')
plt.semilogy(x1_data, y1_data, label='1: "Late" or "Charged Off"',
linestyle='None', marker='.', markersize=8,
alpha=0.5, color='r')
elif axis_type == "loglog":
plt.loglog(x0_data, y0_data, label='0: "Fully Paid" or "Current"',
linestyle='None', marker='.', markersize=8,
alpha=0.5, color='b')
plt.loglog(x1_data, y1_data, label='1: "Late" or "Charged Off"',
linestyle='None', marker='.', markersize=8,
alpha=0.5, color='r')
else:
plt.plot(x0_data, y0_data, label='0: "Fully Paid" or "Current"',
linestyle='None', marker='.', markersize=8,
alpha=0.5, color='b')
plt.plot(x1_data, y1_data, label='1: "Late" or "Charged Off"',
linestyle='None', marker='.', markersize=8,
alpha=0.5, color='r')
plt.grid(b=True, which='major', axis='both',
linestyle="-", color="white")
plt.legend(loc='upper right', numpoints=1, fontsize=12)
plt.show()
plt.clf()
def plot_two_fields(data, s, f1, f2,
pa, x_label, y_label,
axis_type):
# data (list of lists) -- data from "get_data" function
# s (string) -- search string
# f1 (string) -- database field 1
# f2 (string) -- database field 2
# pa (list) -- plot axis
# x_label (string) -- x-axis label
# y_label (string) -- y-axis label
# axis_type (string) -- axis scaling: 'semilogx', 'semilogy', 'loglog' or 'standard'
x0_list = [] # Fully Paid or Current
y0_list = [] # Fully Paid or Current
x1_list = [] # Late or Charged Off
y1_list = [] # Late or Charged Off
features_list = data[0]
target_list = data[1]
for i in range(len(features_list)):
x = features_list[i][features_dict[f1]]
y = features_list[i][features_dict[f2]]
if target_list[i] == 0:
x0_list.append(x)
y0_list.append(y)
elif target_list[i] == 1:
x1_list.append(x)
y1_list.append(y)
else:
pass
create_scatter_plot(
x0_list, y0_list,
x1_list, y1_list,
"Loan title search term: " + s, pa,
x_label, y_label,
axis_type)
# ### Search string: "credit card"
# In[ ]:
cc_data = get_data('credit card')
# In[ ]:
plot_two_fields(cc_data, 'credit card', 'loan_amnt', 'int_rate',
[1e2, 1e5, 5.0, 30.0], 'loan amount', 'interest rate',
'semilogx')
# In[ ]:
plot_two_fields(cc_data, 'credit card', 'annual_inc', 'int_rate',
[1e3, 1e7, 5.0, 30.0], 'annual income', 'interest rate',
'semilogx')
# In[ ]:
plot_two_fields(cc_data, 'credit card', 'annual_inc', 'loan_amnt',
[1e3, 1e7, 0.0, 35000.0], 'annual income', 'loan amount',
'semilogx')
# In[ ]:
plot_two_fields(cc_data, 'credit card', 'loan_amnt', 'funded_amnt',
[0.0, 35000.0, 0.0, 35000.0], 'loan amount', 'funded amount',
'standard')
# In[ ]:
plot_two_fields(cc_data, 'credit card', 'home_ownership', 'funded_amnt',
[-1, 6, 0.0, 35000.0], 'home ownership', 'funded amount',
'standard')
# ### Search string: "medical"
# In[ ]:
medical_data = get_data('medical')
# In[ ]:
plot_two_fields(medical_data, 'medical', 'loan_amnt', 'int_rate',
[1e2, 1e5, 5.0, 30.0], 'loan amount', 'interest rate',
'semilogx')
# In[ ]:
plot_two_fields(medical_data, 'medical', 'annual_inc', 'int_rate',
[1e3, 1e7, 5.0, 30.0], 'annual income', 'interest rate',
'semilogx')
# In[ ]:
plot_two_fields(medical_data, 'medical', 'annual_inc', 'loan_amnt',
[1e3, 1e7, 0.0, 35000.0], 'annual income', 'loan amount',
'semilogx')
# In[ ]:
plot_two_fields(medical_data, 'medical', 'loan_amnt', 'funded_amnt',
[0.0, 35000.0, 0.0, 35000.0], 'loan amount', 'funded amount',
'standard')
# In[ ]:
plot_two_fields(medical_data, 'medical', 'home_ownership', 'funded_amnt',
[-1, 6, 0.0, 35000.0], 'home ownership', 'funded amount',
'standard')
# ### Search string: "debt"
# In[ ]:
debt_data = get_data('debt')
# In[ ]:
plot_two_fields(debt_data, 'debt', 'loan_amnt', 'int_rate',
[1e2, 1e5, 5.0, 30.0], 'loan amount', 'interest rate',
'semilogx')
# In[ ]:
plot_two_fields(debt_data, 'debt', 'annual_inc', 'int_rate',
[1e3, 1e7, 5.0, 30.0], 'annual income', 'interest rate',
'semilogx')
# In[ ]:
plot_two_fields(debt_data, 'debt', 'annual_inc', 'loan_amnt',
[1e3, 1e7, 0.0, 35000.0], 'annual income', 'loan amount',
'semilogx')
# In[ ]:
plot_two_fields(debt_data, 'debt', 'loan_amnt', 'funded_amnt',
[0.0, 35000.0, 0.0, 35000.0], 'loan amount', 'funded amount',
'standard')
# In[ ]:
plot_two_fields(debt_data, 'debt', 'home_ownership', 'funded_amnt',
[-1, 6, 0.0, 35000.0], 'home ownership', 'funded amount',
'standard')
# # Decision tree classifer for predicting the loan status
# A decision tree classifier (scikit-learn) is used to predict the **loan_status**. A binary classification system is used, in which the values for the **loan_status** field are classified as follows:
#
# * 0: "Fully Paid" or "Current"
# * 1: "Late" (for any time period) or "Charged Off"
#
# The loan status category (0 or 1) is hereafter referred to as the "target".
# In[ ]:
def create_classifier(f, t, nt):
"""Create classifier for predicting loan status. Print accuracy.
Arguments:
f (list of tuples) -- [(sample 1 features), (sample 2 features),...]
t (list) -- [sample 1 target, sample 2 target,...]
nt (int) -- number of samples to use in training set
"""
training_set_features = []
training_set_target = []
testing_set_features = []
testing_set_target = []
print("Number of training set samples:\t{0}".format(nt))
print("Number of testing set samples:\t{0}".format(len(f)-nt))
print("")
# Build training set
for i in np.arange(0, nt, 1):
training_set_features.append(f[i])
training_set_target.append(t[i])
# Build testing set
for i in np.arange(nt, len(f), 1):
testing_set_features.append(f[i])
testing_set_target.append(t[i])
clf = tree.DecisionTreeClassifier()
clf = clf.fit(training_set_features, training_set_target)
n = 0
n_correct = 0
n0 = 0
n0_correct = 0
n1 = 0
n1_correct = 0
# Compare predictions to testing data
for i in range(len(testing_set_features)):
t = testing_set_target[i]
p = clf.predict(np.asarray(testing_set_features[i]).reshape(1, -1))
# Category 0
if t == 0:
if t == p[0]:
equal = "yes"
n_correct += 1
n0_correct += 1
else:
equal = "no"
n += 1
n0 += 1
# Category 1
elif t == 1:
if t == p[0]:
equal = "yes"
n_correct += 1
n1_correct += 1
else:
equal = "no"
n += 1
n1 += 1
else:
pass
n_accuracy = 100.0 * n_correct / n
n0_accuracy = 100.0 * n0_correct / n0
n1_accuracy = 100.0 * n1_correct / n1
print("Accuracy of predicting testing set target values:")
# Accuracy - manual calculation:
print(" All samples (method 1): {0:3.4f}%".format(n_accuracy))
# Accuracy - scikit-learn built-in method:
print(" All samples (method 2): {0:3.4f}%".format(
100.0 * clf.score(testing_set_features, testing_set_target)))
print(" Samples with target=0: {0:3.4f}%".format(n0_accuracy))
print(" Samples with target=1: {0:3.4f}%\n".format(n1_accuracy))
# ### Search string: "credit card"
# In[ ]:
create_classifier(cc_data[0], cc_data[1], 2000)
# ### Search string: "medical"
# In[ ]:
create_classifier(medical_data[0], medical_data[1], 2000)
# ### Search string: "debt"
# In[ ]:
create_classifier(debt_data[0], debt_data[1], 2000)
# # Conclusions
# A decision tree classifier was used to predict the loan status category (0 or 1) for loan data associated with specific search strings. Loans with a **poor** loan status category (target=1) were predicted with an accuracy in the range of 16-18% for the three search strings investigated.
#
# The ability to accurately predict loans that are likely to end up with a **poor** outcome is valuable for lenders since this reduces the chance of funding a loan that results in a net financial loss.
#
# # Limitations
#
# * The **poor** loan data was plotted after the **good** loan data. Consequently, many of the **good** loan data points are hidden underneath the **bad** loan data points, resulting in an over representation of the **bad** data points in the plots.
# * The decision tree classifier was tested with only a single training set for each of the three search strings.
# * The date/time features of the data have not been taken into account.
#
# # Future work
#
# * Improve data visualization so that fewer **good** loan data points are hidden under the **bad** loan data points.
# * Test the decision tree classifier with multiple training sets for each of the three search strings.
# * Improve the prediction accuracy.
# * Consider the date/time features of the data.
#
# ***Comments/critiques are welcomed, thanks!***
```
#### File: loan_data/parsed_kernels/kernel_91.py
```python
import pandas as pd
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
from sklearn.cross_validation import train_test_split
from sklearn import tree
from sklearn import metrics
from sklearn.metrics import precision_recall_fscore_support
from sklearn.cross_validation import cross_val_score, LeaveOneOut
from scipy.stats import sem
# #### Importing Data and getting basic information of dataset
# In[ ]:
data = pd.read_csv("../input/loan.csv")
# In[ ]:
data.head()
# In[ ]:
data.shape
# # Cleansing
# #### Dropping unnecessary columns
# In[ ]:
colsRem = ["id","member_id","desc","emp_title","policy_code","pymnt_plan","title","url"]
data.drop(colsRem,axis=1,inplace=True)
data.shape
# In[ ]:
df = data
# Creating a function to sanitize the 'emp_length' attribute
# In[ ]:
def getYear(year):
yearNo = year.split(" ")[0]
return int(yearNo)
# Sanitizing the emp_length column
# In[ ]:
df["emp_length"].replace("10+ years","10",inplace=True)
df["emp_length"].replace("< 1 year","0",inplace=True)
df["emp_length"].replace("n/a","-1",inplace=True)
df["emp_length"] = df["emp_length"].apply(getYear)
df["emp_length"].unique()
# Sanitizing the term column
# In[ ]:
def getTerm(term):
month = term.split(" ")[1]
return int(month)
# In[ ]:
df["term"] = df["term"].apply(getTerm)
df["term"].unique()
# Sanitizing the Zipcode column
# In[ ]:
def getZip(zipcode):
zipcd = zipcode.split("x")[0]
return int(zipcd)
# In[ ]:
df["zip_code"] = df["zip_code"].apply(getZip)
df["zip_code"].head()
# #### Sanitizing the Date inputs
# In[ ]:
cols = ["earliest_cr_line","issue_d","last_credit_pull_d","last_pymnt_d","next_pymnt_d"]
for col in cols:
df[col] = pd.to_datetime(df[col],format="%b-%Y")
df[cols].head()
# #### Mapping loan_status
# In[ ]:
df["loan_status"].replace("Late (31-120 days)","Late",inplace=True)
df["loan_status"].replace("Late (16-30 days)","Late",inplace=True)
df["loan_status"].replace("Does not meet the credit policy. Status:Fully Paid","Fully Paid",inplace=True)
df["loan_status"].replace("Does not meet the credit policy. Status:Charged Off","Charged Off",inplace=True)
df["loan_status"].unique()
# #### Correlation Matrix
# In[ ]:
df["loan_status_num"] = df["loan_status"]
loanStat = {'Default':0, 'Charged Off':1,'Late':2,'In Grace Period':3,'Issued':4,'Current':5,'Fully Paid':6}
df['loan_status_num'] = df['loan_status_num'].map(loanStat)
# In[ ]:
corr =df.corr()
# #### Plotting correlations using HeatMap
# In[ ]:
plt.figure(figsize=(10, 10))
plt.imshow(corr, cmap='RdYlGn', interpolation='none', aspect='auto')
plt.colorbar()
plt.xticks(range(len(corr)), corr.columns, rotation='vertical')
plt.yticks(range(len(corr)), corr.columns);
plt.suptitle('Correlations Heat Map', fontsize=15, fontweight='bold')
plt.show()
# #### Extraction of the correlation of loan_status_num with other features
# In[ ]:
corrvalues = corr.tail(1)
import numpy as np
corrvalues = np.round(corrvalues, decimals=2)
# #### Extracting features with correlation greater than 0.1 or less than -0.1
# In[ ]:
c =[]
for cols in corrvalues:
if corrvalues[cols][0] <= -0.1 or corrvalues[cols][0] >= 0.1:
c.append(cols)
print (c)
dfCl = df[c]
# In[ ]:
dfCl.drop("annual_inc_joint",axis=1,inplace=True)
dfCl.shape
# ### Normalizing the Inputs
# In[ ]:
df_norm = (dfCl - dfCl.mean()) / (dfCl.max() - dfCl.min())
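# Note (added sketch, an assumption about the intent of the line above): this is
# mean normalization, i.e. each column is centered on its mean and scaled by its
# range, so each normalized column spans an interval of width 1 around 0 rather
# than [0, 1] as with plain min-max scaling. A quick check:
print(df_norm.min().min(), df_norm.max().max())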
# In[ ]:
dfCl['loan_status_num'].unique()
# In[ ]:
corr = df_norm.corr()
plt.figure(figsize=(10, 10))
plt.imshow(df_norm.corr(), cmap='RdYlGn', interpolation='none', aspect='auto')
plt.colorbar()
plt.xticks(range(len(corr)), corr.columns, rotation='vertical')
plt.yticks(range(len(corr)), corr.columns);
plt.suptitle('Correlations Heat Map of Normalized Data', fontsize=15, fontweight='bold')
plt.show()
# #### Checking for null values in the normalized dataframe
# In[ ]:
df_norm.isnull().sum()
# # Modelling a Decision Tree with Scikit-Learn
# #### Test train Split
# In[ ]:
loan = dfCl
loanX = loan.drop("loan_status_num",axis=1)
loanX = np.array(loanX)
loanY = loan["loan_status_num"]
loanY = np.array(loanY)
X_train, X_test, y_train, y_test = train_test_split(loanX, loanY, test_size=0.25, random_state=33)
# #### Fitting the model
# In[ ]:
classifier = tree.DecisionTreeClassifier(criterion='entropy', max_depth=3,min_samples_leaf=5)
classifier = classifier.fit(X_train,y_train)
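# Optional inspection (a sketch added for clarity, not part of the original
# notebook): a fitted DecisionTreeClassifier exposes the relative importance of
# each input column via feature_importances_.
print(classifier.feature_importances_)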
# #### Generating Report on the fitted model
# In[ ]:
def performance_measure(X,y,classifier, show_accuracy=True, show_precision = True, show_classification_report=True, show_confusion_matrix=True):
y_pred=classifier.predict(X)
if show_accuracy:
print("Accuracy:{0:.3f}".format(metrics.accuracy_score(y,y_pred)),"\n")
if show_precision:
print("Precision Report")
print("Precision,Recall,F-score")
print(precision_recall_fscore_support(y, y_pred, average='weighted'))
if show_confusion_matrix:
print("Confusion matrix")
print(metrics.confusion_matrix(y,y_pred),"\n")
performance_measure(X_train,y_train,classifier, show_classification_report=True, show_confusion_matrix=True)
# #### Leave-One-Out implementation (cross-validation)
# #### This may take a long time depending on the specs of the machine used.
# In[ ]:
def loo_cross_validation(X_train, y_train, classifier):  # renamed so it does not shadow sklearn's LeaveOneOut
# Perform Leave-One-Out cross validation
# We are performing one classification per training sample, so this can be slow!
loo = LeaveOneOut(X_train[:].shape[0])
scores=np.zeros(X_train[:].shape[0])
for train_index,test_index in loo:
X_train_cv, X_test_cv= X_train[train_index], X_train[test_index]
y_train_cv, y_test_cv= y_train[train_index], y_train[test_index]
classifier = classifier.fit(X_train_cv,y_train_cv)
y_pred=classifier.predict(X_test_cv)
scores[test_index]=metrics.accuracy_score(y_test_cv.astype(int), y_pred.astype(int))
print (("Mean score: {0:.3f} (+/-{1:.3f})").format(np.mean(scores), sem(scores)))
# In[ ]:
loo_cross_validation(X_train, y_train, classifier)
```
#### File: university_rankings/converted_notebooks/kernel_84.py
```python
import IPython
import numpy as np
import scipy as sp
import pandas as pd
import matplotlib
from matplotlib import pyplot as plt
matplotlib.style.use('ggplot')
get_ipython().run_line_magic('matplotlib', 'inline')
import re
import math
from scipy import stats
from sklearn import datasets
from sklearn.cluster import KMeans
import sklearn.metrics as sm
import matplotlib.patches as mpatches
# Setting options
pd.set_option('display.max_rows', 5000)
pd.set_option('display.max_columns', 5000)
pd.set_option('display.width', 5000)
# In[ ]:
# Loading data
times_df = pd.read_csv('../input/timesData.csv')
cwur_df = pd.read_csv('../input/cwurData.csv')
shanghai_df = pd.read_csv('../input/shanghaiData.csv')
# In[ ]:
# Cleaning data
times_df = times_df.replace("École Normale Supérieure", "Ecole Normale Superieure")
times_df = times_df.replace("École Polytechnique", "Ecole Polytechnique")
times_df = times_df.replace("École Polytechnique Fédérale de Lausanne","Ecole Polytechnique Federale de Lausanne")
times_df = times_df.replace("ETH Zurich – Swiss Federal Institute of Technology Zurich",
"Swiss Federal Institute of Technology Zurich")
times_df = times_df.replace("King’s College London", "King's College London")
times_df = times_df.replace("Rutgers, the State University of New Jersey", "Rutgers University, New Brunswick")
times_df = times_df.replace("The University of Queensland", "University of Queensland")
times_df = times_df.replace("University of Göttingen", "University of Gottingen")
times_df = times_df.replace("University of Michigan", "University of Michigan, Ann Arbor")
times_df = times_df.replace("University of Minnesota", "University of Minnesota, Twin Cities")
times_df = times_df.replace("Paris-Sud University", "University of Paris-Sud")
times_df = times_df.replace("Washington University in St Louis", "Washington University in St. Louis")
times_df = times_df.replace("University of Massachusetts", "University of Massachusetts, Amherst")
times_df = times_df.replace("Wageningen University and Research Center", "Wageningen University and Research Centre")
times_df = times_df.replace("Indiana University", "Indiana University Bloomington")
times_df = times_df.replace("Paris Diderot University – Paris 7", "Paris Diderot University")
times_df = times_df.replace("KTH Royal Institute of Technology", "Royal Institute of Technology")
times_df = times_df.replace("Université Libre de Bruxelles", "University Libre Bruxelles")
times_df = times_df.replace("University of São Paulo", "University of Sao Paulo")
times_df = times_df.replace("Université Catholique de Louvain", "Catholic University of Louvain")
times_df = times_df.replace("Aix-Marseille University", "Aix Marseille University")
cwur_df = cwur_df.replace("University of Göttingen", "University of Gottingen")
cwur_df = cwur_df.replace("École normale supérieure - Paris", "Ecole Normale Superieure")
cwur_df = cwur_df.replace("École Polytechnique", "Ecole Polytechnique")
cwur_df = cwur_df.replace("Indiana University - Bloomington", "Indiana University Bloomington")
cwur_df = cwur_df.replace("Ludwig Maximilian University of Munich", "LMU Munich")
cwur_df = cwur_df.replace("Ohio State University, Columbus", "Ohio State University")
cwur_df = cwur_df.replace("Paris Diderot University - Paris 7", "Paris Diderot University")
cwur_df = cwur_df.replace("Pennsylvania State University, University Park", "Pennsylvania State University")
cwur_df = cwur_df.replace("Pierre-and-Marie-Curie University", "Pierre and Marie Curie University")
cwur_df = cwur_df.replace("Purdue University, West Lafayette", "Purdue University")
cwur_df = cwur_df.replace("Rutgers University-New Brunswick", "Rutgers University, New Brunswick")
cwur_df = cwur_df.replace("Swiss Federal Institute of Technology in Zurich", "Swiss Federal Institute of Technology Zurich")
cwur_df = cwur_df.replace("Swiss Federal Institute of Technology in Lausanne","Ecole Polytechnique Federale de Lausanne")
cwur_df = cwur_df.replace("Technion \xe2\x80\x93 Israel Institute of Technology", "Technion-Israel Institute of Technology")
cwur_df = cwur_df.replace("Texas A&M University, College Station", "Texas A&M University")
cwur_df = cwur_df.replace("University of Illinois at Urbana–Champaign", "University of Illinois at Urbana-Champaign")
cwur_df = cwur_df.replace("University of Pittsburgh - Pittsburgh Campus", "University of Pittsburgh")
cwur_df = cwur_df.replace("University of Washington - Seattle", "University of Washington")
cwur_df = cwur_df.replace("University of Wisconsin–Madison", "University of Wisconsin-Madison")
cwur_df = cwur_df.replace("Katholieke Universiteit Leuven", "KU Leuven")
cwur_df = cwur_df.replace("Ruprecht Karl University of Heidelberg", "Heidelberg University")
cwur_df = cwur_df.replace("London School of Economics", "London School of Economics and Political Science")
cwur_df = cwur_df.replace("University of Massachusetts Amherst", "University of Massachusetts, Amherst")
cwur_df = cwur_df.replace("Technion – Israel Institute of Technology", "Technion Israel Institute of Technology")
cwur_df = cwur_df.replace("University of Colorado Denver", "University of Colorado at Denver")
cwur_df = cwur_df.replace("Albert Ludwig University of Freiburg", "University of Freiburg")
cwur_df = cwur_df.replace("Université libre de Bruxelles", "University Libre Bruxelles")
cwur_df = cwur_df.replace("University of São Paulo", "University of Sao Paulo")
cwur_df = cwur_df.replace("Aix-Marseille University", "Aix Marseille University")
cwur_df = cwur_df.replace("Université catholique de Louvain", "Catholic University of Louvain")
cwur_df = cwur_df.replace("Trinity College, Dublin", "Trinity College Dublin")
shanghai_df = shanghai_df.replace("Arizona State University - Tempe", "Arizona State University")
shanghai_df = shanghai_df.replace("Ecole Normale Superieure - Paris", "Ecole Normale Superieure")
shanghai_df = shanghai_df.replace("Massachusetts Institute of Technology (MIT)", "Massachusetts Institute of Technology")
shanghai_df = shanghai_df.replace("Pennsylvania State University - University Park", "Pennsylvania State University")
shanghai_df = shanghai_df.replace("Pierre and Marie Curie University - Paris 6", "Pierre and Marie Curie University")
shanghai_df = shanghai_df.replace("Purdue University - West Lafayette", "Purdue University")
shanghai_df = shanghai_df.replace("Rutgers, The State University of New Jersey - New Brunswick",
"Rutgers University, New Brunswick")
shanghai_df = shanghai_df.replace("Technical University Munich", "Technical University of Munich")
shanghai_df = shanghai_df.replace("Texas A & M University", "Texas A&M University")
shanghai_df = shanghai_df.replace("Texas A&M University - College Station", "Texas A&M University")
shanghai_df = shanghai_df.replace("The Australian National University", "Australian National University")
shanghai_df = shanghai_df.replace("The Hebrew University of Jerusalem", "Hebrew University of Jerusalem")
shanghai_df = shanghai_df.replace("The Imperial College of Science, Technology and Medicine", "Imperial College London")
shanghai_df = shanghai_df.replace("The Johns Hopkins University", "Johns Hopkins University")
shanghai_df = shanghai_df.replace("The Ohio State University - Columbus","Ohio State University")
shanghai_df = shanghai_df.replace("The University of Edinburgh","University of Edinburgh")
shanghai_df = shanghai_df.replace("The University of Manchester", "University of Manchester")
shanghai_df = shanghai_df.replace("The University of Melbourne","University of Melbourne")
shanghai_df = shanghai_df.replace("The University of Queensland", "University of Queensland")
shanghai_df = shanghai_df.replace("The University of Texas at Austin", "University of Texas at Austin")
shanghai_df = shanghai_df.replace("The University of Texas Southwestern Medical Center at Dallas",
"University of Texas Southwestern Medical Center")
shanghai_df = shanghai_df.replace("The University of Tokyo","University of Tokyo")
shanghai_df = shanghai_df.replace("The University of Western Australia", "University of Western Australia")
shanghai_df = shanghai_df.replace("University of California-Berkeley", "University of California, Berkeley")
shanghai_df = shanghai_df.replace("University of Colorado at Boulder", "University of Colorado Boulder")
shanghai_df = shanghai_df.replace("University of Michigan - Ann Arbor", "University of Michigan, Ann Arbor")
shanghai_df = shanghai_df.replace("University of Michigan-Ann Arbor", "University of Michigan, Ann Arbor")
shanghai_df = shanghai_df.replace("University of Paris Sud (Paris 11)", "University of Paris-Sud")
shanghai_df = shanghai_df.replace("University of Paris-Sud (Paris 11)", "University of Paris-Sud")
shanghai_df = shanghai_df.replace("University of Pittsburgh-Pittsburgh Campus", "University of Pittsburgh")
shanghai_df = shanghai_df.replace("University of Pittsburgh, Pittsburgh Campus", "University of Pittsburgh")
shanghai_df = shanghai_df.replace("University of Wisconsin - Madison", "University of Wisconsin-Madison")
shanghai_df = shanghai_df.replace("University of Munich","LMU Munich")
shanghai_df = shanghai_df.replace("Moscow State University", "Lomonosov Moscow State University")
shanghai_df = shanghai_df.replace("University of Massachusetts Medical School - Worcester",
"University of Massachusetts Medical School")
shanghai_df = shanghai_df.replace("Joseph Fourier University (Grenoble 1)", "Joseph Fourier University")
shanghai_df = shanghai_df.replace("University Paris Diderot - Paris 7", "Paris Diderot University")
shanghai_df = shanghai_df.replace("University of Wageningen", "Wageningen University and Research Centre")
shanghai_df = shanghai_df.replace("The University of Texas M. D. Anderson Cancer Center",
"University of Texas MD Anderson Cancer Center")
shanghai_df = shanghai_df.replace("Technion-Israel Institute of Technology", "Technion Israel Institute of Technology")
shanghai_df = shanghai_df.replace("Swiss Federal Institute of Technology Lausanne", "Ecole Polytechnique Federale de Lausanne")
shanghai_df = shanghai_df.replace("University of Frankfurt", "Goethe University Frankfurt")
shanghai_df = shanghai_df.replace("The University of Glasgow", "University of Glasgow")
shanghai_df = shanghai_df.replace("The University of Sheffield", "University of Sheffield")
shanghai_df = shanghai_df.replace("The University of New South Wales", "University of New South Wales")
shanghai_df = shanghai_df.replace("University of Massachusetts Amherst", "University of Massachusetts, Amherst")
shanghai_df = shanghai_df.replace("University of Goettingen", "University of Gottingen")
shanghai_df = shanghai_df.replace("The University of Texas at Dallas", "University of Texas at Dallas")
shanghai_df = shanghai_df.replace("The University of Hong Kong", "University of Hong Kong")
shanghai_df = shanghai_df.replace("The Hong Kong University of Science and Technology",
"Hong Kong University of Science and Technology")
shanghai_df = shanghai_df.replace("Royal Holloway, U. of London", "Royal Holloway, University of London")
shanghai_df = shanghai_df.replace("Queen Mary, University of London", "Queen Mary University of London")
shanghai_df = shanghai_df.replace("Korea Advanced Institute of Science and Technology",
"Korea Advanced Institute of Science and Technology (KAIST)")
# recast data type
times_df['international'] = times_df['international'].replace('-', np.nan)
times_df['international'] = times_df['international'].astype(float)
times_df['income'] = times_df['income'].replace('-', np.nan)
times_df['income'] = times_df['income'].astype(float)
times_df['total_score'] = times_df['total_score'].replace('-', np.nan)
times_df['total_score'] = times_df['total_score'].astype(float)
# Fill NA values with the yearly mean and impute missing total_score for the Times data
for year in range(2011, 2017):
    inter_mean = times_df[times_df['year'] == year].international.mean()
    income_mean = times_df[times_df['year'] == year].income.mean()
    times_df.loc[(times_df.year == year) & (times_df.international.isnull()), 'international'] = inter_mean
    times_df.loc[(times_df.year == year) & (times_df.income.isnull()), 'income'] = income_mean
times_df.loc[times_df.total_score.isnull(), 'total_score'] = (0.3*times_df['teaching'] + 0.3*times_df['citations']
    + 0.3*times_df['research'] + 0.075*times_df['international'] + 0.025*times_df['income'])
# Rename columns
cwur_df.rename(columns={'institution': 'university_name'}, inplace=True)
print("Data Cleaned")
# In[ ]:
# Getting data in appropriate format
# replace ranking range to midpoint
def mid_rank(rank_string):
rank = re.sub('=', '', rank_string)
rank = rank.split('-')
s = 0
for each in rank:
each = float(each)
s = s + each
return s/len(rank)
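# Illustrative examples (assumed inputs, not taken from the dataset): mid_rank("=49") -> 49.0
# and mid_rank("101-150") -> 125.5, i.e. "=" ties are stripped and a ranking range such as
# "101-150" is mapped to its midpoint.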
# replace ranking range for shanghai and times data
times_df['world_rank_tidy'] = times_df['world_rank'].apply(mid_rank)
shanghai_df['world_rank_tidy'] = shanghai_df['world_rank'].apply(mid_rank)
# get unique school and country using times and cwur data
# Manually link countries for unique shanghai universities
shanghai_schools = pd.DataFrame([['Technion-Israel Institute of Technology', 'Israel'],
['Swiss Federal Institute of Technology Lausanne', 'Switzerland']], columns=['university_name', 'country'])
school_country = cwur_df.drop_duplicates(['university_name', 'country'])[['university_name', 'country']].append(
times_df.drop_duplicates(['university_name', 'country'])[['university_name', 'country']], ignore_index=True).append(
shanghai_schools, ignore_index=True)
school_country['country'].replace(['United States of America', 'United States'], 'USA', inplace=True)
school_country['country'].replace(['United Kingdom'], 'UK', inplace=True)
# Manually setting the country for universities that were not captured correctly in the country pivot (cwur)
school_country.loc[school_country['university_name'] == 'Technion-Israel Institute of Technology', 'country'] = 'Israel'
school_country.loc[school_country['university_name'] == 'Swiss Federal Institute of Technology Lausanne', 'country'] = 'Switzerland'
school_country = school_country.drop_duplicates(['university_name', 'country'])[['university_name', 'country']]
school_country = school_country.reset_index(drop=True)
# get ranking and score information by year
cwur_world_ranking = cwur_df[['university_name', 'country', 'world_rank', 'year']]
cwur_world_ranking = cwur_world_ranking.pivot(index = 'university_name', columns = 'year')['world_rank']
cwur_world_ranking.columns = ['cwur_2012_r', 'cwur_2013_r', 'cwur_2014_r', 'cwur_2015_r']
cwur_world_ranking = cwur_world_ranking.reset_index()
times_ranking = times_df[['university_name', 'country', 'world_rank_tidy', 'year']]
times_ranking = times_ranking.pivot(index = 'university_name', columns = 'year')['world_rank_tidy']
times_ranking.columns = ['times_2011_r', 'times_2012_r', 'times_2013_r', 'times_2014_r', 'times_2015_r', 'times_2016_r']
times_ranking = times_ranking.reset_index()
shanghai_ranking = shanghai_df[['university_name', 'world_rank_tidy', 'year']]
for y in range(2005, 2011):
shanghai_ranking = shanghai_ranking[shanghai_ranking.year != y]
shanghai_ranking = shanghai_ranking.pivot(index = 'university_name', columns = 'year')['world_rank_tidy']
shanghai_ranking.columns = ['sh_2011_r', 'sh_2012_r', 'sh_2013_r', 'sh_2014_r', 'sh_2015_r']
shanghai_ranking = shanghai_ranking.reset_index()
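# After these pivots each source is one wide table keyed by university_name with one rank column per year,
# e.g. a hypothetical row of times_ranking would look like:
#   university_name="Example University", times_2011_r=12.0, ..., times_2016_r=15.5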
# join ranking and score for all 3
rank_all = pd.merge(cwur_world_ranking, times_ranking, on = 'university_name', how = 'outer')
rank_all = pd.merge(rank_all, shanghai_ranking, on = 'university_name', how = 'outer')
rank_all = pd.merge(rank_all, school_country, on = 'university_name', how = 'left')
rank_all.head(2)
# ### Part 2 - Preparing data for analysis
#
# We shall consider the top 150 universities from each ranking system for the year 2014 and then merge them together.
# In[ ]:
# Merging relevant data and computing the pairwise ranking difference for each university
# Universities that are not common to all ranking systems are imputed a rank of 700
# Taking the top 150 universities from each of the 3 ranking systems for the year 2014
top = 150
rank_analysis = rank_all[['university_name','country', 'times_2014_r', 'cwur_2014_r', 'sh_2014_r']]
ra_t = rank_analysis.sort_values(by='times_2014_r').head(top)
ra_c = rank_analysis.sort_values(by='cwur_2014_r').head(top)
ra_s = rank_analysis.sort_values(by='sh_2014_r').head(top)
# Rename columns
ra_c.rename(columns={'country': 'country_c', 'times_2014_r': 'times_2014_r_c',
'cwur_2014_r': 'cwur_2014_r_c', 'sh_2014_r': 'sh_2014_r_c'}, inplace=True)
ra_s.rename(columns={'country': 'country_s', 'times_2014_r': 'times_2014_r_s',
'cwur_2014_r': 'cwur_2014_r_s', 'sh_2014_r': 'sh_2014_r_s'}, inplace=True)
# Merging the data based on the top 150 universities from each ranking
rank_analysis_sct = pd.merge(ra_t,
pd.merge(ra_c,
ra_s, on = 'university_name', how = 'outer'),
on = 'university_name', how = 'outer')
# Ensuring country column is not blank for universities not present in all 3 rankings
for i in range(len(rank_analysis_sct)):
if pd.isnull(rank_analysis_sct.loc[i, 'country']):
rank_analysis_sct.loc[i, 'country'] = str(rank_analysis[rank_analysis['university_name'] ==
rank_analysis_sct.loc[i, 'university_name']].iloc[0]['country'])
# Ensuring rank columns are not blank for universities not present in all 3 rankings
rank_analysis_sct['times_2014_r'] = rank_analysis_sct['times_2014_r'].fillna(rank_analysis_sct['times_2014_r_c'])
rank_analysis_sct['times_2014_r'] = rank_analysis_sct['times_2014_r'].fillna(rank_analysis_sct['times_2014_r_s'])
rank_analysis_sct['cwur_2014_r'] = rank_analysis_sct['cwur_2014_r'].fillna(rank_analysis_sct['cwur_2014_r_c'])
rank_analysis_sct['cwur_2014_r'] = rank_analysis_sct['cwur_2014_r'].fillna(rank_analysis_sct['cwur_2014_r_s'])
rank_analysis_sct['sh_2014_r'] = rank_analysis_sct['sh_2014_r'].fillna(rank_analysis_sct['sh_2014_r_c'])
rank_analysis_sct['sh_2014_r'] = rank_analysis_sct['sh_2014_r'].fillna(rank_analysis_sct['sh_2014_r_s'])
# Replacing nan items (universities which do not exist in ranking) with rank of 700 to ensure they are at farther distance
rank_analysis_sct['times_2014_r'] = rank_analysis_sct['times_2014_r'].replace(np.nan, 700).astype(int)
rank_analysis_sct['cwur_2014_r'] = rank_analysis_sct['cwur_2014_r'].replace(np.nan, 700).astype(int)
rank_analysis_sct['sh_2014_r'] = rank_analysis_sct['sh_2014_r'].replace(np.nan, 700).astype(int)
# Selecting only required columns
rank_analysis_sct = rank_analysis_sct[['university_name', 'country',
'times_2014_r', 'cwur_2014_r', 'sh_2014_r']]
# Creating columns for difference in ranking for each pair
rank_analysis_sct['t_c'] = rank_analysis_sct['times_2014_r'] - rank_analysis_sct['cwur_2014_r']
rank_analysis_sct['t_s'] = rank_analysis_sct['times_2014_r'] - rank_analysis_sct['sh_2014_r']
rank_analysis_sct['c_s'] = rank_analysis_sct['cwur_2014_r'] - rank_analysis_sct['sh_2014_r']
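# Sign convention (hypothetical numbers): if Times ranks a university 10 and CWUR ranks it 60,
# then t_c = 10 - 60 = -50, so a negative difference means the first system of the pair (here Times)
# ranks the university better than the second system (here CWUR).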
rank_analysis_sct.head(2)
# ### Part 3 - Cluster Analysis
#
# In this section we will analyze whether universities in each ranking system can be clustered based on how different the rankings are in relation to the other ranking systems (pairwise).
#
# We will see if a distinction between the 5 groups given below can be made using a clustering algorithm:
#
# 1. University heavily biased towards ranking system 1
#
# 2. University slightly biased towards ranking system 1
#
# 3. University ranked similarly by ranking system 1 and ranking system 2 (no bias)
#
# 4. University slightly biased towards ranking system 2
#
# 5. University heavily biased towards ranking system 2
#
# We will also verify our clustering results by comparing them to the logical clusters (based on hard-coded cutoffs for each of the 5 groups above)
# In[ ]:
# Checking the distribution of pairwise ranking difference
fig, ax = plt.subplots(2, 3, sharex=True, sharey=True, figsize=(12, 6))
fig.text(0.04, 0.5, 'Number of Universities', va='center', rotation='vertical', fontsize =15)
plt.subplot(1,3,1)
plt.hist(rank_analysis_sct.t_c, color = 'purple', alpha = 0.4, range=[-400,800], bins=(25))
plt.axvline(0, color = 'purple', linestyle = 'dashed', linewidth = 2)
plt.xlabel('Times & CWUR')
plt.subplot(1,3,2)
plt.hist(rank_analysis_sct.t_s, color = 'purple', alpha = 0.4, range=[-400,800], bins=(25))
plt.axvline(0, color = 'purple', linestyle = 'dashed', linewidth = 2)
plt.xlabel('Times & Shanghai')
plt.subplot(1,3,3)
plt.hist(rank_analysis_sct.c_s, color = 'purple', alpha = 0.4, range=[-400,800], bins=(25))
plt.axvline(0, color = 'purple', linestyle = 'dashed', linewidth = 2)
plt.xlabel('CWUR & Shanghai')
plt.suptitle("Distribution of pairwise ranking difference", fontsize=20)
plt.savefig('plot_all_hist.jpg')
plt.show()
# The pairwise ranking distances look more or less normally distributed. Now let us start with clustering.
# In[ ]:
# Function to create logical clusters by hard-coding group memberships
# The groups (based on the pairwise difference = rank in system 1 - rank in system 2) are:
# 1. University heavily biased towards ranking system 1 -> pairwise difference less than -216
# 2. University slightly biased towards ranking system 1 -> difference between -216 and -50
# 3. University ranked similarly by ranking system 1 and ranking system 2 (no bias) -> difference between -50 and 50
# 4. University slightly biased towards ranking system 2 -> difference between 50 and 216
# 5. University heavily biased towards ranking system 2 -> pairwise difference greater than 216
def logical_cluster(pair_col, logical_cluster_col):
    rank_analysis_sct[logical_cluster_col] = 0
    for i in range(len(rank_analysis_sct)):
        if rank_analysis_sct.loc[i, pair_col] < -216:
            rank_analysis_sct.loc[i, logical_cluster_col] = 0
        elif rank_analysis_sct.loc[i, pair_col] <= -50:
            rank_analysis_sct.loc[i, logical_cluster_col] = 1
        elif rank_analysis_sct.loc[i, pair_col] < 50:
            rank_analysis_sct.loc[i, logical_cluster_col] = 2
        elif rank_analysis_sct.loc[i, pair_col] <= 216:
            rank_analysis_sct.loc[i, logical_cluster_col] = 3
        else:
            rank_analysis_sct.loc[i, logical_cluster_col] = 4
# In[ ]:
# Creating logical clusters based on intervals obtained after eyeballing the data
logical_cluster('t_c', 't_c_cluster_logical')
logical_cluster('t_s', 't_s_cluster_logical')
logical_cluster('c_s', 'c_s_cluster_logical')
# #### Here we have created pairwise logical clusters after eyeballing our data. This gives us a baseline against which to test our clustering algorithm.
#
# #### Now let us cluster using kmeans clustering algorithm
# In[ ]:
# Function to create K-means cluster
def kmeans_cluster(pair_col, knn_cluster_col, order):
model = KMeans(n_clusters=5)
k_mean = rank_analysis_sct[[pair_col]]
model.fit(k_mean)
pred = np.choose(model.labels_, order).astype(np.int64) # Assigning correct labels
rank_analysis_sct[knn_cluster_col] = pred # Adding column of cluster information to dataset
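# Relabeling sketch (assumed values): if model.labels_ = [0, 2, 1] and order = [2, 4, 0],
# then np.choose(model.labels_, order) = [2, 0, 4]; raw KMeans label i is mapped to order[i]
# so that cluster ids 0..4 line up with the logical clusters defined above.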
# In[ ]:
# Creating kmeans clusters
np.random.seed(seed=1)
kmeans_cluster('t_c', 't_c_cluster_kmeans', [2, 4, 0, 1, 3])
kmeans_cluster('t_s', 't_s_cluster_kmeans', [2, 4, 0, 3, 1])
kmeans_cluster('c_s', 'c_s_cluster_kmeans', [2, 0, 1, 4, 3])
# In[ ]:
# Function to create scatter plot for pairwise clustering results
def bias_scatter(colormap, rank_diff, cluster, r1, r2, typ):
plt.scatter(rank_diff, rank_diff, c=colormap[cluster], s=40, alpha=0.6)
plt.title('University Bias - '+ r1 + ' vs ' + r2 + ' (' + typ + ')', fontsize = 15)
plt.xlabel('Difference')
plt.ylabel('Difference')
    b1 = mpatches.Patch(color=colormap[0], label='Highly Favored by ' + r1, alpha = 0.7)
    b2 = mpatches.Patch(color=colormap[1], label='Favored by ' + r1, alpha = 0.7)
    b3 = mpatches.Patch(color=colormap[2], label='Neutral', alpha = 0.7)
    b4 = mpatches.Patch(color=colormap[3], label='Favored by ' + r2, alpha = 0.7)
    b5 = mpatches.Patch(color=colormap[4], label='Highly Favored by ' + r2, alpha = 0.7)
plt.legend(handles=[b1, b2, b3, b4, b5], loc = 2)
#plt.savefig('LogicalVsKMean.jpg')
#plt.show()
# In[ ]:
# Plotting scatterplot
colormap_tc = np.array(['navy', 'skyblue', 'black','palegreen', 'green'])
colormap_ts = np.array(['navy', 'skyblue', 'black','coral', 'darkred'])
colormap_cs = np.array(['green', 'palegreen', 'black','coral', 'darkred'])
plt.figure(figsize=(12,22))
plt.subplot(3, 2, 1)
bias_scatter(colormap_tc, rank_analysis_sct.t_c, rank_analysis_sct['t_c_cluster_logical'], 'Times', 'CWUR', 'Logical')
plt.subplot(3, 2, 2)
bias_scatter(colormap_tc, rank_analysis_sct.t_c, rank_analysis_sct['t_c_cluster_kmeans'], 'Times', 'CWUR', 'K-means')
plt.subplot(3, 2, 3)
bias_scatter(colormap_ts, rank_analysis_sct.t_s, rank_analysis_sct['t_s_cluster_logical'], 'Times', 'Shanghai', 'Logical')
plt.subplot(3, 2, 4)
bias_scatter(colormap_ts, rank_analysis_sct.t_s, rank_analysis_sct['t_s_cluster_kmeans'], 'Times', 'Shanghai', 'K-means')
plt.subplot(3, 2, 5)
bias_scatter(colormap_cs, rank_analysis_sct.c_s, rank_analysis_sct['c_s_cluster_logical'], 'CWUR', 'Shanghai', 'Logical')
plt.subplot(3, 2, 6)
bias_scatter(colormap_cs, rank_analysis_sct.c_s, rank_analysis_sct['c_s_cluster_kmeans'], 'CWUR', 'Shanghai', 'K-means')
plt.savefig('plot_clusters_scatter.jpg')
# We see that the logical and machine learning results are very similar. Let us visualize these same results using a barplot to give us a better idea.
# In[ ]:
# Function to create barplot for pairwise clustering results
def bias_bar(logical_col, knn_col, cm, r1, r2):
logical_bias = rank_analysis_sct.groupby(logical_col).count()['university_name']
kmeans_bias = rank_analysis_sct.groupby(knn_col).count()['university_name']
x = logical_bias.index
y1 = logical_bias.values
y2 = kmeans_bias
bar_width = 0.35
opacity = 0.7
rects1 = plt.bar([x[0], x[0]+0.4], [y1[0], y2[0]], bar_width, alpha=opacity, color=cm[0], label='High Favor: ' + r1)
rects2 = plt.bar([x[1], x[1]+0.4], [y1[1], y2[1]], bar_width, alpha=opacity, color=cm[1], label='Favor: ' + r1)
rects3 = plt.bar([x[2], x[2]+0.4], [y1[2], y2[2]], bar_width, alpha=opacity, color=cm[2], label='Neutral')
rects4 = plt.bar([x[3], x[3]+0.4], [y1[3], y2[3]], bar_width, alpha=opacity, color=cm[3], label='Favor: ' + r2)
rects5 = plt.bar([x[4], x[4]+0.4], [y1[4], y2[4]], bar_width, alpha=opacity, color=cm[4], label='High favor: ' + r2)
plt.text(x[0], y1[0], y1[0], ha='center', va='bottom', size=10)
plt.text(x[1], y1[1], y1[1], ha='center', va='bottom', size=10)
plt.text(x[2], y1[2], y1[2], ha='center', va='bottom', size=10)
plt.text(x[3], y1[3], y1[3], ha='center', va='bottom', size=10)
plt.text(x[4], y1[4], y1[4], ha='center', va='bottom', size=10)
plt.text(x[0] + bar_width, y2[0], y2[0], ha='center', va='bottom', size=10)
plt.text(x[1] + bar_width, y2[1], y2[1], ha='center', va='bottom', size=10)
plt.text(x[2] + bar_width, y2[2], y2[2], ha='center', va='bottom', size=10)
plt.text(x[3] + bar_width, y2[3], y2[3], ha='center', va='bottom', size=10)
plt.text(x[4] + bar_width, y2[4], y2[4], ha='center', va='bottom', size=10)
plt.xlabel('Bias')
    plt.ylabel('Universities')
#plt.title('Bias in University Pairs')
plt.xticks(x + bar_width, ('Logical / KMeans', 'Logical / KMeans',
'Logical / KMeans', 'Logical / KMeans', 'Logical / KMeans'))
plt.legend()
plt.tight_layout()
# In[ ]:
# Plotting barplot
plt.figure(figsize=(9,12))
plt.subplot(3, 1, 1)
bias_bar('t_c_cluster_logical', 't_c_cluster_kmeans', colormap_tc, 'Times', 'CWUR')
plt.subplot(3, 1, 2)
bias_bar('t_s_cluster_logical', 't_s_cluster_kmeans', colormap_ts, 'Times', 'Shanghai')
plt.subplot(3, 1, 3)
bias_bar('c_s_cluster_logical', 'c_s_cluster_kmeans', colormap_cs, 'CWUR', 'Shanghai')
plt.savefig('plot_clusters_bar.jpg')
# From the barplots we can confirm that the logical and KMeans clustering results are similar.
# In[ ]:
# Comparing K-mean classification to logical classification
y = rank_analysis_sct.t_c_cluster_logical
# Performance Metrics
print('Accuracy',sm.accuracy_score(y, rank_analysis_sct['t_c_cluster_kmeans']))
# Confusion Matrix
sm.confusion_matrix(y, rank_analysis_sct['t_c_cluster_kmeans'])
# #### An accuracy of 89% against the logical clusters is pretty good (especially considering we just eyeballed the data to hard-code the initial clusters), so we will keep the KMeans model for clustering the pairwise ranking differences.
# #### These plots help us visualize, for any 2 ranking systems, how many universities show an underlying bias, as well as the form that bias takes.
#
# #### Now let us aggregate the result for each University.
# In[ ]:
# Creating binary columns to determine if 2 systems agree on the ranking of University (based on cluster)
for i in range(len(rank_analysis_sct)):
if rank_analysis_sct.loc[i,'t_c_cluster_kmeans'] in [1,2,3]: rank_analysis_sct.loc[i,'t_c_proximity'] = 1
else: rank_analysis_sct.loc[i,'t_c_proximity'] = 0
if rank_analysis_sct.loc[i,'t_s_cluster_kmeans'] in [1,2,3]: rank_analysis_sct.loc[i,'t_s_proximity'] = 1
else: rank_analysis_sct.loc[i,'t_s_proximity'] = 0
if rank_analysis_sct.loc[i,'c_s_cluster_kmeans'] in [1,2,3]: rank_analysis_sct.loc[i,'c_s_proximity'] = 1
else: rank_analysis_sct.loc[i,'c_s_proximity'] = 0
# Creating a column for the aggregate trustworthiness of all 3 ranking systems for each university
# A score of 3 means that all 3 ranking system pairs agree on the ranking of a university and
# a score of 0 means that no pair of ranking systems agrees on the ranking of a university
rank_analysis_sct['impartiality_score'] = rank_analysis_sct['t_c_proximity'
] + rank_analysis_sct['t_s_proximity'] + rank_analysis_sct['c_s_proximity']
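# Example (hypothetical): a university whose Times/CWUR and Times/Shanghai kmeans clusters fall in
# {1, 2, 3} (proximity = 1) but whose CWUR/Shanghai cluster does not (proximity = 0) gets an
# impartiality score of 1 + 1 + 0 = 2.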
rank_analysis_sct.to_csv('resultsRankingAnalysis.csv')
# Summarizing results
assurance_summary = rank_analysis_sct[['university_name', 'impartiality_score']].groupby('impartiality_score').count()
assurance_summary.rename(columns={'university_name': 'Total Universities'}, inplace=True)
assurance_summary.sort_index(ascending = False)
# We use a metric called 'impartiality score' to aggregate our clustering results.
#
# 171 universities have an impartiality score of 3. This means that these 171 universities have similar rankings across all three ranking systems, i.e. every ranking system is impartial towards them. 31 (14+17) universities have low impartiality scores, which means that their rankings differ widely across the ranking systems, i.e. one or two of the ranking systems are biased towards or against them.
# ### Part 4 - Checking for bias in ranking system owing to countries
#
# First let us see what the distribution of countries in the ranking systems looks like
# In[ ]:
# Preparing data for analyzing country bias
country_bias = pd.DataFrame(rank_analysis_sct.groupby('country').count().sort_values(by=
'university_name',ascending = False)['university_name'])
country_bias = pd.DataFrame(list(country_bias['university_name'].values),
list(country_bias['university_name'].index))
country_bias.rename(columns={0: 'Total Universities'}, inplace=True)
print(country_bias)
# Here we see the distribution of countries that are home to the top universities in each ranking system.
#
# Now let us check if any ranking system exhibits bias based on country. For the purposes of this analysis, we will assume there is a bias if the difference in ranking is greater than 50 (a charitable threshold given that we are considering only the top-ranked universities). We will consider all countries in this analysis, but countries with 2 or fewer universities in the rankings are not very significant (and hence are not displayed) in the final analysis, simply on account of their small sample size.
#
# We will consider both the bias against universities from a country and the bias towards universities from a country.
# In[ ]:
# Creating function to compute bias based on the kmeans cluster affiliation of a university
def country_bias_calc(p_kmeans, p, bias_name, country_bias_tab):
pkm1, pkm2 = p_kmeans[0]+'_cluster_kmeans', p_kmeans[1]+'_cluster_kmeans'
bias_pair = pd.DataFrame(rank_analysis_sct[rank_analysis_sct[pkm1].isin(p[0]) &
rank_analysis_sct[pkm2].isin(p[1])
].groupby('country').count()['university_name'])
bias_pair = pd.DataFrame(list(bias_pair['university_name'].values),
list(bias_pair['university_name'].index))
bias_pair.rename(columns={0: bias_name}, inplace=True)
if country_bias_tab.empty: tab = country_bias
else: tab = country_bias_tab
country_bias_tab = pd.merge(tab, bias_pair, on=None,left_index=True, right_index=True,
how = 'left')
country_bias_tab[bias_name] = country_bias_tab[bias_name].replace(np.nan, 0)
country_bias_tab[bias_name + ' %'] = country_bias_tab[bias_name] / country_bias_tab[
'Total Universities'] * 100
return country_bias_tab
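# Example (mirroring the calls below): country_bias_calc(['t_c', 't_s'], [[0, 1], [0, 1]], 'Times Bias', tab)
# counts, per country, the universities whose Times-vs-CWUR and Times-vs-Shanghai kmeans clusters are both
# in {0, 1}, i.e. universities favored by Times in both pairwise comparisons, and appends the count and
# percentage columns to the running country bias table.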
# In[ ]:
# Computing country bias
country_bias_f = pd.DataFrame()
country_bias_a = pd.DataFrame()
country_bias_f = country_bias_calc(['t_c', 't_s'],[[0,1],[0,1]], 'Times Bias', country_bias_f)
country_bias_f = country_bias_calc(['t_c', 'c_s'],[[3,4],[0,1]], 'CWUR Bias', country_bias_f)
country_bias_f = country_bias_calc(['t_s', 'c_s'],[[3,4],[3,4]], 'Shanghai Bias', country_bias_f)
country_bias_a = country_bias_calc(['t_c', 't_s'],[[3,4],[3,4]], 'Times Bias', country_bias_a)
country_bias_a = country_bias_calc(['t_c', 'c_s'],[[0,1],[3,4]], 'CWUR Bias', country_bias_a)
country_bias_a = country_bias_calc(['t_s', 'c_s'],[[0,1],[0,1]], 'Shanghai Bias', country_bias_a)
# Uncomment below code to check for extreme bias
#country_bias_f = country_bias_calc(['t_c', 't_s'],[[0,0],[0,0]], 'Times Bias', country_bias_f)
#country_bias_f = country_bias_calc(['t_c', 'c_s'],[[4,4],[0,0]], 'CWUR Bias', country_bias_f)
#country_bias_f = country_bias_calc(['t_s', 'c_s'],[[4,4],[4,4]], 'Shanghai Bias', country_bias_f)
#country_bias_a = country_bias_calc(['t_c', 't_s'],[[4,4],[4,4]], 'Times Bias', country_bias_a)
#country_bias_a = country_bias_calc(['t_c', 'c_s'],[[0,0],[4,4]], 'CWUR Bias', country_bias_a)
#country_bias_a = country_bias_calc(['t_s', 'c_s'],[[0,0],[0,0]], 'Shanghai Bias', country_bias_a)
# In[ ]:
country_bias_a.head(2)
# In[ ]:
# Breaking the main tables into tables based on rankings to plot
t = 15 # Minimum bias % for us to consider bias
u = 2 # Minimum universities in the ranking system to consider bias
bias_for_times = country_bias_f[(country_bias_f['Times Bias %'] >= t) & (country_bias_f['Total Universities'] > u)
].sort_values(by='Times Bias %', ascending = False)[['Total Universities', 'Times Bias', 'Times Bias %']]
bias_against_times = country_bias_a[(country_bias_a['Times Bias %'] >= t) & (country_bias_a['Total Universities'] > u)
].sort_values(by='Times Bias %', ascending = False)[['Total Universities', 'Times Bias', 'Times Bias %']]
bias_for_cwur = country_bias_f[(country_bias_f['CWUR Bias %'] >= t) & (country_bias_f['Total Universities'] > u)
].sort_values(by='CWUR Bias %', ascending = False)[['Total Universities', 'CWUR Bias', 'CWUR Bias %']]
bias_against_cwur = country_bias_a[(country_bias_a['CWUR Bias %'] >= t) & (country_bias_a['Total Universities'] > u)
].sort_values(by='CWUR Bias %', ascending = False)[['Total Universities', 'CWUR Bias', 'CWUR Bias %']]
bias_for_shanghai = country_bias_f[(country_bias_f['Shanghai Bias %'] >= t) & (country_bias_f['Total Universities'] > u)
].sort_values(by='Shanghai Bias %', ascending = False)[['Total Universities', 'Shanghai Bias', 'Shanghai Bias %']]
bias_against_shanghai = country_bias_a[(country_bias_a['Shanghai Bias %'] >= t) & (country_bias_a['Total Universities'] > u)
].sort_values(by='Shanghai Bias %', ascending = False)[['Total Universities', 'Shanghai Bias', 'Shanghai Bias %']]
# In[ ]:
# Function to create country bias bar plot
def bias_plot(b_for, b_against, b_name):
def autolabel(rects, ht, m):
cnt = 0
for rect in rects:
height = rect.get_height()
if cnt < len(rects) and rect == rects1[cnt]:
ht.append(height)
cnt+=1
#m.text(rect.get_x() + rect.get_width()/2.,
# height/2-0.5, '%d' % int(height), ha='center', va='bottom', fontsize=12)
else:
#m.text(rect.get_x() + rect.get_width()/2.,
# height/2-0.5, '%d' % int(height), ha='center', va='bottom', fontsize=12)
if m==ax2 and cnt==0 and height/ht[cnt] > 0.85:
m.text(rect.get_x() + rect.get_width()/2.,
height-2, '%d' % (height/ht[cnt]*100)+'%', ha='center', va='bottom', fontsize=18)
else:
m.text(rect.get_x() + rect.get_width()/2.,
height, '%d' % (height/ht[cnt]*100)+'%', ha='center', va='bottom', fontsize=18)
cnt+=1
return ht
N = len(b_for)
univ_total = np.array(b_for['Total Universities'])
univ_bias_for = np.array(b_for[b_name + ' Bias'])
ind = np.arange(N)
width = 0.35
fig, (ax1, ax2) = plt.subplots(2, figsize = (13,8))
rects1 = ax1.bar(ind, univ_total, width, color='green')
rects2 = ax1.bar(ind + width, univ_bias_for, width, color='lightgreen')
ax1.set_ylabel('Count', fontsize=14)
ax1.set_xticks(ind + width)
ax1.set_xticklabels(b_for.index, fontsize=14)
ax1.legend((rects1[0], rects2[0]), ('Total Universities',
'Universities biased for by ' + b_name), loc='upper left')
ax1.spines['right'].set_color('none')
ax1.spines['top'].set_color('none')
ax1.yaxis.set_ticks_position('none')
ax1.xaxis.set_ticks_position('none')
ht = []
ht = autolabel(rects1, ht, ax1)
autolabel(rects2, ht, ax1)
N = len(b_against)
univ_total = np.array(b_against['Total Universities'])
univ_bias_against = np.array(b_against[b_name + ' Bias'])
ind = np.arange(N)
rects1 = ax2.bar(ind, univ_total, width, color='firebrick')
rects2 = ax2.bar(ind + width, univ_bias_against, width, color='salmon')
ax2.set_ylabel('Count', fontsize=14)
ax2.set_xticks(ind + width)
ax2.set_xticklabels(b_against.index, fontsize=14)
ax2.legend((rects1[0], rects2[0]), ('Total Universities',
'Universities biased against by ' + b_name), loc='upper left')
ax2.spines['right'].set_color('none')
ax2.spines['top'].set_color('none')
ax2.yaxis.set_ticks_position('none')
ax2.xaxis.set_ticks_position('none')
ht = []
ht = autolabel(rects1, ht, ax2)
autolabel(rects2, ht, ax2)
    plt.suptitle('Country-wise bias towards (green) and against (red) universities - ' + b_name, fontsize=20)
plt.savefig('plot_'+b_name+'_bias.jpg')
plt.show()
# In[ ]:
# Computing country bias for each ranking system pair
bias_plot(bias_for_times, bias_against_times, 'Times')
bias_plot(bias_for_cwur, bias_against_cwur, 'CWUR')
bias_plot(bias_for_shanghai, bias_against_shanghai, 'Shanghai')
# Please note that these results are for countries which have more than 2 universities in the ranking systems and show at least 15% country-based bias.
# In conclusion, we can say that CWUR shows the least bias TOWARDS universities based on their country but the most bias AGAINST universities based on their country. Times shows the second highest bias (considering both towards and against), whereas Shanghai also shows some country-based bias, but to a lesser degree than the other two.
# Analysis by <NAME>, graduate student at the University of Washington majoring in Data Science.
# www.linkedin.com/in/nelsondsouza1
```
#### File: survey/loan-data/code.py
```python
import random
import re
import sys
sys.path.append("../../")
import pandas as pd
import numpy as np
from demo import *
# just utility so we don't clobber original dataframe
def cp(d):
    return d.copy()
def code(db_node):
return db.get_code(db_node)
def run(db_node):
func = db.get_executable(db_node)
cp_df = cp(df)
return func(cp_df)
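# Hedged usage sketch (after init() has populated db and df; the query term is an assumed example):
#   node = db.query(["int_rate"])[0]   # top result for an example query
#   print(code(node))                  # source of the extracted cleaning function
#   cleaned = run(node)                # execute it on a copy of the loan dataframe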
db = None
ALL_FUNCS = None
ALL_CODE_FRAGMENTS = None
df = None
def init():
global db
global ALL_FUNCS
global ALL_CODE_FRAGMENTS
global df
db = start("../../sample_db.pkl")
ALL_FUNCS = db.extracted_functions()
ALL_CODE_FRAGMENTS = [code(p) for p in ALL_FUNCS]
df = pd.read_csv("../../demo-data/loan.csv", nrows=1000)
def survey_task(
db, query, n, max_loc=None, random_state=None, rename_funcs=True
):
if random_state is not None:
np.random.seed(random_state)
random.seed(random_state)
if query is None:
# random querying -- effectively
all_funcs = ALL_CODE_FRAGMENTS
n = min(len(all_funcs), n)
if max_loc is not None:
all_funcs = [c for c in all_funcs if len(c.split("\n")) <= max_loc]
query_results = np.random.choice(
all_funcs,
size=n,
replace=False,
)
else:
query_results = db.query(query)[:n]
code_fragments = []
for ix, prog in enumerate(query_results):
if not isinstance(prog, str):
prog = code(prog)
if rename_funcs:
prog = re.sub(r'cleaning_func_[0-9]+', 'f{}'.format(ix), prog)
print("# Fragment {}".format(ix))
print(prog)
print("\n")
code_fragments.append(prog)
return code_fragments
class Task(object):
def __init__(self, title, description, query):
self.title = title
self.description = description
self.query = query
def generate(self, db, n, random_state):
print("# Task {}".format(self.title))
print("# {}".format(self.description))
print("# Transfer fragments (treatment)")
survey_task(db, self.query, n, random_state=random_state)
print("\n")
print("# Random fragments (control)")
survey_task(db, None, n, max_loc=20, random_state=random_state)
task1 = Task(
title="1",
description="Identify non-current loans based on loan_status",
query=["loan_status"],
)
task2 = Task(
title="2",
description=
"Round the interest rate column (`int_rate`) to nearest integer",
query=["int_rate", pd.DataFrame.astype],
)
task3 = Task(
title="3",
description="Compute the issue month and year associated with each loan",
query=["issue_month", pd.to_datetime],
)
task4 = Task(
title="4",
description=
"Fill in missing values in the months since last delinquency column (`mths_since_last_delinq`)",
query=["mths_since_last_delinq", pd.Series.fillna],
)
task5 = Task(
title="5",
description="Drop columns with many missing values",
query=[pd.DataFrame.dropna],
)
def main():
init()
seed = 42
tasks = [task1, task2, task3, task4, task5]
for ix, t in enumerate(tasks):
t.generate(db, 5, seed + ix)
if __name__ == "__main__":
try:
main()
except Exception as err:
import pdb
pdb.post_mortem()
```
#### File: common-code-extraction/wranglesearch/extraction_statistics.py
```python
from argparse import ArgumentParser
import glob
import inspect
import os
import pickle
import matplotlib.pyplot as plt
plt.ion()
import networkx as nx
import numpy as np
import pandas as pd
from plpy.analyze.dynamic_tracer import DynamicDataTracer
from .identify_donations import ColumnUse, ColumnDef, remove_duplicate_graphs
from .lift_donations import DonatedFunction
from .utils import build_script_paths, print_df
def summarize_lifted(lifted_path):
if not os.path.exists(lifted_path):
return dict(lifted=False)
else:
return dict(lifted=True)
def summarize_trace(trace_path):
info = dict(has_trace=False, trace_len=0)
if not os.path.exists(trace_path):
return info
info['has_trace'] = True
with open(trace_path, 'rb') as f:
tracer = pickle.load(f)
info['trace_len'] = len(tracer.trace_events)
return info
def summarize_graph(graph_path):
info = dict(has_graph=False, num_graph_nodes=0, num_graph_edges=0)
if not os.path.exists(graph_path):
return info
info['has_graph'] = True
with open(graph_path, 'rb') as f:
graph = pickle.load(f)
info['num_graph_nodes'] = graph.number_of_nodes()
info['num_graph_edges'] = graph.number_of_edges()
return info
def summarize_donations(donations_path):
info = dict(has_donations=False, num_donations=0)
if not os.path.exists(donations_path):
return info
info['has_donations'] = True
with open(donations_path, 'rb') as f:
donations = pickle.load(f)
info['num_donations'] = len(donations)
return info
def summarize_functions(functions_path):
info = dict(has_functions=False, num_functions=0)
if not os.path.exists(functions_path):
return info
info['has_functions'] = True
with open(functions_path, 'rb') as f:
functions = pickle.load(f)
info['num_functions'] = len(functions)
info['avg_function_len'] = np.mean([
f.graph.number_of_nodes() for f in functions
])
# FIXME: this line fails because we currently lift some functions incorrectly
# func_objs = [f._get_func_obj() for f in functions]
func_objs = []
info['fraction_more_than_one_arg'] = np.mean([
len(inspect.getfullargspec(f).args) > 1 for f in func_objs
])
return info
def functions_length_distribution(functions):
s = pd.Series([f.graph.number_of_nodes() for f in functions])
return s, s.plot(kind='hist')
def functions_args_distribution(functions):
func_objs = [f._get_func_obj() for f in functions]
s = pd.Series([len(inspect.getfullargspec(f).args) for f in func_objs])
return s, s.plot(kind='hist')
def summarize(scripts_dir, results_dir, detailed=False):
scripts_paths = glob.glob(os.path.join(scripts_dir, '*[0-9].py'))
functions_paths = []
results = []
for s in scripts_paths:
paths = build_script_paths(s, output_dir=results_dir)
info = dict(script_path=paths['script_path'])
info['name'] = os.path.basename(info['script_path'])
info.update(summarize_lifted(paths['lifted_path']))
info.update(summarize_trace(paths['trace_path']))
info.update(summarize_graph(paths['graph_path']))
info.update(summarize_donations(paths['donations_path']))
info.update(summarize_functions(paths['functions_path']))
results.append(info)
if info['has_functions']:
functions_paths.append(paths['functions_path'])
summary_df = pd.DataFrame(results)
if not detailed:
return summary_df
functions = []
for f_path in functions_paths:
with open(f_path, 'rb') as f:
functions.extend(pickle.load(f))
length_dist_results = functions_length_distribution(functions)
arg_dist_results = functions_args_distribution(functions)
return summary_df, length_dist_results, arg_dist_results
def print_report(summary_df):
total_ct = summary_df.shape[0]
ct_fields = ['has_trace', 'has_graph', 'has_donations', 'has_functions']
mean_fields = ['avg_function_len', 'fraction_more_than_one_arg']
sum_fields = ['num_donations', 'num_functions']
print('General Summary')
print('---------------------')
for f in ct_fields:
ct = summary_df[f].sum()
print(
'Files {}: {}/{} ({})'.format(
f, ct, total_ct, round(ct / total_ct, 2)
)
)
for f in sum_fields:
print('Total {}: {}'.format(f, summary_df[f].sum()))
for f in mean_fields:
print('Mean {}: {}'.format(f, round(np.mean(summary_df[f]), 2)))
print('======================')
print('Detailed Report (only entries with a trace)')
detailed_fields = [
'name', 'trace_len', 'num_donations', 'num_functions',
'avg_function_len'
]
reduced_df = summary_df.loc[summary_df['has_trace']][detailed_fields]
print_df(reduced_df)
def create_regex(df):
return '|'.join(['({})'.format(s) for s in df['script_path'].values])
def print_failed_trace(summary_df, regex):
mask = ~summary_df['has_trace']
if any(mask):
failed = summary_df[mask]
print(
'Failed to collect a trace: {} / {}'.format(
failed.shape[0], summary_df.shape[0]
)
)
print_df(failed[['script_path']])
if regex:
print(create_regex(failed))
else:
print('No trace collection failures')
def print_failed_graph(summary_df, regex):
has_trace = summary_df['has_trace']
missing_graph = ~summary_df['has_graph']
mask = has_trace & missing_graph
if any(mask):
failed = summary_df[mask]
print(
'Failed to build a graph: {} / {}'.format(
failed.shape[0], summary_df.shape[0]
)
)
print_df(failed[['script_path']])
if regex:
print(create_regex(failed))
else:
print('No graph building failures')
def main(args):
summary_df = summarize(args.scripts_dir, args.results_dir)
if not args.silent_report:
print_report(summary_df)
if args.failed_trace:
print_failed_trace(summary_df, args.regex)
if args.failed_graph:
print_failed_graph(summary_df, args.regex)
if args.output_path:
summary_df.to_csv(args.output_path, index=False)
if __name__ == '__main__':
parser = ArgumentParser(description='Summarize extraction statistics')
parser.add_argument('scripts_dir', type=str, help='Directory for scripts')
parser.add_argument(
'results_dir',
type=str,
help='Directory to results (trace, graph, etc)'
)
parser.add_argument(
'-o', '--output_path', type=str, help='Path to store csv of summary'
)
parser.add_argument(
'-t',
'--failed_trace',
action='store_true',
help='Print info for scripts that failed to trace'
)
parser.add_argument(
'-g',
'--failed_graph',
action='store_true',
help='Print info for scripts that failed to graph'
)
parser.add_argument(
'-s',
'--silent_report',
action='store_true',
help='Do not print out main report'
)
parser.add_argument(
'-r',
'--regex',
action='store_true',
help='Produce regex of script names'
)
args = parser.parse_args()
try:
main(args)
except Exception as err:
import pdb
pdb.post_mortem()
```
#### File: common-code-extraction/wranglesearch/magic.py
```python
from IPython.core.magic import (
Magics,
magics_class,
line_magic,
cell_magic,
line_cell_magic,
)
import pickle
class CustomUnpickler(pickle.Unpickler):
def find_class(self, module, name):
from transfer.build_db import (
FunctionDatabase,
NodeTypes,
RelationshipTypes,
)
db_classes = {
"FunctionDatabase": FunctionDatabase,
"NodeTypes": NodeTypes,
"RelationshipTypes": RelationshipTypes,
}
if name in db_classes:
return db_classes[name]
return super().find_class(module, name)
def start(path="sample_db.pkl"):
with open(path, "rb") as fin:
db = CustomUnpickler(fin).load()
db.startup()
return db
@magics_class
class TransferMagics(Magics):
def __init__(self, shell, db):
super().__init__(shell)
self.db = db
@line_cell_magic
def tquery(self, line, cell=None):
if cell is None:
query = line
else:
query = cell
query = query.strip()
query = query.split()
possible_result_ix = query[-1]
try:
result_ix = int(possible_result_ix)
query = query[:-1]
except ValueError:
result_ix = 1
# try to see if any strings are actually objects
clean_query = []
for term in query:
try:
obj = eval(term, self.shell.user_ns)
clean_query.append(obj)
except:
clean_query.append(term)
try:
query_results = self.db.query(clean_query)
except:
print("Lookup failed...")
return
num_avail = len(query_results)
if num_avail == 0:
print("No snippets available")
return
if result_ix < 1 or result_ix > num_avail:
# invalid indices map to top result
result_ix = 1
query_result = query_results[result_ix - 1]
code = self.db.get_code(query_result)
code = code.replace("\t", ' ' * 4)
self.shell.set_next_input(
'# query={}, Snippet={}/{} \n{}'.format(
query, result_ix, num_avail, code
),
replace=False
)
def load_ipython_extension(ipython):
db = start()
tdb = TransferMagics(ipython, db)
ipython.register_magics(tdb)
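# Hedged usage sketch (assumes an IPython session where this module is importable as
# wranglesearch.magic and sample_db.pkl is present in the working directory):
#   %load_ext wranglesearch.magic
#   %tquery fillna 2    # show the 2nd-ranked snippet for the query ["fillna"]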
``` |
{
"source": "josepablocam/janus-public",
"score": 2
} |
#### File: janus/analysis/dump_pipelines.py
```python
from argparse import ArgumentParser
import difflib
import json
import os
import subprocess
import pandas as pd
import tqdm
from janus.pipeline import pipeline_to_tree as pt
from janus.analysis import performance_analysis as pa
def get_str_diff(t1, t2):
with open("t1.txt", "w") as fout:
fout.write(t1)
with open("t2.txt", "w") as fout:
fout.write(t2)
proc = subprocess.Popen(
["diff", "t1.txt", "t2.txt"],
stdout=subprocess.PIPE,
)
diff_msg, _ = proc.communicate()
os.remove("t1.txt")
os.remove("t2.txt")
return diff_msg.decode()
def get_pipelines_json(prepared_df):
prepared_df["original_pipeline"] = [
pt.to_pipeline(g) if not pd.isnull(g) else None
for g in tqdm.tqdm(prepared_df["graph_orig"])
]
prepared_df["repaired_pipeline"] = [
pt.to_pipeline(g) if not pd.isnull(g) else None
for g in tqdm.tqdm(prepared_df["graph_repaired"])
]
prepared_df["original_tree"] = [
pt.to_tree(p) if not pd.isnull(p) else None
for p in tqdm.tqdm(prepared_df["original_pipeline"])
]
prepared_df["repaired_tree"] = [
pt.to_tree(p) if not pd.isnull(p) else None
for p in tqdm.tqdm(prepared_df["repaired_pipeline"])
]
prepared_df["original_json"] = [
pt.to_json(t, payload_as_str=True) if not pd.isnull(t) else None
for t in tqdm.tqdm(prepared_df["original_tree"])
]
prepared_df["repaired_json"] = [
pt.to_json(t, payload_as_str=True) if not pd.isnull(t) else None
for t in tqdm.tqdm(prepared_df["repaired_tree"])
]
prepared_df["original_text"] = [
pt.to_text(t) if not pd.isnull(t) else None
for t in tqdm.tqdm(prepared_df["original_tree"])
]
prepared_df["repaired_text"] = [
pt.to_text(t) if not pd.isnull(t) else None
for t in tqdm.tqdm(prepared_df["repaired_tree"])
]
prepared_df["text_diff"] = [
get_str_diff(t1, t2) for t1, t2 in tqdm.tqdm(
list(
zip(prepared_df["original_text"],
prepared_df["repaired_text"])))
]
prepared_df["original_score"] = prepared_df["mean_test_score_orig"]
prepared_df["repaired_score"] = prepared_df["mean_test_score_repaired"]
prepared_df["score_delta"] = prepared_df["repaired_score"] - prepared_df["original_score"]
worse = prepared_df[prepared_df["hurt"]]
same = prepared_df[(~prepared_df["hurt"]) & (~prepared_df["improved"])]
better = prepared_df[prepared_df["improved"]]
prepared_df = prepared_df[[
"dataset",
"original_json",
"original_score",
"repaired_json",
"repaired_score",
"improved",
"hurt",
"score_delta",
"text_diff",
]]
results = []
for _, row in prepared_df.iterrows():
results.append(dict(row))
return results
def get_args():
parser = ArgumentParser(description="Dump pipelines and repairs as json")
parser.add_argument(
"--input",
type=str,
nargs="+",
help="Path to experiment output pickled files")
parser.add_argument("--strategy", type=str, help="Strategy to dump")
parser.add_argument("--output", type=str, help="Path to dump file")
return parser.parse_args()
def main():
    args = get_args()
dfs = []
for path in args.input[:1]:
df = pd.read_pickle(path)
        # fix dtype so that older results load consistently
df["mean_test_score"] = df["mean_test_score"].astype(float)
dfs.append(df)
combined_df = pd.concat(dfs, axis=0)
combined_df = combined_df[combined_df["strategy"] == args.strategy]
    prepared_df = pa.prepare_df(combined_df, compute_dist=False)
json_output = get_pipelines_json(prepared_df)
with open(args.output, "w") as fout:
json.dump(json_output, fout)
if __name__ == "__main__":
try:
main()
except Exception as err:
import pdb
pdb.post_mortem()
```
#### File: janus/analysis/tree_pairs_analysis.py
```python
from argparse import ArgumentParser
from collections import defaultdict
import numpy as np
import os
import pickle
import matplotlib
matplotlib.use('pdf')
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
matplotlib.rcParams['font.size'] = 12
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from janus.repair.tree_pairs import CorpusEntry, TreePairCorpus
def create_distance_df(corpus):
records = []
for entry in corpus:
rec = {"distance": entry.distance, "method": corpus.sample_method}
records.append(rec)
return pd.DataFrame(records)
def plot_distance_ecdf(df):
fig, ax = plt.subplots(1)
methods = sorted(df["method"].unique())
colors = sns.color_palette("colorblind", len(methods))
palette = {m: c for m, c in zip(methods, colors)}
sns.ecdfplot(
data=df,
x="distance",
hue="method",
ax=ax,
palette=palette,
hue_order=methods,
)
return ax
def get_args():
parser = ArgumentParser(description="Compare post-tree sampling methods")
parser.add_argument(
"--input", type=str, nargs="+", help="TreePairCorpus files")
parser.add_argument(
"--output_dir", type=str, help="Output directory for analysis results")
return parser.parse_args()
def main():
args = get_args()
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
dist_dfs = []
compute_times = defaultdict(lambda: [])
for path in args.input:
with open(path, "rb") as fin:
corpus = pickle.load(fin)
dist_df = create_distance_df(corpus)
dist_dfs.append(dist_df)
compute_times[corpus.sample_method].append(corpus.compute_time)
combined_dist_df = pd.concat(dist_dfs, axis=0)
print("Number of pipeline pairs")
print(combined_dist_df.groupby("method").size())
dist_ecdf = plot_distance_ecdf(combined_dist_df)
dist_ecdf.get_figure().savefig(
os.path.join(args.output_dir, "distance_ecdf.pdf"))
summary_df = combined_dist_df.groupby("method")["distance"].agg(
["mean", "max", "std"])
summary_df = summary_df.reset_index()
compute_times_info = {
k: (np.mean(v), np.std(v))
for k, v in compute_times.items()
}
summary_df["mean_compute_time_str"] = [
"{:.2f}(+/- {:.2f})".format(*compute_times_info[m])
for m in summary_df["method"]
]
summary_df["mean_distance_str"] = [
"{:.2f}(+/- {:.2f})".format(m, d)
for m, d in zip(summary_df["mean"], summary_df["std"])
]
summary_df = summary_df[[
"method", "mean_distance_str", "mean_compute_time_str"
]]
summary_df = summary_df.rename(
columns={
"method": "Sampling method",
"mean_distance_str": "Mean (SD) Distance",
"mean_compute_time_str": "Mean (SD) Time (s)"
})
summary_df.to_latex(
os.path.join(args.output_dir, "summary.tex"), index=False)
if __name__ == "__main__":
try:
main()
except Exception as err:
import pdb
pdb.post_mortem()
```
#### File: janus/evaluation/paper_example.py
```python
from argparse import ArgumentParser
from collections import defaultdict
import difflib
import os
import sys
from bs4 import BeautifulSoup
import glob
import matplotlib
matplotlib.use('pdf')
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy
import seaborn as sns
import statsmodels.stats.contingency_tables
import tqdm
import zss
from janus.pipeline import pipeline_to_tree as pt
from janus.analysis import performance_analysis as pa
from janus.analysis import rule_analysis as ra
from janus.repair.local_rules import (
is_match_edit,
edit_to_str,
get_safe_label,
)
from janus.repair.local_rules import (
ComponentInsert,
ComponentRemove,
ComponentUpdate,
HyperparamRule,
HyperparamRemove,
HyperparamUpdate,
get_safe_label,
CorpusEntry,
RuleCorpus,
)
from janus import utils
from janus.repair.tree_pairs import CorpusEntry, TreePairCorpus
def remove_diff_legend(src):
parsed = BeautifulSoup(src)
# second table is legend
legend = parsed.find_all("table")[1]
legend.extract()
return str(parsed)
def show_diff_tree(orig, repaired, path=None):
orig_str = pt.to_text(orig, indent_str=" ")
repaired_str = pt.to_text(repaired, indent_str=" ")
diff = difflib.HtmlDiff().make_file(
orig_str.split("\n"),
repaired_str.split("\n"),
fromdesc="orig",
todesc="repaired",
context=True,
)
diff = remove_diff_legend(diff)
if path is not None:
with open(path, "w", encoding="utf-8") as fout:
fout.write(diff)
return diff
def get_rules(paths, seed=42):
rules = ra.load_rules(paths)
sampler = ra.get_rule_sampler("weighted", rules, random_state=seed)
sampler_rules = [r for group in sampler.rule_map.values() for r in group]
df_sampler = ra.df_from_rules(sampler_rules)
return df_sampler
def get_tree_emoji():
return "\U0001F332"
def get_code_emoji():
return "\U0001F4C4"
def add_ellipsis_row(df, format_numeric=False):
df = df.copy()
if format_numeric:
col_types = df.dtypes
is_numeric = col_types.map(lambda x: np.issubdtype(x, np.number))
num_cols = col_types[is_numeric].index.values
for c in num_cols:
df[c] = df[c].map(lambda x: "{:.2f}".format(x))
row = pd.DataFrame([{c: "..." for c in df.columns}])
return pd.concat((df, row), axis=0)
def show_trace(improved):
entries = [(get_code_emoji(), s) for s in improved["mean_test_score_orig"]]
df = pd.DataFrame(entries, columns=["pipeline", "performance"])
df = df.head(4)
return add_ellipsis_row(df, format_numeric=True)
def show_tree_pairs(tree_pairs_corpus, threshold=10):
# sat our distance threshold
entries = [e for e in tree_pairs_corpus.entries if e.distance < threshold]
# dominating pair
entries = [
e for e in entries if not pd.isnull(e.pre.external_score)
and not pd.isnull(e.post.external_score)
and e.pre.external_score < e.post.external_score
]
records = []
for e in entries:
record = {
"tree_1": get_tree_emoji(),
"tree_2": get_tree_emoji(),
"distance": e.distance,
"score_delta": (e.post.external_score - e.pre.external_score)
}
records.append(record)
df = pd.DataFrame(records)
df = df.groupby("distance").head(1)
df = df.head(3)
return add_ellipsis_row(df, format_numeric=True)
def show_edit_ops(tree_pairs_corpus, seed=42):
# non match edits
edits = [
op for e in tree_pairs_corpus.entries for op in e.edits
if op.type != zss.Operation.match
]
edits = [{"edit_type": e.type, "edit_str": edit_to_str(e)} for e in edits]
df = pd.DataFrame(edits)
df = df.sample(frac=1.0, replace=False, random_state=seed)
df = df.groupby("edit_type").head(1)
df = df[["edit_str"]].rename(columns={"edit_str": "edit"})
return add_ellipsis_row(df)
def rule_name_for_paper(r_str):
names = {
"ComponentInsert": "CInsert",
"ComponentRemove": "CRemove",
"ComponentUpdate": "CUpdate",
"HyperparamRemove": "HRemove",
"HyperparamUpdate": "HUpdate",
}
for orig, replacement in names.items():
r_str = r_str.replace(orig, replacement)
return r_str
def show_rules(df_rules, seed=42):
# to latex html table
# show rules of each type
# for ComponentInsert pick ones that don't
# just insert a stacking estimator
df_rules = df_rules.copy()
cond_insert = (
df_rules["type_str"] == "ComponentInsert"
) & (df_rules["post_label"].map(
lambda x: isinstance(x, str) and not x.endswith("StackingEstimator")))
others = df_rules["type_str"] != "ComponentInsert"
df_rules = df_rules[cond_insert | others]
df_rules = df_rules.sample(frac=1, replace=False, random_state=seed)
df_rules = df_rules.groupby("type_str").head(1)
df_rules["rule_str"] = df_rules["rule"].map(lambda x: x.as_str())
df_rules["rule_str"] = df_rules["rule_str"].map(rule_name_for_paper)
rule_strs = df_rules["rule_str"]
df = rule_strs.to_frame(name="LSR")
return add_ellipsis_row(df)
def get_args():
parser = ArgumentParser(description="Extended system diagram")
parser.add_argument("--input_dir", type=str, help="Folder for $RESULTS")
parser.add_argument("--output_dir", type=str, help="Folder for $ANALYSIS")
parser.add_argument("--seed", type=int, help="RNG seed", default=42)
return parser.parse_args()
def main():
args = get_args()
df = pd.read_pickle(
os.path.join(
args.input_dir,
"tpot-pipelines-with-tpot-rules/car-evaluation-synthetic-evaluation-janus.pkl"
))
prepared_df = pa.prepare_df(df, compute_dist=True)
improved = prepared_df[prepared_df["improved"]]
improved = improved.sort_values("distance", ascending=False)
tree_pairs = pd.read_pickle(
os.path.join(args.input_dir, "tpot/car-evaluation-tree-pairs.pkl"))
rules = get_rules(
glob.glob(os.path.join(args.input_dir, "tpot/*-local-rules.pkl")),
seed=args.seed)
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
trace_df = show_trace(improved)
trace_df.to_html(
os.path.join(args.output_dir, "trace.html"),
index=False,
float_format="{:.2f}".format,
)
tree_pairs_df = show_tree_pairs(tree_pairs)
tree_pairs_df.to_html(
os.path.join(args.output_dir, "tree-pairs.html"),
index=False,
float_format="{:.2f}".format,
)
edits_df = show_edit_ops(tree_pairs, seed=args.seed)
edits_df.to_html(os.path.join(args.output_dir, "edits.html"), index=False)
lsr_df = show_rules(rules, seed=args.seed)
lsr_df.to_html(
os.path.join(args.output_dir, "lsr.html"),
index=False,
)
# Janus applied
min_score_diff = 0.05
improved = improved[improved.score_diff >= min_score_diff]
improved = improved.sort_values("score_diff", ascending=True)
ix = 0
orig = improved.iloc[ix].graph_orig
repaired = improved.iloc[ix].graph_repaired
print("Orig graph")
print(pt.to_text(orig))
print("-----------")
print("Repaired graph")
print(pt.to_text(repaired))
print("Score improvement: ", improved.iloc[ix].score_diff)
print("From: ", improved.iloc[ix].mean_test_score_orig)
print("To: ", improved.iloc[ix].mean_test_score_repaired)
show_diff_tree(orig, repaired, os.path.join(args.output_dir, "diff.html"))
if __name__ == "__main__":
try:
main()
except Exception as err:
import pdb
pdb.post_mortem()
sys.exit(1)
```
#### File: evaluation/user-scripts/forest-cover-script1.py
```python
dataset = "forest-cover"
metric = "balanced_accuracy"
def make_pipeline():
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
logit = LogisticRegression(
C=1,
solver='lbfgs',
max_iter=500,
random_state=17,
n_jobs=1,
multi_class='multinomial')
p = Pipeline([('scaler', StandardScaler()), ('logit', logit)])
return p
```
#### File: evaluation/user-scripts/forest-cover-script5.py
```python
dataset = "forest-cover"
metric = "balanced_accuracy"
def make_pipeline():
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
scaler = StandardScaler()
clf = SVC(C=10, kernel='rbf', probability=True, random_state=0)
p = Pipeline([
('scaler', scaler),
('clf', clf),
])
return p
```
#### File: evaluation/user-scripts/ghouls-script1.py
```python
dataset = "ghouls"
metric = "accuracy"
def make_pipeline():
from sklearn.pipeline import Pipeline
from sklearn.ensemble import (
VotingClassifier,
RandomForestClassifier,
BaggingClassifier,
GradientBoostingClassifier,
)
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
rf = RandomForestClassifier(n_estimators=10, random_state=0)
bag = BaggingClassifier(max_samples=5, n_estimators=25, random_state=0)
gb = GradientBoostingClassifier(
learning_rate=0.1, max_depth=5, n_estimators=100, random_state=0)
lr = LogisticRegression(
penalty='l1', C=1, random_state=0, solver="liblinear")
svc = SVC(
C=10, degree=3, kernel='linear', probability=True, random_state=0)
clf = VotingClassifier(
estimators=[('rf', rf), ('bag', bag), ('gbc', gb), ('lr', lr),
('svc', svc)],
voting='hard',
)
p = Pipeline([('clf', clf)])
return p
```
#### File: evaluation/user-scripts/otto-script1.py
```python
dataset = "otto"
metric = "neg_log_loss"
def make_pipeline():
from sklearn.pipeline import Pipeline
import xgboost as xgb
clf = xgb.XGBClassifier(
n_estimators=150,
learning_rate=0.2,
colsample_bytree=0.7,
random_state=0,
)
p = Pipeline([("clf", clf)])
return p
```
#### File: evaluation/user-scripts/otto-script3.py
```python
dataset = "otto"
metric = "neg_log_loss"
def make_pipeline():
from sklearn.pipeline import Pipeline
import xgboost as xgb
# Ignoring gpu_hist (no access to GPU)
# Ignoring n_thread=6 (single threaded)
clf = xgb.XGBClassifier(
n_estimators=600,
max_depth=5,
colsample_bytree=0.8,
learning_rate=0.1,
criterion="entropy",
random_state=0,
)
p = Pipeline([("clf", clf)])
return p
```
#### File: evaluation/user-scripts/otto-script4.py
```python
dataset = "otto"
metric = "neg_log_loss"
def make_pipeline():
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
# focused on SVC pipeline
# to provide some diversity
# (i.e. not just XGB pipelines)
# probability=True needed for neg_log_loss
clf = SVC(probability=True, random_state=0)
p = Pipeline([("clf", clf)])
return p
```
#### File: janus/kaggle/collect_pipelines.py
```python
from argparse import ArgumentParser
import os
import shutil
import sys
import tempfile
import numpy as np
import pandas as pd
from janus.lift.extract_pipeline import PipelineLifter
from janus import utils
def get_target_remapping(dataset_path):
data_name = os.path.basename(dataset_path)
target_name = {
"forest-cover.csv": "Cover_Type",
"ghouls.csv": "type",
"otto.csv": "target",
}
return {"target": target_name[data_name]}
def add_fake_id_column(df, dataset_path):
if dataset_path.endswith("forest-cover.csv"):
name = "Id"
else:
name = "id"
if name not in df.columns:
df[name] = np.arange(0, df.shape[0])
return df
def prepare_dataset(dataset_path, max_size):
df = pd.read_csv(dataset_path)
sampled_path = dataset_path + "-sampled"
if max_size is not None and max_size < df.shape[0]:
df = df.sample(n=max_size, replace=False)
# rename target column
target_remap = get_target_remapping(dataset_path)
df = df.rename(columns=target_remap)
df = add_fake_id_column(df, dataset_path)
df.to_csv(sampled_path, index=False)
return sampled_path
def create_kaggle_dir_structure(tmp,
script_path,
dataset_path,
max_size=None,
random_state=42):
data_dir = os.path.join(tmp, "input")
src_dir = os.path.join(tmp, "src")
os.makedirs(data_dir, exist_ok=True)
os.makedirs(src_dir, exist_ok=True)
script_name = os.path.basename(script_path)
new_script_path = os.path.join(src_dir, script_name)
shutil.copy(script_path, new_script_path)
# create downsampled dataset if needed
utils.set_seed(random_state)
dataset_path = prepare_dataset(dataset_path, max_size)
# symlink the dataset as train.csv *and* test.csv
# we are only interested in getting the pipeline
# not any performance coming from this
train_path = os.path.join(data_dir, "train.csv")
if os.path.exists(train_path):
os.remove(train_path)
os.symlink(dataset_path, train_path)
test_path = os.path.join(data_dir, "test.csv")
if os.path.exists(test_path):
os.remove(test_path)
os.symlink(dataset_path, test_path)
return new_script_path
def get_args():
parser = ArgumentParser(description="Run kaggle script and lift pipelines")
parser.add_argument("--script", type=str, help="Path to script")
parser.add_argument("--tmp", type=str, help="Temporary directory")
parser.add_argument(
"--keep_tmp", action="store_true", help="Don't delete temporary")
parser.add_argument("--dataset", type=str, help="Path to csv dataset")
parser.add_argument(
"--output", type=str, help="Path to store lifted pipelines (pickled)")
parser.add_argument(
"--max_size",
type=int,
help="Max number of rows in dataset (downsample if needed)",
default=1000,
)
parser.add_argument(
"--random_state",
type=int,
help="RNG seed",
default=42,
)
return parser.parse_args()
def main():
args = get_args()
output_path = os.path.abspath(args.output)
if args.tmp is None:
tmp = tempfile.TemporaryDirectory()
tmp_dir_path = tmp.name
else:
tmp_dir_path = args.tmp
if not os.path.exists(tmp_dir_path):
os.makedirs(tmp_dir_path)
new_script_path = create_kaggle_dir_structure(
tmp_dir_path,
args.script,
os.path.abspath(args.dataset),
max_size=args.max_size,
random_state=args.random_state,
)
orig_dir = os.getcwd()
script_dir = os.path.dirname(new_script_path)
script_name = os.path.basename(new_script_path)
os.chdir(script_dir)
print("Running {}".format(script_name))
lifted = PipelineLifter(script_name)
if not lifted.failed:
print("Dumping pipelines for", args.script)
lifted.dump(output_path)
else:
print("Failed extracting pipelines for", args.script)
os.chdir(orig_dir)
if args.keep_tmp:
return
if args.tmp is None:
tmp.cleanup()
else:
shutil.rmtree(args.tmp)
if __name__ == "__main__":
try:
main()
except Exception as err:
import pdb
pdb.post_mortem()
sys.exit(1)
```
#### File: janus/janus/mp_utils.py
```python
import multiprocessing as mp
from multiprocessing.context import TimeoutError
import sys
def run(seconds, fun, *args, **kwargs):
if seconds >= 0:
pool = mp.get_context("spawn").Pool(processes=1)
try:
proc = pool.apply_async(fun, args, kwargs)
result = proc.get(seconds)
return result
except mp.TimeoutError:
pool.terminate()
pool.close()
raise mp.TimeoutError()
finally:
pool.terminate()
pool.close()
else:
# if no timeout, then no point
# in incurring cost of running as separate process
# so call locally
return fun(*args, **kwargs)
```
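The helper above gates a callable behind a hard timeout by running it in a spawned worker process; a negative timeout skips the subprocess entirely and calls the function in-process. A minimal usage sketch (assuming the `janus` package is importable; `slow_square` is a made-up stand-in for a long pipeline evaluation):
```python
import multiprocessing as mp
import time

from janus import mp_utils


def slow_square(x, delay=2.0):
    # simulate a long-running pipeline evaluation
    time.sleep(delay)
    return x * x


if __name__ == "__main__":
    # with a 1 second budget, the 5 second call is terminated and
    # multiprocessing.TimeoutError is re-raised to the caller
    try:
        mp_utils.run(1, slow_square, 10, delay=5.0)
    except mp.TimeoutError:
        print("timed out as expected")
    # a negative budget avoids the worker process and runs locally
    print(mp_utils.run(-1, slow_square, 10, delay=0.0))
```
Note that, because `run` uses the `spawn` context, the target function has to be importable (defined at module top level) so the worker process can unpickle it.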
#### File: janus/repair/local_rules.py
```python
from argparse import ArgumentParser
from collections import Counter
import pickle
import sys
import numpy as np
import sklearn.base
from tpot.builtins import StackingEstimator
import tqdm
import zss
from janus.pipeline import pipeline_to_tree as pt
from janus.repair.tree_pairs import CorpusEntry, TreePairCorpus
def is_update_edit(edit):
return edit.type == zss.compare.Operation.update
def is_match_edit(edit):
return edit.type == zss.compare.Operation.match
def is_remove_edit(edit):
return edit.type == zss.compare.Operation.remove
def is_insert_edit(edit):
return edit.type == zss.compare.Operation.insert
def get_safe_label(n):
if n is not None:
return n.label
else:
return None
def get_edit_op_str(op):
type_strs = {
zss.Operation.update: "Update",
zss.Operation.remove: "Remove",
zss.Operation.insert: "Insert",
}
return type_strs.get(op.type)
class LocalEditRule(object):
    @staticmethod
    def can_build_rule(edit):
raise NotImplementedError()
def can_apply(self, node):
raise NotImplementedError()
def apply(self, node):
raise NotImplementedError()
def key(self):
raise NotImplementedError()
def info(self):
return self._info
def score_delta(self):
return self._score_delta
def set_score_delta(self, delta):
self._score_delta = delta
def as_str(self):
_, pre, ctx, post = self.key()
cls = self.__class__.__name__
if pre is not None:
pre = pre.split(".")[-1]
if post is not None:
post = post.split(".")[-1]
ctx = ", ".join(
[e.split(".")[-1] if e is not None else "None" for e in ctx])
s = "{cls}({pre} -> {post}) @ ({ctx})".format(
cls=cls,
pre=pre,
post=post,
ctx=ctx,
)
return s
def value_as_feature(val):
if not isinstance(val, (str, int, float, bool)):
return str(type(val))
if isinstance(val, float) and np.isnan(val):
return str(val)
return val
class HyperparamRule(LocalEditRule):
@staticmethod
def info_from_node(node):
info = {
"parent": get_safe_label(node.parent),
"hyperparam": node.payload[0],
"hypervalue": value_as_feature(node.payload[1]),
"hypervalue_type": str(type(node.payload[1])),
}
for c in node.siblings():
try:
if pt.is_param_node(c):
name, val = c.payload
info["sibling_param_name_" + name] = True
info["sibling_param_value_" + name] = value_as_feature(val)
elif pt.is_component_node(c):
info["sibling_component_" + c.label] = True
else:
pass
except:
pass
return info
class ComponentRule(LocalEditRule):
@staticmethod
def info_from_node(node):
info = {
"parent": get_safe_label(node.parent),
"component": node.label,
}
for c in node.siblings():
if pt.is_component_node(c):
info[c.label] = True
return info
class HyperparamUpdate(HyperparamRule):
def __init__(self, edit):
self.pre, self.post = edit.arg1, edit.arg2
self._info = HyperparamRule.info_from_node(self.pre)
@staticmethod
def can_build_rule(edit):
return is_update_edit(edit) and \
pt.is_param_node(edit.arg1) and \
pt.is_param_node(edit.arg2) and \
edit.arg1.parent is not None and \
edit.arg2.parent is not None and \
edit.arg1.parent.label == edit.arg2.parent.label
def key(self):
ctx = frozenset([self.pre.parent.label])
return (str(type(self)), self.pre.payload[0], ctx, self.post.label)
def can_apply(self, node):
if not pt.is_param_node(
node) or node.payload[0] != self.pre.payload[0]:
return False
if self.pre.parent.label != get_safe_label(node.parent):
# hyperparameters depend on the parent component
# so if mismatched, can't really apply
return False
# for string hypervalues (most likely representing enumerations)
# we apply only if there is a match
pre_hypervalue = self.pre.payload[1]
post_hypervalue = self.post.payload[1]
cand_hypervalue = node.payload[1]
if post_hypervalue == cand_hypervalue:
# no-op, don't bother applying
return False
if isinstance(pre_hypervalue, str):
return pre_hypervalue == cand_hypervalue
else:
# TODO: for numeric we could add predicate or some form of abstraction
# could be learned...
return True
def apply(self, node):
return pt.shallow_copy(self.post)
class HyperparamRemove(HyperparamRule):
# NB: removing a hyperparameter is equivalent to
# setting it to its default value in the constructor
def __init__(self, edit):
self.pre, self.post = edit.arg1, edit.arg2
self._info = HyperparamRule.info_from_node(self.pre)
@staticmethod
def can_build_rule(edit):
return is_remove_edit(edit) and \
pt.is_param_node(edit.arg1) and \
edit.arg1.parent is not None and \
edit.arg2 is None
def key(self):
ctx = frozenset([self.pre.parent.label])
return (str(type(self)), self.pre.payload[0], ctx, None)
def can_apply(self, node):
if not pt.is_param_node(
node) or node.payload[0] != self.pre.payload[0]:
return False
if self.pre.parent.label != get_safe_label(node.parent):
# requires component context to effectively apply
return False
# for string hypervalues (most likely representing enumerations)
# we apply only if there is a match
pre_hypervalue = self.pre.payload[1]
cand_hypervalue = node.payload[1]
if isinstance(pre_hypervalue, str):
return pre_hypervalue == cand_hypervalue
else:
# TODO: for numeric we could add predicate or some form of abstraction
# could be learned...
return True
def apply(self, node):
# we delete by returning None...
# actual deletion is handled by caller
return None
class ComponentUpdate(ComponentRule):
def __init__(self, edit):
self.pre, self.post = edit.arg1, edit.arg2
compiled_post = pt.to_pipeline(self.post)
self.post_is_classifier = sklearn.base.is_classifier(compiled_post)
if self.post_is_classifier:
wrapped_post = pt.to_tree(StackingEstimator(compiled_post))
self.wrapped_post = wrapped_post.children[0]
self._info = ComponentRule.info_from_node(self.pre)
@staticmethod
def can_build_rule(edit):
base_cond = is_update_edit(edit) and \
pt.is_component_node(edit.arg1) and \
pt.is_component_node(edit.arg2) and \
edit.arg1.parent is not None
if not base_cond:
return False
# want to also try compiling the post on its own
try:
pt.to_pipeline(edit.arg2)
return True
except:
return False
def key(self):
ctx = frozenset([
self.pre.parent.label,
get_safe_label(self.pre.left),
get_safe_label(self.pre.right)
])
return (str(type(self)), self.pre.label, ctx, self.post.label)
def can_apply(self, node):
if not pt.is_component_node(node) or node.label != self.pre.label:
return False
if node.label == self.post.label:
# no-op
return False
# at least one direct neighbor matches
return self.pre.parent.label == get_safe_label(node.parent) or \
get_safe_label(self.pre.left) == get_safe_label(node.left) or \
get_safe_label(self.pre.right) == get_safe_label(node.right)
def apply(self, node):
compiled_node = pt.to_pipeline(node)
if self.post_is_classifier and not sklearn.base.is_classifier(
compiled_node):
post = self.wrapped_post
else:
post = self.post
return pt.shallow_copy(post)
class ComponentRemove(ComponentRule):
def __init__(self, edit):
self.pre, self.post = edit.arg1, edit.arg2
self._info = ComponentRule.info_from_node(self.pre)
@staticmethod
def can_build_rule(edit):
return is_remove_edit(edit) and \
pt.is_component_node(edit.arg1) and \
edit.arg2 is None and \
edit.arg1.parent is not None
def key(self):
ctx = frozenset([
self.pre.parent.label,
get_safe_label(self.pre.left),
get_safe_label(self.pre.right)
])
return (str(type(self)), self.pre.label, ctx, None)
def can_apply(self, node):
if not pt.is_component_node(node) or node.label != self.pre.label:
return False
return self.pre.parent.label == get_safe_label(node.parent) or \
get_safe_label(self.pre.left) == get_safe_label(node.left) or \
get_safe_label(self.pre.right) == get_safe_label(node.right)
def apply(self, node):
return None
class ComponentInsert(ComponentRule):
# NB: for insertion, we insert
# into the children of `node` we're applying rule to
# and return the node with modified children
def __init__(self, edit):
# the "pre" is really the parent node
self.pre = edit.pre_parent
self.post = edit.arg2
# some re-used info for pre children
self.pre_children_n = len(self.pre.children)
self.pre_children_labels = [c.label for c in self.pre.children]
self.pre_children_label_set = set(self.pre_children_labels)
compiled_post = pt.to_pipeline(self.post)
self.post_is_classifier = sklearn.base.is_classifier(compiled_post)
if self.post_is_classifier:
wrapped_post = pt.to_tree(StackingEstimator(compiled_post))
# remove the 'root' node, just want the one for the clf
self.wrapped_post = wrapped_post.children[0]
self._info = ComponentRule.info_from_node(self.pre)
@staticmethod
def can_build_rule(edit):
base_cond = is_insert_edit(edit) and \
edit.arg1 is None and \
edit.pre_parent is not None and \
pt.is_component_node(edit.arg2) and \
edit.arg2.parent is not None
if not base_cond:
return False
# want to also try compiling the post on its own
try:
pt.to_pipeline(edit.arg2)
return True
except:
return False
def key(self):
ctx = frozenset([c.label for c in self.pre.children])
return (str(type(self)), self.pre.label, ctx, self.post.label)
def can_apply(self, node):
# a component insertion can only be applied to
# a "combinator" object, so must be Pipeline type
# TODO: extend with other combinator types here if necessary
if not pt.is_composition_node(node):
return False
# node must be a well-formed pipeline, i.e. there must be
# a classifier at the end
if len(node.children) == 0:
return False
try:
compiled_possible_clf = pt.to_pipeline(node.children[-1])
if not sklearn.base.is_classifier(compiled_possible_clf):
return False
except:
# can't compile it, can't really insert appropriately
return False
# we apply it by inserting into its children
# so check that at least one child matches
# what we observed in the pre node's children
return any(
c.label in self.pre_children_label_set for c in node.children)
def apply(self, node, seed=None):
if seed is not None:
np.random.seed(seed)
# find children of node that existed as a child of the pre
candidate_locations = []
for ix, c in enumerate(node.children):
if c.label in self.pre_children_label_set:
candidate_locations.append(ix)
if len(candidate_locations) == 0:
# only happens if we try to apply without
# calling .can_apply, so must be trying to force
# application...so we'll just set candidate_locations
# to be any
candidate_locations = np.arange(0, len(node.children))
# pick a location at random
target_ix = np.random.choice(candidate_locations, 1)[0]
        # randomly pick whether to insert before or after that ix:
        # adding 0 inserts before the chosen child, adding 1 inserts after it
target_ix = target_ix + np.random.choice([0, 1], 1)[0]
node = pt.shallow_copy(node)
n_children = len(node.children)
post = pt.shallow_copy(self.post)
if target_ix < n_children:
# the new component will *not* be at the
# end of the pipeline
# so if its a classifier
# we want to insert, need to wrap in stackingestimator
if self.post_is_classifier:
post = pt.shallow_copy(self.wrapped_post)
else:
# at the end of the pipeline
if not self.post_is_classifier:
# can't have a non-classifier at the end of the
# pipeline
# so shift the insertion point back by one
target_ix -= 1
else:
# the post is a classifier, so the existing
# classifier needs to be wrapped in stacking estimator
# otherwise pipeline is invalid
existing_clf_node = node.children[-1]
compiled_clf = pt.to_pipeline(existing_clf_node)
has_classifier = sklearn.base.is_classifier(compiled_clf)
# should always be true given the .can_apply condition
assert has_classifier
# we want to insert a new classifier at the end
# so we take existing classifier and wrap it
wrapped_clf = pt.to_tree(StackingEstimator(compiled_clf))
wrapped_clf = wrapped_clf.children[0]
# replace the raw clf with the new wrapped clf
node.replace_child(n_children - 1, wrapped_clf)
node.insert_child(target_ix, post)
return node
RULE_TYPES = [
HyperparamUpdate,
HyperparamRemove,
ComponentUpdate,
ComponentRemove,
ComponentInsert,
]
def edit_to_str(edit):
pre_label = get_safe_label(edit.arg1)
post_label = get_safe_label(edit.arg2)
if pre_label is not None:
pre_label = pre_label.split(".")[-1]
if post_label is not None:
post_label = post_label.split(".")[-1]
return "{op}({pre} -> {post})".format(
op=get_edit_op_str(edit),
pre=pre_label,
post=post_label,
)
# add some more info to the edit obj
class AugmentedEdit(object):
def __init__(self, edit, parent_match_edit=None):
self._edit = edit
if is_update_edit(edit) or is_remove_edit(edit):
self.pre_parent = edit.arg1.parent
if is_insert_edit(edit):
assert parent_match_edit is not None
assert is_match_edit(parent_match_edit)
self.pre_parent = parent_match_edit.arg1
def __getattr__(self, attr):
return getattr(self._edit, attr)
def get_parent_match_edit(edit, all_edits):
# need to find the parent where we inserted
# the new node, so we find the "match"
# associated with that... this means we only
# consider insertions for nodes that match
# which is a restriction of the search space
# (but ok for our use case)
possible_parent_match_edits = [
e for e in all_edits if e.arg2 == edit.arg2.parent and is_match_edit(e)
]
parent_match_edit = None
if len(possible_parent_match_edits) == 1:
parent_match_edit = possible_parent_match_edits[0]
return parent_match_edit
class RuleCorpus(object):
def __init__(self, pairs, max_edit_distance, exclude_params=None):
self.rules = []
self.max_edit_distance = max_edit_distance
self.exclude_params = exclude_params
self.build_rules(pairs)
print("Extracted", len(self.rules), "local edit rules")
rule_counter = Counter()
rule_counter.update([type(r) for r in self.rules])
print("Rule breakdown")
for t, c in rule_counter.items():
print("\ttype {}: {}".format(t.__name__, c))
def build_rules(self, pairs):
for pair in tqdm.tqdm(pairs):
if pair.distance > self.max_edit_distance:
continue
for ix, edit in tqdm.tqdm(enumerate(pair.edits)):
if is_match_edit(edit):
continue
if is_insert_edit(edit):
parent_match_edit = get_parent_match_edit(edit, pair.edits)
if parent_match_edit is None:
# can't do anything if no parent...
continue
else:
# not necessary info
parent_match_edit = None
# edit with some more info
aug_edit = AugmentedEdit(edit, parent_match_edit)
if pair.pre.external_score is not None:
pair_score_delta = pair.post.external_score - pair.pre.external_score
else:
# we do this by definition for cases where the pre fails
# TODO: consider if a different value makes more sense...
pair_score_delta = 0.0
for rule_type in RULE_TYPES:
if rule_type.can_build_rule(aug_edit):
rule = rule_type(aug_edit)
rule.set_score_delta(pair_score_delta)
# some hyperparameters are not really worth modifying
if isinstance(rule, HyperparamRule
) and self.exclude_params is not None:
param_name = rule.pre.payload[0]
if param_name in self.exclude_params:
continue
self.rules.append(rule)
def get_args():
parser = ArgumentParser(
description="Extract local edit rules from tree pairs")
parser.add_argument("--input", type=str, help="Path to pickled tree pairs")
# no point in extracting local rules when in reality
# the pre/post tree pairs require a ton of edits
parser.add_argument(
"--max_edit_distance",
type=int,
help="Max number of edits in the pair",
default=3,
)
parser.add_argument(
"--exclude_params",
type=str,
nargs="+",
help="Hyperparams to exclude from update/remove rules",
default=["random_state", "n_jobs", "verbose", "cv"])
parser.add_argument("--output", type=str, help="Path to save local rules")
return parser.parse_args()
def main():
args = get_args()
with open(args.input, "rb") as fin:
pairs = pickle.load(fin)
rule_corpus = RuleCorpus(pairs, args.max_edit_distance,
args.exclude_params)
with open(args.output, "wb") as fout:
pickle.dump(rule_corpus, fout)
if __name__ == "__main__":
try:
main()
except Exception as err:
import pdb
pdb.post_mortem()
sys.exit(1)
```
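To make the rule machinery above concrete, here is a hedged sketch of probing an extracted rule corpus against a fresh pipeline tree. The pickle path and the `iter_nodes` helper are assumptions for illustration (nodes expose `.children` and `.label` as used in the module above); actual rule application and tree splicing are driven by the tree enumerator, not by this snippet.
```python
import pickle

from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

from janus.pipeline import pipeline_to_tree as pt


def iter_nodes(node):
    # depth-first traversal over the pipeline tree
    yield node
    for child in node.children:
        yield from iter_nodes(child)


with open("car-evaluation-local-rules.pkl", "rb") as fin:  # hypothetical path
    rule_corpus = pickle.load(fin)

pipeline = Pipeline([("scaler", StandardScaler()),
                     ("clf", LogisticRegression())])
tree = pt.to_tree(pipeline)

for rule in rule_corpus.rules:
    for node in iter_nodes(tree):
        if rule.can_apply(node):
            print(rule.as_str(), "applies at", node.label)
            # rule.apply(node) returns a replacement node (None means remove);
            # splicing it back into the tree is left to the enumerator
            break
```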
#### File: janus/repair/meta_learning.py
```python
from argparse import ArgumentParser
import pickle
import sys
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.feature_extraction.text import CountVectorizer
import tqdm
from janus.repair.mutate import get_random_mutation_sampler
from janus.pipeline import pipeline_to_tree as pt
class PipelineScorePredictor(object):
def __init__(self, model=None, encoder=None):
if model is None:
model = RandomForestRegressor()
self.model = model
if encoder is None:
encoder = CountVectorizer(
tokenizer=pt.pipeline_tokenizer,
token_pattern=None,
)
self.encoder = encoder
self.fit_ = False
def encode_(self, pipelines):
tr = [pt.to_tree(p) for p in pipelines]
jsons = [pt.to_json(t) for t in tr]
as_text = [str(j) for j in jsons]
# encode into text
if not self.fit_:
self.encoder.fit(as_text)
enc = self.encoder.transform(as_text)
return enc
def fit(self, pipelines, scores):
# convert to vector rep
X = self.encode_(pipelines)
y = np.array(scores)
self.model.fit(X, y)
self.fit_ = True
return self
def predict(self, pipelines):
# convert to vector rep
encoded = self.encode_(pipelines)
return self.model.predict(encoded)
def train(paths, random_state=None):
dfs = []
for p in tqdm.tqdm(paths):
df = pd.read_pickle(p)
df["failed"] = df["external_score"].isna()
df = df[~df["failed"]]
df = df[["obj_graph", "external_score"]]
dfs.append(df)
combined_df = pd.concat(dfs, axis=0)
pipelines = combined_df["obj_graph"].values
scores = combined_df["external_score"].values
if random_state is not None:
np.random.seed(random_state)
score_predictor = PipelineScorePredictor()
score_predictor.fit(pipelines, scores)
return score_predictor
def get_args():
parser = ArgumentParser(
description="Train meta learner pipeline score strategy")
parser.add_argument(
"--input",
type=str,
nargs="+",
help="Path to pickled search trace dataframes",
)
parser.add_argument(
"--output",
type=str,
help="Path to dump trained model",
)
parser.add_argument(
"--random_state",
type=int,
help="RNG seed",
default=42,
)
return parser.parse_args()
def main():
args = get_args()
    score_predictor = train(args.input, random_state=args.random_state)
with open(args.output, "wb") as fout:
pickle.dump(score_predictor, fout)
if __name__ == "__main__":
try:
main()
except Exception as err:
import pdb
pdb.post_mortem()
sys.exit(1)
```
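A small sketch of the score predictor in isolation (assuming `janus` is installed); the pipelines and scores below are toy values rather than real search traces:
```python
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.tree import DecisionTreeClassifier

from janus.repair.meta_learning import PipelineScorePredictor

pipelines = [
    Pipeline([("scaler", StandardScaler()), ("clf", LogisticRegression())]),
    Pipeline([("clf", DecisionTreeClassifier(max_depth=3))]),
]
scores = [0.82, 0.74]  # e.g. cross-validated scores from a search trace

predictor = PipelineScorePredictor()
predictor.fit(pipelines, scores)
# predicted scores are what the meta-learning enumerators use to rank candidates
print(predictor.predict(pipelines))
```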
#### File: janus/repair/repair_tools.py
```python
from janus.repair.tree_enumerator import (
get_tree_enumerator, )
from janus.repair.rule_sampler import (
get_rule_sampler, )
import pickle
# named strategy -> (rule, enumeration)
PREDEFINED_STRATEGIES = {
"weighted-transducer": ("weighted", "beam"), # janus
"rf-transducer": ("predictive", "beam"), # deprecated
"random-mutation": ("mutation", "orig-only"), # random-mutation baseline
"random-janus": ("random", "orig-only"), # random-janus baseline
"classifier-swap": (None, "classifier-swap"), # deprecated
"meta-learning": ("mutation", "meta-learning"), # meta-learning baseline
"meta-janus": ("weighted", "meta-janus"), # meta-janus approach
"janus": ("weighted", "orig-only"),
}
def get_repair_tools(
predefined_strategy=None,
rule_strategy=None,
enumeration_strategy=None,
score_predictor=None,
rules_paths=False,
random_state=None,
):
if predefined_strategy is not None:
assert rule_strategy is None
assert enumeration_strategy is None
if predefined_strategy not in PREDEFINED_STRATEGIES:
raise Exception("Unknown predefined_strategy: " +
predefined_strategy)
rule_strategy, enumeration_strategy = PREDEFINED_STRATEGIES[
predefined_strategy]
rules = []
if rule_strategy is not None and rule_strategy != "mutation":
for p in rules_paths:
with open(p, "rb") as fin:
rule_corpus = pickle.load(fin)
rules.extend(rule_corpus.rules)
rule_sampler = get_rule_sampler(
rule_strategy,
rules,
random_state,
)
if enumeration_strategy.startswith("meta") and score_predictor is not None:
with open(score_predictor, "rb") as fin:
score_predictor = pickle.load(fin)
enumerator = get_tree_enumerator(
enumeration_strategy,
rule_sampler,
force_apply=(rule_strategy == "mutation"),
score_predictor=score_predictor,
)
return {
"rules": rules,
"rule_sampler": rule_sampler,
"tree_enumerator": enumerator
}
```
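A hedged wiring example for the predefined "janus" strategy; the rule-file glob below is hypothetical, and the enumerator's own API lives in `janus.repair.tree_enumerator`:
```python
import glob

from janus.repair.repair_tools import get_repair_tools

tools = get_repair_tools(
    predefined_strategy="janus",
    rules_paths=glob.glob("rules/*-local-rules.pkl"),  # hypothetical paths
    random_state=42,
)
rule_sampler = tools["rule_sampler"]
enumerator = tools["tree_enumerator"]
# the enumerator is then asked for repair candidates for a given pipeline tree
```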
#### File: plpy/analyze/graph_builder.py
```python
from argparse import ArgumentParser, RawTextHelpFormatter
from enum import Enum
import networkx as nx
import pickle
import textwrap
from .dynamic_tracer import DynamicDataTracer, get_nested_references, to_ast_node
from .dynamic_trace_events import *
class MemoryRefinementStrategy(Enum):
INCLUDE_ALL = 0
IGNORE_BASE = 1
MOST_SPECIFIC = 2
class DynamicTraceToGraph(object):
def __init__(self, ignore_unknown=False, memory_refinement=0):
# do not construct nodes for unknown memory references
self.ignore_unknown = ignore_unknown
# only consume memory update based on policy
self.memory_refinement = MemoryRefinementStrategy(memory_refinement)
# graph with statement nodes and edges for data dependencies
self.graph = nx.DiGraph()
# counter for node identifiers
self.counter = 0
# note that pydot doesn't like negatives...
self.unknown_id = self.allocate_node_id()
# mappings to node identifiers
self.lineno_to_nodeid = {}
self.mem_loc_to_lineno = {}
self.consuming = []
def allocate_node_id(self):
_id = self.counter
self.counter += 1
return _id
def create_and_add_node(self, node_id, trace_event):
self.graph.add_node(node_id)
# set up attributes
attributes = [
'src', 'lineno', 'event', 'complete_defs', 'defs', 'calls', 'uses'
]
for attr in attributes:
self.graph.nodes[node_id][attr] = None
if node_id == self.unknown_id:
self.graph.nodes[node_id]['src'] = 'UNKNOWNS: '
else:
self.graph.nodes[node_id]['src'] = trace_event.line
self.graph.nodes[node_id]['lineno'] = trace_event.lineno
self.graph.nodes[node_id]['event'] = trace_event
return self.graph.nodes[node_id]
def handle_ExecLine(self, event):
if self.consuming:
return
# TODO: this currently ignores loops and allocates a new node per statement executed
node_id = self.allocate_node_id()
self.create_and_add_node(node_id, event)
self.graph.nodes[node_id]['uses'] = event.uses
dependencies = []
for var in event.uses:
if (not self.ignore_unknown) or var.id in self.mem_loc_to_lineno:
ml_id = self.get_latest_node_id_for_mem_loc(var.name, var.id)
dependencies.append((ml_id, node_id))
self.graph.add_edges_from(dependencies)
# set node id for this lineno
self.lineno_to_nodeid[event.lineno] = node_id
def get_latest_node_id_for_mem_loc(self, name, mem_loc):
if not mem_loc in self.mem_loc_to_lineno:
# one of the unknown locations
# create new node if needed and accumulate string for debugging when drawn
if not self.unknown_id in self.graph.nodes:
self.create_and_add_node(self.unknown_id, None)
self.graph.nodes[self.unknown_id]['src'] += ('%s,' % name)
return self.unknown_id
else:
lineno = self.mem_loc_to_lineno[mem_loc]
return self.lineno_to_nodeid[lineno]
@staticmethod
def refine_ignore_base(_vars):
"""
ignore base memory update for references to 'containers'
"""
bases = set([])
for var in _vars:
ast_node_name = to_ast_node(var.name)
refs = get_nested_references(ast_node_name, exclude_first=True)
refs = sorted(refs, key=len)
if refs:
bases.add(refs[0])
return [var for var in _vars if not var.name in bases]
@staticmethod
def refine_most_specific(_vars):
"""
ignore all but the most specific memory update for references to 'containers'
"""
nested_references = set([])
for var in _vars:
ast_node_name = to_ast_node(var.name)
refs = get_nested_references(ast_node_name, exclude_first=True)
nested_references.update(refs)
return [var for var in _vars if not var.name in nested_references]
def refine_memory_updates(self, _vars):
# refine memory locations according to a specific strategy
# this refinement is purely syntactic
# doing this based on actual memory addresses isn't really feasible
# for example, given a data frame df, df['c1'] always returns the same
# id as long as unmodified, but retrieving first element,
# id(df['c1'][0]), repeatedly returns different value as it allocates a new
# np.dtype object.
# If instead we retrieve with df['c1'].values.item(0) we always get same id
# as it copies it to a Python object...
# all this to say: inferring related objects from memory addresses hardly
# seems bulletproof, so might as well just do syntactically
if self.memory_refinement == MemoryRefinementStrategy.INCLUDE_ALL:
return _vars, _vars
elif self.memory_refinement == MemoryRefinementStrategy.IGNORE_BASE:
return _vars, self.refine_ignore_base(_vars)
elif self.memory_refinement == MemoryRefinementStrategy.MOST_SPECIFIC:
return _vars, self.refine_most_specific(_vars)
else:
raise Exception(
"Invalid memory refinement strategy: %s" %
self.memory_refinement
)
def handle_MemoryUpdate(self, event):
if self.consuming:
return
defs = list(event.defs)
# complete defs maintain all information
# but edges in the graph are only built off of defs
complete_defs, defs = self.refine_memory_updates(defs)
for d in defs:
self.mem_loc_to_lineno[d.id] = event.lineno
# add these defs to the line node that created them
line_node_id = self.lineno_to_nodeid[event.lineno]
self.graph.nodes[line_node_id]['defs'] = defs
self.graph.nodes[line_node_id]['complete_defs'] = complete_defs
def handle_EnterCall(self, event):
self.consuming += [event]
# add call information to node associated with the stmt that triggered the call event
if event.lineno in self.lineno_to_nodeid:
node_id = self.lineno_to_nodeid[event.lineno]
calls = self.graph.nodes[node_id]['calls']
calls = [] if calls is None else calls
calls.append(event)
self.graph.nodes[node_id]['calls'] = calls
def handle_ExitCall(self, event):
self.consuming.pop()
def handle_ExceptionEvent(self, event):
print(
'Graph has an exception event, stopped processing. Saving current progress.'
)
def run(self, tracer):
assert isinstance(
tracer, DynamicDataTracer
), 'This graph builder only works for dynamic data traces'
handlers = {
ExecLine: self.handle_ExecLine,
MemoryUpdate: self.handle_MemoryUpdate,
EnterCall: self.handle_EnterCall,
ExitCall: self.handle_ExitCall,
ExceptionEvent: self.handle_ExceptionEvent,
}
for e in tracer.trace_events:
handlers[type(e)](e)
return self.graph
def draw(g, dot_layout=True):
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1)
labels = nx.get_node_attributes(g, 'src')
# use better graphviz layout
pos = nx.drawing.nx_pydot.graphviz_layout(g) if dot_layout else None
nx.draw(g, labels=labels, node_size=100, ax=ax, pos=pos)
return plt, plt.gcf()
def main(args):
with open(args.input_path, 'rb') as f:
tracer = pickle.load(f)
builder = DynamicTraceToGraph(
ignore_unknown=args.ignore_unknown,
memory_refinement=args.memory_refinement
)
graph = builder.run(tracer)
with open(args.output_path, 'wb') as f:
pickle.dump(graph, f)
if args.draw:
plt, plot_fig = draw(graph)
plot_path = args.output_path + '_graph.pdf'
plt.savefig(plot_path)
plt.show(block=args.block)
if __name__ == '__main__':
parser = ArgumentParser(
description='Build networkx graph from tracer (with events)',
formatter_class=RawTextHelpFormatter
)
parser.add_argument(
'input_path', type=str, help='Path to pickled tracer (with events)'
)
parser.add_argument(
'output_path', type=str, help='Path to store pickled networkx graph'
)
parser.add_argument(
'-i',
'--ignore_unknown',
action='store_true',
help='Exclude unknown memory locations from graph'
)
parser.add_argument(
'-m',
'--memory_refinement',
type=int,
help=textwrap.dedent(
"""
0: apply all memory updates (MemoryRefinementStrategy.INCLUDE_ALL) (DEFAULT)
1: ignore base (MemoryRefinementStrategy.IGNORE_BASE)
2: ignore all but most specific (MemoryRefinementStrategy.MOST_SPECIFIC)
Determined syntactically
"""
),
default=0
)
parser.add_argument(
'-d', '--draw', action='store_true', help='Draw graph and display'
)
parser.add_argument(
'-b',
'--block',
action='store_true',
help='Block when displaying graph'
)
args = parser.parse_args()
try:
main(args)
except Exception as err:
import pdb
pdb.post_mortem()
```
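The memory-refinement strategies are easiest to see on a toy example. A sketch under stated assumptions: `plpy` is installed, and the `Var` namedtuple stands in for the tracer's variable records, of which only `.name` is consulted by this refinement.
```python
from collections import namedtuple

from plpy.analyze.graph_builder import DynamicTraceToGraph

Var = namedtuple("Var", ["name"])  # stand-in for the tracer's variable record

updates = [Var("df"), Var("df['c1']")]
# IGNORE_BASE drops the base container when a more specific reference
# (here df['c1']) is also updated in the same statement
kept = DynamicTraceToGraph.refine_ignore_base(updates)
print([v.name for v in kept])  # expected: ["df['c1']"]
```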
#### File: plpy/rewrite/expr_lifter.py
```python
from argparse import ArgumentParser
import ast
from copy import deepcopy
from astunparse import unparse
# FIXME:
# remove unnecessary deepcopy
# add documentation
class SliceRewriter(ast.NodeTransformer):
def visit_Slice(self, node):
lower_str = unparse(node.lower) if node.lower else str(None)
upper_str = unparse(node.upper) if node.upper else str(None)
step_str = unparse(node.step) if node.step else str(None)
new_node = ast.parse(
'slice(%s, %s, %s)' % (lower_str, upper_str, step_str)
).body[0].value
return ast.copy_location(new_node, node)
class ExpressionLifter(ast.NodeTransformer):
"""
Convert python AST to lift nested expression such that
any subexpression is now atomic (unless one of the ignored AST node types)
"""
def __init__(self, sym_format_name=None):
if sym_format_name is None:
sym_format_name = '_var%d'
self.variable_counter = 0
self.sym_format_name = sym_format_name
self.atom_types = (
ast.Name,
ast.Num,
ast.Str,
ast.Bytes,
ast.NameConstant,
ast.Ellipsis,
ast.Constant,
)
        # we ignore certain node types where lifting would change the
        # semantics or add complexity with little benefit
self.ignore_expr_types = (
ast.BoolOp,
ast.Lambda,
ast.ListComp,
ast.SetComp,
ast.DictComp,
ast.GeneratorExp,
ast.Await,
ast.Yield,
ast.YieldFrom,
ast.JoinedStr,
)
self.ignore_stmt_types = (
ast.Delete,
ast.Import,
ast.ImportFrom,
ast.Global,
ast.Nonlocal,
ast.Pass,
ast.Break,
ast.Continue,
)
def run(self, src):
if not isinstance(src, ast.Module):
src = ast.parse(src)
else:
src = deepcopy(src)
# remove slices
src = SliceRewriter().visit(src)
        # the rewritten nodes don't keep fully consistent locations/contexts,
        # so unparse and reparse to get a clean AST
lifted = self.visit(src)
lifted_src = unparse(lifted)
return ast.parse(lifted_src)
def is_atomic(self, node):
# either clearly atomic
return isinstance(node, self.atom_types)\
or isinstance(node, ast.Index) and self.is_atomic(node.value)\
or node is None
def is_ignorable_expr(self, node):
return isinstance(node, self.ignore_expr_types)
def is_ignorable_stmt(self, node):
return isinstance(node, self.ignore_stmt_types)
def ignore(self, node):
return [], node
def alloc_symbol_name(self):
sym = self.sym_format_name % self.variable_counter
self.variable_counter += 1
return sym
def alloc_assign_node(self, rhs_node, ctx=None):
id_allocated = self.alloc_symbol_name()
lhs_node = ast.Name(id=id_allocated, ctx=ast.Store())
assign_node = ast.Assign([lhs_node], rhs_node)
assign_node = ast.copy_location(assign_node, rhs_node)
name_node = ast.Name(id=id_allocated, ctx=ctx if ctx else ast.Load())
return assign_node, name_node
def lift(self, node):
if self.is_atomic(node):
return self.ignore(node)
recursive_nodes = self.visit(node)
prev_assignment_nodes = recursive_nodes[0]
rhs_node = recursive_nodes[1]
curr_assignment, name_node = self.alloc_assign_node(rhs_node)
assignment_nodes = prev_assignment_nodes + [curr_assignment]
return assignment_nodes, name_node
def lift_list(self, nodes):
new_nodes = []
assignments = []
for node in nodes:
if not self.is_atomic(node):
lifted_nodes = self.lift(node)
new_nodes.append(lifted_nodes[1])
assignments.extend(lifted_nodes[0])
else:
new_nodes.append(node)
return assignments, new_nodes
def visit_top_level_list(self, nodes):
new_nodes = []
for expr in nodes:
_nn = self.visit(expr)
try:
new_nodes.extend(_nn)
except TypeError:
new_nodes.append(_nn)
return new_nodes
def visit(self, node):
if self.is_ignorable_stmt(node):
return node
elif self.is_ignorable_expr(node):
return self.ignore(node)
elif self.is_atomic(node):
return self.ignore(node)
else:
return super().visit(node)
# Top-level elements: return list or single element
# Statements
def visit_FunctionDef(self, node):
new_node = deepcopy(node)
new_node.body = self.visit_top_level_list(node.body)
return new_node
def visit_AsyncFunctionDef(self, node):
new_node = deepcopy(node)
new_node.body = self.visit_top_level_list(node.body)
return new_node
def visit_ClassDef(self, node):
new_node = deepcopy(node)
# new_node.bases = self.visit_top_level_list(node.exprs)
# TODO: what are the bases, keywords here
new_node.body = self.visit_top_level_list(node.body)
return new_node
def visit_Return(self, node):
new_node = deepcopy(node)
assignments = []
if not self.is_atomic(node.value):
nodes = self.lift(node.value)
new_node.value = nodes[1]
assignments.extend(nodes[0])
return assignments + [new_node]
def visit_Assign(self, node):
new_node = deepcopy(node)
nodes = self.visit(node.value)
assignment_nodes = nodes[0]
new_value = nodes[1]
new_node.value = new_value
return assignment_nodes + [new_node]
def visit_AugAssign(self, node):
new_node = deepcopy(node)
nodes = self.visit(node.value)
assignment_nodes = nodes[0]
        # extra lift so the augmented assignment's value is atomic
more_nodes = self.lift(nodes[1])
assignment_nodes.extend(more_nodes[0])
new_node.value = more_nodes[1]
return assignment_nodes + [new_node]
def visit_AnnAssign(self, node):
new_node = deepcopy(node)
nodes = self.visit(node.value)
assignment_nodes = nodes[0]
new_node.value = nodes[1]
return assignment_nodes + [new_node]
def visit_For(self, node):
new_node = deepcopy(node)
new_node.body = self.visit_top_level_list(node.body)
new_node.orelse = self.visit_top_level_list(node.orelse)
return new_node
def visit_AsyncFor(self, node):
new_node = deepcopy(node)
new_node.body = self.visit_top_level_list(node.body)
new_node.orelse = self.visit_top_level_list(node.orelse)
return new_node
def visit_While(self, node):
new_node = deepcopy(node)
new_node.body = self.visit_top_level_list(node.body)
new_node.orelse = self.visit_top_level_list(node.orelse)
return new_node
def visit_If(self, node):
new_node = deepcopy(node)
assignment_nodes = []
test_nodes = self.visit(node.test)
assignment_nodes.extend(test_nodes[0])
new_node.test = test_nodes[1]
new_node.body = self.visit_top_level_list(node.body)
new_node.orelse = self.visit_top_level_list(node.orelse)
return assignment_nodes + [new_node]
def visit_With(self, node):
new_node = deepcopy(node)
new_node.body = self.visit_top_level_list(node.body)
return new_node
    def visit_AsyncWith(self, node):
new_node = deepcopy(node)
new_node.body = self.visit_top_level_list(node.body)
return new_node
def visit_Raise(self, node):
new_node = deepcopy(node)
assignment_nodes = []
exc_nodes = self.visit(node.exc)
assignment_nodes.extend(exc_nodes[0])
new_node.exc = exc_nodes[1]
cause_nodes = self.visit(node.cause)
assignment_nodes.extend(cause_nodes[0])
new_node.cause = cause_nodes[1]
return assignment_nodes + [new_node]
def visit_Try(self, node):
new_node = deepcopy(node)
new_node.body = self.visit_top_level_list(node.body)
new_node.handlers = self.visit_top_level_list(node.handlers)
new_node.orelse = self.visit_top_level_list(node.orelse)
new_node.finalbody = self.visit_top_level_list(node.finalbody)
return new_node
def visit_ExceptHandler(self, node):
new_node = deepcopy(node)
new_node.body = self.visit_top_level_list(node.body)
return new_node
def visit_Assert(self, node):
new_node = deepcopy(node)
assignment_nodes = []
test_nodes = self.visit(node.test)
assignment_nodes.extend(test_nodes[0])
new_node.test = test_nodes[1]
return assignment_nodes + [new_node]
# Expressions
def visit_Expr(self, node):
nodes = self.visit(node.value)
# need to wrap the final node in expr
other_nodes = nodes[0]
value_node = nodes[1]
expr_node = ast.Expr(value=value_node)
return other_nodes + [expr_node]
# Non-top level
# Return tuples
def visit_BinOp(self, node):
new_node = deepcopy(node)
assignments = []
left_nodes = self.lift(node.left)
assignments.extend(left_nodes[0])
new_node.left = left_nodes[1]
right_nodes = self.lift(node.right)
assignments.extend(right_nodes[0])
new_node.right = right_nodes[1]
return assignments, new_node
def visit_UnaryOp(self, node):
new_node = deepcopy(node)
assignments = []
operand_nodes = self.lift(node.operand)
assignments = operand_nodes[0]
new_node.operand = operand_nodes[1]
return assignments, new_node
def visit_IfExp(self, node):
new_node = deepcopy(node)
assignments = []
        # only the test is lifted; lifting body/orelse would evaluate both
        # branches eagerly and change semantics
        test_nodes = self.lift(node.test)
new_node.test = test_nodes[1]
assignments.extend(test_nodes[0])
return assignments, new_node
def visit_Compare(self, node):
new_node = deepcopy(node)
assignments = []
left_nodes = self.lift(node.left)
assignments.extend(left_nodes[0])
new_node.left = left_nodes[1]
comparators_nodes = self.lift_list(node.comparators)
assignments.extend(comparators_nodes[0])
new_node.comparators = comparators_nodes[1]
return assignments, new_node
def visit_Call(self, node):
new_node = deepcopy(node)
assignments = []
        # if the call is just one level deep and looks like it might be an
        # instance method (e.g. obj.method(...)), don't lift the func,
        # since lifting it can make later analysis harder
if isinstance(node.func, ast.Attribute) and isinstance(node.func.value,
ast.Name):
func_nodes = self.ignore(node.func)
else:
func_nodes = self.lift(node.func)
assignments.extend(func_nodes[0])
new_node.func = func_nodes[1]
arg_nodes = self.lift_list(node.args)
assignments.extend(arg_nodes[0])
new_node.args = arg_nodes[1]
new_kws = []
for kw in node.keywords:
kw_nodes = self.visit(kw)
assignments.extend(kw_nodes[0])
new_kws.append(kw_nodes[1])
new_node.keywords = new_kws
return assignments, new_node
def visit_keyword(self, node):
new_node = deepcopy(node)
assignments = []
value_nodes = self.lift(node.value)
assignments.extend(value_nodes[0])
new_node.value = value_nodes[1]
return assignments, new_node
def visit_Attribute(self, node):
new_node = deepcopy(node)
assignments = []
value_nodes = self.lift(node.value)
assignments.extend(value_nodes[0])
new_node.value = value_nodes[1]
return assignments, new_node
def visit_Subscript(self, node):
new_node = deepcopy(node)
assignments = []
value_nodes = self.lift(node.value)
assignments.extend(value_nodes[0])
new_node.value = value_nodes[1]
slice_nodes = self.lift(node.slice)
assignments.extend(slice_nodes[0])
new_node.slice = slice_nodes[1]
return assignments, new_node
def visit_Index(self, node):
new_node = deepcopy(node)
assignments = []
value_nodes = self.lift(node.value)
assignments.extend(value_nodes[0])
new_node.value = value_nodes[1]
return assignments, new_node
def visit_Starred(self, node):
new_node = deepcopy(node)
assignments = []
value_nodes = self.lift(node.value)
assignments.extend(value_nodes[0])
new_node.value = value_nodes[1]
return assignments, new_node
def visit_List(self, node):
new_node = deepcopy(node)
assignments = []
elts_nodes = self.lift_list(node.elts)
assignments.extend(elts_nodes[0])
new_node.elts = elts_nodes[1]
return assignments, new_node
def visit_Tuple(self, node):
new_node = deepcopy(node)
assignments = []
elts_nodes = self.lift_list(node.elts)
assignments.extend(elts_nodes[0])
new_node.elts = elts_nodes[1]
return assignments, new_node
def visit_Set(self, node):
new_node = deepcopy(node)
assignments = []
elts_nodes = self.lift_list(node.elts)
assignments.extend(elts_nodes[0])
new_node.elts = elts_nodes[1]
return assignments, new_node
def visit_Dict(self, node):
new_node = deepcopy(node)
assignments = []
keys_nodes = self.lift_list(node.keys)
assignments.extend(keys_nodes[0])
new_node.keys = keys_nodes[1]
values_nodes = self.lift_list(node.values)
assignments.extend(values_nodes[0])
new_node.values = values_nodes[1]
return assignments, new_node
def visit_Slice(self, node):
new_node = deepcopy(node)
assignments = []
lower_nodes = self.lift(node.lower)
assignments.extend(lower_nodes[0])
new_node.lower = lower_nodes[1]
upper_nodes = self.lift(node.upper)
assignments.extend(upper_nodes[0])
new_node.upper = upper_nodes[1]
step_nodes = self.lift(node.step)
assignments.extend(step_nodes[0])
new_node.step = step_nodes[1]
return assignments, new_node
def visit_ExtSlice(self, node):
new_node = deepcopy(node)
assignments = []
dims_nodes = self.lift_list(node.dims)
assignments.extend(dims_nodes[0])
new_node.dims = dims_nodes[1]
return assignments, new_node
def visit_FormattedValue(self, node):
new_node = deepcopy(node)
assignments = []
value_nodes = self.lift(node.value)
assignments.extend(value_nodes[0])
new_node.value = value_nodes[1]
return assignments, new_node
def lift_expressions(src):
return ExpressionLifter().run(src)
def lift_source(src):
lifted_tree = ExpressionLifter().run(src)
lifted_src = unparse(lifted_tree)
return lifted_src
def main(args):
src = open(args.input_path, 'r').read()
lifted_tree = lift_expressions(src)
lifted_src = unparse(lifted_tree)
with open(args.output_path, 'w') as f:
f.write(lifted_src)
if __name__ == '__main__':
parser = ArgumentParser(description='Expression lifter')
parser.add_argument(
'input_path', type=str, help='Path to input source file'
)
parser.add_argument('output_path', type=str, help='Path to output file')
args = parser.parse_args()
try:
main(args)
except:
import pdb
pdb.post_mortem()
```
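A quick demonstration of the rewrite (assuming `plpy` and `astunparse` are installed); temporaries follow the default `_var%d` naming scheme and the exact formatting comes from `astunparse`:
```python
from plpy.rewrite.expr_lifter import lift_source

print(lift_source("z = f(g(x) + 1)"))
# roughly:
#   _var0 = g(x)
#   _var1 = (_var0 + 1)
#   z = f(_var1)
```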
#### File: janus/tests/test_lift.py
```python
import pytest
import numpy as np
import sklearn.linear_model
import sklearn.datasets
import tqdm
from janus.pipeline import pipeline_to_tree as pt
from janus.lift.extract_pipeline import PipelineLifter
from tests.utils import PipelineGenerator
import copy
import sys
sys.path.append(".")
def test_lift1():
src = """
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
logit = LogisticRegression(
C=1,
solver='lbfgs',
max_iter=500,
random_state=17,
n_jobs=1,
multi_class='multinomial')
p = Pipeline([('scaler', StandardScaler()), ('logit', logit)])
import numpy as np
X = np.random.random((10, 10))
y = np.random.random(10)
y = y > 0.5
p.fit(X, y)
p.predict(X)
"""
result = PipelineLifter(src)
assert not result.failed
assert len(result.pipelines) == 1
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
logit = LogisticRegression(
C=1,
solver='lbfgs',
max_iter=500,
random_state=17,
n_jobs=1,
multi_class='multinomial')
expected = Pipeline([('scaler', StandardScaler()), ('logit', logit)])
assert pt.md5(expected) == pt.md5(result.pipelines[0])
def test_lift2():
# same pipeline but now not using the Pipeline construct in the
# source
src = """
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
logit = LogisticRegression(
C=1,
solver='lbfgs',
max_iter=500,
random_state=17,
n_jobs=1,
multi_class='multinomial')
import numpy as np
X = np.random.random((10, 10))
y = np.random.random(10)
y = y > 0.5
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
logit.fit(X_scaled, y)
logit.predict(X_scaled)
"""
result = PipelineLifter(src)
assert not result.failed
assert len(result.pipelines) == 1
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
logit = LogisticRegression(
C=1,
solver='lbfgs',
max_iter=500,
random_state=17,
n_jobs=1,
multi_class='multinomial')
expected = Pipeline([('scaler', StandardScaler()), ('logit', logit)])
assert pt.md5(expected) == pt.md5(result.pipelines[0])
def test_lift3():
# https://www.kaggle.com/vsmolyakov/svm-classifier
# with some mods --> remove deprecated stale/code from sklearn
# in source (from older version, incompatible)
src = """
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import csv as csv
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import StandardScaler
# subbed out data here...
X_train = np.random.random((100, 100))
y_train = np.random.random(100) > 0.5
svm_parameters = [{'kernel': ['rbf'], 'C': [1,10,100,1000]}]
clf = GridSearchCV(SVC(), svm_parameters, cv=3, verbose=2)
clf.fit(X_train, y_train)
clf.best_params_
C_opt = 10
clf = SVC(C=C_opt, kernel='rbf')
clf.fit(X_train, y_train)
clf.n_support_
X_test_data = np.random.random((100, 100))
y_pred = clf.predict(X_test_data)
"""
result = PipelineLifter(src)
assert not result.failed
assert len(result.pipelines) == 2
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC
clf = SVC(C=10, kernel="rbf")
expected1 = Pipeline([("clf", clf)])
svm_parameters = [{'kernel': ['rbf'], 'C': [1, 10, 100, 1000]}]
clf = GridSearchCV(SVC(), svm_parameters, cv=3, verbose=2)
expected2 = Pipeline([("clf", clf)])
expected = set([pt.md5(expected1), pt.md5(expected2)])
got = set([pt.md5(p) for p in result.pipelines])
assert got == expected
def test_lift4():
# now no .predict call, but has a .fit call
src = """
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
logit = LogisticRegression(
C=1,
solver='lbfgs',
max_iter=500,
random_state=17,
n_jobs=1,
multi_class='multinomial')
p = Pipeline([('scaler', StandardScaler()), ('logit', logit)])
import numpy as np
X = np.random.random((10, 10))
y = np.random.random(10)
y = y > 0.5
p.fit(X, y)
"""
result = PipelineLifter(src)
assert not result.failed
assert len(result.pipelines) == 1
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
logit = LogisticRegression(
C=1,
solver='lbfgs',
max_iter=500,
random_state=17,
n_jobs=1,
multi_class='multinomial')
expected = Pipeline([('scaler', StandardScaler()), ('logit', logit)])
assert pt.md5(expected) == pt.md5(result.pipelines[0])
```
#### File: tpot/tests/zero_count_tests.py
```python
import numpy as np
from tpot.builtins import ZeroCount
X = np.array([[0, 1, 7, 0, 0],
[3, 0, 0, 2, 19],
[0, 1, 3, 4, 5],
[5, 0, 0, 0, 0]])
def test_ZeroCount():
"""Assert that ZeroCount operator returns correct transformed X."""
op = ZeroCount()
X_transformed = op.transform(X)
zero_col = np.array([3, 2, 1, 4])
non_zero = np.array([2, 3, 4, 1])
assert np.allclose(zero_col, X_transformed[:, 0])
assert np.allclose(non_zero, X_transformed[:, 1])
def test_ZeroCount_fit():
"""Assert that fit() in ZeroCount does nothing."""
op = ZeroCount()
ret_op = op.fit(X)
assert ret_op==op
```
#### File: kaggle/forest-cover-type-prediction/script_11.py
```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
train = pd.read_csv('../input/forest-cover-type-prediction/train.csv',
index_col='Id')
test = pd.read_csv('../input/forest-cover-type-prediction/test.csv',
index_col='Id')
train.head(1).T
train['Cover_Type'].value_counts()
def write_to_submission_file(predicted_labels, out_file,
target='Cover_Type', index_label="Id", init_index=15121):
# turn predictions into data frame and save as csv file
predicted_df = pd.DataFrame(predicted_labels,
index = np.arange(init_index,
predicted_labels.shape[0] + init_index),
columns=[target])
predicted_df.to_csv(out_file, index_label=index_label)
X_train, X_valid, y_train, y_valid = train_test_split(
train.drop('Cover_Type', axis=1), train['Cover_Type'],
test_size=0.3, random_state=101)
logit = LogisticRegression(C=1, solver='lbfgs', max_iter=500,
random_state=17, n_jobs=4,
multi_class='multinomial')
logit_pipe = Pipeline([('scaler', StandardScaler()),
('logit', logit)])
logit_pipe.fit(X_train, y_train)
logit_val_pred = logit_pipe.predict(X_valid)
accuracy_score(y_valid, logit_val_pred)
first_forest = RandomForestClassifier(
n_estimators=100, random_state=17, n_jobs=4)
first_forest.fit(X_train, y_train)
forest_val_pred = first_forest.predict(X_valid)
accuracy_score(y_valid, forest_val_pred)
train.columns
from sklearn.preprocessing import LabelEncoder
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
for col in train.columns:
train[col]=LabelEncoder().fit(train[col]).transform(train[col])
#Note: this decision stump is defined but never used below; AdaBoost is built
#on the random forest fitted earlier as its base estimator.
model = DecisionTreeClassifier(criterion='entropy', max_depth=1)
AdaBoost = AdaBoostClassifier(base_estimator=first_forest, n_estimators=400, learning_rate=1)
boostmodel = AdaBoost.fit(X_train, y_train)
y_predict= boostmodel.predict(X_valid)
accuracy_score(y_valid, y_predict)
write_to_submission_file(y_predict,'final answer.csv')
```
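One caveat on script_11 above: `y_predict` holds predictions for the validation split, so the file written at the end is not aligned with the Kaggle test set. A minimal, hypothetical correction that continues the script above (reusing the fitted `boostmodel` and the `test` frame, which has the same feature columns as `X_train`) would predict on the test data before writing the submission:

```python
# Hypothetical continuation of script_11: predict on the Kaggle test set
# and write those predictions with the same helper defined above.
test_pred = boostmodel.predict(test)
write_to_submission_file(test_pred, 'adaboost_submission.csv')
```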
#### File: kaggle/forest-cover-type-prediction/script_19.py
```python
import pandas as pd
from sklearn import ensemble
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import normalize
import math
def two_largest_indices(inlist):
largest = 0
second_largest = 0
largest_index = 0
second_largest_index = -1
for i in range(len(inlist)):
item = inlist[i]
if item > largest:
second_largest = largest
second_largest_index = largest_index
largest = item
largest_index = i
elif largest > item >= second_largest:
second_largest = item
second_largest_index = i
# Return the results as a tuple
return largest_index, second_largest_index
if __name__ == "__main__":
loc_train = "../input/train.csv"
loc_test = "../input/test.csv"
loc_submission = "kaggle.rf200.entropy.submission.csv"
df_train = pd.read_csv(loc_train)
df_test = pd.read_csv(loc_test)
cols_to_normalize = ['Aspect','Slope','Horizontal_Distance_To_Hydrology','Vertical_Distance_To_Hydrology',
'Hillshade_9am','Hillshade_Noon','Hillshade_3pm','Horizontal_Distance_To_Fire_Points']
df_train[cols_to_normalize] = normalize(df_train[cols_to_normalize])
df_test[cols_to_normalize] = normalize(df_test[cols_to_normalize])
feature_cols = [col for col in df_train.columns if col not in ['Cover_Type','Id']]
feature_cols.append('binned_elevation')
feature_cols.append('Horizontal_Distance_To_Roadways_Log')
feature_cols.append('Soil_Type12_32')
feature_cols.append('Soil_Type23_22_32_33')
df_train['binned_elevation'] = [math.floor(v/50.0) for v in df_train['Elevation']]
df_test['binned_elevation'] = [math.floor(v/50.0) for v in df_test['Elevation']]
df_train['Horizontal_Distance_To_Roadways_Log'] = [math.log(v+1) for v in df_train['Horizontal_Distance_To_Roadways']]
df_test['Horizontal_Distance_To_Roadways_Log'] = [math.log(v+1) for v in df_test['Horizontal_Distance_To_Roadways']]
df_train['Soil_Type12_32'] = df_train['Soil_Type32'] + df_train['Soil_Type12']
df_test['Soil_Type12_32'] = df_test['Soil_Type32'] + df_test['Soil_Type12']
df_train['Soil_Type23_22_32_33'] = df_train['Soil_Type23'] + df_train['Soil_Type22'] + df_train['Soil_Type32'] + df_train['Soil_Type33']
df_test['Soil_Type23_22_32_33'] = df_test['Soil_Type23'] + df_test['Soil_Type22'] + df_test['Soil_Type32'] + df_test['Soil_Type33']
df_train_1_2 = df_train[(df_train['Cover_Type'] <= 2)]
df_train_3_4_6 = df_train[(df_train['Cover_Type'].isin([3,4,6]))]
X_train = df_train[feature_cols]
X_test = df_test[feature_cols]
X_train_1_2 = df_train_1_2[feature_cols]
X_train_3_4_6 = df_train_3_4_6[feature_cols]
y = df_train['Cover_Type']
y_1_2 = df_train_1_2['Cover_Type']
y_3_4_6 = df_train_3_4_6['Cover_Type']
test_ids = df_test['Id']
del df_train
del df_test
clf = ensemble.ExtraTreesClassifier(n_estimators=100,n_jobs=-1,random_state=0)
clf.fit(X_train, y)
clf_1_2 = ensemble.RandomForestClassifier(n_estimators=200,n_jobs=-1,random_state=0)
clf_1_2.fit(X_train_1_2, y_1_2)
clf_3_4_6 = ensemble.RandomForestClassifier(n_estimators=200,n_jobs=-1,random_state=0)
clf_3_4_6.fit(X_train_3_4_6, y_3_4_6)
del X_train
vals_1_2 = {}
for e, val in enumerate(list(clf_1_2.predict_proba(X_test))):
vals_1_2[e] = val
print(clf_1_2.classes_)
vals_3_4_6 = {}
for e, val in enumerate(list(clf_3_4_6.predict_proba(X_test))):
vals_3_4_6[e] = val
print(clf_3_4_6.classes_)
vals = {}
for e, val in enumerate(list(clf.predict(X_test))):
vals[e] = val
with open(loc_submission, "w") as outfile:
outfile.write("Id,Cover_Type\n")
for e, val in enumerate(list(clf.predict_proba(X_test))):
val[0] += vals_1_2[e][0]/1.3
val[1] += vals_1_2[e][1]/1.1
val[2] += vals_3_4_6[e][0]/3.4
val[3] += vals_3_4_6[e][1]/4.0
val[5] += vals_3_4_6[e][2]/3.6
i,j = two_largest_indices(val)
v = i + 1
outfile.write("%s,%s\n"%(test_ids[e],v))
```
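The submission loop in script_19 above blends re-weighted probabilities from the two specialist forests (classes 1–2 and classes 3, 4, 6) onto the 7-class probabilities of the ExtraTrees model, then writes the top class index plus one (since the `predict_proba` columns correspond to classes 1..7 in order). A small self-contained sketch of that blending arithmetic, using made-up probability vectors:

```python
# Made-up probability vectors for a single test row, only to illustrate
# the index arithmetic used in the blending loop above.
val = [0.10, 0.20, 0.05, 0.05, 0.30, 0.20, 0.10]   # clf.predict_proba row, classes 1..7
vals_1_2 = [0.6, 0.4]                              # clf_1_2 row, classes 1 and 2
vals_3_4_6 = [0.5, 0.3, 0.2]                       # clf_3_4_6 row, classes 3, 4 and 6

val[0] += vals_1_2[0] / 1.3
val[1] += vals_1_2[1] / 1.1
val[2] += vals_3_4_6[0] / 3.4
val[3] += vals_3_4_6[1] / 4.0
val[5] += vals_3_4_6[2] / 3.6

predicted_cover_type = max(range(len(val)), key=lambda i: val[i]) + 1
print(predicted_cover_type)
```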
#### File: kaggle/forest-cover-type-prediction/script_2.py
```python
import warnings
warnings.filterwarnings('ignore')
# Read raw data from the file
import pandas #provides data structures to quickly analyze data
#Since this code runs on Kaggle server, train data can be accessed directly in the 'input' folder
dataset = pandas.read_csv("../input/train.csv")
#Drop the first column 'Id' since it just has serial numbers. Not useful in the prediction process.
dataset = dataset.iloc[:,1:]
#Removal list initialize
rem = []
#Add constant columns as they don't help in prediction process
for c in dataset.columns:
if dataset[c].std() == 0: #standard deviation is zero
rem.append(c)
#drop the columns
dataset.drop(rem,axis=1,inplace=True)
print(rem)
#Following columns are dropped
#get the number of rows and columns
r, c = dataset.shape
#get the list of columns
cols = dataset.columns
#create an array which has indexes of columns
i_cols = []
for i in range(0,c-1):
i_cols.append(i)
#array of importance rank of all features
ranks = []
#Extract only the values
array = dataset.values
#Y is the target column, X has the rest
X_orig = array[:,0:(c-1)]
Y = array[:,(c-1)]
#Validation chunk size
val_size = 0.1
#Use a common seed in all experiments so that same chunk is used for validation
seed = 0
#Split the data into chunks
from sklearn.model_selection import train_test_split
X_train, X_val, Y_train, Y_val = train_test_split(X_orig, Y, test_size=val_size, random_state=seed)
#Import libraries for data transformations
from sklearn.impute import SimpleImputer #replaces the removed sklearn.preprocessing.Imputer; unused since no data is missing
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import Normalizer
#All features
X_all = []
#Additionally we will make a list of subsets
X_all_add =[]
#columns to be dropped
rem_cols = []
#indexes of columns to be dropped
i_rem = []
#Add this version of X to the list
X_all.append(['Orig','All', X_train,X_val,1.0,cols[:c-1],rem_cols,ranks,i_cols,i_rem])
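#Each entry of X_all (and later X_all_add) follows the same layout:
#[transform_name, subset_name, X_train_view, X_val_view, ratio,
# selected_column_names, removed_column_names, ranks,
# selected_column_indexes, removed_column_indexes]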
#point where categorical data begins
size=10
import numpy
#Standardized
#Apply transform only for non-categorical data
X_temp = StandardScaler().fit_transform(X_train[:,0:size])
X_val_temp = StandardScaler().fit_transform(X_val[:,0:size])
#Concatenate non-categorical data and categorical
X_con = numpy.concatenate((X_temp,X_train[:,size:]),axis=1)
X_val_con = numpy.concatenate((X_val_temp,X_val[:,size:]),axis=1)
#Add this version of X to the list
X_all.append(['StdSca','All', X_con,X_val_con,1.0,cols,rem_cols,ranks,i_cols,i_rem])
#MinMax
#Apply transform only for non-categorical data
X_temp = MinMaxScaler().fit_transform(X_train[:,0:size])
X_val_temp = MinMaxScaler().fit_transform(X_val[:,0:size])
#Concatenate non-categorical data and categorical
X_con = numpy.concatenate((X_temp,X_train[:,size:]),axis=1)
X_val_con = numpy.concatenate((X_val_temp,X_val[:,size:]),axis=1)
#Add this version of X to the list
X_all.append(['MinMax', 'All', X_con,X_val_con,1.0,cols,rem_cols,ranks,i_cols,i_rem])
#Normalize
#Apply transform only for non-categorical data
X_temp = Normalizer().fit_transform(X_train[:,0:size])
X_val_temp = Normalizer().fit_transform(X_val[:,0:size])
#Concatenate non-categorical data and categorical
X_con = numpy.concatenate((X_temp,X_train[:,size:]),axis=1)
X_val_con = numpy.concatenate((X_val_temp,X_val[:,size:]),axis=1)
#Add this version of X to the list
X_all.append(['Norm', 'All', X_con,X_val_con,1.0,cols,rem_cols,ranks,i_cols,i_rem])
#Impute
#Imputer is not used as no data is missing
#List of transformations
trans_list = []
for trans,name,X,X_val,v,cols_list,rem_list,rank_list,i_cols_list,i_rem_list in X_all:
trans_list.append(trans)
#Select top 75%,50%,25%
ratio_list = [0.75,0.50,0.25]
#Median of rankings for each column
unsorted_rank = [0,8,11,4,5,2,5,7.5,9.5,3,8,28.5,14.5,2,35,19.5,12,14,37,25.5,50,44,9,28,20.5,19.5,40,38,20,38,43,35,44,22,24,33,49,42,46,47,27.5,19,31.5,23,28,42,30.5,46,40,12,13,18]
#List of feature selection models
feat = []
#Add Median to the list
n = 'Median'
for val in ratio_list:
feat.append([n,val])
for trans,s, X, X_val, d, cols, rem_cols, ra, i_cols, i_rem in X_all:
#Create subsets of feature list based on ranking and ratio_list
for name, v in feat:
#Combine importance and index of the column in the array joined
joined = []
for i, pred in enumerate(unsorted_rank):
joined.append([i,cols[i],pred])
        #Sort by rank value in ascending order (lower rank = more important)
joined_sorted = sorted(joined, key=lambda x: x[2])
#Starting point of the columns to be dropped
rem_start = int((v*(c-1)))
#List of names of columns selected
cols_list = []
#Indexes of columns selected
i_cols_list = []
#Ranking of all the columns
rank_list =[]
#List of columns not selected
rem_list = []
#Indexes of columns not selected
i_rem_list = []
#Split the array. Store selected columns in cols_list and removed in rem_list
for j, (i, col, x) in enumerate(list(joined_sorted)):
#Store the rank
rank_list.append([i,j])
#Store selected columns in cols_list and indexes in i_cols_list
if(j < rem_start):
cols_list.append(col)
i_cols_list.append(i)
#Store not selected columns in rem_list and indexes in i_rem_list
else:
rem_list.append(col)
i_rem_list.append(i)
#Sort the rank_list and store only the ranks. Drop the index
#Append model name, array, columns selected and columns to be removed to the additional list
X_all_add.append([trans,name,X,X_val,v,cols_list,rem_list,[x[1] for x in sorted(rank_list,key=lambda x:x[0])],i_cols_list,i_rem_list])
#Import plotting library
import matplotlib.pyplot as plt
#Dictionary to store the accuracies for all combinations
acc = {}
#List of combinations
comb = []
#Append name of transformation to trans_list
for trans in trans_list:
acc[trans]=[]
#Evaluation of various combinations of LinearDiscriminatAnalysis using all the views
#Import the library
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
#Set the base model
model = LinearDiscriminantAnalysis()
algo = "LDA"
##Set figure size
#plt.rc("figure", figsize=(25, 10))
#Accuracy of the model using all features
for trans,name,X,X_val,v,cols_list,rem_list,rank_list,i_cols_list,i_rem_list in X_all:
model.fit(X[:,i_cols_list],Y_train)
result = model.score(X_val[:,i_cols_list], Y_val)
acc[trans].append(result)
#print(trans+"+"+name+"+%d" % (v*(c-1)))
#print(result)
comb.append("%s+%s of %s" % (algo,"All",1.0))
#Accuracy of the model using a subset of features
for trans,name,X,X_val,v,cols_list,rem_list,rank_list,i_cols_list,i_rem_list in X_all_add:
model.fit(X[:,i_cols_list],Y_train)
result = model.score(X_val[:,i_cols_list], Y_val)
acc[trans].append(result)
#print(trans+"+"+name+"+%d" % (v*(c-1)))
#print(result)
for v in ratio_list:
comb.append("%s+%s of %s" % (algo,"Subset",v))
##Plot the accuracies of all combinations
#fig, ax = plt.subplots()
##Plot each transformation
#for trans in trans_list:
# plt.plot(acc[trans])
##Set the tick names to names of combinations
#ax.set_xticks(range(len(comb)))
#ax.set_xticklabels(comb,rotation='vertical')
##Display the plot
#plt.legend(trans_list,loc='best')
##Plot the accuracy for all combinations
#plt.show()
#Best estimated performance is 65%. Occurs when all features are used and without any transformation!
#Performance of MinMax and Normalizer is very poor
#Evaluation of various combinations of LogisticRegression using all the views
#Import the library
from sklearn.linear_model import LogisticRegression
C_list = [100]
for C in C_list:
#Set the base model
model = LogisticRegression(n_jobs=-1,random_state=seed,C=C)
algo = "LR"
##Set figure size
#plt.rc("figure", figsize=(25, 10))
#Accuracy of the model using all features
for trans,name,X,X_val,v,cols_list,rem_list,rank_list,i_cols_list,i_rem_list in X_all:
model.fit(X[:,i_cols_list],Y_train)
result = model.score(X_val[:,i_cols_list], Y_val)
acc[trans].append(result)
#print(trans+"+"+name+"+%d" % (v*(c-1)))
#print(result)
comb.append("%s with C=%s+%s of %s" % (algo,C,"All",1.0))
#Accuracy of the model using a subset of features
for trans,name,X,X_val,v,cols_list,rem_list,rank_list,i_cols_list,i_rem_list in X_all_add:
model.fit(X[:,i_cols_list],Y_train)
result = model.score(X_val[:,i_cols_list], Y_val)
acc[trans].append(result)
#print(trans+"+"+name+"+%d" % (v*(c-1)))
#print(result)
for v in ratio_list:
comb.append("%s with C=%s+%s of %s" % (algo,C,"Subset",v))
##Plot the accuracies of all combinations
#fig, ax = plt.subplots()
##Plot each transformation
#for trans in trans_list:
# plt.plot(acc[trans])
##Set the tick names to names of combinations
#ax.set_xticks(range(len(comb)))
#ax.set_xticklabels(comb,rotation='vertical')
##Display the plot
#plt.legend(trans_list,loc='best')
##Plot the accuracy for all combinations
#plt.show()
#Best estimated performance is close to 67% with LR when C=100 and all attributes are considered and with standardized data
#Performance improves with increasing values of C
#Performance of Normalizer and MinMax Scaler is poor in general
#Evaluation of various combinations of KNN Classifier using all the views
#Import the library
from sklearn.neighbors import KNeighborsClassifier
n_list = [1]
for n_neighbors in n_list:
#Set the base model
model = KNeighborsClassifier(n_jobs=-1,n_neighbors=n_neighbors)
algo = "KNN"
##Set figure size
#plt.rc("figure", figsize=(25, 10))
#Accuracy of the model using all features
for trans,name,X,X_val,v,cols_list,rem_list,rank_list,i_cols_list,i_rem_list in X_all:
model.fit(X[:,i_cols_list],Y_train)
result = model.score(X_val[:,i_cols_list], Y_val)
acc[trans].append(result)
#print(trans+"+"+name+"+%d" % (v*(c-1)))
#print(result)
comb.append("%s with n=%s+%s of %s" % (algo,n_neighbors,"All",1.0))
#Accuracy of the model using a subset of features
for trans,name,X,X_val,v,cols_list,rem_list,rank_list,i_cols_list,i_rem_list in X_all_add:
model.fit(X[:,i_cols_list],Y_train)
result = model.score(X_val[:,i_cols_list], Y_val)
acc[trans].append(result)
#print(trans+"+"+name+"+%d" % (v*(c-1)))
#print(result)
for v in ratio_list:
comb.append("%s with n=%s+%s of %s" % (algo,n_neighbors,"Subset",v))
##Plot the accuracies of all combinations
#fig, ax = plt.subplots()
##Plot each transformation
#for trans in trans_list:
# plt.plot(acc[trans])
##Set the tick names to names of combinations
#ax.set_xticks(range(len(comb)))
#ax.set_xticklabels(comb,rotation='vertical')
##Display the plot
#plt.legend(trans_list,loc='best')
##Plot the accuracy for all combinations
#plt.show()
#Best estimated performance is close to 86% when n_neighbors=1 and normalizer is used
#Evaluation of various combinations of Naive Bayes using all the views
#Import the library
from sklearn.naive_bayes import GaussianNB
#Set the base model
model = GaussianNB()
algo = "NB"
##Set figure size
#plt.rc("figure", figsize=(25, 10))
#Accuracy of the model using all features
for trans,name,X,X_val,v,cols_list,rem_list,rank_list,i_cols_list,i_rem_list in X_all:
model.fit(X[:,i_cols_list],Y_train)
result = model.score(X_val[:,i_cols_list], Y_val)
acc[trans].append(result)
#print(trans+"+"+name+"+%d" % (v*(c-1)))
#print(result)
comb.append("%s+%s of %s" % (algo,"All",1.0))
#Accuracy of the model using a subset of features
for trans,name,X,X_val,v,cols_list,rem_list,rank_list,i_cols_list,i_rem_list in X_all_add:
model.fit(X[:,i_cols_list],Y_train)
result = model.score(X_val[:,i_cols_list], Y_val)
acc[trans].append(result)
#print(trans+"+"+name+"+%d" % (v*(c-1)))
#print(result)
for v in ratio_list:
comb.append("%s+%s of %s" % (algo,"Subset",v))
##Plot the accuracies of all combinations
#fig, ax = plt.subplots()
##Plot each transformation
#for trans in trans_list:
# plt.plot(acc[trans])
##Set the tick names to names of combinations
#ax.set_xticks(range(len(comb)))
#ax.set_xticklabels(comb,rotation='vertical')
##Display the plot
#plt.legend(trans_list,loc='best')
##Plot the accuracy for all combinations
#plt.show()
#Best estimated performance is close to 64%. Original with 50% subset outperforms all transformations of NB
#Evaluation of various combinations of CART using all the views
#Import the library
from sklearn.tree import DecisionTreeClassifier
d_list = [13]
for max_depth in d_list:
#Set the base model
model = DecisionTreeClassifier(random_state=seed,max_depth=max_depth)
algo = "CART"
#Set figure size
plt.rc("figure", figsize=(15, 10))
#Accuracy of the model using all features
for trans,name,X,X_val,v,cols_list,rem_list,rank_list,i_cols_list,i_rem_list in X_all:
model.fit(X[:,i_cols_list],Y_train)
result = model.score(X_val[:,i_cols_list], Y_val)
acc[trans].append(result)
#print(trans+"+"+name+"+%d" % (v*(c-1)))
#print(result)
comb.append("%s with d=%s+%s of %s" % (algo,max_depth,"All",1.0))
#Accuracy of the model using a subset of features
for trans,name,X,X_val,v,cols_list,rem_list,rank_list,i_cols_list,i_rem_list in X_all_add:
model.fit(X[:,i_cols_list],Y_train)
result = model.score(X_val[:,i_cols_list], Y_val)
acc[trans].append(result)
#print(trans+"+"+name+"+%d" % (v*(c-1)))
#print(result)
for v in ratio_list:
comb.append("%s with d=%s+%s of %s" % (algo,max_depth,"Subset",v))
##Plot the accuracies of all combinations
#fig, ax = plt.subplots()
##Plot each transformation
#for trans in trans_list:
# plt.plot(acc[trans])
##Set the tick names to names of combinations
#ax.set_xticks(range(len(comb)))
#ax.set_xticklabels(comb,rotation='vertical')
##Display the plot
#plt.legend(trans_list,loc='best')
##Plot the accuracy for all combinations
#plt.show()
#Best estimated performance is close to 79% when max_depth=13 and for Original
#Evaluation of various combinations of SVM using all the views
#Import the library
from sklearn.svm import SVC
c_list = [10]
for C in c_list:
#Set the base model
model = SVC(random_state=seed,C=C)
algo = "SVM"
#Set figure size
#plt.rc("figure", figsize=(15, 10))
#Accuracy of the model using all features
for trans,name,X,X_val,v,cols_list,rem_list,rank_list,i_cols_list,i_rem_list in X_all:
model.fit(X[:,i_cols_list],Y_train)
result = model.score(X_val[:,i_cols_list], Y_val)
acc[trans].append(result)
#print(trans+"+"+name+"+%d" % (v*(c-1)))
#print(result)
comb.append("%s with C=%s+%s of %s" % (algo,C,"All",1.0))
##Accuracy of the model using a subset of features
#for trans,name,X,X_val,v,cols_list,rem_list,rank_list,i_cols_list,i_rem_list in X_all_add:
# model.fit(X[:,i_cols_list],Y_train)
# result = model.score(X_val[:,i_cols_list], Y_val)
# acc[trans].append(result)
# print(trans+"+"+name+"+%d" % (v*(c-1)))
# print(result)
#for v in ratio_list:
# comb.append("%s with C=%s+%s of %s" % (algo,C,"Subset",v))
##Plot the accuracies of all combinations
#fig, ax = plt.subplots()
##Plot each transformation
#for trans in trans_list:
# plt.plot(acc[trans])
##Set the tick names to names of combinations
#ax.set_xticks(range(len(comb)))
#ax.set_xticklabels(comb,rotation='vertical')
##Display the plot
#plt.legend(trans_list,loc='best')
##Plot the accuracy for all combinations
#plt.show()
#Training time is very high compared to other algos
#Performance is very poor on the original data, which shows the importance of data transformation
#Best estimated performance is close to 77% when C=10 and for StandardScaler with 0.25 subset
#Evaluation of various combinations of Bagged Decision Trees using all the views
#Import the library
from sklearn.ensemble import BaggingClassifier
from sklearn.tree import DecisionTreeClassifier
#Base estimator
base_estimator = DecisionTreeClassifier(random_state=seed,max_depth=13)
n_list = [100]
for n_estimators in n_list:
#Set the base model
model = BaggingClassifier(n_jobs=-1,base_estimator=base_estimator, n_estimators=n_estimators, random_state=seed)
algo = "Bag"
#Set figure size
plt.rc("figure", figsize=(20, 10))
#Accuracy of the model using all features
for trans,name,X,X_val,v,cols_list,rem_list,rank_list,i_cols_list,i_rem_list in X_all:
model.fit(X[:,i_cols_list],Y_train)
result = model.score(X_val[:,i_cols_list], Y_val)
acc[trans].append(result)
#print(trans+"+"+name+"+%d" % (v*(c-1)))
#print(result)
comb.append("%s with n=%s+%s of %s" % (algo,n_estimators,"All",1.0))
#Accuracy of the model using a subset of features
for trans,name,X,X_val,v,cols_list,rem_list,rank_list,i_cols_list,i_rem_list in X_all_add:
model.fit(X[:,i_cols_list],Y_train)
result = model.score(X_val[:,i_cols_list], Y_val)
acc[trans].append(result)
#print(trans+"+"+name+"+%d" % (v*(c-1)))
#print(result)
for v in ratio_list:
comb.append("%s with n=%s+%s of %s" % (algo,n_estimators,"Subset",v))
##Plot the accuracies of all combinations
#fig, ax = plt.subplots()
##Plot each transformation
#for trans in trans_list:
# plt.plot(acc[trans])
##Set the tick names to names of combinations
#ax.set_xticks(range(len(comb)))
#ax.set_xticklabels(comb,rotation='vertical')
##Display the plot
#plt.legend(trans_list,loc='best')
##Plot the accuracy for all combinations
#plt.show()
#Best estimated performance is close to 82% when n_estimators is 100 for Original
#Evaluation of various combinations of Random Forest using all the views
#Import the library
from sklearn.ensemble import RandomForestClassifier
n_list = [100]
for n_estimators in n_list:
#Set the base model
model = RandomForestClassifier(n_jobs=-1,n_estimators=n_estimators, random_state=seed)
algo = "RF"
#Set figure size
plt.rc("figure", figsize=(20, 10))
#Accuracy of the model using all features
for trans,name,X,X_val,v,cols_list,rem_list,rank_list,i_cols_list,i_rem_list in X_all:
model.fit(X[:,i_cols_list],Y_train)
result = model.score(X_val[:,i_cols_list], Y_val)
acc[trans].append(result)
#print(trans+"+"+name+"+%d" % (v*(c-1)))
#print(result)
comb.append("%s with n=%s+%s of %s" % (algo,n_estimators,"All",1.0))
#Accuracy of the model using a subset of features
for trans,name,X,X_val,v,cols_list,rem_list,rank_list,i_cols_list,i_rem_list in X_all_add:
model.fit(X[:,i_cols_list],Y_train)
result = model.score(X_val[:,i_cols_list], Y_val)
acc[trans].append(result)
#print(trans+"+"+name+"+%d" % (v*(c-1)))
#print(result)
for v in ratio_list:
comb.append("%s with n=%s+%s of %s" % (algo,n_estimators,"Subset",v))
##Plot the accuracies of all combinations
#fig, ax = plt.subplots()
##Plot each transformation
#for trans in trans_list:
# plt.plot(acc[trans])
##Set the tick names to names of combinations
#ax.set_xticks(range(len(comb)))
#ax.set_xticklabels(comb,rotation='vertical')
##Display the plot
#plt.legend(trans_list,loc='best')
##Plot the accuracy for all combinations
#plt.show()
#Best estimated performance is close to 85% when n_estimators is 100
#Evaluation of various combinations of Extra Trees using all the views
#Import the library
from sklearn.ensemble import ExtraTreesClassifier
n_list = [100]
for n_estimators in n_list:
#Set the base model
model = ExtraTreesClassifier(n_jobs=-1,n_estimators=n_estimators, random_state=seed)
algo = "ET"
#Set figure size
plt.rc("figure", figsize=(20, 10))
#Accuracy of the model using all features
for trans,name,X,X_val,v,cols_list,rem_list,rank_list,i_cols_list,i_rem_list in X_all:
model.fit(X[:,i_cols_list],Y_train)
result = model.score(X_val[:,i_cols_list], Y_val)
acc[trans].append(result)
#print(trans+"+"+name+"+%d" % (v*(c-1)))
#print(result)
comb.append("%s with n=%s+%s of %s" % (algo,n_estimators,"All",1.0))
#Accuracy of the model using a subset of features
for trans,name,X,X_val,v,cols_list,rem_list,rank_list,i_cols_list,i_rem_list in X_all_add:
model.fit(X[:,i_cols_list],Y_train)
result = model.score(X_val[:,i_cols_list], Y_val)
acc[trans].append(result)
#print(trans+"+"+name+"+%d" % (v*(c-1)))
#print(result)
for v in ratio_list:
comb.append("%s with n=%s+%s of %s" % (algo,n_estimators,"Subset",v))
##Plot the accuracies of all combinations
#fig, ax = plt.subplots()
##Plot each transformation
#for trans in trans_list:
# plt.plot(acc[trans])
##Set the tick names to names of combinations
#ax.set_xticks(range(len(comb)))
#ax.set_xticklabels(comb,rotation='vertical')
##Display the plot
#plt.legend(trans_list,loc='best')
##Plot the accuracy for all combinations
#plt.show()
#Best estimated performance is close to 88% when n_estimators is 100 , StdScaler with 0.75
#Evaluation of various combinations of AdaBoost ensemble using all the views
#Import the library
from sklearn.ensemble import AdaBoostClassifier
n_list = [100]
for n_estimators in n_list:
#Set the base model
model = AdaBoostClassifier(n_estimators=n_estimators, random_state=seed)
algo = "Ada"
#Set figure size
plt.rc("figure", figsize=(20, 10))
#Accuracy of the model using all features
for trans,name,X,X_val,v,cols_list,rem_list,rank_list,i_cols_list,i_rem_list in X_all:
model.fit(X[:,i_cols_list],Y_train)
result = model.score(X_val[:,i_cols_list], Y_val)
acc[trans].append(result)
#print(trans+"+"+name+"+%d" % (v*(c-1)))
#print(result)
comb.append("%s with n=%s+%s of %s" % (algo,n_estimators,"All",1.0))
#Accuracy of the model using a subset of features
for trans,name,X,X_val,v,cols_list,rem_list,rank_list,i_cols_list,i_rem_list in X_all_add:
model.fit(X[:,i_cols_list],Y_train)
result = model.score(X_val[:,i_cols_list], Y_val)
acc[trans].append(result)
#print(trans+"+"+name+"+%d" % (v*(c-1)))
#print(result)
for v in ratio_list:
comb.append("%s with n=%s+%s of %s" % (algo,n_estimators,"Subset",v))
##Plot the accuracies of all combinations
#fig, ax = plt.subplots()
##Plot each transformation
#for trans in trans_list:
# plt.plot(acc[trans])
##Set the tick names to names of combinations
#ax.set_xticks(range(len(comb)))
#ax.set_xticklabels(comb,rotation='vertical')
##Display the plot
#plt.legend(trans_list,loc='best')
##Plot the accuracy for all combinations
#plt.show()
#Best estimated performance is close to 38% when n_estimators is 100
#Evaluation of various combinations of Stochastic Gradient Boosting using all the views
#Import the library
from sklearn.ensemble import GradientBoostingClassifier
d_list = [9]
for max_depth in d_list:
#Set the base model
model = GradientBoostingClassifier(max_depth=max_depth, random_state=seed)
algo = "SGB"
#Set figure size
plt.rc("figure", figsize=(20, 10))
#Accuracy of the model using all features
for trans,name,X,X_val,v,cols_list,rem_list,rank_list,i_cols_list,i_rem_list in X_all:
model.fit(X[:,i_cols_list],Y_train)
result = model.score(X_val[:,i_cols_list], Y_val)
acc[trans].append(result)
#print(trans+"+"+name+"+%d" % (v*(c-1)))
#print(result)
comb.append("%s with d=%s+%s of %s" % (algo,max_depth,"All",1.0))
##Accuracy of the model using a subset of features
#for trans,name,X,X_val,v,cols_list,rem_list,rank_list,i_cols_list,i_rem_list in X_all_add:
# model.fit(X[:,i_cols_list],Y_train)
# result = model.score(X_val[:,i_cols_list], Y_val)
# acc[trans].append(result)
# #print(trans+"+"+name+"+%d" % (v*(c-1)))
# #print(result)
#for v in ratio_list:
# comb.append("%s with d=%s+%s of %s" % (algo,max_depth,"Subset",v))
##Plot the accuracies of all combinations
#fig, ax = plt.subplots()
##Plot each transformation
#for trans in trans_list:
# plt.plot(acc[trans])
##Set the tick names to names of combinations
#ax.set_xticks(range(len(comb)))
#ax.set_xticklabels(comb,rotation='vertical')
##Display the plot
#plt.legend(trans_list,loc='best')
##Plot the accuracy for all combinations
#plt.show()
#training time is too high
#Best estimated performance is close to 86% when depth is 7
#Evaluation of various combinations of Voting Classifier using all the views
#Import the library
from sklearn.ensemble import VotingClassifier
list_estimators =[]
estimators = []
model1 = ExtraTreesClassifier(n_jobs=-1,n_estimators=100, random_state=seed)
estimators.append(('et', model1))
model2 = RandomForestClassifier(n_jobs=-1,n_estimators=100, random_state=seed)
estimators.append(('rf', model2))
from sklearn.ensemble import BaggingClassifier
from sklearn.tree import DecisionTreeClassifier
base_estimator = DecisionTreeClassifier(random_state=seed,max_depth=13)
model3 = BaggingClassifier(n_jobs=-1,base_estimator=base_estimator, n_estimators=100, random_state=seed)
estimators.append(('bag', model3))
list_estimators.append(['Voting',estimators])
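#The VotingClassifier below combines the three tree ensembles (extra trees,
#random forest and bagged decision trees) using hard majority voting,
#which is sklearn's default voting scheme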
for name, estimators in list_estimators:
#Set the base model
model = VotingClassifier(estimators=estimators, n_jobs=-1)
algo = name
#Set figure size
plt.rc("figure", figsize=(20, 10))
#Accuracy of the model using all features
for trans,name,X,X_val,v,cols_list,rem_list,rank_list,i_cols_list,i_rem_list in X_all:
model.fit(X[:,i_cols_list],Y_train)
result = model.score(X_val[:,i_cols_list], Y_val)
acc[trans].append(result)
#print(trans+"+"+name+"+%d" % (v*(c-1)))
#print(result)
comb.append("%s+%s of %s" % (algo,"All",1.0))
#Accuracy of the model using a subset of features
for trans,name,X,X_val,v,cols_list,rem_list,rank_list,i_cols_list,i_rem_list in X_all_add:
model.fit(X[:,i_cols_list],Y_train)
result = model.score(X_val[:,i_cols_list], Y_val)
acc[trans].append(result)
#print(trans+"+"+name+"+%d" % (v*(c-1)))
#print(result)
for v in ratio_list:
comb.append("%s+%s of %s" % (algo,"Subset",v))
##Plot the accuracies of all combinations
#fig, ax = plt.subplots()
##Plot each transformation
#for trans in trans_list:
# plt.plot(acc[trans])
##Set the tick names to names of combinations
#ax.set_xticks(range(len(comb)))
#ax.set_xticklabels(comb,rotation='vertical')
##Display the plot
#plt.legend(trans_list,loc='best')
##Plot the accuracy for all combinations
#plt.show()
#Best estimated performance is close to 86%
#Evaluation of various combinations of XG Boost using all the views
#Import the library
from xgboost import XGBClassifier
n_list = [300]
for n_estimators in n_list:
#Set the base model
model = XGBClassifier(n_estimators=n_estimators, seed=seed,subsample=0.25)
algo = "XGB"
#Set figure size
plt.rc("figure", figsize=(20, 10))
#Accuracy of the model using all features
for trans,name,X,X_val,v,cols_list,rem_list,rank_list,i_cols_list,i_rem_list in X_all:
model.fit(X[:,i_cols_list],Y_train)
result = model.score(X_val[:,i_cols_list], Y_val)
acc[trans].append(result)
#print(trans+"+"+name+"+%d" % (v*(c-1)))
#print(result)
comb.append("%s with n=%s+%s of %s" % (algo,n_estimators,"All",1.0))
#Accuracy of the model using a subset of features
for trans,name,X,X_val,v,cols_list,rem_list,rank_list,i_cols_list,i_rem_list in X_all_add:
model.fit(X[:,i_cols_list],Y_train)
result = model.score(X_val[:,i_cols_list], Y_val)
acc[trans].append(result)
#print(trans+"+"+name+"+%d" % (v*(c-1)))
#print(result)
for v in ratio_list:
comb.append("%s with n=%s+%s of %s" % (algo,n_estimators,"Subset",v))
##Plot the accuracies of all combinations
#fig, ax = plt.subplots()
##Plot each transformation
#for trans in trans_list:
# plt.plot(acc[trans])
##Set the tick names to names of combinations
#ax.set_xticks(range(len(comb)))
#ax.set_xticklabels(comb,rotation='vertical')
##Display the plot
#plt.legend(trans_list,loc='best')
##Plot the accuracy for all combinations
#plt.show()
#Best estimated performance is close to 80% when n_estimators is 300, subsample=0.25, subset=0.75
#Evaluation of baseline model of MLP using all the views
#Import libraries for deep learning
from keras.wrappers.scikit_learn import KerasClassifier
from keras.models import Sequential
from keras.layers import Dense
#Import libraries for encoding
from keras.utils import np_utils
from sklearn.preprocessing import LabelEncoder
#no. of output classes
y = 7
#random state
numpy.random.seed(seed)
# one hot encode class values
encoder = LabelEncoder()
Y_train_en = encoder.fit_transform(Y_train)
Y_train_hot = np_utils.to_categorical(Y_train_en,y)
Y_val_en = encoder.fit_transform(Y_val)
Y_val_hot = np_utils.to_categorical(Y_val_en,y)
# define baseline model
def baseline(v):
# create model
model = Sequential()
    model.add(Dense(int(v*(c-1)), input_dim=int(v*(c-1)), init='normal', activation='relu'))
model.add(Dense(y, init='normal', activation='sigmoid'))
# Compile model
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
# define smaller model
def smaller(v):
# create model
model = Sequential()
    model.add(Dense(int(v*(c-1)/2), input_dim=int(v*(c-1)), init='normal', activation='relu'))
model.add(Dense(y, init='normal', activation='sigmoid'))
# Compile model
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
# define deeper model
def deeper(v):
# create model
model = Sequential()
    model.add(Dense(int(v*(c-1)), input_dim=int(v*(c-1)), init='normal', activation='relu'))
    model.add(Dense(int(v*(c-1)/2), init='normal', activation='relu'))
model.add(Dense(y, init='normal', activation='sigmoid'))
# Compile model
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
# Optimize using dropout and decay
from keras.optimizers import SGD
from keras.layers import Dropout
from keras.constraints import maxnorm
def dropout(v):
#create model
model = Sequential()
    model.add(Dense(int(v*(c-1)), input_dim=int(v*(c-1)), init='normal', activation='relu',W_constraint=maxnorm(3)))
model.add(Dropout(0.2))
    model.add(Dense(int(v*(c-1)/2), init='normal', activation='relu', W_constraint=maxnorm(3)))
model.add(Dropout(0.2))
model.add(Dense(y, init='normal', activation='sigmoid'))
# Compile model
sgd = SGD(lr=0.1,momentum=0.9,decay=0.0,nesterov=False)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
return model
# define decay model
def decay(v):
# create model
model = Sequential()
    model.add(Dense(int(v*(c-1)), input_dim=int(v*(c-1)), init='normal', activation='relu'))
model.add(Dense(y, init='normal', activation='sigmoid'))
# Compile model
sgd = SGD(lr=0.1,momentum=0.8,decay=0.01,nesterov=False)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
return model
est_list = [('MLP',baseline),('smaller',smaller),('deeper',deeper),('dropout',dropout),('decay',decay)]
for name, est in est_list:
algo = name
#Set figure size
plt.rc("figure", figsize=(20, 10))
#Accuracy of the model using all features
for trans,name,X,X_val,v,cols_list,rem_list,rank_list,i_cols_list,i_rem_list in X_all:
model = KerasClassifier(build_fn=est, v=v, nb_epoch=10, verbose=0)
model.fit(X[:,i_cols_list],Y_train_hot)
result = model.score(X_val[:,i_cols_list], Y_val_hot)
acc[trans].append(result)
# print(trans+"+"+name+"+%d" % (v*(c-1)))
# print(result)
comb.append("%s+%s of %s" % (algo,"All",1.0))
##Accuracy of the model using a subset of features
#for trans,name,X,X_val,v,cols_list,rem_list,rank_list,i_cols_list,i_rem_list in X_all_add:
# model = KerasClassifier(build_fn=est, v=v, nb_epoch=10, verbose=0)
# model.fit(X[:,i_cols_list],Y_train_hot)
# result = model.score(X_val[:,i_cols_list], Y_val_hot)
# acc[trans].append(result)
# print(trans+"+"+name+"+%d" % (v*(c-1)))
# print(result)
#for v in ratio_list:
# comb.append("%s+%s of %s" % (algo,"Subset",v))
#Plot the accuracies of all combinations
fig, ax = plt.subplots()
#Plot each transformation
for trans in trans_list:
plt.plot(acc[trans])
#Set the tick names to names of combinations
ax.set_xticks(range(len(comb)))
ax.set_xticklabels(comb,rotation='vertical')
#Display the plot
plt.legend(trans_list,loc='best')
#Plot the accuracy for all combinations
plt.show()
# Best estimated performance is 71%
# Performance is poor in general. Data transformations make a huge difference.
# Make predictions using Extra Trees Classifier + 0.5 subset as it gave the best estimated performance
n_estimators = 100
#Obtain the list of indexes for the required model
indexes = []
for trans,name,X,X_val,v,cols_list,rem_list,rank_list,i_cols_list,i_rem_list in X_all_add:
if v == 0.5:
if trans == 'Orig':
indexes = i_cols_list
break
#Best model definition
best_model = ExtraTreesClassifier(n_jobs=-1,n_estimators=n_estimators)
best_model.fit(X_orig[:,indexes],Y)
#Read test dataset
dataset_test = pandas.read_csv("../input/test.csv")
#Drop unnecessary columns
ID = dataset_test['Id']
dataset_test.drop('Id',axis=1,inplace=True)
dataset_test.drop(rem,axis=1,inplace=True)
X_test = dataset_test.values
#Make predictions using the best model
predictions = best_model.predict(X_test[:,indexes])
# Write submissions to output file in the correct format
with open("submission.csv", "w") as subfile:
subfile.write("Id,Cover_Type\n")
for i, pred in enumerate(list(predictions)):
subfile.write("%s,%s\n"%(ID[i],pred))
```
#### File: kaggle/forest-cover-type-prediction/script_38.py
```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Suppress minor warnings
import warnings
warnings.filterwarnings('ignore')
# Import test and train data
df_train = pd.read_csv('../input/train.csv')
df_Test = pd.read_csv('../input/test.csv')
df_test = df_Test
# First 5 data points
df_train.head()
# Datatypes of the attributes
df_train.dtypes
pd.set_option('display.max_columns', None) # we need to see all the columns
df_train.describe()
# From both train and test data
df_train = df_train.drop(['Soil_Type7', 'Soil_Type15'], axis = 1)
df_test = df_test.drop(['Soil_Type7', 'Soil_Type15'], axis = 1)
# Also drop 'Id'
df_train = df_train.iloc[:,1:]
df_test = df_test.iloc[:,1:]
size = 10
corrmat = df_train.iloc[:,:size].corr()
f, ax = plt.subplots(figsize = (10,8))
sns.heatmap(corrmat,vmax=0.8,square=True);
data = df_train.iloc[:,:size]
# Get name of the columns
cols = data.columns
# Calculate the pearson correlation coefficients for all combinations
data_corr = data.corr()
# Threshold ( only highly correlated ones matter)
threshold = 0.5
corr_list = []
data_corr
# Sorting out the highly correlated values
for i in range(0, size):
for j in range(i+1, size):
if data_corr.iloc[i,j]>= threshold and data_corr.iloc[i,j]<1\
or data_corr.iloc[i,j] <0 and data_corr.iloc[i,j]<=-threshold:
corr_list.append([data_corr.iloc[i,j],i,j])
# Sorting the values
s_corr_list = sorted(corr_list,key= lambda x: -abs(x[0]))
# print the higher values
for v,i,j in s_corr_list:
print("%s and %s = %.2f" % (cols[i], cols[j], v))
df_train.iloc[:,:10].skew()
# Pair wise scatter plot with hue being 'Cover_Type'
for v,i,j in s_corr_list:
sns.pairplot(data = df_train, hue='Cover_Type', size= 6, x_vars=cols[i], y_vars=cols[j])
plt.show()
# A violin plot is a hybrid of a box plot and a kernel density plot, which shows peaks in the data.
cols = df_train.columns
size = len(cols) - 1 # We don't need the target attribute
# x-axis has target attributes to distinguish between classes
x = cols[size]
y = cols[0:size]
for i in range(0, size):
sns.violinplot(data=df_train, x=x, y=y[i])
plt.show()
df_train.Wilderness_Area2.value_counts()
### Group one-hot encoded variables of a category into one single variable
cols = df_train.columns
r,c = df_train.shape
# Create a new dataframe with r rows, one column for each encoded category, and target in the end
new_data = pd.DataFrame(index= np.arange(0,r), columns=['Wilderness_Area', 'Soil_Type', 'Cover_Type'])
# Make an entry in data for each r for category_id, target_value
for i in range(0,r):
p = 0;
q = 0;
# Category1_range
for j in range(10,14):
if (df_train.iloc[i,j] == 1):
p = j-9 # category_class
break
# Category2_range
for k in range(14,54):
if (df_train.iloc[i,k] == 1):
q = k-13 # category_class
break
# Make an entry in data for each r
new_data.iloc[i] = [p,q,df_train.iloc[i, c-1]]
# plot for category1
sns.countplot(x = 'Wilderness_Area', hue = 'Cover_Type', data = new_data)
plt.show()
# Plot for category2
plt.rc("figure", figsize = (25,10))
sns.countplot(x='Soil_Type', hue = 'Cover_Type', data= new_data)
plt.show()
# Checking the value count for different soil_types
for i in range(10, df_train.shape[1]-1):
j = df_train.columns[i]
print (df_train[j].value_counts())
# Let's drop them
df_train = df_train.drop(['Soil_Type8', 'Soil_Type25'], axis=1)
df_test = df_test.drop(['Soil_Type8', 'Soil_Type25'], axis=1)
df_train1 = df_train # To be used for algos like SVM where we need normalization and StandardScaler
df_test1 = df_test # To be used under normalization and StandardScaler
# Checking for data transformation (take only non-categorical values)
df_train.iloc[:,:10].skew()
#Horizontal_Distance_To_Hydrology
from scipy import stats
plt.figure(figsize=(8,6))
sns.distplot(df_train1['Horizontal_Distance_To_Hydrology'], fit = stats.norm)
fig = plt.figure(figsize=(8,6))
res = stats.probplot(df_train1['Horizontal_Distance_To_Hydrology'], plot=plt)
df_train1['Horizontal_Distance_To_Hydrology'] = np.sqrt(df_train1['Horizontal_Distance_To_Hydrology'])
# Plot again after sqrt transformation
plt.figure(figsize=(8,6))
sns.distplot(df_train1['Horizontal_Distance_To_Hydrology'], fit = stats.norm)
fig = plt.figure(figsize=(8,6))
res = stats.probplot(df_train1['Horizontal_Distance_To_Hydrology'], plot=plt)
#Vertical_Distance_To_Hydrology
plt.figure(figsize=(8,6))
sns.distplot(df_train1['Vertical_Distance_To_Hydrology'], fit = stats.norm)
fig = plt.figure(figsize=(8,6))
res = stats.probplot(df_train1['Vertical_Distance_To_Hydrology'], plot=plt)
#Horizontal_Distance_To_Roadways
plt.figure(figsize=(8,6))
sns.distplot(df_train1['Horizontal_Distance_To_Roadways'], fit=stats.norm)
fig = plt.figure(figsize=(8,6))
res = stats.probplot(df_train1['Horizontal_Distance_To_Roadways'], plot=plt)
df_train1['Horizontal_Distance_To_Roadways'] = np.sqrt(df_train1['Horizontal_Distance_To_Roadways'])
# Plot again after sqrt transformation
plt.figure(figsize=(8,6))
sns.distplot(df_train1['Horizontal_Distance_To_Roadways'], fit = stats.norm)
fig = plt.figure(figsize=(8,6))
res = stats.probplot(df_train1['Horizontal_Distance_To_Roadways'], plot=plt)
#Hillshade_9am
fig = plt.figure(figsize=(8,6))
sns.distplot(df_train1['Hillshade_9am'],fit=stats.norm)
fig = plt.figure(figsize=(8,6))
res = stats.probplot(df_train1['Hillshade_9am'],plot=plt)
df_train1['Hillshade_9am'] = np.square(df_train1['Hillshade_9am'])
# Plot again after square transformation
plt.figure(figsize=(8,6))
sns.distplot(df_train1['Hillshade_9am'], fit = stats.norm)
fig = plt.figure(figsize=(8,6))
res = stats.probplot(df_train1['Hillshade_9am'], plot=plt)
# Hillshade_Noon
fig = plt.figure(figsize=(8,6))
sns.distplot(df_train1['Hillshade_Noon'],fit=stats.norm)
fig = plt.figure(figsize=(8,6))
res = stats.probplot(df_train1['Hillshade_Noon'],plot=plt)
df_train1['Hillshade_Noon'] = np.square(df_train1['Hillshade_Noon'])
# Plot again after square transformation
fig = plt.figure(figsize=(8,6))
sns.distplot(df_train1['Hillshade_Noon'],fit=stats.norm)
fig = plt.figure(figsize=(8,6))
res = stats.probplot(df_train1['Hillshade_Noon'],plot=plt)
# Horizontal_Distance_To_Fire_Points
plt.figure(figsize=(8,6))
sns.distplot(df_train1['Horizontal_Distance_To_Fire_Points'], fit=stats.norm)
plt.figure(figsize=(8,6))
res = stats.probplot(df_train1['Horizontal_Distance_To_Fire_Points'],plot=plt)
df_train1['Horizontal_Distance_To_Fire_Points'] = np.sqrt(df_train1['Horizontal_Distance_To_Fire_Points'])
# Plot again after sqrt transformation
plt.figure(figsize=(8,6))
sns.distplot(df_train1['Horizontal_Distance_To_Fire_Points'], fit=stats.norm)
plt.figure(figsize=(8,6))
res = stats.probplot(df_train1['Horizontal_Distance_To_Fire_Points'],plot=plt)
# To be used in case of algorithms like SVM
df_test1[['Horizontal_Distance_To_Hydrology','Horizontal_Distance_To_Fire_Points'\
,'Horizontal_Distance_To_Roadways']] = np.sqrt(df_test1[['Horizontal_Distance_To_Hydrology',\
'Horizontal_Distance_To_Fire_Points','Horizontal_Distance_To_Roadways']])
# To be used in case of algorithms like SVM
df_test1[['Hillshade_9am','Hillshade_Noon']] = np.square(df_test1[['Hillshade_9am','Hillshade_Noon']])
from sklearn.preprocessing import StandardScaler
# Taking only non-categorical values
Size = 10
X_temp = df_train.iloc[:,:Size]
X_test_temp = df_test.iloc[:,:Size]
X_temp1 = df_train1.iloc[:,:Size]
X_test_temp1 = df_test1.iloc[:,:Size]
X_temp1 = StandardScaler().fit_transform(X_temp1)
X_test_temp1 = StandardScaler().fit_transform(X_test_temp1)
r,c = df_train.shape
X_train = np.concatenate((X_temp,df_train.iloc[:,Size:c-1]),axis=1)
X_train1 = np.concatenate((X_temp1, df_train1.iloc[:,Size:c-1]), axis=1) # to be used for SVM
y_train = df_train.Cover_Type.values
from sklearn import svm
from sklearn.model_selection import train_test_split
#from sklearn.grid_search import GridSearchCV, RandomizedSearchCV
from sklearn.model_selection import RandomizedSearchCV,GridSearchCV
# Setting parameters
x_data, x_test_data, y_data, y_test_data = train_test_split(X_train1,y_train,test_size=0.2, random_state=123)
svm_para = [{'kernel':['rbf'],'C': [1,10,100,100]}]
#classifier = GridSearchCV(svm.SVC(),svm_para,cv=3,verbose=2)
#classifier.fit(x_data,y_data)
#classifier.best_params_
#classifier.grid_scores_
# Parameters optimized using the code in above cell
C_opt = 10 # reasonable option
clf = svm.SVC(C=C_opt,kernel='rbf')
clf.fit(X_train1,y_train)
clf.score(X_train1,y_train)
# y_pred = clf.predict(X_test1)
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.metrics import classification_report
# setting parameters
x_data, x_test_data, y_data, y_test_data = train_test_split(X_train,y_train,test_size= 0.3, random_state=0)
etc_para = [{'n_estimators':[20,30,100], 'max_depth':[5,10,15], 'max_features':[0.1,0.2,0.3]}]
# Default number of features is sqrt(n)
# Default number of min_samples_leaf is 1
ETC = GridSearchCV(ExtraTreesClassifier(),param_grid=etc_para, cv=10, n_jobs=-1)
ETC.fit(x_data, y_data)
ETC.best_params_
ETC.cv_results_
print ('Best accuracy obtained: {}'.format(ETC.best_score_))
print ('Parameters:')
for key, value in ETC.best_params_.items():
print('\t{}:{}'.format(key,value))
# Classification Report
Y_pred = ETC.predict(x_test_data)
target = ['class1', 'class2','class3','class4','class5','class6','class7' ]
print (classification_report(y_test_data, Y_pred, target_names=target))
from sklearn.model_selection import learning_curve
from sklearn.model_selection import ShuffleSplit
def plot_learning_curve(model,title, X, y,n_jobs = 1, ylim = None, cv = None,train_sizes = np.linspace(0.1, 1, 5)):
    # Figure parameters
plt.figure(figsize=(10,8))
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel('Training Examples')
plt.ylabel('Score')
train_sizes, train_score, test_score = learning_curve(model, X, y, cv = cv, n_jobs=n_jobs, train_sizes=train_sizes)
# Calculate mean and std
train_score_mean = np.mean(train_score, axis=1)
train_score_std = np.std(train_score, axis=1)
test_score_mean = np.mean(test_score, axis=1)
test_score_std = np.std(test_score, axis=1)
plt.grid()
plt.fill_between(train_sizes, train_score_mean - train_score_std, train_score_mean + train_score_std,\
alpha = 0.1, color = 'r')
plt.fill_between(train_sizes, test_score_mean - test_score_std, test_score_mean + test_score_std,\
alpha = 0.1, color = 'g')
plt.plot(train_sizes, train_score_mean, 'o-', color="r", label="Training score")
plt.plot(train_sizes, test_score_mean, 'o-', color="g", label="Cross-validation score")
plt.legend(loc = "best")
return plt
# 'max_features': 0.3, 'n_estimators': 100, 'max_depth': 15, 'min_samples_leaf': 1
etc = ExtraTreesClassifier(bootstrap=True, oob_score=True, n_estimators=100, max_depth=10, max_features=0.3, \
min_samples_leaf=1)
etc.fit(X_train, y_train)
# yy_pred = etc.predict(X_test)
etc.score(X_train, y_train)
# Plotting learning curve
title = 'Learning Curve (ExtraTreeClassifier)'
# cross validation with 50 iterations to have a smoother curve
cv = ShuffleSplit(n_splits=50, test_size=0.2, random_state=0)
model = etc
plot_learning_curve(model,title,X_train, y_train, n_jobs=-1,ylim=None,cv=cv)
plt.show()
```
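As an aside, the row-by-row loop used above to fold the one-hot `Wilderness_AreaN`/`Soil_TypeN` columns into single categorical variables (and the similar loops in script_45 below) can be vectorized. A minimal sketch, assuming the one-hot columns follow the same naming and are present in the frame:

```python
import pandas as pd

def collapse_one_hot(df, prefix, ids):
    """Return the numeric id of the active one-hot column for each row."""
    cols = ['{}{}'.format(prefix, i) for i in ids]
    # idxmax picks the column holding the 1; stripping the prefix leaves the id.
    return df[cols].idxmax(axis=1).str.replace(prefix, '', regex=False).astype(int)

# Hypothetical usage on a frame like df_train above (restricting ids to the
# soil-type columns that were not dropped earlier):
# df_train['Wilderness_Area'] = collapse_one_hot(df_train, 'Wilderness_Area', range(1, 5))
# df_train['Soil_Type'] = collapse_one_hot(
#     df_train, 'Soil_Type', [i for i in range(1, 41) if i not in (7, 8, 15, 25)])
```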
#### File: kaggle/forest-cover-type-prediction/script_43.py
```python
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.metrics import pairwise_distances
from scipy.spatial import distance
from matplotlib import pyplot as plt
from sklearn.preprocessing import MinMaxScaler
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
import os
print(os.listdir("../input"))
# Any results you write to the current directory are saved as output.
train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
train_data.head()
train_data.describe()
train_labels = train_data.Cover_Type.values
test_id = test_data.Id.values
train_data.drop(['Soil_Type7', 'Soil_Type15', 'Id', 'Cover_Type'], axis=1, inplace=True)
test_data.drop(['Soil_Type7', 'Soil_Type15', 'Id'], axis=1, inplace=True)
print(train_data.shape, test_data.shape)
min_max_scaler = MinMaxScaler() # If you did not use the scaler, you will get higher accuracy
train_data = min_max_scaler.fit_transform(train_data)
test_data = min_max_scaler.fit_transform(test_data)
distance_matrix = pairwise_distances(train_data, metric = 'euclidean')
print(distance_matrix.shape)
sorted_distance_index = np.argsort(distance_matrix, axis=1).astype(np.uint16)
print(sorted_distance_index)
sorted_distance_labels = train_labels[sorted_distance_index].astype(np.uint8)
print(sorted_distance_labels)
max_k = 100
k_matrix = np.empty((len(sorted_distance_labels), 0), dtype=np.uint8)
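# For each k, vote among the k nearest neighbours: column 0 of
# sorted_distance_labels is the point itself, so columns 1..k are used and
# np.bincount(...).argmax() picks the majority label per row.
# k_matrix collects one column of predicted labels per value of k.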
for k in range (1, max_k+1):
k_along_rows = np.apply_along_axis(lambda x: np.bincount(x).argmax(), axis=1, arr=sorted_distance_labels[:, 1:k+1]).reshape(len(sorted_distance_labels), -1)
k_matrix = np.hstack((k_matrix, k_along_rows))
print(k_matrix)
k_truth_table = np.where(k_matrix == train_labels[:, None], 1, 0)
print(k_truth_table)
print(k_truth_table.shape)
accuracy_per_k = np.sum(k_truth_table, axis=0)/len(k_truth_table)
best_accuracy = np.amax(accuracy_per_k)
best_k = np.argmax(accuracy_per_k) + 1 # real k = index + 1
print('Best K: {0}, Best Accuracy: {1:4.2f}%'.format(best_k, best_accuracy*100))
plt.plot(range(1, max_k+1), accuracy_per_k)
plt.title('Classification accuracy vs Choice of K')
plt.xlabel('K')
plt.ylabel('Classification Accuracy')
plt.show()
print("RAM needed for the distance matrix = {:.2f} GB".format(len(train_data)*len(test_data) * 64 / (8 * 1024 * 1024 * 1024)))
# These variables are no longer needed; free up some RAM
del k_truth_table
del k_matrix
del sorted_distance_labels
del sorted_distance_index
del distance_matrix
# ALERT: This code takes some time, it took 8 minutes on a powerful PC but with relatively low RAM usage (around 6.8G)
def classify(unknown, dataset, labels, k):
classify_distance_matrix = pairwise_distances(unknown, dataset, metric='euclidean')
nearest_images = np.argsort(classify_distance_matrix)[:, :k]
nearest_images_labels = labels[nearest_images]
classification = np.apply_along_axis(lambda x: np.bincount(x).argmax(), axis=1, arr=nearest_images_labels[:, :k])
return classification.astype(np.uint8).reshape(-1, 1)
predict = np.empty((0, 1), dtype=np.uint8)
chunks = 15
last_chunk_index = 0
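# Classify the test set in chunks so each pairwise-distance matrix is only
# (chunk_size x n_train) and fits in RAM, then stack the chunk predictions.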
for i in range(1, chunks+1):
new_chunk_index = int(i * len(test_data) / chunks)
predict = np.concatenate((predict, classify(test_data[last_chunk_index : new_chunk_index], train_data, train_labels, best_k)))
last_chunk_index = new_chunk_index
print("Progress = {:.2f}%".format(i * 100 / chunks))
submission = pd.DataFrame({"Id": test_id, "Cover_Type": predict.ravel()})
submission.to_csv('submission.csv', index=False)
```
#### File: kaggle/forest-cover-type-prediction/script_45.py
```python
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
import os
print(os.listdir("../input"))
# Any results you write to the current directory are saved as output.
from IPython.display import display
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use('ggplot')
train_set = pd.read_csv('../input/train.csv')
test_set = pd.read_csv('../input/test.csv')
display(train_set.head())
display(train_set.describe())
display(train_set.keys())
display(len(train_set.keys()))
# Checking binary column
soil_cols = ['Soil_Type' + str(i) for i in range(1, 41)]
wilder_cols = ['Wilderness_Area' + str(i) for i in range(1, 5)]
# If sum : 15120 => data is OK!
display(train_set[soil_cols].sum(axis=1).sum(axis=0))
display(train_set[wilder_cols].sum(axis=1).sum(axis=0))
# categorical variable
cate_vars = soil_cols[:]
cate_vars.extend(wilder_cols)
# continuous variable
cont_vars = list(train_set.keys())
cont_vars.remove('Id')
cont_vars.remove('Cover_Type')
cont_vars = [var for var in cont_vars if var not in cate_vars]
print(cate_vars)
print(cont_vars)
# How about using these features directly (without scaling or normalization)?
fig = plt.figure()
fig.set_size_inches(35, 35)
sns.set(font_scale=2)
# Delete 'Id' and change cover type to dummy variables
cont_var_train_set = train_set.drop('Id', axis=1).drop(cate_vars, axis=1)
# Categorical features: Pearson correlation cannot be applied to them directly.
cont_var_train_set_dum = pd.get_dummies(cont_var_train_set, columns=['Cover_Type'])
correlation = cont_var_train_set_dum.corr()
sns.heatmap(correlation, cmap='viridis', annot=True, linewidths=3)
from sklearn.preprocessing import StandardScaler
# After feature scaling: essentially the same correlations as above
scaled_feat = cont_var_train_set_dum.iloc[:, :-7]
dummy_labels = cont_var_train_set_dum.iloc[:, -7:]
# using scaler
scaler = StandardScaler()
scaler.fit(scaled_feat)
scaled_feat = scaler.transform(scaled_feat)
scaled_feat = pd.DataFrame(scaled_feat, columns=cont_vars)
scaled_feat.head()
fig = plt.figure()
fig.set_size_inches(35, 35)
correlation2 = pd.concat([scaled_feat, dummy_labels], axis=1).corr()
sns.heatmap(correlation2, cmap='viridis', annot=True, linewidths=3)
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, classification_report
# Splitting the datasets
features = pd.concat([scaled_feat, train_set[cate_vars]], axis=1)
features.head()
x_train, x_test, y_train, y_test = train_test_split(features, train_set['Cover_Type'], random_state=20190425, test_size=0.3)
rf_model = RandomForestClassifier(max_depth=7, n_estimators=300)
rf_model.fit(x_train, y_train)
# Predicting naively
pred = rf_model.predict(x_test)
display(accuracy_score(y_test, pred))
display(classification_report(y_test, pred))
# See the importance of features
importances = rf_model.feature_importances_
indices = np.argsort(importances)
fig = plt.figure()
fig.set_size_inches(20, 20)
sns.set(font_scale=1.5)
plt.title('Feature Importances')
plt.barh(range(len(indices)), importances[indices], color='b', align='center')
plt.yticks(range(len(indices)), features.keys()[indices])
plt.xlabel('Relative Importance')
# dimensional reduction
from sklearn.decomposition import PCA
import numpy as np
pca = PCA(n_components=None, random_state=20180425)
pca.fit(features)
pca_var = pca.explained_variance_ratio_
fig, ax = plt.subplots(1, 2, figsize=(16, 8))
ax1, ax2 = ax.flatten()
ax1.plot(pca_var)
ax2.plot(np.cumsum(pca_var))
train_set.head()
for idx, row in train_set.iterrows():
for i in range(1, 5):
if row['Wilderness_Area' + str(i)] == 1:
train_set.loc[idx, 'Wilderness_Area'] = i
for i in range(1, 41):
if row['Soil_Type' + str(i)] == 1:
train_set.loc[idx, 'Soil_Type'] = i
train_set.head()
wilderness_area_col = train_set['Wilderness_Area'].astype(int)
soil_type_col = train_set['Soil_Type'].astype(int)
display(wilderness_area_col.head())
display(soil_type_col.head())
# train_set = train_set.drop(['Soil_Type'+str(idx) for idx in range(1, 41)], axis=1)
# train_set = train_set.drop(['Wilderness_Area'+str(idx) for idx in range(1, 5)], axis=1)
import scipy.stats as ss
# get confusion matrix manually
confusions = []
for soil in range(1, 41):
for cover in range(1, 8):
cond = train_set[(train_set['Soil_Type'] == soil) & (train_set['Cover_Type'] == cover)]
confusions.append(cond.count()['Soil_Type'])
confusion_matrix = np.array(confusions).reshape(40, 7)
confusion_matrix = confusion_matrix[confusion_matrix.sum(axis=1) > 0]
# cramers v stat 1
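# Cramer's V for a contingency table: V = sqrt((chi2 / n) / (min(r, c) - 1)),
# where chi2 is the chi-square statistic, n the total count and r, c the
# numbers of rows and columns; get_cramers_stat below implements exactly this.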
def get_cramers_stat(confusion_matrix):
confusion_matrix = confusion_matrix
chi2 = ss.chi2_contingency(confusion_matrix)[0]
n = confusion_matrix.sum()
phi2 = chi2 / n
cramers_stat = np.sqrt(phi2 / (min(confusion_matrix.shape)-1))
return cramers_stat
soil_type_result_1 = get_cramers_stat(confusion_matrix)
print(soil_type_result_1)
confusion_matrix = pd.crosstab(train_set['Soil_Type'], train_set['Cover_Type'])
confusion_matrix = np.array(confusion_matrix)
soil_type_result_2 = get_cramers_stat(confusion_matrix)
print(soil_type_result_2)
confusion = []
for wilderness in range(1, 5):
for cover in range(1, 8):
cond = train_set[(train_set['Wilderness_Area'] == wilderness) & (train_set['Cover_Type'] == cover)]
confusion.append(cond.count()['Wilderness_Area'])
confusion_matrix = np.array(confusion).reshape(4, 7)
wilderness_area_result_1 = get_cramers_stat(confusion_matrix)
print(wilderness_area_result_1)
confusion_matrix = pd.crosstab(train_set['Wilderness_Area'], train_set['Cover_Type'])
confusion_matrix = np.array(confusion_matrix)
wilderness_area_result_2 = get_cramers_stat(confusion_matrix)
print(wilderness_area_result_2)
cate_vars_1 = ['Wilderness_Area', 'Soil_Type']
input_features = pd.concat([scaled_feat, wilderness_area_col, soil_type_col], axis=1)
labels = train_set['Cover_Type']
display(input_features.head())
display(labels.head())
x_train, x_test, y_train, y_test = train_test_split(input_features, labels, random_state=20190501, test_size=0.3)
import tensorflow as tf
wilderness_area_cate_list = list(set(input_features['Wilderness_Area']))
soil_type_cate_list = list(set(input_features['Soil_Type']))
wilderness_area_cols = tf.feature_column.categorical_column_with_vocabulary_list(
'Wilderness_Area', wilderness_area_cate_list
)
soil_type_cols = tf.feature_column.categorical_column_with_vocabulary_list(
'Soil_Type', soil_type_cate_list
)
embed_wilderness_area_cols = tf.feature_column.embedding_column(
categorical_column=wilderness_area_cols,
dimension = 5
# dimension = round(len(wilderness_area_cate_list) ** 0.25)
)
embed_soil_type_cols = tf.feature_column.embedding_column(
categorical_column=soil_type_cols,
dimension = 5
# dimension = round(len(soil_type_cate_list) ** 0.25)
)
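# The commented-out expressions above follow a common rule of thumb (an assumption
# here, not a TensorFlow requirement): use roughly the 4th root of the category count
# as the embedding dimension, e.g. round(40 ** 0.25) == 3 for the 40 soil types and
# round(4 ** 0.25) == 1 for the 4 wilderness areas; dimension=5 simply errs larger.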
test_set_rf = test_set.copy()
test_set_rf_cont = test_set_rf[cont_vars]
scaler.fit(test_set_rf_cont)
test_set_rf_cont = scaler.transform(test_set_rf_cont)
test_set_rf_cont = pd.DataFrame(test_set_rf_cont, columns=cont_vars)
test_set_rf_cate = test_set_rf[cate_vars]
scaled_test_set_rf = pd.concat([test_set_rf_cont, test_set_rf_cate], axis=1)
scaled_test_set_rf.head()
rf_pred = rf_model.predict(scaled_test_set_rf)
rf_result = pd.concat([test_set['Id'], pd.DataFrame({'Cover_Type': rf_pred})], axis=1)
rf_result.to_csv("rf_submission.csv", index=False)
train_input_fn = tf.estimator.inputs.pandas_input_fn(
x = x_train,
y = y_train,
num_epochs = 30,
batch_size = 50,
shuffle=True
)
eval_input_fn = tf.estimator.inputs.pandas_input_fn(
x = x_test,
y = y_test,
num_epochs = 1,
shuffle = False
)
# Continuous features were already scaled with sklearn's StandardScaler;
# expose them to the estimator as plain numeric columns.
tf_features = [tf.feature_column.numeric_column(col) for col in cont_vars]
tf_features.extend([embed_wilderness_area_cols, embed_soil_type_cols])
tf_features
estimator = tf.estimator.DNNClassifier(
feature_columns = tf_features,
hidden_units = [1024, 512, 256],
n_classes = 8,
optimizer = tf.train.AdamOptimizer()
)
estimator.train(input_fn=train_input_fn)
estimator.evaluate(input_fn=eval_input_fn)
# Prepare the test set for the DNN model
test_set_copy = test_set.copy()
# Convert the one-hot Wilderness_Area / Soil_Type columns back to single categorical columns
for idx, row in test_set_copy.iterrows():
for i in range(1, 5):
if row['Wilderness_Area' + str(i)] == 1:
test_set_copy.loc[idx, 'Wilderness_Area'] = i
for i in range(1, 41):
if row['Soil_Type' + str(i)] == 1:
test_set_copy.loc[idx, 'Soil_Type'] = i
# 1. scaling the continuous features
test_cont_feat = test_set_copy[cont_vars]
scaler.fit(test_cont_feat)
test_scaled_cont_feat = scaler.transform(test_cont_feat)
test_scaled_cont_feat = pd.DataFrame(test_scaled_cont_feat, columns=cont_vars)
# 2. categorical features
test_cate_feat = test_set_copy[cate_vars_1].astype(int)
# 3. concat
test_input_features = pd.concat([test_scaled_cont_feat, test_cate_feat], axis=1)
display(test_cont_feat.head())
display(test_scaled_cont_feat.head())
display(test_input_features.head())
# 4. prediction input function
pred_input_fn = tf.estimator.inputs.pandas_input_fn(
x = test_input_features,
num_epochs = 1,
shuffle = False
)
predictions = list(estimator.predict(pred_input_fn))
predicted_classes = [int(pred['classes']) for pred in predictions]
result = predicted_classes[:]
result = pd.concat([test_set['Id'], pd.DataFrame({'Cover_Type': result})], axis=1)
result.head()
result.to_csv("submission.csv", index=False)
```
#### File: kaggle/otto-group-product-classification-challenge/script_14.py
```python
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.model_selection import train_test_split, StratifiedKFold
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
import xgboost as xg
from functools import partial
from hyperopt import hp, fmin, tpe, Trials
from hyperopt.pyll.base import scope
# Reading train dataset in the environment.
dataset_pd = pd.read_csv("/kaggle/input/otto-group-product-classification-challenge/train.csv", index_col = 0)
print(dataset_pd.shape)
# Reading test dataset in the environment.
dataset_pd2 = pd.read_csv("/kaggle/input/otto-group-product-classification-challenge/test.csv", index_col = 0)
print(dataset_pd2.shape)
# Creating a predictor matrix (removing the response variable column)
dataset_train = dataset_pd.values
X = dataset_train[:,0:93] # Predictors
y = dataset_train[:,93] # Response
# XGBoost does not accept string class labels as the target, so use LabelEncoder to map them to integers.
label_encoder = LabelEncoder()
label_encoder = label_encoder.fit(y)
label_encoder_y = label_encoder.transform(y)
# optimize function
def optimize(params, x, y):
model = xg.XGBClassifier(**params)
kf = StratifiedKFold(n_splits = 5)
accuracies = []
for idx in kf.split(X = x, y = y):
train_idx , test_idx = idx[0], idx[1]
xtrain = x[train_idx]
ytrain = y[train_idx]
xtest = x[test_idx]
ytest = y[test_idx]
model.fit(xtrain, ytrain)
preds = model.predict(xtest)
fold_acc = accuracy_score(ytest, preds)
accuracies.append(fold_acc)
return -1.0 * np.mean(accuracies)
# Parameter Space for XGBoost
param_space = {
'max_depth' : scope.int(hp.quniform('max_depth', 3,15, 1)),
'n_estimators' : scope.int(hp.quniform('n_estimators', 100, 600, 1)),
'criterion' : hp.choice('criterion', ['gini', 'entropy']),
'colsample_bytree' : hp.uniform('colsample_bytree', 0.01,1),
'learning_rate' : hp.uniform('learning_rate', 0.001,1)
}
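# Illustrative only: a single random draw from the space above can be inspected with
# hyperopt's sampling helper, e.g.
#   from hyperopt.pyll.stochastic import sample
#   print(sample(param_space))
# Note that 'criterion' is not a native XGBoost parameter, so XGBClassifier will
# typically just warn that it is unused and it has no effect on the fitted models.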
# Optimization Function
optimization_function = partial(
optimize,
x = X,
y = label_encoder_y
)
trials = Trials()
result = fmin(fn = optimization_function,
space = param_space,
algo = tpe.suggest,
max_evals = 15,
trials = trials
)
print(result)
# Train and test split of the data
X_train, X_test, y_train, y_test = train_test_split(X, label_encoder_y, test_size = 0.33, random_state = 7)
classifier = xg.XGBClassifier(n_jobs = 6,
n_estimators = 396,
max_depth = 6,
colsample_bytree = 0.9292372781188178,
learning_rate = 0.28725052863307404,
criterion = "gini")
classifier.fit(X_train, y_train)
# Check the accuracy of the model on the train and test datasets.
accuracy_train = accuracy_score(y_train, classifier.predict(X_train))
print("Accuracy on train dataset %.2f%%" % (accuracy_train * 100))
accuracy_test = accuracy_score(y_test, classifier.predict(X_test))
print("Accuracy on test dataset %.2f%%" % (accuracy_test * 100))
# code for submission file.
dataset_test = dataset_pd2.values
classifier = xg.XGBClassifier(n_jobs = 6,
n_estimators = 396,
max_depth = 6,
colsample_bytree = 0.9292372781188178,
learning_rate = 0.28725052863307404,
criterion = "gini")
classifier.fit(X, label_encoder_y)
prediction_sub = classifier.predict(dataset_test)
#dataset_pd2["prediction"] = prediction_sub
X_sub = np.array(prediction_sub).reshape(-1,1)
onehot_encoder = OneHotEncoder(sparse = False)
submission_file = onehot_encoder.fit_transform(X_sub)
submission_file_df = pd.DataFrame(submission_file,
columns = ['Class_1','Class_2','Class_3','Class_4','Class_5','Class_6',
'Class_7','Class_8','Class_9'], index = dataset_pd2.index)
submission_file_df.to_csv("submission_otto_ver2.csv")
```
#### File: kaggle/otto-group-product-classification-challenge/script_29.py
```python
import pandas as pd
import numpy as np
from time import time
from sklearn import ensemble, feature_extraction, preprocessing
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.model_selection import train_test_split
def multiclass_log_loss(y_true, y_pred, eps=1e-15):
predictions = np.clip(y_pred, eps, 1 - eps)
# normalize row sums to 1
predictions /= predictions.sum(axis=1)[:, np.newaxis]
actual = np.zeros(y_pred.shape)
n_samples = actual.shape[0]
actual[np.arange(n_samples), y_true.astype(int)] = 1
vectsum = np.sum(actual * np.log(predictions))
loss = -1.0 / n_samples * vectsum
return loss
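# Sanity check (illustrative, not part of the original script): this implementation
# should closely match sklearn's log_loss on the same inputs, e.g.
#   from sklearn.metrics import log_loss
#   multiclass_log_loss(np.array([0, 1]), np.array([[0.9, 0.1], [0.2, 0.8]]))
#   # ~0.1643, the same value log_loss([0, 1], [[0.9, 0.1], [0.2, 0.8]]) reports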
# import data
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
sample = pd.read_csv('../input/sampleSubmission.csv')
# drop ids and get labels
labels = train.target.values
train = train.drop('id', axis=1)
train = train.drop('target', axis=1)
test = test.drop('id', axis=1)
# train, validation split
# encode labels
lbl_enc = preprocessing.LabelEncoder()
labels = lbl_enc.fit_transform(labels)
X_train, X_test, y_train, y_test = train_test_split(train, labels, test_size=.2)
# train an ensemble of extremely randomized trees
clf = ExtraTreesClassifier(n_estimators=50, max_depth=None, min_samples_split=2, random_state=0, verbose=True, n_jobs=-1)
t0 = time()
clf.fit(X_train, y_train)
print("Fitting Done in %0.3fs" % (time() - t0))
t0 = time()
y_tr_pred = clf.predict_proba(X_train)
print("Done in %0.3fs" % (time() - t0))
t0 = time()
y_ts_pred = clf.predict_proba(X_test)
print("Done in %0.3fs" % (time() - t0))
TrainLoss = multiclass_log_loss(y_train, y_tr_pred, eps=1e-15)
TestLoss = multiclass_log_loss(y_test, y_ts_pred, eps=1e-15)
print('Multiclass Loss')
print(' Train', TrainLoss)
print(' Test', TestLoss)
# Train with Full Data
clf.fit(train, labels)
# predict on test set
preds = clf.predict_proba(test)
# create submission file
preds = pd.DataFrame(preds, index=sample.id.values, columns=sample.columns[1:])
preds.to_csv('MyModel.csv', index_label='id')
```
#### File: kaggle/otto-group-product-classification-challenge/script_32.py
```python
from pathlib import Path
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import lightgbm as lgb_org
import optuna.integration.lightgbm as lgb
base_dir = Path(
"/kaggle/input/otto-group-product-classification-challenge/"
)
def read_csv(path):
df = pd.read_csv(path)
for col in df.columns:
if col.startswith("feat_"):
df[col] = df[col].astype("int32")
return df
df = read_csv(str(base_dir / "train.csv"))
class_to_order = dict()
order_to_class = dict()
for idx, col in enumerate(df.target.unique()):
order_to_class[idx] = col
class_to_order[col] = idx
df["target_ord"] = df["target"].map(class_to_order)
feature_columns = [
col for col in df.columns if col.startswith("feat_")
]
target_column = ["target_ord"]
X_train, X_val, y_train, y_val = train_test_split(
df[feature_columns], df[target_column],
test_size=0.3, random_state=42,
stratify=df[target_column]
)
dtrain = lgb_org.Dataset(X_train, y_train)
dval = lgb_org.Dataset(X_val, y_val)
params = dict(
objective="multiclass",
metric="multi_logloss",
num_class=9,
seed=42,
)
best_params, tuning_history = dict(), list()
booster = lgb.train(params, dtrain, valid_sets=dval,
verbose_eval=0,
best_params=best_params,
early_stopping_rounds=5,
tuning_history=tuning_history)
print("Best Params:", best_params)
print("Tuning history:", tuning_history)
df_test = read_csv(str(base_dir / "test.csv"))
pred = booster.predict(df_test[feature_columns])
for idx, col in order_to_class.items():
df_test[col] = pred[:,idx]
df_test[["id"] + [f"Class_{i}" for i in range(1, 10)]].to_csv('submission.csv', index=False)
```
#### File: kaggle/otto-group-product-classification-challenge/script_44.py
```python
__author__ = 'Sushant'
from sklearn.base import BaseEstimator, ClassifierMixin
from scipy.optimize import minimize
from sklearn.metrics import log_loss
import numpy
from sklearn.cross_validation import StratifiedShuffleSplit
"""
Usage:
estimators = []
estimators.append(RandomForestClassifier(n_estimators = 100))
estimators.append(GMM(n_components = 9))
C_MC = MegaClassifier(estimators = estimators, xv_tries = 5)
    C_MC.fit(X_train, y_train)
C_MC.predict_proba(X_test)
Description:
The MegaClassifier object automatically partitions training data in a
stratified manner into 'xv_tries' number of folds (default 4), trains
all models in 'estimators' with the stratified training sets and records
their output on the stratified validation set.
During optimization it selects weights that result in minimization of
averaged log-loss across all the validation sets.
"""
class StratifiedSplit(object):
@staticmethod
def train_test_split( X, y, test_size = 0.2):
res = StratifiedShuffleSplit(y, n_iter=1, test_size=test_size)
for ind_train, ind_test in res:
X_train = []
y_train = []
X_test = []
y_test = []
for ind in ind_train:
X_train.append(X[ind])
y_train.append(y[ind])
for ind in ind_test:
X_test.append(X[ind])
y_test.append(y[ind])
return X_train, X_test, y_train, y_test
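    # Note: sklearn.cross_validation is the pre-0.18 scikit-learn API. On modern
    # versions the equivalent would be (a sketch, not exercised by this script):
    #   from sklearn.model_selection import StratifiedShuffleSplit
    #   splitter = StratifiedShuffleSplit(n_splits=1, test_size=test_size)
    #   for ind_train, ind_test in splitter.split(X, y): ...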
class MegaClassifier(BaseEstimator, ClassifierMixin):
def __init__(self, estimators, xv_tries=4, test_size=0.2):
self.estimators = estimators
self.xv_tries = xv_tries
self.test_size = test_size
def fit(self, X, y):
self.X_trains = []
self.y_trains = []
self.X_valids = []
self.y_valids = []
for i in xrange(self.xv_tries):
Xt, Xv, yt, yv = StratifiedSplit.train_test_split(X, y, test_size=self.test_size)
self.X_trains.append(Xt)
self.X_valids.append(Xv)
self.y_trains.append(yt)
self.y_valids.append(yv)
# train the classifiers
self.all_xv_predictions = []
for ind, Xt in enumerate(self.X_trains):
cur_xv_predictions = []
for estimator in self.estimators:
#new_est = copy.deepcopy(estimator)
#new_est.fit(Xt, self.y_trains[ind])
estimator.fit(Xt, self.y_trains[ind])
cur_xv_predictions.append(estimator.predict_proba(self.X_valids[ind]))
self.all_xv_predictions.append(cur_xv_predictions)
num_estimators = len(self.estimators)
initial_weights = [1.0 / float(num_estimators) for i in xrange(num_estimators)]
print ("Optimizing....")
bounds = [(0, 1) for i in xrange(num_estimators)]
constraints = {'type': 'eq', 'fun': lambda w: 1 - sum(w)}
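        # With bounds and an equality constraint but no explicit method argument,
        # scipy.optimize.minimize falls back to the SLSQP solver here.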
res = minimize(self.__find_best_blending_weights, initial_weights, bounds=bounds, constraints=constraints)
self.final_weights = res.x
print ("Optimization finished...")
print ("Weights:")
print (self.final_weights)
for estimator in self.estimators:
estimator.fit(X, y)
def __find_best_blending_weights(self, weights):
log_losses = []
for ind1, xv_predictions in enumerate(self.all_xv_predictions):
y_final_pred_prob = None
for ind, est_predictions in enumerate(xv_predictions):
if y_final_pred_prob is None:
y_final_pred_prob = weights[ind] * est_predictions
else:
y_final_pred_prob = numpy.add(y_final_pred_prob, (weights[ind] * est_predictions))
log_losses.append(log_loss(self.y_valids[ind1], y_final_pred_prob))
log_losses = numpy.array(log_losses)
return log_losses.mean()
def predict_proba(self, X):
y_final_pred_prob = None
for ind, estimator in enumerate(self.estimators):
y_pp_cur = estimator.predict_proba(X)
if y_final_pred_prob is None:
y_final_pred_prob = self.final_weights[ind] * y_pp_cur
else:
y_final_pred_prob = numpy.add(y_final_pred_prob, (self.final_weights[ind] * y_pp_cur))
return y_final_pred_prob
``` |
{
"source": "josepablocam/python-pl",
"score": 2
} |
#### File: tests/test_analyze/test_dynamic_tracer.py
```python
import ast
import astunparse
import sys
import textwrap
import pytest
from plpy.analyze.dynamic_trace_events import *
from plpy.analyze import dynamic_tracer as dt
class BasicTracer(object):
def __init__(self, fun, trace_lines=False, trace_inside_call=True):
self.fun = fun
self.trace_lines = trace_lines
self.trace_inside_call = trace_inside_call
self.frame_acc = []
self.result_acc = []
self.orig_tracer = None
def trace(self, frame, event, arg):
if event == 'call' or (event == 'line' and self.trace_lines):
self.frame_acc.append(frame)
self.result_acc.append(self.fun(frame))
if self.trace_inside_call:
return self.trace
else:
return None
def setup(self):
self.orig_tracer = sys.gettrace()
sys.settrace(self.trace)
def shutdown(self):
sys.settrace(self.orig_tracer)
def clear(self):
self.frame_acc = []
self.result_acc = []
def __enter__(self):
self.setup()
def __exit__(self, type, value, traceback):
self.shutdown()
# test helpers
def test_to_ast_node():
node = dt.to_ast_node('x + 2')
assert astunparse.unparse(node).strip() == '(x + 2)'
def test_get_caller_frame():
def f():
return
def g():
return f()
tracer = BasicTracer(lambda x: x)
with tracer:
g()
g_frame = tracer.result_acc[0]
f_frame = tracer.result_acc[1]
assert dt.get_caller_frame(f_frame) == g_frame
def get_basic_function():
def f():
return
tracer = BasicTracer(dt.get_function_obj)
with tracer:
f()
return tracer.result_acc[0], f
def get_basic_method():
class BasicClass(object):
def f(self):
return
val = BasicClass()
tracer = BasicTracer(dt.get_function_obj)
with tracer:
val.f()
return tracer.result_acc[0], val.f
def get_basic_static_method():
class BasicClass(object):
@staticmethod
def f():
return
tracer = BasicTracer(dt.get_function_obj)
with tracer:
BasicClass.f()
return tracer.result_acc[0], BasicClass.f
def get_basic_nested_function():
def g():
def f():
return
f()
return f
tracer = BasicTracer(dt.get_function_obj)
with tracer:
res = g()
return tracer.result_acc[1], res
@pytest.mark.parametrize(
'get_func', [
get_basic_function, get_basic_method, get_basic_static_method,
get_basic_nested_function
]
)
def test_get_function_obj(get_func):
fetched, fun = get_func()
assert fetched == fun, 'Failed to retrieve appropriate function object'
def test_get_co_name():
def f():
return
tracer = BasicTracer(dt.get_co_name)
with tracer:
f()
assert tracer.result_acc[0] == 'f'
class BasicDummyClass(object):
def m(self):
return
@staticmethod
def s():
return
def test_get_function_qual_name():
tracer = BasicTracer(dt.get_function_qual_name)
val = BasicDummyClass()
with tracer:
val.m()
val.s()
assert tracer.result_acc[0] == 'BasicDummyClass.m', 'Qualified method name'
assert tracer.result_acc[
1] == 'BasicDummyClass.s', 'Qualified static method name'
@pytest.mark.parametrize(
'node_str,full_expected,all_but_first_expected',
[('a', ['a'], []), ('a.b.c', ['a', 'a.b', 'a.b.c'], ['a', 'a.b'])]
)
def test_extract_references(node_str, full_expected, all_but_first_expected):
node = ast.parse(node_str)
refs = set(dt.get_nested_references(node))
refs_but_first = set(dt.get_nested_references(node, exclude_first=True))
full_expected = set(full_expected)
all_but_first_expected = set(all_but_first_expected)
assert refs == full_expected, 'References do not match'
assert refs_but_first == all_but_first_expected, 'References do not match'
def test_register_assignment_stubs():
stubber = dt.AddMemoryUpdateStubs('_stub')
src = "x = 1; y = f(); z += 1; d[1] = 2"
expected = "x = 1; _stub(['x']); y = f(); _stub(['y']); z += 1; _stub(['z']); d[1] = 2; _stub(['d', 'd[1]'])"
with_stubs = stubber.visit(ast.parse(src))
assert ast.dump(with_stubs) == ast.dump(ast.parse(expected))
src_with_imports = """
import numpy as np
import sklearn.preprocessing
from sklearn import linear_models
from sklearn import linear_models as lm
"""
expected_with_imports = """
import numpy as np
_stub(['np'])
import sklearn.preprocessing
_stub(['sklearn.preprocessing'])
from sklearn import linear_models
_stub(['linear_models'])
from sklearn import linear_models as lm
_stub(['lm'])
"""
src_with_imports = textwrap.dedent(src_with_imports)
expected_with_imports = textwrap.dedent(expected_with_imports)
with_imports_with_stubs = stubber.visit(ast.parse(src_with_imports))
assert ast.dump(with_imports_with_stubs) == ast.dump(
ast.parse(expected_with_imports)
)
def test_is_stub_call():
tracer = BasicTracer(dt.get_function_obj)
with tracer:
dt.memory_update_stub(['var'])
assert dt.is_stub_call(
tracer.result_acc[0]
), 'Calling a stub function should yield true'
tracer.result_acc = []
def f():
return
with tracer:
f()
assert not dt.is_stub_call(
tracer.result_acc[0]
), 'Calling non-stub function should yield false'
@pytest.mark.parametrize(
'_input,expected', [
('a = 10', []),
('a = x * 10', ['x']),
('a = a.b * 10', ['a', 'a.b']),
('a = b[0] * 10', ['b', 'b[0]']),
('a = b[0][1] * 10', ['b', 'b[0]', 'b[0][1]']),
('a = b[1:2] * 10', ['b', 'b[1:2]']),
('a.b.c = x * 10', ['x', 'a', 'a.b']),
('a[0][1] = c.d[0]', ['a', 'a[0]', 'c', 'c.d', 'c.d[0]']),
('a.b.c[0] = 10', ['a', 'a.b', 'a.b.c']),
('x = {a:1, b.c:2}', ['a', 'b', 'b.c']),
]
)
def test_get_load_references_from_line(_input, expected):
tracer = dt.DynamicDataTracer()
refs = tracer.get_load_references_from_line(_input)
assert refs == set(expected), 'Load references do not match'
def test_function_defined_by_user():
# make tracer think current file is user file
tracer = dt.DynamicDataTracer()
tracer.file_path = __file__
helper = BasicTracer(tracer._defined_by_user)
with helper:
def f():
return 2
f()
assert helper.result_acc[0], 'Function was defined by user in test file'
helper.clear()
with helper:
        # calling a function that was not defined by us in this file
ast.parse('1')
assert not helper.result_acc[
0], 'Function was not defined by user in this test file'
def test_function_called_by_user():
tracer = dt.DynamicDataTracer()
tracer.file_path = __file__
helper = BasicTracer(tracer._called_by_user, trace_inside_call=True)
import pandas as pd
with helper:
pd.DataFrame([(1, 2)])
assert helper.result_acc[0] and (
not helper.result_acc[1]
), 'First is call made by user, second is not (its call to np._amax in np source)'
def standardize_source(src):
return astunparse.unparse(ast.parse(src))
def check_memory_update(event, updates):
assert isinstance(event, MemoryUpdate)
assert set(d.name for d in event.defs) == set(updates)
def check_exec_line(event, line, refs_loaded):
assert isinstance(event, ExecLine)
try:
assert standardize_source(event.line) == standardize_source(line)
except SyntaxError:
        # some things, such as with-statements, can't be parsed as a single line on their own
assert event.line.strip() == line.strip()
uses = set(u.name for u in event.uses)
assert uses == set(refs_loaded)
def check_enter_call(event, qualname, call_args, is_method):
assert isinstance(event, EnterCall)
assert event.details['qualname'] == qualname
if call_args is not None:
abstract_call_args = event.details['abstract_call_args']
abstract_call_args_names = set(a.name for a in abstract_call_args)
assert abstract_call_args_names == set(call_args)
assert event.details['is_method'] == is_method
def check_exit_call(event, co_name):
assert isinstance(event, ExitCall)
assert event.details['co_name'] == co_name
def check_ignore(event):
pass
def make_event_check(fun, *args, **kwargs):
return lambda x: fun(x, *args, **kwargs)
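# make_event_check partially applies a checker so it can be stored in a list, e.g.
#   chk = make_event_check(check_exec_line, line='x = 1', refs_loaded=[])
#   chk(event)  # runs check_exec_line(event, line='x = 1', refs_loaded=[])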
# function call
def basic_case_1():
src = """
def f(x, y):
return x + y
x = 10
y = 20
z = f(x, y)
"""
expected_event_checks = [
# x = 10
make_event_check(check_exec_line, line='x = 10', refs_loaded=[]),
make_event_check(check_memory_update, updates=['x']),
# y = 20
make_event_check(check_exec_line, line='y = 20', refs_loaded=[]),
make_event_check(check_memory_update, updates=['y']),
# z = f(x, y)
make_event_check(
check_exec_line, line='z = f(x, y)', refs_loaded=['f', 'x', 'y']
),
make_event_check(
check_enter_call,
qualname='f',
call_args=['x', 'y'],
is_method=False
),
make_event_check(
check_exec_line, line='return x + y', refs_loaded=['x', 'y']
),
make_event_check(check_exit_call, co_name='f'),
make_event_check(check_memory_update, updates=['z']),
]
return src, expected_event_checks
# static method call
def basic_case_2():
src = """
class A(object):
@staticmethod
def f(x, y):
return x + y
x = 10
y = 20
z = A.f(x, y)
"""
expected_event_checks = [
# x = 10
make_event_check(check_exec_line, line='x = 10', refs_loaded=[]),
make_event_check(check_memory_update, updates=['x']),
# y = 20
make_event_check(check_exec_line, line='y = 20', refs_loaded=[]),
make_event_check(check_memory_update, updates=['y']),
# z = A.f(x, y)
make_event_check(
check_exec_line,
line='z = A.f(x, y)',
refs_loaded=['x', 'y', 'A', 'A.f']
),
        # note that is_method is False as staticmethods are indistinguishable from plain functions in Python 3.*
# in particular, inspect.ismethod returns False
make_event_check(
check_enter_call,
qualname='A.f',
call_args=['x', 'y'],
is_method=False
),
make_event_check(
check_exec_line, line='return x + y', refs_loaded=['x', 'y']
),
make_event_check(check_exit_call, co_name='f'),
make_event_check(check_memory_update, updates=['z']),
]
return src, expected_event_checks
# method call
def basic_case_3():
src = """
import numpy as np
class A(object):
def __init__(self, x):
self.v = x
def f(self, x, y):
return x + y + self.v
x = 10
y = 20
obj = A(10)
z = obj.f(x, y)
obj.v = 200
np.max([1,2,3])
x = 2
"""
expected_event_checks = [
# import numpy as np
make_event_check(
check_exec_line, line='import numpy as np', refs_loaded=[]
),
make_event_check(check_memory_update, updates=['np']),
# x = 10
make_event_check(check_exec_line, line='x = 10', refs_loaded=[]),
make_event_check(check_memory_update, updates=['x']),
# y = 10
make_event_check(check_exec_line, line='y = 20', refs_loaded=[]),
make_event_check(check_memory_update, updates=['y']),
# obj = A(10)
make_event_check(
check_exec_line, line='obj = A(10)', refs_loaded=['A']
),
        # note that the constructor call is not yet a 'method' as there is no instance bound to it at the time of function entry
make_event_check(
check_enter_call,
qualname='A',
call_args=['self', 'x'],
is_method=False
),
make_event_check(
check_exec_line, line='self.v = x', refs_loaded=['self', 'x']
),
# self, and self.v
make_event_check(check_memory_update, updates=['self', 'self.v']),
make_event_check(check_exit_call, co_name='__init__'),
make_event_check(check_memory_update, updates=['obj']),
# z = obj.f(x, y)
make_event_check(
check_exec_line,
line='z = obj.f(x, y)',
refs_loaded=['obj', 'obj.f', 'x', 'y']
),
        # here obj.f is bound to an instance, so inspect.ismethod returns True and is_method is True
make_event_check(
check_enter_call,
qualname='A.f',
call_args=['self', 'x', 'y'],
is_method=True
),
make_event_check(
check_exec_line,
line='return x + y + self.v',
refs_loaded=['x', 'y', 'self', 'self.v']
),
make_event_check(check_exit_call, co_name='f'),
make_event_check(check_memory_update, updates=['z']),
# obj.v = 200
make_event_check(
check_exec_line, line='obj.v = 200', refs_loaded=['obj']
),
make_event_check(check_memory_update, updates=['obj', 'obj.v']),
make_event_check(
check_exec_line,
line='np.max([1,2,3])',
refs_loaded=['np', 'np.max']
),
make_event_check(
check_enter_call,
qualname='amax',
call_args=['a', 'axis', 'out', 'keepdims'],
is_method=False
),
make_event_check(check_exit_call, co_name='amax'),
make_event_check(check_exec_line, line='x = 2', refs_loaded=[]),
make_event_check(check_memory_update, updates=['x']),
]
return src, expected_event_checks
# use of with (enter/exit)
def basic_case_4():
src = """
class A(object):
def __init__(self, val):
self.val = val
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
pass
orig = 10
with A(orig) as v:
x = v.val
max([x, 2])
w = 10
import pandas as pd
df = pd.DataFrame([(1, 2), (3, 4)], columns=['c1', 'c2'])
df.max()
"""
expected_event_checks = [
make_event_check(check_exec_line, line='orig = 10', refs_loaded=[]),
make_event_check(check_memory_update, updates=['orig']),
make_event_check(
check_exec_line,
line='with A(orig) as v:',
refs_loaded=['A', 'orig']
),
# inside __init__
make_event_check(
check_exec_line,
line='self.val = val',
refs_loaded=['self', 'val']
),
make_event_check(check_memory_update, updates=['self.val', 'self']),
        # inside __enter__ as defined by the user
make_event_check(
check_exec_line, line='return self', refs_loaded=['self']
),
make_event_check(check_memory_update, updates=['v']),
make_event_check(
check_exec_line, line='x = v.val', refs_loaded=['v', 'v.val']
),
make_event_check(check_memory_update, updates=['x']),
make_event_check(
check_exec_line, line='max([x, 2])', refs_loaded=['x', 'max']
),
make_event_check(check_exec_line, line='w = 10', refs_loaded=[]),
make_event_check(check_memory_update, updates=['w']),
make_event_check(
check_exec_line, line='import pandas as pd', refs_loaded=[]
),
make_event_check(check_memory_update, updates=['pd']),
make_event_check(
check_exec_line,
line="df = pd.DataFrame([(1, 2), (3, 4)], columns=['c1', 'c2'])",
refs_loaded=['pd', 'pd.DataFrame']
),
        # ignore call_args (too many to enumerate)
make_event_check(
check_enter_call,
qualname='DataFrame',
call_args=None,
is_method=False
),
make_event_check(check_exit_call, co_name='__init__'),
make_event_check(check_memory_update, updates=['df']),
make_event_check(
check_exec_line, line='df.max()', refs_loaded=['df', 'df.max']
),
make_event_check(
check_enter_call,
qualname='DataFrame.max',
call_args=None,
is_method=True
),
# this matches the co_name for DataFrame.max
make_event_check(check_exit_call, co_name='stat_func'),
]
return src, expected_event_checks
# call to a C function
def basic_case_5():
src = """
import numpy as np
v = [1,2,3]
v_log = np.log(v)
other = 100
"""
expected_event_checks = [
make_event_check(
check_exec_line, line='import numpy as np', refs_loaded=[]
),
make_event_check(check_memory_update, updates=['np']),
make_event_check(check_exec_line, line='v = [1,2,3]', refs_loaded=[]),
make_event_check(check_memory_update, updates=['v']),
make_event_check(
check_exec_line,
line='v_log = np.log(v)',
refs_loaded=['np', 'np.log', 'v']
),
# no enter/exit calls as C function
make_event_check(check_memory_update, updates=['v_log']),
make_event_check(check_exec_line, line='other = 100', refs_loaded=[]),
make_event_check(check_memory_update, updates=['other']),
]
return src, expected_event_checks
# non-local variable in function and using a function closure
def basic_case_6():
src = """
_times = 2
def mult(x):
return x * _times
mult(10)
def with_closure():
x = 100
def times_100(y):
return y * x
return times_100
f = with_closure()
f(10)
"""
expected_event_checks = [
make_event_check(check_exec_line, line='_times = 2', refs_loaded=[]),
make_event_check(check_memory_update, updates=['_times']),
make_event_check(
check_exec_line, line='mult(10)', refs_loaded=['mult', '_times']
), #_times loaded indirectly by mult call
make_event_check(
check_enter_call,
qualname='mult',
call_args=['x'],
is_method=False
),
make_event_check(
check_exec_line,
line='return x * _times',
refs_loaded=['x', '_times']
),
make_event_check(check_exit_call, co_name='mult'),
make_event_check(
check_exec_line,
line='f = with_closure()',
refs_loaded=['with_closure']
),
make_event_check(
check_enter_call,
qualname='with_closure',
call_args=[],
is_method=False
),
make_event_check(check_exec_line, line='x = 100', refs_loaded=[]),
make_event_check(check_memory_update, updates=['x']),
make_event_check(
check_exec_line,
line='return times_100',
refs_loaded=['times_100']
),
make_event_check(check_exit_call, co_name='with_closure'),
make_event_check(check_memory_update, updates=['f']),
make_event_check(check_exec_line, line='f(10)', refs_loaded=['f']),
make_event_check(
check_enter_call,
qualname='with_closure.<locals>.times_100',
call_args=['y'],
is_method=False
),
make_event_check(
check_exec_line, 'return y * x', refs_loaded=['x', 'y']
),
make_event_check(check_exit_call, co_name='times_100'),
]
return src, expected_event_checks
# call user code from non-user code
def basic_case_7():
src = """
import pandas as pd
s = pd.Series([1, 2, 3])
def g():
return 2
def f(x):
g()
return x * 2
f(10)
s.apply(f)
s[0] = 100
"""
expected_event_checks = [
make_event_check(
check_exec_line, line='import pandas as pd', refs_loaded=[]
),
make_event_check(check_memory_update, updates=['pd']),
make_event_check(
check_exec_line,
line='s = pd.Series([1,2,3])',
refs_loaded=['pd', 'pd.Series']
),
make_event_check(
check_enter_call,
qualname='Series',
call_args=None,
is_method=False
),
make_event_check(check_exit_call, co_name='__init__'),
make_event_check(check_memory_update, updates=['s']),
make_event_check(
check_exec_line, line='f(10)', refs_loaded=['f', 'g']
), # g loaded indirectly by f
make_event_check(
check_enter_call, qualname='f', call_args=['x'], is_method=False
),
make_event_check(check_exec_line, line='g()', refs_loaded=['g']),
make_event_check(
check_enter_call, qualname='g', call_args=[], is_method=False
),
make_event_check(check_exec_line, line='return 2', refs_loaded=[]),
make_event_check(check_exit_call, co_name='g'),
make_event_check(
check_exec_line, line='return x * 2', refs_loaded=['x']
),
make_event_check(check_exit_call, co_name='f'),
make_event_check(
check_exec_line,
line='s.apply(f)',
refs_loaded=['s', 's.apply', 'f', 'g']
), # g loaded indirectly by f
        # note that there are no entries for the f calls inside apply
make_event_check(
check_enter_call,
qualname='Series.apply',
call_args=None,
is_method=True
),
make_event_check(check_exit_call, co_name='apply'),
make_event_check(
check_exec_line, line='s[0] = 100', refs_loaded=['s']
),
make_event_check(check_memory_update, updates=['s', 's[0]']),
]
return src, expected_event_checks
# assignment that calls __setitem__, make sure we don't trigger that
def basic_case_8():
src = """
d = {1:2, 2:3}
d[1] = 1000
"""
expected_event_checks = [
make_event_check(
check_exec_line, line='d = {1:2, 2:3}', refs_loaded=[]
),
make_event_check(check_memory_update, updates=['d']),
make_event_check(
check_exec_line, line='d[1] = 1000', refs_loaded=['d']
),
make_event_check(check_memory_update, updates=['d', 'd[1]']),
]
return src, expected_event_checks
# bounded loop
def basic_case_9():
src = """
class A(object):
def __init__(self, v):
self.v = 10
x = 0
while x < 10:
for i in range(10):
i
x += 1
a = A(1)
a.v += 1
[x for x in range(10)]
{x for x in range(10)}
"""
loop_bound = 2
expected_event_checks = [
make_event_check(check_exec_line, line='x = 0', refs_loaded=[]),
make_event_check(check_memory_update, updates=['x']),
# bounded to two
make_event_check(check_exec_line, line='i', refs_loaded=['i']),
make_event_check(check_exec_line, line='i', refs_loaded=['i']),
make_event_check(check_exec_line, line='x += 1', refs_loaded=['x']),
make_event_check(check_memory_update, updates=['x']),
# bounded to two
make_event_check(check_exec_line, line='i', refs_loaded=['i']),
make_event_check(check_exec_line, line='i', refs_loaded=['i']),
make_event_check(check_exec_line, line='x += 1', refs_loaded=['x']),
make_event_check(check_memory_update, updates=['x']),
# done with outer loop
# outside
make_event_check(check_exec_line, line='a = A(1)', refs_loaded=['A']),
make_event_check(check_ignore), # enter call
make_event_check(check_ignore), # line
make_event_check(check_ignore), # memory update
make_event_check(check_ignore), # exit call
make_event_check(check_ignore), # memory update
make_event_check(
check_exec_line, line='a.v += 1', refs_loaded=['a', 'a.v']
),
make_event_check(check_memory_update, updates=['a', 'a.v']),
make_event_check(
check_exec_line,
line='[x for x in range(10)]',
refs_loaded=['x', 'range']
),
make_event_check(
check_exec_line,
line='{x for x in range(10)}',
refs_loaded=['x', 'range']
),
]
return src, expected_event_checks, loop_bound
# use a global variables in user function that is called from third party function
def basic_case_10():
src = """
import pandas as pd
df = pd.DataFrame([(1, 2), (3, 4)], columns=['c1', 'c2'])
extra = 10
other_extra = 100
def g(y):
return other_extra
# dependency on global extra
def add(x):
return g(x) + extra
df['c2'].apply(add)
"""
# line, memupdate, line, call, return, memupdate, line, memupdate, line, memupdate
expected_event_checks = [make_event_check(check_ignore)] * 10
expected_event_checks += [
# note that extra is also implicitly loaded as it is used by add. add is called inside apply, which we don't instrument, so
# we add it at the ExecLine event
# so the indirect references are: extra, g, other_extra
make_event_check(
check_exec_line,
line="df['c2'].apply(add)",
refs_loaded=[
'df', "df['c2']", "df['c2'].apply", 'add', 'extra', 'g',
'other_extra'
]
)
]
# call, return
expected_event_checks += [make_event_check(check_ignore)] * 2
return src, expected_event_checks
# an empty program should still work rather than crash
def basic_case_11():
src = " "
expected_event_checks = []
return src, expected_event_checks
# use of comprehensions
def basic_case_12():
src = """
[a for a in range(3)]
{a for a in range(4)}
d = {'a':'b', 'c':'d'}
{ k:v for k, v in d.items()}
"""
expected_event_checks = [
make_event_check(
check_exec_line,
line='[a for a in range(3)]',
refs_loaded=['range', 'a']
),
make_event_check(
check_exec_line,
line='{a for a in range(4)}',
refs_loaded=['range', 'a']
),
make_event_check(check_ignore),
make_event_check(check_ignore),
make_event_check(
check_exec_line,
line="{k:v for k, v in d.items()}",
refs_loaded=['k', 'v', 'd', 'd.items']
),
]
return src, expected_event_checks
basic_cases = [
basic_case_1,
basic_case_2,
basic_case_3,
basic_case_4,
basic_case_5,
basic_case_6,
basic_case_7,
basic_case_8,
basic_case_9,
basic_case_10,
basic_case_11,
basic_case_12,
]
@pytest.mark.parametrize('_input_fun', basic_cases)
def test_basic_programs(_input_fun):
test_inputs = _input_fun()
if len(test_inputs) == 2:
src, expected_checks = test_inputs
loop_bound = None
else:
src, expected_checks, loop_bound = test_inputs
tracer = dt.DynamicDataTracer(loop_bound=loop_bound)
src = textwrap.dedent(src)
tracer.run(src)
print(list(map(str, tracer.trace_events)))
assert len(
tracer.trace_events
) == len(expected_checks), 'The event and checks are mismatched.'
for event, check in zip(tracer.trace_events, expected_checks):
check(event)
``` |
{
"source": "josepablocam/specimen-tools",
"score": 2
} |
#### File: specimen/database/queries.py
```python
import numpy as np
import pandas as pd
import sqlite3 as db
import dbtypes
from specimen import utils
def read_sql(sql, conn):
# read sql with pandas but make sure column names are lowercase
df = pd.read_sql(sql, conn)
df.columns = df.columns.map(lambda x: x.lower())
return df
class SpecimenQueries:
"""
Contains helpful specimen database queries. Should be used as a starting point for analysis of specimen
data.
"""
def __init__(self, database_path=None):
"""
Provides wrapper for queries. Caches queries where possible.
:param database_path: Path to SQLITE database file
"""
self.database_path = database_path
self.conn = db.connect(database=self.database_path)
# start use of foreign keys
_cursor = self.conn.cursor()
_cursor.execute('PRAGMA foreign_keys = ON')
_cursor.close()
self.cache = {}
def _clear_cache(self):
""" Clear cache, which stores prior query results """
self.cache = {}
def _drop_tables(self, tables):
"""
Drop a set of tables from db (often used to materialize intermediate tables for ease of querying and
then removing these to avoid affecting db state)
:param tables: list of tables to drop
:return: drops if they exist, ignores otherwise
"""
        cursor = self.conn.cursor()
        try:
            # SQLite's DROP TABLE accepts a single table name, so drop them one at a time
            for table in tables:
                cursor.execute('DROP TABLE IF EXISTS %s' % table)
        except:
            pass
        finally:
            cursor.close()
def _get_unknown_userid(self):
"""
Retrieve user id associated with unknown user
"""
cursor = self.conn.cursor()
unknown_user_str = dbtypes.User.null
cursor.execute("select id from users where uniqueid='%s'" % unknown_user_str)
return cursor.fetchone()[0]
def users_and_countries(self, use_cache=True):
"""
Returns a table with userid and most likely country (based on carrier location frequency).
:param use_cache: if true uses cached result, else clears database state and reruns query
:return: pandas dataframe
"""
key = 'user_and_countries'
if use_cache and key in self.cache:
return self.cache[key].copy()
cursor = self.conn.cursor()
if not use_cache:
self._drop_tables(['user_country_freqs', 'user_and_likely_country'])
# userid for unknown user
unknown_user_id = self._get_unknown_userid()
# can only return country info if userid is known
cursor.execute(
"""
CREATE TEMP TABLE user_country_freqs AS
select userid, country, count(*) as ct
from sessions where userid <> %d and country is not null
group by userid, country
""" % unknown_user_id
)
# assigns each user to country with most counts
cursor.execute(
"""
CREATE TEMP TABLE user_and_likely_country AS
SELECT *
FROM
user_country_freqs JOIN (SELECT userid, max(ct) as max_ct FROM user_country_freqs GROUP BY userid) max_cts
USING (userid)
WHERE user_country_freqs.ct = max_cts.max_ct
GROUP BY userid
"""
)
cursor.close()
result = read_sql('SELECT * FROM user_and_likely_country', self.conn)
self.cache[key] = result.copy()
return result
def create_reference_ids_table(self, vals, table_name='_ref'):
"""
Create a temporary reference table by inserting values.
This is used to speed up sqlite queries that are too slow when given
the list directly in the query text (most likely a parsing issue?).
"""
# remove existing
self._drop_tables([table_name])
cursor = self.conn.cursor()
cursor.execute('CREATE TEMP TABLE %s (id INTEGER)' % table_name)
for i, v in enumerate(vals):
cursor.execute('INSERT INTO %s VALUES(%d)' % (table_name, v))
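        # A more compact alternative (a sketch with the same effect) would be a single
        # parameterized executemany call:
        #   cursor.executemany('INSERT INTO %s VALUES (?)' % table_name,
        #                      [(v,) for v in vals])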
def get_time_offset(self, event_ids, get_extra_info=True, use_cache=True):
"""
Compute the time offset from the start of a session for a list of events.
Only possible with data from JSON files. CSV files have dummy timestamps.
:param event_ids: list of event ids to query
"""
print "Warning: This is only valid for data from the json files! Timestamps in csv are dummies"
if event_ids is None:
raise ValueError('Must provide event ids ts')
key = ('timestamps', tuple(event_ids), get_extra_info)
if use_cache and key in self.cache:
return self.cache[key].copy()
# create event id references to query
self.create_reference_ids_table(event_ids, table_name='_ref')
ts_query = """
SELECT events.id as id, offsettimestamp, event FROM events, _ref
WHERE events.id = _ref.id AND offsettimestamp >= 0
"""
ts = read_sql(ts_query, self.conn)
# adds additional information such as user id, and session id for matching up timestamps
if get_extra_info:
extra_info_query = """
SELECT
sessions.userid,
events.id AS id,
sessions.id AS sessionid
FROM events, sessions, _ref
WHERE events.id = _ref.id AND
events.sessionid = sessions.id
"""
extra_info_df = read_sql(extra_info_query, self.conn)
ts = ts.merge(extra_info_df, how='left', on='id')
self.cache[key] = ts.copy()
return ts
def get_devices(self, event_ids, use_cache=True):
"""
Query the devices associated with particular event ids.
:param event_ids: list of event ids to query
"""
if event_ids is None:
raise ValueError('Must provide event ids')
# cast to tuple so that can be hashed
key = ('devices', tuple(event_ids))
if use_cache and key in self.cache:
return self.cache[key].copy()
# create event id references to query
self.create_reference_ids_table(event_ids, table_name='_ref')
devices_query = """
select
devices.name as device_name,
events.id as eventid
FROM
sessions, events, devices, _ref
WHERE
events.id = _ref.id AND
sessions.id = events.sessionid AND
sessions.deviceid = devices.id
"""
devices_df = read_sql(devices_query, self.conn)
self.cache[key] = devices_df.copy()
return devices_df
def base_selections(self, min_turns=50, which='all', add_fields=None, use_cache=True):
"""
Obtain base selections data, consisting of selections for known userids (i.e. this
precludes data from the CSV files from Flurry, which do not have known user ids associated
with each record). Selects only the first turn in a 'play',
to control for game play. Selects data for users with at least `min_turns` such turns. Caches results
:param min_turns: minimum number of first turns necessary for data, if 0, returns all
:param which: one of 'all', 'correct', 'incorrect', determines what kind of selections are returned
:param add_fields: add extra base fields from table selectionevents. If dict, uses keys as fields
and values as names, if list uses elements as fields and names
:param use_cache: if true, uses cached results, else clears database state and reruns.
:return: pandas dataframe
"""
if min_turns < 0:
raise ValueError('min_turns must be > 0')
if add_fields and not utils.is_iterable(add_fields):
raise ValueError('add_fields must be iterable')
if not which in ['all', 'correct', 'incorrect']:
raise ValueError("which must be one of 'all', 'correct', 'incorrect'")
key = ('first_sels', min_turns, which, add_fields)
if use_cache:
if key in self.cache:
return self.cache[key].copy()
else:
# we may have created tables for different optional args (i.e. diff min_turns)
self._drop_tables(['first_sels', 'enough_plays'])
if not use_cache:
self._drop_tables(['first_sels', 'enough_plays'])
# cobble together additional fields from selectionevents
added = ""
if add_fields:
if not isinstance(add_fields, dict):
add_fields = dict(zip(add_fields, add_fields))
added = ", " + (".".join(["%s as %s" % (f,n) for f, n in add_fields.iteritems()]))
cursor = self.conn.cursor()
# unknown user id
unknown_user_id = self._get_unknown_userid()
# filter to base data consisting of first-turns in play for known user ids
print "Filtering down to first-turns in a play"
cursor.execute("""
-- compute the smallest eventid associated with each playid
CREATE TEMP TABLE sel_cts AS
SELECT MIN(eventid) as min_event_id
FROM selectionevents
where userid <> %d
GROUP BY playid
""" % unknown_user_id)
print "Retrieving selection information for those turns"
cursor.execute("""
-- use this min eventid to select the first choice in each round
CREATE TEMP TABLE first_sels AS
SELECT
userid, playid, id as selid, eventid,
target_r, target_g, target_b,
specimen_r, specimen_g, specimen_b,
target_lab_l, target_lab_a, target_lab_b,
specimen_lab_l, specimen_lab_a, specimen_lab_b,
is_first_pick,
target_h,
target_s,
target_v,
specimen_h,
correct
%s
FROM
selectionevents
INNER JOIN sel_cts
ON selectionevents.eventid = sel_cts.min_event_id
WHERE userid <> %d
""" % (added, unknown_user_id)
)
# restrict to subset of users with at least min_turns
if min_turns:
cursor.execute(
"""
CREATE TEMP TABLE enough_plays as
SELECT userid FROM first_sels GROUP BY userid HAVING count(*) >= %s
""" % min_turns
)
cursor.execute('DELETE FROM first_sels WHERE NOT userid IN (SELECT userid FROM enough_plays)')
cursor.close()
# filter to type of selections requested
if which == 'all':
results = read_sql('SELECT * FROM first_sels', self.conn)
elif which == 'correct':
results = read_sql('SELECT * FROM first_sels WHERE correct', self.conn)
else:
results = read_sql('SELECT * FROM first_sels WHERE NOT correct', self.conn)
self.cache[key] = results.copy()
return results
def execute_adhoc(self, query, use_cache=True):
"""
Execute ad-hoc queries over the Specimen database.
:param query: String SQL query
"""
key = query
if use_cache and key in self.cache:
return self.cache[key].copy()
results = read_sql(query, self.conn)
self.cache[key] = results.copy()
return results
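# Illustrative usage of SpecimenQueries (the database path here is hypothetical):
#   queries = SpecimenQueries(database_path='specimen.sqlite')
#   by_country = queries.users_and_countries()
#   first_picks = queries.base_selections(min_turns=50, which='correct')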
```
#### File: specimen-tools/specimen/utils.py
```python
import colorsys
from itertools import combinations
from colormath.color_objects import LabColor, sRGBColor
from colormath.color_conversions import convert_color
import numpy as np
import pycountry
def is_iterable(x):
""" Check if a value is iterable """
try:
iter(x)
return True
except:
return False
def flatten_list(l):
if l and is_iterable(l) and is_iterable(l[0]) and not (isinstance(l[0], str) or isinstance(l[0], unicode)):
return [item for sublist in l for item in sublist]
else:
return l
munsell_hue_labels = np.array(['R', 'YR', 'Y', 'GY', 'G', 'BG', 'B', 'PB', 'P', 'RP'])
def munsell_buckets(hues, labels = False, color = 'right', normalizer = 100.0):
"""
Returns corresponding color in munsell bucket
Source http://www.farbkarten-shop.de/media/products/0944505001412681032.pdf
:param hues: hues to discretize
:param labels: if true returns string name rather than bucket
:param normalizer: divisor constant to normalize if values not already between 0.0 and 1.0
:return: representative hue (and optionally) the label for the bucket
"""
if not is_iterable(hues):
raise ValueError("hues must be iterable")
if not color in ['left', 'mid', 'right']:
raise ValueError("color must be one of 'left', 'mid', 'right'")
munsell_bounds = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
# bring to 0.0 to 1.0 if necessary
hues = np.array(hues)
if max(hues) > 1.0:
hues = hues / normalizer
bucketed = np.digitize(hues, bins = munsell_bounds)
# make zero indexed
bucketed -= 1
# reassign values of 1 to the first bucket
bucketed[np.where(hues == 1.0)] = 0
if not labels:
return bucketed
else:
return bucketed, munsell_hue_labels[bucketed]
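# Illustrative call (values assumed, not from the original code): a hue of 0.05
# falls in the first Munsell bucket and 0.5 in the sixth, i.e.
#   munsell_buckets([0.05, 0.5], labels=True)
#   # -> (array([0, 5]), array(['R', 'BG'], ...))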
def _get_hue_pair_map():
""" Order agnostic mapping to munsell hues ( e.g R-P, P-R both map to same value )"""
pairs = list(combinations(munsell_hue_labels, 2))
# self maps
pairs += [(h, h) for h in munsell_hue_labels]
pairs = {p:p for p in pairs}
# reverses pairs
pairs.update({(h2, h1):mapped for (h1, h2), mapped in pairs.iteritems()})
return pairs
munsell_pair_map = _get_hue_pair_map()
def get_full_country_name(iso_code, override = None):
"""
Get country name for 2 letter iso country code used in specimen data as unicode
:param iso_code:
:param override: we may prefer some mappings, or some may be old and not in the countries data, so try override first
:return:
"""
if not override is None and iso_code in override:
return unicode(override[iso_code])
else:
return unicode(pycountry.countries.get(alpha_2 = iso_code).name)
def rgb_to_lab(r, g, b):
rgb = sRGBColor(r, g, b)
lab = convert_color(rgb, LabColor)
# scale to fit Mathematica scale
return tuple(val / 100.0 for val in lab.get_value_tuple())
def lab_to_rgb(l, a, b):
# undo the / 100.0 shown above
lab = LabColor(l * 100.0, a * 100.0, b * 100.0)
rgb = convert_color(lab, sRGBColor)
    # sRGB components come back already on a 0-1 scale, so no rescaling is needed
return tuple(rgb.get_value_tuple())
def df_rgb_to_lab(df):
rgb = list('rgb')
df = df[rgb]
f_star = lambda x: list(rgb_to_lab(*x))
return df.apply(f_star, axis=1).rename(columns=dict(zip(rgb, 'lab')))
def mat_rgb_to_lab(mat):
f_star = lambda x: list(rgb_to_lab(*x))
return np.apply_along_axis(f_star, 1, mat)
def mat_lab_to_rgb(mat):
f_star = lambda x: list(lab_to_rgb(*x))
return np.apply_along_axis(f_star, 1, mat)
def prefix(p, l):
return ['%s%s' % (p, e) for e in l]
def to_csv_str(vals):
return ','.join(map(str, vals))
``` |
{
"source": "josepadial/FR",
"score": 3
} |
#### File: P2/Ejercicio5/cliente.py
```python
import socket, select, string, sys, base64
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def prompt(usuario) :
sys.stdout.write(bcolors.OKBLUE + '<' + usuario + '> ' + bcolors.ENDC)
sys.stdout.flush()
if __name__ == "__main__":
    # Ask for the host and port
if(len(sys.argv) < 2) :
print bcolors.WARNING + 'Escribe : ' + sys.argv[0] + ' <host> <puerto (por defecto el 8080)>' + bcolors.ENDC
sys.exit()
host = sys.argv[1]
if len(sys.argv) == 2:
port = 8080
else:
port = int(sys.argv[2])
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(2)
    # Try to open the connection
try :
s.connect((host, port))
except :
print bcolors.FAIL + '[Error 400]: No se ha podido hacer la conexion' + bcolors.ENDC
sys.exit()
print bcolors.OKGREEN + 'Conectado a ' + host + ' en el puerto ' + str(port) + '\nEscribe \exit para salir' + bcolors.ENDC
usuario = raw_input(bcolors.HEADER + "Escribe un nombre de usuario\n" + bcolors.ENDC)
    prompt(usuario) # Each user has their own alias
while 1:
socket_list = [sys.stdin, s]
        # Get the list of readable sockets
read_sockets, write_sockets, error_sockets = select.select(socket_list , [], [])
for sock in read_sockets:
            # Message from the server
if sock == s:
data = sock.recv(4096)
if not data :
print bcolors.FAIL + '\n[Error 500] Desconectado del servidor' + bcolors.ENDC
sys.exit()
else :
#print data
sys.stdout.write(data)
prompt(usuario)
            # Send the message typed by the user
else :
msg = sys.stdin.readline()
msg = usuario + ' ' + msg
encoded = base64.b64encode(msg)
s.send(encoded)
if '\exit' in msg:
sys.exit(0)
prompt(usuario)
``` |
{
"source": "josepaiva94/apicy",
"score": 2
} |
#### File: src/apicy/server.py
```python
import logging
import os
from fastapi import FastAPI
from starlette.middleware.cors import CORSMiddleware
from starlette.requests import Request
from starlette.responses import RedirectResponse, JSONResponse
from .dtos.analyze_text_request import AnalyzeTextRequest
from .dtos.dependency_parses_response import DependencyParsesResponse
from .dtos.models_response import ModelsResponse
from .dtos.named_entities_response import NamedEntitiesResponse
from .dtos.pipeline_response import PipelineResponse
from .dtos.pos_annotations_response import PosAnnotationsResponse
from .dtos.schema_response import SchemaResponse
from .dtos.sentences_response import SentencesResponse
from .exceptions.model_not_found_exception import ModelNotFoundException
from .tools.knowledge_extractor import KnowledgeExtractor
from .tools.model_manager import ModelManager
log = logging.getLogger(__name__)
# load models
MODELS = os.getenv('MODELS', 'en').split()
model_manager = ModelManager()
for model_name in MODELS:
model_manager.get_or_load_model(model_name)
log.info('loaded spacy model %s', model_name)
# start API
API_PREFIX = os.getenv('API_PREFIX', '').rstrip('/')
app = FastAPI(
title='SpaCy API',
version='1.0.0',
description='apiCy is a containerized Docker REST microservice for providing spaCy as a service.',
openapi_prefix=API_PREFIX,
)
app.add_middleware(
CORSMiddleware,
allow_origins=['*'],
allow_credentials=True,
allow_methods=['DELETE', 'GET', 'POST', 'PUT'],
allow_headers=['*']
)
@app.exception_handler(ModelNotFoundException)
async def model_not_found_exception_handler(request: Request, exc: ModelNotFoundException):
return JSONResponse(
status_code=404,
        content={'message': f'The pretrained statistical model "{exc.identifier}" is not available.'},
)
@app.get("/", include_in_schema=False)
def docs_redirect():
return RedirectResponse(f'{API_PREFIX}/docs')
@app.get(
'/models',
response_model=ModelsResponse,
response_model_exclude_none=True,
response_model_exclude_unset=True,
tags=['INFO']
)
async def list_available_models():
return {
'models': list(MODELS)
}
@app.get(
'/{lang}/schema',
response_model=SchemaResponse,
response_model_exclude_none=True,
response_model_exclude_unset=True,
tags=['INFO']
)
async def get_model_schema(lang: str):
model = model_manager.get_language_model(lang)
return {
'dep_types': list(model.get_dep_types()),
'ent_types': list(model.get_ent_types()),
'pos_types': list(model.get_pos_types())
}
@app.post(
'/{lang}/pos-annotations',
response_model=PosAnnotationsResponse,
response_model_exclude_none=True,
response_model_exclude_unset=True,
tags=['POS']
)
async def extract_pos_annotations(lang: str, body: AnalyzeTextRequest):
"""Analyzes the text and assigns parts of speech to each word, such as
noun, verb, adjective, etc.
"""
model = model_manager.get_language_model(lang)
extractor = KnowledgeExtractor(model, text=body.text)
return {
'pos_annotations': extractor.annotate_pos_tags()
}
@app.post(
'/{lang}/dependency-parses',
response_model=DependencyParsesResponse,
response_model_exclude_none=True,
response_model_exclude_unset=True,
tags=['DEP']
)
async def extract_dependency_parses(lang: str, body: AnalyzeTextRequest):
"""Analyzes the grammatical structure of natural language sentences, esta-
blishing relationships between "head" words and words which modify tho-
se heads."""
model = model_manager.get_language_model(lang)
extractor = KnowledgeExtractor(model, text=body.text)
return {
'dep_parses': extractor.extract_parse_dependencies()
}
@app.post(
'/{lang}/named-entities',
response_model=NamedEntitiesResponse,
response_model_exclude_none=True,
response_model_exclude_unset=True,
tags=['NER']
)
async def extract_named_entities(lang: str, body: AnalyzeTextRequest):
"""Analyzes the text and extracts named entities."""
model = model_manager.get_language_model(lang)
extractor = KnowledgeExtractor(model, text=body.text)
return {
'entities': extractor.extract_named_entities()
}
@app.post(
'/{lang}/sentences',
response_model=SentencesResponse,
response_model_exclude_none=True,
response_model_exclude_unset=True,
tags=['SENT']
)
async def get_sentences(lang: str, body: AnalyzeTextRequest):
"""Split the text into sentences."""
model = model_manager.get_language_model(lang)
extractor = KnowledgeExtractor(model, text=body.text)
return {
'sentences': extractor.get_sentences_list()
}
@app.post(
'/{lang}/pipeline',
response_model=PipelineResponse,
response_model_exclude_none=True,
response_model_exclude_unset=True,
tags=['POS', 'DEP', 'NER', 'SENT']
)
async def get_pipeline(
lang: str,
body: AnalyzeTextRequest,
pos: bool = False,
dep: bool = False,
sent: bool = False,
ner: bool = False
):
"""Analyze the text and return intermediate results of the pipeline."""
model = model_manager.get_language_model(lang)
extractor = KnowledgeExtractor(model, text=body.text)
resp = {}
    if pos:
        resp['pos_annotations'] = extractor.annotate_pos_tags()
    if dep:
        resp['dep_parses'] = extractor.extract_parse_dependencies()
    if sent:
        resp['sentences'] = extractor.get_sentences_list()
    if ner:
        resp['entities'] = extractor.extract_named_entities()
return resp
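# Illustrative request (host, port and loaded models depend on the deployment):
#   POST /en/pipeline?pos=true&ner=true
#   body: {"text": "Apple is looking at buying U.K. startup for $1 billion"}
# returns only the 'pos_annotations' and 'entities' parts of the pipeline output.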
```
#### File: apicy/tools/model_manager.py
```python
import os
from typing import Dict
import spacy
from spacy.symbols import ORTH
from ..exceptions.model_not_found_exception import ModelNotFoundException
class Model(object):
def __init__(self, spacy_model):
self.spacy_model = spacy_model
def get_dep_types(self):
"""List the available dep labels in the model."""
return self.spacy_model.parser.labels
def get_ent_types(self):
"""List the available entity types in the model."""
return self.spacy_model.entity.labels
def get_pos_types(self):
"""List the available part-of-speech tags in the model."""
return self.spacy_model.tagger.labels
class ModelManager(object):
models: Dict[str, Model] = {}
def get_or_load_model(self, spacy_identifier: str, language: str = None):
"""Get spacy model."""
if language is None:
language = spacy_identifier.split('_', 1)[0]
if language.lower() not in self.models:
self.models[language.lower()] = Model(spacy_model=spacy.load(spacy_identifier))
self.load_special_cases_if_available(language)
if language.lower() not in self.models:
raise ModelNotFoundException(language.lower())
return self.models[language.lower()]
def get_language_model(self, language: str):
if language.lower() not in self.models:
raise ModelNotFoundException(language.lower())
return self.models[language.lower()]
def load_special_cases_if_available(self, language):
special_cases_str = os.getenv(f"{language.upper()}_SPECIAL_CASES", "")
if special_cases_str:
model = self.models[language.lower()]
for special_case in special_cases_str.split(','):
model.spacy_model.tokenizer.add_special_case(
special_case,
[{ORTH: special_case}]
)
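# Illustrative usage sketch (not part of the original module):
#
#     manager = ModelManager()
#     manager.get_or_load_model('en_core_web_sm')  # caches the model under the 'en' key
#     model = manager.get_language_model('en')
#     print(model.get_pos_types())
#
# Tokenizer special cases can be injected through an environment variable such as
# EN_SPECIAL_CASES="e.g.,i.e." before the model is first loaded.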
``` |
{
"source": "josepaiva94/sokman",
"score": 2
} |
#### File: sokman/sok/models.py
```python
from typing import Optional, Set
from django.core.validators import RegexValidator
from django.db import models
from django.db.models.query import QuerySet
class Author(models.Model):
name = models.CharField(max_length=255, unique=True)
def __str__(self) -> str:
return self.name
class Tag(models.Model):
name = models.CharField(max_length=255, unique=True)
criteria = models.TextField(blank=True)
implies = models.ManyToManyField('Tag', related_name='implied_by', blank=True)
@property
def transitive_publications(self) -> Set['Publication']:
publications: Set[Publication] = set(self.publications.filter(exclusion_criteria__isnull=True))
for implied in self.implied_by.all():
publications = publications.union(implied.transitive_publications)
return publications
@property
def total_publications(self) -> int:
return len(self.transitive_publications)
def __str__(self) -> str:
return self.name
class ExclusionCriterion(models.Model):
name = models.CharField(max_length=255, unique=True)
description = models.TextField(blank=True, default='')
def __str__(self) -> str:
return self.name
class Meta:
verbose_name_plural = "exclusion criteria"
class Source(models.Model):
name = models.CharField(max_length=255, unique=True)
def __str__(self) -> str:
return self.name
class SearchTerm(models.Model):
name = models.CharField(max_length=255, unique=True)
def __str__(self) -> str:
return self.name
class Publication(models.Model):
cite_key = models.CharField(max_length=255, unique=True)
title = models.CharField(max_length=255)
year = models.PositiveSmallIntegerField()
peer_reviewed = models.BooleanField(null=True, default=None)
classified = models.BooleanField(default=False)
first_page = models.PositiveSmallIntegerField(blank=True, null=True, default=None)
last_page = models.PositiveSmallIntegerField(blank=True, null=True, default=None)
doi = models.CharField(max_length=255, unique=True, blank=True, null=True, default=None)
variant_of = models.ForeignKey(
'Publication',
on_delete=models.CASCADE,
related_name='variants',
blank=True,
null=True,
)
authors = models.ManyToManyField(Author, related_name='publications', through='PublicationAuthor')
sources = models.ManyToManyField(Source, related_name='publications', through='PublicationSource')
references = models.ManyToManyField('Publication', related_name='referenced_by', through='PublicationReference', through_fields=('publication', 'reference'))
exclusion_criteria = models.ManyToManyField(ExclusionCriterion, related_name='publications', blank=True)
tags = models.ManyToManyField(Tag, related_name='publications', through='PublicationTag')
@property
def is_peer_reviewed_or_cited_by_peer_reviewed(self) -> bool:
if self.peer_reviewed:
return True
for referenced_by in self.referenced_by.filter():
if referenced_by.is_peer_reviewed_or_cited_by_peer_reviewed:
return True
return False
@property
def is_relevant(self) -> bool:
return not self.exclusion_criteria.exists()
@property
def relevant_references(self) -> QuerySet:
return self.references.filter(exclusion_criteria__isnull=True)
@property
def relevant_referenced_by(self) -> QuerySet:
return self.referenced_by.filter(exclusion_criteria__isnull=True)
@property
def stage(self) -> Optional[str]:
if not self.is_relevant:
return 'excluded'
# Directly found by search term
if self.sources.exists():
return 'primary'
# Referenced by primary (backward snowballing)
# TODO make transitive
if self.referenced_by.filter(exclusion_criteria__isnull=True, sources__isnull=False).exists():
return 'secondary'
# References a primary (forward snowballing)
# TODO make transitive
if self.references.filter(exclusion_criteria__isnull=True, sources__isnull=False).exists():
return 'tertiary'
return None
def __str__(self) -> str:
return self.cite_key
class SemanticScholar(models.Model):
paper_id = models.CharField(
max_length=40,
unique=True,
validators=[
RegexValidator(r'^[a-f0-9]{40}$'),
],
)
publication = models.ForeignKey(Publication, on_delete=models.CASCADE)
def __str__(self) -> str:
return self.paper_id
class Meta:
verbose_name_plural = "semantic scholar"
# M:N Relationships
class PublicationAuthor(models.Model):
publication = models.ForeignKey(Publication, on_delete=models.CASCADE)
author = models.ForeignKey(Author, on_delete=models.CASCADE)
position = models.PositiveSmallIntegerField()
class Meta:
unique_together = (('publication', 'author'), ('publication', 'position'))
class PublicationTag(models.Model):
publication = models.ForeignKey(Publication, on_delete=models.CASCADE)
tag = models.ForeignKey(Tag, on_delete=models.CASCADE)
comment = models.CharField(max_length=255, blank=True, null=True)
class Meta:
unique_together = (('publication', 'tag'),)
class PublicationSource(models.Model):
publication = models.ForeignKey(Publication, on_delete=models.CASCADE)
source = models.ForeignKey(Source, on_delete=models.CASCADE)
search_term = models.ForeignKey(SearchTerm, on_delete=models.CASCADE)
class Meta:
unique_together = (('publication', 'source', 'search_term'),)
class PublicationReference(models.Model):
publication = models.ForeignKey(Publication, on_delete=models.CASCADE)
reference = models.ForeignKey(Publication, on_delete=models.CASCADE, related_name='cited_by')
identifier = models.CharField(max_length=255, blank=True, null=True, default=None)
@property
def is_self_cite(self) -> bool:
lhs: Set[int] = set(self.publication.authors.values_list('pk', flat=True))
rhs: Set[int] = set(self.reference.authors.values_list('pk', flat=True))
return not lhs.isdisjoint(rhs)
class Meta:
unique_together = (('publication', 'reference'), ('publication', 'identifier'))
``` |
{
"source": "JosePaniagua7/connect-processors-toolkit",
"score": 3
} |
#### File: processors_toolkit/configuration/exceptions.py
```python
from typing import Optional
class MissingConfigurationParameterError(Exception):
def __init__(self, message: str, parameter: Optional[str] = None):
self.message = message
self.parameter = parameter
super().__init__(self.message)
```
#### File: processors_toolkit/logger/__init__.py
```python
from logging import LoggerAdapter
class ExtensionLoggerAdapter(LoggerAdapter):
def process(self, msg, kwargs):
extra = kwargs.get("extra", {})
extra.update(self.extra)
kwargs['extra'] = extra
if 'id' in extra:
msg = f"{extra.get('id')} {msg}"
return msg, kwargs
```
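A small, self-contained sketch of how this adapter behaves with the standard library logger; the logger name and the request id below are placeholders:
```python
import logging

from connect.processors_toolkit.logger import ExtensionLoggerAdapter

logging.basicConfig(level=logging.INFO)
base = logging.getLogger('extension')

# Messages logged through the adapter are prefixed with the bound request id.
adapter = ExtensionLoggerAdapter(base, {'id': 'PR-0000-0000-0000-001'})
adapter.info('processing request')  # -> "PR-0000-0000-0000-001 processing request"
```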
#### File: processors_toolkit/logger/mixins.py
```python
from logging import LoggerAdapter
from typing import Union
from connect.processors_toolkit.logger import ExtensionLoggerAdapter
from connect.processors_toolkit.requests import RequestBuilder
class WithBoundedLogger:
logger: LoggerAdapter
def bind_logger(self, request: Union[RequestBuilder, dict]) -> LoggerAdapter:
"""
Binds the logger to the given request by attaching the id to
the extra data of the logger adapter.
:param request: Union[RequestBuilder, dict] The request to extract the ids
:return: LoggerAdapter
"""
from_request = {}
if isinstance(request, RequestBuilder) and request.id() is not None:
from_request.update({'id': request.id()})
elif isinstance(request, dict) and request.get('id') is not None:
from_request.update({'id': request.get('id')})
self.logger = ExtensionLoggerAdapter(
self.logger.logger,
{**self.logger.extra, **from_request},
)
return self.logger
```
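As a rough sketch of the mixin in use (assuming the controller already carries a `LoggerAdapter`, which the mixin expects but does not create; the class and identifiers below are made up):
```python
import logging
from logging import LoggerAdapter

from connect.processors_toolkit.logger.mixins import WithBoundedLogger


class DummyFlow(WithBoundedLogger):
    def __init__(self):
        # The mixin assumes a LoggerAdapter is already attached to the instance.
        self.logger = LoggerAdapter(logging.getLogger('extension'), {})


logging.basicConfig(level=logging.INFO)
flow = DummyFlow()
flow.bind_logger({'id': 'PR-0000-0000-0000-001'})
flow.logger.info('every message now carries the request id prefix')
```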
#### File: processors_toolkit/requests/__init__.py
```python
from __future__ import annotations
from typing import Any, Dict, List, Optional, Union
from connect.processors_toolkit import find_by_id, merge
from connect.processors_toolkit.requests.assets import AssetBuilder
from connect.processors_toolkit.requests.helpers import make_param, request_model
from connect.processors_toolkit.requests.tier_configurations import TierConfigurationBuilder
from connect.processors_toolkit.requests.exceptions import MissingParameterError
class RequestBuilder:
def __init__(self, request: Optional[dict] = None):
request = {} if request is None else request
if not isinstance(request, dict):
raise ValueError('Request must be a dictionary.')
self._request = request
def __repr__(self) -> str:
return '{class_name}(request={request})'.format(
class_name=self.__class__.__name__,
request=self._request,
)
def __str__(self) -> str:
return str(self._request)
def raw(self) -> dict:
return self._request
def request_type(self) -> str:
return request_model(self.raw())
def is_tier_config_request(self) -> bool:
return 'tier-config' == self.request_type()
def is_asset_request(self) -> bool:
return 'asset' == self.request_type()
def without(self, key: str) -> RequestBuilder:
self._request.pop(key, None)
return self
def id(self) -> Optional[str]:
return self._request.get('id')
def with_id(self, request_id: str) -> RequestBuilder:
self._request.update({'id': request_id})
return self
def type(self) -> Optional[str]:
return self._request.get('type')
def with_type(self, request_type: str) -> RequestBuilder:
self._request.update({'type': request_type})
return self
def status(self) -> Optional[str]:
return self._request.get('status')
def with_status(self, request_status) -> RequestBuilder:
self._request.update({'status': request_status})
return self
def marketplace(self, key: Optional[str] = None, default: Optional[Any] = None) -> Optional[Any]:
marketplace = self._request.get('marketplace')
if marketplace is None:
return None
return marketplace if key is None else marketplace.get(key, default)
def with_marketplace(self, marketplace_id: str, marketplace_name: Optional[str] = None) -> RequestBuilder:
self._request.update({'marketplace': merge(self._request.get('marketplace', {}), {
'id': marketplace_id,
'name': marketplace_name,
})})
return self
def note(self) -> Optional[str]:
return self._request.get('note')
def with_note(self, note: str) -> RequestBuilder:
self._request.update({'note': note})
return self
def reason(self) -> Optional[str]:
return self._request.get('reason')
def with_reason(self, reason: str) -> RequestBuilder:
self._request.update({'reason': reason})
return self
def assignee(self, key: Optional[str] = None, default: Optional[Any] = None) -> Optional[Any]:
assignee = self._request.get('assignee')
if assignee is None:
return None
return assignee if key is None else assignee.get(key, default)
def with_assignee(self, assignee_id: str, assignee_name: str, assignee_email: str) -> RequestBuilder:
self._request.update({'assignee': merge(self._request.get('assignee', {}), {
'id': assignee_id,
'name': assignee_name,
'email': assignee_email,
})})
return self
def params(self) -> List[Dict[Any, Any]]:
return self._request.get('params', [])
def param(self, param_id: str, key: Optional[str] = None, default: Optional[Any] = None) -> Optional[Any]:
parameter = find_by_id(self.params(), param_id)
if parameter is None:
raise MissingParameterError(f'Missing parameter {param_id}', param_id)
return parameter if key is None else parameter.get(key, default)
def with_params(self, params: List[dict]) -> RequestBuilder:
for param in params:
self.with_param(**param)
return self
def with_param(
self,
param_id: str,
value: Optional[Union[str, dict]] = None,
value_error: Optional[str] = None,
value_type: str = 'text',
) -> RequestBuilder:
try:
param = self.param(param_id)
except MissingParameterError:
param = {'id': param_id}
self._request.update({'params': self.params() + [param]})
members = make_param(param_id, value, value_error, value_type)
param.update({k: v for k, v in members.items() if v is not None})
return self
def asset(self) -> AssetBuilder:
return AssetBuilder(self._request.get('asset', {}))
def with_asset(self, asset: Union[dict, AssetBuilder]) -> RequestBuilder:
asset = asset if isinstance(asset, dict) else asset.raw()
self._request.update({'asset': asset})
return self
def tier_configuration(self) -> TierConfigurationBuilder:
return TierConfigurationBuilder(self._request.get('configuration', {}))
def with_tier_configuration(self, configuration: Union[dict, TierConfigurationBuilder]) -> RequestBuilder:
configuration = configuration if isinstance(configuration, dict) else configuration.raw()
self._request.update({'configuration': configuration})
return self
```
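The builders above are exercised at length in the tests that follow; as a condensed sketch, a purchase request could be assembled like this (all identifiers are made up):
```python
from connect.processors_toolkit.requests import RequestBuilder
from connect.processors_toolkit.requests.assets import AssetBuilder

asset = AssetBuilder()
asset.with_asset_id('AS-0000-0000-0000')

request = RequestBuilder()
request.with_id('PR-0000-0000-0000-001')
request.with_type('purchase')
request.with_status('pending')
request.with_param('CUSTOMER_EMAIL', 'someone@example.com')
request.with_asset(asset)

print(request.raw())
```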
#### File: tests/api/test_mixin_with_asset_helper.py
```python
import pytest
import os
from connect.client import ConnectClient, ClientError
from connect.devops_testing import asserts
from connect.processors_toolkit.requests import RequestBuilder
from connect.processors_toolkit.requests.assets import AssetBuilder
from connect.processors_toolkit.api.mixins import WithAssetHelper
class Helper(WithAssetHelper):
def __init__(self, client: ConnectClient):
self.client = client
BAD_REQUEST_400 = "400 Bad Request"
ASSET_REQUEST_FILE = '/request_asset.json'
def test_asset_helper_should_retrieve_an_asset_by_id(sync_client_factory, response_factory):
asset = AssetBuilder()
asset.with_asset_id('AS-9091-4850-9712')
client = sync_client_factory([
response_factory(value=asset.raw(), status=200)
])
asset = Helper(client).find_asset('AS-9091-4850-9712')
assert isinstance(asset, AssetBuilder)
assert asset.asset_id() == 'AS-9091-4850-9712'
def test_asset_helper_should_retrieve_an_asset_request_by_id(sync_client_factory, response_factory):
asset = AssetBuilder()
asset.with_asset_id('AS-9091-4850-9712')
request = RequestBuilder()
request.with_id('PR-9091-4850-9712-001')
request.with_type('purchase')
request.with_status('pending')
request.with_asset(asset)
client = sync_client_factory([
response_factory(value=request.raw(), status=200)
])
request = Helper(client).find_asset_request('PR-9091-4850-9712-001')
assert isinstance(request, RequestBuilder)
assert request.id() == 'PR-9091-4850-9712-001'
def test_asset_helper_should_approve_an_asset_request(sync_client_factory, response_factory):
asset = AssetBuilder()
asset.with_asset_id('AS-8027-7606-7082')
asset.with_asset_status('active')
request = RequestBuilder()
request.with_id('PR-8027-7606-7082-001')
request.with_type('purchase')
request.with_status('approved')
request.with_asset(asset)
client = sync_client_factory([
response_factory(value=request.raw(), status=200)
])
asset = request.asset()
asset.with_asset_status('processing')
request = RequestBuilder()
request.with_id('PR-8027-7606-7082-001')
request.with_type('purchase')
request.with_status('pending')
request.with_asset(asset)
request = Helper(client).approve_asset_request(request, 'TL-662-440-096')
assert request.id() == 'PR-8027-7606-7082-001'
asserts.request_status(request.raw(), 'approved')
def test_asset_helper_should_approve_an_already_approved_asset_request(sync_client_factory, response_factory):
exception = ClientError(
message=BAD_REQUEST_400,
status_code=400,
error_code="REQ_003",
errors=["Only pending and inquiring requests can be approved."]
)
client = sync_client_factory([
response_factory(exception=exception, status=exception.status_code)
])
asset = AssetBuilder()
asset.with_asset_id('AS-8027-7606-7082')
asset.with_asset_status('active')
request = RequestBuilder()
request.with_id('PR-8027-7606-7082-001')
request.with_type('purchase')
request.with_status('approved')
request.with_asset(asset)
request = Helper(client).approve_asset_request(request, 'TL-662-440-096')
assert request.id() == 'PR-8027-7606-7082-001'
asserts.request_status(request.raw(), 'approved')
def test_asset_helper_should_fail_approving_an_asset_request(sync_client_factory, response_factory):
exception = ClientError(
message=BAD_REQUEST_400,
status_code=400,
error_code="VAL_001",
errors=[
"effective_date: Datetime has wrong format. Use one of these formats "
"instead: YYYY-MM-DDThh:mm[:ss[.uuuuuu]][+HH:MM|-HH:MM|Z]."
]
)
client = sync_client_factory([
response_factory(exception=exception, status=exception.status_code)
])
request = RequestBuilder()
request.with_id('PR-8027-7606-7082-001')
request.with_asset(AssetBuilder())
with pytest.raises(ClientError):
Helper(client).approve_asset_request(request, 'TL-662-440-096')
def test_asset_helper_should_fail_an_asset_request(sync_client_factory, response_factory):
reason = 'I don\'t like you :P'
asset = AssetBuilder()
asset.with_asset_id('AS-8027-7606-7082')
asset.with_asset_status('processing')
request = RequestBuilder()
request.with_id('PR-8027-7606-7082-001')
request.with_type('purchase')
request.with_status('failed')
request.with_asset(asset)
request.with_reason(reason)
client = sync_client_factory([
response_factory(value=request.raw(), status=200)
])
request = RequestBuilder()
request.with_id('PR-8027-7606-7082-001')
request.with_status('pending')
request.with_asset(asset)
request = Helper(client).fail_asset_request(request, reason)
assert request.id() == 'PR-8027-7606-7082-001'
asserts.request_status(request.raw(), 'failed')
asserts.request_reason(request.raw(), reason)
def test_asset_helper_should_fail_an_already_failed_asset_request(sync_client_factory, response_factory):
exception = ClientError(
message=BAD_REQUEST_400,
status_code=400,
error_code="REQ_003",
errors=["Only pending requests can be failed."]
)
client = sync_client_factory([
response_factory(exception=exception, status=exception.status_code)
])
asset = AssetBuilder()
asset.with_asset_id('AS-8027-7606-7082')
asset.with_asset_status('processing')
request = RequestBuilder()
request.with_id('PR-8027-7606-7082-001')
request.with_type('purchase')
request.with_status('failed')
request.with_asset(asset)
request = Helper(client).fail_asset_request(request, 'It is my will')
assert request.id() == 'PR-8027-7606-7082-001'
asserts.request_status(request.raw(), 'failed')
def test_asset_helper_should_fail_failing_an_asset_request(sync_client_factory, response_factory):
exception = ClientError(
message=BAD_REQUEST_400,
status_code=400,
error_code="REQ_005",
errors=["Missed fields: reason."]
)
client = sync_client_factory([
response_factory(exception=exception, status=exception.status_code)
])
request = RequestBuilder()
request.with_id('PR-8027-7606-7082-001')
request.with_asset(AssetBuilder())
with pytest.raises(ClientError):
Helper(client).fail_asset_request(request, 'This is not going to work')
def test_asset_helper_should_inquire_an_asset_request(sync_client_factory, response_factory):
asset = AssetBuilder()
asset.with_asset_id('AS-8027-7606-7082')
asset.with_asset_status('processing')
request = RequestBuilder()
request.with_id('PR-8027-7606-7082-001')
request.with_type('purchase')
request.with_status('inquiring')
request.with_asset(asset)
client = sync_client_factory([
response_factory(value=request.raw(), status=200)
])
request = RequestBuilder()
request.with_id('PR-8027-7606-7082-001')
request.with_type('purchase')
request.with_status('pending')
request.with_asset(asset)
request = Helper(client).inquire_asset_request(request, 'TL-662-440-097')
assert request.id() == 'PR-8027-7606-7082-001'
asserts.request_status(request.raw(), 'inquiring')
def test_asset_helper_should_inquire_an_already_inquired_asset_request(sync_client_factory, response_factory):
exception = ClientError(
message=BAD_REQUEST_400,
status_code=400,
error_code="REQ_003",
errors=["Only pending requests can be moved to inquiring status."]
)
client = sync_client_factory([
response_factory(exception=exception, status=exception.status_code)
])
asset = AssetBuilder()
asset.with_asset_id('AS-8027-7606-7082')
asset.with_asset_status('processing')
request = RequestBuilder()
request.with_id('PR-8027-7606-7082-001')
request.with_type('purchase')
request.with_status('inquiring')
request.with_asset(asset)
request = Helper(client).inquire_asset_request(request, 'TL-662-440-097')
assert request.id() == 'PR-8027-7606-7082-001'
asserts.request_status(request.raw(), 'inquiring')
def test_asset_helper_should_fail_inquiring_an_asset_request(sync_client_factory, response_factory):
exception = ClientError(
message=BAD_REQUEST_400,
status_code=400,
error_code="REQ_003",
errors=["For marking request to inquiring status at least one parameter should be marked as invalid."]
)
client = sync_client_factory([
response_factory(exception=exception, status=exception.status_code)
])
request = RequestBuilder()
request.with_id('PR-8027-7606-7082-001')
request.with_asset(AssetBuilder())
with pytest.raises(ClientError):
Helper(client).inquire_asset_request(request, 'TL-662-440-097')
def test_asset_helper_should_update_a_request_asset_params(sync_client_factory, response_factory, load_json):
on_server = RequestBuilder(load_json(os.path.dirname(__file__) + ASSET_REQUEST_FILE))
after_update = RequestBuilder(load_json(os.path.dirname(__file__) + ASSET_REQUEST_FILE))
asset = after_update.asset()
asset.with_asset_param('CAT_SUBSCRIPTION_ID', 'AS-8790-0160-2196')
after_update.with_asset(asset)
client = sync_client_factory([
response_factory(value=on_server.raw(), status=200),
response_factory(value=after_update.raw(), status=200)
])
request = RequestBuilder(load_json(os.path.dirname(__file__) + ASSET_REQUEST_FILE))
asset = request.asset()
asset.with_asset_param('CAT_SUBSCRIPTION_ID', 'AS-8790-0160-2196')
request.with_asset(asset)
request = Helper(client).update_asset_parameters_request(request)
asserts.asset_param_value_equal(request.raw(), 'CAT_SUBSCRIPTION_ID', 'AS-8790-0160-2196')
def test_asset_helper_should_not_update_request_asset_params(sync_client_factory, response_factory, load_json):
request = RequestBuilder(load_json(os.path.dirname(__file__) + ASSET_REQUEST_FILE))
client = sync_client_factory([
response_factory(value=request.raw(), status=200),
])
request = Helper(client).update_asset_parameters_request(request)
asserts.asset_param_value_equal(request.raw(), 'CAT_SUBSCRIPTION_ID', '')
```
#### File: tests/api/test_mixin_with_conversation_helper.py
```python
import pytest
from connect.client import ConnectClient
from connect.processors_toolkit.api.mixins import WithConversationHelper
class Helper(WithConversationHelper):
def __init__(self, client: ConnectClient):
self.client = client
def test_conversation_helper_should_retrieve_a_conversation_by_id(sync_client_factory, response_factory):
on_server = {
"id": "CO-281-587-907-301-310-717",
"instance_id": "PR-2434-0591-2885-001",
"topic": "Fulfillment Request <PR-2434-0591-2885-001>",
"type": "conversation",
"status": "open",
"accounts": [
{
"id": "VA-610-138",
"name": "IMC Alpha Team Vendor"
}
],
"created": "2022-01-13T09:50:00+00:00"
}
client = sync_client_factory([
response_factory(value=on_server, status=200)
])
conversation = Helper(client).find_conversation('CO-281-587-907-301-310-717')
assert conversation['id'] == 'CO-281-587-907-301-310-717'
def test_conversation_helper_should_retrieve_conversations_by_criteria_all(sync_client_factory, response_factory):
on_server = [
{
"id": "CO-281-587-907-301-310-717",
"instance_id": "PR-2434-0591-2885-002",
"topic": "Fulfillment Request <PR-2434-0591-2885-002>",
"type": "conversation",
"status": "open",
"accounts": [
{
"id": "VA-610-138",
"name": "IMC Lambda Team Vendor"
}
],
"created": "2022-01-13T09:30:00+00:00"
},
{
"id": "CO-281-587-907-301-310-718",
"instance_id": "PR-2434-0591-2885-003",
"topic": "Fulfillment Request <PR-2434-0591-2885-003>",
"type": "conversation",
"status": "open",
"accounts": [
{
"id": "VA-610-138",
"name": "IMC Omicron Team Vendor"
}
],
"created": "2022-01-13T09:49:28+00:00"
}
]
client = sync_client_factory([
response_factory(value=on_server, status=200, count=len(on_server))
])
conversations = Helper(client).match_conversations({})
assert len(conversations) == 2
def test_conversation_helper_should_retrieve_conversations_by_criteria(sync_client_factory, response_factory):
on_server = [
{
"id": "CO-281-587-907-301-310-719",
"instance_id": "PR-2434-0591-2885-008",
"topic": "Fulfillment Request <PR-2434-0591-2885-008>",
"type": "conversation",
"status": "open",
"accounts": [
{
"id": "VA-610-138",
"name": "IMC Gamma Team Vendor"
}
],
"created": "2022-01-13T09:49:28+00:00"
}
]
client = sync_client_factory([
response_factory(value=on_server, status=200)
])
conversations = Helper(client).match_conversations({'id': 'CO-281-587-907-301-310-719'})
assert len(conversations) == 1
def test_conversation_helper_should_add_a_message_to_a_conversation(sync_client_factory, response_factory):
on_server = [
{
"id": "CO-281-587-907-301-310-717",
"instance_id": "PR-2434-0591-2885-009",
"topic": "Fulfillment Request <PR-2434-0591-2885-009>",
"type": "conversation",
"status": "open",
"accounts": [
{
"id": "VA-610-138",
"name": "IMC Beta Team Vendor"
}
],
"created": "2022-01-13T09:30:00+00:00"
}
]
created_message = {
"id": "ME-946-723-371-633-205-783",
"conversation": "CO-281-587-907-301-310-717",
"account": {
"id": "VA-610-138",
"name": "IMC Beta Team Vendor"
},
"created": "2022-01-13T11:30:10+00:00",
"creator": {
"id": "SU-829-966-028",
"name": "<NAME>"
},
"text": "Hello World message!",
"type": "message",
}
client = sync_client_factory([
response_factory(value=on_server, status=200),
response_factory(value=created_message, status=200)
])
message = Helper(client).add_conversation_message_by_request_id(
'PR-2434-0591-2885-009',
'Hello World message!',
)
assert message['text'] == 'Hello World message!'
def test_conversation_helper_should_fail_adding_a_message_to_a_conversation(sync_client_factory, response_factory):
client = sync_client_factory([
response_factory(value=[], status=200)
])
with pytest.raises(ValueError):
Helper(client).add_conversation_message_by_request_id(
'PR-2434-0591-2885-009',
'Hello World this message will fail!',
)
```
#### File: tests/api/test_mixin_with_helpdesk_helper.py
```python
from typing import Union
from connect.client import ConnectClient, AsyncConnectClient
from connect.processors_toolkit.requests.tier_configurations import TierConfigurationBuilder
from connect.processors_toolkit.requests.assets import AssetBuilder
from connect.processors_toolkit.api.mixins import WithHelpdeskHelper
from connect.processors_toolkit.requests import RequestBuilder
class Helper(WithHelpdeskHelper):
def __init__(self, client: Union[ConnectClient, AsyncConnectClient]):
self.client = client
def test_helper_should_create_a_helpdesk_case(sync_client_factory, response_factory):
request = RequestBuilder()
request.with_id('PR-0000-0000-0000-001')
client = sync_client_factory([
response_factory(value={
"id": "CA-000-000-000",
"subject": "PR-0000-0000-0000-001: The case subject 001.",
"description": "This is the long description of the case 001.",
"priority": 0,
"type": "technical",
"state": "pending",
"receiver": {
"account": {
"id": "PA-000-000-000",
},
},
})
])
case = Helper(client).create_helpdesk_case(
request=request,
subject="The case subject 001.",
description="This is the long description of the case 001.",
receiver_id="PA-000-000-000",
)
assert isinstance(case, dict)
def test_helper_should_create_a_helpdesk_case_for_issuer_recipients(sync_client_factory, response_factory):
request = RequestBuilder()
request.with_id('PR-0000-0000-0000-001')
client = sync_client_factory([
response_factory(value={
"id": "CA-000-000-000",
"subject": "PR-0000-0000-0000-001: The case subject 002.",
"description": "This is the long description of the case 002.",
"priority": 0,
"type": "technical",
"state": "pending",
"issuer": {
"recipients": [
{"id": "UR-630-250-903"}
]
},
"receiver": {
"account": {
"id": "PA-000-000-000",
},
},
})
])
case = Helper(client).create_helpdesk_case(
request=request,
subject="The case subject 002.",
description="This is the long description of the case 002.",
receiver_id="PA-000-000-000",
issuer_recipients=[
{"id": "UR-630-250-903"}
]
)
assert isinstance(case, dict)
def test_helper_should_create_a_helpdesk_case_for_asset_product(sync_client_factory, response_factory):
request = RequestBuilder()
request.with_id('PR-0000-0000-0000-001')
asset = AssetBuilder()
asset.with_asset_product('PRD-000-000-000')
request.with_asset(asset)
client = sync_client_factory([
response_factory(value={
"id": "CA-000-000-000",
"subject": "PR-0000-0000-0000-001: The case subject 003.",
"description": "This is the long description of the case 003.",
"priority": 0,
"type": "technical",
"state": "pending",
"product": {
"id": "PRD-000-000-000",
},
"receiver": {
"account": {
"id": "PA-000-000-000",
},
},
})
])
case = Helper(client).create_helpdesk_case(
request=request,
subject="The case subject.",
description="This is the long description of the case 003.",
receiver_id="PA-000-000-000",
)
assert isinstance(case, dict)
def test_helper_should_create_a_helpdesk_case_for_tier_config_product(sync_client_factory, response_factory):
request = RequestBuilder()
request.with_id('TCR-0000-0000-0000-001')
tier = TierConfigurationBuilder()
tier.with_tier_configuration_product('PRD-000-000-000')
request.with_tier_configuration(tier)
client = sync_client_factory([
response_factory(value={
"id": "CA-000-000-000",
"subject": "PR-0000-0000-0000-001: The case subject 004.",
"description": "This is the long description of the case 004.",
"priority": 0,
"type": "technical",
"state": "pending",
"product": {
"id": "PRD-000-000-000",
},
"receiver": {
"account": {
"id": "PA-000-000-000",
},
},
})
])
case = Helper(client).create_helpdesk_case(
request=request,
subject="The case subject.",
description="This is the long description of the case 004.",
receiver_id="PA-000-000-000",
)
assert isinstance(case, dict)
```
#### File: tests/api/test_mixin_with_tier_configuration_helper.py
```python
import pytest
import os
from connect.client import ConnectClient, ClientError
from connect.devops_testing import asserts
from connect.processors_toolkit.requests.tier_configurations import TierConfigurationBuilder
from connect.processors_toolkit.requests import RequestBuilder
from connect.processors_toolkit.api.mixins import WithTierConfigurationHelper
class Helper(WithTierConfigurationHelper):
def __init__(self, client: ConnectClient):
self.client = client
BAD_REQUEST_400 = "400 Bad Request"
ASSET_REQUEST_FILE = '/request_asset.json'
TIER_CONFIG_REQUEST_FILE = '/request_tier_configuration.json'
def test_helper_should_retrieve_a_tier_configuration_by_id(sync_client_factory, response_factory):
tier_on_server = TierConfigurationBuilder()
tier_on_server.with_tier_configuration_id('TC-9091-4850-9712')
client = sync_client_factory([
response_factory(value=tier_on_server.raw(), status=200)
])
tc = Helper(client).find_tier_configuration('TC-9091-4850-9712')
assert isinstance(tc, TierConfigurationBuilder)
assert tc.tier_configuration_id() == 'TC-9091-4850-9712'
def test_helper_should_match_all_tier_configurations(sync_client_factory, response_factory):
content = [
TierConfigurationBuilder({'id': 'TC-000-000-001'}).raw(),
TierConfigurationBuilder({'id': 'TC-000-000-002'}).raw()
]
client = sync_client_factory([
response_factory(count=len(content), value=content)
])
templates = Helper(client).match_tier_configuration({})
assert len(templates) == 2
def test_helper_should_match_tier_configurations(sync_client_factory, response_factory):
content = [
TierConfigurationBuilder({'id': 'TC-000-000-001'}).raw(),
]
client = sync_client_factory([
response_factory(count=len(content), value=content)
])
templates = Helper(client).match_tier_configuration({'id': 'TC-000-000-001'})
assert len(templates) == 1
def test_helper_should_retrieve_a_tier_configuration_request_by_id(sync_client_factory, response_factory):
tier_on_server = TierConfigurationBuilder()
tier_on_server.with_tier_configuration_id('TC-9091-4850-9712')
on_server = RequestBuilder()
on_server.with_id('TCR-9091-4850-9712-001')
on_server.with_type('setup')
on_server.with_status('pending')
on_server.with_tier_configuration(tier_on_server)
client = sync_client_factory([
response_factory(value=on_server.raw(), status=200)
])
request = Helper(client).find_tier_configuration_request('TCR-9091-4850-9712-001')
assert isinstance(request, RequestBuilder)
assert request.id() == 'TCR-9091-4850-9712-001'
def test_helper_should_match_all_tier_configuration_requests(sync_client_factory, response_factory):
content = [
RequestBuilder({'id': 'TCR-000-000-001-001'}).raw(),
RequestBuilder({'id': 'TCR-000-000-002-002'}).raw()
]
client = sync_client_factory([
response_factory(count=len(content), value=content)
])
templates = Helper(client).match_tier_configuration_request({})
assert len(templates) == 2
def test_helper_should_match_tier_configuration_requests(sync_client_factory, response_factory):
content = [
RequestBuilder({'id': 'TCR-000-000-001-001'}).raw(),
]
client = sync_client_factory([
response_factory(count=len(content), value=content)
])
templates = Helper(client).match_tier_configuration_request({'id': 'TCR-000-000-001-001'})
assert len(templates) == 1
def test_helper_should_approve_a_tier_configuration_request(sync_client_factory, response_factory):
tier_on_server = TierConfigurationBuilder()
tier_on_server.with_tier_configuration_id('TC-8027-7606-7082')
tier_on_server.with_tier_configuration_status('active')
on_server = RequestBuilder()
on_server.with_id('TCR-8027-7606-7082-001')
on_server.with_type('setup')
on_server.with_status('approved')
on_server.with_tier_configuration(tier_on_server)
client = sync_client_factory([
response_factory(value=on_server.raw(), status=200)
])
tier = on_server.tier_configuration()
tier.with_tier_configuration_status('processing')
request = RequestBuilder()
request.with_id('PR-8027-7606-7082-001')
request.with_type('setup')
request.with_status('pending')
request.with_tier_configuration(tier)
request = Helper(client).approve_tier_configuration_request(request, 'TL-662-440-096')
assert request.id() == 'TCR-8027-7606-7082-001'
asserts.request_status(request.raw(), 'approved')
def test_helper_should_approve_an_already_approved_tier_configuration_request(sync_client_factory, response_factory):
exception = ClientError(
message=BAD_REQUEST_400,
status_code=400,
error_code="TC_006",
errors=["Tier configuration request status transition is not allowed."]
)
client = sync_client_factory([
response_factory(exception=exception, status=exception.status_code)
])
tier = TierConfigurationBuilder()
tier.with_tier_configuration_id('TC-8027-7606-7082')
tier.with_tier_configuration_status('active')
request = RequestBuilder()
request.with_id('TCR-8027-7606-7082-001')
request.with_type('setup')
request.with_status('approved')
request.with_tier_configuration(tier)
request = Helper(client).approve_tier_configuration_request(request, 'TL-662-440-096')
assert request.id() == 'TCR-8027-7606-7082-001'
asserts.request_status(request.raw(), 'approved')
def test_helper_should_fail_approving_a_tier_configuration_request(sync_client_factory, response_factory):
exception = ClientError(
message=BAD_REQUEST_400,
status_code=400,
error_code="TC_012",
errors=[
"There is no tier configuration request template with such id."
]
)
client = sync_client_factory([
response_factory(exception=exception, status=exception.status_code)
])
request = RequestBuilder()
request.with_id('PR-8027-7606-7082-001')
request.with_tier_configuration(TierConfigurationBuilder())
with pytest.raises(ClientError):
Helper(client).approve_tier_configuration_request(request, 'TL-662-440-096')
def test_helper_should_fail_a_tier_configuration_request(sync_client_factory, response_factory):
reason = 'I don\'t like you :P'
tier_on_server = TierConfigurationBuilder()
tier_on_server.with_tier_configuration_id('TC-8027-7606-7082')
tier_on_server.with_tier_configuration_status('processing')
on_server = RequestBuilder()
on_server.with_id('TCR-8027-7606-7082-001')
on_server.with_type('setup')
on_server.with_status('failed')
on_server.with_tier_configuration(tier_on_server)
on_server.with_reason(reason)
client = sync_client_factory([
response_factory(value=on_server.raw(), status=200)
])
request = RequestBuilder()
request.with_id('TCR-8027-7606-7082-001')
request.with_status('pending')
request.with_tier_configuration(tier_on_server)
request = Helper(client).fail_tier_configuration_request(request, reason)
assert request.id() == 'TCR-8027-7606-7082-001'
asserts.request_status(request.raw(), 'failed')
asserts.request_reason(request.raw(), reason)
def test_helper_should_fail_an_already_failed_tier_configuration_request(sync_client_factory, response_factory):
exception = ClientError(
message=BAD_REQUEST_400,
status_code=400,
error_code="TC_006",
errors=["Tier configuration request status transition is not allowed."]
)
client = sync_client_factory([
response_factory(exception=exception, status=exception.status_code)
])
tier = TierConfigurationBuilder()
tier.with_tier_configuration_id('TC-8027-7606-7082')
tier.with_tier_configuration_status('processing')
request = RequestBuilder()
request.with_id('TCR-8027-7606-7082-001')
request.with_type('setup')
request.with_status('failed')
request.with_tier_configuration(tier)
request = Helper(client).fail_tier_configuration_request(request, 'It is my will')
assert request.id() == 'TCR-8027-7606-7082-001'
asserts.request_status(request.raw(), 'failed')
def test_helper_should_fail_failing_a_tier_configuration_request(sync_client_factory, response_factory):
exception = ClientError(
message=BAD_REQUEST_400,
status_code=400,
error_code="VAL_001",
errors=["reason: This field may not be blank."]
)
client = sync_client_factory([
response_factory(exception=exception, status=exception.status_code)
])
request = RequestBuilder()
request.with_id('TCR-8027-7606-7082-001')
request.with_tier_configuration(TierConfigurationBuilder())
with pytest.raises(ClientError):
Helper(client).fail_tier_configuration_request(request, "")
def test_helper_should_inquire_a_tier_configuration_request(sync_client_factory, response_factory):
tier = TierConfigurationBuilder()
tier.with_tier_configuration_id('AS-8027-7606-7082')
tier.with_tier_configuration_status('processing')
on_server = RequestBuilder()
on_server.with_id('TCR-8027-7606-7082-001')
on_server.with_type('setup')
on_server.with_status('inquiring')
on_server.with_tier_configuration(tier)
client = sync_client_factory([
response_factory(value=on_server.raw(), status=200)
])
request = RequestBuilder()
request.with_id('TCR-8027-7606-7082-001')
request.with_type('setup')
request.with_status('pending')
request.with_tier_configuration(tier)
request = Helper(client).inquire_tier_configuration_request(request)
assert request.id() == 'TCR-8027-7606-7082-001'
asserts.request_status(request.raw(), 'inquiring')
def test_helper_should_inquire_an_already_inquired_tier_configuration_request(sync_client_factory, response_factory):
exception = ClientError(
message=BAD_REQUEST_400,
status_code=400,
error_code="TC_006",
errors=["Tier configuration request status transition is not allowed."]
)
client = sync_client_factory([
response_factory(exception=exception, status=exception.status_code)
])
tier = TierConfigurationBuilder()
tier.with_tier_configuration_id('TC-8027-7606-7082')
tier.with_tier_configuration_status('processing')
request = RequestBuilder()
request.with_id('TCR-8027-7606-7082-001')
request.with_type('setup')
request.with_status('inquiring')
request.with_tier_configuration(tier)
request = Helper(client).inquire_tier_configuration_request(request)
assert request.id() == 'TCR-8027-7606-7082-001'
asserts.request_status(request.raw(), 'inquiring')
def test_helper_should_fail_inquiring_a_tier_configuration_request(sync_client_factory, response_factory):
exception = ClientError(
message=BAD_REQUEST_400,
status_code=400,
error_code="TC_006",
errors=["Some weird error..."]
)
client = sync_client_factory([
response_factory(exception=exception, status=exception.status_code)
])
request = RequestBuilder()
request.with_id('TCR-8027-7606-7082-001')
request.with_tier_configuration(TierConfigurationBuilder())
with pytest.raises(ClientError):
Helper(client).inquire_tier_configuration_request(request)
def test_helper_should_update_a_request_tier_configuration_params(sync_client_factory, response_factory, load_json):
on_server = RequestBuilder(load_json(os.path.dirname(__file__) + TIER_CONFIG_REQUEST_FILE))
after_update = RequestBuilder(load_json(os.path.dirname(__file__) + TIER_CONFIG_REQUEST_FILE))
after_update.with_param('TIER_SIGNATURE', 'the-tier-signature-updated')
tier = after_update.tier_configuration()
tier.with_tier_configuration_param('TIER_SIGNATURE', 'the-tier-signature-updated')
after_update.with_tier_configuration(tier)
client = sync_client_factory([
response_factory(value=on_server.raw(), status=200),
response_factory(value=after_update.raw(), status=200)
])
request = RequestBuilder(load_json(os.path.dirname(__file__) + TIER_CONFIG_REQUEST_FILE))
request.with_param('TIER_SIGNATURE', 'the-tier-signature-updated')
print(request.param('TIER_SIGNATURE'))
request = Helper(client).update_tier_configuration_parameters(request)
assert request.raw()['params'][0]['id'] == 'TIER_SIGNATURE'
assert request.raw()['params'][0]['value'] == 'the-tier-signature-updated'
asserts.tier_configuration_param_value_equal(request.raw(), 'TIER_SIGNATURE', 'the-tier-signature-updated')
def test_helper_should_not_update_request_tier_configuration_params(sync_client_factory, response_factory, load_json):
request = RequestBuilder(load_json(os.path.dirname(__file__) + TIER_CONFIG_REQUEST_FILE))
client = sync_client_factory([
response_factory(value=request.raw(), status=200),
])
request = Helper(client).update_tier_configuration_parameters(request)
assert request.raw()['params'][0]['id'] == 'TIER_SIGNATURE'
assert request.raw()['params'][0]['value'] == ''
asserts.tier_configuration_param_value_equal(request.raw(), 'TIER_SIGNATURE', '')
```
#### File: tests/dummy_extension/validations.py
```python
from logging import LoggerAdapter
from typing import Dict
from connect.client import ConnectClient
from connect.eaas.extension import ValidationResponse
from connect.processors_toolkit.application.contracts import ValidationFlow
from connect.processors_toolkit.requests import RequestBuilder
class Purchase(ValidationFlow):
def __init__(
self,
client: ConnectClient,
logger: LoggerAdapter,
config: Dict[str, str],
):
self.client = client
self.logger = logger
self.config = config
def validate(self, request: RequestBuilder) -> ValidationResponse:
self.logger.info('Everything is valid!!!')
return ValidationResponse.done(data=request.raw())
```
#### File: tests/requests/test_assets.py
```python
import pytest
from connect.processors_toolkit.requests.assets import AssetBuilder
from connect.processors_toolkit.requests.exceptions import MissingItemError
def test_asset_builder_should_raise_value_error_on_invalid_init_value():
with pytest.raises(ValueError):
AssetBuilder([])
def test_asset_builder_should_return_none_on_not_initialized_members():
a = AssetBuilder()
a.with_asset_item('ITM_ID_1', 'ITM_MPN_1')
assert a.asset_product() is None
assert a.asset_marketplace() is None
assert a.asset_connection() is None
assert a.asset_connection_provider() is None
assert a.asset_connection_vendor() is None
assert a.asset_connection_hub() is None
assert a.asset_tier('customer') is None
assert a.asset_tier('tier1') is None
assert a.asset_tier('tier2') is None
def test_asset_builder_should_remove_required_member_from_asset():
a = AssetBuilder()
a.with_asset_id('PR-0000-0000-0000-100')
a.without('id')
assert a.asset_id() is None
def test_asset_builder_should_raise_exception_on_adding_parameter_to_missing_asset_item():
with pytest.raises(MissingItemError):
a = AssetBuilder()
a.with_asset_item('ITEM_ID_001', 'ITEM_MPN_001')
a.asset_item_param('ITEM_ID_001', 'PARAM_ID', 'The value')
with pytest.raises(MissingItemError):
a = AssetBuilder()
a.with_asset_item_param('MISSING', 'PARAM_ID', 'The value')
def test_asset_builder_should_build_successfully_a_valid_asset():
a = AssetBuilder()
a.with_asset_id('AS-001')
a.with_asset_status('active')
a.with_asset_external_id('123456789')
a.with_asset_external_uid('9fb50525-a4a4-41a7-ace0-dc3c73796d32')
a.with_asset_product('PRD-000-000-100', 'disabled')
a.with_asset_tier_customer('random')
a.with_asset_tier_tier1('random')
a.with_asset_tier_tier2('random')
a.with_asset_tier_tier2({'contact_info': {'country': 'ES'}})
a.with_asset_marketplace('MP-12345')
a.with_asset_connection(
connection_id='CT-0000-0000-0000',
connection_type='test',
provider={"id": "PA-800-926", "name": "Gamma Team Provider"},
vendor={"id": "VA-610-138", "name": "Gamma Team Vendor"},
hub={"id": "HB-0000-0000", "name": "None"},
)
a.with_asset_params([
{'param_id': 'PARAM_ID_001', 'value': 'VALUE_001'},
{'param_id': 'PARAM_ID_002', 'value': 'VALUE_002'},
{'param_id': 'PARAM_ID_003', 'value': '', 'value_error': 'Some value error'},
{'param_id': 'PARAM_ID_001', 'value': 'VALUE_001_UPDATED'},
])
a.with_asset_items([
{
'item_id': 'ITEM_ID_001',
'item_mpn': 'ITEM_MPN_001',
'params': [{'param_id': 'SOME_ITEM_PARAM_ID', 'value': 'ITEM_ID_001_PARAM_VALUE'}]
},
{
'item_id': 'ITEM_ID_001',
'item_mpn': 'ITEM_MPN_001_UPDATED',
},
{
'item_id': 'ITEM_ID_001',
'item_mpn': 'ITEM_MPN_001_UPDATED',
'params': [{'param_id': 'SOME_ITEM_PARAM_ID', 'value': 'ITEM_ID_001_PARAM_VALUE_UPDATED'}]
}
])
a.with_asset_configuration_params([
{'param_id': 'AS_CFG_ID_001', 'value': 'Cfg value', 'value_error': 'Cfg error value'},
{'param_id': 'AS_CFG_ID_001', 'value': 'Cfg value updated', 'value_error': 'Cfg error value updated'},
])
raw = a.raw()
assert raw['id'] == a.asset_id() == 'AS-001'
assert raw['status'] == a.asset_status() == 'active'
assert raw['external_id'] == a.asset_external_id() == '123456789'
assert raw['external_uid'] == a.asset_external_uid() == '9fb50525-a4a4-41a7-ace0-dc3c73796d32'
assert raw['marketplace']['id'] == a.asset_marketplace('id') == 'MP-12345'
assert a.asset_tier_customer('id') is None
assert raw['tiers']['customer']['external_id'] == a.asset_tier_customer('external_id')
assert raw['tiers']['customer']['external_uid'] == a.asset_tier_customer('external_uid')
assert a.asset_tier_tier1('id') is None
assert raw['tiers']['tier1']['external_id'] == a.asset_tier_tier1('external_id')
assert raw['tiers']['tier1']['external_uid'] == a.asset_tier_tier1('external_uid')
assert a.asset_tier_tier2('id') is None
assert raw['tiers']['tier2']['external_id'] == a.asset_tier_tier2('external_id')
assert raw['tiers']['tier2']['external_uid'] == a.asset_tier_tier2('external_uid')
assert raw['tiers']['tier2']['contact_info']['country'] == a.asset_tier_tier2('contact_info', {}).get('country')
assert a.asset_tier_tier2('contact_info', {}).get('country') == 'ES'
assert raw['connection']['id'] == a.asset_connection('id') == 'CT-0000-0000-0000'
assert raw['connection']['type'] == a.asset_connection('type') == 'test'
assert raw['connection']['provider']['id'] == a.asset_connection_provider('id') == 'PA-800-926'
assert raw['connection']['provider']['name'] == a.asset_connection_provider('name') == 'Gamma Team Provider'
assert raw['connection']['vendor']['id'] == a.asset_connection_vendor('id') == 'VA-610-138'
assert raw['connection']['vendor']['name'] == a.asset_connection_vendor('name') == 'Gamma Team Vendor'
assert raw['connection']['hub']['id'] == a.asset_connection_hub('id') == 'HB-0000-0000'
assert raw['connection']['hub']['name'] == a.asset_connection_hub('name') == 'None'
assert raw['product']['id'] == a.asset_product('id') == 'PRD-000-000-100'
assert raw['product']['status'] == a.asset_product('status') == 'disabled'
assert len(a.asset_params()) == 3
assert raw['params'][0]['id'] == a.asset_param('PARAM_ID_001', 'id') == 'PARAM_ID_001'
assert raw['params'][0]['value'] == a.asset_param('PARAM_ID_001', 'value') == 'VALUE_001_UPDATED'
assert raw['params'][1]['id'] == a.asset_param('PARAM_ID_002', 'id') == 'PARAM_ID_002'
assert raw['params'][1]['value'] == a.asset_param('PARAM_ID_002', 'value') == 'VALUE_002'
assert raw['params'][2]['id'] == a.asset_param('PARAM_ID_003', 'id') == 'PARAM_ID_003'
assert raw['params'][2]['value'] == a.asset_param('PARAM_ID_003', 'value') == ''
assert raw['params'][2]['value_error'] == a.asset_param('PARAM_ID_003', 'value_error') == 'Some value error'
assert len(a.asset_items()) == 1
assert raw['items'][0]['id'] == a.asset_item('ITEM_ID_001', 'id') == 'ITEM_ID_001'
assert raw['items'][0]['mpn'] == a.asset_item('ITEM_ID_001', 'mpn') == 'ITEM_MPN_001_UPDATED'
assert len(a.asset_item_params('ITEM_ID_001')) == 1
assert raw['items'][0]['params'][0]['id'] == a.asset_item_param(
'ITEM_ID_001', 'SOME_ITEM_PARAM_ID', 'id') == 'SOME_ITEM_PARAM_ID'
assert raw['items'][0]['params'][0]['value'] == a.asset_item_param(
'ITEM_ID_001', 'SOME_ITEM_PARAM_ID', 'value') == 'ITEM_ID_001_PARAM_VALUE_UPDATED'
assert raw['configuration']['params'][0]['id'] == a.asset_configuration_param(
'AS_CFG_ID_001', 'id') == 'AS_CFG_ID_001'
assert raw['configuration']['params'][0]['value'] == a.asset_configuration_param(
'AS_CFG_ID_001', 'value') == 'Cfg value updated'
assert raw['configuration']['params'][0]['value_error'] == a.asset_configuration_param(
'AS_CFG_ID_001', 'value_error') == 'Cfg error value updated'
```
#### File: connect-processors-toolkit/tests/test_application.py
```python
import pytest
from connect.eaas.extension import ProcessingResponse
from connect.processors_toolkit.application import Application, Dependencies, DependencyBuildingFailure
from connect.processors_toolkit.application.contracts import ProcessingFlow
from connect.processors_toolkit.requests import RequestBuilder
def test_application_should_make_required_flow_controller(sync_client_factory, logger):
class MySimpleExtension(Application):
pass
class SampleFlow(ProcessingFlow):
def __init__(self, logger):
self.logger = logger
def process(self, request: RequestBuilder) -> ProcessingResponse:
return ProcessingResponse.done()
client = sync_client_factory([])
extension = MySimpleExtension(client, logger, {'key': 'value'})
flow = extension.container.get(SampleFlow)
assert isinstance(flow, SampleFlow)
assert isinstance(flow, ProcessingFlow)
def test_application_should_make_required_flow_controller_with_custom_dependencies(sync_client_factory, logger):
class MyExtensionWithDependencies(Application):
def dependencies(self) -> Dependencies:
return Dependencies().to_class('api_client', SomeAPIClient)
class SomeAPIClient:
def __init__(self, api_key):
self.api_key = api_key
class SampleFlowWithService(ProcessingFlow):
def __init__(self, api_client):
self.api_client = api_client
def process(self, request: RequestBuilder) -> ProcessingResponse:
return ProcessingResponse.done()
client = sync_client_factory([])
config = {'API_KEY': 'my-secret-api-key'}
extension = MyExtensionWithDependencies(client, logger, config)
flow = extension.container.get(SampleFlowWithService)
assert isinstance(flow, SampleFlowWithService)
assert isinstance(flow, ProcessingFlow)
assert flow.api_client.api_key == config['API_KEY']
def test_application_should_raise_exception_on_building_dependencies(sync_client_factory, logger):
class MyExtensionWithDependencies(Application):
pass
class SampleFlowWithService(ProcessingFlow):
def __init__(self, non_registered_dependency):
self.non_registered_dependency = non_registered_dependency
def process(self, request: RequestBuilder) -> ProcessingResponse:
return ProcessingResponse.done()
client = sync_client_factory([])
config = {}
extension = MyExtensionWithDependencies(client, logger, config)
with pytest.raises(DependencyBuildingFailure):
extension.container.get(SampleFlowWithService)
``` |
{
"source": "JosePazNoguera/pam",
"score": 3
} |
#### File: pam/pam/core.py
```python
import logging
import random
import pickle
import copy
from collections import defaultdict
import pam.activity as activity
import pam.plot as plot
from pam import write
from pam import PAMSequenceValidationError, PAMTimesValidationError, PAMValidationLocationsError
from pam import variables
class Population:
def __init__(self, name: str = None):
self.name = name
self.logger = logging.getLogger(__name__)
self.households = {}
def add(self, household):
if not isinstance(household, Household):
raise UserWarning(f"Expected instance of Household, not: {type(household)}")
self.households[household.hid] = household
def get(self, hid, default=None):
return self.households.get(hid, default)
def __getitem__(self, hid):
return self.households[hid]
def __iter__(self):
for hid, household in self.households.items():
yield hid, household
def people(self):
"""
Iterator over the people in the population; yields (hid, pid, Person) tuples.
"""
for hid, household in self.households.items():
for pid, person in household.people.items():
yield hid, pid, person
@property
def population(self):
self.logger.info("Returning un weighted person count.")
return len([1 for hid, pid, person in self.people()])
def __len__(self):
return self.population
def __contains__(self, other):
if isinstance(other, Household):
for _, hh in self:
if other == hh:
return True
return False
if isinstance(other, Person):
for _, _, person in self.people():
if other == person:
return True
return False
raise UserWarning(f"Cannot check if population contains object type: {type(other)}, please provide a Household or Person.")
def __eq__(self, other):
"""
Check equality of two populations. Equality is based on equal attributes and activity plans
of all households and their members; identifiers (e.g. hid and pid) are disregarded.
"""
if not isinstance(other, Population):
self.logger.warning(f"Cannot compare population to non population: ({type(other)}), please provide a Population.")
return False
if not len(self) == len(other):
return False
if not self.stats == other.stats:
return False
used = []
for _, hh in self:
for hid, hh2 in other:
if hid in used:
continue
if hh == hh2:
used.append(hid)
break
return False
return True
@property
def num_households(self):
return len(self.households)
@property
def size(self):
return self.freq
@property
def freq(self):
frequencies = [hh.freq for hh in self.households.values()]
if None in frequencies:
return None
return sum(frequencies)
@property
def activity_classes(self):
acts = set()
for _, _, p in self.people():
acts.update(p.activity_classes)
return acts
@property
def mode_classes(self):
modes = set()
for _, _, p in self.people():
modes.update(p.mode_classes)
return modes
@property
def subpopulations(self):
subpopulations = set()
for _, _, p in self.people():
subpopulations.add(p.attributes.get("subpopulation"))
return subpopulations
@property
def attributes(self):
attributes = defaultdict(set)
for _, _, p in self.people():
for k, v in p.attributes.items():
attributes[k].add(v)
for k, v in attributes.items():
if len(v) > 25:
attributes[k] = None
return dict(attributes)
def random_household(self):
return self.households[random.choice(list(self.households))]
def random_person(self):
hh = self.random_household()
return hh.random_person()
@property
def stats(self):
num_households = 0
num_people = 0
num_activities = 0
num_legs = 0
for hid, household in self:
num_households += 1
for pid, person in household:
num_people += 1
num_activities += person.num_activities
num_legs += person.num_legs
return {
'num_households': num_households,
'num_people': num_people,
'num_activities': num_activities,
'num_legs': num_legs,
}
def build_travel_geodataframe(self, **kwargs):
"""
Builds geopandas.GeoDataFrame for travel Legs found for all agents in the Population.
:param kwargs: arguments for plot.build_person_travel_geodataframe,
from_epsg: coordinate system the plans are currently in
to_epsg: coordinate system you want the geo dataframe to be projected to, optional, you need to specify
from_epsg as well to use this.
:return: geopandas.GeoDataFrame with columns for household id (hid) and person id (pid)
"""
gdf = None
for hid, household in self.households.items():
_gdf = household.build_travel_geodataframe(**kwargs)
if gdf is None:
gdf = _gdf
else:
gdf = gdf.append(_gdf)
gdf = gdf.sort_values(['hid', 'pid', 'seq']).reset_index(drop=True)
return gdf
def plot_travel_plotly(self, epsg: str = 'epsg:4326', **kwargs):
"""
Uses plotly's Scattermapbox to plot agents' travel
:param epsg: coordinate system the plans spatial information is in, e.g. 'epsg:27700'
:param kwargs: arguments for plot.plot_travel_plans
:param gdf: geopandas.GeoDataFrame generated by build_person_travel_geodataframe
:param groupby: optional argument for splitting traces in the plot
:param colour_by: argument for specifying what the colour should correspond to in the plot, travel mode by
default
        :param cmap: optional argument, useful to pass if generating a number of plots and want to keep the
        colour scheme consistent
:param mapbox_access_token: required to generate the plot
https://docs.mapbox.com/help/how-mapbox-works/access-tokens/
:return:
"""
return plot.plot_travel_plans(
gdf=self.build_travel_geodataframe(from_epsg=epsg, to_epsg="epsg:4326"),
**kwargs
)
def fix_plans(
self,
crop: bool = True,
times = True,
locations = True
):
for _, _, person in self.people():
if crop:
person.plan.crop()
if times:
person.plan.fix_time_consistency()
if locations:
person.plan.fix_location_consistency()
def validate(self):
for hid, pid, person in self.people():
person.validate()
def print(self):
print(self)
for _, household in self:
household.print()
def pickle(self, path: str):
with open(path, 'wb') as file:
pickle.dump(self, file)
def to_csv(
self,
dir: str,
crs = None,
to_crs: str = "EPSG:4326"
):
write.to_csv(self, dir, crs, to_crs)
def __str__(self):
return f"Population: {self.population} people in {self.num_households} households."
def __iadd__(self, other):
"""
Unsafe addition with assignment (no guarantee of unique ids).
"""
self.logger.debug("Note that this method requires all identifiers from populations being combined to be unique.")
if isinstance(other, Population):
for hid, hh in other.households.items():
self.households[hid] = copy.deepcopy(hh)
return self
if isinstance(other, Household):
self.households[other.hid] = copy.deepcopy(other)
return self
if isinstance(other, Person):
self.households[other.pid] = Household(other.pid)
self.households[other.pid].people[other.pid] = copy.deepcopy(other)
return self
raise TypeError(f"Object for addition must be a Population Household or Person object, not {type(other)}")
def reindex(self, prefix: str):
"""
Safely reindex all household and person identifiers in population using a prefix.
"""
for hid in list(self.households):
hh = self.households[hid]
new_hid = prefix + str(hid)
if new_hid in self.households:
raise KeyError(f"Duplicate household identifier (hid): {new_hid}")
hh.reindex(prefix)
self.households[new_hid] = hh
del self.households[hid]
def combine(self, other, prefix=""):
"""
Safe addition with assignment by adding a prefix to create unique pids and hids.
"""
prefix = str(prefix)
if isinstance(other, Population):
other.reindex(prefix)
self += other
return None
if isinstance(other, Household):
other.reindex(prefix)
self += other
return None
if isinstance(other, Person):
hh = Household(other.pid) # we create a new hh for single person
hh.add(other)
hh.reindex(prefix)
self += hh
return None
raise TypeError(f"Object for addition must be a Population Household or Person object, not {type(other)}")
def sample_locs(self, sampler, long_term_activities = None, joint_trips_prefix = 'escort_'):
"""
        WIP: Sample household plan locations using a sampler.
        The sampler uses activity types and areas to sample locations. Note that households share
        locations for activities of the same type within the same area. Trivially this includes the
        household home location, but also, for example, shopping activities if they are in the same area.
        We treat escort activities (ie those prefixed by "escort_") as the escorted activity. For
        example, the sampler treats "escort_education" and "education" equally. Note that this shared
        sampling of activity locations models shared facilities, but does not explicitly infer or
        model shared transport. For example, there is no consideration of whether trips to shared
        locations take place at the same time or start from the same locations.
        After sampling, Location objects are shared between shared activity locations and the
        corresponding trip start and end locations. These objects are mutable, so care must be taken
        when making changes, as they will affect all other persons' shared locations in the household.
        Often this behaviour is expected; for example, if we change the location of the household home
        activity, all persons' home activities are affected.
        TODO - add method to all core classes
        :param list long_term_activities: a list of activities for which location is only assigned once (per zone)
        :param str joint_trips_prefix: a purpose prefix used to identify escort/joint trips
"""
if long_term_activities is None:
long_term_activities = variables.LONG_TERM_ACTIVITIES
for _, household in self.households.items():
home_loc = activity.Location(
area=household.location.area,
loc=sampler.sample(household.location.area, 'home')
)
unique_locations = {(household.location.area, 'home'): home_loc}
for __, person in household.people.items():
for act in person.activities:
# remove escort prefix from activity types.
if act.act[:len(joint_trips_prefix)] == joint_trips_prefix:
target_act = act.act[(len(joint_trips_prefix)):]
else:
target_act = act.act
if (act.location.area, target_act) in unique_locations:
location = unique_locations[(act.location.area, target_act)]
act.location = location
else:
location = activity.Location(
area=act.location.area,
loc=sampler.sample(act.location.area, target_act)
)
if target_act in long_term_activities:
# one location per zone for long-term choices (only)
# short-term activities, such as shopping can visit multiple locations in the same zone
unique_locations[(act.location.area, target_act)] = location
act.location = location
                # assign the sampled activity locations to the corresponding trip start and end locations.
for idx in range(person.plan.length):
component = person.plan[idx]
if isinstance(component, activity.Leg):
component.start_location = person.plan[idx-1].location
component.end_location = person.plan[idx+1].location
def sample_locs_complex(self, sampler, long_term_activities = None, joint_trips_prefix = 'escort_'):
"""
        Extends the sample_locs method to enable more complex, rules-based sampling.
        Keeps track of the last location and transport mode, to apply distance- and mode-based sampling rules.
        It is generally slower than sample_locs, as it loops through both activities and legs.
        :param list long_term_activities: a list of activities for which location is only assigned once (per zone)
        :param str joint_trips_prefix: a purpose prefix used to identify escort/joint trips
"""
if long_term_activities is None:
long_term_activities = variables.LONG_TERM_ACTIVITIES
for _, household in self.households.items():
home_loc = activity.Location(
area=household.location.area,
loc=sampler.sample(household.location.area, 'home', mode = None, previous_duration=None, previous_loc = None)
)
mode = None
unique_locations = {(household.location.area, 'home'): home_loc}
for _, person in household.people.items():
mode = None
previous_duration = None
previous_loc = None
for idx, component in enumerate(person.plan):
# loop through all plan elements
if isinstance(component, activity.Leg):
mode = component.mode # keep track of last mode
previous_duration = component.duration
elif isinstance(component, activity.Activity):
act = component
# remove "escort_" from activity types.
# TODO: model joint trips
if act.act[:len(joint_trips_prefix)] == joint_trips_prefix:
target_act = act.act[(len(joint_trips_prefix)):]
else:
target_act = act.act
if (act.location.area, target_act) in unique_locations:
location = unique_locations[(act.location.area, target_act)]
act.location = location
else:
location = activity.Location(
area=act.location.area,
loc=sampler.sample(act.location.area, target_act, mode = mode, previous_duration = previous_duration, previous_loc = previous_loc)
)
if target_act in long_term_activities:
unique_locations[(act.location.area, target_act)] = location
act.location = location
previous_loc = location.loc # keep track of previous location
                # assign the sampled activity locations to the corresponding trip start and end locations.
for idx in range(person.plan.length):
component = person.plan[idx]
if isinstance(component, activity.Leg):
component.start_location = person.plan[idx-1].location
component.end_location = person.plan[idx+1].location
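# --- Illustrative only, not part of pam/core.py: a minimal sketch of the sampler interface
# --- assumed by Population.sample_locs and sample_locs_complex above. sample_locs calls
# --- sampler.sample(area, act); sample_locs_complex additionally passes mode,
# --- previous_duration and previous_loc as keyword arguments.
class ExampleLocationSampler:
    def sample(self, area, act, **kwargs):
        # a real sampler would return a point/facility for the (area, activity) pair;
        # returning None leaves the sampled Location defined by its area only
        return None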
class Household:
logger = logging.getLogger(__name__)
    def __init__(self, hid, attributes=None, freq=None, area=None, loc=None):
        self.hid = hid
        self.people = {}
        # avoid a shared mutable default argument for attributes
        self.attributes = attributes if attributes is not None else {}
self.hh_freq=freq
if area is not None or loc is not None:
self._location = activity.Location(area=area, loc=loc)
else:
self._location = None
def add(self, person):
if not isinstance(person, Person):
raise UserWarning(f"Expected instance of Person, not: {type(person)}")
# person.finalise()
self.people[person.pid] = person
def get(self, pid, default=None):
return self.people.get(pid, default)
def random_person(self):
return self.people[random.choice(list(self.people))]
def __getitem__(self, pid):
return self.people[pid]
def __iter__(self):
for pid, person in self.people.items():
yield pid, person
def __len__(self):
return len(self.people)
def __contains__(self, other_person):
if not isinstance(other_person, Person):
raise UserWarning(f"Cannot check if household contains object type: {type(other_person)}, please provide Person.")
for _, person in self:
if other_person == person:
return True
return False
def __eq__(self, other):
"""
        Check for equality of two households. Equality is based on equal attributes and activity plans
        of the household and household members. Identifiers (eg hid and pid) are disregarded unless they
are included in attributes.
"""
if not isinstance(other, Household):
self.logger.warning(f"Cannot compare household to non household: ({type(other)}).")
return False
if not self.attributes == other.attributes:
return False
if not len(self) == len(other):
return False
used = []
for _, person in self:
for pid, person2 in other:
if pid in used:
continue
if person == person2:
used.append(pid)
break
            else:
                return False
return True
@property
def location(self):
if self._location is not None:
return self._location
for person in self.people.values():
if person.home is not None:
return person.home
self.logger.warning(f"Failed to find location for household: {self.hid}")
@property
def activity_classes(self):
acts = set()
for _, p in self:
acts.update(p.activity_classes)
return acts
@property
def mode_classes(self):
modes = set()
for _, p in self:
modes.update(p.mode_classes)
return modes
@property
def freq(self):
"""
Return hh_freq, else if None, return the average frequency of household members.
# TODO note this assumes we are basing hh freq on person freq.
# TODO replace this with something better.
"""
if self.hh_freq:
return self.hh_freq
if not self.people:
return None
return self.av_person_freq
def set_freq(self, freq):
self.hh_freq = freq
@property
def av_person_freq(self):
if not self.people:
return None
frequencies = [person.freq for person in self.people.values()]
if None in frequencies:
return None
return sum(frequencies) / len(frequencies)
def fix_plans(self, crop=True, times=True, locations=True):
for _, person in self:
if crop:
person.plan.crop()
if times:
person.plan.fix_time_consistency()
if locations:
person.plan.fix_location_consistency()
def shared_activities(self):
shared_activities = []
household_activities = []
        for pid, person in self.people.items():
            # use "act" to avoid shadowing the imported activity module
            for act in person.activities:
                if act.isin_exact(household_activities):
                    shared_activities.append(act)
                if not act.isin_exact(household_activities):
                    household_activities.append(act)
return shared_activities
def print(self):
print(self)
print(self.attributes)
for _, person in self:
person.print()
def size(self):
return len(self.people)
def plot(self, **kwargs):
plot.plot_household(self, **kwargs)
def build_travel_geodataframe(self, **kwargs):
"""
Builds geopandas.GeoDataFrame for travel Legs found for agents within a Household
:param kwargs: arguments for plot.build_person_travel_geodataframe,
from_epsg: coordinate system the plans are currently in
to_epsg: coordinate system you want the geo dataframe to be projected to, optional, you need to specify
from_epsg as well to use this.
:return: geopandas.GeoDataFrame with columns for household id (hid) and person id (pid)
"""
gdf = None
for _, person in self:
_gdf = person.build_travel_geodataframe(**kwargs)
_gdf['hid'] = self.hid
if gdf is None:
gdf = _gdf
else:
gdf = gdf.append(_gdf)
gdf = gdf.sort_values(['pid', 'seq']).reset_index(drop=True)
return gdf
def plot_travel_plotly(self, epsg='epsg:4326', **kwargs):
"""
Uses plotly's Scattermapbox to plot agents' travel
:param epsg: coordinate system the plans spatial information is in, e.g. 'epsg:27700'
:param kwargs: arguments for plot.plot_travel_plans
:param gdf: geopandas.GeoDataFrame generated by build_person_travel_geodataframe
:param groupby: optional argument for splitting traces in the plot
:param colour_by: argument for specifying what the colour should correspond to in the plot, travel mode by
default
        :param cmap: optional argument, useful to pass if generating a number of plots and want to keep the
        colour scheme consistent
:param mapbox_access_token: required to generate the plot
https://docs.mapbox.com/help/how-mapbox-works/access-tokens/
:return:
"""
return plot.plot_travel_plans(
gdf=self.build_travel_geodataframe(from_epsg=epsg, to_epsg="epsg:4326"),
**kwargs
)
def __str__(self):
return f"Household: {self.hid}"
def __iadd__(self, other):
"""
Unsafe addition with assignment (no guarantee of unique ids).
"""
self.logger.debug("Note that this method requires all identifiers from populations being combined to be unique.")
if isinstance(other, Household):
for pid, person in other.people.items():
self.people[pid] = copy.deepcopy(person)
return self
if isinstance(other, Person):
self.people[other.pid] = copy.deepcopy(other)
return self
raise TypeError(f"Object for addition must be a Household or Person object, not {type(other)}")
def reindex(self, prefix: str):
"""
Safely reindex all person identifiers in household using a prefix.
"""
self.hid = prefix + self.hid
for pid in list(self.people):
person = self.people[pid]
new_pid = prefix + pid
if new_pid in self.people:
raise KeyError(f"Duplicate person identifier (pid): {new_pid}")
person.reindex(prefix)
self.people[new_pid] = person
del self.people[pid]
def pickle(self, path):
with open(path, 'wb') as file:
pickle.dump(self, file)
class Person:
logger = logging.getLogger(__name__)
    def __init__(self, pid, freq=None, attributes=None, home_area=None):
        self.pid = pid
        self.person_freq = freq
        # avoid a shared mutable default argument for attributes
        self.attributes = attributes if attributes is not None else {}
self.plan = activity.Plan(home_area=home_area)
self.home_area = home_area
@property
def freq(self):
"""
Return person_freq, else if None, return the average frequency of legs.
TODO consider passing parent hh on creation so that we can retrieve hh freq if required.
"""
if self.person_freq:
return self.person_freq
return self.av_trip_freq
def set_freq(self, freq):
self.person_freq = freq
@property
def av_trip_freq(self):
if not self.num_legs:
return None
frequencies = [leg.freq for leg in self.legs]
if None in frequencies:
return None
return sum(frequencies) / len(frequencies)
@property
def av_activity_freq(self):
frequencies = [act.freq for act in self.activities]
if None in frequencies:
return None
return sum(frequencies) / len(frequencies)
@property
def home(self):
if self.plan:
return self.plan.home
@property
def activities(self):
if self.plan:
for act in self.plan.activities:
yield act
@property
def num_activities(self):
if self.plan:
return len(list(self.activities))
return 0
@property
def legs(self):
if self.plan:
for leg in self.plan.legs:
yield leg
@property
def num_legs(self):
if self.plan:
return len(list(self.legs))
return 0
@property
def length(self):
return len(self.plan)
def __len__(self):
return self.length
def __getitem__(self, val):
return self.plan[val]
def __iter__(self):
for component in self.plan:
yield component
def __eq__(self, other):
"""
Check for equality of two persons, equality is based on equal attributes and activity plans.
Identifiers (eg pid) are disregarded unless they are included in attributes.
"""
if not isinstance(other, Person):
self.logger.warning(f"Cannot compare person to non person: ({type(other)})")
return False
if not self.attributes == other.attributes:
return False
if not self.plan == other.plan:
return False
return True
@property
def activity_classes(self):
return self.plan.activity_classes
@property
def mode_classes(self):
return self.plan.mode_classes
@property
def has_valid_plan(self):
"""
Check sequence of Activities and Legs.
        :return: bool
"""
return self.plan.is_valid
def validate(self):
"""
Validate plan.
"""
self.plan.validate()
return True
def validate_sequence(self):
"""
Check sequence of Activities and Legs.
:return: True
"""
if not self.plan.valid_sequence:
raise PAMSequenceValidationError(f"Person {self.pid} has invalid plan sequence")
return True
def validate_times(self):
"""
Check sequence of Activity and Leg times.
:return: True
"""
if not self.plan.valid_times:
raise PAMTimesValidationError(f"Person {self.pid} has invalid plan times")
return True
def validate_locations(self):
"""
Check sequence of Activity and Leg locations.
:return: True
"""
if not self.plan.valid_locations:
raise PAMValidationLocationsError(f"Person {self.pid} has invalid plan locations")
return True
@property
def closed_plan(self):
"""
Check if plan starts and stops at the same facility (based on activity and location)
:return: Bool
"""
return self.plan.closed
@property
def first_activity(self):
return self.plan.first
@property
def home_based(self):
return self.plan.home_based
def add(self, p):
"""
Safely add a new component to the plan.
:param p:
:return:
"""
self.plan.add(p)
def finalise(self):
"""
Add activity end times based on start time of next activity.
"""
self.plan.finalise_activity_end_times()
def fix_plan(self, crop=True, times=True, locations=True):
if crop:
self.plan.crop()
if times:
self.plan.fix_time_consistency()
if locations:
self.plan.fix_location_consistency()
def clear_plan(self):
self.plan.clear()
def print(self):
print(self)
print(self.attributes)
self.plan.print()
def plot(self, **kwargs):
plot.plot_person(self, **kwargs)
def reindex(self, prefix: str):
self.pid = prefix + self.pid
def build_travel_geodataframe(self, **kwargs):
"""
Builds geopandas.GeoDataFrame for Person's Legs
:param kwargs: arguments for plot.build_person_travel_geodataframe,
from_epsg: coordinate system the plans are currently in
to_epsg: coordinate system you want the geo dataframe to be projected to, optional, you need to specify
from_epsg as well to use this.
:return: geopandas.GeoDataFrame with columns for person id (pid)
"""
return plot.build_person_travel_geodataframe(self, **kwargs)
def plot_travel_plotly(self, epsg='epsg:4326', **kwargs):
"""
Uses plotly's Scattermapbox to plot agents' travel
:param epsg: coordinate system the plans spatial information is in, e.g. 'epsg:27700'
:param kwargs: arguments for plot.plot_travel_plans
:param gdf: geopandas.GeoDataFrame generated by build_person_travel_geodataframe
:param groupby: optional argument for splitting traces in the plot
:param colour_by: argument for specifying what the colour should correspond to in the plot, travel mode by
default
        :param cmap: optional argument, useful to pass if generating a number of plots and want to keep the
        colour scheme consistent
:param mapbox_access_token: required to generate the plot
https://docs.mapbox.com/help/how-mapbox-works/access-tokens/
:return:
"""
return plot.plot_travel_plans(
gdf=self.build_travel_geodataframe(from_epsg=epsg, to_epsg="epsg:4326"),
**kwargs
)
def __str__(self):
return f"Person: {self.pid}"
def remove_activity(self, seq):
"""
Remove an activity from plan at given seq.
Check for wrapped removal.
Return (adjusted) idx of previous and subsequent activities as a tuple
:param seq:
:return: tuple
"""
return self.plan.remove_activity(seq)
def move_activity(self, seq, default='home'):
"""
Move an activity from plan at given seq to default location
:param seq:
:param default: 'home' or pam.activity.Location
:return: None
"""
return self.plan.move_activity(seq, default)
def fill_plan(self, p_idx, s_idx, default='home'):
"""
Fill a plan after Activity has been removed.
:param p_idx: location of previous Activity
:param s_idx: location of subsequent Activity
:param default:
:return: bool
"""
return self.plan.fill_plan(p_idx, s_idx, default=default)
def stay_at_home(self):
self.plan.stay_at_home()
def pickle(self, path):
with open(path, 'wb') as file:
pickle.dump(self, file)
```
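For orientation, here is a brief usage sketch of the core classes above. It is illustrative only: the identifiers, times and areas are invented, and it assumes the `pam.activity`, `pam.utils` and `pam.variables` modules referenced by the tests later in this listing are importable.
```python
from pam.core import Population, Household, Person
from pam.activity import Activity, Leg
from pam.utils import minutes_to_datetime as mtdt
from pam.variables import END_OF_DAY

# build a one-person plan: home -> work -> home
person = Person('p1', attributes={'subpopulation': 'default'})
person.add(Activity(seq=1, act='home', area='A', start_time=mtdt(0), end_time=mtdt(480)))
person.add(Leg(seq=1, mode='car', start_area='A', end_area='B', start_time=mtdt(480), end_time=mtdt(510)))
person.add(Activity(seq=2, act='work', area='B', start_time=mtdt(510), end_time=mtdt(1020)))
person.add(Leg(seq=2, mode='car', start_area='B', end_area='A', start_time=mtdt(1020), end_time=mtdt(1050)))
person.add(Activity(seq=3, act='home', area='A', start_time=mtdt(1050), end_time=END_OF_DAY))

household = Household('hh1')
household.add(person)

population = Population()
population += household  # unsafe add; population.combine(household, prefix="x") guarantees unique ids

print(population.stats)  # {'num_households': 1, 'num_people': 1, 'num_activities': 3, 'num_legs': 2}
person.validate()        # returns True for a consistent plan (see the validate_* methods above)
```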
#### File: pam/plot/stats.py
```python
import pandas as pd
from matplotlib import pyplot as plt
import numpy as np
from pam.utils import dt_to_s, td_to_s
from datetime import timedelta
def extract_activity_log(population):
log = []
for hid, pid, person in population.people():
for activity in person.activities:
log.append({
'act': activity.act,
'start': dt_to_s(activity.start_time),
'end': dt_to_s(activity.end_time),
'duration': td_to_s(activity.duration)
})
return pd.DataFrame(log)
def extract_leg_log(population):
log = []
for hid, pid, person in population.people():
for leg in person.legs:
log.append({
'mode': leg.mode,
'start': dt_to_s(leg.start_time),
'end': dt_to_s(leg.end_time),
'duration': td_to_s(leg.duration)
})
return pd.DataFrame(log)
def time_binner(data):
"""
    Bin start and end times and durations; return a frequency table for a 24-hour period in 15-minute intervals.
"""
bins = list(range(0, 24*60*60+1, 15*60))
bins[-1] = 100*60*60
labels = pd.timedelta_range(start='00:00:00', periods=96, freq='15min')
binned = pd.DataFrame(index=pd.timedelta_range(start='00:00:00', periods=96, freq='15min'))
binned['duration'] = pd.cut(data.duration, bins, labels=labels, right=False).value_counts()
binned['end'] = pd.cut(data.end, bins, labels=labels, right=False).value_counts()
binned['start'] = pd.cut(data.start, bins, labels=labels, right=False).value_counts()
binned = binned / binned.max()
return binned
def plot_time_bins(data, sub_col, width=12, height_factor=1.2):
subs = set(data[sub_col])
fig, axs = plt.subplots(
len(subs),
        figsize=(width, height_factor * len(subs)),  # use the height_factor argument rather than a hard-coded 1.2
sharex=False
)
if not isinstance(axs, np.ndarray):
axs = [axs]
for ax, sub in zip(axs, subs):
binned = time_binner(data.loc[data[sub_col] == sub])
ax.pcolormesh(binned.T, cmap='cool', edgecolors='white', linewidth=1)
ax.set_xticks([i for i in range(0,97,8)])
ax.set_xticklabels([f"{h:02}:00" for h in range(0,25,2)])
ax.set_yticks([0.5,1.5,2.5])
ax.set_yticklabels(['Duration', 'End time', 'Start time'])
ax.grid(which='minor', color='w', linestyle='-', linewidth=2)
for pos in ['right','top','bottom','left']:
ax.spines[pos].set_visible(False)
ax.set_title(sub.title(), fontsize='medium', rotation=0)
fig.tight_layout()
return fig
def plot_activity_times(population):
acts = extract_activity_log(population)
fig = plot_time_bins(acts, sub_col='act')
# fig.suptitle("Activity Time Bins")
return fig
def plot_leg_times(population):
legs = extract_leg_log(population)
fig = plot_time_bins(legs, sub_col='mode')
# fig.suptitle("Travel Time Bins")
return fig
def calculate_leg_duration_by_mode(population):
all_legs = []
for hid, pid, person in population.people():
for seq, leg in enumerate(person.legs):
all_legs.append({
'leg mode': leg.mode,
'duration_hours': leg.duration.days*24 + leg.duration.seconds/3600
})
all_legs_df = pd.DataFrame(all_legs)
outputs_df = all_legs_df.groupby('leg mode', as_index = False).agg({'duration_hours': 'sum'})
outputs_df.insert(0, 'scenario', population.name, True)
return outputs_df
def calculate_activity_duration_by_act(population, exclude = None):
all_activities = []
for hid, pid, person in population.people():
for seq, activity in enumerate(person.activities):
all_activities.append({
'act': activity.act,
'duration_hours': activity.duration.days*24 + activity.duration.seconds/3600
})
all_activities_df = pd.DataFrame(all_activities)
outputs_df = all_activities_df.groupby('act', as_index = False).agg({'duration_hours': 'sum'})
outputs_df.insert(0, 'scenario', population.name, True)
    if exclude is not None:
outputs_df = outputs_df[outputs_df.act != exclude]
return outputs_df
def calculate_total_activity_duration(population, exclude = None):
total_activity_duration = timedelta(minutes=0)
for hid, pid, person in population.people():
for seq, activity in enumerate(person.activities):
            if activity.act != exclude:
total_activity_duration = total_activity_duration + activity.duration
total_activity_duration_hours = total_activity_duration.days*24 + total_activity_duration.seconds/3600
return total_activity_duration_hours
def calculate_total_leg_duration(population):
total_leg_duration = timedelta(minutes=0)
for hid, pid, person in population.people():
for seq, leg in enumerate(person.legs):
total_leg_duration = total_leg_duration + leg.duration
total_leg_duration_hours = total_leg_duration.days*24 + total_leg_duration.seconds/3600
return total_leg_duration_hours
def plot_activity_duration(list_of_populations, exclude = None, axis = None):
x = []
y = []
for idx, population in enumerate(list_of_populations):
x.append(population.name)
y.append(calculate_total_activity_duration(population, exclude))
outputs_df = pd.DataFrame({'scenario': x, 'activity duration (hours)': y})
x_label_rotation = 90
    if exclude is not None:
        title = 'activities (excl ' + exclude + ')'
    else:
        title = 'activities'
    if axis is None:
        plt.bar(x, y)
        plt.xticks(rotation=x_label_rotation)
        plt.ylabel('duration (hours)')
        plt.title(title)
        plt.show()
else:
axis.bar(x,y)
axis.plot()
axis.set_title(title)
axis.set_xlabel('')
axis.set_xticklabels(x, rotation=x_label_rotation)
return outputs_df
def plot_leg_duration(list_of_populations, axis = None):
x = []
y = []
for idx, population in enumerate(list_of_populations):
x.append(population.name)
y.append(calculate_total_leg_duration(population))
outputs_df = pd.DataFrame({'scenario': x, 'leg duration (hours)': y})
title = 'legs'
x_label_rotation = 90
    if axis is None:
        plt.bar(x, y)
        plt.xticks(rotation=x_label_rotation)
        plt.ylabel('duration (hours)')
        plt.title(title)
else:
axis.bar(x,y)
axis.plot()
axis.set_title(title)
axis.set_xlabel('')
axis.set_xticklabels(x, rotation=x_label_rotation)
return outputs_df
def plot_activity_duration_by_act(list_of_populations, exclude = None, axis = None):
population_act_df = pd.DataFrame()
for idx, population in enumerate(list_of_populations):
population_act_df = population_act_df.append(
calculate_activity_duration_by_act(population, exclude), ignore_index = True)
pivot_for_chart = population_act_df.pivot(
index='scenario',
columns='act',
values='duration_hours'
)
    if exclude is not None:
        title = 'activities by type (excl ' + exclude + ')'
    else:
        title = 'activities by type'
    if axis is None:
        pivot_for_chart.plot.bar(stacked=True)
        plt.ylabel('duration (hours)')
        plt.title(title)
        plt.show()
else:
pivot_for_chart.plot.bar(stacked=True, ax = axis)
axis.set_xlabel('')
axis.set_title(title)
return pivot_for_chart
def plot_leg_duration_by_mode(list_of_populations, axis = None):
population_mode_df = pd.DataFrame()
for idx, population in enumerate(list_of_populations):
population_mode_df = population_mode_df.append(
calculate_leg_duration_by_mode(population), ignore_index = True)
pivot_for_chart = population_mode_df.pivot(
index='scenario',
columns='leg mode',
values='duration_hours'
)
title = 'legs by mode'
    if axis is None:
pivot_for_chart.plot.bar(stacked=True)
plt.title(title)
plt.ylabel('duration (hours)')
else:
pivot_for_chart.plot.bar(stacked=True, ax = axis)
axis.set_xlabel('')
axis.set_title(title)
return pivot_for_chart
def plot_population_comparisons(list_of_populations, activity_to_exclude = None):
fig1, ax = plt.subplots(nrows=1, ncols=2, tight_layout=True, sharey = True)
legs = plot_leg_duration(list_of_populations, ax[0])
leg_modes = plot_leg_duration_by_mode(list_of_populations, ax[1])
ax[0].set_ylabel('duration (hours)')
fig2, ax2 = plt.subplots(nrows=1, ncols=2, tight_layout=True, sharey = True)
activities = plot_activity_duration(list_of_populations, activity_to_exclude, ax2[0])
activity_types = plot_activity_duration_by_act(list_of_populations, activity_to_exclude, ax2[1])
ax2[0].set_ylabel('duration (hours)')
leg_modes['TOTAL'] = leg_modes.sum(axis=1)
activity_types['TOTAL'] = activity_types.sum(axis=1)
print(leg_modes, '\n', activity_types)
return fig1, fig2, leg_modes, activity_types
```
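A hedged sketch of how these helpers hang together, reusing the core API from pam/core.py above; the tiny population is a throwaway example, and `name` is set explicitly because the comparison helpers read `population.name`.
```python
from copy import deepcopy
from pam.core import Population, Household, Person
from pam.activity import Activity, Leg
from pam.utils import minutes_to_datetime as mtdt
from pam.variables import END_OF_DAY
from pam.plot.stats import extract_activity_log, time_binner, plot_activity_times, plot_population_comparisons

person = Person('p1')
person.add(Activity(seq=1, act='home', area='A', start_time=mtdt(0), end_time=mtdt(540)))
person.add(Leg(seq=1, mode='walk', start_area='A', end_area='B', start_time=mtdt(540), end_time=mtdt(560)))
person.add(Activity(seq=2, act='shop', area='B', start_time=mtdt(560), end_time=mtdt(620)))
person.add(Leg(seq=2, mode='walk', start_area='B', end_area='A', start_time=mtdt(620), end_time=mtdt(640)))
person.add(Activity(seq=3, act='home', area='A', start_time=mtdt(640), end_time=END_OF_DAY))
hh = Household('hh1')
hh.add(person)
base = Population()
base.add(hh)
base.name = 'base'

log = extract_activity_log(base)   # DataFrame with columns act, start, end, duration (times in seconds)
binned = time_binner(log)          # 96 x 15-minute bins, each column normalised by its maximum
fig = plot_activity_times(base)    # one row of time bins per activity type

scenario = deepcopy(base)
scenario.name = 'scenario'
fig1, fig2, leg_modes, activity_types = plot_population_comparisons([base, scenario], activity_to_exclude='home')
```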
#### File: pam/samplers/basic.py
```python
import random
def freq_sample(freq: float, sample: float):
"""
    Down- or up-sample a frequency based on a sample size. Sub-unit frequencies are
    rounded probabilistically.
    :param freq: pre-sampled frequency (int or float)
:param sample: sample size (float)
:return: new frequency (integer)
"""
new_freq = freq * sample
remainder = new_freq - int(new_freq)
remainder = int(random.random() < remainder)
return int(new_freq) + remainder
```
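To make the probabilistic rounding concrete, here is a small illustrative check (the numbers are arbitrary): a weight of 10 sampled at 25% gives 2.5, so each draw returns 2 or 3 and the long-run mean stays near 2.5.
```python
from pam.samplers.basic import freq_sample

draws = [freq_sample(10, 0.25) for _ in range(10000)]
assert set(draws) <= {2, 3}     # sub-unit remainders are rounded down or up
print(sum(draws) / len(draws))  # ~2.5: expected totals are preserved under sampling
```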
#### File: pam/tests/test_10_plotting.py
```python
import pytest
import pandas as pd
from matplotlib.figure import Figure
from shapely.geometry import Point, LineString
from plotly.graph_objs import Scattermapbox
from pam.plot.plans import build_person_df, build_cmap, build_person_travel_geodataframe, build_rgb_travel_cmap, \
plot_travel_plans
from pam.plot.stats import extract_activity_log, extract_leg_log, time_binner, plot_activity_times, plot_leg_times, \
plot_population_comparisons, calculate_leg_duration_by_mode
from .fixtures import person_heh, Steve, Hilda, instantiate_household_with
from pam.core import Household, Population
from copy import deepcopy
from pam.policy import policies
from tests.test_utils import cyclist, pt_person
def test_build_person_dataframe(person_heh):
df = build_person_df(person_heh)
assert len(df) == 5
assert list(df.act) == ['Home', 'Travel', 'Education', 'Travel', 'Home']
def test_build_cmap_dict():
df = pd.DataFrame(
[
{'act': 'Home', 'dur': None},
{'act': 'Travel', 'dur': None},
{'act': 'Work', 'dur': None},
{'act': 'Travel', 'dur': None},
{'act': 'Home', 'dur': None},
]
)
cmap = build_cmap(df)
assert isinstance(cmap, dict)
assert set(list(cmap)) == set(['Home', 'Work', 'Travel'])
def test_build_rgb_travel_cmap(Steve):
for leg in Steve.legs:
leg.start_location.loc = Point(1, 2)
leg.end_location.loc = Point(2, 3)
gdf = build_person_travel_geodataframe(Steve)
cmap = build_rgb_travel_cmap(gdf, colour_by='mode')
assert cmap == {'car': (255, 237, 111), 'walk': (204, 235, 197)}
def test_build_activity_log(person_heh):
population = Population()
for i in range(5):
hh = Household(i)
hh.add(person_heh)
population.add(hh)
log = extract_activity_log(population)
assert len(log) == 15
assert list(log.columns) == ['act', 'start', 'end', 'duration']
def test_build_leg_log(person_heh):
population = Population()
for i in range(5):
hh = Household(i)
hh.add(person_heh)
population.add(hh)
log = extract_leg_log(population)
assert len(log) == 10
assert list(log.columns) == ['mode', 'start', 'end', 'duration']
def test_time_binner(person_heh):
population = Population()
for i in range(5):
hh = Household(i)
hh.add(person_heh)
population.add(hh)
log = extract_activity_log(population)
binned = time_binner(log)
assert len(binned) == 96
for h in ['start', 'end', 'duration']:
assert binned[h].sum() == 3
def test_plot_act_time_bins(Steve, Hilda):
population = Population()
for i, person in enumerate([Steve, Hilda]):
hh = Household(i)
hh.add(person)
population.add(hh)
fig = plot_activity_times(population)
assert isinstance(fig, Figure)
def test_plot_leg_time_bins(Steve, Hilda):
population = Population()
for i, person in enumerate([Steve, Hilda]):
hh = Household(i)
hh.add(person)
population.add(hh)
fig = plot_leg_times(population)
assert isinstance(fig, Figure)
def test_plot_population_comparisons(Steve, Hilda):
population_1 = Population()
for i, person in enumerate([Steve, Hilda]):
hh = Household(i)
hh.add(person)
population_1.add(hh)
population_1.name = 'base'
population_2 = deepcopy(population_1)
population_2.name = 'work_removed'
policy_remove_work = policies.RemovePersonActivities(
activities=['work'],
probability=1
)
policies.apply_policies(population_2, [policy_remove_work])
list_of_populations = [population_1, population_2]
outputs = plot_population_comparisons(list_of_populations, 'home')
legs = outputs[2]
activities = outputs[3]
check = calculate_leg_duration_by_mode(population_2)
assert isinstance(outputs[0], Figure)
assert isinstance(outputs[1], Figure)
assert legs.loc['work_removed', 'walk'] == check.loc[check['leg mode'] == 'walk', 'duration_hours'].iloc[0]
def test_plot_travel_plans(cyclist):
fig = cyclist.plot_travel_plotly(mapbox_access_token='token')
assert len(fig.data) == 1
assert isinstance(fig.data[0], Scattermapbox)
assert fig.data[0].name == 'bike'
def test_plot_travel_plans_coloured_by_purp(pt_person):
fig = pt_person.plot_travel_plotly(colour_by='pid', mapbox_access_token='token')
assert len(fig.data) == 1
assert isinstance(fig.data[0], Scattermapbox)
assert fig.data[0].name == 'census_1'
def test_plot_travel_plans_grouped_by_legs(pt_person):
fig = pt_person.plot_travel_plotly(groupby=['seq'], mapbox_access_token='token')
for dat in fig.data:
assert isinstance(dat, Scattermapbox)
assert [dat.name for dat in fig.data] == ["('pt', 3)", "('pt', 5)", "('pt', 7)", "('transit_walk', 1)",
"('transit_walk', 2)", "('transit_walk', 4)", "('transit_walk', 6)",
"('transit_walk', 8)"]
def test_plot_travel_plans_for_household(cyclist, pt_person):
hhld = instantiate_household_with([cyclist, pt_person])
fig = hhld.plot_travel_plotly(mapbox_access_token='token')
assert len(fig.data) == 3
assert [dat.name for dat in fig.data] == ['bike', 'pt', 'transit_walk']
```
#### File: pam/tests/test_2_activity_fix.py
```python
from pam.activity import Plan, Activity, Leg, Location
from pam.utils import minutes_to_datetime as mtdt
from pam.variables import END_OF_DAY
import pytest
def test_crop_act_past_end_of_day():
plan = Plan()
plan.add(Activity(seq=1, act='home', area='A', start_time=mtdt(0), end_time=mtdt(600)))
plan.add(Leg(seq=2, mode='car', start_area='A', end_area='B', start_time=mtdt(600), end_time=mtdt(620)))
plan.add(Activity(seq=3, act='work', area='B', start_time=mtdt(620), end_time=mtdt(12000)))
plan.add(Leg(seq=2, mode='car', start_area='B', end_area='A', start_time=mtdt(12000), end_time=mtdt(12020)))
plan.add(Activity(seq=1, act='home', area='A', start_time=mtdt(12020), end_time=mtdt(12030)))
plan.crop()
assert plan.length == 3
assert plan.day[-1].end_time == END_OF_DAY
def test_crop_leg_past_end_of_day():
plan = Plan()
plan.add(Activity(seq=1, act='home', area='A', start_time=mtdt(0), end_time=mtdt(600)))
plan.add(Leg(seq=2, mode='car', start_area='A', end_area='B', start_time=mtdt(600), end_time=mtdt(620)))
plan.add(Activity(seq=3, act='work', area='B', start_time=mtdt(620), end_time=mtdt(1200)))
plan.add(Leg(seq=2, mode='car', start_area='B', end_area='A', start_time=mtdt(1200), end_time=mtdt(12020)))
plan.add(Activity(seq=1, act='home', area='A', start_time=mtdt(12020), end_time=mtdt(12030)))
plan.crop()
assert plan.length == 3
assert plan.day[-1].end_time == END_OF_DAY
def test_crop_act_out_of_order():
plan = Plan()
plan.add(Activity(seq=1, act='home', area='A', start_time=mtdt(0), end_time=mtdt(600)))
plan.add(Leg(seq=2, mode='car', start_area='A', end_area='B', start_time=mtdt(600), end_time=mtdt(620)))
plan.add(Activity(seq=3, act='work', area='B', start_time=mtdt(620), end_time=mtdt(12000)))
plan.add(Leg(seq=2, mode='car', start_area='B', end_area='A', start_time=mtdt(12000), end_time=END_OF_DAY))
plan.add(Activity(seq=1, act='home', area='A', start_time=mtdt(0), end_time=mtdt(12030)))
plan.crop()
assert plan.length == 3
assert plan.day[-1].end_time == END_OF_DAY
def test_crop_leg_out_of_order():
plan = Plan()
plan.add(Activity(seq=1, act='home', area='A', start_time=mtdt(0), end_time=mtdt(600)))
plan.add(Leg(seq=2, mode='car', start_area='A', end_area='B', start_time=mtdt(600), end_time=mtdt(620)))
plan.add(Activity(seq=3, act='work', area='B', start_time=mtdt(620), end_time=END_OF_DAY))
plan.add(Leg(seq=2, mode='car', start_area='B', end_area='A', start_time=mtdt(0), end_time=END_OF_DAY))
plan.add(Activity(seq=1, act='home', area='A', start_time=mtdt(0), end_time=mtdt(12030)))
plan.crop()
assert plan.length == 3
assert plan.day[-1].end_time == END_OF_DAY
def test_crop_act_bad_order():
plan = Plan()
plan.add(Activity(seq=1, act='home', area='A', start_time=mtdt(0), end_time=mtdt(600)))
plan.add(Leg(seq=2, mode='car', start_area='A', end_area='B', start_time=mtdt(600), end_time=mtdt(620)))
plan.add(Activity(seq=3, act='work', area='B', start_time=mtdt(620), end_time=mtdt(12000)))
plan.add(Leg(seq=2, mode='car', start_area='B', end_area='A', start_time=mtdt(12000), end_time=mtdt(12020)))
plan.add(Activity(seq=1, act='home', area='A', start_time=mtdt(12020), end_time=END_OF_DAY))
plan.crop()
assert plan.length == 3
assert plan.day[-1].end_time == END_OF_DAY
def test_crop_leg_bad_order():
plan = Plan()
plan.add(Activity(seq=1, act='home', area='A', start_time=mtdt(0), end_time=mtdt(600)))
plan.add(Leg(seq=2, mode='car', start_area='A', end_area='B', start_time=mtdt(600), end_time=mtdt(620)))
plan.add(Activity(seq=3, act='work', area='B', start_time=mtdt(620), end_time=mtdt(12000)))
plan.add(Leg(seq=2, mode='car', start_area='B', end_area='A', start_time=mtdt(12000), end_time=mtdt(11000)))
plan.add(Activity(seq=1, act='home', area='A', start_time=mtdt(11000), end_time=END_OF_DAY))
plan.crop()
assert plan.length == 3
assert plan.day[-1].end_time == END_OF_DAY
def test_fix_time_consistency():
plan = Plan()
plan.add(Activity(seq=1, act='home', area='A', start_time=mtdt(0), end_time=mtdt(600)))
plan.add(Leg(seq=2, mode='car', start_area='A', end_area='B', start_time=mtdt(610), end_time=mtdt(620)))
plan.add(Activity(seq=3, act='work', area='B', start_time=mtdt(620), end_time=END_OF_DAY))
plan.fix_time_consistency()
assert plan[1].start_time == mtdt(600)
def test_fix_location_consistency():
plan = Plan()
plan.add(Activity(seq=1, act='home', area='A', start_time=mtdt(0), end_time=mtdt(600)))
plan.add(Leg(seq=2, mode='car', start_area='B', end_area='A', start_time=mtdt(600), end_time=mtdt(620)))
plan.add(Activity(seq=3, act='work', area='B', start_time=mtdt(620), end_time=END_OF_DAY))
plan.fix_location_consistency()
assert plan[1].start_location.area == 'A'
assert plan[1].end_location.area == 'B'
def test_plan_fix():
plan = Plan()
plan.add(Activity(seq=1, act='home', area='A', start_time=mtdt(0), end_time=mtdt(600)))
plan.add(Leg(seq=2, mode='car', start_area='B', end_area='B', start_time=mtdt(610), end_time=mtdt(620)))
plan.add(Activity(seq=3, act='work', area='B', start_time=mtdt(620), end_time=mtdt(12000)))
plan.add(Leg(seq=2, mode='car', start_area='B', end_area='A', start_time=mtdt(12000), end_time=mtdt(11000)))
plan.add(Activity(seq=1, act='home', area='A', start_time=mtdt(11000), end_time=END_OF_DAY))
plan.fix()
assert plan.length == 3
assert plan.day[-1].end_time == END_OF_DAY
assert plan[1].start_time == mtdt(600)
assert plan[1].start_location.area == 'A'
```
#### File: pam/tests/test_2_activity_validate.py
```python
import pytest
from datetime import datetime
from pam.core import Person
from pam.activity import Plan, Activity, Leg
from pam.utils import minutes_to_datetime as mtdt
from .fixtures import person_heh, person_heh_open1, person_hew_open2, person_whw, person_whshw
from pam.variables import END_OF_DAY
from pam import PAMSequenceValidationError, PAMTimesValidationError, PAMValidationLocationsError
def test_person_heh_valid(person_heh):
assert person_heh.has_valid_plan
def test_person_heh_valid_plan(person_heh):
assert person_heh.plan.is_valid
def test_person_heh_open1_valid(person_heh_open1):
assert person_heh_open1.has_valid_plan
def test_person_heh_open1_valid_plan(person_heh_open1):
assert person_heh_open1.plan.is_valid
def test_person_hew_open1_valid(person_hew_open2):
assert person_hew_open2.plan.is_valid
def test_person_whw_valid(person_whw):
assert person_whw.plan.is_valid
def test_person_whshw_valid(person_whshw):
assert person_whshw.plan.is_valid
@pytest.fixture
def act_act_sequence():
person = Person('1')
person.plan.day = [
Activity(
seq=1,
act='home',
area='a',
start_time=mtdt(0),
end_time=mtdt(180)
),
Activity(
seq=3,
act='home',
area='a',
start_time=mtdt(180),
end_time=END_OF_DAY
)
]
return person
def test_act_act_sequence_not_valid(act_act_sequence):
with pytest.raises(PAMSequenceValidationError):
act_act_sequence.plan.validate_sequence()
@pytest.fixture
def leg_leg_sequence():
person = Person('1')
person.plan.day = [
Leg(
seq=1,
mode='car',
start_area='a',
end_area='b',
start_time=mtdt(0),
end_time=mtdt(90)
),
Leg(
seq=2,
mode='car',
start_area='b',
end_area='a',
start_time=mtdt(0),
end_time=mtdt(90)
)
]
return person
def test_leg_leg_sequence_not_valid(leg_leg_sequence):
with pytest.raises(PAMSequenceValidationError):
leg_leg_sequence.plan.validate_sequence()
@pytest.fixture
def act_leg_leg_act_plan():
person = Person('1')
person.plan.day = [
Activity(
seq=1,
act='home',
area='a',
start_time=mtdt(0),
end_time=mtdt(180)
),
Leg(
seq=1,
mode='car',
start_area='a',
end_area='b',
start_time=mtdt(0),
end_time=mtdt(90)
),
Leg(
seq=1,
mode='car',
start_area='b',
end_area='a',
start_time=mtdt(0),
end_time=mtdt(90)
),
Activity(
seq=3,
act='home',
area='a',
start_time=mtdt(180),
end_time=END_OF_DAY
)
]
return person
def test_act_leg_leg_act_sequence_not_valid(act_leg_leg_act_plan):
with pytest.raises(PAMSequenceValidationError):
act_leg_leg_act_plan.plan.validate_sequence()
@pytest.fixture
def act_leg_act_leg_act_bad_times():
person = Person('1')
person.plan.day = [
Activity(
seq=1,
act='home',
area='a',
start_time=mtdt(0),
end_time=mtdt(180)
),
Leg(
seq=1,
mode='car',
start_area='a',
end_area='b',
start_time=mtdt(180),
end_time=mtdt(190)
),
Activity(
seq=2,
act='work',
area='b',
start_time=mtdt(0),
end_time=mtdt(180)
),
Leg(
seq=1,
mode='car',
start_area='b',
end_area='a',
start_time=mtdt(190),
end_time=mtdt(390)
),
Activity(
seq=3,
act='home',
area='a',
start_time=mtdt(280),
end_time=END_OF_DAY
)
]
return person
def test_invalid_times(act_leg_act_leg_act_bad_times):
assert act_leg_act_leg_act_bad_times.plan.validate_locations()
with pytest.raises(PAMTimesValidationError):
act_leg_act_leg_act_bad_times.plan.validate_times()
def test_invalid_times_not_start_at_zero():
plan = Plan()
plan.add(
Activity(
seq=1,
act='home',
area='a',
start_time=mtdt(10),
end_time=mtdt(180)
)
)
assert not plan.valid_times
def test_invalid_times_not_end_at_end_of_day():
plan = Plan()
plan.add(
Activity(
seq=1,
act='home',
area='a',
start_time=mtdt(0),
end_time=mtdt(180)
)
)
assert not plan.valid_times
@pytest.fixture
def act_leg_act_leg_act_bad_locations1():
person = Person('1')
person.plan.day = [
Activity(
seq=1,
act='home',
area='a',
start_time=mtdt(0),
end_time=mtdt(180)
),
Leg(
seq=1,
mode='car',
start_area='a',
end_area='b',
start_time=mtdt(180),
end_time=mtdt(190)
),
Activity(
seq=2,
act='work',
area='b',
start_time=mtdt(190),
end_time=mtdt(200)
),
Leg(
seq=1,
mode='car',
start_area='a',
end_area='a',
start_time=mtdt(200),
end_time=mtdt(390)
),
Activity(
seq=3,
act='home',
area='a',
start_time=mtdt(390),
end_time=END_OF_DAY
)
]
return person
def test_invalid_locations(act_leg_act_leg_act_bad_locations1):
assert act_leg_act_leg_act_bad_locations1.plan.validate_times()
with pytest.raises(PAMValidationLocationsError):
act_leg_act_leg_act_bad_locations1.plan.validate_locations()
@pytest.fixture
def act_leg_act_leg_act_bad_locations2():
person = Person('1')
person.plan.day = [
Activity(
seq=1,
act='home',
area='a',
start_time=mtdt(0),
end_time=mtdt(180)
),
Leg(
seq=1,
mode='car',
start_area='a',
end_area='b',
start_time=mtdt(180),
end_time=mtdt(190)
),
Activity(
seq=2,
act='work',
area='b',
start_time=mtdt(190),
end_time=mtdt(200)
),
Leg(
seq=1,
mode='car',
start_area='b',
end_area='a',
start_time=mtdt(200),
end_time=mtdt(390)
),
Activity(
seq=3,
act='home',
area='b',
start_time=mtdt(390),
end_time=END_OF_DAY
)
]
return person
def test_invalid_locations2(act_leg_act_leg_act_bad_locations2):
assert act_leg_act_leg_act_bad_locations2.plan.validate_times()
with pytest.raises(PAMValidationLocationsError):
act_leg_act_leg_act_bad_locations2.plan.validate_locations()
def test_invalid_not_end_with_act():
plan = Plan()
plan.add(
Activity(
seq=1,
act='home',
area='a',
start_time=mtdt(0),
end_time=mtdt(180)
)
)
plan.add(
Leg(
seq=1,
mode='car',
start_area='a',
end_area='b',
start_time=mtdt(180),
end_time=mtdt(190)
)
)
assert not plan.valid_sequence
def test_validate_sequence(person_heh):
assert person_heh.validate()
```
#### File: pam/tests/test_5_probability_samplers.py
```python
import random
from pam.policy import probability_samplers
from tests.fixtures import *
def instantiate_household_with(persons: list):
household = Household(1)
for person in persons:
household.add(person)
return household
@pytest.fixture()
def SmithHousehold_alt(Steve, Hilda):
return instantiate_household_with([Steve, Hilda])
def test_SamplingProbability_samples_when_random_below_prob_val(mocker):
mocker.patch.object(probability_samplers.SamplingProbability, 'p', return_value=0.55)
mocker.patch.object(random, 'random', return_value=0.5)
prob = probability_samplers.SamplingProbability(0.55)
assert prob.sample('')
def test_SamplingProbability_samples_when_random_equal_prob_val(mocker):
mocker.patch.object(probability_samplers.SamplingProbability, 'p', return_value=0.55)
mocker.patch.object(random, 'random', return_value=0.55)
prob = probability_samplers.SamplingProbability(0.55)
assert not prob.sample('')
def test_SamplingProbability_doesnt_sample_when_random_above_prob_val(mocker):
mocker.patch.object(probability_samplers.SamplingProbability, 'p', return_value=0.55)
mocker.patch.object(random, 'random', return_value=0.65)
prob = probability_samplers.SamplingProbability(0.55)
assert not prob.sample('')
def test_SamplingProbability_throws_exception_when_used_for_extracting_p():
prob = probability_samplers.SamplingProbability(0.55)
with pytest.raises(NotImplementedError) as e:
prob.p('')
assert 'is a base class' \
in str(e.value)
def test_SimpleProbability_p_always_returns_same_level_p():
prob = probability_samplers.SimpleProbability(0.45)
assert prob.p('alfjhlfhlwkhf') == 0.45
assert prob.p(None) == 0.45
assert prob.p(Household(1)) == 0.45
assert prob.p(Person(1)) == 0.45
assert prob.p(Activity(1)) == 0.45
#### HouseholdProbability
def test_HouseholdProbability_accepts_integer():
probability_samplers.HouseholdProbability(1)
def test_HouseholdProbability_fails_non_probability_integers():
with pytest.raises(AssertionError):
probability_samplers.HouseholdProbability(2)
def test_HouseholdProbability_accepts_functions():
def custom_sampler(x, kwarg):
return 0.5
assert callable(custom_sampler)
prob = probability_samplers.HouseholdProbability(custom_sampler, {'kwarg': 'kwarg'})
assert prob.kwargs == {'kwarg': 'kwarg'}
def test_HouseholdProbability_defaults_to_empty_kwargs_with_custom_distros():
def custom_sampler(x):
return 0.5
prob = probability_samplers.HouseholdProbability(custom_sampler)
assert prob.kwargs == {}
custom_sampler('', **prob.kwargs)
def test_HouseholdProbability_p_delegates_to_compute_probability_for_household_for_Household(mocker):
mocker.patch.object(probability_samplers.HouseholdProbability, 'compute_probability_for_household', return_value=None)
prob = probability_samplers.HouseholdProbability(0.5)
hhld = Household(1)
prob.p(hhld)
probability_samplers.HouseholdProbability.compute_probability_for_household.assert_called_once_with(hhld)
def test_HouseholdProbability_p_throws_exception_when_given_Person():
prob = probability_samplers.HouseholdProbability(0.5)
with pytest.raises(NotImplementedError) as e:
prob.p(Person(1))
def test_HouseholdProbability_p_throws_exception_when_given_Activity():
prob = probability_samplers.HouseholdProbability(0.5)
with pytest.raises(NotImplementedError) as e:
prob.p(Activity(1))
def test_HouseholdProbability_p_throws_exception_when_given_whatever():
prob = probability_samplers.HouseholdProbability(0.5)
with pytest.raises(TypeError) as e:
prob.p(None)
def test_HouseholdProbability_compute_probability_for_household_returns_same_level_p_for_floats():
prob = probability_samplers.HouseholdProbability(0.5)
assert prob.compute_probability_for_household(Household(1)) == 0.5
def test_HouseholdProbability_compute_probability_for_household_delegates_p_to_custom_callable(mocker):
called = None
def custom_sampler(x, kwarg):
nonlocal called
called = True
return 0.5
prob = probability_samplers.HouseholdProbability(custom_sampler, {'kwarg': 'kwarg'})
hhld = Household(1)
assert prob.compute_probability_for_household(hhld) == 0.5
assert called
#### PersonProbability
def test_PersonProbability_accepts_integer():
probability_samplers.PersonProbability(1)
def test_PersonProbability_fails_non_probability_integers():
with pytest.raises(AssertionError):
probability_samplers.PersonProbability(2)
def test_PersonProbability_accepts_functions():
def custom_sampler(x, kwarg):
return 0.5
prob = probability_samplers.PersonProbability(custom_sampler, {'kwarg': 'kwarg'})
assert prob.kwargs == {'kwarg': 'kwarg'}
def test_PersonProbability_defaults_to_empty_kwargs_with_custom_distros():
def custom_sampler(x):
return 0.5
prob = probability_samplers.PersonProbability(custom_sampler)
assert prob.kwargs == {}
custom_sampler('', **prob.kwargs)
def test_PersonProbability_p_delegates_to_compute_probability_for_person_for_each_person_in_Household(
mocker, SmithHousehold_alt):
mocker.patch.object(probability_samplers.PersonProbability, 'compute_probability_for_person', return_value=0.25)
prob = probability_samplers.PersonProbability(0.25)
p = prob.p(SmithHousehold_alt)
assert probability_samplers.PersonProbability.compute_probability_for_person.call_count == 2
assert p == 0.4375
def test_PersonProbability_p_delegates_to_compute_probability_for_person_for_Person(mocker):
mocker.patch.object(probability_samplers.PersonProbability, 'compute_probability_for_person', return_value=None)
prob = probability_samplers.PersonProbability(0.5)
person = Person(1)
prob.p(person)
probability_samplers.PersonProbability.compute_probability_for_person.assert_called_once_with(person)
def test_PersonProbability_p_throws_exception_when_given_Activity():
prob = probability_samplers.PersonProbability(0.5)
with pytest.raises(NotImplementedError) as e:
prob.p(Activity(1))
def test_PersonProbability_p_throws_exception_when_given_whatever():
prob = probability_samplers.PersonProbability(0.5)
with pytest.raises(NotImplementedError) as e:
prob.p(None)
def test_PersonProbability_compute_probability_for_household_returns_same_level_p_for_floats():
prob = probability_samplers.PersonProbability(0.5)
assert prob.compute_probability_for_person(Person(1)) == 0.5
def test_PersonProbability_compute_probability_for_person_delegates_p_to_custom_callable(mocker):
called = None
def custom_sampler(x, kwarg):
nonlocal called
called = True
return 0.5
prob = probability_samplers.PersonProbability(custom_sampler, {'kwarg': 'kwarg'})
person = Person(1)
assert prob.compute_probability_for_person(person) == 0.5
assert called
#### ActivityProbability
def test_ActivityProbability_accepts_integer():
probability_samplers.ActivityProbability([''], 1)
def test_ActivityProbability_fails_non_probability_integers():
with pytest.raises(AssertionError):
probability_samplers.ActivityProbability([''], 2)
def test_ActivityProbability_accepts_functions():
def custom_sampler(x, kwarg):
return 0.5
prob = probability_samplers.ActivityProbability([''], custom_sampler, {'kwarg': 'kwarg'})
assert prob.kwargs == {'kwarg': 'kwarg'}
def test_ActivityProbability_defaults_to_empty_kwargs_with_custom_distros():
def custom_sampler(x):
return 0.5
prob = probability_samplers.ActivityProbability([''], custom_sampler)
assert prob.kwargs == {}
custom_sampler('', **prob.kwargs)
def test_ActivityProbability_p_delegates_to_compute_probability_for_activity_for_each_activity_for_person_in_Household(mocker, SmithHousehold_alt):
mocker.patch.object(probability_samplers.ActivityProbability, 'compute_probability_for_activity', return_value=0.25)
prob = probability_samplers.ActivityProbability(['work', 'escort_education'], 0.25)
p = prob.p(SmithHousehold_alt)
assert probability_samplers.ActivityProbability.compute_probability_for_activity.call_count == 4
assert p == 0.68359375
def test_ActivityProbability_p_delegates_to_compute_probability_for_activity_for_each_Activity_for_Person(mocker, Steve):
mocker.patch.object(probability_samplers.ActivityProbability, 'compute_probability_for_activity', return_value=0.25)
prob = probability_samplers.ActivityProbability(['work', 'escort'], 0.25)
person = Steve
p = prob.p(person)
assert probability_samplers.ActivityProbability.compute_probability_for_activity.call_count == 2
assert p == 0.4375
def test_ActivityProbability_p_delegates_to_compute_probability_for_activity_for_relevant_Activity(mocker, Steve):
mocker.patch.object(probability_samplers.ActivityProbability, 'compute_probability_for_activity', return_value=0.25)
prob = probability_samplers.ActivityProbability(['work'], 0.25)
act = [act for act in Steve.activities][1]
p = prob.p(act)
probability_samplers.ActivityProbability.compute_probability_for_activity.assert_called_once_with(act)
assert p == 0.25
def test_ActivityProbability_p_returns_0_for_activity_for_irrelevant_Activity(mocker, Steve):
prob = probability_samplers.ActivityProbability(['work'], 0.25)
act = [act for act in Steve.activities][2]
p = prob.p(act)
assert p == 0
def test_ActivityProbability_p_throws_exception_when_given_whatever():
prob = probability_samplers.ActivityProbability([''], 0.5)
with pytest.raises(NotImplementedError) as e:
prob.p(None)
def test_ActivityProbability_compute_probability_for_household_returns_same_level_p_for_floats():
prob = probability_samplers.ActivityProbability([''], 0.5)
assert prob.compute_probability_for_activity(Activity(1)) == 0.5
def test_ActivityProbability_compute_probability_for_activity_delegates_p_to_custom_callable():
called = None
def custom_sampler(x, kwarg):
nonlocal called
called = True
return 0.5
prob = probability_samplers.ActivityProbability([''], custom_sampler, {'kwarg': 'kwarg'})
assert prob.compute_probability_for_activity(Activity(1)) == 0.5
assert called
def test_verify_probability_check_list_of_probabilities():
p_list = [probability_samplers.HouseholdProbability(0.5), probability_samplers.ActivityProbability([''], 0.5),
probability_samplers.SimpleProbability(0.5), 0.2]
verified_p_list = probability_samplers.verify_probability(p_list)
assert p_list[:-1] == verified_p_list[:-1]
assert isinstance(verified_p_list[-1], probability_samplers.SimpleProbability)
assert verified_p_list[-1].p(None) == 0.2
def test_verify_probability_defaults_acceptable_int_to_simple_probability(mocker):
mocker.patch.object(probability_samplers.SimpleProbability, '__init__', return_value=None)
probability_samplers.verify_probability(1)
probability_samplers.SimpleProbability.__init__.assert_called_once_with(1.)
def test_verify_probability_defaults_float_to_simple_probability(mocker):
mocker.patch.object(probability_samplers.SimpleProbability, '__init__', return_value=None)
probability_samplers.verify_probability(0.5)
probability_samplers.SimpleProbability.__init__.assert_called_once_with(0.5)
def test_verify_probability_defaults_float_in_list_to_simple_probability(mocker):
mocker.patch.object(probability_samplers.SimpleProbability, '__init__', return_value=None)
probability_samplers.verify_probability([0.3, probability_samplers.PersonProbability(0.01)])
probability_samplers.SimpleProbability.__init__.assert_called_once_with(0.3)
```
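A note on the constants asserted above: with each person (or activity) contributing an independent probability of 0.25, values such as 0.4375 and 0.68359375 are consistent with combining probabilities as 1 - (1 - p)^n over n = 2 and n = 4 components respectively. A quick arithmetic check:
```python
# illustrative arithmetic only: reproduces the constants asserted in the tests above
for n in (2, 4):
    print(n, 1 - (1 - 0.25) ** n)  # 2 -> 0.4375, 4 -> 0.68359375
```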
#### File: pam/tests/test_7_activity_fill_plan.py
```python
import pytest
from datetime import datetime
from .fixtures import *
from pam.variables import END_OF_DAY
def test_home_education_home_fill_activity(person_home_education_home):
person = person_home_education_home
p_idx, s_idx = person.remove_activity(2)
assert person.fill_plan(p_idx, s_idx)
assert person.length == 1
assert [p.act for p in person.activities] == ['home']
assert person.plan.day[0].start_time == mtdt(0)
assert person.plan.day[-1].end_time == END_OF_DAY
assert person.has_valid_plan
def test_work_home_work_fill_activity_closed(person_work_home_work_closed):
person = person_work_home_work_closed
p_idx, s_idx = person.remove_activity(0)
assert person.fill_plan(p_idx, s_idx)
assert person.length == 1
assert [p.act for p in person.activities] == ['home']
assert person.plan.day[0].start_time == mtdt(0)
assert person.plan.day[-1].end_time == END_OF_DAY
assert person.has_valid_plan
def test_work_home_shop_work_fill_activity_closed(person_work_home_shop_home_work_closed):
person = person_work_home_shop_home_work_closed
p_idx, s_idx = person.remove_activity(0)
assert person.fill_plan(p_idx, s_idx)
assert person.length == 5
assert [p.act for p in person.activities] == ['home', 'shop', 'home']
assert person.plan.day[0].start_time == mtdt(0)
assert person.plan.day[-1].end_time == END_OF_DAY
assert person.has_valid_plan
def test_work_home_work_fill_first_activity_not_closed(person_work_home_shop_home_work_not_closed):
person = person_work_home_shop_home_work_not_closed
p_idx, s_idx = person.remove_activity(0)
assert person.fill_plan(p_idx, s_idx)
assert person.length == 5
assert [p.act for p in person.activities] == ['home', 'shop', 'home']
assert person.plan.day[0].start_time == mtdt(0)
assert person.plan.day[-1].end_time == END_OF_DAY
assert person.has_valid_plan
def test_work_home_work_fill_mid_activity_not_closed(person_work_home_shop_home_work_not_closed):
person = person_work_home_shop_home_work_not_closed
duration = person.plan.day[2].duration
p_idx, s_idx = person.remove_activity(6)
assert person.fill_plan(p_idx, s_idx)
assert person.length == 7
assert [p.act for p in person.activities] == ['work', 'home', 'shop', 'work']
assert person.plan.day[0].start_time == mtdt(0)
assert person.plan.day[-1].end_time == END_OF_DAY
assert person.plan.day[2].duration > duration # todo fix bad test
assert person.has_valid_plan
def test_work_home_work_add_first_activity_not_closed(person_work_home_work_not_closed):
person = person_work_home_work_not_closed
p_idx, s_idx = person.remove_activity(0)
assert person.fill_plan(p_idx, s_idx)
assert person.length == 3
assert [p.act for p in person.activities] == ['home', 'work']
assert person.plan.day[0].start_time == mtdt(0)
assert person.plan.day[-1].end_time == END_OF_DAY
assert person.has_valid_plan
``` |
{
"source": "josepdecid/3d-printing-detector",
"score": 3
} |
#### File: src/transforms/random_offset_scaling.py
```python
from typing import Tuple
import cv2
import numpy as np
class RandomOffsetScalingAndPadding(object):
def __init__(self, target_size: Tuple[int, int]):
self.__target_width, self.__target_height = target_size
def __call__(self, img: np.ndarray) -> np.ndarray:
img_background = (255 * np.ones(shape=(self.__target_height, self.__target_width, 3))).astype(np.uint8)
new_w = np.random.randint(self.__target_width / 2, self.__target_width)
new_h = np.random.randint(self.__target_height / 2, self.__target_height)
img_resized = cv2.resize(img, (new_w, new_h), interpolation=cv2.INTER_CUBIC)
random_x = np.random.randint(0, self.__target_width - new_w)
random_y = np.random.randint(0, self.__target_height - new_h)
img_background[random_y:random_y + new_h, random_x:random_x + new_w, :] = img_resized
return img_background
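# Hedged usage sketch (not part of the original file): applies the transform to a dummy
# OpenCV-style uint8 image. The 320x240 target size and the fixed seed are illustrative assumptions.
if __name__ == '__main__':
    np.random.seed(0)
    dummy_img = np.random.randint(0, 256, size=(100, 150, 3), dtype=np.uint8)
    transform = RandomOffsetScalingAndPadding(target_size=(320, 240))
    padded = transform(dummy_img)
    print(padded.shape)  # expected: (240, 320, 3)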
```
#### File: src/transforms/random_projection.py
```python
from typing import Tuple
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import LightSource
from moviepy.video.io.bindings import mplfig_to_npimage
from mpl_toolkits import mplot3d
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from stl.mesh import Mesh
class RandomProjection(object):
"""
    Starting from a 3D STL mesh, creates a random 2D projection of the figure from an arbitrary point of view.
    It also randomizes the illumination parameters (light-source azimuth and altitude)
    and the bright and dark surface colours (RGBA vectors).
STLMesh -> NPArray
"""
def __call__(self, mesh: Mesh) -> np.ndarray:
random_rotation_vectors = 2 * (np.random.rand(3) - 0.5)
random_rotation_angle = float(np.radians(360 * np.random.rand()))
mesh.rotate(random_rotation_vectors, random_rotation_angle)
poly_mesh = RandomProjection.__create_illumination(mesh)
array_img = RandomProjection.__plot_to_array_data(mesh, poly_mesh)
return array_img
@staticmethod
def __create_illumination(mesh: Mesh) -> Poly3DCollection:
lt, dk = RandomProjection.__generate_random_brightness_parameters()
azimuth = float(np.random.rand())
altitude = float(np.random.rand())
poly_mesh = mplot3d.art3d.Poly3DCollection(mesh.vectors)
ls = LightSource(azimuth, altitude)
sns = ls.shade_normals(mesh.get_unit_normals(), fraction=1.0)
rgba = np.array([(lt - dk) * s + dk for s in sns])
poly_mesh.set_facecolor(rgba)
return poly_mesh
@staticmethod
def __plot_to_array_data(mesh: Mesh, poly_mesh: Poly3DCollection) -> np.ndarray:
figure = plt.figure()
axes = mplot3d.Axes3D(figure)
axes.add_collection3d(poly_mesh)
points = mesh.points.reshape(-1, 3)
points_top = max(np.ptp(points, 0)) / 2
controls = [(min(points[:, i]) + max(points[:, i])) / 2 for i in range(3)]
limits = [[controls[i] - points_top, controls[i] + points_top] for i in range(3)]
axes.auto_scale_xyz(*limits)
axes.axis('off')
np_img = mplfig_to_npimage(figure)
plt.close(figure)
return np_img
@staticmethod
def __generate_random_brightness_parameters() -> Tuple[np.ndarray, np.ndarray]:
# TODO: Implement
pass
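    # Hedged note (not part of the original file): the brightness generator above is still a TODO,
    # so __create_illumination currently fails when unpacking its return value. A typical (assumed)
    # implementation would return two RGBA vectors, e.g. a light colour like np.array([0.9, 0.9, 0.9, 1.0])
    # and a dark one like np.array([0.3, 0.3, 0.3, 1.0]), which __create_illumination blends per face
    # using the shaded normals.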
``` |
{
"source": "JosePedroMatos/ADAPT-DB",
"score": 2
} |
#### File: ADAPT-DB/gpu/ann.py
```python
import numpy as np
import pyopencl as cl
from collections import namedtuple
from scipy.special import expit
import pkg_resources
class Weights:
def __init__(self, wHL=None, bHL=None, wOL=None, bOL=None):
self.wHL = wHL
self.bHL = bHL
self.wOL = wOL
self.bOL = bOL
class ann:
    # note: namedtuple's 'verbose' argument was removed in Python 3.7
    openCL = namedtuple('openCL', ('active', 'devList', 'ctx', 'prg', 'queue', 'workGroup', 'platform', 'type'), rename=False)
def __init__(self, data, nodes=10, openCL=False, workGroup=(16, 16), platform=0, deviceType='ALL', verbose=0):
self.data=data
self.nodes=nodes
self.openCL.active=openCL
self.openCL.workGroup=workGroup
self.openCL.platform=platform
tmp={'ALL': cl.device_type.ALL, 'CPU': cl.device_type.CPU, 'GPU': cl.device_type.GPU}
self.openCL.type=tmp[deviceType]
self.activationFuns=('sigmoid', 'linear')
self.verbose = verbose
self.setWeights()
if self.openCL.active:
self._prepOpenCL()
def __str__(self):
return 'ANN model\nNodes: %u' % (self.nodes) + \
'\nOpenCL:\n ' + str(self.openCL.devList) + \
'\nwHL:\n' + np.array_str(self.weights.wHL) + \
'\nbHL:\n' + np.array_str(self.weights.bHL) + \
'\nwOL:\n' + np.array_str(self.weights.wOL) + \
'\nbOL:\n' + np.array_str(self.weights.bOL)
def _activate(self, X, layer):
if self.activationFuns[layer]=='sigmoid':
return expit(X)
else:
return X
def _prepOpenCL(self):
platform=cl.get_platforms()[self.openCL.platform]
self.openCL.devList= platform.get_devices(device_type=self.openCL.type)
self.openCL.ctx = cl.Context(devices=self.openCL.devList)
kernelStr=pkg_resources.resource_string(__name__, 'ann.cl') #@UndefinedVariable
self.openCL.prg = cl.Program(self.openCL.ctx, kernelStr.decode('UTF-8')).build()
self.openCL.queue = cl.CommandQueue(self.openCL.ctx)
if self.verbose>0:
print("===============================================================")
print("Platform name:", platform.name)
print("Platform profile:", platform.profile)
print("Platform vendor:", platform.vendor)
print("Platform version:", platform.version)
for device in self.openCL.devList:
print("---------------------------------------------------------------")
print(" Device name:", device.name)
print(" Device type:", cl.device_type.to_string(device.type))
print(" Device memory: ", device.global_mem_size//1024//1024, 'MB')
print(" Device max clock speed:", device.max_clock_frequency, 'MHz')
print(" Device compute units:", device.max_compute_units)
print(" Device max work items:", device.get_info(cl.device_info.MAX_WORK_ITEM_SIZES))
print(" Device local memory:", device.get_info(cl.device_info.LOCAL_MEM_SIZE)//1024, 'KB')
    def getWeightLen(self):
        # inputs*nodes (hidden weights) + nodes (hidden biases) + nodes (output weights) + 1 (output bias)
        return (self.data.shape[1]+2)*self.nodes+1
def getWeightsToRegularize(self):
tmp=np.zeros(self.getWeightLen(), dtype=np.bool)
tmp[:self.data.shape[1]*self.nodes]=True
tmp[-self.nodes-1:-1]=True
return tmp
def setWeights(self, weights=None):
if weights is None:
weights=np.random.normal(loc=0, scale=1, size=self.getWeightLen())
#weights=np.linspace(1, self.getWeightLen(), self.getWeightLen())
if len(weights.shape)==1:
weights=np.expand_dims(weights, axis=0)
self.weightsOpenCL=np.reshape(weights, (-1,))
tmp=self.data.shape[1]*self.nodes
wHL=np.reshape(weights[:, :tmp], (-1, self.data.shape[1], self.nodes))
bHL=np.reshape(weights[:, tmp:tmp+self.nodes], (-1, self.nodes))
tmp+=self.nodes
wOL=np.reshape(weights[:, tmp:tmp+self.nodes].T, (self.nodes, -1))
bOL=np.reshape(weights[:, -1], (-1, 1))
self.weights=Weights(wHL, bHL, wOL, bOL)
self.weightsOpenCL=weights
def compute(self, X=[]):
if len(X)==0:
X=self.data
else:
pass
originalLength=X.shape[0]
originalWidth=self.weightsOpenCL.shape[0]
if not self.openCL.active:
networks=self.weights.wHL.shape[0]
phiOL=np.empty((X.shape[0], networks))
for i0 in range(networks):
aHL=X.dot(self.weights.wHL[i0,:,:])+np.tile(self.weights.bHL[i0,],(X.shape[0],1))
phiHL=self._activate(aHL,0)
aOL=phiHL.dot(self.weights.wOL[:,i0])+self.weights.bOL[i0,]
phiOL[:,i0]=self._activate(aOL,1)
else:
remData=np.remainder(X.shape[0],self.openCL.workGroup[0])
if remData != 0:
X=np.vstack((X, np.zeros((self.openCL.workGroup[0]-remData, X.shape[1]))))
else:
remData=self.openCL.workGroup[0]
remNetwork=np.remainder(self.weightsOpenCL.shape[0],self.openCL.workGroup[1])
if remNetwork != 0:
weights=np.vstack((self.weightsOpenCL, np.zeros((self.openCL.workGroup[1]-remNetwork, self.weightsOpenCL.shape[1]))))
else:
weights=self.weightsOpenCL
remNetwork=self.openCL.workGroup[1]
XOpenCL=X.reshape(-1, order = 'C').astype(np.float32)
weightsOpenCL=weights.reshape(-1, order = 'C').astype(np.float32)
mf = cl.mem_flags
inputs=np.int32(X.shape[1])
nodes=np.int32(self.nodes)
dataSize=np.int32(X.shape[0])
weightSize=np.int32(self.weightsOpenCL.shape[1])
dataBuffer = cl.Buffer(self.openCL.ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=XOpenCL)
weightsBuffer = cl.Buffer(self.openCL.ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=weightsOpenCL)
outBuffer = cl.Buffer(self.openCL.ctx, mf.WRITE_ONLY, int(XOpenCL.nbytes/inputs*weights.shape[0]))
kernel=self.openCL.prg.ann
globalSize=(int(X.shape[0]), int(weights.shape[0]))
localSize=(int(self.openCL.workGroup[0]), int(self.openCL.workGroup[1]))
kernel(self.openCL.queue, globalSize, localSize, inputs, nodes, dataSize, weightSize, dataBuffer, outBuffer, weightsBuffer, cl.LocalMemory(self.weightsOpenCL[0,].nbytes*localSize[1]))
phiOL = np.empty((np.prod(globalSize),)).astype(np.float32)
cl.enqueue_copy(self.openCL.queue, phiOL, outBuffer)
phiOL=np.reshape(phiOL, globalSize, order='F')[:originalLength,:originalWidth]
return phiOL
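# Hedged usage sketch (not part of the original file): evaluates the network on random data with
# OpenCL disabled, so no GPU driver is needed (pyopencl and scipy must still be importable).
# The data size, node count and seed are illustrative assumptions.
if __name__ == '__main__':
    np.random.seed(0)
    X = np.random.rand(20, 3)            # 20 samples, 3 inputs
    model = ann(X, nodes=5, openCL=False)
    out = model.compute()                # one column per weight vector
    print(out.shape)                     # expected: (20, 1)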
```
#### File: ADAPT-DB/gpu/crowding.py
```python
import numpy as np
import pyopencl as cl
from collections import namedtuple
import pkg_resources
def phenCrowdingNSGAII(*args, **kwargs):
    '''
    Phenotype (objective-space) crowding distance as used in NSGA-II.
    Accepts one array per objective and, optionally, a 'fronts' keyword with precomputed fronts.
    '''
if 'fronts' in kwargs:
fronts=kwargs['fronts']
else:
fronts=list((range(0, len(args[0])),))
distance=np.zeros_like(args[0])
for m0 in args:
if type(fronts)==type([]):
for l0 in fronts:
idx=np.array(l0)
x=m0[idx]
tmpSortIdx=np.argsort(x)
tmpSort=x[tmpSortIdx]
if (tmpSort[-1]-tmpSort[0])!=0:
distance[idx[tmpSortIdx[1:-1]]]+=(tmpSort[2:]-tmpSort[:-2])/(tmpSort[-1]-tmpSort[0])
else:
distance[idx[tmpSortIdx[1:-1]]]=0
distance[idx[tmpSortIdx[0]]]=distance[idx[tmpSortIdx[-1]]]=np.Inf
else:
for i0 in np.sort(np.unique(fronts)):
idx=np.where(fronts==i0)[0]
x=m0[idx]
tmpSortIdx=np.argsort(x)
tmpSort=x[tmpSortIdx]
if (tmpSort[-1]-tmpSort[0])!=0:
distance[idx[tmpSortIdx[1:-1]]]+=(tmpSort[2:]-tmpSort[:-2])/(tmpSort[-1]-tmpSort[0])
else:
distance[idx[tmpSortIdx[1:-1]]]=0
distance[idx[tmpSortIdx[0]]]=distance[idx[tmpSortIdx[-1]]]=np.Inf
return distance
def genCrowding(nonExceedance, genes, **kwargs):
    '''
    Genotype (decision-space) crowding: for each front, an inverse-distance similarity between
    chromosomes that are close to each other in the non-exceedance ordering (within a sliding window).
    '''
window=0.05
window=int(len(nonExceedance)*window)
if 'fronts' in kwargs:
fronts=kwargs['fronts']
else:
fronts=list((range(0, len(nonExceedance)),))
results=list()
for l0 in fronts:
tmpFront=np.array(l0)
sortIdxs=np.argsort(nonExceedance[tmpFront])
        # index genes through the front so the rows match the front-local sort order
        tmpGenes = genes[tmpFront[sortIdxs], ]
tmpResults=np.zeros((len(l0),len(l0)))
for i0 in range(len(l0)):
for i1 in range(i0+1, i0+window):
if i1>=len(l0):
break
tmpResults[i0,i1]=1.0/(0.1+np.linalg.norm(tmpGenes[i0,]-tmpGenes[i1,], ord=2))
tmpResults+=tmpResults.T
results.append(tmpResults)
return results
class crowdingOpenCL(object):
    '''
    OpenCL-accelerated genotype crowding (pairwise inverse-distance similarity between chromosomes).
    '''
    # note: namedtuple's 'verbose' argument was removed in Python 3.7
    openCL = namedtuple('openCL', ('active', 'devList', 'ctx', 'prg', 'queue', 'verbose'), rename=False)
sizes=dict()
def __init__(self, workGroup=(16, 16), platform=0, deviceType='ALL', verbose=0):
self.openCL.workGroup=workGroup
self.openCL.platform=platform
tmp={'ALL': cl.device_type.ALL, 'CPU': cl.device_type.CPU, 'GPU': cl.device_type.GPU}
self.openCL.type=tmp[deviceType]
self.openCL.verbose=verbose
self._prepOpenCL()
def _prepOpenCL(self):
platform=cl.get_platforms()[self.openCL.platform]
self.openCL.devList= platform.get_devices(device_type=self.openCL.type)
self.openCL.ctx = cl.Context(devices=self.openCL.devList)
with open ("crowding.cl", "r") as kernelFile:
kernelStr=kernelFile.read()
self.openCL.prg = cl.Program(self.openCL.ctx, kernelStr).build()
self.openCL.queue = cl.CommandQueue(self.openCL.ctx)
if self.openCL.verbose!=0:
print("===============================================================")
print("Platform name:", platform.name)
print("Platform profile:", platform.profile)
print("Platform vendor:", platform.vendor)
print("Platform version:", platform.version)
for device in self.openCL.devList:
print("---------------------------------------------------------------")
print(" Device name:", device.name)
print(" Device type:", cl.device_type.to_string(device.type))
print(" Device memory: ", device.global_mem_size//1024//1024, 'MB')
print(" Device max clock speed:", device.max_clock_frequency, 'MHz')
print(" Device compute units:", device.max_compute_units)
print(" Device max work items:", device.get_info(cl.device_info.MAX_WORK_ITEM_SIZES))
print(" Device local memory:", device.get_info(cl.device_info.LOCAL_MEM_SIZE)//1024, 'KB')
def _increment(self, base, interval):
tmp=base%interval
if tmp==0:
return (base, 0)
else:
return (base+interval-tmp, interval-tmp)
def reshapeData(self, genes):
self.sizes['originalGenes'], self.sizes['chromosomeLength']=genes.shape
self.sizes['reshaped0'], self.sizes['add0']=self._increment(self.sizes['originalGenes'], self.openCL.workGroup[0])
self.sizes['reshaped1'], self.sizes['add1']=self._increment(self.sizes['originalGenes'], self.openCL.workGroup[1])
if self.openCL.verbose!=0:
print('Vertical array adjustment: +%.1f%% (%ux %u items)' % (self.sizes['add0']/self.sizes['originalGenes']*100, self.sizes['reshaped0']//self.openCL.workGroup[0], self.openCL.workGroup[0]))
print('Horizontal array adjustment: +%.1f%% (%ux %u items)' % (self.sizes['add1']/self.sizes['originalGenes']*100, self.sizes['reshaped1']//self.openCL.workGroup[1], self.openCL.workGroup[1]))
def compute(self, genes, window):
genesOpenCL=genes.reshape(-1, order = 'C').astype(np.float32)
globalSize=(int(self.sizes['reshaped0']), int(self.sizes['reshaped1']))
localSize=(int(self.openCL.workGroup[0]), int(self.openCL.workGroup[1]))
mf = cl.mem_flags
base=np.float32(2)
chromosomeLength=np.int32(self.sizes['chromosomeLength'])
lim0=np.int32(self.sizes['originalGenes'])
lim1=np.int32(self.sizes['originalGenes'])
genesBuffer = cl.Buffer(self.openCL.ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=genesOpenCL)
outBuffer = cl.Buffer(self.openCL.ctx, mf.WRITE_ONLY, int((self.sizes['originalGenes']**2)*np.int32(1).nbytes))
kernel=self.openCL.prg.genCrowding
kernel(self.openCL.queue, globalSize, localSize,
base, chromosomeLength, lim0, lim1,
genesBuffer,
outBuffer)
crowding = np.empty((self.sizes['originalGenes']**2,)).astype(np.float32)
cl.enqueue_copy(self.openCL.queue, crowding, outBuffer)
crowding=np.reshape(crowding, (self.sizes['originalGenes'], -1), order='F')
return crowding
class crowdingPhenCorrOpenCl(object):
    '''
    OpenCL-accelerated phenotype crowding computed on the simulated series (correl.cl kernel).
    '''
    # note: namedtuple's 'verbose' argument was removed in Python 3.7
    openCL = namedtuple('openCL', ('active', 'devList', 'ctx', 'prg', 'queue', 'verbose'), rename=False)
sizes=dict()
def __init__(self, workGroup=(16, 16), platform=0, deviceType='ALL', verbose=0):
self.openCL.workGroup=workGroup
self.openCL.platform=platform
tmp={'ALL': cl.device_type.ALL, 'CPU': cl.device_type.CPU, 'GPU': cl.device_type.GPU}
self.openCL.type=tmp[deviceType]
self.openCL.verbose=verbose
self._prepOpenCL()
def _prepOpenCL(self):
platform=cl.get_platforms()[self.openCL.platform]
self.openCL.devList= platform.get_devices(device_type=self.openCL.type)
self.openCL.ctx = cl.Context(devices=self.openCL.devList)
kernelStr=pkg_resources.resource_string(__name__, 'correl.cl')#@UndefinedVariable
        self.openCL.prg = cl.Program(self.openCL.ctx, kernelStr.decode('UTF-8')).build()
self.openCL.queue = cl.CommandQueue(self.openCL.ctx)
if self.openCL.verbose!=0:
print("===============================================================")
print("Platform name:", platform.name)
print("Platform profile:", platform.profile)
print("Platform vendor:", platform.vendor)
print("Platform version:", platform.version)
for device in self.openCL.devList:
print("---------------------------------------------------------------")
print(" Device name:", device.name)
print(" Device type:", cl.device_type.to_string(device.type))
print(" Device memory: ", device.global_mem_size//1024//1024, 'MB')
print(" Device max clock speed:", device.max_clock_frequency, 'MHz')
print(" Device compute units:", device.max_compute_units)
print(" Device max work items:", device.get_info(cl.device_info.MAX_WORK_ITEM_SIZES))
print(" Device local memory:", device.get_info(cl.device_info.LOCAL_MEM_SIZE)//1024, 'KB')
def _increment(self, base, interval):
tmp=base%interval
if tmp==0:
return (base, 0)
else:
return (base+interval-tmp, interval-tmp)
def reshapeData(self, simulations):
self.sizes['originalSimulations'], self.sizes['simulationsLength']=simulations.shape
self.sizes['reshaped0'], self.sizes['add0']=self._increment(self.sizes['originalSimulations'], self.openCL.workGroup[0])
self.sizes['reshaped1'], self.sizes['add1']=self._increment(self.sizes['originalSimulations'], self.openCL.workGroup[1])
if self.openCL.verbose!=0:
print('Vertical array adjustment: +%.1f%% (%ux %u items)' % (self.sizes['add0']/self.sizes['originalSimulations']*100, self.sizes['reshaped0']//self.openCL.workGroup[0], self.openCL.workGroup[0]))
print('Horizontal array adjustment: +%.1f%% (%ux %u items)' % (self.sizes['add1']/self.sizes['originalSimulations']*100, self.sizes['reshaped1']//self.openCL.workGroup[1], self.openCL.workGroup[1]))
def compute(self, simulations, window):
simulationsOpenCL=simulations.reshape(-1, order = 'C').astype(np.float32)
globalSize=(int(self.sizes['reshaped0']), int(self.sizes['reshaped1']))
localSize=(int(self.openCL.workGroup[0]), int(self.openCL.workGroup[1]))
mf = cl.mem_flags
base=np.float32(2)
chromosomeLength=np.int32(self.sizes['simulationsLength'])
lim0=np.int32(self.sizes['originalSimulations'])
lim1=np.int32(self.sizes['originalSimulations'])
window=np.int32(window)
simulationsBuffer = cl.Buffer(self.openCL.ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=simulationsOpenCL)
outBuffer = cl.Buffer(self.openCL.ctx, mf.WRITE_ONLY, int((self.sizes['originalSimulations']**2)*np.int32(1).nbytes))
kernel=self.openCL.prg.phenCrowding
kernel(self.openCL.queue, globalSize, localSize,
base, chromosomeLength, window, lim0, lim1,
simulationsBuffer,
outBuffer)
crowding = np.zeros((self.sizes['originalSimulations']**2,)).astype(np.float32)
cl.enqueue_copy(self.openCL.queue, crowding, outBuffer)
crowding=np.reshape(crowding, (self.sizes['originalSimulations'], -1), order='F')
return crowding
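# Hedged usage sketch (not part of the original file): NSGA-II style crowding distance for two
# objective vectors treated as a single front; boundary solutions get an infinite distance.
# The objective values below are illustrative assumptions (pyopencl must be importable to load
# this module, but no GPU is needed for this function).
if __name__ == '__main__':
    f1 = np.array([0.1, 0.4, 0.7, 0.9])
    f2 = np.array([0.9, 0.6, 0.3, 0.1])
    distances = phenCrowdingNSGAII(f1, f2)
    print(distances)  # [inf, 1.5, 1.25, inf]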
```
#### File: ADAPT-DB/gpu/domination.py
```python
import numpy as np
import matplotlib.pyplot as plt
def paretoSorting(x0, x1):
fronts=list()
idx=np.lexsort((x1, x0))
fronts.append(list())
fronts[-1].append(idx[0])
for i0 in idx[1:]:
if x1[i0]>=x1[fronts[-1][-1]]:
fronts.append(list())
fronts[-1].append(i0)
else:
for i1 in range(0,len(fronts)):
if x1[i0]<x1[fronts[i1][-1]]:
fronts[i1].append(i0)
break
return (fronts, idx)
def doubleParetoSorting(x0, x1):
fronts = [[]]
left = [[]]
right = [[]]
idx = np.lexsort((x1, x0))
idxEdge = np.lexsort((-np.square(x0-0.5), x1))
fronts[-1].append(idxEdge[0])
left[-1].append(x0[idxEdge[0]])
right[-1].append(x0[idxEdge[0]])
for i0 in idxEdge[1:]:
if x0[i0]>=left[-1] and x0[i0]<=right[-1]:
#add a new front
fronts.append([])
left.append([])
right.append([])
fronts[-1].append(i0)
left[-1].append(x0[i0])
right[-1].append(x0[i0])
else:
#check existing fonts
for i1 in range(len(fronts)):
if x0[i0]<left[i1] or x0[i0]>right[i1]:
if x0[i0]<left[i1]:
left[i1] = x0[i0]
fronts[i1].insert(0, i0)
else:
right[i1] = x0[i0]
fronts[i1].append(i0)
break
return (fronts, idx)
def plotFronts(fronts, x0, x1, **kwargs):
fig=plt.figure()
ax=plt.gca()
if 'size' in kwargs:
ax.scatter(x0, x1, c='k', s=kwargs['size'])
else:
ax.plot(x0, x1,'ok')
for l0 in fronts:
tmp0=x0[l0]
tmp1=x1[l0]
ax.plot(tmp0, tmp1,'-')
if 'annotate' in kwargs and kwargs['annotate']:
for label, x, y in zip(range(0,len(x0)), x0, x1):
plt.annotate(
label,
xy = (x, y), xytext = (-10, 10),
textcoords = 'offset points', ha = 'right', va = 'bottom',
arrowprops = dict(arrowstyle = '->', connectionstyle = 'arc3, rad=-0.2'))
return fig
def convexSortingApprox(x0, x1):
''' does not work well '''
fronts0=paretoSorting(x0, x1)[0]
fronts1=paretoSorting(-x0, x1)[0]
minErrIdx=np.argmin(x1)
minErrNE=x0[minErrIdx]
fronts=[]
len0=len(fronts0)
len1=len(fronts1)
for i0 in range(max(len0, len1)):
tmpList=[]
if len0>i0:
tmp=x0[fronts0[i0]]<=minErrNE
tmpList.extend(np.array(fronts0[i0])[tmp])
if len1>i0:
tmp=x0[fronts1[i0]]>minErrNE
tmpList.extend(np.array(fronts1[i0])[tmp])
fronts.append(tmpList)
return fronts
def convexSorting(x0, x1):
#===========================================================================
# fronts, idx=paretoSorting(x0, x1)
#===========================================================================
fronts, idx=doubleParetoSorting(x0, x1)
lastChanged=0
for i0 in range(len(fronts)):
if len(fronts[i0])>0:
for i1 in range(lastChanged-1,i0-1,-1):
tmp=list()
for l0 in reversed(fronts[i1+1]):
if len(fronts[i1])==0 or x0[fronts[i1][-1]]<x0[l0] and x1[fronts[i1][-1]]>x1[l0]:
tmp.insert(0,fronts[i1+1].pop())
if len(tmp)>0:
fronts[i1].extend(tmp)
for i1 in range(i0+1, len(fronts)):
if len(fronts[i1])>0 and x0[fronts[i0][-1]]<x0[fronts[i1][-1]]:
fronts[i0].append(fronts[i1].pop())
lastChanged=i1
#=======================================================================
# if i0 in range(len(fronts)-23,len(fronts)-20):
# plotFronts(fronts, x0, x1)
# plt.show(block=False)
#=======================================================================
for i0 in range(len(fronts)-1,-1,-1):
if len(fronts[i0])==0:
fronts.pop(i0)
return (fronts, idx)
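# Hedged usage sketch (not part of the original file): non-dominated sorting of random 2-objective
# points followed by a plot of the resulting fronts. Sizes and the seed are illustrative assumptions.
if __name__ == '__main__':
    np.random.seed(0)
    x0 = np.random.rand(30)
    x1 = np.random.rand(30)
    fronts, order = paretoSorting(x0, x1)
    print('number of fronts:', len(fronts))
    plotFronts(fronts, x0, x1, annotate=False)
    plt.show()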
```
#### File: ADAPT-DB/tethys/errorViews.py
```python
from django.shortcuts import render_to_response
from django.conf import settings
def badRequest400(request, exception):
context = {'LANG': request.LANGUAGE_CODE,
'LOCAL_JAVASCIPT': settings.LOCAL_JAVASCIPT,
}
if request.path.startswith('/admin/'):
template_name='400.html'
else:
template_name='main/400.html'
    return render_to_response(template_name=template_name, context=context)
def forbidden403(request, exception):
context = {'LANG': request.LANGUAGE_CODE,
'LOCAL_JAVASCIPT': settings.LOCAL_JAVASCIPT,
}
if request.path.startswith('/admin/'):
template_name='403.html'
else:
template_name='main/403.html'
return render_to_response(template_name=template_name, context=context)
def notFound404(request, exception):
context = {'LANG': request.LANGUAGE_CODE,
'LOCAL_JAVASCIPT': settings.LOCAL_JAVASCIPT,
}
if request.path.startswith('/admin/'):
template_name='404.html'
else:
template_name='main/404.html'
return render_to_response(template_name=template_name, context=context)
def serverError500(request):
context = {'LANG': request.LANGUAGE_CODE,
'LOCAL_JAVASCIPT': settings.LOCAL_JAVASCIPT,
}
if request.path.startswith('/admin/'):
template_name='500.html'
else:
template_name='main/500.html'
return render_to_response(template_name=template_name, context=context)
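# Hedged usage note (not part of the original file): these views are typically wired up in the
# project's root urls.py, assuming this module lives at tethys/errorViews.py, e.g.:
#     handler400 = 'tethys.errorViews.badRequest400'
#     handler403 = 'tethys.errorViews.forbidden403'
#     handler404 = 'tethys.errorViews.notFound404'
#     handler500 = 'tethys.errorViews.serverError500'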
```
#### File: ADAPT-DB/timeSeries/admin.py
```python
from django.contrib import admin
from .models import DataType, DataProvider, Location, Series, Value, Forecast, SatelliteData, Colormap
from django.utils.html import format_html, mark_safe
from .satelliteData import TRMMSatelliteRainfall
from django.conf import settings
import datetime as dt
import os
class AdminAutoRecord(admin.ModelAdmin):
def save_model(self, request, obj, form, change):
obj.introducedBy = request.user
obj.save()
def save_formset(self, request, form, formset, change):
if formset.model == DataType:
instances = formset.save(commit=False)
for instance in instances:
instance.introducedBy = request.user
instance.save()
else:
formset.save()
class DataTypeAdmin(AdminAutoRecord):
readonly_fields = ('iconImage',)
fieldsets = [
('Base information', {'fields': (('name', 'abbreviation', 'units', ), 'description'), 'description': 'Base information that characterizes the type of data.'}),
('Display information', {'fields': (('iconImage', 'icon', ),), 'description': 'Figure file that will represent the series in the map. Should be a .png with 60x60px size.'}),
        ('Extra information', {'fields': ('observations', ), 'description': 'Additional information about the data type.', 'classes': ('collapse', )})
]
list_display = ('iconImageSmall', 'name', 'description', 'units')
search_fields = ['name', 'description', 'observations']
list_filter = ('units', 'created')
class DataProviderAdmin(AdminAutoRecord):
readonly_fields = ('iconImage',)
fieldsets = [
('Base information', {'fields': (('name', 'abbreviation', 'country'), ('email', 'website'), 'description'), 'description': 'Base information that characterizes the data provider.'}),
('Display information', {'fields': (('iconImage', 'icon', ),), 'description': 'Logo of the data provider. Should be a .png file.'}),
]
list_display = ('iconImage', 'name', 'description', 'website')
search_fields = ['name', 'description']
list_filter = ('created', )
class LocationAdmin(AdminAutoRecord):
fieldsets = [
('Base information', {'fields': (('name', 'lat', 'lon', ),), 'description': 'Base information that characterizes the location.'}),
('Additional information', {'fields': (('catchment', 'river', 'country',),), 'description': 'Additional information that should be used to characterize the location.'}),
        ('Extra information', {'fields': ('observations', ), 'description': 'Additional information about the location.', 'classes': ('collapse', )})
]
list_display = ('name', 'lat', 'lon', 'country', 'catchment', 'river')
search_fields = ('name', 'country', 'catchment', 'river', 'lat', 'lon', 'observations')
list_filter = ('catchment', 'river', 'created', 'country')
class SeriesAdmin(AdminAutoRecord):
readonly_fields = ['country', 'catchment', 'river', 'encryptionKey']
def get_readonly_fields(self, request, obj=None):
# encryptions and metaEncryptions can only be changed if the model is being created or there are no values associated with it.
if obj:
if len(Value.objects.filter(series_id=obj.id)[:1])==1:
return self.readonly_fields + ['metaEncrypted',]
return self.readonly_fields
fieldsets = [
('Base information', {'fields': (('name', 'location', 'provider',),('type', 'timeStepUnits', 'timeStepPeriod'),), 'description': 'Base information that characterizes the data series.'}),
('Data and encryption', {'fields': (('encryptionKey', 'metaEncrypted'),), 'description': 'Data upload functionality and information about the encryption of the data. Can only be edited in "empty" series.'}),
('Additional information', {'fields': (('catchment', 'river', 'country',),), 'description': 'Additional information that should be used to characterize the series.'}),
        ('Extra information', {'fields': ('observations', ), 'description': 'Additional information about the series.', 'classes': ('collapse', )})
]
list_display = ('name', 'location', 'tYpe', 'timeStep', 'records', 'first', 'last', 'metaEncrypted', 'pRovider')
search_fields = ('name', 'location__name', 'provider__name')
list_filter = ('provider', 'type', 'timeStepUnits', 'metaEncrypted', 'location__catchment', 'location__river', 'location__country')
def pRovider(self, instance):
return format_html('<img height="30" src="/{}"/> ({})', mark_safe(instance.provider.icon), mark_safe(instance.provider.abbreviation))
def tYpe(self, instance):
return format_html('<img height="30" width="30" src="/{}"/> ({})', mark_safe(instance.type.icon), mark_safe(instance.type.name))
def timeStep(self, instance):
return str(instance.timeStepPeriod) + ' ' + instance.timeStepUnits
def country(self, instance):
return instance.location.country.name
def catchment(self, instance):
return instance.location.catchment
def river(self, instance):
return instance.location.river
def records(self, instance):
return Value.objects.filter(series=instance).count()
def first(self, instance):
tmp = Value.objects.filter(series=instance).order_by('date').first()
if tmp:
return tmp.date
else:
return ''
def last(self, instance):
tmp = Value.objects.filter(series=instance).order_by('date').last()
if tmp:
return tmp.date
else:
return ''
class ForecastAdmin(AdminAutoRecord):
readonly_fields = ('ready', 'location', 'variable', 'timeStep', 'errorFunction')
filter_horizontal = ('extraSeries',)
fieldsets = [
('Base information', {'fields': (('name'),
('targetSeries', 'variable', 'timeStep', 'location', 'ready'),), 'description': 'Base information characterizing the forecast and its target series.'}),
('Series to include', {'fields': (('targetExpression'),
('dataExpression'),
        ('extraSeries'),), 'description': 'Choice of the target series and the additional series that should be used as covariates.'}),
('Main parameters', {'fields': (('leadTime', 'period', 'referenceDate',),
('splitBySeason',)), 'description': 'Main parameters to define the forecast.'}),
('Extra parameters', {'fields': (
('regularize', 'type', 'nodes',),
('population','epochs',),
('errorFunction', 'allowNegative',),),
'description': 'Additional parameters to define the forecast.', 'classes': ('collapse', )})
]
list_display = ('name', 'targetSeries', 'variable', 'type', 'leadTime', 'ready', 'location')
search_fields = ('name', 'description', 'introducedBy', 'series', 'location')
list_filter = ('ready',)
def location(self, instance):
return str(instance.targetSeries.location)
def variable(self, instance):
return str(instance.targetSeries.type)
def timeStep(self, instance):
return dict(Series.TIME_STEP_PERIOD_CHOICES)[instance.targetSeries.timeStepUnits] + ' (' + str(instance.targetSeries.timeStepPeriod) + ')'
variable.short_description = 'prediction'
class ColormapAdmin(AdminAutoRecord):
fieldsets = [
('Base information', {'fields': (('name',),
('file',)
),
'description': 'Base information characterizing the colormap.'}),
]
list_display = ('name',)
search_fields = ('name', 'introducedBy')
class SatelliteDataAdmin(AdminAutoRecord):
readonly_fields = ('units', 'timestep', 'productSite', 'downloadSite', 'description', 'readyGeometry')
fieldsets = [
('Base information', {'fields': (('name', 'satellite', 'startDate'),
('geometry', 'readyGeometry'),
('colormap'),
),
'description': 'Base information characterizing the Satellite data.'}),
('Additional information', {'fields': (('units', 'timestep'),
('productSite'),
('downloadSite'),
('description'),
),
'description': 'Additional information characterizing the Satellite product.'}),
('Extra parameters', {'fields': (('observations',),
),
'description': 'Additional information.', 'classes': ('collapse', )})
]
list_display = ('name', 'satellite', 'timestep', 'units', 'startDate')
search_fields = ('name', 'satellite', 'description', 'introducedBy')
#-------------------------------------------------- list_filter = ('ready',)
def timestepStr(self, instance):
return str(instance.timestep)
def readyGeometry(self, instance):
        # a geometry is ready once a non-empty JSON geometry string has been stored
        return len(instance.jsonGeometry) > 0
def save_model(self, request, obj, form, change):
downloadFolder = os.path.join(settings.SATELLITE_DOWNLOAD, obj.satellite)
satelliteObj = eval(obj.satellite + '(dataFolder=obj.dataFolder, downloadFolder=downloadFolder)')
# properties from the satellite data class
obj.productSite = satelliteObj.productSite
obj.downloadSite = satelliteObj.downloadSite
obj.description = satelliteObj.description
obj.timestep = str(satelliteObj.timestep[list(satelliteObj.timestep.keys())[0]]) + ' (' + list(satelliteObj.timestep.keys())[0] + ')'
obj.units = satelliteObj.units
# introduced by
obj.introducedBy = request.user
# change dataFolder
tmp = os.path.split(obj.dataFolder)
if tmp[-1]=='__unknonwn__':
obj.dataFolder = os.path.join(tmp[0], obj.name)
# change start time
obj.startDate = obj.startDate.replace(tzinfo=None)
obj.save()
admin.site.register(DataType, DataTypeAdmin)
admin.site.register(DataProvider, DataProviderAdmin)
admin.site.register(Location, LocationAdmin)
admin.site.register(Series, SeriesAdmin)
admin.site.register(Forecast, ForecastAdmin)
admin.site.register(Colormap, ColormapAdmin)
admin.site.register(SatelliteData, SatelliteDataAdmin)
```
#### File: ADAPT-DB/timeSeries/satelliteData.py
```python
import datetime as dt
import numpy as np
import matplotlib.pyplot as plt
import os
import warnings
import ntpath
import tempfile
import gzip
import shutil
import sys
import re
import geojson
import json
from urllib.request import urlopen
from multiprocessing.dummy import Pool as ThreadPool
from netCDF4 import Dataset, num2date, date2num
from dateutil.relativedelta import relativedelta
class SatelliteData(object):
    '''
    Base class for downloading gridded satellite data, storing it as monthly NetCDF files
    and serving it for further processing.
    '''
filePrefix = 'unknown'
precision = np.single
significantDigits = None
downloadFailThreshold = 50000
productSite = 'unknown'
downloadSite = 'unknown'
description = 'none'
timestep = {}
units = 'unknown'
def __init__(self, dataFolder, downloadFolder):
'''
Constructor
'''
self.downloadFolder = downloadFolder
self.dataFolder = os.path.join(dataFolder, self.filePrefix)
if not os.path.isdir(self.dataFolder):
os.makedirs(self.dataFolder)
if not os.path.isdir(self.downloadFolder):
os.makedirs(self.downloadFolder)
self._listData()
def downloadList(self, dateIni, dateEnd):
'''
abstract method
Returns a tuple defining files to be downloaded. It should contain:
a list of file names on disk and
a list of urls for download.
'''
pass
def downloadedDates(self, fileType):
'''
abstract method
Returns a tuple containing:
            a list of files in the folder that have a given extension, and
a list of dates corresponding to each file
'''
pass
def importData(self, fileName):
'''
abstract method
Returns:
'''
pass
def getData(self, dateIni, dateEnd):
# load data
self.process(dateIni=dateIni, dateEnd=dateEnd, download=False, read=False)
# export data
if 'loaded' in self.__dict__.keys():
return self.loaded
else:
return {}
def getDataForJSON(self, dateIni, dateEnd, returnData=True, returnInfo=True):
# get data
data = self.getData(dateIni, dateEnd)
        # trim data (guard first: getData returns an empty dict when nothing is available)
        if len(data) > 0:
            idxs = np.where((np.nansum(data['data'] + 1, axis=0) != 0).ravel())[0]
            idxsList = idxs.tolist()
            data['dates'] = [dt.isoformat() for dt in data['dates']]
data['missing'] = data['missing'].tolist()
if returnInfo:
data['lon'] = data['lon'].tolist()
data['lat'] = data['lat'].tolist()
data['idxs'] = idxsList
else:
data.pop('lon', None)
data.pop('lat', None)
data.pop('idxs', None)
if returnData:
tmp = []
for i0 in range(data['data'].shape[0]):
tmpValidData = data['data'][i0,:,:].ravel()[idxsList]
tmpValidData[np.isnan(tmpValidData)] = -999;
tmpPositiveIdxs = np.where(tmpValidData!=0)[0]
tmp.append({'idxs': idxs[tmpPositiveIdxs].tolist(), 'values': tmpValidData[tmpPositiveIdxs].tolist()})
data['data'] = tmp
else:
data.pop('data', None)
return data
else:
return {}
def download(self, dateIni, dateEnd, threads=3):
# Call data-specific method to define file names and download urls
fileList, urlList = self.downloadList(dateIni, dateEnd)
# File list
toDownload = []
for i0 in range(len(fileList)):
if not os.path.isfile(fileList[i0]):
toDownload.append((fileList[i0], urlList[i0]))
ctr = 0
failed = []
notFound = []
while ctr==0 or (ctr < 4 and len(failed)>0):
# Download files
downloadSizes = []
if len(toDownload)>0:
pool = ThreadPool(threads)
toDownloadSplit = [toDownload[i0:i0+threads] for i0 in range(0, len(toDownload), threads)]
tmpBarLen = len(toDownloadSplit)
if ctr==0:
print('Downloading files:')
else:
warnings.warn(str(len(failed)) + ' failed download(s)...', UserWarning)
print('Reattempting failed downloads (' + str(ctr) + '):')
for i0, l0 in enumerate(toDownloadSplit):
self._printProgress(i0, tmpBarLen, prefix = 'Progress:', suffix = 'Complete', barLength = 50)
tmp = pool.map(self._downloadFile, l0)
downloadSizes.extend(tmp)
self._printProgress(tmpBarLen, tmpBarLen, prefix = 'Progress:', suffix = 'Complete', barLength = 50)
pool.close()
pool.join()
# Check sizes and delete failed ones
failed = []
for i0, s0 in enumerate(downloadSizes):
if s0<0:
if os.path.isfile(toDownload[i0][0]):
os.remove(toDownload[i0][0])
notFound.append((toDownload[i0][0], toDownload[i0][1]))
elif s0<self.downloadFailThreshold:
if os.path.isfile(toDownload[i0][0]):
os.remove(toDownload[i0][0])
failed.append((toDownload[i0][0], toDownload[i0][1]))
toDownload = failed
ctr += 1
if len(failed)>0:
            warnings.warn('Permanently failed download(s). Re-run the download method and consider reducing the number of threads:\n' + str([f0[0] for f0 in failed]), UserWarning)
if len(notFound)>0:
            warnings.warn('Download file(s) not found. The files may not be available yet:\n' + str([f0[0] for f0 in notFound]), UserWarning)
# return halt signal
if len(urlList)>0 and len(notFound)==len(urlList):
return True
else:
return False
def readDownloads(self, dates, geometryFile=None, threads=1, geometryStr=''):
'''
Reads the downloaded files using methods specific to the subclasses
'''
# retrieve a list of filenames and dates
filePaths, fileDates = self.downloadedDates('.gz')
# find which dates are covered by files
existingFiles = []
existingDates = []
for d0 in dates:
if d0 in fileDates:
idx = fileDates.index(d0)
existingFiles.append(filePaths[idx])
existingDates.append(fileDates[idx])
# create a temporary folder
self.tmpFolder = tempfile.mkdtemp(prefix='tmp__', dir=self.dataFolder)
try:
# interpret first file in the list and create the downloaded dictionary
self.downloaded = {}
self.downloaded['dates'] = dates
tmpData, self.downloaded['lat'], self.downloaded['lon']=self.importData(existingFiles[0])
self.downloaded['data']=np.empty((len(dates), tmpData.shape[1], tmpData.shape[2]), dtype=self.precision)
self.downloaded['data'][:] = np.nan
self.downloaded['missing'] = np.ones((len(dates),), dtype=np.bool)
self.downloaded['data'][0, :,:]=tmpData
self.downloaded['missing'][0] = False
# interpret all the remaining files
existingFiles.pop(0)
existingDates.pop(0)
with ThreadPool(threads) as pool:
toInterpretSplit = [existingFiles[i0:i0+threads] for i0 in range(0, len(existingFiles), threads)]
tmpBarLen = len(toInterpretSplit)
print('Reading files:')
tmp = []
for i0, l0 in enumerate(toInterpretSplit):
self._printProgress(i0, tmpBarLen, prefix = 'Progress:', suffix = 'Complete', barLength = 50)
tmp.extend(pool.map(self.importData, l0))
self._printProgress(tmpBarLen, tmpBarLen, prefix = 'Progress:', suffix = 'Complete', barLength = 50)
except Exception as ex:
raise(ex)
finally:
os.rmdir(self.tmpFolder)
# store interpretations
for i0, t0 in enumerate(tmp):
idx = dates.index(existingDates[i0])
self.downloaded['data'][idx, :,:] = t0[0]
self.downloaded['missing'][idx] = False
# define indexes
if geometryStr!='':
self.setGeometryInfo(geometryStr)
elif 'geometryInfo' not in self.__dict__.keys():
self.geometryInfo = self._getGeometyIdxs(lat=self.downloaded['lat'], lon=self.downloaded['lon'], filePath=geometryFile)
# crop data
self.downloaded['lat'] = self.geometryInfo['lat']
self.downloaded['lon'] = self.geometryInfo['lon']
tmp = np.empty((len(dates), self.geometryInfo['lat'].shape[0], self.geometryInfo['lon'].shape[0]), dtype=self.precision)
tmp[:] = np.nan
tmp[:, self.geometryInfo['idxReduced'][0], self.geometryInfo['idxReduced'][1]] = self.downloaded['data'][:, self.geometryInfo['idxOriginal'][0], self.geometryInfo['idxOriginal'][1]]
self.downloaded['data'] = tmp
def update(self, download=True, threadsDownload=3, threadsRead=1, geometryFile=None, geometryStr=''):
year = max(self.netCDFDict.keys())
month = max(self.netCDFDict[year].keys())
self.store(dateIni=dt.datetime(year, month, 1), dateEnd=dt.datetime.now(), download=True, threadsDownload=threadsDownload, threadsRead=threadsRead, geometryFile=geometryFile, geometryStr=geometryStr)
def store(self, dateIni=dt.datetime(1998, 1, 1, 0), dateEnd=dt.datetime.now(), download=True, threadsDownload=3, threadsRead=1, geometryFile=None, geometryStr=''):
dates = self._filePeriod(dateIni=dateIni, dateEnd=dateEnd)
monthIdxs = np.array(self._splitByMonth(dates))
tmp = [np.where(monthIdxs==m0)[0][0] for m0 in np.unique(monthIdxs)]
sortedIdxs = sorted(range(len(tmp)), key=lambda i0: tmp[i0])
tmpPeriods = len(sortedIdxs)
for i0, m0 in enumerate(sortedIdxs):
tmp = np.where(monthIdxs==m0)[0]
monthDates = np.array(dates)[tmp]
dateIni = np.min(monthDates)
dateEnd = np.max(monthDates)
print('Storing %02u/%04u...' % (dateIni.month, dateIni.year))
halt = self.process(dateIni=dateIni, dateEnd=dateEnd, download=download, threadsDownload=threadsDownload, threadsRead=threadsRead, geometryFile=geometryFile, geometryStr=geometryStr)
if halt:
warnings.warn('Data retrieval process halted before end date.', UserWarning)
break
self.save()
self.__dict__.pop('loaded', None)
if 'downloaded' in self.__dict__.keys():
self.__dict__.pop('downloaded', None)
self._listData()
def process(self, dateIni=dt.datetime(1998, 1, 1, 0), dateEnd=dt.datetime.now(), download=True, read=True, threadsDownload=3, threadsRead=1, geometryFile=None, geometryStr=''):
'''
        Reads the downloaded files, interpolates missing values in time and merges the result with the data already stored.
'''
# Load existing NetCDFs (to self.loaded)
self.load(dateIni, dateEnd)
# Download if needed
if download:
halt = self.download(dateIni=dateIni, dateEnd=dateEnd, threads=threadsDownload)
if halt:
return halt
# Process downloads
if read:
dateList = self._notProcessed(self._filePeriod(dateIni=dateIni, dateEnd=dateEnd))
if len(dateList)>0:
self.readDownloads(dateList, geometryFile=geometryFile, geometryStr=geometryStr, threads=threadsRead)
# Check if loaded and downloaded are compatible
if 'loaded' in self.__dict__:
lat = self.loaded['lat']
lon = self.loaded['lon']
if 'downloaded' in self.__dict__:
if not (lat==self.downloaded['lat']).all() or not (lon==self.downloaded['lon']).all():
raise Exception('Stored and downloaded coordinates do not match.')
else:
lat = self.downloaded['lat']
lon = self.downloaded['lon']
# Interpolates the missing values in the matrix. The interpolation is made just on the time dimension
# Loop through all x - axis 0 of the matrix
if 'downloaded' in self.__dict__:
tmplat = self.downloaded['data'].shape[1]
print('Interpolating missing data:')
for i0 in range(self.downloaded['data'].shape[1]):
self._printProgress(i0, tmplat, prefix = 'Progress:', suffix = 'Complete', barLength = 50)
# Loop through all y - axis 1 of the matrix
for i1 in range(self.downloaded['data'].shape[2]):
# Temporary array with all precipitation values (z axis) for a given lat and lon (x and y axis)
tmp = np.squeeze(self.downloaded['data'][:, i0, i1])
nans = np.isnan(tmp)
tmpNanSum = np.sum(nans)
if tmpNanSum>0 and tmpNanSum!=nans.shape[0]:
# Creates an array with the size of the temporary but with values that correspond to the axis [0,1,2..., n]
idx = np.arange(len(tmp))
valid = np.logical_not(nans)
# The interpolate function requires the index of the points to interpolate (idx[nans]),
# the index of the points with valid values (idx[valid]) and
# the valid values tha will be used to interpolate (tmp[valid])
self.downloaded['data'][nans, i0, i1]=np.interp(idx[nans], idx[valid], tmp[valid])
self._printProgress(tmplat, tmplat, prefix = 'Progress:', suffix = 'Complete', barLength = 50)
# Join downloads and stored data (loaded)
if 'loaded' not in self.__dict__.keys():
dates = self._filePeriod(dateIni=dateIni, dateEnd=dateEnd)
self.loaded = {}
self.loaded['lat'] = lat
self.loaded['lon'] = lon
self.loaded['dates'] = dates
self.loaded['data'] = np.empty((len(dates), len(lat), len(lon)), dtype=self.precision)
self.loaded['data'][:] = np.nan
self.loaded['missing'] = np.ones((len(dates),), dtype=np.bool)
if 'downloaded' in self.__dict__.keys():
idxsLoaded = self._ismember(self.downloaded['dates'], self.loaded['dates'])
idxsDownloaded = self._ismember(self.loaded['dates'], self.downloaded['dates'])
self.loaded['data'][idxsLoaded, :, :] = self.downloaded['data'][idxsDownloaded, :, :]
self.loaded['missing'][idxsLoaded] = self.downloaded['missing'][idxsDownloaded]
def plot(self):
mean=np.flipud(np.nanmean(self.loaded['data'], 0)*365*8)
ax = plt.matshow(mean)
plt.colorbar(ax)
plt.show(block=True)
def save(self, overwriteAll=False, overwriteIncomplete=True):
'''
Splits the data in blocks of 1 month and stores them in NetCDF files
'''
tmpDates = np.array(self.loaded['dates'])
monthIdxs = np.array(self._splitByMonth(self.loaded['dates']))
uniqueMonthIdxs = np.unique(monthIdxs)
print('Saving NetCDFs:')
tmpPeriods = len(uniqueMonthIdxs)
for c0, i0 in enumerate(uniqueMonthIdxs):
self._printProgress(c0, tmpPeriods, prefix = 'Progress:', suffix = 'Complete', barLength = 50)
tmp = np.where(monthIdxs==i0)[0]
monthDates = tmpDates[tmp]
if not overwriteAll:
if monthDates[0].year in self.netCDFDict.keys() and monthDates[0].month in self.netCDFDict[ monthDates[0].year].keys():
if self.netCDFDict[monthDates[0].year][monthDates[0].month][1]==True:
# prevents complete files from being overwritten
continue
else:
# incomplete file
if not overwriteIncomplete:
# prevents overwriting
continue
monthData = self.loaded['data'][tmp, :, :]
monthMissing = self.loaded['missing'][tmp]
rootgrp = Dataset(os.path.join(self.dataFolder, self.filePrefix + '_%04d.%02d.nc' % (monthDates[0].year, monthDates[0].month)), 'w', format='NETCDF4', clobber=True)
time = rootgrp.createDimension('time', None)
lat = rootgrp.createDimension('lat', monthData.shape[1])
lon = rootgrp.createDimension('lon', monthData.shape[2])
times = rootgrp.createVariable('time', np.double, dimensions=('time',), zlib=True)
lats = rootgrp.createVariable('lat', np.double, dimensions=('lat',), zlib=True)
lons = rootgrp.createVariable('lon', np.double, dimensions=('lon',), zlib=True)
precips = rootgrp.createVariable('precipitation', self.precision, dimensions=('time', 'lat', 'lon'), zlib=True, least_significant_digit=self.significantDigits)
missing = rootgrp.createVariable('missing', np.int8, dimensions=('time'), zlib=True)
rootgrp.description = 'Rainfall data (' + self.filePrefix + ')'
rootgrp.history = 'Created the ' + str(dt.datetime.now())
lats.units = 'degrees of the center of the pixel (WGS84)'
lons.units = 'degrees of the center of the pixel (WGS84)'
times.units = "hours since 0001-01-01 00:00:00.0"
times.calendar = 'standard'
precips.units = 'mm of rain accumulated over a 3-hour interval centered on the time reference [-1.5, +1.5]'
# Check completeness
tmp = self._filePeriod(dateIni=monthDates[-1] - relativedelta(months=1), dateEnd=monthDates[0] + relativedelta(months=1))
tmp = [dt0 for dt0 in tmp if dt0.month==monthDates[0].month and dt0.year==monthDates[0].year]
if len(self._ismember(tmp, monthDates)) == len(tmp):
# The month is complete
if np.all(np.logical_not(monthMissing)):
rootgrp.complete = 1
else:
rootgrp.complete = 0
else:
# The month is not complete
rootgrp.complete = 0
if rootgrp.complete==0:
warnings.warn(' netCDF not complete (' + self.filePrefix + '_%04d.%02d.nc' % (monthDates[0].year, monthDates[0].month) + ').', UserWarning)
lats[:] = self.loaded['lat']
lons[:] = self.loaded['lon']
times[:] = date2num(monthDates, units=times.units, calendar=times.calendar)
precips[:, :, :] = monthData
missing[:] = monthMissing
rootgrp.close()
self._printProgress(tmpPeriods, tmpPeriods, prefix = 'Progress:', suffix = 'Complete', barLength = 50)
def load(self, dateIni=dt.datetime(1998, 1, 1, 0), dateEnd=dt.datetime.now()):
'''
Loads the data from 1-month NetCDF files into a numpy array
'''
dates = self._filePeriod(dateIni=dateIni, dateEnd=dateEnd)
yearMonth = list(set([(dt.year, dt.month) for dt in dates]))
print('Attempting to load NetCDFs:')
tmpPeriods = len(yearMonth)
data = None
for i0, t0 in enumerate(yearMonth):
self._printProgress(i0, tmpPeriods, prefix = 'Progress:', suffix = 'Complete', barLength = 50)
if t0[0] in self.netCDFDict.keys() and t0[1] in self.netCDFDict[t0[0]].keys():
tmp = self._loadNetCDF(os.path.join(self.dataFolder, self.netCDFDict[t0[0]][t0[1]][0]))
self.netCDFDict[t0[0]][t0[1]][1] = tmp['complete']
if 'loaded' not in self.__dict__:
self.loaded = {}
self.loaded['dates'] = dates
self.loaded['lat'] = tmp['lat']
self.loaded['lon'] = tmp['lon']
self.loaded['data'] = np.empty((len(dates), len(self.loaded['lat']), len(self.loaded['lon'])), dtype=self.precision)
self.loaded['data'][:] = np.nan
self.loaded['missing'] = np.ones((len(dates),), dtype=np.bool)
idxsLoaded = np.array(self._ismember(tmp['dates'], self.loaded['dates']))
idxsTmp = np.array(self._ismember(self.loaded['dates'], tmp['dates']))
self.loaded['data'][idxsLoaded, :, :] = tmp['data'][idxsTmp, :, :]
self.loaded['missing'][idxsLoaded] = tmp['missing'][idxsTmp]
self._printProgress(tmpPeriods, tmpPeriods, prefix = 'Progress:', suffix = 'Complete', barLength = 50)
def getGeometryInfo(self):
if 'geometryInfo' not in self.__dict__.keys():
return ''
else:
tmp = {}
tmp['lat'] = self.geometryInfo['lat'].tolist()
tmp['lon'] = self.geometryInfo['lon'].tolist()
tmp['idxOriginal'] = (self.geometryInfo['idxOriginal'][0].tolist(), self.geometryInfo['idxOriginal'][1].tolist())
tmp['idxReduced'] = (self.geometryInfo['idxReduced'][0].tolist(), self.geometryInfo['idxReduced'][1].tolist())
return json.dumps(tmp)
def setGeometryInfo(self, jsonStr):
if jsonStr != '':
tmp = json.loads(jsonStr)
tmp['lat'] = np.array(tmp['lat'])
tmp['lon'] = np.array(tmp['lon'])
tmp['idxOriginal'] = (np.array(tmp['idxOriginal'][0]), np.array(tmp['idxOriginal'][1]))
tmp['idxReduced'] = (np.array(tmp['idxReduced'][0]), np.array(tmp['idxReduced'][1]))
self.geometryInfo = tmp
def _getGeometyIdxs(self, lat, lon, filePath=None):
if filePath!=None:
# load geometry
with open(filePath, 'r') as myfile:
geojsonStr=myfile.read()
obj = geojson.loads(geojsonStr)
# compute logical matrix of valid pixels
chosenPixels = np.zeros((len(lat), len(lon)),dtype=np.bool)
for f0 in obj['features']:
if f0['type'] != 'Feature':
continue
if f0['geometry']['type'] == 'Polygon':
g0 = f0['geometry']['coordinates']
tmp = self._intersection(lon, lat, [i0[0] for i0 in g0[0]], [i0[1] for i0 in g0[0]])
if len(g0)>1:
for i0 in range(1, len(g0)):
tmp = np.logical_and(tmp, np.logical_not(self._intersection(lon, lat, [i0[0] for i0 in g0[i0]], [i0[1] for i0 in g0[i0]])))
chosenPixels = np.logical_or(chosenPixels, tmp)
elif f0['geometry']['type'] == 'MultiPolygon':
tmp = np.zeros((len(lat), len(lon)),dtype=np.bool)
for g0 in f0['geometry']['coordinates']:
tmp = np.logical_or(tmp, self._intersection(lon, lat, [i0[0] for i0 in g0[0]], [i0[1] for i0 in g0[0]]))
if len(g0)>1:
for i0 in range(1, len(g0)):
tmp = np.logical_and(tmp, np.logical_not(self._intersection(lon, lat, [i0[0] for i0 in g0[i0]], [i0[1] for i0 in g0[i0]])))
chosenPixels = np.logical_or(chosenPixels, tmp)
#=======================================================================
# plt.imshow(np.flipud(chosenPixels), cmap='Greys', interpolation='nearest')
#=======================================================================
# get indexes to retrieve information
geometryInfo = {}
tmp = np.where(chosenPixels!=0)
geometryInfo['lat'] = lat[np.min(tmp[0]):np.max(tmp[0])+1]
geometryInfo['lon'] = lon[np.min(tmp[1]):np.max(tmp[1])+1]
geometryInfo['idxOriginal'] = np.where(chosenPixels)
geometryInfo['idxReduced'] = np.where(chosenPixels[np.min(tmp[0]):np.max(tmp[0])+1, np.min(tmp[1]):np.max(tmp[1])+1])
else:
geometryInfo = {}
geometryInfo['lat'] = lat
geometryInfo['lon'] = lon
tmpLat = np.repeat(np.expand_dims(range(len(lat)), 1), len(lon), axis=1)
tmpLon = np.repeat(np.expand_dims(range(len(lon)), 0), len(lat), axis=0)
geometryInfo['idxOriginal'] = (tmpLat.ravel(), tmpLon.ravel())
geometryInfo['idxReduced'] = geometryInfo['idxOriginal']
return geometryInfo
def _intersection(self, pointsX, pointsY, borderX, borderY):
pixels = len(pointsX) * len(pointsY)
segments = len(borderX)-1
# Defining matrices for calculation
pointsX = np.expand_dims(pointsX, 1)
pointsY = np.expand_dims(pointsY, 0)
X1 = np.repeat(np.expand_dims(np.repeat(pointsX, pointsY.shape[1], axis=1).ravel(), 1), segments, axis=1)
Y1 = np.repeat(np.expand_dims(np.repeat(pointsY, pointsX.shape[0], axis=0).ravel(), 1), segments, axis=1)
X3 = np.repeat(np.expand_dims(np.array(borderX)[:len(borderX)-1], 0), pixels, axis=0)
Y3 = np.repeat(np.expand_dims(np.array(borderY)[:len(borderY)-1], 0), pixels, axis=0)
X4 = np.repeat(np.expand_dims(np.array(borderX)[1:], 0), pixels, axis=0)
Y4 = np.repeat(np.expand_dims(np.array(borderY)[1:], 0), pixels, axis=0)
x2 = 9999
y2 = 9999
# Computing intersection coordinates
denom = (X1-x2)*(Y3-Y4)-(Y1-y2)*(X3-X4)
Px = ((X1*y2-Y1*x2)*(X3-X4)-(X1-x2)*(X3*Y4-Y3*X4))/denom
Py = ((X1*y2-Y1*x2)*(Y3-Y4)-(Y1-y2)*(X3*Y4-Y3*X4))/denom
# Bounding intersections to the real lines
Lx = np.logical_and(
Px>=X1,
np.logical_or(
np.logical_and(Px<=X3+1E-6, Px>=X4-1E-6),
np.logical_and(Px<=X4+1E-6, Px>=X3-1E-6)))
Ly = np.logical_and(
Py>=Y1,
np.logical_or(
np.logical_and(Py<=Y3+1E-6, Py>=Y4-1E-6),
np.logical_and(Py<=Y4+1E-6, Py>=Y3-1E-6)))
L = np.mod(np.sum(np.logical_and(Lx, Ly),1),2)==1
L = np.reshape(L, (pointsY.shape[1], pointsX.shape[0]), order='F')
return L
def _listData(self):
# List and pre-process available netCDF files
self.netCDFDict = {}
for f0 in os.listdir(self.dataFolder):
            tmp = re.match('^' + self.filePrefix + r'_(\d{4})\.(\d{2})\.nc$', f0)
if tmp != None:
tmp = (tmp.group(0), int(tmp.group(1)), int(tmp.group(2)))
if tmp[1] not in self.netCDFDict.keys():
self.netCDFDict[tmp[1]] = {}
self.netCDFDict[tmp[1]][tmp[2]] = [tmp[0], True]
def _filePeriod(self, dateIni=dt.datetime(1998, 1, 1, 0), dateEnd=dt.datetime.now()):
# Define the period of time to retrieve files and creates a list of dates
return [d0.astype(object) for d0 in np.arange(dateIni, dateEnd+dt.timedelta(**self.timestep), dt.timedelta(**self.timestep))]
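    # Hedged example (illustrative, not from the original file): with timestep={'hours': 3},
    # _filePeriod(dt.datetime(1998, 1, 1, 0), dt.datetime(1998, 1, 1, 9)) yields the four datetimes
    # 00:00, 03:00, 06:00 and 09:00 on 1998-01-01 (the end date is included).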
def _downloadFile(self, toDownload):
'''
        Downloads the file from the given url and saves it under the given fileName (toDownload is a (fileName, url) tuple).
'''
fileName, url = toDownload
        # Open the url and stream the response into fileName in fixed-size blocks
try:
u = urlopen(url)
f = open(fileName, 'wb')
block_sz = 8192
while True:
buffer = u.read(block_sz)
if not buffer:
break
f.write(buffer)
# Closes the file
f.close()
return os.path.getsize(fileName)
except Exception as ex:
warnings.warn(str(ex), UserWarning)
return -1
def _printProgress (self, iteration, total, prefix = '', suffix = '', decimals = 2, barLength = 100):
'''
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : number of decimals in percent complete (Int)
barLength - Optional : character length of bar (Int)
'''
filledLength = int(round(barLength * iteration / float(total)))
percents = round(100.00 * (iteration / float(total)), decimals)
bar = '#' * filledLength + '-' * (barLength - filledLength)
sys.stdout.write('%s [%s] %s%s %s\r' % (prefix, bar, percents, '%', suffix)),
sys.stdout.flush()
if iteration == total:
print("\n")
def _sumChunksMatrix(self, matrix, chunkSize, axis=-1):
'''
Sums sequences of values along a given axis.
The chunkSize defines the size of the sequence to sum.
'''
shape = matrix.shape
if axis < 0:
axis += matrix.ndim
shape = shape[:axis] + (-1, chunkSize) + shape[axis+1:]
x = matrix.reshape(shape)
return x.sum(axis=axis+1)
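    # Hedged example (illustrative, not from the original file): with a (2, 6) matrix and chunkSize=3
    # on the last axis, each row [a, b, c, d, e, f] becomes [a+b+c, d+e+f], so the result has shape (2, 2).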
def _ismember(self, a, b):
bind = {}
for i, elt in enumerate(b):
if elt not in bind:
bind[elt] = i
return [bind.get(itm, None) for itm in a if itm in bind.keys()]
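    # Hedged example (illustrative, not from the original file): _ismember([1, 2, 5], [5, 1, 9])
    # returns [1, 0], i.e. the positions in b of those elements of a that are present in b
    # (elements of a missing from b are skipped).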
def _findLastNetCDF(self):
tmp0 = max(self.netCDFDict.keys())
tmp1 = max(self.netCDFDict[tmp0].keys())
return (tmp0, tmp1, self.netCDFDict[tmp0][tmp1])
def _notProcessed(self, dateRange):
for i0 in range(len(dateRange)-1,-1,-1):
tmp = dateRange[i0]
if tmp.year in self.netCDFDict.keys():
if tmp.month in self.netCDFDict[tmp.year].keys():
if self.netCDFDict[tmp.year][tmp.month][1]:
# the file is complete
dateRange.pop(i0)
else:
# the file is not complete
if 'loaded' in self.__dict__:
if not self.loaded['missing'][self.loaded['dates'].index(dateRange[i0])]:
# this value is not missing
dateRange.pop(i0)
else:
dateRange.pop(i0)
return dateRange
def _splitByMonth(self, dateRange):
        tmpDateMonth = [(d0.year, d0.month) for d0 in dateRange]
uniqueMonths = list(set(tmpDateMonth))
tmpTuple = None
idxs = []
for s0 in tmpDateMonth:
if s0 != tmpTuple:
tmpIdx = uniqueMonths.index(s0)
tmpTuple = s0
idxs.append(tmpIdx)
return idxs
def _loadNetCDF(self, path, data=True):
rootgrp = Dataset(path, 'r', format="NETCDF4")
out = {}
tmp = rootgrp.variables['time']
out['dates'] = num2date(tmp[:], tmp.units, tmp.calendar)
out['lat'] = rootgrp.variables['lat'][:]
out['lon']= rootgrp.variables['lon'][:]
out['complete'] = rootgrp.complete == 1
out['missing'] = rootgrp.variables['missing'][:]
if data:
out['data'] = rootgrp.variables['precipitation'][:,:,:]
return out
class TRMMSatelliteRainfall(SatelliteData):
'''
Data downloaded from:
http://mirador.gsfc.nasa.gov/cgi-bin/mirador/presentNavigation.pl?tree=project&&dataGroup=Gridded&project=TRMM&dataset=3B42:%203-Hour%200.25%20x%200.25%20degree%20merged%20TRMM%20and%20other%20satellite%20estimates&version=007
'''
filePrefix = 'trmm3B42v7'
precision = np.single
significantDigits = 2
downloadFailThreshold = 50000
productSite = 'http://trmm.gsfc.nasa.gov/'
downloadSite = 'http://mirador.gsfc.nasa.gov/cgi-bin/mirador/presentNavigation.pl?tree=project&&dataGroup=Gridded&project=TRMM&dataset=3B42:%203-Hour%200.25%20x%200.25%20degree%20merged%20TRMM%20and%20other%20satellite%20estimates&version=007'
description = 'Tropical Rainfall Measuring Mission, TMPA 3B42 version 7. Accumulated rainfall over 3h intervals in mm. Grid of 0.25x0.25 deg.'
timestep = {}
timestep['hours'] = 3
units = 'mm/3h'
def downloadList(self, dateIni=dt.datetime(1998, 1, 1, 0), dateEnd=dt.datetime.now()):
'''
        Implementation for TRMM 3B42 data.
        Returns a tuple with the list of local file names to write and the matching list of URLs to download.
'''
urlFormat0="http://disc2.gesdisc.eosdis.nasa.gov/daac-bin/OTF/HTTP_services.cgi?FILENAME=%2Fs4pa%2FTRMM_L3%2FTRMM_3B42%2F{1}%2F{2}%2F3B42.{0}.7.HDF.Z&FORMAT=L2d6aXA&LABEL=3B42.{0}.7.nc.gz&SHORTNAME=TRMM_3B42&SERVICE=HDF_TO_NetCDF&VERSION=1.02&DATASET_VERSION=007"
urlFormat1="http://disc2.gesdisc.eosdis.nasa.gov/daac-bin/OTF/HTTP_services.cgi?FILENAME=%2Fs4pa%2FTRMM_L3%2FTRMM_3B42%2F{1}%2F{2}%2F3B42.{0}.7A.HDF.Z&FORMAT=L2d6aXA&LABEL=3B42.{0}.7.nc.gz&SHORTNAME=TRMM_3B42&SERVICE=HDF_TO_NetCDF&VERSION=1.02&DATASET_VERSION=007"
# Dates and urls to download
dateList = self._notProcessed(self._filePeriod(dateIni=dateIni, dateEnd=dateEnd))
dateList = [dt0.strftime('%Y%m%d.%H') for dt0 in dateList]
urlList=[]
for date in dateList:
year, dayOfYear = self._fDayYear(date)
if int(date[0:4]) < 2000 or year>2010:
urlList.append(urlFormat0.format(date, year, dayOfYear))
elif year==2010 and (int(dayOfYear)>273 or date=='20101001.00'):
urlList.append(urlFormat0.format(date, year, dayOfYear))
else:
urlList.append(urlFormat1.format(date, year, dayOfYear))
# File list
fileList = [os.path.join(self.downloadFolder, '3B42.' + d0 + '.7.nc.gz') for d0 in dateList]
return (fileList, urlList)
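    # Example of the returned pair (file names are built from the download folder): for
    # 1 Jan 1998, 00h the file entry is '<downloadFolder>/3B42.19980101.00.7.nc.gz' and the
    # matching entry in urlList is the GES DISC URL for that timestamp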
def downloadedDates(self, fileType):
'''
        Provides a list of the files in the download folder that have a given extension.
'''
# Reads the content of the data folder.
# Returns the list of the files with the file type defined.
filesFolder=os.listdir(self.downloadFolder)
fileList=[]
dateList=[]
for f0 in filesFolder:
if os.path.splitext(f0)[1] == fileType:
fileList.append(os.path.join(self.downloadFolder, f0))
dateList.append(dt.datetime.strptime(f0[5:16],'%Y%m%d.%H'))
return (fileList, dateList)
def importData(self, fileName):
'''
        Imports the data from a downloaded file into Python.
'''
# Defines the folder in which the temporary files are produced
tmpFolder = self.tmpFolder
        # Safely create a new temporary file with a random 'tmp'-prefixed name and the .nc extension
        # tempfile.mkstemp creates the file and returns an OS-level handle together with its path
        # The handle, not the file name, is what the operating system uses to operate on the open file, so relying on the file name alone may not work
fOutIdx, fOutPath = tempfile.mkstemp(suffix='.nc', prefix='tmp', dir=tmpFolder)
        # Opens the temporary file and returns the descriptor that can be used to work with the open file
fOut = os.fdopen(fOutIdx, 'wb+')
        # Open the gz file and copy the nc file into the temporary file
        # Using with ... as ... ensures that the gzip file is automatically closed at the end
        # length=-1 sets the copy buffer size; a negative value copies everything at once instead of in chunks
        # For large files this may lead to uncontrolled memory consumption
with gzip.open(fileName, 'rb') as fIn:
shutil.copyfileobj(fIn, fOut, length=-1)
fOut.close()
        # Reads the file fOut as a netCDF file, referring to it as rootgrp
# Dataset returns an object with the dimensions and variables of the netcdf file, not the data in it
rootgrp = Dataset(fOutPath, "r")
data = rootgrp.variables['pcp'][:, :, :]
longitudes = rootgrp.variables['longitude'][:]
latitudes = rootgrp.variables['latitude'][:]
# Replace missing values with nan
data[data<=-999]=np.nan
# Delete the temporary file
os.remove(fOutPath)
return (data, latitudes, longitudes)
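    # Usage sketch: data, lats, lons = self.importData(fileName), where fileName is one of the
    # downloaded archives such as '3B42.19980101.00.7.nc.gz'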
def _fDayYear(self, url):
'''
        This function returns the year and the zero-padded day of the year (001-366)
'''
        # Files stamped at hour 00 are named with day n but belong to day of year n-1
        # This affects the year on the 1st of January and the day when crossing between days
        # First convert the string to a date, then, if the hour is 00, subtract one minute so the previous day is returned
tmpDate = dt.datetime.strptime(url, '%Y%m%d.%H')
if url[-2:]=='00':
tmpDiff = dt.timedelta(minutes=1)
tmpDate -= tmpDiff
return (tmpDate.year, '{dayYear:03d}'.format(dayYear=tmpDate.timetuple().tm_yday))
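        # e.g. '20100101.00' maps to (2009, '365'): the hour-00 file belongs to the last
        # 3-hour slot of the previous day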
``` |
{
"source": "JosepFanals/HELM",
"score": 3
} |
#### File: HELM/Codi/diode.py
```python
import numpy as np
import math
import matplotlib.pyplot as plt
U = 5  # equivalent to E
R = 2  # equivalent to R1
R2 = 3
P = 1.2
Vt = 0.026
Is = 0.000005
n = 200  # series depth
Vd = np.zeros(n)  # series
Vl = np.zeros(n)
I1 = np.zeros(n)
I1[0] = U / R  # initialization of the series
Vd[0] = Vt * math.log(1 + I1[0] / Is)
Vl[0] = P / I1[0]
def convVd(Vd, I, i):  # convolution for computing Vd[i]
suma = 0
for k in range(1, i):
suma += k * Vd[k] * I[i - k]
return suma
def convVlI(Vl, I1, i):  # convolution for computing Vl[i]
suma = 0
for k in range(i):
suma = suma + Vl[k] * I1[i - k]
return suma
for i in range(1, n):  # computation of the coefficients
I1[i] = (1 / R + 1 / R2) * (-Vd[i - 1] - Vl[i - 1])
Vd[i] = (i * Vt * I1[i] - convVd(Vd, I1, i)) / (i * (Is + I1[0]))
Vl[i] = -convVlI(Vl, I1, i) / I1[0]
If = sum(I1)
Vdf = sum(Vd)
Vlf = sum(Vl)
print('I1: ' + str(If))
print('Vd: ' + str(Vdf))
print('Vl: ' + str(Vlf))
print('P: ' + str(Vlf * If))
Vdfinal = np.zeros(n)  # to see how the diode voltage evolves with the series depth
for j in range(n):
    Vdfinal[j] = np.sum(Vd[:(j + 1)])
print(Vdfinal)
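# Illustrative extra (assumes a working matplotlib backend): plot how the partial sums of Vd
# converge as more series coefficients are included
plt.plot(range(n), Vdfinal)
plt.xlabel('series depth')
plt.ylabel('partial sum of Vd [V]')
plt.show()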
``` |
{
"source": "josepfont65/neurodsp",
"score": 3
} |
#### File: neurodsp/sim/periodic.py
```python
import numpy as np
from neurodsp.utils.decorators import normalize
from neurodsp.sim.transients import sim_cycle
###################################################################################################
###################################################################################################
@normalize
def sim_oscillation(n_seconds, fs, freq, cycle='sine', **cycle_params):
"""Simulate an oscillation.
Parameters
----------
n_seconds : float
Simulation time, in seconds.
fs : float
Signal sampling rate, in Hz.
freq : float
Oscillation frequency.
cycle : {'sine', 'asine', 'sawtooth', 'gaussian', 'exp', '2exp'}
What type of oscillation cycle to simulate.
See `sim_cycle` for details on cycle types and parameters.
**cycle_params
Parameters for the simulated oscillation cycle.
Returns
-------
sig : 1d array
Simulated oscillation.
"""
# Figure out how many cycles are needed for the signal, & length of each cycle
n_cycles = int(np.ceil(n_seconds * freq))
n_seconds_cycle = int(np.ceil(fs / freq)) / fs
# Create oscillation by tiling a single cycle of the desired oscillation
osc_cycle = sim_cycle(n_seconds_cycle, fs, cycle, **cycle_params)
sig = np.tile(osc_cycle, n_cycles)
# Truncate the length of the signal to be the number of expected samples
n_samps = int(n_seconds * fs)
sig = sig[:n_samps]
return sig
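# Usage sketch (hypothetical values): 2 seconds of a 10 Hz sine sampled at 500 Hz gives
# n_seconds * fs = 1000 samples.
#   sig = sim_oscillation(n_seconds=2, fs=500, freq=10, cycle='sine')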
@normalize
def sim_bursty_oscillation(n_seconds, fs, freq, enter_burst=.2, leave_burst=.2,
cycle='sine', **cycle_params):
"""Simulate a bursty oscillation.
Parameters
----------
n_seconds : float
Simulation time, in seconds.
fs : float
Sampling rate of simulated signal, in Hz.
freq : float
Oscillation frequency, in Hz.
enter_burst : float, optional, default: 0.2
Probability of a cycle being oscillating given the last cycle is not oscillating.
leave_burst : float, optional, default: 0.2
Probability of a cycle not being oscillating given the last cycle is oscillating.
cycle : {'sine', 'asine', 'sawtooth', 'gaussian', 'exp', '2exp'}
What type of oscillation cycle to simulate.
See `sim_cycle` for details on cycle types and parameters.
**cycle_params
Parameters for the simulated oscillation cycle.
Returns
-------
sig : 1d array
Simulated bursty oscillation.
Notes
-----
This function takes a 'tiled' approach to simulating cycles, with evenly spaced
and consistent cycles across the whole signal, that are either oscillating or not.
If the cycle length does not fit evenly into the simulated data length,
then the last few samples will be non-oscillating.
"""
# Determine number of samples & cycles
n_samples = int(n_seconds * fs)
n_seconds_cycle = (1/freq * fs)/fs
# Make a single cycle of an oscillation
osc_cycle = sim_cycle(n_seconds_cycle, fs, cycle, **cycle_params)
n_samples_cycle = len(osc_cycle)
n_cycles = int(np.floor(n_samples / n_samples_cycle))
# Determine which periods will be oscillating
is_oscillating = _make_is_osc(n_cycles, enter_burst, leave_burst)
# Fill in the signal with cycle oscillations, for all bursting cycles
sig = np.zeros([n_samples])
for is_osc, cycle_ind in zip(is_oscillating, range(0, n_samples, n_samples_cycle)):
if is_osc:
sig[cycle_ind:cycle_ind+n_samples_cycle] = osc_cycle
return sig
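# Usage sketch (hypothetical values): a 10 Hz bursty oscillation over 5 seconds at fs=500,
# where a quiet cycle starts bursting with probability .2 and a bursting cycle stops with
# probability .2 at each step.
#   sig = sim_bursty_oscillation(n_seconds=5, fs=500, freq=10, enter_burst=.2, leave_burst=.2)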
###################################################################################################
###################################################################################################
def _make_is_osc(n_cycles, enter_burst, leave_burst):
"""Create a vector describing if each cycle is oscillating, for bursting oscillations."""
is_oscillating = [None] * (n_cycles)
is_oscillating[0] = False
for ii in range(1, n_cycles):
rand_num = np.random.rand()
if is_oscillating[ii-1]:
is_oscillating[ii] = rand_num > leave_burst
else:
is_oscillating[ii] = rand_num < enter_burst
return is_oscillating
```
#### File: neurodsp/tests/test_filt_fir.py
```python
import numpy as np
from neurodsp.tests.settings import FS
from neurodsp.filt.fir import *
###################################################################################################
###################################################################################################
def test_filter_signal_fir(tsig):
out = filter_signal_fir(tsig, FS, 'bandpass', (8, 12))
assert out.shape == tsig.shape
def test_filter_signal_fir_2d(tsig2d):
out = filter_signal_fir(tsig2d, FS, 'bandpass', (8, 12))
assert out.shape == tsig2d.shape
assert sum(~np.isnan(out[0, :])) > 0
def test_design_fir_filter():
test_filts = {'bandpass' : (5, 10), 'bandstop' : (5, 6),
'lowpass' : (None, 5), 'highpass' : (5, None)}
for pass_type, f_range in test_filts.items():
filter_coefs = design_fir_filter(FS, pass_type, f_range)
def test_apply_fir_filter(tsig):
out = apply_fir_filter(tsig, np.array([1, 1, 1, 1, 1]))
assert out.shape == tsig.shape
def test_compute_filter_length():
# Settings for checks
fs = 500
f_lo, f_hi = 4, 8
# Check filt_len, if defined using n_seconds
n_seconds = 1.75 # Number chosen to create odd expected filt_len (not needing rounding up)
expected_filt_len = n_seconds * fs
filt_len = compute_filter_length(fs, 'bandpass', f_lo, f_hi, n_cycles=None, n_seconds=n_seconds)
assert filt_len == expected_filt_len
# Check filt_len, if defined using n_cycles
n_cycles = 5
expected_filt_len = int(np.ceil(fs * n_cycles / f_lo))
filt_len = compute_filter_length(fs, 'bandpass', f_lo, f_hi, n_cycles=n_cycles, n_seconds=None)
assert filt_len == expected_filt_len
# Check filt_len, if expected to be rounded up to be odd
n_cycles = 4
expected_filt_len = int(np.ceil(fs * n_cycles / f_lo)) + 1
filt_len = compute_filter_length(fs, 'bandpass', f_lo, f_hi, n_cycles=n_cycles, n_seconds=None)
assert filt_len == expected_filt_len
```
#### File: neurodsp/tests/test_plts_spectral.py
```python
import numpy as np
from neurodsp.spectral.variance import compute_spectral_hist
from neurodsp.tests.utils import plot_test
from neurodsp.plts.spectral import *
###################################################################################################
###################################################################################################
@plot_test
def test_plot_power_spectra():
freqs, powers = np.array([1, 2, 3, 4]), np.array([10, 20, 30, 40])
plot_power_spectra(freqs, powers)
plot_power_spectra([freqs, freqs], [powers, powers])
@plot_test
def test_plot_scv():
freqs, scv = np.array([1, 2, 3, 4]), np.array([10, 20, 30, 40])
plot_scv(freqs, scv)
@plot_test
def test_plot_scv_rs_lines():
freqs, scv_rs = np.array([1, 2, 3]), np.array([[2, 3, 4], [2, 3, 4], [3, 4, 5]])
plot_scv_rs_lines(freqs, scv_rs)
@plot_test
def test_plot_scv_rs_matrix():
freqs, times = np.array([1, 2, 3]), np.array([1, 2, 3])
scv_rs = np.array([[2, 3, 4], [2, 3, 4], [3, 4, 5]])
plot_scv_rs_matrix(freqs, times, scv_rs)
@plot_test
def test_plot_spectral_hist(tsig):
freqs, power_bins, spectral_hist = compute_spectral_hist(tsig, fs=1000)
plot_spectral_hist(freqs, power_bins, spectral_hist)
```
#### File: neurodsp/tests/test_spectral_power.py
```python
from neurodsp.tests.settings import FS, FREQS_LST, FREQS_ARR
from neurodsp.spectral.power import *
###################################################################################################
###################################################################################################
def test_compute_spectrum(tsig):
freqs, spectrum = compute_spectrum(tsig, FS, method='welch')
assert freqs.shape == spectrum.shape
freqs, spectrum = compute_spectrum(tsig, FS, method='wavelet', freqs=FREQS_ARR)
assert freqs.shape == spectrum.shape
freqs, spectrum = compute_spectrum(tsig, FS, method='medfilt')
assert freqs.shape == spectrum.shape
def test_compute_spectrum_2d(tsig2d):
freqs, spectrum = compute_spectrum(tsig2d, FS, method='welch')
assert freqs.shape[-1] == spectrum.shape[-1]
assert spectrum.ndim == 2
freqs, spectrum = compute_spectrum(tsig2d, FS, method='wavelet', freqs=FREQS_ARR)
assert freqs.shape[-1] == spectrum.shape[-1]
assert spectrum.ndim == 2
freqs, spectrum = compute_spectrum(tsig2d, FS, method='medfilt')
assert freqs.shape[-1] == spectrum.shape[-1]
assert spectrum.ndim == 2
def test_compute_spectrum_welch(tsig):
freqs, spectrum = compute_spectrum_welch(tsig, FS, avg_type='mean')
assert freqs.shape == spectrum.shape
freqs, spectrum = compute_spectrum_welch(tsig, FS, avg_type='median')
assert freqs.shape == spectrum.shape
def test_compute_spectrum_wavelet(tsig):
freqs, spectrum = compute_spectrum_wavelet(tsig, FS, freqs=FREQS_ARR, avg_type='mean')
assert freqs.shape == spectrum.shape
freqs, spectrum = compute_spectrum_wavelet(tsig, FS, freqs=FREQS_LST, avg_type='median')
assert freqs.shape == spectrum.shape
def test_compute_spectrum_medfilt(tsig):
freqs, spectrum = compute_spectrum_medfilt(tsig, FS)
assert freqs.shape == spectrum.shape
```
#### File: neurodsp/tests/test_spectral_utils.py
```python
import numpy as np
from numpy.testing import assert_equal
from neurodsp.spectral.utils import *
###################################################################################################
###################################################################################################
def test_trim_spectrum():
freqs = np.array([5, 6, 7, 8, 9])
pows = np.array([1, 2, 3, 4, 5])
freqs_new, pows_new = trim_spectrum(freqs, pows, [6, 8])
assert_equal(freqs_new, np.array([6, 7, 8]))
assert_equal(pows_new, np.array([2, 3, 4]))
def test_rotate_powerlaw():
freqs = np.array([5, 6, 7, 8, 9])
pows = np.array([1, 2, 3, 4, 5])
d_exp = 1
pows_new = rotate_powerlaw(freqs, pows, d_exp)
assert pows.shape == pows_new.shape
```
#### File: neurodsp/tests/test_timefrequency_hilbert.py
```python
from neurodsp.tests.settings import FS
from neurodsp.timefrequency.hilbert import *
###################################################################################################
###################################################################################################
def test_robust_hilbert():
# Generate a signal with NaNs
fs, n_points, n_nans = 100, 1000, 10
sig = np.random.randn(n_points)
sig[0:n_nans] = np.nan
# Check has correct number of nans (not all nan), without increase_n
hilb_sig = robust_hilbert(sig)
assert sum(np.isnan(hilb_sig)) == n_nans
# Check has correct number of nans (not all nan), with increase_n
hilb_sig = robust_hilbert(sig, True)
assert sum(np.isnan(hilb_sig)) == n_nans
def test_phase_by_time(tsig):
out = phase_by_time(tsig, FS, (8, 12))
assert out.shape == tsig.shape
def test_amp_by_time(tsig):
out = amp_by_time(tsig, FS, (8, 12))
assert out.shape == tsig.shape
def test_freq_by_time(tsig):
out = freq_by_time(tsig, FS, (8, 12))
assert out.shape == tsig.shape
def test_no_filters(tsig):
out = phase_by_time(tsig, FS)
assert out.shape == tsig.shape
out = amp_by_time(tsig, FS)
assert out.shape == tsig.shape
out = freq_by_time(tsig, FS)
assert out.shape == tsig.shape
def test_2d(tsig2d):
out = phase_by_time(tsig2d, FS, (8, 12))
assert out.shape == tsig2d.shape
out = amp_by_time(tsig2d, FS, (8, 12))
assert out.shape == tsig2d.shape
out = freq_by_time(tsig2d, FS, (8, 12))
assert out.shape == tsig2d.shape
```
#### File: neurodsp/tests/test_utils_core.py
```python
from pytest import raises
from neurodsp.utils.core import *
###################################################################################################
###################################################################################################
def test_get_avg_func():
func = get_avg_func('mean')
assert callable(func)
func = get_avg_func('median')
assert callable(func)
with raises(ValueError):
get_avg_func('not_a_thing')
def test_check_n_cycles():
n_cycles = check_n_cycles(3)
n_cycles = check_n_cycles([3, 4, 5])
n_cycles = check_n_cycles([3, 4, 5], 3)
with raises(ValueError):
check_n_cycles(-1)
with raises(ValueError):
check_n_cycles([-1, 1])
with raises(ValueError):
check_n_cycles([1, 2], 3)
```
#### File: neurodsp/tests/test_utils_norm.py
```python
import numpy as np
from neurodsp.utils.norm import *
###################################################################################################
###################################################################################################
def test_demean():
d1 = np.array([1, 2, 3])
d2 = np.array([0, 1, 2, 3, 0])
# Check default - demean to 0
out1 = demean(d1)
assert np.isclose(out1.mean(), 0.)
# Check demeaning and adding specific mean
out2 = demean(d1, mean=1.)
assert np.isclose(out2.mean(), 1.)
# Check dealing with zero entries
out3 = demean(d2)
assert np.isclose(out3[np.nonzero(out3)].mean(), 0)
# Check turning of non-zero selection
out3 = demean(d2, mean=1, select_nonzero=False)
assert np.isclose(out3.mean(), 1)
def test_normalize_variance():
d1 = np.array([1, 2, 3])
d2 = np.array([0, 1, 2, 3, 0])
# Check default - normalize variance to 1
out1 = normalize_variance(d1)
np.isclose(out1.var(), 1.)
# Check normalizing and add specific variance
out2 = normalize_variance(d1, 2.)
np.isclose(out2.var(), 2.)
``` |
{
"source": "josepfpinto/webscraping",
"score": 3
} |
#### File: webscraping/services/google_sheets.py
```python
import os
from pathlib import Path
import config.sheets_id as sheet
from gspread import Spreadsheet, Worksheet
from selenium.common.exceptions import TimeoutException
import gspread
from google.oauth2 import service_account
from services import exceptions
wks = Worksheet
wksInput = Worksheet
sh = Spreadsheet
# --- Connects to Google Sheets and prepares sheets ---
def init(day):
global wks, wksInput
connect_to_google()
# Prepare Google Sheets
print("\nPreparing Google Sheets")
print()
if wks.acell("A1").value != "PLATF":
print("- wrong set up in sheet!")
wks.clear()
wks.append_row(["PLATF", "TODAY", "DATE", "NAME", "RESERV", "SCORE", "PRICES", "SUPERHOST"])
elif len(wks.col_values(1)) == 1:
print("- sheet empty")
elif wks.row_values(2)[1] == day:
print("- same day")
else:
wks.clear()
wks.append_row(["PLATF", "TODAY", "DATE", "NAME", "RESERV", "SCORE", "PRICES", "SUPERHOST"])
return wks, wksInput
# --- Connects to Google API ---
def connect_to_google():
print("\nConnecting to Google API")
global wks, wksInput, sh
try:
file_path = os.path.realpath(Path(__file__).parent.parent / "config/credentials.json")
credentials = service_account.Credentials.from_service_account_file(
file_path, scopes=['https://www.googleapis.com/auth/drive'])
goog = gspread.authorize(credentials)
sh = goog.open(sheet.main)
wks = goog.open(sheet.main).sheet1
wksInput = sh.get_worksheet(1)
except Exception as error:
exceptions.more_info("GOOGLE API FAILED!", error)
return wks, wksInput
# --- Sends values to sheet ---
def send_values(finalList):
print("\nSending values")
global wks, sh
try:
emptyCell = len(wks.col_values(1)) + 1
sh.values_update("Sheet1!A{}".format(emptyCell), params={
"valueInputOption": "RAW"}, body={"values": finalList})
except TimeoutException as error:
exceptions.simple("Timeout - failed to send values to sheet. Error:", error)
except Exception as error:
exceptions.more_info("Send values to sheet FAILED!", error)
```
#### File: webscraping/services/new_url.py
```python
from datetime import timedelta
import config.web_url as webUrl
from selenium.common.exceptions import NoSuchElementException, TimeoutException
from services import webpage_actions, exceptions, g_driver
def get(dateIn, totalDays, totalAdults):
print("\nCreating and fetching new URL")
try:
startDay = str(dateIn.day)
month = str(dateIn.month)
year = str(dateIn.year)
        dateOut = dateIn + timedelta(days=totalDays)  # avoids an invalid day-of-month when the stay crosses a month boundary
        endDay = str(dateOut.day)
        midURL = webUrl.mid.format(month, startDay, year, str(dateOut.month), endDay, str(dateOut.year), totalAdults)
g_driver.google_driver.get(webUrl.start + midURL + webUrl.end)
webpage_actions.wait(15, "div.sr_header")
except (NoSuchElementException, TimeoutException) as error:
exceptions.simple("Timeout - no page load. Error:", error)
except Exception as error:
exceptions.more_info("URL FAILED!", error)
```
#### File: webscraping/services/webpage_actions.py
```python
import time
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from selenium.common.exceptions import NoSuchElementException
from services import exceptions, webpage_scraping, g_driver
def wait(seconds, css_selector):
w = WebDriverWait(g_driver.google_driver, seconds)
if len(css_selector) > 1:
w.until(EC.presence_of_element_located(
(By.CSS_SELECTOR, css_selector)))
def close_cookies():
print("- closing cookies")
try:
w = WebDriverWait(g_driver.google_driver, 15)
w.until(EC.presence_of_element_located(
(By.CSS_SELECTOR, "button#onetrust-accept-btn-handler")))
g_driver.google_driver.find_element_by_css_selector(
"button#onetrust-accept-btn-handler").click()
print("- cookie button clicked")
except (NoSuchElementException, TimeoutException) as error:
exceptions.simple("- no cookie button found... Moving on:", error)
finally:
webpage_scraping.is_first_page = False
time.sleep(3)
def wait_for_apartments():
try:
wait(15, "div.sr_item.sr_item_new.sr_item_default.sr_property_block.sr_flex_layout")
except (NoSuchElementException, TimeoutException) as error:
exceptions.simple("- no apartments found:", error)
return error
def get_price(apartment, totalAdults, totalDays, cleaningFee):
price = ""
for elem in apartment.find_elements_by_css_selector("span.bui-u-sr-only"):
text = elem.text
if ("Price" in text) or ("Preço" in text):
price = int(text.split(' ')[-1])
dayTax = int(totalAdults) * 2
tax = 7 * dayTax if totalDays > 7 else totalDays * dayTax
return (price - cleaningFee - tax) / totalDays
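# Worked example (hypothetical numbers): price=700, cleaningFee=50, totalAdults=2, totalDays=5
# gives dayTax=4 and tax=20, so the nightly rate is (700 - 50 - 20) / 5 = 126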
def get_score(apartment):
scoreRaw = apartment.find_element_by_css_selector(
"div.bui-review-score__badge").text
return int(scoreRaw) if scoreRaw == "10" else float(scoreRaw[0] + "." + scoreRaw[2])
def get_reviews(apartment):
reviewsRaw = apartment.find_element_by_css_selector(
"div.bui-review-score__text").text.split(' ')[0]
    return int(reviewsRaw.replace(',', '').replace('.', ''))
``` |
{
"source": "josepgl/itviec",
"score": 2
} |
#### File: josepgl/itviec/config.py
```python
import os
# - Environment config
# - Instance config or
# - Test config
basedir = os.path.abspath(os.path.dirname(__file__))
DEBUG = True
req_http_headers = dict()
# Function for debugging, avoiding circular dependency with helpers module
def print_json(to_json_string):
import json
    out = json.dumps(to_json_string, indent=4, sort_keys=True, default=str)
    print(out)
class Config:
INSTANCE_DIR = os.path.join(basedir, "instance")
CACHE_DIR = os.path.join(INSTANCE_DIR, "cache")
JOBS_CACHE_DIR = os.path.join(CACHE_DIR, "jobs")
EMPLOYERS_CACHE_DIR = os.path.join(CACHE_DIR, "employers")
CONFIG_FILENAME = "config.py"
DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"
# ItViec urls
BASE_URL = "https://itviec.com"
JOBS_URL = "https://itviec.com/it-jobs"
EMPLOYERS_JSON_URL = "https://itviec.com/api/v1/employers.json"
TEMPLATE_EMPLOYER_URL = "https://itviec.com/companies/{}"
TEMPLATE_EMPLOYER_REVIEW_URL = "https://itviec.com/companies/{}/review"
# Database / SQLAlchemy
SQLALCHEMY_TRACK_MODIFICATIONS = False
BOOTSTRAP_SERVE_LOCAL = True
# ItViec request header for json
HTTP_HEADER_X_REQUESTED_WITH = "XMLHttpRequest"
# HTTP_HEADER_COOKIE = "_ITViec_session=..."
# Cache files
EMPLOYERS_JSON_FILE = os.path.join(CACHE_DIR, "employers.json")
JOBS_JSON_FILE = os.path.join(CACHE_DIR, "jobs.json")
class DevelopmentConfig(Config):
DEBUG = True
SQLALCHEMY_DATABASE_URI = os.environ.get('SQLALCHEMY_DATABASE_URI') or \
'sqlite:///' + os.path.join(basedir, "instance", "sqlalchemy.sqlite")
SQLALCHEMY_ECHO = True
VERBOSE = True
class TestingConfig(Config):
TESTING = True
SQLALCHEMY_DATABASE_URI = os.environ.get('SQLALCHEMY_DATABASE_URI') or "sqlite://"
SQLALCHEMY_ECHO = True
class ProductionConfig(Config):
SQLALCHEMY_DATABASE_URI = os.environ.get('SQLALCHEMY_DATABASE_URI') or \
'sqlite:///' + os.path.join(basedir, "instance", "sqlalchemy.sqlite")
config = dict(
development=DevelopmentConfig,
testing=TestingConfig,
production=ProductionConfig,
default=DevelopmentConfig,
)
# Uses flask.Config (app.config)
def init_app(app, profile=None, test_config=None):
global req_http_headers
# load the instance config, if it exists, when not testing
config_name = profile or os.getenv("FLASK_ENV", "production")
print("Loading '{}' configuration profile".format(config_name))
# load default config
app.config.from_object(config[config_name]())
if test_config:
print("Loading test configuration.")
app.config.from_mapping(test_config)
else:
try:
print("Loading instance configuration.")
app.config.from_pyfile("config.py")
except OSError as error:
print("Custom config.py file missing in instance folder: {}".format(error))
req_http_headers = collect_http_headers(app.config)
# if app.config["DEBUG"] is True:
# print_json(app.config)
def collect_http_headers(conf):
for k, v in conf.get_namespace('HTTP_HEADER_').items():
req_http_headers[k.replace("_", "-").capitalize()] = v
return req_http_headers
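# Example: Config.HTTP_HEADER_X_REQUESTED_WITH = "XMLHttpRequest" is returned by get_namespace()
# as {"x_requested_with": "XMLHttpRequest"} and ends up as the request header
# {"X-requested-with": "XMLHttpRequest"}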
```
#### File: itviec/itviec/feeds.py
```python
from bs4 import BeautifulSoup
from flask import current_app as app
from itviec.helpers import fetch_url
# Employers feed
class EmployersFeed:
def __init__(self, **kwargs):
self.url = app.config["EMPLOYERS_JSON_URL"]
self.response = fetch_url(self.url)
self.json = self.response.json()
def __len__(self):
return len(self.json)
def __repr__(self):
return "<EmployerFeed>"
def __iter__(self):
return self.json.__iter__()
# Jobs Feed
class JobsFeed:
'''JobsFeed can iterate over pages or over all job blocks over all pages.
pages() is the page iterator returns Page objects on iterations:
feed = JobsFeed()
for page in feed.pages():
print(page)
job_tags() is the job iterator, returns BeautifulSoup Tag objects
feed = JobsFeed()
for job_tag in feed.job_tags():
print(job_tag)
The default iterator is over pages:
feed = JobsFeed()
for page in feed:
print(page)
'''
def __init__(self, **kwargs):
self.location = ''
self.tags = ''
if 'location' in kwargs:
self.location = kwargs['location']
if 'tags' in kwargs:
self.tags = kwargs['tags']
def url(self):
feed_url = app.config["JOBS_URL"]
if self.tags:
feed_url = feed_url + '/' + '-'.join(self.tags)
if self.location:
feed_url = feed_url + '/' + self.location
return feed_url
def __repr__(self):
return "<Feed location='{}' tags='{}'>".format(self.location, self.tags)
def __iter__(self):
return JobPageIterator(self.url())
def pages(self):
return JobPageIterator(self.url())
def job_tags(self):
for page in JobPageIterator(self.url()):
for job_tag in page:
yield job_tag
class JobPageIterator:
def __init__(self, url):
self.url = url
def __iter__(self):
return self
def __next__(self):
        if self.url is None or self.url == "":
raise StopIteration("Error: No URL for current iteration")
response = fetch_url(self.url)
resp_json = response.json()
# Key: suggestion
# Key: show_more_html
# Key: jobs_html
# 1.- Next URL
next_url_block = resp_json["show_more_html"]
soup = BeautifulSoup(next_url_block, "html.parser")
# Define the local variable
next_url = None
prev_url = None
# Get next page url if exists
a = soup.find("a", href=True, rel="next")
next_url = a["href"] if type(a).__name__ is "Tag" else ""
# Get previous page url if exists
for a in soup.find_all("a", href=True, rel="prev"):
prev_url = a["href"]
break
# Build page
page = JobPage(self.url, resp_json["jobs_html"], prev_url, next_url)
self.url = next_url
return page
class JobPage:
def __init__(self, url, content, prev_p, next_p):
self.url = url
self.content = content
self.prev_p = prev_p
self.next_p = next_p
def __iter__(self):
return JobTagIterator(self.content)
def __repr__(self):
return "<Page url:{} prev:{} next:{}>".format(self.url, self.prev_p, self.next_p)
class JobTagIterator:
def __init__(self, content):
if content is None:
raise Exception("Page is empty")
elif content.__class__.__name__ == "Tag":
self.job_panel_tag = content
else:
self.job_panel_tag = BeautifulSoup(content, "html.parser")
self.next_block = self.job_panel_tag.div
def __next__(self):
if self.next_block is None:
raise StopIteration("No more blocks in page")
job_block = self.next_block
self.next_block = self.next_block.find_next(class_="job")
return job_block
def __iter__(self):
return self
# Reviews Feed
class ReviewsFeed:
def __init__(self, code):
self.code = code
def url(self):
return app.config["TEMPLATE_EMPLOYER_REVIEW_URL"].format(self.code)
def __iter__(self):
return ReviewPageIterator(self.url())
def reviews(self):
for page in ReviewPageIterator(self.url()):
for review_tag in page:
yield review_tag
class ReviewPageIterator:
def __init__(self, url):
self.url = url
def __iter__(self):
return self
def __next__(self):
        if self.url is None or self.url == "":
raise StopIteration("Error: No URL for current iteration")
response = fetch_url(self.url)
prev_url = None
next_url = None
soup = BeautifulSoup(response.text, "html.parser")
review_panel_tag = soup.find("div", class_="panel-body content-review disable-user-select")
pagination_tag = soup.find("ul", class_="pagination")
if pagination_tag:
a_tag = pagination_tag.find("a", rel="next")
if a_tag:
next_url = app.config["BASE_URL"] + a_tag["href"]
page = ReviewPage(self.url, review_panel_tag, prev_url, next_url)
self.url = next_url
return page
class ReviewIterator:
def __init__(self, panel_tag):
self.next_block = panel_tag
def __iter__(self):
return self
def __next__(self):
try:
self.next_block = self.next_block.find_next(class_="content-of-review")
except AttributeError:
raise StopIteration
if self.next_block.__class__.__name__ != "Tag":
raise StopIteration
return self.next_block
class ReviewPage(JobPage):
def __iter__(self):
return ReviewIterator(self.content)
def __repr__(self):
return "<ReviewPage url:{} next:{}>".format(self.url, self.next_p)
```
#### File: itviec/itviec/helpers.py
```python
import requests
import json
import config
VIETNAMESE_CHARACTERS = "ăắằẳẵặâấầẩẫậĐđêếềểễệôốồổỗộơớờởỡợưứừửữự"
def first_line(string):
return str(string).splitlines()[0]
def class_name(instance):
return instance.__class__.__name__
def log(string, *args):
print("ItViec " + string.format(*args))
def log_msg(string, *args):
string = "{}:{}() " + string
print(string.format(*args))
def msg(string):
if config.DEBUG:
print(string)
def fetch_url(url, headers=None):
error_msg = "Error {0} fetching url: {1}"
if headers is None:
headers = config.req_http_headers
try:
response = requests.get(url, headers=headers)
response.raise_for_status()
except requests.exceptions.RequestException as e:
print(e)
exit(1)
# Check response code
if response.status_code != 200:
raise StopIteration(error_msg.format(response.status_code, url))
return response
def to_json(to_json, indent=2):
return json.dumps(to_json, sort_keys=True, indent=indent, ensure_ascii=False)
def to_json_file(to_json, filename):
with open(filename, 'wb') as json_file:
s = json.dumps(to_json, sort_keys=True, indent=2, ensure_ascii=False)
json_file.write(s.encode('utf8'))
```
#### File: itviec/itviec/__init__.py
```python
from flask import Flask, render_template
from flask_bootstrap import Bootstrap
import config
from itviec.db import db
bootstrap = Bootstrap()
def page_not_found(e):
return render_template("404.html"), 404
def create_app(profile=None, test_config=None):
print("Starting app instance")
app = Flask(__name__, instance_relative_config=True)
# Load configuration and modules
config.init_app(app, profile=profile, test_config=test_config)
bootstrap.init_app(app)
db.init_app(app)
app.register_error_handler(404, page_not_found)
# Blueprints
import itviec.views
app.register_blueprint(itviec.views.bp)
app.add_url_rule('/', endpoint='index')
import itviec.cmd_views
app.register_blueprint(itviec.cmd_views.cmd_bp)
if app.config['ENV'] != 'production':
from . import dev
app.register_blueprint(dev.bp)
return app
```
#### File: itviec/itviec/upgrade.py
```python
import time
from datetime import timedelta
from flask import current_app as app
import itviec.source
import itviec.cache
from itviec.db import db
from itviec.models import Employer, Job
from itviec.time import str_to_datetime
from itviec.composers import install_employer
from itviec.update import update_employer
def download():
feed_jobs = itviec.source.get_job_tags()
downloads = calculate_downloads(feed_jobs)
input("Press any key to continue...")
download_jobs(downloads["jobs"])
download_employers(downloads["employers"])
def upgrade():
feed_jobs = itviec.source.get_job_tags()
upd = calculate_updates(feed_jobs)
if not (upd["employers"]["update"] or upd["employers"]["create"] or
upd["jobs"]["update"] or upd["jobs"]["create"]):
exit()
input("Press any key to continue...")
for employer_code in upd["employers"]["create"]:
print("Creating new employer {}...".format(employer_code))
install_employer(employer_code)
for employer_code in upd["employers"]["update"]:
print("Updating employer {}".format(employer_code))
update_employer(employer_code)
calculate_updates(feed_jobs)
def calculate_downloads(feed_jobs):
jobs = calculate_job_downloads(feed_jobs)
employers = calculate_employer_downloads(feed_jobs)
for job_tag in jobs:
if not itviec.cache.is_employer_cache_hit(job_tag["employer_code"]):
if job_tag["employer_code"] not in employers:
employers.append(job_tag["employer_code"])
print("Downloads: jobs: {} employers: {}".format(len(jobs), len(employers)))
return {"jobs": jobs, "employers": employers}
def calculate_job_downloads(feed_jobs):
jobs = []
for job in feed_jobs:
valid_cache = itviec.cache.is_job_cache_hit(job)
if not valid_cache:
jobs.append(job)
return jobs
def calculate_employer_downloads(feed_jobs):
employers = {}
for job in feed_jobs:
employer_code = job["employer_code"]
if employer_code in employers:
continue
valid_cache = itviec.cache.is_employer_cache_hit(employer_code, job["last_post"])
if not valid_cache:
employers[employer_code] = None
return list(employers)
def download_jobs(job_tags):
j_count = 0
j_total = len(job_tags)
for job in job_tags:
j_count += 1
print("Downloading job {}/{} {}...".format(j_count, j_total, job["code"]))
itviec.cache.fetch_job(job["code"])
time.sleep(0.7)
def download_employers(employers):
'''Input: employer_code list'''
e_count = 0
e_total = len(employers)
for employer_code in employers:
e_count += 1
print("Downloading employer {}/{} {}...".format(e_count, e_total, employer_code))
itviec.cache.fetch_employer(employer_code)
time.sleep(0.7)
def calculate_updates(feed_jobs):
jobs = calculate_job_upgrades(feed_jobs)
if jobs["create"]:
print("Refreshing cache of employers with new jobs")
for job_tag in jobs["create"]:
# force fetch employer for new jobs
itviec.cache.fetch_employer(job_tag["employer_code"])
employers = calculate_employer_upgrades(feed_jobs)
_add_employers_from_jobs(employers, jobs)
_print_report_message(employers, jobs)
return {"jobs": jobs, "employers": employers}
def _print_report_message(employers, jobs):
if employers["update"] or employers["create"] or jobs["update"] or jobs["create"]:
print("Total employer upgrades: updates: {}, new: {}".format(
len(employers["update"]), len(employers["create"])))
print("Total job upgrades: updates: {}, new: {}".format(
len(jobs["update"]), len(jobs["create"])))
else:
print("Done.")
def _add_employers_from_jobs(employers, jobs):
for job_tag in jobs["create"]:
db_emp = db.session.query(Employer).filter_by(code=job_tag["employer_code"]).first()
if db_emp:
if job_tag["employer_code"] not in employers["update"]:
employers["update"].append(job_tag["employer_code"])
else:
if job_tag["employer_code"] not in employers["create"]:
employers["create"].append(job_tag["employer_code"])
for job_tag in jobs["update"]:
if job_tag["employer_code"] not in employers["update"]:
employers["update"].append(job_tag["employer_code"])
def calculate_job_upgrades(feed_jobs):
jobs = {"create": [], "update": []}
up_to_date_counter = 0
for job in feed_jobs:
has_job = Job.query.filter(Job.code == job["code"]).first()
if has_job:
threshold = timedelta(days=1)
db_date = str_to_datetime(has_job.last_post).date()
feed_date = str_to_datetime(job["last_post"]).date()
delta = feed_date - db_date
updated_db = delta <= threshold
if updated_db:
up_to_date_counter += 1
else:
jobs["update"].append(job)
else:
print("Job '{}' not found in database, needs to be created.".format(job["code"]))
jobs["create"].append(job)
if "VERBOSE" in app.config and app.config["VERBOSE"]:
print("Jobs upgrades: update: {}, create: {}".format(
len(jobs["update"]), len(jobs["create"])))
return jobs
def calculate_employer_upgrades(feed_jobs):
employers = {"create": [], "update": []}
already_up_to_date = 0
emp_dates = itviec.source.get_employers_with_feed_date()
done = []
for job in feed_jobs:
employer_code = job["employer_code"]
if employer_code in done:
continue
done.append(employer_code)
query = db.session.query(Employer).filter_by(code=employer_code)
has_emp = query.first()
if has_emp:
threshold = timedelta(days=1)
db_date = str_to_datetime(has_emp.last_post).date()
feed_date = str_to_datetime(emp_dates[employer_code]).date()
delta = feed_date - db_date
updated_db = delta <= threshold
if updated_db:
already_up_to_date += 1
else:
if "VERBOSE" in app.config and app.config["VERBOSE"]:
print("Delta: {} | Employer: {}".format(delta, employer_code))
employers["update"].append(employer_code)
else:
employers["create"].append(employer_code)
if "VERBOSE" in app.config and app.config["VERBOSE"]:
print("Employer upgrades: update: {}, create: {}".format(
len(employers["update"]), len(employers["create"])))
return employers
``` |
{
"source": "joseph0919/Student_Management_Django",
"score": 2
} |
#### File: Student_Management_Django/clubs/views.py
```python
from django.shortcuts import render
from clubs.models import Club, Activity
def clubshome(request):
topclubs = Club.objects.all()[:4]
allclubs = Club.objects.all()
activities = Activity.objects.order_by('-date')
context = {
'topclubs': topclubs,
'allclubs': allclubs,
'activities': activities,
}
return render(request, 'clubs/clubshome.html', context)
def clubs_history(request):
clubs = Activity.objects.order_by('-date')
context = {
'clubs': clubs,
}
return render(request, 'clubs/history.html', context)
```
#### File: Student_Management_Django/teachers/models.py
```python
from django.db import models
from datetime import date
class Teacher(models.Model):
name = models.CharField(max_length=30)
techer_id = models.CharField(unique=True, blank=True, null=True, max_length=6)
designation = models.CharField(null=False, max_length=30)
joined = models.DateField('Year-Month')
phone = models.CharField(null=True, max_length=12)
def __str__(self):
return self.name
class TeacherDetail(models.Model):
teacher = models.ForeignKey(Teacher, on_delete=models.CASCADE)
dept = models.ForeignKey('departments.Department', on_delete=models.CASCADE)
short_bio = models.TextField(max_length=100)
gender = models.CharField(max_length=6)
birthdate = models.DateField()
qualification = models.CharField(max_length=100)
englis_skill = models.CharField(max_length=10)
math_skill = models.CharField(max_length=10, blank=True, null=True)
programming_skill = models.CharField(max_length=10, blank=True, null=True)
def __str__(self):
return str(self.teacher)
    def age(self, dob=None):
        dob = dob or self.birthdate
today = date.today()
return today.year - dob.year - ((today.month, today.day) < (dob.month, dob.day))
``` |
{
"source": "Joseph1337/Snkr-Findr-API",
"score": 3
} |
#### File: Joseph1337/Snkr-Findr-API/goat_scraper.py
```python
import json
import requests
import pprint
from time import sleep
import random
#extracts all user-agents from the provided 'ua_file.txt' into a list then randomly selects a user-agent
def getUserAgent():
randomUserAgent = ""
listOfUserAgents = []
userAgentFile = 'ua_file.txt'
with open('ua_file.txt') as file:
listOfUserAgents = [line.rstrip("\n") for line in file]
return random.choice(listOfUserAgents)
class Sneaker:
def __init__(self, name, query_id, retail_price, displayed_size, price, image_url):
self.name = name
self.query_id = query_id
if(retail_price == None):
self.retail_price = "N/A"
else:
self.retail_price = retail_price/100
if(displayed_size == None):
self.displayed_size = "N/A"
else:
self.displayed_size = displayed_size
if(price==None):
self.lowest_price = "N/A"
else:
self.lowest_price = price/100
self.image_url = image_url
# self.sizeAndPrice = sizeAndPrice
#function to get all sneakers from 'Shop All' page
def getAllSneakers(keyword=''):
sneakersList = []
#api call to retrieve sneaker details
url = 'https://2fwotdvm2o-3.algolianet.com/1/indexes/*/queries'
#size you want to look for:
shoe_size = ""
search_field = keyword
#data sent with POST request
for page in range(0,5):
form_data = {
"requests": [{
"indexName":"product_variants_v2",
"params":"",
"highlightPreTag" : "<ais-highlight-0000000000>",
"highlightPostTag": "</ais-highlight-0000000000>",
"distinct": "true",
"query": keyword,
"facetFilters": [["presentation_size:" + str(shoe_size)],["product_category:shoes"]],
"maxValuesPerFacet": 30,
"page": page,
"facets": ["instant_ship_lowest_price_cents","single_gender","presentation_size","shoe_condition","product_category","brand_name","color","silhouette","designer","upper_material","midsole","category","release_date_name"],
"tagFilters":""
}]
}
query_params = {
'x-algolia-agent': 'Algolia for JavaScript (3.35.1); Browser (lite); JS Helper (3.2.2); react (16.13.1); react-instantsearch (6.8.2)',
'x-algolia-application-id': '2FWOTDVM2O',
'x-algolia-api-key': 'ac96de6fef0e02bb95d433d8d5c7038a'
}
response = requests.post(url, data=json.dumps(form_data), params=query_params).json()['results'][0]['hits']
for sneaker in response:
sneakersList.append((Sneaker(sneaker['name'], sneaker['slug'], sneaker['retail_price_cents'], sneaker['size'], sneaker['lowest_price_cents'], sneaker['original_picture_url']).__dict__)) # getSneakerSizesAndPrices(sneaker['slug'])))
# sleep(random.randrange(1,3))
return sneakersList
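# Usage sketch (hypothetical keyword): each entry is a plain dict built from Sneaker.__dict__
#   results = getAllSneakers("dunk")
#   print(results[0]["name"], results[0]["lowest_price"])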
def getSneaker(query_id):
sneakerInfo = {}
url = "https://www.goat.com/web-api/v1/product_templates/" + query_id
user_agent = getUserAgent()
headers = {
"User-Agent": user_agent,
"Accept": "application/json",
"Referer": "https://www.goat.com/sneakers/" + query_id
}
for i in range(0, 10):
try:
headers.update({"user-agent": getUserAgent()})
response = requests.get(url, headers=headers).json()
print(response)
sneakerInfo['Name'] = response['name']
sneakerInfo['Colorway'] = response['details']
sneakerInfo['Style ID'] = response['sku']
sneakerInfo['Release Date'] = response['releaseDate'].split('T')[0]
sneakerInfo['Price Map'] = getSneakerSizesAndPrices(query_id)
sneakerInfo['Image'] = response['mainPictureUrl']
break
except: #runs into captcha, so retry
sleep(random.randrange(1,3))
continue
else:
return {"message": "Could not connect to GOAT.com while searching for " + query_id}
return sneakerInfo
def getSneakerSizesAndPrices(query_id): #helper method for getSneakr to get prices via separate api call
sizeAndPrice = {}
url = 'https://www.goat.com/web-api/v1/product_variants'
user_agent = getUserAgent()
headers = {
"user-agent": user_agent,
"accept" : "application/json",
"accept-encoding": "gzip, deflate, br",
"accept-language" : "en-US,en;q=0.9",
"referer": 'https://www.google.com/'
}
query_params = {
"productTemplateId": query_id
}
for i in range(0, 10):
try:
headers.update({"user-agent": getUserAgent()})
response = requests.get(url, headers=headers, params=query_params, timeout=10)
# print(response.text)
if(response.status_code >= 200 and response.status_code < 400):
page = response.json()
for i in range(0, len(page)):
#check ONLY for new shoes with boxes in good condition
if(page[i]['boxCondition'] == "good_condition" and page[i]['shoeCondition'] == "new_no_defects"):
sizeAndPrice.update({page[i]['size']: page[i]['lowestPriceCents']['amount']/100})
# elif (response.json()['success'] == False): #catches if query_id invalid
elif("success" in response.json()):
if(response.json()['success'] == False):
sizeAndPrice.update({"message": "Invalid product id."})
break
else:
raise PermissionError
except (PermissionError):#request got blocked by captcha
continue
except requests.exceptions.Timeout as err:
continue
else:
break
else: # if not sizeAndPrice:
sizeAndPrice.update({"Size_Timeout": "Price_Timeout"})
return sizeAndPrice
``` |
{
"source": "joseph2rs/netman",
"score": 2
} |
#### File: core/objects/exceptions.py
```python
from netman.core.objects.access_groups import IN, OUT
class NetmanException(Exception):
pass
class InvalidValue(NetmanException):
def __init__(self, msg="Invalid Value"):
super(InvalidValue, self).__init__(msg)
class UnknownResource(NetmanException):
def __init__(self, msg="Resource not found"):
super(UnknownResource, self).__init__(msg)
class Conflict(NetmanException):
def __init__(self, msg="Conflicting value"):
super(Conflict, self).__init__(msg)
class SessionAlreadyExists(Conflict):
def __init__(self, session_id=None):
super(SessionAlreadyExists, self).__init__(msg="Session ID already exists: {}".format(session_id))
class UnavailableResource(NetmanException):
def __init__(self, msg="Resource not available"):
super(UnavailableResource, self).__init__(msg)
class OperationNotCompleted(NetmanException):
def __init__(self, problem=None):
        super(OperationNotCompleted, self).__init__("An error occurred while completing operation, no modifications have been applied: {0}".format(problem))
class InterfaceResetIncomplete(NetmanException):
def __init__(self, interface_data=None):
super(InterfaceResetIncomplete, self).__init__("The interface reset has failed to remove these properties: {0}".format(interface_data))
class UnknownVlan(UnknownResource):
def __init__(self, vlan_number=None):
super(UnknownVlan, self).__init__("Vlan {} not found".format(vlan_number))
class UnknownInterface(UnknownResource):
def __init__(self, interface=None):
super(UnknownInterface, self).__init__("Unknown interface {}".format(interface))
class UnknownIP(UnknownResource):
def __init__(self, ip_network=None):
super(UnknownIP, self).__init__("IP {} not found".format(ip_network))
class UnknownAccessGroup(UnknownResource):
def __init__(self, direction=None):
super(UnknownAccessGroup, self).__init__("{} IP access group not found".format({IN: "Inbound", OUT: "Outgoing"}[direction] if direction else ""))
class UnknownSession(UnknownResource):
def __init__(self, session_id=None):
super(UnknownSession, self).__init__("Session \"{}\" not found.".format(session_id))
class UnknownVrf(UnknownResource):
def __init__(self, name=None):
super(UnknownVrf, self).__init__("VRF name \"{}\" was not configured.".format(name))
class UnknownDhcpRelayServer(UnknownResource):
def __init__(self, vlan_number, ip_address):
super(UnknownDhcpRelayServer, self).__init__("DHCP relay server {} not found on VLAN {}".format(ip_address, vlan_number))
class DhcpRelayServerAlreadyExists(UnknownResource):
def __init__(self, vlan_number, ip_address):
super(DhcpRelayServerAlreadyExists, self).__init__("DHCP relay server {} already exists on VLAN {}".format(ip_address, vlan_number))
class AccessVlanNotSet(UnknownResource):
def __init__(self, interface=None):
super(AccessVlanNotSet, self).__init__("Access Vlan is not set on interface {}".format(interface))
class TrunkVlanNotSet(UnknownResource):
def __init__(self, interface=None):
super(TrunkVlanNotSet, self).__init__("Trunk Vlan is not set on interface {}".format(interface))
class NativeVlanNotSet(UnknownResource):
def __init__(self, interface=None):
super(NativeVlanNotSet, self).__init__("Trunk native Vlan is not set on interface {}".format(interface))
class InterfaceSpanningTreeNotEnabled(UnknownResource):
def __init__(self, interface=None):
super(InterfaceSpanningTreeNotEnabled, self).__init__("Spanning tree is not enabled on interface {}".format(interface))
class VlanVrfNotSet(UnknownResource):
def __init__(self, vlan=None):
super(VlanVrfNotSet, self).__init__("VRF is not set on vlan {}".format(vlan))
class IPNotAvailable(Conflict):
def __init__(self, ip_network=None, reason=None):
super(IPNotAvailable, self).__init__("IP {} is not available in this vlan{}".format(ip_network, (": " + reason) if reason is not None else ""))
class IPAlreadySet(Conflict):
def __init__(self, ip_network=None, present_ip_network=None):
super(IPAlreadySet, self).__init__("IP {} is already present in this vlan as {}".format(ip_network, present_ip_network))
class VlanAlreadyExist(Conflict):
def __init__(self, vlan_number=None):
super(VlanAlreadyExist, self).__init__("Vlan {} already exists".format(vlan_number))
class InterfaceInWrongPortMode(Conflict):
def __init__(self, mode=None):
super(InterfaceInWrongPortMode, self).__init__("Operation cannot be performed on a {} mode interface".format(mode))
class VlanAlreadyInTrunk(Conflict):
def __init__(self, vlan=None):
super(VlanAlreadyInTrunk, self).__init__("Vlan {} cannot be set as native vlan because it is already a member of the trunk".format(vlan))
class VrrpAlreadyExistsForVlan(Conflict):
def __init__(self, vlan=None, vrrp_group_id=None):
super(VrrpAlreadyExistsForVlan, self).__init__("Vrrp group {group} is already in use on vlan {vlan}".format(group=vrrp_group_id, vlan=vlan))
class VrrpDoesNotExistForVlan(InvalidValue):
def __init__(self, vlan=None, vrrp_group_id=None):
super(VrrpDoesNotExistForVlan, self).__init__("Vrrp group {group} does not exist for vlan {vlan}".format(group=vrrp_group_id, vlan=vlan))
class NoIpOnVlanForVrrp(InvalidValue):
def __init__(self, vlan=None):
super(NoIpOnVlanForVrrp, self).__init__("Vlan {vlan} needs an IP before configuring VRRP".format(vlan=vlan))
class BadVlanNumber(InvalidValue):
def __init__(self):
super(BadVlanNumber, self).__init__("Vlan number is invalid")
class BadInterfaceDescription(InvalidValue):
def __init__(self, desc=None):
super(BadInterfaceDescription, self).__init__("Invalid description : {}".format(desc))
class BadVrrpGroupNumber(InvalidValue):
def __init__(self, minimum=None, maximum=None):
super(BadVrrpGroupNumber, self).__init__("VRRP group number is invalid, must be contained between {min} and {max}".format(min=minimum, max=maximum))
class BadVrrpPriorityNumber(InvalidValue):
def __init__(self, minimum=None, maximum=None):
super(BadVrrpPriorityNumber, self).__init__("VRRP priority value is invalid, must be contained between {min} and {max}".format(min=minimum, max=maximum))
class BadVrrpTimers(InvalidValue):
def __init__(self):
super(BadVrrpTimers, self).__init__("VRRP timers values are invalid")
class BadVrrpAuthentication(InvalidValue):
def __init__(self):
super(BadVrrpAuthentication, self).__init__("VRRP authentication is invalid")
class BadVrrpTracking(InvalidValue):
def __init__(self):
super(BadVrrpTracking, self).__init__("VRRP tracking values are invalid")
class BadVlanName(InvalidValue):
def __init__(self):
super(BadVlanName, self).__init__("Vlan name is invalid")
class LockedSwitch(UnavailableResource):
def __init__(self):
super(LockedSwitch, self).__init__("Switch is locked and can't be modified")
class UnableToAcquireLock(UnavailableResource):
def __init__(self):
super(UnableToAcquireLock, self).__init__("Unable to acquire a lock in a timely fashion")
class BadBondNumber(InvalidValue):
def __init__(self):
super(BadBondNumber, self).__init__("Bond number is invalid")
class InterfaceNotInBond(UnknownResource):
def __init__(self):
super(InterfaceNotInBond, self).__init__("Interface not associated to specified bond")
class BondAlreadyExist(Conflict):
def __init__(self, number=None):
super(BondAlreadyExist, self).__init__("Bond {} already exists".format(number))
class UnknownBond(UnknownResource):
def __init__(self, number=None):
super(UnknownBond, self).__init__("Bond {} not found".format(number))
class BadBondLinkSpeed(InvalidValue):
def __init__(self):
super(BadBondLinkSpeed, self).__init__("Malformed bond link speed")
class UnknownSwitch(UnknownResource):
def __init__(self, name=None):
super(UnknownSwitch, self).__init__("Switch \"{0}\" is not configured".format(name))
class MalformedSwitchSessionRequest(InvalidValue):
def __init__(self):
super(MalformedSwitchSessionRequest, self).__init__("Malformed switch session request")
class Timeout(Exception):
pass
class ConnectTimeout(Exception):
def __init__(self, host=None, port=None):
super(ConnectTimeout, self).__init__("Timed out while connecting to {} on port {}".format(host, port))
class CommandTimeout(Exception):
def __init__(self, wait_for=None, buffer=None):
super(CommandTimeout, self).__init__("Command timed out expecting {}. Current read buffer: {}"
.format(repr(wait_for), buffer))
class PrivilegedAccessRefused(Exception):
def __init__(self, buffer=None):
super(PrivilegedAccessRefused, self).__init__("Could not get PRIVILEGED exec mode. "
"Current read buffer: {}".
format(buffer))
class CouldNotConnect(Exception):
def __init__(self, host=None, port=None):
super(CouldNotConnect, self).__init__("Could not connect to {} on port {}".format(host, port))
class InvalidAccessGroupName(InvalidValue):
def __init__(self, name=None):
super(InvalidAccessGroupName, self).__init__("Access Group Name is invalid: {}".format(name))
class InvalidMtuSize(InvalidValue):
def __init__(self, err_msg=None):
super(InvalidMtuSize, self).__init__("MTU value is invalid : {}".format(err_msg))
class InvalidUnicastRPFMode(InvalidValue):
def __init__(self, mode=None):
super(InvalidUnicastRPFMode, self).__init__("Unknown Unicast RPF mode: \"{}\"".format(mode))
class UnsupportedOperation(NotImplementedError):
def __init__(self, operation=None, message=None):
super(UnsupportedOperation, self).__init__("Operation \"{}\" is not supported on this equipment: {}".format(operation, message))
```
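A minimal, self-contained sketch (not from the netman source) of the pattern the exception classes above follow: each subclass formats a human-readable message in `__init__` and hands it to a shared base class, so callers can catch by category while still reporting the detailed message. The base class below is a stand-in for illustration only.
```python
# Illustration only: UnknownResource here is a stand-in for netman's base class.
class UnknownResource(Exception):
    """Stand-in for netman.core.objects.exceptions.UnknownResource."""

class UnknownSwitch(UnknownResource):
    def __init__(self, name=None):
        super(UnknownSwitch, self).__init__("Switch \"{0}\" is not configured".format(name))

try:
    raise UnknownSwitch("lab-switch-01")
except UnknownResource as error:
    print(error)  # Switch "lab-switch-01" is not configured
```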
#### File: adapters/compliance_tests/add_vlan_test.py
```python
from hamcrest import assert_that, is_, none, empty
from netman.core.objects.access_groups import IN, OUT
from netman.core.objects.exceptions import BadVlanNumber, BadVlanName, VlanAlreadyExist
from tests import has_message
from tests.adapters.compliance_test_case import ComplianceTestCase
class AddVlanTest(ComplianceTestCase):
_dev_sample = "juniper_qfx_copper"
def test_creates_an_empty_vlan(self):
self.client.add_vlan(1000)
vlan = self.get_vlan_from_list(1000)
assert_that(vlan.number, is_(1000))
assert_that(vlan.name, is_(none()))
assert_that(vlan.access_groups[IN], is_(none()))
assert_that(vlan.access_groups[OUT], is_(none()))
assert_that(vlan.vrf_forwarding, is_(none()))
assert_that(vlan.ips, is_(empty()))
assert_that(vlan.vrrp_groups, is_(empty()))
assert_that(vlan.dhcp_relay_servers, is_(empty()))
def test_sets_the_name_when_given(self):
self.client.add_vlan(1000, name="Hey")
vlan = self.get_vlan_from_list(1000)
assert_that(vlan.name, is_("Hey"))
def test_fails_if_the_vlan_already_exist(self):
self.client.add_vlan(1000)
with self.assertRaises(VlanAlreadyExist) as expect:
self.client.add_vlan(1000)
assert_that(expect.exception, has_message("Vlan 1000 already exists"))
def test_fails_with_a_wrong_number(self):
with self.assertRaises(BadVlanNumber) as expect:
self.client.add_vlan(9001)
assert_that(expect.exception, has_message("Vlan number is invalid"))
def test_fails_with_a_wrong_name(self):
with self.assertRaises(BadVlanName) as expect:
self.client.add_vlan(1000, name="A space isn't good")
assert_that(expect.exception, has_message("Vlan name is invalid"))
def tearDown(self):
self.janitor.remove_vlan(1000)
super(AddVlanTest, self).tearDown()
```
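The test above checks exception messages with `assertRaises` used as a context manager plus a project-specific `has_message` matcher. A minimal, self-contained sketch of the same assertion pattern, assuming only the standard library and PyHamcrest (`has_message` itself is not reproduced here):
```python
import unittest
from hamcrest import assert_that, is_

class ExceptionMessageTest(unittest.TestCase):
    def test_exception_message(self):
        # assertRaises as a context manager captures the raised exception so
        # its message can be asserted on afterwards.
        with self.assertRaises(ValueError) as expect:
            raise ValueError("Vlan number is invalid")
        assert_that(str(expect.exception), is_("Vlan number is invalid"))

if __name__ == "__main__":
    unittest.main()
```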
#### File: adapters/unified_tests/interface_management_test.py
```python
from netman.core.objects.interface_states import OFF, ON
from tests.adapters.configured_test_case import ConfiguredTestCase, skip_on_switches
class InterfaceManagementTest(ConfiguredTestCase):
__test__ = False
@skip_on_switches("juniper", "juniper_qfx_copper", "juniper_mx")
def test_set_interface_state_off(self):
self.client.set_interface_state(self.test_port, OFF)
@skip_on_switches("juniper", "juniper_qfx_copper", "juniper_mx")
def test_set_interface_state_on(self):
self.client.set_interface_state(self.test_port, ON)
@skip_on_switches("cisco", "brocade", "brocade_telnet", "juniper_mx")
def test_edit_spanning_tree(self):
self.client.edit_interface_spanning_tree(self.test_port, edge=True)
@skip_on_switches("cisco", "brocade", "brocade_telnet", "juniper_mx")
def test_set_interface_lldp_state(self):
self.client.set_interface_lldp_state(self.test_port, enabled=True)
@skip_on_switches("cisco", "brocade", "brocade_telnet", "juniper_mx")
def test_disable_lldp(self):
self.client.set_interface_lldp_state(self.test_port, enabled=False)
```
#### File: adapters/unified_tests/vlan_management_test.py
```python
from hamcrest import equal_to, assert_that, has_length, is_, none
from netaddr import IPNetwork, IPAddress
from netman.core.objects.access_groups import IN, OUT
from netman.core.objects.exceptions import UnknownVlan, UnknownInterface, \
UnknownResource
from netman.core.objects.interface_states import OFF, ON
from netman.core.objects.port_modes import ACCESS, TRUNK
from tests.adapters.configured_test_case import ConfiguredTestCase, skip_on_switches
class VlanManagementTest(ConfiguredTestCase):
__test__ = False
@skip_on_switches("juniper", "juniper_qfx_copper", "dell", "dell_telnet", "dell10g", "dell10g_telnet", "juniper_mx")
def test_get_vlan(self):
self.client.add_vlan(2999, name="my-test-vlan")
self.client.set_vlan_access_group(2999, IN, "ACL-IN")
self.client.set_vlan_access_group(2999, OUT, "ACL-OUT")
self.client.set_vlan_vrf(2999, "DEFAULT-LAN")
self.client.add_ip_to_vlan(2999, IPNetwork("10.10.10.2/29"))
self.client.add_vrrp_group(vlan_number=2999, group_id=73, ips=[IPAddress("10.10.10.1")], priority=110,
track_id=self.test_vrrp_track_id, track_decrement=50, hello_interval=5, dead_interval=15)
self.client.add_dhcp_relay_server(2999, IPAddress("10.10.10.11"))
self.try_to.set_vlan_icmp_redirects_state(2999, False)
single_vlan = self.client.get_vlan(2999)
vlan_from_list = self.get_vlan_from_list(2999)
assert_that(single_vlan, equal_to(vlan_from_list))
@skip_on_switches("juniper", "juniper_qfx_copper", "dell", "dell_telnet", "dell10g", "dell10g_telnet", "juniper_mx")
def test_get_vlan_defaults(self):
self.client.add_vlan(2999, name="my-test-vlan")
single_vlan = self.client.get_vlan(2999)
vlan_from_list = self.get_vlan_from_list(2999)
assert_that(single_vlan, equal_to(vlan_from_list))
@skip_on_switches("juniper", "juniper_qfx_copper", "dell", "dell_telnet", "dell10g", "dell10g_telnet", "juniper_mx")
def test_get_vlan_fails(self):
with self.assertRaises(UnknownVlan):
self.client.get_vlan(4000)
@skip_on_switches("juniper_mx")
def test_adding_and_removing_a_vlan(self):
self.client.add_vlan(2999, name="my-test-vlan")
vlan = self.get_vlan_from_list(2999)
assert_that(vlan.name, equal_to('my-test-vlan'))
assert_that(len(vlan.ips), equal_to(0))
self.client.remove_vlan(2999)
vlans = self.client.get_vlans()
vlan = next((vlan for vlan in vlans if vlan.number == 2999), None)
assert_that(vlan is None)
@skip_on_switches("juniper_mx")
def test_setting_a_vlan_on_an_interface(self):
self.client.add_vlan(2999, name="my-test-vlan")
self.client.set_access_mode(self.test_port)
self.client.set_access_vlan(self.test_port, vlan=2999)
self.client.unset_interface_access_vlan(self.test_port)
self.client.remove_vlan(2999)
@skip_on_switches("juniper_mx")
def test_port_mode_trunk(self):
self.client.add_vlan(2999, name="my-test-vlan")
self.client.set_trunk_mode(self.test_port)
self.client.remove_vlan(2999)
@skip_on_switches("juniper_mx")
def test_port_mode_access(self):
self.client.add_vlan(2999, name="my-test-vlan")
self.client.set_access_mode(self.test_port)
self.client.remove_vlan(2999)
@skip_on_switches("juniper_mx")
def test_native_trunk(self):
self.client.add_vlan(2999, name="my-test-vlan")
self.client.set_trunk_mode(self.test_port)
self.client.set_interface_native_vlan(self.test_port, vlan=2999)
self.client.unset_interface_native_vlan(self.test_port)
self.client.set_access_mode(self.test_port)
self.client.remove_vlan(2999)
@skip_on_switches("juniper_mx")
def test_passing_from_trunk_mode_to_access_gets_rid_of_stuff_in_trunk_mode(self):
self.client.add_vlan(1100)
self.client.add_vlan(1200)
self.client.add_vlan(1300)
self.client.add_vlan(1400)
self.client.set_trunk_mode(self.test_port)
self.client.set_interface_native_vlan(self.test_port, vlan=1200)
self.client.add_trunk_vlan(self.test_port, vlan=1100)
self.client.add_trunk_vlan(self.test_port, vlan=1300)
self.client.add_trunk_vlan(self.test_port, vlan=1400)
interfaces = self.client.get_interfaces()
test_if = next(i for i in interfaces if i.name == self.test_port)
assert_that(test_if.port_mode, equal_to(TRUNK))
assert_that(test_if.trunk_native_vlan, equal_to(1200))
assert_that(test_if.access_vlan, equal_to(None))
assert_that(test_if.trunk_vlans, equal_to([1100, 1300, 1400]))
self.client.set_access_mode(self.test_port)
interfaces = self.client.get_interfaces()
test_if = next(i for i in interfaces if i.name == self.test_port)
assert_that(test_if.port_mode, equal_to(ACCESS))
assert_that(test_if.trunk_native_vlan, equal_to(None))
assert_that(test_if.access_vlan, equal_to(None))
assert_that(test_if.trunk_vlans, has_length(0))
self.client.remove_vlan(1100)
self.client.remove_vlan(1200)
self.client.remove_vlan(1300)
self.client.remove_vlan(1400)
@skip_on_switches("juniper_mx")
def test_invalid_vlan_parameter_fails(self):
with self.assertRaises(UnknownVlan):
self.client.remove_vlan(2999)
with self.assertRaises(UnknownVlan):
self.client.set_access_vlan(self.test_port, vlan=2999)
self.client.set_trunk_mode(self.test_port)
with self.assertRaises(UnknownVlan):
self.client.add_trunk_vlan(self.test_port, vlan=2999)
with self.assertRaises(UnknownVlan):
self.client.set_interface_native_vlan(self.test_port, vlan=2999)
with self.assertRaises(UnknownVlan):
self.client.add_trunk_vlan(self.test_port, vlan=2999)
self.client.add_vlan(2999, name="my-test-vlan")
# TODO(jprovost) Unify switch adapters to raise the same exception
with self.assertRaises(UnknownResource):
self.client.remove_trunk_vlan(self.test_port, vlan=2999)
@skip_on_switches("juniper", "juniper_qfx_copper", "juniper_mx")
def test_invalid_interface_parameter_fails(self):
with self.assertRaises(UnknownInterface):
self.client.set_interface_state('42/9999', ON)
with self.assertRaises(UnknownInterface):
self.client.set_interface_state('42/9999', OFF)
with self.assertRaises(UnknownInterface):
self.client.set_access_mode('42/9999')
with self.assertRaises(UnknownInterface):
self.client.set_trunk_mode('42/9999')
# TODO(jprovost) Unify switch adapters to raise the same exception
with self.assertRaises(UnknownResource):
self.client.set_access_vlan('42/9999', 1234)
with self.assertRaises(UnknownInterface):
self.client.unset_interface_access_vlan('42/9999')
# TODO(jprovost) Unify switch adapters to raise the same exception
# Dell 10G raises UnknownInterface
# other raises UnknownVlan
with self.assertRaises(UnknownResource):
self.client.add_trunk_vlan('42/9999', 2999)
# TODO(jprovost) Behavior is inconsistent across switch adapters
# Brocade raises UnknownInterface
# Dell 10G raises NativeVlanNotSet
# Cisco does not raise
try:
self.client.unset_interface_native_vlan(self.test_port)
except UnknownResource:
pass
self.client.add_vlan(2999, name="my-test-vlan")
with self.assertRaises(UnknownInterface):
self.client.set_access_vlan('42/9999', 2999)
# TODO(jprovost) Unify switch adapters to raise the same exception
# Dell 10G raises UnknownInterface
# other raises TrunkVlanNotSet
with self.assertRaises(UnknownResource):
self.client.remove_trunk_vlan('42/9999', 2999)
with self.assertRaises(UnknownInterface):
self.client.set_interface_native_vlan('42/9999', 2999)
@skip_on_switches("juniper", "juniper_qfx_copper", "dell", "dell_telnet", "dell10g", "dell10g_telnet", "juniper_mx")
def test_vrf_management(self):
self.client.add_vlan(2999, name="my-test-vlan")
self.client.set_vlan_vrf(2999, 'DEFAULT-LAN')
vlan = self.get_vlan_from_list(2999)
assert_that(vlan.vrf_forwarding, is_('DEFAULT-LAN'))
self.client.unset_vlan_vrf(2999)
vlan = self.get_vlan_from_list(2999)
assert_that(vlan.vrf_forwarding, is_(none()))
def tearDown(self):
self.janitor.remove_vlan(2999)
self.janitor.set_access_mode(self.test_port)
super(VlanManagementTest, self).tearDown()
``` |
{
"source": "joseph346/connector",
"score": 4
} |
#### File: connector/connector/ai.py
```python
inf = 100000000000000000000000
class AI(object):
"""An AI for playing Connect Four-like games."""
def __init__(self, my_t, depth=4):
self.t = my_t
if my_t == 'X':
self.other_t = 'O'
else:
self.other_t = 'X'
self.depth = depth
def get_move(self, board):
"""Get the best possible move."""
move, score = self.neg_max(board, self.depth, True, -inf, inf)
return move, self.t
def neg_max(self, board, depth, my_turn, alpha, beta):
"""Recursively search for the best move."""
wg = board.get_winning_group()
if wg is not None:
sign = 1
if not my_turn:
sign = -sign
if board[wg[0]] != self.t:
sign = -sign
return None, inf*sign
if depth == 0:
score = self.score_state(board, self.t)
if not my_turn:
score = -score
return None, score
possible_moves = board.get_valid_moves()
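        # Sort candidate moves so the most promising ones for the player to
        # move are searched first; better ordering lets the alpha-beta cutoff
        # below prune more branches.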
def order_move(m):
board.drop_piece(m, self.t if my_turn else self.other_t)
score = self.score_state(board, self.t)
board.remove_piece(m)
if my_turn:
score = -score
return score
possible_moves = sorted(possible_moves, key=order_move)
max_score = None
max_move = None
for m in possible_moves:
board.drop_piece(m, self.t if my_turn else self.other_t)
move, score = self.neg_max(board, depth-1, not my_turn, -beta, -alpha)
score = -score
board.remove_piece(m)
if max_score is None or score > max_score:
max_score = score
max_move = m
#if depth == self.depth:
# print "[%s] %s: %s" % (self.t, m, score)
alpha = max(alpha, score)
if alpha >= beta:
break
if max_score is None:
return None, self.score_state(board, self.t)
return max_move, max_score
def score_state(self, board, to_play):
"""Score a board's state as-is. A higher score represents a more favorable position."""
score = 0
def score_group(start, dir, t):
r, c = start
dr, dc = dir
changes = 0
for i in range(board.win_size):
if board[r, c] == '.':
dist = 0
tr = r
while board.in_bounds(tr, c) and board[tr, c] == '.':
dist += 1
tr -= 1
changes += dist
elif board[r, c] == t:
pass
else:
return 0
r, c = r + dr, c + dc
#print "STARTING AT %s, %s MOVING %s, %s (TYPE: %s) CHANGES: %s" % (start+dir+(t,changes))
if changes == 0:
return inf
return 2**max(10 - changes, 0) / 100.0
# Get all horizontal groups
for sr in range(board.rows):
for sc in range(board.cols - board.win_size + 1):
score += score_group((sr, sc), (0, 1), self.t)
score -= score_group((sr, sc), (0, 1), self.other_t)
# Get all vertical groups
for sc in range(board.cols):
for sr in range(board.rows - board.win_size + 1):
score += score_group((sr, sc), (1, 0), self.t)
score -= score_group((sr, sc), (1, 0), self.other_t)
# Get all ascending diagonal groups
for sr in range(board.rows - board.win_size + 1):
for sc in range(board.cols - board.win_size + 1):
score += score_group((sr, sc), (1, 1), self.t)
score -= score_group((sr, sc), (1, 1), self.other_t)
# Get all descending diagonal groups
        for sr in range(board.rows-1, board.win_size-2, -1):  # include the lowest valid starting row (win_size-1)
for sc in range(board.cols - board.win_size + 1):
score += score_group((sr, sc), (-1, 1), self.t)
score -= score_group((sr, sc), (-1, 1), self.other_t)
return score
``` |
{
"source": "joseph62/Scripts",
"score": 3
} |
#### File: pyscripts/json/expand_field.py
```python
import argparse
import json
import sys
def get_arguments(args):
parser = argparse.ArgumentParser(description="Expand a field from the given JSON")
parser.add_argument("-k", "--key", help="The key to expand", required=True)
json_input = parser.add_mutually_exclusive_group(required=True)
json_input.add_argument("--stdin", help="Read JSON from stdin", action="store_true")
json_input.add_argument("--json", help="JSON string", type=str)
json_input.add_argument("--file", help="JSON file", type=argparse.FileType("r"))
args = parser.parse_args(args)
if args.stdin:
args.json = json.loads(sys.stdin.read())
elif args.file:
args.json = json.load(args.file)
else:
args.json = json.loads(args.json)
return args
def expand_dict_key(key, dict_):
return dict_.get(key) if isinstance(dict_, dict) else None
def main(args):
args = get_arguments(args)
result = expand_dict_key(args.key, args.json)
if not result:
return 1
else:
print(json.dumps(result, indent=2))
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
```
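A hypothetical usage sketch, not part of the repository: the module name `expand_field` and the sample JSON below are assumptions for illustration. Calling `main` with an argument list mirrors what the command-line entry point does.
```python
# Hypothetical example: assumes this script is importable as expand_field.
import expand_field

# Equivalent to: python expand_field.py --json '{"user": {"id": 7}}' --key user
exit_code = expand_field.main(["--json", '{"user": {"id": 7}}', "--key", "user"])
print("exit code:", exit_code)  # prints the expanded field as JSON, then 0
```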
#### File: Scripts/pyscripts/replace_identifier.py
```python
import argparse
import os
import sys
def get_arguments(args):
parser = argparse.ArgumentParser(
description="Expand appname identifier into the application name"
)
parser.add_argument(
"-p",
"--path",
default=os.getcwd(),
help="The path to work with. Current working directory by default",
)
parser.add_argument(
"-i", "--identifier", required=True, help="The identifier to substitute."
)
parser.add_argument(
"-s",
"--substitute",
required=True,
help="The value to replace the identifier with.",
)
parser.add_argument(
"--dry-run",
default=False,
action="store_true",
help="Print changes that would be made without making them",
)
return parser.parse_args(args)
def replace_identifier(path, identifier, substitute, dry_run=False):
files = os.listdir(path)
replace_pairs = []
for file in files:
new_file = file.replace(identifier, substitute)
old_path = os.path.join(path, file)
new_path = os.path.join(path, new_file)
if not dry_run:
os.replace(old_path, new_path)
replace_pairs.append((file, new_file))
return replace_pairs
def main(args):
args = get_arguments(args)
output = replace_identifier(
args.path, args.identifier, args.substitute, args.dry_run
)
for old_file, new_file in output:
print(f'"{old_file}" -> "{new_file}"')
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
```
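A hypothetical usage sketch, not part of the repository: the module name `replace_identifier` is assumed for illustration. With `--dry-run` the script prints the old to new name mapping for files in the target directory without renaming anything.
```python
# Hypothetical example: assumes this script is importable as replace_identifier.
import replace_identifier

# Preview how files in the current directory would be renamed, without renaming them.
replace_identifier.main(
    ["--identifier", "appname", "--substitute", "demo_app", "--dry-run"]
)
```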
#### File: Scripts/pyscripts/truncate_lines.py
```python
import sys
import argparse
import signal
DEFAULT_LINE_LENGTH = 80
def parse_arguments(args):
parser = argparse.ArgumentParser(
description="Trucate incoming lines to a specified length with an optional suffix"
)
parser.add_argument(
"-l", "--length", help="The maximum length of each line", type=int, default=80
)
parser.add_argument(
"-s",
"--suffix",
help="A suffix to add to the end of truncated lines",
default="",
)
return parser.parse_args(args)
def truncate_lines_from_handle(handle, length, suffix):
    for line in handle:
        # Drop the trailing newline so the length check and the printed output
        # stay consistent (print() adds its own newline).
        line = line.rstrip("\n")
        if len(line) > length:
            yield f"{line[:length-len(suffix)]}{suffix}"
        else:
            yield line
def main(args):
args = parse_arguments(args)
for line in truncate_lines_from_handle(sys.stdin, args.length, args.suffix):
print(line)
return 0
if __name__ == "__main__":
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
sys.exit(main(sys.argv[1:]))
``` |
{
"source": "Joseph7117/Wifi",
"score": 2
} |
#### File: Joseph7117/Wifi/main.py
```python
from flask import Flask
from flask import render_template
from model.app import Wifi
app = Flask(__name__) #Main Application
app.secret_key = 'josephkagiri'
@app.route('/')
def website_root():
wifi = Wifi()
wifi_networks = wifi.Search()
return render_template("index.html", wifi_networks=wifi_networks)
if __name__ == '__main__':
app.run()
``` |
{
"source": "joseph97git/projects-by-language",
"score": 3
} |
#### File: dev/machine-learning/mnist_classification.py
```python
from numpy import mean
from numpy import std
from matplotlib import pyplot
from sklearn.model_selection import KFold
from keras.datasets import mnist
from keras.utils import to_categorical
from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Dense
from keras.layers import Flatten
from keras.optimizers import SGD
# load train and test dataset
def load_dataset():
# load dataset
(trainX, trainY), (testX, testY) = mnist.load_data()
# reshape dataset to have a single channel
trainX = trainX.reshape((trainX.shape[0], 28, 28, 1))
testX = testX.reshape((testX.shape[0], 28, 28, 1))
# one hot encode target values
trainY = to_categorical(trainY)
testY = to_categorical(testY)
return trainX, trainY, testX, testY
# scale pixels
def prep_pixels(train, test):
# convert from integers to floats
train_norm = train.astype('float32')
test_norm = test.astype('float32')
# normalize to range 0-1
train_norm = train_norm / 255.0
test_norm = test_norm / 255.0
# return normalized images
return train_norm, test_norm
# define cnn model
def define_model():
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', input_shape=(28, 28, 1)))
model.add(MaxPooling2D((2, 2)))
model.add(Flatten())
model.add(Dense(100, activation='relu', kernel_initializer='he_uniform'))
model.add(Dense(10, activation='softmax'))
# compile model
opt = SGD(lr=0.01, momentum=0.9)
model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
return model
# evaluate a model using k-fold cross-validation
def evaluate_model(model, dataX, dataY, n_folds=5):
scores, histories = list(), list()
# prepare cross validation
kfold = KFold(n_folds, shuffle=True, random_state=1)
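    # NOTE: the same compiled model keeps learning across folds here, so later
    # folds start from weights trained on earlier ones; building a fresh model
    # inside this loop would give fully independent k-fold estimates.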
# enumerate splits
for train_ix, test_ix in kfold.split(dataX):
# select rows for train and test
trainX, trainY, testX, testY = dataX[train_ix], dataY[train_ix], dataX[test_ix], dataY[test_ix]
# fit model
history = model.fit(trainX, trainY, epochs=10, batch_size=32, validation_data=(testX, testY), verbose=0)
# evaluate model
_, acc = model.evaluate(testX, testY, verbose=0)
print('> %.3f' % (acc * 100.0))
# stores scores
scores.append(acc)
histories.append(history)
return scores, histories
# plot diagnostic learning curves
def summarize_diagnostics(histories):
for i in range(len(histories)):
# plot loss
pyplot.subplot(211)
pyplot.title('Cross Entropy Loss')
pyplot.plot(histories[i].history['loss'], color='blue', label='train')
pyplot.plot(histories[i].history['val_loss'], color='orange', label='test')
# plot accuracy
pyplot.subplot(212)
pyplot.title('Classification Accuracy')
pyplot.plot(histories[i].history['accuracy'], color='blue', label='train')
pyplot.plot(histories[i].history['val_accuracy'], color='orange', label='test')
pyplot.show()
# summarize model performance
def summarize_performance(scores):
# print summary
print('Accuracy: mean=%.3f std=%.3f, n=%d' % (mean(scores)*100, std(scores)*100, len(scores)))
# box and whisker plots of results
pyplot.boxplot(scores)
pyplot.show()
# run the test harness for evaluating a model
def run_test_harness():
# load dataset
trainX, trainY, testX, testY = load_dataset()
# prepare pixel data
trainX, testX = prep_pixels(trainX, testX)
# define model
model = define_model()
# evaluate model
scores, histories = evaluate_model(model, trainX, trainY)
# learning curves
summarize_diagnostics(histories)
# summarize estimated performance
summarize_performance(scores)
# entry point, run the test harness
run_test_harness()
``` |
{
"source": "joseph9991/Milestone1",
"score": 3
} |
#### File: Milestone1/task/task1.py
```python
import os, sys
import datetime, time
import librosa
import boto3, smart_open
import operator
import json
class Task1:
def __init__(self, file_name, bucket_name, number):
self.file_name = file_name
self.bucket_name = bucket_name
self.audio_format = ""
self.s3_client = boto3.client('s3')
self.n = number
# Validates the file, checks for the valid file extension and returns audio-format
def identifyFormat(self):
valid_extensions = ('.mp3','.ogg','.wav','.m4a','.flac', '.mpeg', '.aac')
file_path, file_extension = os.path.splitext(self.file_name)
if file_extension in valid_extensions:
return file_extension[1:]
elif not file_extension:
error = file_path + file_extension + ' is either a directory or not a valid file'
raise AssertionError(error)
else:
error = 'File extension ' + file_extension + ' not valid'
raise AssertionError(error)
def printfilename(self):
print(os.path.basename(os.path.splitext(self.file_name)[0]))
# Converts Seconds to Minutes:Seconds OR Hours:Minutes:Seconds
def seconds_to_minutes(self,seconds):
time = str(datetime.timedelta(seconds=round(seconds,0)))
return time[2:] if time[0] == '0' else time
    # Converts an m4a/aac file to wav, writes it out as <basename>.wav, and
    # replaces the object's file name with the converted file's name
def convert_file_to_wav(self):
print("\nConverting {} to wav format...".format(os.path.basename(self.file_name)))
start_time = time.time()
self.audio_format = 'wav'
data, sampling_rate = librosa.load(self.file_name,sr=44100)
new_file_name = os.path.basename(os.path.splitext(self.file_name)[0]) + '.wav'
librosa.output.write_wav(new_file_name, data, sampling_rate)
self.file_name = new_file_name
end_time = time.time()
print("Finished conversion to .wav in " + self.seconds_to_minutes(end_time - start_time) +
" seconds")
def upload_file(self,path):
self.audio_format = self.identifyFormat()
if self.audio_format == 'm4a' or self.audio_format == 'aac':
self.convert_file_to_wav()
print("\nUploading {} to S3...".format(os.path.basename(self.file_name)))
try:
start_time = time.time()
response = self.s3_client.upload_file(self.file_name, self.bucket_name,
'{}{}'.format(path,os.path.basename(self.file_name)))
end_time = time.time()
print("Finished uploading file to S3 in " + self.seconds_to_minutes(end_time - start_time) +
" seconds")
except Exception as err:
print(f'Error occurred: {err}')
exit(0)
def start_transcribe(self,bucket,path):
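        # Builds the S3 URI of the uploaded audio, starts an Amazon Transcribe
        # job with speaker labels enabled (up to 3 speakers), and polls every
        # 15 seconds until the job reports COMPLETED or FAILED.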
start_time = time.time()
transcribe = boto3.client('transcribe')
job_name = '{}-{}'.format(os.path.basename(os.path.splitext(self.file_name)[0]),str(self.n))
job_uri = "https://{}.s3.amazonaws.com/{}{}".format(self.bucket_name,path,
os.path.basename(self.file_name))
print("\nCreating a new Transcribe Job {}!!\nPlease wait...\n".format(job_name))
transcribe.start_transcription_job(
TranscriptionJobName=job_name,
Media={'MediaFileUri':job_uri},
MediaFormat=self.audio_format,
LanguageCode='en-US',
OutputBucketName=bucket,
Settings={
'ShowSpeakerLabels':True,
'MaxSpeakerLabels':3,
}
)
while True:
status = transcribe.get_transcription_job(TranscriptionJobName=job_name)
if status['TranscriptionJob']['TranscriptionJobStatus'] in ['COMPLETED', 'FAILED']:
break
# print("Not ready yet...")
time.sleep(15)
end_time = time.time()
print("Transcribe Job has been successfully completed in " +
self.seconds_to_minutes(end_time - start_time) + " minutes")
def read_json_response(self):
print("\nWaiting for the output file to generate...")
time.sleep(2)
jsonFile = '{}-{}.json'.format(os.path.basename(os.path.splitext(self.file_name)[0]),str(self.n))
file_link = 's3://{}/transcript/{}'.format(self.bucket_name, jsonFile)
return json.load(smart_open.open(file_link))
def analyze(self,jsonData):
# stream lines from an S3 object
jsonData = jsonData
print("\n\n----------------------------------------------")
print("Speaker\tStopwords\tFillerwords\tSpeech")
print("----------------------------------------------")
for data in jsonData[0]:
print('{}\t{}\t\t{}\t{}'.format(data['speaker'],data['stopwords'],
data['fillerwords'],data['comment']))
print("----------------------------------------------")
print("\n\n\n----------------------------------------------")
print("Speaker\tStopwords\tFillerwords")
print("----------------------------------------------")
for speaker in jsonData[1].keys():
print('{}\t{}\t{}'.format(speaker,sum(jsonData[1][speaker]["stopwords"].values()),
sum(jsonData[1][speaker]["fillerwords"].values())))
print("----------------------------------------------")
for speaker,value in jsonData[1].items():
flag = True
print("\n\n\n----------------------------------------------")
print('{} Analytics'.format(speaker))
print("----------------------------------------------")
print('Stopwords')
for word, count in value['stopwords'].items():
if flag:
print(word,count,end='\t')
flag = False
else:
print(word,count)
flag = True
if value['fillerwords']:
print("\n\n----------")
print('Fillerwords')
for word, count in value['fillerwords'].items():
print(word,count)
print("\n\n----------------------------------------------")
return jsonData[0]
def execute_all_functions(self):
print("Commencing Task 1: Identify & Count stopwords of each speaker")
self.upload_file('audio/')
self.start_transcribe('surfboard-response','audio/')
jsonData = self.read_json_response()
data = self.analyze(jsonData)
return data
# # For Testing
# if __name__ == "__main__":
# file_name = sys.argv[1]
# # For ignoring UserWarnings
# warnings.filterwarnings("ignore")
# bucket_name = 'surfboard-transcribe'
# sb = Task1(file_name,bucket_name)
# sb.execute_all_functions()
```
#### File: Milestone1/task/task2.py
```python
import pandas as pd
from pandas import read_csv
import os
import sys
import glob
import re
import soundfile as sf
import pyloudnorm as pyln
from .thdncalculator import execute_thdn
class Task2:
def __init__(self,data,file_name):
self.df = pd.DataFrame.from_dict(data, orient='columns')
self.file_name = file_name
self.speakers = []
self.speaker_set = ()
def merge_timestamp(self):
'''
        Corrects small errors in the speaker end times obtained from the Task 1
        response: consecutive segments from the same speaker are merged, and each
        speaker's end time is then replaced with the next speaker's start time.
'''
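        # Worked example of the two passes below: segments
        #   (spk_0, 00:00:00-00:00:02), (spk_0, 00:00:03-00:00:05), (spk_1, 00:00:06-00:00:10)
        # first collapse into (spk_0, 00:00:00-00:00:05) and (spk_1, 00:00:06-00:00:10);
        # each end time is then snapped to the next segment's start, giving
        # (spk_0, 00:00:00-00:00:06) and (spk_1, 00:00:06-00:00:10).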
df_length = len(self.df.index)
cursor = 0
speaker_list = self.df['speaker'].values.tolist()
start_list = self.df['start_time'].values.tolist()
end_list = self.df['end_time'].values.tolist()
self.speaker_set = sorted(list(set(speaker_list)))
for i in range(0,len(speaker_list)):
current_row = []
current_speaker = speaker_list[i]
if cursor == 0:
current_row = [current_speaker,start_list[0],end_list[0]]
self.speakers.append(current_row)
cursor = cursor + 1
continue
if current_speaker == speaker_list[i] and current_speaker == speaker_list[i-1]:
self.speakers[-1][2] = end_list[i]
else:
current_row = [current_speaker,start_list[i],end_list[i]]
self.speakers.append(current_row)
cursor = cursor + 1
for i in range(len(self.speakers)):
if i == len(self.speakers)-1:
break
self.speakers[i][2] = self.speakers[i+1][1]
print("\nComputed merged Timestamps for every speaker!!")
def trim(self):
'''
        Trims the audio file into one clip per speaker segment using FFmpeg,
        so there may be multiple clips per speaker.
OUTPUT: spk_0-1.wav,spk_0-2.wav,spk_0-3.wav
spk_1-1.wav, spk_1-2.wav
spk_2-1.wav,spk_2-2.wav
'''
cursor = 0
for speaker in self.speakers:
new_file = speaker[0]+str(cursor)+'.wav'
command = f"ffmpeg -loglevel quiet -y -i {self.file_name} -ss {speaker[1]} -to \
{speaker[2]} -c:v copy -c:a copy {new_file}"
try:
os.system(command)
content = "file '{}'".format(new_file)
except Exception as err:
print(f'Error occurred: {err}')
cursor = cursor + 1
print("Divided audio file into {} individual speaker files!!".format(len(self.speakers)))
def generate_files(self):
'''
        Merges each speaker's individual clips into a single file per speaker.
OUTPUT: spk_0.wav,spk_1.wav,spk_2.wav
'''
txt_files = []
for i in range(len(self.speaker_set)):
fileName = '{}.txt'.format(self.speaker_set[i])
with open(fileName,'a+') as f:
txt_files.append(fileName)
wavFiles = glob.glob('{}*.wav'.format(self.speaker_set[i]))
convert = lambda text: int(text) if text.isdigit() else text
alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
wavFiles = sorted(wavFiles,key=alphanum_key)
for wavFile in wavFiles:
f.write('file \'{}\'\n'.format(wavFile))
# speaker_set = wavFiles
# Deleting all the text files needed for merging
for txt_file in txt_files:
command = f"ffmpeg -loglevel quiet -y -f concat -i {txt_file} -c copy {txt_file[:-4]}.wav"
os.system(command)
os.remove(txt_file)
## Deleting the individual speaker audio clip [which were not merged]
# for wav_file in glob.glob('spk_[0-4][0-9]*.wav'):
# os.remove(wav_file)
print("Merged the individual speaker files into {} files!!\n".format(len(self.speaker_set)))
def calculate_rank(self):
'''
        Calculates the loudness and THD+N value of each speaker's file.
'''
speaker_loudness = {}
speaker_thdn = {}
speaker_frequency = {}
for speaker in self.speaker_set:
wav_file = speaker+'.wav'
data, rate = sf.read(wav_file)
print('Analyzing "' + wav_file + '"...')
meter = pyln.Meter(rate)
loudness = meter.integrated_loudness(data)
speaker_loudness[speaker] = loudness
response = execute_thdn(wav_file)
speaker_thdn[speaker] = response['thdn']
speaker_frequency[speaker] = response['frequency']
speaker_loudness = sorted( ((v,k) for k,v in speaker_loudness.items()), reverse=True)
print("\n\nThere is no \"better\" loudness. But the larger the value (closer to 0 dB), the louder. ")
print("--------------------------------------------------------------------------------------------")
print("Speaker\t\tLoudness\t\tTHDN\t\tFrequency\tRank")
print("--------------------------------------------------------------------------------------------")
for i in range(len(speaker_loudness)):
print('{}\t {} LUFS\t{}\t\t{}\t {}'.format(speaker_loudness[i][1], speaker_loudness[i][0],
speaker_thdn[speaker_loudness[i][1]], speaker_frequency[speaker_loudness[i][1]],i+1))
print("--------------------------------------------------------------------------------------------")
def execute_all_functions(self):
print("\n\nCommencing Task 2: Judge Sound Quality")
self.merge_timestamp()
self.trim()
self.generate_files()
self.calculate_rank()
return self.speaker_set
# # For Testing
# if __name__ == "__main__":
# file_name = sys.argv[1]
# # Temp Code
# data =[
# {
# "Unnamed: 0": 0,
# "start_time": "00:00:00",
# "end_time": "00:00:00",
# "speaker": "spk_1",
# "comment": "Well,",
# "stopwords": 0,
# "fillerwords": 0
# },
# {
# "Unnamed: 0": 1,
# "start_time": "00:00:01",
# "end_time": "00:00:02",
# "speaker": "spk_1",
# "comment": "Hi, everyone.",
# "stopwords": 0,
# "fillerwords": 0
# },
# {
# "Unnamed: 0": 2,
# "start_time": "00:00:03",
# "end_time": "00:00:05",
# "speaker": "spk_0",
# "comment": "Everyone's money. Good",
# "stopwords": 0,
# "fillerwords": 0
# },
# {
# "Unnamed: 0": 3,
# "start_time": "00:00:05",
# "end_time": "00:00:10",
# "speaker": "spk_2",
# "comment": "morning, everyone. Money. Thanks for joining. Uh, so let's quickly get started with the meeting.",
# "stopwords": 4,
# "fillerwords": 1
# },
# {
# "Unnamed: 0": 4,
# "start_time": "00:00:11",
# "end_time": "00:00:14",
# "speaker": "spk_2",
# "comment": "Today's agenda is to discuss how we plan to increase the reach off our website",
# "stopwords": 8,
# "fillerwords": 0
# },
# {
# "Unnamed: 0": 5,
# "start_time": "00:00:15",
# "end_time": "00:00:20",
# "speaker": "spk_2",
# "comment": "and how to make it popular. Do you have any ideas, guys? Yes.",
# "stopwords": 8,
# "fillerwords": 0
# },
# {
# "Unnamed: 0": 6,
# "start_time": "00:00:20",
# "end_time": "00:00:22",
# "speaker": "spk_0",
# "comment": "Oh, Whoa. Um,",
# "stopwords": 0,
# "fillerwords": 1
# },
# {
# "Unnamed: 0": 7,
# "start_time": "00:00:23",
# "end_time": "00:00:36",
# "speaker": "spk_1",
# "comment": "it's okay. Thank you so much. Yes. Asai was saying one off. The ideas could be to make it more such friendly, you know? And to that I think we can. We need to improve the issue off our website.",
# "stopwords": 21,
# "fillerwords": 0
# },
# {
# "Unnamed: 0": 8,
# "start_time": "00:00:37",
# "end_time": "00:00:41",
# "speaker": "spk_2",
# "comment": "Yeah, that's a great point. We certainly need to improve the SC off our site.",
# "stopwords": 6,
# "fillerwords": 0
# },
# {
# "Unnamed: 0": 9,
# "start_time": "00:00:42",
# "end_time": "00:00:43",
# "speaker": "spk_2",
# "comment": "Let me let me take a note of this.",
# "stopwords": 4,
# "fillerwords": 0
# },
# {
# "Unnamed: 0": 10,
# "start_time": "00:00:45",
# "end_time": "00:00:57",
# "speaker": "spk_0",
# "comment": "How about using social media channels to promote our website? Everyone is on social media these days on way. We just need to target the right audience and share outside with them. Were often Oh, what do you think?",
# "stopwords": 18,
# "fillerwords": 0
# },
# {
# "Unnamed: 0": 11,
# "start_time": "00:00:58",
# "end_time": "00:01:05",
# "speaker": "spk_2",
# "comment": "It's definitely a great idea on since we already have our social accounts, I think we can get started on this one immediately.",
# "stopwords": 11,
# "fillerwords": 0
# },
# {
# "Unnamed: 0": 12,
# "start_time": "00:01:06",
# "end_time": "00:01:11",
# "speaker": "spk_0",
# "comment": "Yes, I can work on creating a plan for this. I come up with the content calendar base.",
# "stopwords": 9,
# "fillerwords": 0
# },
# {
# "Unnamed: 0": 13,
# "start_time": "00:01:11",
# "end_time": "00:01:17",
# "speaker": "spk_1",
# "comment": "Yeah, and I can start with creating the CEO content for all the periods off our website.",
# "stopwords": 10,
# "fillerwords": 0
# },
# {
# "Unnamed: 0": 14,
# "start_time": "00:01:17",
# "end_time": "00:01:24",
# "speaker": "spk_2",
# "comment": "Awesome. I think we already have a plan in place. Let's get rolling Eyes. Yeah, definitely.",
# "stopwords": 5,
# "fillerwords": 0
# },
# {
# "Unnamed: 0": 15,
# "start_time": "00:01:24",
# "end_time": "00:01:25",
# "speaker": "spk_2",
# "comment": "Yeah, sure.",
# "stopwords": 0,
# "fillerwords": 0
# },
# {
# "Unnamed: 0": 16,
# "start_time": "00:01:26",
# "end_time": "00:01:33",
# "speaker": "spk_2",
# "comment": "Great. Thanks. Thanks, everyone, for your ideas. I'm ending the call now. Talk to you soon. Bye. Bye bye. Thanks.",
# "stopwords": 5,
# "fillerwords": 0
# }]
# obj = Task2(data,file_name)
# obj.execute_all_functions()
``` |
{
"source": "joseph9991/SpeakerDiarisation-Python",
"score": 3
} |
#### File: joseph9991/SpeakerDiarisation-Python/speakerDiarisation.py
```python
import os, sys
import librosa
import requests, json
import datetime
import time
import warnings
from dotenv import load_dotenv
from requests.exceptions import HTTPError
class SpeakerDiarisation:
def __init__(self,file_name):
self.file_name = file_name
# Validates the file, checks for the valid file extension and returns audio-format
def identifyFormat(self):
valid_extensions = ('.mp3','.ogg','.wav','.m4a','.flac', '.mpeg', '.aac')
file_path, file_extension = os.path.splitext(self.file_name)
if file_extension in valid_extensions:
return file_extension[1:]
elif not file_extension:
            error = file_path + file_extension + ' is either a directory or not a valid file'
raise AssertionError(error)
else:
error = 'File extension ' + file_extension + ' not valid'
raise AssertionError(error)
    # Converts m4a/aac file to wav, stores it as a temporary file, and replaces the object's filename with temp.wav
def convert_file_to_wav(self):
print("Converting file to wav format...")
start_time = time.time()
data, sampling_rate = librosa.load(self.file_name,sr=16000)
librosa.output.write_wav('temp.wav', data, sampling_rate)
self.file_name = 'temp.wav'
end_time = time.time()
print("Finished conversion to wav format in " + self.seconds_to_minutes(end_time - start_time) + " seconds")
# Opens the File, sets headers, sends POST request to IBM Watson Text-to-Speech API, & returns JSON response
def request(self,audio_format):
load_dotenv()
url = os.getenv('SPEECH_TO_TEXT_URL')
api_key = os.getenv('SPEECH_TO_TEXT_APIKEY')
auth = ('apikey', api_key)
headers_key = 'Content-Type'
if audio_format == 'm4a' or audio_format == 'aac':
self.convert_file_to_wav()
headers_value = 'audio/wav'
else:
headers_value = 'audio/' + audio_format
headers = {headers_key : headers_value}
data = open(self.file_name, 'rb').read()
try:
print("Sending Request to Watson Speech-to-text API...")
start_time = time.time()
response = requests.post(url=url,headers=headers,data=data,auth=auth)
end_time = time.time()
print("Time taken by API: " + self.seconds_to_minutes(end_time - start_time) + " minutes")
response.raise_for_status()
jsonResponse = response.json()
# with open('temp.json','w') as f:
# f.write(jsonResponse)
return jsonResponse
except HTTPError as http_err:
print(f'HTTP error occurred: {http_err}')
except Exception as err:
print(f'Other error occurred: {err}')
# Converts Seconds to Minutes:Seconds OR Hours:Minutes:Seconds
def seconds_to_minutes(self,seconds):
time = str(datetime.timedelta(seconds=round(seconds,0)))
return time[2:] if time[0] == '0' else time
# Prints the desired Output -- format "Person Number - time-time" --example "Person 1 - 1:00-1:52"
def printResponse(self, response):
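        # Walks the speaker_labels entries in order, opening a "Person N - start-"
        # prefix whenever the speaker changes and closing the previous turn with
        # its end time (the final entry also closes itself), so each printed line
        # covers one contiguous speaker turn.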
response_speakers = response['speaker_labels']
speakers = set()
current_speaker = 0
for i in range(len(response_speakers)):
if (response_speakers[i]['speaker'] + 1) not in speakers:
speakers.add(response_speakers[i]['speaker'] + 1)
current_speaker = response_speakers[i]['speaker'] + 1
if i > 0:
print(self.seconds_to_minutes(response_speakers[i-1]['to']))
print('Person ' + str(response_speakers[i]['speaker'] + 1) + ' - ' +
self.seconds_to_minutes(response_speakers[i]['from']) + '-', end = "")
elif response_speakers[i]['final'] == True:
print(self.seconds_to_minutes(response_speakers[i-1]['to']))
print('Person ' + str(response_speakers[i]['speaker'] + 1) + ' - ' +
self.seconds_to_minutes(response_speakers[i]['from']) + '-', end = "")
print(self.seconds_to_minutes(response_speakers[i]['to']))
else:
if current_speaker != response_speakers[i]['speaker'] + 1:
current_speaker = response_speakers[i]['speaker'] + 1
print(self.seconds_to_minutes(response_speakers[i-1]['to']))
print('Person ' + str(response_speakers[i]['speaker'] + 1) + ' - ' +
self.seconds_to_minutes(response_speakers[i]['from']) + '-', end = "")
if (self.file_name) == 'temp.wav':
os.remove('temp.wav')
if __name__ == "__main__":
file_name = sys.argv[1]
# For ignoring UserWarnings
warnings.filterwarnings("ignore")
speakerDiarisation = SpeakerDiarisation(file_name)
audio_format = speakerDiarisation.identifyFormat()
# print(audio_format)
response = speakerDiarisation.request(audio_format)
# print(response)
# with open("response.json", "r") as read_file:
# response = json.load(read_file)
speakerDiarisation.printResponse(response)
``` |
{
"source": "JosephAbbey/pyVHDLParser",
"score": 2
} |
#### File: pyVHDLParser/CLI/__init__.py
```python
from typing import Protocol, Callable, Dict
from pyAttributes import Attribute
from pyAttributes.ArgParseAttributes import ArgumentAttribute, SwitchArgumentAttribute
from pyVHDLParser.Token import LinebreakToken, IndentationToken, CommentToken, StringLiteralToken
from pyVHDLParser.Token import IntegerLiteralToken, WordToken, Token, SpaceToken, CharacterToken
from pyVHDLParser.Token.Keywords import KeywordToken
class FilenameAttribute(Attribute):
def __call__(self, func):
self._AppendAttribute(func, ArgumentAttribute(metavar="filename", dest="Filename", type=str, help="The filename to parse."))
return func
class WithTokensAttribute(Attribute):
def __call__(self, func):
self._AppendAttribute(func, SwitchArgumentAttribute("-T", "--with-tokens", dest="withTokens", help="Display tokens in between."))
return func
class WithBlocksAttribute(Attribute):
def __call__(self, func):
self._AppendAttribute(func, SwitchArgumentAttribute("-B", "--with-blocks", dest="withBlocks", help="Display blocks in between."))
return func
class FrontEndProtocol(Protocol):
# TerminalUI
Foreground: Dict
WriteError: Callable[[str], None]
WriteWarning: Callable[[str], None]
WriteQuiet: Callable[[str], None]
WriteNormal: Callable[[str], None]
WriteVerbose: Callable[[str], None]
WriteDebug: Callable[[str], None]
exit: Callable[[int], None]
# Frontend
PrintHeadline: Callable
TOKENTYPE_TO_COLOR_TRANSLATION = {
LinebreakToken: "black",
IndentationToken: "grey",
SpaceToken: "lightblue1",
CharacterToken: "darkorange",
CommentToken: "forestgreen",
StringLiteralToken: "<PASSWORD>",
IntegerLiteralToken: "<PASSWORD>",
WordToken: "aqu<PASSWORD>",
KeywordToken: "dod<PASSWORD>",
}
def translate(token: Token) -> str:
if isinstance(token, Token):
tokenCls = token.__class__
else:
tokenCls = token
try:
return TOKENTYPE_TO_COLOR_TRANSLATION[tokenCls]
except KeyError:
for key, color in TOKENTYPE_TO_COLOR_TRANSLATION.items():
if issubclass(tokenCls, key):
return color
else:
return "crimson"
```
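A small, hypothetical usage sketch of the `translate` helper above (not part of the package); it assumes `pyVHDLParser` and its dependencies are installed and importable.
```python
# Illustration only: translate() accepts a Token instance or, as used here, a
# Token subclass; when the class has no direct entry in
# TOKENTYPE_TO_COLOR_TRANSLATION it falls back to the closest base class that
# does (EntityKeyword resolves through the KeywordToken entry).
from pyVHDLParser.CLI import translate
from pyVHDLParser.Token.Keywords import EntityKeyword

print(translate(EntityKeyword))
```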
#### File: DocumentModel/DesignUnit/Entity.py
```python
from pydecor import export
from typing import List
from pyVHDLModel.VHDLModel import Entity as EntityVHDLModel
from pyVHDLParser.Token.Keywords import IdentifierToken
from pyVHDLParser.Blocks import BlockParserException
from pyVHDLParser.Blocks.List import GenericList as GenericListBlocks, PortList as PortListBlocks
from pyVHDLParser.Blocks.Object.Constant import ConstantDeclarationBlock
import pyVHDLParser.Blocks.InterfaceObject
from pyVHDLParser.Blocks.Structural import Entity as EntityBlocks
from pyVHDLParser.Groups import ParserState
from pyVHDLParser.Groups.List import GenericListGroup, PortListGroup
from pyVHDLParser.DocumentModel.Reference import Library, Use
__all__ = []
__api__ = __all__
DEBUG = True
@export
class Entity(EntityVHDLModel):
def __init__(self, entityName):
super().__init__()
self._name = entityName
@classmethod
def stateParse(cls, parserState: ParserState): #document, group):
for block in parserState.CurrentGroup:
if isinstance(block, EntityBlocks.NameBlock):
for token in block:
if isinstance(token, IdentifierToken):
entityName = token.Value
break
else:
raise BlockParserException("EntityName not found.", None) # FIXME: change to DOMParserException
entity = cls(entityName)
entity.AddLibraryReferences(document.Libraries)
entity.AddUses(document.Uses)
print("Found library '{0}'. Adding to current node '{1!s}'.".format(entityName, document))
document.AddEntity(entity)
break
subGroupIterator = iter(parserState.CurrentGroup.GetSubGroups())
subGroup = next(subGroupIterator)
if isinstance(subGroup, GenericListGroup):
cls.stateParseGenericList(document, subGroup)
subGroup = next(subGroupIterator)
if isinstance(subGroup, PortListGroup):
cls.stateParsePortList(document, subGroup)
subGroup = next(subGroupIterator)
# FIXME entity declarative region
# if isinstance(subGroup, ):
# cls.stateParsePortList(document, subGroup)
# subGroup = next(subGroupIterator)
# FIXME entity statements
# if isinstance(subGroup, ):
# cls.stateParsePortList(document, subGroup)
# subGroup = next(subGroupIterator)
# FIXME: how to check if everthing is consumed?
@classmethod
def stateParseGenericList(cls, parserState: ParserState): #document, group):
assert isinstance(parserState.CurrentGroup, GenericListBlocks.OpenBlock)
for block in parserState.GroupIterator:
if isinstance(block, pyVHDLParser.Blocks.InterfaceObject.InterfaceConstantBlock):
cls.stateParseGeneric(parserState)
elif isinstance(block, GenericListBlocks.CloseBlock):
break
else:
raise BlockParserException("", None) # FIXME: change to DOMParserException
parserState.Pop()
@classmethod
def stateParseGeneric(cls, parserState: ParserState): #document, group):
assert isinstance(parserState.CurrentGroup, pyVHDLParser.Blocks.InterfaceObject.InterfaceConstantBlock)
tokenIterator = iter(parserState)
for token in tokenIterator:
if isinstance(token, IdentifierToken):
genericName = token.Value
break
else:
raise BlockParserException("", None) # FIXME: change to DOMParserException
parserState.CurrentNode.AddGeneric(genericName)
@classmethod
def stateParsePortList(cls, parserState: ParserState): #document, group):
assert isinstance(parserState.CurrentGroup, PortListBlocks.OpenBlock)
for block in parserState.GroupIterator:
if isinstance(block, pyVHDLParser.Blocks.InterfaceObject.InterfaceSignalBlock):
cls.stateParsePort(parserState)
elif isinstance(block, PortListBlocks.CloseBlock):
break
else:
raise BlockParserException("", None) # FIXME: change to DOMParserException
parserState.Pop()
@classmethod
def stateParsePort(cls, parserState: ParserState): #document, group):
assert isinstance(parserState.CurrentGroup, pyVHDLParser.Blocks.InterfaceObject.InterfaceSignalBlock)
tokenIterator = iter(parserState)
for token in tokenIterator:
if isinstance(token, IdentifierToken):
portName = token.Value
break
else:
raise BlockParserException("", None) # FIXME: change to DOMParserException
parserState.CurrentNode.AddPort(portName)
def AddLibraries(self, libraries):
for library in libraries:
self._libraries.append(library)
def AddUses(self, uses):
for use in uses:
self._uses.append(use)
def AddGeneric(self, generic):
self._genericItems.append(generic)
def AddPort(self, port):
self._portItems.append(port)
def Print(self, indent=0):
indentation = " "*indent
for lib in self._libraries:
print("{indent}{DARK_CYAN}LIBRARY{NOCOLOR} {GREEN}{lib}{NOCOLOR};".format(indent=indentation, lib=lib, **Console.Foreground))
for lib, pack, obj in self._uses:
print("{indent}{DARK_CYAN}USE {GREEN}{lib}{NOCOLOR}.{GREEN}{pack}{NOCOLOR}.{GREEN}{obj}{NOCOLOR};".format(indent=indentation, lib=lib, pack=pack, obj=obj, **Console.Foreground))
print()
print("{indent}{DARK_CYAN}ENTITY{NOCOLOR} {YELLOW}{name}{NOCOLOR} {DARK_CYAN}IS{NOCOLOR}".format(name=self._name, indent=indentation, **Console.Foreground))
if (len(self._genericItems) > 0):
print("{indent} {DARK_CYAN}GENERIC{NOCOLOR} (".format(indent=indentation, **Console.Foreground))
for generic in self._genericItems:
print("{indent} {YELLOW}{name}{NOCOLOR} : {GREEN}{type}{NOCOLOR}".format(indent=indentation, name=generic, type="", **Console.Foreground))
print("{indent} );".format(indent=indentation, **Console.Foreground))
if (len(self._portItems) > 0):
print("{indent} {DARK_CYAN}PORT{NOCOLOR} (".format(indent=indentation, **Console.Foreground))
for port in self._portItems:
print("{indent} {YELLOW}{name}{NOCOLOR} : {GREEN}{type}{NOCOLOR}".format(indent=indentation, name=port, type="", **Console.Foreground))
print("{indent} );".format(indent=indentation, **Console.Foreground))
print("{indent}{DARK_CYAN}END ENTITY{NOCOLOR};".format(name=self._name, indent=indentation, **Console.Foreground))
```
#### File: pyVHDLParser/Token/Keywords.py
```python
from pydecor.decorators import export
from pyVHDLParser.Token import Token, WordToken, VHDLToken, CharacterToken
from pyVHDLParser.Token.Parser import TokenizerException
__all__ = []
__api__ = __all__
@export
class SpecificVHDLToken(VHDLToken):
"""Base-class for all specific tokens.
Simple token will be converted to specific tokens while parsing.
The internal data is copied, and the original token is replaced by this token.
"""
def __init__(self, token: Token):
"""
Initialize a specific token, by copying the simple token's data and link
this new token to the previous token as a replacement.
"""
super().__init__(token.PreviousToken, token.Value, token.Start, token.End)
@export
class BoundaryToken(SpecificVHDLToken):
"""
Token representing a boundary between (reserved) words.
In many cases, a :class:`SpaceToken`, :class:`CommentToken`,
:class:`LinebreakToken` or :class:`CharacterToken` becomes a BoundaryToken.
"""
# ==============================================================================
# Bracket tokens: (), [], {}, <>
# ==============================================================================
@export
class BracketToken(SpecificVHDLToken):
"""Base-class for all bracket tokens: ``(``, ``)``, ``[``, ``]``, ``{``, ``}``, ``<`` and ``>``."""
# Round bracket / parenthesis / ()
# ----------------------------------------------------------
@export
class RoundBracketToken(BracketToken):
"""Base-class for all round bracket tokens: ``(`` and ``)``."""
@export
class OpeningRoundBracketToken(RoundBracketToken):
"""Token representing an opening round bracket: ``(``."""
@export
class ClosingRoundBracketToken(RoundBracketToken):
"""Token representing a closing round bracket: ``)``."""
# Square bracket / []
# ----------------------------------------------------------
@export
class SquareBracketToken(BracketToken):
"""Base-class for all square bracket tokens: ``[`` and ``]``."""
@export
class OpeningSquareBracketToken(SquareBracketToken):
"""Token representing an square round bracket: ``[``."""
@export
class ClosingSquareBracketToken(SquareBracketToken):
"""Token representing a closing square bracket: ``]``."""
# Curly bracket / brace / curved bracket / {}
# ----------------------------------------------------------
@export
class CurlyBracketToken(BracketToken):
"""Base-class for all curly bracket tokens: ``{`` and ``}``."""
@export
class OpeningCurlyBracketToken(CurlyBracketToken):
"""Token representing an opening curly bracket: ``{``."""
@export
class ClosingCurlyBracketToken(CurlyBracketToken):
"""Token representing a closing curly bracket: ``}``."""
# Angle bracket / arrow bracket / <>
# ----------------------------------------------------------
@export
class AngleBracketToken(BracketToken):
"""Base-class for all angle bracket tokens: ``<`` and ``>``."""
@export
class OpeningAngleBracketToken(AngleBracketToken):
"""Token representing an opening angle bracket: ``<``."""
@export
class ClosingAngleBracketToken(AngleBracketToken):
"""Token representing a closing angle bracket: ``>``."""
# ==============================================================================
# Operator tokens: +, -, *, /, **, &
# ==============================================================================
@export
class OperatorToken(SpecificVHDLToken):
"""Base-class for all operator tokens."""
@export
class PlusOperator(OperatorToken):
"""Token representing a plus operator: ``+``."""
__KEYWORD__ = "+"
@export
class MinusOperator(OperatorToken):
"""Token representing a minus operator: ``-``."""
__KEYWORD__ = "-"
@export
class MultiplyOperator(OperatorToken):
"""Token representing a multiply operator: ``*``."""
__KEYWORD__ = "*"
@export
class DivideOperator(OperatorToken):
"""Token representing a divide operator: ``/``."""
__KEYWORD__ = "/"
@export
class PowerOperator(OperatorToken):
"""Token representing a power operator: ``**``."""
__KEYWORD__ = "**"
@export
class ConcatOperator(OperatorToken):
"""Token representing a concat operator: ``&``."""
__KEYWORD__ = "&"
# Relational operatrors
# ----------------------------------------------------------
@export
class RelationalOperator(OperatorToken):
"""Base-class for all relational operator tokens."""
@export
class EqualOperator(RelationalOperator):
__KEYWORD__ = "="
@export
class UnequalOperator(RelationalOperator):
__KEYWORD__ = "/="
@export
class LessThanOperator(RelationalOperator):
__KEYWORD__ = "<"
@export
class LessThanOrEqualOperator(RelationalOperator):
__KEYWORD__ = "<="
@export
class GreaterThanOperator(RelationalOperator):
__KEYWORD__ = ">"
@export
class GreaterThanOrEqualOperator(RelationalOperator):
__KEYWORD__ = ">="
@export
class MatchingEqualOperator(RelationalOperator):
__KEYWORD__ = "?="
@export
class MatchingUnequalOperator(RelationalOperator):
__KEYWORD__ = "?/="
@export
class MatchingLessThanOperator(RelationalOperator):
__KEYWORD__ = "?<"
@export
class MatchingLessThanOrEqualOperator(RelationalOperator):
__KEYWORD__ = "?<="
@export
class MatchingGreaterThanOperator(RelationalOperator):
__KEYWORD__ = "?>"
@export
class MatchingGreaterThanOrEqualOperator(RelationalOperator):
__KEYWORD__ = "?>="
@export
class DelimiterToken(SpecificVHDLToken):
"""
Token representing a delimiter sign in between list items.
This token is usually created from a :class:`CharacterToken` with values ``,``
or ``;``.
"""
@export
class EndToken(SpecificVHDLToken):
"""
Token representing an end of a statement.
This token is usually created from a :class:`CharacterToken` with value ``;``.
"""
@export
class IdentifierToken(SpecificVHDLToken):
"""
Token representing an identifier.
This token is usually created from a :class:`WordToken` or :class:`ExtendedIdentifierToken`.
"""
@export
class RepeatedIdentifierToken(IdentifierToken):
"""
Token representing a repeated identifier.
This token is usually created from a :class:`WordToken` or :class:`ExtendedIdentifierToken`.
"""
@export
class SimpleNameToken(SpecificVHDLToken):
pass
@export
class LabelToken(SpecificVHDLToken):
"""
Token representing a label.
This token is usually created from a :class:`WordToken` or :class:`ExtendedIdentifierToken`.
"""
@export
class RepeatedLabelToken(LabelToken):
"""
Token representing a repeated label.
This token is usually created from a :class:`WordToken` or :class:`ExtendedIdentifierToken`.
"""
@export
class MultiCharKeyword(VHDLToken):
__KEYWORD__ = None
def __init__(self, characterToken: CharacterToken):
super().__init__(characterToken.PreviousToken, self.__KEYWORD__, characterToken.Start, characterToken.End)
def __str__(self) -> str:
return "<{name: <50} '{value}' at {pos!r}>".format(
name=self.__class__.__name__[:-7],
value=self.__KEYWORD__,
pos=self.Start
)
@export
class CommentKeyword(MultiCharKeyword):
"""Base-class for all comment keywords: ``--``, ``/*`` and ``*/``."""
@export
class SingleLineCommentKeyword(CommentKeyword):
"""Token representing a starting sequence for a single-line comment: ``--``."""
__KEYWORD__ = "--"
@export
class MultiLineCommentKeyword(CommentKeyword):
"""Base-class for all tokens related to multi-line comments: ``/*`` and ``*/``."""
@export
class MultiLineCommentStartKeyword(MultiLineCommentKeyword):
"""Token representing a starting sequence for a multi-line comment: ``/*``."""
__KEYWORD__ = "/*"
@export
class MultiLineCommentEndKeyword(MultiLineCommentKeyword):
"""Token representing a closing sequence for a multi-line comment: ``*/``."""
__KEYWORD__ = "*/"
@export
class AssignmentKeyword(MultiCharKeyword):
"""Base-class for all assignment keywords: ``:=`` and ``<=``."""
@export
class VariableAssignmentKeyword(AssignmentKeyword):
"""Token representing a variable assignment: ``:=``."""
__KEYWORD__ = ":="
@export
class SignalAssignmentKeyword(AssignmentKeyword):
"""Token representing a signal assignment: ``<=``."""
__KEYWORD__ = "<="
@export
class AssociationKeyword(MultiCharKeyword):
pass
@export
class MapAssociationKeyword(AssociationKeyword):
__KEYWORD__ = "=>"
@export
class SignalAssociationKeyword(AssociationKeyword):
__KEYWORD__ = "<=>"
@export
class KeywordToken(VHDLToken):
__KEYWORD__ : str
def __init__(self, wordToken: WordToken):
if (not (isinstance(wordToken, WordToken) and (wordToken <= self.__KEYWORD__))):
raise TokenizerException("Expected keyword {0}.".format(self.__KEYWORD__.upper()), wordToken)
super().__init__(wordToken.PreviousToken, self.__KEYWORD__, wordToken.Start, wordToken.End)
def __str__(self) -> str:
return "<{name: <50} {value:.<59} at {pos!r}>".format(
name=self.__class__.__name__,
value="'" + self.Value + "' ",
pos=self.Start
)
@export
class DirectionKeyword(KeywordToken):
pass
@export
class Operator(KeywordToken):
pass
@export
class LogicalOperator(Operator):
pass
@export
class MiscellaneousOperator(Operator):
pass
@export
class ShiftOperator(Operator):
pass
@export
class AbsKeyword(KeywordToken):
"""Reserved word 'abs' for unary operator *absolute value*."""
__KEYWORD__ = "abs"
@export
class AccessKeyword(KeywordToken):
"""Reserved word 'access' to defined access types."""
__KEYWORD__ = "access"
@export
class AfterKeyword(KeywordToken):
"""Reserved word 'after'."""
__KEYWORD__ = "after"
@export
class AliasKeyword(KeywordToken):
"""Reserved word 'alias' to declare aliases."""
__KEYWORD__ = "alias"
@export
class AllKeyword(KeywordToken):
"""Reserved word 'all'."""
__KEYWORD__ = "all"
@export
class AndKeyword(LogicalOperator):
"""Reserved word 'and' for binary logical operator *and*."""
__KEYWORD__ = "and"
@export
class ArchitectureKeyword(KeywordToken):
"""Reserved word 'architecture' to define architectures."""
__KEYWORD__ = "architecture"
@export
class ArrayKeyword(KeywordToken):
"""Reserved word 'array' to define array types."""
__KEYWORD__ = "array"
@export
class AssertKeyword(KeywordToken):
"""Reserved word 'assert' for *assert*-statements."""
__KEYWORD__ = "assert"
@export
class AttributeKeyword(KeywordToken):
"""Reserved word 'attribute'."""
__KEYWORD__ = "attribute"
@export
class BeginKeyword(KeywordToken):
"""Reserved word 'begin' to distinguish declarative regions from statements regions."""
__KEYWORD__ = "begin"
@export
class BlockKeyword(KeywordToken):
"""Reserved word 'block' for *block*-statements."""
__KEYWORD__ = "block"
@export
class BodyKeyword(KeywordToken):
"""Reserved word 'body' to distinguish declarations from implementations (bodies)."""
__KEYWORD__ = "body"
@export
class BufferKeyword(KeywordToken):
"""Reserved word 'buffer' for mode *buffer*."""
__KEYWORD__ = "buffer"
@export
class BusKeyword(KeywordToken):
"""Reserved word 'bus'."""
__KEYWORD__ = "bus"
@export
class CaseKeyword(KeywordToken):
__KEYWORD__ = "case"
@export
class ComponentKeyword(KeywordToken):
__KEYWORD__ = "component"
@export
class ConfigurationKeyword(KeywordToken):
__KEYWORD__ = "configuration"
@export
class ConstantKeyword(KeywordToken):
__KEYWORD__ = "constant"
@export
class ContextKeyword(KeywordToken):
__KEYWORD__ = "context"
@export
class DefaultKeyword(KeywordToken):
__KEYWORD__ = "default"
@export
class DisconnectKeyword(KeywordToken):
__KEYWORD__ = "disconnect"
@export
class DowntoKeyword(DirectionKeyword):
__KEYWORD__ = "downto"
@export
class ElseKeyword(KeywordToken):
__KEYWORD__ = "else"
@export
class ElsIfKeyword(KeywordToken):
__KEYWORD__ = "elsif"
@export
class EndKeyword(KeywordToken):
__KEYWORD__ = "end"
@export
class EntityKeyword(KeywordToken):
__KEYWORD__ = "entity"
@export
class ExitKeyword(KeywordToken):
__KEYWORD__ = "exit"
@export
class FileKeyword(KeywordToken):
__KEYWORD__ = "file"
@export
class ForKeyword(KeywordToken):
__KEYWORD__ = "for"
@export
class ForceKeyword(KeywordToken):
__KEYWORD__ = "force"
@export
class FunctionKeyword(KeywordToken):
__KEYWORD__ = "function"
@export
class GenerateKeyword(KeywordToken):
__KEYWORD__ = "generate"
@export
class GenericKeyword(KeywordToken):
__KEYWORD__ = "generic"
@export
class GroupKeyword(KeywordToken):
__KEYWORD__ = "group"
@export
class GuardedKeyword(KeywordToken):
__KEYWORD__ = "guarded"
@export
class IfKeyword(KeywordToken):
__KEYWORD__ = "if"
@export
class IsKeyword(KeywordToken):
__KEYWORD__ = "is"
@export
class InKeyword(KeywordToken):
__KEYWORD__ = "in"
@export
class InoutKeyword(KeywordToken):
__KEYWORD__ = "inout"
@export
class ImpureKeyword(KeywordToken):
__KEYWORD__ = "impure"
@export
class InertialKeyword(KeywordToken):
__KEYWORD__ = "inertial"
@export
class LabelKeyword(KeywordToken):
	__KEYWORD__ = "label"
@export
class LibraryKeyword(KeywordToken):
__KEYWORD__ = "library"
@export
class LinkageKeyword(KeywordToken):
__KEYWORD__ = "linkage"
@export
class LiteralKeyword(KeywordToken):
__KEYWORD__ = "literal"
@export
class LoopKeyword(KeywordToken):
__KEYWORD__ = "loop"
@export
class MapKeyword(KeywordToken):
__KEYWORD__ = "map"
@export
class NandKeyword(LogicalOperator):
__KEYWORD__ = "nand"
@export
class NewKeyword(KeywordToken):
__KEYWORD__ = "new"
@export
class NextKeyword(KeywordToken):
__KEYWORD__ = "next"
@export
class NorKeyword(LogicalOperator):
__KEYWORD__ = "nor"
@export
class NotKeyword(KeywordToken):
__KEYWORD__ = "not"
@export
class NullKeyword(KeywordToken):
__KEYWORD__ = "null"
@export
class OfKeyword(KeywordToken):
__KEYWORD__ = "of"
@export
class OnKeyword(KeywordToken):
__KEYWORD__ = "on"
@export
class OpenKeyword(KeywordToken):
__KEYWORD__ = "open"
@export
class OrKeyword(LogicalOperator):
__KEYWORD__ = "or"
@export
class OthersKeyword(KeywordToken):
__KEYWORD__ = "others"
@export
class OutKeyword(KeywordToken):
__KEYWORD__ = "out"
@export
class PackageKeyword(KeywordToken):
__KEYWORD__ = "package"
@export
class ParameterKeyword(KeywordToken):
__KEYWORD__ = "parameter"
@export
class PortKeyword(KeywordToken):
__KEYWORD__ = "port"
@export
class PostponedKeyword(KeywordToken):
	__KEYWORD__ = "postponed"
@export
class ProcedureKeyword(KeywordToken):
__KEYWORD__ = "procedure"
@export
class ProcessKeyword(KeywordToken):
__KEYWORD__ = "process"
@export
class PropertyKeyword(KeywordToken):
__KEYWORD__ = "property"
@export
class ProtectedKeyword(KeywordToken):
__KEYWORD__ = "protected"
@export
class PureKeyword(KeywordToken):
__KEYWORD__ = "pure"
@export
class RangeKeyword(KeywordToken):
__KEYWORD__ = "range"
@export
class RecordKeyword(KeywordToken):
__KEYWORD__ = "record"
@export
class RegisterKeyword(KeywordToken):
__KEYWORD__ = "register"
@export
class RejectKeyword(KeywordToken):
__KEYWORD__ = "reject"
@export
class ReleaseKeyword(KeywordToken):
__KEYWORD__ = "release"
@export
class ReportKeyword(KeywordToken):
__KEYWORD__ = "report"
@export
class ReturnKeyword(KeywordToken):
__KEYWORD__ = "return"
@export
class RolKeyword(ShiftOperator):
__KEYWORD__ = "rol"
@export
class RorKeyword(ShiftOperator):
__KEYWORD__ = "ror"
@export
class SelectKeyword(KeywordToken):
__KEYWORD__ = "select"
@export
class SequenceKeyword(KeywordToken):
__KEYWORD__ = "sequence"
@export
class SeverityKeyword(KeywordToken):
__KEYWORD__ = "severity"
@export
class SharedKeyword(KeywordToken):
__KEYWORD__ = "shared"
@export
class SignalKeyword(KeywordToken):
__KEYWORD__ = "signal"
@export
class SlaKeyword(ShiftOperator):
__KEYWORD__ = "sla"
@export
class SllKeyword(ShiftOperator):
__KEYWORD__ = "sll"
@export
class SraKeyword(ShiftOperator):
__KEYWORD__ = "sra"
@export
class SrlKeyword(ShiftOperator):
__KEYWORD__ = "srl"
@export
class SubtypeKeyword(KeywordToken):
__KEYWORD__ = "subtype"
@export
class ThenKeyword(KeywordToken):
__KEYWORD__ = "then"
@export
class ToKeyword(DirectionKeyword):
__KEYWORD__ = "to"
@export
class TransportKeyword(KeywordToken):
__KEYWORD__ = "transport"
@export
class TypeKeyword(KeywordToken):
__KEYWORD__ = "type"
@export
class UnitsKeyword(KeywordToken):
__KEYWORD__ = "units"
@export
class UntilKeyword(KeywordToken):
__KEYWORD__ = "until"
@export
class UseKeyword(KeywordToken):
__KEYWORD__ = "use"
@export
class UnbufferedKeyword(KeywordToken):
__KEYWORD__ = "unbuffered"
@export
class VariableKeyword(KeywordToken):
__KEYWORD__ = "variable"
@export
class VunitKeyword(KeywordToken):
__KEYWORD__ = "vunit"
@export
class WaitKeyword(KeywordToken):
__KEYWORD__ = "wait"
@export
class WhenKeyword(KeywordToken):
__KEYWORD__ = "when"
@export
class WhileKeyword(KeywordToken):
__KEYWORD__ = "while"
@export
class WithKeyword(KeywordToken):
__KEYWORD__ = "with"
@export
class XorKeyword(LogicalOperator):
__KEYWORD__ = "xor"
@export
class XnorKeyword(LogicalOperator):
__KEYWORD__ = "xnor"
```
#### File: tests/unit/Common.py
```python
from dataclasses import dataclass
from typing import List, Tuple, Any
from flags import Flags
from pyMetaClasses import Singleton
from pyVHDLParser.Base import ParserException
from pyVHDLParser.Token import StartOfDocumentToken, EndOfDocumentToken, Token, CharacterTranslation
from pyVHDLParser.Token.Parser import Tokenizer, TokenizerException
from pyVHDLParser.Blocks import StartOfDocumentBlock, EndOfDocumentBlock, TokenToBlockParser, MetaBlock, Block, BlockParserException
# XXX: move to pyVHDLParser.Blocks; call it from frontend
class Initializer(metaclass=Singleton):
def __init__(self):
print("Init all blocks.")
for block in MetaBlock.BLOCKS:
try:
block.__cls_init__()
except AttributeError:
pass
class Result(Flags):
Pass = 1,
Fail = 2
@dataclass
class ExpectedTokenStream:
tokens: List[Tuple[Token, str]]
result: Result = Result.Pass
exception: ParserException = None
@dataclass
class ExpectedBlockStream:
blocks: List[Tuple[Block, str]]
result: Result = Result.Pass
exception: ParserException = None
class ExpectedDataMixin:
name: str = None
code: str = None
tokenStream: ExpectedTokenStream = None
blockStream: ExpectedBlockStream = None
@classmethod
def setUpClass(cls):
print("Starting testcases in {}.".format(cls.__qualname__))
def setUp(self):
print("Starting another test.")
class ITestcase:
code: str
tokenStream: ExpectedTokenStream
blockStream: ExpectedBlockStream
def skipTest(self, reason=None):
pass
def fail(self, msg: str = ""):
pass
def failIf(self, expr: bool, msg: str = ""):
if expr:
self.fail(msg=msg)
def assertEqual(self, left: Any, right: Any, msg: str = ""):
pass
def assertIsInstance(self, obj: Any, typ, msg: str = ""):
pass
def assertIsNotInstance(self, obj: Any, typ, msg: str = ""):
pass
def assertTrue(self, obj: bool, msg: str = ""):
pass
def assertIsNone(self, obj: Any, msg: str = ""):
pass
def assertIsNotNone(self, obj: Any, msg: str = ""):
pass
class TokenSequence(ITestcase): #, ExpectedDataMixin):
def test_TokenSequence(self) -> None:
# test['name']
tokenStream = Tokenizer.GetVHDLTokenizer(self.code)
tokenIterator = iter(tokenStream)
listIterator = iter(self.tokenStream.tokens)
try:
while True:
token = next(tokenIterator)
item = next(listIterator)
self.assertIsInstance(
token, item[0],
msg="Token has not expected type.\n Actual: {actual} pos={pos!s}\n Expected: {expected}".format(
actual=token.__class__.__qualname__,
pos=token.Start,
expected=item[0].__qualname__
)
)
if item[1] is not None:
self.assertTrue(
token == item[1],
msg="The token's value does not match.\n Context: {context}\n Actual: {actual}\n Expected: {expected}".format(
context="at {pos!s}".format(pos=token.Start),
actual="'{token!r}' of {type}".format(token=token, type=token.__class__.__qualname__),
expected="'{value}' of {type}".format(value=item[1], type=item[0].__qualname__)
)
)
except TokenizerException as ex:
self.fail(msg="Unexpected 'TokenizerException' ({ex!s}) at {pos}".format(ex=ex, pos=ex.Position))
except StopIteration:
pass
except AssertionError:
raise
except Exception as ex:
self.fail(msg="Unexpected exception '{exname}' := {ex!s}.".format(ex=ex, exname=ex.__class__.__qualname__))
class TokenLinking(ITestcase): #, ExpectedDataMixin):
def test_TokenLinking(self) -> None:
# test['name']
tokenStream = Tokenizer.GetVHDLTokenizer(self.code)
tokenIterator = iter(tokenStream)
startToken = next(tokenIterator)
self.assertIsInstance(startToken, StartOfDocumentToken, msg="First token is not StartOfDocumentToken: {token}".format(token=startToken))
self.assertIsNone(startToken.PreviousToken, msg="First token has no open start.")
lastToken: Token = startToken
endToken: Token = None
for token in tokenIterator:
if isinstance(token, EndOfDocumentToken):
endToken = token
break
self.assertEqual(lastToken.NextToken, token, msg="Last token is not connected to the current token: {token}".format(token=token))
self.assertEqual(lastToken, token.PreviousToken, msg="Current token is not connected to lastToken: {token}".format(token=token))
lastToken = token
else:
self.fail(msg="No EndOfDocumentToken found.")
self.assertIsInstance(endToken, EndOfDocumentToken, msg="End token is not EndOfDocumentToken: {token}".format(token=endToken))
self.assertEqual(lastToken.NextToken, endToken, msg="Last token is not connected to the end token: {token}".format(token=token))
self.assertEqual(lastToken, endToken.PreviousToken, msg="End token is not connected to lastToken: {token}".format(token=token))
self.assertIsNone(endToken.NextToken, msg="End token has no open end: {token}".format(token=endToken.NextToken))
class BlockSequence(ITestcase): #, ExpectedDataMixin):
def test_BlockSequence(self) -> None:
# test['name']
tokenStream = Tokenizer.GetVHDLTokenizer(self.code)
blockStream = TokenToBlockParser.Transform(tokenStream)
blockIterator = iter(blockStream)
listIterator = iter(self.blockStream.blocks)
try:
while True:
block = next(blockIterator)
item = next(listIterator)
self.assertIsInstance(
block, item[0],
msg="Block has not expected type.\n Actual: {actual!s}\n Expected: {expected}".format(
# actual=block.__class__.__qualname__,
actual=block,
expected=item[0].__qualname__
)
)
if item[1] is not None:
blockValue = str(block)
super().failIf(
blockValue != item[1],
msg="The blocks's value does not match.\n Actual: '{actual}'\n Expected: '{expected}'".format(
actual=CharacterTranslation(blockValue, oneLiner=True),
expected=CharacterTranslation(item[1], oneLiner=True)
)
)
except TokenizerException as ex:
self.fail(msg="Unexpected 'TokenizerException' at {pos}".format(pos=ex.Position))
except BlockParserException as ex:
self.fail(msg="Unexpected 'BlockParserException' at {pos}".format(pos=ex.Token.Start))
except StopIteration:
pass
except AssertionError:
raise
except Exception as ex:
self.fail(msg="Unexpected exception '{exname}' := {ex!s}.".format(ex=ex, exname=ex.__class__.__qualname__))
class BlockSequenceWithParserError(ITestcase): #, ExpectedDataMixin):
def test_BlockSequenceError(self) -> None:
# test['name']
tokenStream = Tokenizer.GetVHDLTokenizer(self.code)
blockStream = TokenToBlockParser.Transform(tokenStream)
blockIterator = iter(blockStream)
listIterator = iter(self.blockStream.blocks)
with self.assertRaises(BlockParserException) as ex:
try:
while True:
block = next(blockIterator)
item = next(listIterator)
self.assertIsInstance(
block, item[0],
msg="Block has not expected type.\n Actual: {actual!s}\n Expected: {expected}".format(
# actual=block.__class__.__qualname__,
actual=block,
expected=item[0].__qualname__
)
)
if item[1] is not None:
blockValue = str(block)
super().failIf(
blockValue != item[1],
msg="The blocks's value does not match.\n Actual: '{actual}'\n Expected: '{expected}'".format(
actual=CharacterTranslation(blockValue, oneLiner=True),
expected=CharacterTranslation(item[1], oneLiner=True)
)
)
except TokenizerException as ex:
self.fail(msg="Unexpected 'TokenizerException' at {pos}".format(pos=ex.Position))
except BlockParserException:
raise
except StopIteration:
pass
except AssertionError:
raise
except Exception as ex:
self.fail(msg="Unexpected exception '{exname}' := {ex!s}.".format(ex=ex, exname=ex.__class__.__qualname__))
print(ex)
def test_BlockLinking(self) -> None:
# test['name']
with self.assertRaises(BlockParserException) as ex:
tokenStream = Tokenizer.GetVHDLTokenizer(self.code)
blockStream = TokenToBlockParser.Transform(tokenStream)
blockIterator = iter(blockStream)
firstBlock = next(blockIterator)
self.assertIsInstance(firstBlock, StartOfDocumentBlock, msg="First block is not StartOfDocumentBlock: {block}".format(block=firstBlock))
startToken = firstBlock.StartToken
self.assertIsInstance(startToken, StartOfDocumentToken, msg="First token is not StartOfDocumentToken: {token}".format(token=startToken))
lastBlock: Block = firstBlock
endBlock: Block = None
lastToken: Token = startToken
for block in blockIterator:
if isinstance(block, EndOfDocumentBlock):
endBlock = block
break
# Block checks
self.assertEqual(lastBlock.NextBlock, block,
msg="Last block is not connected to the current block: {block}".format(block=block))
self.assertEqual(lastBlock, block.PreviousBlock,
msg="Current block is not connected to last block: {block}".format(block=block))
# Token checks
tokenIterator = iter(block)
for token in tokenIterator:
self.assertIsNotNone(token.NextToken, msg="Token has an open end (token).".format(token=token.NextToken))
self.assertEqual(lastToken.NextToken, token, msg="Last token is not connected to the current token.")
self.assertIsNotNone(token.PreviousToken, msg="Token has an open end (PreviousToken).")
self.assertEqual(token.PreviousToken, lastToken, msg="Current token is not connected to lastToken.")
lastToken = token
lastBlock = block
else:
self.fail(msg="No EndOfDocumentBlock found.")
# Block checks
self.assertIsInstance(endBlock, EndOfDocumentBlock, msg="End block is not EndOfDocumentblock: {token}".format(token=endBlock))
self.assertIsInstance(endBlock.EndToken, EndOfDocumentToken, msg="End block's token is not EndOfDocumentToken: {token}".format(token=endBlock.EndToken))
# Token checks
self.assertEqual(lastToken.NextToken, endBlock.EndToken, msg="Last token is not connected to the end token.")
self.assertEqual(lastToken, endBlock.EndToken.PreviousToken, msg="End token is not connected to lastToken.")
self.assertIsNone(endBlock.EndToken.NextToken, msg="End token has no open end: {token}".format(token=endBlock.EndToken.NextToken))
class BlockLinking(ITestcase): #, ExpectedDataMixin):
def test_BlockLinking(self) -> None:
# test['name']
tokenStream = Tokenizer.GetVHDLTokenizer(self.code)
blockStream = TokenToBlockParser.Transform(tokenStream)
blockIterator = iter(blockStream)
firstBlock = next(blockIterator)
self.assertIsInstance(firstBlock, StartOfDocumentBlock, msg="First block is not StartOfDocumentBlock: {block}".format(block=firstBlock))
startToken = firstBlock.StartToken
self.assertIsInstance(startToken, StartOfDocumentToken, msg="First token is not StartOfDocumentToken: {token}".format(token=startToken))
lastBlock: Block = firstBlock
endBlock: Block = None
lastToken: Token = startToken
for block in blockIterator:
if isinstance(block, EndOfDocumentBlock):
endBlock = block
break
# Block checks
self.assertEqual(lastBlock.NextBlock, block,
msg="Last block is not connected to the current block: {block}".format(block=block))
self.assertEqual(lastBlock, block.PreviousBlock,
msg="Current block is not connected to last block: {block}".format(block=block))
# Token checks
tokenIterator = iter(block)
for token in tokenIterator:
self.assertIsNotNone(token.NextToken, msg="Token has an open end (token).".format(token=token.NextToken))
self.assertEqual(lastToken.NextToken, token, msg="Last token is not connected to the current token.")
self.assertIsNotNone(token.PreviousToken, msg="Token has an open end (PreviousToken).")
self.assertEqual(token.PreviousToken, lastToken, msg="Current token is not connected to lastToken.")
lastToken = token
lastBlock = block
else:
self.fail(msg="No EndOfDocumentBlock found.")
# Block checks
self.assertIsInstance(endBlock, EndOfDocumentBlock, msg="End block is not EndOfDocumentblock: {token}".format(token=endBlock))
self.assertIsInstance(endBlock.EndToken, EndOfDocumentToken, msg="End block's token is not EndOfDocumentToken: {token}".format(
token=endBlock.EndToken))
# Token checks
self.assertEqual(lastToken.NextToken, endBlock.EndToken, msg="Last token is not connected to the end token.")
self.assertEqual(lastToken, endBlock.EndToken.PreviousToken, msg="End token is not connected to lastToken.")
self.assertIsNone(endBlock.EndToken.NextToken, msg="End token has no open end: {token}".format(token=endBlock.EndToken.NextToken))
class LinkingTests(TokenLinking, BlockLinking):
pass
```
#### File: unit/DocumentModel/__init__.py
```python
from unittest import TestSuite
from tests.unit.DocumentModel import Architecture
from tests.unit.DocumentModel import Context
from tests.unit.DocumentModel import Entity
from tests.unit.DocumentModel import Package, PackageBody
def load_tests(loader, testCases, pattern):
suite = TestSuite()
suite.addTests(loader.loadTestsFromModule(Architecture))
suite.addTests(loader.loadTestsFromModule(Context))
suite.addTests(loader.loadTestsFromModule(Entity))
suite.addTests(loader.loadTestsFromModule(Package))
suite.addTests(loader.loadTestsFromModule(PackageBody))
return suite
``` |
{
"source": "josephabirached/papers",
"score": 3
} |
#### File: papers/papersite/db.py
```python
from papersite import app
from flask import g
import papersite.config
import sqlite3
def get_db():
db = getattr(g, '_database', None)
if db is None:
db = g._database = sqlite3.connect(papersite.config.DATABASE)
db.row_factory = dict_factory
return db
def query_db(query, args=(), one=False):
cur = get_db().execute(query, args)
rv = cur.fetchall()
cur.close()
return (rv[0] if rv else None) if one else rv
@app.teardown_appcontext
def close_connection(exception):
db = getattr(g, '_database', None)
if db is not None:
db.close()
# fancy sqlrow -> dict converter
def dict_factory(cursor, row):
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d
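# e.g. a row from "select authorid, fullname from authors" is returned as
# {'authorid': 7, 'fullname': '...'} instead of a plain tuple (values hypothetical)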
def get_authors(paperid):
return query_db("select \
a.authorid, a.fullname \
from \
papers_authors as pa, \
authors as a \
where \
pa.authorid = a.authorid and \
pa.paperid = ?",
[paperid])
def get_domains(paperid):
return query_db("select \
d.domainid, d.domainname from \
domains as d, papers_domains as pd \
where \
pd.domainid = d.domainid and \
pd.paperid = ?",
[paperid])
def get_keywords(paperid):
return query_db("select \
k.keywordid, k.keyword \
from keywords as k, papers_keywords as pk \
where \
pk.keywordid = k.keywordid and \
pk.paperid = ?",
[paperid])
def get_comment(commentid):
return query_db("select c.commentid, c.comment, c.createtime, \
u.username, c.userid, c.paperid \
from \
comments as c, \
users as u \
where c.userid = u.userid and \
c.commentid = ? \
", [commentid], one=True)
def get_comments(paperid):
return query_db("select \
c.commentid, c.comment, c.userid, \
c.createtime, c.edited_at, \
e.username as edituser, \
u.username \
from \
comments as c \
left join users as u on c.userid = u.userid \
left join users as e on c.edited_by = e.userid \
where \
c.deleted_at is null and \
c.paperid = ? \
order by c.createtime \
",
[paperid])
def delete_comment(commentid):
con = get_db()
with con:
con.execute('update comments set deleted_at = datetime() \
where commentid = ?', [commentid])
return id
def delete_paper(paperid):
con = get_db()
with con:
con.execute('update papers set deleted_at = datetime() \
where paperid = ?', [paperid])
return id
def get_review(paperid):
return query_db("select \
r.reviewid, r.review, r.userid, \
r.createtime, \
u.username \
from \
reviews as r, \
users as u \
where r.userid = u.userid and \
r.paperid = ? \
order by r.createtime desc \
limit 1 \
",
[paperid], one=True)
# If there is no such keyword/author/domain in db,
# we will insert in into db
def get_insert_keyword(keyword):
con = get_db()
with con:
con.execute("INSERT OR IGNORE INTO keywords(keyword) \
VALUES(?)", [keyword])
id = con.execute("SELECT keywordid FROM keywords \
WHERE keyword = ?",
[keyword]).fetchone()['keywordid']
return id
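# e.g. get_insert_keyword("graph theory") returns the existing keywordid if the keyword
# is already stored, otherwise it is inserted first (example value hypothetical)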
def get_insert_author(fullname):
con = get_db()
with con:
con.execute("INSERT OR IGNORE INTO authors(fullname) \
VALUES(?)", [fullname])
id = con.execute("SELECT authorid FROM authors \
WHERE fullname = ?",
[fullname]).fetchone()['authorid']
return id
#### Updated to check domain existence in DB
def get_insert_domain(domainname):
con = get_db()
with con:
con.execute("INSERT OR IGNORE INTO domains(domainname) \
SELECT (?) WHERE not exists \
(select (?) from domains where domainname = (?) COLLATE NOCASE)",(domainname,domainname,domainname))
id = con.execute("SELECT domainid FROM domains \
WHERE domainname = (?) COLLATE NOCASE",
[domainname]).fetchone()['domainid']
return id
def likes(paperid):
return query_db(
"select count(*) as c \
from likes \
where paperid=?",
[paperid],
one=True)['c']
def liked_by(paperid):
return query_db(
"select u.username as username \
from likes as l, users as u \
where l.userid = u.userid and \
l.paperid=?",
[paperid])
def get_notifs(userid=1, limit=10):
return query_db(
"select * \
from notifs as n \
where n.userid = ? \
order by createtime desc \
limit ? \
",
[userid, limit])
def get_uploader(paperid):
return query_db(
"select u.* \
from users as u, papers as p \
where u.userid = p.userid \
and p.paperid = ?",
[paperid], one=True)
def get_paper_w_uploader(paperid):
return query_db("select p.*, u.username \
from papers as p, \
users as u \
where \
p.userid = u.userid and \
p.paperid = ?",
[paperid], one=True)
def histore_paper_info(paper):
con = get_db()
with con:
paperid = paper['paperid']
authors = ', '.join([a['fullname'] for a in get_authors(paperid)])
domains = ', '.join([d['domainname'] for d in get_domains(paperid)])
tags = ', '.join([k['keyword'] for k in get_keywords(paperid)])
con.execute('insert into papers_history (paperid, \
old_getlink, \
old_title, \
old_authors, \
old_domains, \
old_tags, \
old_edited_by, \
old_edited_at \
) \
values (?, ?, ?, ?, ?, ?, ?, ?)',
[paper['paperid'],
paper['getlink'],
paper['title'],
authors,
domains,
tags,
paper['edited_by'],
paper['edited_at']
]
)
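# Presumably called before a paper record is edited: the current title, authors, domains
# and tags are snapshotted into papers_history so the previous state stays recoverable.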
############ Modifications by Devhub01 ###############
def delete_domain(domainname):
con = get_db()
with con:
con.execute('delete from domains WHERE domainname = ? \
and domainid not in (SELECT DISTINCT domainid FROM papers_domains)', [domainname])
return id
def delete_author(fullname):
con = get_db()
with con:
con.execute('delete from authors \
where fullname = ?', [fullname])
return id
def delete_tag(keyword):
con = get_db()
with con:
con.execute('delete from keywords \
where keyword = ?', [keyword])
return id
def delete_papers_domain(domainid):
con = get_db()
with con:
con.execute('delete from papers_domains \
where domainid = ?', [domainid])
return id
def delete_papers_authors(authorid):
con = get_db()
with con:
con.execute('delete from papers_authors \
where authorid = ?', [authorid])
return id
def delete_papers_tags(keywordid):
con = get_db()
with con:
con.execute('delete from papers_keywords\
where keywordid = ?', [keywordid])
return id
```
#### File: papers/papersite/email.py
```python
############
import smtplib, random, string, datetime
from email.mime.text import MIMEText
from papersite.config import MAIL_SERVER, MAIL_USER, MAIL_PASS
from papersite.db import query_db, get_db
from flask import url_for
from papersite import app
import papersite.user
import threading
## FIXME: should use thread pool
## or even cron tasks
def send_mail(usermail, message, subject):
sending_thread = threading.Thread(
target = send_mail_,
args = (usermail, message, subject))
sending_thread.start()
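# Usage sketch (hypothetical values):
#   send_mail("user@example.com", "A new comment was posted on your paper.", "New comment")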
## send notifs, if notifs are not muted
def send_mail_ (usermail, message, subject):
with app.app_context():
u = query_db('select * \
from users \
where email = ?',
[usermail], one=True)
if (not u['notifs_muted'] and u['userid'] != 1):
# Create a text/plain message
msg = MIMEText(message)
msg['Subject'] = subject
msg['From'] = 'Papers-gamma Team ' + '<' + MAIL_USER + '>'
msg['To'] = usermail
# Send the message via our own SMTP server.
s = smtplib.SMTP_SSL(MAIL_SERVER)
s.login(MAIL_USER, MAIL_PASS)
s.send_message(msg)
s.quit()
def send_confirmation_mail(username, usermail):
sending_thread = threading.Thread(
target = send_confirmation_mail_,
args = (username, usermail))
sending_thread.start()
def send_confirmation_mail_(username, usermail):
with app.app_context():
key = ''.join(map( lambda x : random.choice(string.ascii_letters),
range(100)))
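        # 'key' is a one-time confirmation token: 100 random ASCII letters, stored in
        # users.key and embedded in the confirmation link below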
con = get_db()
with con:
con.execute('update users set key = ? \
where username = ?',
[key, username])
# Create a text/plain message
msg = MIMEText(
"Hello %s, \n\n\
If you want to complete the registration on 'Papers' \n\
you should click on the following link: \n\
%s \n\n\
Good luck,\n\
Papers' team" % (username, url_for('register_confirmation',
key=key, _external=True)))
msg['Subject'] = 'Email confirmation'
msg['From'] = 'Papers-gamma Team' + '<' + MAIL_USER + '>'
msg['To'] = usermail
# Send the message via our own SMTP server.
s = smtplib.SMTP_SSL(MAIL_SERVER)
s.login(MAIL_USER, MAIL_PASS)
s.send_message(msg)
s.quit()
def send_password_change_mail(usermail):
sending_thread = threading.Thread(
target = send_password_change_mail_,
args = (usermail, ))
sending_thread.start()
def send_password_change_mail_(usermail):
with app.app_context():
key = ''.join(map( lambda x : random.choice(string.ascii_letters),
range(100)))
con = get_db()
with con:
con.execute('update users set \
key = ?, \
chpasstime = ? \
where email = ?',
[key, datetime.datetime.now(), usermail])
u = query_db('select userid,username,email,createtime,valid \
from users \
where email = ?',
[usermail], one=True)
# Create a text/plain message
msg = MIMEText(
"Hello %s, \n\n\
to change your password on 'Papers' site \n\
click on the following link: \n\
%s \n\n\
This link will be valid for 2 days only \n\n\
Good luck,\n\
Papers' team" % (u['username'], url_for('set_new_password',
key=key, _external=True)))
msg['Subject'] = 'Change password'
msg['From'] = 'Papers-gamma Team' + '<' + MAIL_USER + '>'
msg['To'] = usermail
# Send the message via our own SMTP server.
s = smtplib.SMTP_SSL(MAIL_SERVER)
s.login(MAIL_USER, MAIL_PASS)
s.send_message(msg)
s.quit()
```
#### File: papers/papersite/user.py
```python
############
import hashlib, sqlite3
from papersite import app
from flask import session, flash, redirect, url_for
from papersite.db import query_db, get_db
from flask import abort, request, render_template
from papersite.config import SALT1
from papersite.email import send_confirmation_mail, \
send_password_change_mail
from math import ceil
from papersite.spamdetector import is_spam
from papersite import CAPTCHA
def hash(password):
m = hashlib.sha256()
m.update(password)
m.update(SALT1)
return m.hexdigest()
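# e.g. hash("secret".encode("utf-8")) yields a 64-character sha256 hex digest; callers
# below pass utf-8 encoded bytes (example value hypothetical)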
def user_authenticated():
return ('user' in session)
## Anonymous is the first one
ANONYMOUS = 1
def get_user_id():
if user_authenticated():
return session['user']['userid']
else:
# Anoynomous
return ANONYMOUS
def is_super_admin(userid):
return 1 == query_db("select super_admin \
from users \
where userid = ?", [userid], one=True)['super_admin']
def get_user(userid):
return query_db("select * from users where userid = ?",
[userid], one=True)
def is_author_of_comment(userid, commentid):
return 1 == query_db("select count(*) as count \
from comments \
where userid = ? and commentid = ?",
[userid, commentid], one=True)['count']
def is_author_of_paper(userid, paperid):
return 1 == query_db("select count(*) as count \
from papers \
where userid = ? and paperid = ?",
[userid, paperid], one=True)['count']
def handle_sqlite_exception(err):
if ("users.username" in str(err)):
return "Sorry, the user name '%s' has already been taken" % request.form['username']
if ("users.email" in str(err)):
return "Sorry, the email addreser '%s' has already been taken" % request.form['email']
# populate user_authenticated() into jinja2 templates
@app.context_processor
def utility_processor():
return dict(user_authenticated=user_authenticated)
@app.route('/reg', methods=['GET', 'POST'])
def register():
error = None
print(request.form)
if request.method == 'POST':
if request.form['email'] == "":
error = 'Please use a valid email address'
elif request.form['username'] == "":
error = 'Do not forget about your name'
elif request.form['password1'] != request.form['password2']:
error = 'Password and retyped password do not match'
elif request.form['password1'] == "":
error = 'Password cannot be empty'
elif "/" in request.form['username']:
error = 'Username cannot contain symbol "/"'
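        # usernames double as profile URLs (see the 'usersite' redirects below), so a
        # username must not collide with an existing top-level route such as /login or /reg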
elif request.form['username'] in \
[r.rule.split('/', maxsplit=2)[1] for r in app.url_map.iter_rules()]:
error = 'You cannot use username "' + \
request.form['username'] + \
'", please choose another.'
elif is_spam(request):
return "<h1>Posted data looks like a spam, contact us if not</h1>", 403
elif not CAPTCHA.verify (request.form['captcha-text'],
request.form['captcha-hash']):
error = 'Watch captcha!!!'
else:
con = get_db()
try:
with con:
con.execute('insert into users \
(username, email, password, valid, about) \
values (?, ?, ?, ?, ?)',
[request.form['username'],
request.form['email'],
hash (request.
form['password1'].
encode('utf-8')),
0,
'...Some information about the user will someday appear here...'
])
send_confirmation_mail (request.form['username'],
request.form['email'])
flash('A confirmation link has been sent to you. \n\
Please, check your mailbox (%s). If it is not the case, please contact us.' % request.form['email'])
return redirect(url_for('index'))
except sqlite3.IntegrityError as err:
error = handle_sqlite_exception(err)
captcha = CAPTCHA.create()
return render_template('users/register.html', error = error, captcha = captcha)
@app.route('/change-password/<string:key>', methods=['GET','POST'])
def set_new_password(key):
error = None
u = query_db('select userid, username, email, \
createtime, valid, about \
from users \
where key = ? \
and chpasstime > datetime("now","-2 days")',
[key], one=True)
if u is not None:
email = u['email']
if request.method == 'POST':
if request.form['password1'] != request.form['password2']:
error = 'Password and retyped password do not match'
elif request.form['password1'] == "":
error = 'Password cannot be empty'
else:
con = get_db()
with con:
con.execute('update users set \
password = ?, valid = 1, key = null \
where key = ?',
[hash (request.form['password1'].
encode('utf-8')),
key
])
session.permanent = True
session['user'] = u
flash('Hello ' + u['username'] + \
'. You have successfully changed your password')
return redirect(url_for('usersite',username=session['user']['username']))
else:
email = 'brrrr. See red error above.'
error = 'Not valid key'
return render_template('users/restore2.html', key=key,
email=email,
error=error)
@app.route('/change-password', methods=['GET', 'POST'])
def new_password_link():
error = None
if request.method == 'POST':
u = query_db('select userid,username,email,createtime,valid \
from users \
where email = ?',
[request.form['email']], one=True)
if u is not None:
send_password_change_mail (request.form['email'])
flash('A confirmation link has been sent to you. \n\
Please, check your mailbox (%s)' %
request.form['email'])
return redirect(url_for('index'))
else:
            error = 'User with this email does not exist'
return render_template('users/restore.html', error = error)
@app.route('/reg/<string:key>')
def register_confirmation(key):
error = None
u = query_db('select userid,username,email, \
createtime,valid,about \
from users \
where key = ?',
[key], one=True)
if u is not None:
con = get_db()
with con:
con.execute('update users set valid = 1, key = null \
where key = ?',
[key])
session.permanent = True
session['user'] = u
flash('Hello ' + u['username'] + \
'. You have successfully confirmed your email address')
return redirect(url_for('usersite',username=session['user']['username']))
@app.route('/login', methods=['GET', 'POST'])
def login():
error = None
if request.method == 'POST':
u = query_db('select userid,username,email, \
createtime,valid,about,notifs_muted \
from users \
where password = ? and email = ?',
[hash (request.
form['password']
.encode('utf-8')),
request.form['email']], one=True)
if u is not None:
if u['valid'] == 0:
error = 'Please, check your mail box. We have \
sent you an email.'
elif 'rememberme' in request.form:
session.permanent = True
session['user'] = u
flash('You were successfully logged in')
return redirect(url_for('index'))
else:
error = 'Invalid credentials'
return render_template('users/login.html', error=error)
@app.route("/editinfo", methods=['GET','POST'])
def editinfo():
if not user_authenticated():
return "<h1>Forbidden (maybe you forgot to login)</h1>", 403
error = None
if request.method == 'POST':
if request.form['email'] == "":
error = 'Please use a valid email address'
elif request.form['username'] == "":
error = 'Do not forget about your name'
elif "/" in request.form['username']:
error = 'Username cannot contain symbol "/"'
elif request.form['username'] in \
[r.rule.split('/', maxsplit=2)[1] for r in app.url_map.iter_rules()]:
error = 'You cannot use username "' + \
request.form['username'] + \
'", please choose another.'
else:
con = get_db()
if 'notifs_muted' in request.form:
notifs_muted = request.form['notifs_muted']
else:
notifs_muted = 0
try:
with con:
con.execute('update users set about = ?, \
email = ?, username = ?, \
notifs_muted = ? \
where userid = ?',
[request.form['about'],
request.form['email'],
request.form['username'],
notifs_muted,
session['user']['userid']])
session['user']['email'] = request.form['email']
session['user']['about'] = request.form['about']
session['user']['username'] = request.form['username']
session['user']['notifs_muted'] = notifs_muted
# if all is good
return redirect(url_for('usersite',username=session['user']['username']))
except sqlite3.IntegrityError as err:
error = handle_sqlite_exception(err)
# if any error
return render_template('users/editinfo.html', error=error)
@app.route("/mute-email-notifs", methods=['GET'])
def mute_email_notifs():
if not user_authenticated():
return "<h1>Forbidden (maybe you forgot to login)</h1>", 403
con = get_db()
with con:
con.execute('update users set notifs_muted = 1 \
where userid = ?',
[session['user']['userid']])
session['user']['notifs_muted'] = "1"
flash('Email notifications are muted')
return redirect(url_for('usersite',username=session['user']['username']))
return redirect(url_for('usersite'))
@app.route("/unmute-email-notifs", methods=['GET'])
def unmute_email_notifs():
if not user_authenticated():
return "<h1>Forbidden (maybe you forgot to login)</h1>", 403
con = get_db()
with con:
con.execute('update users set notifs_muted = 0 \
where userid = ?',
[session['user']['userid']])
session['user']['notifs_muted'] = "0"
flash('Email notifications are UN-muted')
return redirect(url_for('usersite',username=session['user']['username']))
return redirect(url_for('usersite'))
@app.route("/logout")
def logout():
# remove the user from the session if it's there
session.pop('user', None)
return redirect(url_for('index'))
``` |
{
"source": "josephabrahams/django-sendgrid-v5",
"score": 2
} |
#### File: django-sendgrid-v5/sendgrid_backend/util.py
```python
from typing import Any, Dict
import sendgrid
from django.conf import settings
from sendgrid.helpers.mail import Personalization
SENDGRID_VERSION = sendgrid.__version__
SENDGRID_5 = SENDGRID_VERSION < "6"
SENDGRID_6 = SENDGRID_VERSION >= "6"
def get_django_setting(setting_str, default=None):
"""
    If the django setting exists and is set, returns its value. Otherwise returns the given default (which defaults to None).
"""
if hasattr(settings, setting_str):
return getattr(settings, setting_str, default)
return default
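# e.g. get_django_setting("SENDGRID_SANDBOX_MODE_IN_DEBUG", True) returns the configured
# value, or True if the setting is absent (setting name here is only illustrative)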
def dict_to_personalization(data: Dict[Any, Any]) -> Personalization:
"""
Reverses Sendgrid's Personalization.get() method to create a Personalization
object from its emitted data structure (in the form of a Dict)
"""
personalization = Personalization()
properties = [
p
for p in dir(Personalization)
if isinstance(getattr(Personalization, p), property)
]
for attr in properties:
if attr in ["tos", "ccs", "bccs"]:
key = attr[:-1] # this searches the data for ["to", "cc", "bcc"]
else:
key = attr
value = data.get(key, None)
if value:
setattr(personalization, attr, value)
getattr(personalization, attr)
return personalization
```
#### File: django-sendgrid-v5/test/test_echo_to_stream.py
```python
import warnings
from unittest.mock import MagicMock
from django.core.mail import EmailMessage
from django.test import override_settings
from django.test.testcases import SimpleTestCase
from python_http_client.exceptions import UnauthorizedError
from sendgrid_backend.mail import SendgridBackend
class TestEchoToOutput(SimpleTestCase):
def test_echo(self):
settings = {
"DEBUG": True,
"SENDGRID_API_KEY": "DOESNT_MATTER",
"EMAIL_BACKEND": "sendgrid_backend.SendgridBackend",
"SENDGRID_ECHO_TO_STDOUT": True,
}
with override_settings(**settings):
mocked_output_stream = MagicMock()
connection = SendgridBackend(stream=mocked_output_stream)
msg = EmailMessage(
subject="Hello, World!",
body="Hello, World!",
from_email="<NAME> <<EMAIL>>",
to=["<NAME> <<EMAIL>>"],
connection=connection,
)
try:
msg.send()
except UnauthorizedError:
# Since Github only runs live server tests on protected branches (for security),
# we will get an unauthorized error when attempting to hit the sendgrid api endpoint, even in
# sandbox mode.
warnings.warn(
"Sendgrid requests using sandbox mode still need valid credentials for the "
+ "request to succeed."
)
self.assertTrue(mocked_output_stream.write.called)
```
#### File: django-sendgrid-v5/test/test_mail.py
```python
import base64
from email.mime.image import MIMEImage
from django.core.mail import EmailMessage, EmailMultiAlternatives
from django.test import override_settings
from django.test.testcases import SimpleTestCase
from sendgrid.helpers.mail import (
CustomArg,
Email,
Header,
Personalization,
Substitution,
)
from sendgrid_backend.mail import SendgridBackend
from sendgrid_backend.util import SENDGRID_5, SENDGRID_6, dict_to_personalization
if SENDGRID_6:
from sendgrid.helpers.mail import Bcc, Cc, To
class TestMailGeneration(SimpleTestCase):
# Any assertDictEqual failures will show the entire diff instead of just a snippet
maxDiff = None
@classmethod
def setUpClass(self):
super(TestMailGeneration, self).setUpClass()
with override_settings(
EMAIL_BACKEND="sendgrid_backend.SendgridBackend",
SENDGRID_API_KEY="DUMMY_API_KEY",
):
self.backend = SendgridBackend()
def test_EmailMessage(self):
"""
Tests that an EmailMessage object is properly serialized into the format
expected by Sendgrid's API
"""
msg = EmailMessage(
subject="Hello, World!",
body="Hello, World!",
from_email="<NAME> <<EMAIL>>",
to=["<NAME> <<EMAIL>>", "<EMAIL>"],
cc=["<NAME> <<EMAIL>>"],
bcc=["<NAME> <<EMAIL>>"],
reply_to=["<NAME> <<EMAIL>>"],
)
result = self.backend._build_sg_mail(msg)
expected = {
"personalizations": [
{
"to": [
{"email": "<EMAIL>", "name": "<NAME>"},
{
"email": "<EMAIL>",
},
],
"cc": [
{
"email": "<EMAIL>",
"name": "<NAME>",
}
],
"bcc": [
{"email": "<EMAIL>", "name": "<NAME>"}
],
"subject": "Hello, World!",
}
],
"from": {"email": "<EMAIL>", "name": "<NAME>"},
"mail_settings": {"sandbox_mode": {"enable": False}},
"reply_to": {"email": "<EMAIL>", "name": "<NAME>"},
"subject": "Hello, World!",
"tracking_settings": {
"click_tracking": {"enable": True, "enable_text": True},
"open_tracking": {"enable": True},
},
"content": [{"type": "text/plain", "value": "Hello, World!"}],
}
self.assertDictEqual(result, expected)
def test_EmailMessage_attributes(self):
"""
Test that send_at and categories attributes are correctly written through to output.
"""
msg = EmailMessage(
subject="Hello, World!",
body="Hello, World!",
from_email="<NAME> <<EMAIL>>",
to=["<NAME> <<EMAIL>>", "<EMAIL>"],
)
# Set new attributes as message property
msg.send_at = 1518108670
if SENDGRID_5:
msg.categories = ["mammal", "dog"]
else:
msg.categories = ["dog", "mammal"]
msg.ip_pool_name = "some-name"
result = self.backend._build_sg_mail(msg)
expected = {
"personalizations": [
{
"to": [
{"email": "<EMAIL>", "name": "<NAME>"},
{
"email": "<EMAIL>",
},
],
"subject": "Hello, World!",
"send_at": 1518108670,
}
],
"from": {"email": "<EMAIL>", "name": "<NAME>"},
"mail_settings": {"sandbox_mode": {"enable": False}},
"subject": "Hello, World!",
"tracking_settings": {
"click_tracking": {"enable": True, "enable_text": True},
"open_tracking": {"enable": True},
},
"content": [{"type": "text/plain", "value": "Hello, World!"}],
"categories": ["mammal", "dog"],
"ip_pool_name": "some-name",
}
self.assertDictEqual(result, expected)
def test_EmailMultiAlternatives(self):
"""
Tests that django's EmailMultiAlternatives class works as expected.
"""
msg = EmailMultiAlternatives(
subject="Hello, World!",
body=" ",
from_email="<NAME> <<EMAIL>>",
to=["<NAME> <<EMAIL>>", "<EMAIL>"],
cc=["<NAME> <<EMAIL>>"],
bcc=["<NAME> <<EMAIL>>"],
reply_to=["<NAME> <<EMAIL>>"],
)
msg.attach_alternative("<body<div>Hello World!</div></body>", "text/html")
# Test CSV attachment
msg.attach("file.csv", "1,2,3,4", "text/csv")
result = self.backend._build_sg_mail(msg)
expected = {
"personalizations": [
{
"to": [
{"email": "<EMAIL>", "name": "<NAME>"},
{
"email": "<EMAIL>",
},
],
"cc": [
{
"email": "<EMAIL>",
"name": "<NAME>",
}
],
"bcc": [
{"email": "<EMAIL>", "name": "<NAME>"}
],
"subject": "Hello, World!",
}
],
"from": {"email": "<EMAIL>", "name": "<NAME>"},
"mail_settings": {"sandbox_mode": {"enable": False}},
"reply_to": {"email": "<EMAIL>", "name": "<NAME>"},
"subject": "Hello, World!",
"tracking_settings": {
"click_tracking": {"enable": True, "enable_text": True},
"open_tracking": {"enable": True},
},
"attachments": [
{"content": "MSwyLDMsNA==", "filename": "file.csv", "type": "text/csv"}
],
"content": [
{
"type": "text/plain",
"value": " ",
},
{
"type": "text/html",
"value": "<body<div>Hello World!</div></body>",
},
],
}
self.assertDictEqual(result, expected)
def test_EmailMultiAlternatives__unicode_attachment(self):
"""
Tests that django's EmailMultiAlternatives class works as expected with a unicode-formatted
attachment.
"""
msg = EmailMultiAlternatives(
subject="Hello, World!",
body=" ",
from_email="<NAME> <<EMAIL>>",
to=["<NAME> <<EMAIL>>", "<EMAIL>"],
cc=["<NAME> <<EMAIL>>"],
bcc=["<NAME> <<EMAIL>>"],
reply_to=["<NAME> <<EMAIL>>"],
)
msg.attach_alternative("<body<div>Hello World!</div></body>", "text/html")
# Test CSV attachment
attachments = [
("file.xls", b"\xd0", "application/vnd.ms-excel"),
("file.csv", b"C\xc3\xb4te d\xe2\x80\x99Ivoire", "text/csv"),
]
if SENDGRID_5:
for a in attachments:
msg.attach(*a)
else:
for a in reversed(attachments):
msg.attach(*a)
result = self.backend._build_sg_mail(msg)
expected = {
"personalizations": [
{
"to": [
{"email": "<EMAIL>", "name": "<NAME>"},
{
"email": "<EMAIL>",
},
],
"cc": [
{
"email": "<EMAIL>",
"name": "<NAME>",
}
],
"bcc": [
{"email": "<EMAIL>", "name": "<NAME>"}
],
"subject": "Hello, World!",
}
],
"from": {"email": "<EMAIL>", "name": "<NAME>"},
"mail_settings": {"sandbox_mode": {"enable": False}},
"reply_to": {"email": "<EMAIL>", "name": "<NAME>"},
"subject": "Hello, World!",
"tracking_settings": {
"click_tracking": {"enable": True, "enable_text": True},
"open_tracking": {"enable": True},
},
"attachments": [
{
"content": "0A==",
"filename": "file.xls",
"type": "application/vnd.ms-excel",
},
{
"content": "Q8O0dGUgZOKAmUl2b2lyZQ==",
"filename": "file.csv",
"type": "text/csv",
},
],
"content": [
{
"type": "text/plain",
"value": " ",
},
{
"type": "text/html",
"value": "<body<div>Hello World!</div></body>",
},
],
}
self.assertDictEqual(result, expected)
def test_reply_to(self):
"""
Tests reply-to functionality
"""
kwargs = {
"subject": "Hello, World!",
"body": "Hello, World!",
"from_email": "<NAME> <<EMAIL>>",
"to": ["<NAME> <<EMAIL>>"],
"reply_to": ["<NAME> <<EMAIL>>"],
"headers": {"Reply-To": "<NAME> <<EMAIL>>"},
}
# Test different values in Reply-To header and reply_to prop
msg = EmailMessage(**kwargs)
with self.assertRaises(ValueError):
self.backend._build_sg_mail(msg)
# Test different names (but same email) in Reply-To header and reply_to prop
kwargs["headers"] = {"Reply-To": "Bad Name <<EMAIL>>"}
msg = EmailMessage(**kwargs)
with self.assertRaises(ValueError):
self.backend._build_sg_mail(msg)
# Test same name/email in both Reply-To header and reply_to prop
kwargs["headers"] = {"Reply-To": "<NAME> <<EMAIL>>"}
msg = EmailMessage(**kwargs)
result = self.backend._build_sg_mail(msg)
self.assertDictEqual(
result["reply_to"], {"email": "<EMAIL>", "name": "<NAME>"}
)
def test_mime(self):
"""
Tests MIMEImage support for the EmailMultiAlternatives class
"""
msg = EmailMultiAlternatives(
subject="Hello, World!",
body=" ",
from_email="<NAME> <<EMAIL>>",
to=["<NAME> <<EMAIL>>", "<EMAIL>"],
)
content = '<body><img src="cid:linux_penguin" /></body>'
msg.attach_alternative(content, "text/html")
with open("test/linux-penguin.png", "rb") as f:
img = MIMEImage(f.read())
img.add_header("Content-ID", "<linux_penguin>")
msg.attach(img)
result = self.backend._build_sg_mail(msg)
self.assertEqual(len(result["content"]), 2)
self.assertDictEqual(result["content"][0], {"type": "text/plain", "value": " "})
self.assertDictEqual(
result["content"][1], {"type": "text/html", "value": content}
)
self.assertEqual(len(result["attachments"]), 1)
self.assertEqual(result["attachments"][0]["content_id"], "linux_penguin")
with open("test/linux-penguin.png", "rb") as f:
self.assertEqual(
bytearray(result["attachments"][0]["content"], "utf-8"),
base64.b64encode(f.read()),
)
self.assertEqual(result["attachments"][0]["type"], "image/png")
def test_templating_sendgrid_v5(self):
"""
Tests that basic templating functionality works. This is a simple check and
the results are valid for both Sendgrid versions 5 and 6.
"""
msg = EmailMessage(
subject="Hello, World!",
body="Hello, World!",
from_email="<NAME> <<EMAIL>>",
to=["<NAME> <<EMAIL>>", "<EMAIL>"],
)
msg.template_id = "test_template"
result = self.backend._build_sg_mail(msg)
self.assertIn("template_id", result)
self.assertEquals(result["template_id"], "test_template")
def test_templating_sendgrid(self):
"""
Tests more complex templating scenarios for versions 5 and 6 of sendgrid
todo: break this up into separate tests
"""
if SENDGRID_5:
msg = EmailMessage(
subject="Hello, World!",
body="Hello, World!",
from_email="<NAME> <<EMAIL>>",
to=["<NAME> <<EMAIL>>", "<EMAIL>"],
)
msg.template_id = "test_template"
result = self.backend._build_sg_mail(msg)
self.assertIn("template_id", result)
self.assertEquals(result["template_id"], "test_template")
# Testing that for sendgrid v5 the code behave in the same way
self.assertEquals(
result["content"], [{"type": "text/plain", "value": "Hello, World!"}]
)
self.assertEquals(result["subject"], "Hello, World!")
self.assertEquals(result["personalizations"][0]["subject"], "Hello, World!")
else:
msg = EmailMessage(
from_email="<NAME> <<EMAIL>>",
to=["<NAME> <<EMAIL>>", "<EMAIL>"],
)
msg.template_id = "test_template"
msg.dynamic_template_data = {
"subject": "Hello, World!",
"content": "Hello, World!",
"link": "http://hello.com",
}
result = self.backend._build_sg_mail(msg)
self.assertIn("template_id", result)
self.assertEquals(result["template_id"], "test_template")
self.assertEquals(
result["personalizations"][0]["dynamic_template_data"],
msg.dynamic_template_data,
)
# Subject and content should not be between request param
self.assertNotIn("subject", result)
self.assertNotIn("content", result)
def test_asm(self):
"""
Tests that unsubscribe group functionality works
"""
msg = EmailMessage(
subject="Hello, World!",
body="Hello, World!",
from_email="<NAME> <<EMAIL>>",
to=["<NAME> <<EMAIL>>", "<EMAIL>"],
)
msg.asm = {"group_id": 1}
result = self.backend._build_sg_mail(msg)
self.assertIn("asm", result)
self.assertIn("group_id", result["asm"])
del msg.asm["group_id"]
with self.assertRaises(KeyError):
self.backend._build_sg_mail(msg)
msg.asm = {"group_id": 1, "groups_to_display": [2, 3, 4], "bad_key": None}
result = self.backend._build_sg_mail(msg)
self.assertIn("asm", result)
self.assertIn("group_id", result["asm"])
self.assertIn("groups_to_display", result["asm"])
def test_EmailMessage_custom_args(self):
"""
Tests that the custom_args property is serialized correctly
"""
msg = EmailMessage(
subject="Hello, World!",
body="Hello, World!",
from_email="<NAME> <<EMAIL>>",
to=["<NAME> <<EMAIL>>", "<EMAIL>"],
cc=["<NAME> <<EMAIL>>"],
bcc=["<NAME> <<EMAIL>>"],
reply_to=["<NAME> <<EMAIL>>"],
)
msg.custom_args = {"arg_1": "Foo", "arg_2": "bar"}
result = self.backend._build_sg_mail(msg)
expected = {
"personalizations": [
{
"to": [
{"email": "<EMAIL>", "name": "<NAME>"},
{
"email": "<EMAIL>",
},
],
"cc": [
{
"email": "<EMAIL>",
"name": "<NAME>",
}
],
"bcc": [
{"email": "<EMAIL>", "name": "<NAME>"}
],
"subject": "Hello, World!",
"custom_args": {"arg_1": "Foo", "arg_2": "bar"},
}
],
"from": {"email": "<EMAIL>", "name": "<NAME>"},
"mail_settings": {"sandbox_mode": {"enable": False}},
"reply_to": {"email": "<EMAIL>", "name": "<NAME>"},
"subject": "Hello, World!",
"tracking_settings": {
"click_tracking": {"enable": True, "enable_text": True},
"open_tracking": {"enable": True},
},
"content": [{"type": "text/plain", "value": "Hello, World!"}],
}
self.assertDictEqual(result, expected)
def test_personalizations_resolution(self):
"""
Tests that adding a Personalization() object directly to an EmailMessage object
works as expected.
Written to test functionality introduced in the PR:
https://github.com/sklarsa/django-sendgrid-v5/pull/90
"""
msg = EmailMessage(
subject="Hello, World!",
body="Hello, World!",
from_email="<NAME> <<EMAIL>>",
to=["<NAME> <<EMAIL>>", "<EMAIL>"],
cc=["<NAME> <<EMAIL>>"],
bcc=["<NAME> <<EMAIL>>"],
reply_to=["<NAME> <<EMAIL>>"],
)
# Tests that personalizations take priority
test_str = "<EMAIL>"
test_key_str = "my key"
test_val_str = "my val"
personalization = Personalization()
if SENDGRID_5:
personalization.add_to(Email(test_str))
personalization.add_cc(Email(test_str))
personalization.add_bcc(Email(test_str))
else:
personalization.add_to(To(test_str))
personalization.add_cc(Cc(test_str))
personalization.add_bcc(Bcc(test_str))
personalization.add_custom_arg(CustomArg(test_key_str, test_val_str))
personalization.add_header(Header(test_key_str, test_val_str))
personalization.add_substitution(Substitution(test_key_str, test_val_str))
msg.personalizations = [personalization]
result = self.backend._build_sg_mail(msg)
personalization = result["personalizations"][0]
for field in ("to", "cc", "bcc"):
data = personalization[field]
self.assertEquals(len(data), 1)
self.assertEquals(data[0]["email"], test_str)
for field in ("custom_args", "headers", "substitutions"):
data = personalization[field]
self.assertEquals(len(data), 1)
self.assertIn(test_key_str, data)
self.assertEquals(test_val_str, data[test_key_str])
def test_dict_to_personalization(self):
"""
Tests that dict_to_personalization works
"""
data = {
"to": [
{"email": "<EMAIL>", "name": "<NAME>"},
{
"email": "<EMAIL>",
},
],
"cc": [
{
"email": "<EMAIL>",
"name": "<NAME>",
}
],
"bcc": [{"email": "<EMAIL>", "name": "<NAME>"}],
"subject": "Hello, World!",
"custom_args": {"arg_1": "Foo", "arg_2": "bar"},
"headers": {"header_1": "Foo", "header_2": "Bar"},
"substitutions": {"sub_a": "foo", "sub_b": "bar"},
"send_at": 1518108670,
"dynamic_template_data": {
"subject": "Hello, World!",
"content": "Hello, World!",
"link": "http://hello.com",
},
}
p = dict_to_personalization(data)
fields_to_test = (
("tos", "to"),
("ccs", "cc"),
("bccs", "bcc"),
("subject", "subject"),
("custom_args", "custom_args"),
("headers", "headers"),
("substitutions", "substitutions"),
("send_at", "send_at"),
("dynamic_template_data", "dynamic_template_data"),
)
for arg, key in fields_to_test:
val = getattr(p, arg)
if type(val) == list:
self.assertListEqual(val, data[key])
elif type(val) == dict:
self.assertDictEqual(val, data[key])
else:
self.assertEquals(val, data[key])
def test_build_personalization_errors(self):
msg = EmailMessage(
subject="Hello, World!",
body="Hello, World!",
from_email="<NAME> <<EMAIL>>",
cc=["<NAME> <<EMAIL>>"],
bcc=["<NAME> <<EMAIL>>"],
reply_to=["<NAME> <<EMAIL>>"],
)
test_str = "<EMAIL>"
test_key_str = "my key"
test_val_str = "my val"
personalization = Personalization()
if SENDGRID_5:
personalization.add_cc(Email(test_str))
personalization.add_bcc(Email(test_str))
else:
personalization.add_cc(Cc(test_str))
personalization.add_bcc(Bcc(test_str))
personalization.add_custom_arg(CustomArg(test_key_str, test_val_str))
personalization.add_header(Header(test_key_str, test_val_str))
personalization.add_substitution(Substitution(test_key_str, test_val_str))
msg.personalizations = [personalization]
self.assertRaisesRegex(
ValueError,
"Each msg personalization must have recipients",
self.backend._build_sg_mail,
msg,
)
delattr(msg, "personalizations")
msg.dynamic_template_data = {"obi_wan": "hello there"}
self.assertRaisesRegex(
ValueError,
r"Either msg\.to or msg\.personalizations \(with recipients\) must be set",
self.backend._build_sg_mail,
msg,
)
``` |
{
"source": "JosephAdkins/CleanSlips-Django",
"score": 2
} |
#### File: CleanSlips-Django/cleanslips/views.py
```python
from django.shortcuts import render, redirect
from django.http import HttpResponseBadRequest, HttpResponse
from django import forms
from operator import itemgetter
from io import StringIO, BytesIO
import os
import re
# local imports
from . import helpers
from . modules import callnumber
from . modules.docx_mailmerge_local.mailmerge import MailMerge
# form class
class UploadFileForm(forms.Form):
file = forms.FileField()
# main upload and processing form #############################################
def upload(request, campus, template):
# get campus name
campus_name = helpers.get_campus_name(campus)
if campus_name == None:
return render(request, 'errors.html', {'title' : 'CleanSlips | Ooops',
'campus': campus.upper(),
'template': template,
'errors' : f"Campus code '{campus.upper()}' was not found. Are you sure you have your correct 3 character campus code?"},
)
# serve up upload form
if request.method == 'GET':
file = forms.FileField()
form = UploadFileForm()
return render(request, 'upload.html', {'form': form,
'title': 'CleanSlips | '+campus_name,
'header': ('CleanSlips'),
'campus': campus.upper(),
'campus_name': campus_name})
# get spreadsheet
if request.method == "POST":
form = UploadFileForm(request.POST, request.FILES)
if form.is_valid():
filehandle = request.FILES['file']
# Check file type
if ".xls" not in str(filehandle):
return render(request, 'errors.html', {'title' : 'CleanSlips | Ooops',
'campus': campus.upper(),
'template': template,
'errors' : "Chosen file is not an .xls file. Are you sure that you chose LendingRequestReport.xls?"},
)
# read spreadsheet
ill_requests = []
# check header
rows = filehandle.get_array()
if rows[0] != ['Title', 'Author', 'Publisher', 'Publication date', 'Barcode', 'ISBN/ISSN', 'Availability', 'Volume/Issue', 'Shipping note', 'Requester email', 'Pickup at', 'Electronic available', 'Digital available', 'External request ID', 'Partner name', 'Partner code', 'Copyright Status', 'Level of Service']:
return render(request, 'errors.html', {'title' : 'CleanSlips | Ooops',
'campus': campus.upper(),
'template': template,
'errors' : "The headers on this spreadsheet don't match what CleanSlips is expecting. Are you sure that you chose LendingRequestReport.xls?"},
)
# __________ PARSE SPREADSHEET ____________________________________
for row in rows:
# skip header
if row[0] == "Title":
continue
title = row[0]
author = row[1]
publisher = row[2]
publication_date = row[3]
barcode = row[4]
isbn_issn = row[5]
availability_string = row[6]
volume_issue = row[7]
requestor_email = row[9]
pickup_at = row[10]
electronic_available = row[11]
digital_available = row[12]
external_request_id = row[13]
partner_name = row[14]
partner_code = row[15]
copyright_status = row[16]
level_of_service = row[17]
# ___________ PARSE SHIPPING NOTE _____________________________
shipping_note = row[8]
shipping_notes = shipping_note.split('||')
try:
comments = shipping_notes[0]
requestor_name = shipping_notes[1]
except:
print(f"SHIPPING NOTE FIELD - {shipping_note} - IS NOT AS EXPECTED...ATTEMPTING TO COMPENSATE...")
comments = ""
requestor_name = shipping_note
# __________ PARSE AVAILABILITY _______________________________
availability_array = availability_string.split('||')
full_availability_array = []
full_sort_string_array = []
for availability in availability_array:
# skip if on loan
if "Resource Sharing Long Loan" in availability:
continue
if "Resource Sharing Short Loan" in availability:
continue
# split availability string into parts
regex = r'(.*?),(.*?)\.(.*).*(\(\d{1,3} copy,\d{1,3} available\))'
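# Hypothetical example of the availability format this regex assumes (illustrative only,
# not taken from a real report): "Main Library,Stacks. QA76.73 .S45 2019 (1 copy,1 available)"
# would split into library="Main Library", location="Stacks", call_number=" QA76.73 .S45 2019 "
# (the trailing space is trimmed by the [:-1] below), holdings="(1 copy,1 available)".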
q = re.findall(regex, availability)
try:
matches = list(q[0])
library = matches[0]
location = matches[1]
call_number = matches[2]
holdings = matches[3]
full_availability_array.append(f"[{location} - {call_number[:-1]}]") # negative index to remove extra space
except IndexError:
library = None
location = None
call_number = None
holdings = None
full_availability_array.append(f"[{availability}]")
# normalize call number for sorting
try:
lccn = callnumber.LC(call_number)
lccn_components = lccn.components(include_blanks=True)
normalized_call_number = lccn.normalized
except:
print(f"CALL NUMBER - {call_number} - IS NOT VALID LC. ATTEMPTING TO COMPENSATE...")
normalized_call_number = None
if normalized_call_number == None:
normalized_call_number = call_number
# generate sort string
sort_string = f"{location}|{normalized_call_number}"
full_sort_string_array.append(sort_string)
# combine availability and sort fields
full_availability = "; ".join(full_availability_array)
full_sort_string = "; ".join(full_sort_string_array)
# __________ ADD TO REQUESTS DICTIONARY _______________________
ill_request = {
'Partner_name' : partner_name,
'External_request_ID' : external_request_id,
'Availability' : full_availability,
'Call_Number' : call_number,
'Comments' : comments,
'RequestorName' : requestor_name,
'VolumeIssue' : volume_issue,
'Title' : title[:40],
'Shipping_note' : requestor_name,
'Sort' : sort_string,
'Campus_Code': campus,
'Campus_Name': campus_name,
}
# add to ongoing list
ill_requests.append(ill_request)
# sort requests by location and normalized call number
requests_sorted = sorted(ill_requests, key=itemgetter('Sort'))
# _________ GENERATE LABELS _______________________________________
# stickers
if template == "stickers":
template = os.path.join(os.path.dirname(os.path.realpath(__file__)), os.path.join('static','slip_templates','campus',campus.upper(),'TEMPLATE_stickers.docx'))
document = MailMerge(template)
document.merge_rows('Shipping_note', requests_sorted)
# flags
if template == "flags":
template = os.path.join(os.path.dirname(os.path.realpath(__file__)), os.path.join('static','slip_templates','campus', campus.upper(), 'TEMPLATE_flags.docx'))
document = MailMerge(template)
document.merge_templates(requests_sorted, separator='column_break')
# generate slips in memory and send as attachment
f = BytesIO()
document.write(f)
length = f.tell()
f.seek(0)
response = HttpResponse(
f.getvalue(),
content_type='application/vnd.openxmlformats-officedocument.wordprocessingml.document'
)
response['Content-Disposition'] = 'attachment; filename=SLIPS.docx'
response['Content-Length'] = length
return response
# Other pages #################################################################
def home(request):
return render(request, 'home.html', {'title': 'CleanSlips | Home',
'header': 'CleanSlips'})
def find(request):
if request.POST:
return redirect(f"/campus={request.POST['campus']}&template={request.POST['template']}")
else:
return render(request, 'errors.html', {'title': 'CleanSlips | Ooops!',
'header': 'CleanSlips'})
def docs(request):
return render(request, 'docs.html', {'title': 'CleanSlips | Documentation',
'header': 'CleanSlips'})
def contact(request):
return render(request, 'contact.html', {'title': 'CleanSlips | Contact',
'header': 'CleanSlips'})
``` |
{
"source": "joseph-ai/aitoolkit",
"score": 3
} |
#### File: math/scalar/Operation.py
```python
import math
from ..MathOp import MathOp
class SinOp(MathOp):
def __init__(self, x):
self.input_x = x
super().__init__()
def calculate(self):
self.result = math.sin(self.input_x.value)
return self.result
def backward(self, edge_value):
return math.cos(edge_value.value)
def __str__(self):
return "sin(%s) = %s" % (self.input_x, self.result)
class ExpOp(MathOp):
def __init__(self, x):
self.input_x = x
super().__init__()
def calculate(self):
self.result = math.exp(self.input_x.value)
return self.result
def backward(self, edge_value):
return math.exp(edge_value)
def __str__(self):
return "e ** (%s) = %s" % (self.input_x, self.result)
class LnOp(MathOp):
def __init__(self, x):
self.input_x = x
super().__init__()
def calculate(self):
self.result = math.log(self.input_x.value)
return self.result
def backward(self, edge_value):
return 1 / edge_value.value
def __str__(self):
return "ln(%s) = %s" % (self.input_x, self.result)
```
#### File: toolkit/flow/FlowCreator.py
```python
from .NxFlow import NxFlow
from .SnapFlow import SnapFlow
class FlowCreator(object):
def __init__(self, *args, **kwargs):
pass
@classmethod
def default_creator(cls, name="default"):
if "snap" in name:
return SnapFlow()
return NxFlow()
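# Hedged usage sketch (assuming NxFlow and SnapFlow construct without arguments, as above):
#   FlowCreator.default_creator("snap_graph")  -> SnapFlow instance, because "snap" is in the name
#   FlowCreator.default_creator()              -> NxFlow instance (the "default" fallback)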
``` |
{
"source": "JosephAkim/PetNFT",
"score": 3
} |
#### File: scripts/pet_collectible/create_of_collectible.py
```python
from brownie import PetCollection, accounts, config
from scripts.Useful_scripts import get_petName, fund_with_link
import time
def main():
dev = accounts.add(config["wallets"]["from_key"])
pet_collectible = PetCollection[len(PetCollection) - 1]
fund_with_link(pet_collectible.address)
transaction = pet_collectible.createPetCollectible("None", {"from": dev})
print("Waiting on second transaction...")
# wait for the 2nd transaction
transaction.wait(1)
time.sleep(35)
requestId = transaction.events["requestedCollectible"]["requestId"]
token_id = pet_collectible.requestIdToTokenId(requestId)
pet = get_petName(pet_collectible.tokenIdToBreed(token_id))
print("Pet of tokenId {} is {}".format(token_id, pet))
```
#### File: scripts/pet_collectible/fund.py
```python
from brownie import PetCollection
from scripts.Useful_scripts import fund_with_link
def main():
pet_collectible = PetCollection[len(PetCollection) - 1]
fund_with_link(pet_collectible.address)
``` |
{
"source": "josephalbaph/acam",
"score": 2
} |
#### File: doctype/acam_factor/acam_factor.py
```python
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe.utils import flt
class AcamFactor(Document):
def validate(self):
self.validate_check_factor()
def validate_check_factor(self):
self.check_factors = flt(self.distribution_services)+flt(self.distribution_connection_services) \
+flt(self.regulated_retail_services)+flt(self.non_regulated_retail_services)+flt(self.supplier_of_last_resort) \
+flt(self.wholesale_aggregator)+flt(self.related_business)+flt(self.generation)+flt(self.supply_services)+flt(self.general_purpose)
``` |
{
"source": "JosephAMumford/CodingDojo",
"score": 2
} |
#### File: apps/main/views.py
```python
from __future__ import unicode_literals
from django.shortcuts import render, redirect, HttpResponse
from django.core.urlresolvers import reverse
from django.contrib import messages
from .models import User
# Create your views here.
def users(request):
context = {
'user_list' : User.objects.all()
}
return render(request, "main/users.html", context)
def new_user(request):
return render(request, "main/new.html")
def edit_user(request, id):
context = {
'user' : User.objects.get(id=id)
}
return render(request, "main/edit.html", context)
def show_user(request, id):
context = {
'user' : User.objects.get(id=id)
}
return render(request, "main/show.html", context)
def create(request):
errors = User.objects.validator(request.POST)
if(len(errors)):
for tag, error in errors.iteritems():
messages.error(request, error, extra_tags=tag)
return redirect('new_user')
else:
User.objects.create(first_name=request.POST.get('first_name'), last_name=request.POST.get('last_name'), email=request.POST.get('email'))
return redirect ('users')
def destroy(request, id):
user = User.objects.get(id=id)
user.delete()
return redirect ('users')
def update(request):
errors = User.objects.validator(request.POST)
if(len(errors)):
for tag, error in errors.iteritems():
messages.error(request, error, extra_tags=tag)
return redirect('edit_user', id = request.POST.get('user_id'))
else:
user_id = request.POST.get('user_id')
user = User.objects.get(id=user_id)
user.first_name = request.POST.get('first_name')
user.last_name = request.POST.get('last_name')
user.email = request.POST.get('email')
user.save()
return redirect ('users')
```
#### File: FlaskFundamentals/AJAXNinjas/server.py
```python
from flask import Flask, render_template, request, redirect, url_for, json, jsonify
app = Flask(__name__)
@app.route('/')
def index():
return render_template("index.html")
@app.route('/process', methods=['post'])
def process():
ninja = ""
data = request.form["color"]
# Compare color and set name and image file path to send back
if data == "red":
ninja = "Raphael"
image_path = "static/raphael.jpg"
elif data == "blue":
ninja = "Leonardo"
image_path = "static/leonardo.jpg"
elif data == "orange":
ninja = "Michelangelo"
image_path = "static/michelangelo.jpg"
elif data == "purple":
ninja = "Donatello"
image_path = "static/donatello.jpg"
else:
ninja = 'April'
image_path = "static/notapril.jpg"
return jsonify(name=ninja, file_path=image_path, color=data)
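# For example, posting color=red returns a JSON body roughly like:
# {"name": "Raphael", "file_path": "static/raphael.jpg", "color": "red"}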
app.run(debug=True)
```
#### File: FlaskFundamentals/GreatNumberGame/server.py
```python
from flask import Flask, render_template, request, redirect, session
import random
app = Flask(__name__)
app.secret_key = 'MySecretKey'
@app.route('/')
def index():
# Initialize session values
if 'random_number' not in session:
session['random_number'] = random.randrange(0,101)
if 'number_of_guesses' not in session:
session['number_of_guesses'] = 0
if 'state' not in session:
session['state'] = "empty"
if 'user_guess' not in session:
session['user_guess'] = 0
if 'current_guesses' not in session:
session['current_guesses'] = ""
return render_template('index.html')
@app.route('/process', methods=['post'])
def process():
# Keep track of how many guesses
session['number_of_guesses'] += 1
# Keep track of which numbers have been guessed already
session['current_guesses'] += str(request.form['user_guess']) + ", "
session['user_guess'] = request.form['user_guess']
# Check if guess is higher, lower, or equal to random number
if int(session['user_guess']) == session['random_number']:
session['state'] = "won"
if int(session['user_guess']) < session['random_number']:
session['state'] = "low"
if int(session['user_guess']) > session['random_number']:
session['state'] = "high"
return redirect('/')
@app.route('/reset', methods=['POST'])
def reset():
# Reset session information
session['random_number'] = random.randrange(0,101)
session['number_of_guesses'] = 0
session['state'] = "empty"
session['user_guess'] = 0
session['current_guesses'] = ""
return redirect('/')
app.run(debug=True)
```
#### File: FlaskMySQL/FullFriends/server.py
```python
from flask import Flask, request, redirect, render_template, session, flash
from mysqlconnection import MySQLConnector
import datetime
import re
app = Flask(__name__)
app.secret_key = "ThisIsSecret!"
mysql = MySQLConnector(app,'full_friends_db')
DATE_REGEX = re.compile(r'^(1[0-2]|0[1-9])/(3[01]|[12][0-9]|0[1-9])/[0-9]{4}$')
@app.route('/')
def index():
query = "SELECT * FROM friends"
friends = mysql.query_db(query)
return render_template('index.html', all_friends=friends)
@app.route('/add', methods=['POST'])
def create():
error = False
# DATE VALIDATION
if len(request.form['friend_since']) < 1:
print "No length"
flash("Date cannot be blank")
error = True
elif not DATE_REGEX.match(request.form['friend_since']):
print "No format"
flash("Invalid date, use mm/dd/yyyy format")
error = True
else:
current_time = datetime.datetime.now()
temp_time = datetime.datetime.strptime(request.form['friend_since'], "%m/%d/%Y")
if temp_time >= current_time:
print "No future"
flash("Invalid date, cannot be equal or in the future")
error = True
if(error == True):
return redirect('/')
else:
print "No error"
query = "INSERT INTO friends (name, age, friend_since, year) VALUES (:name, :age, DATE_FORMAT(STR_TO_DATE(:friend_since, '%m/%d/%Y'), '%M %e, %Y'), DATE_FORMAT(STR_TO_DATE(:friend_since, '%m/%d/%Y'), '%Y'))"
data = {
'name': request.form['name'],
'age': request.form['age'],
'friend_since': request.form['friend_since']
}
mysql.query_db(query, data)
return redirect('/')
app.run(debug=True)
```
#### File: FlaskMySQL/LoginAndRegistration/server.py
```python
from flask import Flask, request, redirect, render_template, session, flash
from mysqlconnection import MySQLConnector
import datetime
import re
import md5 # imports the md5 module to generate a hash
app = Flask(__name__)
app.secret_key = "ThisIsSecret!"
mysql = MySQLConnector(app,'login_registration_db')
EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
PASSWORD_REGEX = re.compile(r'\d.*[A-Z]|[A-Z].*\d')
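# Note: because re.match anchors at the start of the string, this pattern only accepts
# passwords that BEGIN with a digit or an uppercase letter (e.g. "Secret99" matches,
# "secret99X" does not), even though the flash message implies any position is fine.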
# Main page, login or register
@app.route('/')
def index():
return render_template('index.html')
# Go back to main page, delete old flash messages
@app.route('/home')
def home():
session.pop('_flashes', None)
return redirect('/')
# View current user table
@app.route('/view')
def view():
query = "SELECT * FROM users"
users_list = mysql.query_db(query)
return render_template('users.html', users=users_list)
# Process login request
@app.route('/login', methods=['POST'])
def login():
error = False
hashed_password = md5.new(request.form['password']).hexdigest()
# Get info from database
query = "SELECT username, password FROM users WHERE username = :username"
data = {
'username': request.form['username']
}
username = mysql.query_db(query,data)
# Check if username exists, then compare stored password and input password
if len(username) != 0:
if username[0]['password'] != hashed_password:
flash("Username or password is incorrect")
return redirect('/')
else:
session['username'] = request.form['username']
session['login_success'] = True
session['register_success'] = False
return redirect('/success')
else:
flash("Username or password is incorrect")
return redirect('/')
# Route to success page, will vary depending on login or register based on session
@app.route('/success')
def success():
return render_template('success.html')
# Process registration form
@app.route('/register', methods=['POST'])
def process():
error = False
# FIRST NAME VALIDATION
if len(request.form['first_name']) < 2:
flash("First Name must be two or more letters")
error = True
elif request.form['first_name'].isalpha() == False:
flash("First Name cannot contain numbers")
error = True
# LAST NAME VALIDATION
if len(request.form['last_name']) < 2:
flash("Last Name must be two or more letters")
error = True
elif request.form['last_name'].isalpha() == False:
flash("Last Name cannot contain numbers")
error = True
# USERNAME VALIDATION
query = "SELECT username FROM users WHERE username = :username"
data = {
'username': request.form['username']
}
username = mysql.query_db(query,data)
if len(username) != 0:
flash(request.form['username'] + " has already been registered")
error = True
# EMAIL VALIDATION
if len(request.form['email_address']) < 1:
flash("Email Address cannot be blank")
error = True
elif not EMAIL_REGEX.match(request.form['email_address']):
flash("Invalid Email Address")
error = True
# PASSWORD VALIDATION
if len(request.form['password']) < 1:
flash("Password cannot be blank")
error = True
elif len(request.form['password']) < 8:
flash("Password must be at least 8 characters")
error = True
elif not PASSWORD_REGEX.match(request.form['password']):
flash("Invalid Password, must contain at least one uppercase and one number")
error = True
# CONFIRM PASSWORD VALIDATION
if len(request.form['confirm_password']) < 1:
flash("Confirm Password cannot be blank")
error = True
elif request.form['password'] != request.form['confirm_password']:
flash("Passwords do not match")
error = True
if error == True:
return redirect('/')
if error == False:
# Add to database
hashed_password = <PASSWORD>(request.form['password']).hexdigest()
query = "INSERT INTO users (first_name, last_name, username, email_address, password, created_at) VALUES (:first_name, :last_name, :username, :email_address, :password, NOW())"
data = {
'first_name': request.form['first_name'],
'last_name': request.form['last_name'],
'email_address': request.form['email_address'],
'username': request.form['username'],
'password': <PASSWORD>,
}
mysql.query_db(query, data)
flash(request.form['username'] + " was added to the system")
# Create session
session['username'] = request.form['username']
session['register_success'] = True
session['login_success'] = False
return redirect('/success')
app.run(debug=True)
#Validations and Fields to Include
#1. First Name - letters only, at least 2 characters and that it was submitted
#2. Last Name - letters only, at least 2 characters and that it was submitted
#3. Email - Valid Email format, and that it was submitted
#4. Password - at least 8 characters, and that it was submitted
#5. Password Confirmation - matches password
```
#### File: Python/PythonFundamentals/MakingDictionaries.py
```python
name = ["Anna", "Eli", "Pariece", "Brendan", "Amy", "Shane", "Oscar"]
favorite_animal = ["horse", "cat", "spider", "giraffe", "ticks", "dolphins", "llamas"]
# This function makes a dictionary out of two lists. It checks whether both
# lists are the same size; if not, the shorter list supplies the keys
def make_dict(list1, list2):
new_dict = {}
if len(list1) != len(list2):
if len(list1) > len(list2):
for i in range(0, len(list2)):
new_dict[list2[i]] = list1[i]
else:
for i in range(0, len(list1)):
new_dict[list1[i]] = list2[i]
else:
for i in range(0, len(list1)):
new_dict[list1[i]] = list2[i]
return new_dict
my_dictionary = make_dict(name, favorite_animal)
print my_dictionary
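# A quick sketch with made-up lists showing the unequal-length case described above:
# the shorter list supplies the keys, so only the first two names become values.
short_animals = ["owl", "ferret"]
uneven_dictionary = make_dict(name, short_animals)
print uneven_dictionary
# prints (key order may vary): {'owl': 'Anna', 'ferret': 'Eli'}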
```
#### File: Python/PythonOOP/animals.py
```python
class Animal(object):
def __init__(self,name,health):
self.name = name
self.health = 50
def walk(self):
self.health = self.health - 1
return self
def run(self):
self.health = self.health - 5
return self
def display_health(self):
print "Health: " + str(self.health)
return self
# Create instance of Animal
animal1 = Animal("Edgar",30)
animal1.walk().walk().walk().run().run().display_health()
class Dog(Animal):
def pet(self):
self.health = self.health + 5
return self
# Create instance of Dog
dog1 = Dog("Raspberry",150)
dog1.walk().walk().walk().run().run().pet().display_health()
class Dragon(Animal):
def fly(self):
self.health = self.health - 10
return self
def display_health(self):
print "I am a Dragon"
return self
# Create instance of Dragon
dragon1 = Dragon("Phantoon", 500)
dragon1.walk().run().fly().fly().fly().display_health()
# Create new Animal
animal2 = Animal("Probos",200)
#animal2.pet()
#AttributeError: 'Animal' object has no attribute 'pet'
#animal2.fly()
#AttributeError: 'Animal' object has no attribute 'fly'
animal2.display_health()
#Health: 50 - does not say "I am a Dragon"
```
#### File: Python/PythonOOP/hospital.py
```python
class Patient(object):
def __init__(self,id,name,allergies):
self.id = id
self.name = name
self.allergies = allergies
self.bed_number = None
def display_info(self):
print " "
print "Patient: " + self.name
print "Id: " + str(self.id)
# Build a comma-separated string of the patient's allergies, if there are any
allergy_string = ""
if len(self.allergies) > 0:
for i in range(0,len(self.allergies)):
allergy_string = allergy_string + self.allergies[i]
if i < len(self.allergies) - 1:
allergy_string = allergy_string + ", "
print "Allergies: " + allergy_string
print "Bed Number: " + str(self.bed_number)
# Class to hold hospital information
class Hospital(object):
def __init__(self, name):
self.patients = []
self.name = name
self.capacity = 50
# Create a boolean list to keep track of which beds are in use
self.beds = []
for i in range(0,self.capacity):
self.beds.append(False)
def admit(self, _patient):
if len(self.patients) < self.capacity:
self.patients.append(_patient)
# Find the first unused bed to assign patient to
for i in range(0,self.capacity):
if self.beds[i] == False:
self.beds[i] = True
_patient.bed_number = i+1
break
return self
def discharge(self, _patient):
index = 0
for i in range(0, len(self.patients)):
if _patient.name == self.patients[i].name:
index = i
break
self.patients.pop(index)
self.beds[_patient.bed_number - 1] = False  # free the patient's actual bed, not the list position
_patient.bed_number = None
return self
def display_info(self):
print ""
print "====================="
print self.name
print "====================="
beds_used = len(self.patients)
print str(beds_used) + " of " + str(self.capacity) + " beds in use"
for i in range(0,len(self.patients)):
self.patients[i].display_info()
#Create patients
patient1 = Patient(100,"<NAME>",["Peanuts"])
patient2 = Patient(110,"<NAME>",["Milk","Shellfish","Cinnamon"])
#Create hospital
hospital = Hospital("Ninja Hopsital")
hospital.admit(patient1)
hospital.admit(patient2)
hospital.display_info()
#Discharge patient
hospital.discharge(patient1)
hospital.display_info()
#Print patient to show bed_number was reset
print " "
patient1.display_info()
```
#### File: super_test/modules/Human.py
```python
class Human(object):
def __init__(self,health):
self.health = health
#self.intelligence = intelligence
#self.stealth = stealth
#self.strength = strength
```
#### File: Python/PythonOOP/underscore.py
```python
class Underscore(object):
# Return a list of values where the function has been applied to each element
def map(self, list, function):
for i in range(0,len(list)):
list[i] = function(list[i])
return list
# Pass an initial value where needed, e.g. for multiplication: if the initial value is zero, the result
# will always be zero. For addition, pass 0 or leave it blank
def reduce(self, list, function,initial=None):
if initial == None:
value = 0
else:
value = initial
for i in list:
value = function(value,i)
return value
# Return index of first element which satisfies function
def find(self, list, function):
index = None
for i in range(0,len(list)):
if function(list[i]) == True:
index = i
break
return index
# Return a list of all values that satisfy the function
def filter(self, list, function):
values = []
for i in range(0,len(list)):
if function(list[i]) == True:
values.append(list[i])
return values
# Return a list of all values that do not satisfy the function
def reject(self, list, function):
values = []
for i in range(0,len(list)):
if function(list[i]) == False:
values.append(list[i])
return values
# Create underscore object
_ = Underscore()
# Use map function to apply function to all list elements
mapped = _.map([1,2,3,4], lambda x: x * x)
print mapped
# Use reduce function to reduce list of values to a single value with supplied function
reduced = _.reduce([4,1,3,2,5,2], lambda x,y: x + y, 0)
print reduced
# Use find function to return first result (index of list) where expression is true
found = _.find([1,4,3,2,5,6], lambda x: x == 2)
print found
# Use function to return all values that return true when evaluated with function
filtered = _.filter([1,2,3,4,5,6], lambda x: x % 2 == 0)
print filtered
# Use function to return all values that return false when evaluated with function
rejected = _.reject([1,2,3,4,5,6], lambda x: x % 2 == 0)
print rejected
```
#### File: TDD/TDD_I/test_insert.py
```python
import unittest
from insert_value import insert_val_at
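# For reference, a minimal insert_val_at consistent with these tests might look like the
# sketch below (hypothetical -- insert_value.py itself is not shown in this excerpt):
#   def insert_val_at(index, a_list, value):
#       if index < 0 or index > len(a_list):
#           return False
#       return a_list[:index] + [value] + a_list[index:]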
class InsertValueTest(unittest.TestCase):
def setUp(self):
self.test_list = [0,1,2,3,4]
self.result = insert_val_at(2, self.test_list, 100)
self.result2 = insert_val_at(6, self.test_list, 100)
def testInsertAtIndexTwo(self):
return self.assertEqual([0,1,100,2,3,4], self.result)
def testReturnFalseForInvalidIndex(self):
return self.assertEqual(False, self.result2)
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "joseph-analyticscc/Discord-Role-Manager",
"score": 3
} |
#### File: Discord-Role-Manager/Role_Manager_Bot/role_manager.py
```python
from os import path, sys
# Discord API Imports
import discord
from discord.ext import commands
# Google Docs/Sheets API Imports
import gspread
from oauth2client.service_account import ServiceAccountCredentials
from googleapiclient.discovery import build
# Assistance Files Imports
from media import *
from request_data import *
""" Google API Initializations """
SCOPES = link("SCOPE")
CREDENTIALS = ServiceAccountCredentials.from_json_keyfile_name(path.join(sys.path[0], "credentials.json"), SCOPES)
CLIENT = gspread.authorize(CREDENTIALS)
SERVICE = build("sheets", "v4", credentials=CREDENTIALS)
""" Discord API Initializations """
BOT = commands.Bot(command_prefix='!')
@BOT.event
async def on_ready():
print("BOT is ready and running!")
"""
!setuphelp - Admin Only
This command sends an embed with direct instructions on how
to get the Role Manager Bot set up and running on a server.
"""
@BOT.command()
@commands.has_permissions(administrator=True)
async def setuphelp(ctx):
embed = discord.Embed(title="Role Manager Setup Tutorial", description="Click the link above for detailed instructions with pictures!", url=link("TUTORIAL"), color=color("GREEN"))
embed.add_field(name="Step 1:", value="Create a Google Sheets Worksheet.", inline=False)
embed.add_field(name="Step 2:", value="Click the Share button on the top right and add this e-mail as an author: ```\n" + CREDENTIALS.service_account_email + "```", inline=False)
embed.add_field(name="Step 3:", value="Select 8 columns and right-click >> Insert 8 Columns in your worksheet.", inline=False)
embed.add_field(name="Step 4:", value="Run the !configure command and link your server with your Google spreadsheet.\n```!configure <WORKSHEET ID>```", inline=False)
embed.add_field(name="Step 5:", value="Export the role permissions onto the Google Sheet using:\n``` !export ```", inline=False)
embed.add_field(name="Finished!", value="The bot is now set up and you can start managing your roles! Make sure you provide a valid Spreadsheet ID, or you will encounter an error!", inline=False)
embed.set_thumbnail(url=picture("GSHEET"))
await ctx.send(embed=embed)
"""
!configure - Owner Only
This command creates a file for the server in the database (serverdata)
and stores the Google Worksheet ID inside a .txt file named after the server's ID.
If a file already exists, it updates the stored spreadsheet ID instead of creating a new file.
"""
@BOT.command()
@commands.has_permissions(administrator=True)
async def configure(ctx, *, spreadsheet_id=None):
if spreadsheet_id is not None and len(spreadsheet_id) == 44: # Ensure input was given and that it is valid.
if ctx.message.author.id == ctx.guild.owner_id: # If the sender is the server owner, proceed.
file_name = str(ctx.guild.id) + ".txt" # The name of the file is that of the server's unique ID.
try: # If the file exists, open and read it and give the link.
with open(path.join("serverdata", file_name), "r+") as server_file:
server_file.truncate(0)
server_file.write(spreadsheet_id)
embed = discord.Embed(title="You already have a worksheet!", description="Your spreadsheet ID has been updated instead!", color=color("GREEN"))
embed.add_field(name="Your worksheet has been linked! Here's the link: ", value=link("SPREADSHEET") + spreadsheet_id)
embed.set_thumbnail(url=picture("GSHEET"))
await ctx.send(embed=embed)
except FileNotFoundError: # If it doesn't, create it and give the complete link.
with open(path.join("serverdata", file_name), "w+") as server_file:
server_file.write(spreadsheet_id)
embed = discord.Embed(title="Worksheet Configuration Complete!", description="Your server has been added to the database.", color=color("GREEN"))
embed.add_field(name="Your worksheet has been linked! Here's the link: ", value=link("SPREADSHEET") + spreadsheet_id)
embed.set_thumbnail(url=picture("GSHEET"))
await ctx.send(embed=embed)
except Exception as exception:
print("Server ID:" + ctx.guild.id + "\n Exception:" + str(exception))
embed = discord.Embed(title="Something went wrong!", description="Please contact the BOT owner on GitHub!", color=color("RED"))
embed.add_field(name="Error code: ", value=str(exception))
embed.set_thumbnail(url=picture("ERROR"))
await ctx.send(embed=embed)
else: # If the sender is a simple Admin, refuse permission with an error embed.
embed = discord.Embed(title="Access Denied!", description="You have no proper authorization for this command.", color=color("RED"))
embed.add_field(name="This command may only be used by the server owner! ", value='<@' + str(ctx.guild.owner_id) + '>')
embed.set_thumbnail(url=picture("ERROR"))
await ctx.send(embed=embed)
else: # If no valid ID was given, ask for a valid ID and show instructions.
embed = discord.Embed(title="No worksheet ID specified!", description="Please specify a valid worksheet ID.", color=color("RED"))
embed.add_field(name="If want to see how to setup this bot use the command: ", value="```!setuphelp```", inline=False)
embed.set_thumbnail(url=picture("ERROR"))
await ctx.send(embed=embed)
"""
!export - Owner Only
This command exports all the roles and their permissions
from the Discord server, organizes them, and writes them
to the Google Sheet assigned to that server.
"""
@BOT.command()
@commands.has_permissions(administrator=True)
async def export(ctx):
if ctx.message.author.id == ctx.guild.owner_id:
file_name = str(ctx.guild.id) + ".txt"
try:
with open(path.join("serverdata", file_name), "r+") as server_file:
spreadsheet_id = server_file.read()
try:
role_list = ctx.guild.roles # Export all the roles from a server. List of role type Objects.
role_list.reverse()
role_names = [role.name for role in role_list] # Get all the role names from the role Objects.
role_permissions = {role: dict(role.permissions) for role in role_list} # Put Roles in a dictionary and their permission_values in sub-dictionaries.
permission_names = list(role_permissions[role_list[0]].keys()) # Get all the permission names.
permission_values = permission_values_to_emojis(list(role_permissions.values()), permission_names) # Get all of the permissions values and convert them to √ or X.
clear_request = SERVICE.spreadsheets().values().clear(spreadsheetId=spreadsheet_id, range="A1:AH1000", body=clear_request_body())
titles_request = SERVICE.spreadsheets().values().batchUpdate(spreadsheetId=spreadsheet_id, body=titles_request_body(role_names, permission_names))
values_request = SERVICE.spreadsheets().values().batchUpdate(spreadsheetId=spreadsheet_id, body=values_request_body(permission_values))
clear_request.execute() # Clears the spreadsheet.
titles_request.execute()
values_request.execute() # Handling and execution of the requests to the Google API. See request_data.py for more info.
embed = discord.Embed(title="Permission Export Complete!", description="Your server's role permission_values have been successfully exported!", color=color("GREEN"))
embed.add_field(name="Here's the link to your worksheet: ", value=link("SPREADSHEET") + spreadsheet_id)
embed.set_thumbnail(url=picture("GSHEET"))
await ctx.send(embed=embed)
except Exception as exception:
print("Server ID:" + ctx.guild.id + "\n Exception:" + str(exception))
embed = discord.Embed(title="Worksheet unavailable!", description="There was an issue trying to access your server's worksheet!", color=color("RED"))
embed.add_field(name="Make sure you have followed the !setuphelp steps correctly. If the issue persists, contact the BOT Owner.", value="```!setuphelp```")
embed.set_thumbnail(url=picture("ERROR"))
await ctx.send(embed=embed)
except FileNotFoundError: # If the file does not exist, prompt user to configure.
embed = discord.Embed(title="No file found!", description="There was an issue trying to import your server's file from the database.", color=color("RED"))
embed.add_field(name="You have to configure your server first. Please try the command !setuphelp for more information.", value="```!setuphelp```")
embed.set_thumbnail(url=picture("ERROR"))
await ctx.send(embed=embed)
else: # If the sender is a simple Admin, refuse permission with an error embed.
embed = discord.Embed(title="Access Denied!", description="You have no proper authorization for this command.", color=color("RED"))
embed.add_field(name="This command may only be used by the server owner! ", value='<@' + str(ctx.guild.owner_id) + '>')
embed.set_thumbnail(url=picture("ERROR"))
await ctx.send(embed=embed)
"""
BOT RUN Command that logs in the bot with our credentials.
Has to be in the end of the file.
"""
BOT.run('BOT_TOKEN_HERE')
``` |
{
"source": "JosephAnderson234/discrod-bot",
"score": 3
} |
#### File: discrod-bot/test/test2.py
```python
import discord
from discord.utils import get
from discord.ext import commands
import os
import youtube_dl
client = commands.Bot(command_prefix="!")
@client.command()
async def conectar(ctx):
canal = ctx.message.author.voice.channel
if not canal:
await ctx.send("No estas conectado a un canal de voz")
return
voz = get(client.voice_clients, guild=ctx.guild)
if voz and voz.is_connected():
await voz.move_to(canal)
else:
voz = await canal.connect()
client.run('<KEY>')
``` |
{
"source": "JosephAnderson234/Program",
"score": 3
} |
#### File: mainI/test/test2.py
```python
from tkinter import *
import tkinter
top = Tk()
CheckVar1 = BooleanVar()
CheckVar2 = BooleanVar()
C1 = Checkbutton(top, text = "Music", variable = CheckVar1, onvalue = True, offvalue = False, height=5, width = 20, )
C2 = Checkbutton(top, text = "Video", variable = CheckVar2, onvalue = True, offvalue = False, height=5, width = 20)
C1.pack()
C2.pack()
def tverificar():
print(CheckVar1.get())
print(CheckVar2.get())
boton = Button(top, text = "Verificar", command = tverificar)
boton.pack()
top.mainloop()
``` |
{
"source": "JosephAnderson234/Py-DownloadTube",
"score": 3
} |
#### File: Py-DownloadTube/test/test3.py
```python
import pafy
from colorama import init, Fore, Back, Style, Cursor
link = "https://www.youtube.com/watch?v=czKFHqlH158"
video = pafy.new(link)
best = video.getbest()
def XD(total,recvd,ratio,rate,eta):
msg = "Se descargó "+str(round(recvd/1000000, 2))+" de "+str(round(total/1000000, 2))+" Mg a una velocidad de "+str(int(rate))+" kbps/s"
print(Fore.CYAN + "========================================================================================")
print(Cursor.UP(1)+Cursor.FORWARD(20)+Fore.YELLOW+str(msg))
best.download(quiet=True, callback=XD)
``` |
{
"source": "JosephAntony1/led-control",
"score": 3
} |
#### File: led-control/ledcontrol/animationpatterns.py
```python
from random import random
from enum import Enum
import ledcontrol.driver as driver
import ledcontrol.utils as utils
ColorMode = Enum('ColorMode', ['hsv', 'rgb'])
# Primary animations that generate patterns in HSV or RGB color spaces
# return color, mode
def blank(t, dt, x, y, prev_state):
return (0, 0, 0), ColorMode.hsv
static_patterns = [0, 1, 2] # pattern IDs that display a solid color
default = {
0: {
'name': 'Static Color',
'primary_speed': 0.0,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
return palette(0), hsv
'''
},
1: {
'name': 'Static White',
'primary_speed': 0.0,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
return (0, 0, 1), hsv
'''
},
2: {
'name': 'Static Gradient 1D',
'primary_speed': 0.0,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
return palette(x), hsv
'''
},
3: {
'name': 'Static Gradient Mirrored 1D',
'primary_speed': 0.0,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
return palette_mirrored(x), hsv
'''
},
10: {
'name': 'Hue Cycle 1D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
return (t + x, 1, 1), hsv
'''
},
20: {
'name': 'Hue Cycle Quantized 1D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
hue = (t + x) % 1
return (hue - (hue % 0.1666), 1, 1), hsv
'''
},
30: {
'name': 'Hue Scan 1D',
'primary_speed': 0.1,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
return (wave_triangle(t) + x, 1, 1), hsv
'''
},
31: {
'name': 'Hue Bounce 1D',
'primary_speed': 0.1,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
return (wave_sine(t) + x, 1, 1), hsv
'''
},
40: {
'name': 'Hue Waves 1D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
h = (x + t) * 0.5 + x + wave_sine(t)
return (h, 1, wave_sine(h + t)), hsv
'''
},
50: {
'name': 'Hue Ripples 1D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
wave1 = wave_sine(t / 4 + x)
wave2 = wave_sine(t / 8 - x)
wave3 = wave_sine(x + wave1 + wave2)
return (wave3 % 0.15 + t, 1, wave1 + wave3), hsv
'''
},
100: {
'name': 'Palette Cycle 1D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
return palette(t + x), hsv
'''
},
110: {
'name': 'Palette Cycle Mirrored 1D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
return palette_mirrored(t + x), hsv
'''
},
120: {
'name': 'Palette Cycle Quantized 1D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
t = (t + x) % 1
return palette(t - (t % (1 / palette_length()))), hsv
'''
},
130: {
'name': 'Palette Cycle Random 1D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
t = t + x
i = (t - (t % 0.2)) / 0.2
return palette(i * 0.618034), hsv
'''
},
140: {
'name': 'Palette Scan Mirrored 1D',
'primary_speed': 0.1,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
return palette_mirrored(wave_triangle(t) + x), hsv
'''
},
141: {
'name': 'Palette Bounce Mirrored 1D',
'primary_speed': 0.1,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
return palette_mirrored(wave_sine(t) + x), hsv
'''
},
150: { # Performance isn't as good as it could be
'name': 'Palette Waves 1D',
'primary_speed': 0.05,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
h = (x + t) * 0.1 + x + wave_sine(t)
c = palette(wave_triangle(h))
return (c[0], c[1], wave_sine(h + t)), hsv
'''
},
160: {
'name': 'Palette Ripples 1D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
wave1 = wave_sine(t / 4 + x)
wave2 = wave_sine(t / 8 - x)
wave3 = wave_sine(x + wave1 + wave2)
c = palette(wave3 % 0.15 + t)
return (c[0], c[1], wave1 + wave3), hsv
'''
},
161: {
'name': 'Palette Ripples (Fast Cycle) 1D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
wave1 = wave_sine(t / 4 + x)
wave2 = wave_sine(t / 8 - x)
wave3 = wave_sine(x + wave1 + wave2)
c = palette(wave3 % 0.8 + t)
return (c[0], c[1], wave1 + wave3), hsv
'''
},
170: {
'name': 'Palette Plasma 2D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
v = plasma_sines(x, y, t, 1.0, 0.5, 0.5, 1.0)
return palette(wave_triangle(v)), hsv
'''
},
180: {
'name': 'Palette Fractal Plasma 2D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
v = plasma_sines_octave(x, y, t, 7, 2.0, 0.5)
return palette(wave_triangle(v)), hsv
'''
},
190: {
'name': 'Palette Twinkle 1D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
v = prev_state[2] - dt
if v <= 0:
c = palette(t + x)
return (c[0], c[1], random.random()), hsv
elif v > 0:
return (prev_state[0], prev_state[1], v), hsv
else:
return (0, 0, 0), hsv
'''
},
200: {
'name': 'Palette Perlin Noise 2D',
'primary_speed': 0.3,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
return palette(perlin_noise_3d(x, y, t)), hsv
'''
},
300: {
'name': 'RGB Sines 1D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
return (wave_sine(t + x),
wave_sine((t + x) * 1.2),
wave_sine((t + x) * 1.4)), rgb
'''
},
310: {
'name': 'RGB Cubics 1D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
return (wave_cubic(t + x),
wave_cubic((t + x) * 1.2),
wave_cubic((t + x) * 1.4)), rgb
'''
},
320: {
'name': 'RGB Ripples 1 1D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
v0 = x + (wave_sine(t)) + wave_sine(x + 0.666 * t)
v1 = x + (wave_sine(t + 0.05)) + wave_sine(x + 0.666 * t + 0.05)
v2 = x + (wave_sine(t + 0.1)) + wave_sine(x + 0.666 * t + 0.1)
return (0.01 / (wave_triangle(v0) + 0.01), 0.01 / (wave_triangle(v1) + 0.01), 0.01 / (wave_triangle(v2) + 0.01)), rgb
'''
},
330: {
'name': 'RGB Plasma (Spectrum Sines) 2D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
v = plasma_sines(x, y, t, 1.0, 0.5, 0.5, 1.0)
return (wave_sine(v),
wave_sine(v + 0.333),
wave_sine(v + 0.666)), rgb
'''
},
340: {
'name': 'RGB Plasma (Fire Sines) 2D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
v = plasma_sines(x, y, t, 1.0, 0.5, 0.5, 1.0)
return (0.9 - wave_sine(v),
wave_sine(v + 0.333) - 0.1,
0.9 - wave_sine(v + 0.666)), rgb
'''
},
350: {
'name': 'RGB Fractal Plasma (Fire Sines) 2D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
v = plasma_sines_octave(x, y, t, 7, 2.0, 0.5)
return (1.0 - wave_sine(v),
wave_sine(v + 0.333),
1.0 - wave_sine(v + 0.666)), rgb
'''
},
360: {
'name': 'Blackbody Cycle 1D',
'primary_speed': 0.2,
'primary_scale': 1.0,
'source': '''
def pattern(t, dt, x, y, z, prev_state):
v = wave_triangle(t + x)
c = blackbody_to_rgb(v * v * 5500 + 1000)
return (c[0] * v, c[1] * v, c[2] * v), rgb
'''
},
}
# Secondary animations that transform finalized colors to add brightness effects
# return brightness, colorRGB
def sine_1d(t, dt, x, y, z, prev_state, in_color):
return in_color, driver.wave_sine(t + x)
def cubic_1d(t, dt, x, y, z, prev_state, in_color):
return in_color, driver.wave_cubic(t + x)
def ramp_1d(t, dt, x, y, z, prev_state, in_color):
return in_color, (t + x) % 1 # test ramp^2
def bounce_linear_1d(t, dt, x, y, z, prev_state, in_color):
return in_color, driver.wave_sine(x + driver.wave_triangle(t))
def bounce_sine_1d(t, dt, x, y, z, prev_state, in_color):
return in_color, driver.wave_sine(x + driver.wave_sine(t))
def bounce_cubic_1d(t, dt, x, y, z, prev_state, in_color):
return in_color, driver.wave_sine(x + driver.wave_cubic(t))
def perlin_noise_2d(t, dt, x, y, z, prev_state, in_color):
return in_color, driver.perlin_noise_3d(x, y, t)
def twinkle_pulse_1d(t, dt, x, y, z, prev_state, in_color):
v = prev_state[1] - dt
if v <= -0.2:
return in_color, random()
elif v > 0:
return prev_state[0], v
else:
return (0, 0, 0), v
def wipe_across_1d(t, dt, x, y, z, prev_state, in_color):
return in_color, ((t + x) % 1 > 0.5) * 1.0
def wipe_from_center_1d(t, dt, x, y, z, prev_state, in_color):
if x < 0.5:
return in_color, ((t + x) % 1 < 0.5) * 1.0
else:
return in_color, ((x - t) % 1 < 0.5) * 1.0
def wipe_from_ends_1d(t, dt, x, y, z, prev_state, in_color):
if x < 0.5:
return in_color, ((x - t) % 1 < 0.5) * 1.0
else:
return in_color, ((t + x) % 1 < 0.5) * 1.0
default_secondary = {
0: None,
1: sine_1d,
2: cubic_1d,
3: ramp_1d,
4: bounce_linear_1d,
5: bounce_sine_1d,
6: bounce_cubic_1d,
7: perlin_noise_2d,
8: twinkle_pulse_1d,
9: wipe_across_1d,
10: wipe_from_center_1d,
11: wipe_from_ends_1d,
}
default_secondary_names = {
k: utils.snake_to_title(v.__name__) if v else 'None' for k, v in default_secondary.items()
}
``` |
{
"source": "JosephArizpe/DisplayFPVS",
"score": 2
} |
#### File: JosephArizpe/DisplayFPVS/SSVEP.py
```python
from psychopy import visual, core, event, gui, logging
import os
import sys
import random
import math
import csv
import imghdr
ACTUAL_SCREEN_RESOLUTION = [5120,2880] # Change this to reflect the actual screen resolution, or else your stimuli will be the wrong sizes
class SSVEP:
#init sets the window(mywin), and the frequency of the flashing (frame_on => number of frames that the image is visible, frame_off => number of frames that only the background is visible)
#Frame duration in seconds = 1/monitorframerate(in Hz)
#Thus the fastest frame rate could be 1 frame on 1 frame off
# IMPORTANT NOTE: The values between the parentheses just below in this initialization set the DEFAULT values, NOT necessarily the ACTUAL values for your run, which can be set in the instatiation at the BOTTOM of this script
# IN OTHER WORDS: Do NOT touch these default init values, unless you know what you are doing.
def __init__(self, mywin=visual.Window(size=ACTUAL_SCREEN_RESOLUTION,color=[138,138,138], colorSpace='rgb255', fullscr=False , monitor='testMonitor',units='deg'),
frame_off=1, target_freq=6, blockdur = 5.0, port='/dev/ttyACM0',
fname='SSVEP', numblocks=1, waitdur=2, randomlyVarySize=False, isSinusoidalStim=True, doFixationTask=True, numFixColorChanges=8, fixChangeDurSecs=0.2, minSecsBtwFixChgs=1.2, showDiodeStimulator=True):
self.baseStimDir = 'stimuli'#'stimuli' # folder containing the image directories for each category
self.StimDir = ['objects','faces'] # the image category directory names
self.StimPattern = [4, 1] # [n base images - n oddball images] -> change this to change the oddball pattern
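# e.g. with [4, 1] the sequence is base,base,base,base,oddball repeated, so at a 6 Hz base
# rate the oddball appears at 6/5 = 1.2 Hz (a worked example, not a value read from config)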
self.doRandomList = False # i.e. no periodic pattern for the oddball. Use this for making a "base rate" condition.
self.mywin = mywin
self.stimSize = [6.53,6.53]
self.randomlyVarySize = randomlyVarySize
self.fadeIn = True
self.fadeInCycles = 2.0 # important: put the decimal
self.fadeOut = True
self.fadeOutCycles = 2.0 # important: put the decimal
self.sizePercentRange = [74, 120]
self.sizePercentSteps = 2
self.isSinusoidalStim = isSinusoidalStim
self.doFixationTask = doFixationTask
self.normalFixColor = [0,0,0]
self.detectFixColor = [255,0,0]
self.showDiodeStimulator = showDiodeStimulator
self.diodeOnStimColor = [255,255,255]
self.diodeOffStimColor = [0,0,0]
self.respondChar = 'space'
# self.pattern1 = visual.GratingStim(win=self.mywin, name='pattern1',units='deg',
# tex=None, pos=[0, 0], size=self.stimSize, color=self.mywin.color, colorSpace='rgb255',
# opacity=1,interpolate=True)
self.pattern2 = visual.ImageStim(win=self.mywin, name='pattern2',units='deg',
pos=[0, 0], size=self.stimSize,
opacity=1,interpolate=True)
#self.fixation = visual.GratingStim(win=self.mywin, name='fixation', size = 0.3, pos=[0,0], sf=0, color=[0,0,0], colorSpace='rgb255')
self.fixation = visual.TextStim(win=self.mywin, text='+', name='fixation', units='deg', height = 0.5, pos=[0,0], color=self.normalFixColor, colorSpace='rgb255')
self.diodeStimulator = visual.GratingStim(win=self.mywin, name='diodeStim',units='norm',
tex=None, pos=[-1, -1], size=[0.1,0.1], color=[0,0,0], colorSpace='rgb255',
opacity=1,interpolate=True)
self.frame_off = frame_off
self.frameRate = self.mywin.getActualFrameRate()
print "Detected Monitor Refresh: " + str(self.frameRate)
self.targFreq = target_freq
self.StimulationFreq = self.frameRate/(round(self.frameRate/self.targFreq)) #calculate actual stimulation frequency rounded to the screen refresh rate
print "Actual Stimulation Frequency: " + str(self.StimulationFreq)
self.framesPerCycle = round(self.frameRate/self.targFreq) # rounded to the screen refresh rate
print "Frames Per Individual Stimulus Cycle: " + str(self.framesPerCycle)
self.frame_on = int(self.framesPerCycle) - self.frame_off
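# Worked example (assuming a 60 Hz monitor): target_freq = 6 gives framesPerCycle =
# round(60/6) = 10, so with frame_off = 1 each cycle shows the image for 9 frames and
# blanks it for 1, and the realized stimulation frequency is 60/10 = 6 Hz exactly.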
self.blockdur = blockdur
self.fname = fname
self.numblocks = numblocks
self.waitdur = waitdur
self.port = port
# ------------- Uncomment only the relevant line for your experiment!!! -------------
#self.stimFileName = self.Generate_stimListOddball() # Uncomment for exemplar oddball experiments
self.stimFileName = self.Generate_stimList() # Uncomment for base category vs. oddball category experiments
# -------------------------------------------------------------------------------------
kHandle = open(self.stimFileName)
kReader = csv.reader(kHandle,'excel-tab')
self.stimMat = []
for row in kReader:
self.stimMat.append(row[0])
#print self.stimMat
self.fixChangeDurSecs = fixChangeDurSecs
self.minSecsBtwFixChgs = minSecsBtwFixChgs
if self.doFixationTask:
framesPerSec = self.StimulationFreq * self.framesPerCycle
self.numFixColorChanges = numFixColorChanges
self.numFramesFixChange = int(round(framesPerSec*fixChangeDurSecs))
print "Num Frames Each Fix Color Change: "+str(self.numFramesFixChange)
self.minFramesBtwFixChgs = int(math.ceil(framesPerSec*minSecsBtwFixChgs))
totFrames = len(self.stimMat) * self.framesPerCycle
print "Total Frames: "+str(totFrames)
self.fixChgFrames = []
tooManyTries = 1000
for fixChangeInd in range(self.numFixColorChanges):
thisChgFrame = random.randint(self.numFramesFixChange,totFrames-self.numFramesFixChange-self.minFramesBtwFixChgs)
if fixChangeInd > 0:
moreThanOneSecDiffsFromThis = [abs(x - thisChgFrame) > self.minFramesBtwFixChgs for x in self.fixChgFrames]
howManyTries = 0
while not all(moreThanOneSecDiffsFromThis):
thisChgFrame = random.randint(self.numFramesFixChange,totFrames-self.numFramesFixChange-self.minFramesBtwFixChgs)
moreThanOneSecDiffsFromThis = [abs(x - thisChgFrame) > self.minFramesBtwFixChgs for x in self.fixChgFrames]
#print moreThanOneSecDiffsFromThis
howManyTries = howManyTries + 1
if howManyTries >= tooManyTries:
print "ERROR: You put too many fixation color change events and/or set too wide a minimum spacing between the events to actually fit in the experiment."
print "Adjust these parameters or you will likely run into this problem again!!!"
sys.exit()
self.fixChgFrames.append(thisChgFrame)
self.fixChgFrames.sort()
self.fixChgBackFrames = [x + self.numFramesFixChange for x in self.fixChgFrames]
self.fixChgsDetected = [0] * len(self.fixChgFrames)
#print self.fixChgFrames
#print self.fixChgBackFrames
if self.randomlyVarySize:
self.sizesFileName = "SizesOf"+self.stimFileName
possiblePercentVals = range(self.sizePercentRange[0],self.sizePercentRange[1]+self.sizePercentSteps,self.sizePercentSteps)
self.randScalingVals = []
for i in range(len(self.stimMat)):
nextVal = random.choice(possiblePercentVals)*0.01
while i > 0 and nextVal == self.randScalingVals[-1]:
nextVal = random.choice(possiblePercentVals)*0.01
self.randScalingVals.append(nextVal)
with open(self.sizesFileName,"w") as saveFile:
for size in self.randScalingVals:
saveFile.write("%s\n" % size)
else:
self.sizesFileName = "Not Applicable"
self.sizePercentRange = [100, 100]
self.sizePercentSteps = 0
def Generate_stimList(self):
self.tryNum = 1
ListSaveName = "Stimuli_list_"+self.fname+"_"+str(self.tryNum)+".txt" # saves a list of your stimuli to be used during presentation
while os.path.isfile(ListSaveName):
self.tryNum = self.tryNum + 1
ListSaveName = "Stimuli_list_"+self.fname+"_"+str(self.tryNum)+".txt" # saves a list of your stimuli to be used during presentation
#calculate n of stims needed
#NbStims = int(math.ceil(self.blockdur*self.StimulationFreq)) #calculate n of stimuli to display sequence duration
NbPatterns = self.numblocks*int(math.ceil(self.blockdur*self.StimulationFreq/sum(self.StimPattern))) #(NbStims / sum(self.StimPattern))
# Get names of all stimuli in directories and shuffle the order within each stimulus type
AllStims = []
for e in range(len(self.StimDir)):
AllStims.append([])
files = [f for f in os.listdir(os.path.join(self.baseStimDir,self.StimDir[e])) if (os.path.isfile(os.path.join(self.baseStimDir,self.StimDir[e], f)) and (imghdr.what(os.path.join(self.baseStimDir,self.StimDir[e], f)) is not None))] # get names of all stimuli in directories
random.shuffle(files)
for i in range(len(files)):
AllStims[e].append(os.path.join(self.baseStimDir,self.StimDir[e], files[i]))
StimList = []
stimInds = [0, 0]
for i in range(NbPatterns):
for stimType in range(len(self.StimPattern)):
for stimRep in range(self.StimPattern[stimType]):
#print stimInds[stimType]
StimList.append(AllStims[stimType][stimInds[stimType]])
stimInds[stimType] += 1
if stimInds[stimType] >= len(AllStims[stimType]):
stimInds[stimType] = 0
random.shuffle(AllStims[stimType])
while StimList[-1] is AllStims[stimType][0]: # while the last stimulus is the same as the next random one for this stimulus type, re-randomize so as to avoid repeats of identical stimuli
random.shuffle(AllStims[stimType])
if self.doRandomList:
random.shuffle(StimList)
with open(ListSaveName,"w") as saveFile:
for stim in StimList:
saveFile.write("%s\n" % stim)
return ListSaveName
def waitForTrigger(self):
self.instructions = visual.TextStim(self.mywin, text = "Waiting for trigger",pos = [0,0])
self.triggerChar = 't'
response = False
self.mywin.clearBuffer()
self.instructions.draw()
self.mywin.update()
notTriggered = 1
while notTriggered:
self.response = event.getKeys()
if self.response:
if self.response[0] == 'escape':
sys.exit()
elif self.response[0] == self.triggerChar:
notTriggered = 0
def Generate_stimListOddball(self):
self.tryNum = 1
ListSaveName = "OddballStimuli_list_"+self.fname+"_"+str(self.tryNum)+".txt" # saves a list of your stimuli to be used during presentation
while os.path.isfile(ListSaveName):
self.tryNum = self.tryNum + 1
ListSaveName = "OddballStimuli_list_"+self.fname+"_"+str(self.tryNum)+".txt" # saves a list of your stimuli to be used during presentation
#calculate n of stims needed
NbPatterns = self.numblocks*int(math.ceil(self.blockdur*self.StimulationFreq/sum(self.StimPattern))) #(NbStims / sum(self.StimPattern))
# Get names of all stimuli in directories and shuffle the order within each stimulus type
AllStims = []
AllStims.append([])
AllStims.append([])
files = [f for f in os.listdir(self.baseStimDir) if (os.path.isfile(os.path.join(self.baseStimDir, f)) and (imghdr.what(os.path.join(self.baseStimDir, f)) is not None))] # get names of all stimuli in directories
random.shuffle(files)
for i in range(len(files)):
AllStims[1].append(os.path.join(self.baseStimDir, files[i]))
# choose the random one that will be the "base" (i.e, not oddball) image, and keep it separate from the others
AllStims[0].append(AllStims[1][-1])
del AllStims[1][-1]
StimList = []
stimInds = [0, 0]
for i in range(NbPatterns):
for stimType in range(len(self.StimPattern)):
for stimRep in range(self.StimPattern[stimType]):
#print stimInds[stimType]
StimList.append(AllStims[stimType][stimInds[stimType]])
stimInds[stimType] += 1
if stimInds[stimType] >= len(AllStims[stimType]):
stimInds[stimType] = 0
if stimType == 1:
random.shuffle(AllStims[stimType])
while StimList[-1] is AllStims[stimType][0]: # while the last stimulus is the same as the next random one for this stimulus type, re-randomize so as to avoid repeats of identical stimuli
random.shuffle(AllStims[stimType])
if self.doRandomList:
random.shuffle(StimList)
with open(ListSaveName,"w") as saveFile:
for stim in StimList:
saveFile.write("%s\n" % stim)
return ListSaveName
# def collecting(self):
# self.collector = csv_collector.CSVCollector(fname=self.fname, port= self.port)
# self.collector.start()
#
# def epoch(self, mark):
# self.collector.tag(mark)
def stop(self):
self.mywin.close()
core.quit()
def start(self):
self.Trialclock = core.Clock()
self.waitClock = core.Clock()
#start saving data from EEG device.
#self.collecting()
self.count = 0
self.stimNum = 0
self.stimulusNumberError = False
self.stimulusNumberErrorKind = "Not Applicable"
if self.doFixationTask:
self.fixChgTimes = [0] * len(self.fixChgFrames)
self.responseFrameNums = [0] * len(self.fixChgFrames)
self.responseTimes = [0] * len(self.fixChgFrames)
self.fixHasChangedThisBlock = False
#pre-calculate the sinusoidal opacity scaling vals
if self.isSinusoidalStim:
self.sinPhaseStepSize = 2.0*math.pi/(self.frame_on+1.0)
self.sinPhaseStep = self.sinPhaseStepSize # start at the step after 0 radians because the frame_off frames already constitute the 0 opacity display
self.OpacityScaleVals = []
for frameOnNum in range(self.frame_on):
self.OpacityScaleVals.append((math.cos(self.sinPhaseStep + math.pi)+1.0)/2.0) # the function is adjusted to start at a min of 0 and have a max of 1 mid-way
self.sinPhaseStep = self.sinPhaseStep + self.sinPhaseStepSize
self.OpacityScaleVals[self.OpacityScaleVals.index(max(self.OpacityScaleVals))] = 1.0 # Enforce that the max opacity scale value is always 1.0, even when the phase step size would not otherwise allow for it. If there are two max values, only the first is changed to 1. In practice there is usually just one max value, although with an odd number of frame_on frames that max may differ only by a practically infinitesimal amount from another value in the array, and it is not always the first of those two near-identical values.
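# Hedged illustration (not part of the original script): with frame_on = 3 the phase step
# is 2*pi/4, so OpacityScaleVals[k] = (cos((k+1)*stepSize + pi) + 1)/2 gives [0.5, 1.0, 0.5];
# the opacity thus ramps up to a peak of 1.0 and back down over the on-frames, while the
# preceding frame_off frames supply the 0-opacity portion of each cycle.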
print self.OpacityScaleVals
print "Sinusoidal phase step size (radians): "+str(self.sinPhaseStepSize)
print "Total number of stimuli (including repeats): "+str(len(self.stimMat))
totStimuliPrepped = len(self.stimMat)
self.fadeOutDur = self.fadeOutCycles*sum(self.StimPattern)/self.StimulationFreq
self.waitForTrigger()
self.diodeStimulator.setAutoDraw(self.showDiodeStimulator)
while self.count<self.numblocks:
#self.fixation.setAutoDraw(True) ###
#clean black screen off
self.fixation.color = self.normalFixColor
self.fixation.draw()
self.mywin.flip()
if self.count == 0:
self.thisFrameInd = 0
self.thisFixChangeInd = 0
#wait certain time for next block
#core.wait(self.waitdur)
self.waitClock.reset()
while self.waitClock.getTime()<self.waitdur:
self.response = event.getKeys()
if self.response:
if self.response[0] == 'escape':
sys.exit()
elif self.doFixationTask and self.response[0] == self.respondChar and self.thisFixChangeInd > 0 and self.fixHasChangedThisBlock and not self.fixChgsDetected[self.thisFixChangeInd-1]:
self.timeSinceLastFixChange = self.Trialclock.getTime() - self.timeofLastFixChange
self.responseTimes[self.thisFixChangeInd-1] = self.timeSinceLastFixChange
self.responseFrameNums[self.thisFixChangeInd-1] = self.thisFrameInd
self.fixChgsDetected[self.thisFixChangeInd-1] = 1
else:
event.clearEvents()
#reset tagging
self.should_tag = False
#self.epoch(0)
self.firstStimNumOfBlock = self.stimNum
#self.lastStimNumOfBlock = self.firstStimNumOfBlock + totStimuliPrepped/self.numblocks - 1
self.fixHasChangedThisBlock = False
#reset clock for next block
self.Trialclock.reset()
while self.Trialclock.getTime()<self.blockdur:
#draws square
#self.pattern1.setAutoDraw(True)
"""
###Tagging the data with the calculated frequency###
Attempting to tag only a single sample; however, this is hard.
"""
"""alternative way to tag
if self.should_tag == False:
#self.epoch(self.freq)
self.epoch(70)
self.mywin.flip()
self.epoch(0)
self.should_tag = True
"""
#self.epoch(70)
if self.showDiodeStimulator:
shownDiodeStim = False
self.diodeStimulator.color = self.diodeOffStimColor
for frameN in range(self.frame_off):
if self.doFixationTask:
if self.thisFixChangeInd < self.numFixColorChanges:
if self.fixChgFrames[self.thisFixChangeInd] == self.thisFrameInd:
self.fixation.color = self.detectFixColor
self.timeofLastFixChange = self.Trialclock.getTime()
self.fixChgTimes[self.thisFixChangeInd] = self.timeofLastFixChange
self.fixHasChangedThisBlock = True
event.clearEvents() # clear responses so any made just before this fix change are not recorded as the response
elif self.fixChgBackFrames[self.thisFixChangeInd] == self.thisFrameInd:
self.fixation.color = self.normalFixColor
self.thisFixChangeInd = self.thisFixChangeInd + 1
#print self.thisFixChangeInd
self.fixation.draw()
self.mywin.flip()
self.response = event.getKeys()
if self.response:
if self.response[0] == 'escape':
sys.exit()
elif self.doFixationTask and self.response[0] == self.respondChar and self.thisFixChangeInd > 0 and self.fixHasChangedThisBlock and not self.fixChgsDetected[self.thisFixChangeInd-1]:
self.timeSinceLastFixChange = self.Trialclock.getTime() - self.timeofLastFixChange
self.responseTimes[self.thisFixChangeInd-1] = self.timeSinceLastFixChange
self.responseFrameNums[self.thisFixChangeInd-1] = self.thisFrameInd
self.fixChgsDetected[self.thisFixChangeInd-1] = 1
else:
event.clearEvents()
self.thisFrameInd = self.thisFrameInd + 1
if self.stimNum >= totStimuliPrepped:
print "ERROR: The script is trying to show more stimuli than the number originally prepared"
print "This error is likely due to the actual screen refresh rate not being stable across the run. To salvage any task data and prevent the code from breaking, no more images will be displayed for the rest of the run!"
self.stimulusNumberError = True
self.stimulusNumberErrorKind = "Too few prepped"
break
self.pattern2.image = self.stimMat[self.stimNum]
if self.randomlyVarySize:
self.pattern2.size = [self.stimSize[0]*self.randScalingVals[self.stimNum],self.stimSize[1]*self.randScalingVals[self.stimNum]]
if self.fadeIn and self.stimNum == self.firstStimNumOfBlock:
self.stimPeakOpacity = 0.0
if self.fadeIn and self.stimNum-self.firstStimNumOfBlock < sum(self.StimPattern)*self.fadeInCycles:
self.stimPeakOpacity = self.stimPeakOpacity + 1.0/(sum(self.StimPattern)*self.fadeInCycles)
elif self.fadeOut and self.fadeOutDur >= self.blockdur-self.Trialclock.getTime(): #self.stimNum >= (self.lastStimNumOfBlock-sum(self.StimPattern)*self.fadeOutCycles):
#print self.blockdur-self.Trialclock.getTime()
self.stimPeakOpacity = self.stimPeakOpacity - 1.0/(sum(self.StimPattern)*self.fadeOutCycles)
#print self.stimPeakOpacity
else:
self.stimPeakOpacity = 1.0
#print self.stimPeakOpacity
#print self.pattern2.image
self.stimNum += 1
#print self.stimNum
#self.pattern1.setAutoDraw(False)
#self.pattern2.setAutoDraw(True) ###
if not self.isSinusoidalStim:
self.pattern2.opacity = self.stimPeakOpacity
for frameN in range(self.frame_on):
if self.isSinusoidalStim:
self.pattern2.opacity = self.stimPeakOpacity * self.OpacityScaleVals[frameN]
#print self.pattern2.opacity
if self.showDiodeStimulator:
if not shownDiodeStim and self.pattern2.opacity == self.stimPeakOpacity:
self.diodeStimulator.color = self.diodeOnStimColor
shownDiodeStim = True
else:
self.diodeStimulator.color = self.diodeOffStimColor
if self.doFixationTask:
if self.thisFixChangeInd < self.numFixColorChanges:
if self.fixChgFrames[self.thisFixChangeInd] == self.thisFrameInd:
self.fixation.color = self.detectFixColor
self.timeofLastFixChange = self.Trialclock.getTime()
self.fixChgTimes[self.thisFixChangeInd] = self.timeofLastFixChange
self.fixHasChangedThisBlock = True
event.clearEvents() # clear responses so any made just before this fix change are not recorded as the response
elif self.fixChgBackFrames[self.thisFixChangeInd] == self.thisFrameInd:
self.fixation.color = self.normalFixColor
self.thisFixChangeInd = self.thisFixChangeInd + 1
#print self.thisFixChangeInd
self.pattern2.draw() # do not set these to auto-draw because then the image always gets drawn over the fixation
self.fixation.draw() # for the same reason, draw the fixation second
self.mywin.flip()
self.response = event.getKeys()
if self.response:
if self.response[0] == 'escape':
sys.exit()
elif self.doFixationTask and self.response[0] == self.respondChar and self.thisFixChangeInd > 0 and self.fixHasChangedThisBlock and not self.fixChgsDetected[self.thisFixChangeInd-1]:
self.timeSinceLastFixChange = self.Trialclock.getTime() - self.timeofLastFixChange
self.responseTimes[self.thisFixChangeInd-1] = self.timeSinceLastFixChange
self.responseFrameNums[self.thisFixChangeInd-1] = self.thisFrameInd
self.fixChgsDetected[self.thisFixChangeInd-1] = 1
else:
event.clearEvents()
self.thisFrameInd = self.thisFrameInd + 1
#self.pattern2.setAutoDraw(False) ###
#print "current frame Num: "+str(self.thisFrameInd)
#self.epoch(0)
#count number of blocks
self.count+=1
"""
###Tagging the Data at end of stimulus###
"""
print "Last Frame Num: "+str(self.thisFrameInd)
print "Last Stim Num: "+str(self.stimNum)
print "Actual Number of Fix Color Changes: "+str(self.thisFixChangeInd)
#self.collector.disconnect()
#clean black screen off
self.fixation.color = self.normalFixColor
self.fixation.draw()
self.mywin.flip()
#wait certain time for next block
#core.wait(self.waitdur)
self.waitClock.reset()
while self.waitClock.getTime()<self.waitdur:
self.response = event.getKeys()
if self.response:
if self.response[0] == 'escape':
sys.exit()
elif self.doFixationTask and self.response[0] == self.respondChar and self.thisFixChangeInd > 0 and self.fixHasChangedThisBlock and not self.fixChgsDetected[self.thisFixChangeInd-1]:
self.timeSinceLastFixChange = self.Trialclock.getTime() - self.timeofLastFixChange
self.responseTimes[self.thisFixChangeInd-1] = self.timeSinceLastFixChange
self.responseFrameNums[self.thisFixChangeInd-1] = self.thisFrameInd
self.fixChgsDetected[self.thisFixChangeInd-1] = 1
else:
event.clearEvents()
if self.stimNum < totStimuliPrepped:
self.stimulusNumberError = True
self.stimulusNumberErrorKind = "More prepped than shown"
if self.doFixationTask:
self.runData = []
self.runData.append(self.fixChgFrames)
self.runData.append(self.fixChgBackFrames)
self.runData.append(self.fixChgTimes)
self.runData.append(self.responseFrameNums)
self.runData.append(self.fixChgsDetected)
self.runData.append(self.responseTimes)
#print self.runData
self.runData = zip(*self.runData) # "transpose" this matrix for saving
#print self.runData
# dataSaveFile = 'runData_'+self.fname+'_run'+str(self.tryNum)+'.txt'
runNum = 1
dataSaveFile = 'runData_'+self.fname+'_run'+str(runNum)+'.txt'
while os.path.isfile(dataSaveFile):
runNum = runNum + 1
dataSaveFile = 'runData_'+self.fname+'_run'+str(runNum)+'.txt'
with open(dataSaveFile, 'w') as csvfile:
writer = csv.writer(csvfile,delimiter="\t")
[writer.writerow(r) for r in self.runData]
print "Task data Saved to: "+dataSaveFile
fileCorrespondSaveFile = 'runInfo_'+self.fname+'_run'+str(runNum)+'.txt'
else:
dataSaveFile = "Not Applicable"
fileCorrespondSaveFile = 'runInfo_'+self.fname+'_try'+str(self.tryNum)+'.txt'
self.runInfo = ["Task data file: "+dataSaveFile,
"Stimuli list file: "+self.stimFileName,
"Stimuli sizes file: "+self.sizesFileName,
"Length of blocks(secs): "+str(self.blockdur),
"Number of blocks: "+str(self.numblocks),
"Wait time before, between, after blocks (secs): "+str(self.waitdur),
"Actual stimulation frequency: "+str(self.StimulationFreq),
"Sinusoidal stimulation?: "+str(self.isSinusoidalStim),
"Number of frames off (i.e., no stimulus present) btw stimuli: "+str(self.frame_off),
"Number of frames on (i.e., stimulus visible) for each stimulus: "+str(self.frame_on),
"Non-periodic oddball Stimulation?: "+str(self.doRandomList),
"Proportion base vs oddball: "+str(self.StimPattern[0])+" : "+str(self.StimPattern[1]),
"Stimulus size (the central size, if varying): "+str(self.stimSize[0])+" x "+str(self.stimSize[1]),
"Stimulus size units: "+self.pattern2.units,
"Did stimulus size vary?: "+str(self.randomlyVarySize),
"Stimulus size range (percent of central size): "+str(self.sizePercentRange[0])+" - "+str(self.sizePercentRange[1]),
"Stimulus size steps (percent of central size): "+str(self.sizePercentSteps),
"Had fade in?: "+str(self.fadeIn),
"Length of fade in (in number of oddball cycles): "+str(self.fadeInCycles),
"Had fade out?: "+str(self.fadeOut),
"Length of fade out (in number of oddball cycles): "+str(self.fadeOutCycles),
"Had fixation task?: "+str(self.doFixationTask),
"Number of actual fix color changes: "+str(self.thisFixChangeInd),
"Length of each color change (secs): "+str(self.fixChangeDurSecs),
"Theoretical minimum time between color change onsets: "+str(self.minSecsBtwFixChgs),
"Had diode stimulator?: "+str(self.showDiodeStimulator),
"Stimulus Number Error Occurred?: "+str(self.stimulusNumberError),
"Kind of stimulus number error: "+str(self.stimulusNumberErrorKind),
"Number of stimuli (including repeats) prepped: "+str(totStimuliPrepped),
"Number of stimuli (including repeats) shown: "+str(self.stimNum)]
#print self.runInfo
with open(fileCorrespondSaveFile, 'w') as csvfile:
writer = csv.writer(csvfile,delimiter="\n")
writer.writerow(self.runInfo)
#[writer.writerow(r) for r in self.runInfo]
self.stop()
class InputBox(object):
def __init__(self):
self.myDlg = gui.Dlg(title="SSVEP Menu")
self.myDlg.addText('Subject info')
self.myDlg.addField('Participant:','0')
self.myDlg.addField('Session', 001)
self.myDlg.addField('Port', '/dev/tty/ACM0')
self.myDlg.addText('Frequency Selection (Approximate... depends on the monitor refresh rate)')
self.myDlg.addField('Frequency Target', choices=["1","2","3","4","5","6","7","8","9","10","11","12","13","14","15","16","20"], initial = "6")#3
self.myDlg.addText('Block Duration')
self.myDlg.addField('Duration', '64')
self.myDlg.addText('Time before/between/after block(s)')
self.myDlg.addField('InterBlockTime', '2')
self.myDlg.addText('Choose Number of Blocks')
self.myDlg.addField('NumberBlocks', '1')
self.myDlg.show() # show dialog and wait for OK or Cancel
if self.myDlg.OK: # then the user pressed OK
self.thisInfo = self.myDlg.data
self.options = {'participant': self.thisInfo[0], 'session': self.thisInfo[1], 'port': self.thisInfo[2], 'Frequency': self.thisInfo[3], 'Duration': self.thisInfo[4], 'InterBlockTime': self.thisInfo[5], 'NumberBlocks': self.thisInfo[6]}
else:
print 'User Cancelled'
# Setup filename for saving
self.fname = 'sub%s_sess%s' %(self.options['participant'], self.options['session'])
#port name
self.port = '%s' %self.options['port']
#target frequency
self.target_freq = '%s' %self.options['Frequency']
#flash duration
self.flash_duration= '%s' %self.options['Duration']
#number of stimulation blocks
self.num_blocks= '%s' %self.options['NumberBlocks']
#time to wait between blocks
self.wait_dur= '%s' %self.options['InterBlockTime']
def file(self):
return str(self.fname)
def port_name(self):
return str(self.port)
def stim_freq(self):
return int(self.target_freq)
def stim_duration(self):
return int(self.flash_duration)
def stim_blocks(self):
return int(self.num_blocks)
def waitduration(self):
return int(self.wait_dur)
# An SSVEP object appears to have already been constructed before being instantiated, which means setting the PsychoPy window to fullscreen during initialization interferes with the dialogue box, even if SSVEP has not been instantiated before the dialogue box. The workaround here is to initialize SSVEP without fullscreen, handle the dialogue box, and then set the PsychoPy window to fullscreen before starting the SSVEP.
expinfos = InputBox()
filename = expinfos.file()
print expinfos.port_name()
port_addr = expinfos.port_name()
print filename
freq = expinfos.stim_freq()
flash_dur = expinfos.stim_duration()
blocknums = expinfos.stim_blocks()
waitduration = expinfos.waitduration()
stimuli=SSVEP(frame_off=1, target_freq=freq, fname=filename, port=port_addr,
blockdur=flash_dur, numblocks=blocknums, waitdur=waitduration, randomlyVarySize=True, isSinusoidalStim=True, doFixationTask=True, showDiodeStimulator=True)
stimuli.mywin.fullscr = True
stimuli.mywin.winHandle.set_fullscreen(True)
stimuli.mywin.flip()
stimuli.start()
``` |
{
"source": "Josephat-n/instagram",
"score": 2
} |
#### File: instagram/insta/views.py
```python
from django.shortcuts import render, redirect
from django.http import HttpResponse
from .forms import ImageUploadForm
from django.contrib.auth.decorators import login_required
from .models import Image
# Create your views here.
def home(request):
images = Image.get_all()
return render(request, 'insta/home.html', {'images':images})
@login_required(login_url='/accounts/login/')
def new_image(request):
current_user = request.user
if request.method == 'POST':
form = ImageUploadForm(request.POST, request.FILES)
if form.is_valid():
image = form.save(commit=False)
image.save()
return redirect('instagram-home')
else:
form = ImageUploadForm()
return render(request, 'new_image.html', {"form": form})
``` |
{
"source": "Josephat-n/myBlog",
"score": 4
} |
#### File: myBlog/tests/test_user.py
```python
import unittest
from app.models import User
class UserModelTest(unittest.TestCase):
"""
test class to test the behavior of the User class
"""
def setUp(self):
self.new_user = User("1","josphat","<EMAIL>", "1234", "yudbc", "this is a great article") #create a user object
def test_setting_password(self):
'''
testcase to test setting a new password
'''
self.assertFalse(self.new_user.pass_secure is None)
def test_no_access_to_passwd(self):
'''
testcase to test that no one can access a password from the database
'''
if self.new_user.pass_secure:
return self.assertRaises(AttributeError)
else:
return self.assertRaises('False')
def test_password_verify(self):
'''
testcase to check that the hashed password is verified when logging in a user
'''
self.assertTrue(self.new_user.verify_password('<PASSWORD>'))
``` |
{
"source": "josephAttia/typingBot",
"score": 4
} |
#### File: josephAttia/typingBot/temp.py
```python
from pynput.mouse import Button
from pynput.keyboard import Key
import pynput
import time
import random
mouse = pynput.mouse.Controller()
keyboard = pynput.keyboard.Controller()
for x in range(49):
time.sleep(2)
mouse.scroll(0, -3)
mouse.position = (918, 510)
time.sleep(0.5)
mouse.click(Button.left, 1)
time.sleep(0.5)
mouse.scroll(0, 3)
time.sleep(0.5)
mouse.position = (741, 556)
time.sleep(0.5)
mouse.click(Button.left, 1)
time.sleep(0.5)
def controlKeyboard():
for char in "Question 2 of the FRQ section will be a prose analysis prompt. You will need to read a given prose passage of 500 to 700 words and a prompt to guide your analytical essay about the passage. The prompt will help you figure out what to look for as you read the passage":
keyboard.press(char)
keyboard.release(char)
time.sleep(0.01)
controlKeyboard()
time.sleep(random.randrange(47, 50))
keyboard.press('.')
keyboard.release('.')
print(x)
``` |
{
"source": "josephaw1022/AbstractAlgebraTool",
"score": 4
} |
#### File: AbstractAlgebraTool/myMath/myMath.py
```python
class myMath:
def add(a, b):
return a + b
def subtract(a, b):
return a - b
def multiply(a, b):
return a * b
def divide(a, b):
return a / b
def divideInt(a, b):
return a // b
def mod(a, b):
return a % b
def exponent(a, b):
return a ** b
## More specific stuff
def GCD(a, b):
bigVal, smallVal = max([a, b]), min([a, b])
# start euclid's alogirthm
# See if this works :)
done = False
while not done:
tempVal = bigVal
bigVal = smallVal
potentialGCD = smallVal
smallVal = tempVal % smallVal
if smallVal == 0:
return "\n\ngcd( {} , {} ) = {}\n\n".format(a, b, potentialGCD)
def gcdSteps(a, b):
def equationFormat(valOne, valTwo, valThree, valFour):
return "{} = {}*{} + {}".format(valOne, valTwo, valThree, valFour)
def endingsFormat(valOne, valTwo, valThree, valFour):
return "{} = {} - {}*{}".format(valFour, valOne, valTwo, valThree)
def popEndValue(list):
return list[0 : len(list) - 1]
endingVals = []
allEquations = []
bigVal, smallVal = max([a, b]), min([a, b])
# start Euclid's algorithm
done = False
while not done:
tempVal = bigVal
bigVal = smallVal
smallVal = tempVal % smallVal
endingVals.append(
endingsFormat(tempVal, bigVal, tempVal // bigVal, smallVal)
)
allEquations.append(
equationFormat(tempVal, bigVal, tempVal // bigVal, smallVal)
)
if smallVal == 0:
break
endingVals = popEndValue(endingVals)
return allEquations, endingVals
def simplifyCongruence(initVal1, initVal2, iterVal):
origVal1, origVal2 = initVal1, initVal2 # keep the original coefficients so the output can show both forms
returnVals = []
while initVal1 <= 0:
initVal1 += iterVal
if initVal1 > 0:
returnVals.append(initVal1)
break
while initVal1 - iterVal >= 0:
initVal1 -= iterVal
if initVal1 - iterVal < 0:
returnVals.append(initVal1)
break
while initVal2 <= 0:
initVal2 += iterVal
if initVal2 > 0:
returnVals.append(initVal2)
break
while initVal2 - iterVal >= 0:
initVal2 -= iterVal
if initVal2 - iterVal < 0:
returnVals.append(initVal2)
break
topline = "{}a ≡ {}b (mod {}) is equivalent to\n\n".format(
initVal1, initVal2, iterVal
)
templateFormat = topline + "{}a ≡ {}b (mod {})\n\n".format(
initVal1, initVal2, iterVal
)
return templateFormat
# This is still in the works at the moment
def linearCombination(valueOne, valueTwo):
s = 0
old_s = 1
t = 1
old_t = 0
r = valueOne
old_r = valueTwo
while r != 0:
quotient = old_r // r
old_r, r = r, old_r - quotient * r
old_s, s = s, old_s - quotient * s
old_t, t = t, old_t - quotient * t
return "{}({})+ {}({}) = {}".format(old_t, valueOne, old_s, valueTwo, old_r)
def zTable(zOne, zTwo, operation=""):
if zOne > 3 or zTwo > 3:
indent = ""
for i in range(2):
indent += " "
else:
indent = "\t"
if operation == "" or operation == "None":
return "\n\nATTENTION\n\nMust give a string of a operation ['+', '*'] argument. Cannot be anything else.\n\nExample of correct method use:\n\n{}\n\n{}\n\n{}\n\n".format(
"ZTable(3,4,'+')", "ZTable(3,2,'*')", "ZTable(9,4,'+')"
)
def doOperation(valueOne, valueTwo, operation):
if operation == "+":
return myMath.add(valueOne, valueTwo)
elif operation == "*":
return myMath.multiply(valueOne, valueTwo)
def makeArray(lengthDesired):
anyArray = []
for i in range(lengthDesired):
anyArray.append(i)
return anyArray
def line(num=35):
string = ""
for i in range(num):
string += "-"
return string
outpieces = []
for zOneElement in makeArray(zOne):
for zTwoElement in makeArray(zTwo):
outpieces.append([zOneElement, zTwoElement])
finalTable = []
for chunk1 in outpieces:
tempRow = []
for chunk2 in outpieces:
tempRow.append(
[
myMath.mod(
doOperation(chunk1[0], chunk2[0], operation), zOne
),
myMath.mod(
doOperation(chunk1[-1], chunk2[-1], operation), zTwo
),
]
)
finalTable.append(tempRow)
def formatTableValue(pieceOne, pieceTwo):
return "({} , {})".format(pieceOne, pieceTwo)
def formattedTable(givenTable, key=None):
string = []
newTable = []
iterCount = 0
for row in givenTable:
innerIterCount = 0
row = sorted(row)
for pair in row:
tempArray = row[iterCount - 1]
string.append(
str(
"{} {} {} => {}\n\n".format(
formatTableValue(pair[0], pair[1]),
operation,
formatTableValue(tempArray[0], tempArray[1]),
formatTableValue(
(doOperation(pair[0], tempArray[0], operation))
% zOne,
(doOperation(pair[1], tempArray[1], operation))
% zTwo,
),
)
)
)
innerIterCount += 1
string += "\n"
iterCount += 1
return string
def formatTitleRow(row):
finalString = ""
for value in row:
finalString += formatTableValue(value[0], value[1])
print(finalString)
return finalString
return formattedTable(finalTable)
``` |
{
"source": "josephbajor/triage_NN",
"score": 2
} |
#### File: tests/architect_tests/test_feature_group_mixer.py
```python
import itertools
from triage.component.architect.feature_group_mixer import FeatureGroupMixer
from triage.component.architect.feature_group_creator import FeatureGroup
import pytest
@pytest.fixture
def english_numbers():
return FeatureGroup(
name="english_numbers",
features_by_table={"one": ["two", "three"], "four": ["five", "six"]},
)
@pytest.fixture
def letters():
return FeatureGroup(
name="letters", features_by_table={"a": ["b", "c"], "d": ["e", "f"]}
)
@pytest.fixture
def german_numbers():
return FeatureGroup(
name="german_numbers",
features_by_table={"eins": ["zwei", "drei"], "vier": ["funf", "sechs"]},
)
def test_feature_group_mixer_leave_one_out(english_numbers, letters, german_numbers):
feature_groups = [english_numbers, letters, german_numbers]
result = FeatureGroupMixer(["leave-one-out"]).generate(feature_groups)
expected = [
dict(itertools.chain(letters.items(), german_numbers.items())),
dict(itertools.chain(english_numbers.items(), german_numbers.items())),
dict(itertools.chain(english_numbers.items(), letters.items())),
]
assert result == expected
assert [g.names for g in result] == [
["letters", "german_numbers"],
["english_numbers", "german_numbers"],
["english_numbers", "letters"],
]
def test_feature_group_mixer_leave_one_in(english_numbers, letters, german_numbers):
feature_groups = [english_numbers, letters, german_numbers]
result = FeatureGroupMixer(["leave-one-in"]).generate(feature_groups)
expected = [english_numbers, letters, german_numbers]
assert result == expected
assert [g.names for g in result] == [
["english_numbers"],
["letters"],
["german_numbers"],
]
def test_feature_group_mixer_all_combinations(english_numbers, letters,
german_numbers):
feature_groups = [english_numbers, letters, german_numbers]
result = FeatureGroupMixer(['all-combinations']).generate(feature_groups)
expected = [
dict(itertools.chain(english_numbers.items())),
dict(itertools.chain(letters.items())),
dict(itertools.chain(german_numbers.items())),
dict(itertools.chain(english_numbers.items(), letters.items())),
dict(itertools.chain(english_numbers.items(), german_numbers.items())),
dict(itertools.chain(letters.items(), german_numbers.items())),
dict(itertools.chain(english_numbers.items(), letters.items(),
german_numbers.items()))
]
assert result == expected
assert [g.names for g in result] == [
['english_numbers'],
['letters'],
['german_numbers'],
['english_numbers', 'letters'],
['english_numbers', 'german_numbers'],
['letters', 'german_numbers'],
['english_numbers', 'letters', 'german_numbers']
]
def test_feature_group_mixer_all(english_numbers, letters, german_numbers):
feature_groups = [english_numbers, letters, german_numbers]
result = FeatureGroupMixer(["all"]).generate(feature_groups)
expected = [
dict(
itertools.chain(
english_numbers.items(), letters.items(), german_numbers.items()
)
)
]
assert result == expected
assert result[0].names == ["english_numbers", "letters", "german_numbers"]
```
#### File: tests/architect_tests/test_planner.py
```python
import datetime
from triage.component.architect import Planner
from triage.component.architect.feature_group_creator import FeatureGroup
def test_Planner():
matrix_set_definitions = [
{
"feature_start_time": datetime.datetime(1990, 1, 1, 0, 0),
"modeling_start_time": datetime.datetime(2010, 1, 1, 0, 0),
"modeling_end_time": datetime.datetime(2010, 1, 16, 0, 0),
"train_matrix": {
"first_as_of_time": datetime.datetime(2010, 1, 1, 0, 0),
"matrix_info_end_time": datetime.datetime(2010, 1, 6, 0, 0),
"as_of_times": [
datetime.datetime(2010, 1, 1, 0, 0),
datetime.datetime(2010, 1, 2, 0, 0),
datetime.datetime(2010, 1, 3, 0, 0),
datetime.datetime(2010, 1, 4, 0, 0),
datetime.datetime(2010, 1, 5, 0, 0),
],
},
"test_matrices": [
{
"first_as_of_time": datetime.datetime(2010, 1, 6, 0, 0),
"matrix_info_end_time": datetime.datetime(2010, 1, 11, 0, 0),
"as_of_times": [
datetime.datetime(2010, 1, 6, 0, 0),
datetime.datetime(2010, 1, 7, 0, 0),
datetime.datetime(2010, 1, 8, 0, 0),
datetime.datetime(2010, 1, 9, 0, 0),
datetime.datetime(2010, 1, 10, 0, 0),
],
}
],
},
{
"feature_start_time": datetime.datetime(1990, 1, 1, 0, 0),
"modeling_start_time": datetime.datetime(2010, 1, 1, 0, 0),
"modeling_end_time": datetime.datetime(2010, 1, 16, 0, 0),
"train_matrix": {
"first_as_of_time": datetime.datetime(2010, 1, 6, 0, 0),
"matrix_info_end_time": datetime.datetime(2010, 1, 11, 0, 0),
"as_of_times": [
datetime.datetime(2010, 1, 6, 0, 0),
datetime.datetime(2010, 1, 7, 0, 0),
datetime.datetime(2010, 1, 8, 0, 0),
datetime.datetime(2010, 1, 9, 0, 0),
datetime.datetime(2010, 1, 10, 0, 0),
],
},
"test_matrices": [
{
"first_as_of_time": datetime.datetime(2010, 1, 11, 0, 0),
"matrix_info_end_time": datetime.datetime(2010, 1, 16, 0, 0),
"as_of_times": [
datetime.datetime(2010, 1, 11, 0, 0),
datetime.datetime(2010, 1, 12, 0, 0),
datetime.datetime(2010, 1, 13, 0, 0),
datetime.datetime(2010, 1, 14, 0, 0),
datetime.datetime(2010, 1, 15, 0, 0),
],
}
],
},
]
feature_dict_one = FeatureGroup(
name="first_features",
features_by_table={"features0": ["f1", "f2"], "features1": ["f1", "f2"]},
)
feature_dict_two = FeatureGroup(
name="second_features",
features_by_table={"features2": ["f3", "f4"], "features3": ["f5", "f6"]},
)
feature_dicts = [feature_dict_one, feature_dict_two]
planner = Planner(
feature_start_time=datetime.datetime(2010, 1, 1, 0, 0),
label_names=["booking"],
label_types=["binary"],
cohort_names=["prior_bookings"],
user_metadata={},
)
updated_matrix_definitions, build_tasks = planner.generate_plans(
matrix_set_definitions, feature_dicts
)
# test that it added uuids: we don't much care what they are
matrix_uuids = []
for matrix_def in updated_matrix_definitions:
assert isinstance(matrix_def["train_uuid"], str)
matrix_uuids.append(matrix_def["train_uuid"])
for test_uuid in matrix_def["test_uuids"]:
assert isinstance(test_uuid, str)
assert len(set(matrix_uuids)) == 4
# not going to assert anything on the keys (uuids), just get out the values
build_tasks = build_tasks.values()
assert len(build_tasks) == 8 # 2 splits * 2 matrices per split * 2 feature dicts
assert sum(1 for task in build_tasks if task["matrix_type"] == "train") == 4
assert sum(1 for task in build_tasks if task["matrix_type"] == "test") == 4
assert (
sum(1 for task in build_tasks if task["feature_dictionary"] == feature_dict_one)
== 4
)
assert (
sum(1 for task in build_tasks if task["feature_dictionary"] == feature_dict_two)
== 4
)
assert (
sum(
1
for task in build_tasks
if task["matrix_metadata"]["feature_groups"] == ["first_features"]
)
== 4
)
assert (
sum(
1
for task in build_tasks
if task["matrix_metadata"]["feature_groups"] == ["second_features"]
)
== 4
)
assert (
sum(
1
for task in build_tasks
if task["matrix_metadata"]["cohort_name"] == "prior_bookings"
)
== 8
)
```
#### File: tests/audition_tests/test_rules_maker.py
```python
import unittest
from triage.component.audition.selection_rules import BoundSelectionRule
from triage.component.audition.selection_rule_grid import make_selection_rule_grid
from triage.component.audition.rules_maker import (
SimpleRuleMaker,
RandomGroupRuleMaker,
TwoMetricsRuleMaker,
create_selection_grid,
)
class TestSimpleRuleMaker(unittest.TestCase):
def test_add_rule_best_current_value(self):
Rule = SimpleRuleMaker()
Rule.add_rule_best_current_value(metric="precision@", parameter="100_abs")
assert Rule.create() == [
{
"selection_rules": [{"name": "best_current_value", "n": 1}],
"shared_parameters": [{"metric": "precision@", "parameter": "100_abs"}],
}
]
def test_add_rule_best_average_value(self):
Rule = SimpleRuleMaker()
Rule.add_rule_best_average_value(metric="precision@", parameter="100_abs")
assert Rule.create() == [
{
"selection_rules": [{"name": "best_average_value", "n": 1}],
"shared_parameters": [{"metric": "precision@", "parameter": "100_abs"}],
}
]
def test_add_rule_lowest_metric_variance(self):
Rule = SimpleRuleMaker()
Rule.add_rule_lowest_metric_variance(metric="precision@", parameter="100_abs")
assert Rule.create() == [
{
"selection_rules": [{"name": "lowest_metric_variance", "n": 1}],
"shared_parameters": [{"metric": "precision@", "parameter": "100_abs"}],
}
]
def test_add_rule_most_frequent_best_dist(self):
Rule = SimpleRuleMaker()
Rule.add_rule_most_frequent_best_dist(
metric="precision@",
parameter="100_abs",
dist_from_best_case=[0.01, 0.05, 0.1, 0.15],
)
assert Rule.create() == [
{
"selection_rules": [
{
"dist_from_best_case": [0.01, 0.05, 0.1, 0.15],
"name": "most_frequent_best_dist",
"n": 1,
}
],
"shared_parameters": [{"metric": "precision@", "parameter": "100_abs"}],
}
]
def test_add_rule_best_avg_recency_weight(self):
Rule = SimpleRuleMaker()
Rule.add_rule_best_avg_recency_weight(metric="precision@", parameter="100_abs")
assert Rule.create() == [
{
"selection_rules": [
{
"curr_weight": [1.5, 2.0, 5.0],
"decay_type": ["linear"],
"name": "best_avg_recency_weight",
"n": 1,
}
],
"shared_parameters": [{"metric": "precision@", "parameter": "100_abs"}],
}
]
def test_add_rule_best_avg_var_penalized(self):
Rule = SimpleRuleMaker()
Rule.add_rule_best_avg_var_penalized(
metric="precision@", parameter="100_abs", stdev_penalty=0.5
)
assert Rule.create() == [
{
"selection_rules": [
{"name": "best_avg_var_penalized", "stdev_penalty": 0.5, "n": 1}
],
"shared_parameters": [{"metric": "precision@", "parameter": "100_abs"}],
}
]
class TestRandomGroupRuleMaker(unittest.TestCase):
def test_random_model_groups(self):
Rule = RandomGroupRuleMaker()
assert Rule.create() == [
{
"selection_rules": [{"name": "random_model_group", "n": 1}],
"shared_parameters": [{}],
}
]
class TestTwoMetricsRuleMaker(unittest.TestCase):
def test_add_two_metrics_rule_maker(self):
Rule = TwoMetricsRuleMaker()
Rule.add_rule_best_average_two_metrics(
metric1="precision@",
parameter1="100_abs",
metric2="recall@",
parameter2="300_abs",
metric1_weight=[0.5],
)
assert Rule.create() == [
{
"selection_rules": [
{
"metric1_weight": [0.5],
"name": "best_average_two_metrics",
"metric2": ["recall@"],
"parameter2": ["300_abs"],
"n": 1,
}
],
"shared_parameters": [
{"metric1": "precision@", "parameter1": "100_abs"}
],
}
]
class TestCreateSelectionRuleGrid(unittest.TestCase):
def test_create_grid(self):
"""
input_data = [{
'shared_parameters': [
{'metric': 'precision@', 'parameter': '100_abs'},
{'metric': 'recall@', 'parameter': '100_abs'},
],
'selection_rules': [
{'name': 'most_frequent_best_dist', 'dist_from_best_case': [0.1, 0.2, 0.3]},
{'name': 'best_current_value'}
]
}, {
'shared_parameters': [
{'metric1': 'precision@', 'parameter1': '100_abs'},
],
'selection_rules': [
{
'name': 'best_average_two_metrics',
'metric2': ['recall@'],
'parameter2': ['100_abs'],
'metric1_weight': [0.4, 0.5, 0.6]
},
]
}]
"""
Rule1 = SimpleRuleMaker()
Rule1.add_rule_best_current_value(metric="precision@", parameter="100_abs")
Rule1.add_rule_most_frequent_best_dist(
metric="recall@", parameter="100_abs", dist_from_best_case=[0.1, 0.2, 0.3]
)
Rule2 = TwoMetricsRuleMaker()
Rule2.add_rule_best_average_two_metrics(
metric1="precision@",
parameter1="100_abs",
metric2="recall@",
parameter2="100_abs",
metric1_weight=[0.4, 0.5, 0.6],
)
expected_output = [
BoundSelectionRule(
descriptive_name="most_frequent_best_dist_precision@_100_abs_0.1",
function_name="most_frequent_best_dist",
args={
"metric": "precision@",
"parameter": "100_abs",
"dist_from_best_case": 0.1,
},
),
BoundSelectionRule(
descriptive_name="most_frequent_best_dist_precision@_100_abs_0.2",
function_name="most_frequent_best_dist",
args={
"metric": "precision@",
"parameter": "100_abs",
"dist_from_best_case": 0.2,
},
),
BoundSelectionRule(
descriptive_name="most_frequent_best_dist_precision@_100_abs_0.3",
function_name="most_frequent_best_dist",
args={
"metric": "precision@",
"parameter": "100_abs",
"dist_from_best_case": 0.3,
},
),
BoundSelectionRule(
descriptive_name="most_frequent_best_dist_recall@_100_abs_0.1",
function_name="most_frequent_best_dist",
args={
"metric": "recall@",
"parameter": "100_abs",
"dist_from_best_case": 0.1,
},
),
BoundSelectionRule(
descriptive_name="most_frequent_best_dist_recall@_100_abs_0.2",
function_name="most_frequent_best_dist",
args={
"metric": "recall@",
"parameter": "100_abs",
"dist_from_best_case": 0.2,
},
),
BoundSelectionRule(
descriptive_name="most_frequent_best_dist_recall@_100_abs_0.3",
function_name="most_frequent_best_dist",
args={
"metric": "recall@",
"parameter": "100_abs",
"dist_from_best_case": 0.3,
},
),
BoundSelectionRule(
descriptive_name="best_current_value_precision@_100_abs",
function_name="best_current_value",
args={"metric": "precision@", "parameter": "100_abs"},
),
BoundSelectionRule(
descriptive_name="best_current_value_recall@_100_abs",
function_name="best_current_value",
args={"metric": "recall@", "parameter": "100_abs"},
),
BoundSelectionRule(
descriptive_name="best_average_two_metrics_precision@_100_abs_recall@_100_abs_0.4",
function_name="best_average_two_metrics",
args={
"metric1": "precision@",
"parameter1": "100_abs",
"metric2": "recall@",
"parameter2": "100_abs",
"metric1_weight": 0.4,
},
),
BoundSelectionRule(
descriptive_name="best_average_two_metrics_precision@_100_abs_recall@_100_abs_0.5",
function_name="best_average_two_metrics",
args={
"metric1": "precision@",
"parameter1": "100_abs",
"metric2": "recall@",
"parameter2": "100_abs",
"metric1_weight": 0.5,
},
),
BoundSelectionRule(
descriptive_name="best_average_two_metrics_precision@_100_abs_recall@_100_abs_0.6",
function_name="best_average_two_metrics",
args={
"metric1": "precision@",
"parameter1": "100_abs",
"metric2": "recall@",
"parameter2": "100_abs",
"metric1_weight": 0.6,
},
),
]
expected_output.sort(key=lambda x: x.descriptive_name)
grid = sorted(
make_selection_rule_grid(create_selection_grid(Rule1, Rule2)),
key=lambda x: x.descriptive_name,
)
assert len(grid) == len(expected_output)
for expected_rule, actual_rule in zip(expected_output, grid):
assert expected_rule.descriptive_name == actual_rule.descriptive_name
```
#### File: tests/catwalk_tests/test_storage.py
```python
import datetime
import os
import tempfile
from collections import OrderedDict
import boto3
import pandas as pd
import pytest
import yaml
from moto import mock_s3
from numpy.testing import assert_almost_equal
from pandas.testing import assert_frame_equal
from unittest import mock
from triage.component.catwalk.storage import (
MatrixStore,
CSVMatrixStore,
FSStore,
S3Store,
ProjectStorage,
ModelStorageEngine,
)
from tests.utils import CallSpy
class SomeClass:
def __init__(self, val):
self.val = val
def test_S3Store():
with mock_s3():
client = boto3.client("s3")
client.create_bucket(Bucket="test_bucket", ACL="public-read-write")
store = S3Store(f"s3://test_bucket/a_path")
assert not store.exists()
store.write("val".encode("utf-8"))
assert store.exists()
newVal = store.load()
assert newVal.decode("utf-8") == "val"
store.delete()
assert not store.exists()
@mock_s3
def test_S3Store_large():
client = boto3.client('s3')
client.create_bucket(Bucket='test_bucket', ACL='public-read-write')
store = S3Store('s3://test_bucket/a_path')
assert not store.exists()
# NOTE: The issue under test (currently) arises when too large a "part"
# NOTE: is sent to S3 for upload -- greater than its 5 GiB limit on any
# NOTE: single upload request.
#
# NOTE: Though s3fs uploads file parts as soon as its buffer reaches
# NOTE: 5+ MiB, it does not ensure that its buffer -- and resulting
# NOTE: upload "parts" -- remain under this limit (as the result of a
# NOTE: single "write()").
#
# NOTE: Therefore, until s3fs adds handling to ensure it never attempts
# NOTE: to upload such large payloads, we'll handle this in S3Store,
# NOTE: by chunking out writes to s3fs.
#
# NOTE: This is all not only to explain the raison d'etre of this test,
# NOTE: but also as context for the following warning: The
# NOTE: payload we'll attempt to write, below, is far less than 5 GiB!!
# NOTE: (Attempting to provision a 5 GiB string in RAM just for this
# NOTE: test would be an ENORMOUS drag on test runs, and a conceivable
# NOTE: disruption, depending on the test environment's resources.)
#
# NOTE: As such, this test *may* fall out of sync with either the code
# NOTE: that it means to test or with the reality of the S3 API -- even
# NOTE: to the point of self-invalidation. (But, this should do the
# NOTE: trick; and, we can always increase the payload size here, or
# NOTE: otherwise tweak configuration, as necessary.)
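# Hedged sketch of the chunking being exercised here (an assumption about how
# S3Store.write slices its payload before handing it to s3fs, not code from triage itself),
# consistent with the 5 MiB part bodies asserted below:
#
#   chunk_size = 5 * 2 ** 20
#   for start in range(0, len(payload), chunk_size):
#       s3_file.write(payload[start:start + chunk_size])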
one_mb = 2 ** 20
payload = b"0" * (10 * one_mb) # 10MiB text of all zeros
with CallSpy('botocore.client.BaseClient._make_api_call') as spy:
store.write(payload)
call_args = [call[0] for call in spy.calls]
call_methods = [args[1] for args in call_args]
assert call_methods == [
'CreateMultipartUpload',
'UploadPart',
'UploadPart',
'CompleteMultipartUpload',
]
upload_args = call_args[1]
upload_body = upload_args[2]['Body']
# NOTE: Why is this a BufferIO rather than the underlying buffer?!
# NOTE: (Would have expected the result of BufferIO.read() -- str.)
body_length = len(upload_body.getvalue())
assert body_length == 5 * one_mb
assert store.exists()
assert store.load() == payload
store.delete()
assert not store.exists()
def test_FSStore():
with tempfile.TemporaryDirectory() as tmpdir:
tmpfile = os.path.join(tmpdir, "tmpfile")
store = FSStore(tmpfile)
assert not store.exists()
store.write("val".encode("utf-8"))
assert store.exists()
newVal = store.load()
assert newVal.decode("utf-8") == "val"
store.delete()
assert not store.exists()
def test_ModelStorageEngine_nocaching(project_storage):
mse = ModelStorageEngine(project_storage)
mse.write('testobject', 'myhash')
assert mse.exists('myhash')
assert mse.load('myhash') == 'testobject'
assert 'myhash' not in mse.cache
def test_ModelStorageEngine_caching(project_storage):
mse = ModelStorageEngine(project_storage)
with mse.cache_models():
mse.write('testobject', 'myhash')
with mock.patch.object(mse, "_get_store") as get_store_mock:
assert mse.load('myhash') == 'testobject'
assert not get_store_mock.called
assert 'myhash' in mse.cache
# when cache_models goes out of scope the cache should be empty
assert 'myhash' not in mse.cache
DATA_DICT = OrderedDict(
[
("entity_id", [1, 2]),
("as_of_date", [datetime.date(2017, 1, 1), datetime.date(2017, 1, 1)]),
("k_feature", [0.5, 0.4]),
("m_feature", [0.4, 0.5]),
("label", [0, 1]),
]
)
METADATA = {"label_name": "label"}
def matrix_stores():
df = pd.DataFrame.from_dict(DATA_DICT).set_index(MatrixStore.indices)
with tempfile.TemporaryDirectory() as tmpdir:
project_storage = ProjectStorage(tmpdir)
tmpcsv = os.path.join(tmpdir, "df.csv.gz")
tmpyaml = os.path.join(tmpdir, "df.yaml")
with open(tmpyaml, "w") as outfile:
yaml.dump(METADATA, outfile, default_flow_style=False)
df.to_csv(tmpcsv, compression="gzip")
csv = CSVMatrixStore(project_storage, [], "df")
# first test with caching
with csv.cache():
yield csv
# with the caching out of scope they will be nuked
# and this last version will not have any cache
yield csv
def test_MatrixStore_empty():
for matrix_store in matrix_stores():
assert not matrix_store.empty
def test_MatrixStore_metadata():
for matrix_store in matrix_stores():
assert matrix_store.metadata == METADATA
def test_MatrixStore_columns():
for matrix_store in matrix_stores():
assert matrix_store.columns() == ["k_feature", "m_feature"]
def test_MatrixStore_resort_columns():
for matrix_store in matrix_stores():
result = matrix_store.matrix_with_sorted_columns(
["m_feature", "k_feature"]
).values.tolist()
expected = [[0.4, 0.5], [0.5, 0.4]]
assert_almost_equal(expected, result)
def test_MatrixStore_already_sorted_columns():
for matrix_store in matrix_stores():
result = matrix_store.matrix_with_sorted_columns(
["k_feature", "m_feature"]
).values.tolist()
expected = [[0.5, 0.4], [0.4, 0.5]]
assert_almost_equal(expected, result)
def test_MatrixStore_sorted_columns_subset():
with pytest.raises(ValueError):
for matrix_store in matrix_stores():
matrix_store.matrix_with_sorted_columns(["m_feature"]).values.tolist()
def test_MatrixStore_sorted_columns_superset():
with pytest.raises(ValueError):
for matrix_store in matrix_stores():
matrix_store.matrix_with_sorted_columns(
["k_feature", "l_feature", "m_feature"]
).values.tolist()
def test_MatrixStore_sorted_columns_mismatch():
with pytest.raises(ValueError):
for matrix_store in matrix_stores():
matrix_store.matrix_with_sorted_columns(
["k_feature", "l_feature"]
).values.tolist()
def test_MatrixStore_labels_idempotency():
for matrix_store in matrix_stores():
assert matrix_store.labels.tolist() == [0, 1]
assert matrix_store.labels.tolist() == [0, 1]
def test_MatrixStore_save():
data = {
"entity_id": [1, 2],
"as_of_date": [pd.Timestamp(2017, 1, 1), pd.Timestamp(2017, 1, 1)],
"feature_one": [0.5, 0.6],
"feature_two": [0.5, 0.6],
"label": [1, 0]
}
df = pd.DataFrame.from_dict(data)
labels = df.pop("label")
for matrix_store in matrix_stores():
matrix_store.metadata = METADATA
matrix_store.matrix_label_tuple = df, labels
matrix_store.save()
assert_frame_equal(
matrix_store.design_matrix,
df
)
def test_MatrixStore_caching():
for matrix_store in matrix_stores():
with matrix_store.cache():
matrix = matrix_store.design_matrix
with mock.patch.object(matrix_store, "_load") as load_mock:
assert_frame_equal(matrix_store.design_matrix, matrix)
assert not load_mock.called
def test_as_of_dates(project_storage):
data = {
"entity_id": [1, 2, 1, 2],
"feature_one": [0.5, 0.6, 0.5, 0.6],
"feature_two": [0.5, 0.6, 0.5, 0.6],
"as_of_date": [
pd.Timestamp(2016, 1, 1),
pd.Timestamp(2016, 1, 1),
pd.Timestamp(2017, 1, 1),
pd.Timestamp(2017, 1, 1),
],
"label": [1, 0, 1, 0]
}
df = pd.DataFrame.from_dict(data)
matrix_store = CSVMatrixStore(
project_storage,
[],
"test",
matrix=df,
metadata={"indices": ["entity_id", "as_of_date"], "label_name": "label"}
)
assert matrix_store.as_of_dates == [datetime.date(2016, 1, 1), datetime.date(2017, 1, 1)]
def test_s3_save():
with mock_s3():
client = boto3.client("s3")
client.create_bucket(Bucket="fake-matrix-bucket", ACL="public-read-write")
for example in matrix_stores():
if not isinstance(example, CSVMatrixStore):
continue
project_storage = ProjectStorage("s3://fake-matrix-bucket")
tosave = CSVMatrixStore(project_storage, [], "test")
tosave.metadata = example.metadata
tosave.matrix_label_tuple = example.matrix_label_tuple
tosave.save()
tocheck = CSVMatrixStore(project_storage, [], "test")
assert tocheck.metadata == example.metadata
assert tocheck.design_matrix.to_dict() == example.design_matrix.to_dict()
```
#### File: tests/collate_tests/test_from_obj.py
```python
from datetime import date
from itertools import product
import sqlalchemy
import testing.postgresql
from triage.component.collate import FromObj
from triage.database_reflection import table_exists
import pytest
events_data = [
# entity id, event_date, outcome
[1, date(2014, 1, 1), True],
[1, date(2014, 11, 10), False],
[1, date(2015, 1, 1), False],
[1, date(2015, 11, 10), True],
[2, date(2013, 6, 8), True],
[2, date(2014, 6, 8), False],
[3, date(2014, 3, 3), False],
[3, date(2014, 7, 24), False],
[3, date(2015, 3, 3), True],
[3, date(2015, 7, 24), False],
[4, date(2015, 12, 13), False],
[4, date(2016, 12, 13), True],
]
# distinct entity_id, event_date pairs
state_data = sorted(
product(
set([l[0] for l in events_data]),
set([l[1] for l in events_data] + [date(2016, 1, 1)]),
)
)
def test_materialized_from_obj_create():
materialized_query = FromObj(
from_obj='events where event_date < "2016-01-01"',
name="myquery",
knowledge_date_column='knowledge_date'
)
assert materialized_query.create_materialized_table_sql == 'create table myquery_from_obj as ' +\
'(select * from events where event_date < "2016-01-01")'
def test_materialized_from_obj_index():
materialized_query = FromObj(
from_obj='events where event_date < "2016-01-01"',
name="myquery",
knowledge_date_column='knowledge_date'
)
assert materialized_query.index_materialized_table_sql == 'create index on myquery_from_obj (knowledge_date)'
def test_materialized_from_obj_drop():
materialized_query = FromObj(
from_obj='events where event_date < "2016-01-01"',
name="myquery",
knowledge_date_column='knowledge_date'
)
assert materialized_query.drop_materialized_table_sql == 'drop table if exists myquery_from_obj'
@pytest.fixture(name="db_engine_with_events_table", scope='function')
def db_engine_with_events_table(db_engine):
db_engine.execute(
"create table events (entity_id int, event_date date, outcome bool)"
)
for event in events_data:
db_engine.execute("insert into events values (%s, %s, %s::bool)", event)
return db_engine
def test_materialized_from_obj_validate_needs_entity_id(db_engine_with_events_table):
from_obj = FromObj(
from_obj="(select event_date from events where event_date < '2016-01-01') from_obj",
name="myquery",
knowledge_date_column='event_date'
)
db_engine_with_events_table.execute(from_obj.create_materialized_table_sql)
with pytest.raises(ValueError):
from_obj.validate(db_engine_with_events_table)
def test_materialized_from_obj_validate_needs_knowledge_date(db_engine_with_events_table):
from_obj = FromObj(
from_obj="(select entity_id from events where event_date < '2016-01-01') from_obj",
name="myquery",
knowledge_date_column='event_date'
)
db_engine_with_events_table.execute(from_obj.create_materialized_table_sql)
with pytest.raises(ValueError):
from_obj.validate(db_engine_with_events_table)
def test_materialized_from_obj_validate_success(db_engine_with_events_table):
from_obj = FromObj(
from_obj="events where event_date < '2016-01-01'",
name="myquery",
knowledge_date_column='event_date'
)
db_engine_with_events_table.execute(from_obj.create_materialized_table_sql)
from_obj.validate(db_engine_with_events_table)
def test_materialized_from_obj_should_not_materialize_tbl():
from_obj = FromObj(from_obj="mytable1", name="events", knowledge_date_column="date")
assert not from_obj.should_materialize()
assert from_obj.table == "mytable1"
def test_materialized_from_obj_should_not_materialize_tbl_with_alias():
from_obj = FromObj(from_obj="mytable1 as mt1", name="events", knowledge_date_column="date")
assert not from_obj.should_materialize()
assert from_obj.table == "mytable1 as mt1"
def test_materialized_from_obj_should_not_materialize_join():
from_obj = FromObj(from_obj="mytable1 join entities using(entity_id)", name="events", knowledge_date_column="date")
assert not from_obj.should_materialize()
assert from_obj.table == "mytable1 join entities using(entity_id)"
def test_materialized_from_obj_should_materialize_subquery():
from_obj = FromObj(from_obj="(select entity_id, date from mytable1 join entities using(entity_id)) joined_events", name="events", knowledge_date_column="date")
assert from_obj.should_materialize()
assert from_obj.table == "events_from_obj"
def test_materialized_from_obj_should_handle_leading_whitespace():
q = """ (
SELECT entity_id, date
from mytable1
join entities using (entity_id)
) AS joined_events"""
from_obj = FromObj(from_obj=q, name="events", knowledge_date_column="date")
assert from_obj.should_materialize()
assert from_obj.table == "events_from_obj"
def test_materialized_from_obj_should_handle_keywords():
from_obj = FromObj(from_obj="events", name="events", knowledge_date_column="date")
assert not from_obj.should_materialize()
assert from_obj.table == "events"
def test_materialized_from_obj_maybe_materialize(db_engine_with_events_table):
from_obj = FromObj(
from_obj="events",
name="myquery",
knowledge_date_column='event_date'
)
from_obj.should_materialize = lambda: True
from_obj.maybe_materialize(db_engine_with_events_table)
assert table_exists(from_obj.table, db_engine_with_events_table)
```
#### File: tests/collate_tests/test_integration.py
```python
import testing.postgresql
from sqlalchemy import create_engine
from sqlalchemy.sql import expression as ex
from triage.component.collate import Aggregation, Aggregate
from triage.component.collate.spacetime import SpacetimeAggregation
from . import initialize_db
IMPUTE_RULES = {
"coltype": "aggregate",
"count": {"type": "mean"},
"mode": {"type": "mean"},
}
Postgresql = testing.postgresql.PostgresqlFactory(
cache_initialized_db=True, on_initialized=initialize_db.handler
)
def teardown_module():
Postgresql.clear_cache()
def test_engine():
with Postgresql() as postgresql:
engine = create_engine(postgresql.url())
((result,),) = engine.execute("SELECT COUNT(*) FROM food_inspections")
assert result == 966
def test_st_explicit_execute():
agg = Aggregate({"F": "results='Fail'"}, ["count"], IMPUTE_RULES)
mode = Aggregate("", "mode", IMPUTE_RULES, order="zip")
st = SpacetimeAggregation(
[agg, agg + agg, mode],
from_obj=ex.table("food_inspections"),
groups={"license": ex.column("license_no"), "zip": ex.column("zip")},
intervals={"license": ["1 year", "2 years", "all"], "zip": ["1 year"]},
dates=["2016-08-30", "2015-11-06"],
state_table="inspection_states",
state_group="license_no",
date_column="inspection_date",
prefix="food_inspections",
)
with Postgresql() as postgresql:
engine = create_engine(postgresql.url())
st.execute(engine.connect())
def test_st_lazy_execute():
agg = Aggregate("results='Fail'", ["count"], IMPUTE_RULES)
st = SpacetimeAggregation(
[agg],
from_obj="food_inspections",
groups=["license_no", "zip"],
intervals={"license_no": ["1 year", "2 years", "all"], "zip": ["1 year"]},
dates=["2016-08-30", "2015-11-06"],
state_table="inspection_states",
state_group="license_no",
date_column='"inspection_date"',
)
with Postgresql() as postgresql:
engine = create_engine(postgresql.url())
st.execute(engine.connect())
def test_st_execute_broadcast_intervals():
agg = Aggregate("results='Fail'", ["count"], IMPUTE_RULES)
st = SpacetimeAggregation(
[agg],
from_obj="food_inspections",
groups=["license_no", "zip"],
intervals=["1 year", "2 years", "all"],
dates=["2016-08-30", "2015-11-06"],
state_table="inspection_states",
state_group="license_no",
date_column='"inspection_date"',
)
with Postgresql() as postgresql:
engine = create_engine(postgresql.url())
st.execute(engine.connect())
def test_execute():
agg = Aggregate("results='Fail'", ["count"], IMPUTE_RULES)
st = Aggregation(
[agg],
from_obj="food_inspections",
groups=["license_no", "zip"],
state_table="all_licenses",
state_group="license_no",
)
with Postgresql() as postgresql:
engine = create_engine(postgresql.url())
st.execute(engine.connect())
def test_execute_schema_output_date_column():
agg = Aggregate("results='Fail'", ["count"], IMPUTE_RULES)
st = SpacetimeAggregation(
[agg],
from_obj="food_inspections",
groups=["license_no", "zip"],
intervals={"license_no": ["1 year", "2 years", "all"], "zip": ["1 year"]},
dates=["2016-08-30", "2015-11-06"],
state_table="inspection_states_diff_colname",
state_group="license_no",
schema="agg",
date_column='"inspection_date"',
output_date_column="aggregation_date",
)
with Postgresql() as postgresql:
engine = create_engine(postgresql.url())
st.execute(engine.connect())
```
#### File: tests/collate_tests/test_spacetime.py
```python
from datetime import date
from itertools import product
import pytest
import sqlalchemy
import testing.postgresql
from triage.component.collate import Aggregate, SpacetimeAggregation
events_data = [
# entity id, event_date, outcome
[1, date(2014, 1, 1), True],
[1, date(2014, 11, 10), False],
[1, date(2015, 1, 1), False],
[1, date(2015, 11, 10), True],
[2, date(2013, 6, 8), True],
[2, date(2014, 6, 8), False],
[3, date(2014, 3, 3), False],
[3, date(2014, 7, 24), False],
[3, date(2015, 3, 3), True],
[3, date(2015, 7, 24), False],
[4, date(2015, 12, 13), False],
[4, date(2016, 12, 13), True],
]
# distinct entity_id, event_date pairs
state_data = sorted(
list(
product(
set([l[0] for l in events_data]),
set([l[1] for l in events_data] + [date(2016, 1, 1)]),
)
)
)
def test_basic_spacetime():
with testing.postgresql.Postgresql() as psql:
engine = sqlalchemy.create_engine(psql.url())
engine.execute(
"create table events (entity_id int, event_date date, outcome bool)"
)
for event in events_data:
engine.execute("insert into events values (%s, %s, %s::bool)", event)
engine.execute("create table states (entity_id int, as_of_date date)")
for state in state_data:
engine.execute("insert into states values (%s, %s)", state)
agg = Aggregate(
"outcome::int",
["sum", "avg", "stddev"],
{
"coltype": "aggregate",
"avg": {"type": "mean"},
"sum": {"type": "constant", "value": 3},
"stddev": {"type": "constant", "value": 2},
},
)
st = SpacetimeAggregation(
aggregates=[agg],
from_obj="events",
groups=["entity_id"],
intervals=["1y", "2y", "all"],
dates=["2016-01-01", "2015-01-01"],
state_table="states",
state_group="entity_id",
date_column="event_date",
output_date_column="as_of_date",
)
st.execute(engine.connect())
r = engine.execute(
"select * from events_entity_id order by entity_id, as_of_date"
)
rows = [x for x in r]
assert rows[0]["entity_id"] == 1
assert rows[0]["as_of_date"] == date(2015, 1, 1)
assert rows[0]["events_entity_id_1y_outcome::int_sum"] == 1
assert rows[0]["events_entity_id_1y_outcome::int_avg"] == 0.5
assert rows[0]["events_entity_id_2y_outcome::int_sum"] == 1
assert rows[0]["events_entity_id_2y_outcome::int_avg"] == 0.5
assert rows[0]["events_entity_id_all_outcome::int_sum"] == 1
assert rows[0]["events_entity_id_all_outcome::int_avg"] == 0.5
assert rows[1]["entity_id"] == 1
assert rows[1]["as_of_date"] == date(2016, 1, 1)
assert rows[1]["events_entity_id_1y_outcome::int_sum"] == 1
assert rows[1]["events_entity_id_1y_outcome::int_avg"] == 0.5
assert rows[1]["events_entity_id_2y_outcome::int_sum"] == 2
assert rows[1]["events_entity_id_2y_outcome::int_avg"] == 0.5
assert rows[1]["events_entity_id_all_outcome::int_sum"] == 2
assert rows[1]["events_entity_id_all_outcome::int_avg"] == 0.5
assert rows[2]["entity_id"] == 2
assert rows[2]["as_of_date"] == date(2015, 1, 1)
assert rows[2]["events_entity_id_1y_outcome::int_sum"] == 0
assert rows[2]["events_entity_id_1y_outcome::int_avg"] == 0
assert rows[2]["events_entity_id_2y_outcome::int_sum"] == 1
assert rows[2]["events_entity_id_2y_outcome::int_avg"] == 0.5
assert rows[2]["events_entity_id_all_outcome::int_sum"] == 1
assert rows[2]["events_entity_id_all_outcome::int_avg"] == 0.5
assert rows[3]["entity_id"] == 2
assert rows[3]["as_of_date"] == date(2016, 1, 1)
assert rows[3]["events_entity_id_1y_outcome::int_sum"] is None
assert rows[3]["events_entity_id_1y_outcome::int_avg"] is None
assert rows[3]["events_entity_id_2y_outcome::int_sum"] == 0
assert rows[3]["events_entity_id_2y_outcome::int_avg"] == 0
assert rows[3]["events_entity_id_all_outcome::int_sum"] == 1
assert rows[3]["events_entity_id_all_outcome::int_avg"] == 0.5
assert rows[4]["entity_id"] == 3
assert rows[4]["as_of_date"] == date(2015, 1, 1)
assert rows[4]["events_entity_id_1y_outcome::int_sum"] == 0
assert rows[4]["events_entity_id_1y_outcome::int_avg"] == 0
assert rows[4]["events_entity_id_2y_outcome::int_sum"] == 0
assert rows[4]["events_entity_id_2y_outcome::int_avg"] == 0
assert rows[4]["events_entity_id_all_outcome::int_sum"] == 0
assert rows[4]["events_entity_id_all_outcome::int_avg"] == 0
assert rows[5]["entity_id"] == 3
assert rows[5]["as_of_date"] == date(2016, 1, 1)
assert rows[5]["events_entity_id_1y_outcome::int_sum"] == 1
assert rows[5]["events_entity_id_1y_outcome::int_avg"] == 0.5
assert rows[5]["events_entity_id_2y_outcome::int_sum"] == 1
assert rows[5]["events_entity_id_2y_outcome::int_avg"] == 0.25
assert rows[5]["events_entity_id_all_outcome::int_sum"] == 1
assert rows[5]["events_entity_id_all_outcome::int_avg"] == 0.25
assert rows[6]["entity_id"] == 4
# rows[6]['date'] == date(2015, 1, 1) is skipped due to no data!
assert rows[6]["as_of_date"] == date(2016, 1, 1)
assert rows[6]["events_entity_id_1y_outcome::int_sum"] == 0
assert rows[6]["events_entity_id_1y_outcome::int_avg"] == 0
assert rows[6]["events_entity_id_2y_outcome::int_sum"] == 0
assert rows[6]["events_entity_id_2y_outcome::int_avg"] == 0
assert rows[6]["events_entity_id_all_outcome::int_sum"] == 0
assert rows[6]["events_entity_id_all_outcome::int_avg"] == 0
assert len(rows) == 7
# check some imputation results
r = engine.execute(
"select * from events_aggregation_imputed order by entity_id, as_of_date"
)
rows = [x for x in r]
assert rows[6]["entity_id"] == 4
assert rows[6]["as_of_date"] == date(2015, 1, 1)
assert rows[6]["events_entity_id_1y_outcome::int_sum"] == 3
assert rows[6]["events_entity_id_1y_outcome::int_imp"] == 1
assert rows[6]["events_entity_id_1y_outcome::int_stddev"] == 2
assert rows[6]["events_entity_id_1y_outcome::int_stddev_imp"] == 1
assert (
round(float(rows[6]["events_entity_id_1y_outcome::int_avg"]), 4) == 0.1667
)
assert rows[6]["events_entity_id_2y_outcome::int_sum"] == 3
assert rows[6]["events_entity_id_2y_outcome::int_imp"] == 1
assert rows[6]["events_entity_id_2y_outcome::int_stddev"] == 2
assert rows[6]["events_entity_id_2y_outcome::int_stddev_imp"] == 1
assert (
round(float(rows[6]["events_entity_id_2y_outcome::int_avg"]), 4) == 0.3333
)
assert rows[6]["events_entity_id_all_outcome::int_sum"] == 3
assert rows[6]["events_entity_id_all_outcome::int_imp"] == 1
assert rows[6]["events_entity_id_all_outcome::int_stddev"] == 2
assert rows[6]["events_entity_id_all_outcome::int_stddev_imp"] == 1
assert (
round(float(rows[6]["events_entity_id_all_outcome::int_avg"]), 4) == 0.3333
)
assert rows[6]["events_entity_id_all_outcome::int_imp"] == 1
assert rows[7]["entity_id"] == 4
assert rows[7]["as_of_date"] == date(2016, 1, 1)
assert rows[7]["events_entity_id_1y_outcome::int_sum"] == 0
assert rows[7]["events_entity_id_1y_outcome::int_imp"] == 0
assert rows[7]["events_entity_id_1y_outcome::int_avg"] == 0
assert rows[7]["events_entity_id_1y_outcome::int_stddev"] == 2
assert rows[7]["events_entity_id_1y_outcome::int_stddev_imp"] == 1
assert rows[7]["events_entity_id_2y_outcome::int_sum"] == 0
assert rows[7]["events_entity_id_2y_outcome::int_imp"] == 0
assert rows[7]["events_entity_id_2y_outcome::int_avg"] == 0
assert rows[7]["events_entity_id_2y_outcome::int_stddev"] == 2
assert rows[7]["events_entity_id_2y_outcome::int_stddev_imp"] == 1
assert rows[7]["events_entity_id_all_outcome::int_sum"] == 0
assert rows[7]["events_entity_id_all_outcome::int_imp"] == 0
assert rows[7]["events_entity_id_all_outcome::int_avg"] == 0
assert rows[7]["events_entity_id_all_outcome::int_stddev"] == 2
assert rows[7]["events_entity_id_all_outcome::int_stddev_imp"] == 1
assert len(rows) == 8
def test_input_min_date():
with testing.postgresql.Postgresql() as psql:
engine = sqlalchemy.create_engine(psql.url())
engine.execute("create table events (entity_id int, date date, outcome bool)")
for event in events_data:
engine.execute("insert into events values (%s, %s, %s::bool)", event)
engine.execute("create table states (entity_id int, date date)")
for state in state_data:
engine.execute("insert into states values (%s, %s)", state)
agg = Aggregate(
"outcome::int",
["sum", "avg"],
{
"coltype": "aggregate",
"avg": {"type": "mean"},
"sum": {"type": "constant", "value": 3},
"max": {"type": "zero"},
},
)
st = SpacetimeAggregation(
aggregates=[agg],
from_obj="events",
groups=["entity_id"],
intervals=["all"],
dates=["2016-01-01"],
state_table="states",
state_group="entity_id",
date_column='"date"',
input_min_date="2015-11-10",
)
st.execute(engine.connect())
r = engine.execute("select * from events_entity_id order by entity_id")
rows = [x for x in r]
assert rows[0]["entity_id"] == 1
assert rows[0]["date"] == date(2016, 1, 1)
assert rows[0]["events_entity_id_all_outcome::int_sum"] == 1
assert rows[0]["events_entity_id_all_outcome::int_avg"] == 1
assert rows[1]["entity_id"] == 4
assert rows[1]["date"] == date(2016, 1, 1)
assert rows[1]["events_entity_id_all_outcome::int_sum"] == 0
assert rows[1]["events_entity_id_all_outcome::int_avg"] == 0
assert len(rows) == 2
st = SpacetimeAggregation(
aggregates=[agg],
from_obj="events",
groups=["entity_id"],
intervals=["1y", "all"],
dates=["2016-01-01", "2015-01-01"],
state_table="states",
state_group="entity_id",
date_column='"date"',
input_min_date="2014-11-10",
)
with pytest.raises(ValueError):
st.validate(engine.connect())
with pytest.raises(ValueError):
st.execute(engine.connect())
def test_join_with_cohort_table(db_engine):
# if we specify joining with the cohort table
# only entity_id/date pairs in the cohort table should show up
db_engine.execute("create table events (entity_id int, date date, outcome bool)")
for event in events_data:
db_engine.execute("insert into events values (%s, %s, %s::bool)", event)
db_engine.execute("create table cohort (entity_id int, date date)")
# use the states list from above except only include entities 1 and 2 in the cohort
smaller_cohort = sorted(
product(
set([l[0] for l in events_data if l[0] == 1 or l[0] == 2]),
set([l[1] for l in events_data] + [date(2016, 1, 1)]),
)
)
for state in smaller_cohort:
db_engine.execute("insert into cohort values (%s, %s)", state)
# create our test aggregation with the important 'join_with_cohort_table' flag
agg = Aggregate(
"outcome::int",
["sum", "avg"],
{
"coltype": "aggregate",
"avg": {"type": "mean"},
"sum": {"type": "constant", "value": 3},
"max": {"type": "zero"},
},
)
st = SpacetimeAggregation(
aggregates=[agg],
from_obj="events",
groups=["entity_id"],
intervals=["all"],
dates=["2016-01-01", "2015-01-01"],
state_table="cohort",
state_group="entity_id",
date_column='"date"',
join_with_cohort_table=True,
)
st.execute(db_engine.connect())
r = db_engine.execute("select * from events_entity_id order by entity_id, date")
rows = [x for x in r]
# these rows should be similar to the rows in the basic spacetime test,
# except only the rows for entities 1 and 2 are present
assert len(rows) == 4
assert rows[0]["entity_id"] == 1
assert rows[0]["date"] == date(2015, 1, 1)
assert rows[0]["events_entity_id_all_outcome::int_sum"] == 1
assert rows[0]["events_entity_id_all_outcome::int_avg"] == 0.5
assert rows[1]["entity_id"] == 1
assert rows[1]["date"] == date(2016, 1, 1)
assert rows[1]["events_entity_id_all_outcome::int_sum"] == 2
assert rows[1]["events_entity_id_all_outcome::int_avg"] == 0.5
assert rows[2]["entity_id"] == 2
assert rows[2]["date"] == date(2015, 1, 1)
assert rows[2]["events_entity_id_all_outcome::int_sum"] == 1
assert rows[2]["events_entity_id_all_outcome::int_avg"] == 0.5
assert rows[3]["entity_id"] == 2
assert rows[3]["date"] == date(2016, 1, 1)
assert rows[3]["events_entity_id_all_outcome::int_sum"] == 1
assert rows[3]["events_entity_id_all_outcome::int_avg"] == 0.5
```
#### File: tests/postmodeling_tests/test_model_group_evaluator.py
```python
from triage.component.postmodeling.contrast.model_group_evaluator import ModelGroupEvaluator
import pandas as pd
import pytest
from tests.utils import assert_plot_figures_added
@pytest.fixture(scope="module")
def model_group_evaluator(finished_experiment):
return ModelGroupEvaluator((1,1), finished_experiment.db_engine)
def test_ModelGroupEvaluator_metadata(model_group_evaluator):
assert isinstance(model_group_evaluator.metadata, list)
assert len(model_group_evaluator.metadata) == 2 # 2 models expected for a model_group from basic experiment
for row in model_group_evaluator.metadata:
assert isinstance(row, dict)
def test_ModelGroupEvaluator_model_type(model_group_evaluator):
assert model_group_evaluator.model_type[0] == 'sklearn.tree.DecisionTreeClassifier'
def test_ModelGroupEvaluator_predictions(model_group_evaluator):
assert isinstance(model_group_evaluator.predictions, pd.DataFrame)
def test_ModelGroupEvaluator_feature_importances(model_group_evaluator):
assert isinstance(model_group_evaluator.feature_importances, pd.DataFrame)
def test_ModelGroupEvaluator_metrics(model_group_evaluator):
assert isinstance(model_group_evaluator.metrics, pd.DataFrame)
def test_ModelGroupEvaluator_feature_groups(model_group_evaluator):
assert isinstance(model_group_evaluator.feature_groups, pd.DataFrame)
def test_ModelGroupEvaluator_same_time_models(model_group_evaluator):
assert isinstance(model_group_evaluator.same_time_models, pd.DataFrame)
def test_ModelGroupEvaluator_plot_prec_across_time(model_group_evaluator):
with assert_plot_figures_added():
model_group_evaluator.plot_prec_across_time()
def test_ModelGroupEvaluator_feature_loi_loo(model_group_evaluator):
with pytest.raises(IndexError):
model_group_evaluator.feature_loi_loo()
def test_ModelGroupEvaluator_plot_ranked_correlation_preds(model_group_evaluator):
with assert_plot_figures_added():
model_group_evaluator.plot_ranked_correlation_preds(param_type='rank_abs', param=10, top_n_features=10)
def test_ModelGroupEvaluator_plot_ranked_correlation_features(model_group_evaluator):
with assert_plot_figures_added():
model_group_evaluator.plot_ranked_correlation_features(param_type='rank_abs', param=10, top_n_features=10)
def test_ModelGroupEvaluator_plot_jaccard_preds(model_group_evaluator):
with assert_plot_figures_added():
model_group_evaluator.plot_jaccard_preds(param_type='rank_abs', param=10)
def test_ModelGroupEvaluator_plot_jaccard_features(model_group_evaluator):
with assert_plot_figures_added():
model_group_evaluator.plot_jaccard_features()
def test_ModelGroupEvaluator_plot_preds_comparison(model_group_evaluator):
with assert_plot_figures_added():
model_group_evaluator.plot_preds_comparison(param_type='rank_abs', param=10)
```
#### File: tests/results_tests/test_valid_schema.py
```python
import testing.postgresql
from sqlalchemy import create_engine
from triage.component.results_schema import Base
def test_full_schema():
with testing.postgresql.Postgresql() as postgres:
engine = create_engine(postgres.url())
Base.metadata.create_all(bind=engine)
```
#### File: component/audition/rules_maker.py
```python
class BaseRules:
def __init__(self):
self._metric = None
self._parameter = None
self.shared_parameters = []
self.selection_rules = []
def _does_parameters_exist(self, params_dict):
return params_dict in self.shared_parameters
def _does_selection_rule_exisit(self, rule_dict):
return rule_dict in self.selection_rules
def _append(self, params_dict, rule_dict):
if not self._does_parameters_exist(params_dict):
self.shared_parameters.append(params_dict)
if not self._does_selection_rule_exisit(rule_dict):
self.selection_rules.append(rule_dict)
def create(self):
return [
{
"shared_parameters": self.shared_parameters,
"selection_rules": self.selection_rules,
}
]
class SimpleRuleMaker(BaseRules):
"""
Holds methods that generate parameter grids for selection rules that
evaluate the performance of a model group in terms of a single metric.
These include:
- [best_current_value][triage.component.audition.selection_rules.best_current_value]
- [best_average_value][triage.component.audition.selection_rules.best_average_value]
- [lowest_metric_variance][triage.component.audition.selection_rules.lowest_metric_variance]
- [most_frequent_best_dist][triage.component.audition.selection_rules.most_frequent_best_dist]
- [best_avg_var_penalized][triage.component.audition.selection_rules.best_avg_var_penalized]
- [best_avg_recency_weight][triage.component.audition.selection_rules.best_avg_recency_weight]
"""
def add_rule_best_current_value(self, metric=None, parameter=None, n=1):
if metric is not None:
self._metric = metric
if parameter is not None:
self._parameter = parameter
params_dict = {"metric": self._metric, "parameter": self._parameter}
rule_dict = {"name": "best_current_value", "n": n}
self._append(params_dict, rule_dict)
return self.create()
def add_rule_best_average_value(self, metric=None, parameter=None, n=1):
if metric is not None:
self._metric = metric
if parameter is not None:
self._parameter = parameter
params_dict = {"metric": self._metric, "parameter": self._parameter}
rule_dict = {"name": "best_average_value", "n": n}
self._append(params_dict, rule_dict)
return self.create()
def add_rule_lowest_metric_variance(self, metric=None, parameter=None, n=1):
if metric is not None:
self._metric = metric
if parameter is not None:
self._parameter = parameter
params_dict = {"metric": self._metric, "parameter": self._parameter}
rule_dict = {"name": "lowest_metric_variance", "n": n}
self._append(params_dict, rule_dict)
return self.create()
def add_rule_most_frequent_best_dist(
self,
metric=None,
parameter=None,
n=1,
dist_from_best_case=[0.01, 0.05, 0.1, 0.15],
):
if metric is not None:
self._metric = metric
if parameter is not None:
self._parameter = parameter
params_dict = {"metric": self._metric, "parameter": self._parameter}
rule_dict = {
"name": "most_frequent_best_dist",
"dist_from_best_case": dist_from_best_case,
"n": n,
}
self._append(params_dict, rule_dict)
return self.create()
def add_rule_best_avg_recency_weight(
self,
metric=None,
parameter=None,
n=1,
curr_weight=[1.5, 2.0, 5.0],
decay_type=["linear"],
):
if metric is not None:
self._metric = metric
if parameter is not None:
self._parameter = parameter
params_dict = {"metric": metric, "parameter": parameter}
rule_dict = {
"name": "best_avg_recency_weight",
"curr_weight": curr_weight,
"decay_type": decay_type,
"n": n,
}
self._append(params_dict, rule_dict)
return self.create()
def add_rule_best_avg_var_penalized(
self, metric=None, parameter=None, stdev_penalty=0.5, n=1
):
if metric is not None:
self._metric = metric
if parameter is not None:
self._parameter = parameter
params_dict = {"metric": metric, "parameter": parameter}
rule_dict = {
"name": "best_avg_var_penalized",
"stdev_penalty": stdev_penalty,
"n": n,
}
self._append(params_dict, rule_dict)
return self.create()
class RandomGroupRuleMaker(BaseRules):
"""
The `RandomGroupRuleMaker` class generates a rule that randomly selects `n`
model groups for each train set.
Unlike the other two RuleMaker classes, it generates its selection rule spec
on `__init__`
"""
def __init__(self, n=1):
self.shared_parameters = [{}]
self.selection_rules = [{"name": "random_model_group", "n": n}]
class TwoMetricsRuleMaker(BaseRules):
"""
The `TwoMetricsRuleMaker` class allows for the specification of rules that
evaluate a model group's performance in terms of two metrics. It currently
supports one rule:
- [best_average_two_metrics][triage.component.audition.selection_rules.best_average_two_metrics]
"""
def add_rule_best_average_two_metrics(
self,
metric1="precision@",
parameter1="100_abs",
metric2="recall@",
parameter2="300_abs",
metric1_weight=[0.5],
n=1,
):
params_dict = {"metric1": metric1, "parameter1": parameter1}
rule_dict = {
"name": "best_average_two_metrics",
"metric1_weight": metric1_weight,
"metric2": [metric2],
"parameter2": [parameter2],
"n": n,
}
self._append(params_dict, rule_dict)
def create_selection_grid(*args):
return list(map(lambda r: r.create()[0], args))
```
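
For context, here is a minimal usage sketch of the rule makers defined above. The import path is inferred from the file location (`component/audition/rules_maker.py`) and is an assumption; the expected output in the comments is derived from the `_append`/`create` logic shown in `BaseRules`.
```python
# Illustrative usage only; the import path below is inferred from the file location.
from triage.component.audition.rules_maker import (
    SimpleRuleMaker,
    RandomGroupRuleMaker,
    create_selection_grid,
)

simple = SimpleRuleMaker()
# The metric/parameter pair is cached on the first call and reused by later rules.
simple.add_rule_best_current_value(metric="precision@", parameter="100_abs", n=3)
simple.add_rule_best_average_value(n=3)

random_group = RandomGroupRuleMaker(n=2)

# Combine both makers into one selection-rule grid for downstream Audition use.
grid = create_selection_grid(simple, random_group)
# grid ==
# [{'shared_parameters': [{'metric': 'precision@', 'parameter': '100_abs'}],
#   'selection_rules': [{'name': 'best_current_value', 'n': 3},
#                       {'name': 'best_average_value', 'n': 3}]},
#  {'shared_parameters': [{}],
#   'selection_rules': [{'name': 'random_model_group', 'n': 2}]}]
```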
#### File: component/audition/thresholding.py
```python
import verboselogs, logging
logger = verboselogs.VerboseLogger(__name__)
from .metric_directionality import is_better_operator
import pandas as pd
from datetime import datetime
def _past_threshold(df, metric_filter):
return df[
is_better_operator(metric_filter["metric"])(
df["raw_value"], metric_filter["threshold_value"]
)
]
def _close_to_best_case(df, metric_filter):
return df[df["dist_from_best_case"] < metric_filter["max_from_best"]]
def _of_metric(df, metric_filter):
return df[
(df["metric"] == metric_filter["metric"])
& (df["parameter"] == metric_filter["parameter"])
]
def model_groups_filter(
train_end_times, initial_model_group_ids, models_table, db_engine
):
"""Filter the models which related train end times don't contain the user-input
train_end_times
Before creating the distance table, we want to make sure the model_group_ids
and train_end_times are reasonable:
    1. the input train_end_times should exist in the database
2. every model group should have the same train_end_times
to prevent incomparable model groups from being populated to the distance table.
Args:
train_end_times (list) The set of train_end_times to consider during
the iteration
initial_model_group_ids (list) The initial list of model group ids to
narrow down
        models_table (string) The name of the models table
db_engine (sqlalchemy.engine) A database engine with access to results
schema of a completed modeling run
"""
if isinstance(train_end_times, str) or not hasattr(train_end_times, "__iter__"):
raise TypeError("train_end_times should be a list of str or timestamp")
if not bool(train_end_times):
raise ValueError("train_end_times shouldn't be an empty list")
end_times_sql = "ARRAY[{}]".format(
", ".join(
"'{}'".format(
end_time.strftime("%Y-%m-%d")
if isinstance(end_time, datetime)
else end_time
)
for end_time in train_end_times
)
)
logger.debug("Checking if all model groups have the same train end times")
logger.debug(f"Found {len(initial_model_group_ids)} total model groups")
query = f"""
SELECT model_group_id
FROM (
SELECT
model_group_id,
array_agg(distinct train_end_time::date::text) as train_end_time_list
FROM triage_metadata.{models_table}
WHERE model_group_id in ({','.join([str(m) for m in initial_model_group_ids])})
GROUP BY model_group_id
) as t
WHERE train_end_time_list @> {end_times_sql}
"""
model_group_ids = {row['model_group_id'] for row in db_engine.execute(query)}
if not model_group_ids:
raise ValueError("The train_end_times passed in is not a subset of train end times of any model group. Please double check that all the model groups have the specified train end times.")
dropped_model_groups = len(initial_model_group_ids) - len(model_group_ids)
logger.debug(
f"Dropped {dropped_model_groups} model groups which don't match the train end times"
)
logger.debug(f"Found {len(model_group_ids)} total model groups past the checker")
return model_group_ids
class ModelGroupThresholder:
def __init__(
self,
distance_from_best_table,
train_end_times,
initial_model_group_ids,
initial_metric_filters,
):
"""Iteratively narrow down a list of model groups by changing thresholds
for max below best model and minimum absolute value with respect to
different metrics
Args:
distance_from_best_table (audition.DistanceFromBestTable)
A pre-populated distance-from-best database table
train_end_times (list) The set of train end times to consider during iteration
initial_model_group_ids (list) The initial list of model group ids to
narrow down
"""
self.distance_from_best_table = distance_from_best_table
self.train_end_times = train_end_times
self._initial_model_group_ids = initial_model_group_ids
self._metric_filters = initial_metric_filters
def _filter_model_groups(self, df, filter_func):
"""Filter model groups by ensuring each of their metrics meets the given
filtering function.
Args:
df (pandas.DataFrame): A set of rows in the format given by
audition.DistanceFromBestTable.as_dataframe
filter_func (function): A function that takes a dataframe and a
metric filter and returns a subset of the dataframe
Returns: (set) The model group ids that pass filtering
"""
passing = set(self._initial_model_group_ids)
for metric_filter in self._metric_filters:
passing &= set(
filter_func(_of_metric(df, metric_filter), metric_filter)[
"model_group_id"
]
)
return passing
def model_groups_past_threshold(self, df):
"""Return the model groups in the dataframe that are above the
currently-configured minimum value
Args:
df (pandas.DataFrame): A set of rows in the format given by
audition.DistanceFromBestTable.as_dataframe
Returns: (set) The model group ids above the minimum value for each metric
"""
return self._filter_model_groups(df, _past_threshold)
def model_groups_close_to_best_case(self, df):
"""Return the model groups in the dataframe that are close enough to
the best value according to current metric filter configuration
Args:
df (pandas.DataFrame): A set of rows in the format given by
audition.DistanceFromBestTable.as_dataframe
Returns: (set) The model group ids close to the best value for each metric
"""
return self._filter_model_groups(df, _close_to_best_case)
def model_groups_passing_rules(self):
"""Return the model groups passing both the close-to-best and
above-min checks based on the current filters.
Works by ensuring that a model group passes the close-to-best check
        for at least one train end time, and that it passes the above-min
check for *all* train end times. Model groups must pass both
of these checks.
Returns: (set) The passing model group ids
"""
past_threshold_model_groups = set(self._initial_model_group_ids)
close_to_best_model_groups = set()
for train_end_time in self.train_end_times:
df_as_of = self.distance_from_best_table.dataframe_as_of(
model_group_ids=self._initial_model_group_ids,
train_end_time=train_end_time,
)
close_to_best = self.model_groups_close_to_best_case(df_as_of)
logger.debug(
f"Found {len(close_to_best)} model groups close to best for {train_end_time}",
)
close_to_best_model_groups |= close_to_best
past_threshold = self.model_groups_past_threshold(df_as_of)
logger.debug(
f"Found {len(past_threshold)} model groups above min for {train_end_time}",
)
past_threshold_model_groups &= past_threshold
total_model_groups = close_to_best_model_groups & past_threshold_model_groups
logger.debug(
"Found {len(total_model_groups)} total model groups past threshold"
)
return total_model_groups
def update_filters(self, new_metric_filters):
"""Update the saved metric filters.
Args: new_metric_filters (list) A list of metrics to filter model
groups on, and how to filter them. Each entry should be a dict
with the keys:
metric (string) -- model evaluation metric, such as 'precision@'
parameter (string) -- model evaluation metric parameter,
such as '300_abs'
max_below_best (float) The maximum value that the given metric
can be below the best for a given train end time
min_value (float) The minimum value that the given metric can be
"""
if new_metric_filters != self._metric_filters:
self._metric_filters = new_metric_filters
@property
def model_group_ids(self):
return self.model_groups_passing_rules()
```
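
One note on the filter format: the `update_filters` docstring above lists the keys `max_below_best` and `min_value`, but the helper functions `_past_threshold` and `_close_to_best_case` actually read `threshold_value` and `max_from_best`. Below is a minimal sketch of filters in the shape those helpers consume; the metric names and numbers are illustrative only.
```python
# Illustrative metric filters for ModelGroupThresholder; the values are made up.
# Keys follow what _of_metric, _past_threshold and _close_to_best_case read above.
metric_filters = [
    {
        "metric": "precision@",
        "parameter": "300_abs",
        "threshold_value": 0.4,  # minimum acceptable raw_value (_past_threshold)
        "max_from_best": 0.05,   # allowed dist_from_best_case (_close_to_best_case)
    },
    {
        "metric": "recall@",
        "parameter": "300_abs",
        "threshold_value": 0.2,
        "max_from_best": 0.10,
    },
]
```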
#### File: catwalk/baselines/thresholders.py
```python
import numpy as np
import pandas as pd
from six import string_types
from triage.component.catwalk.exceptions import BaselineFeatureNotInMatrix
OPERATOR_METHODS = {">": "gt", ">=": "ge", "<": "lt", "<=": "le", "==": "eq"}
REQUIRED_KEYS = frozenset(["feature_name", "operator", "threshold"])
def get_operator_method(operator_string):
""" Convert the user-passed operator into the the name of the apprpriate
pandas method.
"""
try:
operator_method = OPERATOR_METHODS[operator_string]
except KeyError:
raise ValueError(
(
"Operator '{operator}' extracted from rule is not a "
"supported operator ({supported_operators}).".format(
operator=operator_string,
supported_operators=OPERATOR_METHODS.keys(),
)
)
)
return operator_method
class SimpleThresholder:
""" The simple thresholder applies a set of predetermined logical rules to a
test matrix to classify entities. By default, it will classify entities as 1
if they satisfy any of the rules. When 'and' is set as the logical_operator,
it will classify entities as 1 only if they pass *all* of the rules.
Rules are passed as either strings in the format 'x1 > 5' or dictionaries in
the format {feature_name: 'x1', operator: '>', threshold: 5}. The
feature_name, operator, and threshold keys are required. Eventually, this
class may be abstracted into a BaseThreshold class and more complicated
thresholders could be built around new keys in the dictionaries (e.g., by
specifying scores that could be applied (and possibly summed) to entities
satisfying rules) or by an alternative dictionary format that specifies
more complicated structures for applying rules (for example:
{
or: [
{or: [{}, {}]},
{and: [{}, {}]}
]
}
where rules and operators that combine them can be nested).
"""
def __init__(self, rules, logical_operator="or"):
self.rules = rules
self.logical_operator = logical_operator
self.feature_importances_ = None
self.rule_combination_method = self.lookup_rule_combination_method(
logical_operator
)
@property
def rules(self):
return vars(self)["rules"]
@rules.setter
def rules(self, rules):
""" Validates the rules passed by the user and converts them to the
internal representation. Can be used to validate rules before running an
experiment.
1. If rules are not a list, make them a list.
2. If rules are strings, convert them to dictionaries.
3. If dictionaries or strings are not in a supported format, raise
helpful exceptions.
"""
if not isinstance(rules, list):
rules = [rules]
converted_rules = []
for rule in rules:
if isinstance(rule, string_types):
converted_rules.append(self._convert_string_rule_to_dict(rule))
else:
if not isinstance(rule, dict):
raise ValueError(
(
'Rule "{rule}" is not of a supported type (string or '
"dict).".format(rule=rule)
)
)
if not rule.keys() >= REQUIRED_KEYS:
raise ValueError(
(
'Rule "{rule}" missing one or more required keys '
"({required_keys}).".format(
rule=rule, required_keys=REQUIRED_KEYS
)
)
)
rule["operator"] = get_operator_method(rule["operator"])
converted_rules.append(rule)
vars(self)["rules"] = converted_rules
@property
def all_feature_names(self):
return [rule["feature_name"] for rule in self.rules]
def lookup_rule_combination_method(self, logical_operator):
""" Convert 'and' to 'all' and 'or' to 'any' for interacting with
pandas DataFrames.
"""
rule_combination_method_lookup = {"or": "any", "and": "all"}
return rule_combination_method_lookup[logical_operator]
def _convert_string_rule_to_dict(self, rule):
""" Converts a string rule into a dict, raising helpful exceptions if it
cannot be parsed.
"""
components = rule.rsplit(" ", 2)
if len(components) < 3:
raise ValueError(
(
'{required_keys} could not be parsed from rule "{rule}". Are '
"they all present and separated by spaces?".format(
required_keys=REQUIRED_KEYS, rule=rule
)
)
)
try:
threshold = int(components[2])
except ValueError:
raise ValueError(
(
'Threshold "{threshold}" parsed from rule "{rule}" is not an '
"int.".format(threshold=components[2], rule=rule)
)
)
operator = get_operator_method(components[1])
return {
"feature_name": components[0],
"operator": operator,
"threshold": threshold,
}
def _set_feature_importances_(self, x):
""" Assigns feature importances following the rule: 1 for the features
we are thresholding on, 0 for all other features.
"""
feature_importances = [0] * len(x.columns)
for feature_name in self.all_feature_names:
try:
position = x.columns.get_loc(feature_name)
except KeyError:
raise BaselineFeatureNotInMatrix(
(
"Rules refer to a feature ({feature_name}) not included in "
"the training matrix!".format(feature_name=feature_name)
)
)
feature_importances[position] = 1
self.feature_importances_ = np.array(feature_importances)
def fit(self, x, y):
""" Set feature importances and return self.
"""
self._set_feature_importances_(x)
return self
def predict_proba(self, x):
""" Assign 1 for entities that meet the rules and 0 for those that do not.
"""
rule_evaluations_list = []
for rule in self.rules:
rule_evaluations_list.append(
getattr(x[rule["feature_name"]], rule["operator"])(rule["threshold"])
)
rule_evaluations_dataframe = pd.concat(rule_evaluations_list, axis=1)
scores = getattr(rule_evaluations_dataframe, self.rule_combination_method)(
axis=1
)
scores = list(scores.astype(int))
# format it like sklearn output and return
return np.array([scores, scores]).transpose()
```
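
For context, a minimal usage sketch of `SimpleThresholder`. The import path is inferred from the file location, and the feature names, rules, and data are made up for illustration; the values in the comments follow the `fit`/`predict_proba` logic shown above.
```python
# Illustrative usage only; feature names and thresholds are made up.
import pandas as pd
from triage.component.catwalk.baselines.thresholders import SimpleThresholder

clf = SimpleThresholder(
    ["inspections_past_year > 3", "violations_all_time >= 10"],
    logical_operator="or",
)

X = pd.DataFrame({
    "inspections_past_year": [0, 4, 2],
    "violations_all_time": [12, 1, 3],
})
y = [0, 1, 0]  # labels are only needed to satisfy the sklearn-style signature

clf.fit(X, y)
clf.feature_importances_  # array([1, 1]): both columns appear in a rule

clf.predict_proba(X)
# array([[1, 1],
#        [1, 1],
#        [0, 0]])  both columns carry the same 0/1 score (see predict_proba above)
```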
#### File: alembic/versions/0bca1ba9706e_add_matrix_uuid_to_eval.py
```python
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('evaluations', sa.Column('matrix_uuid', sa.Text(), nullable=True), schema='test_results')
op.create_foreign_key(None, 'evaluations', 'matrices', ['matrix_uuid'], ['matrix_uuid'], source_schema='test_results', referent_schema='model_metadata')
op.add_column('evaluations', sa.Column('matrix_uuid', sa.Text(), nullable=True), schema='train_results')
op.create_foreign_key(None, 'evaluations', 'matrices', ['matrix_uuid'], ['matrix_uuid'], source_schema='train_results', referent_schema='model_metadata')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'evaluations', schema='train_results', type_='foreignkey')
op.drop_column('evaluations', 'matrix_uuid', schema='train_results')
op.drop_constraint(None, 'evaluations', schema='test_results', type_='foreignkey')
op.drop_column('evaluations', 'matrix_uuid', schema='test_results')
# ### end Alembic commands ###
```
#### File: alembic/versions/38f37d013686_associate_experiments_with_models_and_.py
```python
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = 'd<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('experiment_matrices',
sa.Column('experiment_hash', sa.String(), nullable=False),
sa.Column('matrix_uuid', sa.String(), nullable=False),
sa.ForeignKeyConstraint(['experiment_hash'], ['model_metadata.experiments.experiment_hash'], ),
sa.PrimaryKeyConstraint('experiment_hash', 'matrix_uuid'),
schema='model_metadata'
)
op.create_table('experiment_models',
sa.Column('experiment_hash', sa.String(), nullable=False),
sa.Column('model_hash', sa.String(), nullable=False),
sa.ForeignKeyConstraint(['experiment_hash'], ['model_metadata.experiments.experiment_hash'], ),
sa.PrimaryKeyConstraint('experiment_hash', 'model_hash'),
schema='model_metadata'
)
op.add_column('matrices', sa.Column('built_by_experiment', sa.String(), nullable=True), schema='model_metadata')
op.create_foreign_key(None, 'matrices', 'experiments', ['built_by_experiment'], ['experiment_hash'], source_schema='model_metadata', referent_schema='model_metadata')
op.alter_column('models', 'experiment_hash', new_column_name='built_by_experiment', schema='model_metadata')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('models', 'built_by_experiment', new_column_name='experiment_hash', schema='model_metadata')
op.drop_constraint(None, 'matrices', schema='model_metadata', type_='foreignkey')
op.drop_column('matrices', 'built_by_experiment', schema='model_metadata')
op.drop_table('experiment_models', schema='model_metadata')
op.drop_table('experiment_matrices', schema='model_metadata')
# ### end Alembic commands ###
```
#### File: alembic/versions/50e1f1bc2cac_add_subsets.py
```python
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '50e1f1bc2cac'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
def upgrade():
"""
This upgrade:
1. adds the model_metadata.subsets table to track evaluation subsets
2. adds the subset_hash column to the evaluations table, defaulting to
'' for existing evaluations (on the assumption that they were over
the whole cohort)
3. alters (really, drops and re-adds) the primary key for the
evaluations tables to include the subset_hash
"""
# 1. Add subsets table
op.create_table(
"subsets",
sa.Column("subset_hash", sa.String(), nullable=False),
sa.Column("config", postgresql.JSONB(astext_type=sa.Text()), nullable=True),
sa.Column(
"created_timestamp",
sa.DateTime(timezone=True),
server_default=sa.text("now()"),
nullable=True
),
sa.PrimaryKeyConstraint("subset_hash"),
schema="model_metadata",
)
# 2. Add subset_hash column
op.add_column(
"evaluations",
sa.Column("subset_hash", sa.String(), nullable=False, server_default=""),
schema="test_results"
)
op.add_column(
"evaluations",
sa.Column("subset_hash", sa.String(), nullable=False, server_default=""),
schema="train_results"
)
# 3. Alter primary keys
    # Actual triage databases have been observed with different variants of the
    # primary key name in the train_results schema. To ensure that all
    # databases can be appropriately updated, procedural SQL is used to look up
    # the name of the primary key before dropping it.
op.drop_constraint("evaluations_pkey", "evaluations", schema="test_results")
op.execute(
"""
DO
$body$
DECLARE _pkey_name varchar(100) := (
SELECT conname
FROM pg_catalog.pg_constraint con
INNER JOIN pg_catalog.pg_class rel ON rel.oid = con.conrelid
INNER JOIN pg_catalog.pg_namespace nsp ON nsp.oid = con.connamespace
WHERE rel.relname = 'evaluations'
AND nspname = 'train_results'
AND contype = 'p'
);
BEGIN
EXECUTE('ALTER TABLE train_results.evaluations DROP CONSTRAINT ' || _pkey_name);
END
$body$
"""
)
op.create_primary_key(
constraint_name="evaluations_pkey",
table_name="evaluations",
columns=[
"model_id",
"subset_hash",
"evaluation_start_time",
"evaluation_end_time",
"as_of_date_frequency",
"metric",
"parameter"
],
schema="test_results",
)
op.create_primary_key(
constraint_name="train_evaluations_pkey",
table_name="evaluations",
columns=[
"model_id",
"subset_hash",
"evaluation_start_time",
"evaluation_end_time",
"as_of_date_frequency",
"metric",
"parameter"
],
schema="train_results",
)
def downgrade():
"""
    This downgrade reverses the steps of the upgrade:
1. Alters the primary key on the evaluations tables to exclude
subset_hash
2. Drops the subset hash columns from the evaluations tables
3. Drops the model_metadata.subsets table
"""
# 1. Alter primary keys
op.drop_constraint("evaluations_pkey", "evaluations", schema="test_results")
op.drop_constraint("train_evaluations_pkey", "evaluations", schema="train_results")
op.create_primary_key(
name="evaluations_pkey",
table_name="evaluations",
columns=[
"model_id",
"evaluation_start_time",
"evaluation_end_time",
"as_of_date_frequency",
"metric",
"parameter"
],
schema="test_results",
)
op.create_primary_key(
name="train_evaluations_pkey",
table_name="evaluations",
columns=[
"model_id",
"evaluation_start_time",
"evaluation_end_time",
"as_of_date_frequency",
"metric",
"parameter"
],
schema="train_results",
)
# 2. Drop subset_hash columns
op.drop_column("evaluations", "subset_hash", schema="train_results")
op.drop_column("evaluations", "subset_hash", schema="test_results")
# 3. Drop subsets table
op.drop_table("subsets", schema="model_metadata")
```
#### File: alembic/versions/a20104116533_.py
```python
import os
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'a20104116533'
down_revision = '8cef808549dd'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.execute("CREATE SCHEMA IF NOT EXISTS triage_metadata")
op.execute(
"ALTER TABLE model_metadata.experiment_matrices SET SCHEMA triage_metadata;"
+ " ALTER TABLE model_metadata.experiment_models SET SCHEMA triage_metadata;"
+ " ALTER TABLE model_metadata.experiment_runs SET SCHEMA triage_metadata;"
+ " ALTER TABLE model_metadata.experiments SET SCHEMA triage_metadata;"
+ " ALTER TABLE model_metadata.list_predictions SET SCHEMA triage_metadata;"
+ " ALTER TABLE model_metadata.matrices SET SCHEMA triage_metadata;"
+ " ALTER TABLE model_metadata.model_groups SET SCHEMA triage_metadata;"
+ " ALTER TABLE model_metadata.models SET SCHEMA triage_metadata;"
+ " ALTER TABLE model_metadata.subsets SET SCHEMA triage_metadata;"
)
op.execute("DROP SCHEMA IF EXISTS model_metadata")
## We update (replace) the function
group_proc_filename = os.path.join(
os.path.dirname(__file__), "../../sql/model_group_stored_procedure.sql"
)
with open(group_proc_filename) as fd:
stmt = fd.read()
op.execute(stmt)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.execute("CREATE SCHEMA IF NOT EXISTS model_metadata")
op.execute(
"ALTER TABLE triage_metadata.experiment_matrices SET SCHEMA model_metadata;"
+ " ALTER TABLE triage_metadata.experiment_models SET SCHEMA model_metadata;"
+ " ALTER TABLE triage_metadata.experiment_runs SET SCHEMA model_metadata;"
+ " ALTER TABLE triage_metadata.experiments SET SCHEMA model_metadata;"
+ " ALTER TABLE triage_metadata.matrices SET SCHEMA model_metadata;"
+ " ALTER TABLE triage_metadata.model_groups SET SCHEMA model_metadata;"
+ " ALTER TABLE triage_metadata.models SET SCHEMA model_metadata;"
+ " ALTER TABLE triage_metadata.subsets SET SCHEMA model_metadata;"
)
# ### end Alembic commands ###
```
#### File: component/timechop/utils.py
```python
from six import string_types
def convert_to_list(x):
"""
Given an object, if it is not a list, convert it to a list.
Arguments:
x (object): an object to be converted to a list
return:
list: x as a list
"""
if isinstance(x, string_types):
return [x]
try:
iter(x)
except TypeError:
return [x]
else:
return list(x)
```
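
A minimal sketch of the behavior described in the docstring, assuming the module is importable as `triage.component.timechop.utils` (inferred from the file path above):
```python
from triage.component.timechop.utils import convert_to_list

convert_to_list("1 year")    # ['1 year']  strings are wrapped, not iterated char by char
convert_to_list(5)           # [5]         non-iterables are wrapped
convert_to_list(("a", "b"))  # ['a', 'b']  other iterables are converted to lists
convert_to_list(["a", "b"])  # ['a', 'b']  lists come back as lists
```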
#### File: triage/experiments/singlethreaded.py
```python
from triage.experiments import ExperimentBase
class SingleThreadedExperiment(ExperimentBase):
def process_query_tasks(self, query_tasks):
self.feature_generator.process_table_tasks(query_tasks)
def process_matrix_build_tasks(self, matrix_build_tasks):
self.matrix_builder.build_all_matrices(matrix_build_tasks)
def process_train_test_batches(self, batches):
self.model_train_tester.process_all_batches(batches)
def process_subset_tasks(self, subset_tasks):
self.subsetter.process_all_tasks(subset_tasks)
```
#### File: triage/util/random.py
```python
import random
FLOAT_TO_INT_MULTIPLIER = 2000000000
def generate_python_random_seed():
"""Generate a random integer suitable for seeding the Python random generator
"""
return int(random.uniform(0, 1.0) * FLOAT_TO_INT_MULTIPLIER)
``` |
{
"source": "josephbakarji/deep-delay-autoencoder",
"score": 2
} |
#### File: deep-delay-autoencoder/examples/waterlorenz.py
```python
import numpy as np
from scipy.integrate import odeint
from scipy.signal import savgol_filter
from scipy.special import legendre, chebyt
from scipy import interpolate
import sys
sys.path.append('../src')
from sindy_utils import library_size
from data_manage import DataStruct
import pdb
import json
class LorenzWW:
    # Could use inheritance
def __init__(self,
option='delay',
noise=0.0, # Not used in this case
linear=False, # Not used in this case
input_dim=128,
filename='./data/lorenzww.json',
coefficients=[10, 8/3, 28.],
normalization=[1/40, 1/40, 1/40],
interpolate=False,
interp_dt=0.01,
poly_order=3):
self.option = 'delay'
self.filename = filename
self.input_dim = input_dim
# self.coefficients = coefficients
self.sigma = coefficients[0]
self.beta = coefficients[1]
self.rho = coefficients[2]
self.normalization = np.array(normalization) if normalization is not None else np.array([1, 1, 1])
self.poly_order = poly_order
self.interpolate = interpolate
self.interp_dt = interp_dt
def get_solution(self, tau=None):
output_json = json.load(open(self.filename))
times = np.array(output_json['times'])
omegas = np.array(output_json['omegas'])
domegas = np.array(output_json['domegas'])
print(len(times))
new_times = []
if self.interpolate:
new_dt = self.interp_dt # Include with inputs
# Smoothing and interpolation
for i in range(len(omegas)):
omegas[i] = savgol_filter(omegas[i], 21, 3)
domegas[i] = savgol_filter(domegas[i], 21, 3)
times_new = np.arange(times[i][0], times[i][-2], new_dt)
f = interpolate.interp1d(times[i], omegas[i], kind='cubic')
omegas[i] = f(times_new) # use interpolation function returned by `interp1d`
df = interpolate.interp1d(times[i], domegas[i], kind='cubic')
domegas[i] = df(times_new) # use interpolation function returned by `interp1d`
new_times.append(times_new)
new_times = np.array(new_times)
else:
new_times = times
new_dt = times[0][1] - times[0][0]
dt = new_dt
n_ics = len(omegas)
d = 3
n = self.input_dim
n_delays = n
xic = []
dxic = []
for j, om in enumerate(omegas):
            n_steps = len(om) - self.input_dim  # number of delay-embedded rows; keep consistent with input_dim
xj = np.zeros((n_steps, n_delays))
dxj = np.zeros((n_steps, n_delays))
for k in range(n_steps):
xj[k, :] = om[k:n_delays+k]
dxj[k, :] = domegas[j][k:n_delays+k]
xic.append(xj)
dxic.append(dxj)
x = np.vstack(xic)
dx = np.vstack(dxic)
t = np.hstack(new_times)
self.omega = np.hstack(omegas)
self.domega = np.hstack(domegas)
# Align times
dt = t[1]-t[0]
new_time = t.copy()
for i in range(1, len(t)):
if new_time[i] - new_time[i-1] >= dt*2:
new_time[i] = new_time[i-1] + dt
        # Could be made an object rather than a dictionary (part of the class)
data = DataStruct(name='measurements')
data.t = new_time
data.x = x
data.dx = dx
data.ddx = None
data.z = omegas
data.dz = domegas
data.ddz = None
data.sindy_coefficients = self.lorenz_coefficients()
        if self.option == 'projection':
            # Note: y_spatial and modes are not defined in this scope; this branch
            # is unreachable because option is fixed to 'delay' in __init__.
            data.y_spatial = y_spatial
            data.modes = modes
return data
def lorenz_coefficients(self):
"""
Generate the SINDy coefficient matrix for the Lorenz system.
        Uses the following instance attributes (set in __init__):
        normalization - 3-element list or array specifying the scaling of each Lorenz variable
        poly_order - polynomial order of the SINDy model
        sigma, beta, rho - parameters of the Lorenz system
"""
Xi = np.zeros((library_size(3, self.poly_order), 3))
Xi[1,0] = -self.sigma
Xi[2,0] = self.sigma*self.normalization[0]/self.normalization[1]
Xi[1,1] = self.rho*self.normalization[1]/self.normalization[0]
Xi[2,1] = -1
Xi[6,1] = -self.normalization[1]/(self.normalization[0]*self.normalization[2])
Xi[3,2] = -self.beta
Xi[5,2] = self.normalization[2]/(self.normalization[0]*self.normalization[1])
return Xi
``` |
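
The nested loop in `get_solution` builds a Hankel-style delay embedding of each measured trajectory: row k of the embedded matrix holds the window `om[k:k+n_delays]`. The standalone sketch below isolates that construction for a single 1-D signal; the helper name `delay_embed` is illustrative and not part of the repository.
```python
import numpy as np

def delay_embed(signal, n_delays):
    """Return a matrix whose k-th row is signal[k : k + n_delays].

    Mirrors the inner loop of LorenzWW.get_solution above, without the
    derivative channel.
    """
    signal = np.asarray(signal)
    n_steps = len(signal) - n_delays
    embedded = np.zeros((n_steps, n_delays))
    for k in range(n_steps):
        embedded[k, :] = signal[k:k + n_delays]
    return embedded

# Toy usage: a short sine wave embedded with 4 delay coordinates.
t = np.linspace(0, 2 * np.pi, 20)
X = delay_embed(np.sin(t), n_delays=4)
print(X.shape)  # (16, 4)
```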
{
"source": "josephbakarji/Gluvn",
"score": 3
} |
#### File: gluvn_python/musicFun/GenerateDict.py
```python
from MusicFunction import *
import csv
def makeallnotes(notes, octrange):
allnotes = []
for i in octrange:
for j in notes:
a = j + str(i)
allnotes.append(a)
return allnotes
def noterange(start, end, allnotes):
return allnotes[allnotes.index(start) : allnotes.index(end)+1]
ns = ['C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B']
nb = ['C', 'Db', 'D', 'Eb', 'E', 'F', 'Gb', 'G', 'Ab', 'A', 'Bb', 'B']
orange = range(0,6)
keyboards = makeallnotes(ns,orange)
#noteslices = noterange('C1','F#5',keyboards)
keyboardb = makeallnotes(nb,orange)
#notesliceb = noterange('C0','Gb5',keyboardb)
tuplist = []
tuplist_num2note = []
for i in range(len(keyboards)):
tuplist.append((keyboards[i], notename_to_midinum(keyboards[i])))
tuplist.append((keyboardb[i], notename_to_midinum(keyboardb[i])))
tuplist_num2note.append((notename_to_midinum(keyboards[i]), keyboards[i]))
tupdict = dict(tuplist)
tupdict_num2note = dict(tuplist_num2note)
w2 = csv.writer(open("../data/tables/num2note.csv", "w"))
for key, val in tupdict_num2note.items():
w2.writerow([key, val])
w = csv.writer(open("../data/tables/note2num.csv", "w"))
for key, val in tupdict.items():
w.writerow([key, val])
dictest = {}
for key, val in csv.reader(open("../data/tables/note2num.csv")):
print(key)
dictest[key] = val
print(dictest)
```
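
This script depends on `notename_to_midinum` from `MusicFunction`, which is not shown here. As an illustration only, the sketch below assumes the common MIDI convention where C4 maps to 60 (midi = 12 * (octave + 1) + pitch class); the repository's actual offset may differ.
```python
# Hypothetical stand-in for MusicFunction.notename_to_midinum; assumes C4 == 60.
PITCH_CLASSES = {
    'C': 0, 'C#': 1, 'Db': 1, 'D': 2, 'D#': 3, 'Eb': 3, 'E': 4, 'F': 5,
    'F#': 6, 'Gb': 6, 'G': 7, 'G#': 8, 'Ab': 8, 'A': 9, 'A#': 10, 'Bb': 10, 'B': 11,
}

def notename_to_midinum_sketch(note):
    # Single-digit octaves only, which covers the 0-5 range used above.
    pitch, octave = note[:-1], int(note[-1])
    return 12 * (octave + 1) + PITCH_CLASSES[pitch]

print(notename_to_midinum_sketch('C4'))   # 60
print(notename_to_midinum_sketch('A#3'))  # 58
```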
#### File: gluvn_python/testfiles/PlotSensors_ref.py
```python
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui
import time
import sys
from threading import Thread
# def plotsens(sensdata):
# win = pg.GraphicsWindow()
# win.setWindowTitle('pyqtgraph example: PanningPlot')
# plt = win.addPlot()
# curve = plt.plot()
# t0 = time.time()
#
# def update(sensdata):
# global pdata, tarr, curve
# pdata.append(sensdata)
# tarr.append(t0 - time.time())
# if len(pdata) > 100:
# tarr.pop(0)
# pdata.pop(0)
# curve.setData(x = tarr, y= pdata)
#
# timer = QtCore.QTimer()
# timer.timeout.connect(update)
# timer.start(50)
#
# QtGui.QApplication.instance().exec_()
class PlotSens(QtCore.QThread):
def __init__(self, sensdata):
QtCore.QThread.__init__(self)
self.sensdata = sensdata
self.pdata = []
self.app = QtGui.QApplication([])
win = pg.GraphicsWindow()
win.setWindowTitle('Sensor Reading')
plt = win.addPlot()
self.curve = plt.plot()
self.t0 = time.time()
self.tarr = []
def update(self):
self.pdata.append(self.sensdata)
self.tarr.append(self.t0 - time.time())
if len(self.pdata) > 100:
self.tarr.pop(0)
self.pdata.pop(0)
self.curve.setData(self.tarr, self.pdata)
self.app.processEvents()
def run(self):
self.timer = QtCore.QTimer()
self.timer.timeout.connect(self.update)
self.timer.start(50)
## Start Qt event loop unless running in interactive mode or using pyside.
#if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
#QtGui.QApplication.instance().exec_()
``` |
{
"source": "josephbakarji/learning-pdf",
"score": 2
} |
#### File: learning-pdf/code/Learning.py
```python
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from scipy.signal import savgol_filter
from numpy.polynomial.chebyshev import chebval, Chebyshev
from sklearn.metrics import mean_squared_error
import json
from __init__ import * ## fix - Imports from testcases directory!
from pdfsolver import PdfSolver, PdfGrid
from datamanage import DataIO
from data_analysis import Analyze
import time
import pdb
class PDElearn:
def __init__(self, fu=None, grid=None, fuk=None, ICparams=None, scase='advection_marginal', trainratio = 0.7, debug=False, verbose=True):
self.fuk = fuk
self.fu = fu
self.grid = grid
self.ICparams = ICparams
self.trainratio = trainratio
self.debug = debug
self.verbose = verbose
self.labels = []
self.featurenames = []
self.scase = scase
#########################################
def train(self, X, y, RegType='L1', RegCoef=0.00001, maxiter=10000, tolerance=0.0001):
if RegType == 'L1':
lin = linear_model.Lasso(alpha=RegCoef, max_iter=maxiter, normalize=True, tol=tolerance)
elif RegType == 'L2':
lin = linear_model.Ridge(alpha=RegCoef, normalize=True, max_iter=maxiter)
elif RegType == 'L0':
lin = linear_model.LinearRegression(normalize=True)
else:
raise Exception("wrong option")
lin.fit(X, y)
return lin
#########################################
def choose_optimizer(self, LassoType='Lasso', RegCoef=0.00001, cv=5, criterion='aic', maxiter=10000, tolerance=0.0001, normalize=True):
if LassoType == 'Lasso':
lin = linear_model.Lasso(alpha=RegCoef, max_iter=maxiter, normalize=normalize, tol=tolerance)
elif LassoType == 'LassoCV':
lin = linear_model.LassoCV(cv=cv, normalize=normalize, max_iter=maxiter)
elif LassoType == 'LassoLarsCV':
lin = linear_model.LassoLarsCV(cv=cv, normalize=normalize, max_iter=maxiter)
elif LassoType == 'LarsCV':
lin = linear_model.LarsCV(cv=cv, normalize=normalize, max_iter=maxiter)
elif LassoType == 'LassoLarsIC':
lin = linear_model.LassoLarsIC(criterion=criterion, normalize=normalize, max_iter=maxiter)
else:
raise Exception("wrong option")
return lin
#########################################
def train_single(self, lin, X, y):
lin.fit(X, y)
rem_feature_idx = []
for idx, coef in enumerate(lin.coef_):
if abs(coef) != 0.0:
rem_feature_idx.append(idx)
return lin, rem_feature_idx
#########################################
def train_rfe(self, lin, X, y, rfe_iter=10, rfe_alpha=0.001, print_rfeiter=False):
# Implements recursive feature elimination (RFE) with Lasso
        null_feature_idx = []  # indices of eliminated (zeroed-out) features
        rem_feature_idx = range(X.shape[1])  # indices of remaining nonzero features
for i in range(rfe_iter):
flag_repeat = False
lin.fit(X[:, rem_feature_idx], y)
if print_rfeiter:
print("\n\nRecursive Feature Elimination iteration : %d"%(i))
# Eliminate terms with coefficients below threshold rfe_alpha
# pdb.set_trace()
for j, coefficient in enumerate(lin.coef_):
if abs(coefficient) <= rfe_alpha:
flag_repeat = True
null_feature_idx.append(rem_feature_idx[j])
if print_rfeiter:
self.print_report(lin, X, y, rem_feature_idx)
            # Update indices of non-zero terms
rem_feature_idx = [i for i in rem_feature_idx if i not in set(null_feature_idx)]
# Check if all feature coefficients are zero
if len(rem_feature_idx) == 0:
print("All coefficients are zero: The trivial solution is optimal...")
return lin, rem_feature_idx
if flag_repeat == False:
return lin, rem_feature_idx
if flag_repeat == True:
print("Recursive Feature Selection did not converge")
return lin, rem_feature_idx
#########################################
#def train_rfe_partialfit(self, Xlist, ylist, RegCoef=0.0001, maxiter=1000, tolerance=0.00001, rfe_iter=10, rfe_alpha=0.001):
#########################################
def fit_sparse(self, feature_opt='1storder', variableCoef=False, variableCoefOrder=0, variableCoefBasis='simple_polynomial', \
LassoType='Lasso', RegCoef=0.00001, cv=None, criterion=None, maxiter=10000, tolerance=0.00001, use_rfe=False, normalize=True,
rfe_iter=10, rfe_alpha=None, print_rfeiter=False, shuffle=False, nzthresh=1e-200, basefile='', adjustgrid={}, save=True,
comments='', checkExistence=True):
# Make Metadata and Check its existence
metadata = self.makeMetadata(basefile, adjustgrid, feature_opt, self.trainratio, variableCoef, variableCoefOrder, variableCoefBasis, \
LassoType, cv, criterion, use_rfe, rfe_alpha, nzthresh, maxiter, comments)
datahandler = DataIO(self.scase, directory=LEARNDIR, basefile=basefile)
if checkExistence:
exists, filename = datahandler.checkMetadataInDir(metadata, ignore_prop='nzthresh')
if exists:
return filename+'.txt'
# Make features and training set
F = Features(scase=self.scase, option=feature_opt, variableCoef=variableCoef, variableCoefOrder=variableCoefOrder, variableCoefBasis=variableCoefBasis)
self.featurelist, self.labels, self.featurenames = F.makeFeatures(self.grid, self.fu, self.ICparams)
Xtrain, ytrain, Xtest, ytest = self.makeTTsets(self.featurelist, self.labels, shuffle=shuffle, threshold=nzthresh)
# Choose optimization algorithm
lin = self.choose_optimizer(LassoType=LassoType, RegCoef=RegCoef, cv=cv, criterion=criterion, maxiter=maxiter, tolerance=tolerance, normalize=normalize)
# Train model with Lasso
# Choose to use Recursive Feature Elimination or not
if use_rfe:
lin, rem_feature_idx = self.train_rfe(lin, Xtrain, ytrain, rfe_iter=rfe_iter, rfe_alpha=rfe_alpha, print_rfeiter=print_rfeiter)
Xtrain = Xtrain[:, rem_feature_idx]
Xtest = Xtest[:, rem_feature_idx]
coefficients = lin.coef_
else:
lin, rem_feature_idx = self.train_single(lin, Xtrain, ytrain)
coefficients = lin.coef_[rem_feature_idx]
# Outputs
output = {}
# Compute Erros and Scores
output['trainRMSE'] = np.sqrt(mean_squared_error(ytrain, lin.predict(Xtrain)))
output['testRMSE'] = np.sqrt(mean_squared_error(ytest, lin.predict(Xtest)))
output['trainScore'] = lin.score(Xtrain, ytrain)
output['testScore'] = lin.score(Xtest, ytest)
rem_featurenames = [self.featurenames[i] for i in rem_feature_idx]
output['featurenames'] = rem_featurenames
output['coef'] = coefficients.tolist() # Might not work for RFE !!
output['n_iter'] = lin.n_iter_
# Different optimizers have different outputs
if LassoType =='LassoLarsIC':
output['alpha'] = lin.alpha_.tolist()
output['criterion_path'] = lin.criterion_.tolist()
elif LassoType == 'LassoCV':
output['alpha'] = lin.alpha_.tolist()
output['alpha_mse_path'] = lin.mse_path_.mean(axis=1).tolist()
output['alpha_path'] = lin.alphas_.tolist()
output['dual_gap'] = lin.dual_gap_
elif LassoType in {'LassoLarsCV', 'LarsCV'}:
output['alpha'] = lin.alpha_
output['alpha_mse_path'] = lin.mse_path_.mean(axis=1).tolist() # Average along CV folds
output['cv_alpha_path'] = lin.cv_alphas_.tolist() # Goes with mse_path
output['coef_path'] = lin.coef_path_.tolist()
output['alpha_path'] = lin.alphas_.tolist() # Goes with coef_path
elif LassoType == 'Lasso':
output['alpha'] = RegCoef
# Printing
if self.verbose:
A = Analyze()
A.print_results(output, metadata)
# Saving
filename = datahandler.saveSolution(output, metadata, fileformat='.txt')
return filename
#########################################
def fit_all(self, feature_opt='1storder', shuffleopt=False, variableCoef=False, variableCoefOrder=2, variableCoefBasis='simple_polynomial',\
RegCoef=0.000001, maxiter=5000, tolerance=0.00001):
F = Features(scase=self.scase, option=feature_opt, variableCoef=variableCoef, variableCoefOrder=variableCoefOrder, variableCoefBasis=variableCoefBasis)
featurelist, labels, featurenames = F.makeFeatures(self.grid, self.fu, self.ICparams)
Xtrain, ytrain, Xtest, ytest = self.makeTTsets(featurelist, labels, shuffle=shuffleopt)
self.featurelist, self.labels = featurelist, labels
lin1 = self.train(Xtrain, ytrain, RegType='L1', RegCoef=RegCoef, maxiter=maxiter, tolerance=tolerance)
lin2 = self.train(Xtrain, ytrain, RegType='L2', RegCoef=RegCoef, maxiter=maxiter)
lin0 = self.train(Xtrain, ytrain, RegType='L0')
if self.verbose:
print(' \n########## ' + feature_opt + ' ###########\n ')
print('L1 Reg coefficients: \n', lin1.sparse_coef_)
print("L1 Reg Test Score = %5.3f" %(lin1.score(Xtest, ytest)))
print("L1 Reg Train Score = %5.3f" %(lin1.score(Xtrain, ytrain)) )
print("L2 Reg Test Score = %5.3f" %(lin2.score(Xtest, ytest)) )
print("L2 Reg Train Score = %5.3f" %(lin2.score(Xtrain, ytrain)) )
print("No Reg Test Score = %5.3f" %(lin0.score(Xtest, ytest)) )
print("No Reg Train Score = %5.3f" %(lin0.score(Xtrain, ytrain)) )
for i in range(len(lin1.coef_)): # Fix for options when not all are used
print("%s \t:\t %5.4f \t %5.4f \t %5.4f" %( featurenames[i], lin1.coef_[i], lin2.coef_[i], lin0.coef_[i]))
#########################################
#########################################
# def saveLearning(self):
# D = DataIO(self.scase, directory=LEARNDIR)
# savename = savedict['ICparams']['basefile'].split('.')[0]
# savenametxt = D.saveJsonFile(savename, savedict)
# return savenametxt
def makeMetadata(self, basefile, adjustgrid, feature_opt, trainratio, variableCoef, variableCoefOrder, variableCoefBasis, \
LassoType, cv, criterion, use_rfe, rfe_alpha, nzthresh, maxiter, comments):
metadata ={ 'ICparams':{
'basefile' : basefile,
'adjustgrid' : adjustgrid
},
'Features':{
'feature_opt' : feature_opt,
'trainratio' : self.trainratio,
'nzthresh' : nzthresh
},
'Algorithm':{
'LassoType' : LassoType,
'use_rfe' : use_rfe,
'maxiter' : maxiter
}
}
if variableCoef:
metadata['Features']['variableCoef'] = variableCoef
metadata['Features']['variableCoefOrder'] = variableCoefOrder
metadata['Features']['variableCoefBasis'] = variableCoefBasis
if use_rfe:
metadata['Algorithm']['rfe_alpha'] = rfe_alpha
if len(comments)>0:
metadata['ICparams']['comments'] = comments
if LassoType == 'LassoLarsIC':
metadata['Algorithm']['criterion'] = criterion
if LassoType in {'LassoCV', 'LassoLarsCV', 'LarsCV'}:
metadata['Algorithm']['cv'] = cv
return metadata
#########################################
#########################################
# def print_results(self, output):
# props = ['feature_opt', 'trainScore', 'testScore', 'trainRMSE', 'testRMSE', 'featurenames', 'coefficients', 'n_iter']
# feature_opt, trainScore, testScore, trainRMSE, testRMSE, featurenames, coefficients, n_iter = [output[p] for p in props]
# print("\n#############################\n ")
# print('Features option: ' + feature_opt )
# print("---- Errors ----")
# print("Train Score \t= %5.3f"%(trainScore))
# print("Test Score \t= %5.3f"%(testScore))
# print("Train RMSE \t= %5.3e"%(trainRMSE))
# print("Test RMSE \t= %5.3e"%(testRMSE) )
# print("---- Coefficients ----")
# for feat, coef in zip(featurenames, coefficients):
# print("%s \t:\t %7.9f" %( feat, coef))
# print("number of iterations: ", n_iter)
def print_report(self, lin, X, y, rem_feature_idx):
print("\n##########\n")
trainMSE = mean_squared_error(y, lin.predict(X[:, rem_feature_idx]))
print("---- Errors ----")
print("Train Score \t= %5.3f" %(lin.score(X[:, rem_feature_idx], y)) )
print("Train MSE \t= %5.3e"%(trainMSE))
print("---- Coefficients ----")
for i, feat_idx in enumerate(rem_feature_idx):
print("%s \t:\t %7.9f" %( self.featurenames[feat_idx], lin.coef_[i]))
print("---- Sparsity = %d / %d "%(len(rem_feature_idx), len(self.featurenames)))
def print_full_report(self, lin, Xtrain, ytrain, Xtest, ytest, rem_feature_idx, featurenames):
# TODO: use tabulate() package/function
print("\n##########\n")
if len(rem_feature_idx) != 0:
trainRMSE = np.sqrt(mean_squared_error(ytrain, lin.predict(Xtrain[:, rem_feature_idx])))
testRMSE = np.sqrt(mean_squared_error(ytest, lin.predict(Xtest[:, rem_feature_idx])))
print("---- Errors ----")
print("Train Score \t= %5.3f" %(lin.score(Xtrain[:, rem_feature_idx], ytrain)) )
print("Test Score \t= %5.3f" %(lin.score(Xtest[:, rem_feature_idx], ytest)) )
print("Train RMSE \t= %5.3e"%(trainRMSE))
print("Test RMSE \t= %5.3e"%(trainRMSE))
print("---- Coefficients ----")
for i, feat_idx in enumerate(rem_feature_idx):
print("%s \t:\t %7.9f" %(featurenames[feat_idx], lin.coef_[i]))
print("---- Sparsity = %d / %d "%(len(rem_feature_idx), len(featurenames)))
# def debug_plot(self, x, y1, y2, name):
# fig, ax = plt.subplots(1, 2, sharey=True)
# ax[0].plot(x, y1)
# ax[0].set_ylabel('f')
# ax[0].set_title(name)
# ax[1].plot(x, y2)
# ax[1].set_ylabel('f')
# ax[1].set_title(name+' smoothed')
#########################################
#########################################
#########################################
def makeTTsets(self, featurelist, labels, shuffle=False, threshold=1e-90):
# Get rid of useless nodes that don't change in time
nzidx = np.where(np.sqrt(np.sum(labels**2, axis=2))>threshold)
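        # Note (added comment): labels is assumed to be indexed (U, x, t) here, so the
        # sum of squares along axis=2 measures how much each (U, x) node varies in time;
        # nzidx then selects only the nodes whose trajectories are not essentially zero.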
print('fu num elem ', np.prod(featurelist[0].shape))
print('fu_red num elem: ', np.prod(featurelist[0][nzidx].shape))
X = self.make_X(featurelist, nzidx)
y = self.make_y(labels, nzidx)
if shuffle:
rng_state = np.random.get_state()
np.random.shuffle(X)
np.random.set_state(rng_state)
np.random.shuffle(y)
# Split data into training and test sets
trainlength = int( self.trainratio * X.shape[0] )
Xtrain = X[:trainlength, :]
ytrain = y[:trainlength]
Xtest = X[trainlength:, :]
ytest = y[trainlength:]
return Xtrain, ytrain, Xtest, ytest
def make_X(self, featurelist, nzidx):
f0 = featurelist[0]
nf = len(featurelist)
numelem = np.prod(f0[nzidx].shape)
X = np.zeros((numelem, nf))
for f_idx, f in enumerate(featurelist):
X[:, f_idx] = f[nzidx].reshape(numelem)
return X
def make_y(self, f, nzidx):
return f[nzidx].reshape((np.prod(f[nzidx].shape)))
###########################################
###########################################
###########################################
###########################################
###########################################
class Features:
def __init__(self, scase='advection_marginal', option='1storder', variableCoef=False, variableCoefOrder=2, variableCoefBasis='simple_polynomial', addNonlinear=False):
self.option = option
self.variableCoef = variableCoef
self.variableCoefOrder = variableCoefOrder
self.variableCoefBasis = variableCoefBasis
self.addNonlinear = addNonlinear
self.scase=scase
def makeFeatures(self, grid, fu, ICparams):
### options =
# '2ndorder': second order in time (also adds f_xt)
# '1storder': first order in time
# '1storder_close': learn closure terms
## TODO: Only assumes the forms (u, t) or (u, x, t)
if hasattr(grid, 'xx'):
return self.makeFeatures_uxt(grid, fu, ICparams)
else:
return self.makeFeatures_ut(grid, fu, ICparams)
# else:
# raise Exception("case %s doesn't exist"%(self.scase))
def makeFeatures_uxt(self, grid, fu, ICparams):
nt = len(grid.tt)
nx = len(grid.xx)
nu = len(grid.uu)
dx = grid.xx[1] - grid.xx[0]
dt = grid.tt[1] - grid.tt[0]
du = grid.uu[1] - grid.uu[0]
if self.option == '2ndorder':
ddict = {'', 't', 'tt', 'xt', 'x', 'xx', 'xxx', 'xxxx', 'U', 'UU', 'UUU', 'xU', 'xUU', 'xxU', 'xxUU'}
elif self.option == '1storder' or self.option == '1storder_close':
ddict = {'', 't', 'x', 'xx', 'xxx', 'U', 'UU', 'xU', 'xUU', 'xxU'}
elif self.option == 'conservative':
ddict = {'', 't', 'U', 'Ux', 'Uxx', 'Uxxx', 'UU', 'UUx', 'UUxx', 'UUU', 'UUUx'}
else:
raise Exception('option not valid')
# Derivative terms dictionary
# Computationally inefficient (fix: use previous derivatives)
dimaxis = {'U':0, 'x':1, 't': 2}
diminc = {'U':du, 'x':dx, 't':dt}
maxder = {'U':0, 'x':0, 't':0}
fudict = dict.fromkeys(ddict, None) # fu dictionary of derivatives
dcount = dict.fromkeys(ddict, None) # Counts of derivatives for each term
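        # Note (added comment): each term string encodes a mixed finite-difference
        # derivative, e.g. 'xU' applies np.diff once along x (divided by dx) and once
        # along U (divided by du), approximating d^2 f / dx dU; '' leaves fu unchanged
        # and 't' yields the time derivative used as the regression label.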
for term in ddict:
dfu = fu.copy() # copy?
md = {'U':0, 'x':0, 't':0}
if len(term)>0:
for dim in term:
dfu = np.diff(dfu, axis = dimaxis[dim])/diminc[dim]
md[dim] += 1
dcount[term] = md
fudict[term] = dfu
for dim in term:
maxder[dim] = md[dim] if md[dim] > maxder[dim] else maxder[dim]
# Adjust dimensions to match
mu = maxder['U']
mx = maxder['x']
mt = maxder['t']
for term in fudict:
uc = mu - dcount[term]['U']
xc = mx - dcount[term]['x']
tc = mt - dcount[term]['t']
nu = fudict[term].shape[0]
nx = fudict[term].shape[1]
nt = fudict[term].shape[2]
fudict[term] = fudict[term][uc//2:nu-uc//2-uc%2, xc//2:nx-xc//2-xc%2, tc//2:nt-tc//2-tc%2]
xx_adj = grid.xx[mx//2 : len(grid.xx)-mx//2-mx%2]
uu_adj = grid.uu[mu//2 : len(grid.uu)-mu//2-mu%2]
# make labels and feature lists
featurenames = []
featurelist = []
# Add feature of ones
fudict['1'] = np.ones_like(fudict['t'])
ddict.add('1')
# Add variable coefficients
deg = self.variableCoefOrder+1
if self.variableCoef:
print("Variable coefficient type: " + self.variableCoefBasis)
uu_grid, xx_grid = np.meshgrid(uu_adj, xx_adj, indexing='ij')
fudict_var = dict.fromkeys([(term, j, k) for term in ddict for j in range(deg) for k in range(deg)])
for term in ddict:
for i in range(deg):
for j in range(deg):
fux = np.zeros_like(uu_grid)
for k, u in enumerate(uu_adj):
for l, x in enumerate(xx_adj):
if self.variableCoefBasis == 'chebyshev':
# too inefficient (find a way to get individual terms)
ivec = np.zeros(i+1)
ivec[-1] = 1
jvec = np.zeros(j+1)
jvec[-1] = 1
fux[k, l] = chebval(u, ivec) * chebval(x, jvec)
elif self.variableCoefBasis == 'simple_polynomial':
fux[k, l] = u**i * x**j
else:
raise Exception("variableCoefBasis %s doesn't exist".format(self.variableCoefBasis))
fudict_var[(term, i, j)] = fux # nu*nx
for feat, coefarr in fudict_var.items():
# feat = (term, i, j)
fux_t = np.tile(coefarr.transpose(), (nt-mt, 1, 1)).transpose()
fudict_var[feat] = np.multiply( fudict[feat[0]], fux_t )
# Too redundant - fix
if self.option == '2ndorder':
labels = fudict_var[('tt', 0, 0)]
for key, val in fudict_var.items():
if key[0] != 'tt' and key[0] != 't':
featurenames.append('fu_'+key[0]+'^{'+str(key[1])+str(key[2])+'}')
featurelist.append(val)
elif self.option == '1storder' or self.option == 'conservative':
labels = fudict_var[('t', 0, 0)]
for key, val in fudict_var.items():
if key[0] != 't':
featurenames.append('fu_'+key[0]+'^{'+str(key[1])+str(key[2])+'}')
featurelist.append(val)
elif self.option == '1storder_close':
# TODO: Make loadMetadata(filename, directory) into function
mcD = DataIO(self.scase, directory=MCDIR)
with open(mcD.casedir+'metadata.txt', 'r') as jsonfile:
allmc_metadata = json.load(jsonfile)
mc_metadata = allmc_metadata[ICparams['MCfile'].split('.')[0]]
if self.scase == 'advection_reaction_randadv_analytical':
k_coeffs = mc_metadata['ICparams']['distparams'][0]
# TODO: add 'distk' for ICparams and find mean based on it instead
if mc_metadata['ICparams']['fu0'] == 'gaussians_k':
kmean = k_coeffs[0]
print('kmean = ', kmean)
if mc_metadata['ICparams']['source'] == 'quadratic':
labels = fudict_var[('t', 0, 0)] + kmean * fudict_var[('x', 0, 0)] + fudict_var[('U', 2, 0)] + 2*fudict_var[('', 1, 0)]
removekeys = {('t', 0, 0), ('x', 0, 0), ('U', 2, 0), ('', 1, 0)}
elif mc_metadata['ICparams']['source'] == 'linear':
labels = fudict_var[('t', 0, 0)] + kmean * fudict_var[('x', 0, 0)] + fudict_var[('U', 1, 0)] + fudict_var[('', 0, 0)]
removekeys = {('t', 0, 0), ('x', 0, 0), ('U', 1, 0), ('', 0, 0)}
elif mc_metadata['ICparams']['source'] == 'logistic':
## TODO: Assumes kr = K = 1.0
labels = fudict_var[('t', 0, 0)] + kmean * fudict_var[('x', 0, 0)] \
+ fudict_var[('U', 1, 0)] - fudict_var[('U', 2, 0)] + fudict_var[('', 0, 0)] - 2*fudict_var[('', 1, 0)]
removekeys = {('t', 0, 0), ('x', 0, 0), ('U', 2, 0), ('U', 1, 0), ('', 1, 0), ('', 0, 0)}
## TODO: Try removing terms that appear in closure
for key, val in fudict_var.items():
if key[0] != 't' and key not in removekeys:
featurenames.append('fu_'+key[0]+'^{'+str(key[1])+str(key[2])+'}')
featurelist.append(val)
else:
raise Exception("wrong option")
else: # Not variable coefficient
if self.option == '2ndorder':
labels = fudict['tt']
for term, val in fudict.items():
if term != 'tt' and term != 't':
featurenames.append('fu_'+term)
featurelist.append(val)
elif self.option == '1storder':
labels = fudict['t']
for term, val in fudict.items():
if term != 't':
featurenames.append('fu_'+term)
featurelist.append(val)
elif self.option == '1storder_close':
S = PdfSolver(grid, ICparams=ICparams)
labels = fudict['t'] + S.int_kmean() * fudict['x']
for term, val in fudict.items():
if term != 't':
featurenames.append('fu_'+term)
featurelist.append(val)
else:
raise Exception("wrong option")
return featurelist, labels, featurenames
def makeFeatures_ut(self, grid, fu, ICparams):
nt = len(grid.tt)
nu = len(grid.uu)
dt = grid.tt[1] - grid.tt[0]
du = grid.uu[1] - grid.uu[0]
if self.option == '1storder':
ddict = {'', 't', 'U', 'UU', 'UUU'}
else:
raise Exception('option not valid')
# Derivative terms dictionary
# Computationally inefficient (fix: use previous derivatives)
dimaxis = {'U':0, 't': 1}
diminc = {'U':du, 't':dt}
maxder = {'U':0, 't':0}
fudict = dict.fromkeys(ddict, None) # fu dictionary of derivatives
dcount = dict.fromkeys(ddict, None) # Counts of derivatives for each term
for term in ddict:
dfu = fu.copy()
md = {'U':0, 't':0}
if len(term)>0:
for dim in term:
dfu = np.diff(dfu, axis = dimaxis[dim])/diminc[dim]
md[dim] += 1
dcount[term] = md
fudict[term] = dfu
for dim in term:
maxder[dim] = md[dim] if md[dim] > maxder[dim] else maxder[dim]
# Adjust dimensions to match
mu = maxder['U']
mt = maxder['t']
uu_adj = grid.uu[mu//2 : nu-mu//2-mu%2]
for term in fudict:
uc = mu - dcount[term]['U']
tc = mt - dcount[term]['t']
nu = fudict[term].shape[0]
nt = fudict[term].shape[1]
fudict[term] = fudict[term][uc//2:nu-uc//2-uc%2, tc//2:nt-tc//2-tc%2]
# make labels and feature lists
featurenames = []
featurelist = []
# Add feature of ones
fudict['1'] = np.ones_like(fudict['t'])
ddict.add('1')
# Add variable coefficients
deg = self.variableCoefOrder+1
if self.variableCoef:
print("Variable coefficient type: " + self.variableCoefBasis)
fudict_var = dict.fromkeys([(term, j) for term in ddict for j in range(deg)])
for term in ddict:
for i in range(deg):
fuu = np.zeros_like(uu_adj)
for k, u in enumerate(uu_adj):
if self.variableCoefBasis == 'chebyshev':
ivec = np.zeros(i+1)
ivec[-1] = 1
fuu[k] = chebval(u, ivec)
elif self.variableCoefBasis == 'simple_polynomial':
fuu[k] = u**i
else:
raise Exception("variableCoefBasis %s doesn't exist".format(self.variableCoefBasis))
fudict_var[(term, i)] = fuu # nu*1
# Multiply variables coefficients with numerical derivatives
for feat, coefarr in fudict_var.items():
                # feat = (term, i)
fuu_t = np.tile(coefarr.transpose(), (nt-mt, 1)).transpose()
fudict_var[feat] = np.multiply( fudict[feat[0]], fuu_t )
if self.option == '1storder':
labels = fudict_var[('t', 0)]
for key, val in fudict_var.items():
if key[0] != 't':
featurenames.append('fu_'+key[0]+'*U^'+str(key[1]))
featurelist.append(val)
else:
raise Exception("wrong option")
else: # Not variable coefficient
if self.option == '1storder':
labels = fudict['t']
for term, val in fudict.items():
if term != 't':
featurenames.append('fu_'+term)
featurelist.append(val)
else:
raise Exception("wrong option")
return featurelist, labels, featurenames
# INCOMPLETE...
def makeFeatures_Conservative(self, grid, fu, ICparams):
nt = len(grid.tt)
nx = len(grid.xx)
nu = len(grid.uu)
dx = grid.xx[1] - grid.xx[0]
dt = grid.tt[1] - grid.tt[0]
du = grid.uu[1] - grid.uu[0]
ddict = {'', 't', 'x', 'xx', 'xxx', 'U', 'UU', 'xU', 'xUU', 'xxU'}
# Derivative terms dictionary
# Computationally inefficient (fix: use previous derivatives)
dimaxis = {'U':0, 'x':1, 't': 2}
diminc = {'U':du, 'x':dx, 't':dt}
maxder = {'U':0, 'x':0, 't':0}
fudict = dict.fromkeys(ddict, None) # fu dictionary of derivatives
dcount = dict.fromkeys(ddict, None) # Counts of derivatives for each term
for term in ddict:
dfu = fu.copy() # copy?
md = {'U':0, 'x':0, 't':0}
if len(term)>0:
for dim in term:
dfu = np.diff(dfu, axis = dimaxis[dim])/diminc[dim]
md[dim] += 1
dcount[term] = md
fudict[term] = dfu
for dim in term:
maxder[dim] = md[dim] if md[dim] > maxder[dim] else maxder[dim]
# Adjust dimensions to match
mu = maxder['U']
mx = maxder['x']
mt = maxder['t']
for term in fudict:
uc = mu - dcount[term]['U']
xc = mx - dcount[term]['x']
tc = mt - dcount[term]['t']
nu = fudict[term].shape[0]
nx = fudict[term].shape[1]
nt = fudict[term].shape[2]
fudict[term] = fudict[term][uc//2:nu-uc//2-uc%2, xc//2:nx-xc//2-xc%2, tc//2:nt-tc//2-tc%2]
xx_adj = grid.xx[mx//2 : len(grid.xx)-mx//2-mx%2]
uu_adj = grid.uu[mu//2 : len(grid.uu)-mu//2-mu%2]
# make labels and feature lists
featurenames = []
featurelist = []
# Add feature of ones
fudict['1'] = np.ones_like(fudict['t'])
ddict.add('1')
# Add variable coefficients
deg = self.variableCoefOrder+1
if self.variableCoef:
print("Variable coefficient type: " + self.variableCoefBasis)
uu_grid, xx_grid = np.meshgrid(uu_adj, xx_adj, indexing='ij')
fudict_var = dict.fromkeys([(term, j, k) for term in ddict for j in range(deg) for k in range(deg)])
for term in ddict:
for i in range(deg):
for j in range(deg):
fux = np.zeros_like(uu_grid)
for k, u in enumerate(uu_adj):
for l, x in enumerate(xx_adj):
if self.variableCoefBasis == 'chebyshev':
# too inefficient (find a way to get individual terms)
ivec = np.zeros(i+1)
ivec[-1] = 1
jvec = np.zeros(j+1)
jvec[-1] = 1
fux[k, l] = chebval(u, ivec) * chebval(x, jvec)
elif self.variableCoefBasis == 'simple_polynomial':
fux[k, l] = u**i * x**j
else:
raise Exception("variableCoefBasis %s doesn't exist".format(self.variableCoefBasis))
fudict_var[(term, i, j)] = fux # nu*nx
for feat, coefarr in fudict_var.items():
# feat = (term, i, j)
fux_t = np.tile(coefarr.transpose(), (nt-mt, 1, 1)).transpose()
fudict_var[feat] = np.multiply( fudict[feat[0]], fux_t )
# Too redundant - fix
if self.option == '2ndorder':
labels = fudict_var[('tt', 0, 0)]
for key, val in fudict_var.items():
if key[0] != 'tt' and key[0] != 't':
featurenames.append('fu_'+key[0]+'^{'+str(key[1])+str(key[2])+'}')
featurelist.append(val)
elif self.option == '1storder' or self.option == 'conservative':
labels = fudict_var[('t', 0, 0)]
for key, val in fudict_var.items():
if key[0] != 't':
featurenames.append('fu_'+key[0]+'^{'+str(key[1])+str(key[2])+'}')
featurelist.append(val)
elif self.option == '1storder_close':
S = PdfSolver(grid, ICparams=ICparams)
                print(S.int_kmean())
labels = fudict_var[('t', 0, 0)] + S.int_kmean() * fudict_var[('x', 0, 0)]
for key, val in fudict_var.items():
if key[0] != 't' and key != ('x', 0, 0):
featurenames.append('fu_'+key[0]+'^{'+str(key[1])+str(key[2])+'}')
featurelist.append(val)
else:
raise Exception("wrong option")
else: # Not variable coefficient
if self.option == '2ndorder':
labels = fudict['tt']
for term, val in fudict.items():
if term != 'tt' and term != 't':
featurenames.append('fu_'+term)
featurelist.append(val)
elif self.option == '1storder':
labels = fudict['t']
for term, val in fudict.items():
if term != 't':
featurenames.append('fu_'+term)
featurelist.append(val)
elif self.option == '1storder_close':
S = PdfSolver(grid, ICparams=ICparams)
labels = fudict['t'] + S.int_kmean() * fudict['x']
for term, val in fudict.items():
if term != 't':
featurenames.append('fu_'+term)
featurelist.append(val)
else:
raise Exception("wrong option")
return featurelist, labels, featurenames
if __name__ == "__main__":
# LEARN
if len(sys.argv)>1:
basefile = sys.argv[1] + '.npy'
else:
basefile = 'advection_reaction_analytical_726_291.npy'
case = '_'.join(basefile.split('_')[:-2])
dataman = DataIO(case, directory=PDFDIR)
fu, gridvars, ICparams = dataman.loadSolution(basefile, array_opt='marginal')
grid = PdfGrid(gridvars)
difflearn = PDElearn(grid=grid, fu=fu, ICparams=ICparams, scase=case)
filename = difflearn.fit_sparse(basefile=basefile)
print(filename)
```
#### File: code/solvers/advection.py
```python
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rcParams['mathtext.fontset'] = 'cm'
mpl.rcParams['mathtext.rm'] = 'serif'
# helper functions for the limiting
def minmod(a, b):
if abs(a) < abs(b) and a*b > 0.0:
return a
elif abs(b) < abs(a) and a*b > 0.0:
return b
else:
return 0.0
def maxmod(a, b):
if abs(a) > abs(b) and a*b > 0.0:
return a
elif abs(b) > abs(a) and a*b > 0.0:
return b
else:
return 0.0
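# Illustrative sanity check of the limiter helpers (arbitrary example values):
# minmod returns the smaller-magnitude slope when both arguments share a sign and 0.0
# when they disagree, while maxmod returns the larger-magnitude one, e.g.
#   minmod(0.5, 2.0) -> 0.5,   minmod(-0.5, 2.0) -> 0.0,   maxmod(0.5, 2.0) -> 2.0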
class Grid1d(object):
def __init__(self, nx, ng, xmin=0.0, xmax=1.0):
self.ng = ng
self.nx = nx
self.xmin = xmin
self.xmax = xmax
        # python is zero-based. Make easy integers to know where the
# real data lives
self.ilo = ng
self.ihi = ng+nx-1
# physical coords -- cell-centered, left and right edges
self.dx = (xmax - xmin)/(nx)
self.x = xmin + (np.arange(nx+2*ng)-ng+0.5)*self.dx
self.xl = xmin + (np.arange(nx+2*ng)-ng)*self.dx
self.xr = xmin + (np.arange(nx+2*ng)+1.0)*self.dx
# storage for the solution
self.a = np.zeros((nx+2*ng), dtype=np.float64)
def scratch_array(self):
""" return a scratch array dimensioned for our grid """
return np.zeros((self.nx+2*self.ng), dtype=np.float64)
def fill_BCs(self):
""" fill all single ghostcell with periodic boundary conditions """
for n in range(self.ng):
# left boundary
self.a[self.ilo-1-n] = self.a[self.ihi-n]
# right boundary
self.a[self.ihi+1+n] = self.a[self.ilo+n]
def norm(self, e):
""" return the norm of quantity e which lives on the grid """
if len(e) != 2*self.ng + self.nx:
return None
#return np.sqrt(self.dx*np.sum(e[self.ilo:self.ihi+1]**2))
return np.max(abs(e[self.ilo:self.ihi+1]))
class Simulation(object):
def __init__(self, grid, u, C=0.8, slope_type="centered"):
self.grid = grid
self.t = 0.0 # simulation time
self.u = u # the constant advective velocity
self.C = C # CFL number
self.slope_type = slope_type
def init_cond(self, type="tophat"):
""" initialize the data """
if type == "tophat":
self.grid.a[:] = 0.0
self.grid.a[np.logical_and(self.grid.x >= 0.333,
self.grid.x <= 0.666)] = 1.0
elif type == "sine":
self.grid.a[:] = np.sin(2.0*np.pi*self.grid.x/(self.grid.xmax-self.grid.xmin))
elif type == "gaussian":
al = 1.0 + np.exp(-60.0*(self.grid.xl - 0.5)**2)
ar = 1.0 + np.exp(-60.0*(self.grid.xr - 0.5)**2)
ac = 1.0 + np.exp(-60.0*(self.grid.x - 0.5)**2)
self.grid.a[:] = (1./6.)*(al + 4*ac + ar)
def timestep(self):
""" return the advective timestep """
return self.C*self.grid.dx/self.u
def period(self):
""" return the period for advection with velocity u """
return (self.grid.xmax - self.grid.xmin)/self.u
def states(self, dt):
""" compute the left and right interface states """
# compute the piecewise linear slopes
        g = self.grid
        slope = g.scratch_array()
if self.slope_type == "godunov":
# piecewise constant = 0 slopes
slope[:] = 0.0
elif self.slope_type == "centered":
# unlimited centered difference slopes
for i in range(g.ilo-1, g.ihi+2):
slope[i] = 0.5*(g.a[i+1] - g.a[i-1])/g.dx
elif self.slope_type == "minmod":
# minmod limited slope
for i in range(g.ilo-1, g.ihi+2):
slope[i] = minmod( (g.a[i] - g.a[i-1])/g.dx,
(g.a[i+1] - g.a[i])/g.dx )
elif self.slope_type == "MC":
# MC limiter
for i in range(g.ilo-1, g.ihi+2):
slope[i] = minmod(minmod( 2.0*(g.a[i] - g.a[i-1])/g.dx,
2.0*(g.a[i+1] - g.a[i])/g.dx ),
0.5*(g.a[i+1] - g.a[i-1])/g.dx)
elif self.slope_type == "superbee":
# superbee limiter
for i in range(g.ilo-1, g.ihi+2):
A = minmod( (g.a[i+1] - g.a[i])/g.dx,
2.0*(g.a[i] - g.a[i-1])/g.dx )
B = minmod( (g.a[i] - g.a[i-1])/g.dx,
2.0*(g.a[i+1] - g.a[i])/g.dx )
slope[i] = maxmod(A, B)
# loop over all the interfaces. Here, i refers to the left
        # interface of the zone. Note that there is one more interface
        # than there are zones
al = g.scratch_array()
ar = g.scratch_array()
for i in range(g.ilo, g.ihi+2):
# left state on the current interface comes from zone i-1
            al[i] = g.a[i-1] + 0.5*g.dx*(1.0 - self.u*dt/g.dx)*slope[i-1]
# right state on the current interface comes from zone i
            ar[i] = g.a[i] - 0.5*g.dx*(1.0 + self.u*dt/g.dx)*slope[i]
return al, ar
def riemann(self, al, ar):
"""
Riemann problem for advection -- this is simply upwinding,
but we return the flux
"""
if self.u > 0.0:
return self.u*al
else:
return self.u*ar
def update(self, dt, flux):
""" conservative update """
g = self.grid
anew = g.scratch_array()
anew[g.ilo:g.ihi+1] = g.a[g.ilo:g.ihi+1] + \
dt/g.dx * (flux[g.ilo:g.ihi+1] - flux[g.ilo+1:g.ihi+2])
return anew
def evolve(self, num_periods=1):
""" evolve the linear advection equation """
self.t = 0.0
g = self.grid
tmax = num_periods*self.period()
# main evolution loop
while self.t < tmax:
# fill the boundary conditions
g.fill_BCs()
# get the timestep
dt = self.timestep()
if self.t + dt > tmax:
dt = tmax - self.t
# get the interface states
al, ar = self.states(dt)
# solve the Riemann problem at all interfaces
flux = self.riemann(al, ar)
# do the conservative update
anew = self.update(dt, flux)
g.a[:] = anew[:]
self.t += dt
if __name__ == "__main__":
#-------------------------------------------------------------------------
# compare limiting and no-limiting
xmin = 0.0
xmax = 1.0
nx = 64
ng = 2
g = Grid1d(nx, ng, xmin=xmin, xmax=xmax)
u = 1.0
s = Simulation(g, u, C=0.7, slope_type="centered")
s.init_cond("tophat")
ainit = s.grid.a.copy()
s.evolve(num_periods=5)
plt.plot(g.x[g.ilo:g.ihi+1], ainit[g.ilo:g.ihi+1],
ls=":", label="exact")
plt.plot(g.x[g.ilo:g.ihi+1], g.a[g.ilo:g.ihi+1],
label="unlimited")
s = Simulation(g, u, C=0.7, slope_type="minmod")
s.init_cond("tophat")
s.evolve(num_periods=5)
plt.plot(g.x[g.ilo:g.ihi+1], g.a[g.ilo:g.ihi+1],
label="minmod limiter")
plt.legend(frameon=False, loc="best")
plt.xlabel(r"$x$")
plt.ylabel(r"$a$")
plt.savefig("fv-advect.pdf")
#-------------------------------------------------------------------------
# convergence test
problem = "gaussian"
xmin = 0.0
xmax = 1.0
ng = 2
N = [32, 64, 128, 256, 512]
err_god = []
err_nolim = []
err_lim = []
err_lim2 = []
u = 1.0
for nx in N:
# no limiting
gg = Grid1d(nx, ng, xmin=xmin, xmax=xmax)
sg = Simulation(gg, u, C=0.8, slope_type="godunov")
sg.init_cond("gaussian")
ainit = sg.grid.a.copy()
sg.evolve(num_periods=5)
err_god.append(gg.norm(gg.a - ainit))
# no limiting
gu = Grid1d(nx, ng, xmin=xmin, xmax=xmax)
su = Simulation(gu, u, C=0.8, slope_type="centered")
su.init_cond("gaussian")
ainit = su.grid.a.copy()
su.evolve(num_periods=5)
err_nolim.append(gu.norm(gu.a - ainit))
# MC limiting
gl = Grid1d(nx, ng, xmin=xmin, xmax=xmax)
sl = Simulation(gl, u, C=0.8, slope_type="MC")
sl.init_cond("gaussian")
ainit = sl.grid.a.copy()
sl.evolve(num_periods=5)
err_lim.append(gl.norm(gl.a - ainit))
# minmod limiting
gl2 = Grid1d(nx, ng, xmin=xmin, xmax=xmax)
sl2 = Simulation(gl2, u, C=0.8, slope_type="minmod")
sl2.init_cond("gaussian")
ainit = sl2.grid.a.copy()
sl2.evolve(num_periods=5)
err_lim2.append(gl2.norm(gl2.a - ainit))
        print(gu.dx, nx, err_nolim[-1], err_lim[-1], err_lim2[-1])
plt.clf()
N = np.array(N, dtype=np.float64)
err_nolim = np.array(err_nolim)
err_lim = np.array(err_lim)
err_lim2 = np.array(err_lim2)
plt.scatter(N, err_god, label="Godunov", color="C0")
plt.scatter(N, err_nolim, label="unlimited center", color="C1")
plt.scatter(N, err_lim, label="MC", color="C2")
plt.scatter(N, err_lim2, label="minmod", color="C3")
plt.plot(N, err_god[len(N)-1]*(N[len(N)-1]/N),
color="k", label=r"$\mathcal{O}(\Delta x)$")
plt.plot(N, err_nolim[len(N)-1]*(N[len(N)-1]/N)**2,
color="0.5", label=r"$\mathcal{O}(\Delta x^2)$")
ax = plt.gca()
ax.set_xscale('log')
ax.set_yscale('log')
plt.xlabel("N")
plt.ylabel(r"$\| a^\mathrm{final} - a^\mathrm{init} \|_2$",
fontsize=16)
plt.legend(frameon=False, loc="best", fontsize="small")
plt.savefig("plm-converge.pdf")
#-------------------------------------------------------------------------
# different limiters: run both the Gaussian and tophat
xmin = 0.0
xmax = 1.0
nx = 32
ng = 2
u = 1.0
g= Grid1d(nx, ng, xmin=xmin, xmax=xmax)
for p in ["gaussian", "tophat"]:
plt.clf()
s = Simulation(g, u, C=0.8, slope_type="godunov")
s.init_cond(p)
ainit = s.grid.a.copy()
s.evolve(num_periods=5)
plt.subplot(231)
plt.plot(g.x[g.ilo:g.ihi+1], ainit[g.ilo:g.ihi+1], ls=":")
plt.plot(g.x[g.ilo:g.ihi+1], g.a[g.ilo:g.ihi+1])
plt.title("piecewise constant")
s = Simulation(g, u, C=0.8, slope_type="centered")
s.init_cond(p)
ainit = s.grid.a.copy()
s.evolve(num_periods=5)
plt.subplot(232)
plt.plot(g.x[g.ilo:g.ihi+1], ainit[g.ilo:g.ihi+1], ls=":")
plt.plot(g.x[g.ilo:g.ihi+1], g.a[g.ilo:g.ihi+1])
plt.title("centered (unlimited)")
s = Simulation(g, u, C=0.8, slope_type="minmod")
s.init_cond(p)
ainit = s.grid.a.copy()
s.evolve(num_periods=5)
plt.subplot(233)
plt.plot(g.x[g.ilo:g.ihi+1], ainit[g.ilo:g.ihi+1], ls=":")
plt.plot(g.x[g.ilo:g.ihi+1], g.a[g.ilo:g.ihi+1])
plt.title("minmod limiter")
s = Simulation(g, u, C=0.8, slope_type="MC")
s.init_cond(p)
ainit = s.grid.a.copy()
s.evolve(num_periods=5)
plt.subplot(234)
plt.plot(g.x[g.ilo:g.ihi+1], ainit[g.ilo:g.ihi+1], ls=":")
plt.plot(g.x[g.ilo:g.ihi+1], g.a[g.ilo:g.ihi+1])
plt.title("MC limiter")
s = Simulation(g, u, C=0.8, slope_type="superbee")
s.init_cond(p)
ainit = s.grid.a.copy()
s.evolve(num_periods=5)
plt.subplot(235)
plt.plot(g.x[g.ilo:g.ihi+1], ainit[g.ilo:g.ihi+1], ls=":")
plt.plot(g.x[g.ilo:g.ihi+1], g.a[g.ilo:g.ihi+1])
plt.title("superbee limiter")
f = plt.gcf()
f.set_size_inches(10.0,7.0)
plt.tight_layout()
plt.savefig("fv-{}-limiters.pdf".format(p), bbox_inches="tight")
```
#### File: code/testcases/advect_react_testcase.py
```python
import os,sys,inspect
current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parent_dir = os.path.dirname(current_dir)
sys.path.insert(0, parent_dir)
sys.path.append(os.path.abspath('../solvers'))
sys.path.append(os.path.abspath('../solvers/numsolvers'))
import numpy as np
import matplotlib.pyplot as plt
from pdfsolver import PdfGrid
from Learning import PDElearn
from datamanage import DataIO
from montecarlo import MonteCarlo
from mc2pdf import MCprocessing
from visualization import Visualize
import time
import pdb
from __init__ import *
class Runner:
def __init__(self, loadnamenpy=None):
self.case = 'advection_reaction'
self.loadnamenpy = loadnamenpy
def solve(self, testplot=False):
nx = 320
C = .4
x_range = [0.0, 13]
tmax = 1.5
dt = 0.02
ka = 0.6
kr = 1.0
coeffs = [ka, kr]
mu = 5.7
mu_var = .5
sig = .4
sig_var = 0.01
amp = .2
amp_var = 0.01
shift = 0.0
shift_var = 0.0
num_realizations = 3
debug = False
params = [[mu, mu_var], [sig, sig_var], [amp, amp_var], [shift, shift_var]]
MC = MonteCarlo(case=self.case, num_realizations=num_realizations, x_range=x_range, tmax=tmax, debug=debug, nx=nx, C=C)
samples = MC.sampleInitialCondition("gaussians", params=params)
MC.dt = dt # Artificially make dt smaller
if testplot:
MC.plot_extremes_advreact(samples, coeffs=coeffs)
savename = MC.multiSolve(samples, params, coeffs=coeffs)
# FIX THAT
return savename
# TODO: ADD THIS FUNCTION TO PdfGrid
def adjust(self, fu, gridvars, adjustparams):
mx = adjustparams['mx']
mu = adjustparams['mu']
mt = adjustparams['mt']
period = adjustparams['period']
tt = np.linspace(gridvars['t'][0], gridvars['t'][1], int(round( (gridvars['t'][1] - gridvars['t'][0]) / gridvars['t'][2] )))
xx = np.linspace(gridvars['x'][0], gridvars['x'][1], int(round( (gridvars['x'][1] - gridvars['x'][0]) / gridvars['x'][2] )))
uu = np.linspace(gridvars['u'][0], gridvars['u'][1], int(round( (gridvars['u'][1] - gridvars['u'][0]) / gridvars['u'][2] )))
lu = len(uu)
lx = len(xx)
lt = len(tt)
# Take only a portion
uu = uu[mu[0]:lu-mu[1]]
xx = xx[mx[0]:lx-mx[1]]
tt = tt[mt[0]:lt-mt[1]]
fu = fu[mu[0]:lu-mu[1], mx[0]:lx-mx[1], mt[0]:lt-mt[1]]
#decrease time frequency
indexes = np.array([i*period for i in range(len(tt)//period)])
tt = tt[indexes]
fu = fu[:, :, indexes]
gridvars['t'][0] = tt[0]
gridvars['t'][1] = tt[-1]
gridvars['t'][2] = (tt[-1]-tt[0])/len(tt)
gridvars['x'][0] = xx[0]
gridvars['x'][1] = xx[-1]
gridvars['x'][2] = (xx[-1]-xx[0])/len(xx)
gridvars['u'][0] = uu[0]
gridvars['u'][1] = uu[-1]
gridvars['u'][2] = (uu[-1]-uu[0])/len(uu)
return fu, gridvars
def analyze(self, adjust=False, plot=False, learn=False, adjustparams={}, learnparams={'feature_opt':'1storder', 'coeforder':1}):
dataman = DataIO(self.case)
fu, gridvars, ICparams = dataman.loadSolution(self.loadnamenpy, array_opt='marginal')
##Make fu smaller (in time)
if adjust:
fu, gridvars = self.adjust(fu, gridvars, adjustparams)
grid = PdfGrid(gridvars)
if plot:
V = Visualize(grid)
V.plot_fu3D(fu)
V.plot_fu(fu, dim='t', steps=5)
V.plot_fu(fu, dim='x', steps=5)
V.show()
if learn:
t0 = time.time()
print('fu dimension: ', fu.shape)
print('fu num elem.: ', np.prod(fu.shape))
feature_opt = learnparams['feature_opt']
coeforder = learnparams['coeforder']
sindy_alpha = learnparams['sindy_alpha']
RegCoef = learnparams['RegCoef']
nzthresh = learnparams['nzthresh']
# Learn
difflearn = PDElearn(grid=grid, fu=fu, ICparams=ICparams, scase=self.case, trainratio=0.8, debug=False, verbose=True)
difflearn.fit_sparse(feature_opt=feature_opt, variableCoef=True, variableCoefBasis='simple_polynomial', \
variableCoefOrder=coeforder, use_sindy=True, sindy_alpha=sindy_alpha, RegCoef=RegCoef, nzthresh=nzthresh)
            print('learning took t = ', str(time.time() - t0))
if __name__ == "__main__":
#loadnamenpy = 'advection_reaction_3060.npy' # CDF
#loadnamenpy = 'advection_reaction_6536.npy' # PDF
loadnamenpy = 'advection_reaction_8285.npy' # PDF
loadnamenpy = 'advection_reaction_2563.npy' # PDF
loadnamenpy = 'advection_reaction_9279.npy' # PDF
loadnamenpy = 'advection_reaction_6477.npy' # PDF
loadnamenpy = 'advection_reaction_6977.npy' # g=u, PDF
loadnamenpy = 'advection_reaction_3124.npy' # g=u, CDF
#savenameMC = 'advection_reaction500.npy'
savenameMC = 'advection_reaction1200.npy'
savenameMC = 'advection_reaction1300.npy'
savenameMC = 'advection_reaction2500.npy' # g = u**2
savenameMC = 'advection_reaction2100.npy' # g = u
R = Runner()
R.loadnamenpy = loadnamenpy
if len(sys.argv)>1:
if sys.argv[1] == 'solve':
savenameMC = R.solve(testplot=False)
print(savenameMC)
else:
print('some invalid argument')
else:
#f = open("log.out", 'w+')
#sys.stdout = f
buildkde = False
kdedx = False
adjust = True
plot = False
learn = True
nu = 250
u_margin = 0.0
distribution='CDF'
period = 1
mu = [30, 0]
mx = [0, 0]
mt = [0, 0]
feature_opt = '1storder'
coeforder = 2
sindy_alpha = 0.01
nzthresh = 1e-90
RegCoef = 0.000004
if buildkde:
MCprocess = MCprocessing(savenameMC, case=R.case)
kde = MCprocess.buildKDE_deltaX if kdedx else MCprocess.buildKDE
a, b, c, savenamepdf = kde(nu, plot=plot, save=True, u_margin=u_margin, bandwidth='scott', distribution=distribution)
loadnamenpy = savenamepdf + '.npy'
print(loadnamenpy)
R.loadnamenpy = loadnamenpy
aparams = {'mu':mu, 'mx':mx, 'mt':mt, 'period':period}
learnparams = {'feature_opt':feature_opt, 'coeforder':coeforder, 'sindy_alpha':sindy_alpha, 'RegCoef':RegCoef, 'nzthresh':nzthresh}
R.analyze(adjust=adjust, plot=plot, learn=learn, adjustparams=aparams, learnparams=learnparams)
#f.close()
```
#### File: code/testcases/simple_learn.py
```python
import os,sys,inspect
current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parent_dir = os.path.dirname(current_dir)
sys.path.insert(0, parent_dir)
import numpy as np
import matplotlib.pyplot as plt
from pdfsolver import PdfSolver, PdfGrid
from Learning import PDElearn
from datamanage import DataIO
from visualization import Visualize
from scipy.signal import savgol_filter
from sklearn.metrics import mean_squared_error
import time
import pdb
from __init__ import *
################################
####### Load and Learn #########
def advection_reaction():
loadnamenpy = 'advection_reaction_9987.npy' # PDF - gaussians
#loadnamenpy = 'advection_reaction_5739.npy' # PDF - gaussians
case = '_'.join(loadnamenpy.split('_')[:2])
dataman = DataIO(case)
fu, gridvars, ICparams = dataman.loadSolution(loadnamenpy)
# Make fu smaller (in time)
tt = np.linspace(gridvars['t'][0], gridvars['t'][1], round( (gridvars['t'][1] - gridvars['t'][0]) / gridvars['t'][2] ))
period = 6
indexes = np.array([i*period for i in range((len(tt))//period)])
ttnew = tt[indexes]
fu = fu[:, :, indexes]
gridvars['t'][1] = ttnew[-1]
gridvars['t'][2] = (ttnew[-1]-ttnew[0])/len(ttnew)
grid = PdfGrid(gridvars)
# Learn
difflearn = PDElearn(grid=grid, fu=fu, ICparams=ICparams, scase=case, trainratio=0.8, debug=False, verbose=True)
difflearn.fit_sparse(feature_opt='1storder', variableCoef=True, variableCoefBasis='simple_polynomial', variableCoefOrder=2, use_sindy=True, sindy_alpha=0.005, shuffle=False)
def burgers():
loadnamenpy = 'burgersMC_9601.npy' # PDF - triangles
loadnamenpy = 'burgersMC_6095.npy' # CDF - triangles
loadnamenpy = 'burgersMC_4147.npy' # PDF - gaussians
#loadnamenpy = 'burgersMC_5042.npy' # CDF - gaussians
case = loadnamenpy.split('_')[0]
dataman = DataIO(case)
fu, gridvars, ICparams = dataman.loadSolution(loadnamenpy)
grid = PdfGrid(gridvars)
# Learn
difflearn = PDElearn(grid=grid, fu=fu, ICparams=ICparams, scase=case, trainratio=0.7, debug=False, verbose=True)
difflearn.fit_sparse(feature_opt='1storder', variableCoef=True, variableCoefBasis='simple_polynomial', variableCoefOrder=1, use_sindy=True, sindy_alpha=0.01, shuffle=False)
def advection():
#loadnamenpy = 'advection_marginal_7397.npy'
loadnamenpy = 'advection_marginal_6328.npy'
loadnamenpy = 'advection_marginal_8028.npy'
loadnamenpy = 'advection_marginal_5765.npy'
#loadnamenpy = 'advection_marginal_4527.npy'
case = '_'.join(loadnamenpy.split('_')[:2])
dataman = DataIO(case)
fuk, fu, gridvars, ICparams = dataman.loadSolution(loadnamenpy)
grid = PdfGrid(gridvars)
V = Visualize(grid)
V.plot_fuk3D(fuk)
V.plot_fu3D(fu)
V.plot_fu(fu, dim='t', steps=5)
V.plot_fu(fu, dim='x', steps=5)
V.show()
# Learn
difflearn = PDElearn(fuk, grid, fu=fu, ICparams=ICparams, scase=case, trainratio=0.8, debug=False, verbose=True)
difflearn.fit_sparse(feature_opt='2ndorder', variableCoef=True, variableCoefBasis='simple_polynomial', variableCoefOrder=3, use_sindy=True, sindy_alpha=0.001)
def reaction():
#loadnamenpy = 'reaction_linear_2204.npy'
#loadnamenpy = 'reaction_linear_6632.npy'
loadnamenpy = 'reaction_linear_5966.npy'
case = '_'.join(loadnamenpy.split('_')[:2])
dataman = DataIO(case)
fu, gridvars, ICparams = dataman.loadSolution(loadnamenpy)
grid = PdfGrid(gridvars)
# Learn
difflearn = PDElearn(grid=grid, fu=fu, ICparams=ICparams, scase=case, trainratio=0.8, debug=False, verbose=True)
difflearn.fit_sparse(feature_opt='1storder', variableCoef=True, variableCoefBasis='simple_polynomial', variableCoefOrder=2, use_sindy=True, sindy_alpha=0.1)
if __name__ == "__main__":
if len(sys.argv)>1:
if sys.argv[1] == 'reaction':
reaction()
elif sys.argv[1] == 'advection':
advection()
elif sys.argv[1] == 'burgers':
burgers()
elif sys.argv[1] == 'advection_reaction':
advection_reaction()
else:
raise exception("wrong option")
else:
burgers()
``` |
{
"source": "josephbakarji/online-viral-data",
"score": 3
} |
#### File: josephbakarji/online-viral-data/scraper.py
```python
from bs4 import BeautifulSoup
import re
import urllib3
from random import randrange
import json
from random import randint
from tabulate import tabulate
from datetime import datetime
import numpy as np
import pdb
import sys
from __init__ import *
def scrapeWorldometer():
# Get html file using urllib
http = urllib3.PoolManager()
homepage = "https://www.worldometers.info/coronavirus/"
home_html3 = http.request('GET', homepage)
# Parse with beautifulsoup
soup = BeautifulSoup(home_html3.data, "html.parser")
table = soup.find(lambda tag: tag.name=='table' and tag.has_attr('id') and tag['id']=="main_table_countries_today")
rows = table.find_all(lambda tag: tag.name=='tr')
# Get headers (automatically) - NOT USED
header = rows[0].find_all(lambda tag: tag.name=='th')
header_list = []
for h in header:
header_list.append( h.renderContents().strip() )
# Parse main table
crows = table.find_all(lambda tag: tag.name=='td')
embeddings = ["a", "span", "strong"]
all_cols = [[] for i in range(len(MAINTABLE_HEAD))]
for i, row in enumerate(rows):
tds = row.find_all("td")
col = []
for jdx, elem in enumerate(tds):
found_emb = False
for emb in embeddings:
if elem.find(emb) is not None:
text = elem.find(emb).renderContents().strip()
found_emb = True
if not found_emb:
text = elem.renderContents().strip()
all_cols[jdx].append( text.decode("utf-8") )
# Transpose
all_rows = map(list, zip(*all_cols))
# convert strings to numbers
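    # Illustrative examples of the conversion below (hypothetical cell values):
    # '1,234' -> 1234, '3.5' -> 3.5, '' -> 0; the first column (jdx == 0, the
    # country name) is kept as a string.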
new_all_rows = []
for idx, row in enumerate(all_rows):
new_row = []
for jdx, elem in enumerate(row):
if jdx != 0:
if elem == '':
elem = 0
elif ',' in elem:
elem = int(''.join(elem.split(',')))
elif '.' in elem:
elem = float(elem)
else:
elem = int(elem)
new_row.append(elem)
new_all_rows.append(new_row)
# Save the data
## IF file is empty use this line
# data = {str(datetime.now()): new_all_rows}
with open(SAVEFILE) as jsonfile:
data = json.load(jsonfile)
data[str(datetime.now())] = new_all_rows
with open(SAVEFILE, 'w') as jsonfile:
json.dump(data, jsonfile)
if __name__ == "__main__":
scrapeWorldometer()
``` |
{
"source": "josephbaran/NN-Backpropagation-Implementation",
"score": 3
} |
#### File: josephbaran/NN-Backpropagation-Implementation/ANN.py
```python
import numpy as np
from numpy import random
#--------------------------------Defining Functions---------------------------#
#--- Cost Function
def cost(pre, des):
res = 1/2*((abs(des-pre))**2)
if(len(res) > 1):
res = sum(res)
return res
#--- Derivative of Cost Function
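# Added note: for the quadratic cost 1/2*|des - pre|^2 defined above, the gradient
# with respect to the prediction is (pre - des), which is what der_cost below returns.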
def der_cost(pre, des):
return pre-des
#--- Tanh Activation Function
def tanh(x, derv = False):
temp = (np.exp(x) - np.exp(-x)) / (np.exp(x) + np.exp(-x))
if(not derv):
return temp
else:
return 1 - temp**2
#--- Sigmoid Activation Function
def sigmoid(x, derv = False):
temp = 1.0 / (1.0 + np.exp(-x))
if (not derv):
return temp
else:
return temp * (1-temp)
#--- Activation Switcher
def actFuncSwitcher(funcName = 'sigmoid'):
return {
'sigmoid' : sigmoid,
'tanh' : tanh,
}[funcName]
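# Illustrative usage (hypothetical values): actFuncSwitcher('tanh') returns the tanh
# function itself, so actFuncSwitcher('tanh')(0.0) == 0.0 and
# actFuncSwitcher('sigmoid')(0.0) == 0.5.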
#--- Function to randomly initialize weights and biases
def create_network(ınputs, expResult, nOfHidden, hidLayout):
nOfInput = len(ınputs)
nOfOut = len(expResult)
# If the number of nodes in the hidden layers is not given,
# it is set equal to the number of nodes in the input layer.
if(type(hidLayout) == type(None)):
hidLayout = np.array([nOfInput+2] * nOfHidden)
#--- All Node Counts (Inputs - Hiddens - Outputs)
allNodes = np.concatenate((nOfInput, hidLayout, nOfOut), axis=None)
nLoop = len(allNodes)-1
#--- Initialize Weights and Biases
allBiases = []
allWeights = []
for i in range(nLoop):
allBiases.append(random.rand(allNodes[i+1]))
allWeights.append(random.rand(allNodes[i], allNodes[i+1]))
return allWeights,allBiases,allNodes
#--- Calculate Forward Propagation
def forward(ınputs, allWeights, allBiases, actFunc):
actIn = []
actOut = []
actIn.append(ınputs)
for i in range(len(allBiases)):
#---For Inputs
if(i==0):
temp = np.sum((allWeights[i].T * actIn[i]).T ,axis =0) + allBiases[i]
else:
temp = np.sum((allWeights[i].T * actOut[i-1]).T ,axis =0) + allBiases[i]
actIn.append(temp)
actOut.append(actFunc(temp))
actOut = [ınputs] + actOut
return actIn,actOut
#--- Calculate Back Propagation
def backward(allWeights, allBiases, expResult, actIn, actOut, actFunc):
# Derivatives to be transferred to the next layer
propDer = [x-x+1 for x in allWeights]
# Biase Derivatives
biaseDivs = [x-x+1 for x in allBiases]
# Weight Derivatives
weightDivs = [x-x+1 for x in allWeights]
#--- For output layer derivatives
for i in range(len(allWeights), 0, -1):
if(i == (len(allWeights))):
#--- Calculate cost'(prediction - desired) * sig'(actIn^1,1)
# and multiply all weight derivatives
derv = der_cost(actOut[-1], expResult) * (actFunc(actIn[-1], derv = True))
if(len(derv) > 1):
temp = propDer[-1]
for k in range(temp.shape[0]):
temp[k] = derv
propDer[-1] = temp
weightDivs[i-1] = temp * np.tile(actOut[-2],(temp.shape[1],1)).T
else:
weightDivs[i-1] = derv * actOut[-2]
biaseDivs[-1] = derv
else:
#---Multiplication of derivatives,
#--- act'(a1*w1+a2*w2+.....+b) and
#--- weight values from the previous layer
temp = np.sum(allWeights[i]*propDer[i],axis=1) * actFunc(actIn[i], derv = True)
#--- Biase derivatives calculated for the current layer in the loop
biaseDivs [i-1] = temp
temp = np.tile(temp, (propDer[i-1].shape[0], 1))
propDer[i-1] = propDer[i-1]*temp
#--- Weight derivatives calculated for the current layer in the loop
weightDivs[i-1] = temp * np.tile(actOut[i-1],(temp.shape[1],1)).T
return weightDivs, biaseDivs
#--- Update Parameters
def update (weightDivs, biaseDivs, allWeights, allBiases, learnRate):
    #--- If the output layer has a single node
if(allWeights[-1].shape[0] > 1 and len(allBiases[-1]) == 1):
allWeights[-1] = allWeights[-1].T
for i in range(len(weightDivs)):
allWeights[i] = allWeights[i] - (learnRate * weightDivs[i])
allBiases[i] = allBiases[i] - (learnRate * biaseDivs[i])
allWeights[-1] = allWeights[-1].T
else:
for i in range(len(weightDivs)):
allWeights[i] = allWeights[i] - (learnRate * weightDivs[i])
allBiases[i] = allBiases[i] - (learnRate * biaseDivs[i])
return allWeights, allBiases
#--- Training Network
#-- Training stops when either of two conditions is met, whichever comes first:
#-- the error drops below errorThres, or the maximum number of epochs is reached.
#-- In other words, if the desired error value cannot be achieved within the
#-- given number of epochs, training is stopped anyway.
#' @param ınputs Inputs
#' @param expResult Expected Results
#' @param nOfHidden Sets the number of hidden layers if the layer structure is not given
#' @param epochs If the first criterion is not met, the value to be checked,
#' that is, the maximum number of iterations
#' @param learnRate learning rate
#' @param errorThres the first criterion for the algorithm to stop
#' @param activation determines the activation function. ('tanh' or 'sigmoid')
#' @param hidLayout It determines the structure of hidden layers.If no value is
#' given, it creates 3 layers with two more nodes than the number of inputs.
#' @param verbose sets the information messages
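#' @examples Illustrative sketch only (hypothetical sizes, not part of the original run):
#'   ınputs = random.rand(4); expResult = random.rand(2)
#'   w, b, act, err = trainNet(ınputs, expResult, nOfHidden=2, epochs=500, verbose=False)
#'   prediction = forward(ınputs, w, b, act)[1][-1]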
def trainNet (ınputs,
expResult,
nOfHidden = 3,
epochs = 1000,
learnRate = 0.5,
errorThres = 1e-3,
activation = 'sigmoid',
hidLayout = None,
verbose = False):
#--- Selected Activation Function
actFunc = actFuncSwitcher(funcName = activation)
#--- Creating Network
net = create_network(ınputs, expResult, nOfHidden, hidLayout)
allWeights = net[0]
allBiases = net[1]
#---
ıo = forward(ınputs, allWeights, allBiases, actFunc)
if(verbose): print('\n'+'\33[41m' + ' >>>>>>>>>>>>>>>>>>>> Training Starting <<<<<<<<<<<<<<<<<<<< \n' + '\033[0m')
err = 1
epoch = 1
# It works until the desired error level is achieved or the number of epochs.
while (err > errorThres and epoch <= epochs):
#--- Back Propagation
bp = backward(allWeights = allWeights,
allBiases= allBiases,
expResult = expResult,
actIn = ıo[0],
actOut = ıo[1],
actFunc = actFunc)
#--- Updating Parameters
up = update(weightDivs = bp[0],
biaseDivs = bp[1],
allWeights = allWeights,
allBiases = allBiases,
learnRate = learnRate)
#--- Make Prediction with updated parameters
allBiases = up[1]
allWeights = up[0]
ıo = forward(ınputs = ınputs,
allWeights = allWeights,
allBiases = allBiases,
actFunc = actFunc)
prediction = ıo[1][-1]
err = cost(prediction, expResult)
if(verbose and (epoch % 100 == 0)): print('\33[36m' + ' -->> Epoch = %d, learning Rate = %.2f, Error = %.10f' % (epoch, learnRate, err) )
epoch = epoch +1
#--- Printing Results
if(verbose):
print('\033[0m')
print('\n'+'\33[41m' + ' >>>>>>>>>>>>>>>>>>>>>>> Training Results <<<<<<<<<<<<<<<<<<<<<<< \n' + '\033[0m')
print('\033[36m' + ' -->> Error : %.8f\n' %(err))
for i in range(len(expResult)):
print('\33[36m' + ' -->> Exp Result_%d = %.4f' % (i+1,expResult[i]) ,'\33[33m',' Prediction_%d = %.4f' % (i+1,ıo[1][-1][i]) )
return allWeights, allBiases, actFunc, err
#-------------------------------- Example ------------------------------------#
#--- Example
# This example creates 15 random ınput and outputs
# The number of hidden layers is 15 and there are 20 neurons in each layer.
ınputs = random.rand(15)
expResult = random.rand(15)
nOfHidden = 15
layout = np.repeat(20, nOfHidden)
#--- Train Network
trainRes = trainNet(ınputs,
expResult,
learnRate = 0.9,
activation = 'sigmoid',
hidLayout = layout,
epochs = 50000,
errorThres = 1e-10,
verbose = True)
#--- Make Prediction
prediction = forward(ınputs, trainRes[0], trainRes[1], trainRes[2])
result = prediction[1][-1]
``` |
{
"source": "josephbb/Collective-wisdom-in-polarized-groups",
"score": 3
} |
#### File: Collective-wisdom-in-polarized-groups/src/demographics.py
```python
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
def demographics_exp_1(raw):
"""Plots demographics for experiment 1"""
    #Set some basic plotting parameters.
    sns.set_style('white')
    sns.set_context('paper',font_scale=1.5)
    #Load the unparsed data
    unparsed = raw
#Plot age demographics
plt.subplot(4,1,1)
sns.countplot(y=unparsed.age,color='grey',order=['18-24','25-34','35-44','45-54','55-64','65+'])
plt.ylabel('')
plt.xlabel('')
#Plot gender demographics
plt.subplot(4,1,2)
sns.countplot(y=unparsed.gender,color='grey')
plt.ylabel('')
plt.xlabel('')
plt.subplot(4,1,3)
#Plot education demographics
sns.countplot(y=unparsed.education,color='grey',order=['Some High School','High School',
'Some College','College','Graduate Degree or Higher'])
plt.ylabel('')
plt.xlabel('')
#Plot political leaning demographics
plt.subplot(4,1,4)
sns.set_palette(sns.diverging_palette(10, 220, sep=80, n=5,l=40,center='light'))
sns.countplot(y=unparsed.politics,
order=['Very Conservative','Conservative','Moderate','Liberal','Very Liberal'],)
plt.ylabel('')
plt.tight_layout()
return plt.gcf()
def demographics_exp_2(raw):
"""Plots demographics for experiment 2"""
#Load the unparsed data
unparsed = pd.DataFrame(([item[1].iloc[0] for item in raw.groupby('cintID')]))
#Plot age demographics
plt.subplot(4,1,1)
sns.countplot(y=unparsed.age,color='grey',order=['18-24','25-34','35-44','45-54','55-64','65+'])
plt.ylabel('')
plt.xlabel('')
#Plot gender demographics
plt.subplot(4,1,2)
sns.countplot(y=unparsed.gender,color='grey')
plt.ylabel('')
plt.xlabel('')
plt.subplot(4,1,3)
#Plot education demographics
sns.countplot(y=unparsed.education,color='grey',order=['Some High School','High School',
'Some College','College','Graduate Degree or Higher'])
plt.ylabel('')
plt.xlabel('')
#Plot political leaning demographics
plt.subplot(4,1,4)
sns.set_palette(sns.diverging_palette(10, 220, sep=80, n=5,l=40,center='light'))
sns.countplot(y=unparsed.politics,
order=['Very Conservative','Conservative','Moderate','Liberal','Very Liberal'],)
plt.ylabel('')
plt.tight_layout()
```
#### File: Collective-wisdom-in-polarized-groups/src/exp_1_figures.py
```python
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import gaussian_kde
pal = sns.diverging_palette(10, 220, sep=80, n=5,l=40,center='light')
pal2 = sns.diverging_palette(10, 220, sep=80, n=5,l=40,center='dark')
pal[2] = pal2[2]
def ilogit(x):
return 1/(1+np.exp(-x))
def plot_figure1a(true_data, false_data):
#Basic figure paramters
sns.set_context('paper', font_scale=1.5)
#Plot distributions, adjust legend etc...
sns.distplot(true_data.groupby(['states']).mean()['correct_start'],hist_kws=dict(histtype='stepfilled',alpha=.9,ec="k"),
color='white',bins=np.linspace(0,1,10),label='True',kde=False)
sns.distplot(false_data.groupby(['states']).mean()['correct_start'],hist_kws=dict(histtype='stepfilled',alpha=.8,ec="k"),
color='grey',bins=np.linspace(0,1,10),label='False',kde=False)
plt.yticks(np.linspace(0,25,6))
plt.xlim(0,1)
plt.xlabel('Proportion correct')
plt.ylabel('Count')
#Save figure
plt.tight_layout()
def joint_hpdi(samples_extracted):
for idx in range(5):
x = samples_extracted['alpha_p'][:,idx]
y = samples_extracted['beta_p'][:,idx]
k = gaussian_kde(np.vstack([x, y]))
xi, yi = np.mgrid[x.min():x.max():x.size**0.5*1j,y.min():y.max():y.size**0.5*1j]
zi = k(np.vstack([xi.flatten(), yi.flatten()]))
#set zi to 0-1 scale
zi = (zi-zi.min())/(zi.max() - zi.min())
zi =zi.reshape(xi.shape)
#set up plot
origin = 'lower'
levels = [.11,1]
CS = plt.contourf(xi, yi, zi,levels = levels,
shade=True,
linewidths=(1,),
alpha=.5,
colors=[pal[idx], pal[idx]],
origin=origin)
plt.xlabel('Intercept')
plt.ylabel('Effect of \nconfidence')
plt.ylim(-1.5,1.5)
plt.xlim(-1,1)
plt.xticks(np.linspace(-1.5,1.5,5))
plt.xticks(np.linspace(-1.5,1.5,5))
def plot_figure1b(samples_extracted,stan_data_logistic):
x = np.linspace(np.min(stan_data_logistic['x']),np.max(stan_data_logistic['x']),10)
for idx in range(5):
y = np.array([samples_extracted['alpha_p'][:,idx] + samples_extracted['beta_p'][:,idx] * item for item in x])
y = ilogit(y)
cis = np.percentile(y, q=[5.5,94.5],axis=1)
plt.plot(50*(x/2+.5)+50, np.mean(y, axis=1),color=pal[idx])
plt.fill_between(50*(x/2+.5)+50, cis[0,:], cis[1,:],alpha=.3,color=pal[idx])
plt.ylim(.2,.8)
plt.xlim(50,100)
plt.ylabel('Accuracy')
plt.xlabel('Reported confidence')
def plot_fig1cd(stan_model_data, df, samples, correct=True):
x = np.linspace(-.5, .5, 100)
x_transformed = (x+.5)*100
for idx in range(5):
avg_conf = np.mean(stan_model_data['confidence'][df['pol_recode']==idx+1])
y = np.array([ilogit(samples['alpha_p'][:,idx] + \
samples['b_conf_p'][:,idx] * avg_conf +\
samples['b_socConf_p'][:,idx] * item) for item in x])
if correct:
plt.plot(x_transformed, np.mean(y,axis=1),color=pal[idx])
ci = np.percentile(y, axis=1, q=[5.5,94.5])
plt.fill_between(x_transformed, ci[0], ci[1], color=pal[idx],alpha=.3)
else:
plt.plot(x_transformed[::-1], np.mean(y,axis=1),color=pal[idx])
ci = np.percentile(y, axis=1, q=[5.5,94.5])
plt.fill_between(x_transformed[::-1], ci[0], ci[1], color=pal[idx],alpha=.3)
plt.ylabel('Probability of switching')
plt.ylim(0,1)
plt.xlim(0,100)
plt.xlabel('Social disagreement')
def plot_switch_predicted_acuracy(data, switch_samples, correct=True):
extracted_switch_samples_correct = switch_samples.extract(['alpha_p',
'b_conf_p',
'b_socConf_p',
'yhat'])
correct_data = data[data['correct_start']==correct]
pal[2] = pal2[2]
sns.set_context('paper', font_scale=1.5)
correct_data['yhat'] = np.mean(extracted_switch_samples_correct['yhat'],axis=0)
grouped = correct_data.groupby(['pol_recode']).mean().reset_index()
plt.scatter(grouped['yhat'], grouped['switched'],color=pal,s=100)
plt.plot([0,1], [0,1], ls='--', color='black')
plt.ylim(0.15, 0.4)
plt.xlim(0.15, 0.4)
plt.yticks(np.linspace(.15,.4,6))
plt.yticks(np.linspace(.15,.4,6))
plt.xlabel('Predicted \nswitching')
plt.ylabel('Observed \nswitching')
np.percentile(extracted_switch_samples_correct['yhat'],axis=1, q=[5.5, 94.5])
```
#### File: Collective-wisdom-in-polarized-groups/src/utils.py
```python
import numpy as np
import pandas as pd
def pickle_model(model, samples, model_location, samples_location, model_name):
try:
import cPickle as pickle
except ModuleNotFoundError:
import pickle
with open(model_location+model_name+'_model.p', 'wb') as output:
pickle.dump(model, output, pickle.HIGHEST_PROTOCOL)
with open(samples_location+model_name+'_samples.p', 'wb') as output:
pickle.dump(samples, output, pickle.HIGHEST_PROTOCOL)
def load_model(model_location, samples_location, model_name):
try:
import cPickle as pickle
except ModuleNotFoundError:
import pickle
with open(model_location+model_name+'_model.p', 'rb') as input:
model = pickle.load(input)
with open(samples_location+model_name+'_samples.p', 'rb') as input:
samples = pickle.load(input)
return model, samples
def make_latex_table(samples, variables, q=[5.5,50.0,94.5]):
dfs = []
for var in variables:
qs = np.percentile(samples[var], axis=0, q=q)
item = pd.DataFrame({'variable': np.repeat(var, samples[var].shape[1]),
'Mean':np.mean(samples[var], axis=0),
'sd':np.std(samples[var], axis=0),
str(q[0]) + '%':qs[0],
str(q[1]) + '%':qs[1],
str(q[2]) + '%':qs[2]})
dfs.append(item)
return pd.concat(dfs,sort=False).to_latex(index=False)
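# Illustrative usage (hypothetical names): given extracted Stan samples containing a
# vector parameter 'alpha_p', make_latex_table(samples, ['alpha_p']) returns a LaTeX
# tabular with the posterior mean, sd and 89% interval of each component.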
def save_latex_table(directory, name, table_string):
with open(directory+'/'+name, "w") as text_file:
text_file.write(table_string)
``` |